Diffstat (limited to 'drivers/net/ethernet/ibm/ehea')
-rw-r--r--  drivers/net/ethernet/ibm/ehea/Makefile             6
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea.h              504
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_ethtool.c      295
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_hw.h           292
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c        3768
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_phyp.c         626
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_phyp.h         467
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.c         1031
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_qmr.h          404
9 files changed, 7393 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/Makefile b/drivers/net/ethernet/ibm/ehea/Makefile
new file mode 100644
index 000000000000..775d9969b5c2
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the eHEA ethernet device driver for IBM eServer System p
3#
4ehea-y = ehea_main.o ehea_phyp.o ehea_qmr.o ehea_ethtool.o
5obj-$(CONFIG_EHEA) += ehea.o
6
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
new file mode 100644
index 000000000000..7dd5e6a0d998
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -0,0 +1,504 @@
1/*
2 * linux/drivers/net/ehea/ehea.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_H__
30#define __EHEA_H__
31
32#include <linux/module.h>
33#include <linux/ethtool.h>
34#include <linux/vmalloc.h>
35#include <linux/if_vlan.h>
36#include <linux/inet_lro.h>
37
38#include <asm/ibmebus.h>
39#include <asm/abs_addr.h>
40#include <asm/io.h>
41
42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0107"
44
45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1
47#define DLPAR_MEM_ADD 2
48#define DLPAR_MEM_REM 4
49#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM)
50
51#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
53
54#define EHEA_MAX_ENTRIES_RQ1 32767
55#define EHEA_MAX_ENTRIES_RQ2 16383
56#define EHEA_MAX_ENTRIES_RQ3 16383
57#define EHEA_MAX_ENTRIES_SQ 32767
58#define EHEA_MIN_ENTRIES_QP 127
59
60#define EHEA_SMALL_QUEUES
61#define EHEA_NUM_TX_QP 1
62#define EHEA_LRO_MAX_AGGR 64
63
64#ifdef EHEA_SMALL_QUEUES
65#define EHEA_MAX_CQE_COUNT 1023
66#define EHEA_DEF_ENTRIES_SQ 1023
67#define EHEA_DEF_ENTRIES_RQ1 4095
68#define EHEA_DEF_ENTRIES_RQ2 1023
69#define EHEA_DEF_ENTRIES_RQ3 1023
70#else
71#define EHEA_MAX_CQE_COUNT 4080
72#define EHEA_DEF_ENTRIES_SQ 4080
73#define EHEA_DEF_ENTRIES_RQ1 8160
74#define EHEA_DEF_ENTRIES_RQ2 2040
75#define EHEA_DEF_ENTRIES_RQ3 2040
76#endif
77
78#define EHEA_MAX_ENTRIES_EQ 20
79
80#define EHEA_SG_SQ 2
81#define EHEA_SG_RQ1 1
82#define EHEA_SG_RQ2 0
83#define EHEA_SG_RQ3 0
84
85#define EHEA_MAX_PACKET_SIZE 9022 /* for jumbo frames */
86#define EHEA_RQ2_PKT_SIZE 1522
87#define EHEA_L_PKT_SIZE 256 /* low latency */
88
89#define MAX_LRO_DESCRIPTORS 8
90
91/* Send completion signaling */
92
93/* Protection Domain Identifier */
94#define EHEA_PD_ID 0xaabcdeff
95
96#define EHEA_RQ2_THRESHOLD 1
97#define EHEA_RQ3_THRESHOLD 9 /* use RQ3 threshold of 1522 bytes */
98
99#define EHEA_SPEED_10G 10000
100#define EHEA_SPEED_1G 1000
101#define EHEA_SPEED_100M 100
102#define EHEA_SPEED_10M 10
103#define EHEA_SPEED_AUTONEG 0
104
105/* Broadcast/Multicast registration types */
106#define EHEA_BCMC_SCOPE_ALL 0x08
107#define EHEA_BCMC_SCOPE_SINGLE 0x00
108#define EHEA_BCMC_MULTICAST 0x04
109#define EHEA_BCMC_BROADCAST 0x00
110#define EHEA_BCMC_UNTAGGED 0x02
111#define EHEA_BCMC_TAGGED 0x00
112#define EHEA_BCMC_VLANID_ALL 0x01
113#define EHEA_BCMC_VLANID_SINGLE 0x00
114
115#define EHEA_CACHE_LINE 128
116
117/* Memory Regions */
118#define EHEA_MR_ACC_CTRL 0x00800000
119
120#define EHEA_BUSMAP_START 0x8000000000000000ULL
121#define EHEA_INVAL_ADDR 0xFFFFFFFFFFFFFFFFULL
122#define EHEA_DIR_INDEX_SHIFT 13 /* 8k Entries in 64k block */
123#define EHEA_TOP_INDEX_SHIFT (EHEA_DIR_INDEX_SHIFT * 2)
124#define EHEA_MAP_ENTRIES (1 << EHEA_DIR_INDEX_SHIFT)
125#define EHEA_MAP_SIZE (0x10000) /* currently fixed map size */
126#define EHEA_INDEX_MASK (EHEA_MAP_ENTRIES - 1)
127
128
129#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
130
131/* utility functions */
132
133void ehea_dump(void *adr, int len, char *msg);
134
135#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
136
137#define EHEA_BMASK_IBM(from, to) (((63 - (to)) << 16) + ((to) - (from) + 1))
138
139#define EHEA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
140
141#define EHEA_BMASK_MASK(mask) \
142 (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
143
144#define EHEA_BMASK_SET(mask, value) \
145 ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
146
147#define EHEA_BMASK_GET(mask, value) \
148 (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
149
150/*
151 * Generic ehea page
152 */
153struct ehea_page {
154 u8 entries[PAGE_SIZE];
155};
156
157/*
158 * Generic queue in linux kernel virtual memory
159 */
160struct hw_queue {
161 u64 current_q_offset; /* current queue entry */
162 struct ehea_page **queue_pages; /* array of pages belonging to queue */
163 u32 qe_size; /* queue entry size */
164 u32 queue_length; /* queue length allocated in bytes */
165 u32 pagesize;
166 u32 toggle_state; /* toggle flag - per page */
167 u32 reserved; /* 64 bit alignment */
168};
169
170/*
171 * For pSeries this is a 64bit memory address where
172 * I/O memory is mapped into CPU address space
173 */
174struct h_epa {
175 void __iomem *addr;
176};
177
178struct h_epa_user {
179 u64 addr;
180};
181
182struct h_epas {
183 struct h_epa kernel; /* kernel space accessible resource,
184 set to 0 if unused */
185 struct h_epa_user user; /* user space accessible resource
186 set to 0 if unused */
187};
188
189/*
190 * Memory map data structures
191 */
192struct ehea_dir_bmap
193{
194 u64 ent[EHEA_MAP_ENTRIES];
195};
196struct ehea_top_bmap
197{
198 struct ehea_dir_bmap *dir[EHEA_MAP_ENTRIES];
199};
200struct ehea_bmap
201{
202 struct ehea_top_bmap *top[EHEA_MAP_ENTRIES];
203};
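/*
 * Illustrative example (editor's note, not part of the original source):
 * with EHEA_DIR_INDEX_SHIFT = 13 these structures form a three-level table
 * of 8192-entry blocks.  A lookup for some index value would typically be
 * decomposed as
 *     top = idx >> EHEA_TOP_INDEX_SHIFT;
 *     dir = (idx >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
 *     ent = idx & EHEA_INDEX_MASK;
 * and resolved through ehea_bmap->top[top]->dir[dir]->ent[ent].
 */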
204
205struct ehea_qp;
206struct ehea_cq;
207struct ehea_eq;
208struct ehea_port;
209struct ehea_av;
210
211/*
212 * Queue attributes passed to ehea_create_qp()
213 */
214struct ehea_qp_init_attr {
215 /* input parameter */
216 u32 qp_token; /* queue token */
217 u8 low_lat_rq1;
218 u8 signalingtype; /* cqe generation flag */
219 u8 rq_count; /* num of receive queues */
220 u8 eqe_gen; /* eqe generation flag */
221 u16 max_nr_send_wqes; /* max number of send wqes */
222 u16 max_nr_rwqes_rq1; /* max number of receive wqes */
223 u16 max_nr_rwqes_rq2;
224 u16 max_nr_rwqes_rq3;
225 u8 wqe_size_enc_sq;
226 u8 wqe_size_enc_rq1;
227 u8 wqe_size_enc_rq2;
228 u8 wqe_size_enc_rq3;
229 u8 swqe_imm_data_len; /* immediate data length for swqes */
230 u16 port_nr;
231 u16 rq2_threshold;
232 u16 rq3_threshold;
233 u64 send_cq_handle;
234 u64 recv_cq_handle;
235 u64 aff_eq_handle;
236
237 /* output parameter */
238 u32 qp_nr;
239 u16 act_nr_send_wqes;
240 u16 act_nr_rwqes_rq1;
241 u16 act_nr_rwqes_rq2;
242 u16 act_nr_rwqes_rq3;
243 u8 act_wqe_size_enc_sq;
244 u8 act_wqe_size_enc_rq1;
245 u8 act_wqe_size_enc_rq2;
246 u8 act_wqe_size_enc_rq3;
247 u32 nr_sq_pages;
248 u32 nr_rq1_pages;
249 u32 nr_rq2_pages;
250 u32 nr_rq3_pages;
251 u32 liobn_sq;
252 u32 liobn_rq1;
253 u32 liobn_rq2;
254 u32 liobn_rq3;
255};
256
257/*
258 * Event Queue attributes, passed as parameter
259 */
260struct ehea_eq_attr {
261 u32 type;
262 u32 max_nr_of_eqes;
263 u8 eqe_gen; /* generate eqe flag */
264 u64 eq_handle;
265 u32 act_nr_of_eqes;
266 u32 nr_pages;
267 u32 ist1; /* Interrupt service token */
268 u32 ist2;
269 u32 ist3;
270 u32 ist4;
271};
272
273
274/*
275 * Event Queue
276 */
277struct ehea_eq {
278 struct ehea_adapter *adapter;
279 struct hw_queue hw_queue;
280 u64 fw_handle;
281 struct h_epas epas;
282 spinlock_t spinlock;
283 struct ehea_eq_attr attr;
284};
285
286/*
287 * HEA Queues
288 */
289struct ehea_qp {
290 struct ehea_adapter *adapter;
291 u64 fw_handle; /* QP handle for firmware calls */
292 struct hw_queue hw_squeue;
293 struct hw_queue hw_rqueue1;
294 struct hw_queue hw_rqueue2;
295 struct hw_queue hw_rqueue3;
296 struct h_epas epas;
297 struct ehea_qp_init_attr init_attr;
298};
299
300/*
301 * Completion Queue attributes
302 */
303struct ehea_cq_attr {
304 /* input parameter */
305 u32 max_nr_of_cqes;
306 u32 cq_token;
307 u64 eq_handle;
308
309 /* output parameter */
310 u32 act_nr_of_cqes;
311 u32 nr_pages;
312};
313
314/*
315 * Completion Queue
316 */
317struct ehea_cq {
318 struct ehea_adapter *adapter;
319 u64 fw_handle;
320 struct hw_queue hw_queue;
321 struct h_epas epas;
322 struct ehea_cq_attr attr;
323};
324
325/*
326 * Memory Region
327 */
328struct ehea_mr {
329 struct ehea_adapter *adapter;
330 u64 handle;
331 u64 vaddr;
332 u32 lkey;
333};
334
335/*
336 * Port state information
337 */
338struct port_stats {
339 int poll_receive_errors;
340 int queue_stopped;
341 int err_tcp_cksum;
342 int err_ip_cksum;
343 int err_frame_crc;
344};
345
346#define EHEA_IRQ_NAME_SIZE 20
347
348/*
349 * Queue SKB Array
350 */
351struct ehea_q_skb_arr {
352 struct sk_buff **arr; /* skb array for queue */
353 int len; /* array length */
354 int index; /* array index */
355 int os_skbs; /* rq2/rq3 only: outstanding skbs */
356};
357
358/*
359 * Port resources
360 */
361struct ehea_port_res {
362 struct napi_struct napi;
363 struct port_stats p_stats;
364 struct ehea_mr send_mr; /* send memory region */
365 struct ehea_mr recv_mr; /* receive memory region */
366 spinlock_t xmit_lock;
367 struct ehea_port *port;
368 char int_recv_name[EHEA_IRQ_NAME_SIZE];
369 char int_send_name[EHEA_IRQ_NAME_SIZE];
370 struct ehea_qp *qp;
371 struct ehea_cq *send_cq;
372 struct ehea_cq *recv_cq;
373 struct ehea_eq *eq;
374 struct ehea_q_skb_arr rq1_skba;
375 struct ehea_q_skb_arr rq2_skba;
376 struct ehea_q_skb_arr rq3_skba;
377 struct ehea_q_skb_arr sq_skba;
378 int sq_skba_size;
379 spinlock_t netif_queue;
380 int queue_stopped;
381 int swqe_refill_th;
382 atomic_t swqe_avail;
383 int swqe_ll_count;
384 u32 swqe_id_counter;
385 u64 tx_packets;
386 u64 tx_bytes;
387 u64 rx_packets;
388 u64 rx_bytes;
389 u32 poll_counter;
390 struct net_lro_mgr lro_mgr;
391 struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
392 int sq_restart_flag;
393};
394
395
396#define EHEA_MAX_PORTS 16
397
398#define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle,
399 RecvCQ handle, EQ handle,
400 SendMR handle, RecvMR handle */
401#define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */
402#define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */
403
404struct ehea_adapter {
405 u64 handle;
406 struct platform_device *ofdev;
407 struct ehea_port *port[EHEA_MAX_PORTS];
408 struct ehea_eq *neq; /* notification event queue */
409 struct tasklet_struct neq_tasklet;
410 struct ehea_mr mr;
411 u32 pd; /* protection domain */
412 u64 max_mc_mac; /* max number of multicast mac addresses */
413 int active_ports;
414 struct list_head list;
415};
416
417
418struct ehea_mc_list {
419 struct list_head list;
420 u64 macaddr;
421};
422
423/* kdump support */
424struct ehea_fw_handle_entry {
425 u64 adh; /* Adapter Handle */
426 u64 fwh; /* Firmware Handle */
427};
428
429struct ehea_fw_handle_array {
430 struct ehea_fw_handle_entry *arr;
431 int num_entries;
432 struct mutex lock;
433};
434
435struct ehea_bcmc_reg_entry {
436 u64 adh; /* Adapter Handle */
437 u32 port_id; /* Logical Port Id */
438 u8 reg_type; /* Registration Type */
439 u64 macaddr;
440};
441
442struct ehea_bcmc_reg_array {
443 struct ehea_bcmc_reg_entry *arr;
444 int num_entries;
445 spinlock_t lock;
446};
447
448#define EHEA_PORT_UP 1
449#define EHEA_PORT_DOWN 0
450#define EHEA_PHY_LINK_UP 1
451#define EHEA_PHY_LINK_DOWN 0
452#define EHEA_MAX_PORT_RES 16
453struct ehea_port {
454 struct ehea_adapter *adapter; /* adapter that owns this port */
455 struct net_device *netdev;
456 struct net_device_stats stats;
457 struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
458 struct platform_device ofdev; /* Open Firmware Device */
459 struct ehea_mc_list *mc_list; /* Multicast MAC addresses */
460 struct ehea_eq *qp_eq;
461 struct work_struct reset_task;
462 struct mutex port_lock;
463 char int_aff_name[EHEA_IRQ_NAME_SIZE];
464 int allmulti; /* Indicates IFF_ALLMULTI state */
465 int promisc; /* Indicates IFF_PROMISC state */
466 int num_tx_qps;
467 int num_add_tx_qps;
468 int num_mcs;
469 int resets;
470 unsigned long flags;
471 u64 mac_addr;
472 u32 logical_port_id;
473 u32 port_speed;
474 u32 msg_enable;
475 u32 sig_comp_iv;
476 u32 state;
477 u32 lro_max_aggr;
478 u8 phy_link;
479 u8 full_duplex;
480 u8 autoneg;
481 u8 num_def_qps;
482 wait_queue_head_t swqe_avail_wq;
483 wait_queue_head_t restart_wq;
484};
485
486struct port_res_cfg {
487 int max_entries_rcq;
488 int max_entries_scq;
489 int max_entries_sq;
490 int max_entries_rq1;
491 int max_entries_rq2;
492 int max_entries_rq3;
493};
494
495enum ehea_flag_bits {
496 __EHEA_STOP_XFER,
497 __EHEA_DISABLE_PORT_RESET
498};
499
500void ehea_set_ethtool_ops(struct net_device *netdev);
501int ehea_sense_port_attr(struct ehea_port *port);
502int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
503
504#endif /* __EHEA_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
new file mode 100644
index 000000000000..7f642aef5e82
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -0,0 +1,295 @@
1/*
2 * linux/drivers/net/ehea/ehea_ethtool.c
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include "ehea.h"
32#include "ehea_phyp.h"
33
34static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
35{
36 struct ehea_port *port = netdev_priv(dev);
37 u32 speed;
38 int ret;
39
40 ret = ehea_sense_port_attr(port);
41
42 if (ret)
43 return ret;
44
45 if (netif_carrier_ok(dev)) {
46 switch (port->port_speed) {
47 case EHEA_SPEED_10M:
48 speed = SPEED_10;
49 break;
50 case EHEA_SPEED_100M:
51 speed = SPEED_100;
52 break;
53 case EHEA_SPEED_1G:
54 speed = SPEED_1000;
55 break;
56 case EHEA_SPEED_10G:
57 speed = SPEED_10000;
58 break;
59 default:
60 speed = -1;
61 break; /* BUG */
62 }
63 cmd->duplex = port->full_duplex == 1 ?
64 DUPLEX_FULL : DUPLEX_HALF;
65 } else {
66 speed = ~0;
67 cmd->duplex = -1;
68 }
69 ethtool_cmd_speed_set(cmd, speed);
70
71 if (cmd->speed == SPEED_10000) {
72 cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
73 cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
74 cmd->port = PORT_FIBRE;
75 } else {
76 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
77 | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
78 | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
79 | SUPPORTED_TP);
80 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
81 | ADVERTISED_TP);
82 cmd->port = PORT_TP;
83 }
84
85 cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
86
87 return 0;
88}
89
90static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
91{
92 struct ehea_port *port = netdev_priv(dev);
93 int ret = 0;
94 u32 sp;
95
96 if (cmd->autoneg == AUTONEG_ENABLE) {
97 sp = EHEA_SPEED_AUTONEG;
98 goto doit;
99 }
100
101 switch (cmd->speed) {
102 case SPEED_10:
103 if (cmd->duplex == DUPLEX_FULL)
104 sp = H_SPEED_10M_F;
105 else
106 sp = H_SPEED_10M_H;
107 break;
108
109 case SPEED_100:
110 if (cmd->duplex == DUPLEX_FULL)
111 sp = H_SPEED_100M_F;
112 else
113 sp = H_SPEED_100M_H;
114 break;
115
116 case SPEED_1000:
117 if (cmd->duplex == DUPLEX_FULL)
118 sp = H_SPEED_1G_F;
119 else
120 ret = -EINVAL;
121 break;
122
123 case SPEED_10000:
124 if (cmd->duplex == DUPLEX_FULL)
125 sp = H_SPEED_10G_F;
126 else
127 ret = -EINVAL;
128 break;
129
130 default:
131 ret = -EINVAL;
132 break;
133 }
134
135 if (ret)
136 goto out;
137doit:
138 ret = ehea_set_portspeed(port, sp);
139
140 if (!ret)
141 netdev_info(dev,
142 "Port speed successfully set: %dMbps %s Duplex\n",
143 port->port_speed,
144 port->full_duplex == 1 ? "Full" : "Half");
145out:
146 return ret;
147}
148
149static int ehea_nway_reset(struct net_device *dev)
150{
151 struct ehea_port *port = netdev_priv(dev);
152 int ret;
153
154 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
155
156 if (!ret)
157 netdev_info(port->netdev,
158 "Port speed successfully set: %dMbps %s Duplex\n",
159 port->port_speed,
160 port->full_duplex == 1 ? "Full" : "Half");
161 return ret;
162}
163
164static void ehea_get_drvinfo(struct net_device *dev,
165 struct ethtool_drvinfo *info)
166{
167 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
168 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
169}
170
171static u32 ehea_get_msglevel(struct net_device *dev)
172{
173 struct ehea_port *port = netdev_priv(dev);
174 return port->msg_enable;
175}
176
177static void ehea_set_msglevel(struct net_device *dev, u32 value)
178{
179 struct ehea_port *port = netdev_priv(dev);
180 port->msg_enable = value;
181}
182
183static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
184 {"sig_comp_iv"},
185 {"swqe_refill_th"},
186 {"port resets"},
187 {"Receive errors"},
188 {"TCP cksum errors"},
189 {"IP cksum errors"},
190 {"Frame cksum errors"},
191 {"num SQ stopped"},
192 {"SQ stopped"},
193 {"PR0 free_swqes"},
194 {"PR1 free_swqes"},
195 {"PR2 free_swqes"},
196 {"PR3 free_swqes"},
197 {"PR4 free_swqes"},
198 {"PR5 free_swqes"},
199 {"PR6 free_swqes"},
200 {"PR7 free_swqes"},
201 {"LRO aggregated"},
202 {"LRO flushed"},
203 {"LRO no_desc"},
204};
205
206static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
207{
208 if (stringset == ETH_SS_STATS) {
209 memcpy(data, &ehea_ethtool_stats_keys,
210 sizeof(ehea_ethtool_stats_keys));
211 }
212}
213
214static int ehea_get_sset_count(struct net_device *dev, int sset)
215{
216 switch (sset) {
217 case ETH_SS_STATS:
218 return ARRAY_SIZE(ehea_ethtool_stats_keys);
219 default:
220 return -EOPNOTSUPP;
221 }
222}
223
224static void ehea_get_ethtool_stats(struct net_device *dev,
225 struct ethtool_stats *stats, u64 *data)
226{
227 int i, k, tmp;
228 struct ehea_port *port = netdev_priv(dev);
229
230 for (i = 0; i < ehea_get_sset_count(dev, ETH_SS_STATS); i++)
231 data[i] = 0;
232 i = 0;
233
234 data[i++] = port->sig_comp_iv;
235 data[i++] = port->port_res[0].swqe_refill_th;
236 data[i++] = port->resets;
237
238 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
239 tmp += port->port_res[k].p_stats.poll_receive_errors;
240 data[i++] = tmp;
241
242 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
243 tmp += port->port_res[k].p_stats.err_tcp_cksum;
244 data[i++] = tmp;
245
246 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
247 tmp += port->port_res[k].p_stats.err_ip_cksum;
248 data[i++] = tmp;
249
250 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
251 tmp += port->port_res[k].p_stats.err_frame_crc;
252 data[i++] = tmp;
253
254 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
255 tmp += port->port_res[k].p_stats.queue_stopped;
256 data[i++] = tmp;
257
258 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
259 tmp |= port->port_res[k].queue_stopped;
260 data[i++] = tmp;
261
262 for (k = 0; k < 8; k++)
263 data[i++] = atomic_read(&port->port_res[k].swqe_avail);
264
265 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
266 tmp |= port->port_res[k].lro_mgr.stats.aggregated;
267 data[i++] = tmp;
268
269 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
270 tmp |= port->port_res[k].lro_mgr.stats.flushed;
271 data[i++] = tmp;
272
273 for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
274 tmp |= port->port_res[k].lro_mgr.stats.no_desc;
275 data[i++] = tmp;
276
277}
278
279const struct ethtool_ops ehea_ethtool_ops = {
280 .get_settings = ehea_get_settings,
281 .get_drvinfo = ehea_get_drvinfo,
282 .get_msglevel = ehea_get_msglevel,
283 .set_msglevel = ehea_set_msglevel,
284 .get_link = ethtool_op_get_link,
285 .get_strings = ehea_get_strings,
286 .get_sset_count = ehea_get_sset_count,
287 .get_ethtool_stats = ehea_get_ethtool_stats,
288 .set_settings = ehea_set_settings,
289 .nway_reset = ehea_nway_reset, /* Restart autonegotiation */
290};
291
292void ehea_set_ethtool_ops(struct net_device *netdev)
293{
294 SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
295}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_hw.h b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
new file mode 100644
index 000000000000..567981b4b2cc
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_hw.h
@@ -0,0 +1,292 @@
1/*
2 * linux/drivers/net/ehea/ehea_hw.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_HW_H__
30#define __EHEA_HW_H__
31
32#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
33#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
34#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
35#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
36
37#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
38
39struct ehea_qptemm {
40 u64 qpx_hcr;
41 u64 qpx_c;
42 u64 qpx_herr;
43 u64 qpx_aer;
44 u64 qpx_sqa;
45 u64 qpx_sqc;
46 u64 qpx_rq1a;
47 u64 qpx_rq1c;
48 u64 qpx_st;
49 u64 qpx_aerr;
50 u64 qpx_tenure;
51 u64 qpx_reserved1[(0x098 - 0x058) / 8];
52 u64 qpx_portp;
53 u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
54 u64 qpx_t;
55 u64 qpx_sqhp;
56 u64 qpx_sqptp;
57 u64 qpx_reserved3[(0x140 - 0x118) / 8];
58 u64 qpx_sqwsize;
59 u64 qpx_reserved4[(0x170 - 0x148) / 8];
60 u64 qpx_sqsize;
61 u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
62 u64 qpx_sigt;
63 u64 qpx_wqecnt;
64 u64 qpx_rq1hp;
65 u64 qpx_rq1ptp;
66 u64 qpx_rq1size;
67 u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
68 u64 qpx_rq1wsize;
69 u64 qpx_reserved7[(0x240 - 0x228) / 8];
70 u64 qpx_pd;
71 u64 qpx_scqn;
72 u64 qpx_rcqn;
73 u64 qpx_aeqn;
74 u64 reserved49;
75 u64 qpx_ram;
76 u64 qpx_reserved8[(0x300 - 0x270) / 8];
77 u64 qpx_rq2a;
78 u64 qpx_rq2c;
79 u64 qpx_rq2hp;
80 u64 qpx_rq2ptp;
81 u64 qpx_rq2size;
82 u64 qpx_rq2wsize;
83 u64 qpx_rq2th;
84 u64 qpx_rq3a;
85 u64 qpx_rq3c;
86 u64 qpx_rq3hp;
87 u64 qpx_rq3ptp;
88 u64 qpx_rq3size;
89 u64 qpx_rq3wsize;
90 u64 qpx_rq3th;
91 u64 qpx_lpn;
92 u64 qpx_reserved9[(0x400 - 0x378) / 8];
93 u64 reserved_ext[(0x500 - 0x400) / 8];
94 u64 reserved2[(0x1000 - 0x500) / 8];
95};
96
97#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)
98
99#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)
100
101struct ehea_mrmwmm {
102 u64 mrx_hcr;
103 u64 mrx_c;
104 u64 mrx_herr;
105 u64 mrx_aer;
106 u64 mrx_pp;
107 u64 reserved1;
108 u64 reserved2;
109 u64 reserved3;
110 u64 reserved4[(0x200 - 0x40) / 8];
111 u64 mrx_ctl[64];
112};
113
114#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)
115
116struct ehea_qpedmm {
117
118 u64 reserved0[(0x400) / 8];
119 u64 qpedx_phh;
120 u64 qpedx_ppsgp;
121 u64 qpedx_ppsgu;
122 u64 qpedx_ppdgp;
123 u64 qpedx_ppdgu;
124 u64 qpedx_aph;
125 u64 qpedx_apsgp;
126 u64 qpedx_apsgu;
127 u64 qpedx_apdgp;
128 u64 qpedx_apdgu;
129 u64 qpedx_apav;
130 u64 qpedx_apsav;
131 u64 qpedx_hcr;
132 u64 reserved1[4];
133 u64 qpedx_rrl0;
134 u64 qpedx_rrrkey0;
135 u64 qpedx_rrva0;
136 u64 reserved2;
137 u64 qpedx_rrl1;
138 u64 qpedx_rrrkey1;
139 u64 qpedx_rrva1;
140 u64 reserved3;
141 u64 qpedx_rrl2;
142 u64 qpedx_rrrkey2;
143 u64 qpedx_rrva2;
144 u64 reserved4;
145 u64 qpedx_rrl3;
146 u64 qpedx_rrrkey3;
147 u64 qpedx_rrva3;
148};
149
150#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
151#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
152#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
153#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)
154
155#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)
156
157struct ehea_cqtemm {
158 u64 cqx_hcr;
159 u64 cqx_c;
160 u64 cqx_herr;
161 u64 cqx_aer;
162 u64 cqx_ptp;
163 u64 cqx_tp;
164 u64 cqx_fec;
165 u64 cqx_feca;
166 u64 cqx_ep;
167 u64 cqx_eq;
168 u64 reserved1;
169 u64 cqx_n0;
170 u64 cqx_n1;
171 u64 reserved2[(0x1000 - 0x60) / 8];
172};
173
174#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)
175
176struct ehea_eqtemm {
177 u64 eqx_hcr;
178 u64 eqx_c;
179 u64 eqx_herr;
180 u64 eqx_aer;
181 u64 eqx_ptp;
182 u64 eqx_tp;
183 u64 eqx_ssba;
184 u64 eqx_psba;
185 u64 eqx_cec;
186 u64 eqx_meql;
187 u64 eqx_xisbi;
188 u64 eqx_xisc;
189 u64 eqx_it;
190};
191
192/*
193 * These access functions will be changed when the discussion about
194 * the new access methods for POWER has settled.
195 */
196
197static inline u64 epa_load(struct h_epa epa, u32 offset)
198{
199 return __raw_readq((void __iomem *)(epa.addr + offset));
200}
201
202static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
203{
204 __raw_writeq(value, (void __iomem *)(epa.addr + offset));
205 epa_load(epa, offset); /* synchronize explicitly to eHEA */
206}
207
208static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
209{
210 __raw_writeq(value, (void __iomem *)(epa.addr + offset));
211}
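/*
 * Editor's note (not part of the original source): epa_store() reads the
 * register back after the write, which flushes the posted MMIO store out to
 * the eHEA before returning, whereas epa_store_acc() omits the read-back and
 * is used below for the doorbell-style queue adder registers, presumably
 * because that extra round trip is not required there.
 */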
212
213#define epa_store_eq(epa, offset, value)\
214 epa_store(epa, EQTEMM_OFFSET(offset), value)
215#define epa_load_eq(epa, offset)\
216 epa_load(epa, EQTEMM_OFFSET(offset))
217
218#define epa_store_cq(epa, offset, value)\
219 epa_store(epa, CQTEMM_OFFSET(offset), value)
220#define epa_load_cq(epa, offset)\
221 epa_load(epa, CQTEMM_OFFSET(offset))
222
223#define epa_store_qp(epa, offset, value)\
224 epa_store(epa, QPTEMM_OFFSET(offset), value)
225#define epa_load_qp(epa, offset)\
226 epa_load(epa, QPTEMM_OFFSET(offset))
227
228#define epa_store_qped(epa, offset, value)\
229 epa_store(epa, QPEDMM_OFFSET(offset), value)
230#define epa_load_qped(epa, offset)\
231 epa_load(epa, QPEDMM_OFFSET(offset))
232
233#define epa_store_mrmw(epa, offset, value)\
234 epa_store(epa, MRMWMM_OFFSET(offset), value)
235#define epa_load_mrmw(epa, offset)\
236 epa_load(epa, MRMWMM_OFFSET(offset))
237
238#define epa_store_base(epa, offset, value)\
239 epa_store(epa, HCAGR_OFFSET(offset), value)
240#define epa_load_base(epa, offset)\
241 epa_load(epa, HCAGR_OFFSET(offset))
242
243static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
244{
245 struct h_epa epa = qp->epas.kernel;
246 epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
247 EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
248}
249
250static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
251{
252 struct h_epa epa = qp->epas.kernel;
253 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
254 EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
255}
256
257static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
258{
259 struct h_epa epa = qp->epas.kernel;
260 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
261 EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
262}
263
264static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
265{
266 struct h_epa epa = qp->epas.kernel;
267 epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
268 EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
269}
270
271static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
272{
273 struct h_epa epa = cq->epas.kernel;
274 epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
275 EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
276}
277
278static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
279{
280 struct h_epa epa = cq->epas.kernel;
281 epa_store_cq(epa, cqx_n1,
282 EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
283}
284
285static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
286{
287 struct h_epa epa = my_cq->epas.kernel;
288 epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
289 EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
290}
291
292#endif /* __EHEA_HW_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
new file mode 100644
index 000000000000..be2cb4ab8b4f
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -0,0 +1,3768 @@
1/*
2 * linux/drivers/net/ehea/ehea_main.c
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/in.h>
32#include <linux/ip.h>
33#include <linux/tcp.h>
34#include <linux/udp.h>
35#include <linux/if.h>
36#include <linux/list.h>
37#include <linux/slab.h>
38#include <linux/if_ether.h>
39#include <linux/notifier.h>
40#include <linux/reboot.h>
41#include <linux/memory.h>
42#include <asm/kexec.h>
43#include <linux/mutex.h>
44#include <linux/prefetch.h>
45
46#include <net/ip.h>
47
48#include "ehea.h"
49#include "ehea_qmr.h"
50#include "ehea_phyp.h"
51
52
53MODULE_LICENSE("GPL");
54MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
55MODULE_DESCRIPTION("IBM eServer HEA Driver");
56MODULE_VERSION(DRV_VERSION);
57
58
59static int msg_level = -1;
60static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
61static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
62static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
63static int sq_entries = EHEA_DEF_ENTRIES_SQ;
64static int use_mcs;
65static int use_lro;
66static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
67static int num_tx_qps = EHEA_NUM_TX_QP;
68static int prop_carrier_state;
69
70module_param(msg_level, int, 0);
71module_param(rq1_entries, int, 0);
72module_param(rq2_entries, int, 0);
73module_param(rq3_entries, int, 0);
74module_param(sq_entries, int, 0);
75module_param(prop_carrier_state, int, 0);
76module_param(use_mcs, int, 0);
77module_param(use_lro, int, 0);
78module_param(lro_max_aggr, int, 0);
79module_param(num_tx_qps, int, 0);
80
81MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
82MODULE_PARM_DESC(msg_level, "Debug message level (NETIF_MSG_* bitmap, -1 for driver default)");
83MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
84 "port to stack. 1:yes, 0:no. Default = 0 ");
85MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
86 "[2^x - 1], x = [6..14]. Default = "
87 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
88MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
89 "[2^x - 1], x = [6..14]. Default = "
90 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
91MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
92 "[2^x - 1], x = [6..14]. Default = "
93 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
94MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
95 "[2^x - 1], x = [6..14]. Default = "
96 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
97MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
98
99MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
100 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
101MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
102 "Default = 0");
103
104static int port_name_cnt;
105static LIST_HEAD(adapter_list);
106static unsigned long ehea_driver_flags;
107static DEFINE_MUTEX(dlpar_mem_lock);
108struct ehea_fw_handle_array ehea_fw_handles;
109struct ehea_bcmc_reg_array ehea_bcmc_regs;
110
111
112static int __devinit ehea_probe_adapter(struct platform_device *dev,
113 const struct of_device_id *id);
114
115static int __devexit ehea_remove(struct platform_device *dev);
116
117static struct of_device_id ehea_device_table[] = {
118 {
119 .name = "lhea",
120 .compatible = "IBM,lhea",
121 },
122 {},
123};
124MODULE_DEVICE_TABLE(of, ehea_device_table);
125
126static struct of_platform_driver ehea_driver = {
127 .driver = {
128 .name = "ehea",
129 .owner = THIS_MODULE,
130 .of_match_table = ehea_device_table,
131 },
132 .probe = ehea_probe_adapter,
133 .remove = ehea_remove,
134};
135
136void ehea_dump(void *adr, int len, char *msg)
137{
138 int x;
139 unsigned char *deb = adr;
140 for (x = 0; x < len; x += 16) {
141 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
142 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
143 deb += 16;
144 }
145}
146
147void ehea_schedule_port_reset(struct ehea_port *port)
148{
149 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
150 schedule_work(&port->reset_task);
151}
152
153static void ehea_update_firmware_handles(void)
154{
155 struct ehea_fw_handle_entry *arr = NULL;
156 struct ehea_adapter *adapter;
157 int num_adapters = 0;
158 int num_ports = 0;
159 int num_portres = 0;
160 int i = 0;
161 int num_fw_handles, k, l;
162
163 /* Determine number of handles */
164 mutex_lock(&ehea_fw_handles.lock);
165
166 list_for_each_entry(adapter, &adapter_list, list) {
167 num_adapters++;
168
169 for (k = 0; k < EHEA_MAX_PORTS; k++) {
170 struct ehea_port *port = adapter->port[k];
171
172 if (!port || (port->state != EHEA_PORT_UP))
173 continue;
174
175 num_ports++;
176 num_portres += port->num_def_qps + port->num_add_tx_qps;
177 }
178 }
179
180 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
181 num_ports * EHEA_NUM_PORT_FW_HANDLES +
182 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
183
184 if (num_fw_handles) {
185 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
186 if (!arr)
187 goto out; /* Keep the existing array */
188 } else
189 goto out_update;
190
191 list_for_each_entry(adapter, &adapter_list, list) {
192 if (num_adapters == 0)
193 break;
194
195 for (k = 0; k < EHEA_MAX_PORTS; k++) {
196 struct ehea_port *port = adapter->port[k];
197
198 if (!port || (port->state != EHEA_PORT_UP) ||
199 (num_ports == 0))
200 continue;
201
202 for (l = 0;
203 l < port->num_def_qps + port->num_add_tx_qps;
204 l++) {
205 struct ehea_port_res *pr = &port->port_res[l];
206
207 arr[i].adh = adapter->handle;
208 arr[i++].fwh = pr->qp->fw_handle;
209 arr[i].adh = adapter->handle;
210 arr[i++].fwh = pr->send_cq->fw_handle;
211 arr[i].adh = adapter->handle;
212 arr[i++].fwh = pr->recv_cq->fw_handle;
213 arr[i].adh = adapter->handle;
214 arr[i++].fwh = pr->eq->fw_handle;
215 arr[i].adh = adapter->handle;
216 arr[i++].fwh = pr->send_mr.handle;
217 arr[i].adh = adapter->handle;
218 arr[i++].fwh = pr->recv_mr.handle;
219 }
220 arr[i].adh = adapter->handle;
221 arr[i++].fwh = port->qp_eq->fw_handle;
222 num_ports--;
223 }
224
225 arr[i].adh = adapter->handle;
226 arr[i++].fwh = adapter->neq->fw_handle;
227
228 if (adapter->mr.handle) {
229 arr[i].adh = adapter->handle;
230 arr[i++].fwh = adapter->mr.handle;
231 }
232 num_adapters--;
233 }
234
235out_update:
236 kfree(ehea_fw_handles.arr);
237 ehea_fw_handles.arr = arr;
238 ehea_fw_handles.num_entries = i;
239out:
240 mutex_unlock(&ehea_fw_handles.lock);
241}
242
243static void ehea_update_bcmc_registrations(void)
244{
245 unsigned long flags;
246 struct ehea_bcmc_reg_entry *arr = NULL;
247 struct ehea_adapter *adapter;
248 struct ehea_mc_list *mc_entry;
249 int num_registrations = 0;
250 int i = 0;
251 int k;
252
253 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
254
255 /* Determine number of registrations */
256 list_for_each_entry(adapter, &adapter_list, list)
257 for (k = 0; k < EHEA_MAX_PORTS; k++) {
258 struct ehea_port *port = adapter->port[k];
259
260 if (!port || (port->state != EHEA_PORT_UP))
261 continue;
262
263 num_registrations += 2; /* Broadcast registrations */
264
265 list_for_each_entry(mc_entry, &port->mc_list->list,list)
266 num_registrations += 2;
267 }
268
269 if (num_registrations) {
270 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
271 if (!arr)
272 goto out; /* Keep the existing array */
273 } else
274 goto out_update;
275
276 list_for_each_entry(adapter, &adapter_list, list) {
277 for (k = 0; k < EHEA_MAX_PORTS; k++) {
278 struct ehea_port *port = adapter->port[k];
279
280 if (!port || (port->state != EHEA_PORT_UP))
281 continue;
282
283 if (num_registrations == 0)
284 goto out_update;
285
286 arr[i].adh = adapter->handle;
287 arr[i].port_id = port->logical_port_id;
288 arr[i].reg_type = EHEA_BCMC_BROADCAST |
289 EHEA_BCMC_UNTAGGED;
290 arr[i++].macaddr = port->mac_addr;
291
292 arr[i].adh = adapter->handle;
293 arr[i].port_id = port->logical_port_id;
294 arr[i].reg_type = EHEA_BCMC_BROADCAST |
295 EHEA_BCMC_VLANID_ALL;
296 arr[i++].macaddr = port->mac_addr;
297 num_registrations -= 2;
298
299 list_for_each_entry(mc_entry,
300 &port->mc_list->list, list) {
301 if (num_registrations == 0)
302 goto out_update;
303
304 arr[i].adh = adapter->handle;
305 arr[i].port_id = port->logical_port_id;
306 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
307 EHEA_BCMC_MULTICAST |
308 EHEA_BCMC_UNTAGGED;
309 arr[i++].macaddr = mc_entry->macaddr;
310
311 arr[i].adh = adapter->handle;
312 arr[i].port_id = port->logical_port_id;
313 arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
314 EHEA_BCMC_MULTICAST |
315 EHEA_BCMC_VLANID_ALL;
316 arr[i++].macaddr = mc_entry->macaddr;
317 num_registrations -= 2;
318 }
319 }
320 }
321
322out_update:
323 kfree(ehea_bcmc_regs.arr);
324 ehea_bcmc_regs.arr = arr;
325 ehea_bcmc_regs.num_entries = i;
326out:
327 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
328}
329
330static struct net_device_stats *ehea_get_stats(struct net_device *dev)
331{
332 struct ehea_port *port = netdev_priv(dev);
333 struct net_device_stats *stats = &port->stats;
334 struct hcp_ehea_port_cb2 *cb2;
335 u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
336 int i;
337
338 memset(stats, 0, sizeof(*stats));
339
340 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
341 if (!cb2) {
342 netdev_err(dev, "no mem for cb2\n");
343 goto out;
344 }
345
346 hret = ehea_h_query_ehea_port(port->adapter->handle,
347 port->logical_port_id,
348 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
349 if (hret != H_SUCCESS) {
350 netdev_err(dev, "query_ehea_port failed\n");
351 goto out_herr;
352 }
353
354 if (netif_msg_hw(port))
355 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
356
357 rx_packets = 0;
358 for (i = 0; i < port->num_def_qps; i++) {
359 rx_packets += port->port_res[i].rx_packets;
360 rx_bytes += port->port_res[i].rx_bytes;
361 }
362
363 tx_packets = 0;
364 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
365 tx_packets += port->port_res[i].tx_packets;
366 tx_bytes += port->port_res[i].tx_bytes;
367 }
368
369 stats->tx_packets = tx_packets;
370 stats->multicast = cb2->rxmcp;
371 stats->rx_errors = cb2->rxuerr;
372 stats->rx_bytes = rx_bytes;
373 stats->tx_bytes = tx_bytes;
374 stats->rx_packets = rx_packets;
375
376out_herr:
377 free_page((unsigned long)cb2);
378out:
379 return stats;
380}
381
382static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
383{
384 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
385 struct net_device *dev = pr->port->netdev;
386 int max_index_mask = pr->rq1_skba.len - 1;
387 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
388 int adder = 0;
389 int i;
390
391 pr->rq1_skba.os_skbs = 0;
392
393 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
394 if (nr_of_wqes > 0)
395 pr->rq1_skba.index = index;
396 pr->rq1_skba.os_skbs = fill_wqes;
397 return;
398 }
399
400 for (i = 0; i < fill_wqes; i++) {
401 if (!skb_arr_rq1[index]) {
402 skb_arr_rq1[index] = netdev_alloc_skb(dev,
403 EHEA_L_PKT_SIZE);
404 if (!skb_arr_rq1[index]) {
405 netdev_info(dev, "Unable to allocate enough skb in the array\n");
406 pr->rq1_skba.os_skbs = fill_wqes - i;
407 break;
408 }
409 }
410 index--;
411 index &= max_index_mask;
412 adder++;
413 }
414
415 if (adder == 0)
416 return;
417
418 /* Ring doorbell */
419 ehea_update_rq1a(pr->qp, adder);
420}
421
422static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
423{
424 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
425 struct net_device *dev = pr->port->netdev;
426 int i;
427
428 if (nr_rq1a > pr->rq1_skba.len) {
429 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
430 return;
431 }
432
433 for (i = 0; i < nr_rq1a; i++) {
434 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
435 if (!skb_arr_rq1[i]) {
436 netdev_info(dev, "Not enough memory to allocate skb array\n");
437 break;
438 }
439 }
440 /* Ring doorbell */
441 ehea_update_rq1a(pr->qp, i - 1);
442}
443
444static int ehea_refill_rq_def(struct ehea_port_res *pr,
445 struct ehea_q_skb_arr *q_skba, int rq_nr,
446 int num_wqes, int wqe_type, int packet_size)
447{
448 struct net_device *dev = pr->port->netdev;
449 struct ehea_qp *qp = pr->qp;
450 struct sk_buff **skb_arr = q_skba->arr;
451 struct ehea_rwqe *rwqe;
452 int i, index, max_index_mask, fill_wqes;
453 int adder = 0;
454 int ret = 0;
455
456 fill_wqes = q_skba->os_skbs + num_wqes;
457 q_skba->os_skbs = 0;
458
459 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
460 q_skba->os_skbs = fill_wqes;
461 return ret;
462 }
463
464 index = q_skba->index;
465 max_index_mask = q_skba->len - 1;
466 for (i = 0; i < fill_wqes; i++) {
467 u64 tmp_addr;
468 struct sk_buff *skb;
469
470 skb = netdev_alloc_skb_ip_align(dev, packet_size);
471 if (!skb) {
472 q_skba->os_skbs = fill_wqes - i;
473 if (q_skba->os_skbs == q_skba->len - 2) {
474 netdev_info(pr->port->netdev,
475 "rq%i ran dry - no mem for skb\n",
476 rq_nr);
477 ret = -ENOMEM;
478 }
479 break;
480 }
481
482 skb_arr[index] = skb;
483 tmp_addr = ehea_map_vaddr(skb->data);
484 if (tmp_addr == -1) {
485 dev_kfree_skb(skb);
486 q_skba->os_skbs = fill_wqes - i;
487 ret = 0;
488 break;
489 }
490
491 rwqe = ehea_get_next_rwqe(qp, rq_nr);
492 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
493 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
494 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
495 rwqe->sg_list[0].vaddr = tmp_addr;
496 rwqe->sg_list[0].len = packet_size;
497 rwqe->data_segments = 1;
498
499 index++;
500 index &= max_index_mask;
501 adder++;
502 }
503
504 q_skba->index = index;
505 if (adder == 0)
506 goto out;
507
508 /* Ring doorbell */
509 iosync();
510 if (rq_nr == 2)
511 ehea_update_rq2a(pr->qp, adder);
512 else
513 ehea_update_rq3a(pr->qp, adder);
514out:
515 return ret;
516}
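/*
 * Editor's note (not part of the original source): the wr_id built above
 * packs the WQE type and the skb array index into a single 64-bit word via
 * EHEA_BMASK_SET(), so the completion path can recover the index later with
 * EHEA_BMASK_GET() (see get_skb_by_index() below).
 */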
517
518
519static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
520{
521 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
522 nr_of_wqes, EHEA_RWQE2_TYPE,
523 EHEA_RQ2_PKT_SIZE);
524}
525
526
527static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
528{
529 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
530 nr_of_wqes, EHEA_RWQE3_TYPE,
531 EHEA_MAX_PACKET_SIZE);
532}
533
534static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
535{
536 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
537 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
538 return 0;
539 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
540 (cqe->header_length == 0))
541 return 0;
542 return -EINVAL;
543}
544
545static inline void ehea_fill_skb(struct net_device *dev,
546 struct sk_buff *skb, struct ehea_cqe *cqe)
547{
548 int length = cqe->num_bytes_transfered - 4; /*remove CRC */
549
550 skb_put(skb, length);
551 skb->protocol = eth_type_trans(skb, dev);
552
553 /* The packet was not an IPV4 packet so a complemented checksum was
554 calculated. The value is found in the Internet Checksum field. */
555 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
556 skb->ip_summed = CHECKSUM_COMPLETE;
557 skb->csum = csum_unfold(~cqe->inet_checksum_value);
558 } else
559 skb->ip_summed = CHECKSUM_UNNECESSARY;
560}
561
562static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
563 int arr_len,
564 struct ehea_cqe *cqe)
565{
566 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
567 struct sk_buff *skb;
568 void *pref;
569 int x;
570
571 x = skb_index + 1;
572 x &= (arr_len - 1);
573
574 pref = skb_array[x];
575 if (pref) {
576 prefetchw(pref);
577 prefetchw(pref + EHEA_CACHE_LINE);
578
579 pref = (skb_array[x]->data);
580 prefetch(pref);
581 prefetch(pref + EHEA_CACHE_LINE);
582 prefetch(pref + EHEA_CACHE_LINE * 2);
583 prefetch(pref + EHEA_CACHE_LINE * 3);
584 }
585
586 skb = skb_array[skb_index];
587 skb_array[skb_index] = NULL;
588 return skb;
589}
590
591static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
592 int arr_len, int wqe_index)
593{
594 struct sk_buff *skb;
595 void *pref;
596 int x;
597
598 x = wqe_index + 1;
599 x &= (arr_len - 1);
600
601 pref = skb_array[x];
602 if (pref) {
603 prefetchw(pref);
604 prefetchw(pref + EHEA_CACHE_LINE);
605
606 pref = (skb_array[x]->data);
607 prefetchw(pref);
608 prefetchw(pref + EHEA_CACHE_LINE);
609 }
610
611 skb = skb_array[wqe_index];
612 skb_array[wqe_index] = NULL;
613 return skb;
614}
615
616static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
617 struct ehea_cqe *cqe, int *processed_rq2,
618 int *processed_rq3)
619{
620 struct sk_buff *skb;
621
622 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
623 pr->p_stats.err_tcp_cksum++;
624 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
625 pr->p_stats.err_ip_cksum++;
626 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
627 pr->p_stats.err_frame_crc++;
628
629 if (rq == 2) {
630 *processed_rq2 += 1;
631 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
632 dev_kfree_skb(skb);
633 } else if (rq == 3) {
634 *processed_rq3 += 1;
635 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
636 dev_kfree_skb(skb);
637 }
638
639 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
640 if (netif_msg_rx_err(pr->port)) {
641 pr_err("Critical receive error for QP %d. Resetting port.\n",
642 pr->qp->init_attr.qp_nr);
643 ehea_dump(cqe, sizeof(*cqe), "CQE");
644 }
645 ehea_schedule_port_reset(pr->port);
646 return 1;
647 }
648
649 return 0;
650}
651
652static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
653 void **tcph, u64 *hdr_flags, void *priv)
654{
655 struct ehea_cqe *cqe = priv;
656 unsigned int ip_len;
657 struct iphdr *iph;
658
659 /* non tcp/udp packets */
660 if (!cqe->header_length)
661 return -1;
662
663 /* non tcp packet */
664 skb_reset_network_header(skb);
665 iph = ip_hdr(skb);
666 if (iph->protocol != IPPROTO_TCP)
667 return -1;
668
669 ip_len = ip_hdrlen(skb);
670 skb_set_transport_header(skb, ip_len);
671 *tcph = tcp_hdr(skb);
672
673 /* check if ip header and tcp header are complete */
674 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
675 return -1;
676
677 *hdr_flags = LRO_IPV4 | LRO_TCP;
678 *iphdr = iph;
679
680 return 0;
681}
682
683static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
684 struct sk_buff *skb)
685{
686 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
687 __vlan_hwaccel_put_tag(skb, cqe->vlan_tag);
688
689 if (skb->dev->features & NETIF_F_LRO)
690 lro_receive_skb(&pr->lro_mgr, skb, cqe);
691 else
692 netif_receive_skb(skb);
693}
694
695static int ehea_proc_rwqes(struct net_device *dev,
696 struct ehea_port_res *pr,
697 int budget)
698{
699 struct ehea_port *port = pr->port;
700 struct ehea_qp *qp = pr->qp;
701 struct ehea_cqe *cqe;
702 struct sk_buff *skb;
703 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
704 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
705 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
706 int skb_arr_rq1_len = pr->rq1_skba.len;
707 int skb_arr_rq2_len = pr->rq2_skba.len;
708 int skb_arr_rq3_len = pr->rq3_skba.len;
709 int processed, processed_rq1, processed_rq2, processed_rq3;
710 u64 processed_bytes = 0;
711 int wqe_index, last_wqe_index, rq, port_reset;
712
713 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
714 last_wqe_index = 0;
715
716 cqe = ehea_poll_rq1(qp, &wqe_index);
717 while ((processed < budget) && cqe) {
718 ehea_inc_rq1(qp);
719 processed_rq1++;
720 processed++;
721 if (netif_msg_rx_status(port))
722 ehea_dump(cqe, sizeof(*cqe), "CQE");
723
724 last_wqe_index = wqe_index;
725 rmb();
726 if (!ehea_check_cqe(cqe, &rq)) {
727 if (rq == 1) {
728 /* LL RQ1 */
729 skb = get_skb_by_index_ll(skb_arr_rq1,
730 skb_arr_rq1_len,
731 wqe_index);
732 if (unlikely(!skb)) {
733 netif_info(port, rx_err, dev,
734 "LL rq1: skb=NULL\n");
735
736 skb = netdev_alloc_skb(dev,
737 EHEA_L_PKT_SIZE);
738 if (!skb) {
739 netdev_err(dev, "Not enough memory to allocate skb\n");
740 break;
741 }
742 }
743 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
744 cqe->num_bytes_transfered - 4);
745 ehea_fill_skb(dev, skb, cqe);
746 } else if (rq == 2) {
747 /* RQ2 */
748 skb = get_skb_by_index(skb_arr_rq2,
749 skb_arr_rq2_len, cqe);
750 if (unlikely(!skb)) {
751 netif_err(port, rx_err, dev,
752 "rq2: skb=NULL\n");
753 break;
754 }
755 ehea_fill_skb(dev, skb, cqe);
756 processed_rq2++;
757 } else {
758 /* RQ3 */
759 skb = get_skb_by_index(skb_arr_rq3,
760 skb_arr_rq3_len, cqe);
761 if (unlikely(!skb)) {
762 netif_err(port, rx_err, dev,
763 "rq3: skb=NULL\n");
764 break;
765 }
766 ehea_fill_skb(dev, skb, cqe);
767 processed_rq3++;
768 }
769
770 processed_bytes += skb->len;
771 ehea_proc_skb(pr, cqe, skb);
772 } else {
773 pr->p_stats.poll_receive_errors++;
774 port_reset = ehea_treat_poll_error(pr, rq, cqe,
775 &processed_rq2,
776 &processed_rq3);
777 if (port_reset)
778 break;
779 }
780 cqe = ehea_poll_rq1(qp, &wqe_index);
781 }
782 if (dev->features & NETIF_F_LRO)
783 lro_flush_all(&pr->lro_mgr);
784
785 pr->rx_packets += processed;
786 pr->rx_bytes += processed_bytes;
787
788 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
789 ehea_refill_rq2(pr, processed_rq2);
790 ehea_refill_rq3(pr, processed_rq3);
791
792 return processed;
793}
794
795#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
796
797static void reset_sq_restart_flag(struct ehea_port *port)
798{
799 int i;
800
801 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
802 struct ehea_port_res *pr = &port->port_res[i];
803 pr->sq_restart_flag = 0;
804 }
805 wake_up(&port->restart_wq);
806}
807
808static void check_sqs(struct ehea_port *port)
809{
810 struct ehea_swqe *swqe;
811 int swqe_index;
812 int i, k;
813
814 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
815 struct ehea_port_res *pr = &port->port_res[i];
816 int ret;
817 k = 0;
818 swqe = ehea_get_swqe(pr->qp, &swqe_index);
819 memset(swqe, 0, SWQE_HEADER_SIZE);
820 atomic_dec(&pr->swqe_avail);
821
822 swqe->tx_control |= EHEA_SWQE_PURGE;
823 swqe->wr_id = SWQE_RESTART_CHECK;
824 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
825 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
826 swqe->immediate_data_length = 80;
827
828 ehea_post_swqe(pr->qp, swqe);
829
830 ret = wait_event_timeout(port->restart_wq,
831 pr->sq_restart_flag == 0,
832 msecs_to_jiffies(100));
833
834 if (!ret) {
835 pr_err("HW/SW queues out of sync\n");
836 ehea_schedule_port_reset(pr->port);
837 return;
838 }
839 }
840}
841
842
843static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
844{
845 struct sk_buff *skb;
846 struct ehea_cq *send_cq = pr->send_cq;
847 struct ehea_cqe *cqe;
848 int quota = my_quota;
849 int cqe_counter = 0;
850 int swqe_av = 0;
851 int index;
852 unsigned long flags;
853
854 cqe = ehea_poll_cq(send_cq);
855 while (cqe && (quota > 0)) {
856 ehea_inc_cq(send_cq);
857
858 cqe_counter++;
859 rmb();
860
861 if (cqe->wr_id == SWQE_RESTART_CHECK) {
862 pr->sq_restart_flag = 1;
863 swqe_av++;
864 break;
865 }
866
867 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
868 pr_err("Bad send completion status=0x%04X\n",
869 cqe->status);
870
871 if (netif_msg_tx_err(pr->port))
872 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
873
874 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
875 pr_err("Resetting port\n");
876 ehea_schedule_port_reset(pr->port);
877 break;
878 }
879 }
880
881 if (netif_msg_tx_done(pr->port))
882 ehea_dump(cqe, sizeof(*cqe), "CQE");
883
884 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
885 == EHEA_SWQE2_TYPE)) {
886
887 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
888 skb = pr->sq_skba.arr[index];
889 dev_kfree_skb(skb);
890 pr->sq_skba.arr[index] = NULL;
891 }
892
893 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
894 quota--;
895
896 cqe = ehea_poll_cq(send_cq);
897 }
898
899 ehea_update_feca(send_cq, cqe_counter);
900 atomic_add(swqe_av, &pr->swqe_avail);
901
902 spin_lock_irqsave(&pr->netif_queue, flags);
903
904 if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
905 >= pr->swqe_refill_th)) {
906 netif_wake_queue(pr->port->netdev);
907 pr->queue_stopped = 0;
908 }
909 spin_unlock_irqrestore(&pr->netif_queue, flags);
910 wake_up(&pr->port->swqe_avail_wq);
911
912 return cqe;
913}
914
915#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
916#define EHEA_POLL_MAX_CQES 65535
917
918static int ehea_poll(struct napi_struct *napi, int budget)
919{
920 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
921 napi);
922 struct net_device *dev = pr->port->netdev;
923 struct ehea_cqe *cqe;
924 struct ehea_cqe *cqe_skb = NULL;
925 int force_irq, wqe_index;
926 int rx = 0;
927
928 force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
929 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
930
931 if (!force_irq)
932 rx += ehea_proc_rwqes(dev, pr, budget - rx);
933
934 while ((rx != budget) || force_irq) {
935 pr->poll_counter = 0;
936 force_irq = 0;
937 napi_complete(napi);
938 ehea_reset_cq_ep(pr->recv_cq);
939 ehea_reset_cq_ep(pr->send_cq);
940 ehea_reset_cq_n1(pr->recv_cq);
941 ehea_reset_cq_n1(pr->send_cq);
942 rmb();
943 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
944 cqe_skb = ehea_poll_cq(pr->send_cq);
945
946 if (!cqe && !cqe_skb)
947 return rx;
948
949 if (!napi_reschedule(napi))
950 return rx;
951
952 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
953 rx += ehea_proc_rwqes(dev, pr, budget - rx);
954 }
955
956 pr->poll_counter++;
957 return rx;
958}
959
960#ifdef CONFIG_NET_POLL_CONTROLLER
961static void ehea_netpoll(struct net_device *dev)
962{
963 struct ehea_port *port = netdev_priv(dev);
964 int i;
965
966 for (i = 0; i < port->num_def_qps; i++)
967 napi_schedule(&port->port_res[i].napi);
968}
969#endif
970
971static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
972{
973 struct ehea_port_res *pr = param;
974
975 napi_schedule(&pr->napi);
976
977 return IRQ_HANDLED;
978}
979
980static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
981{
982 struct ehea_port *port = param;
983 struct ehea_eqe *eqe;
984 struct ehea_qp *qp;
985 u32 qp_token;
986 u64 resource_type, aer, aerr;
987 int reset_port = 0;
988
989 eqe = ehea_poll_eq(port->qp_eq);
990
991 while (eqe) {
992 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
993 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
994 eqe->entry, qp_token);
995
996 qp = port->port_res[qp_token].qp;
997
998 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
999 &aer, &aerr);
1000
1001 if (resource_type == EHEA_AER_RESTYPE_QP) {
1002 if ((aer & EHEA_AER_RESET_MASK) ||
1003 (aerr & EHEA_AERR_RESET_MASK))
1004 reset_port = 1;
1005 } else
1006 reset_port = 1; /* Reset in case of CQ or EQ error */
1007
1008 eqe = ehea_poll_eq(port->qp_eq);
1009 }
1010
1011 if (reset_port) {
1012 pr_err("Resetting port\n");
1013 ehea_schedule_port_reset(port);
1014 }
1015
1016 return IRQ_HANDLED;
1017}
1018
1019static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
1020 int logical_port)
1021{
1022 int i;
1023
1024 for (i = 0; i < EHEA_MAX_PORTS; i++)
1025 if (adapter->port[i])
1026 if (adapter->port[i]->logical_port_id == logical_port)
1027 return adapter->port[i];
1028 return NULL;
1029}
1030
1031int ehea_sense_port_attr(struct ehea_port *port)
1032{
1033 int ret;
1034 u64 hret;
1035 struct hcp_ehea_port_cb0 *cb0;
1036
1037 /* may be called via ehea_neq_tasklet() */
1038 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
1039 if (!cb0) {
1040 pr_err("no mem for cb0\n");
1041 ret = -ENOMEM;
1042 goto out;
1043 }
1044
1045 hret = ehea_h_query_ehea_port(port->adapter->handle,
1046 port->logical_port_id, H_PORT_CB0,
1047 EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
1048 cb0);
1049 if (hret != H_SUCCESS) {
1050 ret = -EIO;
1051 goto out_free;
1052 }
1053
1054 /* MAC address */
1055 port->mac_addr = cb0->port_mac_addr << 16;
1056
1057 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
1058 ret = -EADDRNOTAVAIL;
1059 goto out_free;
1060 }
1061
1062 /* Port speed */
1063 switch (cb0->port_speed) {
1064 case H_SPEED_10M_H:
1065 port->port_speed = EHEA_SPEED_10M;
1066 port->full_duplex = 0;
1067 break;
1068 case H_SPEED_10M_F:
1069 port->port_speed = EHEA_SPEED_10M;
1070 port->full_duplex = 1;
1071 break;
1072 case H_SPEED_100M_H:
1073 port->port_speed = EHEA_SPEED_100M;
1074 port->full_duplex = 0;
1075 break;
1076 case H_SPEED_100M_F:
1077 port->port_speed = EHEA_SPEED_100M;
1078 port->full_duplex = 1;
1079 break;
1080 case H_SPEED_1G_F:
1081 port->port_speed = EHEA_SPEED_1G;
1082 port->full_duplex = 1;
1083 break;
1084 case H_SPEED_10G_F:
1085 port->port_speed = EHEA_SPEED_10G;
1086 port->full_duplex = 1;
1087 break;
1088 default:
1089 port->port_speed = 0;
1090 port->full_duplex = 0;
1091 break;
1092 }
1093
1094 port->autoneg = 1;
1095 port->num_mcs = cb0->num_default_qps;
1096
1097 /* Number of default QPs */
1098 if (use_mcs)
1099 port->num_def_qps = cb0->num_default_qps;
1100 else
1101 port->num_def_qps = 1;
1102
1103 if (!port->num_def_qps) {
1104 ret = -EINVAL;
1105 goto out_free;
1106 }
1107
1108 port->num_tx_qps = num_tx_qps;
1109
1110 if (port->num_def_qps >= port->num_tx_qps)
1111 port->num_add_tx_qps = 0;
1112 else
1113 port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;
1114
1115 ret = 0;
1116out_free:
1117 if (ret || netif_msg_probe(port))
1118 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
1119 free_page((unsigned long)cb0);
1120out:
1121 return ret;
1122}
1123
1124int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1125{
1126 struct hcp_ehea_port_cb4 *cb4;
1127 u64 hret;
1128 int ret = 0;
1129
1130 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1131 if (!cb4) {
1132 pr_err("no mem for cb4\n");
1133 ret = -ENOMEM;
1134 goto out;
1135 }
1136
1137 cb4->port_speed = port_speed;
1138
1139 netif_carrier_off(port->netdev);
1140
1141 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1142 port->logical_port_id,
1143 H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
1144 if (hret == H_SUCCESS) {
1145 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
1146
1147 hret = ehea_h_query_ehea_port(port->adapter->handle,
1148 port->logical_port_id,
1149 H_PORT_CB4, H_PORT_CB4_SPEED,
1150 cb4);
1151 if (hret == H_SUCCESS) {
1152 switch (cb4->port_speed) {
1153 case H_SPEED_10M_H:
1154 port->port_speed = EHEA_SPEED_10M;
1155 port->full_duplex = 0;
1156 break;
1157 case H_SPEED_10M_F:
1158 port->port_speed = EHEA_SPEED_10M;
1159 port->full_duplex = 1;
1160 break;
1161 case H_SPEED_100M_H:
1162 port->port_speed = EHEA_SPEED_100M;
1163 port->full_duplex = 0;
1164 break;
1165 case H_SPEED_100M_F:
1166 port->port_speed = EHEA_SPEED_100M;
1167 port->full_duplex = 1;
1168 break;
1169 case H_SPEED_1G_F:
1170 port->port_speed = EHEA_SPEED_1G;
1171 port->full_duplex = 1;
1172 break;
1173 case H_SPEED_10G_F:
1174 port->port_speed = EHEA_SPEED_10G;
1175 port->full_duplex = 1;
1176 break;
1177 default:
1178 port->port_speed = 0;
1179 port->full_duplex = 0;
1180 break;
1181 }
1182 } else {
1183 pr_err("Failed sensing port speed\n");
1184 ret = -EIO;
1185 }
1186 } else {
1187 if (hret == H_AUTHORITY) {
1188 pr_info("Hypervisor denied setting port speed\n");
1189 ret = -EPERM;
1190 } else {
1191 ret = -EIO;
1192 pr_err("Failed setting port speed\n");
1193 }
1194 }
1195 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1196 netif_carrier_on(port->netdev);
1197
1198 free_page((unsigned long)cb4);
1199out:
1200 return ret;
1201}
1202
1203static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1204{
1205 int ret;
1206 u8 ec;
1207 u8 portnum;
1208 struct ehea_port *port;
1209 struct net_device *dev;
1210
1211 	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1212 	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1213 	port = ehea_get_port(adapter, portnum);
1214 	if (!port) {
1215 		pr_err("unknown portnum %x\n", portnum);
1216 		return;
1217 	}
1218 	dev = port->netdev;
1219 
1220 	switch (ec) {
1221 	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
1222 
1224 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1225 if (!netif_carrier_ok(dev)) {
1226 ret = ehea_sense_port_attr(port);
1227 if (ret) {
1228 netdev_err(dev, "failed resensing port attributes\n");
1229 break;
1230 }
1231
1232 netif_info(port, link, dev,
1233 "Logical port up: %dMbps %s Duplex\n",
1234 port->port_speed,
1235 port->full_duplex == 1 ?
1236 "Full" : "Half");
1237
1238 netif_carrier_on(dev);
1239 netif_wake_queue(dev);
1240 }
1241 } else
1242 if (netif_carrier_ok(dev)) {
1243 netif_info(port, link, dev,
1244 "Logical port down\n");
1245 netif_carrier_off(dev);
1246 netif_stop_queue(dev);
1247 }
1248
1249 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1250 port->phy_link = EHEA_PHY_LINK_UP;
1251 netif_info(port, link, dev,
1252 "Physical port up\n");
1253 if (prop_carrier_state)
1254 netif_carrier_on(dev);
1255 } else {
1256 port->phy_link = EHEA_PHY_LINK_DOWN;
1257 netif_info(port, link, dev,
1258 "Physical port down\n");
1259 if (prop_carrier_state)
1260 netif_carrier_off(dev);
1261 }
1262
1263 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1264 netdev_info(dev,
1265 "External switch port is primary port\n");
1266 else
1267 netdev_info(dev,
1268 "External switch port is backup port\n");
1269
1270 break;
1271 case EHEA_EC_ADAPTER_MALFUNC:
1272 netdev_err(dev, "Adapter malfunction\n");
1273 break;
1274 case EHEA_EC_PORT_MALFUNC:
1275 netdev_info(dev, "Port malfunction\n");
1276 netif_carrier_off(dev);
1277 netif_stop_queue(dev);
1278 break;
1279 default:
1280 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
1281 break;
1282 }
1283}
1284
1285static void ehea_neq_tasklet(unsigned long data)
1286{
1287 struct ehea_adapter *adapter = (struct ehea_adapter *)data;
1288 struct ehea_eqe *eqe;
1289 u64 event_mask;
1290
1291 eqe = ehea_poll_eq(adapter->neq);
1292 pr_debug("eqe=%p\n", eqe);
1293
1294 while (eqe) {
1295 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
1296 ehea_parse_eqe(adapter, eqe->entry);
1297 eqe = ehea_poll_eq(adapter->neq);
1298 pr_debug("next eqe=%p\n", eqe);
1299 }
1300
1301 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
1302 | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
1303 | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
1304
1305 ehea_h_reset_events(adapter->handle,
1306 adapter->neq->fw_handle, event_mask);
1307}
1308
1309static irqreturn_t ehea_interrupt_neq(int irq, void *param)
1310{
1311 struct ehea_adapter *adapter = param;
1312 tasklet_hi_schedule(&adapter->neq_tasklet);
1313 return IRQ_HANDLED;
1314}
1315
1316
1317static int ehea_fill_port_res(struct ehea_port_res *pr)
1318{
1319 int ret;
1320 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1321
1322 ehea_init_fill_rq1(pr, pr->rq1_skba.len);
1323
1324 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1325
1326 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1327
1328 return ret;
1329}
1330
1331static int ehea_reg_interrupts(struct net_device *dev)
1332{
1333 struct ehea_port *port = netdev_priv(dev);
1334 struct ehea_port_res *pr;
1335 int i, ret;
1336
1337
1338 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
1339 dev->name);
1340
1341 ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1342 ehea_qp_aff_irq_handler,
1343 IRQF_DISABLED, port->int_aff_name, port);
1344 if (ret) {
1345 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1346 port->qp_eq->attr.ist1);
1347 goto out_free_qpeq;
1348 }
1349
1350 netif_info(port, ifup, dev,
1351 "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
1352 port->qp_eq->attr.ist1);
1353
1354
1355 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1356 pr = &port->port_res[i];
1357 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
1358 "%s-queue%d", dev->name, i);
1359 ret = ibmebus_request_irq(pr->eq->attr.ist1,
1360 ehea_recv_irq_handler,
1361 IRQF_DISABLED, pr->int_send_name,
1362 pr);
1363 if (ret) {
1364 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1365 i, pr->eq->attr.ist1);
1366 goto out_free_req;
1367 }
1368 netif_info(port, ifup, dev,
1369 "irq_handle 0x%X for function ehea_queue_int %d registered\n",
1370 pr->eq->attr.ist1, i);
1371 }
1372out:
1373 return ret;
1374
1375
1376out_free_req:
1377 while (--i >= 0) {
1378 u32 ist = port->port_res[i].eq->attr.ist1;
1379 ibmebus_free_irq(ist, &port->port_res[i]);
1380 }
1381
1382out_free_qpeq:
1383 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1384 i = port->num_def_qps;
1385
1386 goto out;
1387
1388}
1389
1390static void ehea_free_interrupts(struct net_device *dev)
1391{
1392 struct ehea_port *port = netdev_priv(dev);
1393 struct ehea_port_res *pr;
1394 int i;
1395
1396 /* send */
1397
1398 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1399 pr = &port->port_res[i];
1400 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1401 netif_info(port, intr, dev,
1402 "free send irq for res %d with handle 0x%X\n",
1403 i, pr->eq->attr.ist1);
1404 }
1405
1406 /* associated events */
1407 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1408 netif_info(port, intr, dev,
1409 "associated event interrupt for handle 0x%X freed\n",
1410 port->qp_eq->attr.ist1);
1411}
1412
1413static int ehea_configure_port(struct ehea_port *port)
1414{
1415 int ret, i;
1416 u64 hret, mask;
1417 struct hcp_ehea_port_cb0 *cb0;
1418
1419 ret = -ENOMEM;
1420 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1421 if (!cb0)
1422 goto out;
1423
1424 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1425 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1426 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1427 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1428 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1429 PXLY_RC_VLAN_FILTER)
1430 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1431
1432 for (i = 0; i < port->num_mcs; i++)
1433 if (use_mcs)
1434 cb0->default_qpn_arr[i] =
1435 port->port_res[i].qp->init_attr.qp_nr;
1436 else
1437 cb0->default_qpn_arr[i] =
1438 port->port_res[0].qp->init_attr.qp_nr;
1439
1440 if (netif_msg_ifup(port))
1441 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1442
1443 mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1444 | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1445
1446 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1447 port->logical_port_id,
1448 H_PORT_CB0, mask, cb0);
1449 ret = -EIO;
1450 if (hret != H_SUCCESS)
1451 goto out_free;
1452
1453 ret = 0;
1454
1455out_free:
1456 free_page((unsigned long)cb0);
1457out:
1458 return ret;
1459}
1460
1461int ehea_gen_smrs(struct ehea_port_res *pr)
1462{
1463 int ret;
1464 struct ehea_adapter *adapter = pr->port->adapter;
1465
1466 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1467 if (ret)
1468 goto out;
1469
1470 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1471 if (ret)
1472 goto out_free;
1473
1474 return 0;
1475
1476out_free:
1477 ehea_rem_mr(&pr->send_mr);
1478out:
1479 pr_err("Generating SMRS failed\n");
1480 return -EIO;
1481}
1482
1483int ehea_rem_smrs(struct ehea_port_res *pr)
1484{
1485 if ((ehea_rem_mr(&pr->send_mr)) ||
1486 (ehea_rem_mr(&pr->recv_mr)))
1487 return -EIO;
1488 else
1489 return 0;
1490}
1491
1492static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1493{
1494 int arr_size = sizeof(void *) * max_q_entries;
1495
1496 q_skba->arr = vzalloc(arr_size);
1497 if (!q_skba->arr)
1498 return -ENOMEM;
1499
1500 q_skba->len = max_q_entries;
1501 q_skba->index = 0;
1502 q_skba->os_skbs = 0;
1503
1504 return 0;
1505}
1506
1507static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1508 struct port_res_cfg *pr_cfg, int queue_token)
1509{
1510 struct ehea_adapter *adapter = port->adapter;
1511 enum ehea_eq_type eq_type = EHEA_EQ;
1512 struct ehea_qp_init_attr *init_attr = NULL;
1513 int ret = -EIO;
1514 u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1515
1516 tx_bytes = pr->tx_bytes;
1517 tx_packets = pr->tx_packets;
1518 rx_bytes = pr->rx_bytes;
1519 rx_packets = pr->rx_packets;
1520
1521 memset(pr, 0, sizeof(struct ehea_port_res));
1522
1523 	pr->tx_bytes = tx_bytes;
1524 pr->tx_packets = tx_packets;
1525 pr->rx_bytes = rx_bytes;
1526 pr->rx_packets = rx_packets;
1527
1528 pr->port = port;
1529 spin_lock_init(&pr->xmit_lock);
1530 spin_lock_init(&pr->netif_queue);
1531
1532 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1533 if (!pr->eq) {
1534 pr_err("create_eq failed (eq)\n");
1535 goto out_free;
1536 }
1537
1538 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1539 pr->eq->fw_handle,
1540 port->logical_port_id);
1541 if (!pr->recv_cq) {
1542 pr_err("create_cq failed (cq_recv)\n");
1543 goto out_free;
1544 }
1545
1546 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1547 pr->eq->fw_handle,
1548 port->logical_port_id);
1549 if (!pr->send_cq) {
1550 pr_err("create_cq failed (cq_send)\n");
1551 goto out_free;
1552 }
1553
1554 if (netif_msg_ifup(port))
1555 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1556 pr->send_cq->attr.act_nr_of_cqes,
1557 pr->recv_cq->attr.act_nr_of_cqes);
1558
1559 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1560 if (!init_attr) {
1561 ret = -ENOMEM;
1562 pr_err("no mem for ehea_qp_init_attr\n");
1563 goto out_free;
1564 }
1565
1566 init_attr->low_lat_rq1 = 1;
1567 init_attr->signalingtype = 1; /* generate CQE if specified in WQE */
1568 init_attr->rq_count = 3;
1569 init_attr->qp_token = queue_token;
1570 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1571 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1572 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1573 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1574 init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1575 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1576 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1577 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1578 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1579 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1580 init_attr->port_nr = port->logical_port_id;
1581 init_attr->send_cq_handle = pr->send_cq->fw_handle;
1582 init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1583 init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1584
1585 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1586 if (!pr->qp) {
1587 pr_err("create_qp failed\n");
1588 ret = -EIO;
1589 goto out_free;
1590 }
1591
1592 if (netif_msg_ifup(port))
1593 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1594 init_attr->qp_nr,
1595 init_attr->act_nr_send_wqes,
1596 init_attr->act_nr_rwqes_rq1,
1597 init_attr->act_nr_rwqes_rq2,
1598 init_attr->act_nr_rwqes_rq3);
1599
1600 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1601
1602 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1603 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1604 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1605 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1606 if (ret)
1607 goto out_free;
1608
1609 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1610 if (ehea_gen_smrs(pr) != 0) {
1611 ret = -EIO;
1612 goto out_free;
1613 }
1614
1615 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1616
1617 kfree(init_attr);
1618
1619 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1620
1621 pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
1622 pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1623 pr->lro_mgr.lro_arr = pr->lro_desc;
1624 pr->lro_mgr.get_skb_header = get_skb_hdr;
1625 pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1626 pr->lro_mgr.dev = port->netdev;
1627 pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1628 pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1629
1630 ret = 0;
1631 goto out;
1632
1633out_free:
1634 kfree(init_attr);
1635 vfree(pr->sq_skba.arr);
1636 vfree(pr->rq1_skba.arr);
1637 vfree(pr->rq2_skba.arr);
1638 vfree(pr->rq3_skba.arr);
1639 ehea_destroy_qp(pr->qp);
1640 ehea_destroy_cq(pr->send_cq);
1641 ehea_destroy_cq(pr->recv_cq);
1642 ehea_destroy_eq(pr->eq);
1643out:
1644 return ret;
1645}
1646
1647static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1648{
1649 int ret, i;
1650
1651 if (pr->qp)
1652 netif_napi_del(&pr->napi);
1653
1654 ret = ehea_destroy_qp(pr->qp);
1655
1656 if (!ret) {
1657 ehea_destroy_cq(pr->send_cq);
1658 ehea_destroy_cq(pr->recv_cq);
1659 ehea_destroy_eq(pr->eq);
1660
1661 for (i = 0; i < pr->rq1_skba.len; i++)
1662 if (pr->rq1_skba.arr[i])
1663 dev_kfree_skb(pr->rq1_skba.arr[i]);
1664
1665 for (i = 0; i < pr->rq2_skba.len; i++)
1666 if (pr->rq2_skba.arr[i])
1667 dev_kfree_skb(pr->rq2_skba.arr[i]);
1668
1669 for (i = 0; i < pr->rq3_skba.len; i++)
1670 if (pr->rq3_skba.arr[i])
1671 dev_kfree_skb(pr->rq3_skba.arr[i]);
1672
1673 for (i = 0; i < pr->sq_skba.len; i++)
1674 if (pr->sq_skba.arr[i])
1675 dev_kfree_skb(pr->sq_skba.arr[i]);
1676
1677 vfree(pr->rq1_skba.arr);
1678 vfree(pr->rq2_skba.arr);
1679 vfree(pr->rq3_skba.arr);
1680 vfree(pr->sq_skba.arr);
1681 ret = ehea_rem_smrs(pr);
1682 }
1683 return ret;
1684}
1685
1686/*
1687 * The write_* functions store information in swqe which is used by
1688 * the hardware to calculate the ip/tcp/udp checksum
1689 */
1690
1691static inline void write_ip_start_end(struct ehea_swqe *swqe,
1692 const struct sk_buff *skb)
1693{
1694 swqe->ip_start = skb_network_offset(skb);
1695 swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
1696}
1697
1698static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
1699 const struct sk_buff *skb)
1700{
1701 swqe->tcp_offset =
1702 (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
1703
1704 swqe->tcp_end = (u16)skb->len - 1;
1705}
1706
1707static inline void write_udp_offset_end(struct ehea_swqe *swqe,
1708 const struct sk_buff *skb)
1709{
1710 swqe->tcp_offset =
1711 (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
1712
1713 swqe->tcp_end = (u16)skb->len - 1;
1714}
1715
1716
1717static void write_swqe2_TSO(struct sk_buff *skb,
1718 struct ehea_swqe *swqe, u32 lkey)
1719{
1720 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1721 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1722 int skb_data_size = skb_headlen(skb);
1723 int headersize;
1724
1725 /* Packet is TCP with TSO enabled */
1726 swqe->tx_control |= EHEA_SWQE_TSO;
1727 swqe->mss = skb_shinfo(skb)->gso_size;
1728 /* copy only eth/ip/tcp headers to immediate data and
1729 * the rest of skb->data to sg1entry
1730 */
1731 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1732
1733 skb_data_size = skb_headlen(skb);
1734
1735 if (skb_data_size >= headersize) {
1736 /* copy immediate data */
1737 skb_copy_from_linear_data(skb, imm_data, headersize);
1738 swqe->immediate_data_length = headersize;
1739
1740 if (skb_data_size > headersize) {
1741 /* set sg1entry data */
1742 sg1entry->l_key = lkey;
1743 sg1entry->len = skb_data_size - headersize;
1744 sg1entry->vaddr =
1745 ehea_map_vaddr(skb->data + headersize);
1746 swqe->descriptors++;
1747 }
1748 } else
1749 pr_err("cannot handle fragmented headers\n");
1750}
1751
1752static void write_swqe2_nonTSO(struct sk_buff *skb,
1753 struct ehea_swqe *swqe, u32 lkey)
1754{
1755 int skb_data_size = skb_headlen(skb);
1756 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1757 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1758
1759 /* Packet is any nonTSO type
1760 *
1761 * Copy as much as possible skb->data to immediate data and
1762 * the rest to sg1entry
1763 */
1764 if (skb_data_size >= SWQE2_MAX_IMM) {
1765 /* copy immediate data */
1766 skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);
1767
1768 swqe->immediate_data_length = SWQE2_MAX_IMM;
1769
1770 if (skb_data_size > SWQE2_MAX_IMM) {
1771 /* copy sg1entry data */
1772 sg1entry->l_key = lkey;
1773 sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
1774 sg1entry->vaddr =
1775 ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
1776 swqe->descriptors++;
1777 }
1778 } else {
1779 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1780 swqe->immediate_data_length = skb_data_size;
1781 }
1782}
1783
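/*
 * Lay out the skb for a SWQE2 send: the linear part goes into immediate
 * data/sg1entry via the TSO or non-TSO helper above, then any page fragments
 * are added as scatter-gather descriptors (sg1entry first if it is still
 * unused, the rest into sg_list).
 */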
1784static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1785 struct ehea_swqe *swqe, u32 lkey)
1786{
1787 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1788 skb_frag_t *frag;
1789 int nfrags, sg1entry_contains_frag_data, i;
1790
1791 nfrags = skb_shinfo(skb)->nr_frags;
1792 sg1entry = &swqe->u.immdata_desc.sg_entry;
1793 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1794 swqe->descriptors = 0;
1795 sg1entry_contains_frag_data = 0;
1796
1797 if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
1798 write_swqe2_TSO(skb, swqe, lkey);
1799 else
1800 write_swqe2_nonTSO(skb, swqe, lkey);
1801
1802 /* write descriptors */
1803 if (nfrags > 0) {
1804 if (swqe->descriptors == 0) {
1805 /* sg1entry not yet used */
1806 frag = &skb_shinfo(skb)->frags[0];
1807
1808 /* copy sg1entry data */
1809 sg1entry->l_key = lkey;
1810 sg1entry->len = frag->size;
1811 sg1entry->vaddr =
1812 ehea_map_vaddr(page_address(frag->page)
1813 + frag->page_offset);
1814 swqe->descriptors++;
1815 sg1entry_contains_frag_data = 1;
1816 }
1817
1818 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1819
1820 frag = &skb_shinfo(skb)->frags[i];
1821 sgentry = &sg_list[i - sg1entry_contains_frag_data];
1822
1823 sgentry->l_key = lkey;
1824 sgentry->len = frag->size;
1825 sgentry->vaddr =
1826 ehea_map_vaddr(page_address(frag->page)
1827 + frag->page_offset);
1828 swqe->descriptors++;
1829 }
1830 }
1831}
1832
1833static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1834{
1835 int ret = 0;
1836 u64 hret;
1837 u8 reg_type;
1838
1839 /* De/Register untagged packets */
1840 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1841 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1842 port->logical_port_id,
1843 reg_type, port->mac_addr, 0, hcallid);
1844 if (hret != H_SUCCESS) {
1845 		pr_err("%sregistering bc address failed (untagged)\n",
1846 hcallid == H_REG_BCMC ? "" : "de");
1847 ret = -EIO;
1848 goto out_herr;
1849 }
1850
1851 /* De/Register VLAN packets */
1852 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1853 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1854 port->logical_port_id,
1855 reg_type, port->mac_addr, 0, hcallid);
1856 if (hret != H_SUCCESS) {
1857 pr_err("%sregistering bc address failed (vlan)\n",
1858 hcallid == H_REG_BCMC ? "" : "de");
1859 ret = -EIO;
1860 }
1861out_herr:
1862 return ret;
1863}
1864
1865static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1866{
1867 struct ehea_port *port = netdev_priv(dev);
1868 struct sockaddr *mac_addr = sa;
1869 struct hcp_ehea_port_cb0 *cb0;
1870 int ret;
1871 u64 hret;
1872
1873 if (!is_valid_ether_addr(mac_addr->sa_data)) {
1874 ret = -EADDRNOTAVAIL;
1875 goto out;
1876 }
1877
1878 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1879 if (!cb0) {
1880 pr_err("no mem for cb0\n");
1881 ret = -ENOMEM;
1882 goto out;
1883 }
1884
1885 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1886
1887 cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1888
1889 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1890 port->logical_port_id, H_PORT_CB0,
1891 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1892 if (hret != H_SUCCESS) {
1893 ret = -EIO;
1894 goto out_free;
1895 }
1896
1897 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1898
1899 /* Deregister old MAC in pHYP */
1900 if (port->state == EHEA_PORT_UP) {
1901 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1902 if (ret)
1903 goto out_upregs;
1904 }
1905
1906 port->mac_addr = cb0->port_mac_addr << 16;
1907
1908 /* Register new MAC in pHYP */
1909 if (port->state == EHEA_PORT_UP) {
1910 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1911 if (ret)
1912 goto out_upregs;
1913 }
1914
1915 ret = 0;
1916
1917out_upregs:
1918 ehea_update_bcmc_registrations();
1919out_free:
1920 free_page((unsigned long)cb0);
1921out:
1922 return ret;
1923}
1924
1925static void ehea_promiscuous_error(u64 hret, int enable)
1926{
1927 if (hret == H_AUTHORITY)
1928 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1929 enable == 1 ? "en" : "dis");
1930 else
1931 pr_err("failed %sabling promiscuous mode\n",
1932 enable == 1 ? "en" : "dis");
1933}
1934
1935static void ehea_promiscuous(struct net_device *dev, int enable)
1936{
1937 struct ehea_port *port = netdev_priv(dev);
1938 struct hcp_ehea_port_cb7 *cb7;
1939 u64 hret;
1940
1941 if (enable == port->promisc)
1942 return;
1943
1944 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1945 if (!cb7) {
1946 pr_err("no mem for cb7\n");
1947 goto out;
1948 }
1949
1950 /* Modify Pxs_DUCQPN in CB7 */
1951 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1952
1953 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1954 port->logical_port_id,
1955 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1956 if (hret) {
1957 ehea_promiscuous_error(hret, enable);
1958 goto out;
1959 }
1960
1961 port->promisc = enable;
1962out:
1963 free_page((unsigned long)cb7);
1964}
1965
1966static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1967 u32 hcallid)
1968{
1969 u64 hret;
1970 u8 reg_type;
1971
1972 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1973 | EHEA_BCMC_UNTAGGED;
1974
1975 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1976 port->logical_port_id,
1977 reg_type, mc_mac_addr, 0, hcallid);
1978 if (hret)
1979 goto out;
1980
1981 reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1982 | EHEA_BCMC_VLANID_ALL;
1983
1984 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1985 port->logical_port_id,
1986 reg_type, mc_mac_addr, 0, hcallid);
1987out:
1988 return hret;
1989}
1990
1991static int ehea_drop_multicast_list(struct net_device *dev)
1992{
1993 struct ehea_port *port = netdev_priv(dev);
1994 struct ehea_mc_list *mc_entry = port->mc_list;
1995 struct list_head *pos;
1996 struct list_head *temp;
1997 int ret = 0;
1998 u64 hret;
1999
2000 list_for_each_safe(pos, temp, &(port->mc_list->list)) {
2001 mc_entry = list_entry(pos, struct ehea_mc_list, list);
2002
2003 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
2004 H_DEREG_BCMC);
2005 if (hret) {
2006 pr_err("failed deregistering mcast MAC\n");
2007 ret = -EIO;
2008 }
2009
2010 list_del(pos);
2011 kfree(mc_entry);
2012 }
2013 return ret;
2014}
2015
2016static void ehea_allmulti(struct net_device *dev, int enable)
2017{
2018 struct ehea_port *port = netdev_priv(dev);
2019 u64 hret;
2020
2021 if (!port->allmulti) {
2022 if (enable) {
2023 /* Enable ALLMULTI */
2024 ehea_drop_multicast_list(dev);
2025 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
2026 if (!hret)
2027 port->allmulti = 1;
2028 else
2029 netdev_err(dev,
2030 "failed enabling IFF_ALLMULTI\n");
2031 }
2032 } else
2033 if (!enable) {
2034 /* Disable ALLMULTI */
2035 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
2036 if (!hret)
2037 port->allmulti = 0;
2038 else
2039 netdev_err(dev,
2040 "failed disabling IFF_ALLMULTI\n");
2041 }
2042}
2043
2044static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2045{
2046 struct ehea_mc_list *ehea_mcl_entry;
2047 u64 hret;
2048
2049 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
2050 if (!ehea_mcl_entry) {
2051 pr_err("no mem for mcl_entry\n");
2052 return;
2053 }
2054
2055 INIT_LIST_HEAD(&ehea_mcl_entry->list);
2056
2057 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
2058
2059 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
2060 H_REG_BCMC);
2061 if (!hret)
2062 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
2063 else {
2064 pr_err("failed registering mcast MAC\n");
2065 kfree(ehea_mcl_entry);
2066 }
2067}
2068
2069static void ehea_set_multicast_list(struct net_device *dev)
2070{
2071 struct ehea_port *port = netdev_priv(dev);
2072 struct netdev_hw_addr *ha;
2073 int ret;
2074
2075 if (port->promisc) {
2076 ehea_promiscuous(dev, 1);
2077 return;
2078 }
2079 ehea_promiscuous(dev, 0);
2080
2081 if (dev->flags & IFF_ALLMULTI) {
2082 ehea_allmulti(dev, 1);
2083 goto out;
2084 }
2085 ehea_allmulti(dev, 0);
2086
2087 if (!netdev_mc_empty(dev)) {
2088 ret = ehea_drop_multicast_list(dev);
2089 if (ret) {
2090 /* Dropping the current multicast list failed.
2091 * Enabling ALL_MULTI is the best we can do.
2092 */
2093 ehea_allmulti(dev, 1);
2094 }
2095
2096 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
2097 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
2098 port->adapter->max_mc_mac);
2099 goto out;
2100 }
2101
2102 netdev_for_each_mc_addr(ha, dev)
2103 ehea_add_multicast_entry(port, ha->addr);
2104
2105 }
2106out:
2107 ehea_update_bcmc_registrations();
2108}
2109
2110static int ehea_change_mtu(struct net_device *dev, int new_mtu)
2111{
2112 if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
2113 return -EINVAL;
2114 dev->mtu = new_mtu;
2115 return 0;
2116}
2117
2118static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
2119 struct ehea_swqe *swqe, u32 lkey)
2120{
2121 if (skb->protocol == htons(ETH_P_IP)) {
2122 const struct iphdr *iph = ip_hdr(skb);
2123
2124 /* IPv4 */
2125 swqe->tx_control |= EHEA_SWQE_CRC
2126 | EHEA_SWQE_IP_CHECKSUM
2127 | EHEA_SWQE_TCP_CHECKSUM
2128 | EHEA_SWQE_IMM_DATA_PRESENT
2129 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2130
2131 write_ip_start_end(swqe, skb);
2132
2133 if (iph->protocol == IPPROTO_UDP) {
2134 if ((iph->frag_off & IP_MF) ||
2135 (iph->frag_off & IP_OFFSET))
2136 /* IP fragment, so don't change cs */
2137 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
2138 else
2139 write_udp_offset_end(swqe, skb);
2140 } else if (iph->protocol == IPPROTO_TCP) {
2141 write_tcp_offset_end(swqe, skb);
2142 }
2143
2144 /* icmp (big data) and ip segmentation packets (all other ip
2145 packets) do not require any special handling */
2146
2147 } else {
2148 /* Other Ethernet Protocol */
2149 swqe->tx_control |= EHEA_SWQE_CRC
2150 | EHEA_SWQE_IMM_DATA_PRESENT
2151 | EHEA_SWQE_DESCRIPTORS_PRESENT;
2152 }
2153
2154 write_swqe2_data(skb, dev, swqe, lkey);
2155}
2156
2157static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2158 struct ehea_swqe *swqe)
2159{
2160 int nfrags = skb_shinfo(skb)->nr_frags;
2161 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
2162 skb_frag_t *frag;
2163 int i;
2164
2165 if (skb->protocol == htons(ETH_P_IP)) {
2166 const struct iphdr *iph = ip_hdr(skb);
2167
2168 /* IPv4 */
2169 write_ip_start_end(swqe, skb);
2170
2171 if (iph->protocol == IPPROTO_TCP) {
2172 swqe->tx_control |= EHEA_SWQE_CRC
2173 | EHEA_SWQE_IP_CHECKSUM
2174 | EHEA_SWQE_TCP_CHECKSUM
2175 | EHEA_SWQE_IMM_DATA_PRESENT;
2176
2177 write_tcp_offset_end(swqe, skb);
2178
2179 } else if (iph->protocol == IPPROTO_UDP) {
2180 if ((iph->frag_off & IP_MF) ||
2181 (iph->frag_off & IP_OFFSET))
2182 /* IP fragment, so don't change cs */
2183 swqe->tx_control |= EHEA_SWQE_CRC
2184 | EHEA_SWQE_IMM_DATA_PRESENT;
2185 else {
2186 swqe->tx_control |= EHEA_SWQE_CRC
2187 | EHEA_SWQE_IP_CHECKSUM
2188 | EHEA_SWQE_TCP_CHECKSUM
2189 | EHEA_SWQE_IMM_DATA_PRESENT;
2190
2191 write_udp_offset_end(swqe, skb);
2192 }
2193 } else {
2194 /* icmp (big data) and
2195 ip segmentation packets (all other ip packets) */
2196 swqe->tx_control |= EHEA_SWQE_CRC
2197 | EHEA_SWQE_IP_CHECKSUM
2198 | EHEA_SWQE_IMM_DATA_PRESENT;
2199 }
2200 } else {
2201 /* Other Ethernet Protocol */
2202 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
2203 }
2204 /* copy (immediate) data */
2205 if (nfrags == 0) {
2206 /* data is in a single piece */
2207 skb_copy_from_linear_data(skb, imm_data, skb->len);
2208 } else {
2209 /* first copy data from the skb->data buffer ... */
2210 skb_copy_from_linear_data(skb, imm_data,
2211 skb_headlen(skb));
2212 imm_data += skb_headlen(skb);
2213
2214 /* ... then copy data from the fragments */
2215 for (i = 0; i < nfrags; i++) {
2216 frag = &skb_shinfo(skb)->frags[i];
2217 memcpy(imm_data,
2218 page_address(frag->page) + frag->page_offset,
2219 frag->size);
2220 imm_data += frag->size;
2221 }
2222 }
2223 swqe->immediate_data_length = skb->len;
2224 dev_kfree_skb(skb);
2225}
2226
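/*
 * Pick a TX queue for this skb.  TCP flows are spread across the available
 * QPs by mixing the port numbers and destination address (modulo 31), so
 * packets of one connection stay on one queue; everything else uses QP 0.
 */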
2227static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
2228{
2229 struct tcphdr *tcp;
2230 u32 tmp;
2231
2232 if ((skb->protocol == htons(ETH_P_IP)) &&
2233 (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
2234 tcp = (struct tcphdr *)(skb_network_header(skb) +
2235 (ip_hdr(skb)->ihl * 4));
2236 tmp = (tcp->source + (tcp->dest << 16)) % 31;
2237 tmp += ip_hdr(skb)->daddr % 31;
2238 return tmp % num_qps;
2239 } else
2240 return 0;
2241}
2242
2243static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2244{
2245 struct ehea_port *port = netdev_priv(dev);
2246 struct ehea_swqe *swqe;
2247 unsigned long flags;
2248 u32 lkey;
2249 int swqe_index;
2250 struct ehea_port_res *pr;
2251
2252 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
2253
2254 if (!spin_trylock(&pr->xmit_lock))
2255 return NETDEV_TX_BUSY;
2256
2257 if (pr->queue_stopped) {
2258 spin_unlock(&pr->xmit_lock);
2259 return NETDEV_TX_BUSY;
2260 }
2261
2262 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2263 memset(swqe, 0, SWQE_HEADER_SIZE);
2264 atomic_dec(&pr->swqe_avail);
2265
2266 if (vlan_tx_tag_present(skb)) {
2267 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2268 swqe->vlan_tag = vlan_tx_tag_get(skb);
2269 }
2270
2271 pr->tx_packets++;
2272 pr->tx_bytes += skb->len;
2273
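	/*
	 * Frames small enough for SWQE3 are copied completely into the WQE
	 * as immediate data and the skb is freed right away (ehea_xmit3);
	 * larger frames use SWQE2 with SG descriptors, so the skb is parked
	 * in sq_skba until the signalled send completion arrives.
	 */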
2274 if (skb->len <= SWQE3_MAX_IMM) {
2275 u32 sig_iv = port->sig_comp_iv;
2276 u32 swqe_num = pr->swqe_id_counter;
2277 ehea_xmit3(skb, dev, swqe);
2278 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2279 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2280 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2281 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2282 sig_iv);
2283 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2284 pr->swqe_ll_count = 0;
2285 } else
2286 pr->swqe_ll_count += 1;
2287 } else {
2288 swqe->wr_id =
2289 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2290 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2291 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2292 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2293 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2294
2295 pr->sq_skba.index++;
2296 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2297
2298 lkey = pr->send_mr.lkey;
2299 ehea_xmit2(skb, dev, swqe, lkey);
2300 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2301 }
2302 pr->swqe_id_counter += 1;
2303
2304 netif_info(port, tx_queued, dev,
2305 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2306 if (netif_msg_tx_queued(port))
2307 ehea_dump(swqe, 512, "swqe");
2308
2309 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2310 netif_stop_queue(dev);
2311 swqe->tx_control |= EHEA_SWQE_PURGE;
2312 }
2313
2314 ehea_post_swqe(pr->qp, swqe);
2315
2316 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2317 spin_lock_irqsave(&pr->netif_queue, flags);
2318 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2319 pr->p_stats.queue_stopped++;
2320 netif_stop_queue(dev);
2321 pr->queue_stopped = 1;
2322 }
2323 spin_unlock_irqrestore(&pr->netif_queue, flags);
2324 }
2325 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
2326 spin_unlock(&pr->xmit_lock);
2327
2328 return NETDEV_TX_OK;
2329}
2330
2331static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2332{
2333 struct ehea_port *port = netdev_priv(dev);
2334 struct ehea_adapter *adapter = port->adapter;
2335 struct hcp_ehea_port_cb1 *cb1;
2336 int index;
2337 u64 hret;
2338
2339 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2340 if (!cb1) {
2341 pr_err("no mem for cb1\n");
2342 goto out;
2343 }
2344
2345 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2346 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2347 if (hret != H_SUCCESS) {
2348 pr_err("query_ehea_port failed\n");
2349 goto out;
2350 }
2351
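	/* The VLAN filter is a bitmap, one bit per VLAN id, 64 ids per u64
	 * word with id 0 stored in the MSB; set the bit for this vid.
	 */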
2352 index = (vid / 64);
2353 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2354
2355 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2356 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2357 if (hret != H_SUCCESS)
2358 pr_err("modify_ehea_port failed\n");
2359out:
2360 free_page((unsigned long)cb1);
2361 return;
2362}
2363
2364static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2365{
2366 struct ehea_port *port = netdev_priv(dev);
2367 struct ehea_adapter *adapter = port->adapter;
2368 struct hcp_ehea_port_cb1 *cb1;
2369 int index;
2370 u64 hret;
2371
2372 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2373 if (!cb1) {
2374 pr_err("no mem for cb1\n");
2375 goto out;
2376 }
2377
2378 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2379 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2380 if (hret != H_SUCCESS) {
2381 pr_err("query_ehea_port failed\n");
2382 goto out;
2383 }
2384
2385 index = (vid / 64);
2386 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2387
2388 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2389 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2390 if (hret != H_SUCCESS)
2391 pr_err("modify_ehea_port failed\n");
2392out:
2393 free_page((unsigned long)cb1);
2394}
2395
2396int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2397{
2398 int ret = -EIO;
2399 u64 hret;
2400 u16 dummy16 = 0;
2401 u64 dummy64 = 0;
2402 struct hcp_modify_qp_cb0 *cb0;
2403
2404 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2405 if (!cb0) {
2406 ret = -ENOMEM;
2407 goto out;
2408 }
2409
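	/*
	 * Walk the QP through its bring-up sequence with query/modify pairs:
	 * INITIALIZED, then ENABLED, then ready-to-send (RDY2SND).
	 */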
2410 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2411 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2412 if (hret != H_SUCCESS) {
2413 pr_err("query_ehea_qp failed (1)\n");
2414 goto out;
2415 }
2416
2417 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2418 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2419 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2420 &dummy64, &dummy64, &dummy16, &dummy16);
2421 if (hret != H_SUCCESS) {
2422 pr_err("modify_ehea_qp failed (1)\n");
2423 goto out;
2424 }
2425
2426 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2427 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2428 if (hret != H_SUCCESS) {
2429 pr_err("query_ehea_qp failed (2)\n");
2430 goto out;
2431 }
2432
2433 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2434 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2435 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2436 &dummy64, &dummy64, &dummy16, &dummy16);
2437 if (hret != H_SUCCESS) {
2438 pr_err("modify_ehea_qp failed (2)\n");
2439 goto out;
2440 }
2441
2442 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2443 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2444 if (hret != H_SUCCESS) {
2445 pr_err("query_ehea_qp failed (3)\n");
2446 goto out;
2447 }
2448
2449 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2450 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2451 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2452 &dummy64, &dummy64, &dummy16, &dummy16);
2453 if (hret != H_SUCCESS) {
2454 pr_err("modify_ehea_qp failed (3)\n");
2455 goto out;
2456 }
2457
2458 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2459 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2460 if (hret != H_SUCCESS) {
2461 pr_err("query_ehea_qp failed (4)\n");
2462 goto out;
2463 }
2464
2465 ret = 0;
2466out:
2467 free_page((unsigned long)cb0);
2468 return ret;
2469}
2470
2471static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2472 int add_tx_qps)
2473{
2474 int ret, i;
2475 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2476 enum ehea_eq_type eq_type = EHEA_EQ;
2477
2478 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2479 EHEA_MAX_ENTRIES_EQ, 1);
2480 if (!port->qp_eq) {
2481 ret = -EINVAL;
2482 pr_err("ehea_create_eq failed (qp_eq)\n");
2483 goto out_kill_eq;
2484 }
2485
2486 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2487 pr_cfg.max_entries_scq = sq_entries * 2;
2488 pr_cfg.max_entries_sq = sq_entries;
2489 pr_cfg.max_entries_rq1 = rq1_entries;
2490 pr_cfg.max_entries_rq2 = rq2_entries;
2491 pr_cfg.max_entries_rq3 = rq3_entries;
2492
2493 pr_cfg_small_rx.max_entries_rcq = 1;
2494 pr_cfg_small_rx.max_entries_scq = sq_entries;
2495 pr_cfg_small_rx.max_entries_sq = sq_entries;
2496 pr_cfg_small_rx.max_entries_rq1 = 1;
2497 pr_cfg_small_rx.max_entries_rq2 = 1;
2498 pr_cfg_small_rx.max_entries_rq3 = 1;
2499
2500 for (i = 0; i < def_qps; i++) {
2501 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2502 if (ret)
2503 goto out_clean_pr;
2504 }
2505 for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2506 ret = ehea_init_port_res(port, &port->port_res[i],
2507 &pr_cfg_small_rx, i);
2508 if (ret)
2509 goto out_clean_pr;
2510 }
2511
2512 return 0;
2513
2514out_clean_pr:
2515 while (--i >= 0)
2516 ehea_clean_portres(port, &port->port_res[i]);
2517
2518out_kill_eq:
2519 ehea_destroy_eq(port->qp_eq);
2520 return ret;
2521}
2522
2523static int ehea_clean_all_portres(struct ehea_port *port)
2524{
2525 int ret = 0;
2526 int i;
2527
2528 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2529 ret |= ehea_clean_portres(port, &port->port_res[i]);
2530
2531 ret |= ehea_destroy_eq(port->qp_eq);
2532
2533 return ret;
2534}
2535
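/*
 * The kernel memory region is shared by all ports of an adapter: it is
 * registered when the first port comes up and removed again once the last
 * active port is gone.
 */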
2536static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2537{
2538 if (adapter->active_ports)
2539 return;
2540
2541 ehea_rem_mr(&adapter->mr);
2542}
2543
2544static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2545{
2546 if (adapter->active_ports)
2547 return 0;
2548
2549 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2550}
2551
2552static int ehea_up(struct net_device *dev)
2553{
2554 int ret, i;
2555 struct ehea_port *port = netdev_priv(dev);
2556
2557 if (port->state == EHEA_PORT_UP)
2558 return 0;
2559
2560 ret = ehea_port_res_setup(port, port->num_def_qps,
2561 port->num_add_tx_qps);
2562 if (ret) {
2563 netdev_err(dev, "port_res_failed\n");
2564 goto out;
2565 }
2566
2567 /* Set default QP for this port */
2568 ret = ehea_configure_port(port);
2569 if (ret) {
2570 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2571 goto out_clean_pr;
2572 }
2573
2574 ret = ehea_reg_interrupts(dev);
2575 if (ret) {
2576 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2577 goto out_clean_pr;
2578 }
2579
2580 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2581 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2582 if (ret) {
2583 netdev_err(dev, "activate_qp failed\n");
2584 goto out_free_irqs;
2585 }
2586 }
2587
2588 for (i = 0; i < port->num_def_qps; i++) {
2589 ret = ehea_fill_port_res(&port->port_res[i]);
2590 if (ret) {
2591 			netdev_err(dev, "ehea_fill_port_res failed\n");
2592 goto out_free_irqs;
2593 }
2594 }
2595
2596 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2597 if (ret) {
2598 ret = -EIO;
2599 goto out_free_irqs;
2600 }
2601
2602 port->state = EHEA_PORT_UP;
2603
2604 ret = 0;
2605 goto out;
2606
2607out_free_irqs:
2608 ehea_free_interrupts(dev);
2609
2610out_clean_pr:
2611 ehea_clean_all_portres(port);
2612out:
2613 if (ret)
2614 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2615
2616 ehea_update_bcmc_registrations();
2617 ehea_update_firmware_handles();
2618
2619 return ret;
2620}
2621
2622static void port_napi_disable(struct ehea_port *port)
2623{
2624 int i;
2625
2626 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2627 napi_disable(&port->port_res[i].napi);
2628}
2629
2630static void port_napi_enable(struct ehea_port *port)
2631{
2632 int i;
2633
2634 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2635 napi_enable(&port->port_res[i].napi);
2636}
2637
2638static int ehea_open(struct net_device *dev)
2639{
2640 int ret;
2641 struct ehea_port *port = netdev_priv(dev);
2642
2643 mutex_lock(&port->port_lock);
2644
2645 netif_info(port, ifup, dev, "enabling port\n");
2646
2647 ret = ehea_up(dev);
2648 if (!ret) {
2649 port_napi_enable(port);
2650 netif_start_queue(dev);
2651 }
2652
2653 mutex_unlock(&port->port_lock);
2654
2655 return ret;
2656}
2657
2658static int ehea_down(struct net_device *dev)
2659{
2660 int ret;
2661 struct ehea_port *port = netdev_priv(dev);
2662
2663 if (port->state == EHEA_PORT_DOWN)
2664 return 0;
2665
2666 ehea_drop_multicast_list(dev);
2667 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2668
2669 ehea_free_interrupts(dev);
2670
2671 port->state = EHEA_PORT_DOWN;
2672
2673 ehea_update_bcmc_registrations();
2674
2675 ret = ehea_clean_all_portres(port);
2676 if (ret)
2677 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2678
2679 ehea_update_firmware_handles();
2680
2681 return ret;
2682}
2683
2684static int ehea_stop(struct net_device *dev)
2685{
2686 int ret;
2687 struct ehea_port *port = netdev_priv(dev);
2688
2689 netif_info(port, ifdown, dev, "disabling port\n");
2690
2691 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2692 cancel_work_sync(&port->reset_task);
2693 mutex_lock(&port->port_lock);
2694 netif_stop_queue(dev);
2695 port_napi_disable(port);
2696 ret = ehea_down(dev);
2697 mutex_unlock(&port->port_lock);
2698 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2699 return ret;
2700}
2701
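/*
 * Mark every send WQE in the queue with the PURGE flag; ehea_stop_qps()
 * uses this to drain the send queue before the queue pair is disabled.
 */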
2702static void ehea_purge_sq(struct ehea_qp *orig_qp)
2703{
2704 struct ehea_qp qp = *orig_qp;
2705 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2706 struct ehea_swqe *swqe;
2707 int wqe_index;
2708 int i;
2709
2710 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2711 swqe = ehea_get_swqe(&qp, &wqe_index);
2712 swqe->tx_control |= EHEA_SWQE_PURGE;
2713 }
2714}
2715
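/*
 * Wait (up to 100ms per queue) until the pending send WQEs have completed,
 * i.e. swqe_avail has climbed back to roughly the full SQ size, before the
 * queues are torn down for memory re-registration.
 */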
2716static void ehea_flush_sq(struct ehea_port *port)
2717{
2718 int i;
2719
2720 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2721 struct ehea_port_res *pr = &port->port_res[i];
2722 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2723 int ret;
2724
2725 ret = wait_event_timeout(port->swqe_avail_wq,
2726 atomic_read(&pr->swqe_avail) >= swqe_max,
2727 msecs_to_jiffies(100));
2728
2729 if (!ret) {
2730 pr_err("WARNING: sq not flushed completely\n");
2731 break;
2732 }
2733 }
2734}
2735
2736int ehea_stop_qps(struct net_device *dev)
2737{
2738 struct ehea_port *port = netdev_priv(dev);
2739 struct ehea_adapter *adapter = port->adapter;
2740 struct hcp_modify_qp_cb0 *cb0;
2741 int ret = -EIO;
2742 int dret;
2743 int i;
2744 u64 hret;
2745 u64 dummy64 = 0;
2746 u16 dummy16 = 0;
2747
2748 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2749 if (!cb0) {
2750 ret = -ENOMEM;
2751 goto out;
2752 }
2753
2754 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2755 struct ehea_port_res *pr = &port->port_res[i];
2756 struct ehea_qp *qp = pr->qp;
2757
2758 /* Purge send queue */
2759 ehea_purge_sq(qp);
2760
2761 /* Disable queue pair */
2762 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2763 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2764 cb0);
2765 if (hret != H_SUCCESS) {
2766 pr_err("query_ehea_qp failed (1)\n");
2767 goto out;
2768 }
2769
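		/*
		 * Presumably the current resource state is carried over into
		 * the requested-state field (the << 8 shift) while the
		 * ENABLED bit is cleared, taking the QP out of service
		 * without changing its state.
		 */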
2770 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2771 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2772
2773 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2774 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2775 1), cb0, &dummy64,
2776 &dummy64, &dummy16, &dummy16);
2777 if (hret != H_SUCCESS) {
2778 pr_err("modify_ehea_qp failed (1)\n");
2779 goto out;
2780 }
2781
2782 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2783 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2784 cb0);
2785 if (hret != H_SUCCESS) {
2786 pr_err("query_ehea_qp failed (2)\n");
2787 goto out;
2788 }
2789
2790 /* deregister shared memory regions */
2791 dret = ehea_rem_smrs(pr);
2792 if (dret) {
2793 pr_err("unreg shared memory region failed\n");
2794 goto out;
2795 }
2796 }
2797
2798 ret = 0;
2799out:
2800 free_page((unsigned long)cb0);
2801
2802 return ret;
2803}
2804
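/*
 * After the adapter MR has been re-registered (DLPAR memory change) the
 * already-posted RQ2/RQ3 receive WQEs still reference the old memory key;
 * rewrite their sg entries with the new lkey and re-mapped skb addresses.
 */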
2805void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2806{
2807 struct ehea_qp qp = *orig_qp;
2808 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2809 struct ehea_rwqe *rwqe;
2810 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2811 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2812 struct sk_buff *skb;
2813 u32 lkey = pr->recv_mr.lkey;
2814
2816 int i;
2817 int index;
2818
2819 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2820 rwqe = ehea_get_next_rwqe(&qp, 2);
2821 rwqe->sg_list[0].l_key = lkey;
2822 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2823 skb = skba_rq2[index];
2824 if (skb)
2825 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2826 }
2827
2828 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2829 rwqe = ehea_get_next_rwqe(&qp, 3);
2830 rwqe->sg_list[0].l_key = lkey;
2831 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2832 skb = skba_rq3[index];
2833 if (skb)
2834 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2835 }
2836}
2837
2838int ehea_restart_qps(struct net_device *dev)
2839{
2840 struct ehea_port *port = netdev_priv(dev);
2841 struct ehea_adapter *adapter = port->adapter;
2842 int ret = 0;
2843 int i;
2844
2845 struct hcp_modify_qp_cb0 *cb0;
2846 u64 hret;
2847 u64 dummy64 = 0;
2848 u16 dummy16 = 0;
2849
2850 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2851 if (!cb0) {
2852 ret = -ENOMEM;
2853 goto out;
2854 }
2855
2856 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2857 struct ehea_port_res *pr = &port->port_res[i];
2858 struct ehea_qp *qp = pr->qp;
2859
2860 ret = ehea_gen_smrs(pr);
2861 if (ret) {
2862 netdev_err(dev, "creation of shared memory regions failed\n");
2863 goto out;
2864 }
2865
2866 ehea_update_rqs(qp, pr);
2867
2868 /* Enable queue pair */
2869 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2870 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2871 cb0);
2872 if (hret != H_SUCCESS) {
2873 netdev_err(dev, "query_ehea_qp failed (1)\n");
2874 goto out;
2875 }
2876
2877 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2878 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2879
2880 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2881 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2882 1), cb0, &dummy64,
2883 &dummy64, &dummy16, &dummy16);
2884 if (hret != H_SUCCESS) {
2885 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2886 goto out;
2887 }
2888
2889 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2890 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2891 cb0);
2892 if (hret != H_SUCCESS) {
2893 netdev_err(dev, "query_ehea_qp failed (2)\n");
2894 goto out;
2895 }
2896
2897 /* refill entire queue */
2898 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2899 ehea_refill_rq2(pr, 0);
2900 ehea_refill_rq3(pr, 0);
2901 }
2902out:
2903 free_page((unsigned long)cb0);
2904
2905 return ret;
2906}
2907
2908static void ehea_reset_port(struct work_struct *work)
2909{
2910 int ret;
2911 struct ehea_port *port =
2912 container_of(work, struct ehea_port, reset_task);
2913 struct net_device *dev = port->netdev;
2914
2915 mutex_lock(&dlpar_mem_lock);
2916 port->resets++;
2917 mutex_lock(&port->port_lock);
2918 netif_stop_queue(dev);
2919
2920 port_napi_disable(port);
2921
2922 ehea_down(dev);
2923
2924 ret = ehea_up(dev);
2925 if (ret)
2926 goto out;
2927
2928 ehea_set_multicast_list(dev);
2929
2930 netif_info(port, timer, dev, "reset successful\n");
2931
2932 port_napi_enable(port);
2933
2934 netif_wake_queue(dev);
2935out:
2936 mutex_unlock(&port->port_lock);
2937 mutex_unlock(&dlpar_mem_lock);
2938}
2939
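/*
 * LPAR memory was added or removed: quiesce every active port, flush and
 * stop its QPs, drop the old global memory region, register a new one that
 * covers the changed memory layout, and finally restart the QPs.
 */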
2940static void ehea_rereg_mrs(void)
2941{
2942 int ret, i;
2943 struct ehea_adapter *adapter;
2944
2945 pr_info("LPAR memory changed - re-initializing driver\n");
2946
2947 list_for_each_entry(adapter, &adapter_list, list)
2948 if (adapter->active_ports) {
2949 /* Shutdown all ports */
2950 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2951 struct ehea_port *port = adapter->port[i];
2952 struct net_device *dev;
2953
2954 if (!port)
2955 continue;
2956
2957 dev = port->netdev;
2958
2959 if (dev->flags & IFF_UP) {
2960 mutex_lock(&port->port_lock);
2961 netif_stop_queue(dev);
2962 ehea_flush_sq(port);
2963 ret = ehea_stop_qps(dev);
2964 if (ret) {
2965 mutex_unlock(&port->port_lock);
2966 goto out;
2967 }
2968 port_napi_disable(port);
2969 mutex_unlock(&port->port_lock);
2970 }
2971 reset_sq_restart_flag(port);
2972 }
2973
2974 /* Unregister old memory region */
2975 ret = ehea_rem_mr(&adapter->mr);
2976 if (ret) {
2977 pr_err("unregister MR failed - driver inoperable!\n");
2978 goto out;
2979 }
2980 }
2981
2982 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2983
2984 list_for_each_entry(adapter, &adapter_list, list)
2985 if (adapter->active_ports) {
2986 /* Register new memory region */
2987 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2988 if (ret) {
2989 pr_err("register MR failed - driver inoperable!\n");
2990 goto out;
2991 }
2992
2993 /* Restart all ports */
2994 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2995 struct ehea_port *port = adapter->port[i];
2996
2997 if (port) {
2998 struct net_device *dev = port->netdev;
2999
3000 if (dev->flags & IFF_UP) {
3001 mutex_lock(&port->port_lock);
3002 ret = ehea_restart_qps(dev);
3003 if (!ret) {
3004 check_sqs(port);
3005 port_napi_enable(port);
3006 netif_wake_queue(dev);
3007 } else {
3008 netdev_err(dev, "Unable to restart QPS\n");
3009 }
3010 mutex_unlock(&port->port_lock);
3011 }
3012 }
3013 }
3014 }
3015 pr_info("re-initializing driver complete\n");
3016out:
3017 return;
3018}
3019
3020static void ehea_tx_watchdog(struct net_device *dev)
3021{
3022 struct ehea_port *port = netdev_priv(dev);
3023
3024 if (netif_carrier_ok(dev) &&
3025 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
3026 ehea_schedule_port_reset(port);
3027}
3028
3029int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
3030{
3031 struct hcp_query_ehea *cb;
3032 u64 hret;
3033 int ret;
3034
3035 cb = (void *)get_zeroed_page(GFP_KERNEL);
3036 if (!cb) {
3037 ret = -ENOMEM;
3038 goto out;
3039 }
3040
3041 hret = ehea_h_query_ehea(adapter->handle, cb);
3042
3043 if (hret != H_SUCCESS) {
3044 ret = -EIO;
3045 goto out_herr;
3046 }
3047
3048 adapter->max_mc_mac = cb->max_mc_mac - 1;
3049 ret = 0;
3050
3051out_herr:
3052 free_page((unsigned long)cb);
3053out:
3054 return ret;
3055}
3056
3057int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3058{
3059 struct hcp_ehea_port_cb4 *cb4;
3060 u64 hret;
3061 int ret = 0;
3062
3063 *jumbo = 0;
3064
3065 /* (Try to) enable *jumbo frames */
3066 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
3067 if (!cb4) {
3068 pr_err("no mem for cb4\n");
3069 ret = -ENOMEM;
3070 goto out;
3071 } else {
3072 hret = ehea_h_query_ehea_port(port->adapter->handle,
3073 port->logical_port_id,
3074 H_PORT_CB4,
3075 H_PORT_CB4_JUMBO, cb4);
3076 if (hret == H_SUCCESS) {
3077 if (cb4->jumbo_frame)
3078 *jumbo = 1;
3079 else {
3080 cb4->jumbo_frame = 1;
3081 hret = ehea_h_modify_ehea_port(port->adapter->
3082 handle,
3083 port->
3084 logical_port_id,
3085 H_PORT_CB4,
3086 H_PORT_CB4_JUMBO,
3087 cb4);
3088 if (hret == H_SUCCESS)
3089 *jumbo = 1;
3090 }
3091 } else
3092 ret = -EINVAL;
3093
3094 free_page((unsigned long)cb4);
3095 }
3096out:
3097 return ret;
3098}
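/*
 * Summary of the query-then-enable pattern above: port control block 4 is
 * queried first; if jumbo frames are already active, *jumbo is reported as 1,
 * otherwise the driver tries to switch them on via H_MODIFY_HEA_PORT and only
 * reports success if the hypervisor accepted the change.
 */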
3099
3100static ssize_t ehea_show_port_id(struct device *dev,
3101 struct device_attribute *attr, char *buf)
3102{
3103 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3104 return sprintf(buf, "%d", port->logical_port_id);
3105}
3106
3107static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3108 NULL);
3109
3110static void logical_port_release(struct device *dev)
3111{
3112 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3113 of_node_put(port->ofdev.dev.of_node);
3114}
3115
3116static struct device *ehea_register_port(struct ehea_port *port,
3117 struct device_node *dn)
3118{
3119 int ret;
3120
3121 port->ofdev.dev.of_node = of_node_get(dn);
3122 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3123 port->ofdev.dev.bus = &ibmebus_bus_type;
3124
3125 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
3126 port->ofdev.dev.release = logical_port_release;
3127
3128 ret = of_device_register(&port->ofdev);
3129 if (ret) {
3130 pr_err("failed to register device. ret=%d\n", ret);
3131 goto out;
3132 }
3133
3134 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3135 if (ret) {
3136 pr_err("failed to register attributes, ret=%d\n", ret);
3137 goto out_unreg_of_dev;
3138 }
3139
3140 return &port->ofdev.dev;
3141
3142out_unreg_of_dev:
3143 of_device_unregister(&port->ofdev);
3144out:
3145 return NULL;
3146}
3147
3148static void ehea_unregister_port(struct ehea_port *port)
3149{
3150 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3151 of_device_unregister(&port->ofdev);
3152}
3153
3154static const struct net_device_ops ehea_netdev_ops = {
3155 .ndo_open = ehea_open,
3156 .ndo_stop = ehea_stop,
3157 .ndo_start_xmit = ehea_start_xmit,
3158#ifdef CONFIG_NET_POLL_CONTROLLER
3159 .ndo_poll_controller = ehea_netpoll,
3160#endif
3161 .ndo_get_stats = ehea_get_stats,
3162 .ndo_set_mac_address = ehea_set_mac_addr,
3163 .ndo_validate_addr = eth_validate_addr,
3164 .ndo_set_multicast_list = ehea_set_multicast_list,
3165 .ndo_change_mtu = ehea_change_mtu,
3166 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3167 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3168 .ndo_tx_timeout = ehea_tx_watchdog,
3169};
3170
3171struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3172 u32 logical_port_id,
3173 struct device_node *dn)
3174{
3175 int ret;
3176 struct net_device *dev;
3177 struct ehea_port *port;
3178 struct device *port_dev;
3179 int jumbo;
3180
3181 /* allocate memory for the port structures */
3182 dev = alloc_etherdev(sizeof(struct ehea_port));
3183
3184 if (!dev) {
3185 pr_err("no mem for net_device\n");
3186 ret = -ENOMEM;
3187 goto out_err;
3188 }
3189
3190 port = netdev_priv(dev);
3191
3192 mutex_init(&port->port_lock);
3193 port->state = EHEA_PORT_DOWN;
3194 port->sig_comp_iv = sq_entries / 10;
3195
3196 port->adapter = adapter;
3197 port->netdev = dev;
3198 port->logical_port_id = logical_port_id;
3199
3200 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3201
3202 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3203 if (!port->mc_list) {
3204 ret = -ENOMEM;
3205 goto out_free_ethdev;
3206 }
3207
3208 INIT_LIST_HEAD(&port->mc_list->list);
3209
3210 ret = ehea_sense_port_attr(port);
3211 if (ret)
3212 goto out_free_mc_list;
3213
3214 port_dev = ehea_register_port(port, dn);
3215 if (!port_dev)
3216 goto out_free_mc_list;
3217
3218 SET_NETDEV_DEV(dev, port_dev);
3219
3220 /* initialize net_device structure */
3221 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3222
3223 dev->netdev_ops = &ehea_netdev_ops;
3224 ehea_set_ethtool_ops(dev);
3225
3226 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3227 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
3228 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3229 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3230 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3231 | NETIF_F_LLTX | NETIF_F_RXCSUM;
3232 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3233
3234 if (use_lro)
3235 dev->features |= NETIF_F_LRO;
3236
3237 INIT_WORK(&port->reset_task, ehea_reset_port);
3238
3239 init_waitqueue_head(&port->swqe_avail_wq);
3240 init_waitqueue_head(&port->restart_wq);
3241
3242 ret = register_netdev(dev);
3243 if (ret) {
3244 pr_err("register_netdev failed. ret=%d\n", ret);
3245 goto out_unreg_port;
3246 }
3247
3248 port->lro_max_aggr = lro_max_aggr;
3249
3250 ret = ehea_get_jumboframe_status(port, &jumbo);
3251 if (ret)
3252 netdev_err(dev, "failed determining jumbo frame status\n");
3253
3254 netdev_info(dev, "Jumbo frames are %sabled\n",
3255 jumbo == 1 ? "en" : "dis");
3256
3257 adapter->active_ports++;
3258
3259 return port;
3260
3261out_unreg_port:
3262 ehea_unregister_port(port);
3263
3264out_free_mc_list:
3265 kfree(port->mc_list);
3266
3267out_free_ethdev:
3268 free_netdev(dev);
3269
3270out_err:
3271 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3272 logical_port_id, ret);
3273 return NULL;
3274}
3275
3276static void ehea_shutdown_single_port(struct ehea_port *port)
3277{
3278 struct ehea_adapter *adapter = port->adapter;
3279
3280 cancel_work_sync(&port->reset_task);
3281 unregister_netdev(port->netdev);
3282 ehea_unregister_port(port);
3283 kfree(port->mc_list);
3284 free_netdev(port->netdev);
3285 adapter->active_ports--;
3286}
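/*
 * The teardown order above matters: the reset worker is cancelled before
 * unregister_netdev(), so a pending port reset cannot run against a netdev
 * that is about to be unregistered and freed.
 */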
3287
3288static int ehea_setup_ports(struct ehea_adapter *adapter)
3289{
3290 struct device_node *lhea_dn;
3291 struct device_node *eth_dn = NULL;
3292
3293 const u32 *dn_log_port_id;
3294 int i = 0;
3295
3296 lhea_dn = adapter->ofdev->dev.of_node;
3297 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3298
3299 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3300 NULL);
3301 if (!dn_log_port_id) {
3302 pr_err("bad device node: eth_dn name=%s\n",
3303 eth_dn->full_name);
3304 continue;
3305 }
3306
3307 if (ehea_add_adapter_mr(adapter)) {
3308 pr_err("creating MR failed\n");
3309 of_node_put(eth_dn);
3310 return -EIO;
3311 }
3312
3313 adapter->port[i] = ehea_setup_single_port(adapter,
3314 *dn_log_port_id,
3315 eth_dn);
3316 if (adapter->port[i])
3317 netdev_info(adapter->port[i]->netdev,
3318 "logical port id #%d\n", *dn_log_port_id);
3319 else
3320 ehea_remove_adapter_mr(adapter);
3321
3322 i++;
3323 }
3324 return 0;
3325}
3326
3327static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3328 u32 logical_port_id)
3329{
3330 struct device_node *lhea_dn;
3331 struct device_node *eth_dn = NULL;
3332 const u32 *dn_log_port_id;
3333
3334 lhea_dn = adapter->ofdev->dev.of_node;
3335 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3336
3337 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3338 NULL);
3339 if (dn_log_port_id)
3340 if (*dn_log_port_id == logical_port_id)
3341 return eth_dn;
3342 }
3343
3344 return NULL;
3345}
3346
3347static ssize_t ehea_probe_port(struct device *dev,
3348 struct device_attribute *attr,
3349 const char *buf, size_t count)
3350{
3351 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3352 struct ehea_port *port;
3353 struct device_node *eth_dn = NULL;
3354 int i;
3355
3356 u32 logical_port_id;
3357
3358	sscanf(buf, "%u", &logical_port_id);
3359
3360 port = ehea_get_port(adapter, logical_port_id);
3361
3362 if (port) {
3363 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3364 logical_port_id);
3365 return -EINVAL;
3366 }
3367
3368 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3369
3370 if (!eth_dn) {
3371 pr_info("no logical port with id %d found\n", logical_port_id);
3372 return -EINVAL;
3373 }
3374
3375 if (ehea_add_adapter_mr(adapter)) {
3376 pr_err("creating MR failed\n");
3377 return -EIO;
3378 }
3379
3380 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3381
3382 of_node_put(eth_dn);
3383
3384 if (port) {
3385 for (i = 0; i < EHEA_MAX_PORTS; i++)
3386 if (!adapter->port[i]) {
3387 adapter->port[i] = port;
3388 break;
3389 }
3390
3391 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3392 logical_port_id);
3393 } else {
3394 ehea_remove_adapter_mr(adapter);
3395 return -EIO;
3396 }
3397
3398 return (ssize_t) count;
3399}
3400
3401static ssize_t ehea_remove_port(struct device *dev,
3402 struct device_attribute *attr,
3403 const char *buf, size_t count)
3404{
3405 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3406 struct ehea_port *port;
3407 int i;
3408 u32 logical_port_id;
3409
3410	sscanf(buf, "%u", &logical_port_id);
3411
3412 port = ehea_get_port(adapter, logical_port_id);
3413
3414 if (port) {
3415 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3416 logical_port_id);
3417
3418 ehea_shutdown_single_port(port);
3419
3420 for (i = 0; i < EHEA_MAX_PORTS; i++)
3421 if (adapter->port[i] == port) {
3422 adapter->port[i] = NULL;
3423 break;
3424 }
3425 } else {
3426 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3427 logical_port_id);
3428 return -EINVAL;
3429 }
3430
3431 ehea_remove_adapter_mr(adapter);
3432
3433 return (ssize_t) count;
3434}
3435
3436static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3437static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
3438
3439int ehea_create_device_sysfs(struct platform_device *dev)
3440{
3441 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3442 if (ret)
3443 goto out;
3444
3445 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3446out:
3447 return ret;
3448}
3449
3450void ehea_remove_device_sysfs(struct platform_device *dev)
3451{
3452 device_remove_file(&dev->dev, &dev_attr_probe_port);
3453 device_remove_file(&dev->dev, &dev_attr_remove_port);
3454}
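/*
 * Usage sketch (illustrative only, not part of the driver): the probe_port
 * and remove_port attributes created above are plain sysfs files under the
 * adapter's device directory, so a user-space helper can hot-add a logical
 * port simply by writing its decimal id.  The directory path is an assumption
 * made for the example; the real location depends on the ibmebus device name.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_probe_port(const char *sysfs_dir, const char *port_id)
{
	char path[256];
	ssize_t n;
	int fd;

	/* e.g. sysfs_dir = "/sys/bus/ibmebus/devices/<lhea>" (assumed) */
	snprintf(path, sizeof(path), "%s/probe_port", sysfs_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* the store handler above parses the decimal port id with sscanf() */
	n = write(fd, port_id, strlen(port_id));
	close(fd);
	return (n < 0) ? -1 : 0;
}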
3455
3456static int __devinit ehea_probe_adapter(struct platform_device *dev,
3457 const struct of_device_id *id)
3458{
3459 struct ehea_adapter *adapter;
3460 const u64 *adapter_handle;
3461 int ret;
3462
3463 if (!dev || !dev->dev.of_node) {
3464 pr_err("Invalid ibmebus device probed\n");
3465 return -EINVAL;
3466 }
3467
3468 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3469 if (!adapter) {
3470 ret = -ENOMEM;
3471 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3472 goto out;
3473 }
3474
3475 list_add(&adapter->list, &adapter_list);
3476
3477 adapter->ofdev = dev;
3478
3479 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3480 NULL);
3481 if (adapter_handle)
3482 adapter->handle = *adapter_handle;
3483
3484 if (!adapter->handle) {
3485 dev_err(&dev->dev, "failed getting handle for adapter"
3486 " '%s'\n", dev->dev.of_node->full_name);
3487 ret = -ENODEV;
3488 goto out_free_ad;
3489 }
3490
3491 adapter->pd = EHEA_PD_ID;
3492
3493 dev_set_drvdata(&dev->dev, adapter);
3494
3495
3496 /* initialize adapter and ports */
3497 /* get adapter properties */
3498 ret = ehea_sense_adapter_attr(adapter);
3499 if (ret) {
3500 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3501 goto out_free_ad;
3502 }
3503
3504 adapter->neq = ehea_create_eq(adapter,
3505 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3506 if (!adapter->neq) {
3507 ret = -EIO;
3508 dev_err(&dev->dev, "NEQ creation failed\n");
3509 goto out_free_ad;
3510 }
3511
3512 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3513 (unsigned long)adapter);
3514
3515 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3516 ehea_interrupt_neq, IRQF_DISABLED,
3517 "ehea_neq", adapter);
3518 if (ret) {
3519 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3520 goto out_kill_eq;
3521 }
3522
3523 ret = ehea_create_device_sysfs(dev);
3524 if (ret)
3525 goto out_free_irq;
3526
3527 ret = ehea_setup_ports(adapter);
3528 if (ret) {
3529 dev_err(&dev->dev, "setup_ports failed\n");
3530 goto out_rem_dev_sysfs;
3531 }
3532
3533 ret = 0;
3534 goto out;
3535
3536out_rem_dev_sysfs:
3537 ehea_remove_device_sysfs(dev);
3538
3539out_free_irq:
3540 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3541
3542out_kill_eq:
3543 ehea_destroy_eq(adapter->neq);
3544
3545out_free_ad:
3546 list_del(&adapter->list);
3547 kfree(adapter);
3548
3549out:
3550 ehea_update_firmware_handles();
3551
3552 return ret;
3553}
3554
3555static int __devexit ehea_remove(struct platform_device *dev)
3556{
3557 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3558 int i;
3559
3560 for (i = 0; i < EHEA_MAX_PORTS; i++)
3561 if (adapter->port[i]) {
3562 ehea_shutdown_single_port(adapter->port[i]);
3563 adapter->port[i] = NULL;
3564 }
3565
3566 ehea_remove_device_sysfs(dev);
3567
3568 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3569 tasklet_kill(&adapter->neq_tasklet);
3570
3571 ehea_destroy_eq(adapter->neq);
3572 ehea_remove_adapter_mr(adapter);
3573 list_del(&adapter->list);
3574 kfree(adapter);
3575
3576 ehea_update_firmware_handles();
3577
3578 return 0;
3579}
3580
3581void ehea_crash_handler(void)
3582{
3583 int i;
3584
3585 if (ehea_fw_handles.arr)
3586 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3587 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3588 ehea_fw_handles.arr[i].fwh,
3589 FORCE_FREE);
3590
3591 if (ehea_bcmc_regs.arr)
3592 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3593 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3594 ehea_bcmc_regs.arr[i].port_id,
3595 ehea_bcmc_regs.arr[i].reg_type,
3596 ehea_bcmc_regs.arr[i].macaddr,
3597 0, H_DEREG_BCMC);
3598}
3599
3600static int ehea_mem_notifier(struct notifier_block *nb,
3601 unsigned long action, void *data)
3602{
3603 int ret = NOTIFY_BAD;
3604 struct memory_notify *arg = data;
3605
3606 mutex_lock(&dlpar_mem_lock);
3607
3608 switch (action) {
3609 case MEM_CANCEL_OFFLINE:
3610		pr_info("memory offlining canceled\n");
3611		/* Re-add canceled memory block */
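		/* fall through */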
3612 case MEM_ONLINE:
3613		pr_info("memory is going online\n");
3614 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3615 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3616 goto out_unlock;
3617 ehea_rereg_mrs();
3618 break;
3619 case MEM_GOING_OFFLINE:
3620		pr_info("memory is going offline\n");
3621 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3622 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3623 goto out_unlock;
3624 ehea_rereg_mrs();
3625 break;
3626 default:
3627 break;
3628 }
3629
3630 ehea_update_firmware_handles();
3631 ret = NOTIFY_OK;
3632
3633out_unlock:
3634 mutex_unlock(&dlpar_mem_lock);
3635 return ret;
3636}
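/*
 * Both the online and the going-offline paths above follow the same scheme:
 * stop all transfers (__EHEA_STOP_XFER), update the driver's busmap for the
 * affected memory section, then let ehea_rereg_mrs() tear down and re-register
 * the kernel memory region and restart the queue pairs.  The stop-transfer
 * flag is cleared again inside ehea_rereg_mrs().
 */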
3637
3638static struct notifier_block ehea_mem_nb = {
3639 .notifier_call = ehea_mem_notifier,
3640};
3641
3642static int ehea_reboot_notifier(struct notifier_block *nb,
3643 unsigned long action, void *unused)
3644{
3645 if (action == SYS_RESTART) {
3646 pr_info("Reboot: freeing all eHEA resources\n");
3647 ibmebus_unregister_driver(&ehea_driver);
3648 }
3649 return NOTIFY_DONE;
3650}
3651
3652static struct notifier_block ehea_reboot_nb = {
3653 .notifier_call = ehea_reboot_notifier,
3654};
3655
3656static int check_module_parm(void)
3657{
3658 int ret = 0;
3659
3660 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3661 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3662 pr_info("Bad parameter: rq1_entries\n");
3663 ret = -EINVAL;
3664 }
3665 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3666 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3667 pr_info("Bad parameter: rq2_entries\n");
3668 ret = -EINVAL;
3669 }
3670 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3671 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3672 pr_info("Bad parameter: rq3_entries\n");
3673 ret = -EINVAL;
3674 }
3675 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3676 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3677 pr_info("Bad parameter: sq_entries\n");
3678 ret = -EINVAL;
3679 }
3680
3681 return ret;
3682}
3683
3684static ssize_t ehea_show_capabilities(struct device_driver *drv,
3685 char *buf)
3686{
3687 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3688}
3689
3690static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3691 ehea_show_capabilities, NULL);
3692
3693int __init ehea_module_init(void)
3694{
3695 int ret;
3696
3697 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3698
3699 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3700 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3701
3702 mutex_init(&ehea_fw_handles.lock);
3703 spin_lock_init(&ehea_bcmc_regs.lock);
3704
3705 ret = check_module_parm();
3706 if (ret)
3707 goto out;
3708
3709 ret = ehea_create_busmap();
3710 if (ret)
3711 goto out;
3712
3713 ret = register_reboot_notifier(&ehea_reboot_nb);
3714 if (ret)
3715 pr_info("failed registering reboot notifier\n");
3716
3717 ret = register_memory_notifier(&ehea_mem_nb);
3718 if (ret)
3719 pr_info("failed registering memory remove notifier\n");
3720
3721 ret = crash_shutdown_register(ehea_crash_handler);
3722 if (ret)
3723 pr_info("failed registering crash handler\n");
3724
3725 ret = ibmebus_register_driver(&ehea_driver);
3726 if (ret) {
3727 pr_err("failed registering eHEA device driver on ebus\n");
3728 goto out2;
3729 }
3730
3731 ret = driver_create_file(&ehea_driver.driver,
3732 &driver_attr_capabilities);
3733 if (ret) {
3734 pr_err("failed to register capabilities attribute, ret=%d\n",
3735 ret);
3736 goto out3;
3737 }
3738
3739 return ret;
3740
3741out3:
3742 ibmebus_unregister_driver(&ehea_driver);
3743out2:
3744 unregister_memory_notifier(&ehea_mem_nb);
3745 unregister_reboot_notifier(&ehea_reboot_nb);
3746 crash_shutdown_unregister(ehea_crash_handler);
3747out:
3748 return ret;
3749}
3750
3751static void __exit ehea_module_exit(void)
3752{
3753 int ret;
3754
3755 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3756 ibmebus_unregister_driver(&ehea_driver);
3757 unregister_reboot_notifier(&ehea_reboot_nb);
3758 ret = crash_shutdown_unregister(ehea_crash_handler);
3759 if (ret)
3760 pr_info("failed unregistering crash handler\n");
3761 unregister_memory_notifier(&ehea_mem_nb);
3762 kfree(ehea_fw_handles.arr);
3763 kfree(ehea_bcmc_regs.arr);
3764 ehea_destroy_busmap();
3765}
3766
3767module_init(ehea_module_init);
3768module_exit(ehea_module_exit);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.c b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
new file mode 100644
index 000000000000..0506967b9044
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.c
@@ -0,0 +1,626 @@
1/*
2 * linux/drivers/net/ehea/ehea_phyp.c
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include "ehea_phyp.h"
32
33
34static inline u16 get_order_of_qentries(u16 queue_entries)
35{
36	u8 ld = 1;	/* "logarithmus dualis": base-2 logarithm */
37 while (((1U << ld) - 1) < queue_entries)
38 ld++;
39 return ld - 1;
40}
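/*
 * Worked example (illustrative): for queue_entries = 1023 the loop stops as
 * soon as (1 << 10) - 1 = 1023 is no longer smaller than the request, so the
 * function returns 9; for 1024 it returns 10, and for the minimum of 127
 * entries it returns 6.
 */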
41
42/* Defines for H_CALL H_ALLOC_RESOURCE */
43#define H_ALL_RES_TYPE_QP 1
44#define H_ALL_RES_TYPE_CQ 2
45#define H_ALL_RES_TYPE_EQ 3
46#define H_ALL_RES_TYPE_MR 5
47#define H_ALL_RES_TYPE_MW 6
48
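/*
 * Both hcall wrappers below retry a busy hypervisor call up to five times:
 * on an H_IS_LONG_BUSY() return code they sleep for the interval encoded in
 * the return value (see get_longbusy_msecs()) and try again, and give up with
 * H_BUSY once the retries are exhausted.  Real errors are logged together
 * with all input (and, for the 9-output variant, output) registers.
 */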
49static long ehea_plpar_hcall_norets(unsigned long opcode,
50 unsigned long arg1,
51 unsigned long arg2,
52 unsigned long arg3,
53 unsigned long arg4,
54 unsigned long arg5,
55 unsigned long arg6,
56 unsigned long arg7)
57{
58 long ret;
59 int i, sleep_msecs;
60
61 for (i = 0; i < 5; i++) {
62 ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
63 arg5, arg6, arg7);
64
65 if (H_IS_LONG_BUSY(ret)) {
66 sleep_msecs = get_longbusy_msecs(ret);
67 msleep_interruptible(sleep_msecs);
68 continue;
69 }
70
71 if (ret < H_SUCCESS)
72 pr_err("opcode=%lx ret=%lx"
73 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
74 " arg5=%lx arg6=%lx arg7=%lx\n",
75 opcode, ret,
76 arg1, arg2, arg3, arg4, arg5, arg6, arg7);
77
78 return ret;
79 }
80
81 return H_BUSY;
82}
83
84static long ehea_plpar_hcall9(unsigned long opcode,
85 unsigned long *outs, /* array of 9 outputs */
86 unsigned long arg1,
87 unsigned long arg2,
88 unsigned long arg3,
89 unsigned long arg4,
90 unsigned long arg5,
91 unsigned long arg6,
92 unsigned long arg7,
93 unsigned long arg8,
94 unsigned long arg9)
95{
96 long ret;
97 int i, sleep_msecs;
98 u8 cb_cat;
99
100 for (i = 0; i < 5; i++) {
101 ret = plpar_hcall9(opcode, outs,
102 arg1, arg2, arg3, arg4, arg5,
103 arg6, arg7, arg8, arg9);
104
105 if (H_IS_LONG_BUSY(ret)) {
106 sleep_msecs = get_longbusy_msecs(ret);
107 msleep_interruptible(sleep_msecs);
108 continue;
109 }
110
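		/*
		 * H_AUTHORITY from H_MODIFY_HEA_PORT on the jumbo-frame,
		 * port-speed or default-UC-QPN attributes is filtered out of
		 * the error log below; callers tolerate lacking authority for
		 * these optional port settings.
		 */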
111 cb_cat = EHEA_BMASK_GET(H_MEHEAPORT_CAT, arg2);
112
113		if ((ret < H_SUCCESS) &&
114		    !((ret == H_AUTHORITY) && (opcode == H_MODIFY_HEA_PORT) &&
115		      (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) ||
116			(arg3 == H_PORT_CB4_SPEED))) ||
117		       ((cb_cat == H_PORT_CB7) && (arg3 == H_PORT_CB7_DUCQPN)))))
118 pr_err("opcode=%lx ret=%lx"
119 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
120 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
121 " arg9=%lx"
122 " out1=%lx out2=%lx out3=%lx out4=%lx"
123 " out5=%lx out6=%lx out7=%lx out8=%lx"
124 " out9=%lx\n",
125 opcode, ret,
126 arg1, arg2, arg3, arg4, arg5,
127 arg6, arg7, arg8, arg9,
128 outs[0], outs[1], outs[2], outs[3], outs[4],
129 outs[5], outs[6], outs[7], outs[8]);
130 return ret;
131 }
132
133 return H_BUSY;
134}
135
136u64 ehea_h_query_ehea_qp(const u64 adapter_handle, const u8 qp_category,
137 const u64 qp_handle, const u64 sel_mask, void *cb_addr)
138{
139 return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
140 adapter_handle, /* R4 */
141 qp_category, /* R5 */
142 qp_handle, /* R6 */
143 sel_mask, /* R7 */
144 virt_to_abs(cb_addr), /* R8 */
145 0, 0);
146}
147
148/* input param R5 */
149#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
150#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
151#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
152#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
153#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
154#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
155#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
156#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
157#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
158#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
159
160/* input param R9 */
161#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
162#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
163
164/* input param R10 */
165#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
166#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
167#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
168#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
169/* Max Send Scatter Gather Elements */
170#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
171#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
172/* Max Receive SG Elements RQ1 */
173#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
174#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
175
176/* input param R11 */
177#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
178/* max swqe immediate data length */
179#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
180
181/* input param R12 */
182#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
183/* Threshold RQ2 */
184#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
185/* Threshold RQ3 */
186
187/* output param R6 */
188#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
189#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
190#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
191#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
192
193/* output param, R7 */
194#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
195#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
196#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
197#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
198#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
199
200/* output param R8,R9 */
201#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
202#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
203#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
204#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
205
206/* output param R11,R12 */
207#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
208#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
209#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
210#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
211
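/*
 * Illustrative example (assumes the EHEA_BMASK_* helpers from ehea.h, which
 * use IBM bit numbering, i.e. bit 0 is the most significant bit of the 64-bit
 * register): packing the port number and the immediate-data length into the
 * R11 input register is just two shifted-and-masked fields, exactly as done
 * for r11_in in ehea_h_alloc_resource_qp() below.
 */
static inline u64 example_pack_r11(u32 imm_data_len, u32 port_nr)
{
	return EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, imm_data_len)
	       | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, port_nr);
}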
212u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
213 struct ehea_qp_init_attr *init_attr, const u32 pd,
214 u64 *qp_handle, struct h_epas *h_epas)
215{
216 u64 hret;
217 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
218
219 u64 allocate_controls =
220 EHEA_BMASK_SET(H_ALL_RES_QP_EQPO, init_attr->low_lat_rq1 ? 1 : 0)
221 | EHEA_BMASK_SET(H_ALL_RES_QP_QPP, 0)
222 | EHEA_BMASK_SET(H_ALL_RES_QP_RQR, 6) /* rq1 & rq2 & rq3 */
223 | EHEA_BMASK_SET(H_ALL_RES_QP_EQEG, 0) /* EQE gen. disabled */
224 | EHEA_BMASK_SET(H_ALL_RES_QP_LL_QP, init_attr->low_lat_rq1)
225 | EHEA_BMASK_SET(H_ALL_RES_QP_DMA128, 0)
226 | EHEA_BMASK_SET(H_ALL_RES_QP_HSM, 0)
227 | EHEA_BMASK_SET(H_ALL_RES_QP_SIGT, init_attr->signalingtype)
228 | EHEA_BMASK_SET(H_ALL_RES_QP_RES_TYP, H_ALL_RES_TYPE_QP);
229
230 u64 r9_reg = EHEA_BMASK_SET(H_ALL_RES_QP_PD, pd)
231 | EHEA_BMASK_SET(H_ALL_RES_QP_TOKEN, init_attr->qp_token);
232
233 u64 max_r10_reg =
234 EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SWQE,
235 get_order_of_qentries(init_attr->max_nr_send_wqes))
236 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1WQE,
237 get_order_of_qentries(init_attr->max_nr_rwqes_rq1))
238 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2WQE,
239 get_order_of_qentries(init_attr->max_nr_rwqes_rq2))
240 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3WQE,
241 get_order_of_qentries(init_attr->max_nr_rwqes_rq3))
242 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_SSGE, init_attr->wqe_size_enc_sq)
243 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R1SGE,
244 init_attr->wqe_size_enc_rq1)
245 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R2SGE,
246 init_attr->wqe_size_enc_rq2)
247 | EHEA_BMASK_SET(H_ALL_RES_QP_MAX_R3SGE,
248 init_attr->wqe_size_enc_rq3);
249
250 u64 r11_in =
251 EHEA_BMASK_SET(H_ALL_RES_QP_SWQE_IDL, init_attr->swqe_imm_data_len)
252 | EHEA_BMASK_SET(H_ALL_RES_QP_PORT_NUM, init_attr->port_nr);
253 u64 threshold =
254 EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ2, init_attr->rq2_threshold)
255 | EHEA_BMASK_SET(H_ALL_RES_QP_TH_RQ3, init_attr->rq3_threshold);
256
257 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
258 outs,
259 adapter_handle, /* R4 */
260 allocate_controls, /* R5 */
261 init_attr->send_cq_handle, /* R6 */
262 init_attr->recv_cq_handle, /* R7 */
263 init_attr->aff_eq_handle, /* R8 */
264 r9_reg, /* R9 */
265 max_r10_reg, /* R10 */
266 r11_in, /* R11 */
267 threshold); /* R12 */
268
269 *qp_handle = outs[0];
270 init_attr->qp_nr = (u32)outs[1];
271
272 init_attr->act_nr_send_wqes =
273 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_SWQE, outs[2]);
274 init_attr->act_nr_rwqes_rq1 =
275 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R1WQE, outs[2]);
276 init_attr->act_nr_rwqes_rq2 =
277 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R2WQE, outs[2]);
278 init_attr->act_nr_rwqes_rq3 =
279 (u16)EHEA_BMASK_GET(H_ALL_RES_QP_ACT_R3WQE, outs[2]);
280
281 init_attr->act_wqe_size_enc_sq = init_attr->wqe_size_enc_sq;
282 init_attr->act_wqe_size_enc_rq1 = init_attr->wqe_size_enc_rq1;
283 init_attr->act_wqe_size_enc_rq2 = init_attr->wqe_size_enc_rq2;
284 init_attr->act_wqe_size_enc_rq3 = init_attr->wqe_size_enc_rq3;
285
286 init_attr->nr_sq_pages =
287 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_SQ, outs[4]);
288 init_attr->nr_rq1_pages =
289 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ1, outs[4]);
290 init_attr->nr_rq2_pages =
291 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ2, outs[5]);
292 init_attr->nr_rq3_pages =
293 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_SIZE_RQ3, outs[5]);
294
295 init_attr->liobn_sq =
296 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_SQ, outs[7]);
297 init_attr->liobn_rq1 =
298 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ1, outs[7]);
299 init_attr->liobn_rq2 =
300 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ2, outs[8]);
301 init_attr->liobn_rq3 =
302 (u32)EHEA_BMASK_GET(H_ALL_RES_QP_LIOBN_RQ3, outs[8]);
303
304 if (!hret)
305 hcp_epas_ctor(h_epas, outs[6], outs[6]);
306
307 return hret;
308}
309
310u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
311 struct ehea_cq_attr *cq_attr,
312 u64 *cq_handle, struct h_epas *epas)
313{
314 u64 hret;
315 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
316
317 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
318 outs,
319 adapter_handle, /* R4 */
320 H_ALL_RES_TYPE_CQ, /* R5 */
321 cq_attr->eq_handle, /* R6 */
322 cq_attr->cq_token, /* R7 */
323 cq_attr->max_nr_of_cqes, /* R8 */
324 0, 0, 0, 0); /* R9-R12 */
325
326 *cq_handle = outs[0];
327 cq_attr->act_nr_of_cqes = outs[3];
328 cq_attr->nr_pages = outs[4];
329
330 if (!hret)
331 hcp_epas_ctor(epas, outs[5], outs[6]);
332
333 return hret;
334}
335
336/* Defines for H_CALL H_ALLOC_RESOURCE */
337#define H_ALL_RES_TYPE_QP 1
338#define H_ALL_RES_TYPE_CQ 2
339#define H_ALL_RES_TYPE_EQ 3
340#define H_ALL_RES_TYPE_MR 5
341#define H_ALL_RES_TYPE_MW 6
342
343/* input param R5 */
344#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
345#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
346#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
347#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
348/* input param R6 */
349#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
350
351/* output param R6 */
352#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
353
354/* output param R7 */
355#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
356
357/* output param R8 */
358#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
359
360/* output param R9 */
361#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
362#define H_ALL_RES_EQ_ACT_EQ_IST_1 EHEA_BMASK_IBM(40, 63)
363
364/* output param R10 */
365#define H_ALL_RES_EQ_ACT_EQ_IST_2 EHEA_BMASK_IBM(40, 63)
366
367/* output param R11 */
368#define H_ALL_RES_EQ_ACT_EQ_IST_3 EHEA_BMASK_IBM(40, 63)
369
370/* output param R12 */
371#define H_ALL_RES_EQ_ACT_EQ_IST_4 EHEA_BMASK_IBM(40, 63)
372
373u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
374 struct ehea_eq_attr *eq_attr, u64 *eq_handle)
375{
376 u64 hret, allocate_controls;
377 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
378
379 /* resource type */
380 allocate_controls =
381 EHEA_BMASK_SET(H_ALL_RES_EQ_RES_TYPE, H_ALL_RES_TYPE_EQ)
382 | EHEA_BMASK_SET(H_ALL_RES_EQ_NEQ, eq_attr->type ? 1 : 0)
383 | EHEA_BMASK_SET(H_ALL_RES_EQ_INH_EQE_GEN, !eq_attr->eqe_gen)
384 | EHEA_BMASK_SET(H_ALL_RES_EQ_NON_NEQ_ISN, 1);
385
386 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
387 outs,
388 adapter_handle, /* R4 */
389 allocate_controls, /* R5 */
390 eq_attr->max_nr_of_eqes, /* R6 */
391				 0, 0, 0, 0, 0, 0);	  /* R7-R12 */
392
393 *eq_handle = outs[0];
394 eq_attr->act_nr_of_eqes = outs[3];
395 eq_attr->nr_pages = outs[4];
396 eq_attr->ist1 = outs[5];
397 eq_attr->ist2 = outs[6];
398 eq_attr->ist3 = outs[7];
399 eq_attr->ist4 = outs[8];
400
401 return hret;
402}
403
404u64 ehea_h_modify_ehea_qp(const u64 adapter_handle, const u8 cat,
405 const u64 qp_handle, const u64 sel_mask,
406 void *cb_addr, u64 *inv_attr_id, u64 *proc_mask,
407 u16 *out_swr, u16 *out_rwr)
408{
409 u64 hret;
410 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
411
412 hret = ehea_plpar_hcall9(H_MODIFY_HEA_QP,
413 outs,
414 adapter_handle, /* R4 */
415 (u64) cat, /* R5 */
416 qp_handle, /* R6 */
417 sel_mask, /* R7 */
418 virt_to_abs(cb_addr), /* R8 */
419 0, 0, 0, 0); /* R9-R12 */
420
421 *inv_attr_id = outs[0];
422 *out_swr = outs[3];
423 *out_rwr = outs[4];
424 *proc_mask = outs[5];
425
426 return hret;
427}
428
429u64 ehea_h_register_rpage(const u64 adapter_handle, const u8 pagesize,
430 const u8 queue_type, const u64 resource_handle,
431 const u64 log_pageaddr, u64 count)
432{
433 u64 reg_control;
434
435 reg_control = EHEA_BMASK_SET(H_REG_RPAGE_PAGE_SIZE, pagesize)
436 | EHEA_BMASK_SET(H_REG_RPAGE_QT, queue_type);
437
438 return ehea_plpar_hcall_norets(H_REGISTER_HEA_RPAGES,
439 adapter_handle, /* R4 */
440 reg_control, /* R5 */
441 resource_handle, /* R6 */
442 log_pageaddr, /* R7 */
443 count, /* R8 */
444 0, 0); /* R9-R10 */
445}
446
447u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
448 const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
449 struct ehea_mr *mr)
450{
451 u64 hret;
452 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
453
454 hret = ehea_plpar_hcall9(H_REGISTER_SMR,
455 outs,
456 adapter_handle , /* R4 */
457 orig_mr_handle, /* R5 */
458 vaddr_in, /* R6 */
459 (((u64)access_ctrl) << 32ULL), /* R7 */
460 pd, /* R8 */
461 0, 0, 0, 0); /* R9-R12 */
462
463 mr->handle = outs[0];
464 mr->lkey = (u32)outs[2];
465
466 return hret;
467}
468
469u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle)
470{
471 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
472
473 return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
474 outs,
475 adapter_handle, /* R4 */
476 H_DISABLE_GET_EHEA_WQE_P, /* R5 */
477 qp_handle, /* R6 */
478 0, 0, 0, 0, 0, 0); /* R7-R12 */
479}
480
481u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
482 u64 force_bit)
483{
484 return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
485 adapter_handle, /* R4 */
486 res_handle, /* R5 */
487 force_bit,
488 0, 0, 0, 0); /* R7-R10 */
489}
490
491u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
492 const u64 length, const u32 access_ctrl,
493 const u32 pd, u64 *mr_handle, u32 *lkey)
494{
495 u64 hret;
496 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
497
498 hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
499 outs,
500 adapter_handle, /* R4 */
501				 H_ALL_RES_TYPE_MR,	     /* R5 */
502 vaddr, /* R6 */
503 length, /* R7 */
504 (((u64) access_ctrl) << 32ULL), /* R8 */
505 pd, /* R9 */
506 0, 0, 0); /* R10-R12 */
507
508 *mr_handle = outs[0];
509 *lkey = (u32)outs[2];
510 return hret;
511}
512
513u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
514 const u8 pagesize, const u8 queue_type,
515 const u64 log_pageaddr, const u64 count)
516{
517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
518		pr_err("not on page boundary\n");
519 return H_PARAMETER;
520 }
521
522 return ehea_h_register_rpage(adapter_handle, pagesize,
523 queue_type, mr_handle,
524 log_pageaddr, count);
525}
526
527u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr)
528{
529 u64 hret, cb_logaddr;
530
531 cb_logaddr = virt_to_abs(cb_addr);
532
533 hret = ehea_plpar_hcall_norets(H_QUERY_HEA,
534 adapter_handle, /* R4 */
535 cb_logaddr, /* R5 */
536 0, 0, 0, 0, 0); /* R6-R10 */
537#ifdef DEBUG
538 ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea");
539#endif
540 return hret;
541}
542
543u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
544 const u8 cb_cat, const u64 select_mask,
545 void *cb_addr)
546{
547 u64 port_info;
548 u64 cb_logaddr = virt_to_abs(cb_addr);
549 u64 arr_index = 0;
550
551 port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
552 | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
553
554 return ehea_plpar_hcall_norets(H_QUERY_HEA_PORT,
555 adapter_handle, /* R4 */
556 port_info, /* R5 */
557 select_mask, /* R6 */
558 arr_index, /* R7 */
559 cb_logaddr, /* R8 */
560 0, 0); /* R9-R10 */
561}
562
563u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
564 const u8 cb_cat, const u64 select_mask,
565 void *cb_addr)
566{
567 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
568 u64 port_info;
569 u64 arr_index = 0;
570 u64 cb_logaddr = virt_to_abs(cb_addr);
571
572 port_info = EHEA_BMASK_SET(H_MEHEAPORT_CAT, cb_cat)
573 | EHEA_BMASK_SET(H_MEHEAPORT_PN, port_num);
574#ifdef DEBUG
575 ehea_dump(cb_addr, sizeof(struct hcp_ehea_port_cb0), "Before HCALL");
576#endif
577 return ehea_plpar_hcall9(H_MODIFY_HEA_PORT,
578 outs,
579 adapter_handle, /* R4 */
580 port_info, /* R5 */
581 select_mask, /* R6 */
582 arr_index, /* R7 */
583 cb_logaddr, /* R8 */
584 0, 0, 0, 0); /* R9-R12 */
585}
586
587u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
588 const u8 reg_type, const u64 mc_mac_addr,
589 const u16 vlan_id, const u32 hcall_id)
590{
591 u64 r5_port_num, r6_reg_type, r7_mc_mac_addr, r8_vlan_id;
592 u64 mac_addr = mc_mac_addr >> 16;
593
594 r5_port_num = EHEA_BMASK_SET(H_REGBCMC_PN, port_num);
595 r6_reg_type = EHEA_BMASK_SET(H_REGBCMC_REGTYPE, reg_type);
596 r7_mc_mac_addr = EHEA_BMASK_SET(H_REGBCMC_MACADDR, mac_addr);
597 r8_vlan_id = EHEA_BMASK_SET(H_REGBCMC_VLANID, vlan_id);
598
599 return ehea_plpar_hcall_norets(hcall_id,
600 adapter_handle, /* R4 */
601 r5_port_num, /* R5 */
602 r6_reg_type, /* R6 */
603 r7_mc_mac_addr, /* R7 */
604 r8_vlan_id, /* R8 */
605				       0, 0);		   /* R9-R10 */
606}
607
608u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
609 const u64 event_mask)
610{
611 return ehea_plpar_hcall_norets(H_RESET_EVENTS,
612 adapter_handle, /* R4 */
613 neq_handle, /* R5 */
614 event_mask, /* R6 */
615				       0, 0, 0, 0);	   /* R7-R10 */
616}
617
618u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
619 void *rblock)
620{
621 return ehea_plpar_hcall_norets(H_ERROR_DATA,
622 adapter_handle, /* R4 */
623 ressource_handle, /* R5 */
624 virt_to_abs(rblock), /* R6 */
625				       0, 0, 0, 0);	   /* R7-R10 */
626}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
new file mode 100644
index 000000000000..2f8174c248bc
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -0,0 +1,467 @@
1/*
2 * linux/drivers/net/ehea/ehea_phyp.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_PHYP_H__
30#define __EHEA_PHYP_H__
31
32#include <linux/delay.h>
33#include <asm/hvcall.h>
34#include "ehea.h"
35#include "ehea_hw.h"
36
37/* Some abbreviations used here:
38 *
39 * hcp_* - structures, variables and functions related to Hypervisor Calls
40 */
41
42static inline u32 get_longbusy_msecs(int long_busy_ret_code)
43{
44 switch (long_busy_ret_code) {
45 case H_LONG_BUSY_ORDER_1_MSEC:
46 return 1;
47 case H_LONG_BUSY_ORDER_10_MSEC:
48 return 10;
49 case H_LONG_BUSY_ORDER_100_MSEC:
50 return 100;
51 case H_LONG_BUSY_ORDER_1_SEC:
52 return 1000;
53 case H_LONG_BUSY_ORDER_10_SEC:
54 return 10000;
55 case H_LONG_BUSY_ORDER_100_SEC:
56 return 100000;
57 default:
58 return 1;
59 }
60}
61
62/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
63#define EHEA_MAX_RPAGE 512
64
65/* Notification Event Queue (NEQ) Entry bit masks */
66#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
67#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
68#define NEQE_PORT_UP EHEA_BMASK_IBM(16, 16)
69#define NEQE_EXTSWITCH_PORT_UP EHEA_BMASK_IBM(17, 17)
70#define NEQE_EXTSWITCH_PRIMARY EHEA_BMASK_IBM(18, 18)
71#define NEQE_PLID EHEA_BMASK_IBM(16, 47)
72
73/* Notification Event Codes */
74#define EHEA_EC_PORTSTATE_CHG 0x30
75#define EHEA_EC_ADAPTER_MALFUNC 0x32
76#define EHEA_EC_PORT_MALFUNC 0x33
77
78/* Notification Event Log Register (NELR) bit masks */
79#define NELR_PORT_MALFUNC EHEA_BMASK_IBM(61, 61)
80#define NELR_ADAPTER_MALFUNC EHEA_BMASK_IBM(62, 62)
81#define NELR_PORTSTATE_CHG EHEA_BMASK_IBM(63, 63)
82
83static inline void hcp_epas_ctor(struct h_epas *epas, u64 paddr_kernel,
84 u64 paddr_user)
85{
86 /* To support 64k pages we must round to 64k page boundary */
87 epas->kernel.addr = ioremap((paddr_kernel & PAGE_MASK), PAGE_SIZE) +
88 (paddr_kernel & ~PAGE_MASK);
89 epas->user.addr = paddr_user;
90}
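/*
 * Example of the rounding above (illustrative, assuming 64 KiB kernel pages):
 * for a register at physical address 0x12348010, PAGE_MASK keeps 0x12340000,
 * that page is ioremap()ed, and the in-page offset 0x8010 is added back, so
 * unaligned hypervisor-provided addresses remain usable.
 */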
91
92static inline void hcp_epas_dtor(struct h_epas *epas)
93{
94 if (epas->kernel.addr)
95 iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
96
97 epas->user.addr = 0;
98 epas->kernel.addr = 0;
99}
100
101struct hcp_modify_qp_cb0 {
102 u64 qp_ctl_reg; /* 00 */
103 u32 max_swqe; /* 02 */
104 u32 max_rwqe; /* 03 */
105 u32 port_nb; /* 04 */
106 u32 reserved0; /* 05 */
107 u64 qp_aer; /* 06 */
108 u64 qp_tenure; /* 08 */
109};
110
111/* Hcall Query/Modify Queue Pair Control Block 0 Selection Mask Bits */
112#define H_QPCB0_ALL EHEA_BMASK_IBM(0, 5)
113#define H_QPCB0_QP_CTL_REG EHEA_BMASK_IBM(0, 0)
114#define H_QPCB0_MAX_SWQE EHEA_BMASK_IBM(1, 1)
115#define H_QPCB0_MAX_RWQE EHEA_BMASK_IBM(2, 2)
116#define H_QPCB0_PORT_NB EHEA_BMASK_IBM(3, 3)
117#define H_QPCB0_QP_AER EHEA_BMASK_IBM(4, 4)
118#define H_QPCB0_QP_TENURE EHEA_BMASK_IBM(5, 5)
119
120/* Queue Pair Control Register Status Bits */
121#define H_QP_CR_ENABLED 0x8000000000000000ULL /* QP enabled */
122 /* QP States: */
123#define H_QP_CR_STATE_RESET 0x0000010000000000ULL /* Reset */
124#define H_QP_CR_STATE_INITIALIZED 0x0000020000000000ULL /* Initialized */
125#define H_QP_CR_STATE_RDY2RCV 0x0000030000000000ULL /* Ready to recv */
126#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* Ready to send */
127#define H_QP_CR_STATE_ERROR 0x0000800000000000ULL /* Error */
128#define H_QP_CR_RES_STATE 0x0000007F00000000ULL /* Resultant state */
129
130struct hcp_modify_qp_cb1 {
131 u32 qpn; /* 00 */
132 u32 qp_asyn_ev_eq_nb; /* 01 */
133 u64 sq_cq_handle; /* 02 */
134 u64 rq_cq_handle; /* 04 */
135 /* sgel = scatter gather element */
136 u32 sgel_nb_sq; /* 06 */
137 u32 sgel_nb_rq1; /* 07 */
138 u32 sgel_nb_rq2; /* 08 */
139 u32 sgel_nb_rq3; /* 09 */
140};
141
142/* Hcall Query/Modify Queue Pair Control Block 1 Selection Mask Bits */
143#define H_QPCB1_ALL EHEA_BMASK_IBM(0, 7)
144#define H_QPCB1_QPN EHEA_BMASK_IBM(0, 0)
145#define H_QPCB1_ASYN_EV_EQ_NB EHEA_BMASK_IBM(1, 1)
146#define H_QPCB1_SQ_CQ_HANDLE EHEA_BMASK_IBM(2, 2)
147#define H_QPCB1_RQ_CQ_HANDLE EHEA_BMASK_IBM(3, 3)
148#define H_QPCB1_SGEL_NB_SQ EHEA_BMASK_IBM(4, 4)
149#define H_QPCB1_SGEL_NB_RQ1 EHEA_BMASK_IBM(5, 5)
150#define H_QPCB1_SGEL_NB_RQ2 EHEA_BMASK_IBM(6, 6)
151#define H_QPCB1_SGEL_NB_RQ3 EHEA_BMASK_IBM(7, 7)
152
153struct hcp_query_ehea {
154 u32 cur_num_qps; /* 00 */
155 u32 cur_num_cqs; /* 01 */
156 u32 cur_num_eqs; /* 02 */
157 u32 cur_num_mrs; /* 03 */
158 u32 auth_level; /* 04 */
159 u32 max_num_qps; /* 05 */
160 u32 max_num_cqs; /* 06 */
161 u32 max_num_eqs; /* 07 */
162 u32 max_num_mrs; /* 08 */
163 u32 reserved0; /* 09 */
164 u32 int_clock_freq; /* 10 */
165 u32 max_num_pds; /* 11 */
166 u32 max_num_addr_handles; /* 12 */
167 u32 max_num_cqes; /* 13 */
168 u32 max_num_wqes; /* 14 */
169 u32 max_num_sgel_rq1wqe; /* 15 */
170 u32 max_num_sgel_rq2wqe; /* 16 */
171 u32 max_num_sgel_rq3wqe; /* 17 */
172 u32 mr_page_size; /* 18 */
173 u32 reserved1; /* 19 */
174 u64 max_mr_size; /* 20 */
175 u64 reserved2; /* 22 */
176 u32 num_ports; /* 24 */
177 u32 reserved3; /* 25 */
178 u32 reserved4; /* 26 */
179 u32 reserved5; /* 27 */
180 u64 max_mc_mac; /* 28 */
181 u64 ehea_cap; /* 30 */
182 u32 max_isn_per_eq; /* 32 */
183 u32 max_num_neq; /* 33 */
184 u64 max_num_vlan_ids; /* 34 */
185 u32 max_num_port_group; /* 36 */
186 u32 max_num_phys_port; /* 37 */
187
188};
189
190/* Hcall Query/Modify Port Control Block defines */
191#define H_PORT_CB0 0
192#define H_PORT_CB1 1
193#define H_PORT_CB2 2
194#define H_PORT_CB3 3
195#define H_PORT_CB4 4
196#define H_PORT_CB5 5
197#define H_PORT_CB6 6
198#define H_PORT_CB7 7
199
200struct hcp_ehea_port_cb0 {
201 u64 port_mac_addr;
202 u64 port_rc;
203 u64 reserved0;
204 u32 port_op_state;
205 u32 port_speed;
206 u32 ext_swport_op_state;
207 u32 neg_tpf_prpf;
208 u32 num_default_qps;
209 u32 reserved1;
210 u64 default_qpn_arr[16];
211};
212
213/* Hcall Query/Modify Port Control Block 0 Selection Mask Bits */
214#define H_PORT_CB0_ALL EHEA_BMASK_IBM(0, 7) /* Set all bits */
215#define H_PORT_CB0_MAC EHEA_BMASK_IBM(0, 0) /* MAC address */
216#define H_PORT_CB0_PRC EHEA_BMASK_IBM(1, 1) /* Port Recv Control */
217#define H_PORT_CB0_DEFQPNARRAY EHEA_BMASK_IBM(7, 7) /* Default QPN Array */
218
219/* Hcall Query Port: Returned port speed values */
220#define H_SPEED_10M_H 1 /* 10 Mbps, Half Duplex */
221#define H_SPEED_10M_F 2 /* 10 Mbps, Full Duplex */
222#define H_SPEED_100M_H 3 /* 100 Mbps, Half Duplex */
223#define H_SPEED_100M_F 4 /* 100 Mbps, Full Duplex */
224#define H_SPEED_1G_F 6 /* 1 Gbps, Full Duplex */
225#define H_SPEED_10G_F 8 /* 10 Gbps, Full Duplex */
226
227/* Port Receive Control Status Bits */
228#define PXLY_RC_VALID EHEA_BMASK_IBM(49, 49)
229#define PXLY_RC_VLAN_XTRACT EHEA_BMASK_IBM(50, 50)
230#define PXLY_RC_TCP_6_TUPLE EHEA_BMASK_IBM(51, 51)
231#define PXLY_RC_UDP_6_TUPLE EHEA_BMASK_IBM(52, 52)
232#define PXLY_RC_TCP_3_TUPLE EHEA_BMASK_IBM(53, 53)
233#define PXLY_RC_TCP_2_TUPLE EHEA_BMASK_IBM(54, 54)
234#define PXLY_RC_LLC_SNAP EHEA_BMASK_IBM(55, 55)
235#define PXLY_RC_JUMBO_FRAME EHEA_BMASK_IBM(56, 56)
236#define PXLY_RC_FRAG_IP_PKT EHEA_BMASK_IBM(57, 57)
237#define PXLY_RC_TCP_UDP_CHKSUM EHEA_BMASK_IBM(58, 58)
238#define PXLY_RC_IP_CHKSUM EHEA_BMASK_IBM(59, 59)
239#define PXLY_RC_MAC_FILTER EHEA_BMASK_IBM(60, 60)
240#define PXLY_RC_UNTAG_FILTER EHEA_BMASK_IBM(61, 61)
241#define PXLY_RC_VLAN_TAG_FILTER EHEA_BMASK_IBM(62, 63)
242
243#define PXLY_RC_VLAN_FILTER 2
244#define PXLY_RC_VLAN_PERM 0
245
246
247#define H_PORT_CB1_ALL 0x8000000000000000ULL
248
249struct hcp_ehea_port_cb1 {
250 u64 vlan_filter[64];
251};
252
253#define H_PORT_CB2_ALL 0xFFE0000000000000ULL
254
255struct hcp_ehea_port_cb2 {
256 u64 rxo;
257 u64 rxucp;
258 u64 rxufd;
259 u64 rxuerr;
260 u64 rxftl;
261 u64 rxmcp;
262 u64 rxbcp;
263 u64 txo;
264 u64 txucp;
265 u64 txmcp;
266 u64 txbcp;
267};
268
269struct hcp_ehea_port_cb3 {
270 u64 vlan_bc_filter[64];
271 u64 vlan_mc_filter[64];
272 u64 vlan_un_filter[64];
273 u64 port_mac_hash_array[64];
274};
275
276#define H_PORT_CB4_ALL 0xF000000000000000ULL
277#define H_PORT_CB4_JUMBO 0x1000000000000000ULL
278#define H_PORT_CB4_SPEED 0x8000000000000000ULL
279
280struct hcp_ehea_port_cb4 {
281 u32 port_speed;
282 u32 pause_frame;
283 u32 ens_port_op_state;
284 u32 jumbo_frame;
285 u32 ens_port_wrap;
286};
287
288/* Hcall Query/Modify Port Control Block 5 Selection Mask Bits */
289#define H_PORT_CB5_RCU 0x0001000000000000ULL
290#define PXS_RCU EHEA_BMASK_IBM(61, 63)
291
292struct hcp_ehea_port_cb5 {
293 u64 prc; /* 00 */
294 u64 uaa; /* 01 */
295 u64 macvc; /* 02 */
296 u64 xpcsc; /* 03 */
297 u64 xpcsp; /* 04 */
298 u64 pcsid; /* 05 */
299 u64 xpcsst; /* 06 */
300 u64 pthlb; /* 07 */
301 u64 pthrb; /* 08 */
302 u64 pqu; /* 09 */
303 u64 pqd; /* 10 */
304 u64 prt; /* 11 */
305 u64 wsth; /* 12 */
306 u64 rcb; /* 13 */
307 u64 rcm; /* 14 */
308 u64 rcu; /* 15 */
309 u64 macc; /* 16 */
310 u64 pc; /* 17 */
311 u64 pst; /* 18 */
312 u64 ducqpn; /* 19 */
313 u64 mcqpn; /* 20 */
314 u64 mma; /* 21 */
315 u64 pmc0h; /* 22 */
316 u64 pmc0l; /* 23 */
317 u64 lbc; /* 24 */
318};
319
320#define H_PORT_CB6_ALL 0xFFFFFE7FFFFF8000ULL
321
322struct hcp_ehea_port_cb6 {
323 u64 rxo; /* 00 */
324 u64 rx64; /* 01 */
325 u64 rx65; /* 02 */
326 u64 rx128; /* 03 */
327 u64 rx256; /* 04 */
328 u64 rx512; /* 05 */
329 u64 rx1024; /* 06 */
330 u64 rxbfcs; /* 07 */
331 u64 rxime; /* 08 */
332 u64 rxrle; /* 09 */
333 u64 rxorle; /* 10 */
334 u64 rxftl; /* 11 */
335 u64 rxjab; /* 12 */
336 u64 rxse; /* 13 */
337 u64 rxce; /* 14 */
338 u64 rxrf; /* 15 */
339 u64 rxfrag; /* 16 */
340 u64 rxuoc; /* 17 */
341 u64 rxcpf; /* 18 */
342 u64 rxsb; /* 19 */
343 u64 rxfd; /* 20 */
344 u64 rxoerr; /* 21 */
345 u64 rxaln; /* 22 */
346 u64 ducqpn; /* 23 */
347 u64 reserved0; /* 24 */
348 u64 rxmcp; /* 25 */
349 u64 rxbcp; /* 26 */
350 u64 txmcp; /* 27 */
351 u64 txbcp; /* 28 */
352 u64 txo; /* 29 */
353 u64 tx64; /* 30 */
354 u64 tx65; /* 31 */
355 u64 tx128; /* 32 */
356 u64 tx256; /* 33 */
357 u64 tx512; /* 34 */
358 u64 tx1024; /* 35 */
359 u64 txbfcs; /* 36 */
360 u64 txcpf; /* 37 */
361 u64 txlf; /* 38 */
362 u64 txrf; /* 39 */
363 u64 txime; /* 40 */
364 u64 txsc; /* 41 */
365 u64 txmc; /* 42 */
366 u64 txsqe; /* 43 */
367 u64 txdef; /* 44 */
368 u64 txlcol; /* 45 */
369 u64 txexcol; /* 46 */
370 u64 txcse; /* 47 */
371 u64 txbor; /* 48 */
372};
373
374#define H_PORT_CB7_DUCQPN 0x8000000000000000ULL
375
376struct hcp_ehea_port_cb7 {
377 u64 def_uc_qpn;
378};
379
380u64 ehea_h_query_ehea_qp(const u64 adapter_handle,
381 const u8 qp_category,
382 const u64 qp_handle, const u64 sel_mask,
383 void *cb_addr);
384
385u64 ehea_h_modify_ehea_qp(const u64 adapter_handle,
386 const u8 cat,
387 const u64 qp_handle,
388 const u64 sel_mask,
389 void *cb_addr,
390 u64 *inv_attr_id,
391 u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
392
393u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
394 struct ehea_eq_attr *eq_attr, u64 *eq_handle);
395
396u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
397 struct ehea_cq_attr *cq_attr,
398 u64 *cq_handle, struct h_epas *epas);
399
400u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
401 struct ehea_qp_init_attr *init_attr,
402 const u32 pd,
403 u64 *qp_handle, struct h_epas *h_epas);
404
405#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
406#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
407
408u64 ehea_h_register_rpage(const u64 adapter_handle,
409 const u8 pagesize,
410 const u8 queue_type,
411 const u64 resource_handle,
412 const u64 log_pageaddr, u64 count);
413
414#define H_DISABLE_GET_EHEA_WQE_P 1
415#define H_DISABLE_GET_SQ_WQE_P 2
416#define H_DISABLE_GET_RQC 3
417
418u64 ehea_h_disable_and_get_hea(const u64 adapter_handle, const u64 qp_handle);
419
420#define FORCE_FREE 1
421#define NORMAL_FREE 0
422
423u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
424 u64 force_bit);
425
426u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
427 const u64 length, const u32 access_ctrl,
428 const u32 pd, u64 *mr_handle, u32 *lkey);
429
430u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
431 const u8 pagesize, const u8 queue_type,
432 const u64 log_pageaddr, const u64 count);
433
434u64 ehea_h_register_smr(const u64 adapter_handle, const u64 orig_mr_handle,
435 const u64 vaddr_in, const u32 access_ctrl, const u32 pd,
436 struct ehea_mr *mr);
437
438u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
439
440/* output param R5 */
441#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
442#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
443
444u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
445 const u8 cb_cat, const u64 select_mask,
446 void *cb_addr);
447
448u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
449 const u8 cb_cat, const u64 select_mask,
450 void *cb_addr);
451
452#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
453#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
454#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
455#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
456
457u64 ehea_h_reg_dereg_bcmc(const u64 adapter_handle, const u16 port_num,
458 const u8 reg_type, const u64 mc_mac_addr,
459 const u16 vlan_id, const u32 hcall_id);
460
461u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
462 const u64 event_mask);
463
464u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
465 void *rblock);
466
467#endif /* __EHEA_PHYP_H__ */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
new file mode 100644
index 000000000000..95b9f4fa811e
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -0,0 +1,1031 @@
1/*
2 * linux/drivers/net/ehea/ehea_qmr.c
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/mm.h>
32#include <linux/slab.h>
33#include "ehea.h"
34#include "ehea_phyp.h"
35#include "ehea_qmr.h"
36
37struct ehea_bmap *ehea_bmap = NULL;
38
39
40
41static void *hw_qpageit_get_inc(struct hw_queue *queue)
42{
43 void *retvalue = hw_qeit_get(queue);
44
45 queue->current_q_offset += queue->pagesize;
46 if (queue->current_q_offset > queue->queue_length) {
47 queue->current_q_offset -= queue->pagesize;
48 retvalue = NULL;
49 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
50		pr_err("not on page boundary\n");
51 retvalue = NULL;
52 }
53 return retvalue;
54}
55
56static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
57 const u32 pagesize, const u32 qe_size)
58{
59 int pages_per_kpage = PAGE_SIZE / pagesize;
60 int i, k;
61
62 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
63 pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
64 (int)PAGE_SIZE, (int)pagesize);
65 return -EINVAL;
66 }
67
68 queue->queue_length = nr_of_pages * pagesize;
69 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
70 if (!queue->queue_pages) {
71 pr_err("no mem for queue_pages\n");
72 return -ENOMEM;
73 }
74
75 /*
76 * allocate pages for queue:
77 * outer loop allocates whole kernel pages (page aligned) and
78 * inner loop divides a kernel page into smaller hea queue pages
79 */
80 i = 0;
81 while (i < nr_of_pages) {
82 u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
83 if (!kpage)
84 goto out_nomem;
85 for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
86 (queue->queue_pages)[i] = (struct ehea_page *)kpage;
87 kpage += pagesize;
88 i++;
89 }
90 }
91
92 queue->current_q_offset = 0;
93 queue->qe_size = qe_size;
94 queue->pagesize = pagesize;
95 queue->toggle_state = 1;
96
97 return 0;
98out_nomem:
99 for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
100 if (!(queue->queue_pages)[i])
101 break;
102 free_page((unsigned long)(queue->queue_pages)[i]);
103 }
104 return -ENOMEM;
105}
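/*
 * Sizing example for the constructor above (illustrative, assuming the usual
 * 4 KiB EHEA queue page on a kernel built with 64 KiB pages): pages_per_kpage
 * is 16, so a queue of 48 EHEA pages is carved out of just three
 * get_zeroed_page() allocations, each split into 16 queue pages.
 */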
106
107static void hw_queue_dtor(struct hw_queue *queue)
108{
109 int pages_per_kpage = PAGE_SIZE / queue->pagesize;
110 int i, nr_pages;
111
112 if (!queue || !queue->queue_pages)
113 return;
114
115 nr_pages = queue->queue_length / queue->pagesize;
116
117 for (i = 0; i < nr_pages; i += pages_per_kpage)
118 free_page((unsigned long)(queue->queue_pages)[i]);
119
120 kfree(queue->queue_pages);
121}
122
123struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
124 int nr_of_cqe, u64 eq_handle, u32 cq_token)
125{
126 struct ehea_cq *cq;
127 struct h_epa epa;
128 u64 *cq_handle_ref, hret, rpage;
129 u32 act_nr_of_entries, act_pages, counter;
130 int ret;
131 void *vpage;
132
133 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
134 if (!cq) {
135 pr_err("no mem for cq\n");
136 goto out_nomem;
137 }
138
139 cq->attr.max_nr_of_cqes = nr_of_cqe;
140 cq->attr.cq_token = cq_token;
141 cq->attr.eq_handle = eq_handle;
142
143 cq->adapter = adapter;
144
145 cq_handle_ref = &cq->fw_handle;
146 act_nr_of_entries = 0;
147 act_pages = 0;
148
149 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
150 &cq->fw_handle, &cq->epas);
151 if (hret != H_SUCCESS) {
152 pr_err("alloc_resource_cq failed\n");
153 goto out_freemem;
154 }
155
156 ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
157 EHEA_PAGESIZE, sizeof(struct ehea_cqe));
158 if (ret)
159 goto out_freeres;
160
161 for (counter = 0; counter < cq->attr.nr_pages; counter++) {
162 vpage = hw_qpageit_get_inc(&cq->hw_queue);
163 if (!vpage) {
164 pr_err("hw_qpageit_get_inc failed\n");
165 goto out_kill_hwq;
166 }
167
168 rpage = virt_to_abs(vpage);
169 hret = ehea_h_register_rpage(adapter->handle,
170 0, EHEA_CQ_REGISTER_ORIG,
171 cq->fw_handle, rpage, 1);
172 if (hret < H_SUCCESS) {
173 pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
174 cq, hret, counter, cq->attr.nr_pages);
175 goto out_kill_hwq;
176 }
177
178 if (counter == (cq->attr.nr_pages - 1)) {
179 vpage = hw_qpageit_get_inc(&cq->hw_queue);
180
181 if ((hret != H_SUCCESS) || (vpage)) {
182 pr_err("registration of pages not complete hret=%llx\n",
183 hret);
184 goto out_kill_hwq;
185 }
186 } else {
187 if (hret != H_PAGE_REGISTERED) {
188 pr_err("CQ: registration of page failed hret=%llx\n",
189 hret);
190 goto out_kill_hwq;
191 }
192 }
193 }
194
195 hw_qeit_reset(&cq->hw_queue);
196 epa = cq->epas.kernel;
197 ehea_reset_cq_ep(cq);
198 ehea_reset_cq_n1(cq);
199
200 return cq;
201
202out_kill_hwq:
203 hw_queue_dtor(&cq->hw_queue);
204
205out_freeres:
206 ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);
207
208out_freemem:
209 kfree(cq);
210
211out_nomem:
212 return NULL;
213}
214
215u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
216{
217 u64 hret;
218 u64 adapter_handle = cq->adapter->handle;
219
220 /* deregister all previous registered pages */
221 hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
222 if (hret != H_SUCCESS)
223 return hret;
224
225 hw_queue_dtor(&cq->hw_queue);
226 kfree(cq);
227
228 return hret;
229}
230
231int ehea_destroy_cq(struct ehea_cq *cq)
232{
233 u64 hret, aer, aerr;
234 if (!cq)
235 return 0;
236
237 hcp_epas_dtor(&cq->epas);
238 hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
239 if (hret == H_R_STATE) {
240 ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
241 hret = ehea_destroy_cq_res(cq, FORCE_FREE);
242 }
243
244 if (hret != H_SUCCESS) {
245 pr_err("destroy CQ failed\n");
246 return -EIO;
247 }
248
249 return 0;
250}
251
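/*
 * Create an event queue of the given type (EHEA_EQ or EHEA_NEQ): allocate
 * the firmware resource, then allocate and register the backing pages.
 * Returns NULL on failure.
 */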
252struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
253 const enum ehea_eq_type type,
254 const u32 max_nr_of_eqes, const u8 eqe_gen)
255{
256 int ret, i;
257 u64 hret, rpage;
258 void *vpage;
259 struct ehea_eq *eq;
260
261 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
262 if (!eq) {
263 pr_err("no mem for eq\n");
264 return NULL;
265 }
266
267 eq->adapter = adapter;
268 eq->attr.type = type;
269 eq->attr.max_nr_of_eqes = max_nr_of_eqes;
270 eq->attr.eqe_gen = eqe_gen;
271 spin_lock_init(&eq->spinlock);
272
273 hret = ehea_h_alloc_resource_eq(adapter->handle,
274 &eq->attr, &eq->fw_handle);
275 if (hret != H_SUCCESS) {
276 pr_err("alloc_resource_eq failed\n");
277 goto out_freemem;
278 }
279
280 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
281 EHEA_PAGESIZE, sizeof(struct ehea_eqe));
282 if (ret) {
283 pr_err("can't allocate eq pages\n");
284 goto out_freeres;
285 }
286
287 for (i = 0; i < eq->attr.nr_pages; i++) {
288 vpage = hw_qpageit_get_inc(&eq->hw_queue);
289 if (!vpage) {
290 pr_err("hw_qpageit_get_inc failed\n");
291 hret = H_RESOURCE;
292 goto out_kill_hwq;
293 }
294
295 rpage = virt_to_abs(vpage);
296
297 hret = ehea_h_register_rpage(adapter->handle, 0,
298 EHEA_EQ_REGISTER_ORIG,
299 eq->fw_handle, rpage, 1);
300
301 if (i == (eq->attr.nr_pages - 1)) {
302 /* last page */
303 vpage = hw_qpageit_get_inc(&eq->hw_queue);
304 if ((hret != H_SUCCESS) || (vpage))
305 goto out_kill_hwq;
306
307 } else {
308 if (hret != H_PAGE_REGISTERED)
309 goto out_kill_hwq;
310
311 }
312 }
313
314 hw_qeit_reset(&eq->hw_queue);
315 return eq;
316
317out_kill_hwq:
318 hw_queue_dtor(&eq->hw_queue);
319
320out_freeres:
321 ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);
322
323out_freemem:
324 kfree(eq);
325 return NULL;
326}
327
328struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
329{
330 struct ehea_eqe *eqe;
331 unsigned long flags;
332
333 spin_lock_irqsave(&eq->spinlock, flags);
334 eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
335 spin_unlock_irqrestore(&eq->spinlock, flags);
336
337 return eqe;
338}
339
340u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
341{
342 u64 hret;
343 unsigned long flags;
344
345 spin_lock_irqsave(&eq->spinlock, flags);
346
347 hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
348 spin_unlock_irqrestore(&eq->spinlock, flags);
349
350 if (hret != H_SUCCESS)
351 return hret;
352
353 hw_queue_dtor(&eq->hw_queue);
354 kfree(eq);
355
356 return hret;
357}
358
359int ehea_destroy_eq(struct ehea_eq *eq)
360{
361 u64 hret, aer, aerr;
362 if (!eq)
363 return 0;
364
365 hcp_epas_dtor(&eq->epas);
366
367 hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
368 if (hret == H_R_STATE) {
369 ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
370 hret = ehea_destroy_eq_res(eq, FORCE_FREE);
371 }
372
373 if (hret != H_SUCCESS) {
374 pr_err("destroy EQ failed\n");
375 return -EIO;
376 }
377
378 return 0;
379}
380
381/**
382 * allocates memory for a queue and registers pages in phyp
383 */
384int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
385 int nr_pages, int wqe_size, int act_nr_sges,
386 struct ehea_adapter *adapter, int h_call_q_selector)
387{
388 u64 hret, rpage;
389 int ret, cnt;
390 void *vpage;
391
392 ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
393 if (ret)
394 return ret;
395
396 for (cnt = 0; cnt < nr_pages; cnt++) {
397 vpage = hw_qpageit_get_inc(hw_queue);
398 if (!vpage) {
399 pr_err("hw_qpageit_get_inc failed\n");
400 goto out_kill_hwq;
401 }
402 rpage = virt_to_abs(vpage);
403 hret = ehea_h_register_rpage(adapter->handle,
404 0, h_call_q_selector,
405 qp->fw_handle, rpage, 1);
406 if (hret < H_SUCCESS) {
407 pr_err("register_rpage_qp failed\n");
408 goto out_kill_hwq;
409 }
410 }
411 hw_qeit_reset(hw_queue);
412 return 0;
413
414out_kill_hwq:
415 hw_queue_dtor(hw_queue);
416 return -EIO;
417}
418
419static inline u32 map_wqe_size(u8 wqe_enc_size)
420{
421 return 128 << wqe_enc_size;
422}
423
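/*
 * Create a queue pair: after the firmware resource is allocated, the send
 * queue and receive queue 1 are always registered; receive queues 2 and 3
 * only if init_attr->rq_count asks for them. The encoded WQE sizes in
 * init_attr are decoded with map_wqe_size(). Returns NULL on failure,
 * tearing down whatever was already set up.
 */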
424struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
425 u32 pd, struct ehea_qp_init_attr *init_attr)
426{
427 int ret;
428 u64 hret;
429 struct ehea_qp *qp;
430 u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
431 u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;
432
433
434 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
435 if (!qp) {
436 pr_err("no mem for qp\n");
437 return NULL;
438 }
439
440 qp->adapter = adapter;
441
442 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
443 &qp->fw_handle, &qp->epas);
444 if (hret != H_SUCCESS) {
445 pr_err("ehea_h_alloc_resource_qp failed\n");
446 goto out_freemem;
447 }
448
449 wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
450 wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
451 wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
452 wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);
453
454 ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
455 wqe_size_in_bytes_sq,
456 init_attr->act_wqe_size_enc_sq, adapter,
457 0);
458 if (ret) {
459 pr_err("can't register for sq ret=%x\n", ret);
460 goto out_freeres;
461 }
462
463 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
464 init_attr->nr_rq1_pages,
465 wqe_size_in_bytes_rq1,
466 init_attr->act_wqe_size_enc_rq1,
467 adapter, 1);
468 if (ret) {
469 pr_err("can't register for rq1 ret=%x\n", ret);
470 goto out_kill_hwsq;
471 }
472
473 if (init_attr->rq_count > 1) {
474 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
475 init_attr->nr_rq2_pages,
476 wqe_size_in_bytes_rq2,
477 init_attr->act_wqe_size_enc_rq2,
478 adapter, 2);
479 if (ret) {
480 pr_err("can't register for rq2 ret=%x\n", ret);
481 goto out_kill_hwr1q;
482 }
483 }
484
485 if (init_attr->rq_count > 2) {
486 ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
487 init_attr->nr_rq3_pages,
488 wqe_size_in_bytes_rq3,
489 init_attr->act_wqe_size_enc_rq3,
490 adapter, 3);
491 if (ret) {
492 pr_err("can't register for rq3 ret=%x\n", ret);
493 goto out_kill_hwr2q;
494 }
495 }
496
497 qp->init_attr = *init_attr;
498
499 return qp;
500
501out_kill_hwr2q:
502 hw_queue_dtor(&qp->hw_rqueue2);
503
504out_kill_hwr1q:
505 hw_queue_dtor(&qp->hw_rqueue1);
506
507out_kill_hwsq:
508 hw_queue_dtor(&qp->hw_squeue);
509
510out_freeres:
511 ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
512 ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);
513
514out_freemem:
515 kfree(qp);
516 return NULL;
517}
518
519u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
520{
521 u64 hret;
522 struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
523
524
525 ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
526 hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
527 if (hret != H_SUCCESS)
528 return hret;
529
530 hw_queue_dtor(&qp->hw_squeue);
531 hw_queue_dtor(&qp->hw_rqueue1);
532
533 if (qp_attr->rq_count > 1)
534 hw_queue_dtor(&qp->hw_rqueue2);
535 if (qp_attr->rq_count > 2)
536 hw_queue_dtor(&qp->hw_rqueue3);
537 kfree(qp);
538
539 return hret;
540}
541
542int ehea_destroy_qp(struct ehea_qp *qp)
543{
544 u64 hret, aer, aerr;
545 if (!qp)
546 return 0;
547
548 hcp_epas_dtor(&qp->epas);
549
550 hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
551 if (hret == H_R_STATE) {
552 ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
553 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
554 }
555
556 if (hret != H_SUCCESS) {
557 pr_err("destroy QP failed\n");
558 return -EIO;
559 }
560
561 return 0;
562}
563
564static inline int ehea_calc_index(unsigned long i, unsigned long s)
565{
566 return (i >> s) & EHEA_INDEX_MASK;
567}
568
569static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
570 int dir)
571{
572 if (!ehea_top_bmap->dir[dir]) {
573 ehea_top_bmap->dir[dir] =
574 kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
575 if (!ehea_top_bmap->dir[dir])
576 return -ENOMEM;
577 }
578 return 0;
579}
580
581static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
582{
583 if (!ehea_bmap->top[top]) {
584 ehea_bmap->top[top] =
585 kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
586 if (!ehea_bmap->top[top])
587 return -ENOMEM;
588 }
589 return ehea_init_top_bmap(ehea_bmap->top[top], dir);
590}
591
592static DEFINE_MUTEX(ehea_busmap_mutex);
593static unsigned long ehea_mr_len;
594
595#define EHEA_BUSMAP_ADD_SECT 1
596#define EHEA_BUSMAP_REM_SECT 0
597
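/*
 * Walk the three-level busmap and assign consecutive bus addresses
 * (EHEA_SECTSIZE apart, starting at EHEA_BUSMAP_START) to every section
 * still marked valid; sub-maps that no longer contain any valid entry are
 * freed. This keeps the address range handed to the adapter contiguous.
 */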
598static void ehea_rebuild_busmap(void)
599{
600 u64 vaddr = EHEA_BUSMAP_START;
601 int top, dir, idx;
602
603 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
604 struct ehea_top_bmap *ehea_top;
605 int valid_dir_entries = 0;
606
607 if (!ehea_bmap->top[top])
608 continue;
609 ehea_top = ehea_bmap->top[top];
610 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
611 struct ehea_dir_bmap *ehea_dir;
612 int valid_entries = 0;
613
614 if (!ehea_top->dir[dir])
615 continue;
616 valid_dir_entries++;
617 ehea_dir = ehea_top->dir[dir];
618 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
619 if (!ehea_dir->ent[idx])
620 continue;
621 valid_entries++;
622 ehea_dir->ent[idx] = vaddr;
623 vaddr += EHEA_SECTSIZE;
624 }
625 if (!valid_entries) {
626 ehea_top->dir[dir] = NULL;
627 kfree(ehea_dir);
628 }
629 }
630 if (!valid_dir_entries) {
631 ehea_bmap->top[top] = NULL;
632 kfree(ehea_top);
633 }
634 }
635}
636
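/*
 * Mark all EHEA_SECTSIZE sections covered by pfn .. pfn + nr_pages as
 * valid (add) or invalid (remove) in the busmap, adjust ehea_mr_len and
 * let ehea_rebuild_busmap() reassign the bus addresses. All callers hold
 * ehea_busmap_mutex.
 */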
637static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
638{
639 unsigned long i, start_section, end_section;
640
641 if (!nr_pages)
642 return 0;
643
644 if (!ehea_bmap) {
645 ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
646 if (!ehea_bmap)
647 return -ENOMEM;
648 }
649
650 start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
651 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
652 /* Mark entries as valid or invalid only; address is assigned later */
653 for (i = start_section; i < end_section; i++) {
654 u64 flag;
655 int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
656 int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
657 int idx = i & EHEA_INDEX_MASK;
658
659 if (add) {
660 int ret = ehea_init_bmap(ehea_bmap, top, dir);
661 if (ret)
662 return ret;
663 flag = 1; /* valid */
664 ehea_mr_len += EHEA_SECTSIZE;
665 } else {
666 if (!ehea_bmap->top[top])
667 continue;
668 if (!ehea_bmap->top[top]->dir[dir])
669 continue;
670 flag = 0; /* invalid */
671 ehea_mr_len -= EHEA_SECTSIZE;
672 }
673
674 ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
675 }
676 ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
677 return 0;
678}
679
680int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
681{
682 int ret;
683
684 mutex_lock(&ehea_busmap_mutex);
685 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
686 mutex_unlock(&ehea_busmap_mutex);
687 return ret;
688}
689
690int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
691{
692 int ret;
693
694 mutex_lock(&ehea_busmap_mutex);
695 ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
696 mutex_unlock(&ehea_busmap_mutex);
697 return ret;
698}
699
700static int ehea_is_hugepage(unsigned long pfn)
701{
702 int page_order;
703
704 if (pfn & EHEA_HUGEPAGE_PFN_MASK)
705 return 0;
706
707 page_order = compound_order(pfn_to_page(pfn));
708 if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
709 return 0;
710
711 return 1;
712}
713
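/*
 * Callback for walk_system_ram_range(): memory chunks smaller than a 16GB
 * hugepage are added to the busmap as a whole; larger chunks are scanned
 * section by section so that 16GB hugepages can be skipped and only the
 * memory around them is added.
 */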
714static int ehea_create_busmap_callback(unsigned long initial_pfn,
715 unsigned long total_nr_pages, void *arg)
716{
717 int ret;
718 unsigned long pfn, start_pfn, end_pfn, nr_pages;
719
720 if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
721 return ehea_update_busmap(initial_pfn, total_nr_pages,
722 EHEA_BUSMAP_ADD_SECT);
723
724 /* Given chunk is >= 16GB -> check for hugepages */
725 start_pfn = initial_pfn;
726 end_pfn = initial_pfn + total_nr_pages;
727 pfn = start_pfn;
728
729 while (pfn < end_pfn) {
730 if (ehea_is_hugepage(pfn)) {
731 /* Add mem found in front of the hugepage */
732 nr_pages = pfn - start_pfn;
733 ret = ehea_update_busmap(start_pfn, nr_pages,
734 EHEA_BUSMAP_ADD_SECT);
735 if (ret)
736 return ret;
737
738 /* Skip the hugepage */
739 pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
740 start_pfn = pfn;
741 } else
742 pfn += (EHEA_SECTSIZE / PAGE_SIZE);
743 }
744
745 /* Add mem found behind the hugepage(s) */
746 nr_pages = pfn - start_pfn;
747 return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
748}
749
750int ehea_create_busmap(void)
751{
752 int ret;
753
754 mutex_lock(&ehea_busmap_mutex);
755 ehea_mr_len = 0;
756 ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
757 ehea_create_busmap_callback);
758 mutex_unlock(&ehea_busmap_mutex);
759 return ret;
760}
761
762void ehea_destroy_busmap(void)
763{
764 int top, dir;
765 mutex_lock(&ehea_busmap_mutex);
766 if (!ehea_bmap)
767 goto out_destroy;
768
769 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
770 if (!ehea_bmap->top[top])
771 continue;
772
773 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
774 if (!ehea_bmap->top[top]->dir[dir])
775 continue;
776
777 kfree(ehea_bmap->top[top]->dir[dir]);
778 }
779
780 kfree(ehea_bmap->top[top]);
781 }
782
783 kfree(ehea_bmap);
784 ehea_bmap = NULL;
785out_destroy:
786 mutex_unlock(&ehea_busmap_mutex);
787}
788
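/*
 * Translate a kernel virtual address into the bus address registered for
 * its memory section: the absolute address selects the top/dir/idx busmap
 * entry, and the offset within the section is added back. Returns
 * EHEA_INVAL_ADDR if the section is not mapped.
 */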
789u64 ehea_map_vaddr(void *caddr)
790{
791 int top, dir, idx;
792 unsigned long index, offset;
793
794 if (!ehea_bmap)
795 return EHEA_INVAL_ADDR;
796
797 index = virt_to_abs(caddr) >> SECTION_SIZE_BITS;
798 top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
799 if (!ehea_bmap->top[top])
800 return EHEA_INVAL_ADDR;
801
802 dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
803 if (!ehea_bmap->top[top]->dir[dir])
804 return EHEA_INVAL_ADDR;
805
806 idx = index & EHEA_INDEX_MASK;
807 if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
808 return EHEA_INVAL_ADDR;
809
810 offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
811 return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
812}
813
814static inline void *ehea_calc_sectbase(int top, int dir, int idx)
815{
816 unsigned long ret = idx;
817 ret |= dir << EHEA_DIR_INDEX_SHIFT;
818 ret |= top << EHEA_TOP_INDEX_SHIFT;
819 return abs_to_virt(ret << SECTION_SIZE_BITS);
820}
821
822static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
823 struct ehea_adapter *adapter,
824 struct ehea_mr *mr)
825{
826 void *pg;
827 u64 j, m, hret;
828 unsigned long k = 0;
829 u64 pt_abs = virt_to_abs(pt);
830
831 void *sectbase = ehea_calc_sectbase(top, dir, idx);
832
833 for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
834
835 for (m = 0; m < EHEA_MAX_RPAGE; m++) {
836 pg = sectbase + ((k++) * EHEA_PAGESIZE);
837 pt[m] = virt_to_abs(pg);
838 }
839 hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
840 0, pt_abs, EHEA_MAX_RPAGE);
841
842 if ((hret != H_SUCCESS) &&
843 (hret != H_PAGE_REGISTERED)) {
844 ehea_h_free_resource(adapter->handle, mr->handle,
845 FORCE_FREE);
846 pr_err("register_rpage_mr failed\n");
847 return hret;
848 }
849 }
850 return hret;
851}
852
853static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
854 struct ehea_adapter *adapter,
855 struct ehea_mr *mr)
856{
857 u64 hret = H_SUCCESS;
858 int idx;
859
860 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
861 if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
862 continue;
863
864 hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
865 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
866 return hret;
867 }
868 return hret;
869}
870
871static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
872 struct ehea_adapter *adapter,
873 struct ehea_mr *mr)
874{
875 u64 hret = H_SUCCESS;
876 int dir;
877
878 for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
879 if (!ehea_bmap->top[top]->dir[dir])
880 continue;
881
882 hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
883 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
884 return hret;
885 }
886 return hret;
887}
888
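/*
 * Register one memory region covering the complete busmap
 * (EHEA_BUSMAP_START, ehea_mr_len bytes) with the firmware and register
 * the pages of every valid section in bursts of EHEA_MAX_RPAGE addresses.
 * Returns 0, -ENOMEM or -EIO.
 */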
889int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
890{
891 int ret;
892 u64 *pt;
893 u64 hret;
894 u32 acc_ctrl = EHEA_MR_ACC_CTRL;
895
896 unsigned long top;
897
898 pt = (void *)get_zeroed_page(GFP_KERNEL);
899 if (!pt) {
900 pr_err("no mem\n");
901 ret = -ENOMEM;
902 goto out;
903 }
904
905 hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
906 ehea_mr_len, acc_ctrl, adapter->pd,
907 &mr->handle, &mr->lkey);
908
909 if (hret != H_SUCCESS) {
910 pr_err("alloc_resource_mr failed\n");
911 ret = -EIO;
912 goto out;
913 }
914
915 if (!ehea_bmap) {
916 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
917 pr_err("no busmap available\n");
918 ret = -EIO;
919 goto out;
920 }
921
922 for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
923 if (!ehea_bmap->top[top])
924 continue;
925
926 hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
927 if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
928 break;
929 }
930
931 if (hret != H_SUCCESS) {
932 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
933 pr_err("registering mr failed\n");
934 ret = -EIO;
935 goto out;
936 }
937
938 mr->vaddr = EHEA_BUSMAP_START;
939 mr->adapter = adapter;
940 ret = 0;
941out:
942 free_page((unsigned long)pt);
943 return ret;
944}
945
946int ehea_rem_mr(struct ehea_mr *mr)
947{
948 u64 hret;
949
950 if (!mr || !mr->adapter)
951 return -EINVAL;
952
953 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
954 FORCE_FREE);
955 if (hret != H_SUCCESS) {
956 pr_err("destroy MR failed\n");
957 return -EIO;
958 }
959
960 return 0;
961}
962
963int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
964 struct ehea_mr *shared_mr)
965{
966 u64 hret;
967
968 hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
969 old_mr->vaddr, EHEA_MR_ACC_CTRL,
970 adapter->pd, shared_mr);
971 if (hret != H_SUCCESS)
972 return -EIO;
973
974 shared_mr->adapter = adapter;
975
976 return 0;
977}
978
979void print_error_data(u64 *data)
980{
981 int length;
982 u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
983 u64 resource = data[1];
984
985 length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);
986
987 if (length > EHEA_PAGESIZE)
988 length = EHEA_PAGESIZE;
989
990 if (type == EHEA_AER_RESTYPE_QP)
991 pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
992 resource, data[6], data[12], data[22]);
993 else if (type == EHEA_AER_RESTYPE_CQ)
994 pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
995 resource, data[6]);
996 else if (type == EHEA_AER_RESTYPE_EQ)
997 pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
998 resource, data[6]);
999
1000 ehea_dump(data, length, "error data");
1001}
1002
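/*
 * Query the hypervisor for the error data block of a resource, dump it and
 * return the resource type found in the block; on success *aer and *aerr
 * are filled from the block. A return value of 0 means no data could be
 * fetched.
 */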
1003u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1004 u64 *aer, u64 *aerr)
1005{
1006 unsigned long ret;
1007 u64 *rblock;
1008 u64 type = 0;
1009
1010 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1011 if (!rblock) {
1012 pr_err("Cannot allocate rblock memory\n");
1013 goto out;
1014 }
1015
1016 ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
1017
1018 if (ret == H_SUCCESS) {
1019 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
1020 *aer = rblock[6];
1021 *aerr = rblock[12];
1022 print_error_data(rblock);
1023 } else if (ret == H_R_STATE) {
1024 pr_err("No error data available: %llX\n", res_handle);
1025 } else
1026 pr_err("Error data could not be fetched: %llX\n", res_handle);
1027
1028 free_page((unsigned long)rblock);
1029out:
1030 return type;
1031}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.h b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
new file mode 100644
index 000000000000..fddff8ec8cfd
--- /dev/null
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
@@ -0,0 +1,404 @@
1/*
2 * linux/drivers/net/ehea/ehea_qmr.h
3 *
4 * eHEA ethernet device driver for IBM eServer System p
5 *
6 * (C) Copyright IBM Corp. 2006
7 *
8 * Authors:
9 * Christoph Raisch <raisch@de.ibm.com>
10 * Jan-Bernd Themann <themann@de.ibm.com>
11 * Thomas Klein <tklein@de.ibm.com>
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2, or (at your option)
17 * any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */
28
29#ifndef __EHEA_QMR_H__
30#define __EHEA_QMR_H__
31
32#include <linux/prefetch.h>
33#include "ehea.h"
34#include "ehea_hw.h"
35
36/*
37 * page size of ehea hardware queues
38 */
39
40#define EHEA_PAGESHIFT 12
41#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
42#define EHEA_SECTSIZE (1UL << 24)
43#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
44#define EHEA_HUGEPAGESHIFT 34
45#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
46#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
47
48#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
49#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
50#endif
51
52/* Some abbreviations used here:
53 *
54 * WQE - Work Queue Entry
55 * SWQE - Send Work Queue Entry
56 * RWQE - Receive Work Queue Entry
57 * CQE - Completion Queue Entry
58 * EQE - Event Queue Entry
59 * MR - Memory Region
60 */
61
62/* Use of WR_ID field for EHEA */
63#define EHEA_WR_ID_COUNT EHEA_BMASK_IBM(0, 19)
64#define EHEA_WR_ID_TYPE EHEA_BMASK_IBM(20, 23)
65#define EHEA_SWQE2_TYPE 0x1
66#define EHEA_SWQE3_TYPE 0x2
67#define EHEA_RWQE2_TYPE 0x3
68#define EHEA_RWQE3_TYPE 0x4
69#define EHEA_WR_ID_INDEX EHEA_BMASK_IBM(24, 47)
70#define EHEA_WR_ID_REFILL EHEA_BMASK_IBM(48, 63)
71
72struct ehea_vsgentry {
73 u64 vaddr;
74 u32 l_key;
75 u32 len;
76};
77
78/* maximum number of sg entries allowed in a WQE */
79#define EHEA_MAX_WQE_SG_ENTRIES 252
80#define SWQE2_MAX_IMM (0xD0 - 0x30)
81#define SWQE3_MAX_IMM 224
82
83/* tx control flags for swqe */
84#define EHEA_SWQE_CRC 0x8000
85#define EHEA_SWQE_IP_CHECKSUM 0x4000
86#define EHEA_SWQE_TCP_CHECKSUM 0x2000
87#define EHEA_SWQE_TSO 0x1000
88#define EHEA_SWQE_SIGNALLED_COMPLETION 0x0800
89#define EHEA_SWQE_VLAN_INSERT 0x0400
90#define EHEA_SWQE_IMM_DATA_PRESENT 0x0200
91#define EHEA_SWQE_DESCRIPTORS_PRESENT 0x0100
92#define EHEA_SWQE_WRAP_CTL_REC 0x0080
93#define EHEA_SWQE_WRAP_CTL_FORCE 0x0040
94#define EHEA_SWQE_BIND 0x0020
95#define EHEA_SWQE_PURGE 0x0010
96
97/* sizeof(struct ehea_swqe) less the union */
98#define SWQE_HEADER_SIZE 32
99
100struct ehea_swqe {
101 u64 wr_id;
102 u16 tx_control;
103 u16 vlan_tag;
104 u8 reserved1;
105 u8 ip_start;
106 u8 ip_end;
107 u8 immediate_data_length;
108 u8 tcp_offset;
109 u8 reserved2;
110 u16 tcp_end;
111 u8 wrap_tag;
112 u8 descriptors; /* number of valid descriptors in WQE */
113 u16 reserved3;
114 u16 reserved4;
115 u16 mss;
116 u32 reserved5;
117 union {
118 /* Send WQE Format 1 */
119 struct {
120 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
121 } no_immediate_data;
122
123 /* Send WQE Format 2 */
124 struct {
125 struct ehea_vsgentry sg_entry;
126 /* 0x30 */
127 u8 immediate_data[SWQE2_MAX_IMM];
128 /* 0xd0 */
129 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
130 } immdata_desc __packed;
131
132 /* Send WQE Format 3 */
133 struct {
134 u8 immediate_data[SWQE3_MAX_IMM];
135 } immdata_nodesc;
136 } u;
137};
138
139struct ehea_rwqe {
140 u64 wr_id; /* work request ID */
141 u8 reserved1[5];
142 u8 data_segments;
143 u16 reserved2;
144 u64 reserved3;
145 u64 reserved4;
146 struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
147};
148
149#define EHEA_CQE_VLAN_TAG_XTRACT 0x0400
150
151#define EHEA_CQE_TYPE_RQ 0x60
152#define EHEA_CQE_STAT_ERR_MASK 0x700F
153#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
154#define EHEA_CQE_BLIND_CKSUM 0x8000
155#define EHEA_CQE_STAT_ERR_TCP 0x4000
156#define EHEA_CQE_STAT_ERR_IP 0x2000
157#define EHEA_CQE_STAT_ERR_CRC 0x1000
158
159/* Defines which bad send cqe statuses lead to a port reset */
160#define EHEA_CQE_STAT_RESET_MASK 0x0002
161
162struct ehea_cqe {
163 u64 wr_id; /* work request ID from WQE */
164 u8 type;
165 u8 valid;
166 u16 status;
167 u16 reserved1;
168 u16 num_bytes_transfered;
169 u16 vlan_tag;
170 u16 inet_checksum_value;
171 u8 reserved2;
172 u8 header_length;
173 u16 reserved3;
174 u16 page_offset;
175 u16 wqe_count;
176 u32 qp_token;
177 u32 timestamp;
178 u32 reserved4;
179 u64 reserved5[3];
180};
181
182#define EHEA_EQE_VALID EHEA_BMASK_IBM(0, 0)
183#define EHEA_EQE_IS_CQE EHEA_BMASK_IBM(1, 1)
184#define EHEA_EQE_IDENTIFIER EHEA_BMASK_IBM(2, 7)
185#define EHEA_EQE_QP_CQ_NUMBER EHEA_BMASK_IBM(8, 31)
186#define EHEA_EQE_QP_TOKEN EHEA_BMASK_IBM(32, 63)
187#define EHEA_EQE_CQ_TOKEN EHEA_BMASK_IBM(32, 63)
188#define EHEA_EQE_KEY EHEA_BMASK_IBM(32, 63)
189#define EHEA_EQE_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
190#define EHEA_EQE_EQ_NUMBER EHEA_BMASK_IBM(48, 63)
191#define EHEA_EQE_SM_ID EHEA_BMASK_IBM(48, 63)
192#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
193#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
194
195#define EHEA_AER_RESTYPE_QP 0x8
196#define EHEA_AER_RESTYPE_CQ 0x4
197#define EHEA_AER_RESTYPE_EQ 0x3
198
199/* Defines which affiliated errors lead to a port reset */
200#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
201#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
202
203struct ehea_eqe {
204 u64 entry;
205};
206
207#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
208#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
209
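/*
 * Map a byte offset within the logical queue to the address of the
 * corresponding entry: the offset wraps at queue_length, the hardware page
 * is looked up in queue_pages and the offset inside that page is applied.
 */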
210static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
211{
212 struct ehea_page *current_page;
213
214 if (q_offset >= queue->queue_length)
215 q_offset -= queue->queue_length;
216 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
217 return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
218}
219
220static inline void *hw_qeit_get(struct hw_queue *queue)
221{
222 return hw_qeit_calc(queue, queue->current_q_offset);
223}
224
225static inline void hw_qeit_inc(struct hw_queue *queue)
226{
227 queue->current_q_offset += queue->qe_size;
228 if (queue->current_q_offset >= queue->queue_length) {
229 queue->current_q_offset = 0;
230 /* toggle the valid flag */
231 queue->toggle_state = (~queue->toggle_state) & 1;
232 }
233}
234
235static inline void *hw_qeit_get_inc(struct hw_queue *queue)
236{
237 void *retvalue = hw_qeit_get(queue);
238 hw_qeit_inc(queue);
239 return retvalue;
240}
241
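/*
 * Return the CQE at the current position if its valid bit matches the
 * queue's toggle state (i.e. it was written by hardware in the current
 * pass), advance the iterator and prefetch the next entry; return NULL
 * when no new entry is available.
 */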
242static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
243{
244 struct ehea_cqe *retvalue = hw_qeit_get(queue);
245 u8 valid = retvalue->valid;
246 void *pref;
247
248 if ((valid >> 7) == (queue->toggle_state & 1)) {
249 /* this is a good one */
250 hw_qeit_inc(queue);
251 pref = hw_qeit_calc(queue, queue->current_q_offset);
252 prefetch(pref);
253 prefetch(pref + 128);
254 } else
255 retvalue = NULL;
256 return retvalue;
257}
258
259static inline void *hw_qeit_get_valid(struct hw_queue *queue)
260{
261 struct ehea_cqe *retvalue = hw_qeit_get(queue);
262 void *pref;
263 u8 valid;
264
265 pref = hw_qeit_calc(queue, queue->current_q_offset);
266 prefetch(pref);
267 prefetch(pref + 128);
268 prefetch(pref + 256);
269 valid = retvalue->valid;
270 if ((valid >> 7) != (queue->toggle_state & 1))
271 retvalue = NULL;
272 return retvalue;
273}
274
275static inline void *hw_qeit_reset(struct hw_queue *queue)
276{
277 queue->current_q_offset = 0;
278 return hw_qeit_get(queue);
279}
280
281static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
282{
283 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
284 void *retvalue;
285
286 retvalue = hw_qeit_get(queue);
287 queue->current_q_offset += queue->qe_size;
288 if (queue->current_q_offset > last_entry_in_q) {
289 queue->current_q_offset = 0;
290 queue->toggle_state = (~queue->toggle_state) & 1;
291 }
292 return retvalue;
293}
294
295static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
296{
297 void *retvalue = hw_qeit_get(queue);
298 u32 qe = *(u8 *)retvalue;
299 if ((qe >> 7) == (queue->toggle_state & 1))
300 hw_qeit_eq_get_inc(queue);
301 else
302 retvalue = NULL;
303 return retvalue;
304}
305
306static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
307 int rq_nr)
308{
309 struct hw_queue *queue;
310
311 if (rq_nr == 1)
312 queue = &qp->hw_rqueue1;
313 else if (rq_nr == 2)
314 queue = &qp->hw_rqueue2;
315 else
316 queue = &qp->hw_rqueue3;
317
318 return hw_qeit_get_inc(queue);
319}
320
321static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
322 int *wqe_index)
323{
324 struct hw_queue *queue = &my_qp->hw_squeue;
325 struct ehea_swqe *wqe_p;
326
327 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
328 wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);
329
330 return wqe_p;
331}
332
333static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
334{
335 iosync();
336 ehea_update_sqa(my_qp, 1);
337}
338
339static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
340{
341 struct hw_queue *queue = &qp->hw_rqueue1;
342
343 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
344 return hw_qeit_get_valid(queue);
345}
346
347static inline void ehea_inc_cq(struct ehea_cq *cq)
348{
349 hw_qeit_inc(&cq->hw_queue);
350}
351
352static inline void ehea_inc_rq1(struct ehea_qp *qp)
353{
354 hw_qeit_inc(&qp->hw_rqueue1);
355}
356
357static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
358{
359 return hw_qeit_get_valid(&my_cq->hw_queue);
360}
361
362#define EHEA_CQ_REGISTER_ORIG 0
363#define EHEA_EQ_REGISTER_ORIG 0
364
365enum ehea_eq_type {
366 EHEA_EQ = 0, /* event queue */
367 EHEA_NEQ /* notification event queue */
368};
369
370struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
371 enum ehea_eq_type type,
372 const u32 length, const u8 eqe_gen);
373
374int ehea_destroy_eq(struct ehea_eq *eq);
375
376struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);
377
378struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
379 u64 eq_handle, u32 cq_token);
380
381int ehea_destroy_cq(struct ehea_cq *cq);
382
383struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
384 struct ehea_qp_init_attr *init_attr);
385
386int ehea_destroy_qp(struct ehea_qp *qp);
387
388int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);
389
390int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
391 struct ehea_mr *shared_mr);
392
393int ehea_rem_mr(struct ehea_mr *mr);
394
395u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
396 u64 *aer, u64 *aerr);
397
398int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
399int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
400int ehea_create_busmap(void);
401void ehea_destroy_busmap(void);
402u64 ehea_map_vaddr(void *caddr);
403
404#endif /* __EHEA_QMR_H__ */