 drivers/net/ehea/ehea.h      |   4
 drivers/net/ehea/ehea_main.c | 276
 drivers/net/ehea/ehea_phyp.h |   1
 drivers/net/ehea/ehea_qmr.c  |  20
 drivers/net/ehea/ehea_qmr.h  |   4
 5 files changed, 259 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0cbd949e336..30220894b01f 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,13 +40,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0074"
+#define DRV_VERSION "EHEA_0077"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD      2
 #define DLPAR_MEM_REM      4
-#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
         | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
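
Note: the EHEA_CAPABILITIES bump above is what actually advertises memory add to the DLPAR tooling; the macro is a plain OR-mask of feature bits the driver reports upward. A minimal user-space sketch of how such a mask is tested; ehea_has_capability is a hypothetical helper for illustration, not a driver function:

#include <stdio.h>

#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4
#define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD)

/* Hypothetical helper: nonzero if every bit of `cap` is advertised. */
static int ehea_has_capability(unsigned int cap)
{
        return (EHEA_CAPABILITIES & cap) == cap;
}

int main(void)
{
        printf("memory add supported:    %d\n", ehea_has_capability(DLPAR_MEM_ADD));
        printf("memory remove supported: %d\n", ehea_has_capability(DLPAR_MEM_REM));
        return 0;
}

With this patch the first line prints 1 while DLPAR_MEM_REM still prints 0, matching the new mask.
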
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 62d6c1e5f9d3..5bc0a1530eb7 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -97,6 +97,7 @@ u64 ehea_driver_flags = 0;
 struct workqueue_struct *ehea_driver_wq;
 struct work_struct ehea_rereg_mr_task;
 
+struct semaphore dlpar_mem_lock;
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                                         const struct of_device_id *id);
@@ -177,16 +178,24 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
+       int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
+       int adder = 0;
        int i;
 
-       if (!nr_of_wqes)
+       pr->rq1_skba.os_skbs = 0;
+
+       if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+               pr->rq1_skba.index = index;
+               pr->rq1_skba.os_skbs = fill_wqes;
                return;
+       }
 
-       for (i = 0; i < nr_of_wqes; i++) {
+       for (i = 0; i < fill_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
+                               pr->rq1_skba.os_skbs = fill_wqes - i;
                                ehea_error("%s: no mem for skb/%d wqes filled",
                                           dev->name, i);
                                break;
@@ -194,9 +203,14 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
                }
                index--;
                index &= max_index_mask;
+               adder++;
        }
+
+       if (adder == 0)
+               return;
+
        /* Ring doorbell */
-       ehea_update_rq1a(pr->qp, i);
+       ehea_update_rq1a(pr->qp, adder);
 }
 
 static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
@@ -230,16 +244,21 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
+       int adder = 0;
        int ret = 0;
 
        fill_wqes = q_skba->os_skbs + num_wqes;
+       q_skba->os_skbs = 0;
 
-       if (!fill_wqes)
+       if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+               q_skba->os_skbs = fill_wqes;
                return ret;
+       }
 
        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
+               u64 tmp_addr;
                struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
                if (!skb) {
                        ehea_error("%s: no mem for skb/%d wqes filled",
@@ -251,30 +270,37 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                skb_reserve(skb, NET_IP_ALIGN);
 
                skb_arr[index] = skb;
+               tmp_addr = ehea_map_vaddr(skb->data);
+               if (tmp_addr == -1) {
+                       dev_kfree_skb(skb);
+                       q_skba->os_skbs = fill_wqes - i;
+                       ret = 0;
+                       break;
+               }
 
                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
-               rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
+               rwqe->sg_list[0].vaddr = tmp_addr;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;
 
                index++;
                index &= max_index_mask;
-
-               if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
-                       goto out;
+               adder++;
        }
 
        q_skba->index = index;
+       if (adder == 0)
+               goto out;
 
        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
-               ehea_update_rq2a(pr->qp, i);
+               ehea_update_rq2a(pr->qp, adder);
        else
-               ehea_update_rq3a(pr->qp, i);
+               ehea_update_rq3a(pr->qp, adder);
 out:
        return ret;
 }
@@ -1967,11 +1993,12 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
                ehea_dump(swqe, 512, "swqe");
        }
 
-       if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
-               goto out;
+       if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
+               netif_stop_queue(dev);
+               swqe->tx_control |= EHEA_SWQE_PURGE;
+       }
 
        ehea_post_swqe(pr->qp, swqe);
-       pr->tx_packets++;
 
        if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
                spin_lock_irqsave(&pr->netif_queue, flags);
@@ -1984,7 +2011,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
        dev->trans_start = jiffies;
        spin_unlock(&pr->xmit_lock);
-out:
+
        return NETDEV_TX_OK;
 }
 
@@ -2376,6 +2403,192 @@ static int ehea_stop(struct net_device *dev)
        return ret;
 }
 
+void ehea_purge_sq(struct ehea_qp *orig_qp)
+{
+       struct ehea_qp qp = *orig_qp;
+       struct ehea_qp_init_attr *init_attr = &qp.init_attr;
+       struct ehea_swqe *swqe;
+       int wqe_index;
+       int i;
+
+       for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
+               swqe = ehea_get_swqe(&qp, &wqe_index);
+               swqe->tx_control |= EHEA_SWQE_PURGE;
+       }
+}
+
+int ehea_stop_qps(struct net_device *dev)
+{
+       struct ehea_port *port = netdev_priv(dev);
+       struct ehea_adapter *adapter = port->adapter;
+       struct hcp_modify_qp_cb0* cb0;
+       int ret = -EIO;
+       int dret;
+       int i;
+       u64 hret;
+       u64 dummy64 = 0;
+       u16 dummy16 = 0;
+
+       cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!cb0) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               struct ehea_qp *qp = pr->qp;
+
+               /* Purge send queue */
+               ehea_purge_sq(qp);
+
+               /* Disable queue pair */
+               hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                           EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+                                           cb0);
+               if (hret != H_SUCCESS) {
+                       ehea_error("query_ehea_qp failed (1)");
+                       goto out;
+               }
+
+               cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
+               cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
+
+               hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                            EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
+                                                           1), cb0, &dummy64,
+                                            &dummy64, &dummy16, &dummy16);
+               if (hret != H_SUCCESS) {
+                       ehea_error("modify_ehea_qp failed (1)");
+                       goto out;
+               }
+
+               hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                           EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+                                           cb0);
+               if (hret != H_SUCCESS) {
+                       ehea_error("query_ehea_qp failed (2)");
+                       goto out;
+               }
+
+               /* deregister shared memory regions */
+               dret = ehea_rem_smrs(pr);
+               if (dret) {
+                       ehea_error("unreg shared memory region failed");
+                       goto out;
+               }
+       }
+
+       ret = 0;
+out:
+       kfree(cb0);
+
+       return ret;
+}
+
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
+{
+       struct ehea_qp qp = *orig_qp;
+       struct ehea_qp_init_attr *init_attr = &qp.init_attr;
+       struct ehea_rwqe *rwqe;
+       struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
+       struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
+       struct sk_buff *skb;
+       u32 lkey = pr->recv_mr.lkey;
+
+
+       int i;
+       int index;
+
+       for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
+               rwqe = ehea_get_next_rwqe(&qp, 2);
+               rwqe->sg_list[0].l_key = lkey;
+               index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
+               skb = skba_rq2[index];
+               if (skb)
+                       rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
+       }
+
+       for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
+               rwqe = ehea_get_next_rwqe(&qp, 3);
+               rwqe->sg_list[0].l_key = lkey;
+               index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
+               skb = skba_rq3[index];
+               if (skb)
+                       rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
+       }
+}
+
+int ehea_restart_qps(struct net_device *dev)
+{
+       struct ehea_port *port = netdev_priv(dev);
+       struct ehea_adapter *adapter = port->adapter;
+       int ret = 0;
+       int i;
+
+       struct hcp_modify_qp_cb0* cb0;
+       u64 hret;
+       u64 dummy64 = 0;
+       u16 dummy16 = 0;
+
+       cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!cb0) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
+               struct ehea_port_res *pr = &port->port_res[i];
+               struct ehea_qp *qp = pr->qp;
+
+               ret = ehea_gen_smrs(pr);
+               if (ret) {
+                       ehea_error("creation of shared memory regions failed");
+                       goto out;
+               }
+
+               ehea_update_rqs(qp, pr);
+
+               /* Enable queue pair */
+               hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                           EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+                                           cb0);
+               if (hret != H_SUCCESS) {
+                       ehea_error("query_ehea_qp failed (1)");
+                       goto out;
+               }
+
+               cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
+               cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
+
+               hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                            EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
+                                                           1), cb0, &dummy64,
+                                            &dummy64, &dummy16, &dummy16);
+               if (hret != H_SUCCESS) {
+                       ehea_error("modify_ehea_qp failed (1)");
+                       goto out;
+               }
+
+               hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
+                                           EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
+                                           cb0);
+               if (hret != H_SUCCESS) {
+                       ehea_error("query_ehea_qp failed (2)");
+                       goto out;
+               }
+
+               /* refill entire queue */
+               ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
+               ehea_refill_rq2(pr, 0);
+               ehea_refill_rq3(pr, 0);
+       }
+out:
+       kfree(cb0);
+
+       return ret;
+}
+
 static void ehea_reset_port(struct work_struct *work)
 {
        int ret;
@@ -2395,6 +2608,8 @@ static void ehea_reset_port(struct work_struct *work)
        if (ret)
                goto out;
 
+       ehea_set_multicast_list(dev);
+
        if (netif_msg_timer(port))
                ehea_info("Device %s resetted successfully", dev->name);
 
@@ -2411,6 +2626,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
        int ret, i;
        struct ehea_adapter *adapter;
 
+       down(&dlpar_mem_lock);
        ehea_info("LPAR memory enlarged - re-initializing driver");
 
        list_for_each_entry(adapter, &adapter_list, list)
@@ -2423,14 +2639,14 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        struct net_device *dev = port->netdev;
 
                                        if (dev->flags & IFF_UP) {
-                                               ehea_info("stopping %s",
-                                                         dev->name);
                                                down(&port->port_lock);
                                                netif_stop_queue(dev);
-
+                                               ret = ehea_stop_qps(dev);
+                                               if (ret) {
+                                                       up(&port->port_lock);
+                                                       goto out;
+                                               }
                                                port_napi_disable(port);
-
-                                               ehea_down(dev);
                                                up(&port->port_lock);
                                        }
                                }
@@ -2446,10 +2662,11 @@ static void ehea_rereg_mrs(struct work_struct *work)
        }
 
        ehea_destroy_busmap();
-
        ret = ehea_create_busmap();
-       if (ret)
+       if (ret) {
+               ehea_error("creating ehea busmap failed");
                goto out;
+       }
 
        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 
@@ -2471,21 +2688,18 @@ static void ehea_rereg_mrs(struct work_struct *work)
                                        struct net_device *dev = port->netdev;
 
                                        if (dev->flags & IFF_UP) {
-                                               ehea_info("restarting %s",
-                                                         dev->name);
                                                down(&port->port_lock);
-
-                                               ret = ehea_up(dev);
-                                               if (!ret) {
-                                                       port_napi_enable(port);
+                                               port_napi_enable(port);
+                                               ret = ehea_restart_qps(dev);
+                                               if (!ret)
                                                        netif_wake_queue(dev);
-                                               }
-
                                                up(&port->port_lock);
                                        }
                                }
                        }
                }
        }
+       up(&dlpar_mem_lock);
+       ehea_info("re-initializing driver complete");
 out:
        return;
 }
@@ -2494,7 +2708,8 @@ static void ehea_tx_watchdog(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
 
-       if (netif_carrier_ok(dev))
+       if (netif_carrier_ok(dev) &&
+           !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
                queue_work(port->adapter->ehea_wq, &port->reset_task);
 }
 
@@ -3139,6 +3354,7 @@ int __init ehea_module_init(void)
        ehea_driver_wq = create_workqueue("ehea_driver_wq");
 
        INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+       sema_init(&dlpar_mem_lock, 1);
 
        ret = check_module_parm();
        if (ret)
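
Note: the common thread in the ehea_main.c hunks above is the os_skbs/adder bookkeeping. A refill attempted while __EHEA_STOP_XFER is set is deferred into os_skbs instead of dropped, a partial allocation failure records the shortfall, and the doorbell is rung for adder (entries actually posted) rather than the loop counter i, which over-reported whenever the loop exited early. A self-contained toy model of that pattern; the names (ring, refill) are invented for illustration and malloc stands in for netdev_alloc_skb():

#include <stdio.h>
#include <stdlib.h>

#define RING_LEN 8 /* power of two, like the real skb arrays */

struct ring {
        void *slot[RING_LEN];
        int index;    /* next slot to fill */
        int os_skbs;  /* outstanding entries we still owe the queue */
        int stopped;  /* models __EHEA_STOP_XFER */
};

/* Returns how many entries were actually posted (the "adder" count);
 * anything we could not post is carried in os_skbs for the next call. */
static int refill(struct ring *r, int num_wqes)
{
        int fill_wqes = r->os_skbs + num_wqes;
        int adder = 0, i;

        r->os_skbs = 0;
        if (r->stopped) {                 /* transfers stopped: defer all */
                r->os_skbs = fill_wqes;
                return 0;
        }
        for (i = 0; i < fill_wqes; i++) {
                void *buf = malloc(64);   /* stand-in for netdev_alloc_skb() */
                if (!buf) {
                        r->os_skbs = fill_wqes - i; /* remember the shortfall */
                        break;
                }
                free(r->slot[r->index]);  /* free(NULL) is a safe no-op */
                r->slot[r->index] = buf;
                r->index = (r->index + 1) & (RING_LEN - 1);
                adder++;
        }
        if (adder)                        /* never ring for zero entries */
                printf("ring doorbell for %d new entries\n", adder);
        return adder;
}

int main(void)
{
        struct ring r = { .stopped = 1 };

        refill(&r, 4);   /* deferred: os_skbs becomes 4, no doorbell */
        r.stopped = 0;
        refill(&r, 2);   /* posts the 4 owed plus 2 new = doorbell for 6 */
        return 0;
}

A later call automatically retries whatever an earlier call owed, which appears to be why ehea_restart_qps() passes 0 as the wqe count: it posts the backlog that accumulated while transfers were stopped.
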
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index 89b63531ff26..faa191d23b86 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -126,6 +126,7 @@ struct hcp_modify_qp_cb0 {
 #define H_QP_CR_STATE_RDY2RCV   0x0000030000000000ULL /* Ready to recv */
 #define H_QP_CR_STATE_RDY2SND   0x0000050000000000ULL /* Ready to send */
 #define H_QP_CR_STATE_ERROR     0x0000800000000000ULL /* Error */
+#define H_QP_CR_RES_STATE       0x0000007F00000000ULL /* Resultant state */
 
 struct hcp_modify_qp_cb1 {
        u32 qpn;                /* 00 */
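
Note: H_QP_CR_RES_STATE backs the idiom used twice in ehea_main.c above, cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8. Comparing the masks, the hypervisor reports the QP's resultant state 8 bits below the requested-state field (H_QP_CR_STATE_RDY2SND is 0x05 at bit 40, while the resultant field occupies bits 32-38), so mask-and-shift re-requests whatever state the QP is currently in before the enable bit is toggled. A stand-alone check of that arithmetic; the H_QP_CR_ENABLED value is assumed from the existing header, since only H_QP_CR_RES_STATE appears in this hunk:

#include <stdio.h>

#define H_QP_CR_ENABLED       0x8000000000000000ULL /* assumed from ehea_phyp.h */
#define H_QP_CR_STATE_RDY2SND 0x0000050000000000ULL /* from the context above */
#define H_QP_CR_RES_STATE     0x0000007F00000000ULL /* added by this patch */

int main(void)
{
        /* Suppose a query reports: QP enabled, resultant state 0x05 at bits 32-38. */
        unsigned long long qp_ctl_reg = H_QP_CR_ENABLED | (0x05ULL << 32);

        /* The patch's idiom: turn the reported state into the requested state. */
        qp_ctl_reg = (qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
        qp_ctl_reg &= ~H_QP_CR_ENABLED; /* disable, as in ehea_stop_qps() */

        /* Prints 1: the re-requested state equals H_QP_CR_STATE_RDY2SND. */
        printf("%d\n", qp_ctl_reg == H_QP_CR_STATE_RDY2SND);
        return 0;
}

ehea_stop_qps() then clears the enable bit as shown; ehea_restart_qps() sets it instead.
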
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index c82e24596074..329a25248d75 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -563,8 +563,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 int ehea_create_busmap( void )
 {
        u64 vaddr = EHEA_BUSMAP_START;
-       unsigned long abs_max_pfn = 0;
-       unsigned long sec_max_pfn;
+       unsigned long high_section_index = 0;
        int i;
 
        /*
@@ -574,14 +573,10 @@ int ehea_create_busmap( void )
        ehea_bmap.valid_sections = 0;
 
        for (i = 0; i < NR_MEM_SECTIONS; i++)
-               if (valid_section_nr(i)) {
-                       sec_max_pfn = section_nr_to_pfn(i);
-                       if (sec_max_pfn > abs_max_pfn)
-                               abs_max_pfn = sec_max_pfn;
-                       ehea_bmap.valid_sections++;
-               }
+               if (valid_section_nr(i))
+                       high_section_index = i;
 
-       ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+       ehea_bmap.entries = high_section_index + 1;
        ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
 
        if (!ehea_bmap.vaddr)
@@ -593,6 +588,7 @@ int ehea_create_busmap( void )
                if (pfn_valid(pfn)) {
                        ehea_bmap.vaddr[i] = vaddr;
                        vaddr += EHEA_SECTSIZE;
+                       ehea_bmap.valid_sections++;
                } else
                        ehea_bmap.vaddr[i] = 0;
        }
@@ -637,7 +633,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 
        mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-       pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
+       pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!pt) {
                ehea_error("no mem");
                ret = -ENOMEM;
@@ -660,8 +656,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
                        void *sectbase = __va(i << SECTION_SIZE_BITS);
                        unsigned long k = 0;
 
-                       for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
-                             j++) {
+                       for (j = 0; j < (EHEA_PAGES_PER_SECTION /
+                                        EHEA_MAX_RPAGE); j++) {
 
                                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
                                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
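
Note: the ehea_create_busmap() rework sizes the table by the highest valid section index and now counts valid_sections only for sections that pass pfn_valid(), so memory that DLPAR added, and the holes it can leave, no longer skew the MR length computed from valid_sections in ehea_reg_kernel_mr(). A toy user-space model of the resulting sparse-section translation, loosely in the spirit of ehea_map_vaddr(); every name and constant here is an illustrative stand-in, not the driver's:

#include <stdio.h>

#define SECTSIZE     (1UL << 24)          /* like EHEA_SECTSIZE: 16 MB */
#define NR_SECTIONS  16                   /* stand-in for NR_MEM_SECTIONS */
#define BUSMAP_START 0x8000000000000000UL /* stand-in for EHEA_BUSMAP_START */

static unsigned long busmap[NR_SECTIONS]; /* per-section bus base, 0 = invalid */

/* valid[] marks which sections are populated; sparse on purpose. */
static void create_busmap(const int *valid)
{
        unsigned long vaddr = BUSMAP_START;
        int i;

        for (i = 0; i < NR_SECTIONS; i++) {
                if (valid[i]) {
                        busmap[i] = vaddr;
                        vaddr += SECTSIZE; /* bus space stays dense */
                } else {
                        busmap[i] = 0;
                }
        }
}

/* Analogue of ehea_map_vaddr(): section index from the address, then that
 * section's bus base plus the offset. Returns -1 for unmapped memory. */
static unsigned long map_vaddr(unsigned long addr)
{
        unsigned long sec = addr / SECTSIZE;

        if (sec >= NR_SECTIONS || !busmap[sec])
                return (unsigned long)-1;
        return busmap[sec] | (addr & (SECTSIZE - 1));
}

int main(void)
{
        int valid[NR_SECTIONS] = { [0] = 1, [1] = 1, [5] = 1 }; /* hole at 2-4 */

        create_busmap(valid);
        printf("0x%lx -> 0x%lx\n", 5 * SECTSIZE + 42, map_vaddr(5 * SECTSIZE + 42));
        printf("0x%lx -> 0x%lx\n", 3 * SECTSIZE, map_vaddr(3 * SECTSIZE)); /* hole */
        return 0;
}

The -1 return is what the new tmp_addr == -1 check in ehea_refill_rq_def() above catches: an skb whose memory is not covered by a registered section is freed and retried later instead of being handed to the hardware.
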
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index b71f8452a5e3..562de0ebdd85 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -39,7 +39,7 @@
 #define EHEA_PAGESHIFT         12
 #define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
 #define EHEA_SECTSIZE          (1UL << 24)
-#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
 
 #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
 #error eHEA module can't work if kernel sectionsize < ehea sectionsize
@@ -145,7 +145,7 @@ struct ehea_rwqe {
 #define EHEA_CQE_VLAN_TAG_XTRACT   0x0400
 
 #define EHEA_CQE_TYPE_RQ           0x60
-#define EHEA_CQE_STAT_ERR_MASK     0x721F
+#define EHEA_CQE_STAT_ERR_MASK     0x720F
 #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
 #define EHEA_CQE_STAT_ERR_TCP      0x4000
 #define EHEA_CQE_STAT_ERR_IP       0x2000
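
Note: two of these one-liners are easy to misread. The EHEA_CQE_STAT_ERR_MASK change drops bit 0x0010 from the set of CQE status bits treated as receive errors. The EHEA_PAGES_PER_SECTION change replaces the kernel's PAGE_SHIFT with the fixed EHEA_PAGESHIFT of 12, because the registration loop in ehea_qmr.c walks 4K eHEA pages (EHEA_PAGESIZE), not kernel pages. A quick numeric check; the 64K case is my reading of the motivation (e.g. a ppc64 kernel built with 64K pages) and is not stated in the patch:

#include <stdio.h>

#define EHEA_PAGESHIFT 12          /* 4K eHEA/firmware pages */
#define EHEA_SECTSIZE  (1UL << 24) /* 16 MB memory section */

int main(void)
{
        unsigned long page_shift_64k = 16; /* PAGE_SHIFT on a 64K-page kernel */

        /* Old: EHEA_SECTSIZE >> PAGE_SHIFT -> only 256 on a 64K-page kernel */
        printf("old definition: %lu pages per section\n",
               EHEA_SECTSIZE >> page_shift_64k);
        /* New: EHEA_SECTSIZE >> EHEA_PAGESHIFT -> always 4096 */
        printf("new definition: %lu pages per section\n",
               EHEA_SECTSIZE >> EHEA_PAGESHIFT);
        return 0;
}
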