Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ehea/ehea.h      |  23
-rw-r--r--  drivers/net/ehea/ehea_main.c | 144
-rw-r--r--  drivers/net/ehea/ehea_phyp.h |   3
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  | 156
-rw-r--r--  drivers/net/ehea/ehea_qmr.h  |  14
5 files changed, 275 insertions(+), 65 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f03f070451de..6628fa622e2c 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,13 +39,13 @@
 #include <asm/io.h>
 
 #define DRV_NAME        "ehea"
-#define DRV_VERSION     "EHEA_0067"
+#define DRV_VERSION     "EHEA_0070"
 
-/* EHEA capability flags */
+/* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
 #define DLPAR_MEM_ADD      2
 #define DLPAR_MEM_REM      4
 #define EHEA_CAPABILITIES  (DLPAR_PORT_ADD_REM)
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
         | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -113,6 +113,8 @@
 /* Memory Regions */
 #define EHEA_MR_ACC_CTRL       0x00800000
 
+#define EHEA_BUSMAP_START      0x8000000000000000ULL
+
 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ
 
 /* utility functions */
@@ -186,6 +188,12 @@ struct h_epas {
                                    set to 0 if unused */
 };
 
+struct ehea_busmap {
+        unsigned int entries;           /* total number of entries */
+        unsigned int valid_sections;    /* number of valid sections */
+        u64 *vaddr;
+};
+
 struct ehea_qp;
 struct ehea_cq;
 struct ehea_eq;
@@ -382,6 +390,8 @@ struct ehea_adapter {
         struct ehea_mr mr;
         u32 pd;                 /* protection domain */
         u64 max_mc_mac;         /* max number of multicast mac addresses */
+        int active_ports;
+        struct list_head list;
 };
 
 
@@ -431,6 +441,9 @@ struct port_res_cfg {
         int max_entries_rq3;
 };
 
+enum ehea_flag_bits {
+        __EHEA_STOP_XFER
+};
 
 void ehea_set_ethtool_ops(struct net_device *netdev);
 int ehea_sense_port_attr(struct ehea_port *port);
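
The new __EHEA_STOP_XFER flag bit is shared between the data path and the memory re-registration code: the RX refill and TX start paths in ehea_main.c below test it and bail out while it is set, and ehea_map_vaddr() in ehea_qmr.c sets it (and queues the re-registration work) when an address translation misses. A minimal sketch of that pattern, with hypothetical helper names (xfer_stopped, stop_xfer_and_schedule_rereg) wrapping the calls the patch actually makes inline:

    /* Sketch only -- the helper names are illustrative; the patch performs
     * these calls inline.  ehea_driver_flags, ehea_driver_wq and
     * ehea_rereg_mr_task are the globals added in ehea_main.c below. */
    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    extern struct workqueue_struct *ehea_driver_wq;
    extern struct work_struct ehea_rereg_mr_task;

    static inline int xfer_stopped(unsigned long *flags)
    {
            /* cheap single-bit test in the RX/TX hot paths, no lock needed */
            return test_bit(__EHEA_STOP_XFER, flags);
    }

    static inline void stop_xfer_and_schedule_rereg(unsigned long *flags)
    {
            /* only the first caller to set the bit queues the recovery work */
            if (!test_and_set_bit(__EHEA_STOP_XFER, flags))
                    queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
    }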
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 383144db4d18..1d1571cf322e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -79,6 +79,11 @@ MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
 
 static int port_name_cnt = 0;
+static LIST_HEAD(adapter_list);
+u64 ehea_driver_flags = 0;
+struct workqueue_struct *ehea_driver_wq;
+struct work_struct ehea_rereg_mr_task;
+
 
 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                                         const struct of_device_id *id);
@@ -238,13 +243,17 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                             | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
-                rwqe->sg_list[0].vaddr = (u64)skb->data;
+                rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
                 rwqe->sg_list[0].len = packet_size;
                 rwqe->data_segments = 1;
 
                 index++;
                 index &= max_index_mask;
+
+                if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+                        goto out;
         }
+
         q_skba->index = index;
 
         /* Ring doorbell */
@@ -253,7 +262,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
                 ehea_update_rq2a(pr->qp, i);
         else
                 ehea_update_rq3a(pr->qp, i);
-
+out:
         return ret;
 }
 
@@ -1321,7 +1330,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
                         sg1entry->len = skb_data_size - headersize;
 
                         tmp_addr = (u64)(skb->data + headersize);
-                        sg1entry->vaddr = tmp_addr;
+                        sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                         swqe->descriptors++;
                 }
         } else
@@ -1352,7 +1361,7 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
                         sg1entry->l_key = lkey;
                         sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
                         tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
-                        sg1entry->vaddr = tmp_addr;
+                        sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                         swqe->descriptors++;
                 }
         } else {
@@ -1391,7 +1400,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                         sg1entry->len = frag->size;
                         tmp_addr = (u64)(page_address(frag->page)
                                          + frag->page_offset);
-                        sg1entry->vaddr = tmp_addr;
+                        sg1entry->vaddr = ehea_map_vaddr(tmp_addr);
                         swqe->descriptors++;
                         sg1entry_contains_frag_data = 1;
                 }
@@ -1406,7 +1415,7 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
 
                         tmp_addr = (u64)(page_address(frag->page)
                                          + frag->page_offset);
-                        sgentry->vaddr = tmp_addr;
+                        sgentry->vaddr = ehea_map_vaddr(tmp_addr);
                         swqe->descriptors++;
                 }
         }
@@ -1878,6 +1887,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 ehea_dump(swqe, 512, "swqe");
         }
 
+        if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+                goto out;
+
         ehea_post_swqe(pr->qp, swqe);
         pr->tx_packets++;
 
@@ -1892,7 +1904,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
         }
         dev->trans_start = jiffies;
         spin_unlock(&pr->xmit_lock);
-
+out:
         return NETDEV_TX_OK;
 }
 
@@ -2220,6 +2232,9 @@ out_dereg_bc:
 out_clean_pr:
         ehea_clean_all_portres(port);
 out:
+        if (ret)
+                ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+
         return ret;
 }
 
@@ -2259,8 +2274,13 @@ static int ehea_down(struct net_device *dev)
         msleep(1);
 
         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-        ret = ehea_clean_all_portres(port);
         port->state = EHEA_PORT_DOWN;
+
+        ret = ehea_clean_all_portres(port);
+        if (ret)
+                ehea_info("Failed freeing resources for %s. ret=%i",
+                          dev->name, ret);
+
         return ret;
 }
 
@@ -2292,15 +2312,11 @@ static void ehea_reset_port(struct work_struct *work)
         netif_stop_queue(dev);
         netif_poll_disable(dev);
 
-        ret = ehea_down(dev);
-        if (ret)
-                ehea_error("ehea_down failed. not all resources are freed");
+        ehea_down(dev);
 
         ret = ehea_up(dev);
-        if (ret) {
-                ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+        if (ret)
                 goto out;
-        }
 
         if (netif_msg_timer(port))
                 ehea_info("Device %s resetted successfully", dev->name);
@@ -2312,6 +2328,88 @@ out:
         return;
 }
 
+static void ehea_rereg_mrs(struct work_struct *work)
+{
+        int ret, i;
+        struct ehea_adapter *adapter;
+
+        ehea_info("LPAR memory enlarged - re-initializing driver");
+
+        list_for_each_entry(adapter, &adapter_list, list)
+                if (adapter->active_ports) {
+                        /* Shutdown all ports */
+                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
+                                struct ehea_port *port = adapter->port[i];
+
+                                if (port) {
+                                        struct net_device *dev = port->netdev;
+
+                                        if (dev->flags & IFF_UP) {
+                                                ehea_info("stopping %s",
+                                                          dev->name);
+                                                down(&port->port_lock);
+                                                netif_stop_queue(dev);
+                                                netif_poll_disable(dev);
+                                                ehea_down(dev);
+                                                up(&port->port_lock);
+                                        }
+                                }
+                        }
+
+                        /* Unregister old memory region */
+                        ret = ehea_rem_mr(&adapter->mr);
+                        if (ret) {
+                                ehea_error("unregister MR failed - driver"
+                                           " inoperable!");
+                                goto out;
+                        }
+                }
+
+        ehea_destroy_busmap();
+
+        ret = ehea_create_busmap();
+        if (ret)
+                goto out;
+
+        clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+        list_for_each_entry(adapter, &adapter_list, list)
+                if (adapter->active_ports) {
+                        /* Register new memory region */
+                        ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+                        if (ret) {
+                                ehea_error("register MR failed - driver"
+                                           " inoperable!");
+                                goto out;
+                        }
+
+                        /* Restart all ports */
+                        for (i = 0; i < EHEA_MAX_PORTS; i++) {
+                                struct ehea_port *port = adapter->port[i];
+
+                                if (port) {
+                                        struct net_device *dev = port->netdev;
+
+                                        if (dev->flags & IFF_UP) {
+                                                ehea_info("restarting %s",
+                                                          dev->name);
+                                                down(&port->port_lock);
+
+                                                ret = ehea_up(dev);
+                                                if (!ret) {
+                                                        netif_poll_enable(dev);
+                                                        netif_wake_queue(dev);
+                                                }
+
+                                                up(&port->port_lock);
+                                        }
+                                }
+                        }
+                }
+out:
+        return;
+}
+
 static void ehea_tx_watchdog(struct net_device *dev)
 {
         struct ehea_port *port = netdev_priv(dev);
@@ -2573,6 +2671,8 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
         ehea_info("%s: Jumbo frames are %sabled", dev->name,
                   jumbo == 1 ? "en" : "dis");
 
+        adapter->active_ports++;
+
         return port;
 
 out_unreg_port:
@@ -2596,6 +2696,7 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
         ehea_unregister_port(port);
         kfree(port->mc_list);
         free_netdev(port->netdev);
+        port->adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
@@ -2788,6 +2889,8 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                 goto out;
         }
 
+        list_add(&adapter->list, &adapter_list);
+
         adapter->ebus_dev = dev;
 
         adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
@@ -2891,7 +2994,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
 
         ehea_destroy_eq(adapter->neq);
         ehea_remove_adapter_mr(adapter);
+        list_del(&adapter->list);
+
         kfree(adapter);
+
         return 0;
 }
 
@@ -2939,9 +3045,18 @@ int __init ehea_module_init(void)
         printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
                DRV_VERSION);
 
+        ehea_driver_wq = create_workqueue("ehea_driver_wq");
+
+        INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+
         ret = check_module_parm();
         if (ret)
                 goto out;
+
+        ret = ehea_create_busmap();
+        if (ret)
+                goto out;
+
         ret = ibmebus_register_driver(&ehea_driver);
         if (ret) {
                 ehea_error("failed registering eHEA device driver on ebus");
@@ -2965,6 +3080,7 @@ static void __exit ehea_module_exit(void)
 {
         driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
         ibmebus_unregister_driver(&ehea_driver);
+        ehea_destroy_busmap();
 }
 
 module_init(ehea_module_init);
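
ehea_module_init() above creates a dedicated workqueue and binds ehea_rereg_mrs() to ehea_rereg_mr_task; ehea_map_vaddr() later queues that work item when it hits an unmapped address. A generic sketch of this create / INIT_WORK / queue_work lifecycle, under made-up names (example_wq, rereg_fn); the flush/destroy pairing is shown only to illustrate the usual teardown and is not part of this patch:

    /* Generic workqueue lifecycle sketch with illustrative names, not code
     * from the patch. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;
    static struct work_struct example_rereg_task;

    static void rereg_fn(struct work_struct *work)
    {
            /* runs in process context and may sleep, e.g. re-register MRs */
    }

    static int __init example_init(void)
    {
            example_wq = create_workqueue("example_wq");
            if (!example_wq)
                    return -ENOMEM;
            INIT_WORK(&example_rereg_task, rereg_fn);
            /* later, from any context:
             * queue_work(example_wq, &example_rereg_task); */
            return 0;
    }

    static void __exit example_exit(void)
    {
            flush_workqueue(example_wq);    /* wait for pending work */
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");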
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d17a45a7e717..89b63531ff26 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
         }
 }
 
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
 /* Notification Event Queue (NEQ) Entry bit masks */
 #define NEQE_EVENT_CODE         EHEA_BMASK_IBM(2, 7)
 #define NEQE_PORTNUM            EHEA_BMASK_IBM(32, 47)
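
EHEA_MAX_RPAGE caps how many page addresses a single H_REGISTER_HEA_RPAGES call may carry. With the constants this patch defines in ehea_qmr.h (4 KiB eHEA pages, 16 MiB sections) a section holds 4096 pages, so the registration loop in ehea_qmr.c needs 8 hypervisor calls per section, and the page-list buffer is 512 * 8 = 4096 bytes, which is why kzalloc(EHEA_MAX_RPAGE * sizeof(u64), ...) replaces the PAGE_SIZE allocation below. A stand-alone check of that arithmetic, with the constants copied locally:

    /* Stand-alone arithmetic check; values mirror EHEA_PAGESIZE,
     * EHEA_SECTSIZE and EHEA_MAX_RPAGE from this patch. */
    #include <stdio.h>

    int main(void)
    {
            const unsigned long page_size = 1UL << 12;   /* EHEA_PAGESIZE  */
            const unsigned long sect_size = 1UL << 24;   /* EHEA_SECTSIZE  */
            const unsigned long max_rpage = 512;         /* EHEA_MAX_RPAGE */

            unsigned long pages_per_section = sect_size / page_size;

            printf("pages per section:  %lu\n", pages_per_section);  /* 4096 */
            printf("hcalls per section: %lu\n",
                   pages_per_section / max_rpage);                   /* 8 */
            printf("page list bytes:    %lu\n",
                   max_rpage * sizeof(unsigned long long));          /* 4096 */
            return 0;
    }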
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 29eaa46948b0..a36fa6c23fdf 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,6 +31,13 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
+
+struct ehea_busmap ehea_bmap = { 0, 0, NULL };
+extern u64 ehea_driver_flags;
+extern struct workqueue_struct *ehea_driver_wq;
+extern struct work_struct ehea_rereg_mr_task;
+
+
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
         void *retvalue = hw_qeit_get(queue);
@@ -547,18 +554,84 @@ int ehea_destroy_qp(struct ehea_qp *qp)
         return 0;
 }
 
+int ehea_create_busmap( void )
+{
+        u64 vaddr = EHEA_BUSMAP_START;
+        unsigned long abs_max_pfn = 0;
+        unsigned long sec_max_pfn;
+        int i;
+
+        /*
+         * Sections are not in ascending order -> Loop over all sections and
+         * find the highest PFN to compute the required map size.
+        */
+        ehea_bmap.valid_sections = 0;
+
+        for (i = 0; i < NR_MEM_SECTIONS; i++)
+                if (valid_section_nr(i)) {
+                        sec_max_pfn = section_nr_to_pfn(i);
+                        if (sec_max_pfn > abs_max_pfn)
+                                abs_max_pfn = sec_max_pfn;
+                        ehea_bmap.valid_sections++;
+                }
+
+        ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+        ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+
+        if (!ehea_bmap.vaddr)
+                return -ENOMEM;
+
+        for (i = 0 ; i < ehea_bmap.entries; i++) {
+                unsigned long pfn = section_nr_to_pfn(i);
+
+                if (pfn_valid(pfn)) {
+                        ehea_bmap.vaddr[i] = vaddr;
+                        vaddr += EHEA_SECTSIZE;
+                } else
+                        ehea_bmap.vaddr[i] = 0;
+        }
+
+        return 0;
+}
+
+void ehea_destroy_busmap( void )
+{
+        vfree(ehea_bmap.vaddr);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+        u64 mapped_addr;
+        unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+
+        if (likely(index < ehea_bmap.entries)) {
+                mapped_addr = ehea_bmap.vaddr[index];
+                if (likely(mapped_addr))
+                        mapped_addr |= (((unsigned long)caddr)
+                                        & (EHEA_SECTSIZE - 1));
+                else
+                        mapped_addr = -1;
+        } else
+                mapped_addr = -1;
+
+        if (unlikely(mapped_addr == -1))
+                if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+                        queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+
+        return mapped_addr;
+}
+
 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
 {
-        int i, k, ret;
-        u64 hret, pt_abs, start, end, nr_pages;
-        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+        int ret;
         u64 *pt;
+        void *pg;
+        u64 hret, pt_abs, i, j, m, mr_len;
+        u32 acc_ctrl = EHEA_MR_ACC_CTRL;
 
-        start = KERNELBASE;
-        end = (u64)high_memory;
-        nr_pages = (end - start) / EHEA_PAGESIZE;
+        mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
 
-        pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+        pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
         if (!pt) {
                 ehea_error("no mem");
                 ret = -ENOMEM;
@@ -566,7 +639,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
         }
         pt_abs = virt_to_abs(pt);
 
-        hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+        hret = ehea_h_alloc_resource_mr(adapter->handle,
+                                        EHEA_BUSMAP_START, mr_len,
                                         acc_ctrl, adapter->pd,
                                         &mr->handle, &mr->lkey);
         if (hret != H_SUCCESS) {
@@ -575,49 +649,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
                 goto out;
         }
 
-        mr->vaddr = KERNELBASE;
-        k = 0;
-
-        while (nr_pages > 0) {
-                if (nr_pages > 1) {
-                        u64 num_pages = min(nr_pages, (u64)512);
-                        for (i = 0; i < num_pages; i++)
-                                pt[i] = virt_to_abs((void*)(((u64)start) +
-                                                    ((k++) *
-                                                     EHEA_PAGESIZE)));
-
-                        hret = ehea_h_register_rpage_mr(adapter->handle,
-                                                        mr->handle, 0,
-                                                        0, (u64)pt_abs,
-                                                        num_pages);
-                        nr_pages -= num_pages;
-                } else {
-                        u64 abs_adr = virt_to_abs((void*)(((u64)start) +
-                                                  (k * EHEA_PAGESIZE)));
-
-                        hret = ehea_h_register_rpage_mr(adapter->handle,
-                                                        mr->handle, 0,
-                                                        0, abs_adr,1);
-                        nr_pages--;
-                }
-
-                if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
-                        ehea_h_free_resource(adapter->handle,
-                                             mr->handle, FORCE_FREE);
-                        ehea_error("register_rpage_mr failed");
-                        ret = -EIO;
-                        goto out;
+        for (i = 0 ; i < ehea_bmap.entries; i++)
+                if (ehea_bmap.vaddr[i]) {
+                        void *sectbase = __va(i << SECTION_SIZE_BITS);
+                        unsigned long k = 0;
+
+                        for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
+                             j++) {
+
+                                for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+                                        pg = sectbase + ((k++) * EHEA_PAGESIZE);
+                                        pt[m] = virt_to_abs(pg);
+                                }
+
+                                hret = ehea_h_register_rpage_mr(adapter->handle,
+                                                                mr->handle,
+                                                                0, 0, pt_abs,
+                                                                EHEA_MAX_RPAGE);
+                                if ((hret != H_SUCCESS)
+                                    && (hret != H_PAGE_REGISTERED)) {
+                                        ehea_h_free_resource(adapter->handle,
+                                                             mr->handle,
+                                                             FORCE_FREE);
+                                        ehea_error("register_rpage_mr failed");
+                                        ret = -EIO;
+                                        goto out;
+                                }
+                        }
                 }
-        }
 
         if (hret != H_SUCCESS) {
-                ehea_h_free_resource(adapter->handle, mr->handle,
-                                     FORCE_FREE);
-                ehea_error("register_rpage failed for last page");
+                ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+                ehea_error("registering mr failed");
                 ret = -EIO;
                 goto out;
         }
 
+        mr->vaddr = EHEA_BUSMAP_START;
         mr->adapter = adapter;
         ret = 0;
 out:
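
A small userspace model of the busmap scheme implemented above: valid 16 MiB sections are assigned consecutive slots starting at EHEA_BUSMAP_START, holes map to 0, and a lookup keeps the low 24 bits of the original address. The section-validity table in the sketch is made up; the kernel code derives it from the sparsemem section data instead.

    /* Toy model of ehea_create_busmap()/ehea_map_vaddr(); the 6-entry
     * validity table is hypothetical. */
    #include <stdio.h>
    #include <stdint.h>

    #define SECT_BITS    24                      /* EHEA_SECTSIZE = 1UL << 24 */
    #define SECT_SIZE    (1ULL << SECT_BITS)
    #define BUSMAP_START 0x8000000000000000ULL   /* EHEA_BUSMAP_START */

    int main(void)
    {
            int valid[6] = { 1, 1, 0, 0, 1, 1 }; /* sparse physical layout */
            uint64_t bmap[6];
            uint64_t bus = BUSMAP_START;
            uint64_t addr, mapped;
            int i;

            /* like ehea_create_busmap(): consecutive bus slots, holes -> 0 */
            for (i = 0; i < 6; i++) {
                    if (valid[i]) {
                            bmap[i] = bus;
                            bus += SECT_SIZE;
                    } else {
                            bmap[i] = 0;
                    }
            }

            /* like ehea_map_vaddr(): the section index picks the slot,
             * the low 24 bits carry over unchanged */
            addr = (4ULL << SECT_BITS) + 0x1234; /* somewhere in section 4 */
            mapped = bmap[addr >> SECT_BITS] | (addr & (SECT_SIZE - 1));

            printf("0x%llx -> 0x%llx\n", (unsigned long long)addr,
                   (unsigned long long)mapped);  /* -> 0x8000000002001234 */
            return 0;
    }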
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index c0eb3e03a102..b71f8452a5e3 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -36,8 +36,14 @@
  * page size of ehea hardware queues
  */
 
 #define EHEA_PAGESHIFT         12
-#define EHEA_PAGESIZE          4096UL
+#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE          (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+
+#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
+#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#endif
 
 /* Some abbreviations used here:
  *
@@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
+int ehea_create_busmap( void );
+void ehea_destroy_busmap( void );
+u64 ehea_map_vaddr(void *caddr);
+
 #endif  /* __EHEA_QMR_H__ */
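
Taken together, the new ehea_qmr.h interface is used at three points in the driver: the busmap is built once at module init (and rebuilt by ehea_rereg_mrs() after an LPAR memory add), ehea_map_vaddr() translates every buffer address handed to the hardware, and the map is torn down at module exit. A condensed usage sketch; the example_* wrappers are illustrative, not real driver functions, and error handling is trimmed:

    /* Condensed view of how the new interface is wired up in this patch
     * (simplified; the example_* wrappers are hypothetical). */
    #include "ehea.h"
    #include "ehea_qmr.h"

    static int example_init_path(struct ehea_adapter *adapter)
    {
            int ret = ehea_create_busmap();          /* module init */
            if (ret)
                    return ret;
            return ehea_reg_kernel_mr(adapter, &adapter->mr); /* adapter probe */
    }

    static u64 example_fill_sg_entry(void *buf)
    {
            /* data path: kernel address -> registered bus address;
             * returns -1 and schedules re-registration on a miss */
            return ehea_map_vaddr(buf);
    }

    static void example_exit_path(void)
    {
            ehea_destroy_busmap();                   /* module exit */
    }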