author     Linus Torvalds <torvalds@linux-foundation.org>  2009-01-17 17:28:53 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-01-17 17:28:53 -0500
commit     f3b8436ad9a8ad36b3c9fa1fe030c7f38e5d3d0b
tree       bd43db7f86cd23af0c3905ff8ff991f910eba5e0 /drivers
parent     0bac038a9eec00ac27f95ca0360954c0016fd859
parent     ac8581d408d41ebd5583b4e85b18e7ef16bb044b
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Use consistent types for ehca_plpar_hcall9()
  IB/ehca: Fix printk format warnings from u64 type change
  IPoIB: Do not print error messages for multicast join retries
  IB/mlx4: Fix memory ordering problem when posting LSO sends
  mlx4_core: Fix min() warning
  IPoIB: Fix deadlock between ipoib_open() and child interface create
  IPoIB: Fix hang in napi_disable() if P_Key is never found
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c           |  16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c          |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c          |  18
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c         |   6
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mcast.c        |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c         | 144
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c           |  32
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c         |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c          |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h        |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c       |   2
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c            |  56
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                |  28
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c      |  27
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c      |  11
-rw-r--r--  drivers/net/mlx4/profile.c                     |   6
17 files changed, 189 insertions(+), 171 deletions(-)
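Nearly every ehca hunk below follows the same pattern: h_ret values and firmware handles are u64, and after the kernel-wide change that made u64 unsigned long long, printing them with %li/%lx draws printk format warnings, so the specifiers become %lli/%llx (the hcp_if.c hunks additionally declare the ehca_plpar_hcall9() output buffer as unsigned long to match the underlying plpar_hcall9() interface). A minimal sketch of the specifier issue, not taken from the patch; report_handle() is a made-up example function:

	/*
	 * Hypothetical illustration only: with u64 defined as unsigned long long,
	 * the commented-out pr_err() would trigger a printk format warning,
	 * while %llx matches the argument type.
	 */
	#include <linux/kernel.h>
	#include <linux/types.h>

	static void report_handle(u64 handle)
	{
		/* pr_err("handle=%lx\n", handle);  would warn: %lx expects unsigned long */
		pr_err("handle=%llx\n", handle);   /* correct for unsigned long long */
	}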
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 2f4c28a30271..97e4b231cdc4 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -196,7 +196,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 
 	if (h_ret != H_SUCCESS) {
 		ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-			 "h_ret=%li device=%p", h_ret, device);
+			 "h_ret=%lli device=%p", h_ret, device);
 		cq = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto create_cq_exit2;
 	}
@@ -232,7 +232,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 
 		if (h_ret < H_SUCCESS) {
 			ehca_err(device, "hipz_h_register_rpage_cq() failed "
-				 "ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
+				 "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
 				 "act_pages=%i", my_cq, my_cq->cq_number,
 				 h_ret, counter, param.act_pages);
 			cq = ERR_PTR(-EINVAL);
@@ -244,7 +244,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 			if ((h_ret != H_SUCCESS) || vpage) {
 				ehca_err(device, "Registration of pages not "
 					 "complete ehca_cq=%p cq_num=%x "
-					 "h_ret=%li", my_cq, my_cq->cq_number,
+					 "h_ret=%lli", my_cq, my_cq->cq_number,
 					 h_ret);
 				cq = ERR_PTR(-EAGAIN);
 				goto create_cq_exit4;
@@ -252,7 +252,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
 				ehca_err(device, "Registration of page failed "
-					 "ehca_cq=%p cq_num=%x h_ret=%li "
+					 "ehca_cq=%p cq_num=%x h_ret=%lli "
 					 "counter=%i act_pages=%i",
 					 my_cq, my_cq->cq_number,
 					 h_ret, counter, param.act_pages);
@@ -266,7 +266,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 
 	gal = my_cq->galpas.kernel;
 	cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
+	ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
 		 my_cq, my_cq->cq_number, cqx_fec);
 
 	my_cq->ib_cq.cqe = my_cq->nr_of_entries =
@@ -307,7 +307,7 @@ create_cq_exit3:
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
 	if (h_ret != H_SUCCESS)
 		ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-			 "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);
+			 "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
 
 create_cq_exit2:
 	write_lock_irqsave(&ehca_cq_idr_lock, flags);
@@ -355,7 +355,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
 	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
 	if (h_ret == H_R_STATE) {
 		/* cq in err: read err data and destroy it forcibly */
-		ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
+		ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
 			 "state. Try to delete it forcibly.",
 			 my_cq, cq_num, my_cq->ipz_cq_handle.handle);
 		ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
@@ -365,7 +365,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
 			 cq_num);
 	}
 	if (h_ret != H_SUCCESS) {
-		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
+		ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
 			 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
 		return ehca2ib_return_code(h_ret);
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 46288220cfbb..9209c5332dfe 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -393,7 +393,7 @@ int ehca_modify_port(struct ib_device *ibdev,
 	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
 				  cap, props->init_type, port_modify_mask);
 	if (hret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "Modify port failed h_ret=%li",
+		ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
 			 hret);
 		ret = -EINVAL;
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3128a5090dbd..99bcbd7ffb0a 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -99,7 +99,7 @@ static void print_error_data(struct ehca_shca *shca, void *data,
 			return;
 
 		ehca_err(&shca->ib_device,
-			 "QP 0x%x (resource=%lx) has errors.",
+			 "QP 0x%x (resource=%llx) has errors.",
 			 qp->ib_qp.qp_num, resource);
 		break;
 	}
@@ -108,21 +108,21 @@ static void print_error_data(struct ehca_shca *shca, void *data,
 		struct ehca_cq *cq = (struct ehca_cq *)data;
 
 		ehca_err(&shca->ib_device,
-			 "CQ 0x%x (resource=%lx) has errors.",
+			 "CQ 0x%x (resource=%llx) has errors.",
 			 cq->cq_number, resource);
 		break;
 	}
 	default:
 		ehca_err(&shca->ib_device,
-			 "Unknown error type: %lx on %s.",
+			 "Unknown error type: %llx on %s.",
 			 type, shca->ib_device.name);
 		break;
 	}
 
-	ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
+	ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
 	ehca_err(&shca->ib_device, "EHCA ----- error data begin "
 		 "---------------------------------------------------");
-	ehca_dmp(rblock, length, "resource=%lx", resource);
+	ehca_dmp(rblock, length, "resource=%llx", resource);
 	ehca_err(&shca->ib_device, "EHCA ----- error data end "
 		 "----------------------------------------------------");
 
@@ -152,7 +152,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
 
 	if (ret == H_R_STATE)
 		ehca_err(&shca->ib_device,
-			 "No error data is available: %lx.", resource);
+			 "No error data is available: %llx.", resource);
 	else if (ret == H_SUCCESS) {
 		int length;
 
@@ -164,7 +164,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
 		print_error_data(shca, data, rblock, length);
 	} else
 		ehca_err(&shca->ib_device,
-			 "Error data could not be fetched: %lx", resource);
+			 "Error data could not be fetched: %llx", resource);
 
 	ehca_free_fw_ctrlblock(rblock);
 
@@ -514,7 +514,7 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
 	struct ehca_cq *cq;
 
 	eqe_value = eqe->entry;
-	ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
+	ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
 	if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
 		ehca_dbg(&shca->ib_device, "Got completion event");
 		token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
@@ -603,7 +603,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		ret = hipz_h_eoi(eq->ist);
 		if (ret != H_SUCCESS)
 			ehca_err(&shca->ib_device,
-				 "bad return code EOI -rc = %ld\n", ret);
+				 "bad return code EOI -rc = %lld\n", ret);
 		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
 	}
 	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c7b8a506af65..368311ce332b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -304,7 +304,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 
 	h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
 	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query device properties. h_ret=%li",
+		ehca_gen_err("Cannot query device properties. h_ret=%lli",
 			     h_ret);
 		ret = -EPERM;
 		goto sense_attributes1;
@@ -391,7 +391,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 	port = (struct hipz_query_port *)rblock;
 	h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
 	if (h_ret != H_SUCCESS) {
-		ehca_gen_err("Cannot query port properties. h_ret=%li",
+		ehca_gen_err("Cannot query port properties. h_ret=%lli",
 			     h_ret);
 		ret = -EPERM;
 		goto sense_attributes1;
@@ -682,7 +682,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 {
 	struct ehca_shca *shca = dev->driver_data;
 
-	return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
+	return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
 
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index e3ef0264ccc6..120aedf9f989 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -88,7 +88,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (h_ret != H_SUCCESS)
 		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-			 "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
 
 	return ehca2ib_return_code(h_ret);
 }
@@ -125,7 +125,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	if (h_ret != H_SUCCESS)
 		ehca_err(ibqp->device,
 			 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-			 "h_ret=%li", my_qp, ibqp->qp_num, h_ret);
+			 "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
 
 	return ehca2ib_return_code(h_ret);
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f974367cad40..72f83f7df614 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -204,7 +204,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 	}
 	if ((size == 0) ||
 	    (((u64)iova_start + size) < (u64)iova_start)) {
-		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
+		ehca_err(pd->device, "bad input values: size=%llx iova_start=%p",
 			 size, iova_start);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_phys_mr_exit0;
@@ -309,8 +309,8 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	if (length == 0 || virt + length < virt) {
-		ehca_err(pd->device, "bad input values: length=%lx "
-			 "virt_base=%lx", length, virt);
+		ehca_err(pd->device, "bad input values: length=%llx "
+			 "virt_base=%llx", length, virt);
 		ib_mr = ERR_PTR(-EINVAL);
 		goto reg_user_mr_exit0;
 	}
@@ -373,7 +373,7 @@ reg_user_mr_fallback:
 			  &e_mr->ib.ib_mr.rkey);
 	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
 		ehca_warn(pd->device, "failed to register mr "
-			  "with hwpage_size=%lx", hwpage_size);
+			  "with hwpage_size=%llx", hwpage_size);
 		ehca_info(pd->device, "try to register mr with "
 			  "kpage_size=%lx", PAGE_SIZE);
 		/*
@@ -509,7 +509,7 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 			goto rereg_phys_mr_exit1;
 		if ((new_size == 0) ||
 		    (((u64)iova_start + new_size) < (u64)iova_start)) {
-			ehca_err(mr->device, "bad input values: new_size=%lx "
+			ehca_err(mr->device, "bad input values: new_size=%llx "
 				 "iova_start=%p", new_size, iova_start);
 			ret = -EINVAL;
 			goto rereg_phys_mr_exit1;
@@ -580,8 +580,8 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
 
 	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p "
-			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
+		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p "
+			 "hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -630,8 +630,8 @@ int ehca_dereg_mr(struct ib_mr *mr)
 	/* TODO: BUSY: MR still has bound window(s) */
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p "
-			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
+		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
+			 "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
 			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
 			 e_mr->ipz_mr_handle.handle, mr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -671,8 +671,8 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
 	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li "
-			 "shca=%p hca_hndl=%lx mw=%p",
+		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
+			 "shca=%p hca_hndl=%llx mw=%p",
 			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
 		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
 		goto alloc_mw_exit1;
@@ -713,8 +713,8 @@ int ehca_dealloc_mw(struct ib_mw *mw)
 
 	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p "
-			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
+		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
+			 "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
 			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
 			 e_mw->ipz_mw_handle.handle);
 		return ehca2ib_return_code(h_ret);
@@ -840,7 +840,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
 		goto map_phys_fmr_exit0;
 	if (iova % e_fmr->fmr_page_size) {
 		/* only whole-numbered pages */
-		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
+		ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
 			 iova, e_fmr->fmr_page_size);
 		ret = -EINVAL;
 		goto map_phys_fmr_exit0;
@@ -878,7 +878,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
 map_phys_fmr_exit0:
 	if (ret)
 		ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-			 "iova=%lx", ret, fmr, page_list, list_len, iova);
+			 "iova=%llx", ret, fmr, page_list, list_len, iova);
 	return ret;
 } /* end ehca_map_phys_fmr() */
 
@@ -964,8 +964,8 @@ int ehca_dealloc_fmr(struct ib_fmr *fmr)
 
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p "
-			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
+		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
+			 "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
 		ret = ehca2ib_return_code(h_ret);
@@ -1007,8 +1007,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 					 (u64)iova_start, size, hipz_acl,
 					 e_pd->fw_pd, &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li "
-			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
+		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
+			 "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
 		ret = ehca2ib_return_code(h_ret);
 		goto ehca_reg_mr_exit0;
 	}
@@ -1033,9 +1033,9 @@ int ehca_reg_mr(struct ehca_shca *shca,
 ehca_reg_mr_exit1:
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
-			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i",
+		ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
+			 "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
+			 "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
 			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
 			 hipzout.lkey, pginfo, pginfo->num_kpages,
 			 pginfo->num_hwpages, ret);
@@ -1045,8 +1045,8 @@ ehca_reg_mr_exit1:
 ehca_reg_mr_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%lx num_hwpages=%lx",
+			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+			 "num_kpages=%llx num_hwpages=%llx",
 			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
 			 pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
@@ -1116,8 +1116,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 			 */
 			if (h_ret != H_SUCCESS) {
 				ehca_err(&shca->ib_device, "last "
-					 "hipz_reg_rpage_mr failed, h_ret=%li "
-					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
+					 "hipz_reg_rpage_mr failed, h_ret=%lli "
+					 "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
 					 " lkey=%x", h_ret, e_mr, i,
 					 shca->ipz_hca_handle.handle,
 					 e_mr->ipz_mr_handle.handle,
@@ -1128,8 +1128,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 				ret = 0;
 		} else if (h_ret != H_PAGE_REGISTERED) {
 			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
-				 "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx "
-				 "mr_hndl=%lx", h_ret, e_mr, i,
+				 "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
+				 "mr_hndl=%llx", h_ret, e_mr, i,
 				 e_mr->ib.ib_mr.lkey,
 				 shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle);
@@ -1145,7 +1145,7 @@ ehca_reg_mr_rpages_exit1:
 ehca_reg_mr_rpages_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
-			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
+			 "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
 			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
 	return ret;
 } /* end ehca_reg_mr_rpages() */
@@ -1184,7 +1184,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
 	if (ret) {
 		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
+			 "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
 			 "kpage=%p", e_mr, pginfo, pginfo->type,
 			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
 		goto ehca_rereg_mr_rereg1_exit1;
@@ -1205,13 +1205,13 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 		 * (MW bound or MR is shared)
 		 */
 		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
-			  "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr);
+			  "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
 		*pginfo = pginfo_save;
 		ret = -EAGAIN;
 	} else if ((u64 *)hipzout.vaddr != iova_start) {
 		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
-			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
+			 "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
+			 "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
 			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
 			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
 		ret = -EFAULT;
@@ -1235,7 +1235,7 @@ ehca_rereg_mr_rereg1_exit1:
 ehca_rereg_mr_rereg1_exit0:
 	if ( ret && (ret != -EAGAIN) )
 		ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
+			 "pginfo=%p num_kpages=%llx num_hwpages=%llx",
 			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
 			 pginfo->num_hwpages);
 	return ret;
@@ -1263,7 +1263,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 	    (e_mr->num_hwpages > MAX_RPAGES) ||
 	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
 		ehca_dbg(&shca->ib_device, "Rereg3 case, "
-			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
+			 "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
 			 pginfo->num_hwpages, e_mr->num_hwpages);
 		rereg_1_hcall = 0;
 		rereg_3_hcall = 1;
@@ -1295,7 +1295,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
 		if (h_ret != H_SUCCESS) {
 			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-				 "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx "
+				 "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
 				 "mr->lkey=%x",
 				 h_ret, e_mr, shca->ipz_hca_handle.handle,
 				 e_mr->ipz_mr_handle.handle,
@@ -1328,8 +1328,8 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 ehca_rereg_mr_exit0:
 	if (ret)
 		ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
-			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
+			 "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
+			 "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
 			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
 			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
 			 rereg_1_hcall, rereg_3_hcall);
@@ -1371,8 +1371,8 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 		 * FMRs are not shared and no MW bound to FMRs
 		 */
 		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-			 "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx "
-			 "mr_hndl=%lx lkey=%x lkey_out=%x",
+			 "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
+			 "mr_hndl=%llx lkey=%x lkey_out=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle,
 			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
@@ -1383,7 +1383,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-			 "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
+			 "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
 			 "lkey=%x",
 			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
 			 e_fmr->ipz_mr_handle.handle,
@@ -1447,9 +1447,9 @@ int ehca_reg_smr(struct ehca_shca *shca,
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
 			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+			 "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
 			 shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
@@ -1527,7 +1527,7 @@ int ehca_reg_internal_maxmr(
 			  &e_mr->ib.ib_mr.rkey);
 	if (ret) {
 		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-			 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
+			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
 			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
 			 num_kpages, num_hwpages);
 		goto ehca_reg_internal_maxmr_exit1;
@@ -1573,8 +1573,8 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
 				    &hipzout);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li "
-			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
+		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
+			 "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
 			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
 			 e_origmr->ipz_mr_handle.handle,
 			 e_origmr->ib.ib_mr.lkey);
@@ -1651,28 +1651,28 @@ int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
 	/* check first buffer */
 	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
 		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
-			     "pbuf->addr=%lx pbuf->size=%lx",
+			     "pbuf->addr=%llx pbuf->size=%llx",
 			     iova_start, pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
 	    (num_phys_buf > 1)) {
-		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
-			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
+		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx "
+			     "pbuf->size=%llx", pbuf->addr, pbuf->size);
 		return -EINVAL;
 	}
 
 	for (i = 0; i < num_phys_buf; i++) {
 		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
-			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
-				     "pbuf->size=%lx",
+			ehca_gen_err("bad address, i=%x pbuf->addr=%llx "
+				     "pbuf->size=%llx",
 				     i, pbuf->addr, pbuf->size);
 			return -EINVAL;
 		}
 		if (((i > 0) &&	/* not 1st */
 		     (i < (num_phys_buf - 1)) &&	/* not last */
 		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
-			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
+			ehca_gen_err("bad size, i=%x pbuf->size=%llx",
 				     i, pbuf->size);
 			return -EINVAL;
 		}
@@ -1705,7 +1705,7 @@ int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
 	page = page_list;
 	for (i = 0; i < list_len; i++) {
 		if (*page % e_fmr->fmr_page_size) {
-			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
+			ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
 				     "fmr_page_size=%x", i, *page, page, e_fmr,
 				     e_fmr->fmr_page_size);
 			return -EINVAL;
@@ -1743,9 +1743,9 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 					   (pginfo->next_hwpage *
 					    pginfo->hwpage_size));
 			if ( !(*kpage) ) {
-				ehca_gen_err("pgaddr=%lx "
-					     "chunk->page_list[i]=%lx "
-					     "i=%x next_hwpage=%lx",
+				ehca_gen_err("pgaddr=%llx "
+					     "chunk->page_list[i]=%llx "
+					     "i=%x next_hwpage=%llx",
 					     pgaddr, (u64)sg_dma_address(
 						     &chunk->page_list[i]),
 					     i, pginfo->next_hwpage);
@@ -1795,11 +1795,11 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
 	for (t = start_idx; t <= end_idx; t++) {
 		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
 		if (ehca_debug_level >= 3)
-			ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+			ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
 				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
 		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-			ehca_gen_err("uncontiguous page found pgaddr=%lx "
-				     "prev_pgaddr=%lx page_list_i=%x",
+			ehca_gen_err("uncontiguous page found pgaddr=%llx "
+				     "prev_pgaddr=%llx page_list_i=%x",
 				     pgaddr, *prev_pgaddr, t);
 			return -EINVAL;
 		}
@@ -1833,7 +1833,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 					  << PAGE_SHIFT );
 			*kpage = phys_to_abs(pgaddr);
 			if ( !(*kpage) ) {
-				ehca_gen_err("pgaddr=%lx i=%x",
+				ehca_gen_err("pgaddr=%llx i=%x",
 					     pgaddr, i);
 				ret = -EFAULT;
 				return ret;
@@ -1846,8 +1846,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 				if (pginfo->hwpage_cnt) {
 					ehca_gen_err(
 						"invalid alignment "
-						"pgaddr=%lx i=%x "
-						"mr_pgsize=%lx",
+						"pgaddr=%llx i=%x "
+						"mr_pgsize=%llx",
 						pgaddr, i,
 						pginfo->hwpage_size);
 					ret = -EFAULT;
@@ -1866,8 +1866,8 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 			if (ehca_debug_level >= 3) {
 				u64 val = *(u64 *)abs_to_virt(
 					phys_to_abs(pgaddr));
-				ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-					     "value=%016lx",
+				ehca_gen_dbg("kpage=%llx chunk_page=%llx "
+					     "value=%016llx",
 					     *kpage, pgaddr, val);
 			}
 			prev_pgaddr = pgaddr;
@@ -1944,9 +1944,9 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
 			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
 				ehca_gen_err("kpage_cnt >= num_kpages, "
-					     "kpage_cnt=%lx num_kpages=%lx "
-					     "hwpage_cnt=%lx "
-					     "num_hwpages=%lx i=%x",
+					     "kpage_cnt=%llx num_kpages=%llx "
+					     "hwpage_cnt=%llx "
+					     "num_hwpages=%llx i=%x",
 					     pginfo->kpage_cnt,
 					     pginfo->num_kpages,
 					     pginfo->hwpage_cnt,
@@ -1957,8 +1957,8 @@ static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
 				(pginfo->next_hwpage * pginfo->hwpage_size));
 			if ( !(*kpage) && pbuf->addr ) {
-				ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
-					     "next_hwpage=%lx", pbuf->addr,
+				ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx "
+					     "next_hwpage=%llx", pbuf->addr,
 					     pbuf->size, pginfo->next_hwpage);
 				return -EFAULT;
 			}
@@ -1996,8 +1996,8 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
 		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
 				     pginfo->next_hwpage * pginfo->hwpage_size);
 		if ( !(*kpage) ) {
-			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
-				     "next_listelem=%lx next_hwpage=%lx",
+			ehca_gen_err("*fmrlist=%llx fmrlist=%p "
+				     "next_listelem=%llx next_hwpage=%llx",
 				     *fmrlist, fmrlist,
 				     pginfo->u.fmr.next_listelem,
 				     pginfo->next_hwpage);
@@ -2025,7 +2025,7 @@ static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
 					  ~(pginfo->hwpage_size - 1));
 				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
 					ehca_gen_err("uncontiguous fmr pages "
-						     "found prev=%lx p=%lx "
+						     "found prev=%llx p=%llx "
 						     "idx=%x", prev, p, i + j);
 					return -EINVAL;
 				}
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index f161cf173dbe..00c108159714 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -331,7 +331,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
 		if (cnt == (nr_q_pages - 1)) {	/* last page! */
 			if (h_ret != expected_hret) {
 				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%li", h_ret);
+					 "h_ret=%lli", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queue1;
 			}
@@ -345,7 +345,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
 		} else {
 			if (h_ret != H_PAGE_REGISTERED) {
 				ehca_err(ib_dev, "hipz_qp_register_rpage() "
-					 "h_ret=%li", h_ret);
+					 "h_ret=%lli", h_ret);
 				ret = ehca2ib_return_code(h_ret);
 				goto init_qp_queue1;
 			}
@@ -709,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
 
 	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
+		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
 			 h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto create_qp_exit1;
@@ -1010,7 +1010,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not modify SRQ to INIT "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1024,7 +1024,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not enable SRQ "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1038,7 +1038,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
 		ehca_err(pd->device, "Could not modify SRQ to RTR "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
 	}
@@ -1078,7 +1078,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 					   &bad_send_wqe_p, NULL, 2);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-			 " ehca_qp=%p qp_num=%x h_ret=%li",
+			 " ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, qp_num, h_ret);
 		return ehca2ib_return_code(h_ret);
 	}
@@ -1134,7 +1134,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 
 	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
 		ehca_gen_err("Invalid offset for calculating left cqes "
-				"wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+				"wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
 		return -EFAULT;
 	}
 
@@ -1168,7 +1168,7 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 						&send_wqe_p, &recv_wqe_p, 4);
 		if (h_ret != H_SUCCESS) {
 			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
+				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
 				 my_qp, qp_num, h_ret);
 			return ehca2ib_return_code(h_ret);
 		}
@@ -1261,7 +1261,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 				mqpcb, my_qp->galpas.kernel);
 	if (h_ret != H_SUCCESS) {
 		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, ibqp->qp_num, h_ret);
 		ret = ehca2ib_return_code(h_ret);
 		goto modify_qp_exit1;
@@ -1690,7 +1690,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
+		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
 		goto modify_qp_exit2;
 	}
@@ -1723,7 +1723,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			ret = ehca2ib_return_code(h_ret);
 			ehca_err(ibqp->device, "ENABLE in context of "
 				 "RESET_2_INIT failed! Maybe you didn't get "
-				 "a LID h_ret=%li ehca_qp=%p qp_num=%x",
+				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
 				 h_ret, my_qp, ibqp->qp_num);
 			goto modify_qp_exit2;
 		}
@@ -1909,7 +1909,7 @@ int ehca_query_qp(struct ib_qp *qp,
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
 		ehca_err(qp->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, qp->qp_num, h_ret);
 		goto query_qp_exit1;
 	}
@@ -2074,7 +2074,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
-		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
+		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x",
 			 h_ret, my_qp, my_qp->real_qp_num);
 	}
@@ -2108,7 +2108,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 	if (h_ret != H_SUCCESS) {
 		ret = ehca2ib_return_code(h_ret);
 		ehca_err(srq->device, "hipz_h_query_qp() failed "
-			 "ehca_qp=%p qp_num=%x h_ret=%li",
+			 "ehca_qp=%p qp_num=%x h_ret=%lli",
 			 my_qp, my_qp->real_qp_num, h_ret);
 		goto query_srq_exit1;
 	}
@@ -2179,7 +2179,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
+		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
 			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
 		return ehca2ib_return_code(h_ret);
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index c7112686782f..5a3d96f84c79 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -822,7 +822,7 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
 		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
 		if (!wqe) {
-			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
+			ehca_err(cq->device, "Invalid wqe offset=%#llx on "
 				 "qp_num=%#x", offset, my_qp->real_qp_num);
 			return nr;
 		}
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 706d97ad5555..44447aaa5501 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -85,7 +85,7 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
 
 	if (ret != H_SUCCESS) {
 		ehca_err(&shca->ib_device,
-			 "Can't define AQP1 for port %x. h_ret=%li",
+			 "Can't define AQP1 for port %x. h_ret=%lli",
 			 port, ret);
 		return ret;
 	}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 21f7d06f14ad..f09914cccf53 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -116,7 +116,7 @@ extern int ehca_debug_level;
 		unsigned char *deb = (unsigned char *)(adr); \
 		for (x = 0; x < l; x += 16) { \
 			printk(KERN_INFO "EHCA_DMP:%s " format \
-			       " adr=%p ofs=%04x %016lx %016lx\n", \
+			       " adr=%p ofs=%04x %016llx %016llx\n", \
 			       __func__, ##args, deb, x, \
 			       *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
 			deb += 16; \
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index e43ed8f8a0c8..3cb688d29131 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -114,7 +114,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
 
 	physical = galpas->user.fw_handle;
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-	ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
+	ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
 	/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
 	ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
 			   vma->vm_page_prot);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 415d3a465de6..d0ab0c0d5e91 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -226,7 +226,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 			     u32 *eq_ist)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 	u64 allocate_controls;
 
 	/* resource type */
@@ -249,7 +249,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 	*eq_ist = (u32)outs[5];
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resource - ret=%li ", ret);
+		ehca_gen_err("Not enough resource - ret=%lli ", ret);
 
 	return ret;
 }
@@ -270,7 +270,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_alloc_cq_parms *param)
 {
 	u64 ret;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle,	/* r4 */
@@ -287,7 +287,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 		hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%li", ret);
+		ehca_gen_err("Not enough resources. ret=%lli", ret);
 
 	return ret;
 }
@@ -297,7 +297,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 {
 	u64 ret;
 	u64 allocate_controls, max_r10_reg, r11, r12;
-	u64 outs[PLPAR_HCALL9_BUFSIZE];
+	unsigned long outs[PLPAR_HCALL9_BUFSIZE];
 
 	allocate_controls =
 		EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
@@ -362,7 +362,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 		hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
 
 	if (ret == H_NOT_ENOUGH_RESOURCES)
-		ehca_gen_err("Not enough resources. ret=%li", ret);
+		ehca_gen_err("Not enough resources. ret=%lli", ret);
 
 	return ret;
 }
@@ -454,7 +454,7 @@ u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
454 const u64 count) 454 const u64 count)
455{ 455{
456 if (count != 1) { 456 if (count != 1) {
457 ehca_gen_err("Ppage counter=%lx", count); 457 ehca_gen_err("Ppage counter=%llx", count);
458 return H_PARAMETER; 458 return H_PARAMETER;
459 } 459 }
460 return hipz_h_register_rpage(adapter_handle, 460 return hipz_h_register_rpage(adapter_handle,
@@ -489,7 +489,7 @@ u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
489 const struct h_galpa gal) 489 const struct h_galpa gal)
490{ 490{
491 if (count != 1) { 491 if (count != 1) {
492 ehca_gen_err("Page counter=%lx", count); 492 ehca_gen_err("Page counter=%llx", count);
493 return H_PARAMETER; 493 return H_PARAMETER;
494 } 494 }
495 495
@@ -508,7 +508,7 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
508 const struct h_galpa galpa) 508 const struct h_galpa galpa)
509{ 509{
510 if (count > 1) { 510 if (count > 1) {
511 ehca_gen_err("Page counter=%lx", count); 511 ehca_gen_err("Page counter=%llx", count);
512 return H_PARAMETER; 512 return H_PARAMETER;
513 } 513 }
514 514
@@ -525,7 +525,7 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
525 int dis_and_get_function_code) 525 int dis_and_get_function_code)
526{ 526{
527 u64 ret; 527 u64 ret;
528 u64 outs[PLPAR_HCALL9_BUFSIZE]; 528 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
529 529
530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, 530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
531 adapter_handle.handle, /* r4 */ 531 adapter_handle.handle, /* r4 */
@@ -548,7 +548,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
548 struct h_galpa gal) 548 struct h_galpa gal)
549{ 549{
550 u64 ret; 550 u64 ret;
551 u64 outs[PLPAR_HCALL9_BUFSIZE]; 551 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, 552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
553 adapter_handle.handle, /* r4 */ 553 adapter_handle.handle, /* r4 */
554 qp_handle.handle, /* r5 */ 554 qp_handle.handle, /* r5 */
@@ -557,7 +557,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
557 0, 0, 0, 0, 0); 557 0, 0, 0, 0, 0);
558 558
559 if (ret == H_NOT_ENOUGH_RESOURCES) 559 if (ret == H_NOT_ENOUGH_RESOURCES)
560 ehca_gen_err("Insufficient resources ret=%li", ret); 560 ehca_gen_err("Insufficient resources ret=%lli", ret);
561 561
562 return ret; 562 return ret;
563} 563}
@@ -579,7 +579,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
579 struct ehca_qp *qp) 579 struct ehca_qp *qp)
580{ 580{
581 u64 ret; 581 u64 ret;
582 u64 outs[PLPAR_HCALL9_BUFSIZE]; 582 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
583 583
584 ret = hcp_galpas_dtor(&qp->galpas); 584 ret = hcp_galpas_dtor(&qp->galpas);
585 if (ret) { 585 if (ret) {
@@ -593,7 +593,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
593 qp->ipz_qp_handle.handle, /* r6 */ 593 qp->ipz_qp_handle.handle, /* r6 */
594 0, 0, 0, 0, 0, 0); 594 0, 0, 0, 0, 0, 0);
595 if (ret == H_HARDWARE) 595 if (ret == H_HARDWARE)
596 ehca_gen_err("HCA not operational. ret=%li", ret); 596 ehca_gen_err("HCA not operational. ret=%lli", ret);
597 597
598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, 598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
599 adapter_handle.handle, /* r4 */ 599 adapter_handle.handle, /* r4 */
@@ -601,7 +601,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
601 0, 0, 0, 0, 0); 601 0, 0, 0, 0, 0);
602 602
603 if (ret == H_RESOURCE) 603 if (ret == H_RESOURCE)
604 ehca_gen_err("Resource still in use. ret=%li", ret); 604 ehca_gen_err("Resource still in use. ret=%lli", ret);
605 605
606 return ret; 606 return ret;
607} 607}
@@ -625,7 +625,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
625 u32 * bma_qp_nr) 625 u32 * bma_qp_nr)
626{ 626{
627 u64 ret; 627 u64 ret;
628 u64 outs[PLPAR_HCALL9_BUFSIZE]; 628 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
629 629
630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, 630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
631 adapter_handle.handle, /* r4 */ 631 adapter_handle.handle, /* r4 */
@@ -636,7 +636,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
636 *bma_qp_nr = (u32)outs[1]; 636 *bma_qp_nr = (u32)outs[1];
637 637
638 if (ret == H_ALIAS_EXIST) 638 if (ret == H_ALIAS_EXIST)
639 ehca_gen_err("AQP1 already exists. ret=%li", ret); 639 ehca_gen_err("AQP1 already exists. ret=%lli", ret);
640 640
641 return ret; 641 return ret;
642} 642}
@@ -658,7 +658,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
658 0, 0); 658 0, 0);
659 659
660 if (ret == H_NOT_ENOUGH_RESOURCES) 660 if (ret == H_NOT_ENOUGH_RESOURCES)
661 ehca_gen_err("Not enough resources. ret=%li", ret); 661 ehca_gen_err("Not enough resources. ret=%lli", ret);
662 662
663 return ret; 663 return ret;
664} 664}
@@ -697,7 +697,7 @@ u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
697 0, 0, 0, 0); 697 0, 0, 0, 0);
698 698
699 if (ret == H_RESOURCE) 699 if (ret == H_RESOURCE)
700 ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret); 700 ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
701 701
702 return ret; 702 return ret;
703} 703}
@@ -719,7 +719,7 @@ u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
719 0, 0, 0, 0, 0); 719 0, 0, 0, 0, 0);
720 720
721 if (ret == H_RESOURCE) 721 if (ret == H_RESOURCE)
722 ehca_gen_err("Resource in use. ret=%li ", ret); 722 ehca_gen_err("Resource in use. ret=%lli ", ret);
723 723
724 return ret; 724 return ret;
725} 725}
@@ -733,7 +733,7 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
733 struct ehca_mr_hipzout_parms *outparms) 733 struct ehca_mr_hipzout_parms *outparms)
734{ 734{
735 u64 ret; 735 u64 ret;
736 u64 outs[PLPAR_HCALL9_BUFSIZE]; 736 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
737 737
738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
739 adapter_handle.handle, /* r4 */ 739 adapter_handle.handle, /* r4 */
@@ -774,9 +774,9 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
774 774
775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { 775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
776 ehca_gen_err("logical_address_of_page not on a 4k boundary " 776 ehca_gen_err("logical_address_of_page not on a 4k boundary "
777 "adapter_handle=%lx mr=%p mr_handle=%lx " 777 "adapter_handle=%llx mr=%p mr_handle=%llx "
778 "pagesize=%x queue_type=%x " 778 "pagesize=%x queue_type=%x "
779 "logical_address_of_page=%lx count=%lx", 779 "logical_address_of_page=%llx count=%llx",
780 adapter_handle.handle, mr, 780 adapter_handle.handle, mr,
781 mr->ipz_mr_handle.handle, pagesize, queue_type, 781 mr->ipz_mr_handle.handle, pagesize, queue_type,
782 logical_address_of_page, count); 782 logical_address_of_page, count);
@@ -794,7 +794,7 @@ u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
794 struct ehca_mr_hipzout_parms *outparms) 794 struct ehca_mr_hipzout_parms *outparms)
795{ 795{
796 u64 ret; 796 u64 ret;
797 u64 outs[PLPAR_HCALL9_BUFSIZE]; 797 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
798 798
799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs, 799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
800 adapter_handle.handle, /* r4 */ 800 adapter_handle.handle, /* r4 */
@@ -828,7 +828,7 @@ u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
828 struct ehca_mr_hipzout_parms *outparms) 828 struct ehca_mr_hipzout_parms *outparms)
829{ 829{
830 u64 ret; 830 u64 ret;
831 u64 outs[PLPAR_HCALL9_BUFSIZE]; 831 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
832 832
833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, 833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
834 adapter_handle.handle, /* r4 */ 834 adapter_handle.handle, /* r4 */
@@ -855,7 +855,7 @@ u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
855 struct ehca_mr_hipzout_parms *outparms) 855 struct ehca_mr_hipzout_parms *outparms)
856{ 856{
857 u64 ret; 857 u64 ret;
858 u64 outs[PLPAR_HCALL9_BUFSIZE]; 858 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
859 859
860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, 860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
861 adapter_handle.handle, /* r4 */ 861 adapter_handle.handle, /* r4 */
@@ -877,7 +877,7 @@ u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
877 struct ehca_mw_hipzout_parms *outparms) 877 struct ehca_mw_hipzout_parms *outparms)
878{ 878{
879 u64 ret; 879 u64 ret;
880 u64 outs[PLPAR_HCALL9_BUFSIZE]; 880 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
881 881
882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
883 adapter_handle.handle, /* r4 */ 883 adapter_handle.handle, /* r4 */
@@ -895,7 +895,7 @@ u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
895 struct ehca_mw_hipzout_parms *outparms) 895 struct ehca_mw_hipzout_parms *outparms)
896{ 896{
897 u64 ret; 897 u64 ret;
898 u64 outs[PLPAR_HCALL9_BUFSIZE]; 898 unsigned long outs[PLPAR_HCALL9_BUFSIZE];
899 899
900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs, 900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
901 adapter_handle.handle, /* r4 */ 901 adapter_handle.handle, /* r4 */
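
The recurring outs[] change in hcp_if.c is the consistent-types half of the series: the powerpc hypervisor call plpar_hcall9() fills a return buffer of unsigned long[PLPAR_HCALL9_BUFSIZE], so declaring that buffer as u64[] only compiled cleanly while u64 happened to be unsigned long. A sketch of the call shape, loosely modeled on the hunks above (the wrapper function and its arguments are illustrative; only the opcode and buffer size come from the driver):

#include <linux/types.h>
#include <asm/hvcall.h>

/*
 * Sketch: the return buffer must have the type plpar_hcall9() expects,
 * independent of how u64 is defined; u32/u64 results are then pulled
 * out of the individual slots, as the hunks above do with outs[].
 */
static long sketch_query_resource(u64 adapter_handle, u64 mr_handle, u32 *out_val)
{
        unsigned long outs[PLPAR_HCALL9_BUFSIZE];
        long ret;

        ret = plpar_hcall9(H_QUERY_MR, outs,
                           adapter_handle, mr_handle,
                           0, 0, 0, 0, 0, 0, 0);
        *out_val = (u32)outs[0];
        return ret;
}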
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 39167a797f99..a91cb4c3fa5c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1462,7 +1462,8 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
1462} 1462}
1463 1463
1464static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1464static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1465 struct mlx4_ib_qp *qp, unsigned *lso_seg_len) 1465 struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
1466 __be32 *lso_hdr_sz)
1466{ 1467{
1467 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1468 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
1468 1469
@@ -1479,12 +1480,8 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
1479 1480
1480 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 1481 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
1481 1482
1482 /* make sure LSO header is written before overwriting stamping */ 1483 *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1483 wmb(); 1484 wr->wr.ud.hlen);
1484
1485 wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
1486 wr->wr.ud.hlen);
1487
1488 *lso_seg_len = halign; 1485 *lso_seg_len = halign;
1489 return 0; 1486 return 0;
1490} 1487}
@@ -1518,6 +1515,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1518 int uninitialized_var(stamp); 1515 int uninitialized_var(stamp);
1519 int uninitialized_var(size); 1516 int uninitialized_var(size);
1520 unsigned uninitialized_var(seglen); 1517 unsigned uninitialized_var(seglen);
1518 __be32 dummy;
1519 __be32 *lso_wqe;
1520 __be32 uninitialized_var(lso_hdr_sz);
1521 int i; 1521 int i;
1522 1522
1523 spin_lock_irqsave(&qp->sq.lock, flags); 1523 spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1525,6 +1525,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1525 ind = qp->sq_next_wqe; 1525 ind = qp->sq_next_wqe;
1526 1526
1527 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1527 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1528 lso_wqe = &dummy;
1529
1528 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1530 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1529 err = -ENOMEM; 1531 err = -ENOMEM;
1530 *bad_wr = wr; 1532 *bad_wr = wr;
@@ -1606,11 +1608,12 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1606 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1608 size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
1607 1609
1608 if (wr->opcode == IB_WR_LSO) { 1610 if (wr->opcode == IB_WR_LSO) {
1609 err = build_lso_seg(wqe, wr, qp, &seglen); 1611 err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
1610 if (unlikely(err)) { 1612 if (unlikely(err)) {
1611 *bad_wr = wr; 1613 *bad_wr = wr;
1612 goto out; 1614 goto out;
1613 } 1615 }
1616 lso_wqe = (__be32 *) wqe;
1614 wqe += seglen; 1617 wqe += seglen;
1615 size += seglen / 16; 1618 size += seglen / 16;
1616 } 1619 }
@@ -1652,6 +1655,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1652 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 1655 for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
1653 set_data_seg(dseg, wr->sg_list + i); 1656 set_data_seg(dseg, wr->sg_list + i);
1654 1657
1658 /*
1659 * Possibly overwrite stamping in cacheline with LSO
1660 * segment only after making sure all data segments
1661 * are written.
1662 */
1663 wmb();
1664 *lso_wqe = lso_hdr_sz;
1665
1655 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1666 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
1656 MLX4_WQE_CTRL_FENCE : 0) | size; 1667 MLX4_WQE_CTRL_FENCE : 0) | size;
1657 1668
@@ -1686,7 +1697,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1686 stamp_send_wqe(qp, stamp, size * 16); 1697 stamp_send_wqe(qp, stamp, size * 16);
1687 ind = pad_wraparound(qp, ind); 1698 ind = pad_wraparound(qp, ind);
1688 } 1699 }
1689
1690 } 1700 }
1691 1701
1692out: 1702out:
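
The mlx4/qp.c change moves the write of the LSO header-size dword out of build_lso_seg(). As the new comment in the hunk explains, that dword overwrites the stamping that marks the WQE as not yet valid, so it must be stored only after every other part of the work request, including the data segments written later in mlx4_ib_post_send(), with a write barrier in between; lso_wqe points either at that dword or at a harmless dummy so the send path can do the ordered store unconditionally. A simplified sketch of the ordering pattern (the structure and names are illustrative, not the driver's real layout):

#include <linux/types.h>

/* Illustrative WQE layout; the real driver uses the mlx4_wqe_* structs. */
struct sketch_wqe {
        u32 valid_dword;        /* overlaps the stamp the HW keys off */
        u32 payload[15];
};

static u32 sketch_dummy;        /* sink for requests without this dword */

static void sketch_write_wqe(struct sketch_wqe *wqe, const u32 *data, int n,
                             u32 valid, bool is_lso)
{
        u32 *valid_ptr = &sketch_dummy;
        int i;

        if (is_lso)
                valid_ptr = &wqe->valid_dword;

        for (i = 0; i < n; i++)         /* write the rest of the WQE first */
                wqe->payload[i] = data[i];

        /*
         * Only after everything else in the WQE is written may the dword
         * that overwrites the stamp be stored; wmb() is the kernel's
         * write memory barrier, as in the hunk above.
         */
        wmb();
        *valid_ptr = valid;
}

The earlier code issued its wmb() inside build_lso_seg(), before the data segments were written, which is exactly the ordering hole this patch closes.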
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index dce0443f9d69..0bd2a4ff0842 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,23 +106,17 @@ int ipoib_open(struct net_device *dev)
106 106
107 ipoib_dbg(priv, "bringing up interface\n"); 107 ipoib_dbg(priv, "bringing up interface\n");
108 108
109 set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 109 if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
110 napi_enable(&priv->napi);
110 111
111 if (ipoib_pkey_dev_delay_open(dev)) 112 if (ipoib_pkey_dev_delay_open(dev))
112 return 0; 113 return 0;
113 114
114 napi_enable(&priv->napi); 115 if (ipoib_ib_dev_open(dev))
116 goto err_disable;
115 117
116 if (ipoib_ib_dev_open(dev)) { 118 if (ipoib_ib_dev_up(dev))
117 napi_disable(&priv->napi); 119 goto err_stop;
118 return -EINVAL;
119 }
120
121 if (ipoib_ib_dev_up(dev)) {
122 ipoib_ib_dev_stop(dev, 1);
123 napi_disable(&priv->napi);
124 return -EINVAL;
125 }
126 120
127 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 121 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
128 struct ipoib_dev_priv *cpriv; 122 struct ipoib_dev_priv *cpriv;
@@ -144,6 +138,15 @@ int ipoib_open(struct net_device *dev)
144 netif_start_queue(dev); 138 netif_start_queue(dev);
145 139
146 return 0; 140 return 0;
141
142err_stop:
143 ipoib_ib_dev_stop(dev, 1);
144
145err_disable:
146 napi_disable(&priv->napi);
147 clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
148
149 return -EINVAL;
147} 150}
148 151
149static int ipoib_stop(struct net_device *dev) 152static int ipoib_stop(struct net_device *dev)
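
The ipoib_open() rewrite does two things: test_and_set_bit() avoids enabling NAPI twice for an interface that is already administratively up, and the inline cleanup is replaced by a goto-unwind error path so each failure undoes exactly the steps taken before it, including clearing the ADMIN_UP flag the old code left set. A generic sketch of that unwind idiom (all types and calls are placeholders, not IPoIB APIs):

struct sketch_netdev { int unused; };

static int  enable_polling(struct sketch_netdev *d) { (void)d; return 0; }
static void disable_polling(struct sketch_netdev *d) { (void)d; }
static int  open_hw(struct sketch_netdev *d) { (void)d; return 0; }
static void close_hw(struct sketch_netdev *d) { (void)d; }
static int  link_up(struct sketch_netdev *d) { (void)d; return 0; }

static int sketch_bring_up(struct sketch_netdev *dev)
{
        int err;

        err = enable_polling(dev);
        if (err)
                return err;

        err = open_hw(dev);
        if (err)
                goto err_polling;

        err = link_up(dev);
        if (err)
                goto err_hw;

        return 0;

err_hw:
        close_hw(dev);
err_polling:
        disable_polling(dev);
        return err;
}

Each label undoes only the steps that already succeeded, which is what the new err_stop/err_disable labels do for ipoib_ib_dev_stop(), napi_disable() and the ADMIN_UP bit.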
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 59d02e0b8df1..425e31112ed7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -409,7 +409,7 @@ static int ipoib_mcast_join_complete(int status,
409 } 409 }
410 410
411 if (mcast->logcount++ < 20) { 411 if (mcast->logcount++ < 20) {
412 if (status == -ETIMEDOUT) { 412 if (status == -ETIMEDOUT || status == -EAGAIN) {
413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
414 mcast->mcmember.mgid.raw, status); 414 mcast->mcmember.mgid.raw, status);
415 } else { 415 } else {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 2cf1a4088718..5a76a5510350 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -61,6 +61,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
61 61
62 ppriv = netdev_priv(pdev); 62 ppriv = netdev_priv(pdev);
63 63
64 rtnl_lock();
64 mutex_lock(&ppriv->vlan_mutex); 65 mutex_lock(&ppriv->vlan_mutex);
65 66
66 /* 67 /*
@@ -111,7 +112,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
111 goto device_init_failed; 112 goto device_init_failed;
112 } 113 }
113 114
114 result = register_netdev(priv->dev); 115 result = register_netdevice(priv->dev);
115 if (result) { 116 if (result) {
116 ipoib_warn(priv, "failed to initialize; error %i", result); 117 ipoib_warn(priv, "failed to initialize; error %i", result);
117 goto register_failed; 118 goto register_failed;
@@ -134,12 +135,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
134 list_add_tail(&priv->list, &ppriv->child_intfs); 135 list_add_tail(&priv->list, &ppriv->child_intfs);
135 136
136 mutex_unlock(&ppriv->vlan_mutex); 137 mutex_unlock(&ppriv->vlan_mutex);
138 rtnl_unlock();
137 139
138 return 0; 140 return 0;
139 141
140sysfs_failed: 142sysfs_failed:
141 ipoib_delete_debug_files(priv->dev); 143 ipoib_delete_debug_files(priv->dev);
142 unregister_netdev(priv->dev); 144 unregister_netdevice(priv->dev);
143 145
144register_failed: 146register_failed:
145 ipoib_dev_cleanup(priv->dev); 147 ipoib_dev_cleanup(priv->dev);
@@ -149,6 +151,7 @@ device_init_failed:
149 151
150err: 152err:
151 mutex_unlock(&ppriv->vlan_mutex); 153 mutex_unlock(&ppriv->vlan_mutex);
154 rtnl_unlock();
152 return result; 155 return result;
153} 156}
154 157
@@ -162,10 +165,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
162 165
163 ppriv = netdev_priv(pdev); 166 ppriv = netdev_priv(pdev);
164 167
168 rtnl_lock();
165 mutex_lock(&ppriv->vlan_mutex); 169 mutex_lock(&ppriv->vlan_mutex);
166 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 170 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
167 if (priv->pkey == pkey) { 171 if (priv->pkey == pkey) {
168 unregister_netdev(priv->dev); 172 unregister_netdevice(priv->dev);
169 ipoib_dev_cleanup(priv->dev); 173 ipoib_dev_cleanup(priv->dev);
170 list_del(&priv->list); 174 list_del(&priv->list);
171 free_netdev(priv->dev); 175 free_netdev(priv->dev);
@@ -175,6 +179,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
175 } 179 }
176 } 180 }
177 mutex_unlock(&ppriv->vlan_mutex); 181 mutex_unlock(&ppriv->vlan_mutex);
182 rtnl_unlock();
178 183
179 return ret; 184 return ret;
180} 185}
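
The ipoib_vlan.c hunks fix the ipoib_open()/child-create deadlock by enforcing a single lock order: the RTNL is always taken before ppriv->vlan_mutex. Because the RTNL is now already held, register_netdev()/unregister_netdev(), which take it internally, are swapped for the *_netdevice() variants that require the caller to hold it. A minimal sketch of the ordering, using a hypothetical helper rather than the driver's functions:

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>

/*
 * Sketch: the child-interface mutex is only ever taken with the RTNL
 * already held, so a path that holds the RTNL and then wants the mutex
 * (as ipoib_open() does for child interfaces) cannot deadlock against
 * this one. register_netdevice() requires the caller to hold the RTNL.
 */
static int sketch_add_child(struct mutex *vlan_mutex, struct net_device *child)
{
        int err;

        rtnl_lock();
        mutex_lock(vlan_mutex);

        err = register_netdevice(child);

        mutex_unlock(vlan_mutex);
        rtnl_unlock();
        return err;
}

ipoib_vlan_delete() gets the same treatment, so both child-management paths now nest vlan_mutex strictly inside the RTNL.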
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 919fb9eb1b62..cebdf3243ca1 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
111 dev_cap->reserved_eqs + 111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1); 112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 113 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 115 profile[MLX4_RES_MTT].num = request->num_mtt;
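
The profile.c hunk silences the mlx4_core min() warning: the kernel's min() insists both arguments have exactly the same type, and dev_cap->max_eqs apparently does not match the type of the num_possible_cpus() arithmetic on the other side, so min_t() is used to cast both operands explicitly. A tiny sketch of the difference (field types are illustrative):

#include <linux/kernel.h>       /* min(), min_t() */

struct sketch_caps {
        int max_eqs;             /* illustrative field types */
        unsigned int wanted_eqs;
};

static unsigned int pick_num_eqs(const struct sketch_caps *caps)
{
        /*
         * min(caps->max_eqs, caps->wanted_eqs) would warn because the
         * operand types differ; min_t() casts both to the named type
         * before comparing.
         */
        return min_t(unsigned int, caps->max_eqs, caps->wanted_eqs);
}

For the values involved the computed EQ count is the same as before; only the type-mismatch warning goes away.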