aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ehea/ehea_qmr.c
diff options
context:
space:
mode:
authorDoug Maxey <dwm@austin.ibm.com>2008-01-31 21:20:51 -0500
committerDavid S. Miller <davem@davemloft.net>2008-02-03 07:25:58 -0500
commitf67c6275185216b47ee50c8c122adee3c562bce7 (patch)
tree41a23cd9afde75032f8b0edaf1a59773b20c96c7 /drivers/net/ehea/ehea_qmr.c
parente076c872df1673f606c2e6566cea59473796633c (diff)
ehea: fix qmr checkpatch complaints
Cc: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Doug Maxey <dwm@austin.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ehea/ehea_qmr.c')
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 32
1 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 83b76432b41a..d522e905f460 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -33,8 +33,6 @@
33 33
34 34
35struct ehea_busmap ehea_bmap = { 0, 0, NULL }; 35struct ehea_busmap ehea_bmap = { 0, 0, NULL };
36extern u64 ehea_driver_flags;
37extern struct work_struct ehea_rereg_mr_task;
38 36
39 37
40static void *hw_qpageit_get_inc(struct hw_queue *queue) 38static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -65,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
65 } 63 }
66 64
67 queue->queue_length = nr_of_pages * pagesize; 65 queue->queue_length = nr_of_pages * pagesize;
68 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL); 66 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
69 if (!queue->queue_pages) { 67 if (!queue->queue_pages) {
70 ehea_error("no mem for queue_pages"); 68 ehea_error("no mem for queue_pages");
71 return -ENOMEM; 69 return -ENOMEM;
@@ -78,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
78 */ 76 */
79 i = 0; 77 i = 0;
80 while (i < nr_of_pages) { 78 while (i < nr_of_pages) {
81 u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); 79 u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
82 if (!kpage) 80 if (!kpage)
83 goto out_nomem; 81 goto out_nomem;
84 for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) { 82 for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
85 (queue->queue_pages)[i] = (struct ehea_page*)kpage; 83 (queue->queue_pages)[i] = (struct ehea_page *)kpage;
86 kpage += pagesize; 84 kpage += pagesize;
87 i++; 85 i++;
88 } 86 }
@@ -235,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
235 return 0; 233 return 0;
236 234
237 hcp_epas_dtor(&cq->epas); 235 hcp_epas_dtor(&cq->epas);
238 236 hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
239 if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) { 237 if (hret == H_R_STATE) {
240 ehea_error_data(cq->adapter, cq->fw_handle); 238 ehea_error_data(cq->adapter, cq->fw_handle);
241 hret = ehea_destroy_cq_res(cq, FORCE_FREE); 239 hret = ehea_destroy_cq_res(cq, FORCE_FREE);
242 } 240 }
@@ -301,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
301 if (i == (eq->attr.nr_pages - 1)) { 299 if (i == (eq->attr.nr_pages - 1)) {
302 /* last page */ 300 /* last page */
303 vpage = hw_qpageit_get_inc(&eq->hw_queue); 301 vpage = hw_qpageit_get_inc(&eq->hw_queue);
304 if ((hret != H_SUCCESS) || (vpage)) { 302 if ((hret != H_SUCCESS) || (vpage))
305 goto out_kill_hwq; 303 goto out_kill_hwq;
306 } 304
307 } else { 305 } else {
308 if ((hret != H_PAGE_REGISTERED) || (!vpage)) { 306 if ((hret != H_PAGE_REGISTERED) || (!vpage))
309 goto out_kill_hwq; 307 goto out_kill_hwq;
310 } 308
311 } 309 }
312 } 310 }
313 311
@@ -331,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
331 unsigned long flags; 329 unsigned long flags;
332 330
333 spin_lock_irqsave(&eq->spinlock, flags); 331 spin_lock_irqsave(&eq->spinlock, flags);
334 eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue); 332 eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
335 spin_unlock_irqrestore(&eq->spinlock, flags); 333 spin_unlock_irqrestore(&eq->spinlock, flags);
336 334
337 return eqe; 335 return eqe;
@@ -364,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
364 362
365 hcp_epas_dtor(&eq->epas); 363 hcp_epas_dtor(&eq->epas);
366 364
367 if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) { 365 hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
366 if (hret == H_R_STATE) {
368 ehea_error_data(eq->adapter, eq->fw_handle); 367 ehea_error_data(eq->adapter, eq->fw_handle);
369 hret = ehea_destroy_eq_res(eq, FORCE_FREE); 368 hret = ehea_destroy_eq_res(eq, FORCE_FREE);
370 } 369 }
@@ -546,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
546 545
547 hcp_epas_dtor(&qp->epas); 546 hcp_epas_dtor(&qp->epas);
548 547
549 if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { 548 hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
549 if (hret == H_R_STATE) {
550 ehea_error_data(qp->adapter, qp->fw_handle); 550 ehea_error_data(qp->adapter, qp->fw_handle);
551 hret = ehea_destroy_qp_res(qp, FORCE_FREE); 551 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
552 } 552 }
@@ -559,7 +559,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
559 return 0; 559 return 0;
560} 560}
561 561
562int ehea_create_busmap( void ) 562int ehea_create_busmap(void)
563{ 563{
564 u64 vaddr = EHEA_BUSMAP_START; 564 u64 vaddr = EHEA_BUSMAP_START;
565 unsigned long high_section_index = 0; 565 unsigned long high_section_index = 0;
@@ -595,7 +595,7 @@ int ehea_create_busmap( void )
595 return 0; 595 return 0;
596} 596}
597 597
598void ehea_destroy_busmap( void ) 598void ehea_destroy_busmap(void)
599{ 599{
600 vfree(ehea_bmap.vaddr); 600 vfree(ehea_bmap.vaddr);
601} 601}