author     Doug Maxey <dwm@austin.ibm.com>        2008-01-31 21:20:51 -0500
committer  David S. Miller <davem@davemloft.net>  2008-02-03 07:25:58 -0500
commit     f67c6275185216b47ee50c8c122adee3c562bce7
tree       41a23cd9afde75032f8b0edaf1a59773b20c96c7
parent     e076c872df1673f606c2e6566cea59473796633c
ehea: fix qmr checkpatch complaints
Cc: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Doug Maxey <dwm@austin.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea_qmr.c  32
-rw-r--r--  drivers/net/ehea/ehea_qmr.h  16
2 files changed, 24 insertions, 24 deletions
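Most of the diff below is mechanical checkpatch cleanup: pointer casts gain a space before the '*', empty parameter lists become "(void)" instead of "( void )", commas in macro arguments get a following space, and two unused extern declarations are dropped. The one recurring structural change is in the ehea_destroy_cq()/ehea_destroy_eq()/ehea_destroy_qp() paths, where an assignment buried inside an if condition is hoisted out before the H_R_STATE retry with FORCE_FREE. A minimal standalone sketch of that pattern, using hypothetical stand-in names rather than the driver's real hcall wrappers:

/* Sketch only: destroy_res() stands in for the ehea_destroy_*_res()
 * helpers; the constants mimic the hypervisor return codes the driver
 * checks.
 */
#include <stdio.h>

#define H_SUCCESS	0
#define H_R_STATE	5
#define NORMAL_FREE	0
#define FORCE_FREE	1

static long destroy_res(int force)
{
	/* pretend the resource is still busy unless the free is forced */
	return force ? H_SUCCESS : H_R_STATE;
}

int main(void)
{
	long hret;

	/* before: if ((hret = destroy_res(NORMAL_FREE)) == H_R_STATE) { ... } */
	hret = destroy_res(NORMAL_FREE);
	if (hret == H_R_STATE) {
		fprintf(stderr, "resource busy, forcing free\n");
		hret = destroy_res(FORCE_FREE);
	}
	return hret == H_SUCCESS ? 0 : 1;
}

Splitting the assignment out of the condition does not change behaviour; it only removes the checkpatch warning and makes the retry logic easier to read.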
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 83b76432b41a..d522e905f460 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -33,8 +33,6 @@
 
 
 struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct work_struct ehea_rereg_mr_task;
 
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
@@ -65,7 +63,7 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
 	}
 
 	queue->queue_length = nr_of_pages * pagesize;
-	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
 		ehea_error("no mem for queue_pages");
 		return -ENOMEM;
@@ -78,11 +76,11 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
 	 */
 	i = 0;
 	while (i < nr_of_pages) {
-		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
 		if (!kpage)
 			goto out_nomem;
 		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
-			(queue->queue_pages)[i] = (struct ehea_page*)kpage;
+			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
 			kpage += pagesize;
 			i++;
 		}
@@ -235,8 +233,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
 		return 0;
 
 	hcp_epas_dtor(&cq->epas);
-
-	if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(cq->adapter, cq->fw_handle);
 		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
 	}
@@ -301,13 +299,13 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
 		if (i == (eq->attr.nr_pages - 1)) {
 			/* last page */
 			vpage = hw_qpageit_get_inc(&eq->hw_queue);
-			if ((hret != H_SUCCESS) || (vpage)) {
+			if ((hret != H_SUCCESS) || (vpage))
 				goto out_kill_hwq;
-			}
+
 		} else {
-			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+			if ((hret != H_PAGE_REGISTERED) || (!vpage))
 				goto out_kill_hwq;
-			}
+
 		}
 	}
 
@@ -331,7 +329,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&eq->spinlock, flags);
-	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+	eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
 	spin_unlock_irqrestore(&eq->spinlock, flags);
 
 	return eqe;
@@ -364,7 +362,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 
 	hcp_epas_dtor(&eq->epas);
 
-	if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(eq->adapter, eq->fw_handle);
 		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
 	}
@@ -546,7 +545,8 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 
 	hcp_epas_dtor(&qp->epas);
 
-	if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+	if (hret == H_R_STATE) {
 		ehea_error_data(qp->adapter, qp->fw_handle);
 		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
 	}
@@ -559,7 +559,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
 	return 0;
 }
 
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
 {
 	u64 vaddr = EHEA_BUSMAP_START;
 	unsigned long high_section_index = 0;
@@ -595,7 +595,7 @@ int ehea_create_busmap( void )
 	return 0;
 }
 
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
 {
 	vfree(ehea_bmap.vaddr);
 }
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index bc62d389c166..0bb6f92fa2f8 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -41,8 +41,8 @@
 #define EHEA_SECTSIZE          (1UL << 24)
 #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
 
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
 #endif
 
 /* Some abbreviations used here:
@@ -188,8 +188,8 @@ struct ehea_eqe {
 	u64 entry;
 };
 
-#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)
 
 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
 {
@@ -279,7 +279,7 @@ static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
 static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
 {
 	void *retvalue = hw_qeit_get(queue);
-	u32 qe = *(u8*)retvalue;
+	u32 qe = *(u8 *)retvalue;
 	if ((qe >> 7) == (queue->toggle_state & 1))
 		hw_qeit_eq_get_inc(queue);
 	else
@@ -364,7 +364,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
 
 int ehea_destroy_cq(struct ehea_cq *cq);
 
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
 			       struct ehea_qp_init_attr *init_attr);
 
 int ehea_destroy_qp(struct ehea_qp *qp);
@@ -378,8 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
 
 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
 
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
 u64 ehea_map_vaddr(void *caddr);
 
 #endif /* __EHEA_QMR_H__ */
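The header hunk above also parenthesizes the whole #if comparison that guards the section-size assumption, and reworded "can't" to "cannot", presumably to avoid an unbalanced apostrophe in the #error text. A tiny self-contained sketch of that compile-time guard style, with hypothetical constants standing in for (1UL << SECTION_SIZE_BITS) and EHEA_SECTSIZE:

/* Sketch only: KERNEL_SECTSIZE and DRIVER_SECTSIZE are stand-ins that
 * keep the example buildable outside the kernel tree.
 */
#define KERNEL_SECTSIZE	(1UL << 26)
#define DRIVER_SECTSIZE	(1UL << 24)

#if (KERNEL_SECTSIZE < DRIVER_SECTSIZE)
#error driver cannot work if kernel sectionsize < driver sectionsize
#endif

int main(void)
{
	return 0;
}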