author     Jan-Bernd Themann <ossthema@de.ibm.com>   2007-07-02 07:00:46 -0400
committer  Jeff Garzik <jeff@garzik.org>             2007-07-08 22:16:45 -0400
commit     d1d25aaba85fd24ab18b0a4d22f19be02aac65c9 (patch)
tree       7b0aef881cf30ace0ec351ce39cf10f0e5f53ef8
parent     7c00db3d28131f4fff42eb49632dcd70636f31f4 (diff)
ehea: Whitespace cleanup
This patch fixes several whitespace issues.
Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--  drivers/net/ehea/ehea.h       |  14
-rw-r--r--  drivers/net/ehea/ehea_hw.h    |  24
-rw-r--r--  drivers/net/ehea/ehea_main.c  |  32
-rw-r--r--  drivers/net/ehea/ehea_qmr.c   |  56
4 files changed, 64 insertions, 62 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0f81b5a30fb..abaf3ac94936 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>

 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0064"
+#define DRV_VERSION "EHEA_0065"

 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
         | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -136,10 +136,10 @@ void ehea_dump(void *adr, int len, char *msg);
         (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))

 #define EHEA_BMASK_SET(mask, value) \
         ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))

 #define EHEA_BMASK_GET(mask, value) \
         (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))

 /*
  * Generic ehea page
@@ -190,7 +190,7 @@ struct ehea_av;
  * Queue attributes passed to ehea_create_qp()
  */
 struct ehea_qp_init_attr {
         /* input parameter */
         u32 qp_token;           /* queue token */
         u8 low_lat_rq1;
         u8 signalingtype;       /* cqe generation flag */
@@ -212,7 +212,7 @@ struct ehea_qp_init_attr {
         u64 recv_cq_handle;
         u64 aff_eq_handle;

         /* output parameter */
         u32 qp_nr;
         u16 act_nr_send_wqes;
         u16 act_nr_rwqes_rq1;
@@ -279,12 +279,12 @@ struct ehea_qp {
  * Completion Queue attributes
  */
 struct ehea_cq_attr {
         /* input parameter */
         u32 max_nr_of_cqes;
         u32 cq_token;
         u64 eq_handle;

         /* output parameter */
         u32 act_nr_of_cqes;
         u32 nr_pages;
 };
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1246757f2c22..1af7ca499ec5 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -211,34 +211,34 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
 }

 #define epa_store_eq(epa, offset, value)\
         epa_store(epa, EQTEMM_OFFSET(offset), value)
 #define epa_load_eq(epa, offset)\
         epa_load(epa, EQTEMM_OFFSET(offset))

 #define epa_store_cq(epa, offset, value)\
         epa_store(epa, CQTEMM_OFFSET(offset), value)
 #define epa_load_cq(epa, offset)\
         epa_load(epa, CQTEMM_OFFSET(offset))

 #define epa_store_qp(epa, offset, value)\
         epa_store(epa, QPTEMM_OFFSET(offset), value)
 #define epa_load_qp(epa, offset)\
         epa_load(epa, QPTEMM_OFFSET(offset))

 #define epa_store_qped(epa, offset, value)\
         epa_store(epa, QPEDMM_OFFSET(offset), value)
 #define epa_load_qped(epa, offset)\
         epa_load(epa, QPEDMM_OFFSET(offset))

 #define epa_store_mrmw(epa, offset, value)\
         epa_store(epa, MRMWMM_OFFSET(offset), value)
 #define epa_load_mrmw(epa, offset)\
         epa_load(epa, MRMWMM_OFFSET(offset))

 #define epa_store_base(epa, offset, value)\
         epa_store(epa, HCAGR_OFFSET(offset), value)
 #define epa_load_base(epa, offset)\
         epa_load(epa, HCAGR_OFFSET(offset))

 static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
 {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9e13433a268a..bdb52419dbf5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
 static int port_name_cnt = 0;

 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
                                         const struct of_device_id *id);

 static int __devexit ehea_remove(struct ibmebus_dev *dev);

@@ -236,7 +236,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,

                 rwqe = ehea_get_next_rwqe(qp, rq_nr);
                 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                             | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                 rwqe->sg_list[0].vaddr = (u64)skb->data;
                 rwqe->sg_list[0].len = packet_size;
@@ -427,7 +427,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
                                 break;
                         }
                         skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
                                                 cqe->num_bytes_transfered - 4);
                         ehea_fill_skb(port->netdev, skb, cqe);
                 } else if (rq == 2) {  /* RQ2 */
                         skb = get_skb_by_index(skb_arr_rq2,
@@ -618,7 +618,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,

         for (i = 0; i < EHEA_MAX_PORTS; i++)
                 if (adapter->port[i])
                         if (adapter->port[i]->logical_port_id == logical_port)
                                 return adapter->port[i];
         return NULL;
 }
@@ -1695,6 +1695,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
 {
         if (skb->protocol == htons(ETH_P_IP)) {
                 const struct iphdr *iph = ip_hdr(skb);
+
                 /* IPv4 */
                 swqe->tx_control |= EHEA_SWQE_CRC
                                  | EHEA_SWQE_IP_CHECKSUM
@@ -1705,13 +1706,12 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
                 write_ip_start_end(swqe, skb);

                 if (iph->protocol == IPPROTO_UDP) {
-                        if ((iph->frag_off & IP_MF) ||
-                            (iph->frag_off & IP_OFFSET))
+                        if ((iph->frag_off & IP_MF)
+                            || (iph->frag_off & IP_OFFSET))
                                 /* IP fragment, so don't change cs */
                                 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
                         else
                                 write_udp_offset_end(swqe, skb);
-
                 } else if (iph->protocol == IPPROTO_TCP) {
                         write_tcp_offset_end(swqe, skb);
                 }
@@ -1739,6 +1739,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,

         if (skb->protocol == htons(ETH_P_IP)) {
                 const struct iphdr *iph = ip_hdr(skb);
+
                 /* IPv4 */
                 write_ip_start_end(swqe, skb);

@@ -1751,8 +1752,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
                         write_tcp_offset_end(swqe, skb);

                 } else if (iph->protocol == IPPROTO_UDP) {
-                        if ((iph->frag_off & IP_MF) ||
-                            (iph->frag_off & IP_OFFSET))
+                        if ((iph->frag_off & IP_MF)
+                            || (iph->frag_off & IP_OFFSET))
                                 /* IP fragment, so don't change cs */
                                 swqe->tx_control |= EHEA_SWQE_CRC
                                                  | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -2407,7 +2408,7 @@ static void __devinit logical_port_release(struct device *dev)
 }

 static int ehea_driver_sysfs_add(struct device *dev,
                                  struct device_driver *driver)
 {
         int ret;

@@ -2424,7 +2425,7 @@ static int ehea_driver_sysfs_add(struct device *dev,
 }

 static void ehea_driver_sysfs_remove(struct device *dev,
                                      struct device_driver *driver)
 {
         struct device_driver *drv = driver;

@@ -2453,7 +2454,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
         }

         ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
         if (ret) {
                 ehea_error("failed to register attributes, ret=%d", ret);
                 goto out_unreg_of_dev;
         }
@@ -2601,6 +2602,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 {
         struct device_node *lhea_dn;
         struct device_node *eth_dn = NULL;
+
         const u32 *dn_log_port_id;
         int i = 0;

@@ -2608,7 +2610,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                  NULL);
                 if (!dn_log_port_id) {
                         ehea_error("bad device node: eth_dn name=%s",
                                    eth_dn->full_name);
@@ -2648,7 +2650,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

                 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
                                                  NULL);
                 if (dn_log_port_id)
                         if (*dn_log_port_id == logical_port_id)
                                 return eth_dn;
@@ -2789,7 +2791,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
         adapter->ebus_dev = dev;

         adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
                                          NULL);
         if (adapter_handle)
                 adapter->handle = *adapter_handle;

diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index f24a8862977d..29eaa46948b0 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -211,7 +211,7 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
         u64 hret;
         u64 adapter_handle = cq->adapter->handle;

         /* deregister all previous registered pages */
         hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
         if (hret != H_SUCCESS)
                 return hret;
@@ -362,7 +362,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
         if (hret != H_SUCCESS) {
                 ehea_error("destroy EQ failed");
                 return -EIO;
         }

         return 0;
 }
@@ -507,44 +507,44 @@ out_freemem:

 u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
         u64 hret;
         struct ehea_qp_init_attr *qp_attr = &qp->init_attr;


         ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
         hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
         if (hret != H_SUCCESS)
                 return hret;

         hw_queue_dtor(&qp->hw_squeue);
         hw_queue_dtor(&qp->hw_rqueue1);

         if (qp_attr->rq_count > 1)
                 hw_queue_dtor(&qp->hw_rqueue2);
         if (qp_attr->rq_count > 2)
                 hw_queue_dtor(&qp->hw_rqueue3);
         kfree(qp);

         return hret;
 }

 int ehea_destroy_qp(struct ehea_qp *qp)
 {
         u64 hret;
         if (!qp)
                 return 0;

         if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
                 ehea_error_data(qp->adapter, qp->fw_handle);
                 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
         }

         if (hret != H_SUCCESS) {
                 ehea_error("destroy QP failed");
                 return -EIO;
         }

         return 0;
 }

 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)