diff options
author | Joe Perches <joe@perches.com> | 2010-12-13 13:05:14 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-12-13 13:05:14 -0500 |
commit | 8c4877a4128e7931077b024a891a4b284d8756a3 (patch) | |
tree | eae8569651f3383abfa7c6a3789cd7c563f92346 | |
parent | 323e126f0c5995f779d7df7fd035f6e8fed8764d (diff) |
ehea: Use the standard logging functions
Remove ehea_error, ehea_info and ehea_debug macros.
Use pr_fmt, pr_<level>, netdev_<level> and netif_<level> as appropriate.
Fix messages to use a trailing "\n"; some messages had an extra one
because the old ehea_<level> macros automatically appended a trailing "\n".
Coalesce long format strings.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ehea/ehea.h | 13 | ||||
-rw-r--r-- | drivers/net/ehea/ehea_ethtool.c | 18 | ||||
-rw-r--r-- | drivers/net/ehea/ehea_main.c | 415 | ||||
-rw-r--r-- | drivers/net/ehea/ehea_phyp.c | 40 | ||||
-rw-r--r-- | drivers/net/ehea/ehea_qmr.c | 89 |
5 files changed, 278 insertions, 297 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 1f2a675649b7..a724a2d14506 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -130,19 +130,6 @@ | |||
130 | 130 | ||
131 | /* utility functions */ | 131 | /* utility functions */ |
132 | 132 | ||
133 | #define ehea_info(fmt, args...) \ | ||
134 | printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args) | ||
135 | |||
136 | #define ehea_error(fmt, args...) \ | ||
137 | printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args) | ||
138 | |||
139 | #ifdef DEBUG | ||
140 | #define ehea_debug(fmt, args...) \ | ||
141 | printk(KERN_DEBUG DRV_NAME ": " fmt, ## args) | ||
142 | #else | ||
143 | #define ehea_debug(fmt, args...) do {} while (0) | ||
144 | #endif | ||
145 | |||
146 | void ehea_dump(void *adr, int len, char *msg); | 133 | void ehea_dump(void *adr, int len, char *msg); |
147 | 134 | ||
148 | #define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) | 135 | #define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) |
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 1f37ee6b2a26..afebf2075779 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include "ehea.h" | 31 | #include "ehea.h" |
30 | #include "ehea_phyp.h" | 32 | #include "ehea_phyp.h" |
31 | 33 | ||
@@ -118,10 +120,10 @@ doit: | |||
118 | ret = ehea_set_portspeed(port, sp); | 120 | ret = ehea_set_portspeed(port, sp); |
119 | 121 | ||
120 | if (!ret) | 122 | if (!ret) |
121 | ehea_info("%s: Port speed successfully set: %dMbps " | 123 | netdev_info(dev, |
122 | "%s Duplex", | 124 | "Port speed successfully set: %dMbps %s Duplex\n", |
123 | port->netdev->name, port->port_speed, | 125 | port->port_speed, |
124 | port->full_duplex == 1 ? "Full" : "Half"); | 126 | port->full_duplex == 1 ? "Full" : "Half"); |
125 | out: | 127 | out: |
126 | return ret; | 128 | return ret; |
127 | } | 129 | } |
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev) | |||
134 | ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); | 136 | ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); |
135 | 137 | ||
136 | if (!ret) | 138 | if (!ret) |
137 | ehea_info("%s: Port speed successfully set: %dMbps " | 139 | netdev_info(port->netdev, |
138 | "%s Duplex", | 140 | "Port speed successfully set: %dMbps %s Duplex\n", |
139 | port->netdev->name, port->port_speed, | 141 | port->port_speed, |
140 | port->full_duplex == 1 ? "Full" : "Half"); | 142 | port->full_duplex == 1 ? "Full" : "Half"); |
141 | return ret; | 143 | return ret; |
142 | } | 144 | } |
143 | 145 | ||
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 81e5b7b49e9e..0dfef6d76445 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include <linux/in.h> | 31 | #include <linux/in.h> |
30 | #include <linux/ip.h> | 32 | #include <linux/ip.h> |
31 | #include <linux/tcp.h> | 33 | #include <linux/tcp.h> |
@@ -135,8 +137,8 @@ void ehea_dump(void *adr, int len, char *msg) | |||
135 | int x; | 137 | int x; |
136 | unsigned char *deb = adr; | 138 | unsigned char *deb = adr; |
137 | for (x = 0; x < len; x += 16) { | 139 | for (x = 0; x < len; x += 16) { |
138 | printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg, | 140 | pr_info("%s adr=%p ofs=%04x %016llx %016llx\n", |
139 | deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); | 141 | msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); |
140 | deb += 16; | 142 | deb += 16; |
141 | } | 143 | } |
142 | } | 144 | } |
@@ -336,7 +338,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) | |||
336 | 338 | ||
337 | cb2 = (void *)get_zeroed_page(GFP_KERNEL); | 339 | cb2 = (void *)get_zeroed_page(GFP_KERNEL); |
338 | if (!cb2) { | 340 | if (!cb2) { |
339 | ehea_error("no mem for cb2"); | 341 | netdev_err(dev, "no mem for cb2\n"); |
340 | goto out; | 342 | goto out; |
341 | } | 343 | } |
342 | 344 | ||
@@ -344,7 +346,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev) | |||
344 | port->logical_port_id, | 346 | port->logical_port_id, |
345 | H_PORT_CB2, H_PORT_CB2_ALL, cb2); | 347 | H_PORT_CB2, H_PORT_CB2_ALL, cb2); |
346 | if (hret != H_SUCCESS) { | 348 | if (hret != H_SUCCESS) { |
347 | ehea_error("query_ehea_port failed"); | 349 | netdev_err(dev, "query_ehea_port failed\n"); |
348 | goto out_herr; | 350 | goto out_herr; |
349 | } | 351 | } |
350 | 352 | ||
@@ -399,7 +401,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) | |||
399 | skb_arr_rq1[index] = netdev_alloc_skb(dev, | 401 | skb_arr_rq1[index] = netdev_alloc_skb(dev, |
400 | EHEA_L_PKT_SIZE); | 402 | EHEA_L_PKT_SIZE); |
401 | if (!skb_arr_rq1[index]) { | 403 | if (!skb_arr_rq1[index]) { |
402 | ehea_info("Unable to allocate enough skb in the array\n"); | 404 | netdev_info(dev, "Unable to allocate enough skb in the array\n"); |
403 | pr->rq1_skba.os_skbs = fill_wqes - i; | 405 | pr->rq1_skba.os_skbs = fill_wqes - i; |
404 | break; | 406 | break; |
405 | } | 407 | } |
@@ -423,14 +425,14 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) | |||
423 | int i; | 425 | int i; |
424 | 426 | ||
425 | if (nr_rq1a > pr->rq1_skba.len) { | 427 | if (nr_rq1a > pr->rq1_skba.len) { |
426 | ehea_error("NR_RQ1A bigger than skb array len\n"); | 428 | netdev_err(dev, "NR_RQ1A bigger than skb array len\n"); |
427 | return; | 429 | return; |
428 | } | 430 | } |
429 | 431 | ||
430 | for (i = 0; i < nr_rq1a; i++) { | 432 | for (i = 0; i < nr_rq1a; i++) { |
431 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); | 433 | skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); |
432 | if (!skb_arr_rq1[i]) { | 434 | if (!skb_arr_rq1[i]) { |
433 | ehea_info("No enough memory to allocate skb array\n"); | 435 | netdev_info(dev, "Not enough memory to allocate skb array\n"); |
434 | break; | 436 | break; |
435 | } | 437 | } |
436 | } | 438 | } |
@@ -468,8 +470,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr, | |||
468 | if (!skb) { | 470 | if (!skb) { |
469 | q_skba->os_skbs = fill_wqes - i; | 471 | q_skba->os_skbs = fill_wqes - i; |
470 | if (q_skba->os_skbs == q_skba->len - 2) { | 472 | if (q_skba->os_skbs == q_skba->len - 2) { |
471 | ehea_info("%s: rq%i ran dry - no mem for skb", | 473 | netdev_info(pr->port->netdev, |
472 | pr->port->netdev->name, rq_nr); | 474 | "rq%i ran dry - no mem for skb\n", |
475 | rq_nr); | ||
473 | ret = -ENOMEM; | 476 | ret = -ENOMEM; |
474 | } | 477 | } |
475 | break; | 478 | break; |
@@ -634,8 +637,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq, | |||
634 | 637 | ||
635 | if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { | 638 | if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { |
636 | if (netif_msg_rx_err(pr->port)) { | 639 | if (netif_msg_rx_err(pr->port)) { |
637 | ehea_error("Critical receive error for QP %d. " | 640 | pr_err("Critical receive error for QP %d. Resetting port.\n", |
638 | "Resetting port.", pr->qp->init_attr.qp_nr); | 641 | pr->qp->init_attr.qp_nr); |
639 | ehea_dump(cqe, sizeof(*cqe), "CQE"); | 642 | ehea_dump(cqe, sizeof(*cqe), "CQE"); |
640 | } | 643 | } |
641 | ehea_schedule_port_reset(pr->port); | 644 | ehea_schedule_port_reset(pr->port); |
@@ -737,13 +740,13 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
737 | skb_arr_rq1_len, | 740 | skb_arr_rq1_len, |
738 | wqe_index); | 741 | wqe_index); |
739 | if (unlikely(!skb)) { | 742 | if (unlikely(!skb)) { |
740 | if (netif_msg_rx_err(port)) | 743 | netif_err(port, rx_err, dev, |
741 | ehea_error("LL rq1: skb=NULL"); | 744 | "LL rq1: skb=NULL\n"); |
742 | 745 | ||
743 | skb = netdev_alloc_skb(dev, | 746 | skb = netdev_alloc_skb(dev, |
744 | EHEA_L_PKT_SIZE); | 747 | EHEA_L_PKT_SIZE); |
745 | if (!skb) { | 748 | if (!skb) { |
746 | ehea_info("Not enough memory to allocate skb\n"); | 749 | netdev_info(dev, "Not enough memory to allocate skb\n"); |
747 | break; | 750 | break; |
748 | } | 751 | } |
749 | } | 752 | } |
@@ -755,8 +758,8 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
755 | skb = get_skb_by_index(skb_arr_rq2, | 758 | skb = get_skb_by_index(skb_arr_rq2, |
756 | skb_arr_rq2_len, cqe); | 759 | skb_arr_rq2_len, cqe); |
757 | if (unlikely(!skb)) { | 760 | if (unlikely(!skb)) { |
758 | if (netif_msg_rx_err(port)) | 761 | netif_err(port, rx_err, dev, |
759 | ehea_error("rq2: skb=NULL"); | 762 | "rq2: skb=NULL\n"); |
760 | break; | 763 | break; |
761 | } | 764 | } |
762 | ehea_fill_skb(dev, skb, cqe); | 765 | ehea_fill_skb(dev, skb, cqe); |
@@ -766,8 +769,8 @@ static int ehea_proc_rwqes(struct net_device *dev, | |||
766 | skb = get_skb_by_index(skb_arr_rq3, | 769 | skb = get_skb_by_index(skb_arr_rq3, |
767 | skb_arr_rq3_len, cqe); | 770 | skb_arr_rq3_len, cqe); |
768 | if (unlikely(!skb)) { | 771 | if (unlikely(!skb)) { |
769 | if (netif_msg_rx_err(port)) | 772 | netif_err(port, rx_err, dev, |
770 | ehea_error("rq3: skb=NULL"); | 773 | "rq3: skb=NULL\n"); |
771 | break; | 774 | break; |
772 | } | 775 | } |
773 | ehea_fill_skb(dev, skb, cqe); | 776 | ehea_fill_skb(dev, skb, cqe); |
@@ -839,7 +842,7 @@ static void check_sqs(struct ehea_port *port) | |||
839 | msecs_to_jiffies(100)); | 842 | msecs_to_jiffies(100)); |
840 | 843 | ||
841 | if (!ret) { | 844 | if (!ret) { |
842 | ehea_error("HW/SW queues out of sync"); | 845 | pr_err("HW/SW queues out of sync\n"); |
843 | ehea_schedule_port_reset(pr->port); | 846 | ehea_schedule_port_reset(pr->port); |
844 | return; | 847 | return; |
845 | } | 848 | } |
@@ -872,14 +875,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) | |||
872 | } | 875 | } |
873 | 876 | ||
874 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { | 877 | if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { |
875 | ehea_error("Bad send completion status=0x%04X", | 878 | pr_err("Bad send completion status=0x%04X\n", |
876 | cqe->status); | 879 | cqe->status); |
877 | 880 | ||
878 | if (netif_msg_tx_err(pr->port)) | 881 | if (netif_msg_tx_err(pr->port)) |
879 | ehea_dump(cqe, sizeof(*cqe), "Send CQE"); | 882 | ehea_dump(cqe, sizeof(*cqe), "Send CQE"); |
880 | 883 | ||
881 | if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { | 884 | if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { |
882 | ehea_error("Resetting port"); | 885 | pr_err("Resetting port\n"); |
883 | ehea_schedule_port_reset(pr->port); | 886 | ehea_schedule_port_reset(pr->port); |
884 | break; | 887 | break; |
885 | } | 888 | } |
@@ -997,8 +1000,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) | |||
997 | 1000 | ||
998 | while (eqe) { | 1001 | while (eqe) { |
999 | qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); | 1002 | qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); |
1000 | ehea_error("QP aff_err: entry=0x%llx, token=0x%x", | 1003 | pr_err("QP aff_err: entry=0x%llx, token=0x%x\n", |
1001 | eqe->entry, qp_token); | 1004 | eqe->entry, qp_token); |
1002 | 1005 | ||
1003 | qp = port->port_res[qp_token].qp; | 1006 | qp = port->port_res[qp_token].qp; |
1004 | 1007 | ||
@@ -1016,7 +1019,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param) | |||
1016 | } | 1019 | } |
1017 | 1020 | ||
1018 | if (reset_port) { | 1021 | if (reset_port) { |
1019 | ehea_error("Resetting port"); | 1022 | pr_err("Resetting port\n"); |
1020 | ehea_schedule_port_reset(port); | 1023 | ehea_schedule_port_reset(port); |
1021 | } | 1024 | } |
1022 | 1025 | ||
@@ -1044,7 +1047,7 @@ int ehea_sense_port_attr(struct ehea_port *port) | |||
1044 | /* may be called via ehea_neq_tasklet() */ | 1047 | /* may be called via ehea_neq_tasklet() */ |
1045 | cb0 = (void *)get_zeroed_page(GFP_ATOMIC); | 1048 | cb0 = (void *)get_zeroed_page(GFP_ATOMIC); |
1046 | if (!cb0) { | 1049 | if (!cb0) { |
1047 | ehea_error("no mem for cb0"); | 1050 | pr_err("no mem for cb0\n"); |
1048 | ret = -ENOMEM; | 1051 | ret = -ENOMEM; |
1049 | goto out; | 1052 | goto out; |
1050 | } | 1053 | } |
@@ -1136,7 +1139,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | |||
1136 | 1139 | ||
1137 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); | 1140 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); |
1138 | if (!cb4) { | 1141 | if (!cb4) { |
1139 | ehea_error("no mem for cb4"); | 1142 | pr_err("no mem for cb4\n"); |
1140 | ret = -ENOMEM; | 1143 | ret = -ENOMEM; |
1141 | goto out; | 1144 | goto out; |
1142 | } | 1145 | } |
@@ -1187,16 +1190,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed) | |||
1187 | break; | 1190 | break; |
1188 | } | 1191 | } |
1189 | } else { | 1192 | } else { |
1190 | ehea_error("Failed sensing port speed"); | 1193 | pr_err("Failed sensing port speed\n"); |
1191 | ret = -EIO; | 1194 | ret = -EIO; |
1192 | } | 1195 | } |
1193 | } else { | 1196 | } else { |
1194 | if (hret == H_AUTHORITY) { | 1197 | if (hret == H_AUTHORITY) { |
1195 | ehea_info("Hypervisor denied setting port speed"); | 1198 | pr_info("Hypervisor denied setting port speed\n"); |
1196 | ret = -EPERM; | 1199 | ret = -EPERM; |
1197 | } else { | 1200 | } else { |
1198 | ret = -EIO; | 1201 | ret = -EIO; |
1199 | ehea_error("Failed setting port speed"); | 1202 | pr_err("Failed setting port speed\n"); |
1200 | } | 1203 | } |
1201 | } | 1204 | } |
1202 | if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) | 1205 | if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) |
@@ -1213,80 +1216,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) | |||
1213 | u8 ec; | 1216 | u8 ec; |
1214 | u8 portnum; | 1217 | u8 portnum; |
1215 | struct ehea_port *port; | 1218 | struct ehea_port *port; |
1219 | struct net_device *dev; | ||
1216 | 1220 | ||
1217 | ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); | 1221 | ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); |
1218 | portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); | 1222 | portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); |
1219 | port = ehea_get_port(adapter, portnum); | 1223 | port = ehea_get_port(adapter, portnum); |
1224 | dev = port->netdev; | ||
1220 | 1225 | ||
1221 | switch (ec) { | 1226 | switch (ec) { |
1222 | case EHEA_EC_PORTSTATE_CHG: /* port state change */ | 1227 | case EHEA_EC_PORTSTATE_CHG: /* port state change */ |
1223 | 1228 | ||
1224 | if (!port) { | 1229 | if (!port) { |
1225 | ehea_error("unknown portnum %x", portnum); | 1230 | netdev_err(dev, "unknown portnum %x\n", portnum); |
1226 | break; | 1231 | break; |
1227 | } | 1232 | } |
1228 | 1233 | ||
1229 | if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { | 1234 | if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { |
1230 | if (!netif_carrier_ok(port->netdev)) { | 1235 | if (!netif_carrier_ok(dev)) { |
1231 | ret = ehea_sense_port_attr(port); | 1236 | ret = ehea_sense_port_attr(port); |
1232 | if (ret) { | 1237 | if (ret) { |
1233 | ehea_error("failed resensing port " | 1238 | netdev_err(dev, "failed resensing port attributes\n"); |
1234 | "attributes"); | ||
1235 | break; | 1239 | break; |
1236 | } | 1240 | } |
1237 | 1241 | ||
1238 | if (netif_msg_link(port)) | 1242 | netif_info(port, link, dev, |
1239 | ehea_info("%s: Logical port up: %dMbps " | 1243 | "Logical port up: %dMbps %s Duplex\n", |
1240 | "%s Duplex", | 1244 | port->port_speed, |
1241 | port->netdev->name, | 1245 | port->full_duplex == 1 ? |
1242 | port->port_speed, | 1246 | "Full" : "Half"); |
1243 | port->full_duplex == | ||
1244 | 1 ? "Full" : "Half"); | ||
1245 | 1247 | ||
1246 | netif_carrier_on(port->netdev); | 1248 | netif_carrier_on(dev); |
1247 | netif_wake_queue(port->netdev); | 1249 | netif_wake_queue(dev); |
1248 | } | 1250 | } |
1249 | } else | 1251 | } else |
1250 | if (netif_carrier_ok(port->netdev)) { | 1252 | if (netif_carrier_ok(dev)) { |
1251 | if (netif_msg_link(port)) | 1253 | netif_info(port, link, dev, |
1252 | ehea_info("%s: Logical port down", | 1254 | "Logical port down\n"); |
1253 | port->netdev->name); | 1255 | netif_carrier_off(dev); |
1254 | netif_carrier_off(port->netdev); | 1256 | netif_stop_queue(dev); |
1255 | netif_stop_queue(port->netdev); | ||
1256 | } | 1257 | } |
1257 | 1258 | ||
1258 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { | 1259 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { |
1259 | port->phy_link = EHEA_PHY_LINK_UP; | 1260 | port->phy_link = EHEA_PHY_LINK_UP; |
1260 | if (netif_msg_link(port)) | 1261 | netif_info(port, link, dev, |
1261 | ehea_info("%s: Physical port up", | 1262 | "Physical port up\n"); |
1262 | port->netdev->name); | ||
1263 | if (prop_carrier_state) | 1263 | if (prop_carrier_state) |
1264 | netif_carrier_on(port->netdev); | 1264 | netif_carrier_on(dev); |
1265 | } else { | 1265 | } else { |
1266 | port->phy_link = EHEA_PHY_LINK_DOWN; | 1266 | port->phy_link = EHEA_PHY_LINK_DOWN; |
1267 | if (netif_msg_link(port)) | 1267 | netif_info(port, link, dev, |
1268 | ehea_info("%s: Physical port down", | 1268 | "Physical port down\n"); |
1269 | port->netdev->name); | ||
1270 | if (prop_carrier_state) | 1269 | if (prop_carrier_state) |
1271 | netif_carrier_off(port->netdev); | 1270 | netif_carrier_off(dev); |
1272 | } | 1271 | } |
1273 | 1272 | ||
1274 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) | 1273 | if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) |
1275 | ehea_info("External switch port is primary port"); | 1274 | netdev_info(dev, |
1275 | "External switch port is primary port\n"); | ||
1276 | else | 1276 | else |
1277 | ehea_info("External switch port is backup port"); | 1277 | netdev_info(dev, |
1278 | "External switch port is backup port\n"); | ||
1278 | 1279 | ||
1279 | break; | 1280 | break; |
1280 | case EHEA_EC_ADAPTER_MALFUNC: | 1281 | case EHEA_EC_ADAPTER_MALFUNC: |
1281 | ehea_error("Adapter malfunction"); | 1282 | netdev_err(dev, "Adapter malfunction\n"); |
1282 | break; | 1283 | break; |
1283 | case EHEA_EC_PORT_MALFUNC: | 1284 | case EHEA_EC_PORT_MALFUNC: |
1284 | ehea_info("Port malfunction: Device: %s", port->netdev->name); | 1285 | netdev_info(dev, "Port malfunction\n"); |
1285 | netif_carrier_off(port->netdev); | 1286 | netif_carrier_off(dev); |
1286 | netif_stop_queue(port->netdev); | 1287 | netif_stop_queue(dev); |
1287 | break; | 1288 | break; |
1288 | default: | 1289 | default: |
1289 | ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe); | 1290 | netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe); |
1290 | break; | 1291 | break; |
1291 | } | 1292 | } |
1292 | } | 1293 | } |
@@ -1298,13 +1299,13 @@ static void ehea_neq_tasklet(unsigned long data) | |||
1298 | u64 event_mask; | 1299 | u64 event_mask; |
1299 | 1300 | ||
1300 | eqe = ehea_poll_eq(adapter->neq); | 1301 | eqe = ehea_poll_eq(adapter->neq); |
1301 | ehea_debug("eqe=%p", eqe); | 1302 | pr_debug("eqe=%p\n", eqe); |
1302 | 1303 | ||
1303 | while (eqe) { | 1304 | while (eqe) { |
1304 | ehea_debug("*eqe=%lx", eqe->entry); | 1305 | pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry); |
1305 | ehea_parse_eqe(adapter, eqe->entry); | 1306 | ehea_parse_eqe(adapter, eqe->entry); |
1306 | eqe = ehea_poll_eq(adapter->neq); | 1307 | eqe = ehea_poll_eq(adapter->neq); |
1307 | ehea_debug("next eqe=%p", eqe); | 1308 | pr_debug("next eqe=%p\n", eqe); |
1308 | } | 1309 | } |
1309 | 1310 | ||
1310 | event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) | 1311 | event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) |
@@ -1353,14 +1354,14 @@ static int ehea_reg_interrupts(struct net_device *dev) | |||
1353 | ehea_qp_aff_irq_handler, | 1354 | ehea_qp_aff_irq_handler, |
1354 | IRQF_DISABLED, port->int_aff_name, port); | 1355 | IRQF_DISABLED, port->int_aff_name, port); |
1355 | if (ret) { | 1356 | if (ret) { |
1356 | ehea_error("failed registering irq for qp_aff_irq_handler:" | 1357 | netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", |
1357 | "ist=%X", port->qp_eq->attr.ist1); | 1358 | port->qp_eq->attr.ist1); |
1358 | goto out_free_qpeq; | 1359 | goto out_free_qpeq; |
1359 | } | 1360 | } |
1360 | 1361 | ||
1361 | if (netif_msg_ifup(port)) | 1362 | netif_info(port, ifup, dev, |
1362 | ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " | 1363 | "irq_handle 0x%X for function qp_aff_irq_handler registered\n", |
1363 | "registered", port->qp_eq->attr.ist1); | 1364 | port->qp_eq->attr.ist1); |
1364 | 1365 | ||
1365 | 1366 | ||
1366 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | 1367 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { |
@@ -1372,14 +1373,13 @@ static int ehea_reg_interrupts(struct net_device *dev) | |||
1372 | IRQF_DISABLED, pr->int_send_name, | 1373 | IRQF_DISABLED, pr->int_send_name, |
1373 | pr); | 1374 | pr); |
1374 | if (ret) { | 1375 | if (ret) { |
1375 | ehea_error("failed registering irq for ehea_queue " | 1376 | netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", |
1376 | "port_res_nr:%d, ist=%X", i, | 1377 | i, pr->eq->attr.ist1); |
1377 | pr->eq->attr.ist1); | ||
1378 | goto out_free_req; | 1378 | goto out_free_req; |
1379 | } | 1379 | } |
1380 | if (netif_msg_ifup(port)) | 1380 | netif_info(port, ifup, dev, |
1381 | ehea_info("irq_handle 0x%X for function ehea_queue_int " | 1381 | "irq_handle 0x%X for function ehea_queue_int %d registered\n", |
1382 | "%d registered", pr->eq->attr.ist1, i); | 1382 | pr->eq->attr.ist1, i); |
1383 | } | 1383 | } |
1384 | out: | 1384 | out: |
1385 | return ret; | 1385 | return ret; |
@@ -1410,16 +1410,16 @@ static void ehea_free_interrupts(struct net_device *dev) | |||
1410 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | 1410 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { |
1411 | pr = &port->port_res[i]; | 1411 | pr = &port->port_res[i]; |
1412 | ibmebus_free_irq(pr->eq->attr.ist1, pr); | 1412 | ibmebus_free_irq(pr->eq->attr.ist1, pr); |
1413 | if (netif_msg_intr(port)) | 1413 | netif_info(port, intr, dev, |
1414 | ehea_info("free send irq for res %d with handle 0x%X", | 1414 | "free send irq for res %d with handle 0x%X\n", |
1415 | i, pr->eq->attr.ist1); | 1415 | i, pr->eq->attr.ist1); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | /* associated events */ | 1418 | /* associated events */ |
1419 | ibmebus_free_irq(port->qp_eq->attr.ist1, port); | 1419 | ibmebus_free_irq(port->qp_eq->attr.ist1, port); |
1420 | if (netif_msg_intr(port)) | 1420 | netif_info(port, intr, dev, |
1421 | ehea_info("associated event interrupt for handle 0x%X freed", | 1421 | "associated event interrupt for handle 0x%X freed\n", |
1422 | port->qp_eq->attr.ist1); | 1422 | port->qp_eq->attr.ist1); |
1423 | } | 1423 | } |
1424 | 1424 | ||
1425 | static int ehea_configure_port(struct ehea_port *port) | 1425 | static int ehea_configure_port(struct ehea_port *port) |
@@ -1488,7 +1488,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr) | |||
1488 | out_free: | 1488 | out_free: |
1489 | ehea_rem_mr(&pr->send_mr); | 1489 | ehea_rem_mr(&pr->send_mr); |
1490 | out: | 1490 | out: |
1491 | ehea_error("Generating SMRS failed\n"); | 1491 | pr_err("Generating SMRS failed\n"); |
1492 | return -EIO; | 1492 | return -EIO; |
1493 | } | 1493 | } |
1494 | 1494 | ||
@@ -1543,7 +1543,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |||
1543 | 1543 | ||
1544 | pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); | 1544 | pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); |
1545 | if (!pr->eq) { | 1545 | if (!pr->eq) { |
1546 | ehea_error("create_eq failed (eq)"); | 1546 | pr_err("create_eq failed (eq)\n"); |
1547 | goto out_free; | 1547 | goto out_free; |
1548 | } | 1548 | } |
1549 | 1549 | ||
@@ -1551,7 +1551,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |||
1551 | pr->eq->fw_handle, | 1551 | pr->eq->fw_handle, |
1552 | port->logical_port_id); | 1552 | port->logical_port_id); |
1553 | if (!pr->recv_cq) { | 1553 | if (!pr->recv_cq) { |
1554 | ehea_error("create_cq failed (cq_recv)"); | 1554 | pr_err("create_cq failed (cq_recv)\n"); |
1555 | goto out_free; | 1555 | goto out_free; |
1556 | } | 1556 | } |
1557 | 1557 | ||
@@ -1559,19 +1559,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |||
1559 | pr->eq->fw_handle, | 1559 | pr->eq->fw_handle, |
1560 | port->logical_port_id); | 1560 | port->logical_port_id); |
1561 | if (!pr->send_cq) { | 1561 | if (!pr->send_cq) { |
1562 | ehea_error("create_cq failed (cq_send)"); | 1562 | pr_err("create_cq failed (cq_send)\n"); |
1563 | goto out_free; | 1563 | goto out_free; |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | if (netif_msg_ifup(port)) | 1566 | if (netif_msg_ifup(port)) |
1567 | ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", | 1567 | pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n", |
1568 | pr->send_cq->attr.act_nr_of_cqes, | 1568 | pr->send_cq->attr.act_nr_of_cqes, |
1569 | pr->recv_cq->attr.act_nr_of_cqes); | 1569 | pr->recv_cq->attr.act_nr_of_cqes); |
1570 | 1570 | ||
1571 | init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); | 1571 | init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); |
1572 | if (!init_attr) { | 1572 | if (!init_attr) { |
1573 | ret = -ENOMEM; | 1573 | ret = -ENOMEM; |
1574 | ehea_error("no mem for ehea_qp_init_attr"); | 1574 | pr_err("no mem for ehea_qp_init_attr\n"); |
1575 | goto out_free; | 1575 | goto out_free; |
1576 | } | 1576 | } |
1577 | 1577 | ||
@@ -1596,18 +1596,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr, | |||
1596 | 1596 | ||
1597 | pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); | 1597 | pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); |
1598 | if (!pr->qp) { | 1598 | if (!pr->qp) { |
1599 | ehea_error("create_qp failed"); | 1599 | pr_err("create_qp failed\n"); |
1600 | ret = -EIO; | 1600 | ret = -EIO; |
1601 | goto out_free; | 1601 | goto out_free; |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | if (netif_msg_ifup(port)) | 1604 | if (netif_msg_ifup(port)) |
1605 | ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " | 1605 | pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n", |
1606 | "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, | 1606 | init_attr->qp_nr, |
1607 | init_attr->act_nr_send_wqes, | 1607 | init_attr->act_nr_send_wqes, |
1608 | init_attr->act_nr_rwqes_rq1, | 1608 | init_attr->act_nr_rwqes_rq1, |
1609 | init_attr->act_nr_rwqes_rq2, | 1609 | init_attr->act_nr_rwqes_rq2, |
1610 | init_attr->act_nr_rwqes_rq3); | 1610 | init_attr->act_nr_rwqes_rq3); |
1611 | 1611 | ||
1612 | pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; | 1612 | pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; |
1613 | 1613 | ||
@@ -1758,7 +1758,7 @@ static void write_swqe2_TSO(struct sk_buff *skb, | |||
1758 | swqe->descriptors++; | 1758 | swqe->descriptors++; |
1759 | } | 1759 | } |
1760 | } else | 1760 | } else |
1761 | ehea_error("cannot handle fragmented headers"); | 1761 | pr_err("cannot handle fragmented headers\n"); |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | static void write_swqe2_nonTSO(struct sk_buff *skb, | 1764 | static void write_swqe2_nonTSO(struct sk_buff *skb, |
@@ -1854,8 +1854,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) | |||
1854 | port->logical_port_id, | 1854 | port->logical_port_id, |
1855 | reg_type, port->mac_addr, 0, hcallid); | 1855 | reg_type, port->mac_addr, 0, hcallid); |
1856 | if (hret != H_SUCCESS) { | 1856 | if (hret != H_SUCCESS) { |
1857 | ehea_error("%sregistering bc address failed (tagged)", | 1857 | pr_err("%sregistering bc address failed (tagged)\n", |
1858 | hcallid == H_REG_BCMC ? "" : "de"); | 1858 | hcallid == H_REG_BCMC ? "" : "de"); |
1859 | ret = -EIO; | 1859 | ret = -EIO; |
1860 | goto out_herr; | 1860 | goto out_herr; |
1861 | } | 1861 | } |
@@ -1866,8 +1866,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid) | |||
1866 | port->logical_port_id, | 1866 | port->logical_port_id, |
1867 | reg_type, port->mac_addr, 0, hcallid); | 1867 | reg_type, port->mac_addr, 0, hcallid); |
1868 | if (hret != H_SUCCESS) { | 1868 | if (hret != H_SUCCESS) { |
1869 | ehea_error("%sregistering bc address failed (vlan)", | 1869 | pr_err("%sregistering bc address failed (vlan)\n", |
1870 | hcallid == H_REG_BCMC ? "" : "de"); | 1870 | hcallid == H_REG_BCMC ? "" : "de"); |
1871 | ret = -EIO; | 1871 | ret = -EIO; |
1872 | } | 1872 | } |
1873 | out_herr: | 1873 | out_herr: |
@@ -1889,7 +1889,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1889 | 1889 | ||
1890 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); | 1890 | cb0 = (void *)get_zeroed_page(GFP_KERNEL); |
1891 | if (!cb0) { | 1891 | if (!cb0) { |
1892 | ehea_error("no mem for cb0"); | 1892 | pr_err("no mem for cb0\n"); |
1893 | ret = -ENOMEM; | 1893 | ret = -ENOMEM; |
1894 | goto out; | 1894 | goto out; |
1895 | } | 1895 | } |
@@ -1937,11 +1937,11 @@ out: | |||
1937 | static void ehea_promiscuous_error(u64 hret, int enable) | 1937 | static void ehea_promiscuous_error(u64 hret, int enable) |
1938 | { | 1938 | { |
1939 | if (hret == H_AUTHORITY) | 1939 | if (hret == H_AUTHORITY) |
1940 | ehea_info("Hypervisor denied %sabling promiscuous mode", | 1940 | pr_info("Hypervisor denied %sabling promiscuous mode\n", |
1941 | enable == 1 ? "en" : "dis"); | 1941 | enable == 1 ? "en" : "dis"); |
1942 | else | 1942 | else |
1943 | ehea_error("failed %sabling promiscuous mode", | 1943 | pr_err("failed %sabling promiscuous mode\n", |
1944 | enable == 1 ? "en" : "dis"); | 1944 | enable == 1 ? "en" : "dis"); |
1945 | } | 1945 | } |
1946 | 1946 | ||
1947 | static void ehea_promiscuous(struct net_device *dev, int enable) | 1947 | static void ehea_promiscuous(struct net_device *dev, int enable) |
@@ -1955,7 +1955,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable) | |||
1955 | 1955 | ||
1956 | cb7 = (void *)get_zeroed_page(GFP_ATOMIC); | 1956 | cb7 = (void *)get_zeroed_page(GFP_ATOMIC); |
1957 | if (!cb7) { | 1957 | if (!cb7) { |
1958 | ehea_error("no mem for cb7"); | 1958 | pr_err("no mem for cb7\n"); |
1959 | goto out; | 1959 | goto out; |
1960 | } | 1960 | } |
1961 | 1961 | ||
@@ -2015,7 +2015,7 @@ static int ehea_drop_multicast_list(struct net_device *dev) | |||
2015 | hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, | 2015 | hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, |
2016 | H_DEREG_BCMC); | 2016 | H_DEREG_BCMC); |
2017 | if (hret) { | 2017 | if (hret) { |
2018 | ehea_error("failed deregistering mcast MAC"); | 2018 | pr_err("failed deregistering mcast MAC\n"); |
2019 | ret = -EIO; | 2019 | ret = -EIO; |
2020 | } | 2020 | } |
2021 | 2021 | ||
@@ -2038,7 +2038,8 @@ static void ehea_allmulti(struct net_device *dev, int enable) | |||
2038 | if (!hret) | 2038 | if (!hret) |
2039 | port->allmulti = 1; | 2039 | port->allmulti = 1; |
2040 | else | 2040 | else |
2041 | ehea_error("failed enabling IFF_ALLMULTI"); | 2041 | netdev_err(dev, |
2042 | "failed enabling IFF_ALLMULTI\n"); | ||
2042 | } | 2043 | } |
2043 | } else | 2044 | } else |
2044 | if (!enable) { | 2045 | if (!enable) { |
@@ -2047,7 +2048,8 @@ static void ehea_allmulti(struct net_device *dev, int enable) | |||
2047 | if (!hret) | 2048 | if (!hret) |
2048 | port->allmulti = 0; | 2049 | port->allmulti = 0; |
2049 | else | 2050 | else |
2050 | ehea_error("failed disabling IFF_ALLMULTI"); | 2051 | netdev_err(dev, |
2052 | "failed disabling IFF_ALLMULTI\n"); | ||
2051 | } | 2053 | } |
2052 | } | 2054 | } |
2053 | 2055 | ||
@@ -2058,7 +2060,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) | |||
2058 | 2060 | ||
2059 | ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); | 2061 | ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); |
2060 | if (!ehea_mcl_entry) { | 2062 | if (!ehea_mcl_entry) { |
2061 | ehea_error("no mem for mcl_entry"); | 2063 | pr_err("no mem for mcl_entry\n"); |
2062 | return; | 2064 | return; |
2063 | } | 2065 | } |
2064 | 2066 | ||
@@ -2071,7 +2073,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) | |||
2071 | if (!hret) | 2073 | if (!hret) |
2072 | list_add(&ehea_mcl_entry->list, &port->mc_list->list); | 2074 | list_add(&ehea_mcl_entry->list, &port->mc_list->list); |
2073 | else { | 2075 | else { |
2074 | ehea_error("failed registering mcast MAC"); | 2076 | pr_err("failed registering mcast MAC\n"); |
2075 | kfree(ehea_mcl_entry); | 2077 | kfree(ehea_mcl_entry); |
2076 | } | 2078 | } |
2077 | } | 2079 | } |
@@ -2104,9 +2106,8 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
2104 | } | 2106 | } |
2105 | 2107 | ||
2106 | if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { | 2108 | if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { |
2107 | ehea_info("Mcast registration limit reached (0x%llx). " | 2109 | pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n", |
2108 | "Use ALLMULTI!", | 2110 | port->adapter->max_mc_mac); |
2109 | port->adapter->max_mc_mac); | ||
2110 | goto out; | 2111 | goto out; |
2111 | } | 2112 | } |
2112 | 2113 | ||
@@ -2312,10 +2313,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2312 | } | 2313 | } |
2313 | pr->swqe_id_counter += 1; | 2314 | pr->swqe_id_counter += 1; |
2314 | 2315 | ||
2315 | if (netif_msg_tx_queued(port)) { | 2316 | netif_info(port, tx_queued, dev, |
2316 | ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); | 2317 | "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); |
2318 | if (netif_msg_tx_queued(port)) | ||
2317 | ehea_dump(swqe, 512, "swqe"); | 2319 | ehea_dump(swqe, 512, "swqe"); |
2318 | } | ||
2319 | 2320 | ||
2320 | if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { | 2321 | if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { |
2321 | netif_stop_queue(dev); | 2322 | netif_stop_queue(dev); |
@@ -2351,14 +2352,14 @@ static void ehea_vlan_rx_register(struct net_device *dev, | |||
2351 | 2352 | ||
2352 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); | 2353 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2353 | if (!cb1) { | 2354 | if (!cb1) { |
2354 | ehea_error("no mem for cb1"); | 2355 | pr_err("no mem for cb1\n"); |
2355 | goto out; | 2356 | goto out; |
2356 | } | 2357 | } |
2357 | 2358 | ||
2358 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | 2359 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, |
2359 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2360 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2360 | if (hret != H_SUCCESS) | 2361 | if (hret != H_SUCCESS) |
2361 | ehea_error("modify_ehea_port failed"); | 2362 | pr_err("modify_ehea_port failed\n"); |
2362 | 2363 | ||
2363 | free_page((unsigned long)cb1); | 2364 | free_page((unsigned long)cb1); |
2364 | out: | 2365 | out: |
@@ -2375,14 +2376,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
2375 | 2376 | ||
2376 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); | 2377 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2377 | if (!cb1) { | 2378 | if (!cb1) { |
2378 | ehea_error("no mem for cb1"); | 2379 | pr_err("no mem for cb1\n"); |
2379 | goto out; | 2380 | goto out; |
2380 | } | 2381 | } |
2381 | 2382 | ||
2382 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | 2383 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, |
2383 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2384 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2384 | if (hret != H_SUCCESS) { | 2385 | if (hret != H_SUCCESS) { |
2385 | ehea_error("query_ehea_port failed"); | 2386 | pr_err("query_ehea_port failed\n"); |
2386 | goto out; | 2387 | goto out; |
2387 | } | 2388 | } |
2388 | 2389 | ||
@@ -2392,7 +2393,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
2392 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | 2393 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, |
2393 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2394 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2394 | if (hret != H_SUCCESS) | 2395 | if (hret != H_SUCCESS) |
2395 | ehea_error("modify_ehea_port failed"); | 2396 | pr_err("modify_ehea_port failed\n"); |
2396 | out: | 2397 | out: |
2397 | free_page((unsigned long)cb1); | 2398 | free_page((unsigned long)cb1); |
2398 | return; | 2399 | return; |
@@ -2410,14 +2411,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2410 | 2411 | ||
2411 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); | 2412 | cb1 = (void *)get_zeroed_page(GFP_KERNEL); |
2412 | if (!cb1) { | 2413 | if (!cb1) { |
2413 | ehea_error("no mem for cb1"); | 2414 | pr_err("no mem for cb1\n"); |
2414 | goto out; | 2415 | goto out; |
2415 | } | 2416 | } |
2416 | 2417 | ||
2417 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, | 2418 | hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, |
2418 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2419 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2419 | if (hret != H_SUCCESS) { | 2420 | if (hret != H_SUCCESS) { |
2420 | ehea_error("query_ehea_port failed"); | 2421 | pr_err("query_ehea_port failed\n"); |
2421 | goto out; | 2422 | goto out; |
2422 | } | 2423 | } |
2423 | 2424 | ||
@@ -2427,7 +2428,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
2427 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, | 2428 | hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, |
2428 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); | 2429 | H_PORT_CB1, H_PORT_CB1_ALL, cb1); |
2429 | if (hret != H_SUCCESS) | 2430 | if (hret != H_SUCCESS) |
2430 | ehea_error("modify_ehea_port failed"); | 2431 | pr_err("modify_ehea_port failed\n"); |
2431 | out: | 2432 | out: |
2432 | free_page((unsigned long)cb1); | 2433 | free_page((unsigned long)cb1); |
2433 | } | 2434 | } |
@@ -2449,7 +2450,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2449 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | 2450 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, |
2450 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | 2451 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); |
2451 | if (hret != H_SUCCESS) { | 2452 | if (hret != H_SUCCESS) { |
2452 | ehea_error("query_ehea_qp failed (1)"); | 2453 | pr_err("query_ehea_qp failed (1)\n"); |
2453 | goto out; | 2454 | goto out; |
2454 | } | 2455 | } |
2455 | 2456 | ||
@@ -2458,14 +2459,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2458 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | 2459 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, |
2459 | &dummy64, &dummy64, &dummy16, &dummy16); | 2460 | &dummy64, &dummy64, &dummy16, &dummy16); |
2460 | if (hret != H_SUCCESS) { | 2461 | if (hret != H_SUCCESS) { |
2461 | ehea_error("modify_ehea_qp failed (1)"); | 2462 | pr_err("modify_ehea_qp failed (1)\n"); |
2462 | goto out; | 2463 | goto out; |
2463 | } | 2464 | } |
2464 | 2465 | ||
2465 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | 2466 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, |
2466 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | 2467 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); |
2467 | if (hret != H_SUCCESS) { | 2468 | if (hret != H_SUCCESS) { |
2468 | ehea_error("query_ehea_qp failed (2)"); | 2469 | pr_err("query_ehea_qp failed (2)\n"); |
2469 | goto out; | 2470 | goto out; |
2470 | } | 2471 | } |
2471 | 2472 | ||
@@ -2474,14 +2475,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2474 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | 2475 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, |
2475 | &dummy64, &dummy64, &dummy16, &dummy16); | 2476 | &dummy64, &dummy64, &dummy16, &dummy16); |
2476 | if (hret != H_SUCCESS) { | 2477 | if (hret != H_SUCCESS) { |
2477 | ehea_error("modify_ehea_qp failed (2)"); | 2478 | pr_err("modify_ehea_qp failed (2)\n"); |
2478 | goto out; | 2479 | goto out; |
2479 | } | 2480 | } |
2480 | 2481 | ||
2481 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | 2482 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, |
2482 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | 2483 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); |
2483 | if (hret != H_SUCCESS) { | 2484 | if (hret != H_SUCCESS) { |
2484 | ehea_error("query_ehea_qp failed (3)"); | 2485 | pr_err("query_ehea_qp failed (3)\n"); |
2485 | goto out; | 2486 | goto out; |
2486 | } | 2487 | } |
2487 | 2488 | ||
@@ -2490,14 +2491,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) | |||
2490 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, | 2491 | EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, |
2491 | &dummy64, &dummy64, &dummy16, &dummy16); | 2492 | &dummy64, &dummy64, &dummy16, &dummy16); |
2492 | if (hret != H_SUCCESS) { | 2493 | if (hret != H_SUCCESS) { |
2493 | ehea_error("modify_ehea_qp failed (3)"); | 2494 | pr_err("modify_ehea_qp failed (3)\n"); |
2494 | goto out; | 2495 | goto out; |
2495 | } | 2496 | } |
2496 | 2497 | ||
2497 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, | 2498 | hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, |
2498 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); | 2499 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); |
2499 | if (hret != H_SUCCESS) { | 2500 | if (hret != H_SUCCESS) { |
2500 | ehea_error("query_ehea_qp failed (4)"); | 2501 | pr_err("query_ehea_qp failed (4)\n"); |
2501 | goto out; | 2502 | goto out; |
2502 | } | 2503 | } |
2503 | 2504 | ||
@@ -2518,7 +2519,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps, | |||
2518 | EHEA_MAX_ENTRIES_EQ, 1); | 2519 | EHEA_MAX_ENTRIES_EQ, 1); |
2519 | if (!port->qp_eq) { | 2520 | if (!port->qp_eq) { |
2520 | ret = -EINVAL; | 2521 | ret = -EINVAL; |
2521 | ehea_error("ehea_create_eq failed (qp_eq)"); | 2522 | pr_err("ehea_create_eq failed (qp_eq)\n"); |
2522 | goto out_kill_eq; | 2523 | goto out_kill_eq; |
2523 | } | 2524 | } |
2524 | 2525 | ||
@@ -2599,27 +2600,27 @@ static int ehea_up(struct net_device *dev) | |||
2599 | ret = ehea_port_res_setup(port, port->num_def_qps, | 2600 | ret = ehea_port_res_setup(port, port->num_def_qps, |
2600 | port->num_add_tx_qps); | 2601 | port->num_add_tx_qps); |
2601 | if (ret) { | 2602 | if (ret) { |
2602 | ehea_error("port_res_failed"); | 2603 | netdev_err(dev, "port_res_failed\n"); |
2603 | goto out; | 2604 | goto out; |
2604 | } | 2605 | } |
2605 | 2606 | ||
2606 | /* Set default QP for this port */ | 2607 | /* Set default QP for this port */ |
2607 | ret = ehea_configure_port(port); | 2608 | ret = ehea_configure_port(port); |
2608 | if (ret) { | 2609 | if (ret) { |
2609 | ehea_error("ehea_configure_port failed. ret:%d", ret); | 2610 | netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret); |
2610 | goto out_clean_pr; | 2611 | goto out_clean_pr; |
2611 | } | 2612 | } |
2612 | 2613 | ||
2613 | ret = ehea_reg_interrupts(dev); | 2614 | ret = ehea_reg_interrupts(dev); |
2614 | if (ret) { | 2615 | if (ret) { |
2615 | ehea_error("reg_interrupts failed. ret:%d", ret); | 2616 | netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret); |
2616 | goto out_clean_pr; | 2617 | goto out_clean_pr; |
2617 | } | 2618 | } |
2618 | 2619 | ||
2619 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { | 2620 | for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { |
2620 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); | 2621 | ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); |
2621 | if (ret) { | 2622 | if (ret) { |
2622 | ehea_error("activate_qp failed"); | 2623 | netdev_err(dev, "activate_qp failed\n"); |
2623 | goto out_free_irqs; | 2624 | goto out_free_irqs; |
2624 | } | 2625 | } |
2625 | } | 2626 | } |
@@ -2627,7 +2628,7 @@ static int ehea_up(struct net_device *dev) | |||
2627 | for (i = 0; i < port->num_def_qps; i++) { | 2628 | for (i = 0; i < port->num_def_qps; i++) { |
2628 | ret = ehea_fill_port_res(&port->port_res[i]); | 2629 | ret = ehea_fill_port_res(&port->port_res[i]); |
2629 | if (ret) { | 2630 | if (ret) { |
2630 | ehea_error("out_free_irqs"); | 2631 | netdev_err(dev, "out_free_irqs\n"); |
2631 | goto out_free_irqs; | 2632 | goto out_free_irqs; |
2632 | } | 2633 | } |
2633 | } | 2634 | } |
@@ -2650,7 +2651,7 @@ out_clean_pr: | |||
2650 | ehea_clean_all_portres(port); | 2651 | ehea_clean_all_portres(port); |
2651 | out: | 2652 | out: |
2652 | if (ret) | 2653 | if (ret) |
2653 | ehea_info("Failed starting %s. ret=%i", dev->name, ret); | 2654 | netdev_info(dev, "Failed starting. ret=%i\n", ret); |
2654 | 2655 | ||
2655 | ehea_update_bcmc_registrations(); | 2656 | ehea_update_bcmc_registrations(); |
2656 | ehea_update_firmware_handles(); | 2657 | ehea_update_firmware_handles(); |
@@ -2681,8 +2682,7 @@ static int ehea_open(struct net_device *dev) | |||
2681 | 2682 | ||
2682 | mutex_lock(&port->port_lock); | 2683 | mutex_lock(&port->port_lock); |
2683 | 2684 | ||
2684 | if (netif_msg_ifup(port)) | 2685 | netif_info(port, ifup, dev, "enabling port\n"); |
2685 | ehea_info("enabling port %s", dev->name); | ||
2686 | 2686 | ||
2687 | ret = ehea_up(dev); | 2687 | ret = ehea_up(dev); |
2688 | if (!ret) { | 2688 | if (!ret) { |
@@ -2717,8 +2717,7 @@ static int ehea_down(struct net_device *dev) | |||
2717 | 2717 | ||
2718 | ret = ehea_clean_all_portres(port); | 2718 | ret = ehea_clean_all_portres(port); |
2719 | if (ret) | 2719 | if (ret) |
2720 | ehea_info("Failed freeing resources for %s. ret=%i", | 2720 | netdev_info(dev, "Failed freeing resources. ret=%i\n", ret); |
2721 | dev->name, ret); | ||
2722 | 2721 | ||
2723 | ehea_update_firmware_handles(); | 2722 | ehea_update_firmware_handles(); |
2724 | 2723 | ||
@@ -2730,8 +2729,7 @@ static int ehea_stop(struct net_device *dev) | |||
2730 | int ret; | 2729 | int ret; |
2731 | struct ehea_port *port = netdev_priv(dev); | 2730 | struct ehea_port *port = netdev_priv(dev); |
2732 | 2731 | ||
2733 | if (netif_msg_ifdown(port)) | 2732 | netif_info(port, ifdown, dev, "disabling port\n"); |
2734 | ehea_info("disabling port %s", dev->name); | ||
2735 | 2733 | ||
2736 | set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); | 2734 | set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); |
2737 | cancel_work_sync(&port->reset_task); | 2735 | cancel_work_sync(&port->reset_task); |
@@ -2772,7 +2770,7 @@ static void ehea_flush_sq(struct ehea_port *port) | |||
2772 | msecs_to_jiffies(100)); | 2770 | msecs_to_jiffies(100)); |
2773 | 2771 | ||
2774 | if (!ret) { | 2772 | if (!ret) { |
2775 | ehea_error("WARNING: sq not flushed completely"); | 2773 | pr_err("WARNING: sq not flushed completely\n"); |
2776 | break; | 2774 | break; |
2777 | } | 2775 | } |
2778 | } | 2776 | } |
@@ -2808,7 +2806,7 @@ int ehea_stop_qps(struct net_device *dev) | |||
2808 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), | 2806 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), |
2809 | cb0); | 2807 | cb0); |
2810 | if (hret != H_SUCCESS) { | 2808 | if (hret != H_SUCCESS) { |
2811 | ehea_error("query_ehea_qp failed (1)"); | 2809 | pr_err("query_ehea_qp failed (1)\n"); |
2812 | goto out; | 2810 | goto out; |
2813 | } | 2811 | } |
2814 | 2812 | ||
@@ -2820,7 +2818,7 @@ int ehea_stop_qps(struct net_device *dev) | |||
2820 | 1), cb0, &dummy64, | 2818 | 1), cb0, &dummy64, |
2821 | &dummy64, &dummy16, &dummy16); | 2819 | &dummy64, &dummy16, &dummy16); |
2822 | if (hret != H_SUCCESS) { | 2820 | if (hret != H_SUCCESS) { |
2823 | ehea_error("modify_ehea_qp failed (1)"); | 2821 | pr_err("modify_ehea_qp failed (1)\n"); |
2824 | goto out; | 2822 | goto out; |
2825 | } | 2823 | } |
2826 | 2824 | ||
@@ -2828,14 +2826,14 @@ int ehea_stop_qps(struct net_device *dev) | |||
2828 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), | 2826 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), |
2829 | cb0); | 2827 | cb0); |
2830 | if (hret != H_SUCCESS) { | 2828 | if (hret != H_SUCCESS) { |
2831 | ehea_error("query_ehea_qp failed (2)"); | 2829 | pr_err("query_ehea_qp failed (2)\n"); |
2832 | goto out; | 2830 | goto out; |
2833 | } | 2831 | } |
2834 | 2832 | ||
2835 | /* deregister shared memory regions */ | 2833 | /* deregister shared memory regions */ |
2836 | dret = ehea_rem_smrs(pr); | 2834 | dret = ehea_rem_smrs(pr); |
2837 | if (dret) { | 2835 | if (dret) { |
2838 | ehea_error("unreg shared memory region failed"); | 2836 | pr_err("unreg shared memory region failed\n"); |
2839 | goto out; | 2837 | goto out; |
2840 | } | 2838 | } |
2841 | } | 2839 | } |
@@ -2904,7 +2902,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2904 | 2902 | ||
2905 | ret = ehea_gen_smrs(pr); | 2903 | ret = ehea_gen_smrs(pr); |
2906 | if (ret) { | 2904 | if (ret) { |
2907 | ehea_error("creation of shared memory regions failed"); | 2905 | netdev_err(dev, "creation of shared memory regions failed\n"); |
2908 | goto out; | 2906 | goto out; |
2909 | } | 2907 | } |
2910 | 2908 | ||
@@ -2915,7 +2913,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2915 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), | 2913 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), |
2916 | cb0); | 2914 | cb0); |
2917 | if (hret != H_SUCCESS) { | 2915 | if (hret != H_SUCCESS) { |
2918 | ehea_error("query_ehea_qp failed (1)"); | 2916 | netdev_err(dev, "query_ehea_qp failed (1)\n"); |
2919 | goto out; | 2917 | goto out; |
2920 | } | 2918 | } |
2921 | 2919 | ||
@@ -2927,7 +2925,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2927 | 1), cb0, &dummy64, | 2925 | 1), cb0, &dummy64, |
2928 | &dummy64, &dummy16, &dummy16); | 2926 | &dummy64, &dummy16, &dummy16); |
2929 | if (hret != H_SUCCESS) { | 2927 | if (hret != H_SUCCESS) { |
2930 | ehea_error("modify_ehea_qp failed (1)"); | 2928 | netdev_err(dev, "modify_ehea_qp failed (1)\n"); |
2931 | goto out; | 2929 | goto out; |
2932 | } | 2930 | } |
2933 | 2931 | ||
@@ -2935,7 +2933,7 @@ int ehea_restart_qps(struct net_device *dev) | |||
2935 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), | 2933 | EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), |
2936 | cb0); | 2934 | cb0); |
2937 | if (hret != H_SUCCESS) { | 2935 | if (hret != H_SUCCESS) { |
2938 | ehea_error("query_ehea_qp failed (2)"); | 2936 | netdev_err(dev, "query_ehea_qp failed (2)\n"); |
2939 | goto out; | 2937 | goto out; |
2940 | } | 2938 | } |
2941 | 2939 | ||
@@ -2972,8 +2970,7 @@ static void ehea_reset_port(struct work_struct *work) | |||
2972 | 2970 | ||
2973 | ehea_set_multicast_list(dev); | 2971 | ehea_set_multicast_list(dev); |
2974 | 2972 | ||
2975 | if (netif_msg_timer(port)) | 2973 | netif_info(port, timer, dev, "reset successful\n"); |
2976 | ehea_info("Device %s resetted successfully", dev->name); | ||
2977 | 2974 | ||
2978 | port_napi_enable(port); | 2975 | port_napi_enable(port); |
2979 | 2976 | ||
@@ -2988,7 +2985,7 @@ static void ehea_rereg_mrs(void) | |||
2988 | int ret, i; | 2985 | int ret, i; |
2989 | struct ehea_adapter *adapter; | 2986 | struct ehea_adapter *adapter; |
2990 | 2987 | ||
2991 | ehea_info("LPAR memory changed - re-initializing driver"); | 2988 | pr_info("LPAR memory changed - re-initializing driver\n"); |
2992 | 2989 | ||
2993 | list_for_each_entry(adapter, &adapter_list, list) | 2990 | list_for_each_entry(adapter, &adapter_list, list) |
2994 | if (adapter->active_ports) { | 2991 | if (adapter->active_ports) { |
@@ -3020,8 +3017,7 @@ static void ehea_rereg_mrs(void) | |||
3020 | /* Unregister old memory region */ | 3017 | /* Unregister old memory region */ |
3021 | ret = ehea_rem_mr(&adapter->mr); | 3018 | ret = ehea_rem_mr(&adapter->mr); |
3022 | if (ret) { | 3019 | if (ret) { |
3023 | ehea_error("unregister MR failed - driver" | 3020 | pr_err("unregister MR failed - driver inoperable!\n"); |
3024 | " inoperable!"); | ||
3025 | goto out; | 3021 | goto out; |
3026 | } | 3022 | } |
3027 | } | 3023 | } |
@@ -3033,8 +3029,7 @@ static void ehea_rereg_mrs(void) | |||
3033 | /* Register new memory region */ | 3029 | /* Register new memory region */ |
3034 | ret = ehea_reg_kernel_mr(adapter, &adapter->mr); | 3030 | ret = ehea_reg_kernel_mr(adapter, &adapter->mr); |
3035 | if (ret) { | 3031 | if (ret) { |
3036 | ehea_error("register MR failed - driver" | 3032 | pr_err("register MR failed - driver inoperable!\n"); |
3037 | " inoperable!"); | ||
3038 | goto out; | 3033 | goto out; |
3039 | } | 3034 | } |
3040 | 3035 | ||
@@ -3057,7 +3052,7 @@ static void ehea_rereg_mrs(void) | |||
3057 | } | 3052 | } |
3058 | } | 3053 | } |
3059 | } | 3054 | } |
3060 | ehea_info("re-initializing driver complete"); | 3055 | pr_info("re-initializing driver complete\n"); |
3061 | out: | 3056 | out: |
3062 | return; | 3057 | return; |
3063 | } | 3058 | } |
@@ -3110,7 +3105,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo) | |||
3110 | /* (Try to) enable *jumbo frames */ | 3105 | /* (Try to) enable *jumbo frames */ |
3111 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); | 3106 | cb4 = (void *)get_zeroed_page(GFP_KERNEL); |
3112 | if (!cb4) { | 3107 | if (!cb4) { |
3113 | ehea_error("no mem for cb4"); | 3108 | pr_err("no mem for cb4\n"); |
3114 | ret = -ENOMEM; | 3109 | ret = -ENOMEM; |
3115 | goto out; | 3110 | goto out; |
3116 | } else { | 3111 | } else { |
@@ -3172,13 +3167,13 @@ static struct device *ehea_register_port(struct ehea_port *port, | |||
3172 | 3167 | ||
3173 | ret = of_device_register(&port->ofdev); | 3168 | ret = of_device_register(&port->ofdev); |
3174 | if (ret) { | 3169 | if (ret) { |
3175 | ehea_error("failed to register device. ret=%d", ret); | 3170 | pr_err("failed to register device. ret=%d\n", ret); |
3176 | goto out; | 3171 | goto out; |
3177 | } | 3172 | } |
3178 | 3173 | ||
3179 | ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); | 3174 | ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); |
3180 | if (ret) { | 3175 | if (ret) { |
3181 | ehea_error("failed to register attributes, ret=%d", ret); | 3176 | pr_err("failed to register attributes, ret=%d\n", ret); |
3182 | goto out_unreg_of_dev; | 3177 | goto out_unreg_of_dev; |
3183 | } | 3178 | } |
3184 | 3179 | ||
@@ -3228,7 +3223,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3228 | dev = alloc_etherdev(sizeof(struct ehea_port)); | 3223 | dev = alloc_etherdev(sizeof(struct ehea_port)); |
3229 | 3224 | ||
3230 | if (!dev) { | 3225 | if (!dev) { |
3231 | ehea_error("no mem for net_device"); | 3226 | pr_err("no mem for net_device\n"); |
3232 | ret = -ENOMEM; | 3227 | ret = -ENOMEM; |
3233 | goto out_err; | 3228 | goto out_err; |
3234 | } | 3229 | } |
@@ -3282,7 +3277,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3282 | 3277 | ||
3283 | ret = register_netdev(dev); | 3278 | ret = register_netdev(dev); |
3284 | if (ret) { | 3279 | if (ret) { |
3285 | ehea_error("register_netdev failed. ret=%d", ret); | 3280 | pr_err("register_netdev failed. ret=%d\n", ret); |
3286 | goto out_unreg_port; | 3281 | goto out_unreg_port; |
3287 | } | 3282 | } |
3288 | 3283 | ||
@@ -3290,11 +3285,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
3290 | 3285 | ||
3291 | ret = ehea_get_jumboframe_status(port, &jumbo); | 3286 | ret = ehea_get_jumboframe_status(port, &jumbo); |
3292 | if (ret) | 3287 | if (ret) |
3293 | ehea_error("failed determining jumbo frame status for %s", | 3288 | netdev_err(dev, "failed determining jumbo frame status\n"); |
3294 | port->netdev->name); | ||
3295 | 3289 | ||
3296 | ehea_info("%s: Jumbo frames are %sabled", dev->name, | 3290 | netdev_info(dev, "Jumbo frames are %sabled\n", |
3297 | jumbo == 1 ? "en" : "dis"); | 3291 | jumbo == 1 ? "en" : "dis"); |
3298 | 3292 | ||
3299 | adapter->active_ports++; | 3293 | adapter->active_ports++; |
3300 | 3294 | ||
@@ -3310,8 +3304,8 @@ out_free_ethdev: | |||
3310 | free_netdev(dev); | 3304 | free_netdev(dev); |
3311 | 3305 | ||
3312 | out_err: | 3306 | out_err: |
3313 | ehea_error("setting up logical port with id=%d failed, ret=%d", | 3307 | pr_err("setting up logical port with id=%d failed, ret=%d\n", |
3314 | logical_port_id, ret); | 3308 | logical_port_id, ret); |
3315 | return NULL; | 3309 | return NULL; |
3316 | } | 3310 | } |
3317 | 3311 | ||
@@ -3341,13 +3335,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) | |||
3341 | dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", | 3335 | dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", |
3342 | NULL); | 3336 | NULL); |
3343 | if (!dn_log_port_id) { | 3337 | if (!dn_log_port_id) { |
3344 | ehea_error("bad device node: eth_dn name=%s", | 3338 | pr_err("bad device node: eth_dn name=%s\n", |
3345 | eth_dn->full_name); | 3339 | eth_dn->full_name); |
3346 | continue; | 3340 | continue; |
3347 | } | 3341 | } |
3348 | 3342 | ||
3349 | if (ehea_add_adapter_mr(adapter)) { | 3343 | if (ehea_add_adapter_mr(adapter)) { |
3350 | ehea_error("creating MR failed"); | 3344 | pr_err("creating MR failed\n"); |
3351 | of_node_put(eth_dn); | 3345 | of_node_put(eth_dn); |
3352 | return -EIO; | 3346 | return -EIO; |
3353 | } | 3347 | } |
@@ -3356,9 +3350,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) | |||
3356 | *dn_log_port_id, | 3350 | *dn_log_port_id, |
3357 | eth_dn); | 3351 | eth_dn); |
3358 | if (adapter->port[i]) | 3352 | if (adapter->port[i]) |
3359 | ehea_info("%s -> logical port id #%d", | 3353 | netdev_info(adapter->port[i]->netdev, |
3360 | adapter->port[i]->netdev->name, | 3354 | "logical port id #%d\n", *dn_log_port_id); |
3361 | *dn_log_port_id); | ||
3362 | else | 3355 | else |
3363 | ehea_remove_adapter_mr(adapter); | 3356 | ehea_remove_adapter_mr(adapter); |
3364 | 3357 | ||
@@ -3403,21 +3396,20 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
3403 | port = ehea_get_port(adapter, logical_port_id); | 3396 | port = ehea_get_port(adapter, logical_port_id); |
3404 | 3397 | ||
3405 | if (port) { | 3398 | if (port) { |
3406 | ehea_info("adding port with logical port id=%d failed. port " | 3399 | netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", |
3407 | "already configured as %s.", logical_port_id, | 3400 | logical_port_id); |
3408 | port->netdev->name); | ||
3409 | return -EINVAL; | 3401 | return -EINVAL; |
3410 | } | 3402 | } |
3411 | 3403 | ||
3412 | eth_dn = ehea_get_eth_dn(adapter, logical_port_id); | 3404 | eth_dn = ehea_get_eth_dn(adapter, logical_port_id); |
3413 | 3405 | ||
3414 | if (!eth_dn) { | 3406 | if (!eth_dn) { |
3415 | ehea_info("no logical port with id %d found", logical_port_id); | 3407 | pr_info("no logical port with id %d found\n", logical_port_id); |
3416 | return -EINVAL; | 3408 | return -EINVAL; |
3417 | } | 3409 | } |
3418 | 3410 | ||
3419 | if (ehea_add_adapter_mr(adapter)) { | 3411 | if (ehea_add_adapter_mr(adapter)) { |
3420 | ehea_error("creating MR failed"); | 3412 | pr_err("creating MR failed\n"); |
3421 | return -EIO; | 3413 | return -EIO; |
3422 | } | 3414 | } |
3423 | 3415 | ||
@@ -3432,8 +3424,8 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
3432 | break; | 3424 | break; |
3433 | } | 3425 | } |
3434 | 3426 | ||
3435 | ehea_info("added %s (logical port id=%d)", port->netdev->name, | 3427 | netdev_info(port->netdev, "added: (logical port id=%d)\n", |
3436 | logical_port_id); | 3428 | logical_port_id); |
3437 | } else { | 3429 | } else { |
3438 | ehea_remove_adapter_mr(adapter); | 3430 | ehea_remove_adapter_mr(adapter); |
3439 | return -EIO; | 3431 | return -EIO; |
@@ -3456,8 +3448,8 @@ static ssize_t ehea_remove_port(struct device *dev, | |||
3456 | port = ehea_get_port(adapter, logical_port_id); | 3448 | port = ehea_get_port(adapter, logical_port_id); |
3457 | 3449 | ||
3458 | if (port) { | 3450 | if (port) { |
3459 | ehea_info("removed %s (logical port id=%d)", port->netdev->name, | 3451 | netdev_info(port->netdev, "removed: (logical port id=%d)\n", |
3460 | logical_port_id); | 3452 | logical_port_id); |
3461 | 3453 | ||
3462 | ehea_shutdown_single_port(port); | 3454 | ehea_shutdown_single_port(port); |
3463 | 3455 | ||
@@ -3467,8 +3459,8 @@ static ssize_t ehea_remove_port(struct device *dev, | |||
3467 | break; | 3459 | break; |
3468 | } | 3460 | } |
3469 | } else { | 3461 | } else { |
3470 | ehea_error("removing port with logical port id=%d failed. port " | 3462 | pr_err("removing port with logical port id=%d failed. port not configured.\n", |
3471 | "not configured.", logical_port_id); | 3463 | logical_port_id); |
3472 | return -EINVAL; | 3464 | return -EINVAL; |
3473 | } | 3465 | } |
3474 | 3466 | ||
@@ -3505,7 +3497,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev, | |||
3505 | int ret; | 3497 | int ret; |
3506 | 3498 | ||
3507 | if (!dev || !dev->dev.of_node) { | 3499 | if (!dev || !dev->dev.of_node) { |
3508 | ehea_error("Invalid ibmebus device probed"); | 3500 | pr_err("Invalid ibmebus device probed\n"); |
3509 | return -EINVAL; | 3501 | return -EINVAL; |
3510 | } | 3502 | } |
3511 | 3503 | ||
@@ -3651,17 +3643,17 @@ static int ehea_mem_notifier(struct notifier_block *nb, | |||
3651 | 3643 | ||
3652 | switch (action) { | 3644 | switch (action) { |
3653 | case MEM_CANCEL_OFFLINE: | 3645 | case MEM_CANCEL_OFFLINE: |
3654 | ehea_info("memory offlining canceled"); | 3646 | pr_info("memory offlining canceled\n"); |
3655 | /* Readd canceled memory block */ | 3647 | /* Readd canceled memory block */ |
3656 | case MEM_ONLINE: | 3648 | case MEM_ONLINE: |
3657 | ehea_info("memory is going online"); | 3649 | pr_info("memory is going online\n"); |
3658 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | 3650 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); |
3659 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | 3651 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) |
3660 | goto out_unlock; | 3652 | goto out_unlock; |
3661 | ehea_rereg_mrs(); | 3653 | ehea_rereg_mrs(); |
3662 | break; | 3654 | break; |
3663 | case MEM_GOING_OFFLINE: | 3655 | case MEM_GOING_OFFLINE: |
3664 | ehea_info("memory is going offline"); | 3656 | pr_info("memory is going offline\n"); |
3665 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | 3657 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); |
3666 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | 3658 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) |
3667 | goto out_unlock; | 3659 | goto out_unlock; |
@@ -3687,7 +3679,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb, | |||
3687 | unsigned long action, void *unused) | 3679 | unsigned long action, void *unused) |
3688 | { | 3680 | { |
3689 | if (action == SYS_RESTART) { | 3681 | if (action == SYS_RESTART) { |
3690 | ehea_info("Reboot: freeing all eHEA resources"); | 3682 | pr_info("Reboot: freeing all eHEA resources\n"); |
3691 | ibmebus_unregister_driver(&ehea_driver); | 3683 | ibmebus_unregister_driver(&ehea_driver); |
3692 | } | 3684 | } |
3693 | return NOTIFY_DONE; | 3685 | return NOTIFY_DONE; |
@@ -3703,22 +3695,22 @@ static int check_module_parm(void) | |||
3703 | 3695 | ||
3704 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || | 3696 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || |
3705 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { | 3697 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { |
3706 | ehea_info("Bad parameter: rq1_entries"); | 3698 | pr_info("Bad parameter: rq1_entries\n"); |
3707 | ret = -EINVAL; | 3699 | ret = -EINVAL; |
3708 | } | 3700 | } |
3709 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || | 3701 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || |
3710 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { | 3702 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { |
3711 | ehea_info("Bad parameter: rq2_entries"); | 3703 | pr_info("Bad parameter: rq2_entries\n"); |
3712 | ret = -EINVAL; | 3704 | ret = -EINVAL; |
3713 | } | 3705 | } |
3714 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || | 3706 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || |
3715 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { | 3707 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { |
3716 | ehea_info("Bad parameter: rq3_entries"); | 3708 | pr_info("Bad parameter: rq3_entries\n"); |
3717 | ret = -EINVAL; | 3709 | ret = -EINVAL; |
3718 | } | 3710 | } |
3719 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || | 3711 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || |
3720 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { | 3712 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { |
3721 | ehea_info("Bad parameter: sq_entries"); | 3713 | pr_info("Bad parameter: sq_entries\n"); |
3722 | ret = -EINVAL; | 3714 | ret = -EINVAL; |
3723 | } | 3715 | } |
3724 | 3716 | ||
@@ -3738,8 +3730,7 @@ int __init ehea_module_init(void) | |||
3738 | { | 3730 | { |
3739 | int ret; | 3731 | int ret; |
3740 | 3732 | ||
3741 | printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", | 3733 | pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); |
3742 | DRV_VERSION); | ||
3743 | 3734 | ||
3744 | memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); | 3735 | memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); |
3745 | memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); | 3736 | memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); |
@@ -3757,27 +3748,27 @@ int __init ehea_module_init(void) | |||
3757 | 3748 | ||
3758 | ret = register_reboot_notifier(&ehea_reboot_nb); | 3749 | ret = register_reboot_notifier(&ehea_reboot_nb); |
3759 | if (ret) | 3750 | if (ret) |
3760 | ehea_info("failed registering reboot notifier"); | 3751 | pr_info("failed registering reboot notifier\n"); |
3761 | 3752 | ||
3762 | ret = register_memory_notifier(&ehea_mem_nb); | 3753 | ret = register_memory_notifier(&ehea_mem_nb); |
3763 | if (ret) | 3754 | if (ret) |
3764 | ehea_info("failed registering memory remove notifier"); | 3755 | pr_info("failed registering memory remove notifier\n"); |
3765 | 3756 | ||
3766 | ret = crash_shutdown_register(ehea_crash_handler); | 3757 | ret = crash_shutdown_register(ehea_crash_handler); |
3767 | if (ret) | 3758 | if (ret) |
3768 | ehea_info("failed registering crash handler"); | 3759 | pr_info("failed registering crash handler\n"); |
3769 | 3760 | ||
3770 | ret = ibmebus_register_driver(&ehea_driver); | 3761 | ret = ibmebus_register_driver(&ehea_driver); |
3771 | if (ret) { | 3762 | if (ret) { |
3772 | ehea_error("failed registering eHEA device driver on ebus"); | 3763 | pr_err("failed registering eHEA device driver on ebus\n"); |
3773 | goto out2; | 3764 | goto out2; |
3774 | } | 3765 | } |
3775 | 3766 | ||
3776 | ret = driver_create_file(&ehea_driver.driver, | 3767 | ret = driver_create_file(&ehea_driver.driver, |
3777 | &driver_attr_capabilities); | 3768 | &driver_attr_capabilities); |
3778 | if (ret) { | 3769 | if (ret) { |
3779 | ehea_error("failed to register capabilities attribute, ret=%d", | 3770 | pr_err("failed to register capabilities attribute, ret=%d\n", |
3780 | ret); | 3771 | ret); |
3781 | goto out3; | 3772 | goto out3; |
3782 | } | 3773 | } |
3783 | 3774 | ||
@@ -3802,7 +3793,7 @@ static void __exit ehea_module_exit(void) | |||
3802 | unregister_reboot_notifier(&ehea_reboot_nb); | 3793 | unregister_reboot_notifier(&ehea_reboot_nb); |
3803 | ret = crash_shutdown_unregister(ehea_crash_handler); | 3794 | ret = crash_shutdown_unregister(ehea_crash_handler); |
3804 | if (ret) | 3795 | if (ret) |
3805 | ehea_info("failed unregistering crash handler"); | 3796 | pr_info("failed unregistering crash handler\n"); |
3806 | unregister_memory_notifier(&ehea_mem_nb); | 3797 | unregister_memory_notifier(&ehea_mem_nb); |
3807 | kfree(ehea_fw_handles.arr); | 3798 | kfree(ehea_fw_handles.arr); |
3808 | kfree(ehea_bcmc_regs.arr); | 3799 | kfree(ehea_bcmc_regs.arr); |
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c index 8fe9dcaa7538..0506967b9044 100644 --- a/drivers/net/ehea/ehea_phyp.c +++ b/drivers/net/ehea/ehea_phyp.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include "ehea_phyp.h" | 31 | #include "ehea_phyp.h" |
30 | 32 | ||
31 | 33 | ||
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode, | |||
67 | } | 69 | } |
68 | 70 | ||
69 | if (ret < H_SUCCESS) | 71 | if (ret < H_SUCCESS) |
70 | ehea_error("opcode=%lx ret=%lx" | 72 | pr_err("opcode=%lx ret=%lx" |
71 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" | 73 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" |
72 | " arg5=%lx arg6=%lx arg7=%lx ", | 74 | " arg5=%lx arg6=%lx arg7=%lx\n", |
73 | opcode, ret, | 75 | opcode, ret, |
74 | arg1, arg2, arg3, arg4, arg5, | 76 | arg1, arg2, arg3, arg4, arg5, arg6, arg7); |
75 | arg6, arg7); | ||
76 | 77 | ||
77 | return ret; | 78 | return ret; |
78 | } | 79 | } |
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode, | |||
114 | && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) | 115 | && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) |
115 | || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) | 116 | || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) |
116 | && (arg3 == H_PORT_CB7_DUCQPN))))) | 117 | && (arg3 == H_PORT_CB7_DUCQPN))))) |
117 | ehea_error("opcode=%lx ret=%lx" | 118 | pr_err("opcode=%lx ret=%lx" |
118 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" | 119 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" |
119 | " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" | 120 | " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" |
120 | " arg9=%lx" | 121 | " arg9=%lx" |
121 | " out1=%lx out2=%lx out3=%lx out4=%lx" | 122 | " out1=%lx out2=%lx out3=%lx out4=%lx" |
122 | " out5=%lx out6=%lx out7=%lx out8=%lx" | 123 | " out5=%lx out6=%lx out7=%lx out8=%lx" |
123 | " out9=%lx", | 124 | " out9=%lx\n", |
124 | opcode, ret, | 125 | opcode, ret, |
125 | arg1, arg2, arg3, arg4, arg5, | 126 | arg1, arg2, arg3, arg4, arg5, |
126 | arg6, arg7, arg8, arg9, | 127 | arg6, arg7, arg8, arg9, |
127 | outs[0], outs[1], outs[2], outs[3], | 128 | outs[0], outs[1], outs[2], outs[3], outs[4], |
128 | outs[4], outs[5], outs[6], outs[7], | 129 | outs[5], outs[6], outs[7], outs[8]); |
129 | outs[8]); | ||
130 | return ret; | 130 | return ret; |
131 | } | 131 | } |
132 | 132 | ||
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, | |||
515 | const u64 log_pageaddr, const u64 count) | 515 | const u64 log_pageaddr, const u64 count) |
516 | { | 516 | { |
517 | if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { | 517 | if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { |
518 | ehea_error("not on pageboundary"); | 518 | pr_err("not on pageboundary\n"); |
519 | return H_PARAMETER; | 519 | return H_PARAMETER; |
520 | } | 520 | } |
521 | 521 | ||
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 89128b6373e3..cd44bb8017d9 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
31 | #include "ehea.h" | 33 | #include "ehea.h" |
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue) | |||
45 | queue->current_q_offset -= queue->pagesize; | 47 | queue->current_q_offset -= queue->pagesize; |
46 | retvalue = NULL; | 48 | retvalue = NULL; |
47 | } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { | 49 | } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { |
48 | ehea_error("not on pageboundary"); | 50 | pr_err("not on pageboundary\n"); |
49 | retvalue = NULL; | 51 | retvalue = NULL; |
50 | } | 52 | } |
51 | return retvalue; | 53 | return retvalue; |
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, | |||
58 | int i, k; | 60 | int i, k; |
59 | 61 | ||
60 | if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { | 62 | if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { |
61 | ehea_error("pagesize conflict! kernel pagesize=%d, " | 63 | pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n", |
62 | "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize); | 64 | (int)PAGE_SIZE, (int)pagesize); |
63 | return -EINVAL; | 65 | return -EINVAL; |
64 | } | 66 | } |
65 | 67 | ||
66 | queue->queue_length = nr_of_pages * pagesize; | 68 | queue->queue_length = nr_of_pages * pagesize; |
67 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); | 69 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); |
68 | if (!queue->queue_pages) { | 70 | if (!queue->queue_pages) { |
69 | ehea_error("no mem for queue_pages"); | 71 | pr_err("no mem for queue_pages\n"); |
70 | return -ENOMEM; | 72 | return -ENOMEM; |
71 | } | 73 | } |
72 | 74 | ||
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
130 | 132 | ||
131 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); | 133 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); |
132 | if (!cq) { | 134 | if (!cq) { |
133 | ehea_error("no mem for cq"); | 135 | pr_err("no mem for cq\n"); |
134 | goto out_nomem; | 136 | goto out_nomem; |
135 | } | 137 | } |
136 | 138 | ||
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
147 | hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, | 149 | hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, |
148 | &cq->fw_handle, &cq->epas); | 150 | &cq->fw_handle, &cq->epas); |
149 | if (hret != H_SUCCESS) { | 151 | if (hret != H_SUCCESS) { |
150 | ehea_error("alloc_resource_cq failed"); | 152 | pr_err("alloc_resource_cq failed\n"); |
151 | goto out_freemem; | 153 | goto out_freemem; |
152 | } | 154 | } |
153 | 155 | ||
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
159 | for (counter = 0; counter < cq->attr.nr_pages; counter++) { | 161 | for (counter = 0; counter < cq->attr.nr_pages; counter++) { |
160 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | 162 | vpage = hw_qpageit_get_inc(&cq->hw_queue); |
161 | if (!vpage) { | 163 | if (!vpage) { |
162 | ehea_error("hw_qpageit_get_inc failed"); | 164 | pr_err("hw_qpageit_get_inc failed\n"); |
163 | goto out_kill_hwq; | 165 | goto out_kill_hwq; |
164 | } | 166 | } |
165 | 167 | ||
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
168 | 0, EHEA_CQ_REGISTER_ORIG, | 170 | 0, EHEA_CQ_REGISTER_ORIG, |
169 | cq->fw_handle, rpage, 1); | 171 | cq->fw_handle, rpage, 1); |
170 | if (hret < H_SUCCESS) { | 172 | if (hret < H_SUCCESS) { |
171 | ehea_error("register_rpage_cq failed ehea_cq=%p " | 173 | pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n", |
172 | "hret=%llx counter=%i act_pages=%i", | 174 | cq, hret, counter, cq->attr.nr_pages); |
173 | cq, hret, counter, cq->attr.nr_pages); | ||
174 | goto out_kill_hwq; | 175 | goto out_kill_hwq; |
175 | } | 176 | } |
176 | 177 | ||
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
178 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | 179 | vpage = hw_qpageit_get_inc(&cq->hw_queue); |
179 | 180 | ||
180 | if ((hret != H_SUCCESS) || (vpage)) { | 181 | if ((hret != H_SUCCESS) || (vpage)) { |
181 | ehea_error("registration of pages not " | 182 | pr_err("registration of pages not complete hret=%llx\n", |
182 | "complete hret=%llx\n", hret); | 183 | hret); |
183 | goto out_kill_hwq; | 184 | goto out_kill_hwq; |
184 | } | 185 | } |
185 | } else { | 186 | } else { |
186 | if (hret != H_PAGE_REGISTERED) { | 187 | if (hret != H_PAGE_REGISTERED) { |
187 | ehea_error("CQ: registration of page failed " | 188 | pr_err("CQ: registration of page failed hret=%llx\n", |
188 | "hret=%llx\n", hret); | 189 | hret); |
189 | goto out_kill_hwq; | 190 | goto out_kill_hwq; |
190 | } | 191 | } |
191 | } | 192 | } |
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq) | |||
241 | } | 242 | } |
242 | 243 | ||
243 | if (hret != H_SUCCESS) { | 244 | if (hret != H_SUCCESS) { |
244 | ehea_error("destroy CQ failed"); | 245 | pr_err("destroy CQ failed\n"); |
245 | return -EIO; | 246 | return -EIO; |
246 | } | 247 | } |
247 | 248 | ||
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | |||
259 | 260 | ||
260 | eq = kzalloc(sizeof(*eq), GFP_KERNEL); | 261 | eq = kzalloc(sizeof(*eq), GFP_KERNEL); |
261 | if (!eq) { | 262 | if (!eq) { |
262 | ehea_error("no mem for eq"); | 263 | pr_err("no mem for eq\n"); |
263 | return NULL; | 264 | return NULL; |
264 | } | 265 | } |
265 | 266 | ||
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | |||
272 | hret = ehea_h_alloc_resource_eq(adapter->handle, | 273 | hret = ehea_h_alloc_resource_eq(adapter->handle, |
273 | &eq->attr, &eq->fw_handle); | 274 | &eq->attr, &eq->fw_handle); |
274 | if (hret != H_SUCCESS) { | 275 | if (hret != H_SUCCESS) { |
275 | ehea_error("alloc_resource_eq failed"); | 276 | pr_err("alloc_resource_eq failed\n"); |
276 | goto out_freemem; | 277 | goto out_freemem; |
277 | } | 278 | } |
278 | 279 | ||
279 | ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, | 280 | ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, |
280 | EHEA_PAGESIZE, sizeof(struct ehea_eqe)); | 281 | EHEA_PAGESIZE, sizeof(struct ehea_eqe)); |
281 | if (ret) { | 282 | if (ret) { |
282 | ehea_error("can't allocate eq pages"); | 283 | pr_err("can't allocate eq pages\n"); |
283 | goto out_freeres; | 284 | goto out_freeres; |
284 | } | 285 | } |
285 | 286 | ||
286 | for (i = 0; i < eq->attr.nr_pages; i++) { | 287 | for (i = 0; i < eq->attr.nr_pages; i++) { |
287 | vpage = hw_qpageit_get_inc(&eq->hw_queue); | 288 | vpage = hw_qpageit_get_inc(&eq->hw_queue); |
288 | if (!vpage) { | 289 | if (!vpage) { |
289 | ehea_error("hw_qpageit_get_inc failed"); | 290 | pr_err("hw_qpageit_get_inc failed\n"); |
290 | hret = H_RESOURCE; | 291 | hret = H_RESOURCE; |
291 | goto out_kill_hwq; | 292 | goto out_kill_hwq; |
292 | } | 293 | } |
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq) | |||
370 | } | 371 | } |
371 | 372 | ||
372 | if (hret != H_SUCCESS) { | 373 | if (hret != H_SUCCESS) { |
373 | ehea_error("destroy EQ failed"); | 374 | pr_err("destroy EQ failed\n"); |
374 | return -EIO; | 375 | return -EIO; |
375 | } | 376 | } |
376 | 377 | ||
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, | |||
395 | for (cnt = 0; cnt < nr_pages; cnt++) { | 396 | for (cnt = 0; cnt < nr_pages; cnt++) { |
396 | vpage = hw_qpageit_get_inc(hw_queue); | 397 | vpage = hw_qpageit_get_inc(hw_queue); |
397 | if (!vpage) { | 398 | if (!vpage) { |
398 | ehea_error("hw_qpageit_get_inc failed"); | 399 | pr_err("hw_qpageit_get_inc failed\n"); |
399 | goto out_kill_hwq; | 400 | goto out_kill_hwq; |
400 | } | 401 | } |
401 | rpage = virt_to_abs(vpage); | 402 | rpage = virt_to_abs(vpage); |
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, | |||
403 | 0, h_call_q_selector, | 404 | 0, h_call_q_selector, |
404 | qp->fw_handle, rpage, 1); | 405 | qp->fw_handle, rpage, 1); |
405 | if (hret < H_SUCCESS) { | 406 | if (hret < H_SUCCESS) { |
406 | ehea_error("register_rpage_qp failed"); | 407 | pr_err("register_rpage_qp failed\n"); |
407 | goto out_kill_hwq; | 408 | goto out_kill_hwq; |
408 | } | 409 | } |
409 | } | 410 | } |
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
432 | 433 | ||
433 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | 434 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
434 | if (!qp) { | 435 | if (!qp) { |
435 | ehea_error("no mem for qp"); | 436 | pr_err("no mem for qp\n"); |
436 | return NULL; | 437 | return NULL; |
437 | } | 438 | } |
438 | 439 | ||
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
441 | hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, | 442 | hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, |
442 | &qp->fw_handle, &qp->epas); | 443 | &qp->fw_handle, &qp->epas); |
443 | if (hret != H_SUCCESS) { | 444 | if (hret != H_SUCCESS) { |
444 | ehea_error("ehea_h_alloc_resource_qp failed"); | 445 | pr_err("ehea_h_alloc_resource_qp failed\n"); |
445 | goto out_freemem; | 446 | goto out_freemem; |
446 | } | 447 | } |
447 | 448 | ||
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
455 | init_attr->act_wqe_size_enc_sq, adapter, | 456 | init_attr->act_wqe_size_enc_sq, adapter, |
456 | 0); | 457 | 0); |
457 | if (ret) { | 458 | if (ret) { |
458 | ehea_error("can't register for sq ret=%x", ret); | 459 | pr_err("can't register for sq ret=%x\n", ret); |
459 | goto out_freeres; | 460 | goto out_freeres; |
460 | } | 461 | } |
461 | 462 | ||
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
465 | init_attr->act_wqe_size_enc_rq1, | 466 | init_attr->act_wqe_size_enc_rq1, |
466 | adapter, 1); | 467 | adapter, 1); |
467 | if (ret) { | 468 | if (ret) { |
468 | ehea_error("can't register for rq1 ret=%x", ret); | 469 | pr_err("can't register for rq1 ret=%x\n", ret); |
469 | goto out_kill_hwsq; | 470 | goto out_kill_hwsq; |
470 | } | 471 | } |
471 | 472 | ||
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
476 | init_attr->act_wqe_size_enc_rq2, | 477 | init_attr->act_wqe_size_enc_rq2, |
477 | adapter, 2); | 478 | adapter, 2); |
478 | if (ret) { | 479 | if (ret) { |
479 | ehea_error("can't register for rq2 ret=%x", ret); | 480 | pr_err("can't register for rq2 ret=%x\n", ret); |
480 | goto out_kill_hwr1q; | 481 | goto out_kill_hwr1q; |
481 | } | 482 | } |
482 | } | 483 | } |
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
488 | init_attr->act_wqe_size_enc_rq3, | 489 | init_attr->act_wqe_size_enc_rq3, |
489 | adapter, 3); | 490 | adapter, 3); |
490 | if (ret) { | 491 | if (ret) { |
491 | ehea_error("can't register for rq3 ret=%x", ret); | 492 | pr_err("can't register for rq3 ret=%x\n", ret); |
492 | goto out_kill_hwr2q; | 493 | goto out_kill_hwr2q; |
493 | } | 494 | } |
494 | } | 495 | } |
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp) | |||
553 | } | 554 | } |
554 | 555 | ||
555 | if (hret != H_SUCCESS) { | 556 | if (hret != H_SUCCESS) { |
556 | ehea_error("destroy QP failed"); | 557 | pr_err("destroy QP failed\n"); |
557 | return -EIO; | 558 | return -EIO; |
558 | } | 559 | } |
559 | 560 | ||
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, | |||
842 | (hret != H_PAGE_REGISTERED)) { | 843 | (hret != H_PAGE_REGISTERED)) { |
843 | ehea_h_free_resource(adapter->handle, mr->handle, | 844 | ehea_h_free_resource(adapter->handle, mr->handle, |
844 | FORCE_FREE); | 845 | FORCE_FREE); |
845 | ehea_error("register_rpage_mr failed"); | 846 | pr_err("register_rpage_mr failed\n"); |
846 | return hret; | 847 | return hret; |
847 | } | 848 | } |
848 | } | 849 | } |
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
896 | 897 | ||
897 | pt = (void *)get_zeroed_page(GFP_KERNEL); | 898 | pt = (void *)get_zeroed_page(GFP_KERNEL); |
898 | if (!pt) { | 899 | if (!pt) { |
899 | ehea_error("no mem"); | 900 | pr_err("no mem\n"); |
900 | ret = -ENOMEM; | 901 | ret = -ENOMEM; |
901 | goto out; | 902 | goto out; |
902 | } | 903 | } |
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
906 | &mr->handle, &mr->lkey); | 907 | &mr->handle, &mr->lkey); |
907 | 908 | ||
908 | if (hret != H_SUCCESS) { | 909 | if (hret != H_SUCCESS) { |
909 | ehea_error("alloc_resource_mr failed"); | 910 | pr_err("alloc_resource_mr failed\n"); |
910 | ret = -EIO; | 911 | ret = -EIO; |
911 | goto out; | 912 | goto out; |
912 | } | 913 | } |
913 | 914 | ||
914 | if (!ehea_bmap) { | 915 | if (!ehea_bmap) { |
915 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); | 916 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); |
916 | ehea_error("no busmap available"); | 917 | pr_err("no busmap available\n"); |
917 | ret = -EIO; | 918 | ret = -EIO; |
918 | goto out; | 919 | goto out; |
919 | } | 920 | } |
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
929 | 930 | ||
930 | if (hret != H_SUCCESS) { | 931 | if (hret != H_SUCCESS) { |
931 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); | 932 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); |
932 | ehea_error("registering mr failed"); | 933 | pr_err("registering mr failed\n"); |
933 | ret = -EIO; | 934 | ret = -EIO; |
934 | goto out; | 935 | goto out; |
935 | } | 936 | } |
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr) | |||
952 | hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, | 953 | hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, |
953 | FORCE_FREE); | 954 | FORCE_FREE); |
954 | if (hret != H_SUCCESS) { | 955 | if (hret != H_SUCCESS) { |
955 | ehea_error("destroy MR failed"); | 956 | pr_err("destroy MR failed\n"); |
956 | return -EIO; | 957 | return -EIO; |
957 | } | 958 | } |
958 | 959 | ||
@@ -987,14 +988,14 @@ void print_error_data(u64 *data) | |||
987 | length = EHEA_PAGESIZE; | 988 | length = EHEA_PAGESIZE; |
988 | 989 | ||
989 | if (type == EHEA_AER_RESTYPE_QP) | 990 | if (type == EHEA_AER_RESTYPE_QP) |
990 | ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " | 991 | pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n", |
991 | "port=%llX", resource, data[6], data[12], data[22]); | 992 | resource, data[6], data[12], data[22]); |
992 | else if (type == EHEA_AER_RESTYPE_CQ) | 993 | else if (type == EHEA_AER_RESTYPE_CQ) |
993 | ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, | 994 | pr_err("CQ (resource=%llX) state: AER=0x%llX\n", |
994 | data[6]); | 995 | resource, data[6]); |
995 | else if (type == EHEA_AER_RESTYPE_EQ) | 996 | else if (type == EHEA_AER_RESTYPE_EQ) |
996 | ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, | 997 | pr_err("EQ (resource=%llX) state: AER=0x%llX\n", |
997 | data[6]); | 998 | resource, data[6]); |
998 | 999 | ||
999 | ehea_dump(data, length, "error data"); | 1000 | ehea_dump(data, length, "error data"); |
1000 | } | 1001 | } |
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle, | |||
1008 | 1009 | ||
1009 | rblock = (void *)get_zeroed_page(GFP_KERNEL); | 1010 | rblock = (void *)get_zeroed_page(GFP_KERNEL); |
1010 | if (!rblock) { | 1011 | if (!rblock) { |
1011 | ehea_error("Cannot allocate rblock memory."); | 1012 | pr_err("Cannot allocate rblock memory\n"); |
1012 | goto out; | 1013 | goto out; |
1013 | } | 1014 | } |
1014 | 1015 | ||
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle, | |||
1020 | *aerr = rblock[12]; | 1021 | *aerr = rblock[12]; |
1021 | print_error_data(rblock); | 1022 | print_error_data(rblock); |
1022 | } else if (ret == H_R_STATE) { | 1023 | } else if (ret == H_R_STATE) { |
1023 | ehea_error("No error data available: %llX.", res_handle); | 1024 | pr_err("No error data available: %llX\n", res_handle); |
1024 | } else | 1025 | } else |
1025 | ehea_error("Error data could not be fetched: %llX", res_handle); | 1026 | pr_err("Error data could not be fetched: %llX\n", res_handle); |
1026 | 1027 | ||
1027 | free_page((unsigned long)rblock); | 1028 | free_page((unsigned long)rblock); |
1028 | out: | 1029 | out: |