author    David S. Miller <davem@davemloft.net>    2010-12-06 23:45:28 -0500
committer David S. Miller <davem@davemloft.net>    2010-12-06 23:45:28 -0500
commit    cfa969e385a23e4c85f50e0ed5de25a2e18bf9d4 (patch)
tree      fccb9ec6086ce79db860678a77d72269d197ef6e /drivers/net/ehea
parent    40fe7d88ab3eb711b307fab1b92aa6870914c975 (diff)
Revert "ehea: Use the standard logging functions"
This reverts commit 539995d18649023199986424d140f1d620372ce5.

As reported by Stephen Rothwell, this breaks the build.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h            13
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c    18
-rw-r--r--  drivers/net/ehea/ehea_main.c      407
-rw-r--r--  drivers/net/ehea/ehea_phyp.c       40
-rw-r--r--  drivers/net/ehea/ehea_qmr.c        89
5 files changed, 293 insertions(+), 274 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 45e709f7609f..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -130,6 +130,19 @@
130 130
131/* utility functions */ 131/* utility functions */
132 132
133#define ehea_info(fmt, args...) \
134 printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
135
136#define ehea_error(fmt, args...) \
137 printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
138
139#ifdef DEBUG
140#define ehea_debug(fmt, args...) \
141 printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
142#else
143#define ehea_debug(fmt, args...) do {} while (0)
144#endif
145
133void ehea_dump(void *adr, int len, char *msg); 146void ehea_dump(void *adr, int len, char *msg);
134 147
135#define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) 148#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
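For readers unfamiliar with the driver-local macros this hunk restores, the following is an illustrative userspace sketch, not part of the commit: it mirrors the GNU variadic-macro pattern used above (DRV_NAME prefix, __func__ in error messages, debug output compiled away unless DEBUG is defined). The in-kernel versions print through printk() with KERN_INFO/KERN_ERR/KERN_DEBUG rather than stderr.

#include <stdio.h>

#define DRV_NAME "ehea"

#define ehea_info(fmt, args...) \
        fprintf(stderr, DRV_NAME ": " fmt "\n", ## args)

#define ehea_error(fmt, args...) \
        fprintf(stderr, DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)

#ifdef DEBUG
#define ehea_debug(fmt, args...) \
        fprintf(stderr, DRV_NAME ": " fmt "\n", ## args)
#else
#define ehea_debug(fmt, args...) do {} while (0)
#endif

int main(void)
{
        ehea_info("Port speed successfully set: %dMbps %s Duplex", 1000, "Full");
        ehea_error("no mem for cb0");        /* "ehea: Error in main: no mem for cb0" */
        ehea_debug("eqe=%p", (void *)0);     /* compiled out unless built with -DDEBUG */
        return 0;
}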
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 273fedbb6d0e..75b099ce49c9 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,8 +26,6 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include "ehea.h" 29#include "ehea.h"
32#include "ehea_phyp.h" 30#include "ehea_phyp.h"
33 31
@@ -120,10 +118,10 @@ doit:
120 ret = ehea_set_portspeed(port, sp); 118 ret = ehea_set_portspeed(port, sp);
121 119
122 if (!ret) 120 if (!ret)
123 netdev_info(dev, 121 ehea_info("%s: Port speed successfully set: %dMbps "
124 "Port speed successfully set: %dMbps %s Duplex\n", 122 "%s Duplex",
125 port->port_speed, 123 port->netdev->name, port->port_speed,
126 port->full_duplex == 1 ? "Full" : "Half"); 124 port->full_duplex == 1 ? "Full" : "Half");
127out: 125out:
128 return ret; 126 return ret;
129} 127}
@@ -136,10 +134,10 @@ static int ehea_nway_reset(struct net_device *dev)
136 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); 134 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
137 135
138 if (!ret) 136 if (!ret)
139 netdev_info(port->netdev, 137 ehea_info("%s: Port speed successfully set: %dMbps "
140 "Port speed successfully set: %dMbps %s Duplex\n", 138 "%s Duplex",
141 port->port_speed, 139 port->netdev->name, port->port_speed,
142 port->full_duplex == 1 ? "Full" : "Half"); 140 port->full_duplex == 1 ? "Full" : "Half");
143 return ret; 141 return ret;
144} 142}
145 143
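The pr_fmt() definition that the revert deletes from ehea_ethtool.c and ehea_main.c is how the generic pr_info()/pr_err() helpers pick up the "ehea: " module prefix: in current kernels the pr_*() macros expand pr_fmt() around the format string before handing it to printk(). A minimal userspace approximation, illustrative only and not part of the commit:

#include <stdio.h>

#define KBUILD_MODNAME "ehea"

/* Must be defined before the pr_*() helpers, as in the kernel sources. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        /* Both lines carry the module prefix supplied by pr_fmt(). */
        pr_err("query_ehea_port failed\n");
        pr_info("Port speed successfully set: %dMbps %s Duplex\n", 1000, "Full");
        return 0;
}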
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f700c76d3e60..a84c389d3db7 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,8 +26,6 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/in.h> 29#include <linux/in.h>
32#include <linux/ip.h> 30#include <linux/ip.h>
33#include <linux/tcp.h> 31#include <linux/tcp.h>
@@ -138,8 +136,8 @@ void ehea_dump(void *adr, int len, char *msg)
138 int x; 136 int x;
139 unsigned char *deb = adr; 137 unsigned char *deb = adr;
140 for (x = 0; x < len; x += 16) { 138 for (x = 0; x < len; x += 16) {
141 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n", 139 printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
142 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 140 deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
143 deb += 16; 141 deb += 16;
144 } 142 }
145} 143}
@@ -339,7 +337,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
339 337
340 cb2 = (void *)get_zeroed_page(GFP_KERNEL); 338 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
341 if (!cb2) { 339 if (!cb2) {
342 netdev_err(dev, "no mem for cb2\n"); 340 ehea_error("no mem for cb2");
343 goto out; 341 goto out;
344 } 342 }
345 343
@@ -347,7 +345,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
347 port->logical_port_id, 345 port->logical_port_id,
348 H_PORT_CB2, H_PORT_CB2_ALL, cb2); 346 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
349 if (hret != H_SUCCESS) { 347 if (hret != H_SUCCESS) {
350 netdev_err(dev, "query_ehea_port failed\n"); 348 ehea_error("query_ehea_port failed");
351 goto out_herr; 349 goto out_herr;
352 } 350 }
353 351
@@ -463,9 +461,8 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
463 if (!skb) { 461 if (!skb) {
464 q_skba->os_skbs = fill_wqes - i; 462 q_skba->os_skbs = fill_wqes - i;
465 if (q_skba->os_skbs == q_skba->len - 2) { 463 if (q_skba->os_skbs == q_skba->len - 2) {
466 netdev_info(pr->port->netdev, 464 ehea_info("%s: rq%i ran dry - no mem for skb",
467 "rq%i ran dry - no mem for skb\n", 465 pr->port->netdev->name, rq_nr);
468 rq_nr);
469 ret = -ENOMEM; 466 ret = -ENOMEM;
470 } 467 }
471 break; 468 break;
@@ -630,8 +627,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
630 627
631 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { 628 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
632 if (netif_msg_rx_err(pr->port)) { 629 if (netif_msg_rx_err(pr->port)) {
633 pr_err("Critical receive error for QP %d. Resetting port.\n", 630 ehea_error("Critical receive error for QP %d. "
634 pr->qp->init_attr.qp_nr); 631 "Resetting port.", pr->qp->init_attr.qp_nr);
635 ehea_dump(cqe, sizeof(*cqe), "CQE"); 632 ehea_dump(cqe, sizeof(*cqe), "CQE");
636 } 633 }
637 ehea_schedule_port_reset(pr->port); 634 ehea_schedule_port_reset(pr->port);
@@ -733,8 +730,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
733 skb_arr_rq1_len, 730 skb_arr_rq1_len,
734 wqe_index); 731 wqe_index);
735 if (unlikely(!skb)) { 732 if (unlikely(!skb)) {
736 netif_err(port, rx_err, dev, 733 if (netif_msg_rx_err(port))
737 "LL rq1: skb=NULL\n"); 734 ehea_error("LL rq1: skb=NULL");
738 735
739 skb = netdev_alloc_skb(dev, 736 skb = netdev_alloc_skb(dev,
740 EHEA_L_PKT_SIZE); 737 EHEA_L_PKT_SIZE);
@@ -749,8 +746,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
749 skb = get_skb_by_index(skb_arr_rq2, 746 skb = get_skb_by_index(skb_arr_rq2,
750 skb_arr_rq2_len, cqe); 747 skb_arr_rq2_len, cqe);
751 if (unlikely(!skb)) { 748 if (unlikely(!skb)) {
752 netif_err(port, rx_err, dev, 749 if (netif_msg_rx_err(port))
753 "rq2: skb=NULL\n"); 750 ehea_error("rq2: skb=NULL");
754 break; 751 break;
755 } 752 }
756 ehea_fill_skb(dev, skb, cqe); 753 ehea_fill_skb(dev, skb, cqe);
@@ -760,8 +757,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
760 skb = get_skb_by_index(skb_arr_rq3, 757 skb = get_skb_by_index(skb_arr_rq3,
761 skb_arr_rq3_len, cqe); 758 skb_arr_rq3_len, cqe);
762 if (unlikely(!skb)) { 759 if (unlikely(!skb)) {
763 netif_err(port, rx_err, dev, 760 if (netif_msg_rx_err(port))
764 "rq3: skb=NULL\n"); 761 ehea_error("rq3: skb=NULL");
765 break; 762 break;
766 } 763 }
767 ehea_fill_skb(dev, skb, cqe); 764 ehea_fill_skb(dev, skb, cqe);
@@ -833,7 +830,7 @@ static void check_sqs(struct ehea_port *port)
833 msecs_to_jiffies(100)); 830 msecs_to_jiffies(100));
834 831
835 if (!ret) { 832 if (!ret) {
836 pr_err("HW/SW queues out of sync\n"); 833 ehea_error("HW/SW queues out of sync");
837 ehea_schedule_port_reset(pr->port); 834 ehea_schedule_port_reset(pr->port);
838 return; 835 return;
839 } 836 }
@@ -866,14 +863,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
866 } 863 }
867 864
868 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 865 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
869 pr_err("Bad send completion status=0x%04X\n", 866 ehea_error("Bad send completion status=0x%04X",
870 cqe->status); 867 cqe->status);
871 868
872 if (netif_msg_tx_err(pr->port)) 869 if (netif_msg_tx_err(pr->port))
873 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 870 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
874 871
875 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { 872 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
876 pr_err("Resetting port\n"); 873 ehea_error("Resetting port");
877 ehea_schedule_port_reset(pr->port); 874 ehea_schedule_port_reset(pr->port);
878 break; 875 break;
879 } 876 }
@@ -991,8 +988,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
991 988
992 while (eqe) { 989 while (eqe) {
993 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); 990 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
994 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n", 991 ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
995 eqe->entry, qp_token); 992 eqe->entry, qp_token);
996 993
997 qp = port->port_res[qp_token].qp; 994 qp = port->port_res[qp_token].qp;
998 995
@@ -1010,7 +1007,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
1010 } 1007 }
1011 1008
1012 if (reset_port) { 1009 if (reset_port) {
1013 pr_err("Resetting port\n"); 1010 ehea_error("Resetting port");
1014 ehea_schedule_port_reset(port); 1011 ehea_schedule_port_reset(port);
1015 } 1012 }
1016 1013
@@ -1038,7 +1035,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
1038 /* may be called via ehea_neq_tasklet() */ 1035 /* may be called via ehea_neq_tasklet() */
1039 cb0 = (void *)get_zeroed_page(GFP_ATOMIC); 1036 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
1040 if (!cb0) { 1037 if (!cb0) {
1041 pr_err("no mem for cb0\n"); 1038 ehea_error("no mem for cb0");
1042 ret = -ENOMEM; 1039 ret = -ENOMEM;
1043 goto out; 1040 goto out;
1044 } 1041 }
@@ -1130,7 +1127,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1130 1127
1131 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 1128 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1132 if (!cb4) { 1129 if (!cb4) {
1133 pr_err("no mem for cb4\n"); 1130 ehea_error("no mem for cb4");
1134 ret = -ENOMEM; 1131 ret = -ENOMEM;
1135 goto out; 1132 goto out;
1136 } 1133 }
@@ -1181,16 +1178,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1181 break; 1178 break;
1182 } 1179 }
1183 } else { 1180 } else {
1184 pr_err("Failed sensing port speed\n"); 1181 ehea_error("Failed sensing port speed");
1185 ret = -EIO; 1182 ret = -EIO;
1186 } 1183 }
1187 } else { 1184 } else {
1188 if (hret == H_AUTHORITY) { 1185 if (hret == H_AUTHORITY) {
1189 pr_info("Hypervisor denied setting port speed\n"); 1186 ehea_info("Hypervisor denied setting port speed");
1190 ret = -EPERM; 1187 ret = -EPERM;
1191 } else { 1188 } else {
1192 ret = -EIO; 1189 ret = -EIO;
1193 pr_err("Failed setting port speed\n"); 1190 ehea_error("Failed setting port speed");
1194 } 1191 }
1195 } 1192 }
1196 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1193 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1207,78 +1204,80 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1207 u8 ec; 1204 u8 ec;
1208 u8 portnum; 1205 u8 portnum;
1209 struct ehea_port *port; 1206 struct ehea_port *port;
1210 struct net_device *dev;
1211 1207
1212 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); 1208 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1213 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); 1209 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1214 port = ehea_get_port(adapter, portnum); 1210 port = ehea_get_port(adapter, portnum);
1215 dev = port->netdev;
1216 1211
1217 switch (ec) { 1212 switch (ec) {
1218 case EHEA_EC_PORTSTATE_CHG: /* port state change */ 1213 case EHEA_EC_PORTSTATE_CHG: /* port state change */
1219 1214
1220 if (!port) { 1215 if (!port) {
1221 netdev_err(dev, "unknown portnum %x\n", portnum); 1216 ehea_error("unknown portnum %x", portnum);
1222 break; 1217 break;
1223 } 1218 }
1224 1219
1225 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { 1220 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1226 if (!netif_carrier_ok(dev)) { 1221 if (!netif_carrier_ok(port->netdev)) {
1227 ret = ehea_sense_port_attr(port); 1222 ret = ehea_sense_port_attr(port);
1228 if (ret) { 1223 if (ret) {
1229 netdev_err(dev, "failed resensing port attributes\n"); 1224 ehea_error("failed resensing port "
1225 "attributes");
1230 break; 1226 break;
1231 } 1227 }
1232 1228
1233 netif_info(port, link, dev, 1229 if (netif_msg_link(port))
1234 "Logical port up: %dMbps %s Duplex\n", 1230 ehea_info("%s: Logical port up: %dMbps "
1235 port->port_speed, 1231 "%s Duplex",
1236 port->full_duplex == 1 ? 1232 port->netdev->name,
1237 "Full" : "Half"); 1233 port->port_speed,
1234 port->full_duplex ==
1235 1 ? "Full" : "Half");
1238 1236
1239 netif_carrier_on(dev); 1237 netif_carrier_on(port->netdev);
1240 netif_wake_queue(dev); 1238 netif_wake_queue(port->netdev);
1241 } 1239 }
1242 } else 1240 } else
1243 if (netif_carrier_ok(dev)) { 1241 if (netif_carrier_ok(port->netdev)) {
1244 netif_info(port, link, dev, 1242 if (netif_msg_link(port))
1245 "Logical port down\n"); 1243 ehea_info("%s: Logical port down",
1246 netif_carrier_off(dev); 1244 port->netdev->name);
1247 netif_stop_queue(dev); 1245 netif_carrier_off(port->netdev);
1246 netif_stop_queue(port->netdev);
1248 } 1247 }
1249 1248
1250 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { 1249 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1251 port->phy_link = EHEA_PHY_LINK_UP; 1250 port->phy_link = EHEA_PHY_LINK_UP;
1252 netif_info(port, link, dev, 1251 if (netif_msg_link(port))
1253 "Physical port up\n"); 1252 ehea_info("%s: Physical port up",
1253 port->netdev->name);
1254 if (prop_carrier_state) 1254 if (prop_carrier_state)
1255 netif_carrier_on(dev); 1255 netif_carrier_on(port->netdev);
1256 } else { 1256 } else {
1257 port->phy_link = EHEA_PHY_LINK_DOWN; 1257 port->phy_link = EHEA_PHY_LINK_DOWN;
1258 netif_info(port, link, dev, 1258 if (netif_msg_link(port))
1259 "Physical port down\n"); 1259 ehea_info("%s: Physical port down",
1260 port->netdev->name);
1260 if (prop_carrier_state) 1261 if (prop_carrier_state)
1261 netif_carrier_off(dev); 1262 netif_carrier_off(port->netdev);
1262 } 1263 }
1263 1264
1264 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) 1265 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1265 netdev_info(dev, 1266 ehea_info("External switch port is primary port");
1266 "External switch port is primary port\n");
1267 else 1267 else
1268 netdev_info(dev, 1268 ehea_info("External switch port is backup port");
1269 "External switch port is backup port\n");
1270 1269
1271 break; 1270 break;
1272 case EHEA_EC_ADAPTER_MALFUNC: 1271 case EHEA_EC_ADAPTER_MALFUNC:
1273 netdev_err(dev, "Adapter malfunction\n"); 1272 ehea_error("Adapter malfunction");
1274 break; 1273 break;
1275 case EHEA_EC_PORT_MALFUNC: 1274 case EHEA_EC_PORT_MALFUNC:
1276 netdev_info(dev, "Port malfunction\n"); 1275 ehea_info("Port malfunction: Device: %s", port->netdev->name);
1277 netif_carrier_off(dev); 1276 netif_carrier_off(port->netdev);
1278 netif_stop_queue(dev); 1277 netif_stop_queue(port->netdev);
1279 break; 1278 break;
1280 default: 1279 default:
1281 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe); 1280 ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
1282 break; 1281 break;
1283 } 1282 }
1284} 1283}
@@ -1290,13 +1289,13 @@ static void ehea_neq_tasklet(unsigned long data)
1290 u64 event_mask; 1289 u64 event_mask;
1291 1290
1292 eqe = ehea_poll_eq(adapter->neq); 1291 eqe = ehea_poll_eq(adapter->neq);
1293 pr_debug("eqe=%p\n", eqe); 1292 ehea_debug("eqe=%p", eqe);
1294 1293
1295 while (eqe) { 1294 while (eqe) {
1296 pr_debug("*eqe=%lx\n", eqe->entry); 1295 ehea_debug("*eqe=%lx", eqe->entry);
1297 ehea_parse_eqe(adapter, eqe->entry); 1296 ehea_parse_eqe(adapter, eqe->entry);
1298 eqe = ehea_poll_eq(adapter->neq); 1297 eqe = ehea_poll_eq(adapter->neq);
1299 pr_debug("next eqe=%p\n", eqe); 1298 ehea_debug("next eqe=%p", eqe);
1300 } 1299 }
1301 1300
1302 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) 1301 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1345,14 +1344,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
1345 ehea_qp_aff_irq_handler, 1344 ehea_qp_aff_irq_handler,
1346 IRQF_DISABLED, port->int_aff_name, port); 1345 IRQF_DISABLED, port->int_aff_name, port);
1347 if (ret) { 1346 if (ret) {
1348 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n", 1347 ehea_error("failed registering irq for qp_aff_irq_handler:"
1349 port->qp_eq->attr.ist1); 1348 "ist=%X", port->qp_eq->attr.ist1);
1350 goto out_free_qpeq; 1349 goto out_free_qpeq;
1351 } 1350 }
1352 1351
1353 netif_info(port, ifup, dev, 1352 if (netif_msg_ifup(port))
1354 "irq_handle 0x%X for function qp_aff_irq_handler registered\n", 1353 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
1355 port->qp_eq->attr.ist1); 1354 "registered", port->qp_eq->attr.ist1);
1356 1355
1357 1356
1358 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1357 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1364,13 +1363,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
1364 IRQF_DISABLED, pr->int_send_name, 1363 IRQF_DISABLED, pr->int_send_name,
1365 pr); 1364 pr);
1366 if (ret) { 1365 if (ret) {
1367 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n", 1366 ehea_error("failed registering irq for ehea_queue "
1368 i, pr->eq->attr.ist1); 1367 "port_res_nr:%d, ist=%X", i,
1368 pr->eq->attr.ist1);
1369 goto out_free_req; 1369 goto out_free_req;
1370 } 1370 }
1371 netif_info(port, ifup, dev, 1371 if (netif_msg_ifup(port))
1372 "irq_handle 0x%X for function ehea_queue_int %d registered\n", 1372 ehea_info("irq_handle 0x%X for function ehea_queue_int "
1373 pr->eq->attr.ist1, i); 1373 "%d registered", pr->eq->attr.ist1, i);
1374 } 1374 }
1375out: 1375out:
1376 return ret; 1376 return ret;
@@ -1401,16 +1401,16 @@ static void ehea_free_interrupts(struct net_device *dev)
1401 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1401 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1402 pr = &port->port_res[i]; 1402 pr = &port->port_res[i];
1403 ibmebus_free_irq(pr->eq->attr.ist1, pr); 1403 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1404 netif_info(port, intr, dev, 1404 if (netif_msg_intr(port))
1405 "free send irq for res %d with handle 0x%X\n", 1405 ehea_info("free send irq for res %d with handle 0x%X",
1406 i, pr->eq->attr.ist1); 1406 i, pr->eq->attr.ist1);
1407 } 1407 }
1408 1408
1409 /* associated events */ 1409 /* associated events */
1410 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1410 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1411 netif_info(port, intr, dev, 1411 if (netif_msg_intr(port))
1412 "associated event interrupt for handle 0x%X freed\n", 1412 ehea_info("associated event interrupt for handle 0x%X freed",
1413 port->qp_eq->attr.ist1); 1413 port->qp_eq->attr.ist1);
1414} 1414}
1415 1415
1416static int ehea_configure_port(struct ehea_port *port) 1416static int ehea_configure_port(struct ehea_port *port)
@@ -1479,7 +1479,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
1479out_free: 1479out_free:
1480 ehea_rem_mr(&pr->send_mr); 1480 ehea_rem_mr(&pr->send_mr);
1481out: 1481out:
1482 pr_err("Generating SMRS failed\n"); 1482 ehea_error("Generating SMRS failed\n");
1483 return -EIO; 1483 return -EIO;
1484} 1484}
1485 1485
@@ -1534,7 +1534,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1534 1534
1535 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1535 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1536 if (!pr->eq) { 1536 if (!pr->eq) {
1537 pr_err("create_eq failed (eq)\n"); 1537 ehea_error("create_eq failed (eq)");
1538 goto out_free; 1538 goto out_free;
1539 } 1539 }
1540 1540
@@ -1542,7 +1542,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1542 pr->eq->fw_handle, 1542 pr->eq->fw_handle,
1543 port->logical_port_id); 1543 port->logical_port_id);
1544 if (!pr->recv_cq) { 1544 if (!pr->recv_cq) {
1545 pr_err("create_cq failed (cq_recv)\n"); 1545 ehea_error("create_cq failed (cq_recv)");
1546 goto out_free; 1546 goto out_free;
1547 } 1547 }
1548 1548
@@ -1550,19 +1550,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1550 pr->eq->fw_handle, 1550 pr->eq->fw_handle,
1551 port->logical_port_id); 1551 port->logical_port_id);
1552 if (!pr->send_cq) { 1552 if (!pr->send_cq) {
1553 pr_err("create_cq failed (cq_send)\n"); 1553 ehea_error("create_cq failed (cq_send)");
1554 goto out_free; 1554 goto out_free;
1555 } 1555 }
1556 1556
1557 if (netif_msg_ifup(port)) 1557 if (netif_msg_ifup(port))
1558 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n", 1558 ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
1559 pr->send_cq->attr.act_nr_of_cqes, 1559 pr->send_cq->attr.act_nr_of_cqes,
1560 pr->recv_cq->attr.act_nr_of_cqes); 1560 pr->recv_cq->attr.act_nr_of_cqes);
1561 1561
1562 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 1562 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1563 if (!init_attr) { 1563 if (!init_attr) {
1564 ret = -ENOMEM; 1564 ret = -ENOMEM;
1565 pr_err("no mem for ehea_qp_init_attr\n"); 1565 ehea_error("no mem for ehea_qp_init_attr");
1566 goto out_free; 1566 goto out_free;
1567 } 1567 }
1568 1568
@@ -1587,18 +1587,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1587 1587
1588 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); 1588 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1589 if (!pr->qp) { 1589 if (!pr->qp) {
1590 pr_err("create_qp failed\n"); 1590 ehea_error("create_qp failed");
1591 ret = -EIO; 1591 ret = -EIO;
1592 goto out_free; 1592 goto out_free;
1593 } 1593 }
1594 1594
1595 if (netif_msg_ifup(port)) 1595 if (netif_msg_ifup(port))
1596 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n", 1596 ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
1597 init_attr->qp_nr, 1597 "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
1598 init_attr->act_nr_send_wqes, 1598 init_attr->act_nr_send_wqes,
1599 init_attr->act_nr_rwqes_rq1, 1599 init_attr->act_nr_rwqes_rq1,
1600 init_attr->act_nr_rwqes_rq2, 1600 init_attr->act_nr_rwqes_rq2,
1601 init_attr->act_nr_rwqes_rq3); 1601 init_attr->act_nr_rwqes_rq3);
1602 1602
1603 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; 1603 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1604 1604
@@ -1749,7 +1749,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1749 swqe->descriptors++; 1749 swqe->descriptors++;
1750 } 1750 }
1751 } else 1751 } else
1752 pr_err("cannot handle fragmented headers\n"); 1752 ehea_error("cannot handle fragmented headers");
1753} 1753}
1754 1754
1755static void write_swqe2_nonTSO(struct sk_buff *skb, 1755static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1845,8 +1845,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1845 port->logical_port_id, 1845 port->logical_port_id,
1846 reg_type, port->mac_addr, 0, hcallid); 1846 reg_type, port->mac_addr, 0, hcallid);
1847 if (hret != H_SUCCESS) { 1847 if (hret != H_SUCCESS) {
1848 pr_err("%sregistering bc address failed (tagged)\n", 1848 ehea_error("%sregistering bc address failed (tagged)",
1849 hcallid == H_REG_BCMC ? "" : "de"); 1849 hcallid == H_REG_BCMC ? "" : "de");
1850 ret = -EIO; 1850 ret = -EIO;
1851 goto out_herr; 1851 goto out_herr;
1852 } 1852 }
@@ -1857,8 +1857,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1857 port->logical_port_id, 1857 port->logical_port_id,
1858 reg_type, port->mac_addr, 0, hcallid); 1858 reg_type, port->mac_addr, 0, hcallid);
1859 if (hret != H_SUCCESS) { 1859 if (hret != H_SUCCESS) {
1860 pr_err("%sregistering bc address failed (vlan)\n", 1860 ehea_error("%sregistering bc address failed (vlan)",
1861 hcallid == H_REG_BCMC ? "" : "de"); 1861 hcallid == H_REG_BCMC ? "" : "de");
1862 ret = -EIO; 1862 ret = -EIO;
1863 } 1863 }
1864out_herr: 1864out_herr:
@@ -1880,7 +1880,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1880 1880
1881 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1881 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1882 if (!cb0) { 1882 if (!cb0) {
1883 pr_err("no mem for cb0\n"); 1883 ehea_error("no mem for cb0");
1884 ret = -ENOMEM; 1884 ret = -ENOMEM;
1885 goto out; 1885 goto out;
1886 } 1886 }
@@ -1928,11 +1928,11 @@ out:
1928static void ehea_promiscuous_error(u64 hret, int enable) 1928static void ehea_promiscuous_error(u64 hret, int enable)
1929{ 1929{
1930 if (hret == H_AUTHORITY) 1930 if (hret == H_AUTHORITY)
1931 pr_info("Hypervisor denied %sabling promiscuous mode\n", 1931 ehea_info("Hypervisor denied %sabling promiscuous mode",
1932 enable == 1 ? "en" : "dis"); 1932 enable == 1 ? "en" : "dis");
1933 else 1933 else
1934 pr_err("failed %sabling promiscuous mode\n", 1934 ehea_error("failed %sabling promiscuous mode",
1935 enable == 1 ? "en" : "dis"); 1935 enable == 1 ? "en" : "dis");
1936} 1936}
1937 1937
1938static void ehea_promiscuous(struct net_device *dev, int enable) 1938static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1946,7 +1946,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1946 1946
1947 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1947 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1948 if (!cb7) { 1948 if (!cb7) {
1949 pr_err("no mem for cb7\n"); 1949 ehea_error("no mem for cb7");
1950 goto out; 1950 goto out;
1951 } 1951 }
1952 1952
@@ -2006,7 +2006,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
2006 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, 2006 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
2007 H_DEREG_BCMC); 2007 H_DEREG_BCMC);
2008 if (hret) { 2008 if (hret) {
2009 pr_err("failed deregistering mcast MAC\n"); 2009 ehea_error("failed deregistering mcast MAC");
2010 ret = -EIO; 2010 ret = -EIO;
2011 } 2011 }
2012 2012
@@ -2029,8 +2029,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2029 if (!hret) 2029 if (!hret)
2030 port->allmulti = 1; 2030 port->allmulti = 1;
2031 else 2031 else
2032 netdev_err(dev, 2032 ehea_error("failed enabling IFF_ALLMULTI");
2033 "failed enabling IFF_ALLMULTI\n");
2034 } 2033 }
2035 } else 2034 } else
2036 if (!enable) { 2035 if (!enable) {
@@ -2039,8 +2038,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2039 if (!hret) 2038 if (!hret)
2040 port->allmulti = 0; 2039 port->allmulti = 0;
2041 else 2040 else
2042 netdev_err(dev, 2041 ehea_error("failed disabling IFF_ALLMULTI");
2043 "failed disabling IFF_ALLMULTI\n");
2044 } 2042 }
2045} 2043}
2046 2044
@@ -2051,7 +2049,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2051 2049
2052 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); 2050 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
2053 if (!ehea_mcl_entry) { 2051 if (!ehea_mcl_entry) {
2054 pr_err("no mem for mcl_entry\n"); 2052 ehea_error("no mem for mcl_entry");
2055 return; 2053 return;
2056 } 2054 }
2057 2055
@@ -2064,7 +2062,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2064 if (!hret) 2062 if (!hret)
2065 list_add(&ehea_mcl_entry->list, &port->mc_list->list); 2063 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
2066 else { 2064 else {
2067 pr_err("failed registering mcast MAC\n"); 2065 ehea_error("failed registering mcast MAC");
2068 kfree(ehea_mcl_entry); 2066 kfree(ehea_mcl_entry);
2069 } 2067 }
2070} 2068}
@@ -2097,8 +2095,9 @@ static void ehea_set_multicast_list(struct net_device *dev)
2097 } 2095 }
2098 2096
2099 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { 2097 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
2100 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n", 2098 ehea_info("Mcast registration limit reached (0x%llx). "
2101 port->adapter->max_mc_mac); 2099 "Use ALLMULTI!",
2100 port->adapter->max_mc_mac);
2102 goto out; 2101 goto out;
2103 } 2102 }
2104 2103
@@ -2304,10 +2303,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2304 } 2303 }
2305 pr->swqe_id_counter += 1; 2304 pr->swqe_id_counter += 1;
2306 2305
2307 netif_info(port, tx_queued, dev, 2306 if (netif_msg_tx_queued(port)) {
2308 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr); 2307 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
2309 if (netif_msg_tx_queued(port))
2310 ehea_dump(swqe, 512, "swqe"); 2308 ehea_dump(swqe, 512, "swqe");
2309 }
2311 2310
2312 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 2311 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2313 netif_stop_queue(dev); 2312 netif_stop_queue(dev);
@@ -2343,14 +2342,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2343 2342
2344 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2343 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2345 if (!cb1) { 2344 if (!cb1) {
2346 pr_err("no mem for cb1\n"); 2345 ehea_error("no mem for cb1");
2347 goto out; 2346 goto out;
2348 } 2347 }
2349 2348
2350 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2349 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2351 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2350 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2352 if (hret != H_SUCCESS) 2351 if (hret != H_SUCCESS)
2353 pr_err("modify_ehea_port failed\n"); 2352 ehea_error("modify_ehea_port failed");
2354 2353
2355 free_page((unsigned long)cb1); 2354 free_page((unsigned long)cb1);
2356out: 2355out:
@@ -2367,14 +2366,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2367 2366
2368 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2367 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2369 if (!cb1) { 2368 if (!cb1) {
2370 pr_err("no mem for cb1\n"); 2369 ehea_error("no mem for cb1");
2371 goto out; 2370 goto out;
2372 } 2371 }
2373 2372
2374 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2373 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2375 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2374 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2376 if (hret != H_SUCCESS) { 2375 if (hret != H_SUCCESS) {
2377 pr_err("query_ehea_port failed\n"); 2376 ehea_error("query_ehea_port failed");
2378 goto out; 2377 goto out;
2379 } 2378 }
2380 2379
@@ -2384,7 +2383,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2384 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2383 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2385 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2384 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2386 if (hret != H_SUCCESS) 2385 if (hret != H_SUCCESS)
2387 pr_err("modify_ehea_port failed\n"); 2386 ehea_error("modify_ehea_port failed");
2388out: 2387out:
2389 free_page((unsigned long)cb1); 2388 free_page((unsigned long)cb1);
2390 return; 2389 return;
@@ -2402,14 +2401,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2402 2401
2403 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2402 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2404 if (!cb1) { 2403 if (!cb1) {
2405 pr_err("no mem for cb1\n"); 2404 ehea_error("no mem for cb1");
2406 goto out; 2405 goto out;
2407 } 2406 }
2408 2407
2409 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2408 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2410 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2409 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2411 if (hret != H_SUCCESS) { 2410 if (hret != H_SUCCESS) {
2412 pr_err("query_ehea_port failed\n"); 2411 ehea_error("query_ehea_port failed");
2413 goto out; 2412 goto out;
2414 } 2413 }
2415 2414
@@ -2419,7 +2418,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2419 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2418 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2420 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2419 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2421 if (hret != H_SUCCESS) 2420 if (hret != H_SUCCESS)
2422 pr_err("modify_ehea_port failed\n"); 2421 ehea_error("modify_ehea_port failed");
2423out: 2422out:
2424 free_page((unsigned long)cb1); 2423 free_page((unsigned long)cb1);
2425} 2424}
@@ -2441,7 +2440,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2441 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2440 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2442 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2441 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2443 if (hret != H_SUCCESS) { 2442 if (hret != H_SUCCESS) {
2444 pr_err("query_ehea_qp failed (1)\n"); 2443 ehea_error("query_ehea_qp failed (1)");
2445 goto out; 2444 goto out;
2446 } 2445 }
2447 2446
@@ -2450,14 +2449,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2450 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2449 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2451 &dummy64, &dummy64, &dummy16, &dummy16); 2450 &dummy64, &dummy64, &dummy16, &dummy16);
2452 if (hret != H_SUCCESS) { 2451 if (hret != H_SUCCESS) {
2453 pr_err("modify_ehea_qp failed (1)\n"); 2452 ehea_error("modify_ehea_qp failed (1)");
2454 goto out; 2453 goto out;
2455 } 2454 }
2456 2455
2457 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2456 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2458 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2457 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2459 if (hret != H_SUCCESS) { 2458 if (hret != H_SUCCESS) {
2460 pr_err("query_ehea_qp failed (2)\n"); 2459 ehea_error("query_ehea_qp failed (2)");
2461 goto out; 2460 goto out;
2462 } 2461 }
2463 2462
@@ -2466,14 +2465,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2466 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2465 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2467 &dummy64, &dummy64, &dummy16, &dummy16); 2466 &dummy64, &dummy64, &dummy16, &dummy16);
2468 if (hret != H_SUCCESS) { 2467 if (hret != H_SUCCESS) {
2469 pr_err("modify_ehea_qp failed (2)\n"); 2468 ehea_error("modify_ehea_qp failed (2)");
2470 goto out; 2469 goto out;
2471 } 2470 }
2472 2471
2473 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2472 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2474 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2473 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2475 if (hret != H_SUCCESS) { 2474 if (hret != H_SUCCESS) {
2476 pr_err("query_ehea_qp failed (3)\n"); 2475 ehea_error("query_ehea_qp failed (3)");
2477 goto out; 2476 goto out;
2478 } 2477 }
2479 2478
@@ -2482,14 +2481,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2482 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2481 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2483 &dummy64, &dummy64, &dummy16, &dummy16); 2482 &dummy64, &dummy64, &dummy16, &dummy16);
2484 if (hret != H_SUCCESS) { 2483 if (hret != H_SUCCESS) {
2485 pr_err("modify_ehea_qp failed (3)\n"); 2484 ehea_error("modify_ehea_qp failed (3)");
2486 goto out; 2485 goto out;
2487 } 2486 }
2488 2487
2489 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2488 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2490 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2489 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2491 if (hret != H_SUCCESS) { 2490 if (hret != H_SUCCESS) {
2492 pr_err("query_ehea_qp failed (4)\n"); 2491 ehea_error("query_ehea_qp failed (4)");
2493 goto out; 2492 goto out;
2494 } 2493 }
2495 2494
@@ -2510,7 +2509,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2510 EHEA_MAX_ENTRIES_EQ, 1); 2509 EHEA_MAX_ENTRIES_EQ, 1);
2511 if (!port->qp_eq) { 2510 if (!port->qp_eq) {
2512 ret = -EINVAL; 2511 ret = -EINVAL;
2513 pr_err("ehea_create_eq failed (qp_eq)\n"); 2512 ehea_error("ehea_create_eq failed (qp_eq)");
2514 goto out_kill_eq; 2513 goto out_kill_eq;
2515 } 2514 }
2516 2515
@@ -2591,27 +2590,27 @@ static int ehea_up(struct net_device *dev)
2591 ret = ehea_port_res_setup(port, port->num_def_qps, 2590 ret = ehea_port_res_setup(port, port->num_def_qps,
2592 port->num_add_tx_qps); 2591 port->num_add_tx_qps);
2593 if (ret) { 2592 if (ret) {
2594 netdev_err(dev, "port_res_failed\n"); 2593 ehea_error("port_res_failed");
2595 goto out; 2594 goto out;
2596 } 2595 }
2597 2596
2598 /* Set default QP for this port */ 2597 /* Set default QP for this port */
2599 ret = ehea_configure_port(port); 2598 ret = ehea_configure_port(port);
2600 if (ret) { 2599 if (ret) {
2601 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret); 2600 ehea_error("ehea_configure_port failed. ret:%d", ret);
2602 goto out_clean_pr; 2601 goto out_clean_pr;
2603 } 2602 }
2604 2603
2605 ret = ehea_reg_interrupts(dev); 2604 ret = ehea_reg_interrupts(dev);
2606 if (ret) { 2605 if (ret) {
2607 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret); 2606 ehea_error("reg_interrupts failed. ret:%d", ret);
2608 goto out_clean_pr; 2607 goto out_clean_pr;
2609 } 2608 }
2610 2609
2611 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2610 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2612 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); 2611 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2613 if (ret) { 2612 if (ret) {
2614 netdev_err(dev, "activate_qp failed\n"); 2613 ehea_error("activate_qp failed");
2615 goto out_free_irqs; 2614 goto out_free_irqs;
2616 } 2615 }
2617 } 2616 }
@@ -2619,7 +2618,7 @@ static int ehea_up(struct net_device *dev)
2619 for (i = 0; i < port->num_def_qps; i++) { 2618 for (i = 0; i < port->num_def_qps; i++) {
2620 ret = ehea_fill_port_res(&port->port_res[i]); 2619 ret = ehea_fill_port_res(&port->port_res[i]);
2621 if (ret) { 2620 if (ret) {
2622 netdev_err(dev, "out_free_irqs\n"); 2621 ehea_error("out_free_irqs");
2623 goto out_free_irqs; 2622 goto out_free_irqs;
2624 } 2623 }
2625 } 2624 }
@@ -2642,7 +2641,7 @@ out_clean_pr:
2642 ehea_clean_all_portres(port); 2641 ehea_clean_all_portres(port);
2643out: 2642out:
2644 if (ret) 2643 if (ret)
2645 netdev_info(dev, "Failed starting. ret=%i\n", ret); 2644 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2646 2645
2647 ehea_update_bcmc_registrations(); 2646 ehea_update_bcmc_registrations();
2648 ehea_update_firmware_handles(); 2647 ehea_update_firmware_handles();
@@ -2673,7 +2672,8 @@ static int ehea_open(struct net_device *dev)
2673 2672
2674 mutex_lock(&port->port_lock); 2673 mutex_lock(&port->port_lock);
2675 2674
2676 netif_info(port, ifup, dev, "enabling port\n"); 2675 if (netif_msg_ifup(port))
2676 ehea_info("enabling port %s", dev->name);
2677 2677
2678 ret = ehea_up(dev); 2678 ret = ehea_up(dev);
2679 if (!ret) { 2679 if (!ret) {
@@ -2708,7 +2708,8 @@ static int ehea_down(struct net_device *dev)
2708 2708
2709 ret = ehea_clean_all_portres(port); 2709 ret = ehea_clean_all_portres(port);
2710 if (ret) 2710 if (ret)
2711 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret); 2711 ehea_info("Failed freeing resources for %s. ret=%i",
2712 dev->name, ret);
2712 2713
2713 ehea_update_firmware_handles(); 2714 ehea_update_firmware_handles();
2714 2715
@@ -2720,7 +2721,8 @@ static int ehea_stop(struct net_device *dev)
2720 int ret; 2721 int ret;
2721 struct ehea_port *port = netdev_priv(dev); 2722 struct ehea_port *port = netdev_priv(dev);
2722 2723
2723 netif_info(port, ifdown, dev, "disabling port\n"); 2724 if (netif_msg_ifdown(port))
2725 ehea_info("disabling port %s", dev->name);
2724 2726
2725 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2727 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2726 cancel_work_sync(&port->reset_task); 2728 cancel_work_sync(&port->reset_task);
@@ -2761,7 +2763,7 @@ static void ehea_flush_sq(struct ehea_port *port)
2761 msecs_to_jiffies(100)); 2763 msecs_to_jiffies(100));
2762 2764
2763 if (!ret) { 2765 if (!ret) {
2764 pr_err("WARNING: sq not flushed completely\n"); 2766 ehea_error("WARNING: sq not flushed completely");
2765 break; 2767 break;
2766 } 2768 }
2767 } 2769 }
@@ -2797,7 +2799,7 @@ int ehea_stop_qps(struct net_device *dev)
2797 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2799 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2798 cb0); 2800 cb0);
2799 if (hret != H_SUCCESS) { 2801 if (hret != H_SUCCESS) {
2800 pr_err("query_ehea_qp failed (1)\n"); 2802 ehea_error("query_ehea_qp failed (1)");
2801 goto out; 2803 goto out;
2802 } 2804 }
2803 2805
@@ -2809,7 +2811,7 @@ int ehea_stop_qps(struct net_device *dev)
2809 1), cb0, &dummy64, 2811 1), cb0, &dummy64,
2810 &dummy64, &dummy16, &dummy16); 2812 &dummy64, &dummy16, &dummy16);
2811 if (hret != H_SUCCESS) { 2813 if (hret != H_SUCCESS) {
2812 pr_err("modify_ehea_qp failed (1)\n"); 2814 ehea_error("modify_ehea_qp failed (1)");
2813 goto out; 2815 goto out;
2814 } 2816 }
2815 2817
@@ -2817,14 +2819,14 @@ int ehea_stop_qps(struct net_device *dev)
2817 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2819 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2818 cb0); 2820 cb0);
2819 if (hret != H_SUCCESS) { 2821 if (hret != H_SUCCESS) {
2820 pr_err("query_ehea_qp failed (2)\n"); 2822 ehea_error("query_ehea_qp failed (2)");
2821 goto out; 2823 goto out;
2822 } 2824 }
2823 2825
2824 /* deregister shared memory regions */ 2826 /* deregister shared memory regions */
2825 dret = ehea_rem_smrs(pr); 2827 dret = ehea_rem_smrs(pr);
2826 if (dret) { 2828 if (dret) {
2827 pr_err("unreg shared memory region failed\n"); 2829 ehea_error("unreg shared memory region failed");
2828 goto out; 2830 goto out;
2829 } 2831 }
2830 } 2832 }
@@ -2893,7 +2895,7 @@ int ehea_restart_qps(struct net_device *dev)
2893 2895
2894 ret = ehea_gen_smrs(pr); 2896 ret = ehea_gen_smrs(pr);
2895 if (ret) { 2897 if (ret) {
2896 netdev_err(dev, "creation of shared memory regions failed\n"); 2898 ehea_error("creation of shared memory regions failed");
2897 goto out; 2899 goto out;
2898 } 2900 }
2899 2901
@@ -2904,7 +2906,7 @@ int ehea_restart_qps(struct net_device *dev)
2904 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2906 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2905 cb0); 2907 cb0);
2906 if (hret != H_SUCCESS) { 2908 if (hret != H_SUCCESS) {
2907 netdev_err(dev, "query_ehea_qp failed (1)\n"); 2909 ehea_error("query_ehea_qp failed (1)");
2908 goto out; 2910 goto out;
2909 } 2911 }
2910 2912
@@ -2916,7 +2918,7 @@ int ehea_restart_qps(struct net_device *dev)
2916 1), cb0, &dummy64, 2918 1), cb0, &dummy64,
2917 &dummy64, &dummy16, &dummy16); 2919 &dummy64, &dummy16, &dummy16);
2918 if (hret != H_SUCCESS) { 2920 if (hret != H_SUCCESS) {
2919 netdev_err(dev, "modify_ehea_qp failed (1)\n"); 2921 ehea_error("modify_ehea_qp failed (1)");
2920 goto out; 2922 goto out;
2921 } 2923 }
2922 2924
@@ -2924,7 +2926,7 @@ int ehea_restart_qps(struct net_device *dev)
2924 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2926 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2925 cb0); 2927 cb0);
2926 if (hret != H_SUCCESS) { 2928 if (hret != H_SUCCESS) {
2927 netdev_err(dev, "query_ehea_qp failed (2)\n"); 2929 ehea_error("query_ehea_qp failed (2)");
2928 goto out; 2930 goto out;
2929 } 2931 }
2930 2932
@@ -2961,7 +2963,8 @@ static void ehea_reset_port(struct work_struct *work)
2961 2963
2962 ehea_set_multicast_list(dev); 2964 ehea_set_multicast_list(dev);
2963 2965
2964 netif_info(port, timer, dev, "reset successful\n"); 2966 if (netif_msg_timer(port))
2967 ehea_info("Device %s resetted successfully", dev->name);
2965 2968
2966 port_napi_enable(port); 2969 port_napi_enable(port);
2967 2970
@@ -2976,7 +2979,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2976 int ret, i; 2979 int ret, i;
2977 struct ehea_adapter *adapter; 2980 struct ehea_adapter *adapter;
2978 2981
2979 pr_info("LPAR memory changed - re-initializing driver\n"); 2982 ehea_info("LPAR memory changed - re-initializing driver");
2980 2983
2981 list_for_each_entry(adapter, &adapter_list, list) 2984 list_for_each_entry(adapter, &adapter_list, list)
2982 if (adapter->active_ports) { 2985 if (adapter->active_ports) {
@@ -3008,7 +3011,8 @@ static void ehea_rereg_mrs(struct work_struct *work)
3008 /* Unregister old memory region */ 3011 /* Unregister old memory region */
3009 ret = ehea_rem_mr(&adapter->mr); 3012 ret = ehea_rem_mr(&adapter->mr);
3010 if (ret) { 3013 if (ret) {
3011 pr_err("unregister MR failed - driver inoperable!\n"); 3014 ehea_error("unregister MR failed - driver"
3015 " inoperable!");
3012 goto out; 3016 goto out;
3013 } 3017 }
3014 } 3018 }
@@ -3020,7 +3024,8 @@ static void ehea_rereg_mrs(struct work_struct *work)
3020 /* Register new memory region */ 3024 /* Register new memory region */
3021 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 3025 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
3022 if (ret) { 3026 if (ret) {
3023 pr_err("register MR failed - driver inoperable!\n"); 3027 ehea_error("register MR failed - driver"
3028 " inoperable!");
3024 goto out; 3029 goto out;
3025 } 3030 }
3026 3031
@@ -3043,7 +3048,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3043 } 3048 }
3044 } 3049 }
3045 } 3050 }
3046 pr_info("re-initializing driver complete\n"); 3051 ehea_info("re-initializing driver complete");
3047out: 3052out:
3048 return; 3053 return;
3049} 3054}
@@ -3096,7 +3101,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3096 /* (Try to) enable *jumbo frames */ 3101 /* (Try to) enable *jumbo frames */
3097 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 3102 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
3098 if (!cb4) { 3103 if (!cb4) {
3099 pr_err("no mem for cb4\n"); 3104 ehea_error("no mem for cb4");
3100 ret = -ENOMEM; 3105 ret = -ENOMEM;
3101 goto out; 3106 goto out;
3102 } else { 3107 } else {
@@ -3158,13 +3163,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
3158 3163
3159 ret = of_device_register(&port->ofdev); 3164 ret = of_device_register(&port->ofdev);
3160 if (ret) { 3165 if (ret) {
3161 pr_err("failed to register device. ret=%d\n", ret); 3166 ehea_error("failed to register device. ret=%d", ret);
3162 goto out; 3167 goto out;
3163 } 3168 }
3164 3169
3165 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 3170 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3166 if (ret) { 3171 if (ret) {
3167 pr_err("failed to register attributes, ret=%d\n", ret); 3172 ehea_error("failed to register attributes, ret=%d", ret);
3168 goto out_unreg_of_dev; 3173 goto out_unreg_of_dev;
3169 } 3174 }
3170 3175
@@ -3214,7 +3219,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3214 dev = alloc_etherdev(sizeof(struct ehea_port)); 3219 dev = alloc_etherdev(sizeof(struct ehea_port));
3215 3220
3216 if (!dev) { 3221 if (!dev) {
3217 pr_err("no mem for net_device\n"); 3222 ehea_error("no mem for net_device");
3218 ret = -ENOMEM; 3223 ret = -ENOMEM;
3219 goto out_err; 3224 goto out_err;
3220 } 3225 }
@@ -3265,7 +3270,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3265 3270
3266 ret = register_netdev(dev); 3271 ret = register_netdev(dev);
3267 if (ret) { 3272 if (ret) {
3268 pr_err("register_netdev failed. ret=%d\n", ret); 3273 ehea_error("register_netdev failed. ret=%d", ret);
3269 goto out_unreg_port; 3274 goto out_unreg_port;
3270 } 3275 }
3271 3276
@@ -3273,10 +3278,11 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3273 3278
3274 ret = ehea_get_jumboframe_status(port, &jumbo); 3279 ret = ehea_get_jumboframe_status(port, &jumbo);
3275 if (ret) 3280 if (ret)
3276 netdev_err(dev, "failed determining jumbo frame status\n"); 3281 ehea_error("failed determining jumbo frame status for %s",
3282 port->netdev->name);
3277 3283
3278 netdev_info(dev, "Jumbo frames are %sabled\n", 3284 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3279 jumbo == 1 ? "en" : "dis"); 3285 jumbo == 1 ? "en" : "dis");
3280 3286
3281 adapter->active_ports++; 3287 adapter->active_ports++;
3282 3288
@@ -3292,8 +3298,8 @@ out_free_ethdev:
3292 free_netdev(dev); 3298 free_netdev(dev);
3293 3299
3294out_err: 3300out_err:
3295 pr_err("setting up logical port with id=%d failed, ret=%d\n", 3301 ehea_error("setting up logical port with id=%d failed, ret=%d",
3296 logical_port_id, ret); 3302 logical_port_id, ret);
3297 return NULL; 3303 return NULL;
3298} 3304}
3299 3305
@@ -3321,13 +3327,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3321 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3327 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3322 NULL); 3328 NULL);
3323 if (!dn_log_port_id) { 3329 if (!dn_log_port_id) {
3324 pr_err("bad device node: eth_dn name=%s\n", 3330 ehea_error("bad device node: eth_dn name=%s",
3325 eth_dn->full_name); 3331 eth_dn->full_name);
3326 continue; 3332 continue;
3327 } 3333 }
3328 3334
3329 if (ehea_add_adapter_mr(adapter)) { 3335 if (ehea_add_adapter_mr(adapter)) {
3330 pr_err("creating MR failed\n"); 3336 ehea_error("creating MR failed");
3331 of_node_put(eth_dn); 3337 of_node_put(eth_dn);
3332 return -EIO; 3338 return -EIO;
3333 } 3339 }
@@ -3336,8 +3342,9 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3336 *dn_log_port_id, 3342 *dn_log_port_id,
3337 eth_dn); 3343 eth_dn);
3338 if (adapter->port[i]) 3344 if (adapter->port[i])
3339 netdev_info(adapter->port[i]->netdev, 3345 ehea_info("%s -> logical port id #%d",
3340 "logical port id #%d\n", *dn_log_port_id); 3346 adapter->port[i]->netdev->name,
3347 *dn_log_port_id);
3341 else 3348 else
3342 ehea_remove_adapter_mr(adapter); 3349 ehea_remove_adapter_mr(adapter);
3343 3350
@@ -3382,20 +3389,21 @@ static ssize_t ehea_probe_port(struct device *dev,
3382 port = ehea_get_port(adapter, logical_port_id); 3389 port = ehea_get_port(adapter, logical_port_id);
3383 3390
3384 if (port) { 3391 if (port) {
3385 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", 3392 ehea_info("adding port with logical port id=%d failed. port "
3386 logical_port_id); 3393 "already configured as %s.", logical_port_id,
3394 port->netdev->name);
3387 return -EINVAL; 3395 return -EINVAL;
3388 } 3396 }
3389 3397
3390 eth_dn = ehea_get_eth_dn(adapter, logical_port_id); 3398 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3391 3399
3392 if (!eth_dn) { 3400 if (!eth_dn) {
3393 pr_info("no logical port with id %d found\n", logical_port_id); 3401 ehea_info("no logical port with id %d found", logical_port_id);
3394 return -EINVAL; 3402 return -EINVAL;
3395 } 3403 }
3396 3404
3397 if (ehea_add_adapter_mr(adapter)) { 3405 if (ehea_add_adapter_mr(adapter)) {
3398 pr_err("creating MR failed\n"); 3406 ehea_error("creating MR failed");
3399 return -EIO; 3407 return -EIO;
3400 } 3408 }
3401 3409
@@ -3410,8 +3418,8 @@ static ssize_t ehea_probe_port(struct device *dev,
3410 break; 3418 break;
3411 } 3419 }
3412 3420
3413 netdev_info(port->netdev, "added: (logical port id=%d)\n", 3421 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3414 logical_port_id); 3422 logical_port_id);
3415 } else { 3423 } else {
3416 ehea_remove_adapter_mr(adapter); 3424 ehea_remove_adapter_mr(adapter);
3417 return -EIO; 3425 return -EIO;
@@ -3434,8 +3442,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3434 port = ehea_get_port(adapter, logical_port_id); 3442 port = ehea_get_port(adapter, logical_port_id);
3435 3443
3436 if (port) { 3444 if (port) {
3437 netdev_info(port->netdev, "removed: (logical port id=%d)\n", 3445 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3438 logical_port_id); 3446 logical_port_id);
3439 3447
3440 ehea_shutdown_single_port(port); 3448 ehea_shutdown_single_port(port);
3441 3449
@@ -3445,8 +3453,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3445 break; 3453 break;
3446 } 3454 }
3447 } else { 3455 } else {
3448 pr_err("removing port with logical port id=%d failed. port not configured.\n", 3456 ehea_error("removing port with logical port id=%d failed. port "
3449 logical_port_id); 3457 "not configured.", logical_port_id);
3450 return -EINVAL; 3458 return -EINVAL;
3451 } 3459 }
3452 3460
@@ -3483,7 +3491,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3483 int ret; 3491 int ret;
3484 3492
3485 if (!dev || !dev->dev.of_node) { 3493 if (!dev || !dev->dev.of_node) {
3486 pr_err("Invalid ibmebus device probed\n"); 3494 ehea_error("Invalid ibmebus device probed");
3487 return -EINVAL; 3495 return -EINVAL;
3488 } 3496 }
3489 3497
@@ -3631,17 +3639,17 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3631 3639
3632 switch (action) { 3640 switch (action) {
3633 case MEM_CANCEL_OFFLINE: 3641 case MEM_CANCEL_OFFLINE:
3634 pr_info("memory offlining canceled\n"); 3642 ehea_info("memory offlining canceled");
3635 /* Readd canceled memory block */ 3643 /* Readd canceled memory block */
3636 case MEM_ONLINE: 3644 case MEM_ONLINE:
3637 pr_info("memory is going online\n"); 3645 ehea_info("memory is going online");
3638 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3646 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3639 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3647 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3640 goto out_unlock; 3648 goto out_unlock;
3641 ehea_rereg_mrs(NULL); 3649 ehea_rereg_mrs(NULL);
3642 break; 3650 break;
3643 case MEM_GOING_OFFLINE: 3651 case MEM_GOING_OFFLINE:
3644 pr_info("memory is going offline\n"); 3652 ehea_info("memory is going offline");
3645 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3653 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3646 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3654 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3647 goto out_unlock; 3655 goto out_unlock;
@@ -3667,7 +3675,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
3667 unsigned long action, void *unused) 3675 unsigned long action, void *unused)
3668{ 3676{
3669 if (action == SYS_RESTART) { 3677 if (action == SYS_RESTART) {
3670 pr_info("Reboot: freeing all eHEA resources\n"); 3678 ehea_info("Reboot: freeing all eHEA resources");
3671 ibmebus_unregister_driver(&ehea_driver); 3679 ibmebus_unregister_driver(&ehea_driver);
3672 } 3680 }
3673 return NOTIFY_DONE; 3681 return NOTIFY_DONE;
@@ -3683,22 +3691,22 @@ static int check_module_parm(void)
3683 3691
3684 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || 3692 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3685 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { 3693 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3686 pr_info("Bad parameter: rq1_entries\n"); 3694 ehea_info("Bad parameter: rq1_entries");
3687 ret = -EINVAL; 3695 ret = -EINVAL;
3688 } 3696 }
3689 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || 3697 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3690 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { 3698 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3691 pr_info("Bad parameter: rq2_entries\n"); 3699 ehea_info("Bad parameter: rq2_entries");
3692 ret = -EINVAL; 3700 ret = -EINVAL;
3693 } 3701 }
3694 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || 3702 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3695 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { 3703 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3696 pr_info("Bad parameter: rq3_entries\n"); 3704 ehea_info("Bad parameter: rq3_entries");
3697 ret = -EINVAL; 3705 ret = -EINVAL;
3698 } 3706 }
3699 if ((sq_entries < EHEA_MIN_ENTRIES_QP) || 3707 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3700 (sq_entries > EHEA_MAX_ENTRIES_SQ)) { 3708 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3701 pr_info("Bad parameter: sq_entries\n"); 3709 ehea_info("Bad parameter: sq_entries");
3702 ret = -EINVAL; 3710 ret = -EINVAL;
3703 } 3711 }
3704 3712
@@ -3718,7 +3726,8 @@ int __init ehea_module_init(void)
3718{ 3726{
3719 int ret; 3727 int ret;
3720 3728
3721 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); 3729 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3730 DRV_VERSION);
3722 3731
3723 3732
3724 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); 3733 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
@@ -3738,27 +3747,27 @@ int __init ehea_module_init(void)
3738 3747
3739 ret = register_reboot_notifier(&ehea_reboot_nb); 3748 ret = register_reboot_notifier(&ehea_reboot_nb);
3740 if (ret) 3749 if (ret)
3741 pr_info("failed registering reboot notifier\n"); 3750 ehea_info("failed registering reboot notifier");
3742 3751
3743 ret = register_memory_notifier(&ehea_mem_nb); 3752 ret = register_memory_notifier(&ehea_mem_nb);
3744 if (ret) 3753 if (ret)
3745 pr_info("failed registering memory remove notifier\n"); 3754 ehea_info("failed registering memory remove notifier");
3746 3755
3747 ret = crash_shutdown_register(ehea_crash_handler); 3756 ret = crash_shutdown_register(ehea_crash_handler);
3748 if (ret) 3757 if (ret)
3749 pr_info("failed registering crash handler\n"); 3758 ehea_info("failed registering crash handler");
3750 3759
3751 ret = ibmebus_register_driver(&ehea_driver); 3760 ret = ibmebus_register_driver(&ehea_driver);
3752 if (ret) { 3761 if (ret) {
3753 pr_err("failed registering eHEA device driver on ebus\n"); 3762 ehea_error("failed registering eHEA device driver on ebus");
3754 goto out2; 3763 goto out2;
3755 } 3764 }
3756 3765
3757 ret = driver_create_file(&ehea_driver.driver, 3766 ret = driver_create_file(&ehea_driver.driver,
3758 &driver_attr_capabilities); 3767 &driver_attr_capabilities);
3759 if (ret) { 3768 if (ret) {
3760 pr_err("failed to register capabilities attribute, ret=%d\n", 3769 ehea_error("failed to register capabilities attribute, ret=%d",
3761 ret); 3770 ret);
3762 goto out3; 3771 goto out3;
3763 } 3772 }
3764 3773
@@ -3784,7 +3793,7 @@ static void __exit ehea_module_exit(void)
3784 unregister_reboot_notifier(&ehea_reboot_nb); 3793 unregister_reboot_notifier(&ehea_reboot_nb);
3785 ret = crash_shutdown_unregister(ehea_crash_handler); 3794 ret = crash_shutdown_unregister(ehea_crash_handler);
3786 if (ret) 3795 if (ret)
3787 pr_info("failed unregistering crash handler\n"); 3796 ehea_info("failed unregistering crash handler");
3788 unregister_memory_notifier(&ehea_mem_nb); 3797 unregister_memory_notifier(&ehea_mem_nb);
3789 kfree(ehea_fw_handles.arr); 3798 kfree(ehea_fw_handles.arr);
3790 kfree(ehea_bcmc_regs.arr); 3799 kfree(ehea_bcmc_regs.arr);
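Note on the two logging styles swapped throughout ehea_main.c above: the reverted code used the generic pr_info()/pr_err() helpers, where the caller supplies the trailing "\n" and a per-file pr_fmt() define adds the module prefix, while the restored driver-private ehea_info()/ehea_error() macros append the prefix and newline themselves. The userspace sketch below only approximates that difference; DRV_NAME and the *_demo macro bodies are assumptions made for illustration, not the driver's actual definitions.

#include <stdio.h>

#define DRV_NAME "ehea"

/* pr_*-style: caller supplies the trailing newline, pr_fmt() adds the prefix */
#define pr_fmt(fmt) DRV_NAME ": " fmt
#define pr_info_demo(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

/* driver-private style: the macro appends the newline itself */
#define ehea_info_demo(fmt, ...) printf(DRV_NAME ": " fmt "\n", ##__VA_ARGS__)

int main(void)
{
	pr_info_demo("memory is going online\n");	/* prints "ehea: memory is going online" */
	ehea_info_demo("memory is going online");	/* same line, newline added by the macro */
	return 0;
}

Both calls produce the same "ehea: memory is going online" line, which is why the message pairs in the hunks differ mainly in where the prefix and the "\n" are written.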
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 0506967b9044..8fe9dcaa7538 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,8 +26,6 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include "ehea_phyp.h" 29#include "ehea_phyp.h"
32 30
33 31
@@ -69,11 +67,12 @@ static long ehea_plpar_hcall_norets(unsigned long opcode,
69 } 67 }
70 68
71 if (ret < H_SUCCESS) 69 if (ret < H_SUCCESS)
72 pr_err("opcode=%lx ret=%lx" 70 ehea_error("opcode=%lx ret=%lx"
73 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 71 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
74 " arg5=%lx arg6=%lx arg7=%lx\n", 72 " arg5=%lx arg6=%lx arg7=%lx ",
75 opcode, ret, 73 opcode, ret,
76 arg1, arg2, arg3, arg4, arg5, arg6, arg7); 74 arg1, arg2, arg3, arg4, arg5,
75 arg6, arg7);
77 76
78 return ret; 77 return ret;
79 } 78 }
@@ -115,18 +114,19 @@ static long ehea_plpar_hcall9(unsigned long opcode,
115 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) 114 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
116 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) 115 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
117 && (arg3 == H_PORT_CB7_DUCQPN))))) 116 && (arg3 == H_PORT_CB7_DUCQPN)))))
118 pr_err("opcode=%lx ret=%lx" 117 ehea_error("opcode=%lx ret=%lx"
119 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 118 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
120 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" 119 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
121 " arg9=%lx" 120 " arg9=%lx"
122 " out1=%lx out2=%lx out3=%lx out4=%lx" 121 " out1=%lx out2=%lx out3=%lx out4=%lx"
123 " out5=%lx out6=%lx out7=%lx out8=%lx" 122 " out5=%lx out6=%lx out7=%lx out8=%lx"
124 " out9=%lx\n", 123 " out9=%lx",
125 opcode, ret, 124 opcode, ret,
126 arg1, arg2, arg3, arg4, arg5, 125 arg1, arg2, arg3, arg4, arg5,
127 arg6, arg7, arg8, arg9, 126 arg6, arg7, arg8, arg9,
128 outs[0], outs[1], outs[2], outs[3], outs[4], 127 outs[0], outs[1], outs[2], outs[3],
129 outs[5], outs[6], outs[7], outs[8]); 128 outs[4], outs[5], outs[6], outs[7],
129 outs[8]);
130 return ret; 130 return ret;
131 } 131 }
132 132
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
515 const u64 log_pageaddr, const u64 count) 515 const u64 log_pageaddr, const u64 count)
516{ 516{
517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { 517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
518 pr_err("not on pageboundary\n"); 518 ehea_error("not on pageboundary");
519 return H_PARAMETER; 519 return H_PARAMETER;
520 } 520 }
521 521
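A side note on the rewrapped hypercall error messages above: both the pr_err() and the restored ehea_error() variants rely on the compiler concatenating adjacent string literals, so splitting the format string across several lines does not change the logged output. A minimal sketch (the opcode and argument values are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long opcode = 0x1b6, ret = (unsigned long)-4;

	/* adjacent literals compile to a single format string */
	printf("opcode=%lx ret=%lx"
	       " arg1=%lx arg2=%lx\n",
	       opcode, ret, 1UL, 2UL);
	return 0;
}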
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index cd44bb8017d9..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,8 +26,6 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <linux/mm.h> 29#include <linux/mm.h>
32#include <linux/slab.h> 30#include <linux/slab.h>
33#include "ehea.h" 31#include "ehea.h"
@@ -47,7 +45,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue)
47 queue->current_q_offset -= queue->pagesize; 45 queue->current_q_offset -= queue->pagesize;
48 retvalue = NULL; 46 retvalue = NULL;
49 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { 47 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
50 pr_err("not on pageboundary\n"); 48 ehea_error("not on pageboundary");
51 retvalue = NULL; 49 retvalue = NULL;
52 } 50 }
53 return retvalue; 51 return retvalue;
@@ -60,15 +58,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
60 int i, k; 58 int i, k;
61 59
62 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { 60 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
63 pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n", 61 ehea_error("pagesize conflict! kernel pagesize=%d, "
64 (int)PAGE_SIZE, (int)pagesize); 62 "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
65 return -EINVAL; 63 return -EINVAL;
66 } 64 }
67 65
68 queue->queue_length = nr_of_pages * pagesize; 66 queue->queue_length = nr_of_pages * pagesize;
69 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); 67 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
70 if (!queue->queue_pages) { 68 if (!queue->queue_pages) {
71 pr_err("no mem for queue_pages\n"); 69 ehea_error("no mem for queue_pages");
72 return -ENOMEM; 70 return -ENOMEM;
73 } 71 }
74 72
@@ -132,7 +130,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
132 130
133 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 131 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
134 if (!cq) { 132 if (!cq) {
135 pr_err("no mem for cq\n"); 133 ehea_error("no mem for cq");
136 goto out_nomem; 134 goto out_nomem;
137 } 135 }
138 136
@@ -149,7 +147,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
149 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, 147 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
150 &cq->fw_handle, &cq->epas); 148 &cq->fw_handle, &cq->epas);
151 if (hret != H_SUCCESS) { 149 if (hret != H_SUCCESS) {
152 pr_err("alloc_resource_cq failed\n"); 150 ehea_error("alloc_resource_cq failed");
153 goto out_freemem; 151 goto out_freemem;
154 } 152 }
155 153
@@ -161,7 +159,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
161 for (counter = 0; counter < cq->attr.nr_pages; counter++) { 159 for (counter = 0; counter < cq->attr.nr_pages; counter++) {
162 vpage = hw_qpageit_get_inc(&cq->hw_queue); 160 vpage = hw_qpageit_get_inc(&cq->hw_queue);
163 if (!vpage) { 161 if (!vpage) {
164 pr_err("hw_qpageit_get_inc failed\n"); 162 ehea_error("hw_qpageit_get_inc failed");
165 goto out_kill_hwq; 163 goto out_kill_hwq;
166 } 164 }
167 165
@@ -170,8 +168,9 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
170 0, EHEA_CQ_REGISTER_ORIG, 168 0, EHEA_CQ_REGISTER_ORIG,
171 cq->fw_handle, rpage, 1); 169 cq->fw_handle, rpage, 1);
172 if (hret < H_SUCCESS) { 170 if (hret < H_SUCCESS) {
173 pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n", 171 ehea_error("register_rpage_cq failed ehea_cq=%p "
174 cq, hret, counter, cq->attr.nr_pages); 172 "hret=%llx counter=%i act_pages=%i",
173 cq, hret, counter, cq->attr.nr_pages);
175 goto out_kill_hwq; 174 goto out_kill_hwq;
176 } 175 }
177 176
@@ -179,14 +178,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
179 vpage = hw_qpageit_get_inc(&cq->hw_queue); 178 vpage = hw_qpageit_get_inc(&cq->hw_queue);
180 179
181 if ((hret != H_SUCCESS) || (vpage)) { 180 if ((hret != H_SUCCESS) || (vpage)) {
182 pr_err("registration of pages not complete hret=%llx\n", 181 ehea_error("registration of pages not "
183 hret); 182 "complete hret=%llx\n", hret);
184 goto out_kill_hwq; 183 goto out_kill_hwq;
185 } 184 }
186 } else { 185 } else {
187 if (hret != H_PAGE_REGISTERED) { 186 if (hret != H_PAGE_REGISTERED) {
188 pr_err("CQ: registration of page failed hret=%llx\n", 187 ehea_error("CQ: registration of page failed "
189 hret); 188 "hret=%llx\n", hret);
190 goto out_kill_hwq; 189 goto out_kill_hwq;
191 } 190 }
192 } 191 }
@@ -242,7 +241,7 @@ int ehea_destroy_cq(struct ehea_cq *cq)
242 } 241 }
243 242
244 if (hret != H_SUCCESS) { 243 if (hret != H_SUCCESS) {
245 pr_err("destroy CQ failed\n"); 244 ehea_error("destroy CQ failed");
246 return -EIO; 245 return -EIO;
247 } 246 }
248 247
@@ -260,7 +259,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
260 259
261 eq = kzalloc(sizeof(*eq), GFP_KERNEL); 260 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
262 if (!eq) { 261 if (!eq) {
263 pr_err("no mem for eq\n"); 262 ehea_error("no mem for eq");
264 return NULL; 263 return NULL;
265 } 264 }
266 265
@@ -273,21 +272,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
273 hret = ehea_h_alloc_resource_eq(adapter->handle, 272 hret = ehea_h_alloc_resource_eq(adapter->handle,
274 &eq->attr, &eq->fw_handle); 273 &eq->attr, &eq->fw_handle);
275 if (hret != H_SUCCESS) { 274 if (hret != H_SUCCESS) {
276 pr_err("alloc_resource_eq failed\n"); 275 ehea_error("alloc_resource_eq failed");
277 goto out_freemem; 276 goto out_freemem;
278 } 277 }
279 278
280 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, 279 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
281 EHEA_PAGESIZE, sizeof(struct ehea_eqe)); 280 EHEA_PAGESIZE, sizeof(struct ehea_eqe));
282 if (ret) { 281 if (ret) {
283 pr_err("can't allocate eq pages\n"); 282 ehea_error("can't allocate eq pages");
284 goto out_freeres; 283 goto out_freeres;
285 } 284 }
286 285
287 for (i = 0; i < eq->attr.nr_pages; i++) { 286 for (i = 0; i < eq->attr.nr_pages; i++) {
288 vpage = hw_qpageit_get_inc(&eq->hw_queue); 287 vpage = hw_qpageit_get_inc(&eq->hw_queue);
289 if (!vpage) { 288 if (!vpage) {
290 pr_err("hw_qpageit_get_inc failed\n"); 289 ehea_error("hw_qpageit_get_inc failed");
291 hret = H_RESOURCE; 290 hret = H_RESOURCE;
292 goto out_kill_hwq; 291 goto out_kill_hwq;
293 } 292 }
@@ -371,7 +370,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
371 } 370 }
372 371
373 if (hret != H_SUCCESS) { 372 if (hret != H_SUCCESS) {
374 pr_err("destroy EQ failed\n"); 373 ehea_error("destroy EQ failed");
375 return -EIO; 374 return -EIO;
376 } 375 }
377 376
@@ -396,7 +395,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
396 for (cnt = 0; cnt < nr_pages; cnt++) { 395 for (cnt = 0; cnt < nr_pages; cnt++) {
397 vpage = hw_qpageit_get_inc(hw_queue); 396 vpage = hw_qpageit_get_inc(hw_queue);
398 if (!vpage) { 397 if (!vpage) {
399 pr_err("hw_qpageit_get_inc failed\n"); 398 ehea_error("hw_qpageit_get_inc failed");
400 goto out_kill_hwq; 399 goto out_kill_hwq;
401 } 400 }
402 rpage = virt_to_abs(vpage); 401 rpage = virt_to_abs(vpage);
@@ -404,7 +403,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
404 0, h_call_q_selector, 403 0, h_call_q_selector,
405 qp->fw_handle, rpage, 1); 404 qp->fw_handle, rpage, 1);
406 if (hret < H_SUCCESS) { 405 if (hret < H_SUCCESS) {
407 pr_err("register_rpage_qp failed\n"); 406 ehea_error("register_rpage_qp failed");
408 goto out_kill_hwq; 407 goto out_kill_hwq;
409 } 408 }
410 } 409 }
@@ -433,7 +432,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
433 432
434 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 433 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
435 if (!qp) { 434 if (!qp) {
436 pr_err("no mem for qp\n"); 435 ehea_error("no mem for qp");
437 return NULL; 436 return NULL;
438 } 437 }
439 438
@@ -442,7 +441,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
442 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, 441 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
443 &qp->fw_handle, &qp->epas); 442 &qp->fw_handle, &qp->epas);
444 if (hret != H_SUCCESS) { 443 if (hret != H_SUCCESS) {
445 pr_err("ehea_h_alloc_resource_qp failed\n"); 444 ehea_error("ehea_h_alloc_resource_qp failed");
446 goto out_freemem; 445 goto out_freemem;
447 } 446 }
448 447
@@ -456,7 +455,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
456 init_attr->act_wqe_size_enc_sq, adapter, 455 init_attr->act_wqe_size_enc_sq, adapter,
457 0); 456 0);
458 if (ret) { 457 if (ret) {
459 pr_err("can't register for sq ret=%x\n", ret); 458 ehea_error("can't register for sq ret=%x", ret);
460 goto out_freeres; 459 goto out_freeres;
461 } 460 }
462 461
@@ -466,7 +465,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
466 init_attr->act_wqe_size_enc_rq1, 465 init_attr->act_wqe_size_enc_rq1,
467 adapter, 1); 466 adapter, 1);
468 if (ret) { 467 if (ret) {
469 pr_err("can't register for rq1 ret=%x\n", ret); 468 ehea_error("can't register for rq1 ret=%x", ret);
470 goto out_kill_hwsq; 469 goto out_kill_hwsq;
471 } 470 }
472 471
@@ -477,7 +476,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
477 init_attr->act_wqe_size_enc_rq2, 476 init_attr->act_wqe_size_enc_rq2,
478 adapter, 2); 477 adapter, 2);
479 if (ret) { 478 if (ret) {
480 pr_err("can't register for rq2 ret=%x\n", ret); 479 ehea_error("can't register for rq2 ret=%x", ret);
481 goto out_kill_hwr1q; 480 goto out_kill_hwr1q;
482 } 481 }
483 } 482 }
@@ -489,7 +488,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
489 init_attr->act_wqe_size_enc_rq3, 488 init_attr->act_wqe_size_enc_rq3,
490 adapter, 3); 489 adapter, 3);
491 if (ret) { 490 if (ret) {
492 pr_err("can't register for rq3 ret=%x\n", ret); 491 ehea_error("can't register for rq3 ret=%x", ret);
493 goto out_kill_hwr2q; 492 goto out_kill_hwr2q;
494 } 493 }
495 } 494 }
@@ -554,7 +553,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
554 } 553 }
555 554
556 if (hret != H_SUCCESS) { 555 if (hret != H_SUCCESS) {
557 pr_err("destroy QP failed\n"); 556 ehea_error("destroy QP failed");
558 return -EIO; 557 return -EIO;
559 } 558 }
560 559
@@ -843,7 +842,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
843 (hret != H_PAGE_REGISTERED)) { 842 (hret != H_PAGE_REGISTERED)) {
844 ehea_h_free_resource(adapter->handle, mr->handle, 843 ehea_h_free_resource(adapter->handle, mr->handle,
845 FORCE_FREE); 844 FORCE_FREE);
846 pr_err("register_rpage_mr failed\n"); 845 ehea_error("register_rpage_mr failed");
847 return hret; 846 return hret;
848 } 847 }
849 } 848 }
@@ -897,7 +896,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
897 896
898 pt = (void *)get_zeroed_page(GFP_KERNEL); 897 pt = (void *)get_zeroed_page(GFP_KERNEL);
899 if (!pt) { 898 if (!pt) {
900 pr_err("no mem\n"); 899 ehea_error("no mem");
901 ret = -ENOMEM; 900 ret = -ENOMEM;
902 goto out; 901 goto out;
903 } 902 }
@@ -907,14 +906,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
907 &mr->handle, &mr->lkey); 906 &mr->handle, &mr->lkey);
908 907
909 if (hret != H_SUCCESS) { 908 if (hret != H_SUCCESS) {
910 pr_err("alloc_resource_mr failed\n"); 909 ehea_error("alloc_resource_mr failed");
911 ret = -EIO; 910 ret = -EIO;
912 goto out; 911 goto out;
913 } 912 }
914 913
915 if (!ehea_bmap) { 914 if (!ehea_bmap) {
916 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 915 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
917 pr_err("no busmap available\n"); 916 ehea_error("no busmap available");
918 ret = -EIO; 917 ret = -EIO;
919 goto out; 918 goto out;
920 } 919 }
@@ -930,7 +929,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
930 929
931 if (hret != H_SUCCESS) { 930 if (hret != H_SUCCESS) {
932 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 931 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
933 pr_err("registering mr failed\n"); 932 ehea_error("registering mr failed");
934 ret = -EIO; 933 ret = -EIO;
935 goto out; 934 goto out;
936 } 935 }
@@ -953,7 +952,7 @@ int ehea_rem_mr(struct ehea_mr *mr)
953 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, 952 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
954 FORCE_FREE); 953 FORCE_FREE);
955 if (hret != H_SUCCESS) { 954 if (hret != H_SUCCESS) {
956 pr_err("destroy MR failed\n"); 955 ehea_error("destroy MR failed");
957 return -EIO; 956 return -EIO;
958 } 957 }
959 958
@@ -988,14 +987,14 @@ void print_error_data(u64 *data)
988 length = EHEA_PAGESIZE; 987 length = EHEA_PAGESIZE;
989 988
990 if (type == EHEA_AER_RESTYPE_QP) 989 if (type == EHEA_AER_RESTYPE_QP)
991 pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n", 990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
992 resource, data[6], data[12], data[22]); 991 "port=%llX", resource, data[6], data[12], data[22]);
993 else if (type == EHEA_AER_RESTYPE_CQ) 992 else if (type == EHEA_AER_RESTYPE_CQ)
994 pr_err("CQ (resource=%llX) state: AER=0x%llX\n", 993 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
995 resource, data[6]); 994 data[6]);
996 else if (type == EHEA_AER_RESTYPE_EQ) 995 else if (type == EHEA_AER_RESTYPE_EQ)
997 pr_err("EQ (resource=%llX) state: AER=0x%llX\n", 996 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
998 resource, data[6]); 997 data[6]);
999 998
1000 ehea_dump(data, length, "error data"); 999 ehea_dump(data, length, "error data");
1001} 1000}
@@ -1009,7 +1008,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1009 1008
1010 rblock = (void *)get_zeroed_page(GFP_KERNEL); 1009 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1011 if (!rblock) { 1010 if (!rblock) {
1012 pr_err("Cannot allocate rblock memory\n"); 1011 ehea_error("Cannot allocate rblock memory.");
1013 goto out; 1012 goto out;
1014 } 1013 }
1015 1014
@@ -1021,9 +1020,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1021 *aerr = rblock[12]; 1020 *aerr = rblock[12];
1022 print_error_data(rblock); 1021 print_error_data(rblock);
1023 } else if (ret == H_R_STATE) { 1022 } else if (ret == H_R_STATE) {
1024 pr_err("No error data available: %llX\n", res_handle); 1023 ehea_error("No error data available: %llX.", res_handle);
1025 } else 1024 } else
1026 pr_err("Error data could not be fetched: %llX\n", res_handle); 1025 ehea_error("Error data could not be fetched: %llX", res_handle);
1027 1026
1028 free_page((unsigned long)rblock); 1027 free_page((unsigned long)rblock);
1029out: 1028out:
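The ehea_error_data() hunk above keeps the usual allocate / query / print / free flow around get_zeroed_page(). A rough userspace approximation is sketched below; query_error_data() and the 4096-byte buffer are placeholders for illustration, not the driver's real error-data hypercall interface.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the firmware query; the real hypercall is not available
 * in userspace, so this always "succeeds". */
static int query_error_data(char *rblock, size_t len)
{
	return snprintf(rblock, len, "AER=0x%x AERR=0x%x", 0x1, 0x2) < 0 ? -1 : 0;
}

int main(void)
{
	char *rblock = calloc(1, 4096);		/* plays the role of get_zeroed_page() */

	if (!rblock) {
		fprintf(stderr, "ehea: Cannot allocate rblock memory\n");
		goto out;
	}

	if (query_error_data(rblock, 4096) == 0)
		printf("ehea: error data: %s\n", rblock);
	else
		fprintf(stderr, "ehea: Error data could not be fetched\n");

	free(rblock);				/* plays the role of free_page() */
out:
	return 0;
}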