author     Joe Perches <joe@perches.com>            2010-11-30 03:18:44 -0500
committer  David S. Miller <davem@davemloft.net>    2010-12-06 15:59:04 -0500
commit     539995d18649023199986424d140f1d620372ce5 (patch)
tree       37808d3bb31717397aee88941a94b68efa41a54d /drivers/net/ehea
parent     7903264402546f45f9bac8ad2bfdb00d00eb124a (diff)
ehea: Use the standard logging functions
Remove ehea_error, ehea_info and ehea_debug macros.
Use pr_fmt, pr_<level>, netdev_<level> and netif_<level> as appropriate.
Fix messages to use a trailing "\n"; some messages had an extra one
because the old ehea_<level> macros appended a trailing "\n" themselves.
Coalesced long format strings.
Uncompiled/untested.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
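
The conversion boils down to the following pattern, shown here as a condensed
sketch of representative call sites from the hunks below (the surrounding
context at each real call site differs, so this is illustrative only):

	/* Before: driver-private macros prepended DRV_NAME and appended "\n" */
	ehea_error("query_ehea_port failed");
	ehea_info("%s: Logical port up", port->netdev->name);

	/* After: pr_fmt() supplies the "ehea: " prefix for pr_<level>(), and
	 * every format string now carries its own explicit trailing "\n".
	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* before the #includes */

	pr_err("query_ehea_port failed\n");		/* no net_device in scope */
	netdev_err(dev, "query_ehea_port failed\n");	/* net_device available */
	netif_info(port, link, dev, "Logical port up\n");	/* also gated by port->msg_enable */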
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h          |  13
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c  |  18
-rw-r--r--  drivers/net/ehea/ehea_main.c     | 407
-rw-r--r--  drivers/net/ehea/ehea_phyp.c     |  40
-rw-r--r--  drivers/net/ehea/ehea_qmr.c      |  89
5 files changed, 274 insertions, 293 deletions
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e74828d..45e709f7609f 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -130,19 +130,6 @@
 
 /* utility functions */
 
-#define ehea_info(fmt, args...) \
-	printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
-
-#define ehea_error(fmt, args...) \
-	printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
-
-#ifdef DEBUG
-#define ehea_debug(fmt, args...) \
-	printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define ehea_debug(fmt, args...) do {} while (0)
-#endif
-
 void ehea_dump(void *adr, int len, char *msg);
 
 #define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099ce49c9..273fedbb6d0e 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ehea.h"
 #include "ehea_phyp.h"
 
@@ -118,10 +120,10 @@ doit:
 	ret = ehea_set_portspeed(port, sp);
 
 	if (!ret)
-		ehea_info("%s: Port speed successfully set: %dMbps "
-			  "%s Duplex",
-			  port->netdev->name, port->port_speed,
-			  port->full_duplex == 1 ? "Full" : "Half");
+		netdev_info(dev,
+			    "Port speed successfully set: %dMbps %s Duplex\n",
+			    port->port_speed,
+			    port->full_duplex == 1 ? "Full" : "Half");
 out:
 	return ret;
 }
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev)
 	ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
 
 	if (!ret)
-		ehea_info("%s: Port speed successfully set: %dMbps "
-			  "%s Duplex",
-			  port->netdev->name, port->port_speed,
-			  port->full_duplex == 1 ? "Full" : "Half");
+		netdev_info(port->netdev,
+			    "Port speed successfully set: %dMbps %s Duplex\n",
+			    port->port_speed,
+			    port->full_duplex == 1 ? "Full" : "Half");
 	return ret;
 }
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index a84c389d3db7..f700c76d3e60 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -136,8 +138,8 @@ void ehea_dump(void *adr, int len, char *msg)
 	int x;
 	unsigned char *deb = adr;
 	for (x = 0; x < len; x += 16) {
-		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
-		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
+			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
 		deb += 16;
 	}
 }
@@ -337,7 +339,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 
 	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb2) {
-		ehea_error("no mem for cb2");
+		netdev_err(dev, "no mem for cb2\n");
 		goto out;
 	}
 
@@ -345,7 +347,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 				      port->logical_port_id,
 				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		netdev_err(dev, "query_ehea_port failed\n");
 		goto out_herr;
 	}
 
@@ -461,8 +463,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 		if (!skb) {
 			q_skba->os_skbs = fill_wqes - i;
 			if (q_skba->os_skbs == q_skba->len - 2) {
-				ehea_info("%s: rq%i ran dry - no mem for skb",
-					  pr->port->netdev->name, rq_nr);
+				netdev_info(pr->port->netdev,
+					    "rq%i ran dry - no mem for skb\n",
+					    rq_nr);
 				ret = -ENOMEM;
 			}
 			break;
@@ -627,8 +630,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
 		if (netif_msg_rx_err(pr->port)) {
-			ehea_error("Critical receive error for QP %d. "
-				   "Resetting port.", pr->qp->init_attr.qp_nr);
+			pr_err("Critical receive error for QP %d. Resetting port.\n",
+			       pr->qp->init_attr.qp_nr);
 			ehea_dump(cqe, sizeof(*cqe), "CQE");
 		}
 		ehea_schedule_port_reset(pr->port);
@@ -730,8 +733,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
 							  skb_arr_rq1_len,
 							  wqe_index);
 			if (unlikely(!skb)) {
-				if (netif_msg_rx_err(port))
-					ehea_error("LL rq1: skb=NULL");
+				netif_err(port, rx_err, dev,
+					  "LL rq1: skb=NULL\n");
 
 				skb = netdev_alloc_skb(dev,
 						       EHEA_L_PKT_SIZE);
@@ -746,8 +749,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
 			skb = get_skb_by_index(skb_arr_rq2,
 					       skb_arr_rq2_len, cqe);
 			if (unlikely(!skb)) {
-				if (netif_msg_rx_err(port))
-					ehea_error("rq2: skb=NULL");
+				netif_err(port, rx_err, dev,
+					  "rq2: skb=NULL\n");
 				break;
 			}
 			ehea_fill_skb(dev, skb, cqe);
@@ -757,8 +760,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
 			skb = get_skb_by_index(skb_arr_rq3,
 					       skb_arr_rq3_len, cqe);
 			if (unlikely(!skb)) {
-				if (netif_msg_rx_err(port))
-					ehea_error("rq3: skb=NULL");
+				netif_err(port, rx_err, dev,
+					  "rq3: skb=NULL\n");
 				break;
 			}
 			ehea_fill_skb(dev, skb, cqe);
@@ -830,7 +833,7 @@ static void check_sqs(struct ehea_port *port)
 							msecs_to_jiffies(100));
 
 		if (!ret) {
-			ehea_error("HW/SW queues out of sync");
+			pr_err("HW/SW queues out of sync\n");
 			ehea_schedule_port_reset(pr->port);
 			return;
 		}
@@ -863,14 +866,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 		}
 
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Bad send completion status=0x%04X",
-				   cqe->status);
+			pr_err("Bad send completion status=0x%04X\n",
+			       cqe->status);
 
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
 
 			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
-				ehea_error("Resetting port");
+				pr_err("Resetting port\n");
 				ehea_schedule_port_reset(pr->port);
 				break;
 			}
@@ -988,8 +991,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 
 	while (eqe) {
 		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
-		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
-			   eqe->entry, qp_token);
+		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
+		       eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
 
@@ -1007,7 +1010,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
 	}
 
 	if (reset_port) {
-		ehea_error("Resetting port");
+		pr_err("Resetting port\n");
 		ehea_schedule_port_reset(port);
 	}
 
@@ -1035,7 +1038,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
 	/* may be called via ehea_neq_tasklet() */
 	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
 	if (!cb0) {
-		ehea_error("no mem for cb0");
+		pr_err("no mem for cb0\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1127,7 +1130,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
 
 	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb4) {
-		ehea_error("no mem for cb4");
+		pr_err("no mem for cb4\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1178,16 +1181,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
 				break;
 			}
 		} else {
-			ehea_error("Failed sensing port speed");
+			pr_err("Failed sensing port speed\n");
 			ret = -EIO;
 		}
 	} else {
 		if (hret == H_AUTHORITY) {
-			ehea_info("Hypervisor denied setting port speed");
+			pr_info("Hypervisor denied setting port speed\n");
 			ret = -EPERM;
 		} else {
 			ret = -EIO;
-			ehea_error("Failed setting port speed");
+			pr_err("Failed setting port speed\n");
 		}
 	}
 	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1204,80 +1207,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 	u8 ec;
 	u8 portnum;
 	struct ehea_port *port;
+	struct net_device *dev;
 
 	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
 	port = ehea_get_port(adapter, portnum);
+	dev = port->netdev;
 
 	switch (ec) {
 	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
 
 		if (!port) {
-			ehea_error("unknown portnum %x", portnum);
+			netdev_err(dev, "unknown portnum %x\n", portnum);
 			break;
 		}
 
 		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
-			if (!netif_carrier_ok(port->netdev)) {
+			if (!netif_carrier_ok(dev)) {
 				ret = ehea_sense_port_attr(port);
 				if (ret) {
-					ehea_error("failed resensing port "
-						   "attributes");
+					netdev_err(dev, "failed resensing port attributes\n");
 					break;
 				}
 
-				if (netif_msg_link(port))
-					ehea_info("%s: Logical port up: %dMbps "
-						  "%s Duplex",
-						  port->netdev->name,
-						  port->port_speed,
-						  port->full_duplex ==
-						  1 ? "Full" : "Half");
+				netif_info(port, link, dev,
+					   "Logical port up: %dMbps %s Duplex\n",
+					   port->port_speed,
+					   port->full_duplex == 1 ?
+					   "Full" : "Half");
 
-				netif_carrier_on(port->netdev);
-				netif_wake_queue(port->netdev);
+				netif_carrier_on(dev);
+				netif_wake_queue(dev);
 			}
 		} else
-			if (netif_carrier_ok(port->netdev)) {
-				if (netif_msg_link(port))
-					ehea_info("%s: Logical port down",
-						  port->netdev->name);
-				netif_carrier_off(port->netdev);
-				netif_stop_queue(port->netdev);
+			if (netif_carrier_ok(dev)) {
+				netif_info(port, link, dev,
+					   "Logical port down\n");
+				netif_carrier_off(dev);
+				netif_stop_queue(dev);
 			}
 
 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
 			port->phy_link = EHEA_PHY_LINK_UP;
-			if (netif_msg_link(port))
-				ehea_info("%s: Physical port up",
-					  port->netdev->name);
+			netif_info(port, link, dev,
+				   "Physical port up\n");
 			if (prop_carrier_state)
-				netif_carrier_on(port->netdev);
+				netif_carrier_on(dev);
 		} else {
 			port->phy_link = EHEA_PHY_LINK_DOWN;
-			if (netif_msg_link(port))
-				ehea_info("%s: Physical port down",
-					  port->netdev->name);
+			netif_info(port, link, dev,
+				   "Physical port down\n");
 			if (prop_carrier_state)
-				netif_carrier_off(port->netdev);
+				netif_carrier_off(dev);
 		}
 
 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
-			ehea_info("External switch port is primary port");
+			netdev_info(dev,
+				    "External switch port is primary port\n");
 		else
-			ehea_info("External switch port is backup port");
+			netdev_info(dev,
+				    "External switch port is backup port\n");
 
 		break;
 	case EHEA_EC_ADAPTER_MALFUNC:
-		ehea_error("Adapter malfunction");
+		netdev_err(dev, "Adapter malfunction\n");
 		break;
 	case EHEA_EC_PORT_MALFUNC:
-		ehea_info("Port malfunction: Device: %s", port->netdev->name);
-		netif_carrier_off(port->netdev);
-		netif_stop_queue(port->netdev);
+		netdev_info(dev, "Port malfunction\n");
+		netif_carrier_off(dev);
+		netif_stop_queue(dev);
 		break;
 	default:
-		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
+		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
 		break;
 	}
 }
@@ -1289,13 +1290,13 @@ static void ehea_neq_tasklet(unsigned long data)
 	u64 event_mask;
 
 	eqe = ehea_poll_eq(adapter->neq);
-	ehea_debug("eqe=%p", eqe);
+	pr_debug("eqe=%p\n", eqe);
 
 	while (eqe) {
-		ehea_debug("*eqe=%lx", eqe->entry);
+		pr_debug("*eqe=%lx\n", eqe->entry);
 		ehea_parse_eqe(adapter, eqe->entry);
 		eqe = ehea_poll_eq(adapter->neq);
-		ehea_debug("next eqe=%p", eqe);
+		pr_debug("next eqe=%p\n", eqe);
 	}
 
 	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1344,14 +1345,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
 				  ehea_qp_aff_irq_handler,
 				  IRQF_DISABLED, port->int_aff_name, port);
 	if (ret) {
-		ehea_error("failed registering irq for qp_aff_irq_handler:"
-			   "ist=%X", port->qp_eq->attr.ist1);
+		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
+			   port->qp_eq->attr.ist1);
 		goto out_free_qpeq;
 	}
 
-	if (netif_msg_ifup(port))
-		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
-			  "registered", port->qp_eq->attr.ist1);
+	netif_info(port, ifup, dev,
+		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
+		   port->qp_eq->attr.ist1);
 
 
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1363,14 +1364,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
 					  IRQF_DISABLED, pr->int_send_name,
 					  pr);
 		if (ret) {
-			ehea_error("failed registering irq for ehea_queue "
-				   "port_res_nr:%d, ist=%X", i,
-				   pr->eq->attr.ist1);
+			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+				   i, pr->eq->attr.ist1);
 			goto out_free_req;
 		}
-		if (netif_msg_ifup(port))
-			ehea_info("irq_handle 0x%X for function ehea_queue_int "
-				  "%d registered", pr->eq->attr.ist1, i);
+		netif_info(port, ifup, dev,
+			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
+			   pr->eq->attr.ist1, i);
 	}
 out:
 	return ret;
@@ -1401,16 +1401,16 @@ static void ehea_free_interrupts(struct net_device *dev)
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		pr = &port->port_res[i];
 		ibmebus_free_irq(pr->eq->attr.ist1, pr);
-		if (netif_msg_intr(port))
-			ehea_info("free send irq for res %d with handle 0x%X",
-				  i, pr->eq->attr.ist1);
+		netif_info(port, intr, dev,
+			   "free send irq for res %d with handle 0x%X\n",
+			   i, pr->eq->attr.ist1);
 	}
 
 	/* associated events */
 	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
-	if (netif_msg_intr(port))
-		ehea_info("associated event interrupt for handle 0x%X freed",
-			  port->qp_eq->attr.ist1);
+	netif_info(port, intr, dev,
+		   "associated event interrupt for handle 0x%X freed\n",
+		   port->qp_eq->attr.ist1);
 }
 
 static int ehea_configure_port(struct ehea_port *port)
@@ -1479,7 +1479,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
 out_free:
 	ehea_rem_mr(&pr->send_mr);
 out:
-	ehea_error("Generating SMRS failed\n");
+	pr_err("Generating SMRS failed\n");
 	return -EIO;
 }
 
@@ -1534,7 +1534,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 
 	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
 	if (!pr->eq) {
-		ehea_error("create_eq failed (eq)");
+		pr_err("create_eq failed (eq)\n");
 		goto out_free;
 	}
 
@@ -1542,7 +1542,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 					     pr->eq->fw_handle,
 					     port->logical_port_id);
 	if (!pr->recv_cq) {
-		ehea_error("create_cq failed (cq_recv)");
+		pr_err("create_cq failed (cq_recv)\n");
 		goto out_free;
 	}
 
@@ -1550,19 +1550,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 					     pr->eq->fw_handle,
 					     port->logical_port_id);
 	if (!pr->send_cq) {
-		ehea_error("create_cq failed (cq_send)");
+		pr_err("create_cq failed (cq_send)\n");
 		goto out_free;
 	}
 
 	if (netif_msg_ifup(port))
-		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
-			  pr->send_cq->attr.act_nr_of_cqes,
-			  pr->recv_cq->attr.act_nr_of_cqes);
+		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
+			pr->send_cq->attr.act_nr_of_cqes,
+			pr->recv_cq->attr.act_nr_of_cqes);
 
 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
 	if (!init_attr) {
 		ret = -ENOMEM;
-		ehea_error("no mem for ehea_qp_init_attr");
+		pr_err("no mem for ehea_qp_init_attr\n");
 		goto out_free;
 	}
 
@@ -1587,18 +1587,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 
 	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
 	if (!pr->qp) {
-		ehea_error("create_qp failed");
+		pr_err("create_qp failed\n");
 		ret = -EIO;
 		goto out_free;
 	}
 
 	if (netif_msg_ifup(port))
-		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
-			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
-			  init_attr->act_nr_send_wqes,
-			  init_attr->act_nr_rwqes_rq1,
-			  init_attr->act_nr_rwqes_rq2,
-			  init_attr->act_nr_rwqes_rq3);
+		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
+			init_attr->qp_nr,
+			init_attr->act_nr_send_wqes,
+			init_attr->act_nr_rwqes_rq1,
+			init_attr->act_nr_rwqes_rq2,
+			init_attr->act_nr_rwqes_rq3);
 
 	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
 
@@ -1749,7 +1749,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 			swqe->descriptors++;
 		}
 	} else
-		ehea_error("cannot handle fragmented headers");
+		pr_err("cannot handle fragmented headers\n");
 }
 
 static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1845,8 +1845,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("%sregistering bc address failed (tagged)",
-			   hcallid == H_REG_BCMC ? "" : "de");
+		pr_err("%sregistering bc address failed (tagged)\n",
+		       hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 		goto out_herr;
 	}
@@ -1857,8 +1857,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("%sregistering bc address failed (vlan)",
-			   hcallid == H_REG_BCMC ? "" : "de");
+		pr_err("%sregistering bc address failed (vlan)\n",
+		       hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 	}
 out_herr:
@@ -1880,7 +1880,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
 
 	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb0) {
-		ehea_error("no mem for cb0");
+		pr_err("no mem for cb0\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1928,11 +1928,11 @@ out:
 static void ehea_promiscuous_error(u64 hret, int enable)
 {
 	if (hret == H_AUTHORITY)
-		ehea_info("Hypervisor denied %sabling promiscuous mode",
-			  enable == 1 ? "en" : "dis");
+		pr_info("Hypervisor denied %sabling promiscuous mode\n",
+			enable == 1 ? "en" : "dis");
 	else
-		ehea_error("failed %sabling promiscuous mode",
-			   enable == 1 ? "en" : "dis");
+		pr_err("failed %sabling promiscuous mode\n",
+		       enable == 1 ? "en" : "dis");
 }
 
 static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1946,7 +1946,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
 
 	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
 	if (!cb7) {
-		ehea_error("no mem for cb7");
+		pr_err("no mem for cb7\n");
 		goto out;
 	}
 
@@ -2006,7 +2006,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
 		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
 						 H_DEREG_BCMC);
 		if (hret) {
-			ehea_error("failed deregistering mcast MAC");
+			pr_err("failed deregistering mcast MAC\n");
 			ret = -EIO;
 		}
 
@@ -2029,7 +2029,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
 			if (!hret)
 				port->allmulti = 1;
 			else
-				ehea_error("failed enabling IFF_ALLMULTI");
+				netdev_err(dev,
+					   "failed enabling IFF_ALLMULTI\n");
 		}
 	} else
 		if (!enable) {
@@ -2038,7 +2039,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
 			if (!hret)
 				port->allmulti = 0;
 			else
-				ehea_error("failed disabling IFF_ALLMULTI");
+				netdev_err(dev,
+					   "failed disabling IFF_ALLMULTI\n");
 		}
 }
 
@@ -2049,7 +2051,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 
 	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
 	if (!ehea_mcl_entry) {
-		ehea_error("no mem for mcl_entry");
+		pr_err("no mem for mcl_entry\n");
 		return;
 	}
 
@@ -2062,7 +2064,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 	if (!hret)
 		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
 	else {
-		ehea_error("failed registering mcast MAC");
+		pr_err("failed registering mcast MAC\n");
 		kfree(ehea_mcl_entry);
 	}
 }
@@ -2095,9 +2097,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
 	}
 
 	if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
-		ehea_info("Mcast registration limit reached (0x%llx). "
-			  "Use ALLMULTI!",
-			  port->adapter->max_mc_mac);
+		pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
+			port->adapter->max_mc_mac);
 		goto out;
 	}
 
@@ -2303,10 +2304,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	pr->swqe_id_counter += 1;
 
-	if (netif_msg_tx_queued(port)) {
-		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
+	netif_info(port, tx_queued, dev,
+		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
+	if (netif_msg_tx_queued(port))
 		ehea_dump(swqe, 512, "swqe");
-	}
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
 		netif_stop_queue(dev);
@@ -2342,14 +2343,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 
 	free_page((unsigned long)cb1);
 out:
@@ -2366,14 +2367,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		pr_err("query_ehea_port failed\n");
 		goto out;
 	}
 
@@ -2383,7 +2384,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 out:
 	free_page((unsigned long)cb1);
 	return;
@@ -2401,14 +2402,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		pr_err("query_ehea_port failed\n");
 		goto out;
 	}
 
@@ -2418,7 +2419,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 out:
 	free_page((unsigned long)cb1);
 }
@@ -2440,7 +2441,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (1)");
+		pr_err("query_ehea_qp failed (1)\n");
 		goto out;
 	}
 
@@ -2449,14 +2450,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (1)");
+		pr_err("modify_ehea_qp failed (1)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (2)");
+		pr_err("query_ehea_qp failed (2)\n");
 		goto out;
 	}
 
@@ -2465,14 +2466,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (2)");
+		pr_err("modify_ehea_qp failed (2)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (3)");
+		pr_err("query_ehea_qp failed (3)\n");
 		goto out;
 	}
 
@@ -2481,14 +2482,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (3)");
+		pr_err("modify_ehea_qp failed (3)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (4)");
+		pr_err("query_ehea_qp failed (4)\n");
 		goto out;
 	}
 
@@ -2509,7 +2510,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
 				   EHEA_MAX_ENTRIES_EQ, 1);
 	if (!port->qp_eq) {
 		ret = -EINVAL;
-		ehea_error("ehea_create_eq failed (qp_eq)");
+		pr_err("ehea_create_eq failed (qp_eq)\n");
 		goto out_kill_eq;
 	}
 
@@ -2590,27 +2591,27 @@ static int ehea_up(struct net_device *dev)
 	ret = ehea_port_res_setup(port, port->num_def_qps,
 				  port->num_add_tx_qps);
 	if (ret) {
-		ehea_error("port_res_failed");
+		netdev_err(dev, "port_res_failed\n");
 		goto out;
 	}
 
 	/* Set default QP for this port */
 	ret = ehea_configure_port(port);
 	if (ret) {
-		ehea_error("ehea_configure_port failed. ret:%d", ret);
+		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
 		goto out_clean_pr;
 	}
 
 	ret = ehea_reg_interrupts(dev);
 	if (ret) {
-		ehea_error("reg_interrupts failed. ret:%d", ret);
+		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
 		goto out_clean_pr;
 	}
 
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
 		if (ret) {
-			ehea_error("activate_qp failed");
+			netdev_err(dev, "activate_qp failed\n");
 			goto out_free_irqs;
 		}
 	}
@@ -2618,7 +2619,7 @@ static int ehea_up(struct net_device *dev)
 	for (i = 0; i < port->num_def_qps; i++) {
 		ret = ehea_fill_port_res(&port->port_res[i]);
 		if (ret) {
-			ehea_error("out_free_irqs");
+			netdev_err(dev, "out_free_irqs\n");
 			goto out_free_irqs;
 		}
 	}
@@ -2641,7 +2642,7 @@ out_clean_pr:
 	ehea_clean_all_portres(port);
 out:
 	if (ret)
-		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+		netdev_info(dev, "Failed starting. ret=%i\n", ret);
 
 	ehea_update_bcmc_registrations();
 	ehea_update_firmware_handles();
@@ -2672,8 +2673,7 @@ static int ehea_open(struct net_device *dev)
 
 	mutex_lock(&port->port_lock);
 
-	if (netif_msg_ifup(port))
-		ehea_info("enabling port %s", dev->name);
+	netif_info(port, ifup, dev, "enabling port\n");
 
 	ret = ehea_up(dev);
 	if (!ret) {
@@ -2708,8 +2708,7 @@ static int ehea_down(struct net_device *dev)
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
-		ehea_info("Failed freeing resources for %s. ret=%i",
-			  dev->name, ret);
+		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
 
 	ehea_update_firmware_handles();
 
@@ -2721,8 +2720,7 @@ static int ehea_stop(struct net_device *dev)
 	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
-	if (netif_msg_ifdown(port))
-		ehea_info("disabling port %s", dev->name);
+	netif_info(port, ifdown, dev, "disabling port\n");
 
 	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	cancel_work_sync(&port->reset_task);
@@ -2763,7 +2761,7 @@ static void ehea_flush_sq(struct ehea_port *port)
 							msecs_to_jiffies(100));
 
 		if (!ret) {
-			ehea_error("WARNING: sq not flushed completely");
+			pr_err("WARNING: sq not flushed completely\n");
 			break;
 		}
 	}
@@ -2799,7 +2797,7 @@ int ehea_stop_qps(struct net_device *dev)
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (1)");
+			pr_err("query_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2811,7 +2809,7 @@ int ehea_stop_qps(struct net_device *dev)
 							    1), cb0, &dummy64,
 					     &dummy64, &dummy16, &dummy16);
 		if (hret != H_SUCCESS) {
-			ehea_error("modify_ehea_qp failed (1)");
+			pr_err("modify_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2819,14 +2817,14 @@ int ehea_stop_qps(struct net_device *dev)
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (2)");
+			pr_err("query_ehea_qp failed (2)\n");
 			goto out;
 		}
 
 		/* deregister shared memory regions */
 		dret = ehea_rem_smrs(pr);
 		if (dret) {
-			ehea_error("unreg shared memory region failed");
+			pr_err("unreg shared memory region failed\n");
 			goto out;
 		}
 	}
@@ -2895,7 +2893,7 @@ int ehea_restart_qps(struct net_device *dev)
 
 		ret = ehea_gen_smrs(pr);
 		if (ret) {
-			ehea_error("creation of shared memory regions failed");
+			netdev_err(dev, "creation of shared memory regions failed\n");
 			goto out;
 		}
 
@@ -2906,7 +2904,7 @@ int ehea_restart_qps(struct net_device *dev)
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (1)");
+			netdev_err(dev, "query_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2918,7 +2916,7 @@ int ehea_restart_qps(struct net_device *dev)
 							    1), cb0, &dummy64,
 					     &dummy64, &dummy16, &dummy16);
 		if (hret != H_SUCCESS) {
-			ehea_error("modify_ehea_qp failed (1)");
+			netdev_err(dev, "modify_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2926,7 +2924,7 @@ int ehea_restart_qps(struct net_device *dev)
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (2)");
+			netdev_err(dev, "query_ehea_qp failed (2)\n");
 			goto out;
 		}
 
@@ -2963,8 +2961,7 @@ static void ehea_reset_port(struct work_struct *work)
 
 	ehea_set_multicast_list(dev);
 
-	if (netif_msg_timer(port))
-		ehea_info("Device %s resetted successfully", dev->name);
+	netif_info(port, timer, dev, "reset successful\n");
 
 	port_napi_enable(port);
 
@@ -2979,7 +2976,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	ehea_info("LPAR memory changed - re-initializing driver");
+	pr_info("LPAR memory changed - re-initializing driver\n");
 
 	list_for_each_entry(adapter, &adapter_list, list)
 		if (adapter->active_ports) {
@@ -3011,8 +3008,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 			/* Unregister old memory region */
 			ret = ehea_rem_mr(&adapter->mr);
 			if (ret) {
-				ehea_error("unregister MR failed - driver"
-					   " inoperable!");
+				pr_err("unregister MR failed - driver inoperable!\n");
 				goto out;
 			}
 		}
@@ -3024,8 +3020,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 			/* Register new memory region */
 			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
 			if (ret) {
-				ehea_error("register MR failed - driver"
-					   " inoperable!");
+				pr_err("register MR failed - driver inoperable!\n");
 				goto out;
 			}
 
@@ -3048,7 +3043,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
 			}
 		}
 	}
-	ehea_info("re-initializing driver complete");
+	pr_info("re-initializing driver complete\n");
 out:
 	return;
 }
@@ -3101,7 +3096,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
 	/* (Try to) enable *jumbo frames */
 	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb4) {
-		ehea_error("no mem for cb4");
+		pr_err("no mem for cb4\n");
 		ret = -ENOMEM;
 		goto out;
 	} else {
@@ -3163,13 +3158,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
 
 	ret = of_device_register(&port->ofdev);
 	if (ret) {
-		ehea_error("failed to register device. ret=%d", ret);
+		pr_err("failed to register device. ret=%d\n", ret);
 		goto out;
 	}
 
 	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
 	if (ret) {
-		ehea_error("failed to register attributes, ret=%d", ret);
+		pr_err("failed to register attributes, ret=%d\n", ret);
 		goto out_unreg_of_dev;
 	}
 
@@ -3219,7 +3214,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	dev = alloc_etherdev(sizeof(struct ehea_port));
 
 	if (!dev) {
-		ehea_error("no mem for net_device");
+		pr_err("no mem for net_device\n");
 		ret = -ENOMEM;
 		goto out_err;
 	}
@@ -3270,7 +3265,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 	ret = register_netdev(dev);
 	if (ret) {
-		ehea_error("register_netdev failed. ret=%d", ret);
+		pr_err("register_netdev failed. ret=%d\n", ret);
 		goto out_unreg_port;
 	}
 
@@ -3278,11 +3273,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
 	ret = ehea_get_jumboframe_status(port, &jumbo);
 	if (ret)
-		ehea_error("failed determining jumbo frame status for %s",
-			   port->netdev->name);
+		netdev_err(dev, "failed determining jumbo frame status\n");
 
-	ehea_info("%s: Jumbo frames are %sabled", dev->name,
-		  jumbo == 1 ? "en" : "dis");
+	netdev_info(dev, "Jumbo frames are %sabled\n",
+		    jumbo == 1 ? "en" : "dis");
 
 	adapter->active_ports++;
 
@@ -3298,8 +3292,8 @@ out_free_ethdev:
 	free_netdev(dev);
 
 out_err:
-	ehea_error("setting up logical port with id=%d failed, ret=%d",
-		   logical_port_id, ret);
+	pr_err("setting up logical port with id=%d failed, ret=%d\n",
+	       logical_port_id, ret);
 	return NULL;
 }
 
@@ -3327,13 +3321,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
 						 NULL);
 		if (!dn_log_port_id) {
-			ehea_error("bad device node: eth_dn name=%s",
-				   eth_dn->full_name);
+			pr_err("bad device node: eth_dn name=%s\n",
+			       eth_dn->full_name);
 			continue;
 		}
 
 		if (ehea_add_adapter_mr(adapter)) {
-			ehea_error("creating MR failed");
+			pr_err("creating MR failed\n");
 			of_node_put(eth_dn);
 			return -EIO;
 		}
@@ -3342,9 +3336,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
 							 *dn_log_port_id,
 							 eth_dn);
 		if (adapter->port[i])
-			ehea_info("%s -> logical port id #%d",
-				  adapter->port[i]->netdev->name,
-				  *dn_log_port_id);
+			netdev_info(adapter->port[i]->netdev,
+				    "logical port id #%d\n", *dn_log_port_id);
 		else
 			ehea_remove_adapter_mr(adapter);
 
@@ -3389,21 +3382,20 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
3389 | port = ehea_get_port(adapter, logical_port_id); | 3382 | port = ehea_get_port(adapter, logical_port_id); |
3390 | 3383 | ||
3391 | if (port) { | 3384 | if (port) { |
3392 | ehea_info("adding port with logical port id=%d failed. port " | 3385 | netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n", |
3393 | "already configured as %s.", logical_port_id, | 3386 | logical_port_id); |
3394 | port->netdev->name); | ||
3395 | return -EINVAL; | 3387 | return -EINVAL; |
3396 | } | 3388 | } |
3397 | 3389 | ||
3398 | eth_dn = ehea_get_eth_dn(adapter, logical_port_id); | 3390 | eth_dn = ehea_get_eth_dn(adapter, logical_port_id); |
3399 | 3391 | ||
3400 | if (!eth_dn) { | 3392 | if (!eth_dn) { |
3401 | ehea_info("no logical port with id %d found", logical_port_id); | 3393 | pr_info("no logical port with id %d found\n", logical_port_id); |
3402 | return -EINVAL; | 3394 | return -EINVAL; |
3403 | } | 3395 | } |
3404 | 3396 | ||
3405 | if (ehea_add_adapter_mr(adapter)) { | 3397 | if (ehea_add_adapter_mr(adapter)) { |
3406 | ehea_error("creating MR failed"); | 3398 | pr_err("creating MR failed\n"); |
3407 | return -EIO; | 3399 | return -EIO; |
3408 | } | 3400 | } |
3409 | 3401 | ||
@@ -3418,8 +3410,8 @@ static ssize_t ehea_probe_port(struct device *dev, | |||
3418 | break; | 3410 | break; |
3419 | } | 3411 | } |
3420 | 3412 | ||
3421 | ehea_info("added %s (logical port id=%d)", port->netdev->name, | 3413 | netdev_info(port->netdev, "added: (logical port id=%d)\n", |
3422 | logical_port_id); | 3414 | logical_port_id); |
3423 | } else { | 3415 | } else { |
3424 | ehea_remove_adapter_mr(adapter); | 3416 | ehea_remove_adapter_mr(adapter); |
3425 | return -EIO; | 3417 | return -EIO; |
@@ -3442,8 +3434,8 @@ static ssize_t ehea_remove_port(struct device *dev, | |||
3442 | port = ehea_get_port(adapter, logical_port_id); | 3434 | port = ehea_get_port(adapter, logical_port_id); |
3443 | 3435 | ||
3444 | if (port) { | 3436 | if (port) { |
3445 | ehea_info("removed %s (logical port id=%d)", port->netdev->name, | 3437 | netdev_info(port->netdev, "removed: (logical port id=%d)\n", |
3446 | logical_port_id); | 3438 | logical_port_id); |
3447 | 3439 | ||
3448 | ehea_shutdown_single_port(port); | 3440 | ehea_shutdown_single_port(port); |
3449 | 3441 | ||
@@ -3453,8 +3445,8 @@ static ssize_t ehea_remove_port(struct device *dev, | |||
3453 | break; | 3445 | break; |
3454 | } | 3446 | } |
3455 | } else { | 3447 | } else { |
3456 | ehea_error("removing port with logical port id=%d failed. port " | 3448 | pr_err("removing port with logical port id=%d failed. port not configured.\n", |
3457 | "not configured.", logical_port_id); | 3449 | logical_port_id); |
3458 | return -EINVAL; | 3450 | return -EINVAL; |
3459 | } | 3451 | } |
3460 | 3452 | ||
@@ -3491,7 +3483,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev, | |||
3491 | int ret; | 3483 | int ret; |
3492 | 3484 | ||
3493 | if (!dev || !dev->dev.of_node) { | 3485 | if (!dev || !dev->dev.of_node) { |
3494 | ehea_error("Invalid ibmebus device probed"); | 3486 | pr_err("Invalid ibmebus device probed\n"); |
3495 | return -EINVAL; | 3487 | return -EINVAL; |
3496 | } | 3488 | } |
3497 | 3489 | ||
@@ -3639,17 +3631,17 @@ static int ehea_mem_notifier(struct notifier_block *nb, | |||
3639 | 3631 | ||
3640 | switch (action) { | 3632 | switch (action) { |
3641 | case MEM_CANCEL_OFFLINE: | 3633 | case MEM_CANCEL_OFFLINE: |
3642 | ehea_info("memory offlining canceled"); | 3634 | pr_info("memory offlining canceled\n"); |
3643 | /* Readd canceled memory block */ | 3635 | /* Readd canceled memory block */ |
3644 | case MEM_ONLINE: | 3636 | case MEM_ONLINE: |
3645 | ehea_info("memory is going online"); | 3637 | pr_info("memory is going online\n"); |
3646 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | 3638 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); |
3647 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) | 3639 | if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) |
3648 | goto out_unlock; | 3640 | goto out_unlock; |
3649 | ehea_rereg_mrs(NULL); | 3641 | ehea_rereg_mrs(NULL); |
3650 | break; | 3642 | break; |
3651 | case MEM_GOING_OFFLINE: | 3643 | case MEM_GOING_OFFLINE: |
3652 | ehea_info("memory is going offline"); | 3644 | pr_info("memory is going offline\n"); |
3653 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); | 3645 | set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); |
3654 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) | 3646 | if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) |
3655 | goto out_unlock; | 3647 | goto out_unlock; |
@@ -3675,7 +3667,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb, | |||
3675 | unsigned long action, void *unused) | 3667 | unsigned long action, void *unused) |
3676 | { | 3668 | { |
3677 | if (action == SYS_RESTART) { | 3669 | if (action == SYS_RESTART) { |
3678 | ehea_info("Reboot: freeing all eHEA resources"); | 3670 | pr_info("Reboot: freeing all eHEA resources\n"); |
3679 | ibmebus_unregister_driver(&ehea_driver); | 3671 | ibmebus_unregister_driver(&ehea_driver); |
3680 | } | 3672 | } |
3681 | return NOTIFY_DONE; | 3673 | return NOTIFY_DONE; |
@@ -3691,22 +3683,22 @@ static int check_module_parm(void) | |||
3691 | 3683 | ||
3692 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || | 3684 | if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || |
3693 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { | 3685 | (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { |
3694 | ehea_info("Bad parameter: rq1_entries"); | 3686 | pr_info("Bad parameter: rq1_entries\n"); |
3695 | ret = -EINVAL; | 3687 | ret = -EINVAL; |
3696 | } | 3688 | } |
3697 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || | 3689 | if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || |
3698 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { | 3690 | (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { |
3699 | ehea_info("Bad parameter: rq2_entries"); | 3691 | pr_info("Bad parameter: rq2_entries\n"); |
3700 | ret = -EINVAL; | 3692 | ret = -EINVAL; |
3701 | } | 3693 | } |
3702 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || | 3694 | if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || |
3703 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { | 3695 | (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { |
3704 | ehea_info("Bad parameter: rq3_entries"); | 3696 | pr_info("Bad parameter: rq3_entries\n"); |
3705 | ret = -EINVAL; | 3697 | ret = -EINVAL; |
3706 | } | 3698 | } |
3707 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || | 3699 | if ((sq_entries < EHEA_MIN_ENTRIES_QP) || |
3708 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { | 3700 | (sq_entries > EHEA_MAX_ENTRIES_SQ)) { |
3709 | ehea_info("Bad parameter: sq_entries"); | 3701 | pr_info("Bad parameter: sq_entries\n"); |
3710 | ret = -EINVAL; | 3702 | ret = -EINVAL; |
3711 | } | 3703 | } |
3712 | 3704 | ||
@@ -3726,8 +3718,7 @@ int __init ehea_module_init(void) | |||
3726 | { | 3718 | { |
3727 | int ret; | 3719 | int ret; |
3728 | 3720 | ||
3729 | printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", | 3721 | pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION); |
3730 | DRV_VERSION); | ||
3731 | 3722 | ||
3732 | 3723 | ||
3733 | INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); | 3724 | INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); |
@@ -3747,27 +3738,27 @@ int __init ehea_module_init(void) | |||
3747 | 3738 | ||
3748 | ret = register_reboot_notifier(&ehea_reboot_nb); | 3739 | ret = register_reboot_notifier(&ehea_reboot_nb); |
3749 | if (ret) | 3740 | if (ret) |
3750 | ehea_info("failed registering reboot notifier"); | 3741 | pr_info("failed registering reboot notifier\n"); |
3751 | 3742 | ||
3752 | ret = register_memory_notifier(&ehea_mem_nb); | 3743 | ret = register_memory_notifier(&ehea_mem_nb); |
3753 | if (ret) | 3744 | if (ret) |
3754 | ehea_info("failed registering memory remove notifier"); | 3745 | pr_info("failed registering memory remove notifier\n"); |
3755 | 3746 | ||
3756 | ret = crash_shutdown_register(ehea_crash_handler); | 3747 | ret = crash_shutdown_register(ehea_crash_handler); |
3757 | if (ret) | 3748 | if (ret) |
3758 | ehea_info("failed registering crash handler"); | 3749 | pr_info("failed registering crash handler\n"); |
3759 | 3750 | ||
3760 | ret = ibmebus_register_driver(&ehea_driver); | 3751 | ret = ibmebus_register_driver(&ehea_driver); |
3761 | if (ret) { | 3752 | if (ret) { |
3762 | ehea_error("failed registering eHEA device driver on ebus"); | 3753 | pr_err("failed registering eHEA device driver on ebus\n"); |
3763 | goto out2; | 3754 | goto out2; |
3764 | } | 3755 | } |
3765 | 3756 | ||
3766 | ret = driver_create_file(&ehea_driver.driver, | 3757 | ret = driver_create_file(&ehea_driver.driver, |
3767 | &driver_attr_capabilities); | 3758 | &driver_attr_capabilities); |
3768 | if (ret) { | 3759 | if (ret) { |
3769 | ehea_error("failed to register capabilities attribute, ret=%d", | 3760 | pr_err("failed to register capabilities attribute, ret=%d\n", |
3770 | ret); | 3761 | ret); |
3771 | goto out3; | 3762 | goto out3; |
3772 | } | 3763 | } |
3773 | 3764 | ||
@@ -3793,7 +3784,7 @@ static void __exit ehea_module_exit(void) | |||
3793 | unregister_reboot_notifier(&ehea_reboot_nb); | 3784 | unregister_reboot_notifier(&ehea_reboot_nb); |
3794 | ret = crash_shutdown_unregister(ehea_crash_handler); | 3785 | ret = crash_shutdown_unregister(ehea_crash_handler); |
3795 | if (ret) | 3786 | if (ret) |
3796 | ehea_info("failed unregistering crash handler"); | 3787 | pr_info("failed unregistering crash handler\n"); |
3797 | unregister_memory_notifier(&ehea_mem_nb); | 3788 | unregister_memory_notifier(&ehea_mem_nb); |
3798 | kfree(ehea_fw_handles.arr); | 3789 | kfree(ehea_fw_handles.arr); |
3799 | kfree(ehea_bcmc_regs.arr); | 3790 | kfree(ehea_bcmc_regs.arr); |
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c index 8fe9dcaa7538..0506967b9044 100644 --- a/drivers/net/ehea/ehea_phyp.c +++ b/drivers/net/ehea/ehea_phyp.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include "ehea_phyp.h" | 31 | #include "ehea_phyp.h" |
30 | 32 | ||
31 | 33 | ||
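The two lines added above are what give every pr_<level>() call in this file its module-name prefix: pr_fmt() is defined before the first include so the fallback definition in <linux/printk.h> never takes effect, and KBUILD_MODNAME expands to the object's module name. A minimal self-contained sketch, assuming a module named "ehea" (the function below is illustrative only):

/* Must precede the first include so the default pr_fmt in
 * <linux/printk.h> is not used instead. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void sketch_log(int ret)
{
	/* Expands to printk(KERN_ERR "ehea" ": " "creating MR failed, ret=%d\n", ret) */
	pr_err("creating MR failed, ret=%d\n", ret);
}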
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode, | |||
67 | } | 69 | } |
68 | 70 | ||
69 | if (ret < H_SUCCESS) | 71 | if (ret < H_SUCCESS) |
70 | ehea_error("opcode=%lx ret=%lx" | 72 | pr_err("opcode=%lx ret=%lx" |
71 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" | 73 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" |
72 | " arg5=%lx arg6=%lx arg7=%lx ", | 74 | " arg5=%lx arg6=%lx arg7=%lx\n", |
73 | opcode, ret, | 75 | opcode, ret, |
74 | arg1, arg2, arg3, arg4, arg5, | 76 | arg1, arg2, arg3, arg4, arg5, arg6, arg7); |
75 | arg6, arg7); | ||
76 | 77 | ||
77 | return ret; | 78 | return ret; |
78 | } | 79 | } |
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode, | |||
114 | && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) | 115 | && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) |
115 | || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) | 116 | || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) |
116 | && (arg3 == H_PORT_CB7_DUCQPN))))) | 117 | && (arg3 == H_PORT_CB7_DUCQPN))))) |
117 | ehea_error("opcode=%lx ret=%lx" | 118 | pr_err("opcode=%lx ret=%lx" |
118 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" | 119 | " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" |
119 | " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" | 120 | " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" |
120 | " arg9=%lx" | 121 | " arg9=%lx" |
121 | " out1=%lx out2=%lx out3=%lx out4=%lx" | 122 | " out1=%lx out2=%lx out3=%lx out4=%lx" |
122 | " out5=%lx out6=%lx out7=%lx out8=%lx" | 123 | " out5=%lx out6=%lx out7=%lx out8=%lx" |
123 | " out9=%lx", | 124 | " out9=%lx\n", |
124 | opcode, ret, | 125 | opcode, ret, |
125 | arg1, arg2, arg3, arg4, arg5, | 126 | arg1, arg2, arg3, arg4, arg5, |
126 | arg6, arg7, arg8, arg9, | 127 | arg6, arg7, arg8, arg9, |
127 | outs[0], outs[1], outs[2], outs[3], | 128 | outs[0], outs[1], outs[2], outs[3], outs[4], |
128 | outs[4], outs[5], outs[6], outs[7], | 129 | outs[5], outs[6], outs[7], outs[8]); |
129 | outs[8]); | ||
130 | return ret; | 130 | return ret; |
131 | } | 131 | } |
132 | 132 | ||
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle, | |||
515 | const u64 log_pageaddr, const u64 count) | 515 | const u64 log_pageaddr, const u64 count) |
516 | { | 516 | { |
517 | if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { | 517 | if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { |
518 | ehea_error("not on pageboundary"); | 518 | pr_err("not on pageboundary\n"); |
519 | return H_PARAMETER; | 519 | return H_PARAMETER; |
520 | } | 520 | } |
521 | 521 | ||
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c index 89128b6373e3..cd44bb8017d9 100644 --- a/drivers/net/ehea/ehea_qmr.c +++ b/drivers/net/ehea/ehea_qmr.c | |||
@@ -26,6 +26,8 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
30 | |||
29 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
30 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
31 | #include "ehea.h" | 33 | #include "ehea.h" |
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue) | |||
45 | queue->current_q_offset -= queue->pagesize; | 47 | queue->current_q_offset -= queue->pagesize; |
46 | retvalue = NULL; | 48 | retvalue = NULL; |
47 | } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { | 49 | } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { |
48 | ehea_error("not on pageboundary"); | 50 | pr_err("not on pageboundary\n"); |
49 | retvalue = NULL; | 51 | retvalue = NULL; |
50 | } | 52 | } |
51 | return retvalue; | 53 | return retvalue; |
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, | |||
58 | int i, k; | 60 | int i, k; |
59 | 61 | ||
60 | if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { | 62 | if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { |
61 | ehea_error("pagesize conflict! kernel pagesize=%d, " | 63 | pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n", |
62 | "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize); | 64 | (int)PAGE_SIZE, (int)pagesize); |
63 | return -EINVAL; | 65 | return -EINVAL; |
64 | } | 66 | } |
65 | 67 | ||
66 | queue->queue_length = nr_of_pages * pagesize; | 68 | queue->queue_length = nr_of_pages * pagesize; |
67 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); | 69 | queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); |
68 | if (!queue->queue_pages) { | 70 | if (!queue->queue_pages) { |
69 | ehea_error("no mem for queue_pages"); | 71 | pr_err("no mem for queue_pages\n"); |
70 | return -ENOMEM; | 72 | return -ENOMEM; |
71 | } | 73 | } |
72 | 74 | ||
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
130 | 132 | ||
131 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); | 133 | cq = kzalloc(sizeof(*cq), GFP_KERNEL); |
132 | if (!cq) { | 134 | if (!cq) { |
133 | ehea_error("no mem for cq"); | 135 | pr_err("no mem for cq\n"); |
134 | goto out_nomem; | 136 | goto out_nomem; |
135 | } | 137 | } |
136 | 138 | ||
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
147 | hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, | 149 | hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, |
148 | &cq->fw_handle, &cq->epas); | 150 | &cq->fw_handle, &cq->epas); |
149 | if (hret != H_SUCCESS) { | 151 | if (hret != H_SUCCESS) { |
150 | ehea_error("alloc_resource_cq failed"); | 152 | pr_err("alloc_resource_cq failed\n"); |
151 | goto out_freemem; | 153 | goto out_freemem; |
152 | } | 154 | } |
153 | 155 | ||
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
159 | for (counter = 0; counter < cq->attr.nr_pages; counter++) { | 161 | for (counter = 0; counter < cq->attr.nr_pages; counter++) { |
160 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | 162 | vpage = hw_qpageit_get_inc(&cq->hw_queue); |
161 | if (!vpage) { | 163 | if (!vpage) { |
162 | ehea_error("hw_qpageit_get_inc failed"); | 164 | pr_err("hw_qpageit_get_inc failed\n"); |
163 | goto out_kill_hwq; | 165 | goto out_kill_hwq; |
164 | } | 166 | } |
165 | 167 | ||
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
168 | 0, EHEA_CQ_REGISTER_ORIG, | 170 | 0, EHEA_CQ_REGISTER_ORIG, |
169 | cq->fw_handle, rpage, 1); | 171 | cq->fw_handle, rpage, 1); |
170 | if (hret < H_SUCCESS) { | 172 | if (hret < H_SUCCESS) { |
171 | ehea_error("register_rpage_cq failed ehea_cq=%p " | 173 | pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n", |
172 | "hret=%llx counter=%i act_pages=%i", | 174 | cq, hret, counter, cq->attr.nr_pages); |
173 | cq, hret, counter, cq->attr.nr_pages); | ||
174 | goto out_kill_hwq; | 175 | goto out_kill_hwq; |
175 | } | 176 | } |
176 | 177 | ||
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, | |||
178 | vpage = hw_qpageit_get_inc(&cq->hw_queue); | 179 | vpage = hw_qpageit_get_inc(&cq->hw_queue); |
179 | 180 | ||
180 | if ((hret != H_SUCCESS) || (vpage)) { | 181 | if ((hret != H_SUCCESS) || (vpage)) { |
181 | ehea_error("registration of pages not " | 182 | pr_err("registration of pages not complete hret=%llx\n", |
182 | "complete hret=%llx\n", hret); | 183 | hret); |
183 | goto out_kill_hwq; | 184 | goto out_kill_hwq; |
184 | } | 185 | } |
185 | } else { | 186 | } else { |
186 | if (hret != H_PAGE_REGISTERED) { | 187 | if (hret != H_PAGE_REGISTERED) { |
187 | ehea_error("CQ: registration of page failed " | 188 | pr_err("CQ: registration of page failed hret=%llx\n", |
188 | "hret=%llx\n", hret); | 189 | hret); |
189 | goto out_kill_hwq; | 190 | goto out_kill_hwq; |
190 | } | 191 | } |
191 | } | 192 | } |
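The hunks above also show the string coalescing applied throughout: format strings that were split across source lines are rejoined, since kernel coding style keeps user-visible strings on a single line, even past 80 columns, so the exact message can be grepped for; only the argument list is wrapped. A hypothetical sketch of the before/after shape:

#include <linux/printk.h>

/* Sketch only: keep the printable string whole and wrap the arguments. */
static void log_cq_failure(void *cq, unsigned long long hret, int counter)
{
	/* Previously split, which breaks grepping for the message:
	 *   pr_err("register_rpage_cq failed ehea_cq=%p "
	 *          "hret=%llx counter=%i\n", cq, hret, counter);
	 */
	pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i\n",
	       cq, hret, counter);
}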
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq) | |||
241 | } | 242 | } |
242 | 243 | ||
243 | if (hret != H_SUCCESS) { | 244 | if (hret != H_SUCCESS) { |
244 | ehea_error("destroy CQ failed"); | 245 | pr_err("destroy CQ failed\n"); |
245 | return -EIO; | 246 | return -EIO; |
246 | } | 247 | } |
247 | 248 | ||
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | |||
259 | 260 | ||
260 | eq = kzalloc(sizeof(*eq), GFP_KERNEL); | 261 | eq = kzalloc(sizeof(*eq), GFP_KERNEL); |
261 | if (!eq) { | 262 | if (!eq) { |
262 | ehea_error("no mem for eq"); | 263 | pr_err("no mem for eq\n"); |
263 | return NULL; | 264 | return NULL; |
264 | } | 265 | } |
265 | 266 | ||
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter, | |||
272 | hret = ehea_h_alloc_resource_eq(adapter->handle, | 273 | hret = ehea_h_alloc_resource_eq(adapter->handle, |
273 | &eq->attr, &eq->fw_handle); | 274 | &eq->attr, &eq->fw_handle); |
274 | if (hret != H_SUCCESS) { | 275 | if (hret != H_SUCCESS) { |
275 | ehea_error("alloc_resource_eq failed"); | 276 | pr_err("alloc_resource_eq failed\n"); |
276 | goto out_freemem; | 277 | goto out_freemem; |
277 | } | 278 | } |
278 | 279 | ||
279 | ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, | 280 | ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, |
280 | EHEA_PAGESIZE, sizeof(struct ehea_eqe)); | 281 | EHEA_PAGESIZE, sizeof(struct ehea_eqe)); |
281 | if (ret) { | 282 | if (ret) { |
282 | ehea_error("can't allocate eq pages"); | 283 | pr_err("can't allocate eq pages\n"); |
283 | goto out_freeres; | 284 | goto out_freeres; |
284 | } | 285 | } |
285 | 286 | ||
286 | for (i = 0; i < eq->attr.nr_pages; i++) { | 287 | for (i = 0; i < eq->attr.nr_pages; i++) { |
287 | vpage = hw_qpageit_get_inc(&eq->hw_queue); | 288 | vpage = hw_qpageit_get_inc(&eq->hw_queue); |
288 | if (!vpage) { | 289 | if (!vpage) { |
289 | ehea_error("hw_qpageit_get_inc failed"); | 290 | pr_err("hw_qpageit_get_inc failed\n"); |
290 | hret = H_RESOURCE; | 291 | hret = H_RESOURCE; |
291 | goto out_kill_hwq; | 292 | goto out_kill_hwq; |
292 | } | 293 | } |
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq) | |||
370 | } | 371 | } |
371 | 372 | ||
372 | if (hret != H_SUCCESS) { | 373 | if (hret != H_SUCCESS) { |
373 | ehea_error("destroy EQ failed"); | 374 | pr_err("destroy EQ failed\n"); |
374 | return -EIO; | 375 | return -EIO; |
375 | } | 376 | } |
376 | 377 | ||
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, | |||
395 | for (cnt = 0; cnt < nr_pages; cnt++) { | 396 | for (cnt = 0; cnt < nr_pages; cnt++) { |
396 | vpage = hw_qpageit_get_inc(hw_queue); | 397 | vpage = hw_qpageit_get_inc(hw_queue); |
397 | if (!vpage) { | 398 | if (!vpage) { |
398 | ehea_error("hw_qpageit_get_inc failed"); | 399 | pr_err("hw_qpageit_get_inc failed\n"); |
399 | goto out_kill_hwq; | 400 | goto out_kill_hwq; |
400 | } | 401 | } |
401 | rpage = virt_to_abs(vpage); | 402 | rpage = virt_to_abs(vpage); |
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, | |||
403 | 0, h_call_q_selector, | 404 | 0, h_call_q_selector, |
404 | qp->fw_handle, rpage, 1); | 405 | qp->fw_handle, rpage, 1); |
405 | if (hret < H_SUCCESS) { | 406 | if (hret < H_SUCCESS) { |
406 | ehea_error("register_rpage_qp failed"); | 407 | pr_err("register_rpage_qp failed\n"); |
407 | goto out_kill_hwq; | 408 | goto out_kill_hwq; |
408 | } | 409 | } |
409 | } | 410 | } |
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
432 | 433 | ||
433 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | 434 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); |
434 | if (!qp) { | 435 | if (!qp) { |
435 | ehea_error("no mem for qp"); | 436 | pr_err("no mem for qp\n"); |
436 | return NULL; | 437 | return NULL; |
437 | } | 438 | } |
438 | 439 | ||
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
441 | hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, | 442 | hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, |
442 | &qp->fw_handle, &qp->epas); | 443 | &qp->fw_handle, &qp->epas); |
443 | if (hret != H_SUCCESS) { | 444 | if (hret != H_SUCCESS) { |
444 | ehea_error("ehea_h_alloc_resource_qp failed"); | 445 | pr_err("ehea_h_alloc_resource_qp failed\n"); |
445 | goto out_freemem; | 446 | goto out_freemem; |
446 | } | 447 | } |
447 | 448 | ||
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
455 | init_attr->act_wqe_size_enc_sq, adapter, | 456 | init_attr->act_wqe_size_enc_sq, adapter, |
456 | 0); | 457 | 0); |
457 | if (ret) { | 458 | if (ret) { |
458 | ehea_error("can't register for sq ret=%x", ret); | 459 | pr_err("can't register for sq ret=%x\n", ret); |
459 | goto out_freeres; | 460 | goto out_freeres; |
460 | } | 461 | } |
461 | 462 | ||
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
465 | init_attr->act_wqe_size_enc_rq1, | 466 | init_attr->act_wqe_size_enc_rq1, |
466 | adapter, 1); | 467 | adapter, 1); |
467 | if (ret) { | 468 | if (ret) { |
468 | ehea_error("can't register for rq1 ret=%x", ret); | 469 | pr_err("can't register for rq1 ret=%x\n", ret); |
469 | goto out_kill_hwsq; | 470 | goto out_kill_hwsq; |
470 | } | 471 | } |
471 | 472 | ||
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
476 | init_attr->act_wqe_size_enc_rq2, | 477 | init_attr->act_wqe_size_enc_rq2, |
477 | adapter, 2); | 478 | adapter, 2); |
478 | if (ret) { | 479 | if (ret) { |
479 | ehea_error("can't register for rq2 ret=%x", ret); | 480 | pr_err("can't register for rq2 ret=%x\n", ret); |
480 | goto out_kill_hwr1q; | 481 | goto out_kill_hwr1q; |
481 | } | 482 | } |
482 | } | 483 | } |
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, | |||
488 | init_attr->act_wqe_size_enc_rq3, | 489 | init_attr->act_wqe_size_enc_rq3, |
489 | adapter, 3); | 490 | adapter, 3); |
490 | if (ret) { | 491 | if (ret) { |
491 | ehea_error("can't register for rq3 ret=%x", ret); | 492 | pr_err("can't register for rq3 ret=%x\n", ret); |
492 | goto out_kill_hwr2q; | 493 | goto out_kill_hwr2q; |
493 | } | 494 | } |
494 | } | 495 | } |
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp) | |||
553 | } | 554 | } |
554 | 555 | ||
555 | if (hret != H_SUCCESS) { | 556 | if (hret != H_SUCCESS) { |
556 | ehea_error("destroy QP failed"); | 557 | pr_err("destroy QP failed\n"); |
557 | return -EIO; | 558 | return -EIO; |
558 | } | 559 | } |
559 | 560 | ||
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt, | |||
842 | (hret != H_PAGE_REGISTERED)) { | 843 | (hret != H_PAGE_REGISTERED)) { |
843 | ehea_h_free_resource(adapter->handle, mr->handle, | 844 | ehea_h_free_resource(adapter->handle, mr->handle, |
844 | FORCE_FREE); | 845 | FORCE_FREE); |
845 | ehea_error("register_rpage_mr failed"); | 846 | pr_err("register_rpage_mr failed\n"); |
846 | return hret; | 847 | return hret; |
847 | } | 848 | } |
848 | } | 849 | } |
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
896 | 897 | ||
897 | pt = (void *)get_zeroed_page(GFP_KERNEL); | 898 | pt = (void *)get_zeroed_page(GFP_KERNEL); |
898 | if (!pt) { | 899 | if (!pt) { |
899 | ehea_error("no mem"); | 900 | pr_err("no mem\n"); |
900 | ret = -ENOMEM; | 901 | ret = -ENOMEM; |
901 | goto out; | 902 | goto out; |
902 | } | 903 | } |
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
906 | &mr->handle, &mr->lkey); | 907 | &mr->handle, &mr->lkey); |
907 | 908 | ||
908 | if (hret != H_SUCCESS) { | 909 | if (hret != H_SUCCESS) { |
909 | ehea_error("alloc_resource_mr failed"); | 910 | pr_err("alloc_resource_mr failed\n"); |
910 | ret = -EIO; | 911 | ret = -EIO; |
911 | goto out; | 912 | goto out; |
912 | } | 913 | } |
913 | 914 | ||
914 | if (!ehea_bmap) { | 915 | if (!ehea_bmap) { |
915 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); | 916 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); |
916 | ehea_error("no busmap available"); | 917 | pr_err("no busmap available\n"); |
917 | ret = -EIO; | 918 | ret = -EIO; |
918 | goto out; | 919 | goto out; |
919 | } | 920 | } |
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) | |||
929 | 930 | ||
930 | if (hret != H_SUCCESS) { | 931 | if (hret != H_SUCCESS) { |
931 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); | 932 | ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); |
932 | ehea_error("registering mr failed"); | 933 | pr_err("registering mr failed\n"); |
933 | ret = -EIO; | 934 | ret = -EIO; |
934 | goto out; | 935 | goto out; |
935 | } | 936 | } |
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr) | |||
952 | hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, | 953 | hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, |
953 | FORCE_FREE); | 954 | FORCE_FREE); |
954 | if (hret != H_SUCCESS) { | 955 | if (hret != H_SUCCESS) { |
955 | ehea_error("destroy MR failed"); | 956 | pr_err("destroy MR failed\n"); |
956 | return -EIO; | 957 | return -EIO; |
957 | } | 958 | } |
958 | 959 | ||
@@ -987,14 +988,14 @@ void print_error_data(u64 *data) | |||
987 | length = EHEA_PAGESIZE; | 988 | length = EHEA_PAGESIZE; |
988 | 989 | ||
989 | if (type == EHEA_AER_RESTYPE_QP) | 990 | if (type == EHEA_AER_RESTYPE_QP) |
990 | ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " | 991 | pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n", |
991 | "port=%llX", resource, data[6], data[12], data[22]); | 992 | resource, data[6], data[12], data[22]); |
992 | else if (type == EHEA_AER_RESTYPE_CQ) | 993 | else if (type == EHEA_AER_RESTYPE_CQ) |
993 | ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, | 994 | pr_err("CQ (resource=%llX) state: AER=0x%llX\n", |
994 | data[6]); | 995 | resource, data[6]); |
995 | else if (type == EHEA_AER_RESTYPE_EQ) | 996 | else if (type == EHEA_AER_RESTYPE_EQ) |
996 | ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, | 997 | pr_err("EQ (resource=%llX) state: AER=0x%llX\n", |
997 | data[6]); | 998 | resource, data[6]); |
998 | 999 | ||
999 | ehea_dump(data, length, "error data"); | 1000 | ehea_dump(data, length, "error data"); |
1000 | } | 1001 | } |
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle, | |||
1008 | 1009 | ||
1009 | rblock = (void *)get_zeroed_page(GFP_KERNEL); | 1010 | rblock = (void *)get_zeroed_page(GFP_KERNEL); |
1010 | if (!rblock) { | 1011 | if (!rblock) { |
1011 | ehea_error("Cannot allocate rblock memory."); | 1012 | pr_err("Cannot allocate rblock memory\n"); |
1012 | goto out; | 1013 | goto out; |
1013 | } | 1014 | } |
1014 | 1015 | ||
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle, | |||
1020 | *aerr = rblock[12]; | 1021 | *aerr = rblock[12]; |
1021 | print_error_data(rblock); | 1022 | print_error_data(rblock); |
1022 | } else if (ret == H_R_STATE) { | 1023 | } else if (ret == H_R_STATE) { |
1023 | ehea_error("No error data available: %llX.", res_handle); | 1024 | pr_err("No error data available: %llX\n", res_handle); |
1024 | } else | 1025 | } else |
1025 | ehea_error("Error data could not be fetched: %llX", res_handle); | 1026 | pr_err("Error data could not be fetched: %llX\n", res_handle); |
1026 | 1027 | ||
1027 | free_page((unsigned long)rblock); | 1028 | free_page((unsigned long)rblock); |
1028 | out: | 1029 | out: |