author     Yevgeny Petrilin <yevgenyp@mellanox.co.il>   2009-06-01 16:27:13 -0400
committer  David S. Miller <davem@davemloft.net>        2009-06-02 05:29:03 -0400
commit     453a608277355735190e05c43f909808e0f73641 (patch)
tree       745af467980a75013642915f458a1a8af3035211 /drivers/net/mlx4/en_rx.c
parent     f771bef98004d9d141b085d987a77d06669d4f4f (diff)
mlx4_en: Giving interface name in debug messages
Each debug message now shows the interface name when the net device is already registered, and the PCI bus ID plus port number when it is not registered yet. Messages that are not port/netdev specific keep the old format.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
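As an illustrative aside (the macro below is not part of this patch; the real en_* helpers are defined elsewhere in the driver, and everything used here beyond the priv argument is an assumption), a wrapper matching the behaviour described above could be sketched roughly as:

/*
 * Hedged sketch only: prefix messages with the netdev name once the
 * interface is registered, otherwise fall back to the PCI bus ID and
 * port number. priv->registered, priv->port and the en_print() name
 * are assumptions for illustration.
 */
#define en_print(level, priv, format, arg...)				\
	do {								\
		if ((priv)->registered)					\
			printk(level "%s: %s: " format, DRV_NAME,	\
			       (priv)->dev->name, ## arg);		\
		else							\
			printk(level "%s: %s: Port %d: " format,	\
			       DRV_NAME,				\
			       dev_name(&(priv)->mdev->pdev->dev),	\
			       (priv)->port, ## arg);			\
	} while (0)

#define en_err(priv, format, arg...)  en_print(KERN_ERR, priv, format, ## arg)
#define en_warn(priv, format, arg...) en_print(KERN_WARNING, priv, format, ## arg)

An en_dbg() variant would add a message-level check (e.g. netif_msg_*) on top of the same idea.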
Diffstat (limited to 'drivers/net/mlx4/en_rx.c')
-rw-r--r--  drivers/net/mlx4/en_rx.c | 78
1 file changed, 37 insertions(+), 41 deletions(-)
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6bfab6e5ba1d..5a14899c1e25 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 			goto out;
 
 		page_alloc->offset = priv->frag_info[i].frag_align;
-		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
 		       i, page_alloc->page);
 	}
 	return 0;
 
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		page_alloc = &ring->page_alloc[i];
-		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
 		       i, page_count(page_alloc->page));
 
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
@@ -214,10 +214,10 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 	skb_frags = ring->rx_info + (index << priv->log_rx_info);
 	for (nr = 0; nr < priv->num_frags; nr++) {
-		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
-		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
 		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
@@ -226,7 +226,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
@@ -239,14 +238,14 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					mlx4_err(mdev, "Failed to allocate "
+					en_err(priv, "Failed to allocate "
 						       "enough rx buffers\n");
 					return -ENOMEM;
 				} else {
 					new_size = rounddown_pow_of_two(ring->actual_size);
-					mlx4_warn(mdev, "Only %d buffers allocated "
+					en_warn(priv, "Only %d buffers allocated "
 						      "reducing ring size to %d",
 						      ring->actual_size, new_size);
 					goto reduce_rings;
 				}
 			}
@@ -282,8 +281,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 					    ring->size_mask);
 		if (err) {
 			if (netif_msg_rx_err(priv))
-				mlx4_warn(priv->mdev,
-					  "Failed preparing rx descriptor\n");
+				en_warn(priv, "Failed preparing rx descriptor\n");
 			priv->port_stats.rx_alloc_failed++;
 			break;
 		}
@@ -301,14 +299,14 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 {
 	int index;
 
-	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
 		 ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
 	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
@@ -373,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					sizeof(struct skb_frag_struct));
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
-		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		en_err(priv, "Failed allocating rx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -386,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map RX buffer\n");
+		en_err(priv, "Failed to map RX buffer\n");
 		goto err_hwq;
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
@@ -404,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 				  sizeof(struct net_lro_desc),
 				  GFP_KERNEL);
 	if (!ring->lro.lro_arr) {
-		mlx4_err(mdev, "Failed to allocate lro array\n");
+		en_err(priv, "Failed to allocate lro array\n");
 		goto err_map;
 	}
 	ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -455,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		/* Initialize page allocators */
 		err = mlx4_en_init_allocator(priv, ring);
 		if (err) {
-			mlx4_err(mdev, "Failed initializing ring allocator\n");
+			en_err(priv, "Failed initializing ring allocator\n");
 			ring_ind--;
 			goto err_allocator;
 		}
@@ -486,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
 				     ring->wqres.db.dma, &ring->srq);
 		if (err){
-			mlx4_err(mdev, "Failed to allocate srq\n");
+			en_err(priv, "Failed to allocate srq\n");
 			ring_ind--;
 			goto err_srq;
 		}
@@ -601,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
 	if (!skb) {
-		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
 	skb->dev = priv->dev;
@@ -680,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
 	struct skb_frag_struct *skb_frags;
@@ -717,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 						MLX4_CQE_OPCODE_ERROR)) {
-			mlx4_err(mdev, "CQE completed in error - vendor "
+			en_err(priv, "CQE completed in error - vendor "
 				  "syndrom:%d syndrom:%d\n",
 				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
 				  ((struct mlx4_err_cqe *) cqe)->syndrome);
 			goto next;
 		}
 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
 			goto next;
 		}
 
@@ -874,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
 	u16 res = MLX4_EN_ALLOC_SIZE % stride;
 	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
 
-	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
 			    "res:%d offset:%d\n", stride, align, res, offset);
 	return offset;
 }
@@ -919,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
 
-	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
 		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
 				"stride:%d last_offset:%d\n", i,
 				priv->frag_info[i].frag_size,
 				priv->frag_info[i].frag_prefix_size,
@@ -942,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 	int i;
 
 	rss_map->size = roundup_pow_of_two(num_entries);
-	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+	en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
 		 rss_map->size);
 
 	for (i = 0; i < rss_map->size; i++) {
 		rss_map->map[i] = i % num_rings;
-		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+		en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
 	}
 }
 
@@ -962,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 
 	context = kmalloc(sizeof *context , GFP_KERNEL);
 	if (!context) {
-		mlx4_err(mdev, "Failed to allocate qp context\n");
+		en_err(priv, "Failed to allocate qp context\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
 	}
 	qp->event = mlx4_en_sqp_event;
@@ -1000,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
 				    rss_map->size, &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", rss_map->size);
 		return err;
 	}
 
@@ -1025,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	/* Configure RSS indirection qp */
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
+		en_err(priv, "Failed to reserve range for RSS "
 			       "indirection qp\n");
 		goto rss_err;
 	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto reserve_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;