about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
author    Eugenia Emantayev <eugenia@mellanox.com>  2013-11-07 05:19:54 -0500
committer David S. Miller <davem@davemloft.net>     2013-11-07 19:22:48 -0500
commit    163561a4e2f8af44e96453bc10c7a4f9bcc736e1 (patch)
tree      8d328edce3828c60e80989267eb7594d2d7e24d6 /drivers/net
parent    6e7136ed7793fa4948b0192dcd6862d12a50d67c (diff)
net/mlx4_en: Datapath structures are allocated per NUMA node
For each RX/TX ring and its CQ, allocation is done on a NUMA node that
corresponds to the core that the data structure should operate on. The
assumption is that the core number is reflected by the ring index. The
affected allocations are the ring/CQ data structures, the TX/RX info and
the shared HW/SW buffer. For TX rings, each core has rings of all UPs.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c11
6 files changed, 70 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index d203f11b9edf..3a098cc4d349 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -45,16 +45,20 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
45 45
46int mlx4_en_create_cq(struct mlx4_en_priv *priv, 46int mlx4_en_create_cq(struct mlx4_en_priv *priv,
47 struct mlx4_en_cq **pcq, 47 struct mlx4_en_cq **pcq,
48 int entries, int ring, enum cq_type mode) 48 int entries, int ring, enum cq_type mode,
49 int node)
49{ 50{
50 struct mlx4_en_dev *mdev = priv->mdev; 51 struct mlx4_en_dev *mdev = priv->mdev;
51 struct mlx4_en_cq *cq; 52 struct mlx4_en_cq *cq;
52 int err; 53 int err;
53 54
54 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
55 if (!cq) { 56 if (!cq) {
56 en_err(priv, "Failed to allocate CQ structure\n"); 57 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
57 return -ENOMEM; 58 if (!cq) {
59 en_err(priv, "Failed to allocate CQ structure\n");
60 return -ENOMEM;
61 }
58 } 62 }
59 63
60 cq->size = entries; 64 cq->size = entries;
@@ -64,8 +68,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
64 cq->is_tx = mode; 68 cq->is_tx = mode;
65 spin_lock_init(&cq->lock); 69 spin_lock_init(&cq->lock);
66 70
71 /* Allocate HW buffers on provided NUMA node.
72 * dev->numa_node is used in mtt range allocation flow.
73 */
74 set_dev_node(&mdev->dev->pdev->dev, node);
67 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, 75 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
68 cq->buf_size, 2 * PAGE_SIZE); 76 cq->buf_size, 2 * PAGE_SIZE);
77 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
69 if (err) 78 if (err)
70 goto err_cq; 79 goto err_cq;
71 80
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f430788cc4fe..e72d8a112a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1895,6 +1895,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1895 struct mlx4_en_port_profile *prof = priv->prof; 1895 struct mlx4_en_port_profile *prof = priv->prof;
1896 int i; 1896 int i;
1897 int err; 1897 int err;
1898 int node;
1898 1899
1899 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn); 1900 err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
1900 if (err) { 1901 if (err) {
@@ -1904,23 +1905,26 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1904 1905
1905 /* Create tx Rings */ 1906 /* Create tx Rings */
1906 for (i = 0; i < priv->tx_ring_num; i++) { 1907 for (i = 0; i < priv->tx_ring_num; i++) {
1908 node = cpu_to_node(i % num_online_cpus());
1907 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1909 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
1908 prof->tx_ring_size, i, TX)) 1910 prof->tx_ring_size, i, TX, node))
1909 goto err; 1911 goto err;
1910 1912
1911 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i, 1913 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
1912 prof->tx_ring_size, TXBB_SIZE)) 1914 prof->tx_ring_size, TXBB_SIZE, node))
1913 goto err; 1915 goto err;
1914 } 1916 }
1915 1917
1916 /* Create rx Rings */ 1918 /* Create rx Rings */
1917 for (i = 0; i < priv->rx_ring_num; i++) { 1919 for (i = 0; i < priv->rx_ring_num; i++) {
1920 node = cpu_to_node(i % num_online_cpus());
1918 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 1921 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
1919 prof->rx_ring_size, i, RX)) 1922 prof->rx_ring_size, i, RX, node))
1920 goto err; 1923 goto err;
1921 1924
1922 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 1925 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
1923 prof->rx_ring_size, priv->stride)) 1926 prof->rx_ring_size, priv->stride,
1927 node))
1924 goto err; 1928 goto err;
1925 } 1929 }
1926 1930
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 1c45f88776c5..07a1d0fbae47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -320,17 +320,20 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
320 320
321int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 321int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
322 struct mlx4_en_rx_ring **pring, 322 struct mlx4_en_rx_ring **pring,
323 u32 size, u16 stride) 323 u32 size, u16 stride, int node)
324{ 324{
325 struct mlx4_en_dev *mdev = priv->mdev; 325 struct mlx4_en_dev *mdev = priv->mdev;
326 struct mlx4_en_rx_ring *ring; 326 struct mlx4_en_rx_ring *ring;
327 int err = -ENOMEM; 327 int err = -ENOMEM;
328 int tmp; 328 int tmp;
329 329
330 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 330 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
331 if (!ring) { 331 if (!ring) {
332 en_err(priv, "Failed to allocate RX ring structure\n"); 332 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
333 return -ENOMEM; 333 if (!ring) {
334 en_err(priv, "Failed to allocate RX ring structure\n");
335 return -ENOMEM;
336 }
334 } 337 }
335 338
336 ring->prod = 0; 339 ring->prod = 0;
@@ -343,17 +346,23 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
343 346
344 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * 347 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
345 sizeof(struct mlx4_en_rx_alloc)); 348 sizeof(struct mlx4_en_rx_alloc));
346 ring->rx_info = vmalloc(tmp); 349 ring->rx_info = vmalloc_node(tmp, node);
347 if (!ring->rx_info) { 350 if (!ring->rx_info) {
348 err = -ENOMEM; 351 ring->rx_info = vmalloc(tmp);
349 goto err_ring; 352 if (!ring->rx_info) {
353 err = -ENOMEM;
354 goto err_ring;
355 }
350 } 356 }
351 357
352 en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n", 358 en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
353 ring->rx_info, tmp); 359 ring->rx_info, tmp);
354 360
361 /* Allocate HW buffers on provided NUMA node */
362 set_dev_node(&mdev->dev->pdev->dev, node);
355 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, 363 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
356 ring->buf_size, 2 * PAGE_SIZE); 364 ring->buf_size, 2 * PAGE_SIZE);
365 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
357 if (err) 366 if (err)
358 goto err_info; 367 goto err_info;
359 368
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d4e4cf30a720..f54ebd5a1702 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -55,17 +55,20 @@ MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
55 55
56int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 56int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
57 struct mlx4_en_tx_ring **pring, int qpn, u32 size, 57 struct mlx4_en_tx_ring **pring, int qpn, u32 size,
58 u16 stride) 58 u16 stride, int node)
59{ 59{
60 struct mlx4_en_dev *mdev = priv->mdev; 60 struct mlx4_en_dev *mdev = priv->mdev;
61 struct mlx4_en_tx_ring *ring; 61 struct mlx4_en_tx_ring *ring;
62 int tmp; 62 int tmp;
63 int err; 63 int err;
64 64
65 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 65 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
66 if (!ring) { 66 if (!ring) {
67 en_err(priv, "Failed allocating TX ring\n"); 67 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
68 return -ENOMEM; 68 if (!ring) {
69 en_err(priv, "Failed allocating TX ring\n");
70 return -ENOMEM;
71 }
69 } 72 }
70 73
71 ring->size = size; 74 ring->size = size;
@@ -75,24 +78,33 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
75 inline_thold = min(inline_thold, MAX_INLINE); 78 inline_thold = min(inline_thold, MAX_INLINE);
76 79
77 tmp = size * sizeof(struct mlx4_en_tx_info); 80 tmp = size * sizeof(struct mlx4_en_tx_info);
78 ring->tx_info = vmalloc(tmp); 81 ring->tx_info = vmalloc_node(tmp, node);
79 if (!ring->tx_info) { 82 if (!ring->tx_info) {
80 err = -ENOMEM; 83 ring->tx_info = vmalloc(tmp);
81 goto err_ring; 84 if (!ring->tx_info) {
85 err = -ENOMEM;
86 goto err_ring;
87 }
82 } 88 }
83 89
84 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n", 90 en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
85 ring->tx_info, tmp); 91 ring->tx_info, tmp);
86 92
87 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL); 93 ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
88 if (!ring->bounce_buf) { 94 if (!ring->bounce_buf) {
89 err = -ENOMEM; 95 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
90 goto err_info; 96 if (!ring->bounce_buf) {
97 err = -ENOMEM;
98 goto err_info;
99 }
91 } 100 }
92 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE); 101 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
93 102
103 /* Allocate HW buffers on provided NUMA node */
104 set_dev_node(&mdev->dev->pdev->dev, node);
94 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, 105 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
95 2 * PAGE_SIZE); 106 2 * PAGE_SIZE);
107 set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
96 if (err) { 108 if (err) {
97 en_err(priv, "Failed allocating hwq resources\n"); 109 en_err(priv, "Failed allocating hwq resources\n");
98 goto err_bounce; 110 goto err_bounce;
@@ -118,7 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
118 } 130 }
119 ring->qp.event = mlx4_en_sqp_event; 131 ring->qp.event = mlx4_en_sqp_event;
120 132
121 err = mlx4_bf_alloc(mdev->dev, &ring->bf); 133 err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
122 if (err) { 134 if (err) {
123 en_dbg(DRV, priv, "working without blueflame (%d)", err); 135 en_dbg(DRV, priv, "working without blueflame (%d)", err);
124 ring->bf.uar = &mdev->priv_uar; 136 ring->bf.uar = &mdev->priv_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b2547ae07dfa..f3758de59c05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -705,7 +705,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv);
705int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 705int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
706 706
707int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, 707int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
708 int entries, int ring, enum cq_type mode); 708 int entries, int ring, enum cq_type mode, int node);
709void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq); 709void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
710int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 710int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
711 int cq_idx); 711 int cq_idx);
@@ -719,7 +719,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
719 719
720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 720int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
721 struct mlx4_en_tx_ring **pring, 721 struct mlx4_en_tx_ring **pring,
722 int qpn, u32 size, u16 stride); 722 int qpn, u32 size, u16 stride, int node);
723void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, 723void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
724 struct mlx4_en_tx_ring **pring); 724 struct mlx4_en_tx_ring **pring);
725int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 725int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -730,7 +730,7 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
730 730
731int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 731int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
732 struct mlx4_en_rx_ring **pring, 732 struct mlx4_en_rx_ring **pring,
733 u32 size, u16 stride); 733 u32 size, u16 stride, int node);
734void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, 734void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
735 struct mlx4_en_rx_ring **pring, 735 struct mlx4_en_rx_ring **pring,
736 u32 size, u16 stride); 736 u32 size, u16 stride);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 00f223acada7..84cfb40bf451 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -168,7 +168,7 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
168} 168}
169EXPORT_SYMBOL_GPL(mlx4_uar_free); 169EXPORT_SYMBOL_GPL(mlx4_uar_free);
170 170
171int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf) 171int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
172{ 172{
173 struct mlx4_priv *priv = mlx4_priv(dev); 173 struct mlx4_priv *priv = mlx4_priv(dev);
174 struct mlx4_uar *uar; 174 struct mlx4_uar *uar;
@@ -186,10 +186,13 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
186 err = -ENOMEM; 186 err = -ENOMEM;
187 goto out; 187 goto out;
188 } 188 }
189 uar = kmalloc(sizeof *uar, GFP_KERNEL); 189 uar = kmalloc_node(sizeof(*uar), GFP_KERNEL, node);
190 if (!uar) { 190 if (!uar) {
191 err = -ENOMEM; 191 uar = kmalloc(sizeof(*uar), GFP_KERNEL);
192 goto out; 192 if (!uar) {
193 err = -ENOMEM;
194 goto out;
195 }
193 } 196 }
194 err = mlx4_uar_alloc(dev, uar); 197 err = mlx4_uar_alloc(dev, uar);
195 if (err) 198 if (err)