author		Ido Shamay <idos@mellanox.com>		2014-09-18 04:51:00 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-19 17:30:10 -0400
commit		43c816c67a536cfcfc24da50153115b75eca94f0 (patch)
tree		1079096c4a0b59899c5e953be2b33723776f5c32
parent		77507aa249aecd06fa25ad058b64481e46887a01 (diff)
net/mlx4_core: Cache line EQE size support
Enable the mlx4 interrupt handler to work with the EQE stride feature. The
feature may be enabled when the cache line is larger than 64B. The EQE size
will then be the cache line size, and the context segment will reside at
offset [0-31].

Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
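For readers new to the driver, here is a minimal standalone sketch of the indexing arithmetic the patched get_eqe() performs; the MLX4_EQ_ENTRY_SIZE and PAGE_SIZE values below are illustrative stand-ins for the kernel definitions, and the helper names are hypothetical:

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins for the kernel/driver definitions. */
#define MLX4_EQ_ENTRY_SIZE 32UL   /* legacy 32B EQE */
#define PAGE_SIZE          4096UL

/*
 * With the stride feature enabled, the EQE size equals the cache line
 * size (64B, 128B or 256B), so entry N sits at byte offset N * eqe_size
 * in the ring; nent is a power of two, so the mask wraps the index.
 */
static size_t eqe_offset(uint32_t entry, uint32_t nent, uint32_t eqe_size)
{
	return (size_t)(entry & (nent - 1)) * eqe_size;
}

/*
 * Only the 64B-EQE mode (eqe_factor != 0) keeps the legacy context in
 * the upper 32 bytes; every other stride keeps it at offset [0-31],
 * which is why get_eqe() skips MLX4_EQ_ENTRY_SIZE only when eqe_factor
 * is set.
 */
static size_t eqe_context_offset(uint32_t entry, uint32_t nent,
				 uint8_t eqe_factor, uint32_t eqe_size)
{
	return (eqe_offset(entry, nent, eqe_size) +
		(eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}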
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/eq.c	30
1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 2a004b347e1d..a49c9d11d8a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,21 +101,24 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 	mb();
 }
 
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
+				u8 eqe_size)
 {
 	/* (entry & (eq->nent - 1)) gives us a cyclic array */
-	unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
-	/* CX3 is capable of extending the EQE from 32 to 64 bytes.
-	 * When this feature is enabled, the first (in the lower addresses)
+	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
+	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
+	 * strides of 64B,128B and 256B.
+	 * When 64B EQE is used, the first (in the lower addresses)
 	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
 	 * contain the legacy EQE information.
+	 * In all other cases, the first 32B contains the legacy EQE info.
 	 */
 	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
 }
 
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
 {
-	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
+	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
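The unchanged validity test in next_eqe_sw() is an alternating-ownership scheme: hardware flips the owner bit each time it wraps the ring, and software compares that bit against the parity of its own pass. A minimal sketch of the same test, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/*
 * An EQE belongs to software when the hardware owner bit (top bit of
 * the owner byte) matches the parity of the consumer's current pass;
 * nent is a power of two, so cons_index & nent flips on each wrap.
 */
static bool eqe_is_sw_owned(uint8_t owner, uint32_t cons_index, uint32_t nent)
{
	return !(!!(owner & 0x80) ^ !!(cons_index & nent));
}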
@@ -459,8 +462,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	enum slave_port_gen_event gen_event;
 	unsigned long flags;
 	struct mlx4_vport_state *s_info;
+	int eqe_size = dev->caps.eqe_size;
 
-	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
+	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -894,8 +898,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 
 	eq->dev = dev;
 	eq->nent = roundup_pow_of_two(max(nent, 2));
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
+	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
+	 * strides of 64B,128B and 256B.
+	 */
+	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -997,8 +1003,10 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
 	int i;
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
+	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
+	 * strides of 64B,128B and 256B
+	 */
+	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
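For intuition, a quick sketch of the page-count arithmetic that mlx4_create_eq() and mlx4_free_eq() now share; the PAGE_ALIGN definition and the sample values are illustrative assumptions, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the kernel provides PAGE_SIZE and PAGE_ALIGN. */
#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint32_t nent = 512;     /* ring entries, rounded up to a power of two */
	uint32_t eqe_size = 128; /* cache-line-sized EQE with stride enabled */

	/* Same computation as the patched npages lines: ring bytes rounded
	 * up to whole pages. 512 * 128B = 64 KiB -> 16 pages. */
	unsigned long npages = PAGE_ALIGN((unsigned long)nent * eqe_size) / PAGE_SIZE;

	printf("npages = %lu\n", npages);
	return 0;
}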