author	Shaobo Xu <xushaobo2@huawei.com>	2016-11-23 14:41:08 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-12-03 14:20:42 -0500
commit	82547469782a952452c84c055c7911e635c77cd0 (patch)
tree	758e21bccc0f7bf22d8191e9c0df0792142b5daa
parent	5e6ff78a229c2f231f2f743b017987621e469858 (diff)
IB/hns: Implement the add_gid/del_gid and optimize the GIDs management
IB core has implemented the calculation of GIDs and the management of the GID table, and it is now responsible for supplying the GID query function. The calculation of GIDs and the management of the GID table in the RoCE driver are therefore redundant.

This patch implements add_gid/del_gid to set the GIDs in the RoCE driver, removes the redundant calculation and management of GIDs from the net device and inet notifier calls, and updates query_gid.

Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Reviewed-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
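For orientation, below is a minimal sketch of the split this change relies on: the IB core GID cache owns the table and answers queries, and only calls back into the driver to mirror a single entry into hardware. The hook signatures match the ib_device callbacks added by the patch; everything prefixed "example_" is a hypothetical stand-in for illustration, not part of the hns driver.

#include <rdma/ib_verbs.h>

/* Sketch only: per-index hooks invoked by the IB core GID cache. */
static int example_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr, void **context)
{
	/* Program the hardware GID entry (port_num - 1, index) with *gid. */
	return 0;
}

static int example_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, void **context)
{
	/* Clear the hardware GID entry (port_num - 1, index), i.e. write the zero GID. */
	return 0;
}

/* Wiring mirrors the two assignments added to hns_roce_register_device() below. */
static void example_wire_gid_hooks(struct ib_device *ib_dev)
{
	ib_dev->add_gid = example_add_gid;
	ib_dev->del_gid = example_del_gid;
}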
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_device.h	  2
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_main.c	270
2 files changed, 48 insertions(+), 224 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 593a42a198f6..9ef1cc3ec930 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -429,8 +429,6 @@ struct hns_roce_ib_iboe {
 	struct net_device	*netdevs[HNS_ROCE_MAX_PORTS];
 	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
-	/* 16 GID is shared by 6 port in v1 engine. */
-	union ib_gid		gid_table[HNS_ROCE_MAX_GID_NUM];
 	u8			phy_port[HNS_ROCE_MAX_PORTS];
 };
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 67701719bad1..795ef97bfcbf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -35,52 +35,13 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_user.h"
 #include "hns_roce_hem.h"
 
 /**
- * hns_roce_addrconf_ifid_eui48 - Get default gid.
- * @eui: eui.
- * @vlan_id:  gid
- * @dev:  net device
- * Description:
- *	MAC convert to GID
- *		gid[0..7] = fe80 0000 0000 0000
- *		gid[8] = mac[0] ^ 2
- *		gid[9] = mac[1]
- *		gid[10] = mac[2]
- *		gid[11] = ff	(VLAN ID high byte (4 MS bits))
- *		gid[12] = fe	(VLAN ID low byte)
- *		gid[13] = mac[3]
- *		gid[14] = mac[4]
- *		gid[15] = mac[5]
- */
-static void hns_roce_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
-					 struct net_device *dev)
-{
-	memcpy(eui, dev->dev_addr, 3);
-	memcpy(eui + 5, dev->dev_addr + 3, 3);
-	if (vlan_id < 0x1000) {
-		eui[3] = vlan_id >> 8;
-		eui[4] = vlan_id & 0xff;
-	} else {
-		eui[3] = 0xff;
-		eui[4] = 0xfe;
-	}
-	eui[0] ^= 2;
-}
-
-static void hns_roce_make_default_gid(struct net_device *dev, union ib_gid *gid)
-{
-	memset(gid, 0, sizeof(*gid));
-	gid->raw[0] = 0xFE;
-	gid->raw[1] = 0x80;
-	hns_roce_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
-}
-
-/**
  * hns_get_gid_index - Get gid index.
  * @hr_dev: pointer to structure hns_roce_dev.
  * @port:  port, value range: 0 ~ MAX
@@ -96,30 +57,6 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
 	return gid_index * hr_dev->caps.num_ports + port;
 }
 
-static int hns_roce_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
-			    union ib_gid *gid)
-{
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-
-	if (gid_index >= hr_dev->caps.gid_table_len[port]) {
-		dev_err(dev, "gid_index %d illegal, port %d gid range: 0~%d\n",
-			gid_index, port, hr_dev->caps.gid_table_len[port] - 1);
-		return -EINVAL;
-	}
-
-	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
-	if (!memcmp(gid, &hr_dev->iboe.gid_table[gid_idx], sizeof(*gid)))
-		return -EINVAL;
-
-	memcpy(&hr_dev->iboe.gid_table[gid_idx], gid, sizeof(*gid));
-
-	hr_dev->hw->set_gid(hr_dev, port, gid_index, gid);
-
-	return 0;
-}
-
 static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
 {
 	u8 phy_port;
@@ -147,15 +84,44 @@ static void hns_roce_set_mtu(struct hns_roce_dev *hr_dev, u8 port, int mtu)
 	hr_dev->hw->set_mtu(hr_dev, phy_port, tmp);
 }
 
-static void hns_roce_update_gids(struct hns_roce_dev *hr_dev, int port)
+static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, const union ib_gid *gid,
+			    const struct ib_gid_attr *attr, void **context)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	u8 port = port_num - 1;
+	unsigned long flags;
+
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
+
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
+}
+
+static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
+			    unsigned int index, void **context)
 {
-	struct ib_event event;
+	struct hns_roce_dev *hr_dev = to_hr_dev(device);
+	union ib_gid zgid = { {0} };
+	u8 port = port_num - 1;
+	unsigned long flags;
+
+	if (port >= hr_dev->caps.num_ports)
+		return -EINVAL;
 
-	/* Refresh gid in ib_cache */
-	event.device = &hr_dev->ib_dev;
-	event.element.port_num = port + 1;
-	event.event = IB_EVENT_GID_CHANGE;
-	ib_dispatch_event(&event);
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
 }
 
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
@@ -164,8 +130,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	struct device *dev = &hr_dev->pdev->dev;
 	struct net_device *netdev;
 	unsigned long flags;
-	union ib_gid gid;
-	int ret = 0;
 
 	netdev = hr_dev->iboe.netdevs[port];
 	if (!netdev) {
@@ -181,10 +145,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	case NETDEV_REGISTER:
 	case NETDEV_CHANGEADDR:
 		hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
-		hns_roce_make_default_gid(netdev, &gid);
-		ret = hns_roce_set_gid(hr_dev, port, 0, &gid);
-		if (!ret)
-			hns_roce_update_gids(hr_dev, port);
 		break;
 	case NETDEV_DOWN:
 		/*
@@ -197,7 +157,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 	}
 
 	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-	return ret;
+	return 0;
 }
 
 static int hns_roce_netdev_event(struct notifier_block *self,
@@ -224,118 +184,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
 	return NOTIFY_DONE;
 }
 
-static void hns_roce_addr_event(int event, struct net_device *event_netdev,
-				struct hns_roce_dev *hr_dev, union ib_gid *gid)
-{
-	struct hns_roce_ib_iboe *iboe = NULL;
-	int gid_table_len = 0;
-	unsigned long flags;
-	union ib_gid zgid;
-	u8 gid_idx = 0;
-	u8 port = 0;
-	int i = 0;
-	int free;
-	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
-				      rdma_vlan_dev_real_dev(event_netdev) :
-				      event_netdev;
-
-	if (event != NETDEV_UP && event != NETDEV_DOWN)
-		return;
-
-	iboe = &hr_dev->iboe;
-	while (port < hr_dev->caps.num_ports) {
-		if (real_dev == iboe->netdevs[port])
-			break;
-		port++;
-	}
-
-	if (port >= hr_dev->caps.num_ports) {
-		dev_dbg(&hr_dev->pdev->dev, "can't find netdev\n");
-		return;
-	}
-
-	memset(zgid.raw, 0, sizeof(zgid.raw));
-	free = -1;
-	gid_table_len = hr_dev->caps.gid_table_len[port];
-
-	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
-	for (i = 0; i < gid_table_len; i++) {
-		gid_idx = hns_get_gid_index(hr_dev, port, i);
-		if (!memcmp(gid->raw, iboe->gid_table[gid_idx].raw,
-			    sizeof(gid->raw)))
-			break;
-		if (free < 0 && !memcmp(zgid.raw,
-			iboe->gid_table[gid_idx].raw, sizeof(zgid.raw)))
-			free = i;
-	}
-
-	if (i >= gid_table_len) {
-		if (free < 0) {
-			spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-			dev_dbg(&hr_dev->pdev->dev,
-				"gid_index overflow, port(%d)\n", port);
-			return;
-		}
-		if (!hns_roce_set_gid(hr_dev, port, free, gid))
-			hns_roce_update_gids(hr_dev, port);
-	} else if (event == NETDEV_DOWN) {
-		if (!hns_roce_set_gid(hr_dev, port, i, &zgid))
-			hns_roce_update_gids(hr_dev, port);
-	}
-
-	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-}
-
-static int hns_roce_inet_event(struct notifier_block *self, unsigned long event,
-			       void *ptr)
-{
-	struct in_ifaddr *ifa = ptr;
-	struct hns_roce_dev *hr_dev;
-	struct net_device *dev = ifa->ifa_dev->dev;
-	union ib_gid gid;
-
-	ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
-
-	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb_inet);
-
-	hns_roce_addr_event(event, dev, hr_dev, &gid);
-
-	return NOTIFY_DONE;
-}
-
-static int hns_roce_setup_mtu_gids(struct hns_roce_dev *hr_dev)
+static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
 {
-	struct in_ifaddr *ifa_list = NULL;
-	union ib_gid gid = {{0} };
-	u32 ipaddr = 0;
-	int index = 0;
-	int ret = 0;
-	u8 i = 0;
+	u8 i;
 
 	for (i = 0; i < hr_dev->caps.num_ports; i++) {
 		hns_roce_set_mtu(hr_dev, i,
 				 ib_mtu_enum_to_int(hr_dev->caps.max_mtu));
 		hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
-
-		if (hr_dev->iboe.netdevs[i]->ip_ptr) {
-			ifa_list = hr_dev->iboe.netdevs[i]->ip_ptr->ifa_list;
-			index = 1;
-			while (ifa_list) {
-				ipaddr = ifa_list->ifa_address;
-				ipv6_addr_set_v4mapped(ipaddr,
-						       (struct in6_addr *)&gid);
-				ret = hns_roce_set_gid(hr_dev, i, index, &gid);
-				if (ret)
-					break;
-				index++;
-				ifa_list = ifa_list->ifa_next;
-			}
-			hns_roce_update_gids(hr_dev, i);
-		}
 	}
 
-	return ret;
+	return 0;
 }
 
 static int hns_roce_query_device(struct ib_device *ib_dev,
@@ -444,31 +303,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
 static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
 			      union ib_gid *gid)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
-	u8 gid_idx = 0;
-	u8 port;
-
-	if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
-	    index >= hr_dev->caps.gid_table_len[port_num - 1]) {
-		dev_err(dev,
-			"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
-			port_num, index, hr_dev->caps.num_ports,
-			hr_dev->caps.gid_table_len[port_num - 1] - 1);
-		return -EINVAL;
-	}
-
-	port = port_num - 1;
-	gid_idx = hns_get_gid_index(hr_dev, port, index);
-	if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
-		dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
-			port_num, index, HNS_ROCE_MAX_GID_NUM);
-		return -EINVAL;
-	}
-
-	memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
-	       HNS_ROCE_GID_SIZE);
-
 	return 0;
 }
 
@@ -646,6 +480,8 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->get_link_layer		= hns_roce_get_link_layer;
 	ib_dev->get_netdev		= hns_roce_get_netdev;
 	ib_dev->query_gid		= hns_roce_query_gid;
+	ib_dev->add_gid			= hns_roce_add_gid;
+	ib_dev->del_gid			= hns_roce_del_gid;
 	ib_dev->query_pkey		= hns_roce_query_pkey;
 	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
 	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
@@ -688,32 +524,22 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 		return ret;
 	}
 
-	ret = hns_roce_setup_mtu_gids(hr_dev);
+	ret = hns_roce_setup_mtu_mac(hr_dev);
 	if (ret) {
-		dev_err(dev, "roce_setup_mtu_gids failed!\n");
-		goto error_failed_setup_mtu_gids;
+		dev_err(dev, "setup_mtu_mac failed!\n");
+		goto error_failed_setup_mtu_mac;
 	}
 
 	iboe->nb.notifier_call = hns_roce_netdev_event;
 	ret = register_netdevice_notifier(&iboe->nb);
 	if (ret) {
 		dev_err(dev, "register_netdevice_notifier failed!\n");
-		goto error_failed_setup_mtu_gids;
-	}
-
-	iboe->nb_inet.notifier_call = hns_roce_inet_event;
-	ret = register_inetaddr_notifier(&iboe->nb_inet);
-	if (ret) {
-		dev_err(dev, "register inet addr notifier failed!\n");
-		goto error_failed_register_inetaddr_notifier;
+		goto error_failed_setup_mtu_mac;
 	}
 
 	return 0;
 
-error_failed_register_inetaddr_notifier:
-	unregister_netdevice_notifier(&iboe->nb);
-
-error_failed_setup_mtu_gids:
+error_failed_setup_mtu_mac:
 	ib_unregister_device(ib_dev);
 
 	return ret;