author    Anish Bhatt <anish@chelsio.com>    2014-07-17 03:18:18 -0400
committer David S. Miller <davem@davemloft.net>    2014-07-17 19:06:03 -0400
commit    759a0cc5a3e1bc2cc48fa3c0b91bdcad8b8f87d6 (patch)
tree      d5f1540e22aea963868cb4c1371f2c734b934c7d /drivers/scsi/cxgbi/cxgb4i
parent    fc8d0590d9142d01e4ccea3aa57c894bd6e53662 (diff)
cxgb4i: Add ipv6 code to driver, call into libcxgbi ipv6 api
Signed-off-by: Anish Bhatt <anish@chelsio.com>
Signed-off-by: Karen Xie <kxie@chelsio.com>
Signed-off-by: Manoj Malviya <manojmalviya@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/scsi/cxgbi/cxgb4i')
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c  353
1 file changed, 314 insertions(+), 39 deletions(-)
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index e8ee5e5fe0ef..1041574edcfc 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -19,6 +19,7 @@
 #include <net/tcp.h>
 #include <net/dst.h>
 #include <linux/netdevice.h>
+#include <net/addrconf.h>
 
 #include "t4_regs.h"
 #include "t4_msg.h"
@@ -150,6 +151,7 @@ static struct scsi_transport_template *cxgb4i_stt;
  * The section below implments CPLs that related to iscsi tcp connection
  * open/close/abort and data send/receive.
  */
+
 #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
 #define RCV_BUFSIZ_MASK		0x3FFU
 #define MAX_IMM_TX_PKT_LEN	128
@@ -179,6 +181,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 			       struct l2t_entry *e)
 {
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+	int t4 = is_t4(lldi->adapter_type);
 	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
 	unsigned long long opt0;
 	unsigned int opt2;
@@ -248,6 +251,97 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
 	}
 
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+	pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
+		       (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+		       csk->state, csk->flags, csk->atid, csk->rss_qid);
+
+	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+
+static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
+			       struct l2t_entry *e)
+{
+	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+	int t4 = is_t4(lldi->adapter_type);
+	int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
+	unsigned long long opt0;
+	unsigned int opt2;
+	unsigned int qid_atid = ((unsigned int)csk->atid) |
+				 (((unsigned int)csk->rss_qid) << 14);
+
+	opt0 = KEEP_ALIVE(1) |
+		WND_SCALE(wscale) |
+		MSS_IDX(csk->mss_idx) |
+		L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
+		TX_CHAN(csk->tx_chan) |
+		SMAC_SEL(csk->smac_idx) |
+		ULP_MODE(ULP_MODE_ISCSI) |
+		RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+
+	opt2 = RX_CHANNEL(0) |
+		RSS_QUEUE_VALID |
+		RX_FC_DISABLE |
+		RSS_QUEUE(csk->rss_qid);
+
+	if (t4) {
+		struct cpl_act_open_req6 *req =
+			    (struct cpl_act_open_req6 *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+							    qid_atid));
+		req->local_port = csk->saddr6.sin6_port;
+		req->peer_port = csk->daddr6.sin6_port;
+
+		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+									8);
+		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+									8);
+
+		req->opt0 = cpu_to_be64(opt0);
+
+		opt2 |= RX_FC_VALID;
+		req->opt2 = cpu_to_be32(opt2);
+
+		req->params = cpu_to_be32(cxgb4_select_ntuple(
+					  csk->cdev->ports[csk->port_id],
+					  csk->l2t));
+	} else {
+		struct cpl_t5_act_open_req6 *req =
+				(struct cpl_t5_act_open_req6 *)skb->head;
+
+		INIT_TP_WR(req, 0);
+		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+							    qid_atid));
+		req->local_port = csk->saddr6.sin6_port;
+		req->peer_port = csk->daddr6.sin6_port;
+		req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+		req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+									8);
+		req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+		req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+									8);
+		req->opt0 = cpu_to_be64(opt0);
+
+		opt2 |= T5_OPT_2_VALID;
+		req->opt2 = cpu_to_be32(opt2);
+
+		req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+					  csk->cdev->ports[csk->port_id],
+					  csk->l2t)));
+	}
+
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+	pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
+		t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+		&csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
+		&csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
+		csk->rss_qid);
+
 	cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
 
@@ -586,9 +680,11 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
 		goto rel_skb;
 	}
 
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
-		csk, csk->state, csk->flags, tid, atid, rcv_isn);
+	pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
+		       (&csk->saddr), (&csk->daddr),
+		       atid, tid, csk, csk->state, csk->flags, rcv_isn);
+
+	module_put(THIS_MODULE);
 
 	cxgbi_sock_get(csk);
 	csk->tid = tid;
@@ -663,6 +759,9 @@ static void csk_act_open_retry_timer(unsigned long data)
 	struct sk_buff *skb;
 	struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+	void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
+				   struct l2t_entry *);
+	int t4 = is_t4(lldi->adapter_type), size, size6;
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
@@ -670,20 +769,35 @@ static void csk_act_open_retry_timer(unsigned long data)
 
 	cxgbi_sock_get(csk);
 	spin_lock_bh(&csk->lock);
-	skb = alloc_wr(is_t4(lldi->adapter_type) ?
-		sizeof(struct cpl_act_open_req) :
-		sizeof(struct cpl_t5_act_open_req),
-		0, GFP_ATOMIC);
+
+	if (t4) {
+		size = sizeof(struct cpl_act_open_req);
+		size6 = sizeof(struct cpl_act_open_req6);
+	} else {
+		size = sizeof(struct cpl_t5_act_open_req);
+		size6 = sizeof(struct cpl_t5_act_open_req6);
+	}
+
+	if (csk->csk_family == AF_INET) {
+		send_act_open_func = send_act_open_req;
+		skb = alloc_wr(size, 0, GFP_ATOMIC);
+	} else {
+		send_act_open_func = send_act_open_req6;
+		skb = alloc_wr(size6, 0, GFP_ATOMIC);
+	}
+
 	if (!skb)
 		cxgbi_sock_fail_act_open(csk, -ENOMEM);
 	else {
 		skb->sk = (struct sock *)csk;
 		t4_set_arp_err_handler(skb, csk,
 					cxgbi_sock_act_open_req_arp_failure);
-		send_act_open_req(csk, skb, csk->l2t);
+		send_act_open_func(csk, skb, csk->l2t);
 	}
+
 	spin_unlock_bh(&csk->lock);
 	cxgbi_sock_put(csk);
+
 }
 
 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
@@ -703,10 +817,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 		goto rel_skb;
 	}
 
-	pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
-		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
-		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
-		atid, tid, status, csk, csk->state, csk->flags);
+	pr_info_ipaddr("tid %u/%u, status %u.\n"
+		       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
+		       atid, tid, status, csk, csk->state, csk->flags);
 
 	if (status == CPL_ERR_RTX_NEG_ADVICE)
 		goto rel_skb;
@@ -746,9 +859,9 @@ static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
 		pr_err("can't find connection for tid %u.\n", tid);
 		goto rel_skb;
 	}
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p,%u,0x%lx,%u.\n",
-		csk, csk->state, csk->flags, csk->tid);
+	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+		       (&csk->saddr), (&csk->daddr),
+		       csk, csk->state, csk->flags, csk->tid);
 	cxgbi_sock_rcv_peer_close(csk);
 rel_skb:
 	__kfree_skb(skb);
@@ -767,9 +880,9 @@ static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 		pr_err("can't find connection for tid %u.\n", tid);
 		goto rel_skb;
 	}
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p,%u,0x%lx,%u.\n",
-		csk, csk->state, csk->flags, csk->tid);
+	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+		       (&csk->saddr), (&csk->daddr),
+		       csk, csk->state, csk->flags, csk->tid);
 	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
 	__kfree_skb(skb);
@@ -808,9 +921,9 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
 		goto rel_skb;
 	}
 
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
-		csk, csk->state, csk->flags, csk->tid, req->status);
+	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+		       (&csk->saddr), (&csk->daddr),
+		       csk, csk->state, csk->flags, csk->tid, req->status);
 
 	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
 	    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
@@ -851,10 +964,10 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
 	if (!csk)
 		goto rel_skb;
 
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
-		rpl->status, csk, csk ? csk->state : 0,
-		csk ? csk->flags : 0UL);
+	if (csk)
+		pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+			       (&csk->saddr), (&csk->daddr), csk,
+			       csk->state, csk->flags, csk->tid, rpl->status);
 
 	if (rpl->status == CPL_ERR_ABORT_FAILED)
 		goto rel_skb;
@@ -1163,15 +1276,29 @@ static int init_act_open(struct cxgbi_sock *csk)
 	struct cxgbi_device *cdev = csk->cdev;
 	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
 	struct net_device *ndev = cdev->ports[csk->port_id];
-	struct port_info *pi = netdev_priv(ndev);
 	struct sk_buff *skb = NULL;
-	struct neighbour *n;
+	struct neighbour *n = NULL;
+	void *daddr;
 	unsigned int step;
+	unsigned int size, size6;
+	int t4 = is_t4(lldi->adapter_type);
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		"csk 0x%p,%u,0x%lx,%u.\n",
 		csk, csk->state, csk->flags, csk->tid);
 
+	if (csk->csk_family == AF_INET)
+		daddr = &csk->daddr.sin_addr.s_addr;
+	else
+		daddr = &csk->daddr6.sin6_addr;
+
+	n = dst_neigh_lookup(csk->dst, daddr);
+
+	if (!n) {
+		pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+		goto rel_resource;
+	}
+
 	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
 	if (csk->atid < 0) {
 		pr_err("%s, NO atid available.\n", ndev->name);
@@ -1192,10 +1319,19 @@ static int init_act_open(struct cxgbi_sock *csk)
 	}
 	cxgbi_sock_get(csk);
 
-	skb = alloc_wr(is_t4(lldi->adapter_type) ?
-		sizeof(struct cpl_act_open_req) :
-		sizeof(struct cpl_t5_act_open_req),
-		0, GFP_ATOMIC);
+	if (t4) {
+		size = sizeof(struct cpl_act_open_req);
+		size6 = sizeof(struct cpl_act_open_req6);
+	} else {
+		size = sizeof(struct cpl_t5_act_open_req);
+		size6 = sizeof(struct cpl_t5_act_open_req6);
+	}
+
+	if (csk->csk_family == AF_INET)
+		skb = alloc_wr(size, 0, GFP_NOIO);
+	else
+		skb = alloc_wr(size6, 0, GFP_NOIO);
+
 	if (!skb)
 		goto rel_resource;
 	skb->sk = (struct sock *)csk;
@@ -1211,19 +1347,27 @@ static int init_act_open(struct cxgbi_sock *csk)
 	csk->txq_idx = cxgb4_port_idx(ndev) * step;
 	step = lldi->nrxq / lldi->nchan;
 	csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
-	csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
+	csk->wr_cred = lldi->wr_cred -
+		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+	csk->wr_max_cred = csk->wr_cred;
 	csk->wr_una_cred = 0;
 	cxgbi_sock_reset_wr_list(csk);
 	csk->err = 0;
-	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-		"csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
-		csk, pi->port_id, ndev->name, csk->tx_chan,
-		csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
-		csk->smac_idx);
 
+	pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
+		       (&csk->saddr), (&csk->daddr), csk, csk->state,
+		       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
+		       csk->mtu, csk->mss_idx, csk->smac_idx);
+
+	/* must wait for either a act_open_rpl or act_open_establish */
+	try_module_get(THIS_MODULE);
 	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
-	send_act_open_req(csk, skb, csk->l2t);
+	if (csk->csk_family == AF_INET)
+		send_act_open_req(csk, skb, csk->l2t);
+	else
+		send_act_open_req6(csk, skb, csk->l2t);
 	neigh_release(n);
+
 	return 0;
 
rel_resource:
@@ -1487,6 +1631,131 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
 	return 0;
 }
 
+static int cxgbi_inet6addr_handler(struct notifier_block *this,
+				   unsigned long event, void *data)
+{
+	struct inet6_ifaddr *ifa = data;
+	struct net_device *event_dev = ifa->idev->dev;
+	struct cxgbi_device *cdev;
+	int ret = NOTIFY_DONE;
+
+	rcu_read_lock();
+
+	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+		event_dev = vlan_dev_real_dev(event_dev);
+
+	cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+	if (!cdev) {
+		rcu_read_unlock();
+		return ret;
+	}
+	switch (event) {
+	case NETDEV_UP:
+		ret = cxgb4_clip_get(event_dev,
+				     (const struct in6_addr *)
+				     ((ifa)->addr.s6_addr));
+		if (ret < 0) {
+			rcu_read_unlock();
+			return ret;
+		}
+		ret = NOTIFY_OK;
+		break;
+
+	case NETDEV_DOWN:
+		cxgb4_clip_release(event_dev,
+				   (const struct in6_addr *)
+				   ((ifa)->addr.s6_addr));
+		ret = NOTIFY_OK;
+		break;
+
+	default:
+		break;
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+
+static struct notifier_block cxgbi_inet6addr_notifier = {
+	.notifier_call = cxgbi_inet6addr_handler
+};
+
+/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+	struct inet6_dev *idev = NULL;
+	struct inet6_ifaddr *ifa;
+	int ret = 0;
+
+	idev = __in6_dev_get(root_dev);
+	if (!idev)
+		return ret;
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		pr_info("updating the clip for addr %pI6\n",
+			ifa->addr.s6_addr);
+		ret = cxgb4_clip_get(dev, (const struct in6_addr *)
+				     ifa->addr.s6_addr);
+		if (ret < 0)
+			break;
+	}
+
+	read_unlock_bh(&idev->lock);
+	return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+	struct net_device *root_dev = NULL;
+	int i, ret = 0;
+
+	/* First populate the real net device's IPv6 address */
+	ret = update_dev_clip(dev, dev);
+	if (ret)
+		return ret;
+
+	/* Parse all bond and vlan devices layered on top of the physical dev */
+	root_dev = netdev_master_upper_dev_get(dev);
+	if (root_dev) {
+		ret = update_dev_clip(root_dev, dev);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < VLAN_N_VID; i++) {
+		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+		if (!root_dev)
+			continue;
+
+		ret = update_dev_clip(root_dev, dev);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static void cxgbi_update_clip(struct cxgbi_device *cdev)
+{
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < cdev->nports; i++) {
+		struct net_device *dev = cdev->ports[i];
+		int ret = 0;
+
+		if (dev)
+			ret = update_root_dev_clip(dev);
+		if (ret < 0)
+			break;
+	}
+	rcu_read_unlock();
+}
+
 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 {
 	struct cxgbi_device *cdev;
@@ -1605,6 +1874,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
 	switch (state) {
 	case CXGB4_STATE_UP:
 		pr_info("cdev 0x%p, UP.\n", cdev);
+		cxgbi_update_clip(cdev);
 		/* re-initialize */
 		break;
 	case CXGB4_STATE_START_RECOVERY:
@@ -1635,11 +1905,16 @@ static int __init cxgb4i_init_module(void)
 	if (rc < 0)
 		return rc;
 	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+
+	register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+
 	return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
+	unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+
 	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
 	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);