author	Daniel Jurgens <danielj@mellanox.com>	2017-11-29 13:10:39 -0500
committer	Jason Gunthorpe <jgg@mellanox.com>	2017-12-01 14:21:28 -0500
commit	315d160c5a4e034a576a13aa21e7235d5c9ec609 (patch)
tree	56706edba26991ea0522a77baef77b9eab64f7dc
parent	378efe798ecf0e7d9730a595ef3419b046e34fb4 (diff)
IB/core: Only enforce security for InfiniBand
For now the only LSM security enforcement mechanism available is
specific to InfiniBand. Bypass enforcement for non-IB link types.

This fixes a regression where modify_qp fails for iWARP because
querying the PKEY returns -EINVAL.

Cc: Paul Moore <paul@paul-moore.com>
Cc: Don Dutile <ddutile@redhat.com>
Cc: stable@vger.kernel.org
Reported-by: Potnuri Bharat Teja <bharat@chelsio.com>
Fixes: d291f1a65232 ("IB/core: Enforce PKey security on QPs")
Fixes: 47a2b338fe63 ("IB/core: Enforce security on management datagrams")
Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Tested-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
 drivers/infiniband/core/security.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 46 insertions(+), 4 deletions(-)
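The fix hinges on a per-port protocol check: security state is created
and enforced only when at least one port of the device runs the
InfiniBand protocol, so iWARP (and RoCE) devices bypass the LSM hooks
entirely. Below is a minimal sketch of that detection pattern, assuming
the same RDMA core helpers the patch itself calls (rdma_start_port(),
rdma_end_port(), rdma_protocol_ib()); the standalone wrapper name is
hypothetical, the patch open-codes the loop inside
ib_create_qp_security():

#include <rdma/ib_verbs.h>

/* Hypothetical helper (illustration only, not part of the patch):
 * true if any port on the device speaks the InfiniBand protocol.
 * rdma_protocol_ib() returns false for RoCE and iWARP ports, so a
 * device with no IB ports is skipped by the security code.
 */
static bool dev_has_ib_port(struct ib_device *dev)
{
	u8 i;

	for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++)
		if (rdma_protocol_ib(dev, i))
			return true;

	return false;
}

When this scan finds no IB port, ib_create_qp_security() returns 0
without allocating qp->qp_sec; the destroy and modify paths in the
diff below then treat a NULL qp_sec as "nothing to enforce", which is
why each of them gains an early NULL or protocol check.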
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 23278ed5be45..a337386652b0 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
 
 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
 {
+	u8 i = rdma_start_port(dev);
+	bool is_ib = false;
 	int ret;
 
+	while (i <= rdma_end_port(dev) && !is_ib)
+		is_ib = rdma_protocol_ib(dev, i++);
+
+	/* If this isn't an IB device don't create the security context */
+	if (!is_ib)
+		return 0;
+
 	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
 	if (!qp->qp_sec)
 		return -ENOMEM;
@@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
 
 void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
 {
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	mutex_lock(&sec->mutex);
 
 	/* Remove the QP from the lists so it won't get added to
@@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
 	int ret;
 	int i;
 
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	/* If a concurrent cache update is in progress this
 	 * QP security could be marked for an error state
 	 * transition. Wait for this to complete.
@@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
 {
 	int i;
 
+	/* Return if not IB */
+	if (!sec)
+		return;
+
 	/* If a concurrent cache update is occurring we must
 	 * wait until this QP security structure is processed
 	 * in the QP to error flow before destroying it because
@@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 {
 	int ret = 0;
 	struct ib_ports_pkeys *tmp_pps;
-	struct ib_ports_pkeys *new_pps;
+	struct ib_ports_pkeys *new_pps = NULL;
 	struct ib_qp *real_qp = qp->real_qp;
 	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
 			   real_qp->qp_type == IB_QPT_GSI ||
@@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
 	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
 			   (qp_attr_mask & IB_QP_ALT_PATH));
 
+	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
+		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
+		   !real_qp->qp_sec),
+		  "%s: QP security is not initialized for IB QP: %d\n",
+		  __func__, real_qp->qp_num);
+
 	/* The port/pkey settings are maintained only for the real QP. Open
 	 * handles on the real QP will be in the shared_qp_list. When
 	 * enforcing security on the real QP all the shared QPs will be
 	 * checked as well.
 	 */
 
-	if (pps_change && !special_qp) {
+	if (pps_change && !special_qp && real_qp->qp_sec) {
 		mutex_lock(&real_qp->qp_sec->mutex);
 		new_pps = get_new_pps(real_qp,
 				      qp_attr,
 				      qp_attr_mask);
-
+		if (!new_pps) {
+			mutex_unlock(&real_qp->qp_sec->mutex);
+			return -ENOMEM;
+		}
 		/* Add this QP to the lists for the new port
 		 * and pkey settings before checking for permission
 		 * in case there is a concurrent cache update
@@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
 					 qp_attr_mask,
 					 udata);
 
-	if (pps_change && !special_qp) {
+	if (new_pps) {
 		/* Clean up the lists and free the appropriate
 		 * ports_pkeys structure.
 		 */
@@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
 	u16 pkey;
 	int ret;
 
+	if (!rdma_protocol_ib(dev, port_num))
+		return 0;
+
 	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
 	if (ret)
 		return ret;
@@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 {
 	int ret;
 
+	if (!rdma_protocol_ib(agent->device, agent->port_num))
+		return 0;
+
 	ret = security_ib_alloc_security(&agent->security);
 	if (ret)
 		return ret;
@@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
 
 void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 {
+	if (!rdma_protocol_ib(agent->device, agent->port_num))
+		return;
+
 	security_ib_free_security(agent->security);
 	if (agent->lsm_nb_reg)
 		unregister_lsm_notifier(&agent->lsm_nb);
@@ -697,6 +736,9 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
 
 int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 {
+	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
+		return 0;
+
 	if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
 		return -EACCES;
 