author		Jack Wang <jinpu.wang@profitbricks.com>		2017-01-17 04:11:12 -0500
committer	Doug Ledford <dledford@redhat.com>		2017-01-24 16:20:37 -0500
commit		21d6454a392d552c7e845f39884f7cf86f9426b9
tree		c8d266a076d56d53d02e55a13faac61917c53de3
parent		dfc0e5550664a727a59921db7d9e7a41c21d03bb
RDMA/core: create struct ib_port_cache
As Jason suggested, we have four elements in the per-port arrays,
so it is better to have a separate structure to represent them.
This simplifies the code a bit: roughly 30 lines less. :)
Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
Reviewed-by: Michael Wang <yun.wang@profitbricks.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/core/cache.c	134
-rw-r--r--	include/rdma/ib_verbs.h	12
2 files changed, 59 insertions(+), 87 deletions(-)
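Before reading the diff, a minimal before/after sketch of the consolidation may help; the type and field names are taken verbatim from the hunks below, only the layout is condensed:

/* Before: four parallel per-port arrays in struct ib_cache, each
 * indexed by (port - rdma_start_port(device)). */
struct ib_cache {
	rwlock_t                 lock;
	struct ib_event_handler  event_handler;
	struct ib_pkey_cache   **pkey_cache;
	struct ib_gid_table    **gid_cache;
	u8                      *lmc_cache;
	enum ib_port_state      *port_state_cache;
};

/* After: one array of per-port structures, same indexing. */
struct ib_port_cache {
	struct ib_pkey_cache *pkey;
	struct ib_gid_table  *gid;
	u8                    lmc;
	enum ib_port_state    port_state;
};

struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_port_cache   *ports;
};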
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f91886bab9d1..2e52021aa999 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,14 +314,13 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	int ret = 0;
 	struct net_device *idev;
 	int empty;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (!memcmp(gid, &zgid, sizeof(*gid)))
 		return -EINVAL;
@@ -369,11 +368,10 @@ out_unlock:
 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -399,12 +397,11 @@ out_unlock:
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	bool deleted = false;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	mutex_lock(&table->lock);
 	write_lock_irq(&table->rwlock);
@@ -428,10 +425,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
			      union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
 	if (index < 0 || index >= table->sz)
		return -EINVAL;
@@ -455,14 +451,13 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
				    unsigned long mask,
				    u8 *port, u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	u8 p;
 	int local_index;
 	unsigned long flags;
 
	for (p = 0; p < ib_dev->phys_port_cnt; p++) {
-		table = ports_table[p];
+		table = ib_dev->cache.ports[p].gid;
		read_lock_irqsave(&table->rwlock, flags);
		local_index = find_gid(table, gid, val, false, mask, NULL);
		if (local_index >= 0) {
@@ -503,7 +498,6 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
			       u16 *index)
 {
 	int local_index;
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
			     GID_ATTR_FIND_MASK_GID_TYPE;
@@ -514,7 +508,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
	    port > rdma_end_port(ib_dev))
		return -ENOENT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
	if (ndev)
		mask |= GID_ATTR_FIND_MASK_NETDEV;
@@ -562,21 +556,18 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
				       void *context,
				       u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned int i;
 	unsigned long flags;
 	bool found = false;
 
-	if (!ports_table)
-		return -EOPNOTSUPP;
 
	if (port < rdma_start_port(ib_dev) ||
	    port > rdma_end_port(ib_dev) ||
	    !rdma_protocol_roce(ib_dev, port))
		return -EPROTONOSUPPORT;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
	read_lock_irqsave(&table->rwlock, flags);
	for (i = 0; i < table->sz; i++) {
@@ -668,14 +659,13 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_attr zattr_type = zattr;
 	struct ib_gid_table *table;
 	unsigned int gid_type;
 
-	table = ports_table[port - rdma_start_port(ib_dev)];
+	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
	make_default_gid(ndev, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
@@ -766,71 +756,64 @@ static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
 	u8 port;
-	struct ib_gid_table **table;
+	struct ib_gid_table *table;
 	int err = 0;
 
-	table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		return -ENOMEM;
-
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		u8 rdma_port = port + rdma_start_port(ib_dev);
 
-		table[port] =
+		table =
			alloc_gid_table(
				ib_dev->port_immutable[rdma_port].gid_tbl_len);
-		if (!table[port]) {
+		if (!table) {
			err = -ENOMEM;
			goto rollback_table_setup;
		}
 
		err = gid_table_reserve_default(ib_dev,
						port + rdma_start_port(ib_dev),
-						table[port]);
+						table);
		if (err)
			goto rollback_table_setup;
+		ib_dev->cache.ports[port].gid = table;
	}
 
-	ib_dev->cache.gid_cache = table;
	return 0;
 
 rollback_table_setup:
	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
-		release_gid_table(table[port]);
+				       table);
+		release_gid_table(table);
	}
 
-	kfree(table);
	return err;
 }
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
-		release_gid_table(table[port]);
-
-	kfree(table);
-	ib_dev->cache.gid_cache = NULL;
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
+		release_gid_table(table);
+		ib_dev->cache.ports[port].gid = NULL;
+	}
 }
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table *table;
 	u8 port;
 
-	if (!table)
-		return;
-
-	for (port = 0; port < ib_dev->phys_port_cnt; port++)
+	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
+		table = ib_dev->cache.ports[port].gid;
		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table[port]);
+				       table);
+	}
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -860,12 +843,12 @@ int ib_get_cached_gid(struct ib_device *device,
 {
 	int res;
 	unsigned long flags;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
-	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
+	struct ib_gid_table *table;
 
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;
 
+	table = device->cache.ports[port_num - rdma_start_port(device)].gid;
	read_lock_irqsave(&table->rwlock, flags);
	res = __ib_cache_gid_get(device, port_num, index, gid, gid_attr);
	read_unlock_irqrestore(&table->rwlock, flags);
@@ -917,7 +900,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
	if (index < 0 || index >= cache->table_len)
		ret = -EINVAL;
@@ -946,7 +929,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
	*index = -1;
 
@@ -986,7 +969,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
 
	*index = -1;
 
@@ -1014,7 +997,7 @@ int ib_get_cached_lmc(struct ib_device *device,
		return -EINVAL;
 
	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
	read_unlock_irqrestore(&device->cache.lock, flags);
 
	return ret;
@@ -1032,7 +1015,8 @@ int ib_get_cached_port_state(struct ib_device *device,
		return -EINVAL;
 
	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+	*port_state = device->cache.ports[port_num
+		- rdma_start_port(device)].port_state;
	read_unlock_irqrestore(&device->cache.lock, flags);
 
	return ret;
@@ -1051,14 +1035,13 @@ static void ib_cache_update(struct ib_device *device,
 	int i;
 	int ret;
 	struct ib_gid_table *table;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
 	bool use_roce_gid_table =
			rdma_cap_roce_gid_table(device, port);
 
	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;
 
-	table = ports_table[port - rdma_start_port(device)];
+	table = device->cache.ports[port - rdma_start_port(device)].gid;
 
	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
@@ -1110,9 +1093,10 @@ static void ib_cache_update(struct ib_device *device,
 
	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_pkey_cache = device->cache.ports[port -
+					     rdma_start_port(device)].pkey;
 
-	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	if (!use_roce_gid_table) {
		write_lock(&table->rwlock);
		for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1106,8 @@ static void ib_cache_update(struct ib_device *device,
		write_unlock(&table->rwlock);
	}
 
-	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
-	device->cache.port_state_cache[port - rdma_start_port(device)] =
+	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
+	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;
 
	write_unlock_irq(&device->cache.lock);
@@ -1177,26 +1161,17 @@ int ib_cache_setup_one(struct ib_device *device)
 
	rwlock_init(&device->cache.lock);
 
-	device->cache.pkey_cache =
-		kzalloc(sizeof *device->cache.pkey_cache *
+	device->cache.ports =
+		kzalloc(sizeof(*device->cache.ports) *
		(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
-					  (rdma_end_port(device) -
-					   rdma_start_port(device) + 1),
-					  GFP_KERNEL);
-	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
-	    !device->cache.lmc_cache) {
+	if (!device->cache.ports) {
		err = -ENOMEM;
-		goto free;
+		goto out;
	}
 
	err = gid_table_setup_one(device);
	if (err)
-		goto free;
+		goto out;
 
	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
		ib_cache_update(device, p + rdma_start_port(device));
@@ -1211,10 +1186,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 err:
	gid_table_cleanup_one(device);
-free:
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+out:
	return err;
 }
 
@@ -1228,15 +1200,11 @@ void ib_cache_release_one(struct ib_device *device)
	 * all the device's resources when the cache could no
	 * longer be accessed.
	 */
-	if (device->cache.pkey_cache)
-		for (p = 0;
-		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-			kfree(device->cache.pkey_cache[p]);
+	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
+		kfree(device->cache.ports[p].pkey);
 
	gid_table_release_one(device);
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
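One simplification worth spelling out before the header change: setup now performs a single zeroed allocation for all per-port state, so the error path in ib_cache_setup_one() keeps one label (free: becomes out:) and needs no per-array kfree() calls. A condensed sketch of the new lifetime, paraphrased from the hunks above rather than quoted verbatim:

/* Allocation: one kzalloc() covers pkey, gid, lmc and port_state for
 * every port; entries are indexed from rdma_start_port(device). */
u32 nports = rdma_end_port(device) - rdma_start_port(device) + 1;
u32 p;

device->cache.ports = kzalloc(sizeof(*device->cache.ports) * nports,
			      GFP_KERNEL);
if (!device->cache.ports)
	return -ENOMEM;			/* nothing else to unwind yet */

/* ... gid_table_setup_one() then attaches a GID table per port ... */

/* Teardown: free each port's pkey cache, then the array itself
 * (GID tables are released by gid_table_release_one()). */
for (p = 0; p < nports; ++p)
	kfree(device->cache.ports[p].pkey);
kfree(device->cache.ports);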
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fafa988e0e9a..e55afec6bb84 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,13 +1775,17 @@ enum ib_mad_result {
 
 #define IB_DEVICE_NAME_MAX 64
 
+struct ib_port_cache {
+	struct ib_pkey_cache *pkey;
+	struct ib_gid_table  *gid;
+	u8                    lmc;
+	enum ib_port_state    port_state;
+};
+
 struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
-	struct ib_pkey_cache  **pkey_cache;
-	struct ib_gid_table   **gid_cache;
-	u8                     *lmc_cache;
-	enum ib_port_state     *port_state_cache;
+	struct ib_port_cache   *ports;
 };
 
 struct ib_dma_mapping_ops {
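Finally, for illustration, a small usage sketch of the new structure; it mirrors the accessor pattern used throughout cache.c above (locking elided, but callers hold device->cache.lock or the GID table rwlock exactly as the functions above do):

/* All cached per-port state now hangs off a single array entry. */
struct ib_port_cache *pc =
	&device->cache.ports[port_num - rdma_start_port(device)];

struct ib_pkey_cache *pkeys = pc->pkey;       /* P_Key table */
struct ib_gid_table  *gids  = pc->gid;        /* GID table */
u8                    lmc   = pc->lmc;        /* LID mask control */
enum ib_port_state    state = pc->port_state; /* last cached port state */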