 34 files changed, 532 insertions(+), 350 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 0d13fe0a260b..3d6d9461c31d 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -160,6 +160,7 @@ struct ehca_qp {
 	};
 	u32 qp_type;
 	enum ehca_ext_qp_type ext_type;
+	enum ib_qp_state state;
 	struct ipz_queue ipz_squeue;
 	struct ipz_queue ipz_rqueue;
 	struct h_galpas galpas;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index b5ca94c6b8d9..ca5eb0cb628c 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 	unsigned long flags;

 	WARN_ON_ONCE(!in_interrupt());
-	if (ehca_debug_level)
+	if (ehca_debug_level >= 3)
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");

 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
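
Note: this series replaces ehca's single on/off debug flag with graded levels (see the MODULE_PARM_DESC change in ehca_main.c below: 0 none, 1 traces, 2 some dumps, 3 lots). A minimal sketch of the gating pattern the hunks apply, reusing ehca_dmp() as shown above; the helper itself is illustrative, not part of the patch:

	/* hot-path dumps (per-interrupt, per-WQE, per-page) move to level 3;
	 * control-block dumps move to level 2; error traces stay at level 1 */
	static inline void dump_if_verbose(void *buf, size_t len)
	{
		if (ehca_debug_level >= 3)
			ehca_dmp(buf, len, "");
	}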
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 65b3362cdb9b..65048976198c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -50,7 +50,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"

-#define HCAD_VERSION "0025"
+#define HCAD_VERSION "0026"

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
 static int ehca_open_aqp1 = 0;
 static int ehca_hw_level = 0;
 static int ehca_poll_all_eqs = 1;
-static int ehca_mr_largepage = 1;

 int ehca_debug_level = 0;
 int ehca_nr_ports = 2;
@@ -70,45 +69,40 @@ int ehca_static_rate = -1;
 int ehca_scaling_code = 0;
 int ehca_lock_hcalls = -1;

-module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
+module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
 module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
 module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
 module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
-module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO);
+module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
 module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO);
+module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
 module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
-module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO);
-module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO);
+module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
 module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);

 MODULE_PARM_DESC(open_aqp1,
-		 "AQP1 on startup (0: no (default), 1: yes)");
+		 "Open AQP1 on startup (default: no)");
 MODULE_PARM_DESC(debug_level,
-		 "debug level"
-		 " (0: no debug traces (default), 1: with debug traces)");
+		 "Amount of debug output (0: none (default), 1: traces, "
+		 "2: some dumps, 3: lots)");
 MODULE_PARM_DESC(hw_level,
-		 "hardware level"
-		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+		 "Hardware level (0: autosensing (default), "
+		 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
 		 "number of connected ports (-1: autodetect, 1: port one only, "
 		 "2: two ports (default)");
 MODULE_PARM_DESC(use_hp_mr,
-		 "high performance MRs (0: no (default), 1: yes)");
+		 "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
-		 "time to wait for port activation (default: 30 sec)");
+		 "Time to wait for port activation (default: 30 sec)");
 MODULE_PARM_DESC(poll_all_eqs,
-		 "polls all event queues periodically"
-		 " (0: no, 1: yes (default))");
+		 "Poll all event queues periodically (default: yes)");
 MODULE_PARM_DESC(static_rate,
-		 "set permanent static rate (default: disabled)");
+		 "Set permanent static rate (default: no static rate)");
 MODULE_PARM_DESC(scaling_code,
-		 "set scaling code (0: disabled/default, 1: enabled)");
-MODULE_PARM_DESC(mr_largepage,
-		 "use large page for MR (0: use PAGE_SIZE (default), "
-		 "1: use large page depending on MR size");
+		 "Enable scaling code (default: no)");
 MODULE_PARM_DESC(lock_hcalls,
-		 "serialize all hCalls made by the driver "
+		 "Serialize all hCalls made by the driver "
 		 "(default: autodetect)");

 DEFINE_RWLOCK(ehca_qp_idr_lock);
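
Note: the flag-style parameters are now genuine bools, so the module loader accepts y/n syntax for them in addition to 0/1. Illustrative load line (values invented for the example):

	modprobe ib_ehca debug_level=2 poll_all_eqs=y lock_hcalls=1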
@@ -275,6 +269,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 	u64 h_ret;
 	struct hipz_query_hca *rblock;
 	struct hipz_query_port *port;
+	const char *loc_code;

 	static const u32 pgsize_map[] = {
 		HCA_CAP_MR_PGSIZE_4K, 0x1000,
@@ -283,6 +278,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 		HCA_CAP_MR_PGSIZE_16M, 0x1000000,
 	};

+	ehca_gen_dbg("Probing adapter %s...",
+		     shca->ofdev->node->full_name);
+	loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+	if (loc_code)
+		ehca_gen_dbg(" ... location code=%s", loc_code);
+
 	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!rblock) {
 		ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,11 +351,9 @@ static int ehca_sense_attributes(struct ehca_shca *shca)

 	/* translate supported MR page sizes; always support 4K */
 	shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-	if (ehca_mr_largepage) { /* support extra sizes only if enabled */
-		for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-			if (rblock->memory_page_size_supported & pgsize_map[i])
-				shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-	}
+	for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+		if (rblock->memory_page_size_supported & pgsize_map[i])
+			shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];

 	/* query max MTU from first port -- it's the same for all ports */
 	port = (struct hipz_query_port *)rblock;
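
Note: with mr_largepage gone, the translation above runs unconditionally. pgsize_map is laid out as (capability bit, page size in bytes) pairs, which is why the loop steps by two. A condensed sketch of the idiom, using only the entries visible in these hunks:

	/* sketch: fold firmware capability bits into an MR page-size mask */
	static const u32 pgsize_map[] = {
		HCA_CAP_MR_PGSIZE_4K,	0x1000,
		HCA_CAP_MR_PGSIZE_16M,	0x1000000,	/* intermediate sizes omitted here */
	};

	shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;	/* 4K always supported */
	for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
		if (rblock->memory_page_size_supported & pgsize_map[i])
			shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];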
@@ -567,8 +566,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)

 static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-			ehca_debug_level);
+	return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
 }

 static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +655,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);

-static ssize_t ehca_show_mr_largepage(struct device *dev,
-				      struct device_attribute *attr,
-				      char *buf)
-{
-	return sprintf(buf, "%d\n", ehca_mr_largepage);
-}
-static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
-
 static struct attribute *ehca_dev_attrs[] = {
 	&dev_attr_adapter_handle.attr,
 	&dev_attr_num_ports.attr,
@@ -681,7 +671,6 @@ static struct attribute *ehca_dev_attrs[] = {
 	&dev_attr_cur_mw.attr,
 	&dev_attr_max_pd.attr,
 	&dev_attr_max_ah.attr,
-	&dev_attr_mr_largepage.attr,
 	NULL
 };

diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index f26997fc00f8..46ae4eb2c4e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
 	int t;
 	for (t = start_idx; t <= end_idx; t++) {
 		u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
-		ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
-			     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+		if (ehca_debug_level >= 3)
+			ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+				     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
 		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
 			ehca_gen_err("uncontiguous page found pgaddr=%lx "
 				     "prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 						pgaddr &
 						~(pginfo->hwpage_size - 1));
 				}
-				ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-					     "value=%016lx", *kpage, pgaddr,
-					     *(u64 *)abs_to_virt(
-						     phys_to_abs(pgaddr)));
+				if (ehca_debug_level >= 3) {
+					u64 val = *(u64 *)abs_to_virt(
+						phys_to_abs(pgaddr));
+					ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+						     "value=%016lx",
+						     *kpage, pgaddr, val);
+				}
 				prev_pgaddr = pgaddr;
 				i++;
 				pginfo->kpage_cnt++;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 3eb14a52cbf2..57bef1152cc2 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -550,6 +550,7 @@ static struct ehca_qp *internal_create_qp(
 	spin_lock_init(&my_qp->spinlock_r);
 	my_qp->qp_type = qp_type;
 	my_qp->ext_type = parms.ext_type;
+	my_qp->state = IB_QPS_RESET;

 	if (init_attr->recv_cq)
 		my_qp->recv_cq =
@@ -965,7 +966,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 		 qp_num, bad_send_wqe_p);
 	/* convert wqe pointer to vadr */
 	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
 	squeue = &my_qp->ipz_squeue;
 	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +979,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
 	*bad_wqe_cnt = 0;
 	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-		if (ehca_debug_level)
+		if (ehca_debug_level >= 2)
 			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
 		wqe->nr_of_data_seg = 0; /* suppress data access */
 		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1451,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		/* no support for max_send/recv_sge yet */
 	}

-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

 	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1509,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	if (attr_mask & IB_QP_QKEY)
 		my_qp->qkey = attr->qkey;

+	my_qp->state = qp_new_state;
+
 modify_qp_exit2:
 	if (squeue_locked) { /* this means: sqe -> rts */
 		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1766,7 @@ int ehca_query_qp(struct ib_qp *qp,
 	if (qp_init_attr)
 		*qp_init_attr = my_qp->init_attr;

-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

 query_qp_exit1:
@@ -1811,7 +1814,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		goto modify_srq_exit0;
 	}

-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

 	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1867,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 	srq_attr->srq_limit = EHCA_BMASK_GET(
 		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);

-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

 query_srq_exit1:
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index a20bbf466188..bbe0436f4f75 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
 			recv_wr->sg_list[cnt_ds].length;
 	}

-	if (ehca_debug_level) {
+	if (ehca_debug_level >= 3) {
 		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
 			     ipz_rqueue);
 		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 		return -EINVAL;
 	}

-	if (ehca_debug_level) {
+	if (ehca_debug_level >= 3) {
 		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
 		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
 	}
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
 	int ret = 0;
 	unsigned long flags;

+	if (unlikely(my_qp->state != IB_QPS_RTS)) {
+		ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num);
+		return -EINVAL;
+	}
+
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_s, flags);

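Note: the guard above relies on the state field added to struct ehca_qp in the first hunk of this series: internal_create_qp() seeds it with IB_QPS_RESET and internal_modify_qp() keeps it current, so the send path can refuse early without a hypervisor round-trip. From a hypothetical kernel consumer's point of view:

	/* hypothetical caller: posting to a QP never moved to RTS now
	 * fails fast in the driver instead of reaching firmware */
	struct ib_send_wr *bad_wr;
	int err = ib_post_send(qp, &wr, &bad_wr);	/* wr prepared elsewhere */
	if (err == -EINVAL)
		pr_err("qp %u: not in RTS, nothing posted\n", qp->qp_num);
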
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
 			goto post_send_exit0;
 		}
 		wqe_cnt++;
-		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-			 my_qp, qp->qp_num, wqe_cnt);
 	} /* eof for cur_send_wr */

 post_send_exit0:
 	iosync(); /* serialize GAL register access */
 	hipz_update_sqa(my_qp, wqe_cnt);
+	if (unlikely(ret || ehca_debug_level >= 2))
+		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+			 my_qp, qp->qp_num, wqe_cnt, ret);
 	my_qp->message_count += wqe_cnt;
 	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
 	return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 			goto post_recv_exit0;
 		}
 		wqe_cnt++;
-		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-			 my_qp, my_qp->real_qp_num, wqe_cnt);
 	} /* eof for cur_recv_wr */

 post_recv_exit0:
 	iosync(); /* serialize GAL register access */
 	hipz_update_rqa(my_qp, wqe_cnt);
+	if (unlikely(ret || ehca_debug_level >= 2))
+		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+			 my_qp, my_qp->real_qp_num, wqe_cnt, ret);
 	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
 	return ret;
 }
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	struct ehca_cqe *cqe;
 	struct ehca_qp *my_qp;
-	int cqe_count = 0;
+	int cqe_count = 0, is_error;

 poll_cq_one_read_cqe:
 	cqe = (struct ehca_cqe *)
 		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
 	if (!cqe) {
 		ret = -EAGAIN;
-		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
-			 "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
+		if (ehca_debug_level >= 3)
+			ehca_dbg(cq->device, "Completion queue is empty "
+				 "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
 		goto poll_cq_one_exit0;
 	}

 	/* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
 			ehca_dbg(cq->device,
 				 "Got CQE with purged bit qp_num=%x src_qp=%x",
 				 cqe->local_qp_number, cqe->remote_qp_number);
-			if (ehca_debug_level)
+			if (ehca_debug_level >= 2)
 				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
 					 cqe->local_qp_number,
 					 cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
 		}
 	}

-	/* tracing cqe */
-	if (unlikely(ehca_debug_level)) {
+	is_error = cqe->status & WC_STATUS_ERROR_BIT;
+
+	/* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
+	if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
 		ehca_dbg(cq->device,
-			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
-			 my_cq, my_cq->cq_number);
+			 "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
+			 is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
 		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
 			 my_cq, my_cq->cq_number);
 		ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
 		/* update also queue adder to throw away this entry!!! */
 		goto poll_cq_one_exit0;
 	}
+
 	/* eval ib_wc_status */
-	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+	if (unlikely(is_error)) {
 		/* complete with errors */
 		map_ib_wc_status(cqe->status, &wc->status);
 		wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
 	wc->imm_data = cpu_to_be32(cqe->immediate_data);
 	wc->sl = cqe->service_level;

-	if (unlikely(wc->status != IB_WC_SUCCESS))
-		ehca_dbg(cq->device,
-			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
-			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
-			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
-			 cqe->status, cqe->local_qp_number,
-			 cqe->remote_qp_number, cqe->work_request_id, cqe);
-
 poll_cq_one_exit0:
 	if (cqe_count > 0)
 		hipz_update_feca(my_cq, cqe_count);
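
Note: taken together, the poll-path hunks reduce CQE tracing to a single policy evaluated once through is_error; the unconditional "WARNING unsuccessful cqe" message removed at the end became redundant under it. The predicate, factored out for clarity (this helper is not in the patch):

	/* error CQEs trace at any nonzero debug level; all CQEs at >= 3 */
	static inline int ehca_trace_cqe(int debug_level, int is_error)
	{
		return debug_level >= 3 || (debug_level && is_error);
	}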
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 1b07f2beafaf..e43ed8f8a0c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
 		break;

 	case 1: /* qp rqueue_addr */
-		ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
-			 qp->ib_qp.qp_num);
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
 		ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
 				      &qp->mm_count_rqueue);
 		if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
 		break;

 	case 2: /* qp squeue_addr */
-		ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
-			 qp->ib_qp.qp_num);
+		ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
 		ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
 				      &qp->mm_count_squeue);
 		if (unlikely(ret)) {
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7029aa653751..5245e13c3a30 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
 	int i, sleep_msecs;
 	unsigned long flags = 0;

-	ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-		     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+	if (unlikely(ehca_debug_level >= 2))
+		ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
+			     opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);

 	for (i = 0; i < 5; i++) {
 		/* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
 				     opcode, ret, arg1, arg2, arg3,
 				     arg4, arg5, arg6, arg7);
 		else
-			ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
+			if (unlikely(ehca_debug_level >= 2))
+				ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);

 		return ret;
 	}
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 	int i, sleep_msecs;
 	unsigned long flags = 0;

-	ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-		     arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+	if (unlikely(ehca_debug_level >= 2))
+		ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
+			     arg1, arg2, arg3, arg4, arg5,
+			     arg6, arg7, arg8, arg9);

 	for (i = 0; i < 5; i++) {
 		/* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 				     ret, outs[0], outs[1], outs[2], outs[3],
 				     outs[4], outs[5], outs[6], outs[7],
 				     outs[8]);
-		} else
+		} else if (unlikely(ehca_debug_level >= 2))
 			ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
 				     ret, outs[0], outs[1], outs[2], outs[3],
 				     outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
 				      r_cb, /* r6 */
 				      0, 0, 0, 0);

-	if (ehca_debug_level)
+	if (ehca_debug_level >= 2)
 		ehca_dmp(query_port_response_block, 64, "response_block");

 	return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
 	u64 ret;
 	u64 outs[PLPAR_HCALL9_BUFSIZE];

-	ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
-		     "vaddr=%lx length=%lx",
-		     (u32)PAGE_SIZE, access_ctrl, vaddr, length);
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle, /* r4 */
 				5, /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
 {
 	u64 ret;

-	if (unlikely(ehca_debug_level >= 2)) {
+	if (unlikely(ehca_debug_level >= 3)) {
 		if (count > 1) {
 			u64 *kpage;
 			int i;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 3557e7edc9b6..5e570bb0bb6f 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector

 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
 		if (err)
 			goto err_cq;

@@ -250,7 +250,7 @@ err_mtt:

 err_db:
 	if (!context)
-		mlx4_ib_db_free(dev, &cq->db);
+		mlx4_db_free(dev->dev, &cq->db);

 err_cq:
 	kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
 		ib_umem_release(mcq->umem);
 	} else {
 		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
-		mlx4_ib_db_free(dev, &mcq->db);
+		mlx4_db_free(dev->dev, &mcq->db);
 	}

 	kfree(mcq);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 1c36087aef14..8e342cc9baec 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -34,124 +34,6 @@

 #include "mlx4_ib.h"

-struct mlx4_ib_db_pgdir {
-	struct list_head	list;
-	DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-	DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-	unsigned long	       *bits[2];
-	__be32		       *db_page;
-	dma_addr_t		db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-	struct mlx4_ib_db_pgdir *pgdir;
-
-	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-	if (!pgdir)
-		return NULL;
-
-	bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-	pgdir->bits[0] = pgdir->order0;
-	pgdir->bits[1] = pgdir->order1;
-	pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-					    PAGE_SIZE, &pgdir->db_dma,
-					    GFP_KERNEL);
-	if (!pgdir->db_page) {
-		kfree(pgdir);
-		return NULL;
-	}
-
-	return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-				       struct mlx4_ib_db *db, int order)
-{
-	int o;
-	int i;
-
-	for (o = order; o <= 1; ++o) {
-		i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-		if (i < MLX4_IB_DB_PER_PAGE >> o)
-			goto found;
-	}
-
-	return -ENOMEM;
-
-found:
-	clear_bit(i, pgdir->bits[o]);
-
-	i <<= o;
-
-	if (o > order)
-		set_bit(i ^ 1, pgdir->bits[order]);
-
-	db->u.pgdir = pgdir;
-	db->index   = i;
-	db->db      = pgdir->db_page + db->index;
-	db->dma     = pgdir->db_dma + db->index * 4;
-	db->order   = order;
-
-	return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-	struct mlx4_ib_db_pgdir *pgdir;
-	int ret = 0;
-
-	mutex_lock(&dev->pgdir_mutex);
-
-	list_for_each_entry(pgdir, &dev->pgdir_list, list)
-		if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-			goto out;
-
-	pgdir = mlx4_ib_alloc_db_pgdir(dev);
-	if (!pgdir) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	list_add(&pgdir->list, &dev->pgdir_list);
-
-	/* This should never fail -- we just allocated an empty page: */
-	WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-	mutex_unlock(&dev->pgdir_mutex);
-
-	return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-	int o;
-	int i;
-
-	mutex_lock(&dev->pgdir_mutex);
-
-	o = db->order;
-	i = db->index;
-
-	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-		clear_bit(i ^ 1, db->u.pgdir->order0);
-		++o;
-	}
-
-	i >>= o;
-	set_bit(i, db->u.pgdir->bits[o]);
-
-	if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-		dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-		list_del(&db->u.pgdir->list);
-		kfree(db->u.pgdir);
-	}
-
-	mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
 	struct list_head	list;
 	struct ib_umem	       *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };

 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-			struct mlx4_ib_db *db)
+			struct mlx4_db *db)
 {
 	struct mlx4_ib_user_db_page *page;
 	struct ib_umem_chunk *chunk;
@@ -202,7 +84,7 @@ out:
 	return err;
 }

-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
 	mutex_lock(&context->db_page_mutex);

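Note: the page-directory allocator deleted here moves into mlx4_core, and the remaining user-mapping helpers switch to the core's struct mlx4_db. A minimal sketch of a kernel-side caller after the move, under the same assumptions as the cq.c/qp.c/srq.c hunks (ibdev is a struct mlx4_ib_dev *):

	struct mlx4_db db;
	int err;

	err = mlx4_db_alloc(ibdev->dev, &db, 0);	/* order 0: one doorbell */
	if (err)
		return err;

	*db.db = cpu_to_be32(0);	/* db.db: CPU pointer into the doorbell page */
	/* program db.dma into the hardware context; release on teardown */
	mlx4_db_free(ibdev->dev, &db);
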
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4d9b5ac42202..4d61e32866c6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		goto err_uar;
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

-	INIT_LIST_HEAD(&ibdev->pgdir_list);
-	mutex_init(&ibdev->pgdir_mutex);
-
 	ibdev->dev = dev;

 	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 9e637323c155..5cf994794d25 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -43,24 +43,6 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>

-enum {
-	MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
-	__be32			*db;
-	union {
-		struct mlx4_ib_db_pgdir	    *pgdir;
-		struct mlx4_ib_user_db_page *user_page;
-	} u;
-	dma_addr_t		dma;
-	int			index;
-	int			order;
-};
-
 struct mlx4_ib_ucontext {
 	struct ib_ucontext	ibucontext;
 	struct mlx4_uar		uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
 	struct mlx4_cq		mcq;
 	struct mlx4_ib_cq_buf	buf;
 	struct mlx4_ib_cq_resize *resize_buf;
-	struct mlx4_ib_db	db;
+	struct mlx4_db		db;
 	spinlock_t		lock;
 	struct mutex		resize_mutex;
 	struct ib_umem	       *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
 	struct mlx4_qp		mqp;
 	struct mlx4_buf		buf;

-	struct mlx4_ib_db	db;
+	struct mlx4_db		db;
 	struct mlx4_ib_wq	rq;

 	u32			doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
 	struct ib_srq		ibsrq;
 	struct mlx4_srq		msrq;
 	struct mlx4_buf		buf;
-	struct mlx4_ib_db	db;
+	struct mlx4_db		db;
 	u64		       *wrid;
 	spinlock_t		lock;
 	int			head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
 	struct mlx4_dev	       *dev;
 	void __iomem	       *uar_map;

-	struct list_head	pgdir_list;
-	struct mutex		pgdir_mutex;
-
 	struct mlx4_uar		priv_uar;
 	u32			priv_pdn;
 	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx4_ib_ah, ibah);
 }

-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-			struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+			struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b75efae7e449..80ea8b9e7761 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;

 		if (!init_attr->srq) {
-			err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;

@@ -580,7 +580,7 @@ err_buf:

 err_db:
 	if (!pd->uobject && !init_attr->srq)
-		mlx4_ib_db_free(dev, &qp->db);
+		mlx4_db_free(dev->dev, &qp->db);

 err:
 	return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 		kfree(qp->rq.wrid);
 		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
 		if (!qp->ibqp.srq)
-			mlx4_ib_db_free(dev, &qp->db);
+			mlx4_db_free(dev->dev, &qp->db);
 	}
 }

diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index beaa3b06cf58..204619702f9d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
-		err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
 		if (err)
 			goto err_srq;

@@ -200,7 +200,7 @@ err_buf:

 err_db:
 	if (!pd->uobject)
-		mlx4_ib_db_free(dev, &srq->db);
+		mlx4_db_free(dev->dev, &srq->db);

 err_srq:
 	kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
 		kfree(msrq->wrid);
 		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
 			      &msrq->buf);
-		mlx4_ib_db_free(dev, &msrq->db);
+		mlx4_db_free(dev->dev, &msrq->db);
 	}

 	kfree(msrq);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b046262ed638..a4e9269a29bd 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -139,8 +139,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,

 	addr = ntohl(ifa->ifa_address);
 	mask = ntohl(ifa->ifa_mask);
-	nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
-			addr, mask);
+	nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
+		  ", netmask " NIPQUAD_FMT ".\n",
+		  HIPQUAD(addr), HIPQUAD(mask));
 	list_for_each_entry(nesdev, &nes_dev_list, list) {
 		nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
 			  nesdev, nesdev->netdev[0]->name);
@@ -353,13 +354,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
  */
 static void nes_print_macaddr(struct net_device *netdev)
 {
-	nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
-		  netdev->name,
-		  netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		  netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-		  netdev->irq);
-}
+	DECLARE_MAC_BUF(mac);

+	nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+		  netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
+}

 /**
  * nes_interrupt - handle interrupts
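
Note: DECLARE_MAC_BUF()/print_mac() are this kernel generation's MAC-formatting helpers (they predate the %pM printk extension): the macro declares an 18-byte stack buffer, and print_mac() returns that buffer filled with the colon-separated form, so it can sit directly in an argument list. Sketch:

	DECLARE_MAC_BUF(mac);		/* expands to: char mac[18] */

	printk(KERN_INFO "MAC %s\n", print_mac(mac, netdev->dev_addr));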
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index d0738623bcf3..d940fc27129a 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
 	/* get a handle on the hte */
 	hte = &cm_core->connected_nodes;

-	nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
-		  loc_addr, loc_port, cm_core, hte);
+	nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
+		  HIPQUAD(loc_addr), loc_port, cm_core, hte);

 	/* walk list and find cm_node associated with this session ID */
 	spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
 	}
 	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

-	nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
-		  dst_addr, dst_port);
+	nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
+		  HIPQUAD(dst_addr), dst_port);

 	/* no listener */
 	return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 	int arpindex = 0;
 	struct nes_device *nesdev;
 	struct nes_adapter *nesadapter;
+	DECLARE_MAC_BUF(mac);

 	/* create an hte and cm_node for this instance */
 	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 	cm_node->loc_port = cm_info->loc_port;
 	cm_node->rem_port = cm_info->rem_port;
 	cm_node->send_write0 = send_first;
-	nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
-		  cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+	nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
+		  HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+		  HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
 	cm_node->listener = listener;
 	cm_node->netdev = nesvnic->netdev;
 	cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,

 	/* copy the mac addr to node context */
 	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
-	nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
-		  " %02x, %02x, %02x, %02x, %02x\n",
-		  cm_node->rem_mac[0], cm_node->rem_mac[1],
-		  cm_node->rem_mac[2], cm_node->rem_mac[3],
-		  cm_node->rem_mac[4], cm_node->rem_mac[5]);
+	nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+		  print_mac(mac, cm_node->rem_mac));

 	add_hte_node(cm_core, cm_node);
 	atomic_inc(&cm_nodes_created);
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
 	nfo.rem_addr = ntohl(iph->saddr);
 	nfo.rem_port = ntohs(tcph->source);

-	nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
-		  iph->daddr, tcph->dest, iph->saddr, tcph->source);
+	nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
+		  ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
+		  NIPQUAD(iph->daddr), tcph->dest,
+		  NIPQUAD(iph->saddr), tcph->source);

 	/* note: this call is going to increment cm_node ref count */
 	cm_node = find_node(cm_core,
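
Note: NIPQUAD_FMT expands to "%u.%u.%u.%u"; NIPQUAD() splits a network-byte-order address into its four bytes in wire order, while HIPQUAD() does the same for a value already converted to host order. That is why the ntohl()'d fields above go through HIPQUAD() and the raw iph->daddr/iph->saddr through NIPQUAD(). Sketch:

	/* both lines print the same dotted quad */
	__be32 wire = iph->daddr;	/* network byte order */
	u32 host = ntohl(wire);		/* host byte order */

	printk("dst=" NIPQUAD_FMT " / " NIPQUAD_FMT "\n",
	       NIPQUAD(wire), HIPQUAD(host));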
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index aa53aab91bf8..08964cc7e98a 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -636,6 +636,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 			nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
 			return 0;
 		}
+
+		i = 0;
+		while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+			mdelay(1);
+		if (i >= 10000) {
+			printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+			       nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+			return 0;
+		}
 	}

 	/* port reset */
@@ -684,17 +693,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
 		}
 	}

-
-
-	i = 0;
-	while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
-		mdelay(1);
-	if (i >= 10000) {
-		printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
-		       nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
-		return 0;
-	}
-
 	return port_count;
 }

diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index b7e2844f096b..8f36e231bdf5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -905,7 +905,7 @@ struct nes_hw_qp {
 };

 struct nes_hw_cq {
-	struct nes_hw_cqe volatile *cq_vbase;	/* PCI memory for host rings */
+	struct nes_hw_cqe *cq_vbase;	/* PCI memory for host rings */
 	void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
 	dma_addr_t cq_pbase;	/* PCI memory for host rings */
 	u16 cq_head;
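
Note: dropping volatile here follows Documentation/volatile-considered-harmful.txt: ordering against device-written CQE memory is enforced by explicit barriers at the points that read it, not by the pointer type. A hedged sketch of the read side; the index and flag names are assumptions modeled on the driver's CQE handling, not taken from this hunk:

	struct nes_hw_cqe *cqe = &cq->cq_vbase[cq->cq_head];

	if (le32_to_cpu(cqe->cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
		rmb();	/* read the valid bit before the rest of the entry */
		/* ... consume the completion ... */
	}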
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 01cd0effc492..e5366b013c1a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
| @@ -787,16 +787,14 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p) | |||
| 787 | int i; | 787 | int i; |
| 788 | u32 macaddr_low; | 788 | u32 macaddr_low; |
| 789 | u16 macaddr_high; | 789 | u16 macaddr_high; |
| 790 | DECLARE_MAC_BUF(mac); | ||
| 790 | 791 | ||
| 791 | if (!is_valid_ether_addr(mac_addr->sa_data)) | 792 | if (!is_valid_ether_addr(mac_addr->sa_data)) |
| 792 | return -EADDRNOTAVAIL; | 793 | return -EADDRNOTAVAIL; |
| 793 | 794 | ||
| 794 | memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); | 795 | memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); |
| 795 | printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n", | 796 | printk(PFX "%s: Address length = %d, Address = %s\n", |
| 796 | __func__, netdev->addr_len, | 797 | __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data)); |
| 797 | mac_addr->sa_data[0], mac_addr->sa_data[1], | ||
| 798 | mac_addr->sa_data[2], mac_addr->sa_data[3], | ||
| 799 | mac_addr->sa_data[4], mac_addr->sa_data[5]); | ||
| 800 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; | 798 | macaddr_high = ((u16)netdev->dev_addr[0]) << 8; |
| 801 | macaddr_high += (u16)netdev->dev_addr[1]; | 799 | macaddr_high += (u16)netdev->dev_addr[1]; |
| 802 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; | 800 | macaddr_low = ((u32)netdev->dev_addr[2]) << 24; |
| @@ -878,11 +876,11 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) | |||
| 878 | if (mc_nic_index < 0) | 876 | if (mc_nic_index < 0) |
| 879 | mc_nic_index = nesvnic->nic_index; | 877 | mc_nic_index = nesvnic->nic_index; |
| 880 | if (multicast_addr) { | 878 | if (multicast_addr) { |
| 881 | nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", | 879 | DECLARE_MAC_BUF(mac); |
| 882 | multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], | 880 | nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n", |
| 883 | multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], | 881 | print_mac(mac, multicast_addr->dmi_addr), |
| 884 | multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], | 882 | perfect_filter_register_address+(mc_index * 8), |
| 885 | perfect_filter_register_address+(mc_index * 8), mc_nic_index); | 883 | mc_nic_index); |
| 886 | macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; | 884 | macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; |
| 887 | macaddr_high += (u16)multicast_addr->dmi_addr[1]; | 885 | macaddr_high += (u16)multicast_addr->dmi_addr[1]; |
| 888 | macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; | 886 | macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24; |
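DECLARE_MAC_BUF()/print_mac() is this kernel generation's helper pair for formatting Ethernet addresses (it predates the %pM printk extension). The buffer is an ordinary 18-byte char array, which is why the multicast hunk can declare it in the narrowest block that prints. A minimal usage sketch:

	DECLARE_MAC_BUF(mac);	/* expands to: char mac[18]; */

	printk(KERN_INFO "dev MAC = %s\n", print_mac(mac, netdev->dev_addr));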
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index f9db07c2717d..c6d5631a6995 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
| @@ -660,7 +660,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti | |||
| 660 | 660 | ||
| 661 | /* DELETE or RESOLVE */ | 661 | /* DELETE or RESOLVE */ |
| 662 | if (arp_index == nesadapter->arp_table_size) { | 662 | if (arp_index == nesadapter->arp_table_size) { |
| 663 | nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); | 663 | nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n", |
| 664 | HIPQUAD(ip_addr), | ||
| 665 | action == NES_ARP_RESOLVE ? "resolve" : "delete"); | ||
| 664 | return -1; | 666 | return -1; |
| 665 | } | 667 | } |
| 666 | 668 | ||
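NIPQUAD() splits a network-byte-order IPv4 address into the four byte arguments that NIPQUAD_FMT ("%u.%u.%u.%u") consumes; HIPQUAD() is the host-byte-order variant, used in this hunk because ip_addr was already converted with ntohl(). A sketch with an illustrative address:

	__be32 naddr = htonl(0xc0a80001);	/* 192.168.0.1, network order */
	u32 haddr = 0xc0a80001;			/* same address, host order */

	printk(KERN_INFO NIPQUAD_FMT "\n", NIPQUAD(naddr));	/* 192.168.0.1 */
	printk(KERN_INFO NIPQUAD_FMT "\n", HIPQUAD(haddr));	/* 192.168.0.1 */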
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index f9a5d4390892..ee74f7c7a6da 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
| @@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq) | |||
| 1976 | 1976 | ||
| 1977 | if (nescq->cq_mem_size) | 1977 | if (nescq->cq_mem_size) |
| 1978 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, | 1978 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, |
| 1979 | (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); | 1979 | nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); |
| 1980 | kfree(nescq); | 1980 | kfree(nescq); |
| 1981 | 1981 | ||
| 1982 | return ret; | 1982 | return ret; |
| @@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) | |||
| 3610 | while (cqe_count < num_entries) { | 3610 | while (cqe_count < num_entries) { |
| 3611 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & | 3611 | if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & |
| 3612 | NES_CQE_VALID) { | 3612 | NES_CQE_VALID) { |
| 3613 | /* | ||
| 3614 | * Make sure we read CQ entry contents *after* | ||
| 3615 | * we've checked the valid bit. | ||
| 3616 | */ | ||
| 3617 | rmb(); | ||
| 3618 | |||
| 3613 | cqe = nescq->hw_cq.cq_vbase[head]; | 3619 | cqe = nescq->hw_cq.cq_vbase[head]; |
| 3614 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; | 3620 | nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; |
| 3615 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); | 3621 | u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); |
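The nes_hw.h and nes_verbs.c hunks are two halves of one change: the volatile qualifier on cq_vbase goes away (it was also what forced the pci_free_consistent() cast removed above), replaced by an explicit read barrier at the one spot where ordering against DMA actually matters. The idiom, as a minimal sketch:

	struct nes_hw_cqe cqe;

	if (le32_to_cpu(cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
			NES_CQE_VALID) {
		/*
		 * The valid bit can become visible before the rest of the
		 * DMA'ed entry; rmb() keeps the CPU from using CQE words it
		 * speculatively loaded before the check.
		 */
		rmb();
		cqe = cq_vbase[head];	/* safe to copy the entry now */
	}

The barrier orders exactly the pair of reads that races with the device, whereas volatile pessimized every access to the ring.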
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 73b2b176ad0e..f1f142dc64b1 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -56,11 +56,11 @@ | |||
| 56 | /* constants */ | 56 | /* constants */ |
| 57 | 57 | ||
| 58 | enum { | 58 | enum { |
| 59 | IPOIB_PACKET_SIZE = 2048, | ||
| 60 | IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES, | ||
| 61 | |||
| 62 | IPOIB_ENCAP_LEN = 4, | 59 | IPOIB_ENCAP_LEN = 4, |
| 63 | 60 | ||
| 61 | IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, | ||
| 62 | IPOIB_UD_RX_SG = 2, /* max buffers needed for 4K MTU */ | ||
| 63 | |||
| 64 | IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ | 64 | IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ |
| 65 | IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, | 65 | IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, |
| 66 | IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, | 66 | IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE, |
| @@ -139,7 +139,7 @@ struct ipoib_mcast { | |||
| 139 | 139 | ||
| 140 | struct ipoib_rx_buf { | 140 | struct ipoib_rx_buf { |
| 141 | struct sk_buff *skb; | 141 | struct sk_buff *skb; |
| 142 | u64 mapping; | 142 | u64 mapping[IPOIB_UD_RX_SG]; |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | struct ipoib_tx_buf { | 145 | struct ipoib_tx_buf { |
| @@ -294,6 +294,7 @@ struct ipoib_dev_priv { | |||
| 294 | 294 | ||
| 295 | unsigned int admin_mtu; | 295 | unsigned int admin_mtu; |
| 296 | unsigned int mcast_mtu; | 296 | unsigned int mcast_mtu; |
| 297 | unsigned int max_ib_mtu; | ||
| 297 | 298 | ||
| 298 | struct ipoib_rx_buf *rx_ring; | 299 | struct ipoib_rx_buf *rx_ring; |
| 299 | 300 | ||
| @@ -305,6 +306,9 @@ struct ipoib_dev_priv { | |||
| 305 | struct ib_send_wr tx_wr; | 306 | struct ib_send_wr tx_wr; |
| 306 | unsigned tx_outstanding; | 307 | unsigned tx_outstanding; |
| 307 | 308 | ||
| 309 | struct ib_recv_wr rx_wr; | ||
| 310 | struct ib_sge rx_sge[IPOIB_UD_RX_SG]; | ||
| 311 | |||
| 308 | struct ib_wc ibwc[IPOIB_NUM_WC]; | 312 | struct ib_wc ibwc[IPOIB_NUM_WC]; |
| 309 | 313 | ||
| 310 | struct list_head dead_ahs; | 314 | struct list_head dead_ahs; |
| @@ -366,6 +370,14 @@ struct ipoib_neigh { | |||
| 366 | struct list_head list; | 370 | struct list_head list; |
| 367 | }; | 371 | }; |
| 368 | 372 | ||
| 373 | #define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN) | ||
| 374 | #define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES) | ||
| 375 | |||
| 376 | static inline int ipoib_ud_need_sg(unsigned int ib_mtu) | ||
| 377 | { | ||
| 378 | return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE; | ||
| 379 | } | ||
| 380 | |||
| 369 | /* | 381 | /* |
| 370 | * We stash a pointer to our private neighbour information after our | 382 | * We stash a pointer to our private neighbour information after our |
| 371 | * hardware address in neigh->ha. The ALIGN() expression here makes | 383 | * hardware address in neigh->ha. The ALIGN() expression here makes |
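To make the ipoib_ud_need_sg() threshold concrete: with 4 KB pages, a 2048-byte IB MTU needs IPOIB_UD_BUF_SIZE = 2048 + 40 = 2088 bytes, which fits a single receive buffer, while a 4096-byte MTU needs 4096 + 40 = 4136 bytes > PAGE_SIZE. Only in the latter case does the receive path use the two scatter/gather entries declared above: a 44-byte head (IPOIB_UD_HEAD_SIZE = 40-byte GRH + 4-byte encap) plus one full page for the payload.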
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 0205eb7c1bd3..7cf1fa7074ab 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref) | |||
| 89 | spin_unlock_irqrestore(&priv->lock, flags); | 89 | spin_unlock_irqrestore(&priv->lock, flags); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, | ||
| 93 | u64 mapping[IPOIB_UD_RX_SG]) | ||
| 94 | { | ||
| 95 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
| 96 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, | ||
| 97 | DMA_FROM_DEVICE); | ||
| 98 | ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, | ||
| 99 | DMA_FROM_DEVICE); | ||
| 100 | } else | ||
| 101 | ib_dma_unmap_single(priv->ca, mapping[0], | ||
| 102 | IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), | ||
| 103 | DMA_FROM_DEVICE); | ||
| 104 | } | ||
| 105 | |||
| 106 | static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, | ||
| 107 | struct sk_buff *skb, | ||
| 108 | unsigned int length) | ||
| 109 | { | ||
| 110 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
| 111 | skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; | ||
| 112 | unsigned int size; | ||
| 113 | /* | ||
| 114 | * Only two buffers are needed for max_payload = 4K; | ||
| 115 | * the first buffer's size is IPOIB_UD_HEAD_SIZE. | ||
| 116 | */ | ||
| 117 | skb->tail += IPOIB_UD_HEAD_SIZE; | ||
| 118 | skb->len += length; | ||
| 119 | |||
| 120 | size = length - IPOIB_UD_HEAD_SIZE; | ||
| 121 | |||
| 122 | frag->size = size; | ||
| 123 | skb->data_len += size; | ||
| 124 | skb->truesize += size; | ||
| 125 | } else | ||
| 126 | skb_put(skb, length); | ||
| 127 | |||
| 128 | } | ||
| 129 | |||
| 92 | static int ipoib_ib_post_receive(struct net_device *dev, int id) | 130 | static int ipoib_ib_post_receive(struct net_device *dev, int id) |
| 93 | { | 131 | { |
| 94 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 132 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 95 | struct ib_sge list; | ||
| 96 | struct ib_recv_wr param; | ||
| 97 | struct ib_recv_wr *bad_wr; | 133 | struct ib_recv_wr *bad_wr; |
| 98 | int ret; | 134 | int ret; |
| 99 | 135 | ||
| 100 | list.addr = priv->rx_ring[id].mapping; | 136 | priv->rx_wr.wr_id = id | IPOIB_OP_RECV; |
| 101 | list.length = IPOIB_BUF_SIZE; | 137 | priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; |
| 102 | list.lkey = priv->mr->lkey; | 138 | priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; |
| 103 | 139 | ||
| 104 | param.next = NULL; | ||
| 105 | param.wr_id = id | IPOIB_OP_RECV; | ||
| 106 | param.sg_list = &list; | ||
| 107 | param.num_sge = 1; | ||
| 108 | 140 | ||
| 109 | ret = ib_post_recv(priv->qp, ¶m, &bad_wr); | 141 | ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); |
| 110 | if (unlikely(ret)) { | 142 | if (unlikely(ret)) { |
| 111 | ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); | 143 | ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); |
| 112 | ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, | 144 | ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); |
| 113 | IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | ||
| 114 | dev_kfree_skb_any(priv->rx_ring[id].skb); | 145 | dev_kfree_skb_any(priv->rx_ring[id].skb); |
| 115 | priv->rx_ring[id].skb = NULL; | 146 | priv->rx_ring[id].skb = NULL; |
| 116 | } | 147 | } |
| @@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id) | |||
| 118 | return ret; | 149 | return ret; |
| 119 | } | 150 | } |
| 120 | 151 | ||
| 121 | static int ipoib_alloc_rx_skb(struct net_device *dev, int id) | 152 | static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) |
| 122 | { | 153 | { |
| 123 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 154 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 124 | struct sk_buff *skb; | 155 | struct sk_buff *skb; |
| 125 | u64 addr; | 156 | int buf_size; |
| 157 | u64 *mapping; | ||
| 126 | 158 | ||
| 127 | skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); | 159 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) |
| 128 | if (!skb) | 160 | buf_size = IPOIB_UD_HEAD_SIZE; |
| 129 | return -ENOMEM; | 161 | else |
| 162 | buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); | ||
| 163 | |||
| 164 | skb = dev_alloc_skb(buf_size + 4); | ||
| 165 | if (unlikely(!skb)) | ||
| 166 | return NULL; | ||
| 130 | 167 | ||
| 131 | /* | 168 | /* |
| 132 | * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte | 169 | * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte |
| @@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) | |||
| 135 | */ | 172 | */ |
| 136 | skb_reserve(skb, 4); | 173 | skb_reserve(skb, 4); |
| 137 | 174 | ||
| 138 | addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, | 175 | mapping = priv->rx_ring[id].mapping; |
| 139 | DMA_FROM_DEVICE); | 176 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, |
| 140 | if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { | 177 | DMA_FROM_DEVICE); |
| 141 | dev_kfree_skb_any(skb); | 178 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) |
| 142 | return -EIO; | 179 | goto error; |
| 180 | |||
| 181 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
| 182 | struct page *page = alloc_page(GFP_ATOMIC); | ||
| 183 | if (!page) | ||
| 184 | goto partial_error; | ||
| 185 | skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); | ||
| 186 | mapping[1] = | ||
| 187 | ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page, | ||
| 188 | 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
| 189 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) | ||
| 190 | goto partial_error; | ||
| 143 | } | 191 | } |
| 144 | 192 | ||
| 145 | priv->rx_ring[id].skb = skb; | 193 | priv->rx_ring[id].skb = skb; |
| 146 | priv->rx_ring[id].mapping = addr; | 194 | return skb; |
| 147 | 195 | ||
| 148 | return 0; | 196 | partial_error: |
| 197 | ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); | ||
| 198 | error: | ||
| 199 | dev_kfree_skb_any(skb); | ||
| 200 | return NULL; | ||
| 149 | } | 201 | } |
| 150 | 202 | ||
| 151 | static int ipoib_ib_post_receives(struct net_device *dev) | 203 | static int ipoib_ib_post_receives(struct net_device *dev) |
| @@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev) | |||
| 154 | int i; | 206 | int i; |
| 155 | 207 | ||
| 156 | for (i = 0; i < ipoib_recvq_size; ++i) { | 208 | for (i = 0; i < ipoib_recvq_size; ++i) { |
| 157 | if (ipoib_alloc_rx_skb(dev, i)) { | 209 | if (!ipoib_alloc_rx_skb(dev, i)) { |
| 158 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); | 210 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); |
| 159 | return -ENOMEM; | 211 | return -ENOMEM; |
| 160 | } | 212 | } |
| @@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 172 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 224 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
| 173 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; | 225 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; |
| 174 | struct sk_buff *skb; | 226 | struct sk_buff *skb; |
| 175 | u64 addr; | 227 | u64 mapping[IPOIB_UD_RX_SG]; |
| 176 | 228 | ||
| 177 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", | 229 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", |
| 178 | wr_id, wc->status); | 230 | wr_id, wc->status); |
| @@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 184 | } | 236 | } |
| 185 | 237 | ||
| 186 | skb = priv->rx_ring[wr_id].skb; | 238 | skb = priv->rx_ring[wr_id].skb; |
| 187 | addr = priv->rx_ring[wr_id].mapping; | ||
| 188 | 239 | ||
| 189 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | 240 | if (unlikely(wc->status != IB_WC_SUCCESS)) { |
| 190 | if (wc->status != IB_WC_WR_FLUSH_ERR) | 241 | if (wc->status != IB_WC_WR_FLUSH_ERR) |
| 191 | ipoib_warn(priv, "failed recv event " | 242 | ipoib_warn(priv, "failed recv event " |
| 192 | "(status=%d, wrid=%d vend_err %x)\n", | 243 | "(status=%d, wrid=%d vend_err %x)\n", |
| 193 | wc->status, wr_id, wc->vendor_err); | 244 | wc->status, wr_id, wc->vendor_err); |
| 194 | ib_dma_unmap_single(priv->ca, addr, | 245 | ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); |
| 195 | IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | ||
| 196 | dev_kfree_skb_any(skb); | 246 | dev_kfree_skb_any(skb); |
| 197 | priv->rx_ring[wr_id].skb = NULL; | 247 | priv->rx_ring[wr_id].skb = NULL; |
| 198 | return; | 248 | return; |
| @@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 205 | if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) | 255 | if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) |
| 206 | goto repost; | 256 | goto repost; |
| 207 | 257 | ||
| 258 | memcpy(mapping, priv->rx_ring[wr_id].mapping, | ||
| 259 | IPOIB_UD_RX_SG * sizeof *mapping); | ||
| 260 | |||
| 208 | /* | 261 | /* |
| 209 | * If we can't allocate a new RX buffer, dump | 262 | * If we can't allocate a new RX buffer, dump |
| 210 | * this packet and reuse the old buffer. | 263 | * this packet and reuse the old buffer. |
| 211 | */ | 264 | */ |
| 212 | if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { | 265 | if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { |
| 213 | ++dev->stats.rx_dropped; | 266 | ++dev->stats.rx_dropped; |
| 214 | goto repost; | 267 | goto repost; |
| 215 | } | 268 | } |
| @@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 217 | ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", | 270 | ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", |
| 218 | wc->byte_len, wc->slid); | 271 | wc->byte_len, wc->slid); |
| 219 | 272 | ||
| 220 | ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); | 273 | ipoib_ud_dma_unmap_rx(priv, mapping); |
| 274 | ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); | ||
| 221 | 275 | ||
| 222 | skb_put(skb, wc->byte_len); | ||
| 223 | skb_pull(skb, IB_GRH_BYTES); | 276 | skb_pull(skb, IB_GRH_BYTES); |
| 224 | 277 | ||
| 225 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 278 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
| @@ -733,10 +786,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) | |||
| 733 | rx_req = &priv->rx_ring[i]; | 786 | rx_req = &priv->rx_ring[i]; |
| 734 | if (!rx_req->skb) | 787 | if (!rx_req->skb) |
| 735 | continue; | 788 | continue; |
| 736 | ib_dma_unmap_single(priv->ca, | 789 | ipoib_ud_dma_unmap_rx(priv, |
| 737 | rx_req->mapping, | 790 | priv->rx_ring[i].mapping); |
| 738 | IPOIB_BUF_SIZE, | ||
| 739 | DMA_FROM_DEVICE); | ||
| 740 | dev_kfree_skb_any(rx_req->skb); | 791 | dev_kfree_skb_any(rx_req->skb); |
| 741 | rx_req->skb = NULL; | 792 | rx_req->skb = NULL; |
| 742 | } | 793 | } |
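Two details of the reworked receive path are easy to miss: the 4-byte skb_reserve() keeps the IP header 16-byte aligned once the 40-byte GRH and 4-byte encapsulation header are pulled (4 + 40 + 4 = 48, a multiple of 16), and the partial_error/error labels unwind the two-stage mapping in reverse order, so a failed page mapping still releases the head mapping before the skb is freed.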
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index bd07f02cf02b..7a4ed9d3d844 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu) | |||
| 195 | return 0; | 195 | return 0; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) | 198 | if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) |
| 199 | return -EINVAL; | 199 | return -EINVAL; |
| 200 | 200 | ||
| 201 | priv->admin_mtu = new_mtu; | 201 | priv->admin_mtu = new_mtu; |
| @@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev) | |||
| 971 | NETIF_F_LLTX | | 971 | NETIF_F_LLTX | |
| 972 | NETIF_F_HIGHDMA); | 972 | NETIF_F_HIGHDMA); |
| 973 | 973 | ||
| 974 | /* MTU will be reset when mcast join happens */ | ||
| 975 | dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; | ||
| 976 | priv->mcast_mtu = priv->admin_mtu = dev->mtu; | ||
| 977 | |||
| 978 | memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); | 974 | memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); |
| 979 | 975 | ||
| 980 | netif_carrier_off(dev); | 976 | netif_carrier_off(dev); |
| @@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 1107 | { | 1103 | { |
| 1108 | struct ipoib_dev_priv *priv; | 1104 | struct ipoib_dev_priv *priv; |
| 1109 | struct ib_device_attr *device_attr; | 1105 | struct ib_device_attr *device_attr; |
| 1106 | struct ib_port_attr attr; | ||
| 1110 | int result = -ENOMEM; | 1107 | int result = -ENOMEM; |
| 1111 | 1108 | ||
| 1112 | priv = ipoib_intf_alloc(format); | 1109 | priv = ipoib_intf_alloc(format); |
| @@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 1115 | 1112 | ||
| 1116 | SET_NETDEV_DEV(priv->dev, hca->dma_device); | 1113 | SET_NETDEV_DEV(priv->dev, hca->dma_device); |
| 1117 | 1114 | ||
| 1115 | if (!ib_query_port(hca, port, &attr)) | ||
| 1116 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | ||
| 1117 | else { | ||
| 1118 | printk(KERN_WARNING "%s: ib_query_port %d failed\n", | ||
| 1119 | hca->name, port); | ||
| 1120 | goto device_init_failed; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | /* MTU will be reset when mcast join happens */ | ||
| 1124 | priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); | ||
| 1125 | priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; | ||
| 1126 | |||
| 1118 | result = ib_query_pkey(hca, port, 0, &priv->pkey); | 1127 | result = ib_query_pkey(hca, port, 0, &priv->pkey); |
| 1119 | if (result) { | 1128 | if (result) { |
| 1120 | printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", | 1129 | printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n", |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 31a53c5bcb13..d00a2c174aee 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 567 | return; | 567 | return; |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) - | 570 | priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); |
| 571 | IPOIB_ENCAP_LEN; | ||
| 572 | 571 | ||
| 573 | if (!ipoib_cm_admin_enabled(dev)) | 572 | if (!ipoib_cm_admin_enabled(dev)) |
| 574 | dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); | 573 | dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 8a20e3742c43..07c03f178a49 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
| @@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
| 150 | .max_send_wr = ipoib_sendq_size, | 150 | .max_send_wr = ipoib_sendq_size, |
| 151 | .max_recv_wr = ipoib_recvq_size, | 151 | .max_recv_wr = ipoib_recvq_size, |
| 152 | .max_send_sge = 1, | 152 | .max_send_sge = 1, |
| 153 | .max_recv_sge = 1 | 153 | .max_recv_sge = IPOIB_UD_RX_SG |
| 154 | }, | 154 | }, |
| 155 | .sq_sig_type = IB_SIGNAL_ALL_WR, | 155 | .sq_sig_type = IB_SIGNAL_ALL_WR, |
| 156 | .qp_type = IB_QPT_UD | 156 | .qp_type = IB_QPT_UD |
| @@ -215,6 +215,19 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
| 215 | priv->tx_wr.sg_list = priv->tx_sge; | 215 | priv->tx_wr.sg_list = priv->tx_sge; |
| 216 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; | 216 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; |
| 217 | 217 | ||
| 218 | priv->rx_sge[0].lkey = priv->mr->lkey; | ||
| 219 | if (ipoib_ud_need_sg(priv->max_ib_mtu)) { | ||
| 220 | priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE; | ||
| 221 | priv->rx_sge[1].length = PAGE_SIZE; | ||
| 222 | priv->rx_sge[1].lkey = priv->mr->lkey; | ||
| 223 | priv->rx_wr.num_sge = IPOIB_UD_RX_SG; | ||
| 224 | } else { | ||
| 225 | priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); | ||
| 226 | priv->rx_wr.num_sge = 1; | ||
| 227 | } | ||
| 228 | priv->rx_wr.next = NULL; | ||
| 229 | priv->rx_wr.sg_list = priv->rx_sge; | ||
| 230 | |||
| 218 | return 0; | 231 | return 0; |
| 219 | 232 | ||
| 220 | out_free_cq: | 233 | out_free_cq: |
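Setting up rx_wr and rx_sge once at transport init is what lets ipoib_ib_post_receive() shrink to patching wr_id and the per-buffer addresses: the lengths, lkeys, num_sge, and sg_list pointer never change after QP creation, so the work request does not have to be rebuilt on every repost.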
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 293f5b892e3f..431fdeaa2dc4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -89,6 +89,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 89 | goto err; | 89 | goto err; |
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | priv->max_ib_mtu = ppriv->max_ib_mtu; | ||
| 92 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); | 93 | set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); |
| 93 | 94 | ||
| 94 | priv->pkey = pkey; | 95 | priv->pkey = pkey; |
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index 75ef9d0d974d..f9d6b4dca180 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
| @@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
| 196 | } | 196 | } |
| 197 | } | 197 | } |
| 198 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | 198 | EXPORT_SYMBOL_GPL(mlx4_buf_free); |
| 199 | |||
| 200 | static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) | ||
| 201 | { | ||
| 202 | struct mlx4_db_pgdir *pgdir; | ||
| 203 | |||
| 204 | pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); | ||
| 205 | if (!pgdir) | ||
| 206 | return NULL; | ||
| 207 | |||
| 208 | bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); | ||
| 209 | pgdir->bits[0] = pgdir->order0; | ||
| 210 | pgdir->bits[1] = pgdir->order1; | ||
| 211 | pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, | ||
| 212 | &pgdir->db_dma, GFP_KERNEL); | ||
| 213 | if (!pgdir->db_page) { | ||
| 214 | kfree(pgdir); | ||
| 215 | return NULL; | ||
| 216 | } | ||
| 217 | |||
| 218 | return pgdir; | ||
| 219 | } | ||
| 220 | |||
| 221 | static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, | ||
| 222 | struct mlx4_db *db, int order) | ||
| 223 | { | ||
| 224 | int o; | ||
| 225 | int i; | ||
| 226 | |||
| 227 | for (o = order; o <= 1; ++o) { | ||
| 228 | i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); | ||
| 229 | if (i < MLX4_DB_PER_PAGE >> o) | ||
| 230 | goto found; | ||
| 231 | } | ||
| 232 | |||
| 233 | return -ENOMEM; | ||
| 234 | |||
| 235 | found: | ||
| 236 | clear_bit(i, pgdir->bits[o]); | ||
| 237 | |||
| 238 | i <<= o; | ||
| 239 | |||
| 240 | if (o > order) | ||
| 241 | set_bit(i ^ 1, pgdir->bits[order]); | ||
| 242 | |||
| 243 | db->u.pgdir = pgdir; | ||
| 244 | db->index = i; | ||
| 245 | db->db = pgdir->db_page + db->index; | ||
| 246 | db->dma = pgdir->db_dma + db->index * 4; | ||
| 247 | db->order = order; | ||
| 248 | |||
| 249 | return 0; | ||
| 250 | } | ||
| 251 | |||
| 252 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) | ||
| 253 | { | ||
| 254 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 255 | struct mlx4_db_pgdir *pgdir; | ||
| 256 | int ret = 0; | ||
| 257 | |||
| 258 | mutex_lock(&priv->pgdir_mutex); | ||
| 259 | |||
| 260 | list_for_each_entry(pgdir, &priv->pgdir_list, list) | ||
| 261 | if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) | ||
| 262 | goto out; | ||
| 263 | |||
| 264 | pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); | ||
| 265 | if (!pgdir) { | ||
| 266 | ret = -ENOMEM; | ||
| 267 | goto out; | ||
| 268 | } | ||
| 269 | |||
| 270 | list_add(&pgdir->list, &priv->pgdir_list); | ||
| 271 | |||
| 272 | /* This should never fail -- we just allocated an empty page: */ | ||
| 273 | WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); | ||
| 274 | |||
| 275 | out: | ||
| 276 | mutex_unlock(&priv->pgdir_mutex); | ||
| 277 | |||
| 278 | return ret; | ||
| 279 | } | ||
| 280 | EXPORT_SYMBOL_GPL(mlx4_db_alloc); | ||
| 281 | |||
| 282 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) | ||
| 283 | { | ||
| 284 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 285 | int o; | ||
| 286 | int i; | ||
| 287 | |||
| 288 | mutex_lock(&priv->pgdir_mutex); | ||
| 289 | |||
| 290 | o = db->order; | ||
| 291 | i = db->index; | ||
| 292 | |||
| 293 | if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { | ||
| 294 | clear_bit(i ^ 1, db->u.pgdir->order0); | ||
| 295 | ++o; | ||
| 296 | } | ||
| 297 | i >>= o; | ||
| 298 | set_bit(i, db->u.pgdir->bits[o]); | ||
| 299 | |||
| 300 | if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { | ||
| 301 | dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, | ||
| 302 | db->u.pgdir->db_page, db->u.pgdir->db_dma); | ||
| 303 | list_del(&db->u.pgdir->list); | ||
| 304 | kfree(db->u.pgdir); | ||
| 305 | } | ||
| 306 | |||
| 307 | mutex_unlock(&priv->pgdir_mutex); | ||
| 308 | } | ||
| 309 | EXPORT_SYMBOL_GPL(mlx4_db_free); | ||
| 310 | |||
| 311 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
| 312 | int size, int max_direct) | ||
| 313 | { | ||
| 314 | int err; | ||
| 315 | |||
| 316 | err = mlx4_db_alloc(dev, &wqres->db, 1); | ||
| 317 | if (err) | ||
| 318 | return err; | ||
| 319 | |||
| 320 | *wqres->db.db = 0; | ||
| 321 | |||
| 322 | err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); | ||
| 323 | if (err) | ||
| 324 | goto err_db; | ||
| 325 | |||
| 326 | err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, | ||
| 327 | &wqres->mtt); | ||
| 328 | if (err) | ||
| 329 | goto err_buf; | ||
| 330 | |||
| 331 | err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); | ||
| 332 | if (err) | ||
| 333 | goto err_mtt; | ||
| 334 | |||
| 335 | return 0; | ||
| 336 | |||
| 337 | err_mtt: | ||
| 338 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
| 339 | err_buf: | ||
| 340 | mlx4_buf_free(dev, size, &wqres->buf); | ||
| 341 | err_db: | ||
| 342 | mlx4_db_free(dev, &wqres->db); | ||
| 343 | |||
| 344 | return err; | ||
| 345 | } | ||
| 346 | EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); | ||
| 347 | |||
| 348 | void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
| 349 | int size) | ||
| 350 | { | ||
| 351 | mlx4_mtt_cleanup(dev, &wqres->mtt); | ||
| 352 | mlx4_buf_free(dev, size, &wqres->buf); | ||
| 353 | mlx4_db_free(dev, &wqres->db); | ||
| 354 | } | ||
| 355 | EXPORT_SYMBOL_GPL(mlx4_free_hwq_res); | ||
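Doorbell records are 4 bytes, so one coherent page holds MLX4_DB_PER_PAGE of them, managed as a two-level buddy allocator: the order-1 bitmap starts full (every aligned pair free) and the order-0 bitmap starts empty. An order-0 request on a fresh pgdir finds no free order-0 slot, splits pair 0 at order 1, takes index 0, and marks its buddy (0 ^ 1 = 1) free at order 0; mlx4_db_free() coalesces by testing that same i ^ 1 buddy bit and hands the page back once the order-1 bitmap is full again. A minimal consumer sketch of the exported pair:

	struct mlx4_db db;
	int err;

	err = mlx4_db_alloc(dev, &db, 0);	/* one 4-byte doorbell record */
	if (err)
		return err;

	*db.db = 0;	/* CPU writes the record through db.db; */
			/* hardware contexts are given db.dma  */

	mlx4_db_free(dev, &db);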
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c index caa5bcf54e35..6fda0af9d0a6 100644 --- a/drivers/net/mlx4/cq.c +++ b/drivers/net/mlx4/cq.c | |||
| @@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq, | |||
| 180 | cq_context->mtt_base_addr_h = mtt_addr >> 32; | 180 | cq_context->mtt_base_addr_h = mtt_addr >> 32; |
| 181 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); | 181 | cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); |
| 182 | 182 | ||
| 183 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); | 183 | err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); |
| 184 | 184 | ||
| 185 | mlx4_free_cmd_mailbox(dev, mailbox); | 185 | mlx4_free_cmd_mailbox(dev, mailbox); |
| 186 | return err; | 186 | return err; |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 49a4acab5e82..a6aa49fc1d68 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c | |||
| @@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 798 | INIT_LIST_HEAD(&priv->ctx_list); | 798 | INIT_LIST_HEAD(&priv->ctx_list); |
| 799 | spin_lock_init(&priv->ctx_lock); | 799 | spin_lock_init(&priv->ctx_lock); |
| 800 | 800 | ||
| 801 | INIT_LIST_HEAD(&priv->pgdir_list); | ||
| 802 | mutex_init(&priv->pgdir_mutex); | ||
| 803 | |||
| 801 | /* | 804 | /* |
| 802 | * Now reset the HCA before we touch the PCI capabilities or | 805 | * Now reset the HCA before we touch the PCI capabilities or |
| 803 | * attempt a firmware command, since a boot ROM may have left | 806 | * attempt a firmware command, since a boot ROM may have left |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h index 73336810e652..a4023c2dd050 100644 --- a/drivers/net/mlx4/mlx4.h +++ b/drivers/net/mlx4/mlx4.h | |||
| @@ -257,6 +257,9 @@ struct mlx4_priv { | |||
| 257 | struct list_head ctx_list; | 257 | struct list_head ctx_list; |
| 258 | spinlock_t ctx_lock; | 258 | spinlock_t ctx_lock; |
| 259 | 259 | ||
| 260 | struct list_head pgdir_list; | ||
| 261 | struct mutex pgdir_mutex; | ||
| 262 | |||
| 260 | struct mlx4_fw fw; | 263 | struct mlx4_fw fw; |
| 261 | struct mlx4_cmd cmd; | 264 | struct mlx4_cmd cmd; |
| 262 | 265 | ||
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c index fa24e6597591..ee5484c44a18 100644 --- a/drivers/net/mlx4/qp.c +++ b/drivers/net/mlx4/qp.c | |||
| @@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | |||
| 299 | } | 299 | } |
| 300 | EXPORT_SYMBOL_GPL(mlx4_qp_query); | 300 | EXPORT_SYMBOL_GPL(mlx4_qp_query); |
| 301 | 301 | ||
| 302 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 303 | struct mlx4_qp_context *context, | ||
| 304 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) | ||
| 305 | { | ||
| 306 | int err; | ||
| 307 | int i; | ||
| 308 | enum mlx4_qp_state states[] = { | ||
| 309 | MLX4_QP_STATE_RST, | ||
| 310 | MLX4_QP_STATE_INIT, | ||
| 311 | MLX4_QP_STATE_RTR, | ||
| 312 | MLX4_QP_STATE_RTS | ||
| 313 | }; | ||
| 314 | |||
| 315 | for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { | ||
| 316 | context->flags &= cpu_to_be32(~(0xf << 28)); | ||
| 317 | context->flags |= cpu_to_be32(states[i + 1] << 28); | ||
| 318 | err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], | ||
| 319 | context, 0, 0, qp); | ||
| 320 | if (err) { | ||
| 321 | mlx4_err(dev, "Failed to bring QP to state: " | ||
| 322 | "%d with error: %d\n", | ||
| 323 | states[i + 1], err); | ||
| 324 | return err; | ||
| 325 | } | ||
| 326 | |||
| 327 | *qp_state = states[i + 1]; | ||
| 328 | } | ||
| 329 | |||
| 330 | return 0; | ||
| 331 | } | ||
| 332 | EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); | ||
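mlx4_qp_to_ready() walks a QP through RST -> INIT -> RTR -> RTS, rewriting the state nibble in bits 31:28 of context->flags before each step and recording the last state reached in *qp_state. A hedged usage sketch; note the caller initializes qp_state to MLX4_QP_STATE_RST itself, since a failure on the very first transition returns before the loop ever stores to it:

	enum mlx4_qp_state qp_state = MLX4_QP_STATE_RST;
	int err;

	err = mlx4_qp_to_ready(dev, &mtt, &context, &qp, &qp_state);
	if (err && qp_state != MLX4_QP_STATE_RST)
		/* one modify drops the QP from wherever it stalled back to reset */
		mlx4_qp_modify(dev, &mtt, qp_state, MLX4_QP_STATE_RST,
			       &context, 0, 0, &qp);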
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ff7df1a2222f..9fa1a8002ce2 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -208,6 +208,38 @@ struct mlx4_mtt { | |||
| 208 | int page_shift; | 208 | int page_shift; |
| 209 | }; | 209 | }; |
| 210 | 210 | ||
| 211 | enum { | ||
| 212 | MLX4_DB_PER_PAGE = PAGE_SIZE / 4 | ||
| 213 | }; | ||
| 214 | |||
| 215 | struct mlx4_db_pgdir { | ||
| 216 | struct list_head list; | ||
| 217 | DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); | ||
| 218 | DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); | ||
| 219 | unsigned long *bits[2]; | ||
| 220 | __be32 *db_page; | ||
| 221 | dma_addr_t db_dma; | ||
| 222 | }; | ||
| 223 | |||
| 224 | struct mlx4_ib_user_db_page; | ||
| 225 | |||
| 226 | struct mlx4_db { | ||
| 227 | __be32 *db; | ||
| 228 | union { | ||
| 229 | struct mlx4_db_pgdir *pgdir; | ||
| 230 | struct mlx4_ib_user_db_page *user_page; | ||
| 231 | } u; | ||
| 232 | dma_addr_t dma; | ||
| 233 | int index; | ||
| 234 | int order; | ||
| 235 | }; | ||
| 236 | |||
| 237 | struct mlx4_hwq_resources { | ||
| 238 | struct mlx4_db db; | ||
| 239 | struct mlx4_mtt mtt; | ||
| 240 | struct mlx4_buf buf; | ||
| 241 | }; | ||
| 242 | |||
| 211 | struct mlx4_mr { | 243 | struct mlx4_mr { |
| 212 | struct mlx4_mtt mtt; | 244 | struct mlx4_mtt mtt; |
| 213 | u64 iova; | 245 | u64 iova; |
| @@ -341,6 +373,14 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
| 341 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | 373 | int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
| 342 | struct mlx4_buf *buf); | 374 | struct mlx4_buf *buf); |
| 343 | 375 | ||
| 376 | int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); | ||
| 377 | void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); | ||
| 378 | |||
| 379 | int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, | ||
| 380 | int size, int max_direct); | ||
| 381 | void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, | ||
| 382 | int size); | ||
| 383 | |||
| 344 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, | 384 | int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, |
| 345 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); | 385 | struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq); |
| 346 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); | 386 | void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); |
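struct mlx4_hwq_resources bundles the three objects every hardware work queue needs (doorbell record, MTT translation, queue buffer) behind one alloc/free pair. A minimal sketch with an illustrative size; the order-1 doorbell and the MTT write-out are handled internally, exactly as in mlx4_alloc_hwq_res() above:

	struct mlx4_hwq_resources wqres;
	int err;

	err = mlx4_alloc_hwq_res(dev, &wqres, 16384, 2 * PAGE_SIZE);
	if (err)
		return err;

	/* wqres.db.dma, &wqres.mtt and wqres.buf are now ready to be
	 * programmed into a QP or CQ hardware context. */

	mlx4_free_hwq_res(dev, &wqres, 16384);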
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index a5e43febee4f..7f128b266faa 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
| 296 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | 296 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, |
| 297 | struct mlx4_qp_context *context); | 297 | struct mlx4_qp_context *context); |
| 298 | 298 | ||
| 299 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | ||
| 300 | struct mlx4_qp_context *context, | ||
| 301 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); | ||
| 302 | |||
| 299 | static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) | 303 | static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) |
| 300 | { | 304 | { |
| 301 | return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); | 305 | return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); |
