about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorIshai Rabinovitz <ishai@mellanox.co.il>2006-03-02 01:33:11 -0500
committerRoland Dreier <rolandd@cisco.com>2006-03-20 13:08:19 -0500
commit8d3ef29d6be1e750512e0a9dbea6225290b81d0a (patch)
tree9384d9275d5122574665db18b5196f9c9a96d7ac /drivers
parent67e7377661db4e341ed5e9a0358d11a55e532aa8 (diff)
IB/mthca: Use an enum for HCA page size
Use a named enum for the HCA's internal page size, rather than having magic values of 4096 and shifts by 12 all over the code. Also, fix one minor bug in EQ handling: only one HCA page is mapped to the HCA during initialization, but a full kernel page is unmapped during cleanup. This might cause problems when PAGE_SIZE != 4096. Signed-off-by: Ishai Rabinovitz <ishai@mellanox.co.il> Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c26
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c29
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.h10
4 files changed, 37 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index d1e7ecb5f233..948a2861cae3 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -652,8 +652,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
652 * address or size and use that as our log2 size. 652 * address or size and use that as our log2 size.
653 */ 653 */
654 lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1; 654 lg = ffs(mthca_icm_addr(&iter) | mthca_icm_size(&iter)) - 1;
655 if (lg < 12) { 655 if (lg < MTHCA_ICM_PAGE_SHIFT) {
656 mthca_warn(dev, "Got FW area not aligned to 4K (%llx/%lx).\n", 656 mthca_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
657 MTHCA_ICM_PAGE_SIZE,
657 (unsigned long long) mthca_icm_addr(&iter), 658 (unsigned long long) mthca_icm_addr(&iter),
658 mthca_icm_size(&iter)); 659 mthca_icm_size(&iter));
659 err = -EINVAL; 660 err = -EINVAL;
@@ -665,8 +666,9 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
665 virt += 1 << lg; 666 virt += 1 << lg;
666 } 667 }
667 668
668 pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) + 669 pages[nent * 2 + 1] =
669 (i << lg)) | (lg - 12)); 670 cpu_to_be64((mthca_icm_addr(&iter) + (i << lg)) |
671 (lg - MTHCA_ICM_PAGE_SHIFT));
670 ts += 1 << (lg - 10); 672 ts += 1 << (lg - 10);
671 ++tc; 673 ++tc;
672 674
@@ -822,12 +824,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
822 mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2); 824 mthca_dbg(dev, "FW size %d KB\n", dev->fw.arbel.fw_pages << 2);
823 825
824 /* 826 /*
825 * Arbel page size is always 4 KB; round up number of 827 * Round up number of system pages needed in case
826 * system pages needed. 828 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
827 */ 829 */
828 dev->fw.arbel.fw_pages = 830 dev->fw.arbel.fw_pages =
829 ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >> 831 ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
830 (PAGE_SHIFT - 12); 832 (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
831 833
832 mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n", 834 mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
833 (unsigned long long) dev->fw.arbel.clr_int_base, 835 (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1540,11 +1542,11 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1540 return ret; 1542 return ret;
1541 1543
1542 /* 1544 /*
1543 * Arbel page size is always 4 KB; round up number of system 1545 * Round up number of system pages needed in case
1544 * pages needed. 1546 * MTHCA_ICM_PAGE_SIZE < PAGE_SIZE.
1545 */ 1547 */
1546 *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12); 1548 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MTHCA_ICM_PAGE_SIZE) >>
1547 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12); 1549 (PAGE_SHIFT - MTHCA_ICM_PAGE_SHIFT);
1548 1550
1549 return 0; 1551 return 0;
1550} 1552}
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cf43a5388397..a44b12dd7952 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -825,7 +825,7 @@ void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
825{ 825{
826 u8 status; 826 u8 status;
827 827
828 mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status); 828 mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
829 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, 829 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
830 PCI_DMA_BIDIRECTIONAL); 830 PCI_DMA_BIDIRECTIONAL);
831 __free_page(dev->eq_table.icm_page); 831 __free_page(dev->eq_table.icm_page);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index d709cb162a72..15cc2f6eb475 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -202,7 +202,8 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
202 202
203 if (--table->icm[i]->refcount == 0) { 203 if (--table->icm[i]->refcount == 0) {
204 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 204 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
205 MTHCA_TABLE_CHUNK_SIZE >> 12, &status); 205 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
206 &status);
206 mthca_free_icm(dev, table->icm[i]); 207 mthca_free_icm(dev, table->icm[i]);
207 table->icm[i] = NULL; 208 table->icm[i] = NULL;
208 } 209 }
@@ -336,7 +337,8 @@ err:
336 for (i = 0; i < num_icm; ++i) 337 for (i = 0; i < num_icm; ++i)
337 if (table->icm[i]) { 338 if (table->icm[i]) {
338 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, 339 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
339 MTHCA_TABLE_CHUNK_SIZE >> 12, &status); 340 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
341 &status);
340 mthca_free_icm(dev, table->icm[i]); 342 mthca_free_icm(dev, table->icm[i]);
341 } 343 }
342 344
@@ -353,7 +355,8 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
353 for (i = 0; i < table->num_icm; ++i) 355 for (i = 0; i < table->num_icm; ++i)
354 if (table->icm[i]) { 356 if (table->icm[i]) {
355 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 357 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
356 MTHCA_TABLE_CHUNK_SIZE >> 12, &status); 358 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
359 &status);
357 mthca_free_icm(dev, table->icm[i]); 360 mthca_free_icm(dev, table->icm[i]);
358 } 361 }
359 362
@@ -364,7 +367,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
364{ 367{
365 return dev->uar_table.uarc_base + 368 return dev->uar_table.uarc_base +
366 uar->index * dev->uar_table.uarc_size + 369 uar->index * dev->uar_table.uarc_size +
367 page * 4096; 370 page * MTHCA_ICM_PAGE_SIZE;
368} 371}
369 372
370int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, 373int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
@@ -401,7 +404,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
401 if (ret < 0) 404 if (ret < 0)
402 goto out; 405 goto out;
403 406
404 db_tab->page[i].mem.length = 4096; 407 db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
405 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; 408 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
406 409
407 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 410 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -455,7 +458,7 @@ struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
455 if (!mthca_is_memfree(dev)) 458 if (!mthca_is_memfree(dev))
456 return NULL; 459 return NULL;
457 460
458 npages = dev->uar_table.uarc_size / 4096; 461 npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
459 db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); 462 db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
460 if (!db_tab) 463 if (!db_tab)
461 return ERR_PTR(-ENOMEM); 464 return ERR_PTR(-ENOMEM);
@@ -478,7 +481,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
478 if (!mthca_is_memfree(dev)) 481 if (!mthca_is_memfree(dev))
479 return; 482 return;
480 483
481 for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) { 484 for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
482 if (db_tab->page[i].uvirt) { 485 if (db_tab->page[i].uvirt) {
483 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); 486 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
484 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 487 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
@@ -551,20 +554,20 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
551 page = dev->db_tab->page + end; 554 page = dev->db_tab->page + end;
552 555
553alloc: 556alloc:
554 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096, 557 page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
555 &page->mapping, GFP_KERNEL); 558 &page->mapping, GFP_KERNEL);
556 if (!page->db_rec) { 559 if (!page->db_rec) {
557 ret = -ENOMEM; 560 ret = -ENOMEM;
558 goto out; 561 goto out;
559 } 562 }
560 memset(page->db_rec, 0, 4096); 563 memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
561 564
562 ret = mthca_MAP_ICM_page(dev, page->mapping, 565 ret = mthca_MAP_ICM_page(dev, page->mapping,
563 mthca_uarc_virt(dev, &dev->driver_uar, i), &status); 566 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
564 if (!ret && status) 567 if (!ret && status)
565 ret = -EINVAL; 568 ret = -EINVAL;
566 if (ret) { 569 if (ret) {
567 dma_free_coherent(&dev->pdev->dev, 4096, 570 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
568 page->db_rec, page->mapping); 571 page->db_rec, page->mapping);
569 goto out; 572 goto out;
570 } 573 }
@@ -612,7 +615,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
612 i >= dev->db_tab->max_group1 - 1) { 615 i >= dev->db_tab->max_group1 - 1) {
613 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 616 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
614 617
615 dma_free_coherent(&dev->pdev->dev, 4096, 618 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
616 page->db_rec, page->mapping); 619 page->db_rec, page->mapping);
617 page->db_rec = NULL; 620 page->db_rec = NULL;
618 621
@@ -640,7 +643,7 @@ int mthca_init_db_tab(struct mthca_dev *dev)
640 643
641 mutex_init(&dev->db_tab->mutex); 644 mutex_init(&dev->db_tab->mutex);
642 645
643 dev->db_tab->npages = dev->uar_table.uarc_size / 4096; 646 dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
644 dev->db_tab->max_group1 = 0; 647 dev->db_tab->max_group1 = 0;
645 dev->db_tab->min_group2 = dev->db_tab->npages - 1; 648 dev->db_tab->min_group2 = dev->db_tab->npages - 1;
646 649
@@ -681,7 +684,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
681 684
682 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 685 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
683 686
684 dma_free_coherent(&dev->pdev->dev, 4096, 687 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
685 dev->db_tab->page[i].db_rec, 688 dev->db_tab->page[i].db_rec,
686 dev->db_tab->page[i].mapping); 689 dev->db_tab->page[i].mapping);
687 } 690 }
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index 36f1141a08aa..6d42947e1dc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -45,6 +45,12 @@
45 ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ 45 ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \
46 (sizeof (struct scatterlist))) 46 (sizeof (struct scatterlist)))
47 47
48enum {
49 MTHCA_ICM_PAGE_SHIFT = 12,
50 MTHCA_ICM_PAGE_SIZE = 1 << MTHCA_ICM_PAGE_SHIFT,
51 MTHCA_DB_REC_PER_PAGE = MTHCA_ICM_PAGE_SIZE / 8
52};
53
48struct mthca_icm_chunk { 54struct mthca_icm_chunk {
49 struct list_head list; 55 struct list_head list;
50 int npages; 56 int npages;
@@ -131,10 +137,6 @@ static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter)
131 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 137 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
132} 138}
133 139
134enum {
135 MTHCA_DB_REC_PER_PAGE = 4096 / 8
136};
137
138struct mthca_db_page { 140struct mthca_db_page {
139 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); 141 DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE);
140 __be64 *db_rec; 142 __be64 *db_rec;