author		Michael Chan <mchan@broadcom.com>	2014-03-17 23:19:07 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-18 16:02:16 -0400
commit		be1fefc21433f6202fcd76bdc7916e557fe80b9a (patch)
tree		48f0fcfefc21f1be95c27673435c8c5cf2a5e15c
parent		f7bd12d09ed6e4093a56dbbfbe8411cc52a738d1 (diff)
cnic,bnx2i,bnx2fc: Fix inconsistent use of page size
The bnx2/bnx2x rings are made up of linked pages. However, there is an
upper limit on the page size, as some of the page size settings are
16-bit in the hardware/firmware interface. In the current code, some
parts use BNX2_PAGE_SIZE, which has a 16K upper limit, and some parts
use PAGE_SIZE. On archs with a PAGE_SIZE of 64K or more, this generates
compile warnings. Define a new CNIC_PAGE_SIZE, which has an upper limit
of 16K, and use it consistently in all relevant parts.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
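
For reference, a minimal standalone sketch of the capping logic this patch
introduces (mirroring the CNIC_PAGE_* macros added to cnic_if.h below;
PAGE_SHIFT is hard-coded to 16 here to stand in for a 64K-page architecture,
and the kernel's ALIGN() helper is open-coded):

#include <stdio.h>

#define PAGE_SHIFT 16	/* assumption: a 64K-page architecture */

/* Cap the ring page size at 16K (2^14) so that derived values always
 * fit the 16-bit page size fields in the hardware/firmware interface.
 */
#if (PAGE_SHIFT > 14)
#define CNIC_PAGE_BITS	14
#else
#define CNIC_PAGE_BITS	PAGE_SHIFT
#endif
#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
/* Open-coded stand-in for the kernel's ALIGN(addr, CNIC_PAGE_SIZE) */
#define CNIC_PAGE_ALIGN(addr) (((addr) + CNIC_PAGE_SIZE - 1) & CNIC_PAGE_MASK)

int main(void)
{
	/* Capped at 16384 even though the CPU page is 65536 bytes */
	printf("CNIC_PAGE_SIZE = %d\n", CNIC_PAGE_SIZE);
	/* A 20000-byte ring request rounds up to two 16K pages (32768) */
	printf("CNIC_PAGE_ALIGN(20000) = %d\n", CNIC_PAGE_ALIGN(20000));
	return 0;
}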
-rw-r--r--	drivers/net/ethernet/broadcom/cnic.c	| 104
-rw-r--r--	drivers/net/ethernet/broadcom/cnic_if.h	|  10
-rw-r--r--	drivers/scsi/bnx2fc/bnx2fc_io.c	|  16
-rw-r--r--	drivers/scsi/bnx2fc/bnx2fc_tgt.c	|  38
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_hwi.c	|  52
-rw-r--r--	drivers/scsi/bnx2i/bnx2i_iscsi.c	|  23
6 files changed, 129 insertions(+), 114 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2cb248269c8f..ea37ea209234 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 
 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 
 	for (i = 0; i < pages; i++) {
 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
-						    BNX2_PAGE_SIZE,
+						    CNIC_PAGE_SIZE,
 						    &dma->pg_map_arr[i],
 						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	if (!use_pg_tbl)
 		return 0;
 
-	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
-			  ~(BNX2_PAGE_SIZE - 1);
+	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+			  ~(CNIC_PAGE_SIZE - 1);
 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 		int i, k, arr_size;
 
-		cp->ctx_blk_size = BNX2_PAGE_SIZE;
-		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+		cp->ctx_blk_size = CNIC_PAGE_SIZE;
+		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 			   sizeof(struct cnic_ctx);
 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
 				dma_alloc_coherent(&dev->pcidev->dev,
-						   BNX2_PAGE_SIZE,
+						   CNIC_PAGE_SIZE,
 						   &cp->ctx_arr[i].mapping,
 						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 	if (udev->l2_ring)
 		return 0;
 
-	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 		return -ENOMEM;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
 						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
-					PAGE_MASK;
+					CNIC_PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
 		else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
 
 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
-			PAGE_MASK;
+			CNIC_PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
 
 		uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
 
-	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
-		PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
 	if (ret)
 		return -ENOMEM;
 
-	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
 
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 			goto error;
 	}
 
-	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
 	if (ret)
 		goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
 			BNX2X_ISCSI_R2TQE_SIZE;
 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
-	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
 	cp->num_cqs = req1->num_cqs;
 
 	if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
 		  req1->rq_num_wqes);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
 		  req1->rq_buffer_size);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
-		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Xstorm RAM */
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Cstorm RAM */
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
-		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	}
 
 	ctx->cid = cid;
-	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
 	if (ret)
 		goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
-		req2->rq_page_table_addr_lo & PAGE_MASK;
+		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
 		req2->rq_page_table_addr_hi;
 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	/* CSTORM and USTORM initialization is different, CSTORM requires
 	 * CQ DB base & not PTE addr */
 	ictx->cstorm_st_context.cq_db_base.lo =
-		req1->cq_page_table_addr_lo & PAGE_MASK;
+		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
 	u16 hw_cons, sw_cons;
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
-					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	u32 cmd;
 	int comp = 0;
 
@@ -4385,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
 		u32 val;
 
-		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
 
 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4629,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
-	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
 	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
 		int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4640,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 	rxbd->rx_bd_haddr_hi = val;
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 	rxbd->rx_bd_haddr_lo = val;
 
@@ -4710,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
-	if (BNX2_PAGE_BITS > 12)
+	if (CNIC_PAGE_BITS > 12)
 		val |= (12 - 8) << 4;
 	else
-		val |= (BNX2_PAGE_BITS - 8) << 4;
+		val |= (CNIC_PAGE_BITS - 8) << 4;
 
 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
 
@@ -4743,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4769,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4919,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
 
-	memset(txbd, 0, BNX2_PAGE_SIZE);
+	memset(txbd, 0, CNIC_PAGE_SIZE);
 
 	buf_map = udev->l2_buf_map;
 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4979,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
-				BNX2_PAGE_SIZE);
+				CNIC_PAGE_SIZE);
 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
-				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5005,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
 	}
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	rxbd->addr_hi = cpu_to_le32(val);
 	data->rx.bd_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	rxbd->addr_lo = cpu_to_le32(val);
 	data->rx.bd_page_base.lo = cpu_to_le32(val);
 
 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
 	rxcqe->addr_hi = cpu_to_le32(val);
 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
 	rxcqe->addr_lo = cpu_to_le32(val);
 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
 
@@ -5266,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
-	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
-	memset(rx_ring, 0, BNX2_PAGE_SIZE);
+	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+	memset(rx_ring, 0, CNIC_PAGE_SIZE);
 }
 
 static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b1926069..40bb5630d79b 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -24,6 +24,16 @@
 #define MAX_CNIC_ULP_TYPE_EXT	3
 #define MAX_CNIC_ULP_TYPE	4
 
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS	14
+#else
+#define CNIC_PAGE_BITS	PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
+
 struct kwqe {
 	u32 kwqe_op_flag;
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed880891cb7c..e9279a8c1e1c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
 		mp_req->mp_resp_bd = NULL;
 	}
 	if (mp_req->req_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  mp_req->req_buf,
 				  mp_req->req_buf_dma);
 		mp_req->req_buf = NULL;
 	}
 	if (mp_req->resp_buf) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  mp_req->resp_buf,
 				  mp_req->resp_buf_dma);
 		mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 
 	mp_req->req_len = sizeof(struct fcp_cmnd);
 	io_req->data_xfer_len = mp_req->req_len;
-	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					     &mp_req->req_buf_dma,
 					     GFP_ATOMIC);
 	if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		return FAILED;
 	}
 
-	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					      &mp_req->resp_buf_dma,
 					      GFP_ATOMIC);
 	if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 		bnx2fc_free_mp_resc(io_req);
 		return FAILED;
 	}
-	memset(mp_req->req_buf, 0, PAGE_SIZE);
-	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
 
 	/* Allocate and map mp_req_bd and mp_resp_bd */
 	sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	mp_req_bd = mp_req->mp_req_bd;
 	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_req_bd->flags = 0;
 
 	/*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
 	addr = mp_req->resp_buf_dma;
 	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
 	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
-	mp_resp_bd->buf_len = PAGE_SIZE;
+	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
 	mp_resp_bd->flags = 0;
 
 	return SUCCESS;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177dfb53..d9bae5672273 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map SQ */
 	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
-	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
 				     &tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map CQ */
 	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
-	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
 				     &tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map RQ and RQ PBL */
 	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
-	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
 				     &tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 	memset(tgt->rq, 0, tgt->rq_mem_size);
 
-	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
-	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+			   CNIC_PAGE_MASK;
 
 	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
 					 &tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 
 	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
-	num_pages = tgt->rq_mem_size / PAGE_SIZE;
+	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->rq_dma;
 	pbl = (u32 *)tgt->rq_pbl;
 
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 
 	/* Allocate and map XFERQ */
 	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
-	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
 					&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map CONFQ & CONFQ PBL */
 	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
-	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
-			       PAGE_MASK;
+	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			       CNIC_PAGE_MASK;
 
 	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
 					&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	memset(tgt->confq, 0, tgt->confq_mem_size);
 
 	tgt->confq_pbl_size =
-		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	tgt->confq_pbl_size =
-		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
 					    tgt->confq_pbl_size,
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	}
 
 	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
-	num_pages = tgt->confq_mem_size / PAGE_SIZE;
+	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
 	page = tgt->confq_dma;
 	pbl = (u32 *)tgt->confq_pbl;
 
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 		pbl++;
 		*pbl = (u32)((u64)page >> 32);
 		pbl++;
-		page += PAGE_SIZE;
+		page += CNIC_PAGE_SIZE;
 	}
 
 	/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 
 	/* Allocate and map LCQ */
 	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
-	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
-			     PAGE_MASK;
+	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+			     CNIC_PAGE_MASK;
 
 	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
 				      &tgt->lcq_dma, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23df4b4f..b87a1933f880 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 	 * yield integral num of page buffers
 	 */
 	/* adjust SQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
 	if (hba->max_sqes < num_elements_per_pg)
 		hba->max_sqes = num_elements_per_pg;
 	else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 				  ~(num_elements_per_pg - 1);
 
 	/* adjust CQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
 	if (hba->max_cqes < num_elements_per_pg)
 		hba->max_cqes = num_elements_per_pg;
 	else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
 				  ~(num_elements_per_pg - 1);
 
 	/* adjust RQ */
-	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
 	if (hba->max_rqes < num_elements_per_pg)
 		hba->max_rqes = num_elements_per_pg;
 	else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 
 	/* SQ page table */
 	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
-	num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.sq_phys;
 
 	if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 
 	/* RQ page table */
 	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
-	num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.rq_phys;
 
 	if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 
 	/* CQ page table */
 	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
-	num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
 	page = ep->qp.cq_phys;
 
 	if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) ((u64) page >> 32);
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		} else {
 			/* PTE is written in big endian format for
 			 * 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
 			ptbl++;
 			*ptbl = (u32) page;
 			ptbl++;
-			page += PAGE_SIZE;
+			page += CNIC_PAGE_SIZE;
 		}
 	}
 }
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for SQ which is page aligned */
 	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
 	ep->qp.sq_mem_size =
-		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.sq_pgtbl_size =
-		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.sq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for CQ which is page aligned */
 	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
 	ep->qp.cq_mem_size =
-		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.cq_pgtbl_size =
-		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.cq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 	/* Allocate page table memory for RQ which is page aligned */
 	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
 	ep->qp.rq_mem_size =
-		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+		(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
 	ep->qp.rq_pgtbl_size =
-		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+		(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
 	ep->qp.rq_pgtbl_virt =
 		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 	bnx2i_adjust_qp_size(hba);
 
 	iscsi_init.flags =
-		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+		(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
 	if (en_tcp_dack)
 		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
 	iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
 		((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
 	iscsi_init.num_ccells_per_conn = hba->num_ccell;
 	iscsi_init.num_tasks_per_conn = hba->max_sqes;
-	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+	iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
 	iscsi_init.sq_num_wqes = hba->max_sqes;
 	iscsi_init.cq_log_wqes_per_page =
-		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+		(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
 	iscsi_init.cq_num_wqes = hba->max_cqes;
 	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
+				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
 	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
 	iscsi_init.rq_num_wqes = hba->max_rqes;
 
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7d5b03..c8b0aff5bbd4 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	struct iscsi_bd *mp_bdt;
 	u64 addr;
 
-	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 					    &hba->mp_bd_dma, GFP_KERNEL);
 	if (!hba->mp_bd_tbl) {
 		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 		goto out;
 	}
 
-	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+					       CNIC_PAGE_SIZE,
 					       &hba->dummy_buf_dma, GFP_KERNEL);
 	if (!hba->dummy_buffer) {
 		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 		rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
 	addr = (unsigned long) hba->dummy_buf_dma;
 	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
 	mp_bdt->buffer_addr_hi = addr >> 32;
-	mp_bdt->buffer_length = PAGE_SIZE;
+	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
 	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
 			ISCSI_BD_FIRST_IN_BD_CHAIN;
 out:
@@ -565,12 +566,12 @@ out:
 static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
 {
 	if (hba->mp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->mp_bd_tbl, hba->mp_bd_dma);
 		hba->mp_bd_tbl = NULL;
 	}
 	if (hba->dummy_buffer) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  hba->dummy_buffer, hba->dummy_buf_dma);
 		hba->dummy_buffer = NULL;
 	}
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
 					    struct bnx2i_conn *bnx2i_conn)
 {
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.resp_bd_tbl,
 				  bnx2i_conn->gen_pdu.resp_bd_dma);
 		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
 	}
 
 	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
-		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				  bnx2i_conn->gen_pdu.req_bd_tbl,
 				  bnx2i_conn->gen_pdu.req_bd_dma);
 		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
 
 	bnx2i_conn->gen_pdu.req_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
 		goto login_req_bd_tbl_failure;
 
 	bnx2i_conn->gen_pdu.resp_bd_tbl =
-		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 				   &bnx2i_conn->gen_pdu.resp_bd_dma,
 				   GFP_KERNEL);
 	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
 	return 0;
 
 login_resp_bd_tbl_failure:
-	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
 			  bnx2i_conn->gen_pdu.req_bd_tbl,
 			  bnx2i_conn->gen_pdu.req_bd_dma);
 	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;