47 files changed, 508 insertions, 306 deletions
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c3d805..4fc392763611 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@ Required properties:
 
 Optional properties:
 - local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index b26122973525..c6af4bac5aa8 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -226,9 +226,9 @@ Ring setup:
     void *rx_ring, *tx_ring;
 
     /* Configure ring parameters */
-    if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+    if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
         exit(1);
-    if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+    if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
         exit(1)
 
     /* Calculate size of each individual ring */
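The documentation fix above adds the missing SOL_NETLINK level argument to both setsockopt() calls. For reference, a minimal sketch of the corrected ring setup; the values are illustrative only, and it assumes an already-open AF_NETLINK socket fd on a kernel built with CONFIG_NETLINK_MMAP:

    struct nl_mmap_req req = {
        .nm_block_size = 16 * getpagesize(),
        .nm_block_nr   = 64,
        .nm_frame_size = 16384,
        .nm_frame_nr   = 64 * 16 * getpagesize() / 16384,
    };

    /* NETLINK_RX_RING and NETLINK_TX_RING live at the SOL_NETLINK level */
    if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
        exit(1);
    if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
        exit(1);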
diff --git a/MAINTAINERS b/MAINTAINERS
index 55a57edf437f..82640e640f36 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4545,6 +4545,7 @@ M: Greg Rose <gregory.v.rose@intel.com>
 M: Alex Duyck <alexander.h.duyck@intel.com>
 M: John Ronciak <john.ronciak@intel.com>
 M: Mitch Williams <mitch.a.williams@intel.com>
+M: Linux NICS <linux.nics@intel.com>
 L: e1000-devel@lists.sourceforge.net
 W: http://www.intel.com/support/feedback.htm
 W: http://e1000.sourceforge.net/
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f04686580040..9816c51eb5c2 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,9 +16,17 @@ config CAPI_TRACE
 	  This will increase the size of the kernelcapi module by 20 KB.
 	  If unsure, say Y.
 
+config ISDN_CAPI_CAPI20
+	tristate "CAPI2.0 /dev/capi support"
+	help
+	  This option will provide the CAPI 2.0 interface to userspace
+	  applications via /dev/capi20. Applications should use the
+	  standardized libcapi20 to access this functionality. You should say
+	  Y/M here.
+
 config ISDN_CAPI_MIDDLEWARE
 	bool "CAPI2.0 Middleware support"
-	depends on TTY
+	depends on ISDN_CAPI_CAPI20 && TTY
 	help
 	  This option will enhance the capabilities of the /dev/capi20
 	  interface. It will provide a means of moving a data connection,
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE
 	  device. If you want to use pppd with pppdcapiplugin to dial up to
 	  your ISP, say Y here.
 
-config ISDN_CAPI_CAPI20
-	tristate "CAPI2.0 /dev/capi support"
-	help
-	  This option will provide the CAPI 2.0 interface to userspace
-	  applications via /dev/capi20. Applications should use the
-	  standardized libcapi20 to access this functionality. You should say
-	  Y/M here.
-
 config ISDN_CAPI_CAPIDRV
 	tristate "CAPI2.0 capidrv interface support"
 	depends on ISDN_I4L
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 2e45f6ec1bf0..380d24922049 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * shared register for the high 32 bits, so only a single, aligned,
 	 * 4 GB physical address range can be used for descriptors.
 	 */
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
 	} else {
-		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = dma_set_coherent_mask(&pdev->dev,
-						    DMA_BIT_MASK(32));
-			if (err) {
-				dev_err(&pdev->dev,
-					"No usable DMA config, aborting\n");
-				goto out_pci_disable;
-			}
+			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+			goto out_pci_disable;
 		}
 	}
 
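The alx change above folds the separate dma_set_mask()/dma_set_coherent_mask() pairs into dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. A compact sketch of the resulting probe-time fallback; the out_pci_disable label is this driver's own, the rest is the generic pattern:

    /* Prefer 64-bit DMA, fall back to 32-bit, give up otherwise. */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
            dev_err(&pdev->dev, "No usable DMA config, aborting\n");
            goto out_pci_disable;
        }
    }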
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d5c2d3e912e5..422aab27ea1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2436,7 +2436,7 @@ err_reset:
 err_register:
 err_sw_init:
 err_eeprom:
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 err_init_netdev:
 err_ioremap:
 	free_netdev(netdev);
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
 	unregister_netdev(netdev);
 	atl1e_free_ring_resources(adapter);
 	atl1e_force_ps(&adapter->hw);
-	iounmap(adapter->hw.hw_addr);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
 	pci_release_regions(pdev);
 	free_netdev(netdev);
 	pci_disable_device(pdev);
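Switching atl1e from iounmap() to pci_iounmap() matches the mapping side of the driver: pci_iounmap() is the counterpart of pci_iomap() and also copes with BARs mapped as I/O port space, which plain iounmap() does not. A rough sketch of the pairing, not taken from this driver:

    void __iomem *regs;

    regs = pci_iomap(pdev, 0, 0);   /* map BAR 0, full length */
    if (!regs)
        return -EIO;

    /* ... ioread32()/iowrite32() accesses on regs ... */

    pci_iounmap(pdev, regs);        /* valid for both MMIO and port mappings */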
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index fcf9105a5476..09f3fefcbf9c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
 	while (retry < 3) {
 		rc = 0;
 		rcu_read_lock();
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
 		if (ulp_ops)
 			rc = ulp_ops->iscsi_nl_send_msg(
 				cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 
 	for (i = 0; i < dma->num_pages; i++) {
 		if (dma->pg_arr[i]) {
-			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
 					  dma->pg_arr[i], dma->pg_map_arr[i]);
 			dma->pg_arr[i] = NULL;
 		}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 
 	for (i = 0; i < pages; i++) {
 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
-						    BNX2_PAGE_SIZE,
+						    CNIC_PAGE_SIZE,
 						    &dma->pg_map_arr[i],
 						    GFP_ATOMIC);
 		if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
 	if (!use_pg_tbl)
 		return 0;
 
-	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
-			  ~(BNX2_PAGE_SIZE - 1);
+	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+			  ~(CNIC_PAGE_SIZE - 1);
 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
 					&dma->pgtbl_map, GFP_ATOMIC);
 	if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
 		int i, k, arr_size;
 
-		cp->ctx_blk_size = BNX2_PAGE_SIZE;
-		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+		cp->ctx_blk_size = CNIC_PAGE_SIZE;
+		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
 			   sizeof(struct cnic_ctx);
 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
 		for (i = 0; i < cp->ctx_blks; i++) {
 			cp->ctx_arr[i].ctx =
 				dma_alloc_coherent(&dev->pcidev->dev,
-						   BNX2_PAGE_SIZE,
+						   CNIC_PAGE_SIZE,
 						   &cp->ctx_arr[i].mapping,
 						   GFP_KERNEL);
 			if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 	if (udev->l2_ring)
 		return 0;
 
-	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
 					   &udev->l2_ring_map,
 					   GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 		return -ENOMEM;
 
 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
 					  &udev->l2_buf_map,
 					  GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
 						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
-					PAGE_MASK;
+					CNIC_PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
 		else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
 		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
 
 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
-			PAGE_MASK;
+			CNIC_PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
 
 		uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
 
-	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
-		PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
 	if (ret)
 		return -ENOMEM;
 
-	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
 
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
 		goto error;
 	}
 
-	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
 	if (ret)
 		goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
 			BNX2X_ISCSI_R2TQE_SIZE;
 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
-	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
 	cp->num_cqs = req1->num_cqs;
 
 	if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
 		  req1->rq_num_wqes);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
-		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
 		  req1->rq_buffer_size);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
-		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Xstorm RAM */
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
-		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
 
 	/* init Cstorm RAM */
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
-		  PAGE_SIZE);
+		  CNIC_PAGE_SIZE);
 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
-		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
 		  req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
 	}
 
 	ctx->cid = cid;
-	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
 
 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
 	if (ret)
 		goto error;
 
-	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
 	if (ret)
 		goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
 	/* TSTORM requires the base address of RQ DB & not PTE */
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
-		req2->rq_page_table_addr_lo & PAGE_MASK;
+		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
 		req2->rq_page_table_addr_hi;
 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
 	/* CSTORM and USTORM initialization is different, CSTORM requires
 	 * CQ DB base & not PTE addr */
 	ictx->cstorm_st_context.cq_db_base.lo =
-		req1->cq_page_table_addr_lo & PAGE_MASK;
+		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
 	u16 hw_cons, sw_cons;
 	struct cnic_uio_dev *udev = cp->udev;
 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
-					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+					(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	u32 cmd;
 	int comp = 0;
 
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
 	int rc;
 
 	mutex_lock(&cnic_lock);
-	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+					    lockdep_is_held(&cnic_lock));
 	if (ulp_ops && ulp_ops->cnic_get_stats)
 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
 	else
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
 		u32 val;
 
-		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+		memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
 
 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 	val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
 
-	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+	rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
 	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
 		dma_addr_t buf_map;
 		int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
 	}
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
 	rxbd->rx_bd_haddr_hi = val;
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
 	rxbd->rx_bd_haddr_lo = val;
 
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
-	if (BNX2_PAGE_BITS > 12)
+	if (CNIC_PAGE_BITS > 12)
 		val |= (12 - 8) << 4;
 	else
-		val |= (BNX2_PAGE_BITS - 8) << 4;
+		val |= (CNIC_PAGE_BITS - 8) << 4;
 
 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
 
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel work queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 
 	/* Initialize the kernel complete queue context. */
 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
-	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	      (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
 
-	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
 
-	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
 
 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
 	u32 val;
 
-	memset(txbd, 0, BNX2_PAGE_SIZE);
+	memset(txbd, 0, CNIC_PAGE_SIZE);
 
 	buf_map = udev->l2_buf_map;
 	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 	struct bnx2x *bp = netdev_priv(dev->netdev);
 	struct cnic_uio_dev *udev = cp->udev;
 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
-				BNX2_PAGE_SIZE);
+				CNIC_PAGE_SIZE);
 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
-				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+				(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
 	int i;
 	u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
 	}
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
 	rxbd->addr_hi = cpu_to_le32(val);
 	data->rx.bd_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+	val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
 	rxbd->addr_lo = cpu_to_le32(val);
 	data->rx.bd_page_base.lo = cpu_to_le32(val);
 
 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
 	rxcqe->addr_hi = cpu_to_le32(val);
 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
 
-	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+	val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
 	rxcqe->addr_lo = cpu_to_le32(val);
 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
 
@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
 		msleep(10);
 	}
 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
-	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
-	memset(rx_ring, 0, BNX2_PAGE_SIZE);
+	rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+	memset(rx_ring, 0, CNIC_PAGE_SIZE);
 }
 
 static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0d6b13f854d9..d535ae4228b4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
 /* cnic.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b11c9f..dcbca6997e8f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
 
 /* cnic.c: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b1926069..5f4d5573a73d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
 /* cnic_if.h: Broadcom CNIC core network driver.
  *
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.19"
-#define CNIC_MODULE_RELDATE	"December 19, 2013"
+#define CNIC_MODULE_VERSION	"2.5.20"
+#define CNIC_MODULE_RELDATE	"March 14, 2014"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -24,6 +24,16 @@
 #define MAX_CNIC_ULP_TYPE_EXT	3
 #define MAX_CNIC_ULP_TYPE	4
 
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS	14
+#else
+#define CNIC_PAGE_BITS	PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE	(1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK	(~((CNIC_PAGE_SIZE) - 1))
+
 struct kwqe {
 	u32 kwqe_op_flag;
 
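The new CNIC_PAGE_* macros decouple the cnic ring geometry from the CPU page size: CNIC_PAGE_BITS follows PAGE_SHIFT but is capped at 14, so kernels built with 64K pages still lay these rings out in at most 16K units (the in-file comment suggests the firmware interface is only sized for that). A small worked example, assuming a 64K-page kernel:

    /* PAGE_SHIFT == 16  ->  CNIC_PAGE_BITS == 14                  */
    /* PAGE_SIZE  == 65536, CNIC_PAGE_SIZE == 16384                */
    /* PAGE_ALIGN(20000) == 65536, CNIC_PAGE_ALIGN(20000) == 32768 */
    pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;  /* counts 16K units */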
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a9eb8..e0c92e0e5e1d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
 #include <linux/crc32.h>
 #include <linux/mii.h>
 #include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/spi/spi.h>
 
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
  * @rc_rxqcr: Cached copy of KS_RXQCR.
  * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
  *
  * The @lock ensures that the chip is protected when certain operations are
  * in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
 	struct spi_transfer	spi_xfer2[2];
 
 	struct eeprom_93cx6	eeprom;
+	struct regulator	*vdd_reg;
 };
 
 static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
 	ks->spidev = spi;
 	ks->tx_space = 6144;
 
+	ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+	if (IS_ERR(ks->vdd_reg)) {
+		ret = PTR_ERR(ks->vdd_reg);
+		if (ret == -EPROBE_DEFER)
+			goto err_reg;
+	} else {
+		ret = regulator_enable(ks->vdd_reg);
+		if (ret) {
+			dev_err(&spi->dev, "regulator enable fail: %d\n",
+				ret);
+			goto err_reg_en;
+		}
+	}
+
+
 	mutex_init(&ks->lock);
 	spin_lock_init(&ks->statelock);
 
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
 err_netdev:
 	free_irq(ndev->irq, ks);
 
-err_id:
 err_irq:
+err_id:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_disable(ks->vdd_reg);
+err_reg_en:
+	if (!IS_ERR(ks->vdd_reg))
+		regulator_put(ks->vdd_reg);
+err_reg:
 	free_netdev(ndev);
 	return ret;
 }
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
 
 	unregister_netdev(priv->netdev);
 	free_irq(spi->irq, priv);
+	if (!IS_ERR(priv->vdd_reg)) {
+		regulator_disable(priv->vdd_reg);
+		regulator_put(priv->vdd_reg);
+	}
 	free_netdev(priv->netdev);
 
 	return 0;
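The ks8851 probe above treats the "vdd" supply as optional: a missing regulator is tolerated, but -EPROBE_DEFER is passed up so the probe is retried once the supply appears, and later error labels only unwind what was actually acquired. A stripped-down sketch of that pattern; the variable names are illustrative and this is not the driver's exact code (dev is the SPI client's struct device):

    struct regulator *reg;
    int ret;

    reg = regulator_get_optional(dev, "vdd");
    if (IS_ERR(reg)) {
        /* no supply described: carry on, but defer if asked to */
        if (PTR_ERR(reg) == -EPROBE_DEFER)
            return -EPROBE_DEFER;
    } else {
        ret = regulator_enable(reg);
        if (ret) {
            regulator_put(reg);
            return ret;
        }
    }

    /* ... on remove or probe failure, mirror the setup ... */
    if (!IS_ERR(reg)) {
        regulator_disable(reg);
        regulator_put(reg);
    }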
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ffd4d12acf6d..7d6d8ec676c8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_ale_ret;
 	}
 
-	if (cpts_register(&pdev->dev, priv->cpts,
-			  data->cpts_clock_mult, data->cpts_clock_shift))
-		dev_err(priv->dev, "error registering cpts device\n");
-
 	cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
 		    &ss_res->start, ndev->irq);
 
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 364d0c7952c0..88ef27067bf2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	int i;
 
 	spin_lock_irqsave(&ctlr->lock, flags);
-	if (ctlr->state != CPDMA_STATE_ACTIVE) {
+	if (ctlr->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&ctlr->lock, flags);
 		return -EINVAL;
 	}
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 	unsigned	timeout;
 
 	spin_lock_irqsave(&chan->lock, flags);
-	if (chan->state != CPDMA_STATE_ACTIVE) {
+	if (chan->state == CPDMA_STATE_TEARDOWN) {
 		spin_unlock_irqrestore(&chan->lock, flags);
 		return -EINVAL;
 	}
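The cpdma change relaxes the stop-path guard: previously any state other than ACTIVE (including IDLE, i.e. never started) returned -EINVAL, whereas now only a controller or channel already in TEARDOWN is rejected, so stopping an idle instance no longer fails; the davinci_emac open-failure rollback later in this series appears to rely on that. Schematically (locking omitted, so not a drop-in fragment):

    /* old guard: also rejects CPDMA_STATE_IDLE */
    if (ctlr->state != CPDMA_STATE_ACTIVE)
        return -EINVAL;

    /* new guard: only rejects a teardown already in progress */
    if (ctlr->state == CPDMA_STATE_TEARDOWN)
        return -EINVAL;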
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164a0434..8f0e69ce07ca 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
 	struct device *emac_dev = &ndev->dev;
 	u32 cnt;
 	struct resource *res;
-	int ret;
+	int q, m, ret;
+	int res_num = 0, irq_num = 0;
 	int i = 0;
-	int k = 0;
 	struct emac_priv *priv = netdev_priv(ndev);
 
 	pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
 	}
 
 	/* Request IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+					    res_num))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+			dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+			if (request_irq(irq_num, emac_irq, 0, ndev->name,
+					ndev)) {
+				dev_err(emac_dev,
+					"DaVinci EMAC: request_irq() failed\n");
+				ret = -EBUSY;
 
-	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
-		for (i = res->start; i <= res->end; i++) {
-			if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
-					     0, ndev->name, ndev))
 				goto rollback;
+			}
 		}
-		k++;
+		res_num++;
 	}
+	/* prepare counters for rollback in case of an error */
+	res_num--;
+	irq_num--;
 
 	/* Start/Enable EMAC hardware */
 	emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
 
 	return 0;
 
-rollback:
-
-	dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
-	ret = -EBUSY;
 err:
+	emac_int_disable(priv);
+	napi_disable(&priv->napi);
+
+rollback:
+	for (q = res_num; q >= 0; q--) {
+		res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+		/* at the first iteration, irq_num is already set to the
+		 * right value
+		 */
+		if (q != res_num)
+			irq_num = res->end;
+
+		for (m = irq_num; m >= res->start; m--)
+			free_irq(m, ndev);
+	}
+	cpdma_ctlr_stop(priv->dma);
 	pm_runtime_put(&priv->pdev->dev);
 	return ret;
 }
@@ -1659,6 +1680,9 @@ err:
  */
 static int emac_dev_stop(struct net_device *ndev)
 {
+	struct resource *res;
+	int i = 0;
+	int irq_num;
 	struct emac_priv *priv = netdev_priv(ndev);
 	struct device *emac_dev = &ndev->dev;
 
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
 
+	/* Free IRQ */
+	while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+		for (irq_num = res->start; irq_num <= res->end; irq_num++)
+			free_irq(irq_num, priv->ndev);
+		i++;
+	}
+
 	if (netif_msg_drv(priv))
 		dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
 
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc6b865..6ac20a6738f4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc) {
 		dev_err(&pdev->dev,
 			"32-bit PCI DMA addresses not supported by the card!?\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	/* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	    (pci_resource_len(pdev, 1) < io_size)) {
 		rc = -EIO;
 		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 
 	pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev = alloc_etherdev(sizeof(struct rhine_private));
 	if (!dev) {
 		rc = -ENOMEM;
-		goto err_out;
+		goto err_out_pci_disable;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -1084,6 +1084,8 @@ err_out_free_res:
 	pci_release_regions(pdev);
 err_out_free_netdev:
 	free_netdev(dev);
+err_out_pci_disable:
+	pci_disable_device(pdev);
 err_out:
 	return rc;
 }
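The via-rhine fix adds a dedicated err_out_pci_disable label so that every failure after pci_enable_device() also disables the device again; the labels then unwind resources in reverse order of acquisition, and each early exit jumps to the deepest label whose resources are already held. A generic sketch of the idiom, with hypothetical names rather than this driver's exact code:

    rc = pci_enable_device(pdev);
    if (rc)
        goto err_out;

    rc = pci_request_regions(pdev, "example-driver");
    if (rc)
        goto err_out_pci_disable;

    /* ... further setup, jumping to err_out_free_res on failure ... */
    return 0;

    err_out_free_res:
        pci_release_regions(pdev);
    err_out_pci_disable:
        pci_disable_device(pdev);
    err_out:
        return rc;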
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b970f7624c0..2f6989b1e0dc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
 	struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
-	struct ethtool_wolinfo wol;
+	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
 
 	/* If the device has WOL enabled, we cannot suspend the PHY */
-	wol.cmd = ETHTOOL_GWOL;
 	phy_ethtool_get_wol(phydev, &wol);
 	if (wol.wolopts)
 		return -EBUSY;
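Using a designated initializer for wol means every member other than .cmd starts out zeroed, so wol.wolopts can no longer carry stack garbage into the WOL check if a PHY driver's get_wol() callback leaves it untouched. In plain C terms, the replaced lines are equivalent to:

    struct ethtool_wolinfo wol;

    memset(&wol, 0, sizeof(wol));   /* designated initializer zeroes the rest */
    wol.cmd = ETHTOOL_GWOL;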
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index dbff290ed0e4..d350d2795e10 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver; | |||
| 68 | static int cdc_ncm_setup(struct usbnet *dev) | 68 | static int cdc_ncm_setup(struct usbnet *dev) |
| 69 | { | 69 | { |
| 70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; | 70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; |
| 71 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
| 72 | u32 val; | 71 | u32 val; |
| 73 | u8 flags; | 72 | u8 flags; |
| 74 | u8 iface_no; | 73 | u8 iface_no; |
| @@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 82 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, | 81 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, |
| 83 | USB_TYPE_CLASS | USB_DIR_IN | 82 | USB_TYPE_CLASS | USB_DIR_IN |
| 84 | |USB_RECIP_INTERFACE, | 83 | |USB_RECIP_INTERFACE, |
| 85 | 0, iface_no, &ncm_parm, | 84 | 0, iface_no, &ctx->ncm_parm, |
| 86 | sizeof(ncm_parm)); | 85 | sizeof(ctx->ncm_parm)); |
| 87 | if (err < 0) { | 86 | if (err < 0) { |
| 88 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); | 87 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); |
| 89 | return err; /* GET_NTB_PARAMETERS is required */ | 88 | return err; /* GET_NTB_PARAMETERS is required */ |
| 90 | } | 89 | } |
| 91 | 90 | ||
| 92 | /* read correct set of parameters according to device mode */ | 91 | /* read correct set of parameters according to device mode */ |
| 93 | ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); | 92 | ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); |
| 94 | ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); | 93 | ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); |
| 95 | ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); | 94 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
| 96 | ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); | 95 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
| 97 | ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); | 96 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
| 98 | /* devices prior to NCM Errata shall set this field to zero */ | 97 | /* devices prior to NCM Errata shall set this field to zero */ |
| 99 | ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); | 98 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); |
| 100 | ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); | 99 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); |
| 101 | 100 | ||
| 102 | /* there are some minor differences in NCM and MBIM defaults */ | 101 | /* there are some minor differences in NCM and MBIM defaults */ |
| 103 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { | 102 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { |
| @@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 146 | } | 145 | } |
| 147 | 146 | ||
| 148 | /* inform device about NTB input size changes */ | 147 | /* inform device about NTB input size changes */ |
| 149 | if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { | 148 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
| 150 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 149 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); |
| 151 | 150 | ||
| 152 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, | 151 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, |
| @@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
| 162 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", | 161 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", |
| 163 | CDC_NCM_NTB_MAX_SIZE_TX); | 162 | CDC_NCM_NTB_MAX_SIZE_TX); |
| 164 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; | 163 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; |
| 165 | |||
| 166 | /* Adding a pad byte here simplifies the handling in | ||
| 167 | * cdc_ncm_fill_tx_frame, by making tx_max always | ||
| 168 | * represent the real skb max size. | ||
| 169 | */ | ||
| 170 | if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
| 171 | ctx->tx_max++; | ||
| 172 | |||
| 173 | } | 164 | } |
| 174 | 165 | ||
| 175 | /* | 166 | /* |
| @@ -439,6 +430,10 @@ advance: | |||
| 439 | goto error2; | 430 | goto error2; |
| 440 | } | 431 | } |
| 441 | 432 | ||
| 433 | /* initialize data interface */ | ||
| 434 | if (cdc_ncm_setup(dev)) | ||
| 435 | goto error2; | ||
| 436 | |||
| 442 | /* configure data interface */ | 437 | /* configure data interface */ |
| 443 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); | 438 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); |
| 444 | if (temp) { | 439 | if (temp) { |
| @@ -453,12 +448,6 @@ advance: | |||
| 453 | goto error2; | 448 | goto error2; |
| 454 | } | 449 | } |
| 455 | 450 | ||
| 456 | /* initialize data interface */ | ||
| 457 | if (cdc_ncm_setup(dev)) { | ||
| 458 | dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n"); | ||
| 459 | goto error2; | ||
| 460 | } | ||
| 461 | |||
| 462 | usb_set_intfdata(ctx->data, dev); | 451 | usb_set_intfdata(ctx->data, dev); |
| 463 | usb_set_intfdata(ctx->control, dev); | 452 | usb_set_intfdata(ctx->control, dev); |
| 464 | 453 | ||
| @@ -475,6 +464,15 @@ advance: | |||
| 475 | dev->hard_mtu = ctx->tx_max; | 464 | dev->hard_mtu = ctx->tx_max; |
| 476 | dev->rx_urb_size = ctx->rx_max; | 465 | dev->rx_urb_size = ctx->rx_max; |
| 477 | 466 | ||
| 467 | /* cdc_ncm_setup will override dwNtbOutMaxSize if it is | ||
| 468 | * outside the sane range. Adding a pad byte here if necessary | ||
| 469 | * simplifies the handling in cdc_ncm_fill_tx_frame, making | ||
| 470 | * tx_max always represent the real skb max size. | ||
| 471 | */ | ||
| 472 | if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && | ||
| 473 | ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
| 474 | ctx->tx_max++; | ||
| 475 | |||
| 478 | return 0; | 476 | return 0; |
| 479 | 477 | ||
| 480 | error2: | 478 | error2: |
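
For illustration, a minimal standalone C sketch (not kernel code) of the pad-byte check the cdc_ncm hunks above move behind the dwNtbOutMaxSize validation: tx_max is bumped by one byte only when it was clamped away from the device-reported size and would otherwise be an exact multiple of the bulk-out packet size, so tx_max keeps representing the real skb limit used by cdc_ncm_fill_tx_frame. The 512-byte packet size standing in for usb_maxpacket() and the helper name pad_tx_max() are assumptions made for the example.

/* Illustrative sketch, not kernel code: the pad-byte decision applied after
 * cdc_ncm_setup() has validated the device-reported dwNtbOutMaxSize. */
#include <stdio.h>

static unsigned int pad_tx_max(unsigned int tx_max,
                               unsigned int max_packet,      /* assumed 512 */
                               unsigned int dev_ntb_out_max)
{
        /* Pad only when tx_max was clamped away from the device value and
         * would otherwise be an exact multiple of the endpoint packet size. */
        if (tx_max != dev_ntb_out_max && tx_max % max_packet == 0)
                tx_max++;
        return tx_max;
}

int main(void)
{
        printf("%u\n", pad_tx_max(16384, 512, 32768)); /* clamped, multiple -> 16385 */
        printf("%u\n", pad_tx_max(16384, 512, 16384)); /* device value kept -> 16384 */
        return 0;
}
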
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b0f705c2378f..1236812c7be6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1318 | 1318 | ||
| 1319 | neigh_release(n); | 1319 | neigh_release(n); |
| 1320 | 1320 | ||
| 1321 | if (reply == NULL) | ||
| 1322 | goto out; | ||
| 1323 | |||
| 1321 | skb_reset_mac_header(reply); | 1324 | skb_reset_mac_header(reply); |
| 1322 | __skb_pull(reply, skb_network_offset(reply)); | 1325 | __skb_pull(reply, skb_network_offset(reply)); |
| 1323 | reply->ip_summed = CHECKSUM_UNNECESSARY; | 1326 | reply->ip_summed = CHECKSUM_UNNECESSARY; |
| @@ -1339,15 +1342,103 @@ out: | |||
| 1339 | } | 1342 | } |
| 1340 | 1343 | ||
| 1341 | #if IS_ENABLED(CONFIG_IPV6) | 1344 | #if IS_ENABLED(CONFIG_IPV6) |
| 1345 | |||
| 1346 | static struct sk_buff *vxlan_na_create(struct sk_buff *request, | ||
| 1347 | struct neighbour *n, bool isrouter) | ||
| 1348 | { | ||
| 1349 | struct net_device *dev = request->dev; | ||
| 1350 | struct sk_buff *reply; | ||
| 1351 | struct nd_msg *ns, *na; | ||
| 1352 | struct ipv6hdr *pip6; | ||
| 1353 | u8 *daddr; | ||
| 1354 | int na_olen = 8; /* opt hdr + ETH_ALEN for target */ | ||
| 1355 | int ns_olen; | ||
| 1356 | int i, len; | ||
| 1357 | |||
| 1358 | if (dev == NULL) | ||
| 1359 | return NULL; | ||
| 1360 | |||
| 1361 | len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + | ||
| 1362 | sizeof(*na) + na_olen + dev->needed_tailroom; | ||
| 1363 | reply = alloc_skb(len, GFP_ATOMIC); | ||
| 1364 | if (reply == NULL) | ||
| 1365 | return NULL; | ||
| 1366 | |||
| 1367 | reply->protocol = htons(ETH_P_IPV6); | ||
| 1368 | reply->dev = dev; | ||
| 1369 | skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); | ||
| 1370 | skb_push(reply, sizeof(struct ethhdr)); | ||
| 1371 | skb_set_mac_header(reply, 0); | ||
| 1372 | |||
| 1373 | ns = (struct nd_msg *)skb_transport_header(request); | ||
| 1374 | |||
| 1375 | daddr = eth_hdr(request)->h_source; | ||
| 1376 | ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); | ||
| 1377 | for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { | ||
| 1378 | if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { | ||
| 1379 | daddr = ns->opt + i + sizeof(struct nd_opt_hdr); | ||
| 1380 | break; | ||
| 1381 | } | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | /* Ethernet header */ | ||
| 1385 | ether_addr_copy(eth_hdr(reply)->h_dest, daddr); | ||
| 1386 | ether_addr_copy(eth_hdr(reply)->h_source, n->ha); | ||
| 1387 | eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); | ||
| 1388 | reply->protocol = htons(ETH_P_IPV6); | ||
| 1389 | |||
| 1390 | skb_pull(reply, sizeof(struct ethhdr)); | ||
| 1391 | skb_set_network_header(reply, 0); | ||
| 1392 | skb_put(reply, sizeof(struct ipv6hdr)); | ||
| 1393 | |||
| 1394 | /* IPv6 header */ | ||
| 1395 | |||
| 1396 | pip6 = ipv6_hdr(reply); | ||
| 1397 | memset(pip6, 0, sizeof(struct ipv6hdr)); | ||
| 1398 | pip6->version = 6; | ||
| 1399 | pip6->priority = ipv6_hdr(request)->priority; | ||
| 1400 | pip6->nexthdr = IPPROTO_ICMPV6; | ||
| 1401 | pip6->hop_limit = 255; | ||
| 1402 | pip6->daddr = ipv6_hdr(request)->saddr; | ||
| 1403 | pip6->saddr = *(struct in6_addr *)n->primary_key; | ||
| 1404 | |||
| 1405 | skb_pull(reply, sizeof(struct ipv6hdr)); | ||
| 1406 | skb_set_transport_header(reply, 0); | ||
| 1407 | |||
| 1408 | na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); | ||
| 1409 | |||
| 1410 | /* Neighbor Advertisement */ | ||
| 1411 | memset(na, 0, sizeof(*na)+na_olen); | ||
| 1412 | na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; | ||
| 1413 | na->icmph.icmp6_router = isrouter; | ||
| 1414 | na->icmph.icmp6_override = 1; | ||
| 1415 | na->icmph.icmp6_solicited = 1; | ||
| 1416 | na->target = ns->target; | ||
| 1417 | ether_addr_copy(&na->opt[2], n->ha); | ||
| 1418 | na->opt[0] = ND_OPT_TARGET_LL_ADDR; | ||
| 1419 | na->opt[1] = na_olen >> 3; | ||
| 1420 | |||
| 1421 | na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, | ||
| 1422 | &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, | ||
| 1423 | csum_partial(na, sizeof(*na)+na_olen, 0)); | ||
| 1424 | |||
| 1425 | pip6->payload_len = htons(sizeof(*na)+na_olen); | ||
| 1426 | |||
| 1427 | skb_push(reply, sizeof(struct ipv6hdr)); | ||
| 1428 | |||
| 1429 | reply->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 1430 | |||
| 1431 | return reply; | ||
| 1432 | } | ||
| 1433 | |||
| 1342 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | 1434 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) |
| 1343 | { | 1435 | { |
| 1344 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1436 | struct vxlan_dev *vxlan = netdev_priv(dev); |
| 1345 | struct neighbour *n; | 1437 | struct nd_msg *msg; |
| 1346 | union vxlan_addr ipa; | ||
| 1347 | const struct ipv6hdr *iphdr; | 1438 | const struct ipv6hdr *iphdr; |
| 1348 | const struct in6_addr *saddr, *daddr; | 1439 | const struct in6_addr *saddr, *daddr; |
| 1349 | struct nd_msg *msg; | 1440 | struct neighbour *n; |
| 1350 | struct inet6_dev *in6_dev = NULL; | 1441 | struct inet6_dev *in6_dev; |
| 1351 | 1442 | ||
| 1352 | in6_dev = __in6_dev_get(dev); | 1443 | in6_dev = __in6_dev_get(dev); |
| 1353 | if (!in6_dev) | 1444 | if (!in6_dev) |
| @@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1360 | saddr = &iphdr->saddr; | 1451 | saddr = &iphdr->saddr; |
| 1361 | daddr = &iphdr->daddr; | 1452 | daddr = &iphdr->daddr; |
| 1362 | 1453 | ||
| 1363 | if (ipv6_addr_loopback(daddr) || | ||
| 1364 | ipv6_addr_is_multicast(daddr)) | ||
| 1365 | goto out; | ||
| 1366 | |||
| 1367 | msg = (struct nd_msg *)skb_transport_header(skb); | 1454 | msg = (struct nd_msg *)skb_transport_header(skb); |
| 1368 | if (msg->icmph.icmp6_code != 0 || | 1455 | if (msg->icmph.icmp6_code != 0 || |
| 1369 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) | 1456 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) |
| 1370 | goto out; | 1457 | goto out; |
| 1371 | 1458 | ||
| 1372 | n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); | 1459 | if (ipv6_addr_loopback(daddr) || |
| 1460 | ipv6_addr_is_multicast(&msg->target)) | ||
| 1461 | goto out; | ||
| 1462 | |||
| 1463 | n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); | ||
| 1373 | 1464 | ||
| 1374 | if (n) { | 1465 | if (n) { |
| 1375 | struct vxlan_fdb *f; | 1466 | struct vxlan_fdb *f; |
| 1467 | struct sk_buff *reply; | ||
| 1376 | 1468 | ||
| 1377 | if (!(n->nud_state & NUD_CONNECTED)) { | 1469 | if (!(n->nud_state & NUD_CONNECTED)) { |
| 1378 | neigh_release(n); | 1470 | neigh_release(n); |
| @@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
| 1386 | goto out; | 1478 | goto out; |
| 1387 | } | 1479 | } |
| 1388 | 1480 | ||
| 1389 | ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, | 1481 | reply = vxlan_na_create(skb, n, |
| 1390 | !!in6_dev->cnf.forwarding, | 1482 | !!(f ? f->flags & NTF_ROUTER : 0)); |
| 1391 | true, false, false); | 1483 | |
| 1392 | neigh_release(n); | 1484 | neigh_release(n); |
| 1485 | |||
| 1486 | if (reply == NULL) | ||
| 1487 | goto out; | ||
| 1488 | |||
| 1489 | if (netif_rx_ni(reply) == NET_RX_DROP) | ||
| 1490 | dev->stats.rx_dropped++; | ||
| 1491 | |||
| 1393 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1492 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
| 1394 | ipa.sin6.sin6_addr = *daddr; | 1493 | union vxlan_addr ipa = { |
| 1395 | ipa.sa.sa_family = AF_INET6; | 1494 | .sin6.sin6_addr = msg->target, |
| 1495 | .sa.sa_family = AF_INET6, | ||
| 1496 | }; | ||
| 1497 | |||
| 1396 | vxlan_ip_miss(dev, &ipa); | 1498 | vxlan_ip_miss(dev, &ipa); |
| 1397 | } | 1499 | } |
| 1398 | 1500 | ||
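
For reference, a standalone C sketch (not kernel code) of the neighbour-discovery option walk that vxlan_na_create() above uses to pick the reply's destination MAC: ND options are type/length TLVs whose length field counts 8-octet units (RFC 4861), and the source link-layer address option has type 1. The helper name find_src_lladdr() and the zero-length guard are additions for the example; when no such option is present, the kernel code falls back to the Ethernet source address of the solicitation, which the sketch signals by returning NULL.

/* Illustrative sketch, not kernel code: walk ND options to find a source
 * link-layer address option (type 1), as done when building the NA reply. */
#include <stdio.h>
#include <stddef.h>

#define ND_OPT_SOURCE_LL_ADDR 1

static const unsigned char *find_src_lladdr(const unsigned char *opt, size_t olen)
{
        size_t i;

        for (i = 0; i + 1 < olen; i += (size_t)opt[i + 1] << 3) {
                if (opt[i + 1] == 0)            /* malformed option, stop */
                        break;
                if (opt[i] == ND_OPT_SOURCE_LL_ADDR)
                        return opt + i + 2;     /* skip 2-byte option header */
        }
        return NULL;
}

int main(void)
{
        /* One source link-layer address option carrying a MAC address. */
        unsigned char opts[] = { 1, 1, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const unsigned char *mac = find_src_lladdr(opts, sizeof(opts));

        if (mac)
                printf("%02x:%02x:%02x\n", mac[0], mac[1], mac[2]);
        return 0;
}
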
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 303ce27964c1..9078a6c5a74e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
| @@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
| 1548 | if (reg != last_val) | 1548 | if (reg != last_val) |
| 1549 | return true; | 1549 | return true; |
| 1550 | 1550 | ||
| 1551 | udelay(1); | ||
| 1551 | last_val = reg; | 1552 | last_val = reg; |
| 1552 | if ((reg & 0x7E7FFFEF) == 0x00702400) | 1553 | if ((reg & 0x7E7FFFEF) == 0x00702400) |
| 1553 | continue; | 1554 | continue; |
| @@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
| 1560 | default: | 1561 | default: |
| 1561 | return true; | 1562 | return true; |
| 1562 | } | 1563 | } |
| 1563 | |||
| 1564 | udelay(1); | ||
| 1565 | } while (count-- > 0); | 1564 | } while (count-- > 0); |
| 1566 | 1565 | ||
| 1567 | return false; | 1566 | return false; |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f042a18c8495..55897d508a76 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
| @@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
| 2063 | 2063 | ||
| 2064 | ATH_TXBUF_RESET(bf); | 2064 | ATH_TXBUF_RESET(bf); |
| 2065 | 2065 | ||
| 2066 | if (tid) { | 2066 | if (tid && ieee80211_is_data_present(hdr->frame_control)) { |
| 2067 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; | 2067 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; |
| 2068 | seqno = tid->seq_next; | 2068 | seqno = tid->seq_next; |
| 2069 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); | 2069 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); |
| @@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
| 2186 | txq->stopped = true; | 2186 | txq->stopped = true; |
| 2187 | } | 2187 | } |
| 2188 | 2188 | ||
| 2189 | if (txctl->an) | 2189 | if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) |
| 2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); | 2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); |
| 2191 | 2191 | ||
| 2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { | 2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 119ee6eaf1c3..ddaa9efd053d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
| @@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, | |||
| 1948 | if (pkt_pad == NULL) | 1948 | if (pkt_pad == NULL) |
| 1949 | return -ENOMEM; | 1949 | return -ENOMEM; |
| 1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); | 1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); |
| 1951 | if (unlikely(ret < 0)) | 1951 | if (unlikely(ret < 0)) { |
| 1952 | kfree_skb(pkt_pad); | ||
| 1952 | return ret; | 1953 | return ret; |
| 1954 | } | ||
| 1953 | memcpy(pkt_pad->data, | 1955 | memcpy(pkt_pad->data, |
| 1954 | pkt->data + pkt->len - tail_chop, | 1956 | pkt->data + pkt->len - tail_chop, |
| 1955 | tail_chop); | 1957 | tail_chop); |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 7f8b5d156c8c..41d4a8167dc3 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
| @@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
| 5460 | 5460 | ||
| 5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); | 5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); |
| 5462 | 5462 | ||
| 5463 | rt2800_bbp_write(rt2x00dev, 69, 0x0d); | 5463 | rt2800_bbp_write(rt2x00dev, 69, 0x12); |
| 5464 | rt2800_bbp_write(rt2x00dev, 70, 0x06); | ||
| 5465 | rt2800_bbp_write(rt2x00dev, 73, 0x13); | 5464 | rt2800_bbp_write(rt2x00dev, 73, 0x13); |
| 5466 | rt2800_bbp_write(rt2x00dev, 75, 0x46); | 5465 | rt2800_bbp_write(rt2x00dev, 75, 0x46); |
| 5467 | rt2800_bbp_write(rt2x00dev, 76, 0x28); | 5466 | rt2800_bbp_write(rt2x00dev, 76, 0x28); |
| 5468 | 5467 | ||
| 5469 | rt2800_bbp_write(rt2x00dev, 77, 0x59); | 5468 | rt2800_bbp_write(rt2x00dev, 77, 0x59); |
| 5470 | 5469 | ||
| 5470 | rt2800_bbp_write(rt2x00dev, 70, 0x0a); | ||
| 5471 | |||
| 5471 | rt2800_bbp_write(rt2x00dev, 79, 0x13); | 5472 | rt2800_bbp_write(rt2x00dev, 79, 0x13); |
| 5472 | rt2800_bbp_write(rt2x00dev, 80, 0x05); | 5473 | rt2800_bbp_write(rt2x00dev, 80, 0x05); |
| 5473 | rt2800_bbp_write(rt2x00dev, 81, 0x33); | 5474 | rt2800_bbp_write(rt2x00dev, 81, 0x33); |
| @@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
| 5510 | if (rt2x00_rt(rt2x00dev, RT5392)) { | 5511 | if (rt2x00_rt(rt2x00dev, RT5392)) { |
| 5511 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); | 5512 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); |
| 5512 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); | 5513 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); |
| 5513 | rt2800_bbp_write(rt2x00dev, 148, 0x84); | ||
| 5514 | } | 5514 | } |
| 5515 | 5515 | ||
| 5516 | rt2800_disable_unused_dac_adc(rt2x00dev); | 5516 | rt2800_disable_unused_dac_adc(rt2x00dev); |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index ed880891cb7c..e9279a8c1e1c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
| @@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | |||
| 594 | mp_req->mp_resp_bd = NULL; | 594 | mp_req->mp_resp_bd = NULL; |
| 595 | } | 595 | } |
| 596 | if (mp_req->req_buf) { | 596 | if (mp_req->req_buf) { |
| 597 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 597 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 598 | mp_req->req_buf, | 598 | mp_req->req_buf, |
| 599 | mp_req->req_buf_dma); | 599 | mp_req->req_buf_dma); |
| 600 | mp_req->req_buf = NULL; | 600 | mp_req->req_buf = NULL; |
| 601 | } | 601 | } |
| 602 | if (mp_req->resp_buf) { | 602 | if (mp_req->resp_buf) { |
| 603 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 603 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 604 | mp_req->resp_buf, | 604 | mp_req->resp_buf, |
| 605 | mp_req->resp_buf_dma); | 605 | mp_req->resp_buf_dma); |
| 606 | mp_req->resp_buf = NULL; | 606 | mp_req->resp_buf = NULL; |
| @@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 622 | 622 | ||
| 623 | mp_req->req_len = sizeof(struct fcp_cmnd); | 623 | mp_req->req_len = sizeof(struct fcp_cmnd); |
| 624 | io_req->data_xfer_len = mp_req->req_len; | 624 | io_req->data_xfer_len = mp_req->req_len; |
| 625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 626 | &mp_req->req_buf_dma, | 626 | &mp_req->req_buf_dma, |
| 627 | GFP_ATOMIC); | 627 | GFP_ATOMIC); |
| 628 | if (!mp_req->req_buf) { | 628 | if (!mp_req->req_buf) { |
| @@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 631 | return FAILED; | 631 | return FAILED; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 635 | &mp_req->resp_buf_dma, | 635 | &mp_req->resp_buf_dma, |
| 636 | GFP_ATOMIC); | 636 | GFP_ATOMIC); |
| 637 | if (!mp_req->resp_buf) { | 637 | if (!mp_req->resp_buf) { |
| @@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 639 | bnx2fc_free_mp_resc(io_req); | 639 | bnx2fc_free_mp_resc(io_req); |
| 640 | return FAILED; | 640 | return FAILED; |
| 641 | } | 641 | } |
| 642 | memset(mp_req->req_buf, 0, PAGE_SIZE); | 642 | memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); |
| 643 | memset(mp_req->resp_buf, 0, PAGE_SIZE); | 643 | memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); |
| 644 | 644 | ||
| 645 | /* Allocate and map mp_req_bd and mp_resp_bd */ | 645 | /* Allocate and map mp_req_bd and mp_resp_bd */ |
| 646 | sz = sizeof(struct fcoe_bd_ctx); | 646 | sz = sizeof(struct fcoe_bd_ctx); |
| @@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 665 | mp_req_bd = mp_req->mp_req_bd; | 665 | mp_req_bd = mp_req->mp_req_bd; |
| 666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
| 667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
| 668 | mp_req_bd->buf_len = PAGE_SIZE; | 668 | mp_req_bd->buf_len = CNIC_PAGE_SIZE; |
| 669 | mp_req_bd->flags = 0; | 669 | mp_req_bd->flags = 0; |
| 670 | 670 | ||
| 671 | /* | 671 | /* |
| @@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
| 677 | addr = mp_req->resp_buf_dma; | 677 | addr = mp_req->resp_buf_dma; |
| 678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
| 679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
| 680 | mp_resp_bd->buf_len = PAGE_SIZE; | 680 | mp_resp_bd->buf_len = CNIC_PAGE_SIZE; |
| 681 | mp_resp_bd->flags = 0; | 681 | mp_resp_bd->flags = 0; |
| 682 | 682 | ||
| 683 | return SUCCESS; | 683 | return SUCCESS; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 4d93177dfb53..d9bae5672273 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
| @@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 673 | 673 | ||
| 674 | /* Allocate and map SQ */ | 674 | /* Allocate and map SQ */ |
| 675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; | 675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; |
| 676 | tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 676 | tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 677 | CNIC_PAGE_MASK; | ||
| 677 | 678 | ||
| 678 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 679 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
| 679 | &tgt->sq_dma, GFP_KERNEL); | 680 | &tgt->sq_dma, GFP_KERNEL); |
| @@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 686 | 687 | ||
| 687 | /* Allocate and map CQ */ | 688 | /* Allocate and map CQ */ |
| 688 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; | 689 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; |
| 689 | tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 690 | tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 691 | CNIC_PAGE_MASK; | ||
| 690 | 692 | ||
| 691 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 693 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
| 692 | &tgt->cq_dma, GFP_KERNEL); | 694 | &tgt->cq_dma, GFP_KERNEL); |
| @@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 699 | 701 | ||
| 700 | /* Allocate and map RQ and RQ PBL */ | 702 | /* Allocate and map RQ and RQ PBL */ |
| 701 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; | 703 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; |
| 702 | tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 704 | tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 705 | CNIC_PAGE_MASK; | ||
| 703 | 706 | ||
| 704 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, | 707 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, |
| 705 | &tgt->rq_dma, GFP_KERNEL); | 708 | &tgt->rq_dma, GFP_KERNEL); |
| @@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 710 | } | 713 | } |
| 711 | memset(tgt->rq, 0, tgt->rq_mem_size); | 714 | memset(tgt->rq, 0, tgt->rq_mem_size); |
| 712 | 715 | ||
| 713 | tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); | 716 | tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 714 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 717 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & |
| 718 | CNIC_PAGE_MASK; | ||
| 715 | 719 | ||
| 716 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, | 720 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, |
| 717 | &tgt->rq_pbl_dma, GFP_KERNEL); | 721 | &tgt->rq_pbl_dma, GFP_KERNEL); |
| @@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 722 | } | 726 | } |
| 723 | 727 | ||
| 724 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); | 728 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); |
| 725 | num_pages = tgt->rq_mem_size / PAGE_SIZE; | 729 | num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; |
| 726 | page = tgt->rq_dma; | 730 | page = tgt->rq_dma; |
| 727 | pbl = (u32 *)tgt->rq_pbl; | 731 | pbl = (u32 *)tgt->rq_pbl; |
| 728 | 732 | ||
| @@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 731 | pbl++; | 735 | pbl++; |
| 732 | *pbl = (u32)((u64)page >> 32); | 736 | *pbl = (u32)((u64)page >> 32); |
| 733 | pbl++; | 737 | pbl++; |
| 734 | page += PAGE_SIZE; | 738 | page += CNIC_PAGE_SIZE; |
| 735 | } | 739 | } |
| 736 | 740 | ||
| 737 | /* Allocate and map XFERQ */ | 741 | /* Allocate and map XFERQ */ |
| 738 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; | 742 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; |
| 739 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & | 743 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 740 | PAGE_MASK; | 744 | CNIC_PAGE_MASK; |
| 741 | 745 | ||
| 742 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, | 746 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, |
| 743 | &tgt->xferq_dma, GFP_KERNEL); | 747 | &tgt->xferq_dma, GFP_KERNEL); |
| @@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 750 | 754 | ||
| 751 | /* Allocate and map CONFQ & CONFQ PBL */ | 755 | /* Allocate and map CONFQ & CONFQ PBL */ |
| 752 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; | 756 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; |
| 753 | tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & | 757 | tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 754 | PAGE_MASK; | 758 | CNIC_PAGE_MASK; |
| 755 | 759 | ||
| 756 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, | 760 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, |
| 757 | &tgt->confq_dma, GFP_KERNEL); | 761 | &tgt->confq_dma, GFP_KERNEL); |
| @@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 763 | memset(tgt->confq, 0, tgt->confq_mem_size); | 767 | memset(tgt->confq, 0, tgt->confq_mem_size); |
| 764 | 768 | ||
| 765 | tgt->confq_pbl_size = | 769 | tgt->confq_pbl_size = |
| 766 | (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); | 770 | (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 767 | tgt->confq_pbl_size = | 771 | tgt->confq_pbl_size = |
| 768 | (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 772 | (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 769 | 773 | ||
| 770 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, | 774 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, |
| 771 | tgt->confq_pbl_size, | 775 | tgt->confq_pbl_size, |
| @@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 777 | } | 781 | } |
| 778 | 782 | ||
| 779 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); | 783 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); |
| 780 | num_pages = tgt->confq_mem_size / PAGE_SIZE; | 784 | num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; |
| 781 | page = tgt->confq_dma; | 785 | page = tgt->confq_dma; |
| 782 | pbl = (u32 *)tgt->confq_pbl; | 786 | pbl = (u32 *)tgt->confq_pbl; |
| 783 | 787 | ||
| @@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 786 | pbl++; | 790 | pbl++; |
| 787 | *pbl = (u32)((u64)page >> 32); | 791 | *pbl = (u32)((u64)page >> 32); |
| 788 | pbl++; | 792 | pbl++; |
| 789 | page += PAGE_SIZE; | 793 | page += CNIC_PAGE_SIZE; |
| 790 | } | 794 | } |
| 791 | 795 | ||
| 792 | /* Allocate and map ConnDB */ | 796 | /* Allocate and map ConnDB */ |
| @@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
| 805 | 809 | ||
| 806 | /* Allocate and map LCQ */ | 810 | /* Allocate and map LCQ */ |
| 807 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; | 811 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; |
| 808 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & | 812 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
| 809 | PAGE_MASK; | 813 | CNIC_PAGE_MASK; |
| 810 | 814 | ||
| 811 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, | 815 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, |
| 812 | &tgt->lcq_dma, GFP_KERNEL); | 816 | &tgt->lcq_dma, GFP_KERNEL); |
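
For illustration, a standalone C sketch (not kernel code) of the round-up-and-mask pattern these bnx2fc hunks (and the bnx2i hunks below) switch from PAGE_SIZE/PAGE_MASK to CNIC_PAGE_SIZE/CNIC_PAGE_MASK: queue memory must be aligned to the page size the CNIC firmware expects rather than the host page size, which matters on kernels built with pages larger than 4 KiB. The 4096-byte value used here is an assumption for the example.

/* Illustrative sketch, not kernel code: round a queue size up to the next
 * CNIC page boundary using the add-then-mask idiom from the hunks above. */
#include <stdio.h>

#define CNIC_PAGE_SIZE 4096UL                   /* assumed value */
#define CNIC_PAGE_MASK (~(CNIC_PAGE_SIZE - 1))

static unsigned long cnic_align(unsigned long size)
{
        return (size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
}

int main(void)
{
        printf("%lu\n", cnic_align(1));         /* -> 4096 */
        printf("%lu\n", cnic_align(4096));      /* -> 4096 */
        printf("%lu\n", cnic_align(4097));      /* -> 8192 */
        return 0;
}
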
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index e4cf23df4b4f..b87a1933f880 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
| @@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 61 | * yield integral num of page buffers | 61 | * yield integral num of page buffers |
| 62 | */ | 62 | */ |
| 63 | /* adjust SQ */ | 63 | /* adjust SQ */ |
| 64 | num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 64 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
| 65 | if (hba->max_sqes < num_elements_per_pg) | 65 | if (hba->max_sqes < num_elements_per_pg) |
| 66 | hba->max_sqes = num_elements_per_pg; | 66 | hba->max_sqes = num_elements_per_pg; |
| 67 | else if (hba->max_sqes % num_elements_per_pg) | 67 | else if (hba->max_sqes % num_elements_per_pg) |
| @@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 69 | ~(num_elements_per_pg - 1); | 69 | ~(num_elements_per_pg - 1); |
| 70 | 70 | ||
| 71 | /* adjust CQ */ | 71 | /* adjust CQ */ |
| 72 | num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; | 72 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; |
| 73 | if (hba->max_cqes < num_elements_per_pg) | 73 | if (hba->max_cqes < num_elements_per_pg) |
| 74 | hba->max_cqes = num_elements_per_pg; | 74 | hba->max_cqes = num_elements_per_pg; |
| 75 | else if (hba->max_cqes % num_elements_per_pg) | 75 | else if (hba->max_cqes % num_elements_per_pg) |
| @@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
| 77 | ~(num_elements_per_pg - 1); | 77 | ~(num_elements_per_pg - 1); |
| 78 | 78 | ||
| 79 | /* adjust RQ */ | 79 | /* adjust RQ */ |
| 80 | num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; | 80 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; |
| 81 | if (hba->max_rqes < num_elements_per_pg) | 81 | if (hba->max_rqes < num_elements_per_pg) |
| 82 | hba->max_rqes = num_elements_per_pg; | 82 | hba->max_rqes = num_elements_per_pg; |
| 83 | else if (hba->max_rqes % num_elements_per_pg) | 83 | else if (hba->max_rqes % num_elements_per_pg) |
| @@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 959 | 959 | ||
| 960 | /* SQ page table */ | 960 | /* SQ page table */ |
| 961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); | 961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); |
| 962 | num_pages = ep->qp.sq_mem_size / PAGE_SIZE; | 962 | num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; |
| 963 | page = ep->qp.sq_phys; | 963 | page = ep->qp.sq_phys; |
| 964 | 964 | ||
| 965 | if (cnic_dev_10g) | 965 | if (cnic_dev_10g) |
| @@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 973 | ptbl++; | 973 | ptbl++; |
| 974 | *ptbl = (u32) ((u64) page >> 32); | 974 | *ptbl = (u32) ((u64) page >> 32); |
| 975 | ptbl++; | 975 | ptbl++; |
| 976 | page += PAGE_SIZE; | 976 | page += CNIC_PAGE_SIZE; |
| 977 | } else { | 977 | } else { |
| 978 | /* PTE is written in big endian format for | 978 | /* PTE is written in big endian format for |
| 979 | * 5706/5708/5709 devices */ | 979 | * 5706/5708/5709 devices */ |
| @@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 981 | ptbl++; | 981 | ptbl++; |
| 982 | *ptbl = (u32) page; | 982 | *ptbl = (u32) page; |
| 983 | ptbl++; | 983 | ptbl++; |
| 984 | page += PAGE_SIZE; | 984 | page += CNIC_PAGE_SIZE; |
| 985 | } | 985 | } |
| 986 | } | 986 | } |
| 987 | 987 | ||
| 988 | /* RQ page table */ | 988 | /* RQ page table */ |
| 989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); | 989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); |
| 990 | num_pages = ep->qp.rq_mem_size / PAGE_SIZE; | 990 | num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; |
| 991 | page = ep->qp.rq_phys; | 991 | page = ep->qp.rq_phys; |
| 992 | 992 | ||
| 993 | if (cnic_dev_10g) | 993 | if (cnic_dev_10g) |
| @@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1001 | ptbl++; | 1001 | ptbl++; |
| 1002 | *ptbl = (u32) ((u64) page >> 32); | 1002 | *ptbl = (u32) ((u64) page >> 32); |
| 1003 | ptbl++; | 1003 | ptbl++; |
| 1004 | page += PAGE_SIZE; | 1004 | page += CNIC_PAGE_SIZE; |
| 1005 | } else { | 1005 | } else { |
| 1006 | /* PTE is written in big endian format for | 1006 | /* PTE is written in big endian format for |
| 1007 | * 5706/5708/5709 devices */ | 1007 | * 5706/5708/5709 devices */ |
| @@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1009 | ptbl++; | 1009 | ptbl++; |
| 1010 | *ptbl = (u32) page; | 1010 | *ptbl = (u32) page; |
| 1011 | ptbl++; | 1011 | ptbl++; |
| 1012 | page += PAGE_SIZE; | 1012 | page += CNIC_PAGE_SIZE; |
| 1013 | } | 1013 | } |
| 1014 | } | 1014 | } |
| 1015 | 1015 | ||
| 1016 | /* CQ page table */ | 1016 | /* CQ page table */ |
| 1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); | 1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); |
| 1018 | num_pages = ep->qp.cq_mem_size / PAGE_SIZE; | 1018 | num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; |
| 1019 | page = ep->qp.cq_phys; | 1019 | page = ep->qp.cq_phys; |
| 1020 | 1020 | ||
| 1021 | if (cnic_dev_10g) | 1021 | if (cnic_dev_10g) |
| @@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1029 | ptbl++; | 1029 | ptbl++; |
| 1030 | *ptbl = (u32) ((u64) page >> 32); | 1030 | *ptbl = (u32) ((u64) page >> 32); |
| 1031 | ptbl++; | 1031 | ptbl++; |
| 1032 | page += PAGE_SIZE; | 1032 | page += CNIC_PAGE_SIZE; |
| 1033 | } else { | 1033 | } else { |
| 1034 | /* PTE is written in big endian format for | 1034 | /* PTE is written in big endian format for |
| 1035 | * 5706/5708/5709 devices */ | 1035 | * 5706/5708/5709 devices */ |
| @@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
| 1037 | ptbl++; | 1037 | ptbl++; |
| 1038 | *ptbl = (u32) page; | 1038 | *ptbl = (u32) page; |
| 1039 | ptbl++; | 1039 | ptbl++; |
| 1040 | page += PAGE_SIZE; | 1040 | page += CNIC_PAGE_SIZE; |
| 1041 | } | 1041 | } |
| 1042 | } | 1042 | } |
| 1043 | } | 1043 | } |
| @@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1064 | /* Allocate page table memory for SQ which is page aligned */ | 1064 | /* Allocate page table memory for SQ which is page aligned */ |
| 1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; | 1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; |
| 1066 | ep->qp.sq_mem_size = | 1066 | ep->qp.sq_mem_size = |
| 1067 | (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1067 | (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1068 | ep->qp.sq_pgtbl_size = | 1068 | ep->qp.sq_pgtbl_size = |
| 1069 | (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); | 1069 | (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1070 | ep->qp.sq_pgtbl_size = | 1070 | ep->qp.sq_pgtbl_size = |
| 1071 | (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1071 | (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1072 | 1072 | ||
| 1073 | ep->qp.sq_pgtbl_virt = | 1073 | ep->qp.sq_pgtbl_virt = |
| 1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, | 1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, |
| @@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1101 | /* Allocate page table memory for CQ which is page aligned */ | 1101 | /* Allocate page table memory for CQ which is page aligned */ |
| 1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; | 1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; |
| 1103 | ep->qp.cq_mem_size = | 1103 | ep->qp.cq_mem_size = |
| 1104 | (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1104 | (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1105 | ep->qp.cq_pgtbl_size = | 1105 | ep->qp.cq_pgtbl_size = |
| 1106 | (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); | 1106 | (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1107 | ep->qp.cq_pgtbl_size = | 1107 | ep->qp.cq_pgtbl_size = |
| 1108 | (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1108 | (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1109 | 1109 | ||
| 1110 | ep->qp.cq_pgtbl_virt = | 1110 | ep->qp.cq_pgtbl_virt = |
| 1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, | 1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, |
| @@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
| 1144 | /* Allocate page table memory for RQ which is page aligned */ | 1144 | /* Allocate page table memory for RQ which is page aligned */ |
| 1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; | 1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; |
| 1146 | ep->qp.rq_mem_size = | 1146 | ep->qp.rq_mem_size = |
| 1147 | (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1147 | (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1148 | ep->qp.rq_pgtbl_size = | 1148 | ep->qp.rq_pgtbl_size = |
| 1149 | (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); | 1149 | (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
| 1150 | ep->qp.rq_pgtbl_size = | 1150 | ep->qp.rq_pgtbl_size = |
| 1151 | (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1151 | (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
| 1152 | 1152 | ||
| 1153 | ep->qp.rq_pgtbl_virt = | 1153 | ep->qp.rq_pgtbl_virt = |
| 1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, | 1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, |
| @@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
| 1270 | bnx2i_adjust_qp_size(hba); | 1270 | bnx2i_adjust_qp_size(hba); |
| 1271 | 1271 | ||
| 1272 | iscsi_init.flags = | 1272 | iscsi_init.flags = |
| 1273 | ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; | 1273 | (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; |
| 1274 | if (en_tcp_dack) | 1274 | if (en_tcp_dack) |
| 1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; | 1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; |
| 1276 | iscsi_init.reserved0 = 0; | 1276 | iscsi_init.reserved0 = 0; |
| @@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
| 1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); | 1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); |
| 1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; | 1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; |
| 1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; | 1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; |
| 1291 | iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 1291 | iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
| 1292 | iscsi_init.sq_num_wqes = hba->max_sqes; | 1292 | iscsi_init.sq_num_wqes = hba->max_sqes; |
| 1293 | iscsi_init.cq_log_wqes_per_page = | 1293 | iscsi_init.cq_log_wqes_per_page = |
| 1294 | (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); | 1294 | (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); |
| 1295 | iscsi_init.cq_num_wqes = hba->max_cqes; | 1295 | iscsi_init.cq_num_wqes = hba->max_cqes; |
| 1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + | 1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + |
| 1297 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1297 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
| 1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + | 1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + |
| 1299 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1299 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
| 1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; | 1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; |
| 1301 | iscsi_init.rq_num_wqes = hba->max_rqes; | 1301 | iscsi_init.rq_num_wqes = hba->max_rqes; |
| 1302 | 1302 | ||
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 854dad7d5b03..c8b0aff5bbd4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
| @@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 525 | struct iscsi_bd *mp_bdt; | 525 | struct iscsi_bd *mp_bdt; |
| 526 | u64 addr; | 526 | u64 addr; |
| 527 | 527 | ||
| 528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 529 | &hba->mp_bd_dma, GFP_KERNEL); | 529 | &hba->mp_bd_dma, GFP_KERNEL); |
| 530 | if (!hba->mp_bd_tbl) { | 530 | if (!hba->mp_bd_tbl) { |
| 531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); | 531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); |
| @@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 533 | goto out; | 533 | goto out; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| 536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, |
| 537 | CNIC_PAGE_SIZE, | ||
| 537 | &hba->dummy_buf_dma, GFP_KERNEL); | 538 | &hba->dummy_buf_dma, GFP_KERNEL); |
| 538 | if (!hba->dummy_buffer) { | 539 | if (!hba->dummy_buffer) { |
| 539 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); | 540 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); |
| 540 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 541 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 541 | hba->mp_bd_tbl, hba->mp_bd_dma); | 542 | hba->mp_bd_tbl, hba->mp_bd_dma); |
| 542 | hba->mp_bd_tbl = NULL; | 543 | hba->mp_bd_tbl = NULL; |
| 543 | rc = -1; | 544 | rc = -1; |
| @@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
| 548 | addr = (unsigned long) hba->dummy_buf_dma; | 549 | addr = (unsigned long) hba->dummy_buf_dma; |
| 549 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; | 550 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; |
| 550 | mp_bdt->buffer_addr_hi = addr >> 32; | 551 | mp_bdt->buffer_addr_hi = addr >> 32; |
| 551 | mp_bdt->buffer_length = PAGE_SIZE; | 552 | mp_bdt->buffer_length = CNIC_PAGE_SIZE; |
| 552 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | | 553 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | |
| 553 | ISCSI_BD_FIRST_IN_BD_CHAIN; | 554 | ISCSI_BD_FIRST_IN_BD_CHAIN; |
| 554 | out: | 555 | out: |
| @@ -565,12 +566,12 @@ out: | |||
| 565 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) | 566 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) |
| 566 | { | 567 | { |
| 567 | if (hba->mp_bd_tbl) { | 568 | if (hba->mp_bd_tbl) { |
| 568 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 569 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 569 | hba->mp_bd_tbl, hba->mp_bd_dma); | 570 | hba->mp_bd_tbl, hba->mp_bd_dma); |
| 570 | hba->mp_bd_tbl = NULL; | 571 | hba->mp_bd_tbl = NULL; |
| 571 | } | 572 | } |
| 572 | if (hba->dummy_buffer) { | 573 | if (hba->dummy_buffer) { |
| 573 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 574 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 574 | hba->dummy_buffer, hba->dummy_buf_dma); | 575 | hba->dummy_buffer, hba->dummy_buf_dma); |
| 575 | hba->dummy_buffer = NULL; | 576 | hba->dummy_buffer = NULL; |
| 576 | } | 577 | } |
| @@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, | |||
| 934 | struct bnx2i_conn *bnx2i_conn) | 935 | struct bnx2i_conn *bnx2i_conn) |
| 935 | { | 936 | { |
| 936 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { | 937 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { |
| 937 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 938 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 938 | bnx2i_conn->gen_pdu.resp_bd_tbl, | 939 | bnx2i_conn->gen_pdu.resp_bd_tbl, |
| 939 | bnx2i_conn->gen_pdu.resp_bd_dma); | 940 | bnx2i_conn->gen_pdu.resp_bd_dma); |
| 940 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; | 941 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; |
| 941 | } | 942 | } |
| 942 | 943 | ||
| 943 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { | 944 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { |
| 944 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 945 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 945 | bnx2i_conn->gen_pdu.req_bd_tbl, | 946 | bnx2i_conn->gen_pdu.req_bd_tbl, |
| 946 | bnx2i_conn->gen_pdu.req_bd_dma); | 947 | bnx2i_conn->gen_pdu.req_bd_dma); |
| 947 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 948 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
| @@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
| 998 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; | 999 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; |
| 999 | 1000 | ||
| 1000 | bnx2i_conn->gen_pdu.req_bd_tbl = | 1001 | bnx2i_conn->gen_pdu.req_bd_tbl = |
| 1001 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1002 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1002 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); | 1003 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); |
| 1003 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) | 1004 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) |
| 1004 | goto login_req_bd_tbl_failure; | 1005 | goto login_req_bd_tbl_failure; |
| 1005 | 1006 | ||
| 1006 | bnx2i_conn->gen_pdu.resp_bd_tbl = | 1007 | bnx2i_conn->gen_pdu.resp_bd_tbl = |
| 1007 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1008 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1008 | &bnx2i_conn->gen_pdu.resp_bd_dma, | 1009 | &bnx2i_conn->gen_pdu.resp_bd_dma, |
| 1009 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
| 1010 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) | 1011 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) |
| @@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
| 1013 | return 0; | 1014 | return 0; |
| 1014 | 1015 | ||
| 1015 | login_resp_bd_tbl_failure: | 1016 | login_resp_bd_tbl_failure: |
| 1016 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1017 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
| 1017 | bnx2i_conn->gen_pdu.req_bd_tbl, | 1018 | bnx2i_conn->gen_pdu.req_bd_tbl, |
| 1018 | bnx2i_conn->gen_pdu.req_bd_dma); | 1019 | bnx2i_conn->gen_pdu.req_bd_dma); |
| 1019 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 1020 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
diff --git a/include/linux/security.h b/include/linux/security.h index 5623a7f965b7..2fc42d191f79 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
| 1040 | * Allocate a security structure to the xp->security field; the security | 1040 | * Allocate a security structure to the xp->security field; the security |
| 1041 | * field is initialized to NULL when the xfrm_policy is allocated. | 1041 | * field is initialized to NULL when the xfrm_policy is allocated. |
| 1042 | * Return 0 if operation was successful (memory to allocate, legal context) | 1042 | * Return 0 if operation was successful (memory to allocate, legal context) |
| 1043 | * @gfp is to specify the context for the allocation | ||
| 1043 | * @xfrm_policy_clone_security: | 1044 | * @xfrm_policy_clone_security: |
| 1044 | * @old_ctx contains an existing xfrm_sec_ctx. | 1045 | * @old_ctx contains an existing xfrm_sec_ctx. |
| 1045 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. | 1046 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. |
| @@ -1683,7 +1684,7 @@ struct security_operations { | |||
| 1683 | 1684 | ||
| 1684 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1685 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
| 1685 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, | 1686 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, |
| 1686 | struct xfrm_user_sec_ctx *sec_ctx); | 1687 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); |
| 1687 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); | 1688 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); |
| 1688 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); | 1689 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); |
| 1689 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); | 1690 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); |
| @@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
| 2859 | 2860 | ||
| 2860 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2861 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
| 2861 | 2862 | ||
| 2862 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); | 2863 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
| 2864 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); | ||
| 2863 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); | 2865 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); |
| 2864 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); | 2866 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
| 2865 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); | 2867 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); |
| @@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); | |||
| 2877 | 2879 | ||
| 2878 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ | 2880 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ |
| 2879 | 2881 | ||
| 2880 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) | 2882 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
| 2883 | struct xfrm_user_sec_ctx *sec_ctx, | ||
| 2884 | gfp_t gfp) | ||
| 2881 | { | 2885 | { |
| 2882 | return 0; | 2886 | return 0; |
| 2883 | } | 2887 | } |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index c3fa80745996..2c14d9cdd57a 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
| @@ -88,6 +88,7 @@ | |||
| 88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) | 88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) |
| 89 | 89 | ||
| 90 | struct cdc_ncm_ctx { | 90 | struct cdc_ncm_ctx { |
| 91 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
| 91 | struct hrtimer tx_timer; | 92 | struct hrtimer tx_timer; |
| 92 | struct tasklet_struct bh; | 93 | struct tasklet_struct bh; |
| 93 | 94 | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index 8c4dd63134d4..743accec6c76 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
| 480 | #ifdef CONFIG_SYN_COOKIES | 480 | #ifdef CONFIG_SYN_COOKIES |
| 481 | #include <linux/ktime.h> | 481 | #include <linux/ktime.h> |
| 482 | 482 | ||
| 483 | /* Syncookies use a monotonic timer which increments every 64 seconds. | 483 | /* Syncookies use a monotonic timer which increments every 60 seconds. |
| 484 | * This counter is used both as a hash input and partially encoded into | 484 | * This counter is used both as a hash input and partially encoded into |
| 485 | * the cookie value. A cookie is only validated further if the delta | 485 | * the cookie value. A cookie is only validated further if the delta |
| 486 | * between the current counter value and the encoded one is less than this, | 486 | * between the current counter value and the encoded one is less than this, |
| 487 | * i.e. a sent cookie is valid only at most for 128 seconds (or less if | 487 | * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if |
| 488 | * the counter advances immediately after a cookie is generated). | 488 | * the counter advances immediately after a cookie is generated). |
| 489 | */ | 489 | */ |
| 490 | #define MAX_SYNCOOKIE_AGE 2 | 490 | #define MAX_SYNCOOKIE_AGE 2 |
| 491 | 491 | ||
| 492 | static inline u32 tcp_cookie_time(void) | 492 | static inline u32 tcp_cookie_time(void) |
| 493 | { | 493 | { |
| 494 | struct timespec now; | 494 | u64 val = get_jiffies_64(); |
| 495 | getnstimeofday(&now); | 495 | |
| 496 | return now.tv_sec >> 6; /* 64 seconds granularity */ | 496 | do_div(val, 60 * HZ); |
| 497 | return val; | ||
| 497 | } | 498 | } |
| 498 | 499 | ||
| 499 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, | 500 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, |
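
For illustration, a standalone C sketch (not kernel code) of the syncookie time bucket computed in the tcp.h hunk above. Plain seconds stand in for get_jiffies_64()/HZ, which is an assumption for the example; the property that matters is a counter advancing every 60 seconds, so a cookie is accepted for at most MAX_SYNCOOKIE_AGE * 60 seconds after it was generated.

/* Illustrative sketch, not kernel code: 60-second cookie buckets and the
 * age check a received cookie must pass. */
#include <stdio.h>

#define MAX_SYNCOOKIE_AGE 2

static unsigned int cookie_time(unsigned long seconds)
{
        return (unsigned int)(seconds / 60);    /* 60-second granularity */
}

int main(void)
{
        unsigned int sent = cookie_time(0);

        /* 90 seconds later: at most one counter tick has passed, still valid. */
        printf("after 90s:  %s\n",
               cookie_time(90) - sent < MAX_SYNCOOKIE_AGE ? "valid" : "expired");
        /* 150 seconds later: two ticks, the cookie is rejected. */
        printf("after 150s: %s\n",
               cookie_time(150) - sent < MAX_SYNCOOKIE_AGE ? "valid" : "expired");
        return 0;
}
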
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a664f7829a6d..df9e6b1a9759 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb) | |||
| 742 | struct nd_msg *msg; | 742 | struct nd_msg *msg; |
| 743 | struct ipv6hdr *hdr; | 743 | struct ipv6hdr *hdr; |
| 744 | 744 | ||
| 745 | if (skb->protocol != htons(ETH_P_ARP)) | 745 | if (skb->protocol != htons(ETH_P_IPV6)) |
| 746 | return false; | 746 | return false; |
| 747 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) | 747 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) |
| 748 | return false; | 748 | return false; |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a0dac2ef9ad..120eecc0f5a4 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -2121,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo); | |||
| 2121 | static int nlmsg_populate_fdb_fill(struct sk_buff *skb, | 2121 | static int nlmsg_populate_fdb_fill(struct sk_buff *skb, |
| 2122 | struct net_device *dev, | 2122 | struct net_device *dev, |
| 2123 | u8 *addr, u32 pid, u32 seq, | 2123 | u8 *addr, u32 pid, u32 seq, |
| 2124 | int type, unsigned int flags) | 2124 | int type, unsigned int flags, |
| 2125 | int nlflags) | ||
| 2125 | { | 2126 | { |
| 2126 | struct nlmsghdr *nlh; | 2127 | struct nlmsghdr *nlh; |
| 2127 | struct ndmsg *ndm; | 2128 | struct ndmsg *ndm; |
| 2128 | 2129 | ||
| 2129 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); | 2130 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); |
| 2130 | if (!nlh) | 2131 | if (!nlh) |
| 2131 | return -EMSGSIZE; | 2132 | return -EMSGSIZE; |
| 2132 | 2133 | ||
| @@ -2164,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) | |||
| 2164 | if (!skb) | 2165 | if (!skb) |
| 2165 | goto errout; | 2166 | goto errout; |
| 2166 | 2167 | ||
| 2167 | err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); | 2168 | err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0); |
| 2168 | if (err < 0) { | 2169 | if (err < 0) { |
| 2169 | kfree_skb(skb); | 2170 | kfree_skb(skb); |
| 2170 | goto errout; | 2171 | goto errout; |
| @@ -2389,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, | |||
| 2389 | 2390 | ||
| 2390 | err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, | 2391 | err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, |
| 2391 | portid, seq, | 2392 | portid, seq, |
| 2392 | RTM_NEWNEIGH, NTF_SELF); | 2393 | RTM_NEWNEIGH, NTF_SELF, |
| 2394 | NLM_F_MULTI); | ||
| 2393 | if (err < 0) | 2395 | if (err < 0) |
| 2394 | return err; | 2396 | return err; |
| 2395 | skip: | 2397 | skip: |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b9b3472975ba..28863570dd60 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
| @@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, | |||
| 2255 | } | 2255 | } |
| 2256 | 2256 | ||
| 2257 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | 2257 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
| 2258 | u32 portid, u32 seq, struct mfc_cache *c, int cmd) | 2258 | u32 portid, u32 seq, struct mfc_cache *c, int cmd, |
| 2259 | int flags) | ||
| 2259 | { | 2260 | { |
| 2260 | struct nlmsghdr *nlh; | 2261 | struct nlmsghdr *nlh; |
| 2261 | struct rtmsg *rtm; | 2262 | struct rtmsg *rtm; |
| 2262 | int err; | 2263 | int err; |
| 2263 | 2264 | ||
| 2264 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); | 2265 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
| 2265 | if (nlh == NULL) | 2266 | if (nlh == NULL) |
| 2266 | return -EMSGSIZE; | 2267 | return -EMSGSIZE; |
| 2267 | 2268 | ||
| @@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | |||
| 2329 | if (skb == NULL) | 2330 | if (skb == NULL) |
| 2330 | goto errout; | 2331 | goto errout; |
| 2331 | 2332 | ||
| 2332 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); | 2333 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
| 2333 | if (err < 0) | 2334 | if (err < 0) |
| 2334 | goto errout; | 2335 | goto errout; |
| 2335 | 2336 | ||
| @@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2368 | if (ipmr_fill_mroute(mrt, skb, | 2369 | if (ipmr_fill_mroute(mrt, skb, |
| 2369 | NETLINK_CB(cb->skb).portid, | 2370 | NETLINK_CB(cb->skb).portid, |
| 2370 | cb->nlh->nlmsg_seq, | 2371 | cb->nlh->nlmsg_seq, |
| 2371 | mfc, RTM_NEWROUTE) < 0) | 2372 | mfc, RTM_NEWROUTE, |
| 2373 | NLM_F_MULTI) < 0) | ||
| 2372 | goto done; | 2374 | goto done; |
| 2373 | next_entry: | 2375 | next_entry: |
| 2374 | e++; | 2376 | e++; |
| @@ -2382,7 +2384,8 @@ next_entry: | |||
| 2382 | if (ipmr_fill_mroute(mrt, skb, | 2384 | if (ipmr_fill_mroute(mrt, skb, |
| 2383 | NETLINK_CB(cb->skb).portid, | 2385 | NETLINK_CB(cb->skb).portid, |
| 2384 | cb->nlh->nlmsg_seq, | 2386 | cb->nlh->nlmsg_seq, |
| 2385 | mfc, RTM_NEWROUTE) < 0) { | 2387 | mfc, RTM_NEWROUTE, |
| 2388 | NLM_F_MULTI) < 0) { | ||
| 2386 | spin_unlock_bh(&mfc_unres_lock); | 2389 | spin_unlock_bh(&mfc_unres_lock); |
| 2387 | goto done; | 2390 | goto done; |
| 2388 | } | 2391 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f91a2e7888..64d6073731d3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1101,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu, | |||
| 1101 | unsigned int fragheaderlen, | 1101 | unsigned int fragheaderlen, |
| 1102 | struct sk_buff *skb, | 1102 | struct sk_buff *skb, |
| 1103 | struct rt6_info *rt, | 1103 | struct rt6_info *rt, |
| 1104 | bool pmtuprobe) | 1104 | unsigned int orig_mtu) |
| 1105 | { | 1105 | { |
| 1106 | if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { | 1106 | if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { |
| 1107 | if (skb == NULL) { | 1107 | if (skb == NULL) { |
| 1108 | /* first fragment, reserve header_len */ | 1108 | /* first fragment, reserve header_len */ |
| 1109 | *mtu = *mtu - rt->dst.header_len; | 1109 | *mtu = orig_mtu - rt->dst.header_len; |
| 1110 | 1110 | ||
| 1111 | } else { | 1111 | } else { |
| 1112 | /* | 1112 | /* |
| 1113 | * this fragment is not first, the headers | 1113 | * this fragment is not first, the headers |
| 1114 | * space is regarded as data space. | 1114 | * space is regarded as data space. |
| 1115 | */ | 1115 | */ |
| 1116 | *mtu = min(*mtu, pmtuprobe ? | 1116 | *mtu = orig_mtu; |
| 1117 | rt->dst.dev->mtu : | ||
| 1118 | dst_mtu(rt->dst.path)); | ||
| 1119 | } | 1117 | } |
| 1120 | *maxfraglen = ((*mtu - fragheaderlen) & ~7) | 1118 | *maxfraglen = ((*mtu - fragheaderlen) & ~7) |
| 1121 | + fragheaderlen - sizeof(struct frag_hdr); | 1119 | + fragheaderlen - sizeof(struct frag_hdr); |
| @@ -1132,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
| 1132 | struct ipv6_pinfo *np = inet6_sk(sk); | 1130 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 1133 | struct inet_cork *cork; | 1131 | struct inet_cork *cork; |
| 1134 | struct sk_buff *skb, *skb_prev = NULL; | 1132 | struct sk_buff *skb, *skb_prev = NULL; |
| 1135 | unsigned int maxfraglen, fragheaderlen, mtu; | 1133 | unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; |
| 1136 | int exthdrlen; | 1134 | int exthdrlen; |
| 1137 | int dst_exthdrlen; | 1135 | int dst_exthdrlen; |
| 1138 | int hh_len; | 1136 | int hh_len; |
| @@ -1214,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
| 1214 | dst_exthdrlen = 0; | 1212 | dst_exthdrlen = 0; |
| 1215 | mtu = cork->fragsize; | 1213 | mtu = cork->fragsize; |
| 1216 | } | 1214 | } |
| 1215 | orig_mtu = mtu; | ||
| 1217 | 1216 | ||
| 1218 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); | 1217 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); |
| 1219 | 1218 | ||
| @@ -1311,8 +1310,7 @@ alloc_new_skb: | |||
| 1311 | if (skb == NULL || skb_prev == NULL) | 1310 | if (skb == NULL || skb_prev == NULL) |
| 1312 | ip6_append_data_mtu(&mtu, &maxfraglen, | 1311 | ip6_append_data_mtu(&mtu, &maxfraglen, |
| 1313 | fragheaderlen, skb, rt, | 1312 | fragheaderlen, skb, rt, |
| 1314 | np->pmtudisc >= | 1313 | orig_mtu); |
| 1315 | IPV6_PMTUDISC_PROBE); | ||
| 1316 | 1314 | ||
| 1317 | skb_prev = skb; | 1315 | skb_prev = skb; |
| 1318 | 1316 | ||
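Note on the ip6_output.c change: the cork MTU is now captured once, before any fragment is built, and later fragments derive maxfraglen from that saved value instead of re-reading a path MTU that may have shrunk underneath, which makes the old pmtuprobe parameter redundant. Linearized, the logic after this hunk looks roughly like:

        mtu = cork->fragsize;
        orig_mtu = mtu;                 /* remembered before the copy loop */

        /* ... when ip6_append_data_mtu() runs for a new fragment ... */
        if (skb == NULL)                /* first fragment reserves header_len */
                mtu = orig_mtu - rt->dst.header_len;
        else                            /* later fragments: header space is data space */
                mtu = orig_mtu;
        maxfraglen = ((mtu - fragheaderlen) & ~7)
                     + fragheaderlen - sizeof(struct frag_hdr);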
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0eb4038a4d63..8737400af0a0 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
| @@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net, | |||
| 2349 | } | 2349 | } |
| 2350 | 2350 | ||
| 2351 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, | 2351 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
| 2352 | u32 portid, u32 seq, struct mfc6_cache *c, int cmd) | 2352 | u32 portid, u32 seq, struct mfc6_cache *c, int cmd, |
| 2353 | int flags) | ||
| 2353 | { | 2354 | { |
| 2354 | struct nlmsghdr *nlh; | 2355 | struct nlmsghdr *nlh; |
| 2355 | struct rtmsg *rtm; | 2356 | struct rtmsg *rtm; |
| 2356 | int err; | 2357 | int err; |
| 2357 | 2358 | ||
| 2358 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); | 2359 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
| 2359 | if (nlh == NULL) | 2360 | if (nlh == NULL) |
| 2360 | return -EMSGSIZE; | 2361 | return -EMSGSIZE; |
| 2361 | 2362 | ||
| @@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, | |||
| 2423 | if (skb == NULL) | 2424 | if (skb == NULL) |
| 2424 | goto errout; | 2425 | goto errout; |
| 2425 | 2426 | ||
| 2426 | err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); | 2427 | err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
| 2427 | if (err < 0) | 2428 | if (err < 0) |
| 2428 | goto errout; | 2429 | goto errout; |
| 2429 | 2430 | ||
| @@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2462 | if (ip6mr_fill_mroute(mrt, skb, | 2463 | if (ip6mr_fill_mroute(mrt, skb, |
| 2463 | NETLINK_CB(cb->skb).portid, | 2464 | NETLINK_CB(cb->skb).portid, |
| 2464 | cb->nlh->nlmsg_seq, | 2465 | cb->nlh->nlmsg_seq, |
| 2465 | mfc, RTM_NEWROUTE) < 0) | 2466 | mfc, RTM_NEWROUTE, |
| 2467 | NLM_F_MULTI) < 0) | ||
| 2466 | goto done; | 2468 | goto done; |
| 2467 | next_entry: | 2469 | next_entry: |
| 2468 | e++; | 2470 | e++; |
| @@ -2476,7 +2478,8 @@ next_entry: | |||
| 2476 | if (ip6mr_fill_mroute(mrt, skb, | 2478 | if (ip6mr_fill_mroute(mrt, skb, |
| 2477 | NETLINK_CB(cb->skb).portid, | 2479 | NETLINK_CB(cb->skb).portid, |
| 2478 | cb->nlh->nlmsg_seq, | 2480 | cb->nlh->nlmsg_seq, |
| 2479 | mfc, RTM_NEWROUTE) < 0) { | 2481 | mfc, RTM_NEWROUTE, |
| 2482 | NLM_F_MULTI) < 0) { | ||
| 2480 | spin_unlock_bh(&mfc_unres_lock); | 2483 | spin_unlock_bh(&mfc_unres_lock); |
| 2481 | goto done; | 2484 | goto done; |
| 2482 | } | 2485 | } |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 1a04c1329362..79326978517a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
| @@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p) | |||
| 433 | return 0; | 433 | return 0; |
| 434 | } | 434 | } |
| 435 | 435 | ||
| 436 | static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx) | 436 | static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx, |
| 437 | gfp_t gfp) | ||
| 437 | { | 438 | { |
| 438 | struct xfrm_user_sec_ctx *uctx = NULL; | 439 | struct xfrm_user_sec_ctx *uctx = NULL; |
| 439 | int ctx_size = sec_ctx->sadb_x_ctx_len; | 440 | int ctx_size = sec_ctx->sadb_x_ctx_len; |
| 440 | 441 | ||
| 441 | uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL); | 442 | uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp); |
| 442 | 443 | ||
| 443 | if (!uctx) | 444 | if (!uctx) |
| 444 | return NULL; | 445 | return NULL; |
| @@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
| 1124 | 1125 | ||
| 1125 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 1126 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
| 1126 | if (sec_ctx != NULL) { | 1127 | if (sec_ctx != NULL) { |
| 1127 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 1128 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
| 1128 | 1129 | ||
| 1129 | if (!uctx) | 1130 | if (!uctx) |
| 1130 | goto out; | 1131 | goto out; |
| @@ -2231,14 +2232,14 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
| 2231 | 2232 | ||
| 2232 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 2233 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
| 2233 | if (sec_ctx != NULL) { | 2234 | if (sec_ctx != NULL) { |
| 2234 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 2235 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
| 2235 | 2236 | ||
| 2236 | if (!uctx) { | 2237 | if (!uctx) { |
| 2237 | err = -ENOBUFS; | 2238 | err = -ENOBUFS; |
| 2238 | goto out; | 2239 | goto out; |
| 2239 | } | 2240 | } |
| 2240 | 2241 | ||
| 2241 | err = security_xfrm_policy_alloc(&xp->security, uctx); | 2242 | err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL); |
| 2242 | kfree(uctx); | 2243 | kfree(uctx); |
| 2243 | 2244 | ||
| 2244 | if (err) | 2245 | if (err) |
| @@ -2335,12 +2336,12 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
| 2335 | 2336 | ||
| 2336 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 2337 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
| 2337 | if (sec_ctx != NULL) { | 2338 | if (sec_ctx != NULL) { |
| 2338 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 2339 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
| 2339 | 2340 | ||
| 2340 | if (!uctx) | 2341 | if (!uctx) |
| 2341 | return -ENOMEM; | 2342 | return -ENOMEM; |
| 2342 | 2343 | ||
| 2343 | err = security_xfrm_policy_alloc(&pol_ctx, uctx); | 2344 | err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL); |
| 2344 | kfree(uctx); | 2345 | kfree(uctx); |
| 2345 | if (err) | 2346 | if (err) |
| 2346 | return err; | 2347 | return err; |
| @@ -3239,8 +3240,8 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, | |||
| 3239 | } | 3240 | } |
| 3240 | if ((*dir = verify_sec_ctx_len(p))) | 3241 | if ((*dir = verify_sec_ctx_len(p))) |
| 3241 | goto out; | 3242 | goto out; |
| 3242 | uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 3243 | uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC); |
| 3243 | *dir = security_xfrm_policy_alloc(&xp->security, uctx); | 3244 | *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC); |
| 3244 | kfree(uctx); | 3245 | kfree(uctx); |
| 3245 | 3246 | ||
| 3246 | if (*dir) | 3247 | if (*dir) |
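Note on the gfp_t plumbing in this file and in the xfrm_user/security/selinux hunks that follow: security_xfrm_policy_alloc() (and the pfkey sec_ctx conversion feeding it) now takes an allocation context, since pfkey_compile_policy() passes GFP_ATOMIC while the process-context callers keep GFP_KERNEL. The callers split along the usual rule:

        /* process context (pfkey_spdadd, pfkey_spddelete, xfrm_user handlers) */
        uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
        err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);

        /* atomic context (pfkey_compile_policy): sleeping allocations not allowed */
        uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
        err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);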
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e9a48baf8551..8601b320b443 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -1174,7 +1174,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in | |||
| 1174 | struct datapath *dp; | 1174 | struct datapath *dp; |
| 1175 | 1175 | ||
| 1176 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); | 1176 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); |
| 1177 | if (!dp) | 1177 | if (IS_ERR(dp)) |
| 1178 | return; | 1178 | return; |
| 1179 | 1179 | ||
| 1180 | WARN(dp->user_features, "Dropping previously announced user features\n"); | 1180 | WARN(dp->user_features, "Dropping previously announced user features\n"); |
| @@ -1762,11 +1762,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1762 | int bucket = cb->args[0], skip = cb->args[1]; | 1762 | int bucket = cb->args[0], skip = cb->args[1]; |
| 1763 | int i, j = 0; | 1763 | int i, j = 0; |
| 1764 | 1764 | ||
| 1765 | rcu_read_lock(); | ||
| 1765 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1766 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
| 1766 | if (!dp) | 1767 | if (!dp) { |
| 1768 | rcu_read_unlock(); | ||
| 1767 | return -ENODEV; | 1769 | return -ENODEV; |
| 1768 | 1770 | } | |
| 1769 | rcu_read_lock(); | ||
| 1770 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { | 1771 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { |
| 1771 | struct vport *vport; | 1772 | struct vport *vport; |
| 1772 | 1773 | ||
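Note on the two datapath.c fixes: lookup_datapath() reports failure with an ERR_PTR() rather than NULL, so the previous NULL test in ovs_dp_reset_user_features() could let an error pointer through to the dereference below it; and the vport dump has to enter its RCU read-side section before get_dp(), since the datapath it returns is RCU-protected. The corrected patterns, as they appear above:

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))                 /* "if (!dp)" misses ERR_PTR() returns */
                return;

        rcu_read_lock();                /* must already be held across get_dp() */
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }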
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 16f4b46161d4..dda451f4429c 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) | |||
| 73 | 73 | ||
| 74 | if ((flow->key.eth.type == htons(ETH_P_IP) || | 74 | if ((flow->key.eth.type == htons(ETH_P_IP) || |
| 75 | flow->key.eth.type == htons(ETH_P_IPV6)) && | 75 | flow->key.eth.type == htons(ETH_P_IPV6)) && |
| 76 | flow->key.ip.frag != OVS_FRAG_TYPE_LATER && | ||
| 76 | flow->key.ip.proto == IPPROTO_TCP && | 77 | flow->key.ip.proto == IPPROTO_TCP && |
| 77 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { | 78 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { |
| 78 | tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); | 79 | tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); |
| @@ -91,7 +92,7 @@ static void stats_read(struct flow_stats *stats, | |||
| 91 | unsigned long *used, __be16 *tcp_flags) | 92 | unsigned long *used, __be16 *tcp_flags) |
| 92 | { | 93 | { |
| 93 | spin_lock(&stats->lock); | 94 | spin_lock(&stats->lock); |
| 94 | if (time_after(stats->used, *used)) | 95 | if (!*used || time_after(stats->used, *used)) |
| 95 | *used = stats->used; | 96 | *used = stats->used; |
| 96 | *tcp_flags |= stats->tcp_flags; | 97 | *tcp_flags |= stats->tcp_flags; |
| 97 | ovs_stats->n_packets += stats->packet_count; | 98 | ovs_stats->n_packets += stats->packet_count; |
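Note on the flow.c hunks: TCP flags are only sampled from packets that are not later fragments (OVS_FRAG_TYPE_LATER), since those carry no TCP header to read; and stats_read() now seeds *used from the first per-CPU bucket unconditionally, because time_after() against an uninitialized zero can evaluate false for a valid jiffies value and drop the timestamp:

        spin_lock(&stats->lock);
        if (!*used || time_after(stats->used, *used))
                *used = stats->used;    /* take the first bucket, then only move forward */
        *tcp_flags |= stats->tcp_flags;
        ovs_stats->n_packets += stats->packet_count;
        spin_unlock(&stats->lock);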
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 11c9ae00837d..642437231ad5 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
| @@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
| 263 | * | 263 | * |
| 264 | * Called with subscriber lock held. | 264 | * Called with subscriber lock held. |
| 265 | */ | 265 | */ |
| 266 | static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | 266 | static int subscr_subscribe(struct tipc_subscr *s, |
| 267 | struct tipc_subscriber *subscriber) | 267 | struct tipc_subscriber *subscriber, |
| 268 | { | 268 | struct tipc_subscription **sub_p) { |
| 269 | struct tipc_subscription *sub; | 269 | struct tipc_subscription *sub; |
| 270 | int swap; | 270 | int swap; |
| 271 | 271 | ||
| @@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
| 276 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { | 276 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { |
| 277 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); | 277 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); |
| 278 | subscr_cancel(s, subscriber); | 278 | subscr_cancel(s, subscriber); |
| 279 | return NULL; | 279 | return 0; |
| 280 | } | 280 | } |
| 281 | 281 | ||
| 282 | /* Refuse subscription if global limit exceeded */ | 282 | /* Refuse subscription if global limit exceeded */ |
| 283 | if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { | 283 | if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { |
| 284 | pr_warn("Subscription rejected, limit reached (%u)\n", | 284 | pr_warn("Subscription rejected, limit reached (%u)\n", |
| 285 | TIPC_MAX_SUBSCRIPTIONS); | 285 | TIPC_MAX_SUBSCRIPTIONS); |
| 286 | subscr_terminate(subscriber); | 286 | return -EINVAL; |
| 287 | return NULL; | ||
| 288 | } | 287 | } |
| 289 | 288 | ||
| 290 | /* Allocate subscription object */ | 289 | /* Allocate subscription object */ |
| 291 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); | 290 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); |
| 292 | if (!sub) { | 291 | if (!sub) { |
| 293 | pr_warn("Subscription rejected, no memory\n"); | 292 | pr_warn("Subscription rejected, no memory\n"); |
| 294 | subscr_terminate(subscriber); | 293 | return -ENOMEM; |
| 295 | return NULL; | ||
| 296 | } | 294 | } |
| 297 | 295 | ||
| 298 | /* Initialize subscription object */ | 296 | /* Initialize subscription object */ |
| @@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
| 306 | (sub->seq.lower > sub->seq.upper)) { | 304 | (sub->seq.lower > sub->seq.upper)) { |
| 307 | pr_warn("Subscription rejected, illegal request\n"); | 305 | pr_warn("Subscription rejected, illegal request\n"); |
| 308 | kfree(sub); | 306 | kfree(sub); |
| 309 | subscr_terminate(subscriber); | 307 | return -EINVAL; |
| 310 | return NULL; | ||
| 311 | } | 308 | } |
| 312 | INIT_LIST_HEAD(&sub->nameseq_list); | 309 | INIT_LIST_HEAD(&sub->nameseq_list); |
| 313 | list_add(&sub->subscription_list, &subscriber->subscription_list); | 310 | list_add(&sub->subscription_list, &subscriber->subscription_list); |
| @@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
| 320 | (Handler)subscr_timeout, (unsigned long)sub); | 317 | (Handler)subscr_timeout, (unsigned long)sub); |
| 321 | k_start_timer(&sub->timer, sub->timeout); | 318 | k_start_timer(&sub->timer, sub->timeout); |
| 322 | } | 319 | } |
| 323 | 320 | *sub_p = sub; | |
| 324 | return sub; | 321 | return 0; |
| 325 | } | 322 | } |
| 326 | 323 | ||
| 327 | /* Handle one termination request for the subscriber */ | 324 | /* Handle one termination request for the subscriber */ |
| @@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
| 335 | void *usr_data, void *buf, size_t len) | 332 | void *usr_data, void *buf, size_t len) |
| 336 | { | 333 | { |
| 337 | struct tipc_subscriber *subscriber = usr_data; | 334 | struct tipc_subscriber *subscriber = usr_data; |
| 338 | struct tipc_subscription *sub; | 335 | struct tipc_subscription *sub = NULL; |
| 339 | 336 | ||
| 340 | spin_lock_bh(&subscriber->lock); | 337 | spin_lock_bh(&subscriber->lock); |
| 341 | sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber); | 338 | if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) { |
| 339 | spin_unlock_bh(&subscriber->lock); | ||
| 340 | subscr_terminate(subscriber); | ||
| 341 | return; | ||
| 342 | } | ||
| 342 | if (sub) | 343 | if (sub) |
| 343 | tipc_nametbl_subscribe(sub); | 344 | tipc_nametbl_subscribe(sub); |
| 344 | spin_unlock_bh(&subscriber->lock); | 345 | spin_unlock_bh(&subscriber->lock); |
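Note on the tipc/subscr.c restructuring: subscr_subscribe() used to call subscr_terminate() itself on failure, while the caller still held subscriber->lock; it now just returns an errno and hands any new subscription back through sub_p, so the caller can drop the lock first and only then terminate the subscriber. The caller-side pattern after this change:

        spin_lock_bh(&subscriber->lock);
        if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
                spin_unlock_bh(&subscriber->lock);
                subscr_terminate(subscriber);   /* no longer called under the lock */
                return;
        }
        if (sub)
                tipc_nametbl_subscribe(sub);
        spin_unlock_bh(&subscriber->lock);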
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c274179d60a2..2f7ddc3a59b4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -1221,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs | |||
| 1221 | return 0; | 1221 | return 0; |
| 1222 | 1222 | ||
| 1223 | uctx = nla_data(rt); | 1223 | uctx = nla_data(rt); |
| 1224 | return security_xfrm_policy_alloc(&pol->security, uctx); | 1224 | return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL); |
| 1225 | } | 1225 | } |
| 1226 | 1226 | ||
| 1227 | static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, | 1227 | static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, |
| @@ -1626,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1626 | if (rt) { | 1626 | if (rt) { |
| 1627 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); | 1627 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); |
| 1628 | 1628 | ||
| 1629 | err = security_xfrm_policy_alloc(&ctx, uctx); | 1629 | err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); |
| 1630 | if (err) | 1630 | if (err) |
| 1631 | return err; | 1631 | return err; |
| 1632 | } | 1632 | } |
| @@ -1928,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1928 | if (rt) { | 1928 | if (rt) { |
| 1929 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); | 1929 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); |
| 1930 | 1930 | ||
| 1931 | err = security_xfrm_policy_alloc(&ctx, uctx); | 1931 | err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); |
| 1932 | if (err) | 1932 | if (err) |
| 1933 | return err; | 1933 | return err; |
| 1934 | } | 1934 | } |
diff --git a/security/capability.c b/security/capability.c index 8b4f24ae4338..21e2b9cae685 100644 --- a/security/capability.c +++ b/security/capability.c | |||
| @@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
| 757 | 757 | ||
| 758 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 758 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
| 759 | static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, | 759 | static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, |
| 760 | struct xfrm_user_sec_ctx *sec_ctx) | 760 | struct xfrm_user_sec_ctx *sec_ctx, |
| 761 | gfp_t gfp) | ||
| 761 | { | 762 | { |
| 762 | return 0; | 763 | return 0; |
| 763 | } | 764 | } |
diff --git a/security/security.c b/security/security.c index 15b6928592ef..919cad93ac82 100644 --- a/security/security.c +++ b/security/security.c | |||
| @@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
| 1317 | 1317 | ||
| 1318 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1318 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
| 1319 | 1319 | ||
| 1320 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) | 1320 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
| 1321 | struct xfrm_user_sec_ctx *sec_ctx, | ||
| 1322 | gfp_t gfp) | ||
| 1321 | { | 1323 | { |
| 1322 | return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx); | 1324 | return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp); |
| 1323 | } | 1325 | } |
| 1324 | EXPORT_SYMBOL(security_xfrm_policy_alloc); | 1326 | EXPORT_SYMBOL(security_xfrm_policy_alloc); |
| 1325 | 1327 | ||
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 4b34847208cc..b332e2cc0954 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
| @@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, | |||
| 668 | if (flags[i] == SBLABEL_MNT) | 668 | if (flags[i] == SBLABEL_MNT) |
| 669 | continue; | 669 | continue; |
| 670 | rc = security_context_to_sid(mount_options[i], | 670 | rc = security_context_to_sid(mount_options[i], |
| 671 | strlen(mount_options[i]), &sid); | 671 | strlen(mount_options[i]), &sid, GFP_KERNEL); |
| 672 | if (rc) { | 672 | if (rc) { |
| 673 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 673 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
| 674 | "(%s) failed for (dev %s, type %s) errno=%d\n", | 674 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
| @@ -2489,7 +2489,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data) | |||
| 2489 | if (flags[i] == SBLABEL_MNT) | 2489 | if (flags[i] == SBLABEL_MNT) |
| 2490 | continue; | 2490 | continue; |
| 2491 | len = strlen(mount_options[i]); | 2491 | len = strlen(mount_options[i]); |
| 2492 | rc = security_context_to_sid(mount_options[i], len, &sid); | 2492 | rc = security_context_to_sid(mount_options[i], len, &sid, |
| 2493 | GFP_KERNEL); | ||
| 2493 | if (rc) { | 2494 | if (rc) { |
| 2494 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 2495 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
| 2495 | "(%s) failed for (dev %s, type %s) errno=%d\n", | 2496 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
| @@ -2893,7 +2894,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, | |||
| 2893 | if (rc) | 2894 | if (rc) |
| 2894 | return rc; | 2895 | return rc; |
| 2895 | 2896 | ||
| 2896 | rc = security_context_to_sid(value, size, &newsid); | 2897 | rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL); |
| 2897 | if (rc == -EINVAL) { | 2898 | if (rc == -EINVAL) { |
| 2898 | if (!capable(CAP_MAC_ADMIN)) { | 2899 | if (!capable(CAP_MAC_ADMIN)) { |
| 2899 | struct audit_buffer *ab; | 2900 | struct audit_buffer *ab; |
| @@ -3050,7 +3051,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name, | |||
| 3050 | if (!value || !size) | 3051 | if (!value || !size) |
| 3051 | return -EACCES; | 3052 | return -EACCES; |
| 3052 | 3053 | ||
| 3053 | rc = security_context_to_sid((void *)value, size, &newsid); | 3054 | rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL); |
| 3054 | if (rc) | 3055 | if (rc) |
| 3055 | return rc; | 3056 | return rc; |
| 3056 | 3057 | ||
| @@ -5529,7 +5530,7 @@ static int selinux_setprocattr(struct task_struct *p, | |||
| 5529 | str[size-1] = 0; | 5530 | str[size-1] = 0; |
| 5530 | size--; | 5531 | size--; |
| 5531 | } | 5532 | } |
| 5532 | error = security_context_to_sid(value, size, &sid); | 5533 | error = security_context_to_sid(value, size, &sid, GFP_KERNEL); |
| 5533 | if (error == -EINVAL && !strcmp(name, "fscreate")) { | 5534 | if (error == -EINVAL && !strcmp(name, "fscreate")) { |
| 5534 | if (!capable(CAP_MAC_ADMIN)) { | 5535 | if (!capable(CAP_MAC_ADMIN)) { |
| 5535 | struct audit_buffer *ab; | 5536 | struct audit_buffer *ab; |
| @@ -5638,7 +5639,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
| 5638 | 5639 | ||
| 5639 | static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) | 5640 | static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) |
| 5640 | { | 5641 | { |
| 5641 | return security_context_to_sid(secdata, seclen, secid); | 5642 | return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL); |
| 5642 | } | 5643 | } |
| 5643 | 5644 | ||
| 5644 | static void selinux_release_secctx(char *secdata, u32 seclen) | 5645 | static void selinux_release_secctx(char *secdata, u32 seclen) |
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 8ed8daf7f1ee..ce7852cf526b 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
| @@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext, | |||
| 134 | int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); | 134 | int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); |
| 135 | 135 | ||
| 136 | int security_context_to_sid(const char *scontext, u32 scontext_len, | 136 | int security_context_to_sid(const char *scontext, u32 scontext_len, |
| 137 | u32 *out_sid); | 137 | u32 *out_sid, gfp_t gfp); |
| 138 | 138 | ||
| 139 | int security_context_to_sid_default(const char *scontext, u32 scontext_len, | 139 | int security_context_to_sid_default(const char *scontext, u32 scontext_len, |
| 140 | u32 *out_sid, u32 def_sid, gfp_t gfp_flags); | 140 | u32 *out_sid, u32 def_sid, gfp_t gfp_flags); |
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 48c3cc94c168..9f0584710c85 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h | |||
| @@ -10,7 +10,8 @@ | |||
| 10 | #include <net/flow.h> | 10 | #include <net/flow.h> |
| 11 | 11 | ||
| 12 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, | 12 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
| 13 | struct xfrm_user_sec_ctx *uctx); | 13 | struct xfrm_user_sec_ctx *uctx, |
| 14 | gfp_t gfp); | ||
| 14 | int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, | 15 | int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, |
| 15 | struct xfrm_sec_ctx **new_ctxp); | 16 | struct xfrm_sec_ctx **new_ctxp); |
| 16 | void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); | 17 | void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 5122affe06a8..d60c0ee66387 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
| @@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size) | |||
| 576 | if (length) | 576 | if (length) |
| 577 | goto out; | 577 | goto out; |
| 578 | 578 | ||
| 579 | length = security_context_to_sid(buf, size, &sid); | 579 | length = security_context_to_sid(buf, size, &sid, GFP_KERNEL); |
| 580 | if (length) | 580 | if (length) |
| 581 | goto out; | 581 | goto out; |
| 582 | 582 | ||
| @@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) | |||
| 731 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 731 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
| 732 | goto out; | 732 | goto out; |
| 733 | 733 | ||
| 734 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 734 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
| 735 | GFP_KERNEL); | ||
| 735 | if (length) | 736 | if (length) |
| 736 | goto out; | 737 | goto out; |
| 737 | 738 | ||
| 738 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 739 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
| 740 | GFP_KERNEL); | ||
| 739 | if (length) | 741 | if (length) |
| 740 | goto out; | 742 | goto out; |
| 741 | 743 | ||
| @@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size) | |||
| 817 | objname = namebuf; | 819 | objname = namebuf; |
| 818 | } | 820 | } |
| 819 | 821 | ||
| 820 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 822 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
| 823 | GFP_KERNEL); | ||
| 821 | if (length) | 824 | if (length) |
| 822 | goto out; | 825 | goto out; |
| 823 | 826 | ||
| 824 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 827 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
| 828 | GFP_KERNEL); | ||
| 825 | if (length) | 829 | if (length) |
| 826 | goto out; | 830 | goto out; |
| 827 | 831 | ||
| @@ -878,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size) | |||
| 878 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 882 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
| 879 | goto out; | 883 | goto out; |
| 880 | 884 | ||
| 881 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 885 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
| 886 | GFP_KERNEL); | ||
| 882 | if (length) | 887 | if (length) |
| 883 | goto out; | 888 | goto out; |
| 884 | 889 | ||
| 885 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 890 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
| 891 | GFP_KERNEL); | ||
| 886 | if (length) | 892 | if (length) |
| 887 | goto out; | 893 | goto out; |
| 888 | 894 | ||
| @@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) | |||
| 934 | if (sscanf(buf, "%s %s", con, user) != 2) | 940 | if (sscanf(buf, "%s %s", con, user) != 2) |
| 935 | goto out; | 941 | goto out; |
| 936 | 942 | ||
| 937 | length = security_context_to_sid(con, strlen(con) + 1, &sid); | 943 | length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL); |
| 938 | if (length) | 944 | if (length) |
| 939 | goto out; | 945 | goto out; |
| 940 | 946 | ||
| @@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size) | |||
| 994 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 1000 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
| 995 | goto out; | 1001 | goto out; |
| 996 | 1002 | ||
| 997 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 1003 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
| 1004 | GFP_KERNEL); | ||
| 998 | if (length) | 1005 | if (length) |
| 999 | goto out; | 1006 | goto out; |
| 1000 | 1007 | ||
| 1001 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 1008 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
| 1009 | GFP_KERNEL); | ||
| 1002 | if (length) | 1010 | if (length) |
| 1003 | goto out; | 1011 | goto out; |
| 1004 | 1012 | ||
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 5d0144ee8ed6..4bca49414a40 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
| @@ -1289,16 +1289,18 @@ out: | |||
| 1289 | * @scontext: security context | 1289 | * @scontext: security context |
| 1290 | * @scontext_len: length in bytes | 1290 | * @scontext_len: length in bytes |
| 1291 | * @sid: security identifier, SID | 1291 | * @sid: security identifier, SID |
| 1292 | * @gfp: context for the allocation | ||
| 1292 | * | 1293 | * |
| 1293 | * Obtains a SID associated with the security context that | 1294 | * Obtains a SID associated with the security context that |
| 1294 | * has the string representation specified by @scontext. | 1295 | * has the string representation specified by @scontext. |
| 1295 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient | 1296 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient |
| 1296 | * memory is available, or 0 on success. | 1297 | * memory is available, or 0 on success. |
| 1297 | */ | 1298 | */ |
| 1298 | int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) | 1299 | int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid, |
| 1300 | gfp_t gfp) | ||
| 1299 | { | 1301 | { |
| 1300 | return security_context_to_sid_core(scontext, scontext_len, | 1302 | return security_context_to_sid_core(scontext, scontext_len, |
| 1301 | sid, SECSID_NULL, GFP_KERNEL, 0); | 1303 | sid, SECSID_NULL, gfp, 0); |
| 1302 | } | 1304 | } |
| 1303 | 1305 | ||
| 1304 | /** | 1306 | /** |
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 0462cb3ff0a7..98b042630a9e 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c | |||
| @@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) | |||
| 78 | * xfrm_user_sec_ctx context. | 78 | * xfrm_user_sec_ctx context. |
| 79 | */ | 79 | */ |
| 80 | static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | 80 | static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, |
| 81 | struct xfrm_user_sec_ctx *uctx) | 81 | struct xfrm_user_sec_ctx *uctx, |
| 82 | gfp_t gfp) | ||
| 82 | { | 83 | { |
| 83 | int rc; | 84 | int rc; |
| 84 | const struct task_security_struct *tsec = current_security(); | 85 | const struct task_security_struct *tsec = current_security(); |
| @@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | |||
| 94 | if (str_len >= PAGE_SIZE) | 95 | if (str_len >= PAGE_SIZE) |
| 95 | return -ENOMEM; | 96 | return -ENOMEM; |
| 96 | 97 | ||
| 97 | ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL); | 98 | ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp); |
| 98 | if (!ctx) | 99 | if (!ctx) |
| 99 | return -ENOMEM; | 100 | return -ENOMEM; |
| 100 | 101 | ||
| @@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | |||
| 103 | ctx->ctx_len = str_len; | 104 | ctx->ctx_len = str_len; |
| 104 | memcpy(ctx->ctx_str, &uctx[1], str_len); | 105 | memcpy(ctx->ctx_str, &uctx[1], str_len); |
| 105 | ctx->ctx_str[str_len] = '\0'; | 106 | ctx->ctx_str[str_len] = '\0'; |
| 106 | rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid); | 107 | rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp); |
| 107 | if (rc) | 108 | if (rc) |
| 108 | goto err; | 109 | goto err; |
| 109 | 110 | ||
| @@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) | |||
| 282 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. | 283 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. |
| 283 | */ | 284 | */ |
| 284 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, | 285 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
| 285 | struct xfrm_user_sec_ctx *uctx) | 286 | struct xfrm_user_sec_ctx *uctx, |
| 287 | gfp_t gfp) | ||
| 286 | { | 288 | { |
| 287 | return selinux_xfrm_alloc_user(ctxp, uctx); | 289 | return selinux_xfrm_alloc_user(ctxp, uctx, gfp); |
| 288 | } | 290 | } |
| 289 | 291 | ||
| 290 | /* | 292 | /* |
| @@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) | |||
| 332 | int selinux_xfrm_state_alloc(struct xfrm_state *x, | 334 | int selinux_xfrm_state_alloc(struct xfrm_state *x, |
| 333 | struct xfrm_user_sec_ctx *uctx) | 335 | struct xfrm_user_sec_ctx *uctx) |
| 334 | { | 336 | { |
| 335 | return selinux_xfrm_alloc_user(&x->security, uctx); | 337 | return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL); |
| 336 | } | 338 | } |
| 337 | 339 | ||
| 338 | /* | 340 | /* |
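Note on the remaining security/* hunks: they are the downstream half of the gfp_t plumbing described earlier. The flag handed to security_xfrm_policy_alloc() is carried down to the allocations that actually need it, while the selinuxfs and hooks.c callers of security_context_to_sid() simply pass GFP_KERNEL explicitly and keep their old behaviour. The call chain, using the new signatures shown above:

        /*
         * security_xfrm_policy_alloc(ctxp, uctx, gfp)
         *   -> selinux_xfrm_policy_alloc(ctxp, uctx, gfp)
         *        -> selinux_xfrm_alloc_user(ctxp, uctx, gfp)
         *             kmalloc(sizeof(*ctx) + str_len + 1, gfp);
         *             security_context_to_sid(ctx->ctx_str, str_len,
         *                                     &ctx->ctx_sid, gfp);
         *               -> security_context_to_sid_core(scontext, scontext_len,
         *                                               sid, SECSID_NULL, gfp, 0);
         */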
