diff options
author | NeilBrown <neilb@suse.com> | 2018-01-08 20:19:38 -0500 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-01-09 09:48:45 -0500 |
commit | 8d60ecd99c9bafedfb49e7a3bc0cc31887100559 (patch) | |
tree | 24ac46d2f98c6cd72d7bff33de328e05e358858f | |
parent | d0157f0c7ef02e022a6dc063ddece4a28004c710 (diff) |
staging: lustre: replace LIBCFS_CPT_ALLOC()
LIBCFS_CPT_ALLOC() calls kvmalloc_node() with GFP_NOFS
which is not permitted.
Mostly, a kmalloc_node(GFP_NOFS) is appropriate, though occasionally
the allocation is large and GFP_KERNEL is acceptable, so
kvmalloc_node() can be used.
This patch introduces 4 alternatives to LIBCFS_CPT_ALLOC():
kmalloc_cpt()
kzalloc_cpt()
kvmalloc_cpt()
kvzalloc_cpt().
Each takes a size, gfp flags, and cpt number.
Almost every call to LIBCFS_CPT_ALLOC() passes lnet_cpt_table()
as the table. This patch embeds that choice in the k*alloc_cpt()
macros, and open-codes kzalloc_node(..., cfs_cpt_spread_node(..))
in the one case that lnet_cpt_table() isn't used.
When LIBCFS_CPT_ALLOC() is replaced, the matching LIBCFS_FREE()
is also replaced, with kfree() or kvfree() as appropriate.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | 19 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 99 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | 15 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/libcfs/libcfs_mem.c | 9 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/lnet/api-ni.c | 11 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/lnet/lib-msg.c | 14 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/lnet/lib-ptl.c | 6 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/lnet/peer.c | 8 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/lnet/router.c | 8 | ||||
-rw-r--r-- | drivers/staging/lustre/lnet/selftest/rpc.c | 9 |
10 files changed, 88 insertions, 110 deletions
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index d230c7f7cced..50a600564fb2 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | |||
@@ -126,6 +126,25 @@ do { \ | |||
126 | kvfree(ptr); \ | 126 | kvfree(ptr); \ |
127 | } while (0) | 127 | } while (0) |
128 | 128 | ||
129 | /* | ||
130 | * Use #define rather than inline, as lnet_cpt_table() might | ||
131 | * not be defined yet | ||
132 | */ | ||
133 | #define kmalloc_cpt(size, flags, cpt) \ | ||
134 | kmalloc_node(size, flags, cfs_cpt_spread_node(lnet_cpt_table(), cpt)) | ||
135 | |||
136 | #define kzalloc_cpt(size, flags, cpt) \ | ||
137 | kmalloc_node(size, flags | __GFP_ZERO, \ | ||
138 | cfs_cpt_spread_node(lnet_cpt_table(), cpt)) | ||
139 | |||
140 | #define kvmalloc_cpt(size, flags, cpt) \ | ||
141 | kvmalloc_node(size, flags, \ | ||
142 | cfs_cpt_spread_node(lnet_cpt_table(), cpt)) | ||
143 | |||
144 | #define kvzalloc_cpt(size, flags, cpt) \ | ||
145 | kvmalloc_node(size, flags | __GFP_ZERO, \ | ||
146 | cfs_cpt_spread_node(lnet_cpt_table(), cpt)) | ||
147 | |||
129 | /******************************************************************************/ | 148 | /******************************************************************************/ |
130 | 149 | ||
131 | void libcfs_debug_dumplog(void); | 150 | void libcfs_debug_dumplog(void); |
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index bb7b19473e3a..2ebc484385b3 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | |||
@@ -325,7 +325,7 @@ int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp, | |||
325 | LASSERT(net); | 325 | LASSERT(net); |
326 | LASSERT(nid != LNET_NID_ANY); | 326 | LASSERT(nid != LNET_NID_ANY); |
327 | 327 | ||
328 | LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); | 328 | peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt); |
329 | if (!peer) { | 329 | if (!peer) { |
330 | CERROR("Cannot allocate peer\n"); | 330 | CERROR("Cannot allocate peer\n"); |
331 | return -ENOMEM; | 331 | return -ENOMEM; |
@@ -656,15 +656,14 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm | |||
656 | 656 | ||
657 | LASSERT(sched->ibs_nthreads > 0); | 657 | LASSERT(sched->ibs_nthreads > 0); |
658 | 658 | ||
659 | LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, | 659 | init_qp_attr = kzalloc_cpt(sizeof(*init_qp_attr), GFP_NOFS, cpt); |
660 | sizeof(*init_qp_attr)); | ||
661 | if (!init_qp_attr) { | 660 | if (!init_qp_attr) { |
662 | CERROR("Can't allocate qp_attr for %s\n", | 661 | CERROR("Can't allocate qp_attr for %s\n", |
663 | libcfs_nid2str(peer->ibp_nid)); | 662 | libcfs_nid2str(peer->ibp_nid)); |
664 | goto failed_0; | 663 | goto failed_0; |
665 | } | 664 | } |
666 | 665 | ||
667 | LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); | 666 | conn = kzalloc_cpt(sizeof(*conn), GFP_NOFS, cpt); |
668 | if (!conn) { | 667 | if (!conn) { |
669 | CERROR("Can't allocate connection for %s\n", | 668 | CERROR("Can't allocate connection for %s\n", |
670 | libcfs_nid2str(peer->ibp_nid)); | 669 | libcfs_nid2str(peer->ibp_nid)); |
@@ -687,8 +686,7 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm | |||
687 | INIT_LIST_HEAD(&conn->ibc_active_txs); | 686 | INIT_LIST_HEAD(&conn->ibc_active_txs); |
688 | spin_lock_init(&conn->ibc_lock); | 687 | spin_lock_init(&conn->ibc_lock); |
689 | 688 | ||
690 | LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, | 689 | conn->ibc_connvars = kzalloc_cpt(sizeof(*conn->ibc_connvars), GFP_NOFS, cpt); |
691 | sizeof(*conn->ibc_connvars)); | ||
692 | if (!conn->ibc_connvars) { | 690 | if (!conn->ibc_connvars) { |
693 | CERROR("Can't allocate in-progress connection state\n"); | 691 | CERROR("Can't allocate in-progress connection state\n"); |
694 | goto failed_2; | 692 | goto failed_2; |
@@ -722,8 +720,8 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm | |||
722 | 720 | ||
723 | write_unlock_irqrestore(glock, flags); | 721 | write_unlock_irqrestore(glock, flags); |
724 | 722 | ||
725 | LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, | 723 | conn->ibc_rxs = kzalloc_cpt(IBLND_RX_MSGS(conn) * sizeof(struct kib_rx), |
726 | IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); | 724 | GFP_NOFS, cpt); |
727 | if (!conn->ibc_rxs) { | 725 | if (!conn->ibc_rxs) { |
728 | CERROR("Cannot allocate RX buffers\n"); | 726 | CERROR("Cannot allocate RX buffers\n"); |
729 | goto failed_2; | 727 | goto failed_2; |
@@ -877,11 +875,7 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn) | |||
877 | if (conn->ibc_rx_pages) | 875 | if (conn->ibc_rx_pages) |
878 | kiblnd_unmap_rx_descs(conn); | 876 | kiblnd_unmap_rx_descs(conn); |
879 | 877 | ||
880 | if (conn->ibc_rxs) { | 878 | kfree(conn->ibc_rxs); |
881 | LIBCFS_FREE(conn->ibc_rxs, | ||
882 | IBLND_RX_MSGS(conn) * sizeof(struct kib_rx)); | ||
883 | } | ||
884 | |||
885 | kfree(conn->ibc_connvars); | 879 | kfree(conn->ibc_connvars); |
886 | 880 | ||
887 | if (conn->ibc_hdev) | 881 | if (conn->ibc_hdev) |
@@ -1088,7 +1082,7 @@ static void kiblnd_free_pages(struct kib_pages *p) | |||
1088 | __free_page(p->ibp_pages[i]); | 1082 | __free_page(p->ibp_pages[i]); |
1089 | } | 1083 | } |
1090 | 1084 | ||
1091 | LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages])); | 1085 | kfree(p); |
1092 | } | 1086 | } |
1093 | 1087 | ||
1094 | int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) | 1088 | int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) |
@@ -1096,14 +1090,13 @@ int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages) | |||
1096 | struct kib_pages *p; | 1090 | struct kib_pages *p; |
1097 | int i; | 1091 | int i; |
1098 | 1092 | ||
1099 | LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, | 1093 | p = kzalloc_cpt(offsetof(struct kib_pages, ibp_pages[npages]), |
1100 | offsetof(struct kib_pages, ibp_pages[npages])); | 1094 | GFP_NOFS, cpt); |
1101 | if (!p) { | 1095 | if (!p) { |
1102 | CERROR("Can't allocate descriptor for %d pages\n", npages); | 1096 | CERROR("Can't allocate descriptor for %d pages\n", npages); |
1103 | return -ENOMEM; | 1097 | return -ENOMEM; |
1104 | } | 1098 | } |
1105 | 1099 | ||
1106 | memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages])); | ||
1107 | p->ibp_npages = npages; | 1100 | p->ibp_npages = npages; |
1108 | 1101 | ||
1109 | for (i = 0; i < npages; i++) { | 1102 | for (i = 0; i < npages; i++) { |
@@ -1375,8 +1368,7 @@ static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_po | |||
1375 | INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); | 1368 | INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list); |
1376 | fpo->fast_reg.fpo_pool_size = 0; | 1369 | fpo->fast_reg.fpo_pool_size = 0; |
1377 | for (i = 0; i < fps->fps_pool_size; i++) { | 1370 | for (i = 0; i < fps->fps_pool_size; i++) { |
1378 | LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt, | 1371 | frd = kzalloc_cpt(sizeof(*frd), GFP_NOFS, fps->fps_cpt); |
1379 | sizeof(*frd)); | ||
1380 | if (!frd) { | 1372 | if (!frd) { |
1381 | CERROR("Failed to allocate a new fast_reg descriptor\n"); | 1373 | CERROR("Failed to allocate a new fast_reg descriptor\n"); |
1382 | rc = -ENOMEM; | 1374 | rc = -ENOMEM; |
@@ -1425,7 +1417,7 @@ static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps, | |||
1425 | struct kib_fmr_pool *fpo; | 1417 | struct kib_fmr_pool *fpo; |
1426 | int rc; | 1418 | int rc; |
1427 | 1419 | ||
1428 | LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); | 1420 | fpo = kzalloc_cpt(sizeof(*fpo), GFP_NOFS, fps->fps_cpt); |
1429 | if (!fpo) | 1421 | if (!fpo) |
1430 | return -ENOMEM; | 1422 | return -ENOMEM; |
1431 | 1423 | ||
@@ -1984,30 +1976,14 @@ static void kiblnd_destroy_tx_pool(struct kib_pool *pool) | |||
1984 | struct kib_tx *tx = &tpo->tpo_tx_descs[i]; | 1976 | struct kib_tx *tx = &tpo->tpo_tx_descs[i]; |
1985 | 1977 | ||
1986 | list_del(&tx->tx_list); | 1978 | list_del(&tx->tx_list); |
1987 | if (tx->tx_pages) | 1979 | kfree(tx->tx_pages); |
1988 | LIBCFS_FREE(tx->tx_pages, | 1980 | kfree(tx->tx_frags); |
1989 | LNET_MAX_IOV * | 1981 | kfree(tx->tx_wrq); |
1990 | sizeof(*tx->tx_pages)); | 1982 | kfree(tx->tx_sge); |
1991 | if (tx->tx_frags) | 1983 | kfree(tx->tx_rd); |
1992 | LIBCFS_FREE(tx->tx_frags, | 1984 | } |
1993 | (1 + IBLND_MAX_RDMA_FRAGS) * | 1985 | |
1994 | sizeof(*tx->tx_frags)); | 1986 | kfree(tpo->tpo_tx_descs); |
1995 | if (tx->tx_wrq) | ||
1996 | LIBCFS_FREE(tx->tx_wrq, | ||
1997 | (1 + IBLND_MAX_RDMA_FRAGS) * | ||
1998 | sizeof(*tx->tx_wrq)); | ||
1999 | if (tx->tx_sge) | ||
2000 | LIBCFS_FREE(tx->tx_sge, | ||
2001 | (1 + IBLND_MAX_RDMA_FRAGS) * | ||
2002 | sizeof(*tx->tx_sge)); | ||
2003 | if (tx->tx_rd) | ||
2004 | LIBCFS_FREE(tx->tx_rd, | ||
2005 | offsetof(struct kib_rdma_desc, | ||
2006 | rd_frags[IBLND_MAX_RDMA_FRAGS])); | ||
2007 | } | ||
2008 | |||
2009 | LIBCFS_FREE(tpo->tpo_tx_descs, | ||
2010 | pool->po_size * sizeof(struct kib_tx)); | ||
2011 | out: | 1987 | out: |
2012 | kiblnd_fini_pool(pool); | 1988 | kiblnd_fini_pool(pool); |
2013 | kfree(tpo); | 1989 | kfree(tpo); |
@@ -2028,7 +2004,7 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size, | |||
2028 | struct kib_pool *pool; | 2004 | struct kib_pool *pool; |
2029 | struct kib_tx_pool *tpo; | 2005 | struct kib_tx_pool *tpo; |
2030 | 2006 | ||
2031 | LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); | 2007 | tpo = kzalloc_cpt(sizeof(*tpo), GFP_NOFS, ps->ps_cpt); |
2032 | if (!tpo) { | 2008 | if (!tpo) { |
2033 | CERROR("Failed to allocate TX pool\n"); | 2009 | CERROR("Failed to allocate TX pool\n"); |
2034 | return -ENOMEM; | 2010 | return -ENOMEM; |
@@ -2046,8 +2022,8 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size, | |||
2046 | return -ENOMEM; | 2022 | return -ENOMEM; |
2047 | } | 2023 | } |
2048 | 2024 | ||
2049 | LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, | 2025 | tpo->tpo_tx_descs = kzalloc_cpt(size * sizeof(struct kib_tx), |
2050 | size * sizeof(struct kib_tx)); | 2026 | GFP_NOFS, ps->ps_cpt); |
2051 | if (!tpo->tpo_tx_descs) { | 2027 | if (!tpo->tpo_tx_descs) { |
2052 | CERROR("Can't allocate %d tx descriptors\n", size); | 2028 | CERROR("Can't allocate %d tx descriptors\n", size); |
2053 | ps->ps_pool_destroy(pool); | 2029 | ps->ps_pool_destroy(pool); |
@@ -2061,36 +2037,35 @@ static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size, | |||
2061 | 2037 | ||
2062 | tx->tx_pool = tpo; | 2038 | tx->tx_pool = tpo; |
2063 | if (ps->ps_net->ibn_fmr_ps) { | 2039 | if (ps->ps_net->ibn_fmr_ps) { |
2064 | LIBCFS_CPT_ALLOC(tx->tx_pages, | 2040 | tx->tx_pages = kzalloc_cpt(LNET_MAX_IOV * sizeof(*tx->tx_pages), |
2065 | lnet_cpt_table(), ps->ps_cpt, | 2041 | GFP_NOFS, ps->ps_cpt); |
2066 | LNET_MAX_IOV * sizeof(*tx->tx_pages)); | ||
2067 | if (!tx->tx_pages) | 2042 | if (!tx->tx_pages) |
2068 | break; | 2043 | break; |
2069 | } | 2044 | } |
2070 | 2045 | ||
2071 | LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, | 2046 | tx->tx_frags = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * |
2072 | (1 + IBLND_MAX_RDMA_FRAGS) * | 2047 | sizeof(*tx->tx_frags), |
2073 | sizeof(*tx->tx_frags)); | 2048 | GFP_NOFS, ps->ps_cpt); |
2074 | if (!tx->tx_frags) | 2049 | if (!tx->tx_frags) |
2075 | break; | 2050 | break; |
2076 | 2051 | ||
2077 | sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1); | 2052 | sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1); |
2078 | 2053 | ||
2079 | LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, | 2054 | tx->tx_wrq = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * |
2080 | (1 + IBLND_MAX_RDMA_FRAGS) * | 2055 | sizeof(*tx->tx_wrq), |
2081 | sizeof(*tx->tx_wrq)); | 2056 | GFP_NOFS, ps->ps_cpt); |
2082 | if (!tx->tx_wrq) | 2057 | if (!tx->tx_wrq) |
2083 | break; | 2058 | break; |
2084 | 2059 | ||
2085 | LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, | 2060 | tx->tx_sge = kzalloc_cpt((1 + IBLND_MAX_RDMA_FRAGS) * |
2086 | (1 + IBLND_MAX_RDMA_FRAGS) * | 2061 | sizeof(*tx->tx_sge), |
2087 | sizeof(*tx->tx_sge)); | 2062 | GFP_NOFS, ps->ps_cpt); |
2088 | if (!tx->tx_sge) | 2063 | if (!tx->tx_sge) |
2089 | break; | 2064 | break; |
2090 | 2065 | ||
2091 | LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, | 2066 | tx->tx_rd = kzalloc_cpt(offsetof(struct kib_rdma_desc, |
2092 | offsetof(struct kib_rdma_desc, | 2067 | rd_frags[IBLND_MAX_RDMA_FRAGS]), |
2093 | rd_frags[IBLND_MAX_RDMA_FRAGS])); | 2068 | GFP_NOFS, ps->ps_cpt); |
2094 | if (!tx->tx_rd) | 2069 | if (!tx->tx_rd) |
2095 | break; | 2070 | break; |
2096 | } | 2071 | } |
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 7dba949a95a7..ff292216290d 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | |||
@@ -108,7 +108,7 @@ ksocknal_create_peer(struct ksock_peer **peerp, struct lnet_ni *ni, | |||
108 | LASSERT(id.pid != LNET_PID_ANY); | 108 | LASSERT(id.pid != LNET_PID_ANY); |
109 | LASSERT(!in_interrupt()); | 109 | LASSERT(!in_interrupt()); |
110 | 110 | ||
111 | LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); | 111 | peer = kzalloc_cpt(sizeof(*peer), GFP_NOFS, cpt); |
112 | if (!peer) | 112 | if (!peer) |
113 | return -ENOMEM; | 113 | return -ENOMEM; |
114 | 114 | ||
@@ -2257,13 +2257,8 @@ ksocknal_free_buffers(void) | |||
2257 | struct ksock_sched_info *info; | 2257 | struct ksock_sched_info *info; |
2258 | int i; | 2258 | int i; |
2259 | 2259 | ||
2260 | cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { | 2260 | cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) |
2261 | if (info->ksi_scheds) { | 2261 | kfree(info->ksi_scheds); |
2262 | LIBCFS_FREE(info->ksi_scheds, | ||
2263 | info->ksi_nthreads_max * | ||
2264 | sizeof(info->ksi_scheds[0])); | ||
2265 | } | ||
2266 | } | ||
2267 | cfs_percpt_free(ksocknal_data.ksnd_sched_info); | 2262 | cfs_percpt_free(ksocknal_data.ksnd_sched_info); |
2268 | } | 2263 | } |
2269 | 2264 | ||
@@ -2452,8 +2447,8 @@ ksocknal_base_startup(void) | |||
2452 | info->ksi_nthreads_max = nthrs; | 2447 | info->ksi_nthreads_max = nthrs; |
2453 | info->ksi_cpt = i; | 2448 | info->ksi_cpt = i; |
2454 | 2449 | ||
2455 | LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i, | 2450 | info->ksi_scheds = kzalloc_cpt(info->ksi_nthreads_max * sizeof(*sched), |
2456 | info->ksi_nthreads_max * sizeof(*sched)); | 2451 | GFP_NOFS, i); |
2457 | if (!info->ksi_scheds) | 2452 | if (!info->ksi_scheds) |
2458 | goto failed; | 2453 | goto failed; |
2459 | 2454 | ||
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c index 8e2b4f1db0a1..7faed94994ea 100644 --- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c +++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c | |||
@@ -49,10 +49,8 @@ cfs_percpt_free(void *vars) | |||
49 | 49 | ||
50 | arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); | 50 | arr = container_of(vars, struct cfs_var_array, va_ptrs[0]); |
51 | 51 | ||
52 | for (i = 0; i < arr->va_count; i++) { | 52 | for (i = 0; i < arr->va_count; i++) |
53 | if (arr->va_ptrs[i]) | 53 | kfree(arr->va_ptrs[i]); |
54 | LIBCFS_FREE(arr->va_ptrs[i], arr->va_size); | ||
55 | } | ||
56 | 54 | ||
57 | kvfree(arr); | 55 | kvfree(arr); |
58 | } | 56 | } |
@@ -89,7 +87,8 @@ cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size) | |||
89 | arr->va_cptab = cptab; | 87 | arr->va_cptab = cptab; |
90 | 88 | ||
91 | for (i = 0; i < count; i++) { | 89 | for (i = 0; i < count; i++) { |
92 | LIBCFS_CPT_ALLOC(arr->va_ptrs[i], cptab, i, size); | 90 | arr->va_ptrs[i] = kzalloc_node(size, GFP_KERNEL, |
91 | cfs_cpt_spread_node(cptab, i)); | ||
93 | if (!arr->va_ptrs[i]) { | 92 | if (!arr->va_ptrs[i]) { |
94 | cfs_percpt_free((void *)&arr->va_ptrs[0]); | 93 | cfs_percpt_free((void *)&arr->va_ptrs[0]); |
95 | return NULL; | 94 | return NULL; |
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 6a1fb0397604..2c7abad57104 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c | |||
@@ -404,11 +404,8 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) | |||
404 | count, lnet_res_type2str(rec->rec_type)); | 404 | count, lnet_res_type2str(rec->rec_type)); |
405 | } | 405 | } |
406 | 406 | ||
407 | if (rec->rec_lh_hash) { | 407 | kfree(rec->rec_lh_hash); |
408 | LIBCFS_FREE(rec->rec_lh_hash, | 408 | rec->rec_lh_hash = NULL; |
409 | LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); | ||
410 | rec->rec_lh_hash = NULL; | ||
411 | } | ||
412 | 409 | ||
413 | rec->rec_type = 0; /* mark it as finalized */ | 410 | rec->rec_type = 0; /* mark it as finalized */ |
414 | } | 411 | } |
@@ -426,8 +423,8 @@ lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) | |||
426 | rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type; | 423 | rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type; |
427 | 424 | ||
428 | /* Arbitrary choice of hash table size */ | 425 | /* Arbitrary choice of hash table size */ |
429 | LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt, | 426 | rec->rec_lh_hash = kvmalloc_cpt(LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]), |
430 | LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); | 427 | GFP_KERNEL, cpt); |
431 | if (!rec->rec_lh_hash) { | 428 | if (!rec->rec_lh_hash) { |
432 | rc = -ENOMEM; | 429 | rc = -ENOMEM; |
433 | goto out; | 430 | goto out; |
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c index ff6c43323fb5..0091273c04b9 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c | |||
@@ -553,12 +553,8 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) | |||
553 | if (count > 0) | 553 | if (count > 0) |
554 | CERROR("%d active msg on exit\n", count); | 554 | CERROR("%d active msg on exit\n", count); |
555 | 555 | ||
556 | if (container->msc_finalizers) { | 556 | kvfree(container->msc_finalizers); |
557 | LIBCFS_FREE(container->msc_finalizers, | 557 | container->msc_finalizers = NULL; |
558 | container->msc_nfinalizers * | ||
559 | sizeof(*container->msc_finalizers)); | ||
560 | container->msc_finalizers = NULL; | ||
561 | } | ||
562 | container->msc_init = 0; | 558 | container->msc_init = 0; |
563 | } | 559 | } |
564 | 560 | ||
@@ -573,9 +569,9 @@ lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) | |||
573 | /* number of CPUs */ | 569 | /* number of CPUs */ |
574 | container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); | 570 | container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt); |
575 | 571 | ||
576 | LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt, | 572 | container->msc_finalizers = kvzalloc_cpt(container->msc_nfinalizers * |
577 | container->msc_nfinalizers * | 573 | sizeof(*container->msc_finalizers), |
578 | sizeof(*container->msc_finalizers)); | 574 | GFP_KERNEL, cpt); |
579 | 575 | ||
580 | if (!container->msc_finalizers) { | 576 | if (!container->msc_finalizers) { |
581 | CERROR("Failed to allocate message finalizers\n"); | 577 | CERROR("Failed to allocate message finalizers\n"); |
diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c index 519cfebaaa88..471f2f6c86f4 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c +++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c | |||
@@ -775,7 +775,7 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) | |||
775 | } | 775 | } |
776 | } | 776 | } |
777 | /* the extra entry is for MEs with ignore bits */ | 777 | /* the extra entry is for MEs with ignore bits */ |
778 | LIBCFS_FREE(mhash, sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1)); | 778 | kvfree(mhash); |
779 | } | 779 | } |
780 | 780 | ||
781 | cfs_percpt_free(ptl->ptl_mtables); | 781 | cfs_percpt_free(ptl->ptl_mtables); |
@@ -803,8 +803,8 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) | |||
803 | spin_lock_init(&ptl->ptl_lock); | 803 | spin_lock_init(&ptl->ptl_lock); |
804 | cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) { | 804 | cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) { |
805 | /* the extra entry is for MEs with ignore bits */ | 805 | /* the extra entry is for MEs with ignore bits */ |
806 | LIBCFS_CPT_ALLOC(mhash, lnet_cpt_table(), i, | 806 | mhash = kvzalloc_cpt(sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1), |
807 | sizeof(*mhash) * (LNET_MT_HASH_SIZE + 1)); | 807 | GFP_KERNEL, i); |
808 | if (!mhash) { | 808 | if (!mhash) { |
809 | CERROR("Failed to create match hash for portal %d\n", | 809 | CERROR("Failed to create match hash for portal %d\n", |
810 | index); | 810 | index); |
diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c index 19fcbcf0f642..3e157c10fec4 100644 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ b/drivers/staging/lustre/lnet/lnet/peer.c | |||
@@ -56,8 +56,8 @@ lnet_peer_tables_create(void) | |||
56 | cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { | 56 | cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { |
57 | INIT_LIST_HEAD(&ptable->pt_deathrow); | 57 | INIT_LIST_HEAD(&ptable->pt_deathrow); |
58 | 58 | ||
59 | LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i, | 59 | hash = kvmalloc_cpt(LNET_PEER_HASH_SIZE * sizeof(*hash), |
60 | LNET_PEER_HASH_SIZE * sizeof(*hash)); | 60 | GFP_KERNEL, i); |
61 | if (!hash) { | 61 | if (!hash) { |
62 | CERROR("Failed to create peer hash table\n"); | 62 | CERROR("Failed to create peer hash table\n"); |
63 | lnet_peer_tables_destroy(); | 63 | lnet_peer_tables_destroy(); |
@@ -94,7 +94,7 @@ lnet_peer_tables_destroy(void) | |||
94 | for (j = 0; j < LNET_PEER_HASH_SIZE; j++) | 94 | for (j = 0; j < LNET_PEER_HASH_SIZE; j++) |
95 | LASSERT(list_empty(&hash[j])); | 95 | LASSERT(list_empty(&hash[j])); |
96 | 96 | ||
97 | LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash)); | 97 | kvfree(hash); |
98 | } | 98 | } |
99 | 99 | ||
100 | cfs_percpt_free(the_lnet.ln_peer_tables); | 100 | cfs_percpt_free(the_lnet.ln_peer_tables); |
@@ -297,7 +297,7 @@ lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt) | |||
297 | if (lp) | 297 | if (lp) |
298 | memset(lp, 0, sizeof(*lp)); | 298 | memset(lp, 0, sizeof(*lp)); |
299 | else | 299 | else |
300 | LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp)); | 300 | lp = kzalloc_cpt(sizeof(*lp), GFP_NOFS, cpt2); |
301 | 301 | ||
302 | if (!lp) { | 302 | if (!lp) { |
303 | rc = -ENOMEM; | 303 | rc = -ENOMEM; |
diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index 476d6d296037..6504761ca598 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c | |||
@@ -1296,12 +1296,10 @@ rescan: | |||
1296 | void | 1296 | void |
1297 | lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages) | 1297 | lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages) |
1298 | { | 1298 | { |
1299 | int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]); | ||
1300 | |||
1301 | while (--npages >= 0) | 1299 | while (--npages >= 0) |
1302 | __free_page(rb->rb_kiov[npages].bv_page); | 1300 | __free_page(rb->rb_kiov[npages].bv_page); |
1303 | 1301 | ||
1304 | LIBCFS_FREE(rb, sz); | 1302 | kfree(rb); |
1305 | } | 1303 | } |
1306 | 1304 | ||
1307 | static struct lnet_rtrbuf * | 1305 | static struct lnet_rtrbuf * |
@@ -1313,7 +1311,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt) | |||
1313 | struct lnet_rtrbuf *rb; | 1311 | struct lnet_rtrbuf *rb; |
1314 | int i; | 1312 | int i; |
1315 | 1313 | ||
1316 | LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); | 1314 | rb = kzalloc_cpt(sz, GFP_NOFS, cpt); |
1317 | if (!rb) | 1315 | if (!rb) |
1318 | return NULL; | 1316 | return NULL; |
1319 | 1317 | ||
@@ -1327,7 +1325,7 @@ lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt) | |||
1327 | while (--i >= 0) | 1325 | while (--i >= 0) |
1328 | __free_page(rb->rb_kiov[i].bv_page); | 1326 | __free_page(rb->rb_kiov[i].bv_page); |
1329 | 1327 | ||
1330 | LIBCFS_FREE(rb, sz); | 1328 | kfree(rb); |
1331 | return NULL; | 1329 | return NULL; |
1332 | } | 1330 | } |
1333 | 1331 | ||
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 4ebb5a1107be..b6c9ab92c288 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c | |||
@@ -113,7 +113,7 @@ srpc_free_bulk(struct srpc_bulk *bk) | |||
113 | __free_page(pg); | 113 | __free_page(pg); |
114 | } | 114 | } |
115 | 115 | ||
116 | LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov])); | 116 | kfree(bk); |
117 | } | 117 | } |
118 | 118 | ||
119 | struct srpc_bulk * | 119 | struct srpc_bulk * |
@@ -125,8 +125,8 @@ srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg, | |||
125 | 125 | ||
126 | LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); | 126 | LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); |
127 | 127 | ||
128 | LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt, | 128 | bk = kzalloc_cpt(offsetof(struct srpc_bulk, bk_iovs[bulk_npg]), |
129 | offsetof(struct srpc_bulk, bk_iovs[bulk_npg])); | 129 | GFP_KERNEL, cpt); |
130 | if (!bk) { | 130 | if (!bk) { |
131 | CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); | 131 | CERROR("Can't allocate descriptor for %d pages\n", bulk_npg); |
132 | return NULL; | 132 | return NULL; |
@@ -294,8 +294,7 @@ srpc_service_init(struct srpc_service *svc) | |||
294 | } | 294 | } |
295 | 295 | ||
296 | for (j = 0; j < nrpcs; j++) { | 296 | for (j = 0; j < nrpcs; j++) { |
297 | LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(), | 297 | rpc = kzalloc_cpt(sizeof(*rpc), GFP_NOFS, i); |
298 | i, sizeof(*rpc)); | ||
299 | if (!rpc) { | 298 | if (!rpc) { |
300 | srpc_service_fini(svc); | 299 | srpc_service_fini(svc); |
301 | return -ENOMEM; | 300 | return -ENOMEM; |