author     Andy Grover <andy.grover@oracle.com>  2010-01-12 15:56:06 -0500
committer  Andy Grover <andy.grover@oracle.com>  2010-09-08 21:11:36 -0400
commit     fc445084f185cdd877bec323bfe724a361e2292a (patch)
tree       eda014c09872cbbacc411ea3b89f359291ccb577 /net
parent     3ef13f3c22aaea28aff383cb0883481d24885456 (diff)
RDS: Explicitly allocate rm in sendmsg()
r_m_copy_from_user used to allocate the rm as well as kernel
buffers for the data, and then copy the data in. Now, sendmsg()
allocates the rm, although the data buffer alloc still happens
in r_m_copy_from_user.
SGs are still allocated with rm, but now r_m_alloc_sgs() is
used to reserve them. This allows multiple SG lists to be
allocated from the one rm -- this is important once we also
want to alloc our rdma sgl from this pool.
Signed-off-by: Andy Grover <andy.grover@oracle.com>
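
For readers skimming the patch below, the core idea is the single-allocation SG pool: rds_message_alloc() now sizes the trailing scatterlist area up front, and rds_message_alloc_sgs() hands out runs of entries from it so the data path (and later the rdma path) never needs a separate allocation. The following is a minimal userspace sketch of that pattern only; the struct and function names are illustrative stand-ins, not the kernel API.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a scatterlist entry. */
struct sg_entry {
	void *addr;
	size_t len;
};

/* Message header plus bookkeeping for the trailing SG pool. */
struct msg {
	unsigned int used_sgs;
	unsigned int total_sgs;
	/* sg_entry slots follow this struct in the same allocation */
};

/* One allocation covers the message and extra_len bytes of SG slots,
 * mirroring rds_message_alloc(extra_len, gfp) in the patch. */
static struct msg *msg_alloc(size_t extra_len)
{
	struct msg *m = calloc(1, sizeof(*m) + extra_len);

	if (m)
		m->total_sgs = extra_len / sizeof(struct sg_entry);
	return m;
}

/* Reserve nents contiguous slots from the pool, like rds_message_alloc_sgs(). */
static struct sg_entry *msg_alloc_sgs(struct msg *m, unsigned int nents)
{
	struct sg_entry *first = (struct sg_entry *)&m[1];
	struct sg_entry *ret = &first[m->used_sgs];

	assert(m->used_sgs + nents <= m->total_sgs);
	m->used_sgs += nents;
	return ret;
}

int main(void)
{
	/* e.g. 3 slots for the data sgl and, later, 2 more for an rdma sgl */
	struct msg *m = msg_alloc(5 * sizeof(struct sg_entry));
	struct sg_entry *data_sg;
	struct sg_entry *rdma_sg;

	if (!m)
		return 1;
	data_sg = msg_alloc_sgs(m, 3);
	rdma_sg = msg_alloc_sgs(m, 2);
	printf("data sgs at %p, rdma sgs at %p, used %u/%u\n",
	       (void *)data_sg, (void *)rdma_sg, m->used_sgs, m->total_sgs);
	free(m);
	return 0;
}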
Diffstat (limited to 'net')
-rw-r--r--  net/rds/message.c  | 51
-rw-r--r--  net/rds/rds.h      |  7
-rw-r--r--  net/rds/send.c     | 31
3 files changed, 62 insertions, 27 deletions
diff --git a/net/rds/message.c b/net/rds/message.c
index 4421d160b1a4..3498cbcc7542 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -214,17 +214,22 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
 }
 EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
 
-struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
+/*
+ * Each rds_message is allocated with extra space for the scatterlist entries
+ * rds ops will need. This is to minimize memory allocation count. Then, each rds op
+ * can grab SGs when initializing its part of the rds_message.
+ */
+struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
 {
 	struct rds_message *rm;
 
-	rm = kzalloc(sizeof(struct rds_message) +
-		     (nents * sizeof(struct scatterlist)), gfp);
+	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
 	if (!rm)
 		goto out;
 
-	if (nents)
-		sg_init_table(rm->data.m_sg, nents);
+	rm->m_used_sgs = 0;
+	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
+
 	atomic_set(&rm->m_refcount, 1);
 	INIT_LIST_HEAD(&rm->m_sock_item);
 	INIT_LIST_HEAD(&rm->m_conn_item);
@@ -234,6 +239,23 @@ out:
 	return rm;
 }
 
+/*
+ * RDS ops use this to grab SG entries from the rm's sg pool.
+ */
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+{
+	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
+	struct scatterlist *sg_ret;
+
+	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
+
+	sg_ret = &sg_first[rm->m_used_sgs];
+
+	rm->m_used_sgs += nents;
+
+	return sg_ret;
+}
+
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
 {
 	struct rds_message *rm;
@@ -256,22 +278,15 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
 	return rm;
 }
 
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
 			       size_t total_len)
 {
 	unsigned long to_copy;
 	unsigned long iov_off;
 	unsigned long sg_off;
-	struct rds_message *rm;
 	struct iovec *iov;
 	struct scatterlist *sg;
-	int ret;
-
-	rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
-	if (rm == NULL) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	int ret = 0;
 
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
 
@@ -320,14 +335,8 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
 		sg++;
 	}
 
-	ret = 0;
 out:
-	if (ret) {
-		if (rm)
-			rds_message_put(rm);
-		rm = ERR_PTR(ret);
-	}
-	return rm;
+	return ret;
 }
 
 int rds_message_inc_copy_to_user(struct rds_incoming *inc,
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 07a750b3fb31..d29c71aabbd4 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -268,9 +268,11 @@ struct rds_message {
 		struct {
 			unsigned int m_nents;
 			unsigned int m_count;
-			struct scatterlist m_sg[0];
+			struct scatterlist *m_sg;
 		} data;
 	};
+	unsigned int m_used_sgs;
+	unsigned int m_total_sgs;
 };
 
 /*
@@ -573,7 +575,8 @@ rds_conn_connecting(struct rds_connection *conn)
 
 /* message.c */
 struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
 			       size_t total_len);
 struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
 void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
diff --git a/net/rds/send.c b/net/rds/send.c
index 19dfd025498e..28d09447207b 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -758,6 +758,19 @@ out:
 	return *queued;
 }
 
+/*
+ * rds_message is getting to be quite complicated, and we'd like to allocate
+ * it all in one go. This figures out how big it needs to be up front.
+ */
+static int rds_rm_size(struct msghdr *msg, int data_len)
+{
+	int size = 0;
+
+	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
+
+	return size;
+}
+
 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
 			 struct msghdr *msg, int *allocated_mr)
 {
@@ -845,13 +858,23 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
 		goto out;
 	}
 
-	rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
-	if (IS_ERR(rm)) {
-		ret = PTR_ERR(rm);
-		rm = NULL;
+	/* size of rm including all sgs */
+	ret = rds_rm_size(msg, payload_len);
+	if (ret < 0)
+		goto out;
+
+	rm = rds_message_alloc(ret, GFP_KERNEL);
+	if (!rm) {
+		ret = -ENOMEM;
 		goto out;
 	}
 
+	rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+	/* XXX fix this to not allocate memory */
+	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+	if (ret)
+		goto out;
+
 	rm->m_daddr = daddr;
 
 	/* rds_conn_create has a spinlock that runs with IRQ off.