Diffstat (limited to 'fs')
-rw-r--r--      fs/ceph/caps.c           4
-rw-r--r--      fs/ceph/file.c           4
-rw-r--r--      fs/ceph/mds_client.c    40
-rw-r--r--      fs/ceph/messenger.c     20
-rw-r--r--      fs/ceph/mon_client.c    25
-rw-r--r--      fs/ceph/msgpool.c       13
-rw-r--r--      fs/ceph/osd_client.c    22
7 files changed, 48 insertions(+), 80 deletions(-)
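
This series switches ceph_msg_new() and the request allocators from the ERR_PTR()/PTR_ERR() convention to plain NULL-on-failure, and updates every caller to map NULL to -ENOMEM (or to bail out silently where no error is propagated). A rough caller-side sketch of the pattern follows; the wrapper function and the front length are hypothetical, only ceph_msg_new() and ceph_con_send() come from the diff:

static int send_example_msg(struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        /*
         * Before: msg = ceph_msg_new(...); if (IS_ERR(msg)) return PTR_ERR(msg);
         * After: the only failure mode is allocation, so NULL is enough and
         * the caller supplies the errno itself.
         */
        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, 128, 0, 0, NULL);
        if (!msg)
                return -ENOMEM;

        ceph_con_send(&session->s_con, msg);
        return 0;
}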
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 51fd39da1470..8755e2d83d4c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -939,8 +939,8 @@ static int send_cap_msg(struct ceph_mds_session *session,
              xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
 
         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
-        if (IS_ERR(msg))
-                return PTR_ERR(msg);
+        if (!msg)
+                return -ENOMEM;
 
         msg->hdr.tid = cpu_to_le64(flush_tid);
 
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 834d2b83834a..b0426090e8c3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -649,8 +649,8 @@ more:
                                     do_sync,
                                     ci->i_truncate_seq, ci->i_truncate_size,
                                     &mtime, false, 2);
-        if (IS_ERR(req))
-                return PTR_ERR(req);
+        if (!req)
+                return -ENOMEM;
 
         num_pages = calc_pages_for(pos, len);
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d549ab3adfda..7e89c185d38d 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -666,9 +666,9 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
         struct ceph_mds_session_head *h;
 
         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
-        if (IS_ERR(msg)) {
+        if (!msg) {
                 pr_err("create_session_msg ENOMEM creating msg\n");
-                return ERR_PTR(PTR_ERR(msg));
+                return NULL;
         }
         h = msg->front.iov_base;
         h->op = cpu_to_le32(op);
@@ -687,7 +687,6 @@ static int __open_session(struct ceph_mds_client *mdsc,
         struct ceph_msg *msg;
         int mstate;
         int mds = session->s_mds;
-        int err = 0;
 
         /* wait for mds to go active? */
         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
@@ -698,13 +697,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
         /* send connect message */
         msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
-        if (IS_ERR(msg)) {
-                err = PTR_ERR(msg);
-                goto out;
-        }
+        if (!msg)
+                return -ENOMEM;
         ceph_con_send(&session->s_con, msg);
-
-out:
         return 0;
 }
 
@@ -883,8 +878,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
              ceph_mds_state_name(state));
         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                  ++session->s_renew_seq);
-        if (IS_ERR(msg))
-                return PTR_ERR(msg);
+        if (!msg)
+                return -ENOMEM;
         ceph_con_send(&session->s_con, msg);
         return 0;
 }
@@ -931,17 +926,15 @@ static int request_close_session(struct ceph_mds_client *mdsc,
                                  struct ceph_mds_session *session)
 {
         struct ceph_msg *msg;
-        int err = 0;
 
         dout("request_close_session mds%d state %s seq %lld\n",
              session->s_mds, session_state_name(session->s_state),
              session->s_seq);
         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
-        if (IS_ERR(msg))
-                err = PTR_ERR(msg);
-        else
-                ceph_con_send(&session->s_con, msg);
-        return err;
+        if (!msg)
+                return -ENOMEM;
+        ceph_con_send(&session->s_con, msg);
+        return 0;
 }
 
 /*
@@ -1426,8 +1419,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
                 len += req->r_old_dentry->d_name.len;
 
         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
-        if (IS_ERR(msg))
+        if (!msg) {
+                msg = ERR_PTR(-ENOMEM);
                 goto out_free2;
+        }
 
         msg->hdr.tid = cpu_to_le64(req->r_tid);
 
@@ -1518,7 +1513,7 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
         if (IS_ERR(msg)) {
                 req->r_err = PTR_ERR(msg);
                 complete_request(mdsc, req);
-                return -PTR_ERR(msg);
+                return PTR_ERR(msg);
         }
         req->r_request = msg;
 
@@ -2158,11 +2153,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
                 goto fail_nopagelist;
         ceph_pagelist_init(pagelist);
 
+        err = -ENOMEM;
         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, 0, 0, NULL);
-        if (IS_ERR(reply)) {
-                err = PTR_ERR(reply);
+        if (!reply)
                 goto fail_nomsg;
-        }
 
         /* find session */
         session = __ceph_lookup_mds_session(mdsc, mds);
@@ -2469,7 +2463,7 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
         len += dnamelen;
 
         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
-        if (IS_ERR(msg))
+        if (!msg)
                 return;
         lease = msg->front.iov_base;
         lease->action = action;
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index af3b143fbf02..fe7d0c52ae3c 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -1402,19 +1402,17 @@ static int read_partial_message(struct ceph_connection *con)
                 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
                 if (skip) {
                         /* skip this message */
-                        dout("alloc_msg returned NULL, skipping message\n");
+                        dout("alloc_msg said skip message\n");
                         con->in_base_pos = -front_len - middle_len - data_len -
                                 sizeof(m->footer);
                         con->in_tag = CEPH_MSGR_TAG_READY;
                         con->in_seq++;
                         return 0;
                 }
-                if (IS_ERR(con->in_msg)) {
-                        ret = PTR_ERR(con->in_msg);
-                        con->in_msg = NULL;
+                if (!con->in_msg) {
                         con->error_msg =
                                 "error allocating memory for incoming message";
-                        return ret;
+                        return -ENOMEM;
                 }
                 m = con->in_msg;
                 m->front.iov_len = 0;    /* haven't read it yet */
@@ -2147,7 +2145,7 @@ out2:
         ceph_msg_put(m);
 out:
         pr_err("msg_new can't create type %d len %d\n", type, front_len);
-        return ERR_PTR(-ENOMEM);
+        return NULL;
 }
 
 /*
@@ -2190,10 +2188,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                 mutex_unlock(&con->mutex);
                 msg = con->ops->alloc_msg(con, hdr, skip);
                 mutex_lock(&con->mutex);
-                if (IS_ERR(msg))
-                        return msg;
-
-                if (*skip)
+                if (!msg || *skip)
                         return NULL;
         }
         if (!msg) {
@@ -2202,17 +2197,16 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                 if (!msg) {
                         pr_err("unable to allocate msg type %d len %d\n",
                                type, front_len);
-                        return ERR_PTR(-ENOMEM);
+                        return NULL;
                 }
         }
         memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
 
         if (middle_len) {
                 ret = ceph_alloc_middle(con, msg);
-
                 if (ret < 0) {
                         ceph_msg_put(msg);
-                        return msg;
+                        return NULL;
                 }
         }
 
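
With ceph_alloc_msg() returning NULL for both the skip and the out-of-memory cases, the two are told apart by the *skip flag: read_partial_message() discards the incoming message when skip is set and reports -ENOMEM when it is not. A hedged sketch of a hypothetical ->alloc_msg() hook following that contract (the header field widths are assumed from the surrounding code, not confirmed by this diff):

static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
                                          struct ceph_msg_header *hdr,
                                          int *skip)
{
        int type = le16_to_cpu(hdr->type);
        int front_len = le32_to_cpu(hdr->front_len);

        if (type != CEPH_MSG_OSD_OPREPLY) {
                *skip = 1;        /* NULL + skip set: silently drop the message */
                return NULL;
        }

        *skip = 0;
        return ceph_msg_new(type, front_len, 0, 0, NULL);
        /* NULL with skip clear is treated as an allocation failure */
}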
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index 5bee9250bf2a..35f593e7e364 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -490,16 +490,13 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
         req->buf = buf;
         init_completion(&req->completion);
 
+        err = -ENOMEM;
         req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
-        if (IS_ERR(req->request)) {
-                err = PTR_ERR(req->request);
+        if (!req->request)
                 goto out;
-        }
         req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, 0, 0, NULL);
-        if (IS_ERR(req->reply)) {
-                err = PTR_ERR(req->reply);
+        if (!req->reply)
                 goto out;
-        }
 
         /* fill out request */
         h = req->request->front.iov_base;
@@ -634,30 +631,22 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
                 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
 
         /* msg pools */
+        err = -ENOMEM;
         monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
                                      sizeof(struct ceph_mon_subscribe_ack),
                                      0, 0, NULL);
-        if (IS_ERR(monc->m_subscribe_ack)) {
-                err = PTR_ERR(monc->m_subscribe_ack);
-                monc->m_subscribe_ack = NULL;
+        if (!monc->m_subscribe_ack)
                 goto out_monmap;
-        }
 
         monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, 0, 0,
                                           NULL);
-        if (IS_ERR(monc->m_auth_reply)) {
-                err = PTR_ERR(monc->m_auth_reply);
-                monc->m_auth_reply = NULL;
+        if (!monc->m_auth_reply)
                 goto out_subscribe_ack;
-        }
 
         monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
         monc->pending_auth = 0;
-        if (IS_ERR(monc->m_auth)) {
-                err = PTR_ERR(monc->m_auth);
-                monc->m_auth = NULL;
+        if (!monc->m_auth)
                 goto out_auth_reply;
-        }
 
         monc->cur_mon = -1;
         monc->hunting = true;
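
The monitor client conversion leans on the usual goto-unwind idiom: err is primed with -ENOMEM once, so each allocation only needs a NULL test and a jump to the matching cleanup label. A self-contained sketch of that idiom using generic kernel allocations (the struct and sizes are illustrative, not from the patch):

#include <linux/slab.h>
#include <linux/errno.h>

struct example_msgs {
        void *subscribe_ack;
        void *auth_reply;
};

static int example_init(struct example_msgs *ex)
{
        int err;

        err = -ENOMEM;                /* primed once for the whole chain */
        ex->subscribe_ack = kzalloc(64, GFP_KERNEL);
        if (!ex->subscribe_ack)
                goto out;

        ex->auth_reply = kzalloc(4096, GFP_KERNEL);
        if (!ex->auth_reply)
                goto out_subscribe_ack;

        return 0;

out_subscribe_ack:
        kfree(ex->subscribe_ack);
out:
        return err;
}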
diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c
index ca032223e87b..04fea84890da 100644
--- a/fs/ceph/msgpool.c
+++ b/fs/ceph/msgpool.c
@@ -10,12 +10,8 @@
 static void *alloc_fn(gfp_t gfp_mask, void *arg)
 {
         struct ceph_msgpool *pool = arg;
-        struct ceph_msg *m;
 
-        m = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
-        if (IS_ERR(m))
-                return NULL;
-        return m;
+        return ceph_msg_new(0, pool->front_len, 0, 0, NULL);
 }
 
 static void free_fn(void *element, void *arg)
@@ -42,17 +38,12 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                   int front_len)
 {
         if (front_len > pool->front_len) {
-                struct ceph_msg *msg;
-
                 pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
                        pool, front_len, pool->front_len);
                 WARN_ON(1);
 
                 /* try to alloc a fresh message */
-                msg = ceph_msg_new(0, front_len, 0, 0, NULL);
-                if (!IS_ERR(msg))
-                        return msg;
-                return NULL;
+                return ceph_msg_new(0, front_len, 0, 0, NULL);
         }
 
         return mempool_alloc(pool->pool, GFP_NOFS);
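
The msgpool change works because mempool_create() already expects its element allocator to return NULL on failure, so alloc_fn() can now pass ceph_msg_new()'s result straight through. A rough sketch of how such a pool is wired up; the free callback body and the minimum element count are assumptions, not taken from this diff:

#include <linux/mempool.h>

static void *alloc_fn(gfp_t gfp_mask, void *arg)
{
        struct ceph_msgpool *pool = arg;

        /* NULL on failure is exactly what the mempool core expects */
        return ceph_msg_new(0, pool->front_len, 0, 0, NULL);
}

static void free_fn(void *element, void *arg)
{
        ceph_msg_put(element);        /* assumed: drop the message reference */
}

/* during pool init, something like: */
/*        pool->pool = mempool_create(4, alloc_fn, free_fn, pool); */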
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index 3d2bfbc232dc..a51d0df2af30 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -147,7 +147,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                 req = kzalloc(sizeof(*req), GFP_NOFS);
         }
         if (req == NULL)
-                return ERR_PTR(-ENOMEM);
+                return NULL;
 
         req->r_osdc = osdc;
         req->r_mempool = use_mempool;
@@ -165,9 +165,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
         else
                 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
                                    OSD_OPREPLY_FRONT_LEN, 0, 0, NULL);
-        if (IS_ERR(msg)) {
+        if (!msg) {
                 ceph_osdc_put_request(req);
-                return ERR_PTR(PTR_ERR(msg));
+                return NULL;
         }
         req->r_reply = msg;
 
@@ -179,9 +179,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
         else
                 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
-        if (IS_ERR(msg)) {
+        if (!msg) {
                 ceph_osdc_put_request(req);
-                return ERR_PTR(PTR_ERR(msg));
+                return NULL;
         }
         msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
         memset(msg->front.iov_base, 0, msg->front.iov_len);
@@ -1263,8 +1263,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                     NULL, 0, truncate_seq, truncate_size, NULL,
                                     false, 1);
-        if (IS_ERR(req))
-                return PTR_ERR(req);
+        if (!req)
+                return -ENOMEM;
 
         /* it may be a short read due to an object boundary */
         req->r_pages = pages;
@@ -1306,8 +1306,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                                     snapc, do_sync,
                                     truncate_seq, truncate_size, mtime,
                                     nofail, 1);
-        if (IS_ERR(req))
-                return PTR_ERR(req);
+        if (!req)
+                return -ENOMEM;
 
         /* it may be a short write due to an object boundary */
         req->r_pages = pages;
@@ -1393,7 +1393,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                 pr_warning("get_reply front %d > preallocated %d\n",
                            front, (int)req->r_reply->front.iov_len);
                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, 0, 0, NULL);
-                if (IS_ERR(m))
+                if (!m)
                         goto out;
                 ceph_msg_put(req->r_reply);
                 req->r_reply = m;
@@ -1409,7 +1409,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                          tid, want, m->nr_pages);
                 *skip = 1;
                 ceph_msg_put(m);
-                m = ERR_PTR(-EIO);
+                m = NULL;
                 goto out;
         }
         m->pages = req->r_pages;