path: root/fs
author     Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-19 17:59:44 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2008-03-19 17:59:44 -0400
commit     c7c350e92aab1bba68f26a6027b734adcf9824ba
tree       aa99bd94c3049dd871d9c030d70a5f3d87591a95
parent     2f42b5d043ee271d1e5d30ecd77186b6c4d4e534
parent     f8512ad0da16cbe156f3a7627971cdf0b39c4138

Merge branch 'hotfixes' into devel
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c                      2
-rw-r--r--  fs/hfs/brec.c                18
-rw-r--r--  fs/nfs/read.c                 5
-rw-r--r--  fs/nfs/write.c                8
-rw-r--r--  fs/nfsd/nfsfh.c               4
-rw-r--r--  fs/ocfs2/cluster/tcp.c        9
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h     21
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c     2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c    103
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c     18
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c   57
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c      6
-rw-r--r--  fs/ocfs2/dlmglue.c            2
-rw-r--r--  fs/ocfs2/resize.c             2
-rw-r--r--  fs/proc/base.c               25
-rw-r--r--  fs/proc/generic.c            26
-rw-r--r--  fs/proc/internal.h            7
-rw-r--r--  fs/proc/proc_net.c          117
-rw-r--r--  fs/proc/task_mmu.c           18
19 files changed, 332 insertions(+), 118 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 3312fcc3c098..553b5b7960ad 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1194,6 +1194,8 @@ EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
 EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
+EXPORT_SYMBOL(bio_map_user);
+EXPORT_SYMBOL(bio_unmap_user);
 EXPORT_SYMBOL(bio_map_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 878bf25dbc6a..92fb358ce824 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -229,7 +229,7 @@ skip:
 static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
 {
         struct hfs_btree *tree;
-        struct hfs_bnode *node, *new_node;
+        struct hfs_bnode *node, *new_node, *next_node;
         struct hfs_bnode_desc node_desc;
         int num_recs, new_rec_off, new_off, old_rec_off;
         int data_start, data_end, size;
@@ -248,6 +248,17 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
         new_node->type = node->type;
         new_node->height = node->height;
 
+        if (node->next)
+                next_node = hfs_bnode_find(tree, node->next);
+        else
+                next_node = NULL;
+
+        if (IS_ERR(next_node)) {
+                hfs_bnode_put(node);
+                hfs_bnode_put(new_node);
+                return next_node;
+        }
+
         size = tree->node_size / 2 - node->num_recs * 2 - 14;
         old_rec_off = tree->node_size - 4;
         num_recs = 1;
@@ -261,6 +272,8 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
                         /* panic? */
                         hfs_bnode_put(node);
                         hfs_bnode_put(new_node);
+                        if (next_node)
+                                hfs_bnode_put(next_node);
                         return ERR_PTR(-ENOSPC);
                 }
 
@@ -315,8 +328,7 @@ static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd)
         hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc));
 
         /* update next bnode header */
-        if (new_node->next) {
-                struct hfs_bnode *next_node = hfs_bnode_find(tree, new_node->next);
+        if (next_node) {
                 next_node->prev = new_node->this;
                 hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc));
                 node_desc.prev = cpu_to_be32(next_node->prev);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index be9e8270f4d7..ab2f7d233e01 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -531,7 +531,10 @@ readpage_async_filler(void *data, struct page *page)
 
         if (len < PAGE_CACHE_SIZE)
                 zero_user_segment(page, len, PAGE_CACHE_SIZE);
-        nfs_pageio_add_request(desc->pgio, new);
+        if (!nfs_pageio_add_request(desc->pgio, new)) {
+                error = desc->pgio->pg_error;
+                goto out_unlock;
+        }
         return 0;
 out_error:
         error = PTR_ERR(new);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1667e3984418..4cb88df12f83 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -39,6 +39,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
                                     unsigned int, unsigned int);
 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                   struct inode *inode, int ioflags);
+static void nfs_redirty_request(struct nfs_page *req);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
@@ -279,7 +280,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                 BUG();
         }
         spin_unlock(&inode->i_lock);
-        nfs_pageio_add_request(pgio, req);
+        if (!nfs_pageio_add_request(pgio, req)) {
+                nfs_redirty_request(req);
+                nfs_end_page_writeback(page);
+                nfs_clear_page_tag_locked(req);
+                return pgio->pg_error;
+        }
         return 0;
 }
 
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 1eb771d79cca..3e6b3f41ee1f 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -232,6 +232,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
                 fhp->fh_dentry = dentry;
                 fhp->fh_export = exp;
                 nfsd_nr_verified++;
+                cache_get(&exp->h);
         } else {
                 /*
                  * just rechecking permissions
@@ -241,6 +242,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
                 dprintk("nfsd: fh_verify - just checking\n");
                 dentry = fhp->fh_dentry;
                 exp = fhp->fh_export;
+                cache_get(&exp->h);
                 /*
                  * Set user creds for this exportpoint; necessary even
                  * in the "just checking" case because this may be a
@@ -252,8 +254,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
                 if (error)
                         goto out;
         }
-        cache_get(&exp->h);
-
 
         error = nfsd_mode_check(rqstp, dentry->d_inode->i_mode, type);
         if (error)
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index ee50c9610e7f..b8057c51b205 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -451,9 +451,9 @@ static void o2net_set_nn_state(struct o2net_node *nn,
                 /* delay if we're withing a RECONNECT_DELAY of the
                  * last attempt */
                 delay = (nn->nn_last_connect_attempt +
-                         msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
+                         msecs_to_jiffies(o2net_reconnect_delay(NULL)))
                         - jiffies;
-                if (delay > msecs_to_jiffies(o2net_reconnect_delay(sc->sc_node)))
+                if (delay > msecs_to_jiffies(o2net_reconnect_delay(NULL)))
                         delay = 0;
                 mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
                 queue_delayed_work(o2net_wq, &nn->nn_connect_work, delay);
@@ -1552,12 +1552,11 @@ static void o2net_connect_expired(struct work_struct *work)
 
         spin_lock(&nn->nn_lock);
         if (!nn->nn_sc_valid) {
-                struct o2nm_node *node = nn->nn_sc->sc_node;
                 mlog(ML_ERROR, "no connection established with node %u after "
                      "%u.%u seconds, giving up and returning errors.\n",
                      o2net_num_from_nn(nn),
-                     o2net_idle_timeout(node) / 1000,
-                     o2net_idle_timeout(node) % 1000);
+                     o2net_idle_timeout(NULL) / 1000,
+                     o2net_idle_timeout(NULL) % 1000);
 
                 o2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
         }
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 9843ee17ea27..dc8ea666efdb 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -176,6 +176,7 @@ struct dlm_mig_lockres_priv
 {
         struct dlm_lock_resource *lockres;
         u8 real_master;
+        u8 extra_ref;
 };
 
 struct dlm_assert_master_priv
@@ -602,17 +603,19 @@ enum dlm_query_join_response_code {
         JOIN_PROTOCOL_MISMATCH,
 };
 
+struct dlm_query_join_packet {
+        u8 code;       /* Response code. dlm_minor and fs_minor
+                          are only valid if this is JOIN_OK */
+        u8 dlm_minor;  /* The minor version of the protocol the
+                          dlm is speaking. */
+        u8 fs_minor;   /* The minor version of the protocol the
+                          filesystem is speaking. */
+        u8 reserved;
+};
+
 union dlm_query_join_response {
         u32 intval;
-        struct {
-                u8 code;       /* Response code. dlm_minor and fs_minor
-                                  are only valid if this is JOIN_OK */
-                u8 dlm_minor;  /* The minor version of the protocol the
-                                  dlm is speaking. */
-                u8 fs_minor;   /* The minor version of the protocol the
-                                  filesystem is speaking. */
-                u8 reserved;
-        } packet;
+        struct dlm_query_join_packet packet;
 };
 
 struct dlm_lock_request
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ecb4d997221e..75997b4deaf3 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -487,7 +487,7 @@ int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                      "cookie=%u:%llu\n",
                      dlm_get_lock_cookie_node(be64_to_cpu(cnv->cookie)),
                      dlm_get_lock_cookie_seq(be64_to_cpu(cnv->cookie)));
-                __dlm_print_one_lock_resource(res);
+                dlm_print_one_lock_resource(res);
                 goto leave;
         }
 
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 638d2ebb892b..0879d86113e3 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -713,14 +713,46 @@ static int dlm_query_join_proto_check(char *proto_type, int node,
         return rc;
 }
 
+/*
+ * struct dlm_query_join_packet is made up of four one-byte fields. They
+ * are effectively in big-endian order already. However, little-endian
+ * machines swap them before putting the packet on the wire (because
+ * query_join's response is a status, and that status is treated as a u32
+ * on the wire). Thus, a big-endian and little-endian machines will treat
+ * this structure differently.
+ *
+ * The solution is to have little-endian machines swap the structure when
+ * converting from the structure to the u32 representation. This will
+ * result in the structure having the correct format on the wire no matter
+ * the host endian format.
+ */
+static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
+                                          u32 *wire)
+{
+        union dlm_query_join_response response;
+
+        response.packet = *packet;
+        *wire = cpu_to_be32(response.intval);
+}
+
+static void dlm_query_join_wire_to_packet(u32 wire,
+                                          struct dlm_query_join_packet *packet)
+{
+        union dlm_query_join_response response;
+
+        response.intval = cpu_to_be32(wire);
+        *packet = response.packet;
+}
+
 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data)
 {
         struct dlm_query_join_request *query;
-        union dlm_query_join_response response = {
-                .packet.code = JOIN_DISALLOW,
+        struct dlm_query_join_packet packet = {
+                .code = JOIN_DISALLOW,
         };
         struct dlm_ctxt *dlm = NULL;
+        u32 response;
         u8 nodenum;
 
         query = (struct dlm_query_join_request *) msg->buf;
@@ -737,11 +769,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                 mlog(0, "node %u is not in our live map yet\n",
                      query->node_idx);
 
-                response.packet.code = JOIN_DISALLOW;
+                packet.code = JOIN_DISALLOW;
                 goto respond;
         }
 
-        response.packet.code = JOIN_OK_NO_MAP;
+        packet.code = JOIN_OK_NO_MAP;
 
         spin_lock(&dlm_domain_lock);
         dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
@@ -760,7 +792,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                         mlog(0, "disallow join as node %u does not "
                              "have node %u in its nodemap\n",
                              query->node_idx, nodenum);
-                        response.packet.code = JOIN_DISALLOW;
+                        packet.code = JOIN_DISALLOW;
                         goto unlock_respond;
                 }
         }
@@ -780,23 +812,23 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                         /*If this is a brand new context and we
                          * haven't started our join process yet, then
                          * the other node won the race. */
-                        response.packet.code = JOIN_OK_NO_MAP;
+                        packet.code = JOIN_OK_NO_MAP;
                 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
                         /* Disallow parallel joins. */
-                        response.packet.code = JOIN_DISALLOW;
+                        packet.code = JOIN_DISALLOW;
                 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
                         mlog(0, "node %u trying to join, but recovery "
                              "is ongoing.\n", bit);
-                        response.packet.code = JOIN_DISALLOW;
+                        packet.code = JOIN_DISALLOW;
                 } else if (test_bit(bit, dlm->recovery_map)) {
                         mlog(0, "node %u trying to join, but it "
                              "still needs recovery.\n", bit);
-                        response.packet.code = JOIN_DISALLOW;
+                        packet.code = JOIN_DISALLOW;
                 } else if (test_bit(bit, dlm->domain_map)) {
                         mlog(0, "node %u trying to join, but it "
                              "is still in the domain! needs recovery?\n",
                              bit);
-                        response.packet.code = JOIN_DISALLOW;
+                        packet.code = JOIN_DISALLOW;
                 } else {
                         /* Alright we're fully a part of this domain
                          * so we keep some state as to who's joining
@@ -807,19 +839,15 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                         if (dlm_query_join_proto_check("DLM", bit,
                                                        &dlm->dlm_locking_proto,
                                                        &query->dlm_proto)) {
-                                response.packet.code =
-                                        JOIN_PROTOCOL_MISMATCH;
+                                packet.code = JOIN_PROTOCOL_MISMATCH;
                         } else if (dlm_query_join_proto_check("fs", bit,
                                                               &dlm->fs_locking_proto,
                                                               &query->fs_proto)) {
-                                response.packet.code =
-                                        JOIN_PROTOCOL_MISMATCH;
+                                packet.code = JOIN_PROTOCOL_MISMATCH;
                         } else {
-                                response.packet.dlm_minor =
-                                        query->dlm_proto.pv_minor;
-                                response.packet.fs_minor =
-                                        query->fs_proto.pv_minor;
-                                response.packet.code = JOIN_OK;
+                                packet.dlm_minor = query->dlm_proto.pv_minor;
+                                packet.fs_minor = query->fs_proto.pv_minor;
+                                packet.code = JOIN_OK;
                                 __dlm_set_joining_node(dlm, query->node_idx);
                         }
                 }
@@ -830,9 +858,10 @@ unlock_respond:
         spin_unlock(&dlm_domain_lock);
 
 respond:
-        mlog(0, "We respond with %u\n", response.packet.code);
+        mlog(0, "We respond with %u\n", packet.code);
 
-        return response.intval;
+        dlm_query_join_packet_to_wire(&packet, &response);
+        return response;
 }
 
 static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -937,7 +966,7 @@ static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
                            sizeof(unsigned long))) {
                 mlog(ML_ERROR,
                      "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
-                     map_size, BITS_TO_LONGS(O2NM_MAX_NODES));
+                     map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES));
                 return -EINVAL;
         }
 
@@ -968,7 +997,8 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
 {
         int status;
         struct dlm_query_join_request join_msg;
-        union dlm_query_join_response join_resp;
+        struct dlm_query_join_packet packet;
+        u32 join_resp;
 
         mlog(0, "querying node %d\n", node);
 
@@ -984,11 +1014,12 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
 
         status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
                                     sizeof(join_msg), node,
-                                    &join_resp.intval);
+                                    &join_resp);
         if (status < 0 && status != -ENOPROTOOPT) {
                 mlog_errno(status);
                 goto bail;
         }
+        dlm_query_join_wire_to_packet(join_resp, &packet);
 
         /* -ENOPROTOOPT from the net code means the other side isn't
            listening for our message type -- that's fine, it means
@@ -997,10 +1028,10 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
         if (status == -ENOPROTOOPT) {
                 status = 0;
                 *response = JOIN_OK_NO_MAP;
-        } else if (join_resp.packet.code == JOIN_DISALLOW ||
-                   join_resp.packet.code == JOIN_OK_NO_MAP) {
-                *response = join_resp.packet.code;
-        } else if (join_resp.packet.code == JOIN_PROTOCOL_MISMATCH) {
+        } else if (packet.code == JOIN_DISALLOW ||
+                   packet.code == JOIN_OK_NO_MAP) {
+                *response = packet.code;
+        } else if (packet.code == JOIN_PROTOCOL_MISMATCH) {
                 mlog(ML_NOTICE,
                      "This node requested DLM locking protocol %u.%u and "
                      "filesystem locking protocol %u.%u. At least one of "
@@ -1012,14 +1043,12 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
                      dlm->fs_locking_proto.pv_minor,
                      node);
                 status = -EPROTO;
-                *response = join_resp.packet.code;
-        } else if (join_resp.packet.code == JOIN_OK) {
-                *response = join_resp.packet.code;
+                *response = packet.code;
+        } else if (packet.code == JOIN_OK) {
+                *response = packet.code;
                 /* Use the same locking protocol as the remote node */
-                dlm->dlm_locking_proto.pv_minor =
-                        join_resp.packet.dlm_minor;
-                dlm->fs_locking_proto.pv_minor =
-                        join_resp.packet.fs_minor;
+                dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
+                dlm->fs_locking_proto.pv_minor = packet.fs_minor;
                 mlog(0,
                      "Node %d responds JOIN_OK with DLM locking protocol "
                      "%u.%u and fs locking protocol %u.%u\n",
@@ -1031,11 +1060,11 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
         } else {
                 status = -EINVAL;
                 mlog(ML_ERROR, "invalid response %d from node %u\n",
-                     join_resp.packet.code, node);
+                     packet.code, node);
         }
 
         mlog(0, "status %d, node %d response is %d\n", status, node,
              *response);
 
 bail:
         return status;
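
The comment added to dlmdomain.c above explains why the four one-byte fields of struct dlm_query_join_packet must be byte-swapped on little-endian hosts before the join response travels as a u32 status. Below is a minimal user-space sketch of that reasoning; the struct, union, and function names are local stand-ins, and htonl() stands in for the kernel's cpu_to_be32(), so this illustrates the idea rather than reproducing the dlm code.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* htonl(): stand-in for cpu_to_be32() */

/* Stand-in for struct dlm_query_join_packet: four one-byte fields. */
struct join_packet {
        uint8_t code;
        uint8_t dlm_minor;
        uint8_t fs_minor;
        uint8_t reserved;
};

union join_response {
        uint32_t intval;
        struct join_packet packet;
};

/* Mirrors dlm_query_join_packet_to_wire(): after the swap, 'code' sits in
 * the most significant byte of the u32 on every host, so the transport's
 * own network-byte-order conversion puts it in the same wire position. */
static uint32_t packet_to_wire(struct join_packet p)
{
        union join_response r;

        r.packet = p;
        return htonl(r.intval);
}

/* Mirrors dlm_query_join_wire_to_packet(): the inverse swap. */
static struct join_packet wire_to_packet(uint32_t wire)
{
        union join_response r;

        r.intval = htonl(wire);
        return r.packet;
}

int main(void)
{
        struct join_packet p = { .code = 3, .dlm_minor = 1, .fs_minor = 2 };
        uint32_t wire = packet_to_wire(p);
        struct join_packet back = wire_to_packet(wire);

        printf("top byte of wire value: %u (always 'code')\n", wire >> 24);
        printf("round trip: code=%u dlm_minor=%u fs_minor=%u\n",
               back.code, back.dlm_minor, back.fs_minor);
        return 0;
}

On either endianness the printed top byte is the response code and the round trip recovers all fields, which is the property the dlmdomain.c helpers rely on.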
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index c92d1b19fc0b..ea6b89577860 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1663,7 +1663,12 @@ way_up_top:
                 dlm_put_mle(tmpmle);
         }
 send_response:
-
+        /*
+         * __dlm_lookup_lockres() grabbed a reference to this lockres.
+         * The reference is released by dlm_assert_master_worker() under
+         * the call to dlm_dispatch_assert_master().  If
+         * dlm_assert_master_worker() isn't called, we drop it here.
+         */
         if (dispatch_assert) {
                 if (response != DLM_MASTER_RESP_YES)
                         mlog(ML_ERROR, "invalid response %d\n", response);
@@ -1678,7 +1683,11 @@ send_response:
                 if (ret < 0) {
                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
                         response = DLM_MASTER_RESP_ERROR;
+                        dlm_lockres_put(res);
                 }
+        } else {
+                if (res)
+                        dlm_lockres_put(res);
         }
 
         dlm_put(dlm);
@@ -2348,7 +2357,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
                      "but it is already dropped!\n", dlm->name,
                      res->lockname.len, res->lockname.name, node);
-                __dlm_print_one_lock_resource(res);
+                dlm_print_one_lock_resource(res);
         }
         ret = 0;
         goto done;
@@ -2408,7 +2417,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
                      "but it is already dropped!\n", dlm->name,
                      res->lockname.len, res->lockname.name, node);
-                __dlm_print_one_lock_resource(res);
+                dlm_print_one_lock_resource(res);
         }
 
         dlm_lockres_put(res);
@@ -2933,6 +2942,9 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
                                 list_del_init(&lock->list);
                                 dlm_lock_put(lock);
+                                /* In a normal unlock, we would have added a
+                                 * DLM_UNLOCK_FREE_LOCK action. Force it. */
+                                dlm_lock_put(lock);
                         }
                 }
                 queue++;
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 91f747b8a538..bcb9260c3735 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -519,9 +519,9 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
         return 0;
 
 master_here:
-        mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
-             task_pid_nr(dlm->dlm_reco_thread_task),
-             dlm->name, dlm->reco.dead_node, dlm->node_num);
+        mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node "
+             "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task),
+             dlm->node_num, dlm->reco.dead_node, dlm->name);
 
         status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
         if (status < 0) {
@@ -1191,7 +1191,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
                     (ml->type == LKM_EXMODE ||
                      memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
                         mlog(ML_ERROR, "mismatched lvbs!\n");
-                        __dlm_print_one_lock_resource(lock->lockres);
+                        dlm_print_one_lock_resource(lock->lockres);
                         BUG();
                 }
                 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
@@ -1327,6 +1327,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                 (struct dlm_migratable_lockres *)msg->buf;
         int ret = 0;
         u8 real_master;
+        u8 extra_refs = 0;
         char *buf = NULL;
         struct dlm_work_item *item = NULL;
         struct dlm_lock_resource *res = NULL;
@@ -1404,16 +1405,28 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
                 __dlm_insert_lockres(dlm, res);
                 spin_unlock(&dlm->spinlock);
 
+                /* Add an extra ref for this lock-less lockres lest the
+                 * dlm_thread purges it before we get the chance to add
+                 * locks to it */
+                dlm_lockres_get(res);
+
+                /* There are three refs that need to be put.
+                 * 1. Taken above.
+                 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
+                 * 3. dlm_lookup_lockres()
+                 * The first one is handled at the end of this function. The
+                 * other two are handled in the worker thread after locks have
+                 * been attached. Yes, we don't wait for purge time to match
+                 * kref_init. The lockres will still have atleast one ref
+                 * added because it is in the hash __dlm_insert_lockres() */
+                extra_refs++;
+
                 /* now that the new lockres is inserted,
                  * make it usable by other processes */
                 spin_lock(&res->spinlock);
                 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                 spin_unlock(&res->spinlock);
                 wake_up(&res->wq);
-
-                /* add an extra ref for just-allocated lockres
-                 * otherwise the lockres will be purged immediately */
-                dlm_lockres_get(res);
         }
 
         /* at this point we have allocated everything we need,
@@ -1443,12 +1456,17 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
         dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
         item->u.ml.lockres = res; /* already have a ref */
         item->u.ml.real_master = real_master;
+        item->u.ml.extra_ref = extra_refs;
         spin_lock(&dlm->work_lock);
         list_add_tail(&item->list, &dlm->work_list);
         spin_unlock(&dlm->work_lock);
         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
 
 leave:
+        /* One extra ref taken needs to be put here */
+        if (extra_refs)
+                dlm_lockres_put(res);
+
         dlm_put(dlm);
         if (ret < 0) {
                 if (buf)
@@ -1464,17 +1482,19 @@ leave:
 
 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
 {
-        struct dlm_ctxt *dlm = data;
+        struct dlm_ctxt *dlm;
         struct dlm_migratable_lockres *mres;
         int ret = 0;
         struct dlm_lock_resource *res;
         u8 real_master;
+        u8 extra_ref;
 
         dlm = item->dlm;
         mres = (struct dlm_migratable_lockres *)data;
 
         res = item->u.ml.lockres;
         real_master = item->u.ml.real_master;
+        extra_ref = item->u.ml.extra_ref;
 
         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                 /* this case is super-rare. only occurs if
@@ -1517,6 +1537,12 @@ again:
         }
 
 leave:
+        /* See comment in dlm_mig_lockres_handler() */
+        if (res) {
+                if (extra_ref)
+                        dlm_lockres_put(res);
+                dlm_lockres_put(res);
+        }
         kfree(data);
         mlog_exit(ret);
 }
@@ -1644,7 +1670,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                                 /* retry!? */
                                 BUG();
                         }
-                }
+                } else /* put.. incase we are not the master */
+                        dlm_lockres_put(res);
                 spin_unlock(&res->spinlock);
         }
         spin_unlock(&dlm->spinlock);
@@ -1921,6 +1948,7 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
                        "Recovering res %s:%.*s, is already on recovery list!\n",
                        dlm->name, res->lockname.len, res->lockname.name);
                 list_del_init(&res->recovering);
+                dlm_lockres_put(res);
         }
         /* We need to hold a reference while on the recovery list */
         dlm_lockres_get(res);
@@ -2130,11 +2158,16 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
         assert_spin_locked(&dlm->spinlock);
         assert_spin_locked(&res->spinlock);
 
+        /* We do two dlm_lock_put(). One for removing from list and the other is
+         * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */
+
         /* TODO: check pending_asts, pending_basts here */
         list_for_each_entry_safe(lock, next, &res->granted, list) {
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
+                        /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+                        dlm_lock_put(lock);
                         freed++;
                 }
         }
@@ -2142,6 +2175,8 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
+                        /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+                        dlm_lock_put(lock);
                         freed++;
                 }
         }
@@ -2149,6 +2184,8 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                 if (lock->ml.node == dead_node) {
                         list_del_init(&lock->list);
                         dlm_lock_put(lock);
+                        /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
+                        dlm_lock_put(lock);
                         freed++;
                 }
         }
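
The dlmrecovery.c changes above all follow one refcounting discipline: take an extra reference before handing an object to deferred work so a background purger cannot free it, and have whoever finally finishes with the object drop the references it owns. A stand-alone sketch of that discipline with a plain counter follows; the lockres_* helpers here are hypothetical stand-ins, not the dlm API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted object standing in for a dlm lock resource. */
struct lockres {
        int refs;
        const char *name;
};

static struct lockres *lockres_new(const char *name)
{
        struct lockres *res = malloc(sizeof(*res));

        res->refs = 1;                  /* like kref_init() */
        res->name = name;
        return res;
}

static void lockres_get(struct lockres *res) { res->refs++; }

static void lockres_put(struct lockres *res)
{
        if (--res->refs == 0) {
                printf("freeing %s\n", res->name);
                free(res);
        }
}

/* The deferred worker owns the reference the handler took for it. */
static void worker(struct lockres *res)
{
        printf("worker runs on %s (refs=%d)\n", res->name, res->refs);
        lockres_put(res);               /* drop the ref handed to the worker */
}

int main(void)
{
        struct lockres *res = lockres_new("N1");

        lockres_get(res);               /* extra ref: keep it alive until the worker runs */
        worker(res);                    /* normally queued; called inline here */
        lockres_put(res);               /* handler drops its own (initial) reference */
        return 0;
}

The object is only freed once every holder has dropped its reference, which is exactly the balance the extra_ref bookkeeping in dlm_mig_lockres_handler() and dlm_mig_lockres_worker() maintains.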
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index cebd089f8955..4060bb328bc8 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -176,12 +176,14 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm,
              res->lockname.name, master);
 
         if (!master) {
+                /* drop spinlock... retake below */
+                spin_unlock(&dlm->spinlock);
+
                 spin_lock(&res->spinlock);
                 /* This ensures that clear refmap is sent after the set */
                 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
                 spin_unlock(&res->spinlock);
-                /* drop spinlock to do messaging, retake below */
-                spin_unlock(&dlm->spinlock);
+
                 /* clear our bit from the master's refmap, ignore errors */
                 ret = dlm_drop_lockres_ref(dlm, res);
                 if (ret < 0) {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f7794306b2bd..1f1873bf41fb 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2409,7 +2409,7 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
         return 0;
 }
 
-static struct seq_operations ocfs2_dlm_seq_ops = {
+static const struct seq_operations ocfs2_dlm_seq_ops = {
         .start =        ocfs2_dlm_seq_start,
         .stop =         ocfs2_dlm_seq_stop,
         .next =         ocfs2_dlm_seq_next,
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index 37835ffcb039..8166968e9015 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -597,7 +597,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
                 memset(cr, 0, sizeof(struct ocfs2_chain_rec));
         }
 
-        cr->c_blkno = le64_to_cpu(input->group);
+        cr->c_blkno = cpu_to_le64(input->group);
         le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
         le32_add_cpu(&cr->c_free, input->frees * cl_bpc);
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 91a1bd67ac1d..8a10f6fe24a1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1036,6 +1036,26 @@ static const struct file_operations proc_loginuid_operations = {
         .read           = proc_loginuid_read,
         .write          = proc_loginuid_write,
 };
+
+static ssize_t proc_sessionid_read(struct file * file, char __user * buf,
+                                   size_t count, loff_t *ppos)
+{
+        struct inode * inode = file->f_path.dentry->d_inode;
+        struct task_struct *task = get_proc_task(inode);
+        ssize_t length;
+        char tmpbuf[TMPBUFLEN];
+
+        if (!task)
+                return -ESRCH;
+        length = scnprintf(tmpbuf, TMPBUFLEN, "%u",
+                           audit_get_sessionid(task));
+        put_task_struct(task);
+        return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
+}
+
+static const struct file_operations proc_sessionid_operations = {
+        .read           = proc_sessionid_read,
+};
 #endif
 
 #ifdef CONFIG_FAULT_INJECTION
@@ -2269,6 +2289,9 @@ static const struct pid_entry tgid_base_stuff[] = {
         DIR("task", S_IRUGO|S_IXUGO, task),
         DIR("fd", S_IRUSR|S_IXUSR, fd),
         DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo),
+#ifdef CONFIG_NET
+        DIR("net", S_IRUGO|S_IXUSR, net),
+#endif
         REG("environ", S_IRUSR, environ),
         INF("auxv", S_IRUSR, pid_auxv),
         ONE("status", S_IRUGO, pid_status),
@@ -2316,6 +2339,7 @@ static const struct pid_entry tgid_base_stuff[] = {
         REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
+        REG("sessionid", S_IRUSR, sessionid),
 #endif
 #ifdef CONFIG_FAULT_INJECTION
         REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
@@ -2646,6 +2670,7 @@ static const struct pid_entry tid_base_stuff[] = {
         REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
 #ifdef CONFIG_AUDITSYSCALL
         REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
+        REG("sessionid", S_IRUSR, sessionid),
 #endif
 #ifdef CONFIG_FAULT_INJECTION
         REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 68971e66cd41..a36ad3c75cf4 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -377,15 +377,14 @@ static struct dentry_operations proc_dentry_operations =
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
  */
-struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
+                struct dentry *dentry)
 {
         struct inode *inode = NULL;
-        struct proc_dir_entry * de;
         int error = -ENOENT;
 
         lock_kernel();
         spin_lock(&proc_subdir_lock);
-        de = PDE(dir);
         if (de) {
                 for (de = de->subdir; de ; de = de->next) {
                         if (de->namelen != dentry->d_name.len)
@@ -393,8 +392,6 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam
                         if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
                                 unsigned int ino;
 
-                                if (de->shadow_proc)
-                                        de = de->shadow_proc(current, de);
                                 ino = de->low_ino;
                                 de_get(de);
                                 spin_unlock(&proc_subdir_lock);
@@ -417,6 +414,12 @@ out_unlock:
         return ERR_PTR(error);
 }
 
+struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
+                struct nameidata *nd)
+{
+        return proc_lookup_de(PDE(dir), dir, dentry);
+}
+
 /*
  * This returns non-zero if at EOF, so that the /proc
  * root directory can use this and check if it should
@@ -426,10 +429,9 @@ out_unlock:
  * value of the readdir() call, as long as it's non-negative
  * for success..
  */
-int proc_readdir(struct file * filp,
-        void * dirent, filldir_t filldir)
+int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
+                filldir_t filldir)
 {
-        struct proc_dir_entry * de;
         unsigned int ino;
         int i;
         struct inode *inode = filp->f_path.dentry->d_inode;
@@ -438,7 +440,6 @@ int proc_readdir(struct file * filp,
         lock_kernel();
 
         ino = inode->i_ino;
-        de = PDE(inode);
         if (!de) {
                 ret = -EINVAL;
                 goto out;
@@ -499,6 +500,13 @@ out: unlock_kernel();
         return ret;
 }
 
+int proc_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+        struct inode *inode = filp->f_path.dentry->d_inode;
+
+        return proc_readdir_de(PDE(inode), filp, dirent, filldir);
+}
+
 /*
  * These are the generic /proc directory operations. They
  * use the in-memory "struct proc_dir_entry" tree to parse
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 1c81c8f1aeed..bc72f5c8c47d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -64,6 +64,8 @@ extern const struct file_operations proc_numa_maps_operations;
 extern const struct file_operations proc_smaps_operations;
 extern const struct file_operations proc_clear_refs_operations;
 extern const struct file_operations proc_pagemap_operations;
+extern const struct file_operations proc_net_operations;
+extern const struct inode_operations proc_net_inode_operations;
 
 void free_proc_entry(struct proc_dir_entry *de);
 
@@ -83,3 +85,8 @@ static inline int proc_fd(struct inode *inode)
 {
         return PROC_I(inode)->fd;
 }
+
+struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino,
+                struct dentry *dentry);
+int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
+                filldir_t filldir);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 14e9b5aaf863..4caa5f774fb7 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -63,6 +63,82 @@ int seq_release_net(struct inode *ino, struct file *f)
 }
 EXPORT_SYMBOL_GPL(seq_release_net);
 
+static struct net *get_proc_task_net(struct inode *dir)
+{
+        struct task_struct *task;
+        struct nsproxy *ns;
+        struct net *net = NULL;
+
+        rcu_read_lock();
+        task = pid_task(proc_pid(dir), PIDTYPE_PID);
+        if (task != NULL) {
+                ns = task_nsproxy(task);
+                if (ns != NULL)
+                        net = get_net(ns->net_ns);
+        }
+        rcu_read_unlock();
+
+        return net;
+}
+
+static struct dentry *proc_tgid_net_lookup(struct inode *dir,
+                struct dentry *dentry, struct nameidata *nd)
+{
+        struct dentry *de;
+        struct net *net;
+
+        de = ERR_PTR(-ENOENT);
+        net = get_proc_task_net(dir);
+        if (net != NULL) {
+                de = proc_lookup_de(net->proc_net, dir, dentry);
+                put_net(net);
+        }
+        return de;
+}
+
+static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry,
+                struct kstat *stat)
+{
+        struct inode *inode = dentry->d_inode;
+        struct net *net;
+
+        net = get_proc_task_net(inode);
+
+        generic_fillattr(inode, stat);
+
+        if (net != NULL) {
+                stat->nlink = net->proc_net->nlink;
+                put_net(net);
+        }
+
+        return 0;
+}
+
+const struct inode_operations proc_net_inode_operations = {
+        .lookup         = proc_tgid_net_lookup,
+        .getattr        = proc_tgid_net_getattr,
+};
+
+static int proc_tgid_net_readdir(struct file *filp, void *dirent,
+                filldir_t filldir)
+{
+        int ret;
+        struct net *net;
+
+        ret = -EINVAL;
+        net = get_proc_task_net(filp->f_path.dentry->d_inode);
+        if (net != NULL) {
+                ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
+                put_net(net);
+        }
+        return ret;
+}
+
+const struct file_operations proc_net_operations = {
+        .read           = generic_read_dir,
+        .readdir        = proc_tgid_net_readdir,
+};
+
 
 struct proc_dir_entry *proc_net_fops_create(struct net *net,
         const char *name, mode_t mode, const struct file_operations *fops)
@@ -83,14 +159,6 @@ struct net *get_proc_net(const struct inode *inode)
 }
 EXPORT_SYMBOL_GPL(get_proc_net);
 
-static struct proc_dir_entry *shadow_pde;
-
-static struct proc_dir_entry *proc_net_shadow(struct task_struct *task,
-                                              struct proc_dir_entry *de)
-{
-        return task->nsproxy->net_ns->proc_net;
-}
-
 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
                         struct proc_dir_entry *parent)
 {
@@ -104,45 +172,39 @@ EXPORT_SYMBOL_GPL(proc_net_mkdir);
 
 static __net_init int proc_net_ns_init(struct net *net)
 {
-        struct proc_dir_entry *root, *netd, *net_statd;
+        struct proc_dir_entry *netd, *net_statd;
         int err;
 
         err = -ENOMEM;
-        root = kzalloc(sizeof(*root), GFP_KERNEL);
-        if (!root)
+        netd = kzalloc(sizeof(*netd), GFP_KERNEL);
+        if (!netd)
                 goto out;
 
-        err = -EEXIST;
-        netd = proc_net_mkdir(net, "net", root);
-        if (!netd)
-                goto free_root;
+        netd->data = net;
+        netd->nlink = 2;
+        netd->name = "net";
+        netd->namelen = 3;
+        netd->parent = &proc_root;
 
         err = -EEXIST;
         net_statd = proc_net_mkdir(net, "stat", netd);
         if (!net_statd)
                 goto free_net;
 
-        root->data = net;
-
-        net->proc_net_root = root;
         net->proc_net = netd;
         net->proc_net_stat = net_statd;
-        err = 0;
+        return 0;
 
+free_net:
+        kfree(netd);
 out:
         return err;
-free_net:
-        remove_proc_entry("net", root);
-free_root:
-        kfree(root);
-        goto out;
 }
 
 static __net_exit void proc_net_ns_exit(struct net *net)
 {
         remove_proc_entry("stat", net->proc_net);
-        remove_proc_entry("net", net->proc_net_root);
-        kfree(net->proc_net_root);
+        kfree(net->proc_net);
 }
 
 static struct pernet_operations __net_initdata proc_net_ns_ops = {
@@ -152,8 +214,7 @@ static struct pernet_operations __net_initdata proc_net_ns_ops = {
 
 int __init proc_net_init(void)
 {
-        shadow_pde = proc_mkdir("net", NULL);
-        shadow_pde->shadow_proc = proc_net_shadow;
+        proc_symlink("net", NULL, "self/net");
 
         return register_pernet_subsys(&proc_net_ns_ops);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6dc0334815f7..4206454734e0 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -640,17 +640,17 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 
         ret = -EACCES;
         if (!ptrace_may_attach(task))
-                goto out;
+                goto out_task;
 
         ret = -EINVAL;
         /* file position must be aligned */
         if (*ppos % PM_ENTRY_BYTES)
-                goto out;
+                goto out_task;
 
         ret = 0;
         mm = get_task_mm(task);
         if (!mm)
-                goto out;
+                goto out_task;
 
         ret = -ENOMEM;
         uaddr = (unsigned long)buf & PAGE_MASK;
@@ -658,7 +658,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
         pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
         pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
         if (!pages)
-                goto out_task;
+                goto out_mm;
 
         down_read(&current->mm->mmap_sem);
         ret = get_user_pages(current, current->mm, uaddr, pagecount,
@@ -668,6 +668,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
         if (ret < 0)
                 goto out_free;
 
+        if (ret != pagecount) {
+                pagecount = ret;
+                ret = -EFAULT;
+                goto out_pages;
+        }
+
         pm.out = buf;
         pm.end = buf + count;
 
@@ -699,15 +705,17 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                 ret = pm.out - buf;
         }
 
+out_pages:
         for (; pagecount; pagecount--) {
                 page = pages[pagecount-1];
                 if (!PageReserved(page))
                         SetPageDirty(page);
                 page_cache_release(page);
         }
-        mmput(mm);
 out_free:
         kfree(pages);
+out_mm:
+        mmput(mm);
 out_task:
         put_task_struct(task);
 out:
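
The pagemap_read() changes above straighten out the error paths so that each acquired resource is dropped exactly once, in reverse order of acquisition, by falling through stacked labels (out_pages, out_free, out_mm, out_task, out). A minimal sketch of that goto-unwind idiom, simplified to three resources and using hypothetical acquire()/release() helpers rather than the kernel functions:

#include <stdio.h>

/* Hypothetical helpers standing in for get_task_struct()/get_task_mm()/
 * get_user_pages() and their release counterparts. */
static int acquire(const char *what) { printf("acquire %s\n", what); return 1; }
static void release(const char *what) { printf("release %s\n", what); }

static int do_read(int short_read)
{
        int ret = -1;

        if (!acquire("task"))
                goto out;
        if (!acquire("mm"))
                goto out_task;          /* only 'task' was taken so far */
        if (!acquire("pages"))
                goto out_mm;            /* drop mm, then task */

        if (short_read) {               /* mirrors the new ret != pagecount check */
                ret = -14;              /* -EFAULT */
                goto out_pages;
        }
        ret = 0;                        /* the real copy-out work would go here */

out_pages:
        release("pages");
out_mm:
        release("mm");
out_task:
        release("task");
out:
        return ret;
}

int main(void)
{
        do_read(1);     /* everything taken before the failure is released */
        return 0;
}

Each failure jumps to the label just below the last successful acquisition, so nothing is released twice and nothing is leaked, which is the invariant the reordered labels in pagemap_read() restore.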