author    Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 12:13:20 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-07 12:13:20 -0500
commit    1c1afa3c053d4ccdf44e5a4e159005cdfd48bfc6
tree      3e686ad4cf1ae2300e7190ff83afc3f3dd4ba740
parent    0a01707b289853f56d1c000057b27e243c039722
parent    ac33d0710595579e3cfca42dde2257eb0b123f6d
Merge master.kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw
* master.kernel.org:/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (73 commits)
  [DLM] Clean up lowcomms
  [GFS2] Change gfs2_fsync() to use write_inode_now()
  [GFS2] Fix indent in recovery.c
  [GFS2] Don't flush everything on fdatasync
  [GFS2] Add a comment about reading the super block
  [GFS2] Mount problem with the GFS2 code
  [GFS2] Remove gfs2_check_acl()
  [DLM] fix format warnings in rcom.c and recoverd.c
  [GFS2] lock function parameter
  [DLM] don't accept replies to old recovery messages
  [DLM] fix size of STATUS_REPLY message
  [GFS2] fs/gfs2/log.c:log_bmap() fix printk format warning
  [DLM] fix add_requestqueue checking nodes list
  [GFS2] Fix recursive locking in gfs2_getattr
  [GFS2] Fix recursive locking in gfs2_permission
  [GFS2] Reduce number of arguments to meta_io.c:getbuf()
  [GFS2] Move gfs2_meta_syncfs() into log.c
  [GFS2] Fix journal flush problem
  [GFS2] mark_inode_dirty after write to stuffed file
  [GFS2] Fix glock ordering on inode creation
  ...
-rw-r--r--  fs/dlm/Kconfig | 20
-rw-r--r--  fs/dlm/Makefile | 4
-rw-r--r--  fs/dlm/dlm_internal.h | 4
-rw-r--r--  fs/dlm/lock.c | 16
-rw-r--r--  fs/dlm/lockspace.c | 4
-rw-r--r--  fs/dlm/lowcomms-sctp.c (renamed from fs/dlm/lowcomms.c) | 264
-rw-r--r--  fs/dlm/lowcomms-tcp.c | 1189
-rw-r--r--  fs/dlm/lowcomms.h | 2
-rw-r--r--  fs/dlm/main.c | 10
-rw-r--r--  fs/dlm/member.c | 8
-rw-r--r--  fs/dlm/rcom.c | 58
-rw-r--r--  fs/dlm/recover.c | 1
-rw-r--r--  fs/dlm/recoverd.c | 44
-rw-r--r--  fs/dlm/requestqueue.c | 26
-rw-r--r--  fs/dlm/requestqueue.h | 2
-rw-r--r--  fs/gfs2/Kconfig | 1
-rw-r--r--  fs/gfs2/acl.c | 39
-rw-r--r--  fs/gfs2/acl.h | 1
-rw-r--r--  fs/gfs2/bmap.c | 179
-rw-r--r--  fs/gfs2/daemon.c | 7
-rw-r--r--  fs/gfs2/dir.c | 93
-rw-r--r--  fs/gfs2/dir.h | 8
-rw-r--r--  fs/gfs2/eaops.c | 2
-rw-r--r--  fs/gfs2/eattr.c | 66
-rw-r--r--  fs/gfs2/eattr.h | 6
-rw-r--r--  fs/gfs2/glock.c | 36
-rw-r--r--  fs/gfs2/glock.h | 3
-rw-r--r--  fs/gfs2/glops.c | 138
-rw-r--r--  fs/gfs2/incore.h | 43
-rw-r--r--  fs/gfs2/inode.c | 406
-rw-r--r--  fs/gfs2/inode.h | 20
-rw-r--r--  fs/gfs2/log.c | 41
-rw-r--r--  fs/gfs2/log.h | 2
-rw-r--r--  fs/gfs2/lops.c | 40
-rw-r--r--  fs/gfs2/lops.h | 2
-rw-r--r--  fs/gfs2/meta_io.c | 46
-rw-r--r--  fs/gfs2/meta_io.h | 1
-rw-r--r--  fs/gfs2/ondisk.c | 138
-rw-r--r--  fs/gfs2/ops_address.c | 52
-rw-r--r--  fs/gfs2/ops_dentry.c | 4
-rw-r--r--  fs/gfs2/ops_export.c | 38
-rw-r--r--  fs/gfs2/ops_export.h | 2
-rw-r--r--  fs/gfs2/ops_file.c | 66
-rw-r--r--  fs/gfs2/ops_file.h | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 4
-rw-r--r--  fs/gfs2/ops_inode.c | 134
-rw-r--r--  fs/gfs2/ops_super.c | 11
-rw-r--r--  fs/gfs2/ops_vm.c | 2
-rw-r--r--  fs/gfs2/quota.c | 15
-rw-r--r--  fs/gfs2/recovery.c | 29
-rw-r--r--  fs/gfs2/recovery.h | 2
-rw-r--r--  fs/gfs2/rgrp.c | 13
-rw-r--r--  fs/gfs2/super.c | 50
-rw-r--r--  fs/gfs2/super.h | 6
-rw-r--r--  fs/gfs2/sys.c | 8
-rw-r--r--  fs/gfs2/util.h | 6
-rw-r--r--  include/linux/gfs2_ondisk.h | 138
57 files changed, 2341 insertions, 1211 deletions
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 81b2c6465eeb..b5654a284fef 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,14 +1,32 @@
 menu "Distributed Lock Manager"
-	depends on INET && IP_SCTP && EXPERIMENTAL
+	depends on EXPERIMENTAL && INET
 
 config DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on IPV6 || IPV6=n
 	select CONFIGFS_FS
+	select IP_SCTP if DLM_SCTP
 	help
 	A general purpose distributed lock manager for kernel or userspace
 	applications.
 
+choice
+	prompt "Select DLM communications protocol"
+	depends on DLM
+	default DLM_TCP
+	help
+	  The DLM Can use TCP or SCTP for it's network communications.
+	  SCTP supports multi-homed operations whereas TCP doesn't.
+	  However, SCTP seems to have stability problems at the moment.
+
+config DLM_TCP
+	bool "TCP/IP"
+
+config DLM_SCTP
+	bool "SCTP"
+
+endchoice
+
 config DLM_DEBUG
 	bool "DLM debugging"
 	depends on DLM
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index 1832e0297f7d..65388944eba0 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -4,7 +4,6 @@ dlm-y := ast.o \
 	dir.o \
 	lock.o \
 	lockspace.o \
-	lowcomms.o \
 	main.o \
 	member.o \
 	memory.o \
@@ -17,3 +16,6 @@ dlm-y := ast.o \
 	util.o
 dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o
 
+dlm-$(CONFIG_DLM_TCP) += lowcomms-tcp.o
+
+dlm-$(CONFIG_DLM_SCTP) += lowcomms-sctp.o
\ No newline at end of file
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 1e5cd67e1b7a..1ee8195e6fc0 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -471,6 +471,7 @@ struct dlm_ls {
 	char			*ls_recover_buf;
 	int			ls_recover_nodeid; /* for debugging */
 	uint64_t		ls_rcom_seq;
+	spinlock_t		ls_rcom_spin;
 	struct list_head	ls_recover_list;
 	spinlock_t		ls_recover_list_lock;
 	int			ls_recover_list_count;
@@ -488,7 +489,8 @@ struct dlm_ls {
 #define LSFL_RUNNING		1
 #define LSFL_RECOVERY_STOP	2
 #define LSFL_RCOM_READY		3
-#define LSFL_UEVENT_WAIT	4
+#define LSFL_RCOM_WAIT		4
+#define LSFL_UEVENT_WAIT	5
 
 /* much of this is just saving user space pointers associated with the
    lock that we pass back to the user lib with an ast */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 3f2befa4797b..30878defaeb6 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -2372,6 +2372,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
 {
 	lkb->lkb_exflags = ms->m_exflags;
+	lkb->lkb_sbflags = ms->m_sbflags;
 	lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
 			 (ms->m_flags & 0x0000FFFF);
 }
@@ -3028,10 +3029,17 @@ int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
 
 	while (1) {
 		if (dlm_locking_stopped(ls)) {
-			if (!recovery)
-				dlm_add_requestqueue(ls, nodeid, hd);
-			error = -EINTR;
-			goto out;
+			if (recovery) {
+				error = -EINTR;
+				goto out;
+			}
+			error = dlm_add_requestqueue(ls, nodeid, hd);
+			if (error == -EAGAIN)
+				continue;
+			else {
+				error = -EINTR;
+				goto out;
+			}
 		}
 
 		if (lock_recovery_try(ls))
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f8842ca443c2..59012b089e8d 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -22,6 +22,7 @@
 #include "memory.h"
 #include "lock.h"
 #include "recover.h"
+#include "requestqueue.h"
 
 #ifdef CONFIG_DLM_DEBUG
 int dlm_create_debug_file(struct dlm_ls *ls);
@@ -478,6 +479,8 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
 	ls->ls_recoverd_task = NULL;
 	mutex_init(&ls->ls_recoverd_active);
 	spin_lock_init(&ls->ls_recover_lock);
+	spin_lock_init(&ls->ls_rcom_spin);
+	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
 	ls->ls_recover_status = 0;
 	ls->ls_recover_seq = 0;
 	ls->ls_recover_args = NULL;
@@ -684,6 +687,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 * Free structures on any other lists
 	 */
 
+	dlm_purge_requestqueue(ls);
 	kfree(ls->ls_recover_args);
 	dlm_clear_free_entries(ls);
 	dlm_clear_members(ls);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms-sctp.c
index 6da6b14d5a61..fe158d7a9285 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms-sctp.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 **
 **  This copyrighted material is made available to anyone wishing to use,
 **  modify, copy, or redistribute it subject to the terms and conditions
@@ -75,13 +75,13 @@ struct nodeinfo {
75}; 75};
76 76
77static DEFINE_IDR(nodeinfo_idr); 77static DEFINE_IDR(nodeinfo_idr);
78static struct rw_semaphore nodeinfo_lock; 78static DECLARE_RWSEM(nodeinfo_lock);
79static int max_nodeid; 79static int max_nodeid;
80 80
81struct cbuf { 81struct cbuf {
82 unsigned base; 82 unsigned int base;
83 unsigned len; 83 unsigned int len;
84 unsigned mask; 84 unsigned int mask;
85}; 85};
86 86
87/* Just the one of these, now. But this struct keeps 87/* Just the one of these, now. But this struct keeps
@@ -90,9 +90,9 @@ struct cbuf {
90#define CF_READ_PENDING 1 90#define CF_READ_PENDING 1
91 91
92struct connection { 92struct connection {
93 struct socket *sock; 93 struct socket *sock;
94 unsigned long flags; 94 unsigned long flags;
95 struct page *rx_page; 95 struct page *rx_page;
96 atomic_t waiting_requests; 96 atomic_t waiting_requests;
97 struct cbuf cb; 97 struct cbuf cb;
98 int eagain_flag; 98 int eagain_flag;
@@ -102,36 +102,40 @@ struct connection {
102 102
103struct writequeue_entry { 103struct writequeue_entry {
104 struct list_head list; 104 struct list_head list;
105 struct page *page; 105 struct page *page;
106 int offset; 106 int offset;
107 int len; 107 int len;
108 int end; 108 int end;
109 int users; 109 int users;
110 struct nodeinfo *ni; 110 struct nodeinfo *ni;
111}; 111};
112 112
113#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0) 113static void cbuf_add(struct cbuf *cb, int n)
114#define CBUF_EMPTY(cb) ((cb)->len == 0) 114{
115#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1)) 115 cb->len += n;
116#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask) 116}
117 117
118#define CBUF_INIT(cb, size) \ 118static int cbuf_data(struct cbuf *cb)
119do { \ 119{
120 (cb)->base = (cb)->len = 0; \ 120 return ((cb->base + cb->len) & cb->mask);
121 (cb)->mask = ((size)-1); \ 121}
122} while(0)
123 122
124#define CBUF_EAT(cb, n) \ 123static void cbuf_init(struct cbuf *cb, int size)
125do { \ 124{
126 (cb)->len -= (n); \ 125 cb->base = cb->len = 0;
127 (cb)->base += (n); \ 126 cb->mask = size-1;
128 (cb)->base &= (cb)->mask; \ 127}
129} while(0)
130 128
129static void cbuf_eat(struct cbuf *cb, int n)
130{
131 cb->len -= n;
132 cb->base += n;
133 cb->base &= cb->mask;
134}
131 135
132/* List of nodes which have writes pending */ 136/* List of nodes which have writes pending */
133static struct list_head write_nodes; 137static LIST_HEAD(write_nodes);
134static spinlock_t write_nodes_lock; 138static DEFINE_SPINLOCK(write_nodes_lock);
135 139
136/* Maximum number of incoming messages to process before 140/* Maximum number of incoming messages to process before
137 * doing a schedule() 141 * doing a schedule()
@@ -141,8 +145,7 @@ static spinlock_t write_nodes_lock;
141/* Manage daemons */ 145/* Manage daemons */
142static struct task_struct *recv_task; 146static struct task_struct *recv_task;
143static struct task_struct *send_task; 147static struct task_struct *send_task;
144static wait_queue_head_t lowcomms_recv_wait; 148static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_wait);
145static atomic_t accepting;
146 149
147/* The SCTP connection */ 150/* The SCTP connection */
148static struct connection sctp_con; 151static struct connection sctp_con;
@@ -161,11 +164,11 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
161 return error; 164 return error;
162 165
163 if (dlm_local_addr[0]->ss_family == AF_INET) { 166 if (dlm_local_addr[0]->ss_family == AF_INET) {
164 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr; 167 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
165 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr; 168 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
166 ret4->sin_addr.s_addr = in4->sin_addr.s_addr; 169 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
167 } else { 170 } else {
168 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr; 171 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
169 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr; 172 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
170 memcpy(&ret6->sin6_addr, &in6->sin6_addr, 173 memcpy(&ret6->sin6_addr, &in6->sin6_addr,
171 sizeof(in6->sin6_addr)); 174 sizeof(in6->sin6_addr));
@@ -174,6 +177,8 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
174 return 0; 177 return 0;
175} 178}
176 179
180/* If alloc is 0 here we will not attempt to allocate a new
181 nodeinfo struct */
177static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc) 182static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
178{ 183{
179 struct nodeinfo *ni; 184 struct nodeinfo *ni;
@@ -184,44 +189,45 @@ static struct nodeinfo *nodeid2nodeinfo(int nodeid, gfp_t alloc)
184 ni = idr_find(&nodeinfo_idr, nodeid); 189 ni = idr_find(&nodeinfo_idr, nodeid);
185 up_read(&nodeinfo_lock); 190 up_read(&nodeinfo_lock);
186 191
187 if (!ni && alloc) { 192 if (ni || !alloc)
188 down_write(&nodeinfo_lock); 193 return ni;
189 194
190 ni = idr_find(&nodeinfo_idr, nodeid); 195 down_write(&nodeinfo_lock);
191 if (ni)
192 goto out_up;
193 196
194 r = idr_pre_get(&nodeinfo_idr, alloc); 197 ni = idr_find(&nodeinfo_idr, nodeid);
195 if (!r) 198 if (ni)
196 goto out_up; 199 goto out_up;
197 200
198 ni = kmalloc(sizeof(struct nodeinfo), alloc); 201 r = idr_pre_get(&nodeinfo_idr, alloc);
199 if (!ni) 202 if (!r)
200 goto out_up; 203 goto out_up;
201 204
202 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n); 205 ni = kmalloc(sizeof(struct nodeinfo), alloc);
203 if (r) { 206 if (!ni)
204 kfree(ni); 207 goto out_up;
205 ni = NULL; 208
206 goto out_up; 209 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
207 } 210 if (r) {
208 if (n != nodeid) { 211 kfree(ni);
209 idr_remove(&nodeinfo_idr, n); 212 ni = NULL;
210 kfree(ni); 213 goto out_up;
211 ni = NULL;
212 goto out_up;
213 }
214 memset(ni, 0, sizeof(struct nodeinfo));
215 spin_lock_init(&ni->lock);
216 INIT_LIST_HEAD(&ni->writequeue);
217 spin_lock_init(&ni->writequeue_lock);
218 ni->nodeid = nodeid;
219
220 if (nodeid > max_nodeid)
221 max_nodeid = nodeid;
222 out_up:
223 up_write(&nodeinfo_lock);
224 } 214 }
215 if (n != nodeid) {
216 idr_remove(&nodeinfo_idr, n);
217 kfree(ni);
218 ni = NULL;
219 goto out_up;
220 }
221 memset(ni, 0, sizeof(struct nodeinfo));
222 spin_lock_init(&ni->lock);
223 INIT_LIST_HEAD(&ni->writequeue);
224 spin_lock_init(&ni->writequeue_lock);
225 ni->nodeid = nodeid;
226
227 if (nodeid > max_nodeid)
228 max_nodeid = nodeid;
229out_up:
230 up_write(&nodeinfo_lock);
225 231
226 return ni; 232 return ni;
227} 233}
@@ -279,13 +285,13 @@ static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
279 in4_addr->sin_port = cpu_to_be16(port); 285 in4_addr->sin_port = cpu_to_be16(port);
280 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); 286 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
281 memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) - 287 memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
282 sizeof(struct sockaddr_in)); 288 sizeof(struct sockaddr_in));
283 *addr_len = sizeof(struct sockaddr_in); 289 *addr_len = sizeof(struct sockaddr_in);
284 } else { 290 } else {
285 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; 291 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
286 in6_addr->sin6_port = cpu_to_be16(port); 292 in6_addr->sin6_port = cpu_to_be16(port);
287 memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) - 293 memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
288 sizeof(struct sockaddr_in6)); 294 sizeof(struct sockaddr_in6));
289 *addr_len = sizeof(struct sockaddr_in6); 295 *addr_len = sizeof(struct sockaddr_in6);
290 } 296 }
291} 297}
@@ -324,7 +330,7 @@ static void send_shutdown(sctp_assoc_t associd)
324 cmsg->cmsg_type = SCTP_SNDRCV; 330 cmsg->cmsg_type = SCTP_SNDRCV;
325 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 331 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
326 outmessage.msg_controllen = cmsg->cmsg_len; 332 outmessage.msg_controllen = cmsg->cmsg_len;
327 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 333 sinfo = CMSG_DATA(cmsg);
328 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 334 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
329 335
330 sinfo->sinfo_flags |= MSG_EOF; 336 sinfo->sinfo_flags |= MSG_EOF;
@@ -387,7 +393,7 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
387 393
388 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) { 394 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
389 log_print("COMM_UP for invalid assoc ID %d", 395 log_print("COMM_UP for invalid assoc ID %d",
390 (int)sn->sn_assoc_change.sac_assoc_id); 396 (int)sn->sn_assoc_change.sac_assoc_id);
391 init_failed(); 397 init_failed();
392 return; 398 return;
393 } 399 }
@@ -398,15 +404,18 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
398 fs = get_fs(); 404 fs = get_fs();
399 set_fs(get_ds()); 405 set_fs(get_ds());
400 ret = sctp_con.sock->ops->getsockopt(sctp_con.sock, 406 ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
401 IPPROTO_SCTP, SCTP_PRIMARY_ADDR, 407 IPPROTO_SCTP,
402 (char*)&prim, &prim_len); 408 SCTP_PRIMARY_ADDR,
409 (char*)&prim,
410 &prim_len);
403 set_fs(fs); 411 set_fs(fs);
404 if (ret < 0) { 412 if (ret < 0) {
405 struct nodeinfo *ni; 413 struct nodeinfo *ni;
406 414
407 log_print("getsockopt/sctp_primary_addr on " 415 log_print("getsockopt/sctp_primary_addr on "
408 "new assoc %d failed : %d", 416 "new assoc %d failed : %d",
409 (int)sn->sn_assoc_change.sac_assoc_id, ret); 417 (int)sn->sn_assoc_change.sac_assoc_id,
418 ret);
410 419
411 /* Retry INIT later */ 420 /* Retry INIT later */
412 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id); 421 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
@@ -426,12 +435,10 @@ static void process_sctp_notification(struct msghdr *msg, char *buf)
426 return; 435 return;
427 436
428 /* Save the assoc ID */ 437 /* Save the assoc ID */
429 spin_lock(&ni->lock);
430 ni->assoc_id = sn->sn_assoc_change.sac_assoc_id; 438 ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
431 spin_unlock(&ni->lock);
432 439
433 log_print("got new/restarted association %d nodeid %d", 440 log_print("got new/restarted association %d nodeid %d",
434 (int)sn->sn_assoc_change.sac_assoc_id, nodeid); 441 (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
435 442
436 /* Send any pending writes */ 443 /* Send any pending writes */
437 clear_bit(NI_INIT_PENDING, &ni->flags); 444 clear_bit(NI_INIT_PENDING, &ni->flags);
@@ -507,13 +514,12 @@ static int receive_from_sock(void)
507 sctp_con.rx_page = alloc_page(GFP_ATOMIC); 514 sctp_con.rx_page = alloc_page(GFP_ATOMIC);
508 if (sctp_con.rx_page == NULL) 515 if (sctp_con.rx_page == NULL)
509 goto out_resched; 516 goto out_resched;
510 CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE); 517 cbuf_init(&sctp_con.cb, PAGE_CACHE_SIZE);
511 } 518 }
512 519
513 memset(&incmsg, 0, sizeof(incmsg)); 520 memset(&incmsg, 0, sizeof(incmsg));
514 memset(&msgname, 0, sizeof(msgname)); 521 memset(&msgname, 0, sizeof(msgname));
515 522
516 memset(incmsg, 0, sizeof(incmsg));
517 msg.msg_name = &msgname; 523 msg.msg_name = &msgname;
518 msg.msg_namelen = sizeof(msgname); 524 msg.msg_namelen = sizeof(msgname);
519 msg.msg_flags = 0; 525 msg.msg_flags = 0;
@@ -532,17 +538,17 @@ static int receive_from_sock(void)
532 * iov[0] is the bit of the circular buffer between the current end 538 * iov[0] is the bit of the circular buffer between the current end
533 * point (cb.base + cb.len) and the end of the buffer. 539 * point (cb.base + cb.len) and the end of the buffer.
534 */ 540 */
535 iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb); 541 iov[0].iov_len = sctp_con.cb.base - cbuf_data(&sctp_con.cb);
536 iov[0].iov_base = page_address(sctp_con.rx_page) + 542 iov[0].iov_base = page_address(sctp_con.rx_page) +
537 CBUF_DATA(&sctp_con.cb); 543 cbuf_data(&sctp_con.cb);
538 iov[1].iov_len = 0; 544 iov[1].iov_len = 0;
539 545
540 /* 546 /*
541 * iov[1] is the bit of the circular buffer between the start of the 547 * iov[1] is the bit of the circular buffer between the start of the
542 * buffer and the start of the currently used section (cb.base) 548 * buffer and the start of the currently used section (cb.base)
543 */ 549 */
544 if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) { 550 if (cbuf_data(&sctp_con.cb) >= sctp_con.cb.base) {
545 iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb); 551 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&sctp_con.cb);
546 iov[1].iov_len = sctp_con.cb.base; 552 iov[1].iov_len = sctp_con.cb.base;
547 iov[1].iov_base = page_address(sctp_con.rx_page); 553 iov[1].iov_base = page_address(sctp_con.rx_page);
548 msg.msg_iovlen = 2; 554 msg.msg_iovlen = 2;
@@ -557,7 +563,7 @@ static int receive_from_sock(void)
557 msg.msg_control = incmsg; 563 msg.msg_control = incmsg;
558 msg.msg_controllen = sizeof(incmsg); 564 msg.msg_controllen = sizeof(incmsg);
559 cmsg = CMSG_FIRSTHDR(&msg); 565 cmsg = CMSG_FIRSTHDR(&msg);
560 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 566 sinfo = CMSG_DATA(cmsg);
561 567
562 if (msg.msg_flags & MSG_NOTIFICATION) { 568 if (msg.msg_flags & MSG_NOTIFICATION) {
563 process_sctp_notification(&msg, page_address(sctp_con.rx_page)); 569 process_sctp_notification(&msg, page_address(sctp_con.rx_page));
@@ -583,29 +589,29 @@ static int receive_from_sock(void)
583 if (r == 1) 589 if (r == 1)
584 return 0; 590 return 0;
585 591
586 CBUF_ADD(&sctp_con.cb, ret); 592 cbuf_add(&sctp_con.cb, ret);
587 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid), 593 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
588 page_address(sctp_con.rx_page), 594 page_address(sctp_con.rx_page),
589 sctp_con.cb.base, sctp_con.cb.len, 595 sctp_con.cb.base, sctp_con.cb.len,
590 PAGE_CACHE_SIZE); 596 PAGE_CACHE_SIZE);
591 if (ret < 0) 597 if (ret < 0)
592 goto out_close; 598 goto out_close;
593 CBUF_EAT(&sctp_con.cb, ret); 599 cbuf_eat(&sctp_con.cb, ret);
594 600
595 out: 601out:
596 ret = 0; 602 ret = 0;
597 goto out_ret; 603 goto out_ret;
598 604
599 out_resched: 605out_resched:
600 lowcomms_data_ready(sctp_con.sock->sk, 0); 606 lowcomms_data_ready(sctp_con.sock->sk, 0);
601 ret = 0; 607 ret = 0;
602 schedule(); 608 cond_resched();
603 goto out_ret; 609 goto out_ret;
604 610
605 out_close: 611out_close:
606 if (ret != -EAGAIN) 612 if (ret != -EAGAIN)
607 log_print("error reading from sctp socket: %d", ret); 613 log_print("error reading from sctp socket: %d", ret);
608 out_ret: 614out_ret:
609 return ret; 615 return ret;
610} 616}
611 617
@@ -619,10 +625,12 @@ static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
619 set_fs(get_ds()); 625 set_fs(get_ds());
620 if (num == 1) 626 if (num == 1)
621 result = sctp_con.sock->ops->bind(sctp_con.sock, 627 result = sctp_con.sock->ops->bind(sctp_con.sock,
622 (struct sockaddr *) addr, addr_len); 628 (struct sockaddr *) addr,
629 addr_len);
623 else 630 else
624 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP, 631 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
625 SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len); 632 SCTP_SOCKOPT_BINDX_ADD,
633 (char *)addr, addr_len);
626 set_fs(fs); 634 set_fs(fs);
627 635
628 if (result < 0) 636 if (result < 0)
@@ -719,10 +727,10 @@ static int init_sock(void)
719 727
720 return 0; 728 return 0;
721 729
722 create_delsock: 730create_delsock:
723 sock_release(sock); 731 sock_release(sock);
724 sctp_con.sock = NULL; 732 sctp_con.sock = NULL;
725 out: 733out:
726 return result; 734 return result;
727} 735}
728 736
@@ -756,16 +764,13 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
756 int users = 0; 764 int users = 0;
757 struct nodeinfo *ni; 765 struct nodeinfo *ni;
758 766
759 if (!atomic_read(&accepting))
760 return NULL;
761
762 ni = nodeid2nodeinfo(nodeid, allocation); 767 ni = nodeid2nodeinfo(nodeid, allocation);
763 if (!ni) 768 if (!ni)
764 return NULL; 769 return NULL;
765 770
766 spin_lock(&ni->writequeue_lock); 771 spin_lock(&ni->writequeue_lock);
767 e = list_entry(ni->writequeue.prev, struct writequeue_entry, list); 772 e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
768 if (((struct list_head *) e == &ni->writequeue) || 773 if ((&e->list == &ni->writequeue) ||
769 (PAGE_CACHE_SIZE - e->end < len)) { 774 (PAGE_CACHE_SIZE - e->end < len)) {
770 e = NULL; 775 e = NULL;
771 } else { 776 } else {
@@ -776,7 +781,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
776 spin_unlock(&ni->writequeue_lock); 781 spin_unlock(&ni->writequeue_lock);
777 782
778 if (e) { 783 if (e) {
779 got_one: 784 got_one:
780 if (users == 0) 785 if (users == 0)
781 kmap(e->page); 786 kmap(e->page);
782 *ppc = page_address(e->page) + offset; 787 *ppc = page_address(e->page) + offset;
@@ -803,9 +808,6 @@ void dlm_lowcomms_commit_buffer(void *arg)
803 int users; 808 int users;
804 struct nodeinfo *ni = e->ni; 809 struct nodeinfo *ni = e->ni;
805 810
806 if (!atomic_read(&accepting))
807 return;
808
809 spin_lock(&ni->writequeue_lock); 811 spin_lock(&ni->writequeue_lock);
810 users = --e->users; 812 users = --e->users;
811 if (users) 813 if (users)
@@ -822,7 +824,7 @@ void dlm_lowcomms_commit_buffer(void *arg)
822 } 824 }
823 return; 825 return;
824 826
825 out: 827out:
826 spin_unlock(&ni->writequeue_lock); 828 spin_unlock(&ni->writequeue_lock);
827 return; 829 return;
828} 830}
@@ -878,7 +880,7 @@ static void initiate_association(int nodeid)
878 cmsg->cmsg_level = IPPROTO_SCTP; 880 cmsg->cmsg_level = IPPROTO_SCTP;
879 cmsg->cmsg_type = SCTP_SNDRCV; 881 cmsg->cmsg_type = SCTP_SNDRCV;
880 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 882 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
881 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 883 sinfo = CMSG_DATA(cmsg);
882 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 884 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
883 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); 885 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
884 886
@@ -892,7 +894,7 @@ static void initiate_association(int nodeid)
892} 894}
893 895
894/* Send a message */ 896/* Send a message */
895static int send_to_sock(struct nodeinfo *ni) 897static void send_to_sock(struct nodeinfo *ni)
896{ 898{
897 int ret = 0; 899 int ret = 0;
898 struct writequeue_entry *e; 900 struct writequeue_entry *e;
@@ -903,13 +905,13 @@ static int send_to_sock(struct nodeinfo *ni)
903 struct sctp_sndrcvinfo *sinfo; 905 struct sctp_sndrcvinfo *sinfo;
904 struct kvec iov; 906 struct kvec iov;
905 907
906 /* See if we need to init an association before we start 908 /* See if we need to init an association before we start
907 sending precious messages */ 909 sending precious messages */
908 spin_lock(&ni->lock); 910 spin_lock(&ni->lock);
909 if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { 911 if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
910 spin_unlock(&ni->lock); 912 spin_unlock(&ni->lock);
911 initiate_association(ni->nodeid); 913 initiate_association(ni->nodeid);
912 return 0; 914 return;
913 } 915 }
914 spin_unlock(&ni->lock); 916 spin_unlock(&ni->lock);
915 917
@@ -923,7 +925,7 @@ static int send_to_sock(struct nodeinfo *ni)
923 cmsg->cmsg_level = IPPROTO_SCTP; 925 cmsg->cmsg_level = IPPROTO_SCTP;
924 cmsg->cmsg_type = SCTP_SNDRCV; 926 cmsg->cmsg_type = SCTP_SNDRCV;
925 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 927 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
926 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); 928 sinfo = CMSG_DATA(cmsg);
927 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); 929 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
928 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid); 930 sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
929 sinfo->sinfo_assoc_id = ni->assoc_id; 931 sinfo->sinfo_assoc_id = ni->assoc_id;
@@ -955,7 +957,7 @@ static int send_to_sock(struct nodeinfo *ni)
955 goto send_error; 957 goto send_error;
956 } else { 958 } else {
957 /* Don't starve people filling buffers */ 959 /* Don't starve people filling buffers */
958 schedule(); 960 cond_resched();
959 } 961 }
960 962
961 spin_lock(&ni->writequeue_lock); 963 spin_lock(&ni->writequeue_lock);
@@ -964,15 +966,16 @@ static int send_to_sock(struct nodeinfo *ni)
964 966
965 if (e->len == 0 && e->users == 0) { 967 if (e->len == 0 && e->users == 0) {
966 list_del(&e->list); 968 list_del(&e->list);
969 kunmap(e->page);
967 free_entry(e); 970 free_entry(e);
968 continue; 971 continue;
969 } 972 }
970 } 973 }
971 spin_unlock(&ni->writequeue_lock); 974 spin_unlock(&ni->writequeue_lock);
972 out: 975out:
973 return ret; 976 return;
974 977
975 send_error: 978send_error:
976 log_print("Error sending to node %d %d", ni->nodeid, ret); 979 log_print("Error sending to node %d %d", ni->nodeid, ret);
977 spin_lock(&ni->lock); 980 spin_lock(&ni->lock);
978 if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) { 981 if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
@@ -982,7 +985,7 @@ static int send_to_sock(struct nodeinfo *ni)
982 } else 985 } else
983 spin_unlock(&ni->lock); 986 spin_unlock(&ni->lock);
984 987
985 return ret; 988 return;
986} 989}
987 990
988/* Try to send any messages that are pending */ 991/* Try to send any messages that are pending */
@@ -994,7 +997,7 @@ static void process_output_queue(void)
994 spin_lock_bh(&write_nodes_lock); 997 spin_lock_bh(&write_nodes_lock);
995 list_for_each_safe(list, temp, &write_nodes) { 998 list_for_each_safe(list, temp, &write_nodes) {
996 struct nodeinfo *ni = 999 struct nodeinfo *ni =
997 list_entry(list, struct nodeinfo, write_list); 1000 list_entry(list, struct nodeinfo, write_list);
998 clear_bit(NI_WRITE_PENDING, &ni->flags); 1001 clear_bit(NI_WRITE_PENDING, &ni->flags);
999 list_del(&ni->write_list); 1002 list_del(&ni->write_list);
1000 1003
@@ -1106,7 +1109,7 @@ static int dlm_recvd(void *data)
1106 set_current_state(TASK_INTERRUPTIBLE); 1109 set_current_state(TASK_INTERRUPTIBLE);
1107 add_wait_queue(&lowcomms_recv_wait, &wait); 1110 add_wait_queue(&lowcomms_recv_wait, &wait);
1108 if (!test_bit(CF_READ_PENDING, &sctp_con.flags)) 1111 if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
1109 schedule(); 1112 cond_resched();
1110 remove_wait_queue(&lowcomms_recv_wait, &wait); 1113 remove_wait_queue(&lowcomms_recv_wait, &wait);
1111 set_current_state(TASK_RUNNING); 1114 set_current_state(TASK_RUNNING);
1112 1115
@@ -1118,12 +1121,12 @@ static int dlm_recvd(void *data)
1118 1121
1119 /* Don't starve out everyone else */ 1122 /* Don't starve out everyone else */
1120 if (++count >= MAX_RX_MSG_COUNT) { 1123 if (++count >= MAX_RX_MSG_COUNT) {
1121 schedule(); 1124 cond_resched();
1122 count = 0; 1125 count = 0;
1123 } 1126 }
1124 } while (!kthread_should_stop() && ret >=0); 1127 } while (!kthread_should_stop() && ret >=0);
1125 } 1128 }
1126 schedule(); 1129 cond_resched();
1127 } 1130 }
1128 1131
1129 return 0; 1132 return 0;
@@ -1138,7 +1141,7 @@ static int dlm_sendd(void *data)
1138 while (!kthread_should_stop()) { 1141 while (!kthread_should_stop()) {
1139 set_current_state(TASK_INTERRUPTIBLE); 1142 set_current_state(TASK_INTERRUPTIBLE);
1140 if (write_list_empty()) 1143 if (write_list_empty())
1141 schedule(); 1144 cond_resched();
1142 set_current_state(TASK_RUNNING); 1145 set_current_state(TASK_RUNNING);
1143 1146
1144 if (sctp_con.eagain_flag) { 1147 if (sctp_con.eagain_flag) {
@@ -1166,7 +1169,7 @@ static int daemons_start(void)
1166 1169
1167 p = kthread_run(dlm_recvd, NULL, "dlm_recvd"); 1170 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1168 error = IS_ERR(p); 1171 error = IS_ERR(p);
1169 if (error) { 1172 if (error) {
1170 log_print("can't start dlm_recvd %d", error); 1173 log_print("can't start dlm_recvd %d", error);
1171 return error; 1174 return error;
1172 } 1175 }
@@ -1174,7 +1177,7 @@ static int daemons_start(void)
1174 1177
1175 p = kthread_run(dlm_sendd, NULL, "dlm_sendd"); 1178 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1176 error = IS_ERR(p); 1179 error = IS_ERR(p);
1177 if (error) { 1180 if (error) {
1178 log_print("can't start dlm_sendd %d", error); 1181 log_print("can't start dlm_sendd %d", error);
1179 kthread_stop(recv_task); 1182 kthread_stop(recv_task);
1180 return error; 1183 return error;
@@ -1197,43 +1200,28 @@ int dlm_lowcomms_start(void)
1197 error = daemons_start(); 1200 error = daemons_start();
1198 if (error) 1201 if (error)
1199 goto fail_sock; 1202 goto fail_sock;
1200 atomic_set(&accepting, 1);
1201 return 0; 1203 return 0;
1202 1204
1203 fail_sock: 1205fail_sock:
1204 close_connection(); 1206 close_connection();
1205 return error; 1207 return error;
1206} 1208}
1207 1209
1208/* Set all the activity flags to prevent any socket activity. */
1209
1210void dlm_lowcomms_stop(void) 1210void dlm_lowcomms_stop(void)
1211{ 1211{
1212 atomic_set(&accepting, 0); 1212 int i;
1213
1213 sctp_con.flags = 0x7; 1214 sctp_con.flags = 0x7;
1214 daemons_stop(); 1215 daemons_stop();
1215 clean_writequeues(); 1216 clean_writequeues();
1216 close_connection(); 1217 close_connection();
1217 dealloc_nodeinfo(); 1218 dealloc_nodeinfo();
1218 max_nodeid = 0; 1219 max_nodeid = 0;
1219}
1220 1220
1221int dlm_lowcomms_init(void) 1221 dlm_local_count = 0;
1222{ 1222 dlm_local_nodeid = 0;
1223 init_waitqueue_head(&lowcomms_recv_wait);
1224 spin_lock_init(&write_nodes_lock);
1225 INIT_LIST_HEAD(&write_nodes);
1226 init_rwsem(&nodeinfo_lock);
1227 return 0;
1228}
1229
1230void dlm_lowcomms_exit(void)
1231{
1232 int i;
1233 1223
1234 for (i = 0; i < dlm_local_count; i++) 1224 for (i = 0; i < dlm_local_count; i++)
1235 kfree(dlm_local_addr[i]); 1225 kfree(dlm_local_addr[i]);
1236 dlm_local_count = 0;
1237 dlm_local_nodeid = 0;
1238} 1226}
1239 1227
diff --git a/fs/dlm/lowcomms-tcp.c b/fs/dlm/lowcomms-tcp.c
new file mode 100644
index 000000000000..8f2791fc8447
--- /dev/null
+++ b/fs/dlm/lowcomms-tcp.c
@@ -0,0 +1,1189 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * lowcomms.c
16 *
17 * This is the "low-level" comms layer.
18 *
19 * It is responsible for sending/receiving messages
20 * from other nodes in the cluster.
21 *
22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32 bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is it's
25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP address or
27 * whatever it needs for inter-node communication.
28 *
29 * The comms level is two kernel threads that deal mainly with
30 * the receiving of messages from other nodes and passing them
31 * up to the mid-level comms layer (which understands the
32 * message format) for execution by the locking core, and
33 * a send thread which does all the setting up of connections
34 * to remote nodes and the sending of data. Threads are not allowed
35 * to send their own data because it may cause them to wait in times
36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block.
38 *
39 * I don't see any problem with the recv thread executing the locking
40 * code on behalf of remote processes as the locking code is
41 * short, efficient and never waits.
42 *
43 */
44
45
46#include <asm/ioctls.h>
47#include <net/sock.h>
48#include <net/tcp.h>
49#include <linux/pagemap.h>
50
51#include "dlm_internal.h"
52#include "lowcomms.h"
53#include "midcomms.h"
54#include "config.h"
55
56struct cbuf {
57 unsigned int base;
58 unsigned int len;
59 unsigned int mask;
60};
61
62#define NODE_INCREMENT 32
63static void cbuf_add(struct cbuf *cb, int n)
64{
65 cb->len += n;
66}
67
68static int cbuf_data(struct cbuf *cb)
69{
70 return ((cb->base + cb->len) & cb->mask);
71}
72
73static void cbuf_init(struct cbuf *cb, int size)
74{
75 cb->base = cb->len = 0;
76 cb->mask = size-1;
77}
78
79static void cbuf_eat(struct cbuf *cb, int n)
80{
81 cb->len -= n;
82 cb->base += n;
83 cb->base &= cb->mask;
84}
85
86static bool cbuf_empty(struct cbuf *cb)
87{
88 return cb->len == 0;
89}
90
91/* Maximum number of incoming messages to process before
92 doing a cond_resched()
93*/
94#define MAX_RX_MSG_COUNT 25
95
96struct connection {
97 struct socket *sock; /* NULL if not connected */
98 uint32_t nodeid; /* So we know who we are in the list */
99 struct rw_semaphore sock_sem; /* Stop connect races */
100 struct list_head read_list; /* On this list when ready for reading */
101 struct list_head write_list; /* On this list when ready for writing */
102 struct list_head state_list; /* On this list when ready to connect */
103 unsigned long flags; /* bit 1,2 = We are on the read/write lists */
104#define CF_READ_PENDING 1
105#define CF_WRITE_PENDING 2
106#define CF_CONNECT_PENDING 3
107#define CF_IS_OTHERCON 4
108 struct list_head writequeue; /* List of outgoing writequeue_entries */
109 struct list_head listenlist; /* List of allocated listening sockets */
110 spinlock_t writequeue_lock;
111 int (*rx_action) (struct connection *); /* What to do when active */
112 struct page *rx_page;
113 struct cbuf cb;
114 int retries;
115 atomic_t waiting_requests;
116#define MAX_CONNECT_RETRIES 3
117 struct connection *othercon;
118};
119#define sock2con(x) ((struct connection *)(x)->sk_user_data)
120
121/* An entry waiting to be sent */
122struct writequeue_entry {
123 struct list_head list;
124 struct page *page;
125 int offset;
126 int len;
127 int end;
128 int users;
129 struct connection *con;
130};
131
132static struct sockaddr_storage dlm_local_addr;
133
134/* Manage daemons */
135static struct task_struct *recv_task;
136static struct task_struct *send_task;
137
138static wait_queue_t lowcomms_send_waitq_head;
139static DECLARE_WAIT_QUEUE_HEAD(lowcomms_send_waitq);
140static wait_queue_t lowcomms_recv_waitq_head;
141static DECLARE_WAIT_QUEUE_HEAD(lowcomms_recv_waitq);
142
143/* An array of pointers to connections, indexed by NODEID */
144static struct connection **connections;
145static DECLARE_MUTEX(connections_lock);
146static kmem_cache_t *con_cache;
147static int conn_array_size;
148
149/* List of sockets that have reads pending */
150static LIST_HEAD(read_sockets);
151static DEFINE_SPINLOCK(read_sockets_lock);
152
153/* List of sockets which have writes pending */
154static LIST_HEAD(write_sockets);
155static DEFINE_SPINLOCK(write_sockets_lock);
156
157/* List of sockets which have connects pending */
158static LIST_HEAD(state_sockets);
159static DEFINE_SPINLOCK(state_sockets_lock);
160
161static struct connection *nodeid2con(int nodeid, gfp_t allocation)
162{
163 struct connection *con = NULL;
164
165 down(&connections_lock);
166 if (nodeid >= conn_array_size) {
167 int new_size = nodeid + NODE_INCREMENT;
168 struct connection **new_conns;
169
170 new_conns = kzalloc(sizeof(struct connection *) *
171 new_size, allocation);
172 if (!new_conns)
173 goto finish;
174
175 memcpy(new_conns, connections, sizeof(struct connection *) * conn_array_size);
176 conn_array_size = new_size;
177 kfree(connections);
178 connections = new_conns;
179
180 }
181
182 con = connections[nodeid];
183 if (con == NULL && allocation) {
184 con = kmem_cache_zalloc(con_cache, allocation);
185 if (!con)
186 goto finish;
187
188 con->nodeid = nodeid;
189 init_rwsem(&con->sock_sem);
190 INIT_LIST_HEAD(&con->writequeue);
191 spin_lock_init(&con->writequeue_lock);
192
193 connections[nodeid] = con;
194 }
195
196finish:
197 up(&connections_lock);
198 return con;
199}
200
201/* Data available on socket or listen socket received a connect */
202static void lowcomms_data_ready(struct sock *sk, int count_unused)
203{
204 struct connection *con = sock2con(sk);
205
206 atomic_inc(&con->waiting_requests);
207 if (test_and_set_bit(CF_READ_PENDING, &con->flags))
208 return;
209
210 spin_lock_bh(&read_sockets_lock);
211 list_add_tail(&con->read_list, &read_sockets);
212 spin_unlock_bh(&read_sockets_lock);
213
214 wake_up_interruptible(&lowcomms_recv_waitq);
215}
216
217static void lowcomms_write_space(struct sock *sk)
218{
219 struct connection *con = sock2con(sk);
220
221 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags))
222 return;
223
224 spin_lock_bh(&write_sockets_lock);
225 list_add_tail(&con->write_list, &write_sockets);
226 spin_unlock_bh(&write_sockets_lock);
227
228 wake_up_interruptible(&lowcomms_send_waitq);
229}
230
231static inline void lowcomms_connect_sock(struct connection *con)
232{
233 if (test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
234 return;
235
236 spin_lock_bh(&state_sockets_lock);
237 list_add_tail(&con->state_list, &state_sockets);
238 spin_unlock_bh(&state_sockets_lock);
239
240 wake_up_interruptible(&lowcomms_send_waitq);
241}
242
243static void lowcomms_state_change(struct sock *sk)
244{
245 if (sk->sk_state == TCP_ESTABLISHED)
246 lowcomms_write_space(sk);
247}
248
249/* Make a socket active */
250static int add_sock(struct socket *sock, struct connection *con)
251{
252 con->sock = sock;
253
254 /* Install a data_ready callback */
255 con->sock->sk->sk_data_ready = lowcomms_data_ready;
256 con->sock->sk->sk_write_space = lowcomms_write_space;
257 con->sock->sk->sk_state_change = lowcomms_state_change;
258
259 return 0;
260}
261
262/* Add the port number to an IP6 or 4 sockaddr and return the address
263 length */
264static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
265 int *addr_len)
266{
267 saddr->ss_family = dlm_local_addr.ss_family;
268 if (saddr->ss_family == AF_INET) {
269 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
270 in4_addr->sin_port = cpu_to_be16(port);
271 *addr_len = sizeof(struct sockaddr_in);
272 } else {
273 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
274 in6_addr->sin6_port = cpu_to_be16(port);
275 *addr_len = sizeof(struct sockaddr_in6);
276 }
277}
278
279/* Close a remote connection and tidy up */
280static void close_connection(struct connection *con, bool and_other)
281{
282 down_write(&con->sock_sem);
283
284 if (con->sock) {
285 sock_release(con->sock);
286 con->sock = NULL;
287 }
288 if (con->othercon && and_other) {
289 /* Will only re-enter once. */
290 close_connection(con->othercon, false);
291 }
292 if (con->rx_page) {
293 __free_page(con->rx_page);
294 con->rx_page = NULL;
295 }
296 con->retries = 0;
297 up_write(&con->sock_sem);
298}
299
300/* Data received from remote end */
301static int receive_from_sock(struct connection *con)
302{
303 int ret = 0;
304 struct msghdr msg;
305 struct iovec iov[2];
306 mm_segment_t fs;
307 unsigned len;
308 int r;
309 int call_again_soon = 0;
310
311 down_read(&con->sock_sem);
312
313 if (con->sock == NULL)
314 goto out;
315 if (con->rx_page == NULL) {
316 /*
317 * This doesn't need to be atomic, but I think it should
318 * improve performance if it is.
319 */
320 con->rx_page = alloc_page(GFP_ATOMIC);
321 if (con->rx_page == NULL)
322 goto out_resched;
323 cbuf_init(&con->cb, PAGE_CACHE_SIZE);
324 }
325
326 msg.msg_control = NULL;
327 msg.msg_controllen = 0;
328 msg.msg_iovlen = 1;
329 msg.msg_iov = iov;
330 msg.msg_name = NULL;
331 msg.msg_namelen = 0;
332 msg.msg_flags = 0;
333
334 /*
335 * iov[0] is the bit of the circular buffer between the current end
336 * point (cb.base + cb.len) and the end of the buffer.
337 */
338 iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
339 iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
340 iov[1].iov_len = 0;
341
342 /*
343 * iov[1] is the bit of the circular buffer between the start of the
344 * buffer and the start of the currently used section (cb.base)
345 */
346 if (cbuf_data(&con->cb) >= con->cb.base) {
347 iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
348 iov[1].iov_len = con->cb.base;
349 iov[1].iov_base = page_address(con->rx_page);
350 msg.msg_iovlen = 2;
351 }
352 len = iov[0].iov_len + iov[1].iov_len;
353
354 fs = get_fs();
355 set_fs(get_ds());
356 r = ret = sock_recvmsg(con->sock, &msg, len,
357 MSG_DONTWAIT | MSG_NOSIGNAL);
358 set_fs(fs);
359
360 if (ret <= 0)
361 goto out_close;
362 if (ret == len)
363 call_again_soon = 1;
364 cbuf_add(&con->cb, ret);
365 ret = dlm_process_incoming_buffer(con->nodeid,
366 page_address(con->rx_page),
367 con->cb.base, con->cb.len,
368 PAGE_CACHE_SIZE);
369 if (ret == -EBADMSG) {
370 printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
371 "iov_len=%u, iov_base[0]=%p, read=%d\n",
372 page_address(con->rx_page), con->cb.base, con->cb.len,
373 len, iov[0].iov_base, r);
374 }
375 if (ret < 0)
376 goto out_close;
377 cbuf_eat(&con->cb, ret);
378
379 if (cbuf_empty(&con->cb) && !call_again_soon) {
380 __free_page(con->rx_page);
381 con->rx_page = NULL;
382 }
383
384out:
385 if (call_again_soon)
386 goto out_resched;
387 up_read(&con->sock_sem);
388 return 0;
389
390out_resched:
391 lowcomms_data_ready(con->sock->sk, 0);
392 up_read(&con->sock_sem);
393 cond_resched();
394 return 0;
395
396out_close:
397 up_read(&con->sock_sem);
398 if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
399 close_connection(con, false);
400 /* Reconnect when there is something to send */
401 }
402
403 return ret;
404}
405
406/* Listening socket is busy, accept a connection */
407static int accept_from_sock(struct connection *con)
408{
409 int result;
410 struct sockaddr_storage peeraddr;
411 struct socket *newsock;
412 int len;
413 int nodeid;
414 struct connection *newcon;
415
416 memset(&peeraddr, 0, sizeof(peeraddr));
417 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
418 IPPROTO_TCP, &newsock);
419 if (result < 0)
420 return -ENOMEM;
421
422 down_read(&con->sock_sem);
423
424 result = -ENOTCONN;
425 if (con->sock == NULL)
426 goto accept_err;
427
428 newsock->type = con->sock->type;
429 newsock->ops = con->sock->ops;
430
431 result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
432 if (result < 0)
433 goto accept_err;
434
435 /* Get the connected socket's peer */
436 memset(&peeraddr, 0, sizeof(peeraddr));
437 if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
438 &len, 2)) {
439 result = -ECONNABORTED;
440 goto accept_err;
441 }
442
443 /* Get the new node's NODEID */
444 make_sockaddr(&peeraddr, 0, &len);
445 if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
446 printk("dlm: connect from non cluster node\n");
447 sock_release(newsock);
448 up_read(&con->sock_sem);
449 return -1;
450 }
451
452 log_print("got connection from %d", nodeid);
453
454 /* Check to see if we already have a connection to this node. This
455 * could happen if the two nodes initiate a connection at roughly
456 * the same time and the connections cross on the wire.
457 * TEMPORARY FIX:
458 * In this case we store the incoming one in "othercon"
459 */
460 newcon = nodeid2con(nodeid, GFP_KERNEL);
461 if (!newcon) {
462 result = -ENOMEM;
463 goto accept_err;
464 }
465 down_write(&newcon->sock_sem);
466 if (newcon->sock) {
467 struct connection *othercon = newcon->othercon;
468
469 if (!othercon) {
470 othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
471 if (!othercon) {
472 printk("dlm: failed to allocate incoming socket\n");
473 up_write(&newcon->sock_sem);
474 result = -ENOMEM;
475 goto accept_err;
476 }
477 othercon->nodeid = nodeid;
478 othercon->rx_action = receive_from_sock;
479 init_rwsem(&othercon->sock_sem);
480 set_bit(CF_IS_OTHERCON, &othercon->flags);
481 newcon->othercon = othercon;
482 }
483 othercon->sock = newsock;
484 newsock->sk->sk_user_data = othercon;
485 add_sock(newsock, othercon);
486 }
487 else {
488 newsock->sk->sk_user_data = newcon;
489 newcon->rx_action = receive_from_sock;
490 add_sock(newsock, newcon);
491
492 }
493
494 up_write(&newcon->sock_sem);
495
496 /*
497 * Add it to the active queue in case we got data
498 * beween processing the accept adding the socket
499 * to the read_sockets list
500 */
501 lowcomms_data_ready(newsock->sk, 0);
502 up_read(&con->sock_sem);
503
504 return 0;
505
506accept_err:
507 up_read(&con->sock_sem);
508 sock_release(newsock);
509
510 if (result != -EAGAIN)
511 printk("dlm: error accepting connection from node: %d\n", result);
512 return result;
513}
514
515/* Connect a new socket to its peer */
516static void connect_to_sock(struct connection *con)
517{
518 int result = -EHOSTUNREACH;
519 struct sockaddr_storage saddr;
520 int addr_len;
521 struct socket *sock;
522
523 if (con->nodeid == 0) {
524 log_print("attempt to connect sock 0 foiled");
525 return;
526 }
527
528 down_write(&con->sock_sem);
529 if (con->retries++ > MAX_CONNECT_RETRIES)
530 goto out;
531
532 /* Some odd races can cause double-connects, ignore them */
533 if (con->sock) {
534 result = 0;
535 goto out;
536 }
537
538 /* Create a socket to communicate with */
539 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
540 IPPROTO_TCP, &sock);
541 if (result < 0)
542 goto out_err;
543
544 memset(&saddr, 0, sizeof(saddr));
545 if (dlm_nodeid_to_addr(con->nodeid, &saddr))
546 goto out_err;
547
548 sock->sk->sk_user_data = con;
549 con->rx_action = receive_from_sock;
550
551 make_sockaddr(&saddr, dlm_config.tcp_port, &addr_len);
552
553 add_sock(sock, con);
554
555 log_print("connecting to %d", con->nodeid);
556 result =
557 sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
558 O_NONBLOCK);
559 if (result == -EINPROGRESS)
560 result = 0;
561 if (result == 0)
562 goto out;
563
564out_err:
565 if (con->sock) {
566 sock_release(con->sock);
567 con->sock = NULL;
568 }
569 /*
570 * Some errors are fatal and this list might need adjusting. For other
571 * errors we try again until the max number of retries is reached.
572 */
573 if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
574 result != -ENETDOWN && result != EINVAL
575 && result != -EPROTONOSUPPORT) {
576 lowcomms_connect_sock(con);
577 result = 0;
578 }
579out:
580 up_write(&con->sock_sem);
581 return;
582}
583
584static struct socket *create_listen_sock(struct connection *con,
585 struct sockaddr_storage *saddr)
586{
587 struct socket *sock = NULL;
588 mm_segment_t fs;
589 int result = 0;
590 int one = 1;
591 int addr_len;
592
593 if (dlm_local_addr.ss_family == AF_INET)
594 addr_len = sizeof(struct sockaddr_in);
595 else
596 addr_len = sizeof(struct sockaddr_in6);
597
598 /* Create a socket to communicate with */
599 result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
600 if (result < 0) {
601 printk("dlm: Can't create listening comms socket\n");
602 goto create_out;
603 }
604
605 fs = get_fs();
606 set_fs(get_ds());
607 result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
608 (char *)&one, sizeof(one));
609 set_fs(fs);
610 if (result < 0) {
611 printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
612 result);
613 }
614 sock->sk->sk_user_data = con;
615 con->rx_action = accept_from_sock;
616 con->sock = sock;
617
618 /* Bind to our port */
619 make_sockaddr(saddr, dlm_config.tcp_port, &addr_len);
620 result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
621 if (result < 0) {
622 printk("dlm: Can't bind to port %d\n", dlm_config.tcp_port);
623 sock_release(sock);
624 sock = NULL;
625 con->sock = NULL;
626 goto create_out;
627 }
628
629 fs = get_fs();
630 set_fs(get_ds());
631
632 result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
633 (char *)&one, sizeof(one));
634 set_fs(fs);
635 if (result < 0) {
636 printk("dlm: Set keepalive failed: %d\n", result);
637 }
638
639 result = sock->ops->listen(sock, 5);
640 if (result < 0) {
641 printk("dlm: Can't listen on port %d\n", dlm_config.tcp_port);
642 sock_release(sock);
643 sock = NULL;
644 goto create_out;
645 }
646
647create_out:
648 return sock;
649}
650
651
652/* Listen on all interfaces */
653static int listen_for_all(void)
654{
655 struct socket *sock = NULL;
656 struct connection *con = nodeid2con(0, GFP_KERNEL);
657 int result = -EINVAL;
658
659 /* We don't support multi-homed hosts */
660 set_bit(CF_IS_OTHERCON, &con->flags);
661
662 sock = create_listen_sock(con, &dlm_local_addr);
663 if (sock) {
664 add_sock(sock, con);
665 result = 0;
666 }
667 else {
668 result = -EADDRINUSE;
669 }
670
671 return result;
672}
673
674
675
676static struct writequeue_entry *new_writequeue_entry(struct connection *con,
677 gfp_t allocation)
678{
679 struct writequeue_entry *entry;
680
681 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
682 if (!entry)
683 return NULL;
684
685 entry->page = alloc_page(allocation);
686 if (!entry->page) {
687 kfree(entry);
688 return NULL;
689 }
690
691 entry->offset = 0;
692 entry->len = 0;
693 entry->end = 0;
694 entry->users = 0;
695 entry->con = con;
696
697 return entry;
698}
699
700void *dlm_lowcomms_get_buffer(int nodeid, int len,
701 gfp_t allocation, char **ppc)
702{
703 struct connection *con;
704 struct writequeue_entry *e;
705 int offset = 0;
706 int users = 0;
707
708 con = nodeid2con(nodeid, allocation);
709 if (!con)
710 return NULL;
711
712 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
713 if ((&e->list == &con->writequeue) ||
714 (PAGE_CACHE_SIZE - e->end < len)) {
715 e = NULL;
716 } else {
717 offset = e->end;
718 e->end += len;
719 users = e->users++;
720 }
721 spin_unlock(&con->writequeue_lock);
722
723 if (e) {
724 got_one:
725 if (users == 0)
726 kmap(e->page);
727 *ppc = page_address(e->page) + offset;
728 return e;
729 }
730
731 e = new_writequeue_entry(con, allocation);
732 if (e) {
733 spin_lock(&con->writequeue_lock);
734 offset = e->end;
735 e->end += len;
736 users = e->users++;
737 list_add_tail(&e->list, &con->writequeue);
738 spin_unlock(&con->writequeue_lock);
739 goto got_one;
740 }
741 return NULL;
742}
743
744void dlm_lowcomms_commit_buffer(void *mh)
745{
746 struct writequeue_entry *e = (struct writequeue_entry *)mh;
747 struct connection *con = e->con;
748 int users;
749
750 users = --e->users;
751 if (users)
752 goto out;
753 e->len = e->end - e->offset;
754 kunmap(e->page);
755 spin_unlock(&con->writequeue_lock);
756
757 if (test_and_set_bit(CF_WRITE_PENDING, &con->flags) == 0) {
758 spin_lock_bh(&write_sockets_lock);
759 list_add_tail(&con->write_list, &write_sockets);
760 spin_unlock_bh(&write_sockets_lock);
761
762 wake_up_interruptible(&lowcomms_send_waitq);
763 }
764 return;
765
766out:
767 spin_unlock(&con->writequeue_lock);
768 return;
769}
770
771static void free_entry(struct writequeue_entry *e)
772{
773 __free_page(e->page);
774 kfree(e);
775}
776
777/* Send a message */
778static void send_to_sock(struct connection *con)
779{
780 int ret = 0;
781 ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
782 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
783 struct writequeue_entry *e;
784 int len, offset;
785
786 down_read(&con->sock_sem);
787 if (con->sock == NULL)
788 goto out_connect;
789
790 sendpage = con->sock->ops->sendpage;
791
792 spin_lock(&con->writequeue_lock);
793 for (;;) {
794 e = list_entry(con->writequeue.next, struct writequeue_entry,
795 list);
796 if ((struct list_head *) e == &con->writequeue)
797 break;
798
799 len = e->len;
800 offset = e->offset;
801 BUG_ON(len == 0 && e->users == 0);
802 spin_unlock(&con->writequeue_lock);
803
804 ret = 0;
805 if (len) {
806 ret = sendpage(con->sock, e->page, offset, len,
807 msg_flags);
808 if (ret == -EAGAIN || ret == 0)
809 goto out;
810 if (ret <= 0)
811 goto send_error;
812 }
813 else {
814 /* Don't starve people filling buffers */
815 cond_resched();
816 }
817
818 spin_lock(&con->writequeue_lock);
819 e->offset += ret;
820 e->len -= ret;
821
822 if (e->len == 0 && e->users == 0) {
823 list_del(&e->list);
824 kunmap(e->page);
825 free_entry(e);
826 continue;
827 }
828 }
829 spin_unlock(&con->writequeue_lock);
830out:
831 up_read(&con->sock_sem);
832 return;
833
834send_error:
835 up_read(&con->sock_sem);
836 close_connection(con, false);
837 lowcomms_connect_sock(con);
838 return;
839
840out_connect:
841 up_read(&con->sock_sem);
842 lowcomms_connect_sock(con);
843 return;
844}
845
846static void clean_one_writequeue(struct connection *con)
847{
848 struct list_head *list;
849 struct list_head *temp;
850
851 spin_lock(&con->writequeue_lock);
852 list_for_each_safe(list, temp, &con->writequeue) {
853 struct writequeue_entry *e =
854 list_entry(list, struct writequeue_entry, list);
855 list_del(&e->list);
856 free_entry(e);
857 }
858 spin_unlock(&con->writequeue_lock);
859}
860
861/* Called from recovery when it knows that a node has
862 left the cluster */
863int dlm_lowcomms_close(int nodeid)
864{
865 struct connection *con;
866
867 if (!connections)
868 goto out;
869
870 log_print("closing connection to node %d", nodeid);
871 con = nodeid2con(nodeid, 0);
872 if (con) {
873 clean_one_writequeue(con);
874 close_connection(con, true);
875 atomic_set(&con->waiting_requests, 0);
876 }
877 return 0;
878
879out:
880 return -1;
881}
882
883/* API send message call, may queue the request */
884/* N.B. This is the old interface - use the new one for new calls */
885int lowcomms_send_message(int nodeid, char *buf, int len, gfp_t allocation)
886{
887 struct writequeue_entry *e;
888 char *b;
889
890 e = dlm_lowcomms_get_buffer(nodeid, len, allocation, &b);
891 if (e) {
892 memcpy(b, buf, len);
893 dlm_lowcomms_commit_buffer(e);
894 return 0;
895 }
896 return -ENOBUFS;
897}
898
899/* Look for activity on active sockets */
900static void process_sockets(void)
901{
902 struct list_head *list;
903 struct list_head *temp;
904 int count = 0;
905
906 spin_lock_bh(&read_sockets_lock);
907 list_for_each_safe(list, temp, &read_sockets) {
908
909 struct connection *con =
910 list_entry(list, struct connection, read_list);
911 list_del(&con->read_list);
912 clear_bit(CF_READ_PENDING, &con->flags);
913
914 spin_unlock_bh(&read_sockets_lock);
915
916 /* This can reach zero if we are processing requests
917 * as they come in.
918 */
919 if (atomic_read(&con->waiting_requests) == 0) {
920 spin_lock_bh(&read_sockets_lock);
921 continue;
922 }
923
924 do {
925 con->rx_action(con);
926
927 /* Don't starve out everyone else */
928 if (++count >= MAX_RX_MSG_COUNT) {
929 cond_resched();
930 count = 0;
931 }
932
933 } while (!atomic_dec_and_test(&con->waiting_requests) &&
934 !kthread_should_stop());
935
936 spin_lock_bh(&read_sockets_lock);
937 }
938 spin_unlock_bh(&read_sockets_lock);
939}
940
941/* Try to send any messages that are pending
942 */
943static void process_output_queue(void)
944{
945 struct list_head *list;
946 struct list_head *temp;
947
948 spin_lock_bh(&write_sockets_lock);
949 list_for_each_safe(list, temp, &write_sockets) {
950 struct connection *con =
951 list_entry(list, struct connection, write_list);
952 clear_bit(CF_WRITE_PENDING, &con->flags);
953 list_del(&con->write_list);
954
955 spin_unlock_bh(&write_sockets_lock);
956 send_to_sock(con);
957 spin_lock_bh(&write_sockets_lock);
958 }
959 spin_unlock_bh(&write_sockets_lock);
960}
961
962static void process_state_queue(void)
963{
964 struct list_head *list;
965 struct list_head *temp;
966
967 spin_lock_bh(&state_sockets_lock);
968 list_for_each_safe(list, temp, &state_sockets) {
969 struct connection *con =
970 list_entry(list, struct connection, state_list);
971 list_del(&con->state_list);
972 clear_bit(CF_CONNECT_PENDING, &con->flags);
973 spin_unlock_bh(&state_sockets_lock);
974
975 connect_to_sock(con);
976 spin_lock_bh(&state_sockets_lock);
977 }
978 spin_unlock_bh(&state_sockets_lock);
979}
980
981
982/* Discard all entries on the write queues */
983static void clean_writequeues(void)
984{
985 int nodeid;
986
987 for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
988 struct connection *con = nodeid2con(nodeid, 0);
989
990 if (con)
991 clean_one_writequeue(con);
992 }
993}
994
995static int read_list_empty(void)
996{
997 int status;
998
999 spin_lock_bh(&read_sockets_lock);
1000 status = list_empty(&read_sockets);
1001 spin_unlock_bh(&read_sockets_lock);
1002
1003 return status;
1004}
1005
1006/* DLM Transport comms receive daemon */
1007static int dlm_recvd(void *data)
1008{
1009 init_waitqueue_entry(&lowcomms_recv_waitq_head, current);
1010 add_wait_queue(&lowcomms_recv_waitq, &lowcomms_recv_waitq_head);
1011
1012 while (!kthread_should_stop()) {
1013 set_current_state(TASK_INTERRUPTIBLE);
1014 if (read_list_empty())
1015 cond_resched();
1016 set_current_state(TASK_RUNNING);
1017
1018 process_sockets();
1019 }
1020
1021 return 0;
1022}
1023
1024static int write_and_state_lists_empty(void)
1025{
1026 int status;
1027
1028 spin_lock_bh(&write_sockets_lock);
1029 status = list_empty(&write_sockets);
1030 spin_unlock_bh(&write_sockets_lock);
1031
1032 spin_lock_bh(&state_sockets_lock);
1033 if (list_empty(&state_sockets) == 0)
1034 status = 0;
1035 spin_unlock_bh(&state_sockets_lock);
1036
1037 return status;
1038}
1039
1040/* DLM Transport send daemon */
1041static int dlm_sendd(void *data)
1042{
1043 init_waitqueue_entry(&lowcomms_send_waitq_head, current);
1044 add_wait_queue(&lowcomms_send_waitq, &lowcomms_send_waitq_head);
1045
1046 while (!kthread_should_stop()) {
1047 set_current_state(TASK_INTERRUPTIBLE);
1048 if (write_and_state_lists_empty())
1049 cond_resched();
1050 set_current_state(TASK_RUNNING);
1051
1052 process_state_queue();
1053 process_output_queue();
1054 }
1055
1056 return 0;
1057}
1058
1059static void daemons_stop(void)
1060{
1061 kthread_stop(recv_task);
1062 kthread_stop(send_task);
1063}
1064
1065static int daemons_start(void)
1066{
1067 struct task_struct *p;
1068 int error;
1069
1070 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1071 error = IS_ERR(p);
1072 if (error) {
1073 log_print("can't start dlm_recvd %d", error);
1074 return error;
1075 }
1076 recv_task = p;
1077
1078 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1079 error = IS_ERR(p);
1080 if (error) {
1081 log_print("can't start dlm_sendd %d", error);
1082 kthread_stop(recv_task);
1083 return error;
1084 }
1085 send_task = p;
1086
1087 return 0;
1088}
1089
1090/*
1091 * Return the largest buffer size we can cope with.
1092 */
1093int lowcomms_max_buffer_size(void)
1094{
1095 return PAGE_CACHE_SIZE;
1096}
1097
1098void dlm_lowcomms_stop(void)
1099{
1100 int i;
1101
1102 /* Set all the flags to prevent any
1103 socket activity.
1104 */
1105 for (i = 0; i < conn_array_size; i++) {
1106 if (connections[i])
1107 connections[i]->flags |= 0xFF;
1108 }
1109
1110 daemons_stop();
1111 clean_writequeues();
1112
1113 for (i = 0; i < conn_array_size; i++) {
1114 if (connections[i]) {
1115 close_connection(connections[i], true);
1116 if (connections[i]->othercon)
1117 kmem_cache_free(con_cache, connections[i]->othercon);
1118 kmem_cache_free(con_cache, connections[i]);
1119 }
1120 }
1121
1122 kfree(connections);
1123 connections = NULL;
1124
1125 kmem_cache_destroy(con_cache);
1126}
1127
1128/* This is quite likely to sleep... */
1129int dlm_lowcomms_start(void)
1130{
1131 int error = 0;
1132
1133 error = -ENOMEM;
1134 connections = kzalloc(sizeof(struct connection *) *
1135 NODE_INCREMENT, GFP_KERNEL);
1136 if (!connections)
1137 goto out;
1138
1139 conn_array_size = NODE_INCREMENT;
1140
1141 if (dlm_our_addr(&dlm_local_addr, 0)) {
1142 log_print("no local IP address has been set");
1143 goto fail_free_conn;
1144 }
1145 if (!dlm_our_addr(&dlm_local_addr, 1)) {
1146 log_print("This dlm comms module does not support multi-homed clustering");
1147 goto fail_free_conn;
1148 }
1149
1150 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1151 __alignof__(struct connection), 0,
1152 NULL, NULL);
1153 if (!con_cache)
1154 goto fail_free_conn;
1155
1156
1157 /* Start listening */
1158 error = listen_for_all();
1159 if (error)
1160 goto fail_unlisten;
1161
1162 error = daemons_start();
1163 if (error)
1164 goto fail_unlisten;
1165
1166 return 0;
1167
1168fail_unlisten:
1169 close_connection(connections[0], false);
1170 kmem_cache_free(con_cache, connections[0]);
1171 kmem_cache_destroy(con_cache);
1172
1173fail_free_conn:
1174 kfree(connections);
1175
1176out:
1177 return error;
1178}
1179
1180/*
1181 * Overrides for Emacs so that we follow Linus's tabbing style.
1182 * Emacs will notice this stuff at the end of the file and automatically
1183 * adjust the settings for this buffer only. This must remain at the end
1184 * of the file.
1185 * ---------------------------------------------------------------------------
1186 * Local variables:
1187 * c-file-style: "linux"
1188 * End:
1189 */
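
For context, the listening-socket setup earlier in this file (create socket, set SO_KEEPALIVE, bind to the configured port, listen with a small backlog) follows the ordinary BSD-socket sequence, just driven through the in-kernel struct socket ops. A rough userspace sketch of the same steps — not part of the patch, with an arbitrary placeholder port — for comparison:

/* Editorial sketch only: userspace analogue of the socket -> SO_KEEPALIVE
 * -> bind -> listen sequence used by create_listen_sock() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int make_listen_sock(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		return -1;
	}

	/* Keepalive, as the kernel code sets via sock_setsockopt() */
	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)) < 0)
		perror("setsockopt(SO_KEEPALIVE)");	/* non-fatal, as above */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(fd);
		return -1;
	}

	/* Same small backlog (5) as the listen() call in the patch */
	if (listen(fd, 5) < 0) {
		perror("listen");
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int fd = make_listen_sock(21064);	/* arbitrary example port */

	if (fd >= 0) {
		printf("listening socket fd=%d\n", fd);
		close(fd);
	}
	return 0;
}
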
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
index 2d045e0daae1..a9a9618c0d3f 100644
--- a/fs/dlm/lowcomms.h
+++ b/fs/dlm/lowcomms.h
@@ -14,8 +14,6 @@
14#ifndef __LOWCOMMS_DOT_H__ 14#ifndef __LOWCOMMS_DOT_H__
15#define __LOWCOMMS_DOT_H__ 15#define __LOWCOMMS_DOT_H__
16 16
17int dlm_lowcomms_init(void);
18void dlm_lowcomms_exit(void);
19int dlm_lowcomms_start(void); 17int dlm_lowcomms_start(void);
20void dlm_lowcomms_stop(void); 18void dlm_lowcomms_stop(void);
21int dlm_lowcomms_close(int nodeid); 19int dlm_lowcomms_close(int nodeid);
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index a8da8dc36b2e..162fbae58fe5 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -16,7 +16,6 @@
16#include "lock.h" 16#include "lock.h"
17#include "user.h" 17#include "user.h"
18#include "memory.h" 18#include "memory.h"
19#include "lowcomms.h"
20#include "config.h" 19#include "config.h"
21 20
22#ifdef CONFIG_DLM_DEBUG 21#ifdef CONFIG_DLM_DEBUG
@@ -47,20 +46,14 @@ static int __init init_dlm(void)
47 if (error) 46 if (error)
48 goto out_config; 47 goto out_config;
49 48
50 error = dlm_lowcomms_init();
51 if (error)
52 goto out_debug;
53
54 error = dlm_user_init(); 49 error = dlm_user_init();
55 if (error) 50 if (error)
56 goto out_lowcomms; 51 goto out_debug;
57 52
58 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
59 54
60 return 0; 55 return 0;
61 56
62 out_lowcomms:
63 dlm_lowcomms_exit();
64 out_debug: 57 out_debug:
65 dlm_unregister_debugfs(); 58 dlm_unregister_debugfs();
66 out_config: 59 out_config:
@@ -76,7 +69,6 @@ static int __init init_dlm(void)
76static void __exit exit_dlm(void) 69static void __exit exit_dlm(void)
77{ 70{
78 dlm_user_exit(); 71 dlm_user_exit();
79 dlm_lowcomms_exit();
80 dlm_config_exit(); 72 dlm_config_exit();
81 dlm_memory_exit(); 73 dlm_memory_exit();
82 dlm_lockspace_exit(); 74 dlm_lockspace_exit();
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index a3f7de7f3a8f..85e2897bd740 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -186,6 +186,14 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
186 struct dlm_member *memb, *safe; 186 struct dlm_member *memb, *safe;
187 int i, error, found, pos = 0, neg = 0, low = -1; 187 int i, error, found, pos = 0, neg = 0, low = -1;
188 188
189 /* previously removed members that we've not finished removing need to
190 count as a negative change so the "neg" recovery steps will happen */
191
192 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
193 log_debug(ls, "prev removed member %d", memb->nodeid);
194 neg++;
195 }
196
189 /* move departed members from ls_nodes to ls_nodes_gone */ 197 /* move departed members from ls_nodes to ls_nodes_gone */
190 198
191 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { 199 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index 518239a8b1e9..4cc31be9cd9d 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -90,13 +90,28 @@ static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
90 return 0; 90 return 0;
91} 91}
92 92
93static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
94{
95 spin_lock(&ls->ls_rcom_spin);
96 *new_seq = ++ls->ls_rcom_seq;
97 set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
98 spin_unlock(&ls->ls_rcom_spin);
99}
100
101static void disallow_sync_reply(struct dlm_ls *ls)
102{
103 spin_lock(&ls->ls_rcom_spin);
104 clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
105 clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
106 spin_unlock(&ls->ls_rcom_spin);
107}
108
93int dlm_rcom_status(struct dlm_ls *ls, int nodeid) 109int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
94{ 110{
95 struct dlm_rcom *rc; 111 struct dlm_rcom *rc;
96 struct dlm_mhandle *mh; 112 struct dlm_mhandle *mh;
97 int error = 0; 113 int error = 0;
98 114
99 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
100 ls->ls_recover_nodeid = nodeid; 115 ls->ls_recover_nodeid = nodeid;
101 116
102 if (nodeid == dlm_our_nodeid()) { 117 if (nodeid == dlm_our_nodeid()) {
@@ -108,12 +123,14 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
108 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh); 123 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
109 if (error) 124 if (error)
110 goto out; 125 goto out;
111 rc->rc_id = ++ls->ls_rcom_seq; 126
127 allow_sync_reply(ls, &rc->rc_id);
128 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
112 129
113 send_rcom(ls, mh, rc); 130 send_rcom(ls, mh, rc);
114 131
115 error = dlm_wait_function(ls, &rcom_response); 132 error = dlm_wait_function(ls, &rcom_response);
116 clear_bit(LSFL_RCOM_READY, &ls->ls_flags); 133 disallow_sync_reply(ls);
117 if (error) 134 if (error)
118 goto out; 135 goto out;
119 136
@@ -150,14 +167,21 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
150 167
151static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 168static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
152{ 169{
153 if (rc_in->rc_id != ls->ls_rcom_seq) { 170 spin_lock(&ls->ls_rcom_spin);
154 log_debug(ls, "reject old reply %d got %llx wanted %llx", 171 if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
155 rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq); 172 rc_in->rc_id != ls->ls_rcom_seq) {
156 return; 173 log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
174 rc_in->rc_type, rc_in->rc_header.h_nodeid,
175 (unsigned long long)rc_in->rc_id,
176 (unsigned long long)ls->ls_rcom_seq);
177 goto out;
157 } 178 }
158 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length); 179 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
159 set_bit(LSFL_RCOM_READY, &ls->ls_flags); 180 set_bit(LSFL_RCOM_READY, &ls->ls_flags);
181 clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
160 wake_up(&ls->ls_wait_general); 182 wake_up(&ls->ls_wait_general);
183 out:
184 spin_unlock(&ls->ls_rcom_spin);
161} 185}
162 186
163static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) 187static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
@@ -171,7 +195,6 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
171 struct dlm_mhandle *mh; 195 struct dlm_mhandle *mh;
172 int error = 0, len = sizeof(struct dlm_rcom); 196 int error = 0, len = sizeof(struct dlm_rcom);
173 197
174 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
175 ls->ls_recover_nodeid = nodeid; 198 ls->ls_recover_nodeid = nodeid;
176 199
177 if (nodeid == dlm_our_nodeid()) { 200 if (nodeid == dlm_our_nodeid()) {
@@ -185,12 +208,14 @@ int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
185 if (error) 208 if (error)
186 goto out; 209 goto out;
187 memcpy(rc->rc_buf, last_name, last_len); 210 memcpy(rc->rc_buf, last_name, last_len);
188 rc->rc_id = ++ls->ls_rcom_seq; 211
212 allow_sync_reply(ls, &rc->rc_id);
213 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
189 214
190 send_rcom(ls, mh, rc); 215 send_rcom(ls, mh, rc);
191 216
192 error = dlm_wait_function(ls, &rcom_response); 217 error = dlm_wait_function(ls, &rcom_response);
193 clear_bit(LSFL_RCOM_READY, &ls->ls_flags); 218 disallow_sync_reply(ls);
194 out: 219 out:
195 return error; 220 return error;
196} 221}
@@ -370,9 +395,10 @@ static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
370static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) 395static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
371{ 396{
372 struct dlm_rcom *rc; 397 struct dlm_rcom *rc;
398 struct rcom_config *rf;
373 struct dlm_mhandle *mh; 399 struct dlm_mhandle *mh;
374 char *mb; 400 char *mb;
375 int mb_len = sizeof(struct dlm_rcom); 401 int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config);
376 402
377 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb); 403 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
378 if (!mh) 404 if (!mh)
@@ -391,6 +417,9 @@ static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
391 rc->rc_id = rc_in->rc_id; 417 rc->rc_id = rc_in->rc_id;
392 rc->rc_result = -ESRCH; 418 rc->rc_result = -ESRCH;
393 419
420 rf = (struct rcom_config *) rc->rc_buf;
421 rf->rf_lvblen = -1;
422
394 dlm_rcom_out(rc); 423 dlm_rcom_out(rc);
395 dlm_lowcomms_commit_buffer(mh); 424 dlm_lowcomms_commit_buffer(mh);
396 425
@@ -412,9 +441,10 @@ void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
412 441
413 ls = dlm_find_lockspace_global(hd->h_lockspace); 442 ls = dlm_find_lockspace_global(hd->h_lockspace);
414 if (!ls) { 443 if (!ls) {
415 log_print("lockspace %x from %d not found", 444 log_print("lockspace %x from %d type %x not found",
416 hd->h_lockspace, nodeid); 445 hd->h_lockspace, nodeid, rc->rc_type);
417 send_ls_not_ready(nodeid, rc); 446 if (rc->rc_type == DLM_RCOM_STATUS)
447 send_ls_not_ready(nodeid, rc);
418 return; 448 return;
419 } 449 }
420 450
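
The rcom.c hunks above add allow_sync_reply()/disallow_sync_reply() so that a synchronous reply is accepted only while the requester is actually waiting for it and only if it carries the current sequence number; stale replies from an earlier recovery cycle are rejected under ls_rcom_spin. A simplified userspace sketch of that pattern — not the kernel code, all names hypothetical:

/* Editorial sketch: publish the expected sequence and a WAIT flag under a
 * lock; the receive path accepts a reply only if still waiting and the
 * sequence matches. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t rcom_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t rcom_seq;
static bool rcom_wait;
static bool rcom_ready;

static uint64_t allow_sync_reply(void)
{
	uint64_t seq;

	pthread_mutex_lock(&rcom_lock);
	seq = ++rcom_seq;		/* sequence the reply must carry */
	rcom_wait = true;
	pthread_mutex_unlock(&rcom_lock);
	return seq;
}

static void disallow_sync_reply(void)
{
	pthread_mutex_lock(&rcom_lock);
	rcom_wait = false;
	rcom_ready = false;
	pthread_mutex_unlock(&rcom_lock);
}

static void receive_sync_reply(uint64_t reply_seq)
{
	pthread_mutex_lock(&rcom_lock);
	if (!rcom_wait || reply_seq != rcom_seq) {
		printf("reject reply seq %llu expect %llu\n",
		       (unsigned long long)reply_seq,
		       (unsigned long long)rcom_seq);
	} else {
		rcom_ready = true;	/* wake the waiter in real code */
		rcom_wait = false;
	}
	pthread_mutex_unlock(&rcom_lock);
}

int main(void)
{
	uint64_t seq = allow_sync_reply();

	receive_sync_reply(seq - 1);	/* stale reply: rejected */
	receive_sync_reply(seq);	/* current reply: accepted */
	printf("ready=%d\n", rcom_ready);
	disallow_sync_reply();		/* done waiting, clear both flags */
	return 0;
}
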
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
index a5e6d184872e..cf9f6831bab5 100644
--- a/fs/dlm/recover.c
+++ b/fs/dlm/recover.c
@@ -252,6 +252,7 @@ static void recover_list_clear(struct dlm_ls *ls)
252 spin_lock(&ls->ls_recover_list_lock); 252 spin_lock(&ls->ls_recover_list_lock);
253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { 253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
254 list_del_init(&r->res_recover_list); 254 list_del_init(&r->res_recover_list);
255 r->res_recover_locks_count = 0;
255 dlm_put_rsb(r); 256 dlm_put_rsb(r);
256 ls->ls_recover_list_count--; 257 ls->ls_recover_list_count--;
257 } 258 }
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 362e3eff4dc9..650536aa5139 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -45,7 +45,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
45 unsigned long start; 45 unsigned long start;
46 int error, neg = 0; 46 int error, neg = 0;
47 47
48 log_debug(ls, "recover %llx", rv->seq); 48 log_debug(ls, "recover %llx", (unsigned long long)rv->seq);
49 49
50 mutex_lock(&ls->ls_recoverd_active); 50 mutex_lock(&ls->ls_recoverd_active);
51 51
@@ -94,14 +94,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
94 } 94 }
95 95
96 /* 96 /*
97 * Purge directory-related requests that are saved in requestqueue.
98 * All dir requests from before recovery are invalid now due to the dir
99 * rebuild and will be resent by the requesting nodes.
100 */
101
102 dlm_purge_requestqueue(ls);
103
104 /*
105 * Wait for all nodes to complete directory rebuild. 97 * Wait for all nodes to complete directory rebuild.
106 */ 98 */
107 99
@@ -164,10 +156,31 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
164 */ 156 */
165 157
166 dlm_recover_rsbs(ls); 158 dlm_recover_rsbs(ls);
159 } else {
160 /*
161 * Other lockspace members may be going through the "neg" steps
162 * while also adding us to the lockspace, in which case they'll
163 * be doing the recover_locks (RS_LOCKS) barrier.
164 */
165 dlm_set_recover_status(ls, DLM_RS_LOCKS);
166
167 error = dlm_recover_locks_wait(ls);
168 if (error) {
169 log_error(ls, "recover_locks_wait failed %d", error);
170 goto fail;
171 }
167 } 172 }
168 173
169 dlm_release_root_list(ls); 174 dlm_release_root_list(ls);
170 175
176 /*
177 * Purge directory-related requests that are saved in requestqueue.
178 * All dir requests from before recovery are invalid now due to the dir
179 * rebuild and will be resent by the requesting nodes.
180 */
181
182 dlm_purge_requestqueue(ls);
183
171 dlm_set_recover_status(ls, DLM_RS_DONE); 184 dlm_set_recover_status(ls, DLM_RS_DONE);
172 error = dlm_recover_done_wait(ls); 185 error = dlm_recover_done_wait(ls);
173 if (error) { 186 if (error) {
@@ -199,7 +212,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
199 212
200 dlm_astd_wake(); 213 dlm_astd_wake();
201 214
202 log_debug(ls, "recover %llx done: %u ms", rv->seq, 215 log_debug(ls, "recover %llx done: %u ms",
216 (unsigned long long)rv->seq,
203 jiffies_to_msecs(jiffies - start)); 217 jiffies_to_msecs(jiffies - start));
204 mutex_unlock(&ls->ls_recoverd_active); 218 mutex_unlock(&ls->ls_recoverd_active);
205 219
@@ -207,11 +221,16 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
207 221
208 fail: 222 fail:
209 dlm_release_root_list(ls); 223 dlm_release_root_list(ls);
210 log_debug(ls, "recover %llx error %d", rv->seq, error); 224 log_debug(ls, "recover %llx error %d",
225 (unsigned long long)rv->seq, error);
211 mutex_unlock(&ls->ls_recoverd_active); 226 mutex_unlock(&ls->ls_recoverd_active);
212 return error; 227 return error;
213} 228}
214 229
230/* The dlm_ls_start() that created the rv we take here may already have been
231 stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
232 flag set. */
233
215static void do_ls_recovery(struct dlm_ls *ls) 234static void do_ls_recovery(struct dlm_ls *ls)
216{ 235{
217 struct dlm_recover *rv = NULL; 236 struct dlm_recover *rv = NULL;
@@ -219,7 +238,8 @@ static void do_ls_recovery(struct dlm_ls *ls)
219 spin_lock(&ls->ls_recover_lock); 238 spin_lock(&ls->ls_recover_lock);
220 rv = ls->ls_recover_args; 239 rv = ls->ls_recover_args;
221 ls->ls_recover_args = NULL; 240 ls->ls_recover_args = NULL;
222 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); 241 if (rv && ls->ls_recover_seq == rv->seq)
242 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
223 spin_unlock(&ls->ls_recover_lock); 243 spin_unlock(&ls->ls_recover_lock);
224 244
225 if (rv) { 245 if (rv) {
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
index 7b2b089634a2..65008d79c96d 100644
--- a/fs/dlm/requestqueue.c
+++ b/fs/dlm/requestqueue.c
@@ -30,26 +30,36 @@ struct rq_entry {
30 * lockspace is enabled on some while still suspended on others. 30 * lockspace is enabled on some while still suspended on others.
31 */ 31 */
32 32
33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd) 33int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
34{ 34{
35 struct rq_entry *e; 35 struct rq_entry *e;
36 int length = hd->h_length; 36 int length = hd->h_length;
37 37 int rv = 0;
38 if (dlm_is_removed(ls, nodeid))
39 return;
40 38
41 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL); 39 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
42 if (!e) { 40 if (!e) {
43 log_print("dlm_add_requestqueue: out of memory\n"); 41 log_print("dlm_add_requestqueue: out of memory\n");
44 return; 42 return 0;
45 } 43 }
46 44
47 e->nodeid = nodeid; 45 e->nodeid = nodeid;
48 memcpy(e->request, hd, length); 46 memcpy(e->request, hd, length);
49 47
48 /* We need to check dlm_locking_stopped() after taking the mutex to
49 avoid a race where dlm_recoverd enables locking and runs
50 process_requestqueue between our earlier dlm_locking_stopped check
51 and this addition to the requestqueue. */
52
50 mutex_lock(&ls->ls_requestqueue_mutex); 53 mutex_lock(&ls->ls_requestqueue_mutex);
51 list_add_tail(&e->list, &ls->ls_requestqueue); 54 if (dlm_locking_stopped(ls))
55 list_add_tail(&e->list, &ls->ls_requestqueue);
56 else {
57 log_debug(ls, "dlm_add_requestqueue skip from %d", nodeid);
58 kfree(e);
59 rv = -EAGAIN;
60 }
52 mutex_unlock(&ls->ls_requestqueue_mutex); 61 mutex_unlock(&ls->ls_requestqueue_mutex);
62 return rv;
53} 63}
54 64
55int dlm_process_requestqueue(struct dlm_ls *ls) 65int dlm_process_requestqueue(struct dlm_ls *ls)
@@ -120,6 +130,10 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
120{ 130{
121 uint32_t type = ms->m_type; 131 uint32_t type = ms->m_type;
122 132
133 /* the ls is being cleaned up and freed by release_lockspace */
134 if (!ls->ls_count)
135 return 1;
136
123 if (dlm_is_removed(ls, nodeid)) 137 if (dlm_is_removed(ls, nodeid))
124 return 1; 138 return 1;
125 139
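
The comment added to dlm_add_requestqueue() above describes a classic check-then-act race: the "locking stopped" state must be tested again after taking ls_requestqueue_mutex, because dlm_recoverd may restart locking and drain the queue in between. A minimal generic sketch of that pattern — not the DLM code, names hypothetical:

/* Editorial sketch: re-test the state under the mutex before queueing,
 * so an entry is only added while the stopped state still holds. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool locking_stopped = true;	/* cleared by a "recoverd" thread */
static int queued;

/* Returns 0 if queued, -1 if locking already restarted (caller retries
 * through the normal path, as the patch does with -EAGAIN). */
static int add_to_requestqueue(int request)
{
	int rv = 0;

	pthread_mutex_lock(&queue_lock);
	if (locking_stopped)
		queued = request;	/* stand-in for list_add_tail() */
	else
		rv = -1;
	pthread_mutex_unlock(&queue_lock);
	return rv;
}

int main(void)
{
	printf("queued while stopped: %d\n", add_to_requestqueue(42) == 0);
	locking_stopped = false;	/* recovery finished */
	printf("queued after restart: %d\n", add_to_requestqueue(43) == 0);
	return 0;
}
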
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
index 349f0d292d95..6a53ea03335d 100644
--- a/fs/dlm/requestqueue.h
+++ b/fs/dlm/requestqueue.h
@@ -13,7 +13,7 @@
13#ifndef __REQUESTQUEUE_DOT_H__ 13#ifndef __REQUESTQUEUE_DOT_H__
14#define __REQUESTQUEUE_DOT_H__ 14#define __REQUESTQUEUE_DOT_H__
15 15
16void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd); 16int dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
17int dlm_process_requestqueue(struct dlm_ls *ls); 17int dlm_process_requestqueue(struct dlm_ls *ls);
18void dlm_wait_requestqueue(struct dlm_ls *ls); 18void dlm_wait_requestqueue(struct dlm_ls *ls);
19void dlm_purge_requestqueue(struct dlm_ls *ls); 19void dlm_purge_requestqueue(struct dlm_ls *ls);
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 8c27de8b9568..c0791cbacad9 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -2,6 +2,7 @@ config GFS2_FS
2 tristate "GFS2 file system support" 2 tristate "GFS2 file system support"
3 depends on EXPERIMENTAL 3 depends on EXPERIMENTAL
4 select FS_POSIX_ACL 4 select FS_POSIX_ACL
5 select CRC32
5 help 6 help
6 A cluster filesystem. 7 A cluster filesystem.
7 8
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 5f959b8ce406..6e80844367ee 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,11 +74,11 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
74{ 74{
75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl) 75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
77 if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER)) 77 if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER))
78 return -EPERM; 78 return -EPERM;
79 if (S_ISLNK(ip->i_di.di_mode)) 79 if (S_ISLNK(ip->i_inode.i_mode))
80 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
81 if (!access && !S_ISDIR(ip->i_di.di_mode)) 81 if (!access && !S_ISDIR(ip->i_inode.i_mode))
82 return -EACCES; 82 return -EACCES;
83 83
84 return 0; 84 return 0;
@@ -145,14 +145,14 @@ out:
145} 145}
146 146
147/** 147/**
148 * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something 148 * gfs2_check_acl - Check an ACL to see if we're allowed to do something
149 * @inode: the file we want to do something to 149 * @inode: the file we want to do something to
150 * @mask: what we want to do 150 * @mask: what we want to do
151 * 151 *
152 * Returns: errno 152 * Returns: errno
153 */ 153 */
154 154
155int gfs2_check_acl_locked(struct inode *inode, int mask) 155int gfs2_check_acl(struct inode *inode, int mask)
156{ 156{
157 struct posix_acl *acl = NULL; 157 struct posix_acl *acl = NULL;
158 int error; 158 int error;
@@ -170,21 +170,6 @@ int gfs2_check_acl_locked(struct inode *inode, int mask)
170 return -EAGAIN; 170 return -EAGAIN;
171} 171}
172 172
173int gfs2_check_acl(struct inode *inode, int mask)
174{
175 struct gfs2_inode *ip = GFS2_I(inode);
176 struct gfs2_holder i_gh;
177 int error;
178
179 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
180 if (!error) {
181 error = gfs2_check_acl_locked(inode, mask);
182 gfs2_glock_dq_uninit(&i_gh);
183 }
184
185 return error;
186}
187
188static int munge_mode(struct gfs2_inode *ip, mode_t mode) 173static int munge_mode(struct gfs2_inode *ip, mode_t mode)
189{ 174{
190 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 175 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@@ -198,10 +183,10 @@ static int munge_mode(struct gfs2_inode *ip, mode_t mode)
198 error = gfs2_meta_inode_buffer(ip, &dibh); 183 error = gfs2_meta_inode_buffer(ip, &dibh);
199 if (!error) { 184 if (!error) {
200 gfs2_assert_withdraw(sdp, 185 gfs2_assert_withdraw(sdp,
201 (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT)); 186 (ip->i_inode.i_mode & S_IFMT) == (mode & S_IFMT));
202 ip->i_di.di_mode = mode; 187 ip->i_inode.i_mode = mode;
203 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 188 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
204 gfs2_dinode_out(&ip->i_di, dibh->b_data); 189 gfs2_dinode_out(ip, dibh->b_data);
205 brelse(dibh); 190 brelse(dibh);
206 } 191 }
207 192
@@ -215,12 +200,12 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
215 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 200 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
216 struct posix_acl *acl = NULL, *clone; 201 struct posix_acl *acl = NULL, *clone;
217 struct gfs2_ea_request er; 202 struct gfs2_ea_request er;
218 mode_t mode = ip->i_di.di_mode; 203 mode_t mode = ip->i_inode.i_mode;
219 int error; 204 int error;
220 205
221 if (!sdp->sd_args.ar_posix_acl) 206 if (!sdp->sd_args.ar_posix_acl)
222 return 0; 207 return 0;
223 if (S_ISLNK(ip->i_di.di_mode)) 208 if (S_ISLNK(ip->i_inode.i_mode))
224 return 0; 209 return 0;
225 210
226 memset(&er, 0, sizeof(struct gfs2_ea_request)); 211 memset(&er, 0, sizeof(struct gfs2_ea_request));
@@ -232,7 +217,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
232 return error; 217 return error;
233 if (!acl) { 218 if (!acl) {
234 mode &= ~current->fs->umask; 219 mode &= ~current->fs->umask;
235 if (mode != ip->i_di.di_mode) 220 if (mode != ip->i_inode.i_mode)
236 error = munge_mode(ip, mode); 221 error = munge_mode(ip, mode);
237 return error; 222 return error;
238 } 223 }
@@ -244,7 +229,7 @@ int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
244 posix_acl_release(acl); 229 posix_acl_release(acl);
245 acl = clone; 230 acl = clone;
246 231
247 if (S_ISDIR(ip->i_di.di_mode)) { 232 if (S_ISDIR(ip->i_inode.i_mode)) {
248 er.er_name = GFS2_POSIX_ACL_DEFAULT; 233 er.er_name = GFS2_POSIX_ACL_DEFAULT;
249 er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN; 234 er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
250 error = gfs2_system_eaops.eo_set(ip, &er); 235 error = gfs2_system_eaops.eo_set(ip, &er);
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
index 05c294fe0d78..6751930bfb64 100644
--- a/fs/gfs2/acl.h
+++ b/fs/gfs2/acl.h
@@ -31,7 +31,6 @@ int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
31 struct gfs2_ea_request *er, 31 struct gfs2_ea_request *er,
32 int *remove, mode_t *mode); 32 int *remove, mode_t *mode);
33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access); 33int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
34int gfs2_check_acl_locked(struct inode *inode, int mask);
35int gfs2_check_acl(struct inode *inode, int mask); 34int gfs2_check_acl(struct inode *inode, int mask);
36int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip); 35int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
37int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); 36int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 06e9a8cb45e9..8240c1ff94f4 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -38,8 +38,8 @@ struct metapath {
38}; 38};
39 39
40typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh, 40typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
41 struct buffer_head *bh, u64 *top, 41 struct buffer_head *bh, __be64 *top,
42 u64 *bottom, unsigned int height, 42 __be64 *bottom, unsigned int height,
43 void *data); 43 void *data);
44 44
45struct strip_mine { 45struct strip_mine {
@@ -163,6 +163,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
163 if (ip->i_di.di_size) { 163 if (ip->i_di.di_size) {
164 *(__be64 *)(di + 1) = cpu_to_be64(block); 164 *(__be64 *)(di + 1) = cpu_to_be64(block);
165 ip->i_di.di_blocks++; 165 ip->i_di.di_blocks++;
166 gfs2_set_inode_blocks(&ip->i_inode);
166 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); 167 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
167 } 168 }
168 169
@@ -230,7 +231,7 @@ static int build_height(struct inode *inode, unsigned height)
230 struct buffer_head *blocks[GFS2_MAX_META_HEIGHT]; 231 struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
231 struct gfs2_dinode *di; 232 struct gfs2_dinode *di;
232 int error; 233 int error;
233 u64 *bp; 234 __be64 *bp;
234 u64 bn; 235 u64 bn;
235 unsigned n; 236 unsigned n;
236 237
@@ -255,7 +256,7 @@ static int build_height(struct inode *inode, unsigned height)
255 GFS2_FORMAT_IN); 256 GFS2_FORMAT_IN);
256 gfs2_buffer_clear_tail(blocks[n], 257 gfs2_buffer_clear_tail(blocks[n],
257 sizeof(struct gfs2_meta_header)); 258 sizeof(struct gfs2_meta_header));
258 bp = (u64 *)(blocks[n]->b_data + 259 bp = (__be64 *)(blocks[n]->b_data +
259 sizeof(struct gfs2_meta_header)); 260 sizeof(struct gfs2_meta_header));
260 *bp = cpu_to_be64(blocks[n+1]->b_blocknr); 261 *bp = cpu_to_be64(blocks[n+1]->b_blocknr);
261 brelse(blocks[n]); 262 brelse(blocks[n]);
@@ -272,6 +273,7 @@ static int build_height(struct inode *inode, unsigned height)
272 *(__be64 *)(di + 1) = cpu_to_be64(bn); 273 *(__be64 *)(di + 1) = cpu_to_be64(bn);
273 ip->i_di.di_height += new_height; 274 ip->i_di.di_height += new_height;
274 ip->i_di.di_blocks += new_height; 275 ip->i_di.di_blocks += new_height;
276 gfs2_set_inode_blocks(&ip->i_inode);
275 di->di_height = cpu_to_be16(ip->i_di.di_height); 277 di->di_height = cpu_to_be16(ip->i_di.di_height);
276 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks); 278 di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
277 brelse(dibh); 279 brelse(dibh);
@@ -360,15 +362,15 @@ static void find_metapath(struct gfs2_inode *ip, u64 block,
360 * metadata tree. 362 * metadata tree.
361 */ 363 */
362 364
363static inline u64 *metapointer(struct buffer_head *bh, int *boundary, 365static inline __be64 *metapointer(struct buffer_head *bh, int *boundary,
364 unsigned int height, const struct metapath *mp) 366 unsigned int height, const struct metapath *mp)
365{ 367{
366 unsigned int head_size = (height > 0) ? 368 unsigned int head_size = (height > 0) ?
367 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode); 369 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
368 u64 *ptr; 370 __be64 *ptr;
369 *boundary = 0; 371 *boundary = 0;
370 ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height]; 372 ptr = ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
371 if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size)) 373 if (ptr + 1 == (__be64 *)(bh->b_data + bh->b_size))
372 *boundary = 1; 374 *boundary = 1;
373 return ptr; 375 return ptr;
374} 376}
@@ -394,7 +396,7 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
394 int *new, u64 *block) 396 int *new, u64 *block)
395{ 397{
396 int boundary; 398 int boundary;
397 u64 *ptr = metapointer(bh, &boundary, height, mp); 399 __be64 *ptr = metapointer(bh, &boundary, height, mp);
398 400
399 if (*ptr) { 401 if (*ptr) {
400 *block = be64_to_cpu(*ptr); 402 *block = be64_to_cpu(*ptr);
@@ -415,17 +417,35 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
415 417
416 *ptr = cpu_to_be64(*block); 418 *ptr = cpu_to_be64(*block);
417 ip->i_di.di_blocks++; 419 ip->i_di.di_blocks++;
420 gfs2_set_inode_blocks(&ip->i_inode);
418 421
419 *new = 1; 422 *new = 1;
420 return 0; 423 return 0;
421} 424}
422 425
426static inline void bmap_lock(struct inode *inode, int create)
427{
428 struct gfs2_inode *ip = GFS2_I(inode);
429 if (create)
430 down_write(&ip->i_rw_mutex);
431 else
432 down_read(&ip->i_rw_mutex);
433}
434
435static inline void bmap_unlock(struct inode *inode, int create)
436{
437 struct gfs2_inode *ip = GFS2_I(inode);
438 if (create)
439 up_write(&ip->i_rw_mutex);
440 else
441 up_read(&ip->i_rw_mutex);
442}
443
423/** 444/**
424 * gfs2_block_pointers - Map a block from an inode to a disk block 445 * gfs2_block_map - Map a block from an inode to a disk block
425 * @inode: The inode 446 * @inode: The inode
426 * @lblock: The logical block number 447 * @lblock: The logical block number
427 * @map_bh: The bh to be mapped 448 * @bh_map: The bh to be mapped
428 * @mp: metapath to use
429 * 449 *
430 * Find the block number on the current device which corresponds to an 450 * Find the block number on the current device which corresponds to an
431 * inode's block. If the block had to be created, "new" will be set. 451 * inode's block. If the block had to be created, "new" will be set.
@@ -433,8 +453,8 @@ static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
433 * Returns: errno 453 * Returns: errno
434 */ 454 */
435 455
436static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create, 456int gfs2_block_map(struct inode *inode, u64 lblock, int create,
437 struct buffer_head *bh_map, struct metapath *mp) 457 struct buffer_head *bh_map)
438{ 458{
439 struct gfs2_inode *ip = GFS2_I(inode); 459 struct gfs2_inode *ip = GFS2_I(inode);
440 struct gfs2_sbd *sdp = GFS2_SB(inode); 460 struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -448,57 +468,61 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
448 u64 dblock = 0; 468 u64 dblock = 0;
449 int boundary; 469 int boundary;
450 unsigned int maxlen = bh_map->b_size >> inode->i_blkbits; 470 unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
471 struct metapath mp;
472 u64 size;
451 473
452 BUG_ON(maxlen == 0); 474 BUG_ON(maxlen == 0);
453 475
454 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip))) 476 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
455 return 0; 477 return 0;
456 478
479 bmap_lock(inode, create);
480 clear_buffer_mapped(bh_map);
481 clear_buffer_new(bh_map);
482 clear_buffer_boundary(bh_map);
457 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize; 483 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
458 484 size = (lblock + 1) * bsize;
459 height = calc_tree_height(ip, (lblock + 1) * bsize); 485
460 if (ip->i_di.di_height < height) { 486 if (size > ip->i_di.di_size) {
461 if (!create) 487 height = calc_tree_height(ip, size);
462 return 0; 488 if (ip->i_di.di_height < height) {
463 489 if (!create)
464 error = build_height(inode, height); 490 goto out_ok;
465 if (error) 491
466 return error; 492 error = build_height(inode, height);
493 if (error)
494 goto out_fail;
495 }
467 } 496 }
468 497
469 find_metapath(ip, lblock, mp); 498 find_metapath(ip, lblock, &mp);
470 end_of_metadata = ip->i_di.di_height - 1; 499 end_of_metadata = ip->i_di.di_height - 1;
471
472 error = gfs2_meta_inode_buffer(ip, &bh); 500 error = gfs2_meta_inode_buffer(ip, &bh);
473 if (error) 501 if (error)
474 return error; 502 goto out_fail;
475 503
476 for (x = 0; x < end_of_metadata; x++) { 504 for (x = 0; x < end_of_metadata; x++) {
477 lookup_block(ip, bh, x, mp, create, &new, &dblock); 505 lookup_block(ip, bh, x, &mp, create, &new, &dblock);
478 brelse(bh); 506 brelse(bh);
479 if (!dblock) 507 if (!dblock)
480 return 0; 508 goto out_ok;
481 509
482 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh); 510 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
483 if (error) 511 if (error)
484 return error; 512 goto out_fail;
485 } 513 }
486 514
487 boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock); 515 boundary = lookup_block(ip, bh, end_of_metadata, &mp, create, &new, &dblock);
488 clear_buffer_mapped(bh_map);
489 clear_buffer_new(bh_map);
490 clear_buffer_boundary(bh_map);
491
492 if (dblock) { 516 if (dblock) {
493 map_bh(bh_map, inode->i_sb, dblock); 517 map_bh(bh_map, inode->i_sb, dblock);
494 if (boundary) 518 if (boundary)
495 set_buffer_boundary(bh); 519 set_buffer_boundary(bh_map);
496 if (new) { 520 if (new) {
497 struct buffer_head *dibh; 521 struct buffer_head *dibh;
498 error = gfs2_meta_inode_buffer(ip, &dibh); 522 error = gfs2_meta_inode_buffer(ip, &dibh);
499 if (!error) { 523 if (!error) {
500 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 524 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
501 gfs2_dinode_out(&ip->i_di, dibh->b_data); 525 gfs2_dinode_out(ip, dibh->b_data);
502 brelse(dibh); 526 brelse(dibh);
503 } 527 }
504 set_buffer_new(bh_map); 528 set_buffer_new(bh_map);
@@ -507,8 +531,8 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
507 while(--maxlen && !buffer_boundary(bh_map)) { 531 while(--maxlen && !buffer_boundary(bh_map)) {
508 u64 eblock; 532 u64 eblock;
509 533
510 mp->mp_list[end_of_metadata]++; 534 mp.mp_list[end_of_metadata]++;
511 boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock); 535 boundary = lookup_block(ip, bh, end_of_metadata, &mp, 0, &new, &eblock);
512 if (eblock != ++dblock) 536 if (eblock != ++dblock)
513 break; 537 break;
514 bh_map->b_size += (1 << inode->i_blkbits); 538 bh_map->b_size += (1 << inode->i_blkbits);
@@ -518,43 +542,15 @@ static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
518 } 542 }
519out_brelse: 543out_brelse:
520 brelse(bh); 544 brelse(bh);
521 return 0; 545out_ok:
522} 546 error = 0;
523 547out_fail:
524
525static inline void bmap_lock(struct inode *inode, int create)
526{
527 struct gfs2_inode *ip = GFS2_I(inode);
528 if (create)
529 down_write(&ip->i_rw_mutex);
530 else
531 down_read(&ip->i_rw_mutex);
532}
533
534static inline void bmap_unlock(struct inode *inode, int create)
535{
536 struct gfs2_inode *ip = GFS2_I(inode);
537 if (create)
538 up_write(&ip->i_rw_mutex);
539 else
540 up_read(&ip->i_rw_mutex);
541}
542
543int gfs2_block_map(struct inode *inode, u64 lblock, int create,
544 struct buffer_head *bh)
545{
546 struct metapath mp;
547 int ret;
548
549 bmap_lock(inode, create);
550 ret = gfs2_block_pointers(inode, lblock, create, bh, &mp);
551 bmap_unlock(inode, create); 548 bmap_unlock(inode, create);
552 return ret; 549 return error;
553} 550}
554 551
555int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) 552int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
556{ 553{
557 struct metapath mp;
558 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; 554 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
559 int ret; 555 int ret;
560 int create = *new; 556 int create = *new;
@@ -564,9 +560,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
564 BUG_ON(!new); 560 BUG_ON(!new);
565 561
566 bh.b_size = 1 << (inode->i_blkbits + 5); 562 bh.b_size = 1 << (inode->i_blkbits + 5);
567 bmap_lock(inode, create); 563 ret = gfs2_block_map(inode, lblock, create, &bh);
568 ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp);
569 bmap_unlock(inode, create);
570 *extlen = bh.b_size >> inode->i_blkbits; 564 *extlen = bh.b_size >> inode->i_blkbits;
571 *dblock = bh.b_blocknr; 565 *dblock = bh.b_blocknr;
572 if (buffer_new(&bh)) 566 if (buffer_new(&bh))
@@ -600,7 +594,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
600{ 594{
601 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 595 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
602 struct buffer_head *bh = NULL; 596 struct buffer_head *bh = NULL;
603 u64 *top, *bottom; 597 __be64 *top, *bottom;
604 u64 bn; 598 u64 bn;
605 int error; 599 int error;
606 int mh_size = sizeof(struct gfs2_meta_header); 600 int mh_size = sizeof(struct gfs2_meta_header);
@@ -611,17 +605,17 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
611 return error; 605 return error;
612 dibh = bh; 606 dibh = bh;
613 607
614 top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; 608 top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
615 bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; 609 bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
616 } else { 610 } else {
617 error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); 611 error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
618 if (error) 612 if (error)
619 return error; 613 return error;
620 614
621 top = (u64 *)(bh->b_data + mh_size) + 615 top = (__be64 *)(bh->b_data + mh_size) +
622 (first ? mp->mp_list[height] : 0); 616 (first ? mp->mp_list[height] : 0);
623 617
624 bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; 618 bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
625 } 619 }
626 620
627 error = bc(ip, dibh, bh, top, bottom, height, data); 621 error = bc(ip, dibh, bh, top, bottom, height, data);
@@ -660,7 +654,7 @@ out:
660 */ 654 */
661 655
662static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, 656static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
663 struct buffer_head *bh, u64 *top, u64 *bottom, 657 struct buffer_head *bh, __be64 *top, __be64 *bottom,
664 unsigned int height, void *data) 658 unsigned int height, void *data)
665{ 659{
666 struct strip_mine *sm = data; 660 struct strip_mine *sm = data;
@@ -668,7 +662,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
668 struct gfs2_rgrp_list rlist; 662 struct gfs2_rgrp_list rlist;
669 u64 bn, bstart; 663 u64 bn, bstart;
670 u32 blen; 664 u32 blen;
671 u64 *p; 665 __be64 *p;
672 unsigned int rg_blocks = 0; 666 unsigned int rg_blocks = 0;
673 int metadata; 667 int metadata;
674 unsigned int revokes = 0; 668 unsigned int revokes = 0;
@@ -770,6 +764,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
770 if (!ip->i_di.di_blocks) 764 if (!ip->i_di.di_blocks)
771 gfs2_consist_inode(ip); 765 gfs2_consist_inode(ip);
772 ip->i_di.di_blocks--; 766 ip->i_di.di_blocks--;
767 gfs2_set_inode_blocks(&ip->i_inode);
773 } 768 }
774 if (bstart) { 769 if (bstart) {
775 if (metadata) 770 if (metadata)
@@ -778,9 +773,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
778 gfs2_free_data(ip, bstart, blen); 773 gfs2_free_data(ip, bstart, blen);
779 } 774 }
780 775
781 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 776 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
782 777
783 gfs2_dinode_out(&ip->i_di, dibh->b_data); 778 gfs2_dinode_out(ip, dibh->b_data);
784 779
785 up_write(&ip->i_rw_mutex); 780 up_write(&ip->i_rw_mutex);
786 781
@@ -819,7 +814,7 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
819 if (error) 814 if (error)
820 goto out; 815 goto out;
821 816
822 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 817 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
823 if (error) 818 if (error)
824 goto out_gunlock_q; 819 goto out_gunlock_q;
825 820
@@ -853,14 +848,14 @@ static int do_grow(struct gfs2_inode *ip, u64 size)
853 } 848 }
854 849
855 ip->i_di.di_size = size; 850 ip->i_di.di_size = size;
856 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 851 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
857 852
858 error = gfs2_meta_inode_buffer(ip, &dibh); 853 error = gfs2_meta_inode_buffer(ip, &dibh);
859 if (error) 854 if (error)
860 goto out_end_trans; 855 goto out_end_trans;
861 856
862 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
863 gfs2_dinode_out(&ip->i_di, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
864 brelse(dibh); 859 brelse(dibh);
865 860
866out_end_trans: 861out_end_trans:
@@ -968,9 +963,9 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
968 963
969 if (gfs2_is_stuffed(ip)) { 964 if (gfs2_is_stuffed(ip)) {
970 ip->i_di.di_size = size; 965 ip->i_di.di_size = size;
971 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 966 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
972 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 967 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
973 gfs2_dinode_out(&ip->i_di, dibh->b_data); 968 gfs2_dinode_out(ip, dibh->b_data);
974 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size); 969 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
975 error = 1; 970 error = 1;
976 971
@@ -980,10 +975,10 @@ static int trunc_start(struct gfs2_inode *ip, u64 size)
980 975
981 if (!error) { 976 if (!error) {
982 ip->i_di.di_size = size; 977 ip->i_di.di_size = size;
983 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 978 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
984 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG; 979 ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
985 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 980 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
986 gfs2_dinode_out(&ip->i_di, dibh->b_data); 981 gfs2_dinode_out(ip, dibh->b_data);
987 } 982 }
988 } 983 }
989 984
@@ -1053,11 +1048,11 @@ static int trunc_end(struct gfs2_inode *ip)
1053 ip->i_num.no_addr; 1048 ip->i_num.no_addr;
1054 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1049 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1055 } 1050 }
1056 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 1051 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
1057 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG; 1052 ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
1058 1053
1059 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1054 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1060 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1055 gfs2_dinode_out(ip, dibh->b_data);
1061 brelse(dibh); 1056 brelse(dibh);
1062 1057
1063out: 1058out:
@@ -1109,7 +1104,7 @@ int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
1109{ 1104{
1110 int error; 1105 int error;
1111 1106
1112 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode))) 1107 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
1113 return -EINVAL; 1108 return -EINVAL;
1114 1109
1115 if (size > ip->i_di.di_size) 1110 if (size > ip->i_di.di_size)
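
Several of the bmap.c hunks above only change pointer types from u64 * to __be64 *: GFS2 stores block numbers big-endian on disk, so every load or store needs an explicit cpu_to_be64()/be64_to_cpu() conversion, and the annotated type lets sparse flag any access that forgets one. A small userspace sketch of the same idea, using htobe64()/be64toh() in place of the kernel helpers (illustration only):

/* Editorial sketch: on-disk pointers kept in big-endian form, converted
 * explicitly at every access. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t disk_ptrs[4] = { 0 };	/* stand-in for a metadata block */
	uint64_t block = 123456789ULL;

	/* Store: CPU order -> on-disk (big-endian) order */
	disk_ptrs[0] = htobe64(block);

	/* Load: convert back to CPU order before using the value */
	printf("raw=0x%016llx decoded=%llu\n",
	       (unsigned long long)disk_ptrs[0],
	       (unsigned long long)be64toh(disk_ptrs[0]));
	return 0;
}
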
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
index cab1f68d4685..683cb5bda870 100644
--- a/fs/gfs2/daemon.c
+++ b/fs/gfs2/daemon.c
@@ -112,6 +112,7 @@ int gfs2_logd(void *data)
112 struct gfs2_sbd *sdp = data; 112 struct gfs2_sbd *sdp = data;
113 struct gfs2_holder ji_gh; 113 struct gfs2_holder ji_gh;
114 unsigned long t; 114 unsigned long t;
115 int need_flush;
115 116
116 while (!kthread_should_stop()) { 117 while (!kthread_should_stop()) {
117 /* Advance the log tail */ 118 /* Advance the log tail */
@@ -120,8 +121,10 @@ int gfs2_logd(void *data)
120 gfs2_tune_get(sdp, gt_log_flush_secs) * HZ; 121 gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
121 122
122 gfs2_ail1_empty(sdp, DIO_ALL); 123 gfs2_ail1_empty(sdp, DIO_ALL);
123 124 gfs2_log_lock(sdp);
124 if (time_after_eq(jiffies, t)) { 125 need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
126 gfs2_log_unlock(sdp);
127 if (need_flush || time_after_eq(jiffies, t)) {
125 gfs2_log_flush(sdp, NULL); 128 gfs2_log_flush(sdp, NULL);
126 sdp->sd_log_flush_time = jiffies; 129 sdp->sd_log_flush_time = jiffies;
127 } 130 }
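
The daemon.c hunk above makes gfs2_logd flush the log either when its periodic timer expires or when the count of incore log buffers, sampled under the log lock, exceeds the gt_incore_log_blocks tunable. A simplified userspace sketch of that decision — not the kernel code, names hypothetical:

/* Editorial sketch: flush when a locked counter crosses a threshold or a
 * deadline passes, whichever comes first. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static int log_num_buf;				/* incore buffered log blocks */
static const int incore_log_blocks = 1024;	/* tunable threshold */

static bool should_flush(time_t now, time_t deadline)
{
	bool need_flush;

	/* Sample the counter under the lock, as the patch does with
	 * gfs2_log_lock()/gfs2_log_unlock(). */
	pthread_mutex_lock(&log_lock);
	need_flush = log_num_buf > incore_log_blocks;
	pthread_mutex_unlock(&log_lock);

	return need_flush || now >= deadline;
}

int main(void)
{
	time_t now = time(NULL);

	log_num_buf = 2000;		/* over threshold: flush early */
	printf("flush=%d\n", should_flush(now, now + 30));

	log_num_buf = 10;		/* under threshold: wait for timer */
	printf("flush=%d\n", should_flush(now, now + 30));
	return 0;
}
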
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index e24af28b1a12..0fdcb7713cd9 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -131,8 +131,8 @@ static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size); 131 memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
132 if (ip->i_di.di_size < offset + size) 132 if (ip->i_di.di_size < offset + size)
133 ip->i_di.di_size = offset + size; 133 ip->i_di.di_size = offset + size;
134 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 134 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
135 gfs2_dinode_out(&ip->i_di, dibh->b_data); 135 gfs2_dinode_out(ip, dibh->b_data);
136 136
137 brelse(dibh); 137 brelse(dibh);
138 138
@@ -229,10 +229,10 @@ out:
229 229
230 if (ip->i_di.di_size < offset + copied) 230 if (ip->i_di.di_size < offset + copied)
231 ip->i_di.di_size = offset + copied; 231 ip->i_di.di_size = offset + copied;
232 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 232 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
233 233
234 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 234 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
235 gfs2_dinode_out(&ip->i_di, dibh->b_data); 235 gfs2_dinode_out(ip, dibh->b_data);
236 brelse(dibh); 236 brelse(dibh);
237 237
238 return copied; 238 return copied;
@@ -340,10 +340,15 @@ fail:
340 return (copied) ? copied : error; 340 return (copied) ? copied : error;
341} 341}
342 342
343static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
344{
345 return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
346}
347
343static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent, 348static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
344 const struct qstr *name, int ret) 349 const struct qstr *name, int ret)
345{ 350{
346 if (dent->de_inum.no_addr != 0 && 351 if (!gfs2_dirent_sentinel(dent) &&
347 be32_to_cpu(dent->de_hash) == name->hash && 352 be32_to_cpu(dent->de_hash) == name->hash &&
348 be16_to_cpu(dent->de_name_len) == name->len && 353 be16_to_cpu(dent->de_name_len) == name->len &&
349 memcmp(dent+1, name->name, name->len) == 0) 354 memcmp(dent+1, name->name, name->len) == 0)
@@ -388,7 +393,7 @@ static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
388 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); 393 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
389 unsigned totlen = be16_to_cpu(dent->de_rec_len); 394 unsigned totlen = be16_to_cpu(dent->de_rec_len);
390 395
391 if (!dent->de_inum.no_addr) 396 if (gfs2_dirent_sentinel(dent))
392 actual = GFS2_DIRENT_SIZE(0); 397 actual = GFS2_DIRENT_SIZE(0);
393 if (totlen - actual >= required) 398 if (totlen - actual >= required)
394 return 1; 399 return 1;
@@ -405,7 +410,7 @@ static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
405 void *opaque) 410 void *opaque)
406{ 411{
407 struct dirent_gather *g = opaque; 412 struct dirent_gather *g = opaque;
408 if (dent->de_inum.no_addr) { 413 if (!gfs2_dirent_sentinel(dent)) {
409 g->pdent[g->offset++] = dent; 414 g->pdent[g->offset++] = dent;
410 } 415 }
411 return 0; 416 return 0;
@@ -433,10 +438,10 @@ static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
433 if (unlikely(offset + size > len)) 438 if (unlikely(offset + size > len))
434 goto error; 439 goto error;
435 msg = "zero inode number"; 440 msg = "zero inode number";
436 if (unlikely(!first && !dent->de_inum.no_addr)) 441 if (unlikely(!first && gfs2_dirent_sentinel(dent)))
437 goto error; 442 goto error;
438 msg = "name length is greater than space in dirent"; 443 msg = "name length is greater than space in dirent";
439 if (dent->de_inum.no_addr && 444 if (!gfs2_dirent_sentinel(dent) &&
440 unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) > 445 unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
441 size)) 446 size))
442 goto error; 447 goto error;
@@ -598,7 +603,7 @@ static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
598 return ret; 603 return ret;
599 604
600 /* Only the first dent could ever have de_inum.no_addr == 0 */ 605 /* Only the first dent could ever have de_inum.no_addr == 0 */
601 if (!tmp->de_inum.no_addr) { 606 if (gfs2_dirent_sentinel(tmp)) {
602 gfs2_consist_inode(dip); 607 gfs2_consist_inode(dip);
603 return -EIO; 608 return -EIO;
604 } 609 }
@@ -621,7 +626,7 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
621{ 626{
622 u16 cur_rec_len, prev_rec_len; 627 u16 cur_rec_len, prev_rec_len;
623 628
624 if (!cur->de_inum.no_addr) { 629 if (gfs2_dirent_sentinel(cur)) {
625 gfs2_consist_inode(dip); 630 gfs2_consist_inode(dip);
626 return; 631 return;
627 } 632 }
@@ -633,7 +638,8 @@ static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
633 out the inode number and return. */ 638 out the inode number and return. */
634 639
635 if (!prev) { 640 if (!prev) {
636 cur->de_inum.no_addr = 0; /* No endianess worries */ 641 cur->de_inum.no_addr = 0;
642 cur->de_inum.no_formal_ino = 0;
637 return; 643 return;
638 } 644 }
639 645
@@ -664,7 +670,7 @@ static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
664 struct gfs2_dirent *ndent; 670 struct gfs2_dirent *ndent;
665 unsigned offset = 0, totlen; 671 unsigned offset = 0, totlen;
666 672
667 if (dent->de_inum.no_addr) 673 if (!gfs2_dirent_sentinel(dent))
668 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len)); 674 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
669 totlen = be16_to_cpu(dent->de_rec_len); 675 totlen = be16_to_cpu(dent->de_rec_len);
670 BUG_ON(offset + name->len > totlen); 676 BUG_ON(offset + name->len > totlen);
@@ -713,12 +719,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
713static int get_leaf_nr(struct gfs2_inode *dip, u32 index, 719static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
714 u64 *leaf_out) 720 u64 *leaf_out)
715{ 721{
716 u64 leaf_no; 722 __be64 leaf_no;
717 int error; 723 int error;
718 724
719 error = gfs2_dir_read_data(dip, (char *)&leaf_no, 725 error = gfs2_dir_read_data(dip, (char *)&leaf_no,
720 index * sizeof(u64), 726 index * sizeof(__be64),
721 sizeof(u64), 0); 727 sizeof(__be64), 0);
722 if (error != sizeof(u64)) 728 if (error != sizeof(u64))
723 return (error < 0) ? error : -EIO; 729 return (error < 0) ? error : -EIO;
724 730
@@ -837,7 +843,8 @@ static int dir_make_exhash(struct inode *inode)
837 struct gfs2_leaf *leaf; 843 struct gfs2_leaf *leaf;
838 int y; 844 int y;
839 u32 x; 845 u32 x;
840 u64 *lp, bn; 846 __be64 *lp;
847 u64 bn;
841 int error; 848 int error;
842 849
843 error = gfs2_meta_inode_buffer(dip, &dibh); 850 error = gfs2_meta_inode_buffer(dip, &dibh);
@@ -893,20 +900,20 @@ static int dir_make_exhash(struct inode *inode)
893 gfs2_trans_add_bh(dip->i_gl, dibh, 1); 900 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
894 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 901 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
895 902
896 lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode)); 903 lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
897 904
898 for (x = sdp->sd_hash_ptrs; x--; lp++) 905 for (x = sdp->sd_hash_ptrs; x--; lp++)
899 *lp = cpu_to_be64(bn); 906 *lp = cpu_to_be64(bn);
900 907
901 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2; 908 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
902 dip->i_di.di_blocks++; 909 dip->i_di.di_blocks++;
910 gfs2_set_inode_blocks(&dip->i_inode);
903 dip->i_di.di_flags |= GFS2_DIF_EXHASH; 911 dip->i_di.di_flags |= GFS2_DIF_EXHASH;
904 dip->i_di.di_payload_format = 0;
905 912
906 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ; 913 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
907 dip->i_di.di_depth = y; 914 dip->i_di.di_depth = y;
908 915
909 gfs2_dinode_out(&dip->i_di, dibh->b_data); 916 gfs2_dinode_out(dip, dibh->b_data);
910 917
911 brelse(dibh); 918 brelse(dibh);
912 919
@@ -929,7 +936,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
929 struct gfs2_leaf *nleaf, *oleaf; 936 struct gfs2_leaf *nleaf, *oleaf;
930 struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new; 937 struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
931 u32 start, len, half_len, divider; 938 u32 start, len, half_len, divider;
932 u64 bn, *lp, leaf_no; 939 u64 bn, leaf_no;
940 __be64 *lp;
933 u32 index; 941 u32 index;
934 int x, moved = 0; 942 int x, moved = 0;
935 int error; 943 int error;
@@ -974,7 +982,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
974 /* Change the pointers. 982 /* Change the pointers.
975 Don't bother distinguishing stuffed from non-stuffed. 983 Don't bother distinguishing stuffed from non-stuffed.
976 This code is complicated enough already. */ 984 This code is complicated enough already. */
977 lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL); 985 lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS | __GFP_NOFAIL);
978 /* Change the pointers */ 986 /* Change the pointers */
979 for (x = 0; x < half_len; x++) 987 for (x = 0; x < half_len; x++)
980 lp[x] = cpu_to_be64(bn); 988 lp[x] = cpu_to_be64(bn);
@@ -1000,7 +1008,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1000 if (dirent_next(dip, obh, &next)) 1008 if (dirent_next(dip, obh, &next))
1001 next = NULL; 1009 next = NULL;
1002 1010
1003 if (dent->de_inum.no_addr && 1011 if (!gfs2_dirent_sentinel(dent) &&
1004 be32_to_cpu(dent->de_hash) < divider) { 1012 be32_to_cpu(dent->de_hash) < divider) {
1005 struct qstr str; 1013 struct qstr str;
1006 str.name = (char*)(dent+1); 1014 str.name = (char*)(dent+1);
@@ -1037,7 +1045,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
1037 error = gfs2_meta_inode_buffer(dip, &dibh); 1045 error = gfs2_meta_inode_buffer(dip, &dibh);
1038 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) { 1046 if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
1039 dip->i_di.di_blocks++; 1047 dip->i_di.di_blocks++;
1040 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1048 gfs2_set_inode_blocks(&dip->i_inode);
1049 gfs2_dinode_out(dip, dibh->b_data);
1041 brelse(dibh); 1050 brelse(dibh);
1042 } 1051 }
1043 1052
@@ -1117,7 +1126,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
1117 error = gfs2_meta_inode_buffer(dip, &dibh); 1126 error = gfs2_meta_inode_buffer(dip, &dibh);
1118 if (!gfs2_assert_withdraw(sdp, !error)) { 1127 if (!gfs2_assert_withdraw(sdp, !error)) {
1119 dip->i_di.di_depth++; 1128 dip->i_di.di_depth++;
1120 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1129 gfs2_dinode_out(dip, dibh->b_data);
1121 brelse(dibh); 1130 brelse(dibh);
1122 } 1131 }
1123 1132
@@ -1194,7 +1203,7 @@ static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
1194 int *copied) 1203 int *copied)
1195{ 1204{
1196 const struct gfs2_dirent *dent, *dent_next; 1205 const struct gfs2_dirent *dent, *dent_next;
1197 struct gfs2_inum inum; 1206 struct gfs2_inum_host inum;
1198 u64 off, off_next; 1207 u64 off, off_next;
1199 unsigned int x, y; 1208 unsigned int x, y;
1200 int run = 0; 1209 int run = 0;
@@ -1341,7 +1350,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1341 u32 hsize, len = 0; 1350 u32 hsize, len = 0;
1342 u32 ht_offset, lp_offset, ht_offset_cur = -1; 1351 u32 ht_offset, lp_offset, ht_offset_cur = -1;
1343 u32 hash, index; 1352 u32 hash, index;
1344 u64 *lp; 1353 __be64 *lp;
1345 int copied = 0; 1354 int copied = 0;
1346 int error = 0; 1355 int error = 0;
1347 unsigned depth = 0; 1356 unsigned depth = 0;
@@ -1365,7 +1374,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
1365 1374
1366 if (ht_offset_cur != ht_offset) { 1375 if (ht_offset_cur != ht_offset) {
1367 error = gfs2_dir_read_data(dip, (char *)lp, 1376 error = gfs2_dir_read_data(dip, (char *)lp,
1368 ht_offset * sizeof(u64), 1377 ht_offset * sizeof(__be64),
1369 sdp->sd_hash_bsize, 1); 1378 sdp->sd_hash_bsize, 1);
1370 if (error != sdp->sd_hash_bsize) { 1379 if (error != sdp->sd_hash_bsize) {
1371 if (error >= 0) 1380 if (error >= 0)
@@ -1456,7 +1465,7 @@ out:
1456 */ 1465 */
1457 1466
1458int gfs2_dir_search(struct inode *dir, const struct qstr *name, 1467int gfs2_dir_search(struct inode *dir, const struct qstr *name,
1459 struct gfs2_inum *inum, unsigned int *type) 1468 struct gfs2_inum_host *inum, unsigned int *type)
1460{ 1469{
1461 struct buffer_head *bh; 1470 struct buffer_head *bh;
1462 struct gfs2_dirent *dent; 1471 struct gfs2_dirent *dent;
@@ -1515,7 +1524,8 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1515 return error; 1524 return error;
1516 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1525 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1517 ip->i_di.di_blocks++; 1526 ip->i_di.di_blocks++;
1518 gfs2_dinode_out(&ip->i_di, bh->b_data); 1527 gfs2_set_inode_blocks(&ip->i_inode);
1528 gfs2_dinode_out(ip, bh->b_data);
1519 brelse(bh); 1529 brelse(bh);
1520 return 0; 1530 return 0;
1521} 1531}
@@ -1531,7 +1541,7 @@ static int dir_new_leaf(struct inode *inode, const struct qstr *name)
1531 */ 1541 */
1532 1542
1533int gfs2_dir_add(struct inode *inode, const struct qstr *name, 1543int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1534 const struct gfs2_inum *inum, unsigned type) 1544 const struct gfs2_inum_host *inum, unsigned type)
1535{ 1545{
1536 struct gfs2_inode *ip = GFS2_I(inode); 1546 struct gfs2_inode *ip = GFS2_I(inode);
1537 struct buffer_head *bh; 1547 struct buffer_head *bh;
@@ -1558,8 +1568,8 @@ int gfs2_dir_add(struct inode *inode, const struct qstr *name,
1558 break; 1568 break;
1559 gfs2_trans_add_bh(ip->i_gl, bh, 1); 1569 gfs2_trans_add_bh(ip->i_gl, bh, 1);
1560 ip->i_di.di_entries++; 1570 ip->i_di.di_entries++;
1561 ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds(); 1571 ip->i_inode.i_mtime.tv_sec = ip->i_inode.i_ctime.tv_sec = get_seconds();
1562 gfs2_dinode_out(&ip->i_di, bh->b_data); 1572 gfs2_dinode_out(ip, bh->b_data);
1563 brelse(bh); 1573 brelse(bh);
1564 error = 0; 1574 error = 0;
1565 break; 1575 break;
@@ -1644,8 +1654,8 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1644 gfs2_consist_inode(dip); 1654 gfs2_consist_inode(dip);
1645 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1655 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1646 dip->i_di.di_entries--; 1656 dip->i_di.di_entries--;
1647 dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); 1657 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
1648 gfs2_dinode_out(&dip->i_di, bh->b_data); 1658 gfs2_dinode_out(dip, bh->b_data);
1649 brelse(bh); 1659 brelse(bh);
1650 mark_inode_dirty(&dip->i_inode); 1660 mark_inode_dirty(&dip->i_inode);
1651 1661
@@ -1666,7 +1676,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
1666 */ 1676 */
1667 1677
1668int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 1678int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1669 struct gfs2_inum *inum, unsigned int new_type) 1679 struct gfs2_inum_host *inum, unsigned int new_type)
1670{ 1680{
1671 struct buffer_head *bh; 1681 struct buffer_head *bh;
1672 struct gfs2_dirent *dent; 1682 struct gfs2_dirent *dent;
@@ -1692,8 +1702,8 @@ int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
1692 gfs2_trans_add_bh(dip->i_gl, bh, 1); 1702 gfs2_trans_add_bh(dip->i_gl, bh, 1);
1693 } 1703 }
1694 1704
1695 dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds(); 1705 dip->i_inode.i_mtime.tv_sec = dip->i_inode.i_ctime.tv_sec = get_seconds();
1696 gfs2_dinode_out(&dip->i_di, bh->b_data); 1706 gfs2_dinode_out(dip, bh->b_data);
1697 brelse(bh); 1707 brelse(bh);
1698 return 0; 1708 return 0;
1699} 1709}
@@ -1715,7 +1725,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1715 u32 hsize, len; 1725 u32 hsize, len;
1716 u32 ht_offset, lp_offset, ht_offset_cur = -1; 1726 u32 ht_offset, lp_offset, ht_offset_cur = -1;
1717 u32 index = 0; 1727 u32 index = 0;
1718 u64 *lp; 1728 __be64 *lp;
1719 u64 leaf_no; 1729 u64 leaf_no;
1720 int error = 0; 1730 int error = 0;
1721 1731
@@ -1735,7 +1745,7 @@ static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
1735 1745
1736 if (ht_offset_cur != ht_offset) { 1746 if (ht_offset_cur != ht_offset) {
1737 error = gfs2_dir_read_data(dip, (char *)lp, 1747 error = gfs2_dir_read_data(dip, (char *)lp,
1738 ht_offset * sizeof(u64), 1748 ht_offset * sizeof(__be64),
1739 sdp->sd_hash_bsize, 1); 1749 sdp->sd_hash_bsize, 1);
1740 if (error != sdp->sd_hash_bsize) { 1750 if (error != sdp->sd_hash_bsize) {
1741 if (error >= 0) 1751 if (error >= 0)
@@ -1859,6 +1869,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1859 if (!dip->i_di.di_blocks) 1869 if (!dip->i_di.di_blocks)
1860 gfs2_consist_inode(dip); 1870 gfs2_consist_inode(dip);
1861 dip->i_di.di_blocks--; 1871 dip->i_di.di_blocks--;
1872 gfs2_set_inode_blocks(&dip->i_inode);
1862 } 1873 }
1863 1874
1864 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size); 1875 error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
@@ -1873,7 +1884,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1873 goto out_end_trans; 1884 goto out_end_trans;
1874 1885
1875 gfs2_trans_add_bh(dip->i_gl, dibh, 1); 1886 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
1876 gfs2_dinode_out(&dip->i_di, dibh->b_data); 1887 gfs2_dinode_out(dip, dibh->b_data);
1877 brelse(dibh); 1888 brelse(dibh);
1878 1889
1879out_end_trans: 1890out_end_trans:
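
Editor's note: the dir.c hunks above replace every open-coded "de_inum.no_addr == 0" test with the new gfs2_dirent_sentinel() helper, and dirent_del() now clears both on-disk inum fields when it turns the first entry of a block into a sentinel. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the struct, the field names and be64toh()/htobe64() are stand-ins for the GFS2 on-disk dirent and the kernel's byte-order macros.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* Hypothetical stand-in for the on-disk directory entry: both inum
 * fields are stored big-endian, as in the GFS2 on-disk format. */
struct dirent_disk {
	uint64_t no_formal_ino;		/* big-endian on disk */
	uint64_t no_addr;		/* big-endian on disk */
};

/* A sentinel (unused) slot is one whose inode address or formal inode
 * number is zero; callers test this instead of open-coding the field
 * comparison everywhere. */
static inline int dirent_is_sentinel(const struct dirent_disk *d)
{
	return be64toh(d->no_addr) == 0 || be64toh(d->no_formal_ino) == 0;
}

/* Deleting the first entry of a block cannot merge it into a previous
 * record, so the slot becomes a sentinel by zeroing both fields. */
static void dirent_make_sentinel(struct dirent_disk *d)
{
	d->no_addr = 0;
	d->no_formal_ino = 0;
}

int main(void)
{
	struct dirent_disk d = { htobe64(42), htobe64(1234) };

	printf("in use:    %d\n", !dirent_is_sentinel(&d));
	dirent_make_sentinel(&d);
	printf("after del: %d\n", !dirent_is_sentinel(&d));
	return 0;
}

Keeping the test in one helper means a later change to what counts as "unused" (here, also treating a zero no_formal_ino as unused) touches one place instead of every caller.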
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index 371233419b07..b21b33668a5b 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -31,17 +31,17 @@ struct gfs2_inum;
31typedef int (*gfs2_filldir_t) (void *opaque, 31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length, 32 const char *name, unsigned int length,
33 u64 offset, 33 u64 offset,
34 struct gfs2_inum *inum, unsigned int type); 34 struct gfs2_inum_host *inum, unsigned int type);
35 35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename, 36int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum *inum, unsigned int *type); 37 struct gfs2_inum_host *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename, 38int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum *inum, unsigned int type); 39 const struct gfs2_inum_host *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename); 40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque, 41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
42 gfs2_filldir_t filldir); 42 gfs2_filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename, 43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum *new_inum, unsigned int new_type); 44 struct gfs2_inum_host *new_inum, unsigned int new_type);
45 45
46int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip); 46int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
47 47
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index 92c54e9b0dc3..cd747c00f670 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -120,7 +120,7 @@ static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
120 120
121 if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) { 121 if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
122 if (!(er->er_flags & GFS2_ERF_MODE)) { 122 if (!(er->er_flags & GFS2_ERF_MODE)) {
123 er->er_mode = ip->i_di.di_mode; 123 er->er_mode = ip->i_inode.i_mode;
124 er->er_flags |= GFS2_ERF_MODE; 124 er->er_flags |= GFS2_ERF_MODE;
125 } 125 }
126 error = gfs2_acl_validate_set(ip, 1, er, 126 error = gfs2_acl_validate_set(ip, 1, er,
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index a65a4ccfd4dd..ebebbdcd7057 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -112,7 +112,7 @@ fail:
112static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) 112static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
113{ 113{
114 struct buffer_head *bh, *eabh; 114 struct buffer_head *bh, *eabh;
115 u64 *eablk, *end; 115 __be64 *eablk, *end;
116 int error; 116 int error;
117 117
118 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh); 118 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
@@ -129,7 +129,7 @@ static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
129 goto out; 129 goto out;
130 } 130 }
131 131
132 eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); 132 eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
133 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; 133 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
134 134
135 for (; eablk < end; eablk++) { 135 for (; eablk < end; eablk++) {
@@ -224,7 +224,8 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
224 struct gfs2_rgrpd *rgd; 224 struct gfs2_rgrpd *rgd;
225 struct gfs2_holder rg_gh; 225 struct gfs2_holder rg_gh;
226 struct buffer_head *dibh; 226 struct buffer_head *dibh;
227 u64 *dataptrs, bn = 0; 227 __be64 *dataptrs;
228 u64 bn = 0;
228 u64 bstart = 0; 229 u64 bstart = 0;
229 unsigned int blen = 0; 230 unsigned int blen = 0;
230 unsigned int blks = 0; 231 unsigned int blks = 0;
@@ -280,6 +281,7 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
280 if (!ip->i_di.di_blocks) 281 if (!ip->i_di.di_blocks)
281 gfs2_consist_inode(ip); 282 gfs2_consist_inode(ip);
282 ip->i_di.di_blocks--; 283 ip->i_di.di_blocks--;
284 gfs2_set_inode_blocks(&ip->i_inode);
283 } 285 }
284 if (bstart) 286 if (bstart)
285 gfs2_free_meta(ip, bstart, blen); 287 gfs2_free_meta(ip, bstart, blen);
@@ -299,9 +301,9 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
299 301
300 error = gfs2_meta_inode_buffer(ip, &dibh); 302 error = gfs2_meta_inode_buffer(ip, &dibh);
301 if (!error) { 303 if (!error) {
302 ip->i_di.di_ctime = get_seconds(); 304 ip->i_inode.i_ctime.tv_sec = get_seconds();
303 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 305 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
304 gfs2_dinode_out(&ip->i_di, dibh->b_data); 306 gfs2_dinode_out(ip, dibh->b_data);
305 brelse(dibh); 307 brelse(dibh);
306 } 308 }
307 309
@@ -444,7 +446,7 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
444 struct buffer_head **bh; 446 struct buffer_head **bh;
445 unsigned int amount = GFS2_EA_DATA_LEN(ea); 447 unsigned int amount = GFS2_EA_DATA_LEN(ea);
446 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 448 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
447 u64 *dataptrs = GFS2_EA2DATAPTRS(ea); 449 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
448 unsigned int x; 450 unsigned int x;
449 int error = 0; 451 int error = 0;
450 452
@@ -597,6 +599,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
597 ea->ea_num_ptrs = 0; 599 ea->ea_num_ptrs = 0;
598 600
599 ip->i_di.di_blocks++; 601 ip->i_di.di_blocks++;
602 gfs2_set_inode_blocks(&ip->i_inode);
600 603
601 return 0; 604 return 0;
602} 605}
@@ -629,7 +632,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
629 ea->ea_num_ptrs = 0; 632 ea->ea_num_ptrs = 0;
630 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); 633 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
631 } else { 634 } else {
632 u64 *dataptr = GFS2_EA2DATAPTRS(ea); 635 __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
633 const char *data = er->er_data; 636 const char *data = er->er_data;
634 unsigned int data_len = er->er_data_len; 637 unsigned int data_len = er->er_data_len;
635 unsigned int copy; 638 unsigned int copy;
@@ -648,6 +651,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
648 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 651 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
649 652
650 ip->i_di.di_blocks++; 653 ip->i_di.di_blocks++;
654 gfs2_set_inode_blocks(&ip->i_inode);
651 655
652 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : 656 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
653 data_len; 657 data_len;
@@ -686,7 +690,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
686 if (error) 690 if (error)
687 goto out; 691 goto out;
688 692
689 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 693 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
690 if (error) 694 if (error)
691 goto out_gunlock_q; 695 goto out_gunlock_q;
692 696
@@ -710,13 +714,13 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
710 if (!error) { 714 if (!error) {
711 if (er->er_flags & GFS2_ERF_MODE) { 715 if (er->er_flags & GFS2_ERF_MODE) {
712 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 716 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
713 (ip->i_di.di_mode & S_IFMT) == 717 (ip->i_inode.i_mode & S_IFMT) ==
714 (er->er_mode & S_IFMT)); 718 (er->er_mode & S_IFMT));
715 ip->i_di.di_mode = er->er_mode; 719 ip->i_inode.i_mode = er->er_mode;
716 } 720 }
717 ip->i_di.di_ctime = get_seconds(); 721 ip->i_inode.i_ctime.tv_sec = get_seconds();
718 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 722 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
719 gfs2_dinode_out(&ip->i_di, dibh->b_data); 723 gfs2_dinode_out(ip, dibh->b_data);
720 brelse(dibh); 724 brelse(dibh);
721 } 725 }
722 726
@@ -846,12 +850,12 @@ static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
846 850
847 if (er->er_flags & GFS2_ERF_MODE) { 851 if (er->er_flags & GFS2_ERF_MODE) {
848 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 852 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
849 (ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT)); 853 (ip->i_inode.i_mode & S_IFMT) == (er->er_mode & S_IFMT));
850 ip->i_di.di_mode = er->er_mode; 854 ip->i_inode.i_mode = er->er_mode;
851 } 855 }
852 ip->i_di.di_ctime = get_seconds(); 856 ip->i_inode.i_ctime.tv_sec = get_seconds();
853 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 857 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
854 gfs2_dinode_out(&ip->i_di, dibh->b_data); 858 gfs2_dinode_out(ip, dibh->b_data);
855 brelse(dibh); 859 brelse(dibh);
856out: 860out:
857 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 861 gfs2_trans_end(GFS2_SB(&ip->i_inode));
@@ -931,12 +935,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
931{ 935{
932 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 936 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
933 struct buffer_head *indbh, *newbh; 937 struct buffer_head *indbh, *newbh;
934 u64 *eablk; 938 __be64 *eablk;
935 int error; 939 int error;
936 int mh_size = sizeof(struct gfs2_meta_header); 940 int mh_size = sizeof(struct gfs2_meta_header);
937 941
938 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) { 942 if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
939 u64 *end; 943 __be64 *end;
940 944
941 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, 945 error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
942 &indbh); 946 &indbh);
@@ -948,7 +952,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
948 goto out; 952 goto out;
949 } 953 }
950 954
951 eablk = (u64 *)(indbh->b_data + mh_size); 955 eablk = (__be64 *)(indbh->b_data + mh_size);
952 end = eablk + sdp->sd_inptrs; 956 end = eablk + sdp->sd_inptrs;
953 957
954 for (; eablk < end; eablk++) 958 for (; eablk < end; eablk++)
@@ -971,11 +975,12 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
971 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 975 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
972 gfs2_buffer_clear_tail(indbh, mh_size); 976 gfs2_buffer_clear_tail(indbh, mh_size);
973 977
974 eablk = (u64 *)(indbh->b_data + mh_size); 978 eablk = (__be64 *)(indbh->b_data + mh_size);
975 *eablk = cpu_to_be64(ip->i_di.di_eattr); 979 *eablk = cpu_to_be64(ip->i_di.di_eattr);
976 ip->i_di.di_eattr = blk; 980 ip->i_di.di_eattr = blk;
977 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT; 981 ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
978 ip->i_di.di_blocks++; 982 ip->i_di.di_blocks++;
983 gfs2_set_inode_blocks(&ip->i_inode);
979 984
980 eablk++; 985 eablk++;
981 } 986 }
@@ -1129,9 +1134,9 @@ static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1129 1134
1130 error = gfs2_meta_inode_buffer(ip, &dibh); 1135 error = gfs2_meta_inode_buffer(ip, &dibh);
1131 if (!error) { 1136 if (!error) {
1132 ip->i_di.di_ctime = get_seconds(); 1137 ip->i_inode.i_ctime.tv_sec = get_seconds();
1133 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1138 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1134 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1139 gfs2_dinode_out(ip, dibh->b_data);
1135 brelse(dibh); 1140 brelse(dibh);
1136 } 1141 }
1137 1142
@@ -1202,7 +1207,7 @@ static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1202 struct buffer_head **bh; 1207 struct buffer_head **bh;
1203 unsigned int amount = GFS2_EA_DATA_LEN(ea); 1208 unsigned int amount = GFS2_EA_DATA_LEN(ea);
1204 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 1209 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1205 u64 *dataptrs = GFS2_EA2DATAPTRS(ea); 1210 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
1206 unsigned int x; 1211 unsigned int x;
1207 int error; 1212 int error;
1208 1213
@@ -1284,9 +1289,8 @@ int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1284 if (!error) { 1289 if (!error) {
1285 error = inode_setattr(&ip->i_inode, attr); 1290 error = inode_setattr(&ip->i_inode, attr);
1286 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); 1291 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1287 gfs2_inode_attr_out(ip);
1288 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1292 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1289 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1293 gfs2_dinode_out(ip, dibh->b_data);
1290 brelse(dibh); 1294 brelse(dibh);
1291 } 1295 }
1292 1296
@@ -1300,7 +1304,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1300 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1304 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1301 struct gfs2_rgrp_list rlist; 1305 struct gfs2_rgrp_list rlist;
1302 struct buffer_head *indbh, *dibh; 1306 struct buffer_head *indbh, *dibh;
1303 u64 *eablk, *end; 1307 __be64 *eablk, *end;
1304 unsigned int rg_blocks = 0; 1308 unsigned int rg_blocks = 0;
1305 u64 bstart = 0; 1309 u64 bstart = 0;
1306 unsigned int blen = 0; 1310 unsigned int blen = 0;
@@ -1319,7 +1323,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1319 goto out; 1323 goto out;
1320 } 1324 }
1321 1325
1322 eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1326 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1323 end = eablk + sdp->sd_inptrs; 1327 end = eablk + sdp->sd_inptrs;
1324 1328
1325 for (; eablk < end; eablk++) { 1329 for (; eablk < end; eablk++) {
@@ -1363,7 +1367,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1363 1367
1364 gfs2_trans_add_bh(ip->i_gl, indbh, 1); 1368 gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1365 1369
1366 eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1370 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1367 bstart = 0; 1371 bstart = 0;
1368 blen = 0; 1372 blen = 0;
1369 1373
@@ -1387,6 +1391,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1387 if (!ip->i_di.di_blocks) 1391 if (!ip->i_di.di_blocks)
1388 gfs2_consist_inode(ip); 1392 gfs2_consist_inode(ip);
1389 ip->i_di.di_blocks--; 1393 ip->i_di.di_blocks--;
1394 gfs2_set_inode_blocks(&ip->i_inode);
1390 } 1395 }
1391 if (bstart) 1396 if (bstart)
1392 gfs2_free_meta(ip, bstart, blen); 1397 gfs2_free_meta(ip, bstart, blen);
@@ -1396,7 +1401,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1396 error = gfs2_meta_inode_buffer(ip, &dibh); 1401 error = gfs2_meta_inode_buffer(ip, &dibh);
1397 if (!error) { 1402 if (!error) {
1398 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1403 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1399 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1404 gfs2_dinode_out(ip, dibh->b_data);
1400 brelse(dibh); 1405 brelse(dibh);
1401 } 1406 }
1402 1407
@@ -1441,11 +1446,12 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
1441 if (!ip->i_di.di_blocks) 1446 if (!ip->i_di.di_blocks)
1442 gfs2_consist_inode(ip); 1447 gfs2_consist_inode(ip);
1443 ip->i_di.di_blocks--; 1448 ip->i_di.di_blocks--;
1449 gfs2_set_inode_blocks(&ip->i_inode);
1444 1450
1445 error = gfs2_meta_inode_buffer(ip, &dibh); 1451 error = gfs2_meta_inode_buffer(ip, &dibh);
1446 if (!error) { 1452 if (!error) {
1447 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1453 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1448 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1454 gfs2_dinode_out(ip, dibh->b_data);
1449 brelse(dibh); 1455 brelse(dibh);
1450 } 1456 }
1451 1457
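
Editor's note: another recurring change in the eattr.c (and dir.c) hunks is that every di_blocks++/-- is now followed by gfs2_set_inode_blocks(&ip->i_inode), keeping the VFS i_blocks field (counted in 512-byte sectors) in step with the dinode's block count (counted in filesystem blocks). The gfs2_inode_attr_in() removed further down shows the conversion as a shift by sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT. A tiny standalone sketch of that unit conversion, with invented names:

#include <stdint.h>
#include <stdio.h>

#define BASIC_BLOCK_SHIFT 9	/* i_blocks counts 512-byte sectors */

/* di_blocks counts filesystem blocks; the VFS i_blocks field counts
 * 512-byte sectors, so every change to di_blocks has to be mirrored
 * with a shift by (block size shift - 9). */
static uint64_t fs_blocks_to_sectors(uint64_t di_blocks,
				     unsigned int bsize_shift)
{
	return di_blocks << (bsize_shift - BASIC_BLOCK_SHIFT);
}

int main(void)
{
	/* 4096-byte blocks: shift of 12, so one fs block is 8 sectors. */
	printf("%llu\n",
	       (unsigned long long)fs_blocks_to_sectors(3, 12));	/* 24 */
	return 0;
}

With a 4 KiB block size each filesystem block is 8 sectors, so 3 blocks report as 24 in i_blocks.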
diff --git a/fs/gfs2/eattr.h b/fs/gfs2/eattr.h
index ffa65947d686..c82dbe01d713 100644
--- a/fs/gfs2/eattr.h
+++ b/fs/gfs2/eattr.h
@@ -19,7 +19,7 @@ struct iattr;
19#define GFS2_EA_SIZE(ea) \ 19#define GFS2_EA_SIZE(ea) \
20ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \ 20ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
21 ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \ 21 ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
22 (sizeof(u64) * (ea)->ea_num_ptrs)), 8) 22 (sizeof(__be64) * (ea)->ea_num_ptrs)), 8)
23 23
24#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs) 24#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
25#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST) 25#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
@@ -29,13 +29,13 @@ ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
29 29
30#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \ 30#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
31ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \ 31ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
32 sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8) 32 sizeof(__be64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
33 33
34#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1)) 34#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
35#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len) 35#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
36 36
37#define GFS2_EA2DATAPTRS(ea) \ 37#define GFS2_EA2DATAPTRS(ea) \
38((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8))) 38((__be64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
39 39
40#define GFS2_EA2NEXT(ea) \ 40#define GFS2_EA2NEXT(ea) \
41((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea))) 41((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 55f5333dae99..438146904b58 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -96,7 +96,7 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
96 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)]; 96 return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
97} 97}
98#else /* not SMP, so no spinlocks required */ 98#else /* not SMP, so no spinlocks required */
99static inline rwlock_t *gl_lock_addr(x) 99static inline rwlock_t *gl_lock_addr(unsigned int x)
100{ 100{
101 return NULL; 101 return NULL;
102} 102}
@@ -769,7 +769,7 @@ restart:
769 } else { 769 } else {
770 spin_unlock(&gl->gl_spin); 770 spin_unlock(&gl->gl_spin);
771 771
772 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL); 772 new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
773 if (!new_gh) 773 if (!new_gh)
774 return; 774 return;
775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags); 775 set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
@@ -785,21 +785,6 @@ out:
785 gfs2_holder_put(new_gh); 785 gfs2_holder_put(new_gh);
786} 786}
787 787
788void gfs2_glock_inode_squish(struct inode *inode)
789{
790 struct gfs2_holder gh;
791 struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
792 gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
793 set_bit(HIF_DEMOTE, &gh.gh_iflags);
794 spin_lock(&gl->gl_spin);
795 gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
796 list_add_tail(&gh.gh_list, &gl->gl_waiters2);
797 run_queue(gl);
798 spin_unlock(&gl->gl_spin);
799 wait_for_completion(&gh.gh_wait);
800 gfs2_holder_uninit(&gh);
801}
802
803/** 788/**
804 * state_change - record that the glock is now in a different state 789 * state_change - record that the glock is now in a different state
805 * @gl: the glock 790 * @gl: the glock
@@ -847,12 +832,12 @@ static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
847 832
848 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) { 833 if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
849 if (glops->go_inval) 834 if (glops->go_inval)
850 glops->go_inval(gl, DIO_METADATA | DIO_DATA); 835 glops->go_inval(gl, DIO_METADATA);
851 } else if (gl->gl_state == LM_ST_DEFERRED) { 836 } else if (gl->gl_state == LM_ST_DEFERRED) {
852 /* We might not want to do this here. 837 /* We might not want to do this here.
853 Look at moving to the inode glops. */ 838 Look at moving to the inode glops. */
854 if (glops->go_inval) 839 if (glops->go_inval)
855 glops->go_inval(gl, DIO_DATA); 840 glops->go_inval(gl, 0);
856 } 841 }
857 842
858 /* Deal with each possible exit condition */ 843 /* Deal with each possible exit condition */
@@ -954,7 +939,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
954 gfs2_assert_warn(sdp, state != gl->gl_state); 939 gfs2_assert_warn(sdp, state != gl->gl_state);
955 940
956 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) 941 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
957 glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); 942 glops->go_sync(gl);
958 943
959 gfs2_glock_hold(gl); 944 gfs2_glock_hold(gl);
960 gl->gl_req_bh = xmote_bh; 945 gl->gl_req_bh = xmote_bh;
@@ -995,7 +980,7 @@ static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
995 state_change(gl, LM_ST_UNLOCKED); 980 state_change(gl, LM_ST_UNLOCKED);
996 981
997 if (glops->go_inval) 982 if (glops->go_inval)
998 glops->go_inval(gl, DIO_METADATA | DIO_DATA); 983 glops->go_inval(gl, DIO_METADATA);
999 984
1000 if (gh) { 985 if (gh) {
1001 spin_lock(&gl->gl_spin); 986 spin_lock(&gl->gl_spin);
@@ -1041,7 +1026,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
1041 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); 1026 gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1042 1027
1043 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync) 1028 if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1044 glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE); 1029 glops->go_sync(gl);
1045 1030
1046 gfs2_glock_hold(gl); 1031 gfs2_glock_hold(gl);
1047 gl->gl_req_bh = drop_bh; 1032 gl->gl_req_bh = drop_bh;
@@ -1244,9 +1229,6 @@ restart:
1244 1229
1245 clear_bit(GLF_PREFETCH, &gl->gl_flags); 1230 clear_bit(GLF_PREFETCH, &gl->gl_flags);
1246 1231
1247 if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
1248 dump_glock(gl);
1249
1250 return error; 1232 return error;
1251} 1233}
1252 1234
@@ -1923,7 +1905,7 @@ out:
1923 1905
1924static void scan_glock(struct gfs2_glock *gl) 1906static void scan_glock(struct gfs2_glock *gl)
1925{ 1907{
1926 if (gl->gl_ops == &gfs2_inode_glops) 1908 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1927 return; 1909 return;
1928 1910
1929 if (gfs2_glmutex_trylock(gl)) { 1911 if (gfs2_glmutex_trylock(gl)) {
@@ -2078,7 +2060,7 @@ static int dump_inode(struct gfs2_inode *ip)
2078 printk(KERN_INFO " num = %llu %llu\n", 2060 printk(KERN_INFO " num = %llu %llu\n",
2079 (unsigned long long)ip->i_num.no_formal_ino, 2061 (unsigned long long)ip->i_num.no_formal_ino,
2080 (unsigned long long)ip->i_num.no_addr); 2062 (unsigned long long)ip->i_num.no_addr);
2081 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode)); 2063 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_inode.i_mode));
2082 printk(KERN_INFO " i_flags ="); 2064 printk(KERN_INFO " i_flags =");
2083 for (x = 0; x < 32; x++) 2065 for (x = 0; x < 32; x++)
2084 if (test_bit(x, &ip->i_flags)) 2066 if (test_bit(x, &ip->i_flags))
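
Editor's note: among the glock.c changes above is a fix giving the non-SMP stub of gl_lock_addr() its missing "unsigned int" parameter; the SMP version, visible in the surrounding context, picks a read/write lock from a small power-of-two array by masking the hash bucket number. For readers unfamiliar with that bucketed-lock idea, here is a loose pthreads sketch with made-up sizes; it is not how the kernel implements it.

#include <pthread.h>
#include <stdio.h>

#define NLOCKS 64	/* must be a power of two */

static pthread_rwlock_t bucket_locks[NLOCKS];

/* Bucket x shares a lock with every other bucket that has the same
 * low bits: bounded lock memory at the cost of some lock sharing. */
static pthread_rwlock_t *bucket_lock_addr(unsigned int bucket)
{
	return &bucket_locks[bucket & (NLOCKS - 1)];
}

int main(void)		/* build with -lpthread */
{
	unsigned int bucket = 8191;	/* any hash bucket number */
	pthread_rwlock_t *lk;
	int i;

	for (i = 0; i < NLOCKS; i++)
		pthread_rwlock_init(&bucket_locks[i], NULL);

	lk = bucket_lock_addr(bucket);
	pthread_rwlock_rdlock(lk);
	printf("bucket %u -> lock %ld\n", bucket, (long)(lk - bucket_locks));
	pthread_rwlock_unlock(lk);
	return 0;
}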
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 2b2a889ee2cc..fb39108fc05c 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -27,8 +27,6 @@
27#define GL_ATIME 0x00000200 27#define GL_ATIME 0x00000200
28#define GL_NOCACHE 0x00000400 28#define GL_NOCACHE 0x00000400
29#define GL_NOCANCEL 0x00001000 29#define GL_NOCANCEL 0x00001000
30#define GL_AOP 0x00004000
31#define GL_DUMP 0x00008000
32 30
33#define GLR_TRYFAILED 13 31#define GLR_TRYFAILED 13
34#define GLR_CANCELED 14 32#define GLR_CANCELED 14
@@ -108,7 +106,6 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
108void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number, 106void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
109 const struct gfs2_glock_operations *glops, 107 const struct gfs2_glock_operations *glops,
110 unsigned int state, int flags); 108 unsigned int state, int flags);
111void gfs2_glock_inode_squish(struct inode *inode);
112 109
113/** 110/**
114 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock 111 * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 41a6b6818a50..b068d10bcb6e 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -92,7 +92,7 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
92 92
93 ip = gl->gl_object; 93 ip = gl->gl_object;
94 inode = &ip->i_inode; 94 inode = &ip->i_inode;
95 if (!ip || !S_ISREG(ip->i_di.di_mode)) 95 if (!ip || !S_ISREG(inode->i_mode))
96 return; 96 return;
97 97
98 if (!test_bit(GIF_PAGED, &ip->i_flags)) 98 if (!test_bit(GIF_PAGED, &ip->i_flags))
@@ -107,89 +107,20 @@ static void gfs2_pte_inval(struct gfs2_glock *gl)
107} 107}
108 108
109/** 109/**
110 * gfs2_page_inval - Invalidate all pages associated with a glock
111 * @gl: the glock
112 *
113 */
114
115static void gfs2_page_inval(struct gfs2_glock *gl)
116{
117 struct gfs2_inode *ip;
118 struct inode *inode;
119
120 ip = gl->gl_object;
121 inode = &ip->i_inode;
122 if (!ip || !S_ISREG(ip->i_di.di_mode))
123 return;
124
125 truncate_inode_pages(inode->i_mapping, 0);
126 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
127 clear_bit(GIF_PAGED, &ip->i_flags);
128}
129
130/**
131 * gfs2_page_wait - Wait for writeback of data
132 * @gl: the glock
133 *
134 * Syncs data (not metadata) for a regular file.
135 * No-op for all other types.
136 */
137
138static void gfs2_page_wait(struct gfs2_glock *gl)
139{
140 struct gfs2_inode *ip = gl->gl_object;
141 struct inode *inode = &ip->i_inode;
142 struct address_space *mapping = inode->i_mapping;
143 int error;
144
145 if (!S_ISREG(ip->i_di.di_mode))
146 return;
147
148 error = filemap_fdatawait(mapping);
149
150 /* Put back any errors cleared by filemap_fdatawait()
151 so they can be caught by someone who can pass them
152 up to user space. */
153
154 if (error == -ENOSPC)
155 set_bit(AS_ENOSPC, &mapping->flags);
156 else if (error)
157 set_bit(AS_EIO, &mapping->flags);
158
159}
160
161static void gfs2_page_writeback(struct gfs2_glock *gl)
162{
163 struct gfs2_inode *ip = gl->gl_object;
164 struct inode *inode = &ip->i_inode;
165 struct address_space *mapping = inode->i_mapping;
166
167 if (!S_ISREG(ip->i_di.di_mode))
168 return;
169
170 filemap_fdatawrite(mapping);
171}
172
173/**
174 * meta_go_sync - sync out the metadata for this glock 110 * meta_go_sync - sync out the metadata for this glock
175 * @gl: the glock 111 * @gl: the glock
176 * @flags: DIO_*
177 * 112 *
178 * Called when demoting or unlocking an EX glock. We must flush 113 * Called when demoting or unlocking an EX glock. We must flush
179 * to disk all dirty buffers/pages relating to this glock, and must not 114 * to disk all dirty buffers/pages relating to this glock, and must not
180 * not return to caller to demote/unlock the glock until I/O is complete. 115 * not return to caller to demote/unlock the glock until I/O is complete.
181 */ 116 */
182 117
183static void meta_go_sync(struct gfs2_glock *gl, int flags) 118static void meta_go_sync(struct gfs2_glock *gl)
184{ 119{
185 if (!(flags & DIO_METADATA))
186 return;
187
188 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) { 120 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
189 gfs2_log_flush(gl->gl_sbd, gl); 121 gfs2_log_flush(gl->gl_sbd, gl);
190 gfs2_meta_sync(gl); 122 gfs2_meta_sync(gl);
191 if (flags & DIO_RELEASE) 123 gfs2_ail_empty_gl(gl);
192 gfs2_ail_empty_gl(gl);
193 } 124 }
194 125
195} 126}
@@ -264,31 +195,31 @@ static void inode_go_drop_th(struct gfs2_glock *gl)
264/** 195/**
265 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock 196 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
266 * @gl: the glock protecting the inode 197 * @gl: the glock protecting the inode
267 * @flags:
268 * 198 *
269 */ 199 */
270 200
271static void inode_go_sync(struct gfs2_glock *gl, int flags) 201static void inode_go_sync(struct gfs2_glock *gl)
272{ 202{
273 int meta = (flags & DIO_METADATA); 203 struct gfs2_inode *ip = gl->gl_object;
274 int data = (flags & DIO_DATA); 204
205 if (ip && !S_ISREG(ip->i_inode.i_mode))
206 ip = NULL;
275 207
276 if (test_bit(GLF_DIRTY, &gl->gl_flags)) { 208 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
277 if (meta && data) { 209 gfs2_log_flush(gl->gl_sbd, gl);
278 gfs2_page_writeback(gl); 210 if (ip)
279 gfs2_log_flush(gl->gl_sbd, gl); 211 filemap_fdatawrite(ip->i_inode.i_mapping);
280 gfs2_meta_sync(gl); 212 gfs2_meta_sync(gl);
281 gfs2_page_wait(gl); 213 if (ip) {
282 clear_bit(GLF_DIRTY, &gl->gl_flags); 214 struct address_space *mapping = ip->i_inode.i_mapping;
283 } else if (meta) { 215 int error = filemap_fdatawait(mapping);
284 gfs2_log_flush(gl->gl_sbd, gl); 216 if (error == -ENOSPC)
285 gfs2_meta_sync(gl); 217 set_bit(AS_ENOSPC, &mapping->flags);
286 } else if (data) { 218 else if (error)
287 gfs2_page_writeback(gl); 219 set_bit(AS_EIO, &mapping->flags);
288 gfs2_page_wait(gl);
289 } 220 }
290 if (flags & DIO_RELEASE) 221 clear_bit(GLF_DIRTY, &gl->gl_flags);
291 gfs2_ail_empty_gl(gl); 222 gfs2_ail_empty_gl(gl);
292 } 223 }
293} 224}
294 225
@@ -301,15 +232,20 @@ static void inode_go_sync(struct gfs2_glock *gl, int flags)
301 232
302static void inode_go_inval(struct gfs2_glock *gl, int flags) 233static void inode_go_inval(struct gfs2_glock *gl, int flags)
303{ 234{
235 struct gfs2_inode *ip = gl->gl_object;
304 int meta = (flags & DIO_METADATA); 236 int meta = (flags & DIO_METADATA);
305 int data = (flags & DIO_DATA);
306 237
307 if (meta) { 238 if (meta) {
308 gfs2_meta_inval(gl); 239 gfs2_meta_inval(gl);
309 gl->gl_vn++; 240 if (ip)
241 set_bit(GIF_INVALID, &ip->i_flags);
242 }
243
244 if (ip && S_ISREG(ip->i_inode.i_mode)) {
245 truncate_inode_pages(ip->i_inode.i_mapping, 0);
246 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !ip->i_inode.i_mapping->nrpages);
247 clear_bit(GIF_PAGED, &ip->i_flags);
310 } 248 }
311 if (data)
312 gfs2_page_inval(gl);
313} 249}
314 250
315/** 251/**
@@ -351,11 +287,10 @@ static int inode_go_lock(struct gfs2_holder *gh)
351 if (!ip) 287 if (!ip)
352 return 0; 288 return 0;
353 289
354 if (ip->i_vn != gl->gl_vn) { 290 if (test_bit(GIF_INVALID, &ip->i_flags)) {
355 error = gfs2_inode_refresh(ip); 291 error = gfs2_inode_refresh(ip);
356 if (error) 292 if (error)
357 return error; 293 return error;
358 gfs2_inode_attr_in(ip);
359 } 294 }
360 295
361 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) && 296 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
@@ -379,11 +314,8 @@ static void inode_go_unlock(struct gfs2_holder *gh)
379 struct gfs2_glock *gl = gh->gh_gl; 314 struct gfs2_glock *gl = gh->gh_gl;
380 struct gfs2_inode *ip = gl->gl_object; 315 struct gfs2_inode *ip = gl->gl_object;
381 316
382 if (ip == NULL) 317 if (ip)
383 return; 318 gfs2_meta_cache_flush(ip);
384 if (test_bit(GLF_DIRTY, &gl->gl_flags))
385 gfs2_inode_attr_in(ip);
386 gfs2_meta_cache_flush(ip);
387} 319}
388 320
389/** 321/**
@@ -491,13 +423,13 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
491 struct gfs2_sbd *sdp = gl->gl_sbd; 423 struct gfs2_sbd *sdp = gl->gl_sbd;
492 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 424 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
493 struct gfs2_glock *j_gl = ip->i_gl; 425 struct gfs2_glock *j_gl = ip->i_gl;
494 struct gfs2_log_header head; 426 struct gfs2_log_header_host head;
495 int error; 427 int error;
496 428
497 if (gl->gl_state != LM_ST_UNLOCKED && 429 if (gl->gl_state != LM_ST_UNLOCKED &&
498 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 430 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
499 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode)); 431 gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
500 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); 432 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
501 433
502 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 434 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
503 if (error) 435 if (error)
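
Editor's note: the rewritten inode_go_sync() above folds the old gfs2_page_writeback()/gfs2_page_wait() helpers into one path: flush the log, start data writeback, sync metadata, wait for the data, and push any -ENOSPC/-EIO back into the mapping flags so a later caller can still report it. Below is a loose userspace analogue of that "flush, wait, and keep the first error for whoever asks next" shape, using stdio instead of the page cache and entirely invented helper names.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy analogue of the sync path: push dirty data out and park any
 * error where a later caller will still see it, instead of letting
 * the wait step silently swallow it. */
struct mapping {
	FILE *file;
	int deferred_error;	/* 0, or the first errno seen during writeback */
};

static void mapping_sync(struct mapping *m)
{
	if (fflush(m->file) != 0 && m->deferred_error == 0)
		m->deferred_error = errno;	/* e.g. ENOSPC: keep it for later */
}

static int mapping_close(struct mapping *m)
{
	mapping_sync(m);
	if (fclose(m->file) != 0 && m->deferred_error == 0)
		m->deferred_error = errno;
	return m->deferred_error ? -m->deferred_error : 0;
}

int main(void)
{
	struct mapping m = { .file = fopen("/tmp/gfs2-sync-demo", "w"),
			     .deferred_error = 0 };
	int ret;

	if (!m.file)
		return 1;
	fputs("dirty data\n", m.file);
	mapping_sync(&m);		/* like inode_go_sync: flush, record errors */
	ret = mapping_close(&m);	/* the deferred error surfaces here */
	printf("close: %s\n", ret ? strerror(-ret) : "ok");
	return 0;
}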
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 118dc693d111..734421edae85 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -14,8 +14,6 @@
14 14
15#define DIO_WAIT 0x00000010 15#define DIO_WAIT 0x00000010
16#define DIO_METADATA 0x00000020 16#define DIO_METADATA 0x00000020
17#define DIO_DATA 0x00000040
18#define DIO_RELEASE 0x00000080
19#define DIO_ALL 0x00000100 17#define DIO_ALL 0x00000100
20 18
21struct gfs2_log_operations; 19struct gfs2_log_operations;
@@ -41,7 +39,7 @@ struct gfs2_log_operations {
41 void (*lo_before_commit) (struct gfs2_sbd *sdp); 39 void (*lo_before_commit) (struct gfs2_sbd *sdp);
42 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai); 40 void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
43 void (*lo_before_scan) (struct gfs2_jdesc *jd, 41 void (*lo_before_scan) (struct gfs2_jdesc *jd,
44 struct gfs2_log_header *head, int pass); 42 struct gfs2_log_header_host *head, int pass);
45 int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start, 43 int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
46 struct gfs2_log_descriptor *ld, __be64 *ptr, 44 struct gfs2_log_descriptor *ld, __be64 *ptr,
47 int pass); 45 int pass);
@@ -67,8 +65,8 @@ struct gfs2_rgrpd {
67 struct list_head rd_list_mru; 65 struct list_head rd_list_mru;
68 struct list_head rd_recent; /* Recently used rgrps */ 66 struct list_head rd_recent; /* Recently used rgrps */
69 struct gfs2_glock *rd_gl; /* Glock for this rgrp */ 67 struct gfs2_glock *rd_gl; /* Glock for this rgrp */
70 struct gfs2_rindex rd_ri; 68 struct gfs2_rindex_host rd_ri;
71 struct gfs2_rgrp rd_rg; 69 struct gfs2_rgrp_host rd_rg;
72 u64 rd_rg_vn; 70 u64 rd_rg_vn;
73 struct gfs2_bitmap *rd_bits; 71 struct gfs2_bitmap *rd_bits;
74 unsigned int rd_bh_count; 72 unsigned int rd_bh_count;
@@ -103,18 +101,17 @@ struct gfs2_bufdata {
103}; 101};
104 102
105struct gfs2_glock_operations { 103struct gfs2_glock_operations {
106 void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state, 104 void (*go_xmote_th) (struct gfs2_glock *gl, unsigned int state, int flags);
107 int flags); 105 void (*go_xmote_bh) (struct gfs2_glock *gl);
108 void (*go_xmote_bh) (struct gfs2_glock * gl); 106 void (*go_drop_th) (struct gfs2_glock *gl);
109 void (*go_drop_th) (struct gfs2_glock * gl); 107 void (*go_drop_bh) (struct gfs2_glock *gl);
110 void (*go_drop_bh) (struct gfs2_glock * gl); 108 void (*go_sync) (struct gfs2_glock *gl);
111 void (*go_sync) (struct gfs2_glock * gl, int flags); 109 void (*go_inval) (struct gfs2_glock *gl, int flags);
112 void (*go_inval) (struct gfs2_glock * gl, int flags); 110 int (*go_demote_ok) (struct gfs2_glock *gl);
113 int (*go_demote_ok) (struct gfs2_glock * gl); 111 int (*go_lock) (struct gfs2_holder *gh);
114 int (*go_lock) (struct gfs2_holder * gh); 112 void (*go_unlock) (struct gfs2_holder *gh);
115 void (*go_unlock) (struct gfs2_holder * gh); 113 void (*go_callback) (struct gfs2_glock *gl, unsigned int state);
116 void (*go_callback) (struct gfs2_glock * gl, unsigned int state); 114 void (*go_greedy) (struct gfs2_glock *gl);
117 void (*go_greedy) (struct gfs2_glock * gl);
118 const int go_type; 115 const int go_type;
119}; 116};
120 117
@@ -217,6 +214,7 @@ struct gfs2_alloc {
217}; 214};
218 215
219enum { 216enum {
217 GIF_INVALID = 0,
220 GIF_QD_LOCKED = 1, 218 GIF_QD_LOCKED = 1,
221 GIF_PAGED = 2, 219 GIF_PAGED = 2,
222 GIF_SW_PAGED = 3, 220 GIF_SW_PAGED = 3,
@@ -224,12 +222,11 @@ enum {
224 222
225struct gfs2_inode { 223struct gfs2_inode {
226 struct inode i_inode; 224 struct inode i_inode;
227 struct gfs2_inum i_num; 225 struct gfs2_inum_host i_num;
228 226
229 unsigned long i_flags; /* GIF_... */ 227 unsigned long i_flags; /* GIF_... */
230 228
231 u64 i_vn; 229 struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
232 struct gfs2_dinode i_di; /* To be replaced by ref to block */
233 230
234 struct gfs2_glock *i_gl; /* Move into i_gh? */ 231 struct gfs2_glock *i_gl; /* Move into i_gh? */
235 struct gfs2_holder i_iopen_gh; 232 struct gfs2_holder i_iopen_gh;
@@ -450,7 +447,7 @@ struct gfs2_sbd {
450 struct super_block *sd_vfs_meta; 447 struct super_block *sd_vfs_meta;
451 struct kobject sd_kobj; 448 struct kobject sd_kobj;
452 unsigned long sd_flags; /* SDF_... */ 449 unsigned long sd_flags; /* SDF_... */
453 struct gfs2_sb sd_sb; 450 struct gfs2_sb_host sd_sb;
454 451
455 /* Constants computed on mount */ 452 /* Constants computed on mount */
456 453
@@ -503,8 +500,8 @@ struct gfs2_sbd {
503 500
504 spinlock_t sd_statfs_spin; 501 spinlock_t sd_statfs_spin;
505 struct mutex sd_statfs_mutex; 502 struct mutex sd_statfs_mutex;
506 struct gfs2_statfs_change sd_statfs_master; 503 struct gfs2_statfs_change_host sd_statfs_master;
507 struct gfs2_statfs_change sd_statfs_local; 504 struct gfs2_statfs_change_host sd_statfs_local;
508 unsigned long sd_statfs_sync_time; 505 unsigned long sd_statfs_sync_time;
509 506
510 /* Resource group stuff */ 507 /* Resource group stuff */
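
Editor's note: the incore.h hunks rename the in-core copies of on-disk structures to *_host (gfs2_inum_host, gfs2_dinode_host, gfs2_sb_host, ...) and drop the i_vn version counter in favour of a GIF_INVALID flag, and the inode.c diff that follows adds gfs2_dinode_in(), which unpacks the big-endian on-disk dinode into the host struct and the VFS inode. Here is a self-contained miniature of that disk/host split and the *_in conversion; the field names are invented and be64toh()/be32toh() stand in for the kernel's byte-order helpers.

#include <stdint.h>
#include <stdio.h>
#include <endian.h>

/* The *_disk struct mirrors the byte-for-byte on-disk layout
 * (big-endian); the *_host struct is the CPU-order copy that the rest
 * of the code works on. */
struct mini_dinode_disk {
	uint64_t di_size;	/* big-endian */
	uint64_t di_blocks;	/* big-endian */
	uint32_t di_flags;	/* big-endian */
	uint32_t di_pad;
};

struct mini_dinode_host {
	uint64_t di_size;
	uint64_t di_blocks;
	uint32_t di_flags;
};

/* "..._in": unpack the disk representation into the host copy, the
 * same direction gfs2_dinode_in() goes when a dinode is read. */
static void mini_dinode_in(struct mini_dinode_host *di,
			   const struct mini_dinode_disk *str)
{
	di->di_size = be64toh(str->di_size);
	di->di_blocks = be64toh(str->di_blocks);
	di->di_flags = be32toh(str->di_flags);
}

int main(void)
{
	struct mini_dinode_disk disk = {
		.di_size = htobe64(4096),
		.di_blocks = htobe64(1),
		.di_flags = htobe32(0x40),	/* e.g. an EXHASH-style flag bit */
	};
	struct mini_dinode_host host;

	mini_dinode_in(&host, &disk);
	printf("size=%llu blocks=%llu flags=0x%x\n",
	       (unsigned long long)host.di_size,
	       (unsigned long long)host.di_blocks,
	       (unsigned int)host.di_flags);
	return 0;
}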
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index d470e5286ecd..d122074c45e1 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -38,83 +38,12 @@
38#include "trans.h" 38#include "trans.h"
39#include "util.h" 39#include "util.h"
40 40
41/**
42 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
43 * @ip: The GFS2 inode (with embedded disk inode data)
44 * @inode: The Linux VFS inode
45 *
46 */
47
48void gfs2_inode_attr_in(struct gfs2_inode *ip)
49{
50 struct inode *inode = &ip->i_inode;
51 struct gfs2_dinode *di = &ip->i_di;
52
53 inode->i_ino = ip->i_num.no_addr;
54
55 switch (di->di_mode & S_IFMT) {
56 case S_IFBLK:
57 case S_IFCHR:
58 inode->i_rdev = MKDEV(di->di_major, di->di_minor);
59 break;
60 default:
61 inode->i_rdev = 0;
62 break;
63 };
64
65 inode->i_mode = di->di_mode;
66 inode->i_nlink = di->di_nlink;
67 inode->i_uid = di->di_uid;
68 inode->i_gid = di->di_gid;
69 i_size_write(inode, di->di_size);
70 inode->i_atime.tv_sec = di->di_atime;
71 inode->i_mtime.tv_sec = di->di_mtime;
72 inode->i_ctime.tv_sec = di->di_ctime;
73 inode->i_atime.tv_nsec = 0;
74 inode->i_mtime.tv_nsec = 0;
75 inode->i_ctime.tv_nsec = 0;
76 inode->i_blocks = di->di_blocks <<
77 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
78
79 if (di->di_flags & GFS2_DIF_IMMUTABLE)
80 inode->i_flags |= S_IMMUTABLE;
81 else
82 inode->i_flags &= ~S_IMMUTABLE;
83
84 if (di->di_flags & GFS2_DIF_APPENDONLY)
85 inode->i_flags |= S_APPEND;
86 else
87 inode->i_flags &= ~S_APPEND;
88}
89
90/**
91 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
92 * @ip: The GFS2 inode
93 *
94 * Only copy out the attributes that we want the VFS layer
95 * to be able to modify.
96 */
97
98void gfs2_inode_attr_out(struct gfs2_inode *ip)
99{
100 struct inode *inode = &ip->i_inode;
101 struct gfs2_dinode *di = &ip->i_di;
102 gfs2_assert_withdraw(GFS2_SB(inode),
103 (di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
104 di->di_mode = inode->i_mode;
105 di->di_uid = inode->i_uid;
106 di->di_gid = inode->i_gid;
107 di->di_atime = inode->i_atime.tv_sec;
108 di->di_mtime = inode->i_mtime.tv_sec;
109 di->di_ctime = inode->i_ctime.tv_sec;
110}
111
112static int iget_test(struct inode *inode, void *opaque) 41static int iget_test(struct inode *inode, void *opaque)
113{ 42{
114 struct gfs2_inode *ip = GFS2_I(inode); 43 struct gfs2_inode *ip = GFS2_I(inode);
115 struct gfs2_inum *inum = opaque; 44 struct gfs2_inum_host *inum = opaque;
116 45
117 if (ip && ip->i_num.no_addr == inum->no_addr) 46 if (ip->i_num.no_addr == inum->no_addr)
118 return 1; 47 return 1;
119 48
120 return 0; 49 return 0;
@@ -123,19 +52,20 @@ static int iget_test(struct inode *inode, void *opaque)
123static int iget_set(struct inode *inode, void *opaque) 52static int iget_set(struct inode *inode, void *opaque)
124{ 53{
125 struct gfs2_inode *ip = GFS2_I(inode); 54 struct gfs2_inode *ip = GFS2_I(inode);
126 struct gfs2_inum *inum = opaque; 55 struct gfs2_inum_host *inum = opaque;
127 56
128 ip->i_num = *inum; 57 ip->i_num = *inum;
58 inode->i_ino = inum->no_addr;
129 return 0; 59 return 0;
130} 60}
131 61
132struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum) 62struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum)
133{ 63{
134 return ilookup5(sb, (unsigned long)inum->no_formal_ino, 64 return ilookup5(sb, (unsigned long)inum->no_formal_ino,
135 iget_test, inum); 65 iget_test, inum);
136} 66}
137 67
138static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum) 68static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum_host *inum)
139{ 69{
140 return iget5_locked(sb, (unsigned long)inum->no_formal_ino, 70 return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
141 iget_test, iget_set, inum); 71 iget_test, iget_set, inum);
@@ -150,7 +80,7 @@ static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
150 * Returns: A VFS inode, or an error 80 * Returns: A VFS inode, or an error
151 */ 81 */
152 82
153struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type) 83struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned int type)
154{ 84{
155 struct inode *inode = gfs2_iget(sb, inum); 85 struct inode *inode = gfs2_iget(sb, inum);
156 struct gfs2_inode *ip = GFS2_I(inode); 86 struct gfs2_inode *ip = GFS2_I(inode);
@@ -188,7 +118,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum,
188 if (unlikely(error)) 118 if (unlikely(error))
189 goto fail_put; 119 goto fail_put;
190 120
191 ip->i_vn = ip->i_gl->gl_vn - 1; 121 set_bit(GIF_INVALID, &ip->i_flags);
192 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); 122 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
193 if (unlikely(error)) 123 if (unlikely(error))
194 goto fail_iopen; 124 goto fail_iopen;
@@ -208,6 +138,63 @@ fail:
208 return ERR_PTR(error); 138 return ERR_PTR(error);
209} 139}
210 140
141static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
142{
143 struct gfs2_dinode_host *di = &ip->i_di;
144 const struct gfs2_dinode *str = buf;
145
146 if (ip->i_num.no_addr != be64_to_cpu(str->di_num.no_addr)) {
147 if (gfs2_consist_inode(ip))
148 gfs2_dinode_print(ip);
149 return -EIO;
150 }
151 if (ip->i_num.no_formal_ino != be64_to_cpu(str->di_num.no_formal_ino))
152 return -ESTALE;
153
154 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
155 ip->i_inode.i_rdev = 0;
156 switch (ip->i_inode.i_mode & S_IFMT) {
157 case S_IFBLK:
158 case S_IFCHR:
159 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
160 be32_to_cpu(str->di_minor));
161 break;
162 };
163
164 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
165 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
166 /*
167 * We will need to review setting the nlink count here in the
168 * light of the forthcoming ro bind mount work. This is a reminder
169 * to do that.
170 */
171 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
172 di->di_size = be64_to_cpu(str->di_size);
173 i_size_write(&ip->i_inode, di->di_size);
174 di->di_blocks = be64_to_cpu(str->di_blocks);
175 gfs2_set_inode_blocks(&ip->i_inode);
176 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime);
177 ip->i_inode.i_atime.tv_nsec = 0;
178 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
179 ip->i_inode.i_mtime.tv_nsec = 0;
180 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
181 ip->i_inode.i_ctime.tv_nsec = 0;
182
183 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
184 di->di_goal_data = be64_to_cpu(str->di_goal_data);
185 di->di_generation = be64_to_cpu(str->di_generation);
186
187 di->di_flags = be32_to_cpu(str->di_flags);
188 gfs2_set_inode_flags(&ip->i_inode);
189 di->di_height = be16_to_cpu(str->di_height);
190
191 di->di_depth = be16_to_cpu(str->di_depth);
192 di->di_entries = be32_to_cpu(str->di_entries);
193
194 di->di_eattr = be64_to_cpu(str->di_eattr);
195 return 0;
196}
197
211/** 198/**
212 * gfs2_inode_refresh - Refresh the incore copy of the dinode 199 * gfs2_inode_refresh - Refresh the incore copy of the dinode
213 * @ip: The GFS2 inode 200 * @ip: The GFS2 inode
@@ -229,21 +216,11 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
229 return -EIO; 216 return -EIO;
230 } 217 }
231 218
232 gfs2_dinode_in(&ip->i_di, dibh->b_data); 219 error = gfs2_dinode_in(ip, dibh->b_data);
233
234 brelse(dibh); 220 brelse(dibh);
221 clear_bit(GIF_INVALID, &ip->i_flags);
235 222
236 if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) { 223 return error;
237 if (gfs2_consist_inode(ip))
238 gfs2_dinode_print(&ip->i_di);
239 return -EIO;
240 }
241 if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
242 return -ESTALE;
243
244 ip->i_vn = ip->i_gl->gl_vn;
245
246 return 0;
247} 224}
248 225
249int gfs2_dinode_dealloc(struct gfs2_inode *ip) 226int gfs2_dinode_dealloc(struct gfs2_inode *ip)
@@ -255,7 +232,7 @@ int gfs2_dinode_dealloc(struct gfs2_inode *ip)
255 232
256 if (ip->i_di.di_blocks != 1) { 233 if (ip->i_di.di_blocks != 1) {
257 if (gfs2_consist_inode(ip)) 234 if (gfs2_consist_inode(ip))
258 gfs2_dinode_print(&ip->i_di); 235 gfs2_dinode_print(ip);
259 return -EIO; 236 return -EIO;
260 } 237 }
261 238
@@ -318,14 +295,14 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
318 u32 nlink; 295 u32 nlink;
319 int error; 296 int error;
320 297
321 BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink); 298 BUG_ON(diff != 1 && diff != -1);
322 nlink = ip->i_di.di_nlink + diff; 299 nlink = ip->i_inode.i_nlink + diff;
323 300
324 /* If we are reducing the nlink count, but the new value ends up being 301 /* If we are reducing the nlink count, but the new value ends up being
325 bigger than the old one, we must have underflowed. */ 302 bigger than the old one, we must have underflowed. */
326 if (diff < 0 && nlink > ip->i_di.di_nlink) { 303 if (diff < 0 && nlink > ip->i_inode.i_nlink) {
327 if (gfs2_consist_inode(ip)) 304 if (gfs2_consist_inode(ip))
328 gfs2_dinode_print(&ip->i_di); 305 gfs2_dinode_print(ip);
329 return -EIO; 306 return -EIO;
330 } 307 }
331 308
@@ -333,16 +310,19 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
333 if (error) 310 if (error)
334 return error; 311 return error;
335 312
336 ip->i_di.di_nlink = nlink; 313 if (diff > 0)
337 ip->i_di.di_ctime = get_seconds(); 314 inc_nlink(&ip->i_inode);
338 ip->i_inode.i_nlink = nlink; 315 else
316 drop_nlink(&ip->i_inode);
317
318 ip->i_inode.i_ctime.tv_sec = get_seconds();
339 319
340 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 320 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
341 gfs2_dinode_out(&ip->i_di, dibh->b_data); 321 gfs2_dinode_out(ip, dibh->b_data);
342 brelse(dibh); 322 brelse(dibh);
343 mark_inode_dirty(&ip->i_inode); 323 mark_inode_dirty(&ip->i_inode);
344 324
345 if (ip->i_di.di_nlink == 0) { 325 if (ip->i_inode.i_nlink == 0) {
346 struct gfs2_rgrpd *rgd; 326 struct gfs2_rgrpd *rgd;
347 struct gfs2_holder ri_gh, rg_gh; 327 struct gfs2_holder ri_gh, rg_gh;
348 328
@@ -357,7 +337,6 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
357 if (error) 337 if (error)
358 goto out_norgrp; 338 goto out_norgrp;
359 339
360 clear_nlink(&ip->i_inode);
361 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 340 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
362 gfs2_glock_dq_uninit(&rg_gh); 341 gfs2_glock_dq_uninit(&rg_gh);
363out_norgrp: 342out_norgrp:
@@ -394,7 +373,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
394 struct super_block *sb = dir->i_sb; 373 struct super_block *sb = dir->i_sb;
395 struct gfs2_inode *dip = GFS2_I(dir); 374 struct gfs2_inode *dip = GFS2_I(dir);
396 struct gfs2_holder d_gh; 375 struct gfs2_holder d_gh;
397 struct gfs2_inum inum; 376 struct gfs2_inum_host inum;
398 unsigned int type; 377 unsigned int type;
399 int error = 0; 378 int error = 0;
400 struct inode *inode = NULL; 379 struct inode *inode = NULL;
@@ -436,7 +415,7 @@ static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
436{ 415{
437 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); 416 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
438 struct buffer_head *bh; 417 struct buffer_head *bh;
439 struct gfs2_inum_range ir; 418 struct gfs2_inum_range_host ir;
440 int error; 419 int error;
441 420
442 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 421 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
@@ -479,7 +458,7 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
479 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode); 458 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
480 struct gfs2_holder gh; 459 struct gfs2_holder gh;
481 struct buffer_head *bh; 460 struct buffer_head *bh;
482 struct gfs2_inum_range ir; 461 struct gfs2_inum_range_host ir;
483 int error; 462 int error;
484 463
485 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 464 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
@@ -500,21 +479,22 @@ static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
500 if (!ir.ir_length) { 479 if (!ir.ir_length) {
501 struct buffer_head *m_bh; 480 struct buffer_head *m_bh;
502 u64 x, y; 481 u64 x, y;
482 __be64 z;
503 483
504 error = gfs2_meta_inode_buffer(m_ip, &m_bh); 484 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
505 if (error) 485 if (error)
506 goto out_brelse; 486 goto out_brelse;
507 487
508 x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); 488 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
509 x = y = be64_to_cpu(x); 489 x = y = be64_to_cpu(z);
510 ir.ir_start = x; 490 ir.ir_start = x;
511 ir.ir_length = GFS2_INUM_QUANTUM; 491 ir.ir_length = GFS2_INUM_QUANTUM;
512 x += GFS2_INUM_QUANTUM; 492 x += GFS2_INUM_QUANTUM;
513 if (x < y) 493 if (x < y)
514 gfs2_consist_inode(m_ip); 494 gfs2_consist_inode(m_ip);
515 x = cpu_to_be64(x); 495 z = cpu_to_be64(x);
516 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); 496 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
517 *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x; 497 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;
518 498
519 brelse(m_bh); 499 brelse(m_bh);
520 } 500 }
@@ -567,7 +547,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
567 return error; 547 return error;
568 548
569 /* Don't create entries in an unlinked directory */ 549 /* Don't create entries in an unlinked directory */
570 if (!dip->i_di.di_nlink) 550 if (!dip->i_inode.i_nlink)
571 return -EPERM; 551 return -EPERM;
572 552
573 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL); 553 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
@@ -583,7 +563,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
583 563
584 if (dip->i_di.di_entries == (u32)-1) 564 if (dip->i_di.di_entries == (u32)-1)
585 return -EFBIG; 565 return -EFBIG;
586 if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1) 566 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1)
587 return -EMLINK; 567 return -EMLINK;
588 568
589 return 0; 569 return 0;
@@ -593,24 +573,24 @@ static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
593 unsigned int *uid, unsigned int *gid) 573 unsigned int *uid, unsigned int *gid)
594{ 574{
595 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir && 575 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
596 (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) { 576 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) {
597 if (S_ISDIR(*mode)) 577 if (S_ISDIR(*mode))
598 *mode |= S_ISUID; 578 *mode |= S_ISUID;
599 else if (dip->i_di.di_uid != current->fsuid) 579 else if (dip->i_inode.i_uid != current->fsuid)
600 *mode &= ~07111; 580 *mode &= ~07111;
601 *uid = dip->i_di.di_uid; 581 *uid = dip->i_inode.i_uid;
602 } else 582 } else
603 *uid = current->fsuid; 583 *uid = current->fsuid;
604 584
605 if (dip->i_di.di_mode & S_ISGID) { 585 if (dip->i_inode.i_mode & S_ISGID) {
606 if (S_ISDIR(*mode)) 586 if (S_ISDIR(*mode))
607 *mode |= S_ISGID; 587 *mode |= S_ISGID;
608 *gid = dip->i_di.di_gid; 588 *gid = dip->i_inode.i_gid;
609 } else 589 } else
610 *gid = current->fsgid; 590 *gid = current->fsgid;
611} 591}
612 592
613static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum, 593static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum_host *inum,
614 u64 *generation) 594 u64 *generation)
615{ 595{
616 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 596 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
@@ -650,9 +630,9 @@ out:
650 */ 630 */
651 631
652static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 632static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
653 const struct gfs2_inum *inum, unsigned int mode, 633 const struct gfs2_inum_host *inum, unsigned int mode,
654 unsigned int uid, unsigned int gid, 634 unsigned int uid, unsigned int gid,
655 const u64 *generation) 635 const u64 *generation, dev_t dev)
656{ 636{
657 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 637 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
658 struct gfs2_dinode *di; 638 struct gfs2_dinode *di;
@@ -669,14 +649,15 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
669 di->di_mode = cpu_to_be32(mode); 649 di->di_mode = cpu_to_be32(mode);
670 di->di_uid = cpu_to_be32(uid); 650 di->di_uid = cpu_to_be32(uid);
671 di->di_gid = cpu_to_be32(gid); 651 di->di_gid = cpu_to_be32(gid);
672 di->di_nlink = cpu_to_be32(0); 652 di->di_nlink = 0;
673 di->di_size = cpu_to_be64(0); 653 di->di_size = 0;
674 di->di_blocks = cpu_to_be64(1); 654 di->di_blocks = cpu_to_be64(1);
675 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds()); 655 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
676 di->di_major = di->di_minor = cpu_to_be32(0); 656 di->di_major = cpu_to_be32(MAJOR(dev));
657 di->di_minor = cpu_to_be32(MINOR(dev));
677 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr); 658 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
678 di->di_generation = cpu_to_be64(*generation); 659 di->di_generation = cpu_to_be64(*generation);
679 di->di_flags = cpu_to_be32(0); 660 di->di_flags = 0;
680 661
681 if (S_ISREG(mode)) { 662 if (S_ISREG(mode)) {
682 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || 663 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
@@ -693,22 +674,22 @@ static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
693 } 674 }
694 675
695 di->__pad1 = 0; 676 di->__pad1 = 0;
696 di->di_payload_format = cpu_to_be32(0); 677 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0);
697 di->di_height = cpu_to_be32(0); 678 di->di_height = 0;
698 di->__pad2 = 0; 679 di->__pad2 = 0;
699 di->__pad3 = 0; 680 di->__pad3 = 0;
700 di->di_depth = cpu_to_be16(0); 681 di->di_depth = 0;
701 di->di_entries = cpu_to_be32(0); 682 di->di_entries = 0;
702 memset(&di->__pad4, 0, sizeof(di->__pad4)); 683 memset(&di->__pad4, 0, sizeof(di->__pad4));
703 di->di_eattr = cpu_to_be64(0); 684 di->di_eattr = 0;
704 memset(&di->di_reserved, 0, sizeof(di->di_reserved)); 685 memset(&di->di_reserved, 0, sizeof(di->di_reserved));
705 686
706 brelse(dibh); 687 brelse(dibh);
707} 688}
708 689
709static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 690static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
710 unsigned int mode, const struct gfs2_inum *inum, 691 unsigned int mode, const struct gfs2_inum_host *inum,
711 const u64 *generation) 692 const u64 *generation, dev_t dev)
712{ 693{
713 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 694 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
714 unsigned int uid, gid; 695 unsigned int uid, gid;
@@ -729,7 +710,7 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
729 if (error) 710 if (error)
730 goto out_quota; 711 goto out_quota;
731 712
732 init_dinode(dip, gl, inum, mode, uid, gid, generation); 713 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev);
733 gfs2_quota_change(dip, +1, uid, gid); 714 gfs2_quota_change(dip, +1, uid, gid);
734 gfs2_trans_end(sdp); 715 gfs2_trans_end(sdp);
735 716
@@ -759,8 +740,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
759 if (alloc_required < 0) 740 if (alloc_required < 0)
760 goto fail; 741 goto fail;
761 if (alloc_required) { 742 if (alloc_required) {
762 error = gfs2_quota_check(dip, dip->i_di.di_uid, 743 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
763 dip->i_di.di_gid);
764 if (error) 744 if (error)
765 goto fail_quota_locks; 745 goto fail_quota_locks;
766 746
@@ -782,16 +762,16 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
782 goto fail_quota_locks; 762 goto fail_quota_locks;
783 } 763 }
784 764
785 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode)); 765 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_inode.i_mode));
786 if (error) 766 if (error)
787 goto fail_end_trans; 767 goto fail_end_trans;
788 768
789 error = gfs2_meta_inode_buffer(ip, &dibh); 769 error = gfs2_meta_inode_buffer(ip, &dibh);
790 if (error) 770 if (error)
791 goto fail_end_trans; 771 goto fail_end_trans;
792 ip->i_di.di_nlink = 1; 772 ip->i_inode.i_nlink = 1;
793 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 773 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
794 gfs2_dinode_out(&ip->i_di, dibh->b_data); 774 gfs2_dinode_out(ip, dibh->b_data);
795 brelse(dibh); 775 brelse(dibh);
796 return 0; 776 return 0;
797 777
@@ -860,13 +840,13 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
860 */ 840 */
861 841
862struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, 842struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
863 unsigned int mode) 843 unsigned int mode, dev_t dev)
864{ 844{
865 struct inode *inode; 845 struct inode *inode;
866 struct gfs2_inode *dip = ghs->gh_gl->gl_object; 846 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
867 struct inode *dir = &dip->i_inode; 847 struct inode *dir = &dip->i_inode;
868 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 848 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
869 struct gfs2_inum inum; 849 struct gfs2_inum_host inum;
870 int error; 850 int error;
871 u64 generation; 851 u64 generation;
872 852
@@ -890,35 +870,12 @@ struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
890 if (error) 870 if (error)
891 goto fail_gunlock; 871 goto fail_gunlock;
892 872
893 if (inum.no_addr < dip->i_num.no_addr) { 873 error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops,
894 gfs2_glock_dq(ghs); 874 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
895 875 if (error)
896 error = gfs2_glock_nq_num(sdp, inum.no_addr, 876 goto fail_gunlock;
897 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
898 GL_SKIP, ghs + 1);
899 if (error) {
900 return ERR_PTR(error);
901 }
902
903 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
904 error = gfs2_glock_nq(ghs);
905 if (error) {
906 gfs2_glock_dq_uninit(ghs + 1);
907 return ERR_PTR(error);
908 }
909
910 error = create_ok(dip, name, mode);
911 if (error)
912 goto fail_gunlock2;
913 } else {
914 error = gfs2_glock_nq_num(sdp, inum.no_addr,
915 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
916 GL_SKIP, ghs + 1);
917 if (error)
918 goto fail_gunlock;
919 }
920 877
921 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation); 878 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev);
922 if (error) 879 if (error)
923 goto fail_gunlock2; 880 goto fail_gunlock2;
924 881
@@ -975,7 +932,7 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
975 932
976 if (ip->i_di.di_entries != 2) { 933 if (ip->i_di.di_entries != 2) {
977 if (gfs2_consist_inode(ip)) 934 if (gfs2_consist_inode(ip))
978 gfs2_dinode_print(&ip->i_di); 935 gfs2_dinode_print(ip);
979 return -EIO; 936 return -EIO;
980 } 937 }
981 938
@@ -997,7 +954,12 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
997 if (error) 954 if (error)
998 return error; 955 return error;
999 956
1000 error = gfs2_change_nlink(ip, -2); 957 /* It looks odd, but it really should be done twice */
958 error = gfs2_change_nlink(ip, -1);
959 if (error)
960 return error;
961
962 error = gfs2_change_nlink(ip, -1);
1001 if (error) 963 if (error)
1002 return error; 964 return error;
1003 965
@@ -1018,16 +980,16 @@ int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
1018int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 980int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1019 struct gfs2_inode *ip) 981 struct gfs2_inode *ip)
1020{ 982{
1021 struct gfs2_inum inum; 983 struct gfs2_inum_host inum;
1022 unsigned int type; 984 unsigned int type;
1023 int error; 985 int error;
1024 986
1025 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) 987 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1026 return -EPERM; 988 return -EPERM;
1027 989
1028 if ((dip->i_di.di_mode & S_ISVTX) && 990 if ((dip->i_inode.i_mode & S_ISVTX) &&
1029 dip->i_di.di_uid != current->fsuid && 991 dip->i_inode.i_uid != current->fsuid &&
1030 ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER)) 992 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER))
1031 return -EPERM; 993 return -EPERM;
1032 994
1033 if (IS_APPEND(&dip->i_inode)) 995 if (IS_APPEND(&dip->i_inode))
@@ -1044,7 +1006,7 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
1044 if (!gfs2_inum_equal(&inum, &ip->i_num)) 1006 if (!gfs2_inum_equal(&inum, &ip->i_num))
1045 return -ENOENT; 1007 return -ENOENT;
1046 1008
1047 if (IF2DT(ip->i_di.di_mode) != type) { 1009 if (IF2DT(ip->i_inode.i_mode) != type) {
1048 gfs2_consist_inode(dip); 1010 gfs2_consist_inode(dip);
1049 return -EIO; 1011 return -EIO;
1050 } 1012 }
@@ -1194,7 +1156,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1194 return 0; 1156 return 0;
1195 1157
1196 curtime = get_seconds(); 1158 curtime = get_seconds();
1197 if (curtime - ip->i_di.di_atime >= quantum) { 1159 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1198 gfs2_glock_dq(gh); 1160 gfs2_glock_dq(gh);
1199 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY, 1161 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
1200 gh); 1162 gh);
@@ -1206,7 +1168,7 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1206 trying to get exclusive lock. */ 1168 trying to get exclusive lock. */
1207 1169
1208 curtime = get_seconds(); 1170 curtime = get_seconds();
1209 if (curtime - ip->i_di.di_atime >= quantum) { 1171 if (curtime - ip->i_inode.i_atime.tv_sec >= quantum) {
1210 struct buffer_head *dibh; 1172 struct buffer_head *dibh;
1211 struct gfs2_dinode *di; 1173 struct gfs2_dinode *di;
1212 1174
@@ -1220,11 +1182,11 @@ int gfs2_glock_nq_atime(struct gfs2_holder *gh)
1220 if (error) 1182 if (error)
1221 goto fail_end_trans; 1183 goto fail_end_trans;
1222 1184
1223 ip->i_di.di_atime = curtime; 1185 ip->i_inode.i_atime.tv_sec = curtime;
1224 1186
1225 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1187 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1226 di = (struct gfs2_dinode *)dibh->b_data; 1188 di = (struct gfs2_dinode *)dibh->b_data;
1227 di->di_atime = cpu_to_be64(ip->i_di.di_atime); 1189 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
1228 brelse(dibh); 1190 brelse(dibh);
1229 1191
1230 gfs2_trans_end(sdp); 1192 gfs2_trans_end(sdp);
@@ -1249,92 +1211,6 @@ fail:
1249 return error; 1211 return error;
1250} 1212}
1251 1213
1252/**
1253 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
1254 * @arg_a: the first structure
1255 * @arg_b: the second structure
1256 *
1257 * Returns: 1 if A > B
1258 * -1 if A < B
1259 * 0 if A == B
1260 */
1261
1262static int glock_compare_atime(const void *arg_a, const void *arg_b)
1263{
1264 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1265 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1266 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1267 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1268
1269 if (a->ln_number > b->ln_number)
1270 return 1;
1271 if (a->ln_number < b->ln_number)
1272 return -1;
1273 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1274 return 1;
1275 if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
1276 return 1;
1277
1278 return 0;
1279}
1280
1281/**
1282 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
1283 * atime update
1284 * @num_gh: the number of structures
1285 * @ghs: an array of struct gfs2_holder structures
1286 *
1287 * Returns: 0 on success (all glocks acquired),
1288 * errno on failure (no glocks acquired)
1289 */
1290
1291int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
1292{
1293 struct gfs2_holder **p;
1294 unsigned int x;
1295 int error = 0;
1296
1297 if (!num_gh)
1298 return 0;
1299
1300 if (num_gh == 1) {
1301 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1302 if (ghs->gh_flags & GL_ATIME)
1303 error = gfs2_glock_nq_atime(ghs);
1304 else
1305 error = gfs2_glock_nq(ghs);
1306 return error;
1307 }
1308
1309 p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1310 if (!p)
1311 return -ENOMEM;
1312
1313 for (x = 0; x < num_gh; x++)
1314 p[x] = &ghs[x];
1315
1316 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);
1317
1318 for (x = 0; x < num_gh; x++) {
1319 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1320
1321 if (p[x]->gh_flags & GL_ATIME)
1322 error = gfs2_glock_nq_atime(p[x]);
1323 else
1324 error = gfs2_glock_nq(p[x]);
1325
1326 if (error) {
1327 while (x--)
1328 gfs2_glock_dq(p[x]);
1329 break;
1330 }
1331 }
1332
1333 kfree(p);
1334 return error;
1335}
1336
1337
1338static int 1214static int
1339__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) 1215__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1340{ 1216{
@@ -1345,10 +1221,8 @@ __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1345 if (!error) { 1221 if (!error) {
1346 error = inode_setattr(&ip->i_inode, attr); 1222 error = inode_setattr(&ip->i_inode, attr);
1347 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); 1223 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1348 gfs2_inode_attr_out(ip);
1349
1350 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1224 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1351 gfs2_dinode_out(&ip->i_di, dibh->b_data); 1225 gfs2_dinode_out(ip, dibh->b_data);
1352 brelse(dibh); 1226 brelse(dibh);
1353 } 1227 }
1354 return error; 1228 return error;
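The gfs2_change_nlink() and gfs2_rmdiri() hunks above rely on two details worth spelling out: the link count is unsigned, so a decrement that produces a larger value signals underflow, and rmdir now drops the count by one twice (once for the directory's "." entry, once for the parent's entry) so the final decrement can take the unlinked-inode path. A minimal userspace sketch of that logic, with invented names and plain C in place of the kernel helpers:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Adjust a 32-bit link count by +1 or -1, refusing underflow. */
    static int change_nlink(uint32_t *nlink, int diff)
    {
            uint32_t new;

            assert(diff == 1 || diff == -1);
            new = *nlink + diff;
            /* Reducing the count but ending up bigger means the
             * unsigned arithmetic wrapped: underflow. */
            if (diff < 0 && new > *nlink)
                    return -1;
            *nlink = new;
            return 0;
    }

    int main(void)
    {
            uint32_t n = 2;                 /* empty directory: "." plus parent entry */
            change_nlink(&n, -1);           /* first decrement */
            change_nlink(&n, -1);           /* second decrement reaches zero */
            printf("nlink = %u\n", n);      /* prints: nlink = 0 */
            return 0;
    }
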
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index f5d861760579..b57f448b15bc 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -22,13 +22,19 @@ static inline int gfs2_is_jdata(struct gfs2_inode *ip)
22 22
23static inline int gfs2_is_dir(struct gfs2_inode *ip) 23static inline int gfs2_is_dir(struct gfs2_inode *ip)
24{ 24{
25 return S_ISDIR(ip->i_di.di_mode); 25 return S_ISDIR(ip->i_inode.i_mode);
26}
27
28static inline void gfs2_set_inode_blocks(struct inode *inode)
29{
30 struct gfs2_inode *ip = GFS2_I(inode);
31 inode->i_blocks = ip->i_di.di_blocks <<
32 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
26} 33}
27 34
28void gfs2_inode_attr_in(struct gfs2_inode *ip); 35void gfs2_inode_attr_in(struct gfs2_inode *ip);
29void gfs2_inode_attr_out(struct gfs2_inode *ip); 36struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum_host *inum, unsigned type);
30struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type); 37struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum_host *inum);
31struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);
32 38
33int gfs2_inode_refresh(struct gfs2_inode *ip); 39int gfs2_inode_refresh(struct gfs2_inode *ip);
34 40
@@ -37,19 +43,15 @@ int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
37struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, 43struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
38 int is_root, struct nameidata *nd); 44 int is_root, struct nameidata *nd);
39struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, 45struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
40 unsigned int mode); 46 unsigned int mode, dev_t dev);
41int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, 47int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
42 struct gfs2_inode *ip); 48 struct gfs2_inode *ip);
43int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 49int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
44 struct gfs2_inode *ip); 50 struct gfs2_inode *ip);
45int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to); 51int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
46int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len); 52int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);
47
48int gfs2_glock_nq_atime(struct gfs2_holder *gh); 53int gfs2_glock_nq_atime(struct gfs2_holder *gh);
49int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);
50
51int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); 54int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);
52
53struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 55struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
54 56
55#endif /* __INODE_DOT_H__ */ 57#endif /* __INODE_DOT_H__ */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0cace3da9dbb..291415ddfe51 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -15,6 +15,7 @@
15#include <linux/gfs2_ondisk.h> 15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h> 16#include <linux/crc32.h>
17#include <linux/lm_interface.h> 17#include <linux/lm_interface.h>
18#include <linux/delay.h>
18 19
19#include "gfs2.h" 20#include "gfs2.h"
20#include "incore.h" 21#include "incore.h"
@@ -142,7 +143,7 @@ static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int fl
142 return list_empty(&ai->ai_ail1_list); 143 return list_empty(&ai->ai_ail1_list);
143} 144}
144 145
145void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags) 146static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
146{ 147{
147 struct list_head *head = &sdp->sd_ail1_list; 148 struct list_head *head = &sdp->sd_ail1_list;
148 u64 sync_gen; 149 u64 sync_gen;
@@ -261,6 +262,12 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
261 * @sdp: The GFS2 superblock 262 * @sdp: The GFS2 superblock
262 * @blks: The number of blocks to reserve 263 * @blks: The number of blocks to reserve
263 * 264 *
 265 * Note that we never give out the last 6 blocks of the journal. That's
 266 * due to the fact that there are a small number of header blocks
267 * associated with each log flush. The exact number can't be known until
268 * flush time, so we ensure that we have just enough free blocks at all
269 * times to avoid running out during a log flush.
270 *
264 * Returns: errno 271 * Returns: errno
265 */ 272 */
266 273
@@ -274,7 +281,7 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
274 281
275 mutex_lock(&sdp->sd_log_reserve_mutex); 282 mutex_lock(&sdp->sd_log_reserve_mutex);
276 gfs2_log_lock(sdp); 283 gfs2_log_lock(sdp);
277 while(sdp->sd_log_blks_free <= blks) { 284 while(sdp->sd_log_blks_free <= (blks + 6)) {
278 gfs2_log_unlock(sdp); 285 gfs2_log_unlock(sdp);
279 gfs2_ail1_empty(sdp, 0); 286 gfs2_ail1_empty(sdp, 0);
280 gfs2_log_flush(sdp, NULL); 287 gfs2_log_flush(sdp, NULL);
@@ -319,7 +326,8 @@ static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
319 bh_map.b_size = 1 << inode->i_blkbits; 326 bh_map.b_size = 1 << inode->i_blkbits;
320 error = gfs2_block_map(inode, lbn, 0, &bh_map); 327 error = gfs2_block_map(inode, lbn, 0, &bh_map);
321 if (error || !bh_map.b_blocknr) 328 if (error || !bh_map.b_blocknr)
322 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn); 329 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error,
330 (unsigned long long)bh_map.b_blocknr, lbn);
323 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr); 331 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
324 332
325 return bh_map.b_blocknr; 333 return bh_map.b_blocknr;
@@ -643,12 +651,9 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
643 up_read(&sdp->sd_log_flush_lock); 651 up_read(&sdp->sd_log_flush_lock);
644 652
645 gfs2_log_lock(sdp); 653 gfs2_log_lock(sdp);
646 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) { 654 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
647 gfs2_log_unlock(sdp); 655 wake_up_process(sdp->sd_logd_process);
648 gfs2_log_flush(sdp, NULL); 656 gfs2_log_unlock(sdp);
649 } else {
650 gfs2_log_unlock(sdp);
651 }
652} 657}
653 658
654/** 659/**
@@ -686,3 +691,21 @@ void gfs2_log_shutdown(struct gfs2_sbd *sdp)
686 up_write(&sdp->sd_log_flush_lock); 691 up_write(&sdp->sd_log_flush_lock);
687} 692}
688 693
694
695/**
696 * gfs2_meta_syncfs - sync all the buffers in a filesystem
697 * @sdp: the filesystem
698 *
699 */
700
701void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
702{
703 gfs2_log_flush(sdp, NULL);
704 for (;;) {
705 gfs2_ail1_start(sdp, DIO_ALL);
706 if (gfs2_ail1_empty(sdp, DIO_ALL))
707 break;
708 msleep(10);
709 }
710}
711
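The new comment in gfs2_log_reserve() explains why the check above became "blks + 6": a handful of header blocks are written during every log flush, so reservations must always leave that headroom free. A self-contained sketch of the same idea, with invented names and a simple failure return instead of the kernel's flush-and-retry loop:

    #include <stdio.h>

    #define LOG_FLUSH_HEADROOM 6            /* matches the figure in the comment */

    static unsigned int log_blks_free = 32; /* free journal blocks (made up) */

    /* Grant a reservation only if it leaves the flush headroom intact. */
    static int log_reserve(unsigned int blks)
    {
            if (log_blks_free <= blks + LOG_FLUSH_HEADROOM)
                    return -1;              /* the kernel would flush and retry here */
            log_blks_free -= blks;
            return 0;
    }

    int main(void)
    {
            int r;

            r = log_reserve(20);
            printf("reserve 20 -> %d (free %u)\n", r, log_blks_free);
            r = log_reserve(10);
            printf("reserve 10 -> %d (free %u)\n", r, log_blks_free);
            return 0;
    }
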
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 7f5737d55612..8e7aa0f29109 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -48,7 +48,6 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, 48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
49 unsigned int ssize); 49 unsigned int ssize);
50 50
51void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
52int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags); 51int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
53 52
54int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks); 53int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
@@ -61,5 +60,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
61void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); 60void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
62 61
63void gfs2_log_shutdown(struct gfs2_sbd *sdp); 62void gfs2_log_shutdown(struct gfs2_sbd *sdp);
63void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
64 64
65#endif /* __LOG_DOT_H__ */ 65#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index ab6d1115f95d..4d7f94d8c7bd 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -182,7 +182,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
182} 182}
183 183
184static void buf_lo_before_scan(struct gfs2_jdesc *jd, 184static void buf_lo_before_scan(struct gfs2_jdesc *jd,
185 struct gfs2_log_header *head, int pass) 185 struct gfs2_log_header_host *head, int pass)
186{ 186{
187 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 187 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
188 188
@@ -328,7 +328,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
328} 328}
329 329
330static void revoke_lo_before_scan(struct gfs2_jdesc *jd, 330static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
331 struct gfs2_log_header *head, int pass) 331 struct gfs2_log_header_host *head, int pass)
332{ 332{
333 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 333 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
334 334
@@ -509,7 +509,7 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
509{ 509{
510 LIST_HEAD(started); 510 LIST_HEAD(started);
511 struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt; 511 struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
512 struct buffer_head *bh = NULL; 512 struct buffer_head *bh = NULL,*bh1 = NULL;
513 unsigned int offset = sizeof(struct gfs2_log_descriptor); 513 unsigned int offset = sizeof(struct gfs2_log_descriptor);
514 struct gfs2_log_descriptor *ld; 514 struct gfs2_log_descriptor *ld;
515 unsigned int limit; 515 unsigned int limit;
@@ -537,8 +537,13 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
537 list_for_each_entry_safe_continue(bd1, bdt, 537 list_for_each_entry_safe_continue(bd1, bdt,
538 &sdp->sd_log_le_databuf, 538 &sdp->sd_log_le_databuf,
539 bd_le.le_list) { 539 bd_le.le_list) {
540 /* store off the buffer head in a local ptr since
541 * gfs2_bufdata might change when we drop the log lock
542 */
543 bh1 = bd1->bd_bh;
544
540 /* An ordered write buffer */ 545 /* An ordered write buffer */
541 if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) { 546 if (bh1 && !buffer_pinned(bh1)) {
542 list_move(&bd1->bd_le.le_list, &started); 547 list_move(&bd1->bd_le.le_list, &started);
543 if (bd1 == bd2) { 548 if (bd1 == bd2) {
544 bd2 = NULL; 549 bd2 = NULL;
@@ -547,20 +552,21 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
547 bd_le.le_list); 552 bd_le.le_list);
548 } 553 }
549 total_dbuf--; 554 total_dbuf--;
550 if (bd1->bd_bh) { 555 if (bh1) {
551 get_bh(bd1->bd_bh); 556 if (buffer_dirty(bh1)) {
552 if (buffer_dirty(bd1->bd_bh)) { 557 get_bh(bh1);
558
553 gfs2_log_unlock(sdp); 559 gfs2_log_unlock(sdp);
554 wait_on_buffer(bd1->bd_bh); 560
555 ll_rw_block(WRITE, 1, 561 ll_rw_block(SWRITE, 1, &bh1);
556 &bd1->bd_bh); 562 brelse(bh1);
563
557 gfs2_log_lock(sdp); 564 gfs2_log_lock(sdp);
558 } 565 }
559 brelse(bd1->bd_bh);
560 continue; 566 continue;
561 } 567 }
562 continue; 568 continue;
563 } else if (bd1->bd_bh) { /* A journaled buffer */ 569 } else if (bh1) { /* A journaled buffer */
564 int magic; 570 int magic;
565 gfs2_log_unlock(sdp); 571 gfs2_log_unlock(sdp);
566 if (!bh) { 572 if (!bh) {
@@ -582,16 +588,16 @@ static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
582 ld->ld_data2 = cpu_to_be32(0); 588 ld->ld_data2 = cpu_to_be32(0);
583 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved)); 589 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
584 } 590 }
585 magic = gfs2_check_magic(bd1->bd_bh); 591 magic = gfs2_check_magic(bh1);
586 *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); 592 *ptr++ = cpu_to_be64(bh1->b_blocknr);
587 *ptr++ = cpu_to_be64((__u64)magic); 593 *ptr++ = cpu_to_be64((__u64)magic);
588 clear_buffer_escaped(bd1->bd_bh); 594 clear_buffer_escaped(bh1);
589 if (unlikely(magic != 0)) 595 if (unlikely(magic != 0))
590 set_buffer_escaped(bd1->bd_bh); 596 set_buffer_escaped(bh1);
591 gfs2_log_lock(sdp); 597 gfs2_log_lock(sdp);
592 if (n++ > num) 598 if (n++ > num)
593 break; 599 break;
594 } else if (!bd1->bd_bh) { 600 } else if (!bh1) {
595 total_dbuf--; 601 total_dbuf--;
596 sdp->sd_log_num_databuf--; 602 sdp->sd_log_num_databuf--;
597 list_del_init(&bd1->bd_le.le_list); 603 list_del_init(&bd1->bd_le.le_list);
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index 5839c05ae6be..965bc65c7c64 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -60,7 +60,7 @@ static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
60} 60}
61 61
62static inline void lops_before_scan(struct gfs2_jdesc *jd, 62static inline void lops_before_scan(struct gfs2_jdesc *jd,
63 struct gfs2_log_header *head, 63 struct gfs2_log_header_host *head,
64 unsigned int pass) 64 unsigned int pass)
65{ 65{
66 int x; 66 int x;
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 3912d6a4b1e6..0e34d9918973 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -127,17 +127,17 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
127 127
128/** 128/**
129 * getbuf - Get a buffer with a given address space 129 * getbuf - Get a buffer with a given address space
130 * @sdp: the filesystem 130 * @gl: the glock
131 * @aspace: the address space
132 * @blkno: the block number (filesystem scope) 131 * @blkno: the block number (filesystem scope)
133 * @create: 1 if the buffer should be created 132 * @create: 1 if the buffer should be created
134 * 133 *
135 * Returns: the buffer 134 * Returns: the buffer
136 */ 135 */
137 136
138static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace, 137static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
139 u64 blkno, int create)
140{ 138{
139 struct address_space *mapping = gl->gl_aspace->i_mapping;
140 struct gfs2_sbd *sdp = gl->gl_sbd;
141 struct page *page; 141 struct page *page;
142 struct buffer_head *bh; 142 struct buffer_head *bh;
143 unsigned int shift; 143 unsigned int shift;
@@ -150,13 +150,13 @@ static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
150 150
151 if (create) { 151 if (create) {
152 for (;;) { 152 for (;;) {
153 page = grab_cache_page(aspace->i_mapping, index); 153 page = grab_cache_page(mapping, index);
154 if (page) 154 if (page)
155 break; 155 break;
156 yield(); 156 yield();
157 } 157 }
158 } else { 158 } else {
159 page = find_lock_page(aspace->i_mapping, index); 159 page = find_lock_page(mapping, index);
160 if (!page) 160 if (!page)
161 return NULL; 161 return NULL;
162 } 162 }
@@ -202,7 +202,7 @@ static void meta_prep_new(struct buffer_head *bh)
202struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) 202struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
203{ 203{
204 struct buffer_head *bh; 204 struct buffer_head *bh;
205 bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); 205 bh = getbuf(gl, blkno, CREATE);
206 meta_prep_new(bh); 206 meta_prep_new(bh);
207 return bh; 207 return bh;
208} 208}
@@ -220,7 +220,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
220int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, 220int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
221 struct buffer_head **bhp) 221 struct buffer_head **bhp)
222{ 222{
223 *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE); 223 *bhp = getbuf(gl, blkno, CREATE);
224 if (!buffer_uptodate(*bhp)) 224 if (!buffer_uptodate(*bhp))
225 ll_rw_block(READ_META, 1, bhp); 225 ll_rw_block(READ_META, 1, bhp);
226 if (flags & DIO_WAIT) { 226 if (flags & DIO_WAIT) {
@@ -379,11 +379,10 @@ void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
379void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) 379void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
380{ 380{
381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 381 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
382 struct inode *aspace = ip->i_gl->gl_aspace;
383 struct buffer_head *bh; 382 struct buffer_head *bh;
384 383
385 while (blen) { 384 while (blen) {
386 bh = getbuf(sdp, aspace, bstart, NO_CREATE); 385 bh = getbuf(ip->i_gl, bstart, NO_CREATE);
387 if (bh) { 386 if (bh) {
388 struct gfs2_bufdata *bd = bh->b_private; 387 struct gfs2_bufdata *bd = bh->b_private;
389 388
@@ -472,6 +471,9 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
472 struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height; 471 struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
473 int in_cache = 0; 472 int in_cache = 0;
474 473
474 BUG_ON(!gl);
475 BUG_ON(!sdp);
476
475 spin_lock(&ip->i_spin); 477 spin_lock(&ip->i_spin);
476 if (*bh_slot && (*bh_slot)->b_blocknr == num) { 478 if (*bh_slot && (*bh_slot)->b_blocknr == num) {
477 bh = *bh_slot; 479 bh = *bh_slot;
@@ -481,7 +483,7 @@ int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
481 spin_unlock(&ip->i_spin); 483 spin_unlock(&ip->i_spin);
482 484
483 if (!bh) 485 if (!bh)
484 bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE); 486 bh = getbuf(gl, num, CREATE);
485 487
486 if (!bh) 488 if (!bh)
487 return -ENOBUFS; 489 return -ENOBUFS;
@@ -532,7 +534,6 @@ err:
532struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen) 534struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
533{ 535{
534 struct gfs2_sbd *sdp = gl->gl_sbd; 536 struct gfs2_sbd *sdp = gl->gl_sbd;
535 struct inode *aspace = gl->gl_aspace;
536 struct buffer_head *first_bh, *bh; 537 struct buffer_head *first_bh, *bh;
537 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >> 538 u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
538 sdp->sd_sb.sb_bsize_shift; 539 sdp->sd_sb.sb_bsize_shift;
@@ -544,7 +545,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
544 if (extlen > max_ra) 545 if (extlen > max_ra)
545 extlen = max_ra; 546 extlen = max_ra;
546 547
547 first_bh = getbuf(sdp, aspace, dblock, CREATE); 548 first_bh = getbuf(gl, dblock, CREATE);
548 549
549 if (buffer_uptodate(first_bh)) 550 if (buffer_uptodate(first_bh))
550 goto out; 551 goto out;
@@ -555,7 +556,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
555 extlen--; 556 extlen--;
556 557
557 while (extlen) { 558 while (extlen) {
558 bh = getbuf(sdp, aspace, dblock, CREATE); 559 bh = getbuf(gl, dblock, CREATE);
559 560
560 if (!buffer_uptodate(bh) && !buffer_locked(bh)) 561 if (!buffer_uptodate(bh) && !buffer_locked(bh))
561 ll_rw_block(READA, 1, &bh); 562 ll_rw_block(READA, 1, &bh);
@@ -571,20 +572,3 @@ out:
571 return first_bh; 572 return first_bh;
572} 573}
573 574
574/**
575 * gfs2_meta_syncfs - sync all the buffers in a filesystem
576 * @sdp: the filesystem
577 *
578 */
579
580void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
581{
582 gfs2_log_flush(sdp, NULL);
583 for (;;) {
584 gfs2_ail1_start(sdp, DIO_ALL);
585 if (gfs2_ail1_empty(sdp, DIO_ALL))
586 break;
587 msleep(10);
588 }
589}
590
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 3ec939e20dff..e037425bc042 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -67,7 +67,6 @@ static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
67} 67}
68 68
69struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen); 69struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
70void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
71 70
72#define buffer_busy(bh) \ 71#define buffer_busy(bh) \
73((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned))) 72((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
diff --git a/fs/gfs2/ondisk.c b/fs/gfs2/ondisk.c
index 1025960b0e6e..f2495f1e21ad 100644
--- a/fs/gfs2/ondisk.c
+++ b/fs/gfs2/ondisk.c
@@ -15,6 +15,8 @@
15 15
16#include "gfs2.h" 16#include "gfs2.h"
17#include <linux/gfs2_ondisk.h> 17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19#include "incore.h"
18 20
19#define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \ 21#define pv(struct, member, fmt) printk(KERN_INFO " "#member" = "fmt"\n", \
20 struct->member); 22 struct->member);
@@ -32,7 +34,7 @@
32 * first arg: the cpu-order structure 34 * first arg: the cpu-order structure
33 */ 35 */
34 36
35void gfs2_inum_in(struct gfs2_inum *no, const void *buf) 37void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf)
36{ 38{
37 const struct gfs2_inum *str = buf; 39 const struct gfs2_inum *str = buf;
38 40
@@ -40,7 +42,7 @@ void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
40 no->no_addr = be64_to_cpu(str->no_addr); 42 no->no_addr = be64_to_cpu(str->no_addr);
41} 43}
42 44
43void gfs2_inum_out(const struct gfs2_inum *no, void *buf) 45void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf)
44{ 46{
45 struct gfs2_inum *str = buf; 47 struct gfs2_inum *str = buf;
46 48
@@ -48,13 +50,13 @@ void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
48 str->no_addr = cpu_to_be64(no->no_addr); 50 str->no_addr = cpu_to_be64(no->no_addr);
49} 51}
50 52
51static void gfs2_inum_print(const struct gfs2_inum *no) 53static void gfs2_inum_print(const struct gfs2_inum_host *no)
52{ 54{
53 printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino); 55 printk(KERN_INFO " no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
54 printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr); 56 printk(KERN_INFO " no_addr = %llu\n", (unsigned long long)no->no_addr);
55} 57}
56 58
57static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf) 59static void gfs2_meta_header_in(struct gfs2_meta_header_host *mh, const void *buf)
58{ 60{
59 const struct gfs2_meta_header *str = buf; 61 const struct gfs2_meta_header *str = buf;
60 62
@@ -63,23 +65,7 @@ static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
63 mh->mh_format = be32_to_cpu(str->mh_format); 65 mh->mh_format = be32_to_cpu(str->mh_format);
64} 66}
65 67
66static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf) 68void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf)
67{
68 struct gfs2_meta_header *str = buf;
69
70 str->mh_magic = cpu_to_be32(mh->mh_magic);
71 str->mh_type = cpu_to_be32(mh->mh_type);
72 str->mh_format = cpu_to_be32(mh->mh_format);
73}
74
75static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
76{
77 pv(mh, mh_magic, "0x%.8X");
78 pv(mh, mh_type, "%u");
79 pv(mh, mh_format, "%u");
80}
81
82void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
83{ 69{
84 const struct gfs2_sb *str = buf; 70 const struct gfs2_sb *str = buf;
85 71
@@ -97,7 +83,7 @@ void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
97 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); 83 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
98} 84}
99 85
100void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf) 86void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf)
101{ 87{
102 const struct gfs2_rindex *str = buf; 88 const struct gfs2_rindex *str = buf;
103 89
@@ -109,7 +95,7 @@ void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
109 95
110} 96}
111 97
112void gfs2_rindex_print(const struct gfs2_rindex *ri) 98void gfs2_rindex_print(const struct gfs2_rindex_host *ri)
113{ 99{
114 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr); 100 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
115 pv(ri, ri_length, "%u"); 101 pv(ri, ri_length, "%u");
@@ -120,22 +106,20 @@ void gfs2_rindex_print(const struct gfs2_rindex *ri)
120 pv(ri, ri_bitbytes, "%u"); 106 pv(ri, ri_bitbytes, "%u");
121} 107}
122 108
123void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf) 109void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf)
124{ 110{
125 const struct gfs2_rgrp *str = buf; 111 const struct gfs2_rgrp *str = buf;
126 112
127 gfs2_meta_header_in(&rg->rg_header, buf);
128 rg->rg_flags = be32_to_cpu(str->rg_flags); 113 rg->rg_flags = be32_to_cpu(str->rg_flags);
129 rg->rg_free = be32_to_cpu(str->rg_free); 114 rg->rg_free = be32_to_cpu(str->rg_free);
130 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); 115 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
131 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); 116 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
132} 117}
133 118
134void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf) 119void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf)
135{ 120{
136 struct gfs2_rgrp *str = buf; 121 struct gfs2_rgrp *str = buf;
137 122
138 gfs2_meta_header_out(&rg->rg_header, buf);
139 str->rg_flags = cpu_to_be32(rg->rg_flags); 123 str->rg_flags = cpu_to_be32(rg->rg_flags);
140 str->rg_free = cpu_to_be32(rg->rg_free); 124 str->rg_free = cpu_to_be32(rg->rg_free);
141 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); 125 str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
@@ -144,7 +128,7 @@ void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
144 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); 128 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
145} 129}
146 130
147void gfs2_quota_in(struct gfs2_quota *qu, const void *buf) 131void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
148{ 132{
149 const struct gfs2_quota *str = buf; 133 const struct gfs2_quota *str = buf;
150 134
@@ -153,96 +137,56 @@ void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
153 qu->qu_value = be64_to_cpu(str->qu_value); 137 qu->qu_value = be64_to_cpu(str->qu_value);
154} 138}
155 139
156void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf) 140void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
157{
158 const struct gfs2_dinode *str = buf;
159
160 gfs2_meta_header_in(&di->di_header, buf);
161 gfs2_inum_in(&di->di_num, &str->di_num);
162
163 di->di_mode = be32_to_cpu(str->di_mode);
164 di->di_uid = be32_to_cpu(str->di_uid);
165 di->di_gid = be32_to_cpu(str->di_gid);
166 di->di_nlink = be32_to_cpu(str->di_nlink);
167 di->di_size = be64_to_cpu(str->di_size);
168 di->di_blocks = be64_to_cpu(str->di_blocks);
169 di->di_atime = be64_to_cpu(str->di_atime);
170 di->di_mtime = be64_to_cpu(str->di_mtime);
171 di->di_ctime = be64_to_cpu(str->di_ctime);
172 di->di_major = be32_to_cpu(str->di_major);
173 di->di_minor = be32_to_cpu(str->di_minor);
174
175 di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
176 di->di_goal_data = be64_to_cpu(str->di_goal_data);
177 di->di_generation = be64_to_cpu(str->di_generation);
178
179 di->di_flags = be32_to_cpu(str->di_flags);
180 di->di_payload_format = be32_to_cpu(str->di_payload_format);
181 di->di_height = be16_to_cpu(str->di_height);
182
183 di->di_depth = be16_to_cpu(str->di_depth);
184 di->di_entries = be32_to_cpu(str->di_entries);
185
186 di->di_eattr = be64_to_cpu(str->di_eattr);
187
188}
189
190void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
191{ 141{
142 const struct gfs2_dinode_host *di = &ip->i_di;
192 struct gfs2_dinode *str = buf; 143 struct gfs2_dinode *str = buf;
193 144
194 gfs2_meta_header_out(&di->di_header, buf); 145 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
195 gfs2_inum_out(&di->di_num, (char *)&str->di_num); 146 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
147 str->di_header.__pad0 = 0;
148 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
149 str->di_header.__pad1 = 0;
196 150
197 str->di_mode = cpu_to_be32(di->di_mode); 151 gfs2_inum_out(&ip->i_num, &str->di_num);
198 str->di_uid = cpu_to_be32(di->di_uid); 152
199 str->di_gid = cpu_to_be32(di->di_gid); 153 str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
200 str->di_nlink = cpu_to_be32(di->di_nlink); 154 str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
155 str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
156 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
201 str->di_size = cpu_to_be64(di->di_size); 157 str->di_size = cpu_to_be64(di->di_size);
202 str->di_blocks = cpu_to_be64(di->di_blocks); 158 str->di_blocks = cpu_to_be64(di->di_blocks);
203 str->di_atime = cpu_to_be64(di->di_atime); 159 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
204 str->di_mtime = cpu_to_be64(di->di_mtime); 160 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
205 str->di_ctime = cpu_to_be64(di->di_ctime); 161 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
206 str->di_major = cpu_to_be32(di->di_major);
207 str->di_minor = cpu_to_be32(di->di_minor);
208 162
209 str->di_goal_meta = cpu_to_be64(di->di_goal_meta); 163 str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
210 str->di_goal_data = cpu_to_be64(di->di_goal_data); 164 str->di_goal_data = cpu_to_be64(di->di_goal_data);
211 str->di_generation = cpu_to_be64(di->di_generation); 165 str->di_generation = cpu_to_be64(di->di_generation);
212 166
213 str->di_flags = cpu_to_be32(di->di_flags); 167 str->di_flags = cpu_to_be32(di->di_flags);
214 str->di_payload_format = cpu_to_be32(di->di_payload_format);
215 str->di_height = cpu_to_be16(di->di_height); 168 str->di_height = cpu_to_be16(di->di_height);
216 169 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
170 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ?
171 GFS2_FORMAT_DE : 0);
217 str->di_depth = cpu_to_be16(di->di_depth); 172 str->di_depth = cpu_to_be16(di->di_depth);
218 str->di_entries = cpu_to_be32(di->di_entries); 173 str->di_entries = cpu_to_be32(di->di_entries);
219 174
220 str->di_eattr = cpu_to_be64(di->di_eattr); 175 str->di_eattr = cpu_to_be64(di->di_eattr);
221
222} 176}
223 177
224void gfs2_dinode_print(const struct gfs2_dinode *di) 178void gfs2_dinode_print(const struct gfs2_inode *ip)
225{ 179{
226 gfs2_meta_header_print(&di->di_header); 180 const struct gfs2_dinode_host *di = &ip->i_di;
227 gfs2_inum_print(&di->di_num); 181
182 gfs2_inum_print(&ip->i_num);
228 183
229 pv(di, di_mode, "0%o");
230 pv(di, di_uid, "%u");
231 pv(di, di_gid, "%u");
232 pv(di, di_nlink, "%u");
233 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); 184 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size);
234 printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks); 185 printk(KERN_INFO " di_blocks = %llu\n", (unsigned long long)di->di_blocks);
235 printk(KERN_INFO " di_atime = %lld\n", (long long)di->di_atime);
236 printk(KERN_INFO " di_mtime = %lld\n", (long long)di->di_mtime);
237 printk(KERN_INFO " di_ctime = %lld\n", (long long)di->di_ctime);
238 pv(di, di_major, "%u");
239 pv(di, di_minor, "%u");
240
241 printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta); 186 printk(KERN_INFO " di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
242 printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data); 187 printk(KERN_INFO " di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);
243 188
244 pv(di, di_flags, "0x%.8X"); 189 pv(di, di_flags, "0x%.8X");
245 pv(di, di_payload_format, "%u");
246 pv(di, di_height, "%u"); 190 pv(di, di_height, "%u");
247 191
248 pv(di, di_depth, "%u"); 192 pv(di, di_depth, "%u");
@@ -251,7 +195,7 @@ void gfs2_dinode_print(const struct gfs2_dinode *di)
251 printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr); 195 printk(KERN_INFO " di_eattr = %llu\n", (unsigned long long)di->di_eattr);
252} 196}
253 197
254void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf) 198void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf)
255{ 199{
256 const struct gfs2_log_header *str = buf; 200 const struct gfs2_log_header *str = buf;
257 201
@@ -263,7 +207,7 @@ void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
263 lh->lh_hash = be32_to_cpu(str->lh_hash); 207 lh->lh_hash = be32_to_cpu(str->lh_hash);
264} 208}
265 209
266void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf) 210void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf)
267{ 211{
268 const struct gfs2_inum_range *str = buf; 212 const struct gfs2_inum_range *str = buf;
269 213
@@ -271,7 +215,7 @@ void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
271 ir->ir_length = be64_to_cpu(str->ir_length); 215 ir->ir_length = be64_to_cpu(str->ir_length);
272} 216}
273 217
274void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf) 218void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf)
275{ 219{
276 struct gfs2_inum_range *str = buf; 220 struct gfs2_inum_range *str = buf;
277 221
@@ -279,7 +223,7 @@ void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
279 str->ir_length = cpu_to_be64(ir->ir_length); 223 str->ir_length = cpu_to_be64(ir->ir_length);
280} 224}
281 225
282void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf) 226void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
283{ 227{
284 const struct gfs2_statfs_change *str = buf; 228 const struct gfs2_statfs_change *str = buf;
285 229
@@ -288,7 +232,7 @@ void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
288 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); 232 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
289} 233}
290 234
291void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf) 235void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
292{ 236{
293 struct gfs2_statfs_change *str = buf; 237 struct gfs2_statfs_change *str = buf;
294 238
@@ -297,7 +241,7 @@ void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
297 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes); 241 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
298} 242}
299 243
300void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf) 244void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
301{ 245{
302 const struct gfs2_quota_change *str = buf; 246 const struct gfs2_quota_change *str = buf;
303 247
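Most of the ondisk.c changes follow a single pattern: the on-disk structures keep big-endian fields, the renamed "_host" structures keep cpu-order fields, and small _in/_out helpers convert between the two. A self-contained userspace sketch of that pattern, with invented structure and field names (the glibc <endian.h> helpers stand in for the kernel's cpu_to_be*/be*_to_cpu):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct example_ondisk {                 /* on-disk layout: big-endian */
            uint64_t size_be;
            uint32_t nlink_be;
    };

    struct example_host {                   /* in-core copy: cpu order */
            uint64_t size;
            uint32_t nlink;
    };

    static void example_in(struct example_host *h, const struct example_ondisk *d)
    {
            h->size  = be64toh(d->size_be);
            h->nlink = be32toh(d->nlink_be);
    }

    static void example_out(const struct example_host *h, struct example_ondisk *d)
    {
            d->size_be  = htobe64(h->size);
            d->nlink_be = htobe32(h->nlink);
    }

    int main(void)
    {
            struct example_host h = { .size = 4096, .nlink = 1 };
            struct example_ondisk d;

            example_out(&h, &d);            /* in-core -> on-disk */
            example_in(&h, &d);             /* on-disk -> in-core */
            printf("size=%llu nlink=%u\n", (unsigned long long)h.size, h.nlink);
            return 0;
    }
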
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 015640b3f123..d8d69a72a10d 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -156,19 +156,6 @@ out_ignore:
156 return 0; 156 return 0;
157} 157}
158 158
159static int zero_readpage(struct page *page)
160{
161 void *kaddr;
162
163 kaddr = kmap_atomic(page, KM_USER0);
164 memset(kaddr, 0, PAGE_CACHE_SIZE);
165 kunmap_atomic(kaddr, KM_USER0);
166
167 SetPageUptodate(page);
168
169 return 0;
170}
171
172/** 159/**
173 * stuffed_readpage - Fill in a Linux page with stuffed file data 160 * stuffed_readpage - Fill in a Linux page with stuffed file data
174 * @ip: the inode 161 * @ip: the inode
@@ -183,9 +170,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
183 void *kaddr; 170 void *kaddr;
184 int error; 171 int error;
185 172
186 /* Only the first page of a stuffed file might contain data */ 173 BUG_ON(page->index);
187 if (unlikely(page->index))
188 return zero_readpage(page);
189 174
190 error = gfs2_meta_inode_buffer(ip, &dibh); 175 error = gfs2_meta_inode_buffer(ip, &dibh);
191 if (error) 176 if (error)
@@ -230,9 +215,9 @@ static int gfs2_readpage(struct file *file, struct page *page)
230 /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */ 215 /* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
231 goto skip_lock; 216 goto skip_lock;
232 } 217 }
233 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh); 218 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
234 do_unlock = 1; 219 do_unlock = 1;
235 error = gfs2_glock_nq_m_atime(1, &gh); 220 error = gfs2_glock_nq_atime(&gh);
236 if (unlikely(error)) 221 if (unlikely(error))
237 goto out_unlock; 222 goto out_unlock;
238 } 223 }
@@ -254,6 +239,8 @@ skip_lock:
254out: 239out:
255 return error; 240 return error;
256out_unlock: 241out_unlock:
242 if (error == GLR_TRYFAILED)
243 error = AOP_TRUNCATED_PAGE;
257 unlock_page(page); 244 unlock_page(page);
258 if (do_unlock) 245 if (do_unlock)
259 gfs2_holder_uninit(&gh); 246 gfs2_holder_uninit(&gh);
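In the readpage path above, a failed try-lock on the glock (GLR_TRYFAILED) is now reported to the VFS as AOP_TRUNCATED_PAGE, so the caller releases the page lock and retries rather than sleeping on the glock while the page is locked. A rough user-space model of that retry convention, with pthread mutexes standing in for the page lock and the glock; RETRY and the function names are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>

#define RETRY 1   /* stands in for AOP_TRUNCATED_PAGE */

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;

static int readpage_locked(void)
{
	/* Called with page_lock held; never block on the glock here. */
	if (pthread_mutex_trylock(&glock) != 0)
		return RETRY;            /* caller must drop page_lock and retry */
	/* ... fill the page from the inode ... */
	pthread_mutex_unlock(&glock);
	return 0;
}

int main(void)
{
	int ret;

	do {
		pthread_mutex_lock(&page_lock);
		ret = readpage_locked();
		pthread_mutex_unlock(&page_lock);   /* dropped on RETRY as well */
	} while (ret == RETRY);

	printf("read completed\n");
	return 0;
}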
@@ -293,9 +280,9 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
293 goto skip_lock; 280 goto skip_lock;
294 } 281 }
295 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 282 gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
296 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh); 283 LM_FLAG_TRY_1CB|GL_ATIME, &gh);
297 do_unlock = 1; 284 do_unlock = 1;
298 ret = gfs2_glock_nq_m_atime(1, &gh); 285 ret = gfs2_glock_nq_atime(&gh);
299 if (ret == GLR_TRYFAILED) 286 if (ret == GLR_TRYFAILED)
300 goto out_noerror; 287 goto out_noerror;
301 if (unlikely(ret)) 288 if (unlikely(ret))
@@ -366,10 +353,13 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
366 unsigned int write_len = to - from; 353 unsigned int write_len = to - from;
367 354
368 355
369 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh); 356 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|LM_FLAG_TRY_1CB, &ip->i_gh);
370 error = gfs2_glock_nq_m_atime(1, &ip->i_gh); 357 error = gfs2_glock_nq_atime(&ip->i_gh);
371 if (error) 358 if (unlikely(error)) {
359 if (error == GLR_TRYFAILED)
360 error = AOP_TRUNCATED_PAGE;
372 goto out_uninit; 361 goto out_uninit;
362 }
373 363
374 gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks); 364 gfs2_write_calc_reserv(ip, write_len, &data_blocks, &ind_blocks);
375 365
@@ -386,7 +376,7 @@ static int gfs2_prepare_write(struct file *file, struct page *page,
386 if (error) 376 if (error)
387 goto out_alloc_put; 377 goto out_alloc_put;
388 378
389 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 379 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
390 if (error) 380 if (error)
391 goto out_qunlock; 381 goto out_qunlock;
392 382
@@ -482,8 +472,10 @@ static int gfs2_commit_write(struct file *file, struct page *page,
482 472
483 SetPageUptodate(page); 473 SetPageUptodate(page);
484 474
485 if (inode->i_size < file_size) 475 if (inode->i_size < file_size) {
486 i_size_write(inode, file_size); 476 i_size_write(inode, file_size);
477 mark_inode_dirty(inode);
478 }
487 } else { 479 } else {
488 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || 480 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
489 gfs2_is_jdata(ip)) 481 gfs2_is_jdata(ip))
@@ -498,11 +490,6 @@ static int gfs2_commit_write(struct file *file, struct page *page,
498 di->di_size = cpu_to_be64(inode->i_size); 490 di->di_size = cpu_to_be64(inode->i_size);
499 } 491 }
500 492
501 di->di_mode = cpu_to_be32(inode->i_mode);
502 di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
503 di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
504 di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
505
506 brelse(dibh); 493 brelse(dibh);
507 gfs2_trans_end(sdp); 494 gfs2_trans_end(sdp);
508 if (al->al_requested) { 495 if (al->al_requested) {
@@ -624,7 +611,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
624 * on this path. All we need change is atime. 611 * on this path. All we need change is atime.
625 */ 612 */
626 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 613 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
627 rv = gfs2_glock_nq_m_atime(1, &gh); 614 rv = gfs2_glock_nq_atime(&gh);
628 if (rv) 615 if (rv)
629 goto out; 616 goto out;
630 617
@@ -737,6 +724,9 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
737 if (!atomic_read(&aspace->i_writecount)) 724 if (!atomic_read(&aspace->i_writecount))
738 return 0; 725 return 0;
739 726
727 if (!(gfp_mask & __GFP_WAIT))
728 return 0;
729
740 if (time_after_eq(jiffies, t)) { 730 if (time_after_eq(jiffies, t)) {
741 stuck_releasepage(bh); 731 stuck_releasepage(bh);
742 /* should we withdraw here? */ 732 /* should we withdraw here? */
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index 00041b1b8025..d355899585d8 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -43,7 +43,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
43 struct inode *inode = dentry->d_inode; 43 struct inode *inode = dentry->d_inode;
44 struct gfs2_holder d_gh; 44 struct gfs2_holder d_gh;
45 struct gfs2_inode *ip; 45 struct gfs2_inode *ip;
46 struct gfs2_inum inum; 46 struct gfs2_inum_host inum;
47 unsigned int type; 47 unsigned int type;
48 int error; 48 int error;
49 49
@@ -76,7 +76,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
76 if (!gfs2_inum_equal(&ip->i_num, &inum)) 76 if (!gfs2_inum_equal(&ip->i_num, &inum))
77 goto invalid_gunlock; 77 goto invalid_gunlock;
78 78
79 if (IF2DT(ip->i_di.di_mode) != type) { 79 if (IF2DT(ip->i_inode.i_mode) != type) {
80 gfs2_consist_inode(dip); 80 gfs2_consist_inode(dip);
81 goto fail_gunlock; 81 goto fail_gunlock;
82 } 82 }
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 86127d93bd35..b4e7b8775315 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -27,15 +27,16 @@
27#include "util.h" 27#include "util.h"
28 28
29static struct dentry *gfs2_decode_fh(struct super_block *sb, 29static struct dentry *gfs2_decode_fh(struct super_block *sb,
30 __u32 *fh, 30 __u32 *p,
31 int fh_len, 31 int fh_len,
32 int fh_type, 32 int fh_type,
33 int (*acceptable)(void *context, 33 int (*acceptable)(void *context,
34 struct dentry *dentry), 34 struct dentry *dentry),
35 void *context) 35 void *context)
36{ 36{
37 __be32 *fh = (__force __be32 *)p;
37 struct gfs2_fh_obj fh_obj; 38 struct gfs2_fh_obj fh_obj;
38 struct gfs2_inum *this, parent; 39 struct gfs2_inum_host *this, parent;
39 40
40 if (fh_type != fh_len) 41 if (fh_type != fh_len)
41 return NULL; 42 return NULL;
@@ -65,9 +66,10 @@ static struct dentry *gfs2_decode_fh(struct super_block *sb,
65 acceptable, context); 66 acceptable, context);
66} 67}
67 68
68static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len, 69static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
69 int connectable) 70 int connectable)
70{ 71{
72 __be32 *fh = (__force __be32 *)p;
71 struct inode *inode = dentry->d_inode; 73 struct inode *inode = dentry->d_inode;
72 struct super_block *sb = inode->i_sb; 74 struct super_block *sb = inode->i_sb;
73 struct gfs2_inode *ip = GFS2_I(inode); 75 struct gfs2_inode *ip = GFS2_I(inode);
@@ -76,14 +78,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
76 (connectable && *len < GFS2_LARGE_FH_SIZE)) 78 (connectable && *len < GFS2_LARGE_FH_SIZE))
77 return 255; 79 return 255;
78 80
79 fh[0] = ip->i_num.no_formal_ino >> 32; 81 fh[0] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
80 fh[0] = cpu_to_be32(fh[0]); 82 fh[1] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
81 fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF; 83 fh[2] = cpu_to_be32(ip->i_num.no_addr >> 32);
82 fh[1] = cpu_to_be32(fh[1]); 84 fh[3] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
83 fh[2] = ip->i_num.no_addr >> 32;
84 fh[2] = cpu_to_be32(fh[2]);
85 fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
86 fh[3] = cpu_to_be32(fh[3]);
87 *len = GFS2_SMALL_FH_SIZE; 85 *len = GFS2_SMALL_FH_SIZE;
88 86
89 if (!connectable || inode == sb->s_root->d_inode) 87 if (!connectable || inode == sb->s_root->d_inode)
@@ -95,14 +93,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
95 igrab(inode); 93 igrab(inode);
96 spin_unlock(&dentry->d_lock); 94 spin_unlock(&dentry->d_lock);
97 95
98 fh[4] = ip->i_num.no_formal_ino >> 32; 96 fh[4] = cpu_to_be32(ip->i_num.no_formal_ino >> 32);
99 fh[4] = cpu_to_be32(fh[4]); 97 fh[5] = cpu_to_be32(ip->i_num.no_formal_ino & 0xFFFFFFFF);
100 fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF; 98 fh[6] = cpu_to_be32(ip->i_num.no_addr >> 32);
101 fh[5] = cpu_to_be32(fh[5]); 99 fh[7] = cpu_to_be32(ip->i_num.no_addr & 0xFFFFFFFF);
102 fh[6] = ip->i_num.no_addr >> 32;
103 fh[6] = cpu_to_be32(fh[6]);
104 fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
105 fh[7] = cpu_to_be32(fh[7]);
106 100
107 fh[8] = cpu_to_be32(inode->i_mode); 101 fh[8] = cpu_to_be32(inode->i_mode);
108 fh[9] = 0; /* pad to double word */ 102 fh[9] = 0; /* pad to double word */
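The rewritten gfs2_encode_fh() above packs each 64-bit inode number into two big-endian 32-bit file-handle words in one expression per word instead of the previous two-step form. A self-contained sketch of that packing with hypothetical pack_u64()/unpack_u64() helpers; htobe32()/be32toh() stand in for cpu_to_be32()/be32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static void pack_u64(uint32_t fh[2], uint64_t v)
{
	fh[0] = htobe32((uint32_t)(v >> 32));        /* high word first */
	fh[1] = htobe32((uint32_t)(v & 0xFFFFFFFF)); /* then low word */
}

static uint64_t unpack_u64(const uint32_t fh[2])
{
	return ((uint64_t)be32toh(fh[0]) << 32) | be32toh(fh[1]);
}

int main(void)
{
	uint32_t fh[2];
	uint64_t no_addr = 0x123456789abcdef0ULL;

	pack_u64(fh, no_addr);
	printf("round trip ok: %d\n", unpack_u64(fh) == no_addr);
	return 0;
}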
@@ -114,12 +108,12 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
114} 108}
115 109
116struct get_name_filldir { 110struct get_name_filldir {
117 struct gfs2_inum inum; 111 struct gfs2_inum_host inum;
118 char *name; 112 char *name;
119}; 113};
120 114
121static int get_name_filldir(void *opaque, const char *name, unsigned int length, 115static int get_name_filldir(void *opaque, const char *name, unsigned int length,
122 u64 offset, struct gfs2_inum *inum, 116 u64 offset, struct gfs2_inum_host *inum,
123 unsigned int type) 117 unsigned int type)
124{ 118{
125 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque; 119 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
@@ -202,7 +196,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
202{ 196{
203 struct gfs2_sbd *sdp = sb->s_fs_info; 197 struct gfs2_sbd *sdp = sb->s_fs_info;
204 struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj; 198 struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
205 struct gfs2_inum *inum = &fh_obj->this; 199 struct gfs2_inum_host *inum = &fh_obj->this;
206 struct gfs2_holder i_gh, ri_gh, rgd_gh; 200 struct gfs2_holder i_gh, ri_gh, rgd_gh;
207 struct gfs2_rgrpd *rgd; 201 struct gfs2_rgrpd *rgd;
208 struct inode *inode; 202 struct inode *inode;
diff --git a/fs/gfs2/ops_export.h b/fs/gfs2/ops_export.h
index 09aca5046fb1..f925a955b3b8 100644
--- a/fs/gfs2/ops_export.h
+++ b/fs/gfs2/ops_export.h
@@ -15,7 +15,7 @@
15 15
16extern struct export_operations gfs2_export_ops; 16extern struct export_operations gfs2_export_ops;
17struct gfs2_fh_obj { 17struct gfs2_fh_obj {
18 struct gfs2_inum this; 18 struct gfs2_inum_host this;
19 __u32 imode; 19 __u32 imode;
20}; 20};
21 21
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 3064f133bf3c..b3f1e0349ae0 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -22,6 +22,7 @@
22#include <linux/ext2_fs.h> 22#include <linux/ext2_fs.h>
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <linux/lm_interface.h> 24#include <linux/lm_interface.h>
25#include <linux/writeback.h>
25#include <asm/uaccess.h> 26#include <asm/uaccess.h>
26 27
27#include "gfs2.h" 28#include "gfs2.h"
@@ -71,7 +72,7 @@ static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
71 size = count; 72 size = count;
72 73
73 kaddr = kmap(page); 74 kaddr = kmap(page);
74 memcpy(desc->arg.buf, kaddr + offset, size); 75 memcpy(desc->arg.data, kaddr + offset, size);
75 kunmap(page); 76 kunmap(page);
76 77
77 desc->count = count - size; 78 desc->count = count - size;
@@ -86,7 +87,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
86 struct inode *inode = &ip->i_inode; 87 struct inode *inode = &ip->i_inode;
87 read_descriptor_t desc; 88 read_descriptor_t desc;
88 desc.written = 0; 89 desc.written = 0;
89 desc.arg.buf = buf; 90 desc.arg.data = buf;
90 desc.count = size; 91 desc.count = size;
91 desc.error = 0; 92 desc.error = 0;
92 do_generic_mapping_read(inode->i_mapping, ra_state, 93 do_generic_mapping_read(inode->i_mapping, ra_state,
@@ -139,7 +140,7 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
139 */ 140 */
140 141
141static int filldir_func(void *opaque, const char *name, unsigned int length, 142static int filldir_func(void *opaque, const char *name, unsigned int length,
142 u64 offset, struct gfs2_inum *inum, 143 u64 offset, struct gfs2_inum_host *inum,
143 unsigned int type) 144 unsigned int type)
144{ 145{
145 struct filldir_reg *fdr = (struct filldir_reg *)opaque; 146 struct filldir_reg *fdr = (struct filldir_reg *)opaque;
@@ -253,7 +254,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
253 u32 fsflags; 254 u32 fsflags;
254 255
255 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh); 256 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
256 error = gfs2_glock_nq_m_atime(1, &gh); 257 error = gfs2_glock_nq_atime(&gh);
257 if (error) 258 if (error)
258 return error; 259 return error;
259 260
@@ -266,6 +267,24 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
266 return error; 267 return error;
267} 268}
268 269
270void gfs2_set_inode_flags(struct inode *inode)
271{
272 struct gfs2_inode *ip = GFS2_I(inode);
273 struct gfs2_dinode_host *di = &ip->i_di;
274 unsigned int flags = inode->i_flags;
275
276 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
277 if (di->di_flags & GFS2_DIF_IMMUTABLE)
278 flags |= S_IMMUTABLE;
279 if (di->di_flags & GFS2_DIF_APPENDONLY)
280 flags |= S_APPEND;
281 if (di->di_flags & GFS2_DIF_NOATIME)
282 flags |= S_NOATIME;
283 if (di->di_flags & GFS2_DIF_SYNC)
284 flags |= S_SYNC;
285 inode->i_flags = flags;
286}
287
269/* Flags that can be set by user space */ 288/* Flags that can be set by user space */
270#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \ 289#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
271 GFS2_DIF_DIRECTIO| \ 290 GFS2_DIF_DIRECTIO| \
@@ -336,8 +355,9 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
336 goto out_trans_end; 355 goto out_trans_end;
337 gfs2_trans_add_bh(ip->i_gl, bh, 1); 356 gfs2_trans_add_bh(ip->i_gl, bh, 1);
338 ip->i_di.di_flags = new_flags; 357 ip->i_di.di_flags = new_flags;
339 gfs2_dinode_out(&ip->i_di, bh->b_data); 358 gfs2_dinode_out(ip, bh->b_data);
340 brelse(bh); 359 brelse(bh);
360 gfs2_set_inode_flags(inode);
341out_trans_end: 361out_trans_end:
342 gfs2_trans_end(sdp); 362 gfs2_trans_end(sdp);
343out: 363out:
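do_gfs2_set_flags() above now calls the new gfs2_set_inode_flags() after writing the dinode, re-deriving the VFS S_* bits from the on-disk GFS2_DIF_* flags. A small user-space model of that clear-then-translate step, using made-up flag values rather than the real constants; clearing the managed bits first is what lets a cleared on-disk flag also clear its VFS counterpart:

#include <stdio.h>

#define S_IMMUTABLE_BIT   0x01   /* illustrative values, not the kernel macros */
#define S_APPEND_BIT      0x02
#define DIF_IMMUTABLE     0x10
#define DIF_APPENDONLY    0x20

static unsigned int translate_flags(unsigned int vfs_flags, unsigned int di_flags)
{
	vfs_flags &= ~(S_IMMUTABLE_BIT | S_APPEND_BIT); /* drop stale managed bits */
	if (di_flags & DIF_IMMUTABLE)
		vfs_flags |= S_IMMUTABLE_BIT;
	if (di_flags & DIF_APPENDONLY)
		vfs_flags |= S_APPEND_BIT;
	return vfs_flags;
}

int main(void)
{
	unsigned int vfs = S_IMMUTABLE_BIT;          /* stale: IMMUTABLE still set */
	vfs = translate_flags(vfs, DIF_APPENDONLY);  /* disk now says append-only */
	printf("flags now 0x%x (IMMUTABLE dropped, APPEND set)\n", vfs);
	return 0;
}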
@@ -425,7 +445,7 @@ static int gfs2_open(struct inode *inode, struct file *file)
425 gfs2_assert_warn(GFS2_SB(inode), !file->private_data); 445 gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
426 file->private_data = fp; 446 file->private_data = fp;
427 447
428 if (S_ISREG(ip->i_di.di_mode)) { 448 if (S_ISREG(ip->i_inode.i_mode)) {
429 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, 449 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
430 &i_gh); 450 &i_gh);
431 if (error) 451 if (error)
@@ -484,16 +504,40 @@ static int gfs2_close(struct inode *inode, struct file *file)
484 * @file: the file that points to the dentry (we ignore this) 504 * @file: the file that points to the dentry (we ignore this)
485 * @dentry: the dentry that points to the inode to sync 505 * @dentry: the dentry that points to the inode to sync
486 * 506 *
507 * The VFS will flush "normal" data for us. We only need to worry
508 * about metadata here. For journaled data, we just do a log flush
509 * as we can't avoid it. Otherwise we can just bale out if datasync
510 * is set. For stuffed inodes we must flush the log in order to
511 * ensure that all data is on disk.
512 *
513 * The call to write_inode_now() is there to write back metadata and
514 * the inode itself. It also tries to write the data, but that's
515 * (hopefully) a no-op due to the VFS having already called filemap_fdatawrite()
516 * for us.
517 *
487 * Returns: errno 518 * Returns: errno
488 */ 519 */
489 520
490static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync) 521static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
491{ 522{
492 struct gfs2_inode *ip = GFS2_I(dentry->d_inode); 523 struct inode *inode = dentry->d_inode;
524 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
525 int ret = 0;
493 526
494 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); 527 if (gfs2_is_jdata(GFS2_I(inode))) {
528 gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
529 return 0;
530 }
495 531
496 return 0; 532 if (sync_state != 0) {
533 if (!datasync)
534 ret = write_inode_now(inode, 0);
535
536 if (gfs2_is_stuffed(GFS2_I(inode)))
537 gfs2_log_flush(GFS2_SB(inode), GFS2_I(inode)->i_gl);
538 }
539
540 return ret;
497} 541}
498 542
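Taking the comment and body of the new gfs2_fsync() above together: journaled-data inodes only need a log flush, fdatasync can skip the inode writeback, and stuffed inodes force a log flush because their data lives in the dinode block. A user-space model of just that decision logic, with toy types and stub actions (all names illustrative):

#include <stdio.h>

#define I_DIRTY_SYNC      0x1    /* illustrative values */
#define I_DIRTY_DATASYNC  0x2

struct toy_inode {
	unsigned int state;
	int jdata;     /* journaled data? */
	int stuffed;   /* data stored in the dinode block? */
};

static void log_flush(void)        { puts("log flush"); }
static int write_inode_now_(void)  { puts("write inode"); return 0; }

static int toy_fsync(struct toy_inode *inode, int datasync)
{
	unsigned int sync_state = inode->state & (I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	int ret = 0;

	if (inode->jdata) {            /* journaled data: just flush the log */
		log_flush();
		return 0;
	}
	if (sync_state != 0) {
		if (!datasync)         /* full fsync: write back the inode too */
			ret = write_inode_now_();
		if (inode->stuffed)    /* stuffed file: data is only in the log/dinode */
			log_flush();
	}
	return ret;
}

int main(void)
{
	struct toy_inode inode = { .state = I_DIRTY_SYNC, .stuffed = 1 };
	return toy_fsync(&inode, 0);   /* full fsync of a dirty stuffed inode */
}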
499/** 543/**
@@ -515,7 +559,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
515 559
516 if (!(fl->fl_flags & FL_POSIX)) 560 if (!(fl->fl_flags & FL_POSIX))
517 return -ENOLCK; 561 return -ENOLCK;
518 if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 562 if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
519 return -ENOLCK; 563 return -ENOLCK;
520 564
521 if (sdp->sd_args.ar_localflocks) { 565 if (sdp->sd_args.ar_localflocks) {
@@ -617,7 +661,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
617 661
618 if (!(fl->fl_flags & FL_FLOCK)) 662 if (!(fl->fl_flags & FL_FLOCK))
619 return -ENOLCK; 663 return -ENOLCK;
620 if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID) 664 if ((ip->i_inode.i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
621 return -ENOLCK; 665 return -ENOLCK;
622 666
623 if (sdp->sd_args.ar_localflocks) 667 if (sdp->sd_args.ar_localflocks)
diff --git a/fs/gfs2/ops_file.h b/fs/gfs2/ops_file.h
index ce319f89ec8e..7e5d8ec9c846 100644
--- a/fs/gfs2/ops_file.h
+++ b/fs/gfs2/ops_file.h
@@ -17,7 +17,7 @@ extern struct file gfs2_internal_file_sentinel;
17extern int gfs2_internal_read(struct gfs2_inode *ip, 17extern int gfs2_internal_read(struct gfs2_inode *ip,
18 struct file_ra_state *ra_state, 18 struct file_ra_state *ra_state,
19 char *buf, loff_t *pos, unsigned size); 19 char *buf, loff_t *pos, unsigned size);
20 20extern void gfs2_set_inode_flags(struct inode *inode);
21extern const struct file_operations gfs2_file_fops; 21extern const struct file_operations gfs2_file_fops;
22extern const struct file_operations gfs2_dir_fops; 22extern const struct file_operations gfs2_dir_fops;
23 23
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 882873a6bd69..d14e139d2674 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -237,7 +237,7 @@ fail:
237} 237}
238 238
239static struct inode *gfs2_lookup_root(struct super_block *sb, 239static struct inode *gfs2_lookup_root(struct super_block *sb,
240 struct gfs2_inum *inum) 240 struct gfs2_inum_host *inum)
241{ 241{
242 return gfs2_inode_lookup(sb, inum, DT_DIR); 242 return gfs2_inode_lookup(sb, inum, DT_DIR);
243} 243}
@@ -246,7 +246,7 @@ static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
246{ 246{
247 struct super_block *sb = sdp->sd_vfs; 247 struct super_block *sb = sdp->sd_vfs;
248 struct gfs2_holder sb_gh; 248 struct gfs2_holder sb_gh;
249 struct gfs2_inum *inum; 249 struct gfs2_inum_host *inum;
250 struct inode *inode; 250 struct inode *inode;
251 int error = 0; 251 int error = 0;
252 252
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index ef6e5ed70e94..636dda4c7d38 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -59,7 +59,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
59 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 59 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
60 60
61 for (;;) { 61 for (;;) {
62 inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode); 62 inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode, 0);
63 if (!IS_ERR(inode)) { 63 if (!IS_ERR(inode)) {
64 gfs2_trans_end(sdp); 64 gfs2_trans_end(sdp);
65 if (dip->i_alloc.al_rgd) 65 if (dip->i_alloc.al_rgd)
@@ -144,7 +144,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
144 int alloc_required; 144 int alloc_required;
145 int error; 145 int error;
146 146
147 if (S_ISDIR(ip->i_di.di_mode)) 147 if (S_ISDIR(inode->i_mode))
148 return -EPERM; 148 return -EPERM;
149 149
150 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 150 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
@@ -169,7 +169,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
169 } 169 }
170 170
171 error = -EINVAL; 171 error = -EINVAL;
172 if (!dip->i_di.di_nlink) 172 if (!dip->i_inode.i_nlink)
173 goto out_gunlock; 173 goto out_gunlock;
174 error = -EFBIG; 174 error = -EFBIG;
175 if (dip->i_di.di_entries == (u32)-1) 175 if (dip->i_di.di_entries == (u32)-1)
@@ -178,10 +178,10 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
178 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 178 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
179 goto out_gunlock; 179 goto out_gunlock;
180 error = -EINVAL; 180 error = -EINVAL;
181 if (!ip->i_di.di_nlink) 181 if (!ip->i_inode.i_nlink)
182 goto out_gunlock; 182 goto out_gunlock;
183 error = -EMLINK; 183 error = -EMLINK;
184 if (ip->i_di.di_nlink == (u32)-1) 184 if (ip->i_inode.i_nlink == (u32)-1)
185 goto out_gunlock; 185 goto out_gunlock;
186 186
187 alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name); 187 alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
@@ -196,8 +196,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
196 if (error) 196 if (error)
197 goto out_alloc; 197 goto out_alloc;
198 198
199 error = gfs2_quota_check(dip, dip->i_di.di_uid, 199 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
200 dip->i_di.di_gid);
201 if (error) 200 if (error)
202 goto out_gunlock_q; 201 goto out_gunlock_q;
203 202
@@ -220,7 +219,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
220 } 219 }
221 220
222 error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num, 221 error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num,
223 IF2DT(ip->i_di.di_mode)); 222 IF2DT(inode->i_mode));
224 if (error) 223 if (error)
225 goto out_end_trans; 224 goto out_end_trans;
226 225
@@ -326,7 +325,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
326 325
327 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 326 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
328 327
329 inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO); 328 inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO, 0);
330 if (IS_ERR(inode)) { 329 if (IS_ERR(inode)) {
331 gfs2_holder_uninit(ghs); 330 gfs2_holder_uninit(ghs);
332 return PTR_ERR(inode); 331 return PTR_ERR(inode);
@@ -339,7 +338,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
339 error = gfs2_meta_inode_buffer(ip, &dibh); 338 error = gfs2_meta_inode_buffer(ip, &dibh);
340 339
341 if (!gfs2_assert_withdraw(sdp, !error)) { 340 if (!gfs2_assert_withdraw(sdp, !error)) {
342 gfs2_dinode_out(&ip->i_di, dibh->b_data); 341 gfs2_dinode_out(ip, dibh->b_data);
343 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname, 342 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
344 size); 343 size);
345 brelse(dibh); 344 brelse(dibh);
@@ -379,7 +378,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
379 378
380 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 379 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
381 380
382 inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode); 381 inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode, 0);
383 if (IS_ERR(inode)) { 382 if (IS_ERR(inode)) {
384 gfs2_holder_uninit(ghs); 383 gfs2_holder_uninit(ghs);
385 return PTR_ERR(inode); 384 return PTR_ERR(inode);
@@ -387,10 +386,9 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
387 386
388 ip = ghs[1].gh_gl->gl_object; 387 ip = ghs[1].gh_gl->gl_object;
389 388
390 ip->i_di.di_nlink = 2; 389 ip->i_inode.i_nlink = 2;
391 ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); 390 ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
392 ip->i_di.di_flags |= GFS2_DIF_JDATA; 391 ip->i_di.di_flags |= GFS2_DIF_JDATA;
393 ip->i_di.di_payload_format = GFS2_FORMAT_DE;
394 ip->i_di.di_entries = 2; 392 ip->i_di.di_entries = 2;
395 393
396 error = gfs2_meta_inode_buffer(ip, &dibh); 394 error = gfs2_meta_inode_buffer(ip, &dibh);
@@ -414,7 +412,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
414 gfs2_inum_out(&dip->i_num, &dent->de_inum); 412 gfs2_inum_out(&dip->i_num, &dent->de_inum);
415 dent->de_type = cpu_to_be16(DT_DIR); 413 dent->de_type = cpu_to_be16(DT_DIR);
416 414
417 gfs2_dinode_out(&ip->i_di, di); 415 gfs2_dinode_out(ip, di);
418 416
419 brelse(dibh); 417 brelse(dibh);
420 } 418 }
@@ -467,7 +465,7 @@ static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
467 465
468 if (ip->i_di.di_entries < 2) { 466 if (ip->i_di.di_entries < 2) {
469 if (gfs2_consist_inode(ip)) 467 if (gfs2_consist_inode(ip))
470 gfs2_dinode_print(&ip->i_di); 468 gfs2_dinode_print(ip);
471 error = -EIO; 469 error = -EIO;
472 goto out_gunlock; 470 goto out_gunlock;
473 } 471 }
@@ -504,47 +502,19 @@ out:
504static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, 502static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
505 dev_t dev) 503 dev_t dev)
506{ 504{
507 struct gfs2_inode *dip = GFS2_I(dir), *ip; 505 struct gfs2_inode *dip = GFS2_I(dir);
508 struct gfs2_sbd *sdp = GFS2_SB(dir); 506 struct gfs2_sbd *sdp = GFS2_SB(dir);
509 struct gfs2_holder ghs[2]; 507 struct gfs2_holder ghs[2];
510 struct inode *inode; 508 struct inode *inode;
511 struct buffer_head *dibh;
512 u32 major = 0, minor = 0;
513 int error;
514
515 switch (mode & S_IFMT) {
516 case S_IFBLK:
517 case S_IFCHR:
518 major = MAJOR(dev);
519 minor = MINOR(dev);
520 break;
521 case S_IFIFO:
522 case S_IFSOCK:
523 break;
524 default:
525 return -EOPNOTSUPP;
526 };
527 509
528 gfs2_holder_init(dip->i_gl, 0, 0, ghs); 510 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
529 511
530 inode = gfs2_createi(ghs, &dentry->d_name, mode); 512 inode = gfs2_createi(ghs, &dentry->d_name, mode, dev);
531 if (IS_ERR(inode)) { 513 if (IS_ERR(inode)) {
532 gfs2_holder_uninit(ghs); 514 gfs2_holder_uninit(ghs);
533 return PTR_ERR(inode); 515 return PTR_ERR(inode);
534 } 516 }
535 517
536 ip = ghs[1].gh_gl->gl_object;
537
538 ip->i_di.di_major = major;
539 ip->i_di.di_minor = minor;
540
541 error = gfs2_meta_inode_buffer(ip, &dibh);
542
543 if (!gfs2_assert_withdraw(sdp, !error)) {
544 gfs2_dinode_out(&ip->i_di, dibh->b_data);
545 brelse(dibh);
546 }
547
548 gfs2_trans_end(sdp); 518 gfs2_trans_end(sdp);
549 if (dip->i_alloc.al_rgd) 519 if (dip->i_alloc.al_rgd)
550 gfs2_inplace_release(dip); 520 gfs2_inplace_release(dip);
@@ -592,11 +562,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
592 562
593 /* Make sure we aren't trying to move a dirctory into it's subdir */ 563 /* Make sure we aren't trying to move a dirctory into it's subdir */
594 564
595 if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) { 565 if (S_ISDIR(ip->i_inode.i_mode) && odip != ndip) {
596 dir_rename = 1; 566 dir_rename = 1;
597 567
598 error = gfs2_glock_nq_init(sdp->sd_rename_gl, 568 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 0,
599 LM_ST_EXCLUSIVE, 0,
600 &r_gh); 569 &r_gh);
601 if (error) 570 if (error)
602 goto out; 571 goto out;
@@ -637,10 +606,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
637 if (error) 606 if (error)
638 goto out_gunlock; 607 goto out_gunlock;
639 608
640 if (S_ISDIR(nip->i_di.di_mode)) { 609 if (S_ISDIR(nip->i_inode.i_mode)) {
641 if (nip->i_di.di_entries < 2) { 610 if (nip->i_di.di_entries < 2) {
642 if (gfs2_consist_inode(nip)) 611 if (gfs2_consist_inode(nip))
643 gfs2_dinode_print(&nip->i_di); 612 gfs2_dinode_print(nip);
644 error = -EIO; 613 error = -EIO;
645 goto out_gunlock; 614 goto out_gunlock;
646 } 615 }
@@ -666,7 +635,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
666 }; 635 };
667 636
668 if (odip != ndip) { 637 if (odip != ndip) {
669 if (!ndip->i_di.di_nlink) { 638 if (!ndip->i_inode.i_nlink) {
670 error = -EINVAL; 639 error = -EINVAL;
671 goto out_gunlock; 640 goto out_gunlock;
672 } 641 }
@@ -674,8 +643,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
674 error = -EFBIG; 643 error = -EFBIG;
675 goto out_gunlock; 644 goto out_gunlock;
676 } 645 }
677 if (S_ISDIR(ip->i_di.di_mode) && 646 if (S_ISDIR(ip->i_inode.i_mode) &&
678 ndip->i_di.di_nlink == (u32)-1) { 647 ndip->i_inode.i_nlink == (u32)-1) {
679 error = -EMLINK; 648 error = -EMLINK;
680 goto out_gunlock; 649 goto out_gunlock;
681 } 650 }
@@ -702,8 +671,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
702 if (error) 671 if (error)
703 goto out_alloc; 672 goto out_alloc;
704 673
705 error = gfs2_quota_check(ndip, ndip->i_di.di_uid, 674 error = gfs2_quota_check(ndip, ndip->i_inode.i_uid, ndip->i_inode.i_gid);
706 ndip->i_di.di_gid);
707 if (error) 675 if (error)
708 goto out_gunlock_q; 676 goto out_gunlock_q;
709 677
@@ -729,7 +697,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
729 /* Remove the target file, if it exists */ 697 /* Remove the target file, if it exists */
730 698
731 if (nip) { 699 if (nip) {
732 if (S_ISDIR(nip->i_di.di_mode)) 700 if (S_ISDIR(nip->i_inode.i_mode))
733 error = gfs2_rmdiri(ndip, &ndentry->d_name, nip); 701 error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
734 else { 702 else {
735 error = gfs2_dir_del(ndip, &ndentry->d_name); 703 error = gfs2_dir_del(ndip, &ndentry->d_name);
@@ -760,9 +728,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
760 error = gfs2_meta_inode_buffer(ip, &dibh); 728 error = gfs2_meta_inode_buffer(ip, &dibh);
761 if (error) 729 if (error)
762 goto out_end_trans; 730 goto out_end_trans;
763 ip->i_di.di_ctime = get_seconds(); 731 ip->i_inode.i_ctime.tv_sec = get_seconds();
764 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 732 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
765 gfs2_dinode_out(&ip->i_di, dibh->b_data); 733 gfs2_dinode_out(ip, dibh->b_data);
766 brelse(dibh); 734 brelse(dibh);
767 } 735 }
768 736
@@ -771,7 +739,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
771 goto out_end_trans; 739 goto out_end_trans;
772 740
773 error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num, 741 error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num,
774 IF2DT(ip->i_di.di_mode)); 742 IF2DT(ip->i_inode.i_mode));
775 if (error) 743 if (error)
776 goto out_end_trans; 744 goto out_end_trans;
777 745
@@ -867,6 +835,10 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
867 * @mask: 835 * @mask:
868 * @nd: passed from Linux VFS, ignored by us 836 * @nd: passed from Linux VFS, ignored by us
869 * 837 *
838 * This may be called from the VFS directly, or from within GFS2 with the
839 * inode locked, so we look to see if the glock is already locked and only
840 * lock the glock if it has not already been done.
841 *
870 * Returns: errno 842 * Returns: errno
871 */ 843 */
872 844
@@ -875,15 +847,18 @@ static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
875 struct gfs2_inode *ip = GFS2_I(inode); 847 struct gfs2_inode *ip = GFS2_I(inode);
876 struct gfs2_holder i_gh; 848 struct gfs2_holder i_gh;
877 int error; 849 int error;
850 int unlock = 0;
878 851
879 if (ip->i_vn == ip->i_gl->gl_vn) 852 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
880 return generic_permission(inode, mask, gfs2_check_acl); 853 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
854 if (error)
855 return error;
856 unlock = 1;
857 }
881 858
882 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 859 error = generic_permission(inode, mask, gfs2_check_acl);
883 if (!error) { 860 if (unlock)
884 error = generic_permission(inode, mask, gfs2_check_acl_locked);
885 gfs2_glock_dq_uninit(&i_gh); 861 gfs2_glock_dq_uninit(&i_gh);
886 }
887 862
888 return error; 863 return error;
889} 864}
@@ -914,8 +889,8 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
914 u32 ouid, ogid, nuid, ngid; 889 u32 ouid, ogid, nuid, ngid;
915 int error; 890 int error;
916 891
917 ouid = ip->i_di.di_uid; 892 ouid = inode->i_uid;
918 ogid = ip->i_di.di_gid; 893 ogid = inode->i_gid;
919 nuid = attr->ia_uid; 894 nuid = attr->ia_uid;
920 ngid = attr->ia_gid; 895 ngid = attr->ia_gid;
921 896
@@ -946,10 +921,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
946 921
947 error = inode_setattr(inode, attr); 922 error = inode_setattr(inode, attr);
948 gfs2_assert_warn(sdp, !error); 923 gfs2_assert_warn(sdp, !error);
949 gfs2_inode_attr_out(ip);
950 924
951 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 925 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
952 gfs2_dinode_out(&ip->i_di, dibh->b_data); 926 gfs2_dinode_out(ip, dibh->b_data);
953 brelse(dibh); 927 brelse(dibh);
954 928
955 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) { 929 if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
@@ -1018,6 +992,12 @@ out:
1018 * @dentry: The dentry to stat 992 * @dentry: The dentry to stat
1019 * @stat: The inode's stats 993 * @stat: The inode's stats
1020 * 994 *
995 * This may be called from the VFS directly, or from within GFS2 with the
996 * inode locked, so we look to see if the glock is already locked and only
997 * lock the glock if it has not already been done. Note that it is the NFS
998 * readdirplus operation which causes this to be called (from filldir)
999 * with the glock already held.
1000 *
1021 * Returns: errno 1001 * Returns: errno
1022 */ 1002 */
1023 1003
@@ -1028,14 +1008,20 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1028 struct gfs2_inode *ip = GFS2_I(inode); 1008 struct gfs2_inode *ip = GFS2_I(inode);
1029 struct gfs2_holder gh; 1009 struct gfs2_holder gh;
1030 int error; 1010 int error;
1011 int unlock = 0;
1031 1012
1032 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 1013 if (gfs2_glock_is_locked_by_me(ip->i_gl) == 0) {
1033 if (!error) { 1014 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
1034 generic_fillattr(inode, stat); 1015 if (error)
1035 gfs2_glock_dq_uninit(&gh); 1016 return error;
1017 unlock = 1;
1036 } 1018 }
1037 1019
1038 return error; 1020 generic_fillattr(inode, stat);
1021 if (unlock)
1022 gfs2_glock_dq_uninit(&gh);
1023
1024 return 0;
1039} 1025}
1040 1026
1041static int gfs2_setxattr(struct dentry *dentry, const char *name, 1027static int gfs2_setxattr(struct dentry *dentry, const char *name,
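Both gfs2_permission() and gfs2_getattr() above now test gfs2_glock_is_locked_by_me() and only acquire (and later release) the glock when the caller does not already hold it, which is what resolves the recursive-locking cases named in the shortlog. A user-space sketch of the same pattern, using a pthread mutex plus an explicit owner record in place of the glock; every name here is illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;            /* valid only while owned != 0 */
static int owned;

static int locked_by_me(void)
{
	return owned && pthread_equal(owner, pthread_self());
}

static void do_getattr(void)
{
	int unlock = 0;

	if (!locked_by_me()) {         /* only take the lock if we don't hold it */
		pthread_mutex_lock(&lock);
		owner = pthread_self();
		owned = 1;
		unlock = 1;
	}
	/* ... fill in attributes under the lock ... */
	printf("getattr done (took lock: %d)\n", unlock);
	if (unlock) {
		owned = 0;
		pthread_mutex_unlock(&lock);
	}
}

int main(void)
{
	do_getattr();                  /* caller does not hold the lock */

	pthread_mutex_lock(&lock);     /* caller already holds the lock... */
	owner = pthread_self();
	owned = 1;
	do_getattr();                  /* ...so do_getattr() must not re-lock */
	owned = 0;
	pthread_mutex_unlock(&lock);
	return 0;
}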
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index b47d9598c047..7685b46f934b 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -157,7 +157,8 @@ static void gfs2_write_super(struct super_block *sb)
157static int gfs2_sync_fs(struct super_block *sb, int wait) 157static int gfs2_sync_fs(struct super_block *sb, int wait)
158{ 158{
159 sb->s_dirt = 0; 159 sb->s_dirt = 0;
160 gfs2_log_flush(sb->s_fs_info, NULL); 160 if (wait)
161 gfs2_log_flush(sb->s_fs_info, NULL);
161 return 0; 162 return 0;
162} 163}
163 164
@@ -215,7 +216,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
215{ 216{
216 struct super_block *sb = dentry->d_inode->i_sb; 217 struct super_block *sb = dentry->d_inode->i_sb;
217 struct gfs2_sbd *sdp = sb->s_fs_info; 218 struct gfs2_sbd *sdp = sb->s_fs_info;
218 struct gfs2_statfs_change sc; 219 struct gfs2_statfs_change_host sc;
219 int error; 220 int error;
220 221
221 if (gfs2_tune_get(sdp, gt_statfs_slow)) 222 if (gfs2_tune_get(sdp, gt_statfs_slow))
@@ -293,8 +294,6 @@ static void gfs2_clear_inode(struct inode *inode)
293 */ 294 */
294 if (inode->i_private) { 295 if (inode->i_private) {
295 struct gfs2_inode *ip = GFS2_I(inode); 296 struct gfs2_inode *ip = GFS2_I(inode);
296 gfs2_glock_inode_squish(inode);
297 gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
298 ip->i_gl->gl_object = NULL; 297 ip->i_gl->gl_object = NULL;
299 gfs2_glock_schedule_for_reclaim(ip->i_gl); 298 gfs2_glock_schedule_for_reclaim(ip->i_gl);
300 gfs2_glock_put(ip->i_gl); 299 gfs2_glock_put(ip->i_gl);
@@ -395,7 +394,7 @@ static void gfs2_delete_inode(struct inode *inode)
395 if (!inode->i_private) 394 if (!inode->i_private)
396 goto out; 395 goto out;
397 396
398 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh); 397 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB, &gh);
399 if (unlikely(error)) { 398 if (unlikely(error)) {
400 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 399 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
401 goto out; 400 goto out;
@@ -407,7 +406,7 @@ static void gfs2_delete_inode(struct inode *inode)
407 if (error) 406 if (error)
408 goto out_uninit; 407 goto out_uninit;
409 408
410 if (S_ISDIR(ip->i_di.di_mode) && 409 if (S_ISDIR(inode->i_mode) &&
411 (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { 410 (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
412 error = gfs2_dir_exhash_dealloc(ip); 411 error = gfs2_dir_exhash_dealloc(ip);
413 if (error) 412 if (error)
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
index 5453d2947ab3..45a5f11fc39a 100644
--- a/fs/gfs2/ops_vm.c
+++ b/fs/gfs2/ops_vm.c
@@ -76,7 +76,7 @@ static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
76 if (error) 76 if (error)
77 goto out; 77 goto out;
78 78
79 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid); 79 error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
80 if (error) 80 if (error)
81 goto out_gunlock_q; 81 goto out_gunlock_q;
82 82
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a3deae7416c9..d0db881b55d2 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -452,19 +452,19 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) 452 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
453 return 0; 453 return 0;
454 454
455 error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd); 455 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
456 if (error) 456 if (error)
457 goto out; 457 goto out;
458 al->al_qd_num++; 458 al->al_qd_num++;
459 qd++; 459 qd++;
460 460
461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd); 461 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
462 if (error) 462 if (error)
463 goto out; 463 goto out;
464 al->al_qd_num++; 464 al->al_qd_num++;
465 qd++; 465 qd++;
466 466
467 if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) { 467 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd); 468 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
469 if (error) 469 if (error)
470 goto out; 470 goto out;
@@ -472,7 +472,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
472 qd++; 472 qd++;
473 } 473 }
474 474
475 if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) { 475 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd); 476 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
477 if (error) 477 if (error)
478 goto out; 478 goto out;
@@ -539,8 +539,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
539 qc->qc_id = cpu_to_be32(qd->qd_id); 539 qc->qc_id = cpu_to_be32(qd->qd_id);
540 } 540 }
541 541
542 x = qc->qc_change; 542 x = be64_to_cpu(qc->qc_change) + change;
543 x = be64_to_cpu(x) + change;
544 qc->qc_change = cpu_to_be64(x); 543 qc->qc_change = cpu_to_be64(x);
545 544
546 spin_lock(&sdp->sd_quota_spin); 545 spin_lock(&sdp->sd_quota_spin);
@@ -743,7 +742,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
743 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; 742 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
744 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); 743 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
745 struct gfs2_holder i_gh; 744 struct gfs2_holder i_gh;
746 struct gfs2_quota q; 745 struct gfs2_quota_host q;
747 char buf[sizeof(struct gfs2_quota)]; 746 char buf[sizeof(struct gfs2_quota)];
748 struct file_ra_state ra_state; 747 struct file_ra_state ra_state;
749 int error; 748 int error;
@@ -1103,7 +1102,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
1103 1102
1104 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; 1103 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1105 y++, slot++) { 1104 y++, slot++) {
1106 struct gfs2_quota_change qc; 1105 struct gfs2_quota_change_host qc;
1107 struct gfs2_quota_data *qd; 1106 struct gfs2_quota_data *qd;
1108 1107
1109 gfs2_quota_change_in(&qc, bh->b_data + 1108 gfs2_quota_change_in(&qc, bh->b_data +
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 62cd223819b7..d0c806b85c86 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -132,10 +132,11 @@ void gfs2_revoke_clean(struct gfs2_sbd *sdp)
132 */ 132 */
133 133
134static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, 134static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
135 struct gfs2_log_header *head) 135 struct gfs2_log_header_host *head)
136{ 136{
137 struct buffer_head *bh; 137 struct buffer_head *bh;
138 struct gfs2_log_header lh; 138 struct gfs2_log_header_host lh;
139 const u32 nothing = 0;
139 u32 hash; 140 u32 hash;
140 int error; 141 int error;
141 142
@@ -143,11 +144,11 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
143 if (error) 144 if (error)
144 return error; 145 return error;
145 146
146 memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header)); 147 hash = crc32_le((u32)~0, bh->b_data, sizeof(struct gfs2_log_header) -
147 lh.lh_hash = 0; 148 sizeof(u32));
148 hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header)); 149 hash = crc32_le(hash, (unsigned char const *)&nothing, sizeof(nothing));
150 hash ^= (u32)~0;
149 gfs2_log_header_in(&lh, bh->b_data); 151 gfs2_log_header_in(&lh, bh->b_data);
150
151 brelse(bh); 152 brelse(bh);
152 153
153 if (lh.lh_header.mh_magic != GFS2_MAGIC || 154 if (lh.lh_header.mh_magic != GFS2_MAGIC ||
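The hash check above now seeds crc32_le() with ~0, runs it over the header up to (but not including) the stored lh_hash field, continues it over four zero bytes in place of that field, and inverts the result before comparing. A self-contained user-space sketch of the same scheme, with a minimal bitwise crc32_le() (reflected polynomial 0xEDB88320) standing in for the kernel helper and a toy header layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_le(uint32_t crc, const unsigned char *p, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

/* Toy header: the hash sits in the last four bytes, as in struct gfs2_log_header. */
struct toy_log_header {
	unsigned char payload[28];
	uint32_t lh_hash;
};

static uint32_t header_hash(const struct toy_log_header *lh)
{
	const uint32_t nothing = 0;
	uint32_t hash;

	hash = crc32_le((uint32_t)~0, (const unsigned char *)lh,
			sizeof(*lh) - sizeof(uint32_t));
	hash = crc32_le(hash, (const unsigned char *)&nothing, sizeof(nothing));
	return hash ^ (uint32_t)~0;
}

int main(void)
{
	struct toy_log_header lh;

	memset(lh.payload, 0xab, sizeof(lh.payload));
	lh.lh_hash = header_hash(&lh);          /* writer side */
	printf("hash valid: %d\n", header_hash(&lh) == lh.lh_hash); /* reader side */
	return 0;
}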
@@ -174,7 +175,7 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
174 */ 175 */
175 176
176static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, 177static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
177 struct gfs2_log_header *head) 178 struct gfs2_log_header_host *head)
178{ 179{
179 unsigned int orig_blk = *blk; 180 unsigned int orig_blk = *blk;
180 int error; 181 int error;
@@ -205,10 +206,10 @@ static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
205 * Returns: errno 206 * Returns: errno
206 */ 207 */
207 208
208static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 209static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
209{ 210{
210 unsigned int blk = head->lh_blkno; 211 unsigned int blk = head->lh_blkno;
211 struct gfs2_log_header lh; 212 struct gfs2_log_header_host lh;
212 int error; 213 int error;
213 214
214 for (;;) { 215 for (;;) {
@@ -245,9 +246,9 @@ static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
245 * Returns: errno 246 * Returns: errno
246 */ 247 */
247 248
248int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 249int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
249{ 250{
250 struct gfs2_log_header lh_1, lh_m; 251 struct gfs2_log_header_host lh_1, lh_m;
251 u32 blk_1, blk_2, blk_m; 252 u32 blk_1, blk_2, blk_m;
252 int error; 253 int error;
253 254
@@ -320,7 +321,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
320 length = be32_to_cpu(ld->ld_length); 321 length = be32_to_cpu(ld->ld_length);
321 322
322 if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) { 323 if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
323 struct gfs2_log_header lh; 324 struct gfs2_log_header_host lh;
324 error = get_log_header(jd, start, &lh); 325 error = get_log_header(jd, start, &lh);
325 if (!error) { 326 if (!error) {
326 gfs2_replay_incr_blk(sdp, &start); 327 gfs2_replay_incr_blk(sdp, &start);
@@ -363,7 +364,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
363 * Returns: errno 364 * Returns: errno
364 */ 365 */
365 366
366static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head) 367static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
367{ 368{
368 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 369 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
369 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 370 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
@@ -425,7 +426,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
425{ 426{
426 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 427 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
427 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 428 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
428 struct gfs2_log_header head; 429 struct gfs2_log_header_host head;
429 struct gfs2_holder j_gh, ji_gh, t_gh; 430 struct gfs2_holder j_gh, ji_gh, t_gh;
430 unsigned long t; 431 unsigned long t;
431 int ro = 0; 432 int ro = 0;
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 961feedf4d8b..f7235e61c723 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -26,7 +26,7 @@ int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
26void gfs2_revoke_clean(struct gfs2_sbd *sdp); 26void gfs2_revoke_clean(struct gfs2_sbd *sdp);
27 27
28int gfs2_find_jhead(struct gfs2_jdesc *jd, 28int gfs2_find_jhead(struct gfs2_jdesc *jd,
29 struct gfs2_log_header *head); 29 struct gfs2_log_header_host *head);
30int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); 30int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
31void gfs2_check_journals(struct gfs2_sbd *sdp); 31void gfs2_check_journals(struct gfs2_sbd *sdp);
32 32
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index b261385c0065..ff0846528d54 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -253,7 +253,7 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
253 253
254} 254}
255 255
256static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block) 256static inline int rgrp_contains_block(struct gfs2_rindex_host *ri, u64 block)
257{ 257{
258 u64 first = ri->ri_data0; 258 u64 first = ri->ri_data0;
259 u64 last = first + ri->ri_data; 259 u64 last = first + ri->ri_data;
@@ -1217,7 +1217,7 @@ u64 gfs2_alloc_data(struct gfs2_inode *ip)
1217 al->al_alloced++; 1217 al->al_alloced++;
1218 1218
1219 gfs2_statfs_change(sdp, 0, -1, 0); 1219 gfs2_statfs_change(sdp, 0, -1, 0);
1220 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); 1220 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1221 1221
1222 spin_lock(&sdp->sd_rindex_spin); 1222 spin_lock(&sdp->sd_rindex_spin);
1223 rgd->rd_free_clone--; 1223 rgd->rd_free_clone--;
@@ -1261,7 +1261,7 @@ u64 gfs2_alloc_meta(struct gfs2_inode *ip)
1261 al->al_alloced++; 1261 al->al_alloced++;
1262 1262
1263 gfs2_statfs_change(sdp, 0, -1, 0); 1263 gfs2_statfs_change(sdp, 0, -1, 0);
1264 gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid); 1264 gfs2_quota_change(ip, +1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1265 gfs2_trans_add_unrevoke(sdp, block); 1265 gfs2_trans_add_unrevoke(sdp, block);
1266 1266
1267 spin_lock(&sdp->sd_rindex_spin); 1267 spin_lock(&sdp->sd_rindex_spin);
@@ -1337,8 +1337,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1337 gfs2_trans_add_rg(rgd); 1337 gfs2_trans_add_rg(rgd);
1338 1338
1339 gfs2_statfs_change(sdp, 0, +blen, 0); 1339 gfs2_statfs_change(sdp, 0, +blen, 0);
1340 gfs2_quota_change(ip, -(s64)blen, 1340 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1341 ip->i_di.di_uid, ip->i_di.di_gid);
1342} 1341}
1343 1342
1344/** 1343/**
@@ -1366,7 +1365,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1366 gfs2_trans_add_rg(rgd); 1365 gfs2_trans_add_rg(rgd);
1367 1366
1368 gfs2_statfs_change(sdp, 0, +blen, 0); 1367 gfs2_statfs_change(sdp, 0, +blen, 0);
1369 gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid); 1368 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1370 gfs2_meta_wipe(ip, bstart, blen); 1369 gfs2_meta_wipe(ip, bstart, blen);
1371} 1370}
1372 1371
@@ -1411,7 +1410,7 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
1411void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) 1410void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1412{ 1411{
1413 gfs2_free_uninit_di(rgd, ip->i_num.no_addr); 1412 gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
1414 gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid); 1413 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1415 gfs2_meta_wipe(ip, ip->i_num.no_addr, 1); 1414 gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
1416} 1415}
1417 1416
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6a78b1b32e25..43a24f2e5905 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -97,7 +97,7 @@ void gfs2_tune_init(struct gfs2_tune *gt)
97 * changed. 97 * changed.
98 */ 98 */
99 99
100int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent) 100int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent)
101{ 101{
102 unsigned int x; 102 unsigned int x;
103 103
@@ -180,6 +180,24 @@ static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
180 return 0; 180 return 0;
181} 181}
182 182
183/**
184 * gfs2_read_super - Read the gfs2 super block from disk
185 * @sb: The VFS super block
186 * @sector: The location of the super block
187 *
188 * This uses the bio functions to read the super block from disk
189 * because we want to be 100% sure that we never read cached data.
190 * A super block is read twice only during each GFS2 mount and is
191 * never written to by the filesystem. The first time it is read, no
192 * locks are held, and the only details which are looked at are those
193 * relating to the locking protocol. Once locking is up and working,
194 * the sb is read again under the lock to establish the location of
195 * the master directory (contains pointers to journals etc) and the
196 * root directory.
197 *
198 * Returns: A page containing the sb or NULL
199 */
200
183struct page *gfs2_read_super(struct super_block *sb, sector_t sector) 201struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
184{ 202{
185 struct page *page; 203 struct page *page;
@@ -199,7 +217,7 @@ struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
199 return NULL; 217 return NULL;
200 } 218 }
201 219
202 bio->bi_sector = sector; 220 bio->bi_sector = sector * (sb->s_blocksize >> 9);
203 bio->bi_bdev = sb->s_bdev; 221 bio->bi_bdev = sb->s_bdev;
204 bio_add_page(bio, page, PAGE_SIZE, 0); 222 bio_add_page(bio, page, PAGE_SIZE, 0);
205 223
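The bi_sector change above scales the caller's filesystem block number by blocksize/512, since struct bio addresses the device in 512-byte sectors. The same arithmetic as a tiny user-space sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

static uint64_t block_to_sector(uint64_t fs_block, unsigned int blocksize)
{
	return fs_block * (blocksize >> 9);   /* 9 = log2(512) */
}

int main(void)
{
	/* With 4096-byte blocks, filesystem block 16 starts at sector 128. */
	printf("sector = %llu\n",
	       (unsigned long long)block_to_sector(16, 4096));
	return 0;
}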
@@ -508,7 +526,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
508 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 526 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
509 struct gfs2_glock *j_gl = ip->i_gl; 527 struct gfs2_glock *j_gl = ip->i_gl;
510 struct gfs2_holder t_gh; 528 struct gfs2_holder t_gh;
511 struct gfs2_log_header head; 529 struct gfs2_log_header_host head;
512 int error; 530 int error;
513 531
514 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 532 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
@@ -517,7 +535,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
517 return error; 535 return error;
518 536
519 gfs2_meta_cache_flush(ip); 537 gfs2_meta_cache_flush(ip);
520 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA); 538 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
521 539
522 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 540 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
523 if (error) 541 if (error)
@@ -587,9 +605,9 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
587int gfs2_statfs_init(struct gfs2_sbd *sdp) 605int gfs2_statfs_init(struct gfs2_sbd *sdp)
588{ 606{
589 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 607 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
590 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 608 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
591 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 609 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
592 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 610 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
593 struct buffer_head *m_bh, *l_bh; 611 struct buffer_head *m_bh, *l_bh;
594 struct gfs2_holder gh; 612 struct gfs2_holder gh;
595 int error; 613 int error;
@@ -634,7 +652,7 @@ void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
634 s64 dinodes) 652 s64 dinodes)
635{ 653{
636 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 654 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
637 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 655 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
638 struct buffer_head *l_bh; 656 struct buffer_head *l_bh;
639 int error; 657 int error;
640 658
@@ -660,8 +678,8 @@ int gfs2_statfs_sync(struct gfs2_sbd *sdp)
660{ 678{
661 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 679 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
662 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 680 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
663 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 681 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
664 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 682 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
665 struct gfs2_holder gh; 683 struct gfs2_holder gh;
666 struct buffer_head *m_bh, *l_bh; 684 struct buffer_head *m_bh, *l_bh;
667 int error; 685 int error;
@@ -727,10 +745,10 @@ out:
727 * Returns: errno 745 * Returns: errno
728 */ 746 */
729 747
730int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) 748int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
731{ 749{
732 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master; 750 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
733 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local; 751 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
734 752
735 spin_lock(&sdp->sd_statfs_spin); 753 spin_lock(&sdp->sd_statfs_spin);
736 754
@@ -760,7 +778,7 @@ int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
760 */ 778 */
761 779
762static int statfs_slow_fill(struct gfs2_rgrpd *rgd, 780static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
763 struct gfs2_statfs_change *sc) 781 struct gfs2_statfs_change_host *sc)
764{ 782{
765 gfs2_rgrp_verify(rgd); 783 gfs2_rgrp_verify(rgd);
766 sc->sc_total += rgd->rd_ri.ri_data; 784 sc->sc_total += rgd->rd_ri.ri_data;
@@ -782,7 +800,7 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
782 * Returns: errno 800 * Returns: errno
783 */ 801 */
784 802
785int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc) 803int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
786{ 804{
787 struct gfs2_holder ri_gh; 805 struct gfs2_holder ri_gh;
788 struct gfs2_rgrpd *rgd_next; 806 struct gfs2_rgrpd *rgd_next;
@@ -792,7 +810,7 @@ int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
792 int done; 810 int done;
793 int error = 0, err; 811 int error = 0, err;
794 812
795 memset(sc, 0, sizeof(struct gfs2_statfs_change)); 813 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
796 gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); 814 gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
797 if (!gha) 815 if (!gha)
798 return -ENOMEM; 816 return -ENOMEM;
@@ -873,7 +891,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
 	struct gfs2_jdesc *jd;
 	struct lfcc *lfcc;
 	LIST_HEAD(list);
-	struct gfs2_log_header lh;
+	struct gfs2_log_header_host lh;
 	int error;
 
 	error = gfs2_jindex_hold(sdp, &ji_gh);
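
The hunks above only retype the in-core statfs counters to struct gfs2_statfs_change_host. For orientation, the fast statfs path combines the cached master counters with the not-yet-synced local deltas under sd_statfs_spin; the following is a minimal sketch of that combination, not the actual fs/gfs2/super.c body, and the helper name statfs_combine() is made up for illustration:

/* Hypothetical sketch: merge master counters with local deltas under
 * the statfs spinlock, in the spirit of gfs2_statfs_i() above. */
static void statfs_combine(struct gfs2_sbd *sdp,
			   struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);
	sc->sc_total = m_sc->sc_total + l_sc->sc_total;
	sc->sc_free = m_sc->sc_free + l_sc->sc_free;
	sc->sc_dinodes = m_sc->sc_dinodes + l_sc->sc_dinodes;
	spin_unlock(&sdp->sd_statfs_spin);
}
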
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index 5bb443ae0f59..e590b2df11dc 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -14,7 +14,7 @@
 
 void gfs2_tune_init(struct gfs2_tune *gt);
 
-int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent);
+int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent);
 int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
 struct page *gfs2_read_super(struct super_block *sb, sector_t sector);
 
@@ -45,8 +45,8 @@ int gfs2_statfs_init(struct gfs2_sbd *sdp);
 void gfs2_statfs_change(struct gfs2_sbd *sdp,
 			s64 total, s64 free, s64 dinodes);
 int gfs2_statfs_sync(struct gfs2_sbd *sdp);
-int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
-int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
+int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
+int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc);
 
 int gfs2_freeze_fs(struct gfs2_sbd *sdp);
 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
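
Seen from a caller, the retyped prototypes mean the statfs path fills a struct gfs2_statfs_change_host on the stack and copies the three counters into the usual statfs fields. A hypothetical caller, with report_statfs() invented for illustration and the real choice between gfs2_statfs_i() and gfs2_statfs_slow() left out:

/* Hypothetical consumer of the interface declared above. */
static int report_statfs(struct gfs2_sbd *sdp, struct kstatfs *buf)
{
	struct gfs2_statfs_change_host sc;
	int error;

	error = gfs2_statfs_i(sdp, &sc);
	if (error)
		return error;

	buf->f_blocks = sc.sc_total;
	buf->f_bfree = sc.sc_free;
	buf->f_bavail = sc.sc_free;
	buf->f_files = sc.sc_dinodes;
	return 0;
}
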
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0e0ec988f731..983eaf1e06be 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -426,9 +426,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
 } \
 TUNE_ATTR_2(name, name##_store)
 
-TUNE_ATTR(ilimit, 0);
-TUNE_ATTR(ilimit_tries, 0);
-TUNE_ATTR(ilimit_min, 0);
 TUNE_ATTR(demote_secs, 0);
 TUNE_ATTR(incore_log_blocks, 0);
 TUNE_ATTR(log_flush_secs, 0);
@@ -447,7 +444,6 @@ TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(quota_cache_secs, 1);
 TUNE_ATTR(max_atomic_write, 1);
 TUNE_ATTR(stall_secs, 1);
-TUNE_ATTR(entries_per_readdir, 1);
 TUNE_ATTR(greedy_default, 1);
 TUNE_ATTR(greedy_quantum, 1);
 TUNE_ATTR(greedy_max, 1);
@@ -459,9 +455,6 @@ TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
 static struct attribute *tune_attrs[] = {
-	&tune_attr_ilimit.attr,
-	&tune_attr_ilimit_tries.attr,
-	&tune_attr_ilimit_min.attr,
 	&tune_attr_demote_secs.attr,
 	&tune_attr_incore_log_blocks.attr,
 	&tune_attr_log_flush_secs.attr,
@@ -478,7 +471,6 @@ static struct attribute *tune_attrs[] = {
 	&tune_attr_quota_cache_secs.attr,
 	&tune_attr_max_atomic_write.attr,
 	&tune_attr_stall_secs.attr,
-	&tune_attr_entries_per_readdir.attr,
 	&tune_attr_greedy_default.attr,
 	&tune_attr_greedy_quantum.attr,
 	&tune_attr_greedy_max.attr,
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 7984dcf89ad0..28938a46cf47 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -83,8 +83,7 @@ static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
 				     char *file, unsigned int line)
 {
 	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-	u32 magic = mh->mh_magic;
-	magic = be32_to_cpu(magic);
+	u32 magic = be32_to_cpu(mh->mh_magic);
 	if (unlikely(magic != GFS2_MAGIC))
 		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
 					  file, line);
@@ -107,9 +106,8 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
 					char *file, unsigned int line)
 {
 	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
-	u32 magic = mh->mh_magic;
+	u32 magic = be32_to_cpu(mh->mh_magic);
 	u16 t = be32_to_cpu(mh->mh_type);
-	magic = be32_to_cpu(magic);
 	if (unlikely(magic != GFS2_MAGIC))
 		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
 					  file, line);
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index a7ae7c177cac..8b7e4c1e32ae 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -54,8 +54,13 @@ struct gfs2_inum {
 	__be64 no_addr;
 };
 
-static inline int gfs2_inum_equal(const struct gfs2_inum *ino1,
-				  const struct gfs2_inum *ino2)
+struct gfs2_inum_host {
+	__u64 no_formal_ino;
+	__u64 no_addr;
+};
+
+static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
+				  const struct gfs2_inum_host *ino2)
 {
 	return ino1->no_formal_ino == ino2->no_formal_ino &&
 		ino1->no_addr == ino2->no_addr;
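
The new gfs2_inum_host mirrors the on-disk gfs2_inum but holds CPU-endian values, so the gfs2_inum_in()/gfs2_inum_out() helpers declared at the bottom of this header reduce to per-field byte swapping. A sketch of the "in" direction only (illustrative; the real bodies live in fs/gfs2/ondisk.c, and example_inum_in() is not the kernel function):

/* Illustrative on-disk -> in-core translation; buf points at a
 * struct gfs2_inum as read from the media. */
static void example_inum_in(struct gfs2_inum_host *no, const void *buf)
{
	const struct gfs2_inum *str = buf;

	no->no_formal_ino = be64_to_cpu(str->no_formal_ino);
	no->no_addr = be64_to_cpu(str->no_addr);
}
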
@@ -89,6 +94,12 @@ struct gfs2_meta_header {
 	__be32 __pad1;		/* Was incarnation number in gfs1 */
 };
 
+struct gfs2_meta_header_host {
+	__u32 mh_magic;
+	__u32 mh_type;
+	__u32 mh_format;
+};
+
 /*
  * super-block structure
  *
@@ -128,6 +139,23 @@ struct gfs2_sb {
 	/* In gfs1, quota and license dinodes followed */
 };
 
+struct gfs2_sb_host {
+	struct gfs2_meta_header_host sb_header;
+
+	__u32 sb_fs_format;
+	__u32 sb_multihost_format;
+
+	__u32 sb_bsize;
+	__u32 sb_bsize_shift;
+
+	struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
+	struct gfs2_inum_host sb_root_dir;
+
+	char sb_lockproto[GFS2_LOCKNAME_LEN];
+	char sb_locktable[GFS2_LOCKNAME_LEN];
+	/* In gfs1, quota and license dinodes followed */
+};
+
 /*
  * resource index structure
  */
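
Once the superblock has been translated into a struct gfs2_sb_host, validity checks can compare plain host-order integers. A hedged sketch of the kind of test gfs2_check_sb() (declared in fs/gfs2/super.h above) performs, assuming the GFS2_MAGIC, GFS2_METATYPE_SB, GFS2_FORMAT_FS and GFS2_FORMAT_MULTI constants defined elsewhere in this header; example_check_sb() is not the actual implementation:

/* Illustrative superblock sanity check on host-order fields. */
static int example_check_sb(const struct gfs2_sb_host *sb)
{
	if (sb->sb_header.mh_magic != GFS2_MAGIC ||
	    sb->sb_header.mh_type != GFS2_METATYPE_SB)
		return -EINVAL;
	if (sb->sb_fs_format != GFS2_FORMAT_FS ||
	    sb->sb_multihost_format != GFS2_FORMAT_MULTI)
		return -EINVAL;
	return 0;
}
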
@@ -145,6 +173,14 @@ struct gfs2_rindex {
 	__u8 ri_reserved[64];
 };
 
+struct gfs2_rindex_host {
+	__u64 ri_addr;		/* grp block disk address */
+	__u64 ri_data0;		/* first data location */
+	__u32 ri_length;	/* length of rgrp header in fs blocks */
+	__u32 ri_data;		/* num of data blocks in rgrp */
+	__u32 ri_bitbytes;	/* number of bytes in data bitmaps */
+};
+
 /*
  * resource group header structure
  */
@@ -176,6 +212,13 @@ struct gfs2_rgrp {
 	__u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
 };
 
+struct gfs2_rgrp_host {
+	__u32 rg_flags;
+	__u32 rg_free;
+	__u32 rg_dinodes;
+	__u64 rg_igeneration;
+};
+
 /*
  * quota structure
  */
@@ -187,6 +230,12 @@ struct gfs2_quota {
 	__u8 qu_reserved[64];
 };
 
+struct gfs2_quota_host {
+	__u64 qu_limit;
+	__u64 qu_warn;
+	__u64 qu_value;
+};
+
 /*
  * dinode structure
  */
@@ -270,6 +319,27 @@ struct gfs2_dinode {
 	__u8 di_reserved[56];
 };
 
+struct gfs2_dinode_host {
+	__u64 di_size;		/* number of bytes in file */
+	__u64 di_blocks;	/* number of blocks in file */
+
+	/* This section varies from gfs1. Padding added to align with
+	 * remainder of dinode
+	 */
+	__u64 di_goal_meta;	/* rgrp to alloc from next */
+	__u64 di_goal_data;	/* data block goal */
+	__u64 di_generation;	/* generation number for NFS */
+
+	__u32 di_flags;		/* GFS2_DIF_... */
+	__u16 di_height;	/* height of metadata */
+
+	/* These only apply to directories */
+	__u16 di_depth;		/* Number of bits in the table */
+	__u32 di_entries;	/* The number of entries in the directory */
+
+	__u64 di_eattr;		/* extended attribute block number */
+};
+
 /*
  * directory structure - many of these per directory file
  */
@@ -344,6 +414,16 @@ struct gfs2_log_header {
 	__be32 lh_hash;
 };
 
+struct gfs2_log_header_host {
+	struct gfs2_meta_header_host lh_header;
+
+	__u64 lh_sequence;	/* Sequence number of this transaction */
+	__u32 lh_flags;		/* GFS2_LOG_HEAD_... */
+	__u32 lh_tail;		/* Block number of log tail */
+	__u32 lh_blkno;
+	__u32 lh_hash;
+};
+
 /*
  * Log type descriptor
  */
@@ -384,6 +464,11 @@ struct gfs2_inum_range {
 	__be64 ir_length;
 };
 
+struct gfs2_inum_range_host {
+	__u64 ir_start;
+	__u64 ir_length;
+};
+
 /*
  * Statfs change
  * Describes an change to the pool of free and allocated
@@ -396,6 +481,12 @@ struct gfs2_statfs_change {
 	__be64 sc_dinodes;
 };
 
+struct gfs2_statfs_change_host {
+	__u64 sc_total;
+	__u64 sc_free;
+	__u64 sc_dinodes;
+};
+
 /*
  * Quota change
  * Describes an allocation change for a particular
@@ -410,33 +501,38 @@ struct gfs2_quota_change {
 	__be32 qc_id;
 };
 
+struct gfs2_quota_change_host {
+	__u64 qc_change;
+	__u32 qc_flags;		/* GFS2_QCF_... */
+	__u32 qc_id;
+};
+
 #ifdef __KERNEL__
 /* Translation functions */
 
-extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf);
-extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf);
-extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf);
-extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf);
-extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf);
-extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf);
-extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf);
-extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf);
-extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf);
-extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf);
-extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
+extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
+extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
+extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
+extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
+extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
+extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
+extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
+extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
+struct gfs2_inode;
+extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
 extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
 extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
-extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf);
-extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf);
-extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf);
-extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf);
+extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
+extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
+extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
+extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
+extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
 
 /* Printing functions */
 
-extern void gfs2_rindex_print(const struct gfs2_rindex *ri);
-extern void gfs2_dinode_print(const struct gfs2_dinode *di);
+extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
+extern void gfs2_dinode_print(const struct gfs2_inode *ip);
 
 #endif /* __KERNEL__ */
 
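
Taken together, the convention this header establishes is that the __be* structures describe the on-disk layout, the *_host copies hold CPU-endian values, and the *_in()/*_out() helpers translate at the buffer boundary. A minimal read-side sketch using only the declarations above (example_read_sb() is hypothetical, the buffer-reading step is elided, and error handling is trimmed):

/* Illustrative read path: convert the raw superblock buffer into the
 * host structure before inspecting any field. */
static int example_read_sb(struct gfs2_sbd *sdp, const void *sb_buf)
{
	struct gfs2_sb_host sb;

	gfs2_sb_in(&sb, sb_buf);		/* byte-swap into host order */
	return gfs2_check_sb(sdp, &sb, 1);	/* prototype in fs/gfs2/super.h */
}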