Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig  13
-rw-r--r--  fs/afs/Makefile  7
-rw-r--r--  fs/afs/afs.h  146
-rw-r--r--  fs/afs/afs_cm.h  32
-rw-r--r--  fs/afs/afs_fs.h  48
-rw-r--r--  fs/afs/afs_vl.h (renamed from fs/afs/vlclient.h)  49
-rw-r--r--  fs/afs/cache.c  256
-rw-r--r--  fs/afs/cache.h  12
-rw-r--r--  fs/afs/callback.c  509
-rw-r--r--  fs/afs/cell.c  471
-rw-r--r--  fs/afs/cell.h  78
-rw-r--r--  fs/afs/cmservice.c  926
-rw-r--r--  fs/afs/cmservice.h  29
-rw-r--r--  fs/afs/dir.c  852
-rw-r--r--  fs/afs/errors.h  34
-rw-r--r--  fs/afs/file.c  124
-rw-r--r--  fs/afs/fsclient.c  1528
-rw-r--r--  fs/afs/fsclient.h  54
-rw-r--r--  fs/afs/inode.c  248
-rw-r--r--  fs/afs/internal.h  755
-rw-r--r--  fs/afs/kafsasyncd.c  255
-rw-r--r--  fs/afs/kafsasyncd.h  52
-rw-r--r--  fs/afs/kafstimod.c  205
-rw-r--r--  fs/afs/kafstimod.h  49
-rw-r--r--  fs/afs/main.c  262
-rw-r--r--  fs/afs/misc.c  38
-rw-r--r--  fs/afs/mntpt.c  141
-rw-r--r--  fs/afs/mount.h  23
-rw-r--r--  fs/afs/proc.c  230
-rw-r--r--  fs/afs/rxrpc.c  782
-rw-r--r--  fs/afs/security.c  356
-rw-r--r--  fs/afs/server.c  647
-rw-r--r--  fs/afs/server.h  102
-rw-r--r--  fs/afs/super.c  326
-rw-r--r--  fs/afs/super.h  45
-rw-r--r--  fs/afs/transport.h  21
-rw-r--r--  fs/afs/types.h  125
-rw-r--r--  fs/afs/use-rtnetlink.c  473
-rw-r--r--  fs/afs/vlclient.c  737
-rw-r--r--  fs/afs/vlocation.c  1225
-rw-r--r--  fs/afs/vnode.c  731
-rw-r--r--  fs/afs/vnode.h  94
-rw-r--r--  fs/afs/volume.c  290
-rw-r--r--  fs/afs/volume.h  140
-rw-r--r--  fs/compat_ioctl.c  18
-rw-r--r--  fs/ecryptfs/netlink.c  6
-rw-r--r--  fs/jffs2/fs.c  12
-rw-r--r--  fs/jffs2/os-linux.h  6
-rw-r--r--  fs/jffs2/wbuf.c  24
-rw-r--r--  fs/ocfs2/alloc.c  3037
-rw-r--r--  fs/ocfs2/alloc.h  27
-rw-r--r--  fs/ocfs2/aops.c  1011
-rw-r--r--  fs/ocfs2/aops.h  77
-rw-r--r--  fs/ocfs2/cluster/quorum.c  5
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h  5
-rw-r--r--  fs/ocfs2/dir.c  15
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c  5
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c  2
-rw-r--r--  fs/ocfs2/dlmglue.c  143
-rw-r--r--  fs/ocfs2/dlmglue.h  3
-rw-r--r--  fs/ocfs2/extent_map.c  1233
-rw-r--r--  fs/ocfs2/extent_map.h  39
-rw-r--r--  fs/ocfs2/file.c  637
-rw-r--r--  fs/ocfs2/file.h  5
-rw-r--r--  fs/ocfs2/inode.c  199
-rw-r--r--  fs/ocfs2/inode.h  23
-rw-r--r--  fs/ocfs2/journal.c  24
-rw-r--r--  fs/ocfs2/journal.h  2
-rw-r--r--  fs/ocfs2/mmap.c  7
-rw-r--r--  fs/ocfs2/namei.c  23
-rw-r--r--  fs/ocfs2/ocfs2.h  55
-rw-r--r--  fs/ocfs2/ocfs2_fs.h  31
-rw-r--r--  fs/ocfs2/ocfs2_lockid.h  5
-rw-r--r--  fs/ocfs2/slot_map.c  2
-rw-r--r--  fs/ocfs2/suballoc.c  3
-rw-r--r--  fs/ocfs2/super.c  7
-rw-r--r--  fs/ocfs2/vote.c  289
-rw-r--r--  fs/ocfs2/vote.h  3
-rw-r--r--  fs/sync.c  8
79 files changed, 12190 insertions, 8321 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 3c4886b849f5..e33c08924572 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -2019,7 +2019,7 @@ config CODA_FS_OLD_API
 config AFS_FS
 	tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
-	select RXRPC
+	select AF_RXRPC
 	help
 	  If you say Y here, you will get an experimental Andrew File System
 	  driver. It currently only supports unsecured read-only AFS access.
@@ -2028,8 +2028,15 @@ config AFS_FS
 
 	  If unsure, say N.
 
-config RXRPC
-	tristate
+config AFS_DEBUG
+	bool "AFS dynamic debugging"
+	depends on AFS_FS
+	help
+	  Say Y here to make runtime controllable debugging messages appear.
+
+	  See <file:Documentation/filesystems/afs.txt> for more information.
+
+	  If unsure, say N.
 
 config 9P_FS
 	tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
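
The new AFS_DEBUG option gates debug statements that can be switched on and off at runtime rather than at compile time. A minimal user-space sketch of that general pattern — a mask consulted at each debug site — follows; the macro and mask names here are hypothetical, not taken from this patch (needs GCC for ##__VA_ARGS__):

	#include <stdio.h>

	static unsigned int debug_mask;	/* in a module this would be a parameter */

	#define DBG_CELL	0x01
	#define DBG_CALLBACK	0x02

	#define _debug(class, fmt, ...) \
		do { \
			if (debug_mask & (class)) \
				fprintf(stderr, "kAFS: " fmt "\n", ##__VA_ARGS__); \
		} while (0)

	int main(void)
	{
		debug_mask = DBG_CELL;	/* switched on at runtime */
		_debug(DBG_CELL, "cell %s looked up", "example.org");
		_debug(DBG_CALLBACK, "suppressed: class not enabled");
		return 0;
	}
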
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 4029c9da4b86..01545eb1d872 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -2,8 +2,6 @@
 # Makefile for Red Hat Linux AFS client.
 #
 
-#CFLAGS += -finstrument-functions
-
 kafs-objs := \
 	callback.o \
 	cell.o \
@@ -12,14 +10,15 @@ kafs-objs := \
 	file.o \
 	fsclient.o \
 	inode.o \
-	kafsasyncd.o \
-	kafstimod.o \
 	main.o \
 	misc.o \
 	mntpt.o \
 	proc.o \
+	rxrpc.o \
+	security.o \
 	server.o \
 	super.o \
+	use-rtnetlink.o \
 	vlclient.o \
 	vlocation.o \
 	vnode.o \
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
new file mode 100644
index 000000000000..52d0752265b8
--- /dev/null
+++ b/fs/afs/afs.h
@@ -0,0 +1,146 @@
+/* AFS common types
+ *
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_H
+#define AFS_H
+
+#include <linux/in.h>
+
+#define AFS_MAXCELLNAME	64	/* maximum length of a cell name */
+#define AFS_MAXVOLNAME	64	/* maximum length of a volume name */
+
+typedef unsigned		afs_volid_t;
+typedef unsigned		afs_vnodeid_t;
+typedef unsigned long long	afs_dataversion_t;
+
+typedef enum {
+	AFSVL_RWVOL,			/* read/write volume */
+	AFSVL_ROVOL,			/* read-only volume */
+	AFSVL_BACKVOL,			/* backup volume */
+} __attribute__((packed)) afs_voltype_t;
+
+typedef enum {
+	AFS_FTYPE_INVALID	= 0,
+	AFS_FTYPE_FILE		= 1,
+	AFS_FTYPE_DIR		= 2,
+	AFS_FTYPE_SYMLINK	= 3,
+} afs_file_type_t;
+
+/*
+ * AFS file identifier
+ */
+struct afs_fid {
+	afs_volid_t	vid;		/* volume ID */
+	afs_vnodeid_t	vnode;		/* file index within volume */
+	unsigned	unique;		/* unique ID number (file index version) */
+};
+
+/*
+ * AFS callback notification
+ */
+typedef enum {
+	AFSCM_CB_UNTYPED	= 0,	/* no type set on CB break */
+	AFSCM_CB_EXCLUSIVE	= 1,	/* CB exclusive to CM [not implemented] */
+	AFSCM_CB_SHARED		= 2,	/* CB shared by other CMs */
+	AFSCM_CB_DROPPED	= 3,	/* CB promise cancelled by file server */
+} afs_callback_type_t;
+
+struct afs_callback {
+	struct afs_fid		fid;		/* file identifier */
+	unsigned		version;	/* callback version */
+	unsigned		expiry;		/* time at which expires */
+	afs_callback_type_t	type;		/* type of callback */
+};
+
+#define AFSCBMAX 50	/* maximum callbacks transferred per bulk op */
+
+/*
+ * AFS volume information
+ */
+struct afs_volume_info {
+	afs_volid_t		vid;		/* volume ID */
+	afs_voltype_t		type;		/* type of this volume */
+	afs_volid_t		type_vids[5];	/* volume IDs for possible types for this vol */
+
+	/* list of fileservers serving this volume */
+	size_t			nservers;	/* number of entries used in servers[] */
+	struct {
+		struct in_addr	addr;		/* fileserver address */
+	} servers[8];
+};
+
+/*
+ * AFS security ACE access mask
+ */
+typedef u32 afs_access_t;
+#define AFS_ACE_READ		0x00000001U	/* - permission to read a file/dir */
+#define AFS_ACE_WRITE		0x00000002U	/* - permission to write/chmod a file */
+#define AFS_ACE_INSERT		0x00000004U	/* - permission to create dirent in a dir */
+#define AFS_ACE_LOOKUP		0x00000008U	/* - permission to lookup a file/dir in a dir */
+#define AFS_ACE_DELETE		0x00000010U	/* - permission to delete a dirent from a dir */
+#define AFS_ACE_LOCK		0x00000020U	/* - permission to lock a file */
+#define AFS_ACE_ADMINISTER	0x00000040U	/* - permission to change ACL */
+#define AFS_ACE_USER_A		0x01000000U	/* - 'A' user-defined permission */
+#define AFS_ACE_USER_B		0x02000000U	/* - 'B' user-defined permission */
+#define AFS_ACE_USER_C		0x04000000U	/* - 'C' user-defined permission */
+#define AFS_ACE_USER_D		0x08000000U	/* - 'D' user-defined permission */
+#define AFS_ACE_USER_E		0x10000000U	/* - 'E' user-defined permission */
+#define AFS_ACE_USER_F		0x20000000U	/* - 'F' user-defined permission */
+#define AFS_ACE_USER_G		0x40000000U	/* - 'G' user-defined permission */
+#define AFS_ACE_USER_H		0x80000000U	/* - 'H' user-defined permission */
+
+/*
+ * AFS file status information
+ */
+struct afs_file_status {
+	unsigned		if_version;	/* interface version */
+#define AFS_FSTATUS_VERSION	1
+
+	afs_file_type_t		type;		/* file type */
+	unsigned		nlink;		/* link count */
+	u64			size;		/* file size */
+	afs_dataversion_t	data_version;	/* current data version */
+	u32			author;		/* author ID */
+	u32			owner;		/* owner ID */
+	u32			group;		/* group ID */
+	afs_access_t		caller_access;	/* access rights for authenticated caller */
+	afs_access_t		anon_access;	/* access rights for unauthenticated caller */
+	umode_t			mode;		/* UNIX mode */
+	struct afs_fid		parent;		/* parent dir ID for non-dirs only */
+	time_t			mtime_client;	/* last time client changed data */
+	time_t			mtime_server;	/* last time server changed data */
+};
+
+/*
+ * AFS file status change request
+ */
+struct afs_store_status {
+	u32			mask;		/* which bits of the struct are set */
+	u32			mtime_client;	/* last time client changed data */
+	u32			owner;		/* owner ID */
+	u32			group;		/* group ID */
+	umode_t			mode;		/* UNIX mode */
+};
+
+#define AFS_SET_MTIME		0x01		/* set the mtime */
+#define AFS_SET_OWNER		0x02		/* set the owner ID */
+#define AFS_SET_GROUP		0x04		/* set the group ID (unsupported?) */
+#define AFS_SET_MODE		0x08		/* set the UNIX mode */
+#define AFS_SET_SEG_SIZE	0x10		/* set the segment size (unsupported) */
+
+/*
+ * AFS volume synchronisation information
+ */
+struct afs_volsync {
+	time_t			creation;	/* volume creation time */
+};
+
+#endif /* AFS_H */
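
The ACE bits above combine into the afs_access_t rights mask that struct afs_file_status carries in caller_access and anon_access; several operations need more than one bit at once. A stand-alone illustration (the helper below is hypothetical, not part of the patch):

	#include <stdio.h>

	typedef unsigned int afs_access_t;
	#define AFS_ACE_WRITE	0x00000002U
	#define AFS_ACE_INSERT	0x00000004U
	#define AFS_ACE_LOOKUP	0x00000008U

	/* creating "dir/name" needs both lookup on the parent and insert */
	static int may_create(afs_access_t caller_access)
	{
		return (caller_access & (AFS_ACE_INSERT | AFS_ACE_LOOKUP))
			== (AFS_ACE_INSERT | AFS_ACE_LOOKUP);
	}

	int main(void)
	{
		afs_access_t access = AFS_ACE_LOOKUP | AFS_ACE_WRITE;
		printf("may create: %d\n", may_create(access));	/* 0: no INSERT */
		return 0;
	}
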
diff --git a/fs/afs/afs_cm.h b/fs/afs/afs_cm.h
new file mode 100644
index 000000000000..7b4d4fab4c80
--- /dev/null
+++ b/fs/afs/afs_cm.h
@@ -0,0 +1,32 @@
+/* AFS Cache Manager definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_CM_H
+#define AFS_CM_H
+
+#define AFS_CM_PORT		7001	/* AFS cache manager port */
+#define CM_SERVICE		1	/* AFS Cache Manager service ID */
+
+enum AFS_CM_Operations {
+	CBCallBack		= 204,	/* break callback promises */
+	CBInitCallBackState	= 205,	/* initialise callback state */
+	CBProbe			= 206,	/* probe client */
+	CBGetLock		= 207,	/* get contents of CM lock table */
+	CBGetCE			= 208,	/* get cache file description */
+	CBGetXStatsVersion	= 209,	/* get version of extended statistics */
+	CBGetXStats		= 210,	/* get contents of extended statistics data */
+	CBInitCallBackState3	= 213,	/* initialise callback state, version 3 */
+	CBGetCapabilities	= 65538, /* get client capabilities */
+};
+
+#define AFS_CAP_ERROR_TRANSLATION	0x1
+
+#endif /* AFS_CM_H */
diff --git a/fs/afs/afs_fs.h b/fs/afs/afs_fs.h
new file mode 100644
index 000000000000..89e0d1650a72
--- /dev/null
+++ b/fs/afs/afs_fs.h
@@ -0,0 +1,48 @@
+/* AFS File Service definitions
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AFS_FS_H
+#define AFS_FS_H
+
+#define AFS_FS_PORT		7000	/* AFS file server port */
+#define FS_SERVICE		1	/* AFS File Service ID */
+
+enum AFS_FS_Operations {
+	FSFETCHDATA		= 130,	/* AFS Fetch file data */
+	FSFETCHSTATUS		= 132,	/* AFS Fetch file status */
+	FSREMOVEFILE		= 136,	/* AFS Remove a file */
+	FSCREATEFILE		= 137,	/* AFS Create a file */
+	FSRENAME		= 138,	/* AFS Rename or move a file or directory */
+	FSSYMLINK		= 139,	/* AFS Create a symbolic link */
+	FSLINK			= 140,	/* AFS Create a hard link */
+	FSMAKEDIR		= 141,	/* AFS Create a directory */
+	FSREMOVEDIR		= 142,	/* AFS Remove a directory */
+	FSGIVEUPCALLBACKS	= 147,	/* AFS Discard callback promises */
+	FSGETVOLUMEINFO		= 148,	/* AFS Get root volume information */
+	FSGETROOTVOLUME		= 151,	/* AFS Get root volume name */
+	FSLOOKUP		= 161,	/* AFS lookup file in directory */
+};
+
+enum AFS_FS_Errors {
+	VSALVAGE		= 101,	/* volume needs salvaging */
+	VNOVNODE		= 102,	/* no such file/dir (vnode) */
+	VNOVOL			= 103,	/* no such volume or volume unavailable */
+	VVOLEXISTS		= 104,	/* volume name already exists */
+	VNOSERVICE		= 105,	/* volume not currently in service */
+	VOFFLINE		= 106,	/* volume is currently offline (more info available [VVL-spec]) */
+	VONLINE			= 107,	/* volume is already online */
+	VDISKFULL		= 108,	/* disk partition is full */
+	VOVERQUOTA		= 109,	/* volume's maximum quota exceeded */
+	VBUSY			= 110,	/* volume is temporarily unavailable */
+	VMOVED			= 111,	/* volume moved to new server - ask this FS where */
+};
+
+#endif /* AFS_FS_H */
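
Abort codes like these come back from the fileserver over RxRPC and have to be folded into errno space by the client. An illustrative stand-alone mapping in the spirit of the patch's fs/afs/misc.c — the exact correspondences below are assumptions, not copied from the patch:

	#include <errno.h>

	static int afs_abort_to_error(int abort_code)
	{
		switch (abort_code) {
		case 102: return -ENOENT;	/* VNOVNODE */
		case 103: return -ENOMEDIUM;	/* VNOVOL */
		case 108: return -ENOSPC;	/* VDISKFULL */
		case 109: return -EDQUOT;	/* VOVERQUOTA */
		default:  return -EREMOTEIO;	/* anything unrecognised */
		}
	}
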
diff --git a/fs/afs/vlclient.h b/fs/afs/afs_vl.h
index e3d601179c46..8bbefe009ed4 100644
--- a/fs/afs/vlclient.h
+++ b/fs/afs/afs_vl.h
@@ -1,6 +1,6 @@
-/* vlclient.h: Volume Location Service client interface
+/* AFS Volume Location Service client interface
  *
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -9,10 +9,19 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_AFS_VLCLIENT_H
-#define _LINUX_AFS_VLCLIENT_H
+#ifndef AFS_VL_H
+#define AFS_VL_H
 
-#include "types.h"
+#include "afs.h"
+
+#define AFS_VL_PORT		7003	/* volume location service port */
+#define VL_SERVICE		52	/* RxRPC service ID for the Volume Location service */
+
+enum AFSVL_Operations {
+	VLGETENTRYBYID		= 503,	/* AFS Get Cache Entry By ID operation ID */
+	VLGETENTRYBYNAME	= 504,	/* AFS Get Cache Entry By Name operation ID */
+	VLPROBE			= 514,	/* AFS Probe Volume Location Service operation ID */
+};
 
 enum AFSVL_Errors {
 	AFSVL_IDEXIST		= 363520,	/* Volume Id entry exists in vl database */
@@ -40,14 +49,16 @@ enum AFSVL_Errors {
 	AFSVL_BADVOLOPER	= 363542,	/* Bad volume operation code */
 	AFSVL_BADRELLOCKTYPE	= 363543,	/* Bad release lock type */
 	AFSVL_RERELEASE		= 363544,	/* Status report: last release was aborted */
 	AFSVL_BADSERVERFLAG	= 363545,	/* Invalid replication site server flag */
 	AFSVL_PERM		= 363546,	/* No permission access */
 	AFSVL_NOMEM		= 363547,	/* malloc/realloc failed to alloc enough memory */
 };
 
-/* maps to "struct vldbentry" in vvl-spec.pdf */
+/*
+ * maps to "struct vldbentry" in vvl-spec.pdf
+ */
 struct afs_vldbentry {
-	char		name[65];	/* name of volume (including NUL char) */
+	char		name[65];	/* name of volume (with NUL char) */
 	afs_voltype_t	type;		/* volume type */
 	unsigned	num_servers;	/* num servers that hold instances of this vol */
 	unsigned	clone_id;	/* cloning ID */
@@ -68,26 +79,6 @@ struct afs_vldbentry {
 #define AFS_VLSF_RWVOL		0x0004	/* this server holds a R/W instance of the volume */
 #define AFS_VLSF_BACKVOL	0x0008	/* this server holds a backup instance of the volume */
 	} servers[8];
-
 };
 
-/* look up a volume location database entry by name */
-extern int afs_rxvl_get_entry_by_name(struct afs_server *server,
-				      const char *volname,
-				      unsigned volnamesz,
-				      struct afs_cache_vlocation *entry);
-
-/* look up a volume location database entry by ID */
-extern int afs_rxvl_get_entry_by_id(struct afs_server *server,
-				    afs_volid_t volid,
-				    afs_voltype_t voltype,
-				    struct afs_cache_vlocation *entry);
-
-extern int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op,
-					  afs_volid_t volid,
-					  afs_voltype_t voltype);
-
-extern int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op,
-					   struct afs_cache_vlocation *entry);
-
-#endif /* _LINUX_AFS_VLCLIENT_H */
+#endif /* AFS_VL_H */
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
new file mode 100644
index 000000000000..de0d7de69edc
--- /dev/null
+++ b/fs/afs/cache.c
@@ -0,0 +1,256 @@
+/* AFS caching stuff
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+						const void *entry);
+static void afs_cell_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_cache_cell_index_def = {
+	.name		= "cell_ix",
+	.data_size	= sizeof(struct afs_cache_cell),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+	.match		= afs_cell_cache_match,
+	.update		= afs_cell_cache_update,
+};
+#endif
+
+/*
+ * match a cell record obtained from the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_cell_cache_match(void *target,
+						const void *entry)
+{
+	const struct afs_cache_cell *ccell = entry;
+	struct afs_cell *cell = target;
+
+	_enter("{%s},{%s}", ccell->name, cell->name);
+
+	if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
+		_leave(" = SUCCESS");
+		return CACHEFS_MATCH_SUCCESS;
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a cell record in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_cell_cache_update(void *source, void *entry)
+{
+	struct afs_cache_cell *ccell = entry;
+	struct afs_cell *cell = source;
+
+	_enter("%p,%p", source, entry);
+
+	strncpy(ccell->name, cell->name, sizeof(ccell->name));
+
+	memcpy(ccell->vl_servers,
+	       cell->vl_addrs,
+	       min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
+
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+						     const void *entry);
+static void afs_vlocation_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vlocation_cache_index_def = {
+	.name		= "vldb",
+	.data_size	= sizeof(struct afs_cache_vlocation),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
+	.match		= afs_vlocation_cache_match,
+	.update		= afs_vlocation_cache_update,
+};
+#endif
+
+/*
+ * match a VLDB record stored in the cache
+ * - may also load target from entry
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vlocation_cache_match(void *target,
+						     const void *entry)
+{
+	const struct afs_cache_vlocation *vldb = entry;
+	struct afs_vlocation *vlocation = target;
+
+	_enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
+
+	if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
+	    ) {
+		if (!vlocation->valid ||
+		    vlocation->vldb.rtime == vldb->rtime
+		    ) {
+			vlocation->vldb = *vldb;
+			vlocation->valid = 1;
+			_leave(" = SUCCESS [c->m]");
+			return CACHEFS_MATCH_SUCCESS;
+		} else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
+			/* delete if VIDs for this name differ */
+			if (memcmp(&vlocation->vldb.vid,
+				   &vldb->vid,
+				   sizeof(vldb->vid)) != 0) {
+				_leave(" = DELETE");
+				return CACHEFS_MATCH_SUCCESS_DELETE;
+			}
+
+			_leave(" = UPDATE");
+			return CACHEFS_MATCH_SUCCESS_UPDATE;
+		} else {
+			_leave(" = SUCCESS");
+			return CACHEFS_MATCH_SUCCESS;
+		}
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a VLDB record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vlocation_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vlocation *vldb = entry;
+	struct afs_vlocation *vlocation = source;
+
+	_enter("");
+
+	*vldb = vlocation->vldb;
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+						  const void *entry);
+static void afs_volume_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_volume_cache_index_def = {
+	.name		= "volume",
+	.data_size	= sizeof(struct afs_cache_vhash),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
+	.keys[1]	= { CACHEFS_INDEX_KEYS_BIN, 1 },
+	.match		= afs_volume_cache_match,
+	.update		= afs_volume_cache_update,
+};
+#endif
+
+/*
+ * match a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_volume_cache_match(void *target,
+						  const void *entry)
+{
+	const struct afs_cache_vhash *vhash = entry;
+	struct afs_volume *volume = target;
+
+	_enter("{%u},{%u}", volume->type, vhash->vtype);
+
+	if (volume->type == vhash->vtype) {
+		_leave(" = SUCCESS");
+		return CACHEFS_MATCH_SUCCESS;
+	}
+
+	_leave(" = FAILED");
+	return CACHEFS_MATCH_FAILED;
+}
+#endif
+
+/*
+ * update a volume hash record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_volume_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vhash *vhash = entry;
+	struct afs_volume *volume = source;
+
+	_enter("");
+
+	vhash->vtype = volume->type;
+}
+#endif
+
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+						 const void *entry);
+static void afs_vnode_cache_update(void *source, void *entry);
+
+struct cachefs_index_def afs_vnode_cache_index_def = {
+	.name		= "vnode",
+	.data_size	= sizeof(struct afs_cache_vnode),
+	.keys[0]	= { CACHEFS_INDEX_KEYS_BIN, 4 },
+	.match		= afs_vnode_cache_match,
+	.update		= afs_vnode_cache_update,
+};
+#endif
+
+/*
+ * match a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static cachefs_match_val_t afs_vnode_cache_match(void *target,
+						 const void *entry)
+{
+	const struct afs_cache_vnode *cvnode = entry;
+	struct afs_vnode *vnode = target;
+
+	_enter("{%x,%x,%Lx},{%x,%x,%Lx}",
+	       vnode->fid.vnode,
+	       vnode->fid.unique,
+	       vnode->status.version,
+	       cvnode->vnode_id,
+	       cvnode->vnode_unique,
+	       cvnode->data_version);
+
+	if (vnode->fid.vnode != cvnode->vnode_id) {
+		_leave(" = FAILED");
+		return CACHEFS_MATCH_FAILED;
+	}
+
+	if (vnode->fid.unique != cvnode->vnode_unique ||
+	    vnode->status.version != cvnode->data_version) {
+		_leave(" = DELETE");
+		return CACHEFS_MATCH_SUCCESS_DELETE;
+	}
+
+	_leave(" = SUCCESS");
+	return CACHEFS_MATCH_SUCCESS;
+}
+#endif
+
+/*
+ * update a vnode record stored in the cache
+ */
+#ifdef AFS_CACHING_SUPPORT
+static void afs_vnode_cache_update(void *source, void *entry)
+{
+	struct afs_cache_vnode *cvnode = entry;
+	struct afs_vnode *vnode = source;
+
+	_enter("");
+
+	cvnode->vnode_id	= vnode->fid.vnode;
+	cvnode->vnode_unique	= vnode->fid.unique;
+	cvnode->data_version	= vnode->status.version;
+}
+#endif
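
All four match callbacks above follow the same contract: compare the object being looked up against a cached entry and tell the cache to keep it, update it, delete it, or reject it. A reduced stand-alone model of that decision (types and names hypothetical, not the cachefs API):

	#include <string.h>

	typedef enum {
		MATCH_FAILED,		/* different object entirely */
		MATCH_SUCCESS,		/* usable as-is */
		MATCH_SUCCESS_UPDATE,	/* keep, but rewrite the entry */
		MATCH_SUCCESS_DELETE,	/* stale: discard cached data */
	} match_val_t;

	struct entry { char name[64]; unsigned version; };

	static match_val_t match(const struct entry *target,
				 const struct entry *stored)
	{
		if (strncmp(target->name, stored->name,
			    sizeof(stored->name)) != 0)
			return MATCH_FAILED;
		if (target->version != stored->version)
			return MATCH_SUCCESS_DELETE;
		return MATCH_SUCCESS;
	}

	int main(void)
	{
		struct entry a = { "vol.root", 1 }, b = { "vol.root", 2 };
		return match(&a, &b);	/* MATCH_SUCCESS_DELETE: version changed */
	}
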
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
index 9eb7722b34d5..36a3642cf90e 100644
--- a/fs/afs/cache.h
+++ b/fs/afs/cache.h
@@ -1,4 +1,4 @@
-/* cache.h: AFS local cache management interface
+/* AFS local cache management interface
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -9,8 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#ifndef _LINUX_AFS_CACHE_H
-#define _LINUX_AFS_CACHE_H
+#ifndef AFS_CACHE_H
+#define AFS_CACHE_H
 
 #undef AFS_CACHING_SUPPORT
 
@@ -20,8 +20,4 @@
 #endif
 #include "types.h"
 
-#ifdef __KERNEL__
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_AFS_CACHE_H */
+#endif /* AFS_CACHE_H */
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 9cb206e9d4be..639399f0ab6f 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
  *
  * This software may be freely redistributed under the terms of the
  * GNU General Public License.
@@ -16,85 +16,187 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include "server.h"
-#include "vnode.h"
+#include <linux/circ_buf.h>
 #include "internal.h"
-#include "cmservice.h"
 
-/*****************************************************************************/
+unsigned afs_vnode_update_timeout = 10;
+
+#define afs_breakring_space(server) \
+	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,	\
+		   ARRAY_SIZE((server)->cb_break))
+
+//static void afs_callback_updater(struct work_struct *);
+
+static struct workqueue_struct *afs_callback_update_worker;
+
 /*
  * allow the fileserver to request callback state (re-)initialisation
  */
-int SRXAFSCM_InitCallBackState(struct afs_server *server)
+void afs_init_callback_state(struct afs_server *server)
 {
-	struct list_head callbacks;
+	struct afs_vnode *vnode;
 
-	_enter("%p", server);
+	_enter("{%p}", server);
 
-	INIT_LIST_HEAD(&callbacks);
-
-	/* transfer the callback list from the server to a temp holding area */
 	spin_lock(&server->cb_lock);
 
-	list_add(&callbacks, &server->cb_promises);
-	list_del_init(&server->cb_promises);
+	/* kill all the promises on record from this server */
+	while (!RB_EMPTY_ROOT(&server->cb_promises)) {
+		vnode = rb_entry(server->cb_promises.rb_node,
+				 struct afs_vnode, cb_promise);
+		_debug("UNPROMISE { vid=%x vn=%u uq=%u}",
+		       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+		rb_erase(&vnode->cb_promise, &server->cb_promises);
+		vnode->cb_promised = false;
+	}
 
-	/* munch our way through the list, grabbing the inode, dropping all the
-	 * locks and regetting them in the right order
-	 */
-	while (!list_empty(&callbacks)) {
-		struct afs_vnode *vnode;
-		struct inode *inode;
+	spin_unlock(&server->cb_lock);
+	_leave("");
+}
 
-		vnode = list_entry(callbacks.next, struct afs_vnode, cb_link);
-		list_del_init(&vnode->cb_link);
+/*
+ * handle the data invalidation side of a callback being broken
+ */
+void afs_broken_callback_work(struct work_struct *work)
+{
+	struct afs_vnode *vnode =
+		container_of(work, struct afs_vnode, cb_broken_work);
 
-		/* try and grab the inode - may fail */
-		inode = igrab(AFS_VNODE_TO_I(vnode));
-		if (inode) {
-			int release = 0;
+	_enter("");
 
-			spin_unlock(&server->cb_lock);
-			spin_lock(&vnode->lock);
+	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;
 
-			if (vnode->cb_server == server) {
-				vnode->cb_server = NULL;
-				afs_kafstimod_del_timer(&vnode->cb_timeout);
-				spin_lock(&afs_cb_hash_lock);
-				list_del_init(&vnode->cb_hash_link);
-				spin_unlock(&afs_cb_hash_lock);
-				release = 1;
-			}
+	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
+	if (!mutex_trylock(&vnode->validate_lock))
+		return; /* someone else is dealing with it */
 
-			spin_unlock(&vnode->lock);
+	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
+		if (S_ISDIR(vnode->vfs_inode.i_mode))
+			afs_clear_permits(vnode);
 
-			iput(inode);
-			afs_put_server(server);
+		if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
+			goto out;
 
-			spin_lock(&server->cb_lock);
+		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+			goto out;
+
+		/* if the vnode's data version number changed then its contents
+		 * are different */
+		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+			_debug("zap data {%x:%u}",
+			       vnode->fid.vid, vnode->fid.vnode);
+			invalidate_remote_inode(&vnode->vfs_inode);
 		}
 	}
 
-	spin_unlock(&server->cb_lock);
+out:
+	mutex_unlock(&vnode->validate_lock);
 
-	_leave(" = 0");
-	return 0;
-} /* end SRXAFSCM_InitCallBackState() */
+	/* avoid the potential race whereby the mutex_trylock() in this
+	 * function happens again between the clear_bit() and the
+	 * mutex_unlock() */
+	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
+		_debug("requeue");
+		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
+	}
+	_leave("");
+}
+
+/*
+ * actually break a callback
+ */
+static void afs_break_callback(struct afs_server *server,
+			       struct afs_vnode *vnode)
+{
+	_enter("");
+
+	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+
+	if (vnode->cb_promised) {
+		spin_lock(&vnode->lock);
+
+		_debug("break callback");
+
+		spin_lock(&server->cb_lock);
+		if (vnode->cb_promised) {
+			rb_erase(&vnode->cb_promise, &server->cb_promises);
+			vnode->cb_promised = false;
+		}
+		spin_unlock(&server->cb_lock);
+
+		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
+		spin_unlock(&vnode->lock);
+	}
+}
+
+/*
+ * allow the fileserver to explicitly break one callback
+ * - happens when
+ *   - the backing file is changed
+ *   - a lock is released
+ */
+static void afs_break_one_callback(struct afs_server *server,
+				   struct afs_fid *fid)
+{
+	struct afs_vnode *vnode;
+	struct rb_node *p;
+
+	_debug("find");
+	spin_lock(&server->fs_lock);
+	p = server->fs_vnodes.rb_node;
+	while (p) {
+		vnode = rb_entry(p, struct afs_vnode, server_rb);
+		if (fid->vid < vnode->fid.vid)
+			p = p->rb_left;
+		else if (fid->vid > vnode->fid.vid)
+			p = p->rb_right;
+		else if (fid->vnode < vnode->fid.vnode)
+			p = p->rb_left;
+		else if (fid->vnode > vnode->fid.vnode)
+			p = p->rb_right;
+		else if (fid->unique < vnode->fid.unique)
+			p = p->rb_left;
+		else if (fid->unique > vnode->fid.unique)
+			p = p->rb_right;
+		else
+			goto found;
+	}
+
+	/* not found so we just ignore it (it may have moved to another
+	 * server) */
+not_available:
+	_debug("not avail");
+	spin_unlock(&server->fs_lock);
+	_leave("");
+	return;
+
+found:
+	_debug("found");
+	ASSERTCMP(server, ==, vnode->server);
+
+	if (!igrab(AFS_VNODE_TO_I(vnode)))
+		goto not_available;
+	spin_unlock(&server->fs_lock);
+
+	afs_break_callback(server, vnode);
+	iput(&vnode->vfs_inode);
+	_leave("");
+}
 
-/*****************************************************************************/
 /*
  * allow the fileserver to break callback promises
  */
-int SRXAFSCM_CallBack(struct afs_server *server, size_t count,
-		      struct afs_callback callbacks[])
+void afs_break_callbacks(struct afs_server *server, size_t count,
+			 struct afs_callback callbacks[])
 {
-	_enter("%p,%u,", server, count);
+	_enter("%p,%zu,", server, count);
 
-	for (; count > 0; callbacks++, count--) {
-		struct afs_vnode *vnode = NULL;
-		struct inode *inode = NULL;
-		int valid = 0;
+	ASSERT(server != NULL);
+	ASSERTCMP(count, <=, AFSCBMAX);
 
+	for (; count > 0; callbacks++, count--) {
 		_debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
 		       callbacks->fid.vid,
 		       callbacks->fid.vnode,
@@ -103,67 +205,270 @@ int SRXAFSCM_CallBack(struct afs_server *server, size_t count,
 		       callbacks->expiry,
 		       callbacks->type
 		       );
+		afs_break_one_callback(server, &callbacks->fid);
+	}
 
-		/* find the inode for this fid */
-		spin_lock(&afs_cb_hash_lock);
+	_leave("");
+	return;
+}
 
-		list_for_each_entry(vnode,
-				    &afs_cb_hash(server, &callbacks->fid),
-				    cb_hash_link) {
-			if (memcmp(&vnode->fid, &callbacks->fid,
-				   sizeof(struct afs_fid)) != 0)
-				continue;
+/*
+ * record the callback for breaking
+ * - the caller must hold server->cb_lock
+ */
+static void afs_do_give_up_callback(struct afs_server *server,
+				    struct afs_vnode *vnode)
+{
+	struct afs_callback *cb;
 
-			/* right vnode, but is it same server? */
-			if (vnode->cb_server != server)
-				break; /* no */
+	_enter("%p,%p", server, vnode);
 
-			/* try and nail the inode down */
-			inode = igrab(AFS_VNODE_TO_I(vnode));
-			break;
+	cb = &server->cb_break[server->cb_break_head];
+	cb->fid		= vnode->fid;
+	cb->version	= vnode->cb_version;
+	cb->expiry	= vnode->cb_expiry;
+	cb->type	= vnode->cb_type;
+	smp_wmb();
+	server->cb_break_head =
+		(server->cb_break_head + 1) &
+		(ARRAY_SIZE(server->cb_break) - 1);
+
+	/* defer the breaking of callbacks to try and collect as many as
+	 * possible to ship in one operation */
+	switch (atomic_inc_return(&server->cb_break_n)) {
+	case 1 ... AFSCBMAX - 1:
+		queue_delayed_work(afs_callback_update_worker,
+				   &server->cb_break_work, HZ * 2);
+		break;
+	case AFSCBMAX:
+		afs_flush_callback_breaks(server);
+		break;
+	default:
+		break;
+	}
+
+	ASSERT(server->cb_promises.rb_node != NULL);
+	rb_erase(&vnode->cb_promise, &server->cb_promises);
+	vnode->cb_promised = false;
+	_leave("");
+}
+
+/*
+ * discard the callback on a deleted item
+ */
+void afs_discard_callback_on_delete(struct afs_vnode *vnode)
+{
+	struct afs_server *server = vnode->server;
+
+	_enter("%d", vnode->cb_promised);
+
+	if (!vnode->cb_promised) {
+		_leave(" [not promised]");
+		return;
+	}
+
+	ASSERT(server != NULL);
+
+	spin_lock(&server->cb_lock);
+	if (vnode->cb_promised) {
+		ASSERT(server->cb_promises.rb_node != NULL);
+		rb_erase(&vnode->cb_promise, &server->cb_promises);
+		vnode->cb_promised = false;
+	}
+	spin_unlock(&server->cb_lock);
+	_leave("");
+}
+
+/*
+ * give up the callback registered for a vnode on the file server when the
+ * inode is being cleared
+ */
+void afs_give_up_callback(struct afs_vnode *vnode)
+{
+	struct afs_server *server = vnode->server;
+
+	DECLARE_WAITQUEUE(myself, current);
+
+	_enter("%d", vnode->cb_promised);
+
+	_debug("GIVE UP INODE %p", &vnode->vfs_inode);
+
+	if (!vnode->cb_promised) {
+		_leave(" [not promised]");
+		return;
+	}
+
+	ASSERT(server != NULL);
+
+	spin_lock(&server->cb_lock);
+	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
+		add_wait_queue(&server->cb_break_waitq, &myself);
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!vnode->cb_promised ||
+			    afs_breakring_space(server) != 0)
+				break;
+			spin_unlock(&server->cb_lock);
+			schedule();
+			spin_lock(&server->cb_lock);
 		}
+		remove_wait_queue(&server->cb_break_waitq, &myself);
+		__set_current_state(TASK_RUNNING);
+	}
+
+	/* of course, it's always possible for the server to break this vnode's
+	 * callback first... */
+	if (vnode->cb_promised)
+		afs_do_give_up_callback(server, vnode);
+
+	spin_unlock(&server->cb_lock);
+	_leave("");
+}
+
+/*
+ * dispatch a deferred give up callbacks operation
+ */
+void afs_dispatch_give_up_callbacks(struct work_struct *work)
+{
+	struct afs_server *server =
+		container_of(work, struct afs_server, cb_break_work.work);
+
+	_enter("");
+
+	/* tell the fileserver to discard the callback promises it has
+	 * - in the event of ENOMEM or some other error, we just forget that we
+	 *   had callbacks entirely, and the server will call us later to break
+	 *   them
+	 */
+	afs_fs_give_up_callbacks(server, &afs_async_call);
+}
+
+/*
+ * flush the outstanding callback breaks on a server
+ */
+void afs_flush_callback_breaks(struct afs_server *server)
+{
+	cancel_delayed_work(&server->cb_break_work);
+	queue_delayed_work(afs_callback_update_worker,
+			   &server->cb_break_work, 0);
+}
 
-		spin_unlock(&afs_cb_hash_lock);
-
-		if (inode) {
-			/* we've found the record for this vnode */
-			spin_lock(&vnode->lock);
-			if (vnode->cb_server == server) {
-				/* the callback _is_ on the calling server */
-				vnode->cb_server = NULL;
-				valid = 1;
-
-				afs_kafstimod_del_timer(&vnode->cb_timeout);
-				vnode->flags |= AFS_VNODE_CHANGED;
-
-				spin_lock(&server->cb_lock);
-				list_del_init(&vnode->cb_link);
-				spin_unlock(&server->cb_lock);
-
-				spin_lock(&afs_cb_hash_lock);
-				list_del_init(&vnode->cb_hash_link);
-				spin_unlock(&afs_cb_hash_lock);
-			}
-			spin_unlock(&vnode->lock);
-
-			if (valid) {
-				invalidate_remote_inode(inode);
-				afs_put_server(server);
-			}
-			iput(inode);
+#if 0
+/*
+ * update a bunch of callbacks
+ */
+static void afs_callback_updater(struct work_struct *work)
+{
+	struct afs_server *server;
+	struct afs_vnode *vnode, *xvnode;
+	time_t now;
+	long timeout;
+	int ret;
+
+	server = container_of(work, struct afs_server, updater);
+
+	_enter("");
+
+	now = get_seconds();
+
+	/* find the first vnode to update */
+	spin_lock(&server->cb_lock);
+	for (;;) {
+		if (RB_EMPTY_ROOT(&server->cb_promises)) {
+			spin_unlock(&server->cb_lock);
+			_leave(" [nothing]");
+			return;
 		}
+
+		vnode = rb_entry(rb_first(&server->cb_promises),
+				 struct afs_vnode, cb_promise);
+		if (atomic_read(&vnode->usage) > 0)
+			break;
+		rb_erase(&vnode->cb_promise, &server->cb_promises);
+		vnode->cb_promised = false;
 	}
 
-	_leave(" = 0");
-	return 0;
-} /* end SRXAFSCM_CallBack() */
+	timeout = vnode->update_at - now;
+	if (timeout > 0) {
+		queue_delayed_work(afs_vnode_update_worker,
+				   &afs_vnode_update, timeout * HZ);
+		spin_unlock(&server->cb_lock);
+		_leave(" [nothing]");
+		return;
+	}
+
+	list_del_init(&vnode->update);
+	atomic_inc(&vnode->usage);
+	spin_unlock(&server->cb_lock);
+
+	/* we can now perform the update */
+	_debug("update %s", vnode->vldb.name);
+	vnode->state = AFS_VL_UPDATING;
+	vnode->upd_rej_cnt = 0;
+	vnode->upd_busy_cnt = 0;
+
+	ret = afs_vnode_update_record(vl, &vldb);
+	switch (ret) {
+	case 0:
+		afs_vnode_apply_update(vl, &vldb);
+		vnode->state = AFS_VL_UPDATING;
+		break;
+	case -ENOMEDIUM:
+		vnode->state = AFS_VL_VOLUME_DELETED;
+		break;
+	default:
+		vnode->state = AFS_VL_UNCERTAIN;
+		break;
+	}
+
+	/* and then reschedule */
+	_debug("reschedule");
+	vnode->update_at = get_seconds() + afs_vnode_update_timeout;
+
+	spin_lock(&server->cb_lock);
+
+	if (!list_empty(&server->cb_promises)) {
+		/* next update in 10 minutes, but wait at least 1 second more
+		 * than the newest record already queued so that we don't spam
+		 * the VL server suddenly with lots of requests
+		 */
+		xvnode = list_entry(server->cb_promises.prev,
+				    struct afs_vnode, update);
+		if (vnode->update_at <= xvnode->update_at)
+			vnode->update_at = xvnode->update_at + 1;
+		xvnode = list_entry(server->cb_promises.next,
+				    struct afs_vnode, update);
+		timeout = xvnode->update_at - now;
+		if (timeout < 0)
+			timeout = 0;
+	} else {
+		timeout = afs_vnode_update_timeout;
+	}
+
+	list_add_tail(&vnode->update, &server->cb_promises);
+
+	_debug("timeout %ld", timeout);
+	queue_delayed_work(afs_vnode_update_worker,
+			   &afs_vnode_update, timeout * HZ);
+	spin_unlock(&server->cb_lock);
+	afs_put_vnode(vl);
+}
+#endif
+
+/*
+ * initialise the callback update process
+ */
+int __init afs_callback_update_init(void)
+{
+	afs_callback_update_worker =
+		create_singlethread_workqueue("kafs_callbackd");
+	return afs_callback_update_worker ? 0 : -ENOMEM;
+}
 
-/*****************************************************************************/
 /*
- * allow the fileserver to see if the cache manager is still alive
+ * shut down the callback update process
  */
-int SRXAFSCM_Probe(struct afs_server *server)
+void __exit afs_callback_update_kill(void)
 {
-	_debug("SRXAFSCM_Probe(%p)\n", server);
-	return 0;
-} /* end SRXAFSCM_Probe() */
+	destroy_workqueue(afs_callback_update_worker);
+}
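
The cb_break ring that afs_do_give_up_callback() fills above is a classic single-producer circular buffer: fill the slot, issue a write barrier, then publish the new head, with the ring sized to a power of two so indices wrap by masking. A user-space model of the same pattern, using the CIRC_* macros as defined in <linux/circ_buf.h> (the ring type below is hypothetical):

	#include <stdio.h>

	#define RING_SIZE 64	/* must be a power of two, like cb_break[] */
	#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
	#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

	struct ring {
		int slots[RING_SIZE];
		unsigned head;		/* written by the producer */
		unsigned tail;		/* written by the consumer */
	};

	static int ring_push(struct ring *r, int value)
	{
		if (CIRC_SPACE(r->head, r->tail, RING_SIZE) == 0)
			return -1;		/* full: caller must wait */
		r->slots[r->head] = value;	/* fill the slot first... */
		/* ...smp_wmb() here in the kernel, before publishing */
		r->head = (r->head + 1) & (RING_SIZE - 1);
		return 0;
	}

	int main(void)
	{
		struct ring r = { .head = 0, .tail = 0 };
		printf("pushed: %d\n", ring_push(&r, 42) == 0);
		return 0;
	}
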
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 1fc578372759..9b1311a1df51 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -1,4 +1,4 @@
-/* cell.c: AFS cell and server record management
+/* AFS cell and server record management
  *
  * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -11,15 +11,9 @@
 
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <rxrpc/peer.h>
-#include <rxrpc/connection.h>
-#include "volume.h"
-#include "cell.h"
-#include "server.h"
-#include "transport.h"
-#include "vlclient.h"
-#include "kafstimod.h"
-#include "super.h"
+#include <linux/key.h>
+#include <linux/ctype.h>
+#include <keys/rxrpc-type.h>
 #include "internal.h"
 
 DECLARE_RWSEM(afs_proc_cells_sem);
@@ -28,66 +22,47 @@ LIST_HEAD(afs_proc_cells);
 static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
 static DEFINE_RWLOCK(afs_cells_lock);
 static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
+static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq);
 static struct afs_cell *afs_cell_root;
 
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
-						const void *entry);
-static void afs_cell_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_cache_cell_index_def = {
-	.name		= "cell_ix",
-	.data_size	= sizeof(struct afs_cache_cell),
-	.keys[0]	= { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
-	.match		= afs_cell_cache_match,
-	.update		= afs_cell_cache_update,
-};
-#endif
-
-/*****************************************************************************/
 /*
- * create a cell record
- * - "name" is the name of the cell
- * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ * allocate a cell record and fill in its name, VL server address list and
+ * allocate an anonymous key
  */
-int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
+static struct afs_cell *afs_cell_alloc(const char *name, char *vllist)
 {
 	struct afs_cell *cell;
-	char *next;
+	size_t namelen;
+	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp, *next;
 	int ret;
 
-	_enter("%s", name);
+	_enter("%s,%s", name, vllist);
 
 	BUG_ON(!name); /* TODO: want to look up "this cell" in the cache */
 
+	namelen = strlen(name);
+	if (namelen > AFS_MAXCELLNAME)
+		return ERR_PTR(-ENAMETOOLONG);
+
 	/* allocate and initialise a cell record */
-	cell = kmalloc(sizeof(struct afs_cell) + strlen(name) + 1, GFP_KERNEL);
+	cell = kzalloc(sizeof(struct afs_cell) + namelen + 1, GFP_KERNEL);
 	if (!cell) {
 		_leave(" = -ENOMEM");
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
-	down_write(&afs_cells_sem);
-
-	memset(cell, 0, sizeof(struct afs_cell));
-	atomic_set(&cell->usage, 0);
+	memcpy(cell->name, name, namelen);
+	cell->name[namelen] = 0;
 
+	atomic_set(&cell->usage, 1);
 	INIT_LIST_HEAD(&cell->link);
-
-	rwlock_init(&cell->sv_lock);
-	INIT_LIST_HEAD(&cell->sv_list);
-	INIT_LIST_HEAD(&cell->sv_graveyard);
-	spin_lock_init(&cell->sv_gylock);
-
+	rwlock_init(&cell->servers_lock);
+	INIT_LIST_HEAD(&cell->servers);
 	init_rwsem(&cell->vl_sem);
 	INIT_LIST_HEAD(&cell->vl_list);
-	INIT_LIST_HEAD(&cell->vl_graveyard);
-	spin_lock_init(&cell->vl_gylock);
-
-	strcpy(cell->name,name);
+	spin_lock_init(&cell->vl_lock);
 
 	/* fill in the VL server list from the rest of the string */
-	ret = -EINVAL;
 	do {
 		unsigned a, b, c, d;
 
@@ -96,20 +71,75 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
 			*next++ = 0;
 
 		if (sscanf(vllist, "%u.%u.%u.%u", &a, &b, &c, &d) != 4)
-			goto badaddr;
+			goto bad_address;
 
 		if (a > 255 || b > 255 || c > 255 || d > 255)
-			goto badaddr;
+			goto bad_address;
 
 		cell->vl_addrs[cell->vl_naddrs++].s_addr =
 			htonl((a << 24) | (b << 16) | (c << 8) | d);
 
-		if (cell->vl_naddrs >= AFS_CELL_MAX_ADDRS)
-			break;
+	} while (cell->vl_naddrs < AFS_CELL_MAX_ADDRS && (vllist = next));
+
+	/* create a key to represent an anonymous user */
+	memcpy(keyname, "afs@", 4);
+	dp = keyname + 4;
+	cp = cell->name;
+	do {
+		*dp++ = toupper(*cp);
+	} while (*cp++);
+	cell->anonymous_key = key_alloc(&key_type_rxrpc, keyname, 0, 0, current,
+					KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA);
+	if (IS_ERR(cell->anonymous_key)) {
+		_debug("no key");
+		ret = PTR_ERR(cell->anonymous_key);
+		goto error;
+	}
+
+	ret = key_instantiate_and_link(cell->anonymous_key, NULL, 0,
+				       NULL, NULL);
+	if (ret < 0) {
+		_debug("instantiate failed");
+		goto error;
+	}
+
+	_debug("anon key %p{%x}",
+	       cell->anonymous_key, key_serial(cell->anonymous_key));
+
+	_leave(" = %p", cell);
+	return cell;
+
+bad_address:
+	printk(KERN_ERR "kAFS: bad VL server IP address\n");
+	ret = -EINVAL;
+error:
+	key_put(cell->anonymous_key);
+	kfree(cell);
+	_leave(" = %d", ret);
+	return ERR_PTR(ret);
+}
+
+/*
+ * create a cell record
+ * - "name" is the name of the cell
+ * - "vllist" is a colon separated list of IP addresses in "a.b.c.d" format
+ */
+struct afs_cell *afs_cell_create(const char *name, char *vllist)
+{
+	struct afs_cell *cell;
+	int ret;
+
+	_enter("%s,%s", name, vllist);
 
-	} while(vllist = next, vllist);
+	cell = afs_cell_alloc(name, vllist);
+	if (IS_ERR(cell)) {
+		_leave(" = %ld", PTR_ERR(cell));
+		return cell;
+	}
+
+	down_write(&afs_cells_sem);
 
-	/* add a proc dir for this cell */
+	/* add a proc directory for this cell */
 	ret = afs_proc_cell_setup(cell);
 	if (ret < 0)
 		goto error;
@@ -130,31 +160,28 @@ int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell)
 	down_write(&afs_proc_cells_sem);
 	list_add_tail(&cell->proc_link, &afs_proc_cells);
 	up_write(&afs_proc_cells_sem);
-
-	*_cell = cell;
 	up_write(&afs_cells_sem);
 
-	_leave(" = 0 (%p)", cell);
-	return 0;
+	_leave(" = %p", cell);
+	return cell;
 
- badaddr:
-	printk(KERN_ERR "kAFS: bad VL server IP address: '%s'\n", vllist);
- error:
+error:
 	up_write(&afs_cells_sem);
+	key_put(cell->anonymous_key);
 	kfree(cell);
 	_leave(" = %d", ret);
-	return ret;
-} /* end afs_cell_create() */
+	return ERR_PTR(ret);
+}
 
-/*****************************************************************************/
 /*
- * initialise the cell database from module parameters
+ * set the root cell information
+ * - can be called with a module parameter string
+ * - can be called from a write to /proc/fs/afs/rootcell
  */
 int afs_cell_init(char *rootcell)
 {
 	struct afs_cell *old_root, *new_root;
 	char *cp;
-	int ret;
 
 	_enter("");
 
@@ -162,82 +189,60 @@ int afs_cell_init(char *rootcell)
 	/* module is loaded with no parameters, or built statically.
 	 * - in the future we might initialize cell DB here.
 	 */
-	_leave(" = 0 (but no root)");
+	_leave(" = 0 [no root]");
 	return 0;
 	}
 
 	cp = strchr(rootcell, ':');
 	if (!cp) {
 		printk(KERN_ERR "kAFS: no VL server IP addresses specified\n");
-		_leave(" = %d (no colon)", -EINVAL);
+		_leave(" = -EINVAL");
 		return -EINVAL;
 	}
 
 	/* allocate a cell record for the root cell */
 	*cp++ = 0;
-	ret = afs_cell_create(rootcell, cp, &new_root);
-	if (ret < 0) {
-		_leave(" = %d", ret);
-		return ret;
+	new_root = afs_cell_create(rootcell, cp);
+	if (IS_ERR(new_root)) {
+		_leave(" = %ld", PTR_ERR(new_root));
+		return PTR_ERR(new_root);
 	}
 
-	/* as afs_put_cell() takes locks by itself, we have to do
-	 * a little gymnastics to be race-free.
-	 */
-	afs_get_cell(new_root);
-
+	/* install the new cell */
 	write_lock(&afs_cells_lock);
-	while (afs_cell_root) {
-		old_root = afs_cell_root;
-		afs_cell_root = NULL;
-		write_unlock(&afs_cells_lock);
-		afs_put_cell(old_root);
-		write_lock(&afs_cells_lock);
-	}
+	old_root = afs_cell_root;
 	afs_cell_root = new_root;
 	write_unlock(&afs_cells_lock);
+	afs_put_cell(old_root);
 
-	_leave(" = %d", ret);
-	return ret;
-
-} /* end afs_cell_init() */
+	_leave(" = 0");
+	return 0;
+}
 
-/*****************************************************************************/
 /*
  * lookup a cell record
  */
-int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell)
+struct afs_cell *afs_cell_lookup(const char *name, unsigned namesz)
 {
 	struct afs_cell *cell;
-	int ret;
 
 	_enter("\"%*.*s\",", namesz, namesz, name ? name : "");
 
-	*_cell = NULL;
+	down_read(&afs_cells_sem);
+	read_lock(&afs_cells_lock);
 
 	if (name) {
 		/* if the cell was named, look for it in the cell record list */
-		ret = -ENOENT;
-		cell = NULL;
-		read_lock(&afs_cells_lock);
-
 		list_for_each_entry(cell, &afs_cells, link) {
 			if (strncmp(cell->name, name, namesz) == 0) {
 				afs_get_cell(cell);
 				goto found;
 			}
 		}
-		cell = NULL;
+		cell = ERR_PTR(-ENOENT);
 	found:
-
-		read_unlock(&afs_cells_lock);
-
-		if (cell)
-			ret = 0;
-	}
-	else {
-		read_lock(&afs_cells_lock);
-
+		;
+	} else {
 		cell = afs_cell_root;
 		if (!cell) {
 			/* this should not happen unless user tries to mount
@@ -246,44 +251,35 @@ int afs_cell_lookup(const char *name, unsigned namesz, struct afs_cell **_cell)
 			 * ENOENT might be "more appropriate" but they happen
 			 * for other reasons.
 			 */
-			ret = -EDESTADDRREQ;
-		}
-		else {
+			cell = ERR_PTR(-EDESTADDRREQ);
+		} else {
 			afs_get_cell(cell);
-			ret = 0;
 		}
 
-		read_unlock(&afs_cells_lock);
 	}
 
-	*_cell = cell;
-	_leave(" = %d (%p)", ret, cell);
-	return ret;
-
-} /* end afs_cell_lookup() */
+	read_unlock(&afs_cells_lock);
+	up_read(&afs_cells_sem);
+	_leave(" = %p", cell);
+	return cell;
+}
 
-/*****************************************************************************/
 /*
  * try and get a cell record
  */
-struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell)
+struct afs_cell *afs_get_cell_maybe(struct afs_cell *cell)
 {
-	struct afs_cell *cell;
-
 	write_lock(&afs_cells_lock);
 
-	cell = *_cell;
 	if (cell && !list_empty(&cell->link))
 		afs_get_cell(cell);
 	else
 		cell = NULL;
 
 	write_unlock(&afs_cells_lock);
-
 	return cell;
-} /* end afs_get_cell_maybe() */
+}
 
-/*****************************************************************************/
 /*
  * destroy a cell record
  */
@@ -294,8 +290,7 @@ void afs_put_cell(struct afs_cell *cell)
 
 	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
 
-	/* sanity check */
-	BUG_ON(atomic_read(&cell->usage) <= 0);
+	ASSERTCMP(atomic_read(&cell->usage), >, 0);
 
 	/* to prevent a race, the decrement and the dequeue must be effectively
 	 * atomic */
@@ -307,36 +302,49 @@ void afs_put_cell(struct afs_cell *cell)
 		return;
 	}
 
+	ASSERT(list_empty(&cell->servers));
+	ASSERT(list_empty(&cell->vl_list));
+
 	write_unlock(&afs_cells_lock);
 
-	BUG_ON(!list_empty(&cell->sv_list));
-	BUG_ON(!list_empty(&cell->sv_graveyard));
-	BUG_ON(!list_empty(&cell->vl_list));
-	BUG_ON(!list_empty(&cell->vl_graveyard));
+	wake_up(&afs_cells_freeable_wq);
 
 	_leave(" [unused]");
-} /* end afs_put_cell() */
+}
 
-/*****************************************************************************/
 /*
  * destroy a cell record
+ * - must be called with the afs_cells_sem write-locked
+ * - cell->link should have been broken by the caller
  */
 static void afs_cell_destroy(struct afs_cell *cell)
 {
 	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
 
-	/* to prevent a race, the decrement and the dequeue must be effectively
-	 * atomic */
-	write_lock(&afs_cells_lock);
+	ASSERTCMP(atomic_read(&cell->usage), >=, 0);
+	ASSERT(list_empty(&cell->link));
 
-	/* sanity check */
-	BUG_ON(atomic_read(&cell->usage) != 0);
+	/* wait for everyone to stop using the cell */
+	if (atomic_read(&cell->usage) > 0) {
+		DECLARE_WAITQUEUE(myself, current);
 
-	list_del_init(&cell->link);
+		_debug("wait for cell %s", cell->name);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&afs_cells_freeable_wq, &myself);
 
-	write_unlock(&afs_cells_lock);
+		while (atomic_read(&cell->usage) > 0) {
+			schedule();
+			set_current_state(TASK_UNINTERRUPTIBLE);
+		}
 
-	down_write(&afs_cells_sem);
+		remove_wait_queue(&afs_cells_freeable_wq, &myself);
+		set_current_state(TASK_RUNNING);
+	}
+
+	_debug("cell dead");
+	ASSERTCMP(atomic_read(&cell->usage), ==, 0);
+	ASSERT(list_empty(&cell->servers));
+	ASSERT(list_empty(&cell->vl_list));
 
 	afs_proc_cell_remove(cell);
 
@@ -348,104 +356,26 @@ static void afs_cell_destroy(struct afs_cell *cell)
348 cachefs_relinquish_cookie(cell->cache, 0); 356 cachefs_relinquish_cookie(cell->cache, 0);
349#endif 357#endif
350 358
351 up_write(&afs_cells_sem); 359 key_put(cell->anonymous_key);
352
353 BUG_ON(!list_empty(&cell->sv_list));
354 BUG_ON(!list_empty(&cell->sv_graveyard));
355 BUG_ON(!list_empty(&cell->vl_list));
356 BUG_ON(!list_empty(&cell->vl_graveyard));
357
358 /* finish cleaning up the cell */
359 kfree(cell); 360 kfree(cell);
360 361
361 _leave(" [destroyed]"); 362 _leave(" [destroyed]");
362} /* end afs_cell_destroy() */ 363}
363
364/*****************************************************************************/
365/*
366 * lookup the server record corresponding to an Rx RPC peer
367 */
368int afs_server_find_by_peer(const struct rxrpc_peer *peer,
369 struct afs_server **_server)
370{
371 struct afs_server *server;
372 struct afs_cell *cell;
373
374 _enter("%p{a=%08x},", peer, ntohl(peer->addr.s_addr));
375
376 /* search the cell list */
377 read_lock(&afs_cells_lock);
378
379 list_for_each_entry(cell, &afs_cells, link) {
380
381 _debug("? cell %s",cell->name);
382
383 write_lock(&cell->sv_lock);
384
385 /* check the active list */
386 list_for_each_entry(server, &cell->sv_list, link) {
387 _debug("?? server %08x", ntohl(server->addr.s_addr));
388
389 if (memcmp(&server->addr, &peer->addr,
390 sizeof(struct in_addr)) == 0)
391 goto found_server;
392 }
393 364
394 /* check the inactive list */
395 spin_lock(&cell->sv_gylock);
396 list_for_each_entry(server, &cell->sv_graveyard, link) {
397 _debug("?? dead server %08x",
398 ntohl(server->addr.s_addr));
399
400 if (memcmp(&server->addr, &peer->addr,
401 sizeof(struct in_addr)) == 0)
402 goto found_dead_server;
403 }
404 spin_unlock(&cell->sv_gylock);
405
406 write_unlock(&cell->sv_lock);
407 }
408 read_unlock(&afs_cells_lock);
409
410 _leave(" = -ENOENT");
411 return -ENOENT;
412
413 /* we found it in the graveyard - resurrect it */
414 found_dead_server:
415 list_move_tail(&server->link, &cell->sv_list);
416 afs_get_server(server);
417 afs_kafstimod_del_timer(&server->timeout);
418 spin_unlock(&cell->sv_gylock);
419 goto success;
420
421 /* we found it - increment its ref count and return it */
422 found_server:
423 afs_get_server(server);
424
425 success:
426 write_unlock(&cell->sv_lock);
427 read_unlock(&afs_cells_lock);
428
429 *_server = server;
430 _leave(" = 0 (s=%p c=%p)", server, cell);
431 return 0;
432
433} /* end afs_server_find_by_peer() */
434
435/*****************************************************************************/
436/* 365/*
437 * purge in-memory cell database on module unload or afs_init() failure 366 * purge in-memory cell database on module unload or afs_init() failure
438 * - the timeout daemon is stopped before calling this 367 * - the timeout daemon is stopped before calling this
439 */ 368 */
440void afs_cell_purge(void) 369void afs_cell_purge(void)
441{ 370{
442 struct afs_vlocation *vlocation;
443 struct afs_cell *cell; 371 struct afs_cell *cell;
444 372
445 _enter(""); 373 _enter("");
446 374
447 afs_put_cell(afs_cell_root); 375 afs_put_cell(afs_cell_root);
448 376
377 down_write(&afs_cells_sem);
378
449 while (!list_empty(&afs_cells)) { 379 while (!list_empty(&afs_cells)) {
450 cell = NULL; 380 cell = NULL;
451 381
@@ -464,104 +394,11 @@ void afs_cell_purge(void)
464 _debug("PURGING CELL %s (%d)", 394 _debug("PURGING CELL %s (%d)",
465 cell->name, atomic_read(&cell->usage)); 395 cell->name, atomic_read(&cell->usage));
466 396
467 BUG_ON(!list_empty(&cell->sv_list));
468 BUG_ON(!list_empty(&cell->vl_list));
469
470 /* purge the cell's VL graveyard list */
471 _debug(" - clearing VL graveyard");
472
473 spin_lock(&cell->vl_gylock);
474
475 while (!list_empty(&cell->vl_graveyard)) {
476 vlocation = list_entry(cell->vl_graveyard.next,
477 struct afs_vlocation,
478 link);
479 list_del_init(&vlocation->link);
480
481 afs_kafstimod_del_timer(&vlocation->timeout);
482
483 spin_unlock(&cell->vl_gylock);
484
485 afs_vlocation_do_timeout(vlocation);
486 /* TODO: race if move to use krxtimod instead
487 * of kafstimod */
488
489 spin_lock(&cell->vl_gylock);
490 }
491
492 spin_unlock(&cell->vl_gylock);
493
494 /* purge the cell's server graveyard list */
495 _debug(" - clearing server graveyard");
496
497 spin_lock(&cell->sv_gylock);
498
499 while (!list_empty(&cell->sv_graveyard)) {
500 struct afs_server *server;
501
502 server = list_entry(cell->sv_graveyard.next,
503 struct afs_server, link);
504 list_del_init(&server->link);
505
506 afs_kafstimod_del_timer(&server->timeout);
507
508 spin_unlock(&cell->sv_gylock);
509
510 afs_server_do_timeout(server);
511
512 spin_lock(&cell->sv_gylock);
513 }
514
515 spin_unlock(&cell->sv_gylock);
516
517 /* now the cell should be left with no references */ 397 /* now the cell should be left with no references */
518 afs_cell_destroy(cell); 398 afs_cell_destroy(cell);
519 } 399 }
520 } 400 }
521 401
402 up_write(&afs_cells_sem);
522 _leave(""); 403 _leave("");
523} /* end afs_cell_purge() */ 404}
524
525/*****************************************************************************/
526/*
527 * match a cell record obtained from the cache
528 */
529#ifdef AFS_CACHING_SUPPORT
530static cachefs_match_val_t afs_cell_cache_match(void *target,
531 const void *entry)
532{
533 const struct afs_cache_cell *ccell = entry;
534 struct afs_cell *cell = target;
535
536 _enter("{%s},{%s}", ccell->name, cell->name);
537
538 if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
539 _leave(" = SUCCESS");
540 return CACHEFS_MATCH_SUCCESS;
541 }
542
543 _leave(" = FAILED");
544 return CACHEFS_MATCH_FAILED;
545} /* end afs_cell_cache_match() */
546#endif
547
548/*****************************************************************************/
549/*
550 * update a cell record in the cache
551 */
552#ifdef AFS_CACHING_SUPPORT
553static void afs_cell_cache_update(void *source, void *entry)
554{
555 struct afs_cache_cell *ccell = entry;
556 struct afs_cell *cell = source;
557
558 _enter("%p,%p", source, entry);
559
560 strncpy(ccell->name, cell->name, sizeof(ccell->name));
561
562 memcpy(ccell->vl_servers,
563 cell->vl_addrs,
564 min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
565
566} /* end afs_cell_cache_update() */
567#endif
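
The rewritten afs_put_cell()/afs_cell_destroy() pair above drops the old graveyard machinery in favour of a simpler handshake: the final afs_put_cell() wakes afs_cells_freeable_wq, and afs_cell_destroy() sleeps on that queue until the usage count reaches zero. Below is a minimal user-space analogue of that handshake, substituting a mutex and condition variable for the kernel waitqueue; all names here are illustrative, not taken from the kernel.

#include <pthread.h>
#include <stdlib.h>

struct cell {
	int usage;			/* reference count, protected by lock */
	pthread_mutex_t lock;
	pthread_cond_t freeable;	/* signalled when usage reaches zero */
};

/* drop a reference; the last put wakes anyone waiting to destroy */
static void cell_put(struct cell *cell)
{
	pthread_mutex_lock(&cell->lock);
	if (--cell->usage == 0)
		pthread_cond_broadcast(&cell->freeable);
	pthread_mutex_unlock(&cell->lock);
}

/* wait for all users to go away, as afs_cell_destroy() does */
static void cell_wait_unused(struct cell *cell)
{
	pthread_mutex_lock(&cell->lock);
	while (cell->usage > 0)
		pthread_cond_wait(&cell->freeable, &cell->lock);
	pthread_mutex_unlock(&cell->lock);
}

static void *user_thread(void *arg)
{
	cell_put(arg);			/* final reference dropped here */
	return NULL;
}

int main(void)
{
	struct cell *cell = calloc(1, sizeof(*cell));
	pthread_t t;

	pthread_mutex_init(&cell->lock, NULL);
	pthread_cond_init(&cell->freeable, NULL);
	cell->usage = 1;

	pthread_create(&t, NULL, user_thread, cell);
	cell_wait_unused(cell);		/* blocks until the last put */
	pthread_join(t, NULL);		/* putter has fully exited; safe to free */
	free(cell);
	return 0;
}

The kernel version has the same shape: a TASK_UNINTERRUPTIBLE sleep on an explicit wait queue stands in for the condition variable, and holding afs_cells_sem for writing guarantees only one destroyer runs at a time.
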
diff --git a/fs/afs/cell.h b/fs/afs/cell.h
deleted file mode 100644
index 48349108fb00..000000000000
--- a/fs/afs/cell.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* cell.h: AFS cell record
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_CELL_H
13#define _LINUX_AFS_CELL_H
14
15#include "types.h"
16#include "cache.h"
17
18#define AFS_CELL_MAX_ADDRS 15
19
20extern volatile int afs_cells_being_purged; /* T when cells are being purged by rmmod */
21
22/*****************************************************************************/
23/*
24 * entry in the cached cell catalogue
25 */
26struct afs_cache_cell
27{
28 char name[64]; /* cell name (padded with NULs) */
29 struct in_addr vl_servers[15]; /* cached cell VL servers */
30};
31
32/*****************************************************************************/
33/*
34 * AFS cell record
35 */
36struct afs_cell
37{
38 atomic_t usage;
39 struct list_head link; /* main cell list link */
40 struct list_head proc_link; /* /proc cell list link */
41 struct proc_dir_entry *proc_dir; /* /proc dir for this cell */
42#ifdef AFS_CACHING_SUPPORT
43 struct cachefs_cookie *cache; /* caching cookie */
44#endif
45
46 /* server record management */
47 rwlock_t sv_lock; /* active server list lock */
48 struct list_head sv_list; /* active server list */
49 struct list_head sv_graveyard; /* inactive server list */
50 spinlock_t sv_gylock; /* inactive server list lock */
51
52 /* volume location record management */
53 struct rw_semaphore vl_sem; /* volume management serialisation semaphore */
54 struct list_head vl_list; /* cell's active VL record list */
55 struct list_head vl_graveyard; /* cell's inactive VL record list */
56 spinlock_t vl_gylock; /* graveyard lock */
57 unsigned short vl_naddrs; /* number of VL servers in addr list */
58 unsigned short vl_curr_svix; /* current server index */
59 struct in_addr vl_addrs[AFS_CELL_MAX_ADDRS]; /* cell VL server addresses */
60
61 char name[0]; /* cell name - must go last */
62};
63
64extern int afs_cell_init(char *rootcell);
65
66extern int afs_cell_create(const char *name, char *vllist, struct afs_cell **_cell);
67
68extern int afs_cell_lookup(const char *name, unsigned nmsize, struct afs_cell **_cell);
69
70#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
71
72extern struct afs_cell *afs_get_cell_maybe(struct afs_cell **_cell);
73
74extern void afs_put_cell(struct afs_cell *cell);
75
76extern void afs_cell_purge(void);
77
78#endif /* _LINUX_AFS_CELL_H */
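
The prototypes deleted here returned an int and passed the result out through a **_cell parameter; the rewritten cell.c above instead hands back the cell pointer directly, encoding failures with the kernel's ERR_PTR() convention. A standalone sketch of that convention follows, with simplified forms of the <linux/err.h> helpers and a toy lookup table invented for illustration.

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* simplified ERR_PTR()/IS_ERR()/PTR_ERR(): errnos live in the top page */
static inline void *ERR_PTR(long error) { return (void *) error; }
static inline long PTR_ERR(const void *ptr) { return (long) ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long) ptr >= (unsigned long) -4095;
}

struct cell { const char *name; };

static struct cell cells[] = { { "example.org" }, { "grand.central.org" } };

/* return the matching cell, or an errno encoded in the pointer */
static struct cell *cell_lookup(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(cells) / sizeof(cells[0]); i++)
		if (strcmp(cells[i].name, name) == 0)
			return &cells[i];
	return ERR_PTR(-ENOENT);
}

int main(void)
{
	struct cell *cell = cell_lookup("nowhere.org");

	if (IS_ERR(cell))
		printf("lookup failed: %ld\n", PTR_ERR(cell));
	return 0;
}

Collapsing the error and the result into one return value removes the cell/ret pairs that the old afs_cell_lookup() had to keep consistent by hand.
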
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 3d097fddcb7a..6685f4cbccb3 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -1,4 +1,4 @@
1/* cmservice.c: AFS Cache Manager Service 1/* AFS Cache Manager Service
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -12,641 +12,463 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/completion.h> 15#include <linux/ip.h>
16#include "server.h"
17#include "cell.h"
18#include "transport.h"
19#include <rxrpc/rxrpc.h>
20#include <rxrpc/transport.h>
21#include <rxrpc/connection.h>
22#include <rxrpc/call.h>
23#include "cmservice.h"
24#include "internal.h" 16#include "internal.h"
17#include "afs_cm.h"
25 18
26static unsigned afscm_usage; /* AFS cache manager usage count */ 19struct workqueue_struct *afs_cm_workqueue;
27static struct rw_semaphore afscm_sem; /* AFS cache manager start/stop semaphore */
28
29static int afscm_new_call(struct rxrpc_call *call);
30static void afscm_attention(struct rxrpc_call *call);
31static void afscm_error(struct rxrpc_call *call);
32static void afscm_aemap(struct rxrpc_call *call);
33
34static void _SRXAFSCM_CallBack(struct rxrpc_call *call);
35static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call);
36static void _SRXAFSCM_Probe(struct rxrpc_call *call);
37
38typedef void (*_SRXAFSCM_xxxx_t)(struct rxrpc_call *call);
39
40static const struct rxrpc_operation AFSCM_ops[] = {
41 {
42 .id = 204,
43 .asize = RXRPC_APP_MARK_EOF,
44 .name = "CallBack",
45 .user = _SRXAFSCM_CallBack,
46 },
47 {
48 .id = 205,
49 .asize = RXRPC_APP_MARK_EOF,
50 .name = "InitCallBackState",
51 .user = _SRXAFSCM_InitCallBackState,
52 },
53 {
54 .id = 206,
55 .asize = RXRPC_APP_MARK_EOF,
56 .name = "Probe",
57 .user = _SRXAFSCM_Probe,
58 },
59#if 0
60 {
61 .id = 207,
62 .asize = RXRPC_APP_MARK_EOF,
63 .name = "GetLock",
64 .user = _SRXAFSCM_GetLock,
65 },
66 {
67 .id = 208,
68 .asize = RXRPC_APP_MARK_EOF,
69 .name = "GetCE",
70 .user = _SRXAFSCM_GetCE,
71 },
72 {
73 .id = 209,
74 .asize = RXRPC_APP_MARK_EOF,
75 .name = "GetXStatsVersion",
76 .user = _SRXAFSCM_GetXStatsVersion,
77 },
78 {
79 .id = 210,
80 .asize = RXRPC_APP_MARK_EOF,
81 .name = "GetXStats",
82 .user = _SRXAFSCM_GetXStats,
83 }
84#endif
85};
86 20
87static struct rxrpc_service AFSCM_service = { 21static int afs_deliver_cb_init_call_back_state(struct afs_call *,
88 .name = "AFS/CM", 22 struct sk_buff *, bool);
89 .owner = THIS_MODULE, 23static int afs_deliver_cb_init_call_back_state3(struct afs_call *,
90 .link = LIST_HEAD_INIT(AFSCM_service.link), 24 struct sk_buff *, bool);
91 .new_call = afscm_new_call, 25static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
92 .service_id = 1, 26static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
93 .attn_func = afscm_attention, 27static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *,
94 .error_func = afscm_error, 28 bool);
95 .aemap_func = afscm_aemap, 29static void afs_cm_destructor(struct afs_call *);
96 .ops_begin = &AFSCM_ops[0],
97 .ops_end = &AFSCM_ops[ARRAY_SIZE(AFSCM_ops)],
98};
99 30
100static DECLARE_COMPLETION(kafscmd_alive);
101static DECLARE_COMPLETION(kafscmd_dead);
102static DECLARE_WAIT_QUEUE_HEAD(kafscmd_sleepq);
103static LIST_HEAD(kafscmd_attention_list);
104static LIST_HEAD(afscm_calls);
105static DEFINE_SPINLOCK(afscm_calls_lock);
106static DEFINE_SPINLOCK(kafscmd_attention_lock);
107static int kafscmd_die;
108
109/*****************************************************************************/
110/* 31/*
111 * AFS Cache Manager kernel thread 32 * CB.CallBack operation type
112 */ 33 */
113static int kafscmd(void *arg) 34static const struct afs_call_type afs_SRXCBCallBack = {
114{ 35 .name = "CB.CallBack",
115 DECLARE_WAITQUEUE(myself, current); 36 .deliver = afs_deliver_cb_callback,
116 37 .abort_to_error = afs_abort_to_error,
117 struct rxrpc_call *call; 38 .destructor = afs_cm_destructor,
118 _SRXAFSCM_xxxx_t func; 39};
119 int die;
120
121 printk(KERN_INFO "kAFS: Started kafscmd %d\n", current->pid);
122
123 daemonize("kafscmd");
124
125 complete(&kafscmd_alive);
126
127 /* loop around looking for things to attend to */
128 do {
129 if (list_empty(&kafscmd_attention_list)) {
130 set_current_state(TASK_INTERRUPTIBLE);
131 add_wait_queue(&kafscmd_sleepq, &myself);
132
133 for (;;) {
134 set_current_state(TASK_INTERRUPTIBLE);
135 if (!list_empty(&kafscmd_attention_list) ||
136 signal_pending(current) ||
137 kafscmd_die)
138 break;
139
140 schedule();
141 }
142
143 remove_wait_queue(&kafscmd_sleepq, &myself);
144 set_current_state(TASK_RUNNING);
145 }
146
147 die = kafscmd_die;
148
149 /* dequeue the next call requiring attention */
150 call = NULL;
151 spin_lock(&kafscmd_attention_lock);
152
153 if (!list_empty(&kafscmd_attention_list)) {
154 call = list_entry(kafscmd_attention_list.next,
155 struct rxrpc_call,
156 app_attn_link);
157 list_del_init(&call->app_attn_link);
158 die = 0;
159 }
160
161 spin_unlock(&kafscmd_attention_lock);
162
163 if (call) {
164 /* act upon it */
165 _debug("@@@ Begin Attend Call %p", call);
166
167 func = call->app_user;
168 if (func)
169 func(call);
170
171 rxrpc_put_call(call);
172
173 _debug("@@@ End Attend Call %p", call);
174 }
175
176 } while(!die);
177
178 /* and that's all */
179 complete_and_exit(&kafscmd_dead, 0);
180
181} /* end kafscmd() */
182 40
183/*****************************************************************************/
184/* 41/*
185 * handle a call coming in to the cache manager 42 * CB.InitCallBackState operation type
186 * - if I want to keep the call, I must increment its usage count
187 * - the return value will be negated and passed back in an abort packet if
188 * non-zero
189 * - serialised by virtue of there only being one krxiod
190 */ 43 */
191static int afscm_new_call(struct rxrpc_call *call) 44static const struct afs_call_type afs_SRXCBInitCallBackState = {
192{ 45 .name = "CB.InitCallBackState",
193 _enter("%p{cid=%u u=%d}", 46 .deliver = afs_deliver_cb_init_call_back_state,
194 call, ntohl(call->call_id), atomic_read(&call->usage)); 47 .abort_to_error = afs_abort_to_error,
195 48 .destructor = afs_cm_destructor,
196 rxrpc_get_call(call); 49};
197
198 /* add to my current call list */
199 spin_lock(&afscm_calls_lock);
200 list_add(&call->app_link,&afscm_calls);
201 spin_unlock(&afscm_calls_lock);
202
203 _leave(" = 0");
204 return 0;
205
206} /* end afscm_new_call() */
207 50
208/*****************************************************************************/
209/* 51/*
210 * queue on the kafscmd queue for attention 52 * CB.InitCallBackState3 operation type
211 */ 53 */
212static void afscm_attention(struct rxrpc_call *call) 54static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
213{ 55 .name = "CB.InitCallBackState3",
214 _enter("%p{cid=%u u=%d}", 56 .deliver = afs_deliver_cb_init_call_back_state3,
215 call, ntohl(call->call_id), atomic_read(&call->usage)); 57 .abort_to_error = afs_abort_to_error,
216 58 .destructor = afs_cm_destructor,
217 spin_lock(&kafscmd_attention_lock); 59};
218
219 if (list_empty(&call->app_attn_link)) {
220 list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
221 rxrpc_get_call(call);
222 }
223
224 spin_unlock(&kafscmd_attention_lock);
225
226 wake_up(&kafscmd_sleepq);
227
228 _leave(" {u=%d}", atomic_read(&call->usage));
229} /* end afscm_attention() */
230 60
231/*****************************************************************************/
232/* 61/*
233 * handle my call being aborted 62 * CB.Probe operation type
234 * - clean up, dequeue and put my ref to the call
235 */ 63 */
236static void afscm_error(struct rxrpc_call *call) 64static const struct afs_call_type afs_SRXCBProbe = {
237{ 65 .name = "CB.Probe",
238 int removed; 66 .deliver = afs_deliver_cb_probe,
239 67 .abort_to_error = afs_abort_to_error,
240 _enter("%p{est=%s ac=%u er=%d}", 68 .destructor = afs_cm_destructor,
241 call, 69};
242 rxrpc_call_error_states[call->app_err_state],
243 call->app_abort_code,
244 call->app_errno);
245
246 spin_lock(&kafscmd_attention_lock);
247
248 if (list_empty(&call->app_attn_link)) {
249 list_add_tail(&call->app_attn_link, &kafscmd_attention_list);
250 rxrpc_get_call(call);
251 }
252
253 spin_unlock(&kafscmd_attention_lock);
254
255 removed = 0;
256 spin_lock(&afscm_calls_lock);
257 if (!list_empty(&call->app_link)) {
258 list_del_init(&call->app_link);
259 removed = 1;
260 }
261 spin_unlock(&afscm_calls_lock);
262
263 if (removed)
264 rxrpc_put_call(call);
265
266 wake_up(&kafscmd_sleepq);
267 70
268 _leave(""); 71/*
269} /* end afscm_error() */ 72 * CB.GetCapabilities operation type
73 */
74static const struct afs_call_type afs_SRXCBGetCapabilites = {
75 .name = "CB.GetCapabilities",
76 .deliver = afs_deliver_cb_get_capabilities,
77 .abort_to_error = afs_abort_to_error,
78 .destructor = afs_cm_destructor,
79};
270 80
271/*****************************************************************************/
272/* 81/*
273 * map afs abort codes to/from Linux error codes 82 * route an incoming cache manager call
274 * - called with call->lock held 83 * - return T if supported, F if not
275 */ 84 */
276static void afscm_aemap(struct rxrpc_call *call) 85bool afs_cm_incoming_call(struct afs_call *call)
277{ 86{
278 switch (call->app_err_state) { 87 u32 operation_id = ntohl(call->operation_ID);
279 case RXRPC_ESTATE_LOCAL_ABORT: 88
280 call->app_abort_code = -call->app_errno; 89 _enter("{CB.OP %u}", operation_id);
281 break; 90
282 case RXRPC_ESTATE_PEER_ABORT: 91 switch (operation_id) {
283 call->app_errno = -ECONNABORTED; 92 case CBCallBack:
284 break; 93 call->type = &afs_SRXCBCallBack;
94 return true;
95 case CBInitCallBackState:
96 call->type = &afs_SRXCBInitCallBackState;
97 return true;
98 case CBInitCallBackState3:
99 call->type = &afs_SRXCBInitCallBackState3;
100 return true;
101 case CBProbe:
102 call->type = &afs_SRXCBProbe;
103 return true;
104 case CBGetCapabilities:
105 call->type = &afs_SRXCBGetCapabilites;
106 return true;
285 default: 107 default:
286 break; 108 return false;
287 } 109 }
288} /* end afscm_aemap() */ 110}
289 111
290/*****************************************************************************/
291/* 112/*
292 * start the cache manager service if not already started 113 * clean up a cache manager call
293 */ 114 */
294int afscm_start(void) 115static void afs_cm_destructor(struct afs_call *call)
295{ 116{
296 int ret; 117 _enter("");
297
298 down_write(&afscm_sem);
299 if (!afscm_usage) {
300 ret = kernel_thread(kafscmd, NULL, 0);
301 if (ret < 0)
302 goto out;
303
304 wait_for_completion(&kafscmd_alive);
305
306 ret = rxrpc_add_service(afs_transport, &AFSCM_service);
307 if (ret < 0)
308 goto kill;
309
310 afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
311 afs_mntpt_expiry_timeout * HZ);
312 }
313
314 afscm_usage++;
315 up_write(&afscm_sem);
316
317 return 0;
318
319 kill:
320 kafscmd_die = 1;
321 wake_up(&kafscmd_sleepq);
322 wait_for_completion(&kafscmd_dead);
323
324 out:
325 up_write(&afscm_sem);
326 return ret;
327 118
328} /* end afscm_start() */ 119 afs_put_server(call->server);
120 call->server = NULL;
121 kfree(call->buffer);
122 call->buffer = NULL;
123}
329 124
330/*****************************************************************************/
331/* 125/*
332 * stop the cache manager service 126 * handle the fileserver breaking a set of callbacks
333 */ 127 */
334void afscm_stop(void) 128static void SRXAFSCB_CallBack(struct work_struct *work)
335{ 129{
336 struct rxrpc_call *call; 130 struct afs_call *call = container_of(work, struct afs_call, work);
337 131
338 down_write(&afscm_sem); 132 _enter("");
339 133
340 BUG_ON(afscm_usage == 0); 134 /* be sure to send the reply *before* attempting to spam the AFS server
341 afscm_usage--; 135 * with FSFetchStatus requests on the vnodes with broken callbacks lest
136 * the AFS server get into a vicious cycle of trying to break further
137 * callbacks because it hadn't received completion of the CBCallBack op
138 * yet */
139 afs_send_empty_reply(call);
342 140
343 if (afscm_usage == 0) { 141 afs_break_callbacks(call->server, call->count, call->request);
344 /* don't want more incoming calls */ 142 _leave("");
345 rxrpc_del_service(afs_transport, &AFSCM_service); 143}
346
347 /* abort any calls I've still got open (the afscm_error() will
348 * dequeue them) */
349 spin_lock(&afscm_calls_lock);
350 while (!list_empty(&afscm_calls)) {
351 call = list_entry(afscm_calls.next,
352 struct rxrpc_call,
353 app_link);
354 144
355 list_del_init(&call->app_link); 145/*
356 rxrpc_get_call(call); 146 * deliver request data to a CB.CallBack call
357 spin_unlock(&afscm_calls_lock); 147 */
148static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb,
149 bool last)
150{
151 struct afs_callback *cb;
152 struct afs_server *server;
153 struct in_addr addr;
154 __be32 *bp;
155 u32 tmp;
156 int ret, loop;
157
158 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
159
160 switch (call->unmarshall) {
161 case 0:
162 call->offset = 0;
163 call->unmarshall++;
164
165 /* extract the FID array and its count in two steps */
166 case 1:
167 _debug("extract FID count");
168 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
169 switch (ret) {
170 case 0: break;
171 case -EAGAIN: return 0;
172 default: return ret;
173 }
358 174
359 rxrpc_call_abort(call, -ESRCH); /* abort, dequeue and 175 call->count = ntohl(call->tmp);
360 * put */ 176 _debug("FID count: %u", call->count);
177 if (call->count > AFSCBMAX)
178 return -EBADMSG;
179
180 call->buffer = kmalloc(call->count * 3 * 4, GFP_KERNEL);
181 if (!call->buffer)
182 return -ENOMEM;
183 call->offset = 0;
184 call->unmarshall++;
185
186 case 2:
187 _debug("extract FID array");
188 ret = afs_extract_data(call, skb, last, call->buffer,
189 call->count * 3 * 4);
190 switch (ret) {
191 case 0: break;
192 case -EAGAIN: return 0;
193 default: return ret;
194 }
361 195
362 _debug("nuking active call %08x.%d", 196 _debug("unmarshall FID array");
363 ntohl(call->conn->conn_id), 197 call->request = kcalloc(call->count,
364 ntohl(call->call_id)); 198 sizeof(struct afs_callback),
365 rxrpc_put_call(call); 199 GFP_KERNEL);
366 rxrpc_put_call(call); 200 if (!call->request)
201 return -ENOMEM;
202
203 cb = call->request;
204 bp = call->buffer;
205 for (loop = call->count; loop > 0; loop--, cb++) {
206 cb->fid.vid = ntohl(*bp++);
207 cb->fid.vnode = ntohl(*bp++);
208 cb->fid.unique = ntohl(*bp++);
209 cb->type = AFSCM_CB_UNTYPED;
210 }
367 211
368 spin_lock(&afscm_calls_lock); 212 call->offset = 0;
213 call->unmarshall++;
214
215 /* extract the callback array and its count in two steps */
216 case 3:
217 _debug("extract CB count");
218 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
219 switch (ret) {
220 case 0: break;
221 case -EAGAIN: return 0;
222 default: return ret;
369 } 223 }
370 spin_unlock(&afscm_calls_lock);
371 224
372 /* get rid of my daemon */ 225 tmp = ntohl(call->tmp);
373 kafscmd_die = 1; 226 _debug("CB count: %u", tmp);
374 wake_up(&kafscmd_sleepq); 227 if (tmp != call->count && tmp != 0)
375 wait_for_completion(&kafscmd_dead); 228 return -EBADMSG;
229 call->offset = 0;
230 call->unmarshall++;
231 if (tmp == 0)
232 goto empty_cb_array;
233
234 case 4:
235 _debug("extract CB array");
236 ret = afs_extract_data(call, skb, last, call->request,
237 call->count * 3 * 4);
238 switch (ret) {
239 case 0: break;
240 case -EAGAIN: return 0;
241 default: return ret;
242 }
376 243
377 /* dispose of any calls waiting for attention */ 244 _debug("unmarshall CB array");
378 spin_lock(&kafscmd_attention_lock); 245 cb = call->request;
379 while (!list_empty(&kafscmd_attention_list)) { 246 bp = call->buffer;
380 call = list_entry(kafscmd_attention_list.next, 247 for (loop = call->count; loop > 0; loop--, cb++) {
381 struct rxrpc_call, 248 cb->version = ntohl(*bp++);
382 app_attn_link); 249 cb->expiry = ntohl(*bp++);
250 cb->type = ntohl(*bp++);
251 }
383 252
384 list_del_init(&call->app_attn_link); 253 empty_cb_array:
385 spin_unlock(&kafscmd_attention_lock); 254 call->offset = 0;
255 call->unmarshall++;
386 256
387 rxrpc_put_call(call); 257 case 5:
258 _debug("trailer");
259 if (skb->len != 0)
260 return -EBADMSG;
261 break;
262 }
388 263
389 spin_lock(&kafscmd_attention_lock); 264 if (!last)
390 } 265 return 0;
391 spin_unlock(&kafscmd_attention_lock);
392 266
393 afs_kafstimod_del_timer(&afs_mntpt_expiry_timer); 267 call->state = AFS_CALL_REPLYING;
394 }
395 268
396 up_write(&afscm_sem); 269 /* we'll need the file server record as that tells us which set of
270 * vnodes to operate upon */
271 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
272 server = afs_find_server(&addr);
273 if (!server)
274 return -ENOTCONN;
275 call->server = server;
397 276
398} /* end afscm_stop() */ 277 INIT_WORK(&call->work, SRXAFSCB_CallBack);
278 schedule_work(&call->work);
279 return 0;
280}
399 281
400/*****************************************************************************/
401/* 282/*
402 * handle the fileserver breaking a set of callbacks 283 * allow the fileserver to request callback state (re-)initialisation
403 */ 284 */
404static void _SRXAFSCM_CallBack(struct rxrpc_call *call) 285static void SRXAFSCB_InitCallBackState(struct work_struct *work)
405{ 286{
406 struct afs_server *server; 287 struct afs_call *call = container_of(work, struct afs_call, work);
407 size_t count, qty, tmp;
408 int ret = 0, removed;
409
410 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
411
412 server = afs_server_get_from_peer(call->conn->peer);
413
414 switch (call->app_call_state) {
415 /* we've received the last packet
416 * - drain all the data from the call and send the reply
417 */
418 case RXRPC_CSTATE_SRVR_GOT_ARGS:
419 ret = -EBADMSG;
420 qty = call->app_ready_qty;
421 if (qty < 8 || qty > 50 * (6 * 4) + 8)
422 break;
423
424 {
425 struct afs_callback *cb, *pcb;
426 int loop;
427 __be32 *fp, *bp;
428
429 fp = rxrpc_call_alloc_scratch(call, qty);
430
431 /* drag the entire argument block out to the scratch
432 * space */
433 ret = rxrpc_call_read_data(call, fp, qty, 0);
434 if (ret < 0)
435 break;
436
437 /* and unmarshall the parameter block */
438 ret = -EBADMSG;
439 count = ntohl(*fp++);
440 if (count>AFSCBMAX ||
441 (count * (3 * 4) + 8 != qty &&
442 count * (6 * 4) + 8 != qty))
443 break;
444
445 bp = fp + count*3;
446 tmp = ntohl(*bp++);
447 if (tmp > 0 && tmp != count)
448 break;
449 if (tmp == 0)
450 bp = NULL;
451
452 pcb = cb = rxrpc_call_alloc_scratch_s(
453 call, struct afs_callback);
454
455 for (loop = count - 1; loop >= 0; loop--) {
456 pcb->fid.vid = ntohl(*fp++);
457 pcb->fid.vnode = ntohl(*fp++);
458 pcb->fid.unique = ntohl(*fp++);
459 if (bp) {
460 pcb->version = ntohl(*bp++);
461 pcb->expiry = ntohl(*bp++);
462 pcb->type = ntohl(*bp++);
463 }
464 else {
465 pcb->version = 0;
466 pcb->expiry = 0;
467 pcb->type = AFSCM_CB_UNTYPED;
468 }
469 pcb++;
470 }
471
472 /* invoke the actual service routine */
473 ret = SRXAFSCM_CallBack(server, count, cb);
474 if (ret < 0)
475 break;
476 }
477 288
478 /* send the reply */ 289 _enter("{%p}", call->server);
479 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
480 GFP_KERNEL, 0, &count);
481 if (ret < 0)
482 break;
483 break;
484
485 /* operation complete */
486 case RXRPC_CSTATE_COMPLETE:
487 call->app_user = NULL;
488 removed = 0;
489 spin_lock(&afscm_calls_lock);
490 if (!list_empty(&call->app_link)) {
491 list_del_init(&call->app_link);
492 removed = 1;
493 }
494 spin_unlock(&afscm_calls_lock);
495 290
496 if (removed) 291 afs_init_callback_state(call->server);
497 rxrpc_put_call(call); 292 afs_send_empty_reply(call);
498 break; 293 _leave("");
294}
499 295
500 /* operation terminated on error */ 296/*
501 case RXRPC_CSTATE_ERROR: 297 * deliver request data to a CB.InitCallBackState call
502 call->app_user = NULL; 298 */
503 break; 299static int afs_deliver_cb_init_call_back_state(struct afs_call *call,
300 struct sk_buff *skb,
301 bool last)
302{
303 struct afs_server *server;
304 struct in_addr addr;
504 305
505 default: 306 _enter(",{%u},%d", skb->len, last);
506 break;
507 }
508 307
509 if (ret < 0) 308 if (skb->len > 0)
510 rxrpc_call_abort(call, ret); 309 return -EBADMSG;
310 if (!last)
311 return 0;
511 312
512 afs_put_server(server); 313 /* no unmarshalling required */
314 call->state = AFS_CALL_REPLYING;
513 315
514 _leave(" = %d", ret); 316 /* we'll need the file server record as that tells us which set of
317 * vnodes to operate upon */
318 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
319 server = afs_find_server(&addr);
320 if (!server)
321 return -ENOTCONN;
322 call->server = server;
515 323
516} /* end _SRXAFSCM_CallBack() */ 324 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
325 schedule_work(&call->work);
326 return 0;
327}
517 328
518/*****************************************************************************/
519/* 329/*
520 * handle the fileserver asking us to initialise our callback state 330 * deliver request data to a CB.InitCallBackState3 call
521 */ 331 */
522static void _SRXAFSCM_InitCallBackState(struct rxrpc_call *call) 332static int afs_deliver_cb_init_call_back_state3(struct afs_call *call,
333 struct sk_buff *skb,
334 bool last)
523{ 335{
524 struct afs_server *server; 336 struct afs_server *server;
525 size_t count; 337 struct in_addr addr;
526 int ret = 0, removed;
527 338
528 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]); 339 _enter(",{%u},%d", skb->len, last);
529 340
530 server = afs_server_get_from_peer(call->conn->peer); 341 if (!last)
342 return 0;
531 343
532 switch (call->app_call_state) { 344 /* no unmarshalling required */
533 /* we've received the last packet - drain all the data from the 345 call->state = AFS_CALL_REPLYING;
534 * call */
535 case RXRPC_CSTATE_SRVR_GOT_ARGS:
536 /* shouldn't be any args */
537 ret = -EBADMSG;
538 break;
539
540 /* send the reply when asked for it */
541 case RXRPC_CSTATE_SRVR_SND_REPLY:
542 /* invoke the actual service routine */
543 ret = SRXAFSCM_InitCallBackState(server);
544 if (ret < 0)
545 break;
546
547 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
548 GFP_KERNEL, 0, &count);
549 if (ret < 0)
550 break;
551 break;
552 346
553 /* operation complete */ 347 /* we'll need the file server record as that tells us which set of
554 case RXRPC_CSTATE_COMPLETE: 348 * vnodes to operate upon */
555 call->app_user = NULL; 349 memcpy(&addr, &ip_hdr(skb)->saddr, 4);
556 removed = 0; 350 server = afs_find_server(&addr);
557 spin_lock(&afscm_calls_lock); 351 if (!server)
558 if (!list_empty(&call->app_link)) { 352 return -ENOTCONN;
559 list_del_init(&call->app_link); 353 call->server = server;
560 removed = 1;
561 }
562 spin_unlock(&afscm_calls_lock);
563 354
564 if (removed) 355 INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
565 rxrpc_put_call(call); 356 schedule_work(&call->work);
566 break; 357 return 0;
567 358}
568 /* operation terminated on error */
569 case RXRPC_CSTATE_ERROR:
570 call->app_user = NULL;
571 break;
572
573 default:
574 break;
575 }
576
577 if (ret < 0)
578 rxrpc_call_abort(call, ret);
579
580 afs_put_server(server);
581 359
582 _leave(" = %d", ret); 360/*
361 * allow the fileserver to see if the cache manager is still alive
362 */
363static void SRXAFSCB_Probe(struct work_struct *work)
364{
365 struct afs_call *call = container_of(work, struct afs_call, work);
583 366
584} /* end _SRXAFSCM_InitCallBackState() */ 367 _enter("");
368 afs_send_empty_reply(call);
369 _leave("");
370}
585 371
586/*****************************************************************************/
587/* 372/*
588 * handle a probe from a fileserver 373 * deliver request data to a CB.Probe call
589 */ 374 */
590static void _SRXAFSCM_Probe(struct rxrpc_call *call) 375static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
376 bool last)
591{ 377{
592 struct afs_server *server; 378 _enter(",{%u},%d", skb->len, last);
593 size_t count;
594 int ret = 0, removed;
595
596 _enter("%p{acs=%s}", call, rxrpc_call_states[call->app_call_state]);
597 379
598 server = afs_server_get_from_peer(call->conn->peer); 380 if (skb->len > 0)
381 return -EBADMSG;
382 if (!last)
383 return 0;
599 384
600 switch (call->app_call_state) { 385 /* no unmarshalling required */
601 /* we've received the last packet - drain all the data from the 386 call->state = AFS_CALL_REPLYING;
602 * call */
603 case RXRPC_CSTATE_SRVR_GOT_ARGS:
604 /* shouldn't be any args */
605 ret = -EBADMSG;
606 break;
607 387
608 /* send the reply when asked for it */ 388 INIT_WORK(&call->work, SRXAFSCB_Probe);
609 case RXRPC_CSTATE_SRVR_SND_REPLY: 389 schedule_work(&call->work);
610 /* invoke the actual service routine */ 390 return 0;
611 ret = SRXAFSCM_Probe(server); 391}
612 if (ret < 0)
613 break;
614
615 ret = rxrpc_call_write_data(call, 0, NULL, RXRPC_LAST_PACKET,
616 GFP_KERNEL, 0, &count);
617 if (ret < 0)
618 break;
619 break;
620 392
621 /* operation complete */ 393/*
622 case RXRPC_CSTATE_COMPLETE: 394 * allow the fileserver to ask about the cache manager's capabilities
623 call->app_user = NULL; 395 */
624 removed = 0; 396static void SRXAFSCB_GetCapabilities(struct work_struct *work)
625 spin_lock(&afscm_calls_lock); 397{
626 if (!list_empty(&call->app_link)) { 398 struct afs_interface *ifs;
627 list_del_init(&call->app_link); 399 struct afs_call *call = container_of(work, struct afs_call, work);
628 removed = 1; 400 int loop, nifs;
401
402 struct {
403 struct /* InterfaceAddr */ {
404 __be32 nifs;
405 __be32 uuid[11];
406 __be32 ifaddr[32];
407 __be32 netmask[32];
408 __be32 mtu[32];
409 } ia;
410 struct /* Capabilities */ {
411 __be32 capcount;
412 __be32 caps[1];
413 } cap;
414 } reply;
415
416 _enter("");
417
418 nifs = 0;
419 ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL);
420 if (ifs) {
421 nifs = afs_get_ipv4_interfaces(ifs, 32, false);
422 if (nifs < 0) {
423 kfree(ifs);
424 ifs = NULL;
425 nifs = 0;
629 } 426 }
630 spin_unlock(&afscm_calls_lock); 427 }
631 428
632 if (removed) 429 memset(&reply, 0, sizeof(reply));
633 rxrpc_put_call(call); 430 reply.ia.nifs = htonl(nifs);
634 break; 431
432 reply.ia.uuid[0] = htonl(afs_uuid.time_low);
433 reply.ia.uuid[1] = htonl(afs_uuid.time_mid);
434 reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version);
435 reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
436 reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
437 for (loop = 0; loop < 6; loop++)
438 reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]);
439
440 if (ifs) {
441 for (loop = 0; loop < nifs; loop++) {
442 reply.ia.ifaddr[loop] = ifs[loop].address.s_addr;
443 reply.ia.netmask[loop] = ifs[loop].netmask.s_addr;
444 reply.ia.mtu[loop] = htonl(ifs[loop].mtu);
445 }
446 }
635 447
636 /* operation terminated on error */ 448 reply.cap.capcount = htonl(1);
637 case RXRPC_CSTATE_ERROR: 449 reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
638 call->app_user = NULL; 450 afs_send_simple_reply(call, &reply, sizeof(reply));
639 break;
640 451
641 default: 452 _leave("");
642 break; 453}
643 }
644 454
645 if (ret < 0) 455/*
646 rxrpc_call_abort(call, ret); 456 * deliver request data to a CB.GetCapabilities call
457 */
458static int afs_deliver_cb_get_capabilities(struct afs_call *call,
459 struct sk_buff *skb, bool last)
460{
461 _enter(",{%u},%d", skb->len, last);
647 462
648 afs_put_server(server); 463 if (skb->len > 0)
464 return -EBADMSG;
465 if (!last)
466 return 0;
649 467
650 _leave(" = %d", ret); 468 /* no unmarshalling required */
469 call->state = AFS_CALL_REPLYING;
651 470
652} /* end _SRXAFSCM_Probe() */ 471 INIT_WORK(&call->work, SRXAFSCB_GetCapabilities);
472 schedule_work(&call->work);
473 return 0;
474}
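
The delivery functions above, afs_deliver_cb_callback() most visibly, unmarshal their arguments incrementally: call->unmarshall records which phase parsing has reached, call->offset counts the bytes gathered for the current field, and afs_extract_data() returning -EAGAIN means "wait for the next packet". Below is a self-contained user-space sketch of the same count-then-array pattern; struct parser and its helpers are invented for illustration and only model the kernel logic.

#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <arpa/inet.h>		/* ntohl() */

struct parser {
	int phase;		/* 0 = count, 1 = array, 2 = done */
	size_t offset;		/* bytes gathered for the current field */
	uint32_t tmp;		/* network-order scratch for the count */
	uint32_t count;		/* number of 32-bit elements expected */
	uint32_t *items;	/* decoded elements */
};

/* gather up to want bytes into buf; -EAGAIN means "feed me more data" */
static int extract(struct parser *p, void *buf, size_t want,
		   const uint8_t **data, size_t *len)
{
	size_t n = want - p->offset;

	if (n > *len)
		n = *len;
	memcpy((uint8_t *) buf + p->offset, *data, n);
	p->offset += n;
	*data += n;
	*len -= n;
	return p->offset < want ? -EAGAIN : 0;
}

/* feed one fragment of the stream; returns 0, -EAGAIN or a negative errno */
static int deliver(struct parser *p, const uint8_t *data, size_t len)
{
	size_t i;
	int ret;

	switch (p->phase) {
	case 0:	/* element count: one big-endian 32-bit word */
		ret = extract(p, &p->tmp, 4, &data, &len);
		if (ret < 0)
			return ret;
		p->count = ntohl(p->tmp);
		p->items = calloc(p->count, sizeof(uint32_t));
		if (p->count && !p->items)
			return -ENOMEM;
		p->offset = 0;
		p->phase = 1;
		/* fall through */
	case 1:	/* the array itself */
		ret = extract(p, p->items, p->count * 4, &data, &len);
		if (ret < 0)
			return ret;
		for (i = 0; i < p->count; i++)
			p->items[i] = ntohl(p->items[i]);
		p->phase = 2;
	}
	return len ? -EBADMSG : 0;	/* trailing data is a protocol error */
}

Because the phase and offset persist in the parser between calls, deliver() can be fed the stream one fragment at a time, exactly as the cache manager is fed one sk_buff at a time.
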
diff --git a/fs/afs/cmservice.h b/fs/afs/cmservice.h
deleted file mode 100644
index af8d4d689cb2..000000000000
--- a/fs/afs/cmservice.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/* cmservice.h: AFS Cache Manager Service declarations
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_CMSERVICE_H
13#define _LINUX_AFS_CMSERVICE_H
14
15#include <rxrpc/transport.h>
16#include "types.h"
17
18/* cache manager start/stop */
19extern int afscm_start(void);
20extern void afscm_stop(void);
21
22/* cache manager server functions */
23extern int SRXAFSCM_InitCallBackState(struct afs_server *server);
24extern int SRXAFSCM_CallBack(struct afs_server *server,
25 size_t count,
26 struct afs_callback callbacks[]);
27extern int SRXAFSCM_Probe(struct afs_server *server);
28
29#endif /* _LINUX_AFS_CMSERVICE_H */
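
Where this deleted header exported fixed entry points, the new cmservice.c above routes each incoming call by operation ID: afs_cm_incoming_call() selects an afs_call_type whose ->deliver() does the per-operation unmarshalling. A minimal sketch of that dispatch shape follows; the IDs 204 and 206 come from the removed AFSCM_ops table (the real code uses the CBCallBack/CBProbe constants from afs_cm.h), and the rest is illustrative.

#include <stdbool.h>
#include <stdio.h>

struct call;

struct call_type {
	const char *name;
	int (*deliver)(struct call *call);	/* per-operation unmarshaller */
};

struct call {
	unsigned int operation_id;
	const struct call_type *type;
};

static int deliver_callback(struct call *call) { (void) call; return 0; }
static int deliver_probe(struct call *call)    { (void) call; return 0; }

static const struct call_type CBCallBack_type = {
	.name = "CB.CallBack", .deliver = deliver_callback,
};
static const struct call_type CBProbe_type = {
	.name = "CB.Probe", .deliver = deliver_probe,
};

/* route an incoming call - true if the operation is supported */
static bool incoming_call(struct call *call)
{
	switch (call->operation_id) {
	case 204: call->type = &CBCallBack_type; return true;	/* CB.CallBack */
	case 206: call->type = &CBProbe_type;    return true;	/* CB.Probe */
	default:  return false;
	}
}

int main(void)
{
	struct call call = { .operation_id = 206 };

	if (incoming_call(&call))
		printf("dispatching %s\n", call.type->name);
	return 0;
}
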
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index b6dc2ebe47a8..dac5b990c0cd 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -15,45 +15,53 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/pagemap.h> 17#include <linux/pagemap.h>
18#include <linux/smp_lock.h> 18#include <linux/ctype.h>
19#include "vnode.h"
20#include "volume.h"
21#include <rxrpc/call.h>
22#include "super.h"
23#include "internal.h" 19#include "internal.h"
24 20
25static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, 21static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
26 struct nameidata *nd); 22 struct nameidata *nd);
27static int afs_dir_open(struct inode *inode, struct file *file); 23static int afs_dir_open(struct inode *inode, struct file *file);
28static int afs_dir_readdir(struct file *file, void *dirent, filldir_t filldir); 24static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
29static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd); 25static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
30static int afs_d_delete(struct dentry *dentry); 26static int afs_d_delete(struct dentry *dentry);
31static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, 27static void afs_d_release(struct dentry *dentry);
28static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
32 loff_t fpos, u64 ino, unsigned dtype); 29 loff_t fpos, u64 ino, unsigned dtype);
30static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
31 struct nameidata *nd);
32static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode);
33static int afs_rmdir(struct inode *dir, struct dentry *dentry);
34static int afs_unlink(struct inode *dir, struct dentry *dentry);
35static int afs_link(struct dentry *from, struct inode *dir,
36 struct dentry *dentry);
37static int afs_symlink(struct inode *dir, struct dentry *dentry,
38 const char *content);
39static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
40 struct inode *new_dir, struct dentry *new_dentry);
33 41
34const struct file_operations afs_dir_file_operations = { 42const struct file_operations afs_dir_file_operations = {
35 .open = afs_dir_open, 43 .open = afs_dir_open,
36 .readdir = afs_dir_readdir, 44 .release = afs_release,
45 .readdir = afs_readdir,
37}; 46};
38 47
39const struct inode_operations afs_dir_inode_operations = { 48const struct inode_operations afs_dir_inode_operations = {
40 .lookup = afs_dir_lookup, 49 .create = afs_create,
50 .lookup = afs_lookup,
51 .link = afs_link,
52 .unlink = afs_unlink,
53 .symlink = afs_symlink,
54 .mkdir = afs_mkdir,
55 .rmdir = afs_rmdir,
56 .rename = afs_rename,
57 .permission = afs_permission,
41 .getattr = afs_inode_getattr, 58 .getattr = afs_inode_getattr,
42#if 0 /* TODO */
43 .create = afs_dir_create,
44 .link = afs_dir_link,
45 .unlink = afs_dir_unlink,
46 .symlink = afs_dir_symlink,
47 .mkdir = afs_dir_mkdir,
48 .rmdir = afs_dir_rmdir,
49 .mknod = afs_dir_mknod,
50 .rename = afs_dir_rename,
51#endif
52}; 59};
53 60
54static struct dentry_operations afs_fs_dentry_operations = { 61static struct dentry_operations afs_fs_dentry_operations = {
55 .d_revalidate = afs_d_revalidate, 62 .d_revalidate = afs_d_revalidate,
56 .d_delete = afs_d_delete, 63 .d_delete = afs_d_delete,
64 .d_release = afs_d_release,
57}; 65};
58 66
59#define AFS_DIR_HASHTBL_SIZE 128 67#define AFS_DIR_HASHTBL_SIZE 128
@@ -105,14 +113,13 @@ struct afs_dir_page {
105 union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)]; 113 union afs_dir_block blocks[PAGE_SIZE / sizeof(union afs_dir_block)];
106}; 114};
107 115
108struct afs_dir_lookup_cookie { 116struct afs_lookup_cookie {
109 struct afs_fid fid; 117 struct afs_fid fid;
110 const char *name; 118 const char *name;
111 size_t nlen; 119 size_t nlen;
112 int found; 120 int found;
113}; 121};
114 122
115/*****************************************************************************/
116/* 123/*
117 * check that a directory page is valid 124 * check that a directory page is valid
118 */ 125 */
@@ -128,9 +135,10 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
128 if (qty == 0) 135 if (qty == 0)
129 goto error; 136 goto error;
130 137
131 if (page->index==0 && qty!=ntohs(dbuf->blocks[0].pagehdr.npages)) { 138 if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
132 printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n", 139 printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
133 __FUNCTION__,dir->i_ino,qty,ntohs(dbuf->blocks[0].pagehdr.npages)); 140 __FUNCTION__, dir->i_ino, qty,
141 ntohs(dbuf->blocks[0].pagehdr.npages));
134 goto error; 142 goto error;
135 } 143 }
136#endif 144#endif
@@ -157,13 +165,11 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
157 SetPageChecked(page); 165 SetPageChecked(page);
158 return; 166 return;
159 167
160 error: 168error:
161 SetPageChecked(page); 169 SetPageChecked(page);
162 SetPageError(page); 170 SetPageError(page);
171}
163 172
164} /* end afs_dir_check_page() */
165
166/*****************************************************************************/
167/* 173/*
168 * discard a page cached in the pagecache 174 * discard a page cached in the pagecache
169 */ 175 */
@@ -171,20 +177,22 @@ static inline void afs_dir_put_page(struct page *page)
171{ 177{
172 kunmap(page); 178 kunmap(page);
173 page_cache_release(page); 179 page_cache_release(page);
180}
174 181
175} /* end afs_dir_put_page() */
176
177/*****************************************************************************/
178/* 182/*
179 * get a page into the pagecache 183 * get a page into the pagecache
180 */ 184 */
181static struct page *afs_dir_get_page(struct inode *dir, unsigned long index) 185static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
186 struct key *key)
182{ 187{
183 struct page *page; 188 struct page *page;
189 struct file file = {
190 .private_data = key,
191 };
184 192
185 _enter("{%lu},%lu", dir->i_ino, index); 193 _enter("{%lu},%lu", dir->i_ino, index);
186 194
187 page = read_mapping_page(dir->i_mapping, index, NULL); 195 page = read_mapping_page(dir->i_mapping, index, &file);
188 if (!IS_ERR(page)) { 196 if (!IS_ERR(page)) {
189 wait_on_page_locked(page); 197 wait_on_page_locked(page);
190 kmap(page); 198 kmap(page);
@@ -197,12 +205,12 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
197 } 205 }
198 return page; 206 return page;
199 207
200 fail: 208fail:
201 afs_dir_put_page(page); 209 afs_dir_put_page(page);
210 _leave(" = -EIO");
202 return ERR_PTR(-EIO); 211 return ERR_PTR(-EIO);
203} /* end afs_dir_get_page() */ 212}
204 213
205/*****************************************************************************/
206/* 214/*
207 * open an AFS directory file 215 * open an AFS directory file
208 */ 216 */
@@ -213,15 +221,12 @@ static int afs_dir_open(struct inode *inode, struct file *file)
213 BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); 221 BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
214 BUILD_BUG_ON(sizeof(union afs_dirent) != 32); 222 BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
215 223
216 if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) 224 if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(inode)->flags))
217 return -ENOENT; 225 return -ENOENT;
218 226
219 _leave(" = 0"); 227 return afs_open(inode, file);
220 return 0; 228}
221
222} /* end afs_dir_open() */
223 229
224/*****************************************************************************/
225/* 230/*
226 * deal with one block in an AFS directory 231 * deal with one block in an AFS directory
227 */ 232 */
@@ -250,7 +255,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
250 /* skip entries marked unused in the bitmap */ 255 /* skip entries marked unused in the bitmap */
251 if (!(block->pagehdr.bitmap[offset / 8] & 256 if (!(block->pagehdr.bitmap[offset / 8] &
252 (1 << (offset % 8)))) { 257 (1 << (offset % 8)))) {
253 _debug("ENT[%Zu.%u]: unused\n", 258 _debug("ENT[%Zu.%u]: unused",
254 blkoff / sizeof(union afs_dir_block), offset); 259 blkoff / sizeof(union afs_dir_block), offset);
255 if (offset >= curr) 260 if (offset >= curr)
256 *fpos = blkoff + 261 *fpos = blkoff +
@@ -264,7 +269,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
264 sizeof(*block) - 269 sizeof(*block) -
265 offset * sizeof(union afs_dirent)); 270 offset * sizeof(union afs_dirent));
266 271
267 _debug("ENT[%Zu.%u]: %s %Zu \"%s\"\n", 272 _debug("ENT[%Zu.%u]: %s %Zu \"%s\"",
268 blkoff / sizeof(union afs_dir_block), offset, 273 blkoff / sizeof(union afs_dir_block), offset,
269 (offset < curr ? "skip" : "fill"), 274 (offset < curr ? "skip" : "fill"),
270 nlen, dire->u.name); 275 nlen, dire->u.name);
@@ -274,7 +279,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
274 if (next >= AFS_DIRENT_PER_BLOCK) { 279 if (next >= AFS_DIRENT_PER_BLOCK) {
275 _debug("ENT[%Zu.%u]:" 280 _debug("ENT[%Zu.%u]:"
276 " %u travelled beyond end dir block" 281 " %u travelled beyond end dir block"
277 " (len %u/%Zu)\n", 282 " (len %u/%Zu)",
278 blkoff / sizeof(union afs_dir_block), 283 blkoff / sizeof(union afs_dir_block),
279 offset, next, tmp, nlen); 284 offset, next, tmp, nlen);
280 return -EIO; 285 return -EIO;
@@ -282,13 +287,13 @@ static int afs_dir_iterate_block(unsigned *fpos,
282 if (!(block->pagehdr.bitmap[next / 8] & 287 if (!(block->pagehdr.bitmap[next / 8] &
283 (1 << (next % 8)))) { 288 (1 << (next % 8)))) {
284 _debug("ENT[%Zu.%u]:" 289 _debug("ENT[%Zu.%u]:"
285 " %u unmarked extension (len %u/%Zu)\n", 290 " %u unmarked extension (len %u/%Zu)",
286 blkoff / sizeof(union afs_dir_block), 291 blkoff / sizeof(union afs_dir_block),
287 offset, next, tmp, nlen); 292 offset, next, tmp, nlen);
288 return -EIO; 293 return -EIO;
289 } 294 }
290 295
291 _debug("ENT[%Zu.%u]: ext %u/%Zu\n", 296 _debug("ENT[%Zu.%u]: ext %u/%Zu",
292 blkoff / sizeof(union afs_dir_block), 297 blkoff / sizeof(union afs_dir_block),
293 next, tmp, nlen); 298 next, tmp, nlen);
294 next++; 299 next++;
@@ -304,7 +309,7 @@ static int afs_dir_iterate_block(unsigned *fpos,
304 nlen, 309 nlen,
305 blkoff + offset * sizeof(union afs_dirent), 310 blkoff + offset * sizeof(union afs_dirent),
306 ntohl(dire->u.vnode), 311 ntohl(dire->u.vnode),
307 filldir == afs_dir_lookup_filldir ? 312 filldir == afs_lookup_filldir ?
308 ntohl(dire->u.unique) : DT_UNKNOWN); 313 ntohl(dire->u.unique) : DT_UNKNOWN);
309 if (ret < 0) { 314 if (ret < 0) {
310 _leave(" = 0 [full]"); 315 _leave(" = 0 [full]");
@@ -316,16 +321,15 @@ static int afs_dir_iterate_block(unsigned *fpos,
316 321
317 _leave(" = 1 [more]"); 322 _leave(" = 1 [more]");
318 return 1; 323 return 1;
319} /* end afs_dir_iterate_block() */ 324}
320 325
321/*****************************************************************************/
322/* 326/*
323 * read an AFS directory 327 * iterate through the data blob that lists the contents of an AFS directory
324 */ 328 */
325static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie, 329static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
326 filldir_t filldir) 330 filldir_t filldir, struct key *key)
327{ 331{
328 union afs_dir_block *dblock; 332 union afs_dir_block *dblock;
329 struct afs_dir_page *dbuf; 333 struct afs_dir_page *dbuf;
330 struct page *page; 334 struct page *page;
331 unsigned blkoff, limit; 335 unsigned blkoff, limit;
@@ -333,7 +337,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
333 337
334 _enter("{%lu},%u,,", dir->i_ino, *fpos); 338 _enter("{%lu},%u,,", dir->i_ino, *fpos);
335 339
336 if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { 340 if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) {
337 _leave(" = -ESTALE"); 341 _leave(" = -ESTALE");
338 return -ESTALE; 342 return -ESTALE;
339 } 343 }
@@ -348,7 +352,7 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
348 blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1); 352 blkoff = *fpos & ~(sizeof(union afs_dir_block) - 1);
349 353
350 /* fetch the appropriate page from the directory */ 354 /* fetch the appropriate page from the directory */
351 page = afs_dir_get_page(dir, blkoff / PAGE_SIZE); 355 page = afs_dir_get_page(dir, blkoff / PAGE_SIZE, key);
352 if (IS_ERR(page)) { 356 if (IS_ERR(page)) {
353 ret = PTR_ERR(page); 357 ret = PTR_ERR(page);
354 break; 358 break;
@@ -377,43 +381,50 @@ static int afs_dir_iterate(struct inode *dir, unsigned *fpos, void *cookie,
377 ret = 0; 381 ret = 0;
378 } 382 }
379 383
380 out: 384out:
381 _leave(" = %d", ret); 385 _leave(" = %d", ret);
382 return ret; 386 return ret;
383} /* end afs_dir_iterate() */ 387}
384 388
385/*****************************************************************************/
386/* 389/*
387 * read an AFS directory 390 * read an AFS directory
388 */ 391 */
389static int afs_dir_readdir(struct file *file, void *cookie, filldir_t filldir) 392static int afs_readdir(struct file *file, void *cookie, filldir_t filldir)
390{ 393{
391 unsigned fpos; 394 unsigned fpos;
392 int ret; 395 int ret;
393 396
394 _enter("{%Ld,{%lu}}", file->f_pos, file->f_path.dentry->d_inode->i_ino); 397 _enter("{%Ld,{%lu}}",
398 file->f_pos, file->f_path.dentry->d_inode->i_ino);
399
400 ASSERT(file->private_data != NULL);
395 401
396 fpos = file->f_pos; 402 fpos = file->f_pos;
397 ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos, cookie, filldir); 403 ret = afs_dir_iterate(file->f_path.dentry->d_inode, &fpos,
404 cookie, filldir, file->private_data);
398 file->f_pos = fpos; 405 file->f_pos = fpos;
399 406
400 _leave(" = %d", ret); 407 _leave(" = %d", ret);
401 return ret; 408 return ret;
402} /* end afs_dir_readdir() */ 409}
403 410
404/*****************************************************************************/
405/* 411/*
406 * search the directory for a name 412 * search the directory for a name
407 * - if afs_dir_iterate_block() spots this function, it'll pass the FID 413 * - if afs_dir_iterate_block() spots this function, it'll pass the FID
408 * uniquifier through dtype 414 * uniquifier through dtype
409 */ 415 */
410static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen, 416static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
411 loff_t fpos, u64 ino, unsigned dtype) 417 loff_t fpos, u64 ino, unsigned dtype)
412{ 418{
413 struct afs_dir_lookup_cookie *cookie = _cookie; 419 struct afs_lookup_cookie *cookie = _cookie;
414 420
415 _enter("{%s,%Zu},%s,%u,,%lu,%u", 421 _enter("{%s,%Zu},%s,%u,,%llu,%u",
416 cookie->name, cookie->nlen, name, nlen, ino, dtype); 422 cookie->name, cookie->nlen, name, nlen,
423 (unsigned long long) ino, dtype);
424
425 /* insanity checks first */
426 BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048);
427 BUILD_BUG_ON(sizeof(union afs_dirent) != 32);
417 428
418 if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) { 429 if (cookie->nlen != nlen || memcmp(cookie->name, name, nlen) != 0) {
419 _leave(" = 0 [no]"); 430 _leave(" = 0 [no]");
@@ -426,216 +437,254 @@ static int afs_dir_lookup_filldir(void *_cookie, const char *name, int nlen,
426 437
427 _leave(" = -1 [found]"); 438 _leave(" = -1 [found]");
428 return -1; 439 return -1;
429} /* end afs_dir_lookup_filldir() */ 440}
430 441
431/*****************************************************************************/
432/* 442/*
433 * look up an entry in a directory 443 * do a lookup in a directory
444 * - just returns the FID the dentry name maps to if found
434 */ 445 */
435static struct dentry *afs_dir_lookup(struct inode *dir, struct dentry *dentry, 446static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
436 struct nameidata *nd) 447 struct afs_fid *fid, struct key *key)
437{ 448{
438 struct afs_dir_lookup_cookie cookie; 449 struct afs_lookup_cookie cookie;
439 struct afs_super_info *as; 450 struct afs_super_info *as;
451 unsigned fpos;
452 int ret;
453
454 _enter("{%lu},%p{%s},", dir->i_ino, dentry, dentry->d_name.name);
455
456 as = dir->i_sb->s_fs_info;
457
458 /* search the directory */
459 cookie.name = dentry->d_name.name;
460 cookie.nlen = dentry->d_name.len;
461 cookie.fid.vid = as->volume->vid;
462 cookie.found = 0;
463
464 fpos = 0;
465 ret = afs_dir_iterate(dir, &fpos, &cookie, afs_lookup_filldir,
466 key);
467 if (ret < 0) {
468 _leave(" = %d [iter]", ret);
469 return ret;
470 }
471
472 ret = -ENOENT;
473 if (!cookie.found) {
474 _leave(" = -ENOENT [not found]");
475 return -ENOENT;
476 }
477
478 *fid = cookie.fid;
479 _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique);
480 return 0;
481}
482
483/*
484 * look up an entry in a directory
485 */
486static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
487 struct nameidata *nd)
488{
440 struct afs_vnode *vnode; 489 struct afs_vnode *vnode;
490 struct afs_fid fid;
441 struct inode *inode; 491 struct inode *inode;
442 unsigned fpos; 492 struct key *key;
443 int ret; 493 int ret;
444 494
445 _enter("{%lu},%p{%s}", dir->i_ino, dentry, dentry->d_name.name); 495 vnode = AFS_FS_I(dir);
446 496
447 /* insanity checks first */ 497 _enter("{%x:%d},%p{%s},",
448 BUILD_BUG_ON(sizeof(union afs_dir_block) != 2048); 498 vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name);
449 BUILD_BUG_ON(sizeof(union afs_dirent) != 32); 499
500 ASSERTCMP(dentry->d_inode, ==, NULL);
450 501
451 if (dentry->d_name.len > 255) { 502 if (dentry->d_name.len > 255) {
452 _leave(" = -ENAMETOOLONG"); 503 _leave(" = -ENAMETOOLONG");
453 return ERR_PTR(-ENAMETOOLONG); 504 return ERR_PTR(-ENAMETOOLONG);
454 } 505 }
455 506
456 vnode = AFS_FS_I(dir); 507 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
457 if (vnode->flags & AFS_VNODE_DELETED) {
458 _leave(" = -ESTALE"); 508 _leave(" = -ESTALE");
459 return ERR_PTR(-ESTALE); 509 return ERR_PTR(-ESTALE);
460 } 510 }
461 511
462 as = dir->i_sb->s_fs_info; 512 key = afs_request_key(vnode->volume->cell);
463 513 if (IS_ERR(key)) {
464 /* search the directory */ 514 _leave(" = %ld [key]", PTR_ERR(key));
465 cookie.name = dentry->d_name.name; 515 return ERR_PTR(PTR_ERR(key));
466 cookie.nlen = dentry->d_name.len; 516 }
467 cookie.fid.vid = as->volume->vid;
468 cookie.found = 0;
469 517
470 fpos = 0; 518 ret = afs_validate(vnode, key);
471 ret = afs_dir_iterate(dir, &fpos, &cookie, afs_dir_lookup_filldir);
472 if (ret < 0) { 519 if (ret < 0) {
473 _leave(" = %d", ret); 520 key_put(key);
521 _leave(" = %d [val]", ret);
474 return ERR_PTR(ret); 522 return ERR_PTR(ret);
475 } 523 }
476 524
477 ret = -ENOENT; 525 ret = afs_do_lookup(dir, dentry, &fid, key);
478 if (!cookie.found) { 526 if (ret < 0) {
479 _leave(" = %d", ret); 527 key_put(key);
528 if (ret == -ENOENT) {
529 d_add(dentry, NULL);
530 _leave(" = NULL [negative]");
531 return NULL;
532 }
533 _leave(" = %d [do]", ret);
480 return ERR_PTR(ret); 534 return ERR_PTR(ret);
481 } 535 }
536 dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version;
482 537
483 /* instantiate the dentry */ 538 /* instantiate the dentry */
484 ret = afs_iget(dir->i_sb, &cookie.fid, &inode); 539 inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL);
485 if (ret < 0) { 540 key_put(key);
486 _leave(" = %d", ret); 541 if (IS_ERR(inode)) {
487 return ERR_PTR(ret); 542 _leave(" = %ld", PTR_ERR(inode));
543 return ERR_PTR(PTR_ERR(inode));
488 } 544 }
489 545
490 dentry->d_op = &afs_fs_dentry_operations; 546 dentry->d_op = &afs_fs_dentry_operations;
491 dentry->d_fsdata = (void *) (unsigned long) vnode->status.version;
492 547
493 d_add(dentry, inode); 548 d_add(dentry, inode);
494 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }", 549 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%lu }",
495 cookie.fid.vnode, 550 fid.vnode,
496 cookie.fid.unique, 551 fid.unique,
497 dentry->d_inode->i_ino, 552 dentry->d_inode->i_ino,
498 dentry->d_inode->i_version); 553 dentry->d_inode->i_version);
499 554
500 return NULL; 555 return NULL;
501} /* end afs_dir_lookup() */ 556}
502 557
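
Note the -ENOENT branch in afs_lookup() above: rather than returning an error, the name's absence is cached by hashing a NULL inode, so repeat lookups of a missing name are answered from the dcache until afs_d_revalidate() notices the directory change. The idiom, in isolation:

	/* Sketch: instantiate a negative dentry on a lookup miss, as the
	 * -ENOENT branch of afs_lookup() does. */
	d_add(dentry, NULL);	/* negative dentry: name known to be absent */
	return NULL;		/* tell the VFS this dentry was consumed */
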
503/*****************************************************************************/
504/* 558/*
505 * check that a dentry lookup hit has found a valid entry 559 * check that a dentry lookup hit has found a valid entry
506 * - NOTE! the hit can be a negative hit too, so we can't assume we have an 560 * - NOTE! the hit can be a negative hit too, so we can't assume we have an
507 * inode 561 * inode
508 * (derived from nfs_lookup_revalidate)
509 */ 562 */
510static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) 563static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
511{ 564{
512 struct afs_dir_lookup_cookie cookie; 565 struct afs_vnode *vnode, *dir;
566 struct afs_fid fid;
513 struct dentry *parent; 567 struct dentry *parent;
514 struct inode *inode, *dir; 568 struct key *key;
515 unsigned fpos; 569 void *dir_version;
516 int ret; 570 int ret;
517 571
518 _enter("{sb=%p n=%s},", dentry->d_sb, dentry->d_name.name); 572 vnode = AFS_FS_I(dentry->d_inode);
519 573
520 /* lock down the parent dentry so we can peer at it */ 574 if (dentry->d_inode)
521 parent = dget_parent(dentry->d_parent); 575 _enter("{v={%x:%u} n=%s fl=%lx},",
576 vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name,
577 vnode->flags);
578 else
579 _enter("{neg n=%s}", dentry->d_name.name);
522 580
523 dir = parent->d_inode; 581 key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell);
524 inode = dentry->d_inode; 582 if (IS_ERR(key))
583 key = NULL;
525 584
526 /* handle a negative dentry */ 585 /* lock down the parent dentry so we can peer at it */
527 if (!inode) 586 parent = dget_parent(dentry);
587 if (!parent->d_inode)
528 goto out_bad; 588 goto out_bad;
529 589
530 /* handle a bad inode */ 590 dir = AFS_FS_I(parent->d_inode);
531 if (is_bad_inode(inode)) {
532 printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
533 dentry->d_parent->d_name.name, dentry->d_name.name);
534 goto out_bad;
535 }
536 591
537 /* force a full look up if the parent directory changed since the 592 /* validate the parent directory */
538 * server was last consulted 593 if (test_bit(AFS_VNODE_MODIFIED, &dir->flags))
539 * - otherwise this inode must still exist, even if the inode details 594 afs_validate(dir, key);
540 * themselves have changed
541 */
542 if (AFS_FS_I(dir)->flags & AFS_VNODE_CHANGED)
543 afs_vnode_fetch_status(AFS_FS_I(dir));
544 595
545 if (AFS_FS_I(dir)->flags & AFS_VNODE_DELETED) { 596 if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
546 _debug("%s: parent dir deleted", dentry->d_name.name); 597 _debug("%s: parent dir deleted", dentry->d_name.name);
547 goto out_bad; 598 goto out_bad;
548 } 599 }
549 600
550 if (AFS_FS_I(inode)->flags & AFS_VNODE_DELETED) { 601 dir_version = (void *) (unsigned long) dir->status.data_version;
551 _debug("%s: file already deleted", dentry->d_name.name); 602 if (dentry->d_fsdata == dir_version)
552 goto out_bad; 603 goto out_valid; /* the dir contents are unchanged */
553 }
554
555 if ((unsigned long) dentry->d_fsdata !=
556 (unsigned long) AFS_FS_I(dir)->status.version) {
557 _debug("%s: parent changed %lu -> %u",
558 dentry->d_name.name,
559 (unsigned long) dentry->d_fsdata,
560 (unsigned) AFS_FS_I(dir)->status.version);
561 604
562 /* search the directory for this vnode */ 605 _debug("dir modified");
563 cookie.name = dentry->d_name.name;
564 cookie.nlen = dentry->d_name.len;
565 cookie.fid.vid = AFS_FS_I(inode)->volume->vid;
566 cookie.found = 0;
567 606
568 fpos = 0; 607 /* search the directory for this vnode */
569 ret = afs_dir_iterate(dir, &fpos, &cookie, 608 ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key);
570 afs_dir_lookup_filldir); 609 switch (ret) {
571 if (ret < 0) { 610 case 0:
572 _debug("failed to iterate dir %s: %d", 611 /* the filename maps to something */
573 parent->d_name.name, ret); 612 if (!dentry->d_inode)
613 goto out_bad;
614 if (is_bad_inode(dentry->d_inode)) {
615 printk("kAFS: afs_d_revalidate: %s/%s has bad inode\n",
616 parent->d_name.name, dentry->d_name.name);
574 goto out_bad; 617 goto out_bad;
575 }
576
577 if (!cookie.found) {
578 _debug("%s: dirent not found", dentry->d_name.name);
579 goto not_found;
580 } 618 }
581 619
582 /* if the vnode ID has changed, then the dirent points to a 620 /* if the vnode ID has changed, then the dirent points to a
583 * different file */ 621 * different file */
584 if (cookie.fid.vnode != AFS_FS_I(inode)->fid.vnode) { 622 if (fid.vnode != vnode->fid.vnode) {
585 _debug("%s: dirent changed", dentry->d_name.name); 623 _debug("%s: dirent changed [%u != %u]",
624 dentry->d_name.name, fid.vnode,
625 vnode->fid.vnode);
586 goto not_found; 626 goto not_found;
587 } 627 }
588 628
589 /* if the vnode ID uniquifier has changed, then the file has 629 * been deleted and replaced, and the original vnode ID has
590 * been deleted */ 630 * been deleted and replaced, and the original vnode ID has
591 if (cookie.fid.unique != AFS_FS_I(inode)->fid.unique) { 631 * been reused */
632 if (fid.unique != vnode->fid.unique) {
592 _debug("%s: file deleted (uq %u -> %u I:%lu)", 633 _debug("%s: file deleted (uq %u -> %u I:%lu)",
593 dentry->d_name.name, 634 dentry->d_name.name, fid.unique,
594 cookie.fid.unique, 635 vnode->fid.unique, dentry->d_inode->i_version);
595 AFS_FS_I(inode)->fid.unique, 636 spin_lock(&vnode->lock);
596 inode->i_version); 637 set_bit(AFS_VNODE_DELETED, &vnode->flags);
597 spin_lock(&AFS_FS_I(inode)->lock); 638 spin_unlock(&vnode->lock);
598 AFS_FS_I(inode)->flags |= AFS_VNODE_DELETED; 639 goto not_found;
599 spin_unlock(&AFS_FS_I(inode)->lock);
600 invalidate_remote_inode(inode);
601 goto out_bad;
602 } 640 }
641 goto out_valid;
642
643 case -ENOENT:
644 /* the filename is unknown */
645 _debug("%s: dirent not found", dentry->d_name.name);
646 if (dentry->d_inode)
647 goto not_found;
648 goto out_valid;
603 649
604 dentry->d_fsdata = 650 default:
605 (void *) (unsigned long) AFS_FS_I(dir)->status.version; 651 _debug("failed to iterate dir %s: %d",
652 parent->d_name.name, ret);
653 goto out_bad;
606 } 654 }
607 655
608 out_valid: 656out_valid:
657 dentry->d_fsdata = dir_version;
658out_skip:
609 dput(parent); 659 dput(parent);
660 key_put(key);
610 _leave(" = 1 [valid]"); 661 _leave(" = 1 [valid]");
611 return 1; 662 return 1;
612 663
613 /* the dirent, if it exists, now points to a different vnode */ 664 /* the dirent, if it exists, now points to a different vnode */
614 not_found: 665not_found:
615 spin_lock(&dentry->d_lock); 666 spin_lock(&dentry->d_lock);
616 dentry->d_flags |= DCACHE_NFSFS_RENAMED; 667 dentry->d_flags |= DCACHE_NFSFS_RENAMED;
617 spin_unlock(&dentry->d_lock); 668 spin_unlock(&dentry->d_lock);
618 669
619 out_bad: 670out_bad:
620 if (inode) { 671 if (dentry->d_inode) {
621 /* don't unhash if we have submounts */ 672 /* don't unhash if we have submounts */
622 if (have_submounts(dentry)) 673 if (have_submounts(dentry))
623 goto out_valid; 674 goto out_skip;
624 } 675 }
625 676
626 shrink_dcache_parent(dentry);
627
628 _debug("dropping dentry %s/%s", 677 _debug("dropping dentry %s/%s",
629 dentry->d_parent->d_name.name, dentry->d_name.name); 678 parent->d_name.name, dentry->d_name.name);
679 shrink_dcache_parent(dentry);
630 d_drop(dentry); 680 d_drop(dentry);
631
632 dput(parent); 681 dput(parent);
682 key_put(key);
633 683
634 _leave(" = 0 [bad]"); 684 _leave(" = 0 [bad]");
635 return 0; 685 return 0;
636} /* end afs_d_revalidate() */ 686}
637 687
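
The rewritten afs_d_revalidate() keys its fast path off the parent directory's data version: afs_lookup() records the version in dentry->d_fsdata, and a later match against the directory's current status.data_version proves the directory is unchanged, so the dentry must still be valid with no directory search at all. A condensed sketch of that check, using the field names from the diff:

	/* Sketch of the d_fsdata fast path in afs_d_revalidate() above. */
	static int afs_dir_unchanged(struct dentry *dentry,
				     struct afs_vnode *dir)
	{
		void *dir_version =
			(void *)(unsigned long) dir->status.data_version;

		/* equal versions: contents unchanged since the lookup */
		return dentry->d_fsdata == dir_version;
	}
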
638/*****************************************************************************/
639/* 688/*
640 * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't 689 * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't
641 * sleep) 690 * sleep)
@@ -649,15 +698,444 @@ static int afs_d_delete(struct dentry *dentry)
649 if (dentry->d_flags & DCACHE_NFSFS_RENAMED) 698 if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
650 goto zap; 699 goto zap;
651 700
652 if (dentry->d_inode) { 701 if (dentry->d_inode &&
653 if (AFS_FS_I(dentry->d_inode)->flags & AFS_VNODE_DELETED) 702 test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dentry->d_inode)->flags))
654 goto zap; 703 goto zap;
655 }
656 704
657 _leave(" = 0 [keep]"); 705 _leave(" = 0 [keep]");
658 return 0; 706 return 0;
659 707
660 zap: 708zap:
661 _leave(" = 1 [zap]"); 709 _leave(" = 1 [zap]");
662 return 1; 710 return 1;
663} /* end afs_d_delete() */ 711}
712
713/*
714 * handle dentry release
715 */
716static void afs_d_release(struct dentry *dentry)
717{
718 _enter("%s", dentry->d_name.name);
719}
720
721/*
722 * create a directory on an AFS filesystem
723 */
724static int afs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
725{
726 struct afs_file_status status;
727 struct afs_callback cb;
728 struct afs_server *server;
729 struct afs_vnode *dvnode, *vnode;
730 struct afs_fid fid;
731 struct inode *inode;
732 struct key *key;
733 int ret;
734
735 dvnode = AFS_FS_I(dir);
736
737 _enter("{%x:%d},{%s},%o",
738 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
739
740 ret = -ENAMETOOLONG;
741 if (dentry->d_name.len > 255)
742 goto error;
743
744 key = afs_request_key(dvnode->volume->cell);
745 if (IS_ERR(key)) {
746 ret = PTR_ERR(key);
747 goto error;
748 }
749
750 mode |= S_IFDIR;
751 ret = afs_vnode_create(dvnode, key, dentry->d_name.name,
752 mode, &fid, &status, &cb, &server);
753 if (ret < 0)
754 goto mkdir_error;
755
756 inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);
757 if (IS_ERR(inode)) {
758 /* ENOMEM at a really inconvenient time - just abandon the new
759 * directory on the server */
760 ret = PTR_ERR(inode);
761 goto iget_error;
762 }
763
764 /* apply the status report we've got for the new vnode */
765 vnode = AFS_FS_I(inode);
766 spin_lock(&vnode->lock);
767 vnode->update_cnt++;
768 spin_unlock(&vnode->lock);
769 afs_vnode_finalise_status_update(vnode, server);
770 afs_put_server(server);
771
772 d_instantiate(dentry, inode);
773 if (d_unhashed(dentry)) {
774 _debug("not hashed");
775 d_rehash(dentry);
776 }
777 key_put(key);
778 _leave(" = 0");
779 return 0;
780
781iget_error:
782 afs_put_server(server);
783mkdir_error:
784 key_put(key);
785error:
786 d_drop(dentry);
787 _leave(" = %d", ret);
788 return ret;
789}
790
791/*
792 * remove a directory from an AFS filesystem
793 */
794static int afs_rmdir(struct inode *dir, struct dentry *dentry)
795{
796 struct afs_vnode *dvnode, *vnode;
797 struct key *key;
798 int ret;
799
800 dvnode = AFS_FS_I(dir);
801
802 _enter("{%x:%d},{%s}",
803 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
804
805 ret = -ENAMETOOLONG;
806 if (dentry->d_name.len > 255)
807 goto error;
808
809 key = afs_request_key(dvnode->volume->cell);
810 if (IS_ERR(key)) {
811 ret = PTR_ERR(key);
812 goto error;
813 }
814
815 ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, true);
816 if (ret < 0)
817 goto rmdir_error;
818
819 if (dentry->d_inode) {
820 vnode = AFS_FS_I(dentry->d_inode);
821 clear_nlink(&vnode->vfs_inode);
822 set_bit(AFS_VNODE_DELETED, &vnode->flags);
823 afs_discard_callback_on_delete(vnode);
824 }
825
826 key_put(key);
827 _leave(" = 0");
828 return 0;
829
830rmdir_error:
831 key_put(key);
832error:
833 _leave(" = %d", ret);
834 return ret;
835}
836
837/*
838 * remove a file from an AFS filesystem
839 */
840static int afs_unlink(struct inode *dir, struct dentry *dentry)
841{
842 struct afs_vnode *dvnode, *vnode;
843 struct key *key;
844 int ret;
845
846 dvnode = AFS_FS_I(dir);
847
848 _enter("{%x:%d},{%s}",
849 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
850
851 ret = -ENAMETOOLONG;
852 if (dentry->d_name.len > 255)
853 goto error;
854
855 key = afs_request_key(dvnode->volume->cell);
856 if (IS_ERR(key)) {
857 ret = PTR_ERR(key);
858 goto error;
859 }
860
861 if (dentry->d_inode) {
862 vnode = AFS_FS_I(dentry->d_inode);
863
864 /* make sure we have a callback promise on the victim */
865 ret = afs_validate(vnode, key);
866 if (ret < 0)
867 goto error;
868 }
869
870 ret = afs_vnode_remove(dvnode, key, dentry->d_name.name, false);
871 if (ret < 0)
872 goto remove_error;
873
874 if (dentry->d_inode) {
875 /* if the file wasn't deleted due to excess hard links, the
876 * fileserver will break the callback promise on the file - if
877 * it had one - before it returns to us, and if it was deleted,
878 * it won't
879 *
880 * however, if we didn't have a callback promise outstanding,
881 * or it was outstanding on a different server, then it won't
882 * break it either...
883 */
884 vnode = AFS_FS_I(dentry->d_inode);
885 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
886 _debug("AFS_VNODE_DELETED");
887 if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
888 _debug("AFS_VNODE_CB_BROKEN");
889 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
890 ret = afs_validate(vnode, key);
891 _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
892 }
893
894 key_put(key);
895 _leave(" = 0");
896 return 0;
897
898remove_error:
899 key_put(key);
900error:
901 _leave(" = %d", ret);
902 return ret;
903}
904
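
afs_unlink() above exploits the fileserver's callback behaviour to learn the victim's fate: removing one of several hard links breaks any callback promise on the file, while deleting the last link does not, so the function pessimistically sets AFS_VNODE_CB_BROKEN and revalidates to get a fresh link count either way. A sketch of that flag-then-revalidate step:

	/* Sketch: force a status refetch on the unlink victim by marking
	 * its callback promise broken, then revalidating against the key. */
	static void afs_force_reval(struct afs_vnode *vnode, struct key *key)
	{
		set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
		afs_validate(vnode, key);	/* refreshes i_nlink et al. */
	}
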
905/*
906 * create a regular file on an AFS filesystem
907 */
908static int afs_create(struct inode *dir, struct dentry *dentry, int mode,
909 struct nameidata *nd)
910{
911 struct afs_file_status status;
912 struct afs_callback cb;
913 struct afs_server *server;
914 struct afs_vnode *dvnode, *vnode;
915 struct afs_fid fid;
916 struct inode *inode;
917 struct key *key;
918 int ret;
919
920 dvnode = AFS_FS_I(dir);
921
922 _enter("{%x:%d},{%s},%o,",
923 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name, mode);
924
925 ret = -ENAMETOOLONG;
926 if (dentry->d_name.len > 255)
927 goto error;
928
929 key = afs_request_key(dvnode->volume->cell);
930 if (IS_ERR(key)) {
931 ret = PTR_ERR(key);
932 goto error;
933 }
934
935 mode |= S_IFREG;
936 ret = afs_vnode_create(dvnode, key, dentry->d_name.name,
937 mode, &fid, &status, &cb, &server);
938 if (ret < 0)
939 goto create_error;
940
941 inode = afs_iget(dir->i_sb, key, &fid, &status, &cb);
942 if (IS_ERR(inode)) {
943 /* ENOMEM at a really inconvenient time - just abandon the new
944 * file on the server */
945 ret = PTR_ERR(inode);
946 goto iget_error;
947 }
948
949 /* apply the status report we've got for the new vnode */
950 vnode = AFS_FS_I(inode);
951 spin_lock(&vnode->lock);
952 vnode->update_cnt++;
953 spin_unlock(&vnode->lock);
954 afs_vnode_finalise_status_update(vnode, server);
955 afs_put_server(server);
956
957 d_instantiate(dentry, inode);
958 if (d_unhashed(dentry)) {
959 _debug("not hashed");
960 d_rehash(dentry);
961 }
962 key_put(key);
963 _leave(" = 0");
964 return 0;
965
966iget_error:
967 afs_put_server(server);
968create_error:
969 key_put(key);
970error:
971 d_drop(dentry);
972 _leave(" = %d", ret);
973 return ret;
974}
975
976/*
977 * create a hard link between files in an AFS filesystem
978 */
979static int afs_link(struct dentry *from, struct inode *dir,
980 struct dentry *dentry)
981{
982 struct afs_vnode *dvnode, *vnode;
983 struct key *key;
984 int ret;
985
986 vnode = AFS_FS_I(from->d_inode);
987 dvnode = AFS_FS_I(dir);
988
989 _enter("{%x:%d},{%x:%d},{%s}",
990 vnode->fid.vid, vnode->fid.vnode,
991 dvnode->fid.vid, dvnode->fid.vnode,
992 dentry->d_name.name);
993
994 ret = -ENAMETOOLONG;
995 if (dentry->d_name.len > 255)
996 goto error;
997
998 key = afs_request_key(dvnode->volume->cell);
999 if (IS_ERR(key)) {
1000 ret = PTR_ERR(key);
1001 goto error;
1002 }
1003
1004 ret = afs_vnode_link(dvnode, vnode, key, dentry->d_name.name);
1005 if (ret < 0)
1006 goto link_error;
1007
1008 atomic_inc(&vnode->vfs_inode.i_count);
1009 d_instantiate(dentry, &vnode->vfs_inode);
1010 key_put(key);
1011 _leave(" = 0");
1012 return 0;
1013
1014link_error:
1015 key_put(key);
1016error:
1017 d_drop(dentry);
1018 _leave(" = %d", ret);
1019 return ret;
1020}
1021
1022/*
1023 * create a symlink in an AFS filesystem
1024 */
1025static int afs_symlink(struct inode *dir, struct dentry *dentry,
1026 const char *content)
1027{
1028 struct afs_file_status status;
1029 struct afs_server *server;
1030 struct afs_vnode *dvnode, *vnode;
1031 struct afs_fid fid;
1032 struct inode *inode;
1033 struct key *key;
1034 int ret;
1035
1036 dvnode = AFS_FS_I(dir);
1037
1038 _enter("{%x:%d},{%s},%s",
1039 dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name,
1040 content);
1041
1042 ret = -ENAMETOOLONG;
1043 if (dentry->d_name.len > 255)
1044 goto error;
1045
1046 ret = -EINVAL;
1047 if (strlen(content) > 1023)
1048 goto error;
1049
1050 key = afs_request_key(dvnode->volume->cell);
1051 if (IS_ERR(key)) {
1052 ret = PTR_ERR(key);
1053 goto error;
1054 }
1055
1056 ret = afs_vnode_symlink(dvnode, key, dentry->d_name.name, content,
1057 &fid, &status, &server);
1058 if (ret < 0)
1059 goto create_error;
1060
1061 inode = afs_iget(dir->i_sb, key, &fid, &status, NULL);
1062 if (IS_ERR(inode)) {
1063 /* ENOMEM at a really inconvenient time - just abandon the new
1064 * symlink on the server */
1065 ret = PTR_ERR(inode);
1066 goto iget_error;
1067 }
1068
1069 /* apply the status report we've got for the new vnode */
1070 vnode = AFS_FS_I(inode);
1071 spin_lock(&vnode->lock);
1072 vnode->update_cnt++;
1073 spin_unlock(&vnode->lock);
1074 afs_vnode_finalise_status_update(vnode, server);
1075 afs_put_server(server);
1076
1077 d_instantiate(dentry, inode);
1078 if (d_unhashed(dentry)) {
1079 _debug("not hashed");
1080 d_rehash(dentry);
1081 }
1082 key_put(key);
1083 _leave(" = 0");
1084 return 0;
1085
1086iget_error:
1087 afs_put_server(server);
1088create_error:
1089 key_put(key);
1090error:
1091 d_drop(dentry);
1092 _leave(" = %d", ret);
1093 return ret;
1094}
1095
1096/*
1097 * rename a file in an AFS filesystem and/or move it between directories
1098 */
1099static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1100 struct inode *new_dir, struct dentry *new_dentry)
1101{
1102 struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
1103 struct key *key;
1104 int ret;
1105
1106 vnode = AFS_FS_I(old_dentry->d_inode);
1107 orig_dvnode = AFS_FS_I(old_dir);
1108 new_dvnode = AFS_FS_I(new_dir);
1109
1110 _enter("{%x:%d},{%x:%d},{%x:%d},{%s}",
1111 orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
1112 vnode->fid.vid, vnode->fid.vnode,
1113 new_dvnode->fid.vid, new_dvnode->fid.vnode,
1114 new_dentry->d_name.name);
1115
1116 ret = -ENAMETOOLONG;
1117 if (new_dentry->d_name.len > 255)
1118 goto error;
1119
1120 key = afs_request_key(orig_dvnode->volume->cell);
1121 if (IS_ERR(key)) {
1122 ret = PTR_ERR(key);
1123 goto error;
1124 }
1125
1126 ret = afs_vnode_rename(orig_dvnode, new_dvnode, key,
1127 old_dentry->d_name.name,
1128 new_dentry->d_name.name);
1129 if (ret < 0)
1130 goto rename_error;
1131 key_put(key);
1132 _leave(" = 0");
1133 return 0;
1134
1135rename_error:
1136 key_put(key);
1137error:
1138 d_drop(new_dentry);
1139 _leave(" = %d", ret);
1140 return ret;
1141}
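
All of the directory-modifying operations added above (mkdir, rmdir, unlink, create, link, symlink, rename) share one shape: bounds-check the name, obtain the cell's key with afs_request_key(), run the vnode operation, and unwind through goto labels so the key is put exactly once on every path. The skeleton, with the operation body elided:

	/* Skeleton common to the directory operations above; the actual
	 * afs_vnode_*() call differs per operation. */
	static int afs_dir_op_skeleton(struct afs_vnode *dvnode,
				       struct dentry *dentry)
	{
		struct key *key;
		int ret;

		ret = -ENAMETOOLONG;
		if (dentry->d_name.len > 255)
			goto error;

		key = afs_request_key(dvnode->volume->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
			goto error;
		}

		ret = /* ... the vnode operation proper ... */ 0;
		if (ret < 0)
			goto op_error;

		key_put(key);
		return 0;

	op_error:
		key_put(key);
	error:
		return ret;
	}
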
diff --git a/fs/afs/errors.h b/fs/afs/errors.h
deleted file mode 100644
index 574d94ac8d05..000000000000
--- a/fs/afs/errors.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/* errors.h: AFS abort/error codes
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_ERRORS_H
13#define _LINUX_AFS_ERRORS_H
14
15#include "types.h"
16
17/* file server abort codes */
18typedef enum {
19 VSALVAGE = 101, /* volume needs salvaging */
20 VNOVNODE = 102, /* no such file/dir (vnode) */
21 VNOVOL = 103, /* no such volume or volume unavailable */
22 VVOLEXISTS = 104, /* volume name already exists */
23 VNOSERVICE = 105, /* volume not currently in service */
24 VOFFLINE = 106, /* volume is currently offline (more info available [VVL-spec]) */
25 VONLINE = 107, /* volume is already online */
26 VDISKFULL = 108, /* disk partition is full */
27 VOVERQUOTA = 109, /* volume's maximum quota exceeded */
28 VBUSY = 110, /* volume is temporarily unavailable */
29 VMOVED = 111, /* volume moved to new server - ask this FS where */
30} afs_rxfs_abort_t;
31
32extern int afs_abort_to_error(int abortcode);
33
34#endif /* _LINUX_AFS_ERRORS_H */
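
With errors.h gone, the abort-code list and afs_abort_to_error() move into the shared AFS headers and fs/afs/misc.c. The translation itself is a plain switch from RX abort codes to errno values; a hedged sketch (the table in misc.c is authoritative, and the mappings shown here are assumed):

	/* Sketch only: a plausible abort-code-to-errno translation. */
	int afs_abort_to_error_sketch(u32 abort_code)
	{
		switch (abort_code) {
		case VNOVNODE:	 return -ENOENT;   /* no such file/dir */
		case VDISKFULL:	 return -ENOSPC;   /* disk partition full */
		case VOVERQUOTA: return -EDQUOT;   /* volume quota exceeded */
		case VBUSY:	 return -EBUSY;    /* temporarily unavailable */
		default:	 return -EREMOTEIO;
		}
	}
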
diff --git a/fs/afs/file.c b/fs/afs/file.c
index b17634541f67..ae256498f4f7 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -1,6 +1,6 @@
1/* file.c: AFS filesystem file handling 1/* AFS filesystem file handling
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -15,22 +15,25 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/pagemap.h> 17#include <linux/pagemap.h>
18#include "volume.h"
19#include "vnode.h"
20#include <rxrpc/call.h>
21#include "internal.h" 18#include "internal.h"
22 19
23#if 0
24static int afs_file_open(struct inode *inode, struct file *file);
25static int afs_file_release(struct inode *inode, struct file *file);
26#endif
27
28static int afs_file_readpage(struct file *file, struct page *page); 20static int afs_file_readpage(struct file *file, struct page *page);
29static void afs_file_invalidatepage(struct page *page, unsigned long offset); 21static void afs_file_invalidatepage(struct page *page, unsigned long offset);
30static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); 22static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);
31 23
24const struct file_operations afs_file_operations = {
25 .open = afs_open,
26 .release = afs_release,
27 .llseek = generic_file_llseek,
28 .read = do_sync_read,
29 .aio_read = generic_file_aio_read,
30 .mmap = generic_file_readonly_mmap,
31 .sendfile = generic_file_sendfile,
32};
33
32const struct inode_operations afs_file_inode_operations = { 34const struct inode_operations afs_file_inode_operations = {
33 .getattr = afs_inode_getattr, 35 .getattr = afs_inode_getattr,
36 .permission = afs_permission,
34}; 37};
35 38
36const struct address_space_operations afs_fs_aops = { 39const struct address_space_operations afs_fs_aops = {
@@ -40,7 +43,48 @@ const struct address_space_operations afs_fs_aops = {
40 .invalidatepage = afs_file_invalidatepage, 43 .invalidatepage = afs_file_invalidatepage,
41}; 44};
42 45
43/*****************************************************************************/ 46/*
47 * open an AFS file or directory and attach a key to it
48 */
49int afs_open(struct inode *inode, struct file *file)
50{
51 struct afs_vnode *vnode = AFS_FS_I(inode);
52 struct key *key;
53 int ret;
54
55 _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);
56
57 key = afs_request_key(vnode->volume->cell);
58 if (IS_ERR(key)) {
59 _leave(" = %ld [key]", PTR_ERR(key));
60 return PTR_ERR(key);
61 }
62
63 ret = afs_validate(vnode, key);
64 if (ret < 0) {
65 _leave(" = %d [val]", ret);
66 return ret;
67 }
68
69 file->private_data = key;
70 _leave(" = 0");
71 return 0;
72}
73
74/*
75 * release an AFS file or directory and discard its key
76 */
77int afs_release(struct inode *inode, struct file *file)
78{
79 struct afs_vnode *vnode = AFS_FS_I(inode);
80
81 _enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);
82
83 key_put(file->private_data);
84 _leave(" = 0");
85 return 0;
86}
87
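
afs_open() pins the authentication key for the lifetime of the open file by stashing it in file->private_data, and afs_release() drops it; everything in between can reuse the key without another request_key() upcall. A sketch of the consumer side, as afs_file_readpage() below does:

	/* Sketch: recover the key that afs_open() stored; usable by any
	 * file operation between open and release. */
	static struct key *afs_file_key(struct file *file)
	{
		struct key *key = file->private_data;

		ASSERT(key != NULL);	/* afs_open() always sets it */
		return key;
	}
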
44/* 88/*
45 * deal with notification that a page was read from the cache 89 * deal with notification that a page was read from the cache
46 */ 90 */
@@ -58,10 +102,9 @@ static void afs_file_readpage_read_complete(void *cookie_data,
58 SetPageUptodate(page); 102 SetPageUptodate(page);
59 unlock_page(page); 103 unlock_page(page);
60 104
61} /* end afs_file_readpage_read_complete() */ 105}
62#endif 106#endif
63 107
64/*****************************************************************************/
65/* 108/*
66 * deal with notification that a page was written to the cache 109 * deal with notification that a page was written to the cache
67 */ 110 */
@@ -74,41 +117,38 @@ static void afs_file_readpage_write_complete(void *cookie_data,
74 _enter("%p,%p,%p,%d", cookie_data, page, data, error); 117 _enter("%p,%p,%p,%d", cookie_data, page, data, error);
75 118
76 unlock_page(page); 119 unlock_page(page);
77 120}
78} /* end afs_file_readpage_write_complete() */
79#endif 121#endif
80 122
81/*****************************************************************************/
82/* 123/*
83 * AFS read page from file (or symlink) 124 * AFS read page from file (or symlink)
84 */ 125 */
85static int afs_file_readpage(struct file *file, struct page *page) 126static int afs_file_readpage(struct file *file, struct page *page)
86{ 127{
87 struct afs_rxfs_fetch_descriptor desc;
88#ifdef AFS_CACHING_SUPPORT
89 struct cachefs_page *pageio;
90#endif
91 struct afs_vnode *vnode; 128 struct afs_vnode *vnode;
92 struct inode *inode; 129 struct inode *inode;
130 struct key *key;
131 size_t len;
132 off_t offset;
93 int ret; 133 int ret;
94 134
95 inode = page->mapping->host; 135 inode = page->mapping->host;
96 136
97 _enter("{%lu},{%lu}", inode->i_ino, page->index); 137 ASSERT(file != NULL);
138 key = file->private_data;
139 ASSERT(key != NULL);
140
141 _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
98 142
99 vnode = AFS_FS_I(inode); 143 vnode = AFS_FS_I(inode);
100 144
101 BUG_ON(!PageLocked(page)); 145 BUG_ON(!PageLocked(page));
102 146
103 ret = -ESTALE; 147 ret = -ESTALE;
104 if (vnode->flags & AFS_VNODE_DELETED) 148 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
105 goto error; 149 goto error;
106 150
107#ifdef AFS_CACHING_SUPPORT 151#ifdef AFS_CACHING_SUPPORT
108 ret = cachefs_page_get_private(page, &pageio, GFP_NOIO);
109 if (ret < 0)
110 goto error;
111
112 /* is it cached? */ 152 /* is it cached? */
113 ret = cachefs_read_or_alloc_page(vnode->cache, 153 ret = cachefs_read_or_alloc_page(vnode->cache,
114 page, 154 page,
@@ -132,26 +172,19 @@ static int afs_file_readpage(struct file *file, struct page *page)
132 case -ENOBUFS: 172 case -ENOBUFS:
133 case -ENODATA: 173 case -ENODATA:
134 default: 174 default:
135 desc.fid = vnode->fid; 175 offset = page->index << PAGE_CACHE_SHIFT;
136 desc.offset = page->index << PAGE_CACHE_SHIFT; 176 len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
137 desc.size = min((size_t) (inode->i_size - desc.offset),
138 (size_t) PAGE_SIZE);
139 desc.buffer = kmap(page);
140
141 clear_page(desc.buffer);
142 177
143 /* read the contents of the file from the server into the 178 /* read the contents of the file from the server into the
144 * page */ 179 * page */
145 ret = afs_vnode_fetch_data(vnode, &desc); 180 ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
146 kunmap(page);
147 if (ret < 0) { 181 if (ret < 0) {
148 if (ret==-ENOENT) { 182 if (ret == -ENOENT) {
149 _debug("got NOENT from server" 183 _debug("got NOENT from server"
150 " - marking file deleted and stale"); 184 " - marking file deleted and stale");
151 vnode->flags |= AFS_VNODE_DELETED; 185 set_bit(AFS_VNODE_DELETED, &vnode->flags);
152 ret = -ESTALE; 186 ret = -ESTALE;
153 } 187 }
154
155#ifdef AFS_CACHING_SUPPORT 188#ifdef AFS_CACHING_SUPPORT
156 cachefs_uncache_page(vnode->cache, page); 189 cachefs_uncache_page(vnode->cache, page);
157#endif 190#endif
@@ -178,16 +211,13 @@ static int afs_file_readpage(struct file *file, struct page *page)
178 _leave(" = 0"); 211 _leave(" = 0");
179 return 0; 212 return 0;
180 213
181 error: 214error:
182 SetPageError(page); 215 SetPageError(page);
183 unlock_page(page); 216 unlock_page(page);
184
185 _leave(" = %d", ret); 217 _leave(" = %d", ret);
186 return ret; 218 return ret;
219}
187 220
188} /* end afs_file_readpage() */
189
190/*****************************************************************************/
191/* 221/*
192 * get a page cookie for the specified page 222 * get a page cookie for the specified page
193 */ 223 */
@@ -202,10 +232,9 @@ int afs_cache_get_page_cookie(struct page *page,
202 232
203 _leave(" = %d", ret); 233 _leave(" = %d", ret);
204 return ret; 234 return ret;
205} /* end afs_cache_get_page_cookie() */ 235}
206#endif 236#endif
207 237
208/*****************************************************************************/
209/* 238/*
210 * invalidate part or all of a page 239 * invalidate part or all of a page
211 */ 240 */
@@ -240,9 +269,8 @@ static void afs_file_invalidatepage(struct page *page, unsigned long offset)
240 } 269 }
241 270
242 _leave(" = %d", ret); 271 _leave(" = %d", ret);
243} /* end afs_file_invalidatepage() */ 272}
244 273
245/*****************************************************************************/
246/* 274/*
247 * release a page and cleanup its private data 275 * release a page and cleanup its private data
248 */ 276 */
@@ -267,4 +295,4 @@ static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
267 295
268 _leave(" = 0"); 296 _leave(" = 0");
269 return 0; 297 return 0;
270} /* end afs_file_releasepage() */ 298}
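
The readpage rewrite above computes each server fetch directly from the page index: the byte offset is the index scaled by the page size, and the request length is clipped to the remainder of the file or one page, whichever is smaller. The arithmetic, isolated:

	/* Sketch of the fetch-window arithmetic in afs_file_readpage(). */
	static void afs_fetch_window(loff_t i_size, pgoff_t index,
				     off_t *offset, size_t *len)
	{
		*offset = (off_t) index << PAGE_CACHE_SHIFT;
		*len = min_t(size_t, i_size - *offset, PAGE_SIZE);
	}
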
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 61bc371532ab..2393d2a08d79 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1,6 +1,6 @@
1/* fsclient.c: AFS File Server client stubs 1/* AFS File Server client stubs
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -11,827 +11,927 @@
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <rxrpc/rxrpc.h> 14#include <linux/circ_buf.h>
15#include <rxrpc/transport.h>
16#include <rxrpc/connection.h>
17#include <rxrpc/call.h>
18#include "fsclient.h"
19#include "cmservice.h"
20#include "vnode.h"
21#include "server.h"
22#include "errors.h"
23#include "internal.h" 15#include "internal.h"
16#include "afs_fs.h"
24 17
25#define FSFETCHSTATUS 132 /* AFS Fetch file status */
26#define FSFETCHDATA 130 /* AFS Fetch file data */
27#define FSGIVEUPCALLBACKS 147 /* AFS Discard callback promises */
28#define FSGETVOLUMEINFO 148 /* AFS Get root volume information */
29#define FSGETROOTVOLUME 151 /* AFS Get root volume name */
30#define FSLOOKUP 161 /* AFS lookup file in directory */
31
32/*****************************************************************************/
33/* 18/*
34 * map afs abort codes to/from Linux error codes 19 * decode an AFSFid block
35 * - called with call->lock held
36 */ 20 */
37static void afs_rxfs_aemap(struct rxrpc_call *call) 21static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid)
38{ 22{
39 switch (call->app_err_state) { 23 const __be32 *bp = *_bp;
40 case RXRPC_ESTATE_LOCAL_ABORT: 24
41 call->app_abort_code = -call->app_errno; 25 fid->vid = ntohl(*bp++);
42 break; 26 fid->vnode = ntohl(*bp++);
43 case RXRPC_ESTATE_PEER_ABORT: 27 fid->unique = ntohl(*bp++);
44 call->app_errno = afs_abort_to_error(call->app_abort_code); 28 *_bp = bp;
45 break; 29}
46 default:
47 break;
48 }
49} /* end afs_rxfs_aemap() */
50 30
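
The new client stubs replace the hand-rolled rxrpc read loops with small XDR decoders: each walks a cursor over the reply buffer, byte-swaps 32-bit fields with ntohl(), and hands the advanced cursor back through a pointer-to-pointer, exactly as xdr_decode_AFSFid() above. The idiom in isolation:

	/* Sketch of the decode idiom shared by the xdr_decode_*() helpers:
	 * consume big-endian words, then publish the new cursor. */
	static void xdr_decode_pair(const __be32 **_bp, u32 *a, u32 *b)
	{
		const __be32 *bp = *_bp;

		*a = ntohl(*bp++);	/* first wire word */
		*b = ntohl(*bp++);	/* second wire word */
		*_bp = bp;		/* caller resumes from here */
	}
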
51/*****************************************************************************/
52/* 31/*
53 * get the root volume name from a fileserver 32 * decode an AFSFetchStatus block
54 * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
55 */ 33 */
56#if 0 34static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
57int afs_rxfs_get_root_volume(struct afs_server *server, 35 struct afs_file_status *status,
58 char *buf, size_t *buflen) 36 struct afs_vnode *vnode)
59{ 37{
60 struct rxrpc_connection *conn; 38 const __be32 *bp = *_bp;
61 struct rxrpc_call *call; 39 umode_t mode;
62 struct kvec piov[2]; 40 u64 data_version, size;
63 size_t sent; 41 u32 changed = 0; /* becomes non-zero if ctime-type changes seen */
64 int ret; 42
65 u32 param[1]; 43#define EXTRACT(DST) \
44 do { \
45 u32 x = ntohl(*bp++); \
46 changed |= DST - x; \
47 DST = x; \
48 } while (0)
49
50 status->if_version = ntohl(*bp++);
51 EXTRACT(status->type);
52 EXTRACT(status->nlink);
53 size = ntohl(*bp++);
54 data_version = ntohl(*bp++);
55 EXTRACT(status->author);
56 EXTRACT(status->owner);
57 EXTRACT(status->caller_access); /* call ticket dependent */
58 EXTRACT(status->anon_access);
59 EXTRACT(status->mode);
60 EXTRACT(status->parent.vnode);
61 EXTRACT(status->parent.unique);
62 bp++; /* seg size */
63 status->mtime_client = ntohl(*bp++);
64 status->mtime_server = ntohl(*bp++);
65 EXTRACT(status->group);
66 bp++; /* sync counter */
67 data_version |= (u64) ntohl(*bp++) << 32;
68 bp++; /* lock count */
69 size |= (u64) ntohl(*bp++) << 32;
70 bp++; /* spare 4 */
71 *_bp = bp;
72
73 if (size != status->size) {
74 status->size = size;
75 changed |= true;
76 }
77 status->mode &= S_IALLUGO;
78
79 _debug("vnode time %lx, %lx",
80 status->mtime_client, status->mtime_server);
81
82 if (vnode) {
83 status->parent.vid = vnode->fid.vid;
84 if (changed && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
85 _debug("vnode changed");
86 i_size_write(&vnode->vfs_inode, size);
87 vnode->vfs_inode.i_uid = status->owner;
88 vnode->vfs_inode.i_gid = status->group;
89 vnode->vfs_inode.i_version = vnode->fid.unique;
90 vnode->vfs_inode.i_nlink = status->nlink;
91
92 mode = vnode->vfs_inode.i_mode;
93 mode &= ~S_IALLUGO;
94 mode |= status->mode;
95 barrier();
96 vnode->vfs_inode.i_mode = mode;
97 }
66 98
67 DECLARE_WAITQUEUE(myself, current); 99 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
100 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
101 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
102 }
68 103
69 kenter("%p,%p,%u",server, buf, *buflen); 104 if (status->data_version != data_version) {
105 status->data_version = data_version;
106 if (vnode && !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
107 _debug("vnode modified %llx on {%x:%u}",
108 (unsigned long long) data_version,
109 vnode->fid.vid, vnode->fid.vnode);
110 set_bit(AFS_VNODE_MODIFIED, &vnode->flags);
111 set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
112 }
113 }
114}
70 115
71 /* get hold of the fileserver connection */ 116/*
72 ret = afs_server_get_fsconn(server, &conn); 117 * decode an AFSCallBack block
73 if (ret < 0) 118 */
74 goto out; 119static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
120{
121 const __be32 *bp = *_bp;
75 122
76 /* create a call through that connection */ 123 vnode->cb_version = ntohl(*bp++);
77 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); 124 vnode->cb_expiry = ntohl(*bp++);
78 if (ret < 0) { 125 vnode->cb_type = ntohl(*bp++);
79 printk("kAFS: Unable to create call: %d\n", ret); 126 vnode->cb_expires = vnode->cb_expiry + get_seconds();
80 goto out_put_conn; 127 *_bp = bp;
81 } 128}
82 call->app_opcode = FSGETROOTVOLUME;
83 129
84 /* we want to get event notifications from the call */ 130static void xdr_decode_AFSCallBack_raw(const __be32 **_bp,
85 add_wait_queue(&call->waitq, &myself); 131 struct afs_callback *cb)
132{
133 const __be32 *bp = *_bp;
86 134
87 /* marshall the parameters */ 135 cb->version = ntohl(*bp++);
88 param[0] = htonl(FSGETROOTVOLUME); 136 cb->expiry = ntohl(*bp++);
89 137 cb->type = ntohl(*bp++);
90 piov[0].iov_len = sizeof(param); 138 *_bp = bp;
91 piov[0].iov_base = param; 139}
92
93 /* send the parameters to the server */
94 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
95 0, &sent);
96 if (ret < 0)
97 goto abort;
98
99 /* wait for the reply to completely arrive */
100 for (;;) {
101 set_current_state(TASK_INTERRUPTIBLE);
102 if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
103 signal_pending(current))
104 break;
105 schedule();
106 }
107 set_current_state(TASK_RUNNING);
108 140
109 ret = -EINTR; 141/*
110 if (signal_pending(current)) 142 * decode an AFSVolSync block
111 goto abort; 143 */
144static void xdr_decode_AFSVolSync(const __be32 **_bp,
145 struct afs_volsync *volsync)
146{
147 const __be32 *bp = *_bp;
112 148
113 switch (call->app_call_state) { 149 volsync->creation = ntohl(*bp++);
114 case RXRPC_CSTATE_ERROR: 150 bp++; /* spare2 */
115 ret = call->app_errno; 151 bp++; /* spare3 */
116 kdebug("Got Error: %d", ret); 152 bp++; /* spare4 */
117 goto out_unwait; 153 bp++; /* spare5 */
154 bp++; /* spare6 */
155 *_bp = bp;
156}
118 157
119 case RXRPC_CSTATE_CLNT_GOT_REPLY: 158/*
120 /* read the reply */ 159 * deliver reply data to an FS.FetchStatus
121 kdebug("Got Reply: qty=%d", call->app_ready_qty); 160 */
161static int afs_deliver_fs_fetch_status(struct afs_call *call,
162 struct sk_buff *skb, bool last)
163{
164 struct afs_vnode *vnode = call->reply;
165 const __be32 *bp;
122 166
123 ret = -EBADMSG; 167 _enter(",,%u", last);
124 if (call->app_ready_qty <= 4)
125 goto abort;
126 168
127 ret = rxrpc_call_read_data(call, NULL, call->app_ready_qty, 0); 169 afs_transfer_reply(call, skb);
128 if (ret < 0) 170 if (!last)
129 goto abort; 171 return 0;
130 172
131#if 0 173 if (call->reply_size != call->reply_max)
132 /* unmarshall the reply */ 174 return -EBADMSG;
133 bp = buffer;
134 for (loop = 0; loop < 65; loop++)
135 entry->name[loop] = ntohl(*bp++);
136 entry->name[64] = 0;
137 175
138 entry->type = ntohl(*bp++); 176 /* unmarshall the reply once we've received all of it */
139 entry->num_servers = ntohl(*bp++); 177 bp = call->buffer;
178 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
179 xdr_decode_AFSCallBack(&bp, vnode);
180 if (call->reply2)
181 xdr_decode_AFSVolSync(&bp, call->reply2);
140 182
141 for (loop = 0; loop < 8; loop++) 183 _leave(" = 0 [done]");
142 entry->servers[loop].addr.s_addr = *bp++; 184 return 0;
185}
143 186
144 for (loop = 0; loop < 8; loop++) 187/*
145 entry->servers[loop].partition = ntohl(*bp++); 188 * FS.FetchStatus operation type
189 */
190static const struct afs_call_type afs_RXFSFetchStatus = {
191 .name = "FS.FetchStatus",
192 .deliver = afs_deliver_fs_fetch_status,
193 .abort_to_error = afs_abort_to_error,
194 .destructor = afs_flat_call_destructor,
195};
146 196
147 for (loop = 0; loop < 8; loop++) 197/*
148 entry->servers[loop].flags = ntohl(*bp++); 198 * fetch the status information for a file
199 */
200int afs_fs_fetch_file_status(struct afs_server *server,
201 struct key *key,
202 struct afs_vnode *vnode,
203 struct afs_volsync *volsync,
204 const struct afs_wait_mode *wait_mode)
205{
206 struct afs_call *call;
207 __be32 *bp;
149 208
150 for (loop = 0; loop < 3; loop++) 209 _enter(",%x,{%x:%d},,",
151 entry->volume_ids[loop] = ntohl(*bp++); 210 key_serial(key), vnode->fid.vid, vnode->fid.vnode);
152 211
153 entry->clone_id = ntohl(*bp++); 212 call = afs_alloc_flat_call(&afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
154 entry->flags = ntohl(*bp); 213 if (!call)
155#endif 214 return -ENOMEM;
156 215
157 /* success */ 216 call->key = key;
158 ret = 0; 217 call->reply = vnode;
159 goto out_unwait; 218 call->reply2 = volsync;
219 call->service_id = FS_SERVICE;
220 call->port = htons(AFS_FS_PORT);
160 221
161 default: 222 /* marshall the parameters */
162 BUG(); 223 bp = call->request;
163 } 224 bp[0] = htonl(FSFETCHSTATUS);
225 bp[1] = htonl(vnode->fid.vid);
226 bp[2] = htonl(vnode->fid.vnode);
227 bp[3] = htonl(vnode->fid.unique);
228
229 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
230}
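
Fixed-size RPCs such as FS.FetchStatus are "flat calls": afs_alloc_flat_call() is given the request size and the maximum reply size, the request is marshalled as an array of __be32 words, and delivery and abort translation are table-driven through the afs_call_type. Replies that arrive in pieces are handled with a resumable state machine, as afs_deliver_fs_fetch_data() below shows; the core of that idiom is sketched here:

	/* Sketch of one unmarshalling phase as used by
	 * afs_deliver_fs_fetch_data() below: -EAGAIN leaves the state
	 * machine in place so the next packet resumes the same phase. */
	static int afs_deliver_phase(struct afs_call *call, struct sk_buff *skb,
				     bool last, void *buf, size_t want)
	{
		int ret = afs_extract_data(call, skb, last, buf, want);

		switch (ret) {
		case 0:			/* phase complete */
			call->offset = 0;
			call->unmarshall++;
			return 1;
		case -EAGAIN:		/* need more packets */
			return 0;
		default:		/* protocol error */
			return ret;
		}
	}
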
164 231
165 abort:
166 set_current_state(TASK_UNINTERRUPTIBLE);
167 rxrpc_call_abort(call, ret);
168 schedule();
169 out_unwait:
170 set_current_state(TASK_RUNNING);
171 remove_wait_queue(&call->waitq, &myself);
172 rxrpc_put_call(call);
173 out_put_conn:
174 afs_server_release_fsconn(server, conn);
175 out:
176 kleave("");
177 return ret;
178} /* end afs_rxfs_get_root_volume() */
179#endif
180
181/*****************************************************************************/
182/* 232/*
183 * get information about a volume 233 * deliver reply data to an FS.FetchData
184 */ 234 */
185#if 0 235static int afs_deliver_fs_fetch_data(struct afs_call *call,
186int afs_rxfs_get_volume_info(struct afs_server *server, 236 struct sk_buff *skb, bool last)
187 const char *name,
188 struct afs_volume_info *vinfo)
189{ 237{
190 struct rxrpc_connection *conn; 238 struct afs_vnode *vnode = call->reply;
191 struct rxrpc_call *call; 239 const __be32 *bp;
192 struct kvec piov[3]; 240 struct page *page;
193 size_t sent; 241 void *buffer;
194 int ret; 242 int ret;
195 u32 param[2], *bp, zero;
196 243
197 DECLARE_WAITQUEUE(myself, current); 244 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
245
246 switch (call->unmarshall) {
247 case 0:
248 call->offset = 0;
249 call->unmarshall++;
250
251 /* extract the returned data length */
252 case 1:
253 _debug("extract data length");
254 ret = afs_extract_data(call, skb, last, &call->tmp, 4);
255 switch (ret) {
256 case 0: break;
257 case -EAGAIN: return 0;
258 default: return ret;
259 }
198 260
199 _enter("%p,%s,%p", server, name, vinfo); 261 call->count = ntohl(call->tmp);
262 _debug("DATA length: %u", call->count);
263 if (call->count > PAGE_SIZE)
264 return -EBADMSG;
265 call->offset = 0;
266 call->unmarshall++;
267
268 if (call->count < PAGE_SIZE) {
269 buffer = kmap_atomic(call->reply3, KM_USER0);
270 memset(buffer + PAGE_SIZE - call->count, 0,
271 call->count);
272 kunmap_atomic(buffer, KM_USER0);
273 }
200 274
201 /* get hold of the fileserver connection */ 275 /* extract the returned data */
202 ret = afs_server_get_fsconn(server, &conn); 276 case 2:
203 if (ret < 0) 277 _debug("extract data");
204 goto out; 278 page = call->reply3;
279 buffer = kmap_atomic(page, KM_USER0);
280 ret = afs_extract_data(call, skb, last, buffer, call->count);
281 kunmap_atomic(buffer, KM_USER0);
282 switch (ret) {
283 case 0: break;
284 case -EAGAIN: return 0;
285 default: return ret;
286 }
205 287
206 /* create a call through that connection */ 288 call->offset = 0;
207 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); 289 call->unmarshall++;
208 if (ret < 0) { 290
209 printk("kAFS: Unable to create call: %d\n", ret); 291 /* extract the metadata */
210 goto out_put_conn; 292 case 3:
211 } 293 ret = afs_extract_data(call, skb, last, call->buffer,
212 call->app_opcode = FSGETVOLUMEINFO; 294 (21 + 3 + 6) * 4);
295 switch (ret) {
296 case 0: break;
297 case -EAGAIN: return 0;
298 default: return ret;
299 }
213 300
214 /* we want to get event notifications from the call */ 301 bp = call->buffer;
215 add_wait_queue(&call->waitq, &myself); 302 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
303 xdr_decode_AFSCallBack(&bp, vnode);
304 if (call->reply2)
305 xdr_decode_AFSVolSync(&bp, call->reply2);
216 306
217 /* marshall the parameters */ 307 call->offset = 0;
218 piov[1].iov_len = strlen(name); 308 call->unmarshall++;
219 piov[1].iov_base = (char *) name; 309
220 310 case 4:
221 zero = 0; 311 _debug("trailer");
222 piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; 312 if (skb->len != 0)
223 piov[2].iov_base = &zero; 313 return -EBADMSG;
224 314 break;
225 param[0] = htonl(FSGETVOLUMEINFO);
226 param[1] = htonl(piov[1].iov_len);
227
228 piov[0].iov_len = sizeof(param);
229 piov[0].iov_base = param;
230
231 /* send the parameters to the server */
232 ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
233 0, &sent);
234 if (ret < 0)
235 goto abort;
236
237 /* wait for the reply to completely arrive */
238 bp = rxrpc_call_alloc_scratch(call, 64);
239
240 ret = rxrpc_call_read_data(call, bp, 64,
241 RXRPC_CALL_READ_BLOCK |
242 RXRPC_CALL_READ_ALL);
243 if (ret < 0) {
244 if (ret == -ECONNABORTED) {
245 ret = call->app_errno;
246 goto out_unwait;
247 }
248 goto abort;
249 } 315 }
250 316
251 /* unmarshall the reply */ 317 if (!last)
252 vinfo->vid = ntohl(*bp++); 318 return 0;
253 vinfo->type = ntohl(*bp++); 319
254 320 _leave(" = 0 [done]");
255 vinfo->type_vids[0] = ntohl(*bp++); 321 return 0;
256 vinfo->type_vids[1] = ntohl(*bp++); 322}
257 vinfo->type_vids[2] = ntohl(*bp++); 323
258 vinfo->type_vids[3] = ntohl(*bp++);
259 vinfo->type_vids[4] = ntohl(*bp++);
260
261 vinfo->nservers = ntohl(*bp++);
262 vinfo->servers[0].addr.s_addr = *bp++;
263 vinfo->servers[1].addr.s_addr = *bp++;
264 vinfo->servers[2].addr.s_addr = *bp++;
265 vinfo->servers[3].addr.s_addr = *bp++;
266 vinfo->servers[4].addr.s_addr = *bp++;
267 vinfo->servers[5].addr.s_addr = *bp++;
268 vinfo->servers[6].addr.s_addr = *bp++;
269 vinfo->servers[7].addr.s_addr = *bp++;
270
271 ret = -EBADMSG;
272 if (vinfo->nservers > 8)
273 goto abort;
274
275 /* success */
276 ret = 0;
277
278 out_unwait:
279 set_current_state(TASK_RUNNING);
280 remove_wait_queue(&call->waitq, &myself);
281 rxrpc_put_call(call);
282 out_put_conn:
283 afs_server_release_fsconn(server, conn);
284 out:
285 _leave("");
286 return ret;
287
288 abort:
289 set_current_state(TASK_UNINTERRUPTIBLE);
290 rxrpc_call_abort(call, ret);
291 schedule();
292 goto out_unwait;
293
294} /* end afs_rxfs_get_volume_info() */
295#endif
296
297/*****************************************************************************/
298/* 324/*
299 * fetch the status information for a file 325 * FS.FetchData operation type
326 */
327static const struct afs_call_type afs_RXFSFetchData = {
328 .name = "FS.FetchData",
329 .deliver = afs_deliver_fs_fetch_data,
330 .abort_to_error = afs_abort_to_error,
331 .destructor = afs_flat_call_destructor,
332};
333
334/*
335 * fetch data from a file
300 */ 336 */
301int afs_rxfs_fetch_file_status(struct afs_server *server, 337int afs_fs_fetch_data(struct afs_server *server,
302 struct afs_vnode *vnode, 338 struct key *key,
303 struct afs_volsync *volsync) 339 struct afs_vnode *vnode,
340 off_t offset, size_t length,
341 struct page *buffer,
342 const struct afs_wait_mode *wait_mode)
304{ 343{
305 struct afs_server_callslot callslot; 344 struct afs_call *call;
306 struct rxrpc_call *call;
307 struct kvec piov[1];
308 size_t sent;
309 int ret;
310 __be32 *bp; 345 __be32 *bp;
311 346
312 DECLARE_WAITQUEUE(myself, current); 347 _enter("");
313 348
314 _enter("%p,{%u,%u,%u}", 349 call = afs_alloc_flat_call(&afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
315 server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 350 if (!call)
351 return -ENOMEM;
316 352
317 /* get hold of the fileserver connection */ 353 call->key = key;
318 ret = afs_server_request_callslot(server, &callslot); 354 call->reply = vnode;
319 if (ret < 0) 355 call->reply2 = NULL; /* volsync */
320 goto out; 356 call->reply3 = buffer;
321 357 call->service_id = FS_SERVICE;
322 /* create a call through that connection */ 358 call->port = htons(AFS_FS_PORT);
323 ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap,
324 &call);
325 if (ret < 0) {
326 printk("kAFS: Unable to create call: %d\n", ret);
327 goto out_put_conn;
328 }
329 call->app_opcode = FSFETCHSTATUS;
330
331 /* we want to get event notifications from the call */
332 add_wait_queue(&call->waitq, &myself);
333 359
334 /* marshall the parameters */ 360 /* marshall the parameters */
335 bp = rxrpc_call_alloc_scratch(call, 16); 361 bp = call->request;
336 bp[0] = htonl(FSFETCHSTATUS); 362 bp[0] = htonl(FSFETCHDATA);
337 bp[1] = htonl(vnode->fid.vid); 363 bp[1] = htonl(vnode->fid.vid);
338 bp[2] = htonl(vnode->fid.vnode); 364 bp[2] = htonl(vnode->fid.vnode);
339 bp[3] = htonl(vnode->fid.unique); 365 bp[3] = htonl(vnode->fid.unique);
366 bp[4] = htonl(offset);
367 bp[5] = htonl(length);
340 368
341 piov[0].iov_len = 16; 369 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
342 piov[0].iov_base = bp; 370}
343
344 /* send the parameters to the server */
345 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
346 0, &sent);
347 if (ret < 0)
348 goto abort;
349
350 /* wait for the reply to completely arrive */
351 bp = rxrpc_call_alloc_scratch(call, 120);
352
353 ret = rxrpc_call_read_data(call, bp, 120,
354 RXRPC_CALL_READ_BLOCK |
355 RXRPC_CALL_READ_ALL);
356 if (ret < 0) {
357 if (ret == -ECONNABORTED) {
358 ret = call->app_errno;
359 goto out_unwait;
360 }
361 goto abort;
362 }
363 371
364 /* unmarshall the reply */ 372/*
365 vnode->status.if_version = ntohl(*bp++); 373 * deliver reply data to an FS.GiveUpCallBacks
366 vnode->status.type = ntohl(*bp++); 374 */
367 vnode->status.nlink = ntohl(*bp++); 375static int afs_deliver_fs_give_up_callbacks(struct afs_call *call,
368 vnode->status.size = ntohl(*bp++); 376 struct sk_buff *skb, bool last)
369 vnode->status.version = ntohl(*bp++); 377{
370 vnode->status.author = ntohl(*bp++); 378 _enter(",{%u},%d", skb->len, last);
371 vnode->status.owner = ntohl(*bp++);
372 vnode->status.caller_access = ntohl(*bp++);
373 vnode->status.anon_access = ntohl(*bp++);
374 vnode->status.mode = ntohl(*bp++);
375 vnode->status.parent.vid = vnode->fid.vid;
376 vnode->status.parent.vnode = ntohl(*bp++);
377 vnode->status.parent.unique = ntohl(*bp++);
378 bp++; /* seg size */
379 vnode->status.mtime_client = ntohl(*bp++);
380 vnode->status.mtime_server = ntohl(*bp++);
381 bp++; /* group */
382 bp++; /* sync counter */
383 vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
384 bp++; /* spare2 */
385 bp++; /* spare3 */
386 bp++; /* spare4 */
387 379
388 vnode->cb_version = ntohl(*bp++); 380 if (skb->len > 0)
389 vnode->cb_expiry = ntohl(*bp++); 381 return -EBADMSG; /* shouldn't be any reply data */
390 vnode->cb_type = ntohl(*bp++); 382 return 0;
391 383}
392 if (volsync) {
393 volsync->creation = ntohl(*bp++);
394 bp++; /* spare2 */
395 bp++; /* spare3 */
396 bp++; /* spare4 */
397 bp++; /* spare5 */
398 bp++; /* spare6 */
399 }
400 384
401 /* success */
402 ret = 0;
403
404 out_unwait:
405 set_current_state(TASK_RUNNING);
406 remove_wait_queue(&call->waitq, &myself);
407 rxrpc_put_call(call);
408 out_put_conn:
409 afs_server_release_callslot(server, &callslot);
410 out:
411 _leave("");
412 return ret;
413
414 abort:
415 set_current_state(TASK_UNINTERRUPTIBLE);
416 rxrpc_call_abort(call, ret);
417 schedule();
418 goto out_unwait;
419} /* end afs_rxfs_fetch_file_status() */
420
421/*****************************************************************************/
422/* 385/*
423 * fetch the contents of a file or directory 386 * FS.GiveUpCallBacks operation type
424 */ 387 */
425int afs_rxfs_fetch_file_data(struct afs_server *server, 388static const struct afs_call_type afs_RXFSGiveUpCallBacks = {
426 struct afs_vnode *vnode, 389 .name = "FS.GiveUpCallBacks",
427 struct afs_rxfs_fetch_descriptor *desc, 390 .deliver = afs_deliver_fs_give_up_callbacks,
428 struct afs_volsync *volsync) 391 .abort_to_error = afs_abort_to_error,
392 .destructor = afs_flat_call_destructor,
393};
394
395/*
396 * give up a set of callbacks
397 * - the callbacks are held in the server->cb_break ring
398 */
399int afs_fs_give_up_callbacks(struct afs_server *server,
400 const struct afs_wait_mode *wait_mode)
429{ 401{
430 struct afs_server_callslot callslot; 402 struct afs_call *call;
431 struct rxrpc_call *call; 403 size_t ncallbacks;
432 struct kvec piov[1]; 404 __be32 *bp, *tp;
433 size_t sent; 405 int loop;
434 int ret;
435 __be32 *bp;
436 406
437 DECLARE_WAITQUEUE(myself, current); 407 ncallbacks = CIRC_CNT(server->cb_break_head, server->cb_break_tail,
438 408 ARRAY_SIZE(server->cb_break));
439 _enter("%p,{fid={%u,%u,%u},sz=%Zu,of=%lu}", 409
440 server, 410 _enter("{%zu},", ncallbacks);
441 desc->fid.vid, 411
442 desc->fid.vnode, 412 if (ncallbacks == 0)
443 desc->fid.unique, 413 return 0;
444 desc->size, 414 if (ncallbacks > AFSCBMAX)
445 desc->offset); 415 ncallbacks = AFSCBMAX;
446 416
447 /* get hold of the fileserver connection */ 417 _debug("break %zu callbacks", ncallbacks);
448 ret = afs_server_request_callslot(server, &callslot);
449 if (ret < 0)
450 goto out;
451
452 /* create a call through that connection */
453 ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call);
454 if (ret < 0) {
455 printk("kAFS: Unable to create call: %d\n", ret);
456 goto out_put_conn;
457 }
458 call->app_opcode = FSFETCHDATA;
459 418
460 /* we want to get event notifications from the call */ 419 call = afs_alloc_flat_call(&afs_RXFSGiveUpCallBacks,
461 add_wait_queue(&call->waitq, &myself); 420 12 + ncallbacks * 6 * 4, 0);
421 if (!call)
422 return -ENOMEM;
423
424 call->service_id = FS_SERVICE;
425 call->port = htons(AFS_FS_PORT);
462 426
463 /* marshall the parameters */ 427 /* marshall the parameters */
464 bp = rxrpc_call_alloc_scratch(call, 24); 428 bp = call->request;
465 bp[0] = htonl(FSFETCHDATA); 429 tp = bp + 2 + ncallbacks * 3;
466 bp[1] = htonl(desc->fid.vid); 430 *bp++ = htonl(FSGIVEUPCALLBACKS);
467 bp[2] = htonl(desc->fid.vnode); 431 *bp++ = htonl(ncallbacks);
468 bp[3] = htonl(desc->fid.unique); 432 *tp++ = htonl(ncallbacks);
469 bp[4] = htonl(desc->offset); 433
470 bp[5] = htonl(desc->size); 434 atomic_sub(ncallbacks, &server->cb_break_n);
471 435 for (loop = ncallbacks; loop > 0; loop--) {
472 piov[0].iov_len = 24; 436 struct afs_callback *cb =
473 piov[0].iov_base = bp; 437 &server->cb_break[server->cb_break_tail];
474 438
475 /* send the parameters to the server */ 439 *bp++ = htonl(cb->fid.vid);
476 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS, 440 *bp++ = htonl(cb->fid.vnode);
477 0, &sent); 441 *bp++ = htonl(cb->fid.unique);
478 if (ret < 0) 442 *tp++ = htonl(cb->version);
479 goto abort; 443 *tp++ = htonl(cb->expiry);
480 444 *tp++ = htonl(cb->type);
481 /* wait for the data count to arrive */ 445 smp_mb();
482 ret = rxrpc_call_read_data(call, bp, 4, RXRPC_CALL_READ_BLOCK); 446 server->cb_break_tail =
483 if (ret < 0) 447 (server->cb_break_tail + 1) &
484 goto read_failed; 448 (ARRAY_SIZE(server->cb_break) - 1);
485
486 desc->actual = ntohl(bp[0]);
487 if (desc->actual != desc->size) {
488 ret = -EBADMSG;
489 goto abort;
490 } 449 }
491 450
492 /* call the app to read the actual data */ 451 ASSERT(ncallbacks > 0);
493 rxrpc_call_reset_scratch(call); 452 wake_up_nr(&server->cb_break_waitq, ncallbacks);
494
495 ret = rxrpc_call_read_data(call, desc->buffer, desc->actual,
496 RXRPC_CALL_READ_BLOCK);
497 if (ret < 0)
498 goto read_failed;
499
500 /* wait for the rest of the reply to completely arrive */
501 rxrpc_call_reset_scratch(call);
502 bp = rxrpc_call_alloc_scratch(call, 120);
503
504 ret = rxrpc_call_read_data(call, bp, 120,
505 RXRPC_CALL_READ_BLOCK |
506 RXRPC_CALL_READ_ALL);
507 if (ret < 0)
508 goto read_failed;
509
510 /* unmarshall the reply */
511 vnode->status.if_version = ntohl(*bp++);
512 vnode->status.type = ntohl(*bp++);
513 vnode->status.nlink = ntohl(*bp++);
514 vnode->status.size = ntohl(*bp++);
515 vnode->status.version = ntohl(*bp++);
516 vnode->status.author = ntohl(*bp++);
517 vnode->status.owner = ntohl(*bp++);
518 vnode->status.caller_access = ntohl(*bp++);
519 vnode->status.anon_access = ntohl(*bp++);
520 vnode->status.mode = ntohl(*bp++);
521 vnode->status.parent.vid = desc->fid.vid;
522 vnode->status.parent.vnode = ntohl(*bp++);
523 vnode->status.parent.unique = ntohl(*bp++);
524 bp++; /* seg size */
525 vnode->status.mtime_client = ntohl(*bp++);
526 vnode->status.mtime_server = ntohl(*bp++);
527 bp++; /* group */
528 bp++; /* sync counter */
529 vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
530 bp++; /* spare2 */
531 bp++; /* spare3 */
532 bp++; /* spare4 */
533 453
534 vnode->cb_version = ntohl(*bp++); 454 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
535 vnode->cb_expiry = ntohl(*bp++); 455}
536 vnode->cb_type = ntohl(*bp++);
537
538 if (volsync) {
539 volsync->creation = ntohl(*bp++);
540 bp++; /* spare2 */
541 bp++; /* spare3 */
542 bp++; /* spare4 */
543 bp++; /* spare5 */
544 bp++; /* spare6 */
545 }
546 456
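The new give-up-callbacks path above drains server->cb_break as a lock-light circular buffer: CIRC_CNT() measures the fill level and the tail advances under power-of-two masking after an smp_mb(), so the producer only ever sees fully-consumed slots. A minimal standalone sketch of that ring arithmetic follows; the ring type and ex_* names are invented for illustration, though the CIRC_CNT() macro matches linux/circ_buf.h.

#include <stddef.h>

#define RING_SIZE 64		/* must be a power of two for the masking */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

struct ex_ring {
	int slots[RING_SIZE];
	unsigned int head;	/* producer index */
	unsigned int tail;	/* consumer index */
};

/* drain up to max entries, advancing the tail with power-of-two
 * masking exactly as the cb_break consumer above does */
static size_t ex_ring_drain(struct ex_ring *r, int *out, size_t max)
{
	size_t n = CIRC_CNT(r->head, r->tail, RING_SIZE);
	size_t i;

	if (n > max)
		n = max;
	for (i = 0; i < n; i++) {
		out[i] = r->slots[r->tail];
		r->tail = (r->tail + 1) & (RING_SIZE - 1);
	}
	return n;
}

Because the indices are free-running counters masked on use, no separate "count" field needs locking; the patch only adds atomic_sub() on cb_break_n to wake waiters.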
547 /* success */ 457/*
548 ret = 0; 458 * deliver reply data to an FS.CreateFile or an FS.MakeDir
549 459 */
550 out_unwait: 460static int afs_deliver_fs_create_vnode(struct afs_call *call,
551 set_current_state(TASK_RUNNING); 461 struct sk_buff *skb, bool last)
552 remove_wait_queue(&call->waitq,&myself); 462{
553 rxrpc_put_call(call); 463 struct afs_vnode *vnode = call->reply;
554 out_put_conn: 464 const __be32 *bp;
555 afs_server_release_callslot(server, &callslot); 465
556 out: 466 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
557 _leave(" = %d", ret);
558 return ret;
559
560 read_failed:
561 if (ret == -ECONNABORTED) {
562 ret = call->app_errno;
563 goto out_unwait;
564 }
565 467
566 abort: 468 afs_transfer_reply(call, skb);
567 set_current_state(TASK_UNINTERRUPTIBLE); 469 if (!last)
568 rxrpc_call_abort(call, ret); 470 return 0;
569 schedule();
570 goto out_unwait;
571 471
572} /* end afs_rxfs_fetch_file_data() */ 472 if (call->reply_size != call->reply_max)
473 return -EBADMSG;
474
475 /* unmarshall the reply once we've received all of it */
476 bp = call->buffer;
477 xdr_decode_AFSFid(&bp, call->reply2);
478 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL);
479 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
480 xdr_decode_AFSCallBack_raw(&bp, call->reply4);
481 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
482
483 _leave(" = 0 [done]");
484 return 0;
485}
486
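afs_deliver_fs_create_vnode() above walks call->buffer with a chain of xdr_decode_*() helpers that each take a double pointer, so the cursor advances across successive decoders. A hedged sketch of that cursor convention, using invented ex_* names rather than the kernel's:

#include <arpa/inet.h>
#include <stdint.h>

struct ex_fid {
	uint32_t vid, vnode, unique;
};

/* decode three big-endian words and hand the advanced cursor back to
 * the caller, so successive decoders can be chained over one buffer */
static void ex_decode_fid(const uint32_t **_bp, struct ex_fid *fid)
{
	const uint32_t *bp = *_bp;

	fid->vid    = ntohl(*bp++);
	fid->vnode  = ntohl(*bp++);
	fid->unique = ntohl(*bp++);
	*_bp = bp;
}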
487/*
488 * FS.CreateFile and FS.MakeDir operation type
489 */
490static const struct afs_call_type afs_RXFSCreateXXXX = {
491 .name = "FS.CreateXXXX",
492 .deliver = afs_deliver_fs_create_vnode,
493 .abort_to_error = afs_abort_to_error,
494 .destructor = afs_flat_call_destructor,
495};
573 496
574/*****************************************************************************/
575/* 497/*
576 * ask the AFS fileserver to discard a callback request on a file 498 * create a file or make a directory
577 */ 499 */
578int afs_rxfs_give_up_callback(struct afs_server *server, 500int afs_fs_create(struct afs_server *server,
579 struct afs_vnode *vnode) 501 struct key *key,
502 struct afs_vnode *vnode,
503 const char *name,
504 umode_t mode,
505 struct afs_fid *newfid,
506 struct afs_file_status *newstatus,
507 struct afs_callback *newcb,
508 const struct afs_wait_mode *wait_mode)
580{ 509{
581 struct afs_server_callslot callslot; 510 struct afs_call *call;
582 struct rxrpc_call *call; 511 size_t namesz, reqsz, padsz;
583 struct kvec piov[1];
584 size_t sent;
585 int ret;
586 __be32 *bp; 512 __be32 *bp;
587 513
588 DECLARE_WAITQUEUE(myself, current); 514 _enter("");
589 515
590 _enter("%p,{%u,%u,%u}", 516 namesz = strlen(name);
591 server, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 517 padsz = (4 - (namesz & 3)) & 3;
518 reqsz = (5 * 4) + namesz + padsz + (6 * 4);
592 519
593 /* get hold of the fileserver connection */ 520 call = afs_alloc_flat_call(&afs_RXFSCreateXXXX, reqsz,
594 ret = afs_server_request_callslot(server, &callslot); 521 (3 + 21 + 21 + 3 + 6) * 4);
595 if (ret < 0) 522 if (!call)
596 goto out; 523 return -ENOMEM;
597 524
598 /* create a call through that connection */ 525 call->key = key;
599 ret = rxrpc_create_call(callslot.conn, NULL, NULL, afs_rxfs_aemap, &call); 526 call->reply = vnode;
600 if (ret < 0) { 527 call->reply2 = newfid;
601 printk("kAFS: Unable to create call: %d\n", ret); 528 call->reply3 = newstatus;
602 goto out_put_conn; 529 call->reply4 = newcb;
530 call->service_id = FS_SERVICE;
531 call->port = htons(AFS_FS_PORT);
532
533 /* marshall the parameters */
534 bp = call->request;
535 *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE);
536 *bp++ = htonl(vnode->fid.vid);
537 *bp++ = htonl(vnode->fid.vnode);
538 *bp++ = htonl(vnode->fid.unique);
539 *bp++ = htonl(namesz);
540 memcpy(bp, name, namesz);
541 bp = (void *) bp + namesz;
542 if (padsz > 0) {
543 memset(bp, 0, padsz);
544 bp = (void *) bp + padsz;
603 } 545 }
604 call->app_opcode = FSGIVEUPCALLBACKS; 546 *bp++ = htonl(AFS_SET_MODE);
547 *bp++ = 0; /* mtime */
548 *bp++ = 0; /* owner */
549 *bp++ = 0; /* group */
550 *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
551 *bp++ = 0; /* segment size */
605 552
606 /* we want to get event notifications from the call */ 553 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
607 add_wait_queue(&call->waitq, &myself); 554}
608 555
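The namesz/padsz arithmetic in afs_fs_create() implements the XDR string rule: a 32-bit length word, the bytes themselves, then zero padding out to the next four-byte boundary. The same calculation recurs in the remove, link, symlink and rename marshallers below; here is a standalone sketch with invented names:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* append one XDR string: 32-bit length, the bytes, then zero padding
 * out to the next four-byte boundary; returns the advanced cursor */
static void *ex_encode_string(void *p, const char *name)
{
	size_t namesz = strlen(name);
	size_t padsz = (4 - (namesz & 3)) & 3;	/* 0 to 3 pad bytes */
	uint32_t *bp = p;

	*bp++ = htonl((uint32_t)namesz);
	memcpy(bp, name, namesz);
	p = (char *)bp + namesz;
	if (padsz > 0) {
		memset(p, 0, padsz);
		p = (char *)p + padsz;
	}
	return p;
}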
609 /* marshall the parameters */ 556/*
610 bp = rxrpc_call_alloc_scratch(call, (1 + 4 + 4) * 4); 557 * deliver reply data to an FS.RemoveFile or FS.RemoveDir
558 */
559static int afs_deliver_fs_remove(struct afs_call *call,
560 struct sk_buff *skb, bool last)
561{
562 struct afs_vnode *vnode = call->reply;
563 const __be32 *bp;
611 564
612 piov[0].iov_len = (1 + 4 + 4) * 4; 565 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
613 piov[0].iov_base = bp;
614 566
615 *bp++ = htonl(FSGIVEUPCALLBACKS); 567 afs_transfer_reply(call, skb);
616 *bp++ = htonl(1); 568 if (!last)
569 return 0;
570
571 if (call->reply_size != call->reply_max)
572 return -EBADMSG;
573
574 /* unmarshall the reply once we've received all of it */
575 bp = call->buffer;
576 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
577 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
578
579 _leave(" = 0 [done]");
580 return 0;
581}
582
583/*
584 * FS.RemoveDir/FS.RemoveFile operation type
585 */
586static const struct afs_call_type afs_RXFSRemoveXXXX = {
587 .name = "FS.RemoveXXXX",
588 .deliver = afs_deliver_fs_remove,
589 .abort_to_error = afs_abort_to_error,
590 .destructor = afs_flat_call_destructor,
591};
592
593/*
594 * remove a file or directory
595 */
596int afs_fs_remove(struct afs_server *server,
597 struct key *key,
598 struct afs_vnode *vnode,
599 const char *name,
600 bool isdir,
601 const struct afs_wait_mode *wait_mode)
602{
603 struct afs_call *call;
604 size_t namesz, reqsz, padsz;
605 __be32 *bp;
606
607 _enter("");
608
609 namesz = strlen(name);
610 padsz = (4 - (namesz & 3)) & 3;
611 reqsz = (5 * 4) + namesz + padsz;
612
613 call = afs_alloc_flat_call(&afs_RXFSRemoveXXXX, reqsz, (21 + 6) * 4);
614 if (!call)
615 return -ENOMEM;
616
617 call->key = key;
618 call->reply = vnode;
619 call->service_id = FS_SERVICE;
620 call->port = htons(AFS_FS_PORT);
621
622 /* marshall the parameters */
623 bp = call->request;
624 *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
617 *bp++ = htonl(vnode->fid.vid); 625 *bp++ = htonl(vnode->fid.vid);
618 *bp++ = htonl(vnode->fid.vnode); 626 *bp++ = htonl(vnode->fid.vnode);
619 *bp++ = htonl(vnode->fid.unique); 627 *bp++ = htonl(vnode->fid.unique);
620 *bp++ = htonl(1); 628 *bp++ = htonl(namesz);
621 *bp++ = htonl(vnode->cb_version); 629 memcpy(bp, name, namesz);
622 *bp++ = htonl(vnode->cb_expiry); 630 bp = (void *) bp + namesz;
623 *bp++ = htonl(vnode->cb_type); 631 if (padsz > 0) {
624 632 memset(bp, 0, padsz);
625 /* send the parameters to the server */ 633 bp = (void *) bp + padsz;
626 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
627 0, &sent);
628 if (ret < 0)
629 goto abort;
630
631 /* wait for the reply to completely arrive */
632 for (;;) {
633 set_current_state(TASK_INTERRUPTIBLE);
634 if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
635 signal_pending(current))
636 break;
637 schedule();
638 } 634 }
639 set_current_state(TASK_RUNNING);
640 635
641 ret = -EINTR; 636 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
642 if (signal_pending(current)) 637}
643 goto abort;
644 638
645 switch (call->app_call_state) { 639/*
646 case RXRPC_CSTATE_ERROR: 640 * deliver reply data to an FS.Link
647 ret = call->app_errno; 641 */
648 goto out_unwait; 642static int afs_deliver_fs_link(struct afs_call *call,
643 struct sk_buff *skb, bool last)
644{
645 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2;
646 const __be32 *bp;
649 647
650 case RXRPC_CSTATE_CLNT_GOT_REPLY: 648 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
651 ret = 0;
652 goto out_unwait;
653 649
654 default: 650 afs_transfer_reply(call, skb);
655 BUG(); 651 if (!last)
656 } 652 return 0;
653
654 if (call->reply_size != call->reply_max)
655 return -EBADMSG;
656
657 /* unmarshall the reply once we've received all of it */
658 bp = call->buffer;
659 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
660 xdr_decode_AFSFetchStatus(&bp, &dvnode->status, dvnode);
661 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
662
663 _leave(" = 0 [done]");
664 return 0;
665}
666
667/*
668 * FS.Link operation type
669 */
670static const struct afs_call_type afs_RXFSLink = {
671 .name = "FS.Link",
672 .deliver = afs_deliver_fs_link,
673 .abort_to_error = afs_abort_to_error,
674 .destructor = afs_flat_call_destructor,
675};
657 676
658 out_unwait:
659 set_current_state(TASK_RUNNING);
660 remove_wait_queue(&call->waitq, &myself);
661 rxrpc_put_call(call);
662 out_put_conn:
663 afs_server_release_callslot(server, &callslot);
664 out:
665 _leave("");
666 return ret;
667
668 abort:
669 set_current_state(TASK_UNINTERRUPTIBLE);
670 rxrpc_call_abort(call, ret);
671 schedule();
672 goto out_unwait;
673} /* end afs_rxfs_give_up_callback() */
674
675/*****************************************************************************/
676/* 677/*
677 * look a filename up in a directory 678 * make a hard link
678 * - this operation doesn't seem to work correctly in OpenAFS server 1.2.2
679 */ 679 */
680#if 0 680int afs_fs_link(struct afs_server *server,
681int afs_rxfs_lookup(struct afs_server *server, 681 struct key *key,
682 struct afs_vnode *dir, 682 struct afs_vnode *dvnode,
683 const char *filename, 683 struct afs_vnode *vnode,
684 struct afs_vnode *vnode, 684 const char *name,
685 struct afs_volsync *volsync) 685 const struct afs_wait_mode *wait_mode)
686{ 686{
687 struct rxrpc_connection *conn; 687 struct afs_call *call;
688 struct rxrpc_call *call; 688 size_t namesz, reqsz, padsz;
689 struct kvec piov[3]; 689 __be32 *bp;
690 size_t sent;
691 int ret;
692 u32 *bp, zero;
693 690
694 DECLARE_WAITQUEUE(myself, current); 691 _enter("");
695 692
696 kenter("%p,{%u,%u,%u},%s", 693 namesz = strlen(name);
697 server, fid->vid, fid->vnode, fid->unique, filename); 694 padsz = (4 - (namesz & 3)) & 3;
695 reqsz = (5 * 4) + namesz + padsz + (3 * 4);
698 696
699 /* get hold of the fileserver connection */ 697 call = afs_alloc_flat_call(&afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
700 ret = afs_server_get_fsconn(server, &conn); 698 if (!call)
701 if (ret < 0) 699 return -ENOMEM;
702 goto out;
703 700
704 /* create a call through that connection */ 701 call->key = key;
705 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxfs_aemap, &call); 702 call->reply = dvnode;
706 if (ret < 0) { 703 call->reply2 = vnode;
707 printk("kAFS: Unable to create call: %d\n", ret); 704 call->service_id = FS_SERVICE;
708 goto out_put_conn; 705 call->port = htons(AFS_FS_PORT);
706
707 /* marshall the parameters */
708 bp = call->request;
709 *bp++ = htonl(FSLINK);
710 *bp++ = htonl(dvnode->fid.vid);
711 *bp++ = htonl(dvnode->fid.vnode);
712 *bp++ = htonl(dvnode->fid.unique);
713 *bp++ = htonl(namesz);
714 memcpy(bp, name, namesz);
715 bp = (void *) bp + namesz;
716 if (padsz > 0) {
717 memset(bp, 0, padsz);
718 bp = (void *) bp + padsz;
709 } 719 }
710 call->app_opcode = FSLOOKUP; 720 *bp++ = htonl(vnode->fid.vid);
721 *bp++ = htonl(vnode->fid.vnode);
722 *bp++ = htonl(vnode->fid.unique);
723
724 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
725}
726
727/*
728 * deliver reply data to an FS.Symlink
729 */
730static int afs_deliver_fs_symlink(struct afs_call *call,
731 struct sk_buff *skb, bool last)
732{
733 struct afs_vnode *vnode = call->reply;
734 const __be32 *bp;
711 735
712 /* we want to get event notifications from the call */ 736 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
713 add_wait_queue(&call->waitq,&myself); 737
738 afs_transfer_reply(call, skb);
739 if (!last)
740 return 0;
741
742 if (call->reply_size != call->reply_max)
743 return -EBADMSG;
744
745 /* unmarshall the reply once we've received all of it */
746 bp = call->buffer;
747 xdr_decode_AFSFid(&bp, call->reply2);
748 xdr_decode_AFSFetchStatus(&bp, call->reply3, NULL);
749 xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode);
750 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
751
752 _leave(" = 0 [done]");
753 return 0;
754}
755
756/*
757 * FS.Symlink operation type
758 */
759static const struct afs_call_type afs_RXFSSymlink = {
760 .name = "FS.Symlink",
761 .deliver = afs_deliver_fs_symlink,
762 .abort_to_error = afs_abort_to_error,
763 .destructor = afs_flat_call_destructor,
764};
765
766/*
767 * create a symbolic link
768 */
769int afs_fs_symlink(struct afs_server *server,
770 struct key *key,
771 struct afs_vnode *vnode,
772 const char *name,
773 const char *contents,
774 struct afs_fid *newfid,
775 struct afs_file_status *newstatus,
776 const struct afs_wait_mode *wait_mode)
777{
778 struct afs_call *call;
779 size_t namesz, reqsz, padsz, c_namesz, c_padsz;
780 __be32 *bp;
781
782 _enter("");
783
784 namesz = strlen(name);
785 padsz = (4 - (namesz & 3)) & 3;
786
787 c_namesz = strlen(contents);
788 c_padsz = (4 - (c_namesz & 3)) & 3;
789
790 reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
791
792 call = afs_alloc_flat_call(&afs_RXFSSymlink, reqsz,
793 (3 + 21 + 21 + 6) * 4);
794 if (!call)
795 return -ENOMEM;
796
797 call->key = key;
798 call->reply = vnode;
799 call->reply2 = newfid;
800 call->reply3 = newstatus;
801 call->service_id = FS_SERVICE;
802 call->port = htons(AFS_FS_PORT);
714 803
715 /* marshall the parameters */ 804 /* marshall the parameters */
716 bp = rxrpc_call_alloc_scratch(call, 20); 805 bp = call->request;
717 806 *bp++ = htonl(FSSYMLINK);
718 zero = 0; 807 *bp++ = htonl(vnode->fid.vid);
719 808 *bp++ = htonl(vnode->fid.vnode);
720 piov[0].iov_len = 20; 809 *bp++ = htonl(vnode->fid.unique);
721 piov[0].iov_base = bp; 810 *bp++ = htonl(namesz);
722 piov[1].iov_len = strlen(filename); 811 memcpy(bp, name, namesz);
723 piov[1].iov_base = (char *) filename; 812 bp = (void *) bp + namesz;
724 piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3; 813 if (padsz > 0) {
725 piov[2].iov_base = &zero; 814 memset(bp, 0, padsz);
726 815 bp = (void *) bp + padsz;
727 *bp++ = htonl(FSLOOKUP);
728 *bp++ = htonl(dirfid->vid);
729 *bp++ = htonl(dirfid->vnode);
730 *bp++ = htonl(dirfid->unique);
731 *bp++ = htonl(piov[1].iov_len);
732
733 /* send the parameters to the server */
734 ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
735 0, &sent);
736 if (ret < 0)
737 goto abort;
738
739 /* wait for the reply to completely arrive */
740 bp = rxrpc_call_alloc_scratch(call, 220);
741
742 ret = rxrpc_call_read_data(call, bp, 220,
743 RXRPC_CALL_READ_BLOCK |
744 RXRPC_CALL_READ_ALL);
745 if (ret < 0) {
746 if (ret == -ECONNABORTED) {
747 ret = call->app_errno;
748 goto out_unwait;
749 }
750 goto abort;
751 } 816 }
817 *bp++ = htonl(c_namesz);
818 memcpy(bp, contents, c_namesz);
819 bp = (void *) bp + c_namesz;
820 if (c_padsz > 0) {
821 memset(bp, 0, c_padsz);
822 bp = (void *) bp + c_padsz;
823 }
824 *bp++ = htonl(AFS_SET_MODE);
825 *bp++ = 0; /* mtime */
826 *bp++ = 0; /* owner */
827 *bp++ = 0; /* group */
828 *bp++ = htonl(S_IRWXUGO); /* unix mode */
829 *bp++ = 0; /* segment size */
752 830
753 /* unmarshall the reply */ 831 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
754 fid->vid = ntohl(*bp++); 832}
755 fid->vnode = ntohl(*bp++);
756 fid->unique = ntohl(*bp++);
757 833
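Like FS.CreateFile, FS.Symlink finishes with an AFSStoreStatus block: a mask word declaring which attributes are being set, followed by fixed slots for mtime, owner, group, unix mode and segment size. A hypothetical encoder for a mode-only store; the mask value shown is an assumption for illustration, the patch itself uses the AFS_SET_MODE constant from its afs.h:

#include <arpa/inet.h>
#include <stdint.h>

#define EX_SET_MODE 0x8		/* assumed bit; the patch uses AFS_SET_MODE */

/* marshal a mode-only store-status block: six words, with zeroes in
 * the slots the mask declares unset */
static uint32_t *ex_encode_store_status(uint32_t *bp, uint32_t mode)
{
	*bp++ = htonl(EX_SET_MODE);	/* which fields are valid */
	*bp++ = 0;			/* mtime (not set) */
	*bp++ = 0;			/* owner (not set) */
	*bp++ = 0;			/* group (not set) */
	*bp++ = htonl(mode & 07777);	/* unix mode bits (S_IALLUGO) */
	*bp++ = 0;			/* segment size (not set) */
	return bp;
}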
758 vnode->status.if_version = ntohl(*bp++); 834/*
759 vnode->status.type = ntohl(*bp++); 835 * deliver reply data to an FS.Rename
760 vnode->status.nlink = ntohl(*bp++); 836 */
761 vnode->status.size = ntohl(*bp++); 837static int afs_deliver_fs_rename(struct afs_call *call,
762 vnode->status.version = ntohl(*bp++); 838 struct sk_buff *skb, bool last)
763 vnode->status.author = ntohl(*bp++); 839{
764 vnode->status.owner = ntohl(*bp++); 840 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2;
765 vnode->status.caller_access = ntohl(*bp++); 841 const __be32 *bp;
766 vnode->status.anon_access = ntohl(*bp++);
767 vnode->status.mode = ntohl(*bp++);
768 vnode->status.parent.vid = dirfid->vid;
769 vnode->status.parent.vnode = ntohl(*bp++);
770 vnode->status.parent.unique = ntohl(*bp++);
771 bp++; /* seg size */
772 vnode->status.mtime_client = ntohl(*bp++);
773 vnode->status.mtime_server = ntohl(*bp++);
774 bp++; /* group */
775 bp++; /* sync counter */
776 vnode->status.version |= ((unsigned long long) ntohl(*bp++)) << 32;
777 bp++; /* spare2 */
778 bp++; /* spare3 */
779 bp++; /* spare4 */
780 842
781 dir->status.if_version = ntohl(*bp++); 843 _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
782 dir->status.type = ntohl(*bp++); 844
783 dir->status.nlink = ntohl(*bp++); 845 afs_transfer_reply(call, skb);
784 dir->status.size = ntohl(*bp++); 846 if (!last)
785 dir->status.version = ntohl(*bp++); 847 return 0;
786 dir->status.author = ntohl(*bp++); 848
787 dir->status.owner = ntohl(*bp++); 849 if (call->reply_size != call->reply_max)
788 dir->status.caller_access = ntohl(*bp++); 850 return -EBADMSG;
789 dir->status.anon_access = ntohl(*bp++); 851
790 dir->status.mode = ntohl(*bp++); 852 /* unmarshall the reply once we've received all of it */
791 dir->status.parent.vid = dirfid->vid; 853 bp = call->buffer;
792 dir->status.parent.vnode = ntohl(*bp++); 854 xdr_decode_AFSFetchStatus(&bp, &orig_dvnode->status, orig_dvnode);
793 dir->status.parent.unique = ntohl(*bp++); 855 if (new_dvnode != orig_dvnode)
794 bp++; /* seg size */ 856 xdr_decode_AFSFetchStatus(&bp, &new_dvnode->status, new_dvnode);
795 dir->status.mtime_client = ntohl(*bp++); 857 /* xdr_decode_AFSVolSync(&bp, call->replyX); */
796 dir->status.mtime_server = ntohl(*bp++); 858
797 bp++; /* group */ 859 _leave(" = 0 [done]");
798 bp++; /* sync counter */ 860 return 0;
799 dir->status.version |= ((unsigned long long) ntohl(*bp++)) << 32; 861}
800 bp++; /* spare2 */ 862
801 bp++; /* spare3 */ 863/*
802 bp++; /* spare4 */ 864 * FS.Rename operation type
865 */
866static const struct afs_call_type afs_RXFSRename = {
867 .name = "FS.Rename",
868 .deliver = afs_deliver_fs_rename,
869 .abort_to_error = afs_abort_to_error,
870 .destructor = afs_flat_call_destructor,
871};
872
873/*
 874 * rename a file or directory
875 */
876int afs_fs_rename(struct afs_server *server,
877 struct key *key,
878 struct afs_vnode *orig_dvnode,
879 const char *orig_name,
880 struct afs_vnode *new_dvnode,
881 const char *new_name,
882 const struct afs_wait_mode *wait_mode)
883{
884 struct afs_call *call;
885 size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
886 __be32 *bp;
887
888 _enter("");
889
890 o_namesz = strlen(orig_name);
891 o_padsz = (4 - (o_namesz & 3)) & 3;
892
893 n_namesz = strlen(new_name);
894 n_padsz = (4 - (n_namesz & 3)) & 3;
895
896 reqsz = (4 * 4) +
897 4 + o_namesz + o_padsz +
898 (3 * 4) +
899 4 + n_namesz + n_padsz;
900
901 call = afs_alloc_flat_call(&afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
902 if (!call)
903 return -ENOMEM;
904
905 call->key = key;
906 call->reply = orig_dvnode;
907 call->reply2 = new_dvnode;
908 call->service_id = FS_SERVICE;
909 call->port = htons(AFS_FS_PORT);
910
911 /* marshall the parameters */
912 bp = call->request;
913 *bp++ = htonl(FSRENAME);
914 *bp++ = htonl(orig_dvnode->fid.vid);
915 *bp++ = htonl(orig_dvnode->fid.vnode);
916 *bp++ = htonl(orig_dvnode->fid.unique);
917 *bp++ = htonl(o_namesz);
918 memcpy(bp, orig_name, o_namesz);
919 bp = (void *) bp + o_namesz;
920 if (o_padsz > 0) {
921 memset(bp, 0, o_padsz);
922 bp = (void *) bp + o_padsz;
923 }
803 924
804 callback->fid = *fid; 925 *bp++ = htonl(new_dvnode->fid.vid);
805 callback->version = ntohl(*bp++); 926 *bp++ = htonl(new_dvnode->fid.vnode);
806 callback->expiry = ntohl(*bp++); 927 *bp++ = htonl(new_dvnode->fid.unique);
807 callback->type = ntohl(*bp++); 928 *bp++ = htonl(n_namesz);
808 929 memcpy(bp, new_name, n_namesz);
809 if (volsync) { 930 bp = (void *) bp + n_namesz;
810 volsync->creation = ntohl(*bp++); 931 if (n_padsz > 0) {
811 bp++; /* spare2 */ 932 memset(bp, 0, n_padsz);
812 bp++; /* spare3 */ 933 bp = (void *) bp + n_padsz;
813 bp++; /* spare4 */
814 bp++; /* spare5 */
815 bp++; /* spare6 */
816 } 934 }
817 935
818 /* success */ 936 return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
819 ret = 0; 937}
820
821 out_unwait:
822 set_current_state(TASK_RUNNING);
823 remove_wait_queue(&call->waitq, &myself);
824 rxrpc_put_call(call);
825 out_put_conn:
826 afs_server_release_fsconn(server, conn);
827 out:
828 kleave("");
829 return ret;
830
831 abort:
832 set_current_state(TASK_UNINTERRUPTIBLE);
833 rxrpc_call_abort(call, ret);
834 schedule();
835 goto out_unwait;
836} /* end afs_rxfs_lookup() */
837#endif
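That completes fsclient.c: every hand-rolled sequence of rxrpc_create_call(), blocking rxrpc_call_read_data() loops and manual wait queues in the removed column is replaced by a declarative afs_call_type plus a single afs_make_call() under a chosen wait mode. A hypothetical reduction of that table-driven shape, with all ex_* names invented:

#include <stddef.h>

struct ex_call;

/* per-operation hooks, in the style of the new struct afs_call_type */
struct ex_call_type {
	const char *name;
	int (*deliver)(struct ex_call *call, const void *data,
		       size_t len, int last);
	void (*destructor)(struct ex_call *call);
};

struct ex_call {
	const struct ex_call_type *type;
	void *reply;
	int error;
};

/* generic delivery: the hook parses, the dispatcher owns teardown */
static int ex_deliver_packet(struct ex_call *call, const void *data,
			     size_t len, int last)
{
	int ret = call->type->deliver(call, data, len, last);

	if (ret < 0)
		call->error = ret;
	if (ret < 0 || last)
		call->type->destructor(call);
	return ret;
}

The payoff is visible in the diffstat: each RPC shrinks to pure marshal/unmarshal code, while waiting, aborting and cleanup live in one place.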
diff --git a/fs/afs/fsclient.h b/fs/afs/fsclient.h
deleted file mode 100644
index 8ba3e749ee3c..000000000000
--- a/fs/afs/fsclient.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/* fsclient.h: AFS File Server client stub declarations
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_FSCLIENT_H
13#define _LINUX_AFS_FSCLIENT_H
14
15#include "server.h"
16
17extern int afs_rxfs_get_volume_info(struct afs_server *server,
18 const char *name,
19 struct afs_volume_info *vinfo);
20
21extern int afs_rxfs_fetch_file_status(struct afs_server *server,
22 struct afs_vnode *vnode,
23 struct afs_volsync *volsync);
24
25struct afs_rxfs_fetch_descriptor {
26 struct afs_fid fid; /* file ID to fetch */
27 size_t size; /* total number of bytes to fetch */
28 off_t offset; /* offset in file to start from */
29 void *buffer; /* read buffer */
30 size_t actual; /* actual size sent back by server */
31};
32
33extern int afs_rxfs_fetch_file_data(struct afs_server *server,
34 struct afs_vnode *vnode,
35 struct afs_rxfs_fetch_descriptor *desc,
36 struct afs_volsync *volsync);
37
38extern int afs_rxfs_give_up_callback(struct afs_server *server,
39 struct afs_vnode *vnode);
40
41/* this doesn't appear to work in OpenAFS server */
42extern int afs_rxfs_lookup(struct afs_server *server,
43 struct afs_vnode *dir,
44 const char *filename,
45 struct afs_vnode *vnode,
46 struct afs_volsync *volsync);
47
48/* this is apparently mis-implemented in OpenAFS server */
49extern int afs_rxfs_get_root_volume(struct afs_server *server,
50 char *buf,
51 size_t *buflen);
52
53
54#endif /* _LINUX_AFS_FSCLIENT_H */
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 9d9bca6c28b5..c184a4ee5995 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -19,9 +19,6 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/pagemap.h> 21#include <linux/pagemap.h>
22#include "volume.h"
23#include "vnode.h"
24#include "super.h"
25#include "internal.h" 22#include "internal.h"
26 23
27struct afs_iget_data { 24struct afs_iget_data {
@@ -29,26 +26,25 @@ struct afs_iget_data {
29 struct afs_volume *volume; /* volume on which resides */ 26 struct afs_volume *volume; /* volume on which resides */
30}; 27};
31 28
32/*****************************************************************************/
33/* 29/*
34 * map the AFS file status to the inode member variables 30 * map the AFS file status to the inode member variables
35 */ 31 */
36static int afs_inode_map_status(struct afs_vnode *vnode) 32static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
37{ 33{
38 struct inode *inode = AFS_VNODE_TO_I(vnode); 34 struct inode *inode = AFS_VNODE_TO_I(vnode);
39 35
40 _debug("FS: ft=%d lk=%d sz=%Zu ver=%Lu mod=%hu", 36 _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
41 vnode->status.type, 37 vnode->status.type,
42 vnode->status.nlink, 38 vnode->status.nlink,
43 vnode->status.size, 39 (unsigned long long) vnode->status.size,
44 vnode->status.version, 40 vnode->status.data_version,
45 vnode->status.mode); 41 vnode->status.mode);
46 42
47 switch (vnode->status.type) { 43 switch (vnode->status.type) {
48 case AFS_FTYPE_FILE: 44 case AFS_FTYPE_FILE:
49 inode->i_mode = S_IFREG | vnode->status.mode; 45 inode->i_mode = S_IFREG | vnode->status.mode;
50 inode->i_op = &afs_file_inode_operations; 46 inode->i_op = &afs_file_inode_operations;
51 inode->i_fop = &generic_ro_fops; 47 inode->i_fop = &afs_file_operations;
52 break; 48 break;
53 case AFS_FTYPE_DIR: 49 case AFS_FTYPE_DIR:
54 inode->i_mode = S_IFDIR | vnode->status.mode; 50 inode->i_mode = S_IFDIR | vnode->status.mode;
@@ -77,9 +73,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
77 73
78 /* check to see whether a symbolic link is really a mountpoint */ 74 /* check to see whether a symbolic link is really a mountpoint */
79 if (vnode->status.type == AFS_FTYPE_SYMLINK) { 75 if (vnode->status.type == AFS_FTYPE_SYMLINK) {
80 afs_mntpt_check_symlink(vnode); 76 afs_mntpt_check_symlink(vnode, key);
81 77
82 if (vnode->flags & AFS_VNODE_MOUNTPOINT) { 78 if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
83 inode->i_mode = S_IFDIR | vnode->status.mode; 79 inode->i_mode = S_IFDIR | vnode->status.mode;
84 inode->i_op = &afs_mntpt_inode_operations; 80 inode->i_op = &afs_mntpt_inode_operations;
85 inode->i_fop = &afs_mntpt_file_operations; 81 inode->i_fop = &afs_mntpt_file_operations;
@@ -87,30 +83,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode)
87 } 83 }
88 84
89 return 0; 85 return 0;
90} /* end afs_inode_map_status() */ 86}
91 87
92/*****************************************************************************/
93/*
 94 * attempt to fetch the status of an inode, coalescing multiple simultaneous
95 * fetches
96 */
97static int afs_inode_fetch_status(struct inode *inode)
98{
99 struct afs_vnode *vnode;
100 int ret;
101
102 vnode = AFS_FS_I(inode);
103
104 ret = afs_vnode_fetch_status(vnode);
105
106 if (ret == 0)
107 ret = afs_inode_map_status(vnode);
108
109 return ret;
110
111} /* end afs_inode_fetch_status() */
112
113/*****************************************************************************/
114/* 88/*
115 * iget5() comparator 89 * iget5() comparator
116 */ 90 */
@@ -120,9 +94,8 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
120 94
121 return inode->i_ino == data->fid.vnode && 95 return inode->i_ino == data->fid.vnode &&
122 inode->i_version == data->fid.unique; 96 inode->i_version == data->fid.unique;
123} /* end afs_iget5_test() */ 97}
124 98
125/*****************************************************************************/
126/* 99/*
127 * iget5() inode initialiser 100 * iget5() inode initialiser
128 */ 101 */
@@ -137,14 +110,14 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
137 vnode->volume = data->volume; 110 vnode->volume = data->volume;
138 111
139 return 0; 112 return 0;
140} /* end afs_iget5_set() */ 113}
141 114
142/*****************************************************************************/
143/* 115/*
144 * inode retrieval 116 * inode retrieval
145 */ 117 */
146inline int afs_iget(struct super_block *sb, struct afs_fid *fid, 118struct inode *afs_iget(struct super_block *sb, struct key *key,
147 struct inode **_inode) 119 struct afs_fid *fid, struct afs_file_status *status,
120 struct afs_callback *cb)
148{ 121{
149 struct afs_iget_data data = { .fid = *fid }; 122 struct afs_iget_data data = { .fid = *fid };
150 struct afs_super_info *as; 123 struct afs_super_info *as;
@@ -161,20 +134,18 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
161 &data); 134 &data);
162 if (!inode) { 135 if (!inode) {
163 _leave(" = -ENOMEM"); 136 _leave(" = -ENOMEM");
164 return -ENOMEM; 137 return ERR_PTR(-ENOMEM);
165 } 138 }
166 139
140 _debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
141 inode, fid->vid, fid->vnode, fid->unique);
142
167 vnode = AFS_FS_I(inode); 143 vnode = AFS_FS_I(inode);
168 144
169 /* deal with an existing inode */ 145 /* deal with an existing inode */
170 if (!(inode->i_state & I_NEW)) { 146 if (!(inode->i_state & I_NEW)) {
171 ret = afs_vnode_fetch_status(vnode); 147 _leave(" = %p", inode);
172 if (ret==0) 148 return inode;
173 *_inode = inode;
174 else
175 iput(inode);
176 _leave(" = %d", ret);
177 return ret;
178 } 149 }
179 150
180#ifdef AFS_CACHING_SUPPORT 151#ifdef AFS_CACHING_SUPPORT
@@ -186,100 +157,185 @@ inline int afs_iget(struct super_block *sb, struct afs_fid *fid,
186 &vnode->cache); 157 &vnode->cache);
187#endif 158#endif
188 159
189 /* okay... it's a new inode */ 160 if (!status) {
190 inode->i_flags |= S_NOATIME; 161 /* it's a remotely extant inode */
191 vnode->flags |= AFS_VNODE_CHANGED; 162 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
192 ret = afs_inode_fetch_status(inode); 163 ret = afs_vnode_fetch_status(vnode, NULL, key);
193 if (ret<0) 164 if (ret < 0)
165 goto bad_inode;
166 } else {
167 /* it's an inode we just created */
168 memcpy(&vnode->status, status, sizeof(vnode->status));
169
170 if (!cb) {
171 /* it's a symlink we just created (the fileserver
172 * didn't give us a callback) */
173 vnode->cb_version = 0;
174 vnode->cb_expiry = 0;
175 vnode->cb_type = 0;
176 vnode->cb_expires = get_seconds();
177 } else {
178 vnode->cb_version = cb->version;
179 vnode->cb_expiry = cb->expiry;
180 vnode->cb_type = cb->type;
181 vnode->cb_expires = vnode->cb_expiry + get_seconds();
182 }
183 }
184
185 ret = afs_inode_map_status(vnode, key);
186 if (ret < 0)
194 goto bad_inode; 187 goto bad_inode;
195 188
196 /* success */ 189 /* success */
190 clear_bit(AFS_VNODE_UNSET, &vnode->flags);
191 inode->i_flags |= S_NOATIME;
197 unlock_new_inode(inode); 192 unlock_new_inode(inode);
198 193 _leave(" = %p [CB { v=%u t=%u }]", inode, vnode->cb_version, vnode->cb_type);
199 *_inode = inode; 194 return inode;
200 _leave(" = 0 [CB { v=%u x=%lu t=%u }]",
201 vnode->cb_version,
202 vnode->cb_timeout.timo_jif,
203 vnode->cb_type);
204 return 0;
205 195
206 /* failure */ 196 /* failure */
207 bad_inode: 197bad_inode:
208 make_bad_inode(inode); 198 make_bad_inode(inode);
209 unlock_new_inode(inode); 199 unlock_new_inode(inode);
210 iput(inode); 200 iput(inode);
211 201
212 _leave(" = %d [bad]", ret); 202 _leave(" = %d [bad]", ret);
203 return ERR_PTR(ret);
204}
205
206/*
207 * validate a vnode/inode
208 * - there are several things we need to check
209 * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
210 * symlink)
211 * - parent dir metadata changed (security changes)
212 * - dentry data changed (write, truncate)
213 * - dentry metadata changed (security changes)
214 */
215int afs_validate(struct afs_vnode *vnode, struct key *key)
216{
217 int ret;
218
219 _enter("{v={%x:%u} fl=%lx},%x",
220 vnode->fid.vid, vnode->fid.vnode, vnode->flags,
221 key_serial(key));
222
223 if (vnode->cb_promised &&
224 !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
225 !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
226 !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
227 if (vnode->cb_expires < get_seconds() + 10) {
228 _debug("callback expired");
229 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
230 } else {
231 goto valid;
232 }
233 }
234
235 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
236 goto valid;
237
238 mutex_lock(&vnode->validate_lock);
239
240 /* if the promise has expired, we need to check the server again to get
241 * a new promise - note that if the (parent) directory's metadata was
242 * changed then the security may be different and we may no longer have
243 * access */
244 if (!vnode->cb_promised ||
245 test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
246 _debug("not promised");
247 ret = afs_vnode_fetch_status(vnode, NULL, key);
248 if (ret < 0)
249 goto error_unlock;
250 _debug("new promise [fl=%lx]", vnode->flags);
251 }
252
253 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
254 _debug("file already deleted");
255 ret = -ESTALE;
256 goto error_unlock;
257 }
258
259 /* if the vnode's data version number changed then its contents are
260 * different */
261 if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
262 _debug("zap data {%x:%d}", vnode->fid.vid, vnode->fid.vnode);
263 invalidate_remote_inode(&vnode->vfs_inode);
264 }
265
266 clear_bit(AFS_VNODE_MODIFIED, &vnode->flags);
267 mutex_unlock(&vnode->validate_lock);
268valid:
269 _leave(" = 0");
270 return 0;
271
272error_unlock:
273 mutex_unlock(&vnode->validate_lock);
274 _leave(" = %d", ret);
213 return ret; 275 return ret;
214} /* end afs_iget() */ 276}
215 277
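A worked reduction of afs_validate()'s fast path may help: a callback promise is only trusted when it exists, has not been broken or locally modified, and will not expire within the next ten seconds, the same early-expiry margin used above. Illustrative only:

#include <stdbool.h>
#include <time.h>

/* fast-path test from afs_validate(), reduced: only trust a promise
 * that is intact and has at least ten seconds left to run */
static bool ex_promise_valid(bool promised, bool broken, bool modified,
			     time_t cb_expires, time_t now)
{
	if (!promised || broken || modified)
		return false;
	return cb_expires >= now + 10;
}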
216/*****************************************************************************/
217/* 278/*
218 * read the attributes of an inode 279 * read the attributes of an inode
219 */ 280 */
220int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, 281int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry,
221 struct kstat *stat) 282 struct kstat *stat)
222{ 283{
223 struct afs_vnode *vnode;
224 struct inode *inode; 284 struct inode *inode;
225 int ret;
226 285
227 inode = dentry->d_inode; 286 inode = dentry->d_inode;
228 287
229 _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version); 288 _enter("{ ino=%lu v=%lu }", inode->i_ino, inode->i_version);
230 289
231 vnode = AFS_FS_I(inode);
232
233 ret = afs_inode_fetch_status(inode);
234 if (ret == -ENOENT) {
235 _leave(" = %d [%d %p]",
236 ret, atomic_read(&dentry->d_count), dentry->d_inode);
237 return ret;
238 }
239 else if (ret < 0) {
240 make_bad_inode(inode);
241 _leave(" = %d", ret);
242 return ret;
243 }
244
245 /* transfer attributes from the inode structure to the stat
246 * structure */
247 generic_fillattr(inode, stat); 290 generic_fillattr(inode, stat);
248
249 _leave(" = 0 CB { v=%u x=%u t=%u }",
250 vnode->cb_version,
251 vnode->cb_expiry,
252 vnode->cb_type);
253
254 return 0; 291 return 0;
255} /* end afs_inode_getattr() */ 292}
256 293
257/*****************************************************************************/
258/* 294/*
259 * clear an AFS inode 295 * clear an AFS inode
260 */ 296 */
261void afs_clear_inode(struct inode *inode) 297void afs_clear_inode(struct inode *inode)
262{ 298{
299 struct afs_permits *permits;
263 struct afs_vnode *vnode; 300 struct afs_vnode *vnode;
264 301
265 vnode = AFS_FS_I(inode); 302 vnode = AFS_FS_I(inode);
266 303
267 _enter("ino=%lu { vn=%08x v=%u x=%u t=%u }", 304 _enter("{%x:%d.%d} v=%u x=%u t=%u }",
268 inode->i_ino, 305 vnode->fid.vid,
269 vnode->fid.vnode, 306 vnode->fid.vnode,
307 vnode->fid.unique,
270 vnode->cb_version, 308 vnode->cb_version,
271 vnode->cb_expiry, 309 vnode->cb_expiry,
272 vnode->cb_type 310 vnode->cb_type);
273 );
274 311
275 BUG_ON(inode->i_ino != vnode->fid.vnode); 312 _debug("CLEAR INODE %p", inode);
276 313
277 afs_vnode_give_up_callback(vnode); 314 ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
315
316 afs_give_up_callback(vnode);
317
318 if (vnode->server) {
319 spin_lock(&vnode->server->fs_lock);
320 rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes);
321 spin_unlock(&vnode->server->fs_lock);
322 afs_put_server(vnode->server);
323 vnode->server = NULL;
324 }
325
326 ASSERT(!vnode->cb_promised);
278 327
279#ifdef AFS_CACHING_SUPPORT 328#ifdef AFS_CACHING_SUPPORT
280 cachefs_relinquish_cookie(vnode->cache, 0); 329 cachefs_relinquish_cookie(vnode->cache, 0);
281 vnode->cache = NULL; 330 vnode->cache = NULL;
282#endif 331#endif
283 332
333 mutex_lock(&vnode->permits_lock);
334 permits = vnode->permits;
335 rcu_assign_pointer(vnode->permits, NULL);
336 mutex_unlock(&vnode->permits_lock);
337 if (permits)
338 call_rcu(&permits->rcu, afs_zap_permits);
339
284 _leave(""); 340 _leave("");
285} /* end afs_clear_inode() */ 341}
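The permit-cache teardown at the end of afs_clear_inode() is the standard RCU retire pattern: unpublish the pointer under the mutex, then free the old array only after a grace period so lockless readers never see it vanish underneath them. A hypothetical reduction with invented ex_* types, kernel APIs as in mainline:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_permits {
	struct rcu_head rcu;
	int count;
};

struct ex_vnode {
	struct mutex permits_lock;
	struct ex_permits *permits;
};

static void ex_zap_permits(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ex_permits, rcu));
}

/* unpublish under the lock, free after a grace period */
static void ex_clear_permits(struct ex_vnode *vnode)
{
	struct ex_permits *permits;

	mutex_lock(&vnode->permits_lock);
	permits = vnode->permits;
	rcu_assign_pointer(vnode->permits, NULL);
	mutex_unlock(&vnode->permits_lock);
	if (permits)
		call_rcu(&permits->rcu, ex_zap_permits);
}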
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5151d5da2c2f..6dd3197d1d8d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -1,6 +1,6 @@
1/* internal.h: internal AFS stuff 1/* internal AFS stuff
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -9,48 +9,391 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#ifndef AFS_INTERNAL_H
13#define AFS_INTERNAL_H
14
15#include <linux/compiler.h> 12#include <linux/compiler.h>
16#include <linux/kernel.h> 13#include <linux/kernel.h>
17#include <linux/fs.h> 14#include <linux/fs.h>
18#include <linux/pagemap.h> 15#include <linux/pagemap.h>
16#include <linux/skbuff.h>
17#include <linux/rxrpc.h>
18#include <linux/key.h>
19#include "afs.h"
20#include "afs_vl.h"
21
22#define AFS_CELL_MAX_ADDRS 15
23
24struct afs_call;
25
26typedef enum {
27 AFS_VL_NEW, /* new, uninitialised record */
28 AFS_VL_CREATING, /* creating record */
29 AFS_VL_VALID, /* record is pending */
30 AFS_VL_NO_VOLUME, /* no such volume available */
31 AFS_VL_UPDATING, /* update in progress */
32 AFS_VL_VOLUME_DELETED, /* volume was deleted */
33 AFS_VL_UNCERTAIN, /* uncertain state (update failed) */
34} __attribute__((packed)) afs_vlocation_state_t;
35
36struct afs_mount_params {
37 bool rwpath; /* T if the parent should be considered R/W */
38 bool force; /* T to force cell type */
39 afs_voltype_t type; /* type of volume requested */
40 int volnamesz; /* size of volume name */
41 const char *volname; /* name of volume to mount */
42 struct afs_cell *cell; /* cell in which to find volume */
43 struct afs_volume *volume; /* volume record */
44 struct key *key; /* key to use for secure mounting */
45};
19 46
20/* 47/*
21 * debug tracing 48 * definition of how to wait for the completion of an operation
22 */ 49 */
23#define kenter(FMT, a...) printk("==> %s("FMT")\n",__FUNCTION__ , ## a) 50struct afs_wait_mode {
24#define kleave(FMT, a...) printk("<== %s()"FMT"\n",__FUNCTION__ , ## a) 51 /* RxRPC received message notification */
25#define kdebug(FMT, a...) printk(FMT"\n" , ## a) 52 void (*rx_wakeup)(struct afs_call *call);
26#define kproto(FMT, a...) printk("### "FMT"\n" , ## a)
27#define knet(FMT, a...) printk(FMT"\n" , ## a)
28
29#ifdef __KDEBUG
30#define _enter(FMT, a...) kenter(FMT , ## a)
31#define _leave(FMT, a...) kleave(FMT , ## a)
32#define _debug(FMT, a...) kdebug(FMT , ## a)
33#define _proto(FMT, a...) kproto(FMT , ## a)
34#define _net(FMT, a...) knet(FMT , ## a)
35#else
36#define _enter(FMT, a...) do { } while(0)
37#define _leave(FMT, a...) do { } while(0)
38#define _debug(FMT, a...) do { } while(0)
39#define _proto(FMT, a...) do { } while(0)
40#define _net(FMT, a...) do { } while(0)
41#endif
42 53
43static inline void afs_discard_my_signals(void) 54 /* synchronous call waiter and call dispatched notification */
44{ 55 int (*wait)(struct afs_call *call);
45 while (signal_pending(current)) { 56
46 siginfo_t sinfo; 57 /* asynchronous call completion */
58 void (*async_complete)(void *reply, int error);
59};
60
61extern const struct afs_wait_mode afs_sync_call;
62extern const struct afs_wait_mode afs_async_call;
47 63
48 spin_lock_irq(&current->sighand->siglock); 64/*
49 dequeue_signal(current,&current->blocked, &sinfo); 65 * a record of an in-progress RxRPC call
50 spin_unlock_irq(&current->sighand->siglock); 66 */
51 } 67struct afs_call {
68 const struct afs_call_type *type; /* type of call */
69 const struct afs_wait_mode *wait_mode; /* completion wait mode */
70 wait_queue_head_t waitq; /* processes awaiting completion */
71 struct work_struct async_work; /* asynchronous work processor */
72 struct work_struct work; /* actual work processor */
73 struct sk_buff_head rx_queue; /* received packets */
74 struct rxrpc_call *rxcall; /* RxRPC call handle */
75 struct key *key; /* security for this call */
76 struct afs_server *server; /* server affected by incoming CM call */
77 void *request; /* request data (first part) */
78 void *request2; /* request data (second part) */
79 void *buffer; /* reply receive buffer */
80 void *reply; /* reply buffer (first part) */
81 void *reply2; /* reply buffer (second part) */
82 void *reply3; /* reply buffer (third part) */
83 void *reply4; /* reply buffer (fourth part) */
84 enum { /* call state */
85 AFS_CALL_REQUESTING, /* request is being sent for outgoing call */
86 AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */
87 AFS_CALL_AWAIT_OP_ID, /* awaiting op ID on incoming call */
88 AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */
89 AFS_CALL_REPLYING, /* replying to incoming call */
90 AFS_CALL_AWAIT_ACK, /* awaiting final ACK of incoming call */
91 AFS_CALL_COMPLETE, /* successfully completed */
92 AFS_CALL_BUSY, /* server was busy */
93 AFS_CALL_ABORTED, /* call was aborted */
94 AFS_CALL_ERROR, /* call failed due to error */
95 } state;
96 int error; /* error code */
97 unsigned request_size; /* size of request data */
98 unsigned reply_max; /* maximum size of reply */
99 unsigned reply_size; /* current size of reply */
100 unsigned short offset; /* offset into received data store */
101 unsigned char unmarshall; /* unmarshalling phase */
102 bool incoming; /* T if incoming call */
103 u16 service_id; /* RxRPC service ID to call */
104 __be16 port; /* target UDP port */
105 __be32 operation_ID; /* operation ID for an incoming call */
106 u32 count; /* count for use in unmarshalling */
107 __be32 tmp; /* place to extract temporary data */
108};
109
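The unmarshall, offset, count and tmp fields exist so a deliver routine can be re-entered as reply packets trickle in: it records the phase it completed and resumes there on the next skb. A compressed userspace illustration of the idiom, with an invented ex_extract() accumulator; a real one must also buffer partial items across packets:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct ex_call {
	unsigned char unmarshall;	/* phase reached so far */
	uint32_t tmp;			/* scratch for one wire word */
	uint32_t count;			/* elements still to consume */
};

/* simplified accumulator: fail until enough bytes are on hand */
static int ex_extract(const void **data, size_t *len, void *out,
		      size_t wanted)
{
	if (*len < wanted)
		return -1;	/* come back with the next packet */
	memcpy(out, *data, wanted);
	*data = (const char *)*data + wanted;
	*len -= wanted;
	return 0;
}

static int ex_deliver(struct ex_call *call, const void *data, size_t len)
{
	switch (call->unmarshall) {
	case 0:		/* phase 0: read the element count */
		if (ex_extract(&data, &len, &call->tmp, 4) < 0)
			return 0;	/* resume here next time */
		call->count = ntohl(call->tmp);
		call->unmarshall = 1;
		/* fall through */
	case 1:		/* phase 1: consume the elements one by one */
		while (call->count > 0) {
			if (ex_extract(&data, &len, &call->tmp, 4) < 0)
				return 0;
			call->count--;
		}
		call->unmarshall = 2;
	}
	return 0;
}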
110struct afs_call_type {
111 const char *name;
112
 113 /* deliver request or reply data to a call
114 * - returning an error will cause the call to be aborted
115 */
116 int (*deliver)(struct afs_call *call, struct sk_buff *skb,
117 bool last);
118
119 /* map an abort code to an error number */
120 int (*abort_to_error)(u32 abort_code);
121
122 /* clean up a call */
123 void (*destructor)(struct afs_call *call);
124};
125
126/*
127 * AFS superblock private data
128 * - there's one superblock per volume
129 */
130struct afs_super_info {
131 struct afs_volume *volume; /* volume record */
132 char rwparent; /* T if parent is R/W AFS volume */
133};
134
135static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
136{
137 return sb->s_fs_info;
52} 138}
53 139
140extern struct file_system_type afs_fs_type;
141
142/*
143 * entry in the cached cell catalogue
144 */
145struct afs_cache_cell {
146 char name[AFS_MAXCELLNAME]; /* cell name (padded with NULs) */
147 struct in_addr vl_servers[15]; /* cached cell VL servers */
148};
149
150/*
151 * AFS cell record
152 */
153struct afs_cell {
154 atomic_t usage;
155 struct list_head link; /* main cell list link */
156 struct key *anonymous_key; /* anonymous user key for this cell */
157 struct list_head proc_link; /* /proc cell list link */
158 struct proc_dir_entry *proc_dir; /* /proc dir for this cell */
159#ifdef AFS_CACHING_SUPPORT
160 struct cachefs_cookie *cache; /* caching cookie */
161#endif
162
163 /* server record management */
164 rwlock_t servers_lock; /* active server list lock */
165 struct list_head servers; /* active server list */
166
167 /* volume location record management */
168 struct rw_semaphore vl_sem; /* volume management serialisation semaphore */
169 struct list_head vl_list; /* cell's active VL record list */
170 spinlock_t vl_lock; /* vl_list lock */
171 unsigned short vl_naddrs; /* number of VL servers in addr list */
172 unsigned short vl_curr_svix; /* current server index */
173 struct in_addr vl_addrs[AFS_CELL_MAX_ADDRS]; /* cell VL server addresses */
174
175 char name[0]; /* cell name - must go last */
176};
177
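struct afs_cell follows the driver's usage-count convention: afs_get_cell() bumps the atomic counter, afs_put_cell() drops it, and destruction is deferred to a reaper once the count reaches zero. A minimal illustration of the get/put contract, with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

struct ex_cell {
	atomic_int usage;
};

/* take a reference */
static void ex_get_cell(struct ex_cell *cell)
{
	atomic_fetch_add(&cell->usage, 1);
}

/* drop a reference; true means the caller held the last one and the
 * record may now be handed to the reaper */
static bool ex_put_cell(struct ex_cell *cell)
{
	return atomic_fetch_sub(&cell->usage, 1) == 1;
}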
178/*
179 * entry in the cached volume location catalogue
180 */
181struct afs_cache_vlocation {
182 /* volume name (lowercase, padded with NULs) */
183 uint8_t name[AFS_MAXVOLNAME + 1];
184
185 uint8_t nservers; /* number of entries used in servers[] */
186 uint8_t vidmask; /* voltype mask for vid[] */
187 uint8_t srvtmask[8]; /* voltype masks for servers[] */
188#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
189#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
190#define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */
191
192 afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */
193 struct in_addr servers[8]; /* fileserver addresses */
194 time_t rtime; /* last retrieval time */
195};
196
197/*
198 * volume -> vnode hash table entry
199 */
200struct afs_cache_vhash {
201 afs_voltype_t vtype; /* which volume variation */
202 uint8_t hash_bucket; /* which hash bucket this represents */
203} __attribute__((packed));
204
205/*
206 * AFS volume location record
207 */
208struct afs_vlocation {
209 atomic_t usage;
210 time_t time_of_death; /* time at which put reduced usage to 0 */
211 struct list_head link; /* link in cell volume location list */
212 struct list_head grave; /* link in master graveyard list */
213 struct list_head update; /* link in master update list */
214 struct afs_cell *cell; /* cell to which volume belongs */
215#ifdef AFS_CACHING_SUPPORT
216 struct cachefs_cookie *cache; /* caching cookie */
217#endif
218 struct afs_cache_vlocation vldb; /* volume information DB record */
219 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
220 wait_queue_head_t waitq; /* status change waitqueue */
221 time_t update_at; /* time at which record should be updated */
222 spinlock_t lock; /* access lock */
223 afs_vlocation_state_t state; /* volume location state */
224 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
225 unsigned short upd_busy_cnt; /* EBUSY count during update */
226 bool valid; /* T if valid */
227};
228
229/*
230 * AFS fileserver record
231 */
232struct afs_server {
233 atomic_t usage;
234 time_t time_of_death; /* time at which put reduced usage to 0 */
235 struct in_addr addr; /* server address */
236 struct afs_cell *cell; /* cell in which server resides */
237 struct list_head link; /* link in cell's server list */
238 struct list_head grave; /* link in master graveyard list */
239 struct rb_node master_rb; /* link in master by-addr tree */
240 struct rw_semaphore sem; /* access lock */
241
242 /* file service access */
243 struct rb_root fs_vnodes; /* vnodes backed by this server (ordered by FID) */
244 unsigned long fs_act_jif; /* time at which last activity occurred */
245 unsigned long fs_dead_jif; /* time at which no longer to be considered dead */
246 spinlock_t fs_lock; /* access lock */
247 int fs_state; /* 0 or reason FS currently marked dead (-errno) */
248
249 /* callback promise management */
250 struct rb_root cb_promises; /* vnode expiration list (ordered earliest first) */
251 struct delayed_work cb_updater; /* callback updater */
252 struct delayed_work cb_break_work; /* collected break dispatcher */
253 wait_queue_head_t cb_break_waitq; /* space available in cb_break waitqueue */
254 spinlock_t cb_lock; /* access lock */
255 struct afs_callback cb_break[64]; /* ring of callbacks awaiting breaking */
256 atomic_t cb_break_n; /* number of pending breaks */
257 u8 cb_break_head; /* head of callback breaking ring */
258 u8 cb_break_tail; /* tail of callback breaking ring */
259};
260
261/*
262 * AFS volume access record
263 */
264struct afs_volume {
265 atomic_t usage;
266 struct afs_cell *cell; /* cell to which belongs (unrefd ptr) */
267 struct afs_vlocation *vlocation; /* volume location */
268#ifdef AFS_CACHING_SUPPORT
269 struct cachefs_cookie *cache; /* caching cookie */
270#endif
271 afs_volid_t vid; /* volume ID */
272 afs_voltype_t type; /* type of volume */
273 char type_force; /* force volume type (suppress R/O -> R/W) */
274 unsigned short nservers; /* number of server slots filled */
275 unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
276 struct afs_server *servers[8]; /* servers on which volume resides (ordered) */
277 struct rw_semaphore server_sem; /* lock for accessing current server */
278};
279
280/*
281 * vnode catalogue entry
282 */
283struct afs_cache_vnode {
284 afs_vnodeid_t vnode_id; /* vnode ID */
285 unsigned vnode_unique; /* vnode ID uniquifier */
286 afs_dataversion_t data_version; /* data version */
287};
288
289/*
290 * AFS inode private data
291 */
292struct afs_vnode {
293 struct inode vfs_inode; /* the VFS's inode record */
294
295 struct afs_volume *volume; /* volume on which vnode resides */
296 struct afs_server *server; /* server currently supplying this file */
297 struct afs_fid fid; /* the file identifier for this inode */
298 struct afs_file_status status; /* AFS status info for this file */
299#ifdef AFS_CACHING_SUPPORT
300 struct cachefs_cookie *cache; /* caching cookie */
301#endif
302 struct afs_permits *permits; /* cache of permits so far obtained */
303 struct mutex permits_lock; /* lock for altering permits list */
304 struct mutex validate_lock; /* lock for validating this vnode */
305 wait_queue_head_t update_waitq; /* status fetch waitqueue */
306 int update_cnt; /* number of outstanding ops that will update the
307 * status */
308 spinlock_t lock; /* waitqueue/flags lock */
309 unsigned long flags;
310#define AFS_VNODE_CB_BROKEN 0 /* set if vnode's callback was broken */
311#define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */
312#define AFS_VNODE_MODIFIED 2 /* set if vnode's data modified */
313#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
314#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
315#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
316
317 long acl_order; /* ACL check count (callback break count) */
318
319 /* outstanding callback notification on this file */
320 struct rb_node server_rb; /* link in server->fs_vnodes */
321 struct rb_node cb_promise; /* link in server->cb_promises */
322 struct work_struct cb_broken_work; /* work to be done on callback break */
323 time_t cb_expires; /* time at which callback expires */
324 time_t cb_expires_at; /* time used to order cb_promise */
325 unsigned cb_version; /* callback version */
326 unsigned cb_expiry; /* callback expiry time */
327 afs_callback_type_t cb_type; /* type of callback */
328 bool cb_promised; /* true if promise still holds */
329};
330
331/*
332 * cached security record for one user's attempt to access a vnode
333 */
334struct afs_permit {
335 struct key *key; /* RxRPC ticket holding a security context */
336 afs_access_t access_mask; /* access mask for this key */
337};
338
339/*
340 * cache of security records from attempts to access a vnode
341 */
342struct afs_permits {
343 struct rcu_head rcu; /* disposal procedure */
344 int count; /* number of records */
345 struct afs_permit permits[0]; /* the permits so far examined */
346};
347
348/*
349 * record of one of a system's set of network interfaces
350 */
351struct afs_interface {
352 unsigned index; /* interface index */
353 struct in_addr address; /* IPv4 address bound to interface */
354 struct in_addr netmask; /* netmask applied to address */
355 unsigned mtu; /* MTU of interface */
356};
357
358/*
359 * UUID definition [internet draft]
360 * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
361 * increments since midnight 15th October 1582
362 * - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
363 * time
364 * - the clock sequence is a 14-bit counter to avoid duplicate times
365 */
366struct afs_uuid {
367 u32 time_low; /* low part of timestamp */
368 u16 time_mid; /* mid part of timestamp */
369 u16 time_hi_and_version; /* high part of timestamp and version */
370#define AFS_UUID_TO_UNIX_TIME 0x01b21dd213814000
371#define AFS_UUID_TIMEHI_MASK 0x0fff
372#define AFS_UUID_VERSION_TIME 0x1000 /* time-based UUID */
373#define AFS_UUID_VERSION_NAME 0x3000 /* name-based UUID */
374#define AFS_UUID_VERSION_RANDOM 0x4000 /* (pseudo-)random generated UUID */
375 u8 clock_seq_hi_and_reserved; /* clock seq hi and variant */
376#define AFS_UUID_CLOCKHI_MASK 0x3f
377#define AFS_UUID_VARIANT_STD 0x80
378 u8 clock_seq_low; /* clock seq low */
379 u8 node[6]; /* spatially unique node ID (MAC addr) */
380};
381
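The struct afs_uuid comment encodes a small recipe: multiply unix seconds by 10^7 to get 100ns ticks, add AFS_UUID_TO_UNIX_TIME to rebase onto 15 October 1582, then split the 60-bit result 32/16/12 across the three timestamp fields. A standalone sketch of that conversion (ex_* names invented):

#include <stdint.h>
#include <time.h>

#define EX_UUID_TO_UNIX_TIME 0x01b21dd213814000ULL /* 100ns ticks, 1582 to 1970 */

/* derive the three UUID timestamp fields from a unix time */
static void ex_uuid_stamp(time_t t, uint32_t *lo, uint16_t *mid,
			  uint16_t *hi)
{
	uint64_t stamp = (uint64_t)t * 10000000ULL + EX_UUID_TO_UNIX_TIME;

	*lo  = (uint32_t)stamp;			   /* low 32 bits */
	*mid = (uint16_t)(stamp >> 32);		   /* middle 16 bits */
	*hi  = (uint16_t)((stamp >> 48) & 0x0fff); /* top 12 bits */
}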
382/*****************************************************************************/
383/*
384 * callback.c
385 */
386extern void afs_init_callback_state(struct afs_server *);
387extern void afs_broken_callback_work(struct work_struct *);
388extern void afs_break_callbacks(struct afs_server *, size_t,
389 struct afs_callback[]);
390extern void afs_discard_callback_on_delete(struct afs_vnode *);
391extern void afs_give_up_callback(struct afs_vnode *);
392extern void afs_dispatch_give_up_callbacks(struct work_struct *);
393extern void afs_flush_callback_breaks(struct afs_server *);
394extern int __init afs_callback_update_init(void);
395extern void __exit afs_callback_update_kill(void);
396
54/* 397/*
55 * cell.c 398 * cell.c
56 */ 399 */
@@ -60,57 +403,156 @@ extern struct list_head afs_proc_cells;
60extern struct cachefs_index_def afs_cache_cell_index_def; 403extern struct cachefs_index_def afs_cache_cell_index_def;
61#endif 404#endif
62 405
406#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
407extern int afs_cell_init(char *);
408extern struct afs_cell *afs_cell_create(const char *, char *);
409extern struct afs_cell *afs_cell_lookup(const char *, unsigned);
410extern struct afs_cell *afs_grab_cell(struct afs_cell *);
411extern void afs_put_cell(struct afs_cell *);
412extern void afs_cell_purge(void);
413
414/*
415 * cmservice.c
416 */
417extern bool afs_cm_incoming_call(struct afs_call *);
418
63/* 419/*
64 * dir.c 420 * dir.c
65 */ 421 */
66extern const struct inode_operations afs_dir_inode_operations; 422extern const struct inode_operations afs_dir_inode_operations;
67extern const struct file_operations afs_dir_file_operations; 423extern const struct file_operations afs_dir_file_operations;
68 424
425extern int afs_permission(struct inode *, int, struct nameidata *);
426
69/* 427/*
70 * file.c 428 * file.c
71 */ 429 */
72extern const struct address_space_operations afs_fs_aops; 430extern const struct address_space_operations afs_fs_aops;
73extern const struct inode_operations afs_file_inode_operations; 431extern const struct inode_operations afs_file_inode_operations;
432extern const struct file_operations afs_file_operations;
433
434extern int afs_open(struct inode *, struct file *);
435extern int afs_release(struct inode *, struct file *);
74 436
75#ifdef AFS_CACHING_SUPPORT 437#ifdef AFS_CACHING_SUPPORT
76extern int afs_cache_get_page_cookie(struct page *page, 438extern int afs_cache_get_page_cookie(struct page *, struct cachefs_page **);
77 struct cachefs_page **_page_cookie);
78#endif 439#endif
79 440
80/* 441/*
81 * inode.c 442 * fsclient.c
82 */ 443 */
83extern int afs_iget(struct super_block *sb, struct afs_fid *fid, 444extern int afs_fs_fetch_file_status(struct afs_server *, struct key *,
84 struct inode **_inode); 445 struct afs_vnode *, struct afs_volsync *,
85extern int afs_inode_getattr(struct vfsmount *mnt, struct dentry *dentry, 446 const struct afs_wait_mode *);
86 struct kstat *stat); 447extern int afs_fs_give_up_callbacks(struct afs_server *,
87extern void afs_clear_inode(struct inode *inode); 448 const struct afs_wait_mode *);
449extern int afs_fs_fetch_data(struct afs_server *, struct key *,
450 struct afs_vnode *, off_t, size_t, struct page *,
451 const struct afs_wait_mode *);
452extern int afs_fs_create(struct afs_server *, struct key *,
453 struct afs_vnode *, const char *, umode_t,
454 struct afs_fid *, struct afs_file_status *,
455 struct afs_callback *,
456 const struct afs_wait_mode *);
457extern int afs_fs_remove(struct afs_server *, struct key *,
458 struct afs_vnode *, const char *, bool,
459 const struct afs_wait_mode *);
460extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *,
461 struct afs_vnode *, const char *,
462 const struct afs_wait_mode *);
463extern int afs_fs_symlink(struct afs_server *, struct key *,
464 struct afs_vnode *, const char *, const char *,
465 struct afs_fid *, struct afs_file_status *,
466 const struct afs_wait_mode *);
467extern int afs_fs_rename(struct afs_server *, struct key *,
468 struct afs_vnode *, const char *,
469 struct afs_vnode *, const char *,
470 const struct afs_wait_mode *);
88 471
89/* 472/*
90 * key_afs.c 473 * inode.c
91 */ 474 */
92#ifdef CONFIG_KEYS 475extern struct inode *afs_iget(struct super_block *, struct key *,
93extern int afs_key_register(void); 476 struct afs_fid *, struct afs_file_status *,
94extern void afs_key_unregister(void); 477 struct afs_callback *);
95#endif 478extern int afs_validate(struct afs_vnode *, struct key *);
479extern int afs_inode_getattr(struct vfsmount *, struct dentry *,
480 struct kstat *);
481extern void afs_zap_permits(struct rcu_head *);
482extern void afs_clear_inode(struct inode *);
96 483
97/* 484/*
98 * main.c 485 * main.c
99 */ 486 */
487extern struct afs_uuid afs_uuid;
100#ifdef AFS_CACHING_SUPPORT 488#ifdef AFS_CACHING_SUPPORT
101extern struct cachefs_netfs afs_cache_netfs; 489extern struct cachefs_netfs afs_cache_netfs;
102#endif 490#endif
103 491
104/* 492/*
493 * misc.c
494 */
495extern int afs_abort_to_error(u32);
496
497/*
105 * mntpt.c 498 * mntpt.c
106 */ 499 */
107extern const struct inode_operations afs_mntpt_inode_operations; 500extern const struct inode_operations afs_mntpt_inode_operations;
108extern const struct file_operations afs_mntpt_file_operations; 501extern const struct file_operations afs_mntpt_file_operations;
109extern struct afs_timer afs_mntpt_expiry_timer;
110extern struct afs_timer_ops afs_mntpt_expiry_timer_ops;
111extern unsigned long afs_mntpt_expiry_timeout; 502extern unsigned long afs_mntpt_expiry_timeout;
112 503
113extern int afs_mntpt_check_symlink(struct afs_vnode *vnode); 504extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
505extern void afs_mntpt_kill_timer(void);
506extern void afs_umount_begin(struct vfsmount *, int);
507
508/*
509 * proc.c
510 */
511extern int afs_proc_init(void);
512extern void afs_proc_cleanup(void);
513extern int afs_proc_cell_setup(struct afs_cell *);
514extern void afs_proc_cell_remove(struct afs_cell *);
515
516/*
517 * rxrpc.c
518 */
519extern int afs_open_socket(void);
520extern void afs_close_socket(void);
521extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
522 const struct afs_wait_mode *);
523extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
524 size_t, size_t);
525extern void afs_flat_call_destructor(struct afs_call *);
526extern void afs_transfer_reply(struct afs_call *, struct sk_buff *);
527extern void afs_send_empty_reply(struct afs_call *);
528extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
529extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *,
530 size_t);
531
532/*
533 * security.c
534 */
535extern void afs_clear_permits(struct afs_vnode *);
536extern void afs_cache_permit(struct afs_vnode *, struct key *, long);
537extern struct key *afs_request_key(struct afs_cell *);
538extern int afs_permission(struct inode *, int, struct nameidata *);
539
540/*
541 * server.c
542 */
543extern spinlock_t afs_server_peer_lock;
544
545#define afs_get_server(S) \
546do { \
547 _debug("GET SERVER %d", atomic_read(&(S)->usage)); \
548 atomic_inc(&(S)->usage); \
549} while(0)
550
551extern struct afs_server *afs_lookup_server(struct afs_cell *,
552 const struct in_addr *);
553extern struct afs_server *afs_find_server(const struct in_addr *);
554extern void afs_put_server(struct afs_server *);
555extern void __exit afs_purge_servers(void);
114 556
115/* 557/*
116 * super.c 558 * super.c
@@ -118,22 +560,211 @@ extern int afs_mntpt_check_symlink(struct afs_vnode *vnode);
118extern int afs_fs_init(void); 560extern int afs_fs_init(void);
119extern void afs_fs_exit(void); 561extern void afs_fs_exit(void);
120 562
121#define AFS_CB_HASH_COUNT (PAGE_SIZE / sizeof(struct list_head)) 563/*
564 * use-rtnetlink.c
565 */
566extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool);
567extern int afs_get_MAC_address(u8 [6]);
122 568
123extern struct list_head afs_cb_hash_tbl[]; 569/*
124extern spinlock_t afs_cb_hash_lock; 570 * vlclient.c
571 */
572#ifdef AFS_CACHING_SUPPORT
573extern struct cachefs_index_def afs_vlocation_cache_index_def;
574#endif
125 575
126#define afs_cb_hash(SRV,FID) \ 576extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *,
127 afs_cb_hash_tbl[((unsigned long)(SRV) + \ 577 const char *, struct afs_cache_vlocation *,
128 (FID)->vid + (FID)->vnode + (FID)->unique) % \ 578 const struct afs_wait_mode *);
129 AFS_CB_HASH_COUNT] 579extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *,
580 afs_volid_t, afs_voltype_t,
581 struct afs_cache_vlocation *,
582 const struct afs_wait_mode *);
130 583
131/* 584/*
132 * proc.c 585 * vlocation.c
133 */ 586 */
134extern int afs_proc_init(void); 587#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
135extern void afs_proc_cleanup(void); 588
136extern int afs_proc_cell_setup(struct afs_cell *cell); 589extern int __init afs_vlocation_update_init(void);
137extern void afs_proc_cell_remove(struct afs_cell *cell); 590extern struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *,
591 struct key *,
592 const char *, size_t);
593extern void afs_put_vlocation(struct afs_vlocation *);
594extern void __exit afs_vlocation_purge(void);
595
596/*
597 * vnode.c
598 */
599#ifdef AFS_CACHING_SUPPORT
600extern struct cachefs_index_def afs_vnode_cache_index_def;
601#endif
602
603extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
604
605static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
606{
607 return container_of(inode, struct afs_vnode, vfs_inode);
608}
609
610static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
611{
612 return &vnode->vfs_inode;
613}
614
615extern void afs_vnode_finalise_status_update(struct afs_vnode *,
616 struct afs_server *);
617extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *,
618 struct key *);
619extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *,
620 off_t, size_t, struct page *);
621extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *,
622 umode_t, struct afs_fid *, struct afs_file_status *,
623 struct afs_callback *, struct afs_server **);
624extern int afs_vnode_remove(struct afs_vnode *, struct key *, const char *,
625 bool);
626extern int afs_vnode_link(struct afs_vnode *, struct afs_vnode *, struct key *,
627 const char *);
628extern int afs_vnode_symlink(struct afs_vnode *, struct key *, const char *,
629 const char *, struct afs_fid *,
630 struct afs_file_status *, struct afs_server **);
631extern int afs_vnode_rename(struct afs_vnode *, struct afs_vnode *,
632 struct key *, const char *, const char *);
633
634/*
635 * volume.c
636 */
637#ifdef AFS_CACHING_SUPPORT
638extern struct cachefs_index_def afs_volume_cache_index_def;
639#endif
640
641#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
642
643extern void afs_put_volume(struct afs_volume *);
644extern struct afs_volume *afs_volume_lookup(struct afs_mount_params *);
645extern struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *);
646extern int afs_volume_release_fileserver(struct afs_vnode *,
647 struct afs_server *, int);
648
649/*****************************************************************************/
650/*
651 * debug tracing
652 */
653extern unsigned afs_debug;
654
655#define dbgprintk(FMT,...) \
656 printk("[%x%-6.6s] "FMT"\n", smp_processor_id(), current->comm ,##__VA_ARGS__)
657
658/* make sure we maintain the format strings, even when debugging is disabled */
659static inline __attribute__((format(printf,1,2)))
660void _dbprintk(const char *fmt, ...)
661{
662}
663
664#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
665#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
666#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
667
668
669#if defined(__KDEBUG)
670#define _enter(FMT,...) kenter(FMT,##__VA_ARGS__)
671#define _leave(FMT,...) kleave(FMT,##__VA_ARGS__)
672#define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__)
673
674#elif defined(CONFIG_AFS_DEBUG)
675#define AFS_DEBUG_KENTER 0x01
676#define AFS_DEBUG_KLEAVE 0x02
677#define AFS_DEBUG_KDEBUG 0x04
678
679#define _enter(FMT,...) \
680do { \
681 if (unlikely(afs_debug & AFS_DEBUG_KENTER)) \
682 kenter(FMT,##__VA_ARGS__); \
683} while (0)
684
685#define _leave(FMT,...) \
686do { \
687 if (unlikely(afs_debug & AFS_DEBUG_KLEAVE)) \
688 kleave(FMT,##__VA_ARGS__); \
689} while (0)
690
691#define _debug(FMT,...) \
692do { \
693 if (unlikely(afs_debug & AFS_DEBUG_KDEBUG)) \
694 kdebug(FMT,##__VA_ARGS__); \
695} while (0)
696
697#else
698#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
699#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
700#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__)
701#endif
702
703/*
704 * debug assertion checking
705 */
706#if 1 // defined(__KDEBUGALL)
707
708#define ASSERT(X) \
709do { \
710 if (unlikely(!(X))) { \
711 printk(KERN_ERR "\n"); \
712 printk(KERN_ERR "AFS: Assertion failed\n"); \
713 BUG(); \
714 } \
715} while(0)
716
717#define ASSERTCMP(X, OP, Y) \
718do { \
719 if (unlikely(!((X) OP (Y)))) { \
720 printk(KERN_ERR "\n"); \
721 printk(KERN_ERR "AFS: Assertion failed\n"); \
722 printk(KERN_ERR "%lu " #OP " %lu is false\n", \
723 (unsigned long)(X), (unsigned long)(Y)); \
724 printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
725 (unsigned long)(X), (unsigned long)(Y)); \
726 BUG(); \
727 } \
728} while(0)
729
730#define ASSERTIF(C, X) \
731do { \
732 if (unlikely((C) && !(X))) { \
733 printk(KERN_ERR "\n"); \
734 printk(KERN_ERR "AFS: Assertion failed\n"); \
735 BUG(); \
736 } \
737} while(0)
738
739#define ASSERTIFCMP(C, X, OP, Y) \
740do { \
741 if (unlikely((C) && !((X) OP (Y)))) { \
742 printk(KERN_ERR "\n"); \
743 printk(KERN_ERR "AFS: Assertion failed\n"); \
744 printk(KERN_ERR "%lu " #OP " %lu is false\n", \
745 (unsigned long)(X), (unsigned long)(Y)); \
746 printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \
747 (unsigned long)(X), (unsigned long)(Y)); \
748 BUG(); \
749 } \
750} while(0)
751
752#else
753
754#define ASSERT(X) \
755do { \
756} while(0)
757
758#define ASSERTCMP(X, OP, Y) \
759do { \
760} while(0)
761
762#define ASSERTIF(C, X) \
763do { \
764} while(0)
765
766#define ASSERTIFCMP(C, X, OP, Y) \
767do { \
768} while(0)
138 769
139#endif /* AFS_INTERNAL_H */ 770#endif /* __KDEBUGALL */
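
An editorial aside on the struct afs_uuid added at the top of this header: the 60-bit timestamp split described there is easier to see with the arithmetic written out. The following stand-alone userspace sketch (not part of the patch; it mirrors what afs_get_client_UUID() does in main.c further down, with a ULL suffix added to the constant so it is unambiguous in userspace) scales Unix time to 100ns units, rebases it to the 1582 Gregorian epoch, and splits it 32/16/12:

/* Illustration only: derive the three UUID time fields from Unix time. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define AFS_UUID_TO_UNIX_TIME	0x01b21dd213814000ULL	/* 1582-10-15 to 1970-01-01 in 100ns units */
#define AFS_UUID_TIMEHI_MASK	0x0fff
#define AFS_UUID_VERSION_TIME	0x1000

int main(void)
{
	struct timespec ts;
	uint64_t uuidtime;
	uint32_t time_low;
	uint16_t time_mid, time_hi_and_version;

	clock_gettime(CLOCK_REALTIME, &ts);
	uuidtime  = (uint64_t) ts.tv_sec * 1000 * 1000 * 10;	/* seconds in 100ns units */
	uuidtime += ts.tv_nsec / 100;				/* sub-second remainder */
	uuidtime += AFS_UUID_TO_UNIX_TIME;			/* rebase to the UUID epoch */

	time_low = uuidtime;					/* low 32 bits */
	time_mid = uuidtime >> 32;				/* middle 16 bits */
	time_hi_and_version  = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
	time_hi_and_version |= AFS_UUID_VERSION_TIME;		/* mark as a time-based UUID */

	printf("%08x-%04x-%04x-...\n", time_low, time_mid, time_hi_and_version);
	return 0;
}

The 12 bits that survive AFS_UUID_TIMEHI_MASK are why the timestamp is described as split 32/16/12, and OR-ing (rather than plainly assigning) the version bits is what keeps those 12 bits intact — which is also why the corresponding assignments in afs_get_client_UUID() below use |=.
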
diff --git a/fs/afs/kafsasyncd.c b/fs/afs/kafsasyncd.c
deleted file mode 100644
index 615df2407cb2..000000000000
--- a/fs/afs/kafsasyncd.c
+++ /dev/null
@@ -1,255 +0,0 @@
1/* kafsasyncd.c: AFS asynchronous operation daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 *
12 * The AFS async daemon is used to do the following:
13 * - probe "dead" servers to see whether they've come back to life yet
14 * - probe "live" servers that we haven't talked to for a while to see if
15 * they are better candidates for serving than what we're currently using
16 * - poll volume location servers to keep the volume location lists up to date
17 */
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/sched.h>
22#include <linux/completion.h>
23#include <linux/freezer.h>
24#include "cell.h"
25#include "server.h"
26#include "volume.h"
27#include "kafsasyncd.h"
28#include "kafstimod.h"
29#include <rxrpc/call.h>
30#include <asm/errno.h>
31#include "internal.h"
32
33static DECLARE_COMPLETION(kafsasyncd_alive);
34static DECLARE_COMPLETION(kafsasyncd_dead);
35static DECLARE_WAIT_QUEUE_HEAD(kafsasyncd_sleepq);
36static struct task_struct *kafsasyncd_task;
37static int kafsasyncd_die;
38
39static int kafsasyncd(void *arg);
40
41static LIST_HEAD(kafsasyncd_async_attnq);
42static LIST_HEAD(kafsasyncd_async_busyq);
43static DEFINE_SPINLOCK(kafsasyncd_async_lock);
44
45static void kafsasyncd_null_call_attn_func(struct rxrpc_call *call)
46{
47}
48
49static void kafsasyncd_null_call_error_func(struct rxrpc_call *call)
50{
51}
52
53/*****************************************************************************/
54/*
55 * start the async daemon
56 */
57int afs_kafsasyncd_start(void)
58{
59 int ret;
60
61 ret = kernel_thread(kafsasyncd, NULL, 0);
62 if (ret < 0)
63 return ret;
64
65 wait_for_completion(&kafsasyncd_alive);
66
67 return ret;
68} /* end afs_kafsasyncd_start() */
69
70/*****************************************************************************/
71/*
72 * stop the async daemon
73 */
74void afs_kafsasyncd_stop(void)
75{
76 /* get rid of my daemon */
77 kafsasyncd_die = 1;
78 wake_up(&kafsasyncd_sleepq);
79 wait_for_completion(&kafsasyncd_dead);
80
81} /* end afs_kafsasyncd_stop() */
82
83/*****************************************************************************/
84/*
85 * probing daemon
86 */
87static int kafsasyncd(void *arg)
88{
89 struct afs_async_op *op;
90 int die;
91
92 DECLARE_WAITQUEUE(myself, current);
93
94 kafsasyncd_task = current;
95
96 printk("kAFS: Started kafsasyncd %d\n", current->pid);
97
98 daemonize("kafsasyncd");
99
100 complete(&kafsasyncd_alive);
101
102 /* loop around looking for things to attend to */
103 do {
104 set_current_state(TASK_INTERRUPTIBLE);
105 add_wait_queue(&kafsasyncd_sleepq, &myself);
106
107 for (;;) {
108 if (!list_empty(&kafsasyncd_async_attnq) ||
109 signal_pending(current) ||
110 kafsasyncd_die)
111 break;
112
113 schedule();
114 set_current_state(TASK_INTERRUPTIBLE);
115 }
116
117 remove_wait_queue(&kafsasyncd_sleepq, &myself);
118 set_current_state(TASK_RUNNING);
119
120 try_to_freeze();
121
122 /* discard pending signals */
123 afs_discard_my_signals();
124
125 die = kafsasyncd_die;
126
127 /* deal with the next asynchronous operation requiring
128 * attention */
129 if (!list_empty(&kafsasyncd_async_attnq)) {
130 struct afs_async_op *op;
131
132 _debug("@@@ Begin Asynchronous Operation");
133
134 op = NULL;
135 spin_lock(&kafsasyncd_async_lock);
136
137 if (!list_empty(&kafsasyncd_async_attnq)) {
138 op = list_entry(kafsasyncd_async_attnq.next,
139 struct afs_async_op, link);
140 list_move_tail(&op->link,
141 &kafsasyncd_async_busyq);
142 }
143
144 spin_unlock(&kafsasyncd_async_lock);
145
146 _debug("@@@ Operation %p {%p}\n",
147 op, op ? op->ops : NULL);
148
149 if (op)
150 op->ops->attend(op);
151
152 _debug("@@@ End Asynchronous Operation");
153 }
154
155 } while(!die);
156
157 /* need to kill all outstanding asynchronous operations before
158 * exiting */
159 kafsasyncd_task = NULL;
160 spin_lock(&kafsasyncd_async_lock);
161
162 /* fold the busy and attention queues together */
163 list_splice_init(&kafsasyncd_async_busyq,
164 &kafsasyncd_async_attnq);
165
166 /* dequeue kafsasyncd from all the outstanding calls' wait queues */
167 list_for_each_entry(op, &kafsasyncd_async_attnq, link) {
168 op->call->app_attn_func = kafsasyncd_null_call_attn_func;
169 op->call->app_error_func = kafsasyncd_null_call_error_func;
170 remove_wait_queue(&op->call->waitq, &op->waiter);
171 }
172
173 spin_unlock(&kafsasyncd_async_lock);
174
175 /* abort all the operations */
176 while (!list_empty(&kafsasyncd_async_attnq)) {
177 op = list_entry(kafsasyncd_async_attnq.next, struct afs_async_op, link);
178 list_del_init(&op->link);
179
180 rxrpc_call_abort(op->call, -EIO);
181 rxrpc_put_call(op->call);
182 op->call = NULL;
183
184 op->ops->discard(op);
185 }
186
187 /* and that's all */
188 _leave("");
189 complete_and_exit(&kafsasyncd_dead, 0);
190
191} /* end kafsasyncd() */
192
193/*****************************************************************************/
194/*
195 * begin an operation
196 * - place operation on busy queue
197 */
198void afs_kafsasyncd_begin_op(struct afs_async_op *op)
199{
200 _enter("");
201
202 spin_lock(&kafsasyncd_async_lock);
203
204 init_waitqueue_entry(&op->waiter, kafsasyncd_task);
205 add_wait_queue(&op->call->waitq, &op->waiter);
206
207 list_move_tail(&op->link, &kafsasyncd_async_busyq);
208
209 spin_unlock(&kafsasyncd_async_lock);
210
211 _leave("");
212} /* end afs_kafsasyncd_begin_op() */
213
214/*****************************************************************************/
215/*
216 * request attention for an operation
217 * - move to attention queue
218 */
219void afs_kafsasyncd_attend_op(struct afs_async_op *op)
220{
221 _enter("");
222
223 spin_lock(&kafsasyncd_async_lock);
224
225 list_move_tail(&op->link, &kafsasyncd_async_attnq);
226
227 spin_unlock(&kafsasyncd_async_lock);
228
229 wake_up(&kafsasyncd_sleepq);
230
231 _leave("");
232} /* end afs_kafsasyncd_attend_op() */
233
234/*****************************************************************************/
235/*
236 * terminate an operation
237 * - remove from either queue
238 */
239void afs_kafsasyncd_terminate_op(struct afs_async_op *op)
240{
241 _enter("");
242
243 spin_lock(&kafsasyncd_async_lock);
244
245 if (!list_empty(&op->link)) {
246 list_del_init(&op->link);
247 remove_wait_queue(&op->call->waitq, &op->waiter);
248 }
249
250 spin_unlock(&kafsasyncd_async_lock);
251
252 wake_up(&kafsasyncd_sleepq);
253
254 _leave("");
255} /* end afs_kafsasyncd_terminate_op() */
diff --git a/fs/afs/kafsasyncd.h b/fs/afs/kafsasyncd.h
deleted file mode 100644
index 791803f9a6fb..000000000000
--- a/fs/afs/kafsasyncd.h
+++ /dev/null
@@ -1,52 +0,0 @@
1/* kafsasyncd.h: AFS asynchronous operation daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_KAFSASYNCD_H
13#define _LINUX_AFS_KAFSASYNCD_H
14
15#include "types.h"
16
17struct afs_async_op;
18
19struct afs_async_op_ops {
20 void (*attend)(struct afs_async_op *op);
21 void (*discard)(struct afs_async_op *op);
22};
23
24/*****************************************************************************/
25/*
26 * asynchronous operation record
27 */
28struct afs_async_op
29{
30 struct list_head link;
31 struct afs_server *server; /* server being contacted */
32 struct rxrpc_call *call; /* RxRPC call performing op */
33 wait_queue_t waiter; /* wait queue for kafsasyncd */
34 const struct afs_async_op_ops *ops; /* operations */
35};
36
37static inline void afs_async_op_init(struct afs_async_op *op,
38 const struct afs_async_op_ops *ops)
39{
40 INIT_LIST_HEAD(&op->link);
41 op->call = NULL;
42 op->ops = ops;
43}
44
45extern int afs_kafsasyncd_start(void);
46extern void afs_kafsasyncd_stop(void);
47
48extern void afs_kafsasyncd_begin_op(struct afs_async_op *op);
49extern void afs_kafsasyncd_attend_op(struct afs_async_op *op);
50extern void afs_kafsasyncd_terminate_op(struct afs_async_op *op);
51
52#endif /* _LINUX_AFS_KAFSASYNCD_H */
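
To see what the interface deleted here looked like in use, here is a hypothetical caller sketched purely from the declarations above — the my_* names are invented for illustration; only the afs_* symbols and types come from this header:

/* Hypothetical user of the removed kafsasyncd interface (illustration only). */
static void my_op_attend(struct afs_async_op *op)
{
	/* drive the RxRPC call attached to this operation forwards */
}

static void my_op_discard(struct afs_async_op *op)
{
	/* release per-operation resources when the daemon abandons it */
}

static const struct afs_async_op_ops my_op_ops = {
	.attend		= my_op_attend,
	.discard	= my_op_discard,
};

static void my_submit(struct afs_async_op *op, struct rxrpc_call *call)
{
	afs_async_op_init(op, &my_op_ops);
	op->call = call;			/* the call whose waitq the daemon parks on */
	afs_kafsasyncd_begin_op(op);		/* join the busy queue */
	afs_kafsasyncd_attend_op(op);		/* ask the daemon to attend to it */
}

In the rewritten client this role is broadly taken over by the afs_call and afs_wait_mode machinery declared under rxrpc.c in the new internal.h.
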
diff --git a/fs/afs/kafstimod.c b/fs/afs/kafstimod.c
deleted file mode 100644
index 694344e4d3c7..000000000000
--- a/fs/afs/kafstimod.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/* kafstimod.c: AFS timeout daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/completion.h>
16#include <linux/freezer.h>
17#include "cell.h"
18#include "volume.h"
19#include "kafstimod.h"
20#include <asm/errno.h>
21#include "internal.h"
22
23static DECLARE_COMPLETION(kafstimod_alive);
24static DECLARE_COMPLETION(kafstimod_dead);
25static DECLARE_WAIT_QUEUE_HEAD(kafstimod_sleepq);
26static int kafstimod_die;
27
28static LIST_HEAD(kafstimod_list);
29static DEFINE_SPINLOCK(kafstimod_lock);
30
31static int kafstimod(void *arg);
32
33/*****************************************************************************/
34/*
35 * start the timeout daemon
36 */
37int afs_kafstimod_start(void)
38{
39 int ret;
40
41 ret = kernel_thread(kafstimod, NULL, 0);
42 if (ret < 0)
43 return ret;
44
45 wait_for_completion(&kafstimod_alive);
46
47 return ret;
48} /* end afs_kafstimod_start() */
49
50/*****************************************************************************/
51/*
52 * stop the timeout daemon
53 */
54void afs_kafstimod_stop(void)
55{
56 /* get rid of my daemon */
57 kafstimod_die = 1;
58 wake_up(&kafstimod_sleepq);
59 wait_for_completion(&kafstimod_dead);
60
61} /* end afs_kafstimod_stop() */
62
63/*****************************************************************************/
64/*
65 * timeout processing daemon
66 */
67static int kafstimod(void *arg)
68{
69 struct afs_timer *timer;
70
71 DECLARE_WAITQUEUE(myself, current);
72
73 printk("kAFS: Started kafstimod %d\n", current->pid);
74
75 daemonize("kafstimod");
76
77 complete(&kafstimod_alive);
78
79 /* loop around looking for things to attend to */
80 loop:
81 set_current_state(TASK_INTERRUPTIBLE);
82 add_wait_queue(&kafstimod_sleepq, &myself);
83
84 for (;;) {
85 unsigned long jif;
86 signed long timeout;
87
88 /* deal with the daemon being asked to die */
89 if (kafstimod_die) {
90 remove_wait_queue(&kafstimod_sleepq, &myself);
91 _leave("");
92 complete_and_exit(&kafstimod_dead, 0);
93 }
94
95 try_to_freeze();
96
97 /* discard pending signals */
98 afs_discard_my_signals();
99
100 /* work out the time to elapse before the next event */
101 spin_lock(&kafstimod_lock);
102 if (list_empty(&kafstimod_list)) {
103 timeout = MAX_SCHEDULE_TIMEOUT;
104 }
105 else {
106 timer = list_entry(kafstimod_list.next,
107 struct afs_timer, link);
108 timeout = timer->timo_jif;
109 jif = jiffies;
110
111 if (time_before_eq((unsigned long) timeout, jif))
112 goto immediate;
113
114 else {
115 timeout = (long) timeout - (long) jiffies;
116 }
117 }
118 spin_unlock(&kafstimod_lock);
119
120 schedule_timeout(timeout);
121
122 set_current_state(TASK_INTERRUPTIBLE);
123 }
124
125 /* the thing on the front of the queue needs processing
126 * - we come here with the lock held and timer pointing to the expired
127 * entry
128 */
129 immediate:
130 remove_wait_queue(&kafstimod_sleepq, &myself);
131 set_current_state(TASK_RUNNING);
132
133 _debug("@@@ Begin Timeout of %p", timer);
134
135 /* dequeue the timer */
136 list_del_init(&timer->link);
137 spin_unlock(&kafstimod_lock);
138
139 /* call the timeout function */
140 timer->ops->timed_out(timer);
141
142 _debug("@@@ End Timeout");
143 goto loop;
144
145} /* end kafstimod() */
146
147/*****************************************************************************/
148/*
149 * (re-)queue a timer
150 */
151void afs_kafstimod_add_timer(struct afs_timer *timer, unsigned long timeout)
152{
153 struct afs_timer *ptimer;
154 struct list_head *_p;
155
156 _enter("%p,%lu", timer, timeout);
157
158 spin_lock(&kafstimod_lock);
159
160 list_del(&timer->link);
161
162 /* the timer was deferred or reset - put it back in the queue at the
163 * right place */
164 timer->timo_jif = jiffies + timeout;
165
166 list_for_each(_p, &kafstimod_list) {
167 ptimer = list_entry(_p, struct afs_timer, link);
168 if (time_before(timer->timo_jif, ptimer->timo_jif))
169 break;
170 }
171
172 list_add_tail(&timer->link, _p); /* insert before stopping point */
173
174 spin_unlock(&kafstimod_lock);
175
176 wake_up(&kafstimod_sleepq);
177
178 _leave("");
179} /* end afs_kafstimod_add_timer() */
180
181/*****************************************************************************/
182/*
183 * dequeue a timer
184 * - returns 0 if the timer was deleted or -ENOENT if it wasn't queued
185 */
186int afs_kafstimod_del_timer(struct afs_timer *timer)
187{
188 int ret = 0;
189
190 _enter("%p", timer);
191
192 spin_lock(&kafstimod_lock);
193
194 if (list_empty(&timer->link))
195 ret = -ENOENT;
196 else
197 list_del_init(&timer->link);
198
199 spin_unlock(&kafstimod_lock);
200
201 wake_up(&kafstimod_sleepq);
202
203 _leave(" = %d", ret);
204 return ret;
205} /* end afs_kafstimod_del_timer() */
diff --git a/fs/afs/kafstimod.h b/fs/afs/kafstimod.h
deleted file mode 100644
index e312f1a61a7f..000000000000
--- a/fs/afs/kafstimod.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/* kafstimod.h: AFS timeout daemon
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_KAFSTIMOD_H
13#define _LINUX_AFS_KAFSTIMOD_H
14
15#include "types.h"
16
17struct afs_timer;
18
19struct afs_timer_ops {
20 /* called when the front of the timer queue has timed out */
21 void (*timed_out)(struct afs_timer *timer);
22};
23
24/*****************************************************************************/
25/*
26 * AFS timer/timeout record
27 */
28struct afs_timer
29{
30 struct list_head link; /* link in timer queue */
31 unsigned long timo_jif; /* timeout time */
32 const struct afs_timer_ops *ops; /* timeout expiry function */
33};
34
35static inline void afs_timer_init(struct afs_timer *timer,
36 const struct afs_timer_ops *ops)
37{
38 INIT_LIST_HEAD(&timer->link);
39 timer->ops = ops;
40}
41
42extern int afs_kafstimod_start(void);
43extern void afs_kafstimod_stop(void);
44
45extern void afs_kafstimod_add_timer(struct afs_timer *timer,
46 unsigned long timeout);
47extern int afs_kafstimod_del_timer(struct afs_timer *timer);
48
49#endif /* _LINUX_AFS_KAFSTIMOD_H */
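
An editorial aside for readers tracing this removal: the pre-patch mntpt.c, whose deletion appears further down in this diff, was a typical client of this timer interface. Reconstructed from those removed lines, the usage pattern was:

/* Usage pattern reconstructed from the old mntpt.c (removed below). */
static void afs_mntpt_expiry_timed_out(struct afs_timer *timer);

struct afs_timer_ops afs_mntpt_expiry_timer_ops = {
	.timed_out	= afs_mntpt_expiry_timed_out,
};

struct afs_timer afs_mntpt_expiry_timer;

static void afs_mntpt_expiry_timed_out(struct afs_timer *timer)
{
	mark_mounts_for_expiry(&afs_vfsmounts);

	/* (re-)queue ourselves for another interval */
	afs_kafstimod_add_timer(&afs_mntpt_expiry_timer,
				afs_mntpt_expiry_timeout * HZ);
}

/* armed once at init with afs_timer_init() + afs_kafstimod_add_timer() */

The replacement, visible in the new mntpt.c below, collapses all of this into a single DECLARE_DELAYED_WORK() plus schedule_delayed_work(), letting the shared workqueue do what kafstimod used to.
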
diff --git a/fs/afs/main.c b/fs/afs/main.c
index f2704ba53857..40c2704e7557 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -1,4 +1,4 @@
1/* main.c: AFS client file system 1/* AFS client file system
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -13,43 +13,21 @@
13#include <linux/moduleparam.h> 13#include <linux/moduleparam.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/completion.h> 15#include <linux/completion.h>
16#include <rxrpc/rxrpc.h>
17#include <rxrpc/transport.h>
18#include <rxrpc/call.h>
19#include <rxrpc/peer.h>
20#include "cache.h"
21#include "cell.h"
22#include "server.h"
23#include "fsclient.h"
24#include "cmservice.h"
25#include "kafstimod.h"
26#include "kafsasyncd.h"
27#include "internal.h" 16#include "internal.h"
28 17
29struct rxrpc_transport *afs_transport;
30
31static int afs_adding_peer(struct rxrpc_peer *peer);
32static void afs_discarding_peer(struct rxrpc_peer *peer);
33
34
35MODULE_DESCRIPTION("AFS Client File System"); 18MODULE_DESCRIPTION("AFS Client File System");
36MODULE_AUTHOR("Red Hat, Inc."); 19MODULE_AUTHOR("Red Hat, Inc.");
37MODULE_LICENSE("GPL"); 20MODULE_LICENSE("GPL");
38 21
22unsigned afs_debug;
23module_param_named(debug, afs_debug, uint, S_IWUSR | S_IRUGO);
24MODULE_PARM_DESC(afs_debug, "AFS debugging mask");
25
39static char *rootcell; 26static char *rootcell;
40 27
41module_param(rootcell, charp, 0); 28module_param(rootcell, charp, 0);
42MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); 29MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
43 30
44
45static struct rxrpc_peer_ops afs_peer_ops = {
46 .adding = afs_adding_peer,
47 .discarding = afs_discarding_peer,
48};
49
50struct list_head afs_cb_hash_tbl[AFS_CB_HASH_COUNT];
51DEFINE_SPINLOCK(afs_cb_hash_lock);
52
53#ifdef AFS_CACHING_SUPPORT 31#ifdef AFS_CACHING_SUPPORT
54static struct cachefs_netfs_operations afs_cache_ops = { 32static struct cachefs_netfs_operations afs_cache_ops = {
55 .get_page_cookie = afs_cache_get_page_cookie, 33 .get_page_cookie = afs_cache_get_page_cookie,
@@ -62,20 +40,63 @@ struct cachefs_netfs afs_cache_netfs = {
62}; 40};
63#endif 41#endif
64 42
65/*****************************************************************************/ 43struct afs_uuid afs_uuid;
44
45/*
46 * get a client UUID
47 */
48static int __init afs_get_client_UUID(void)
49{
50 struct timespec ts;
51 u64 uuidtime;
52 u16 clockseq;
53 int ret;
54
55 /* read the MAC address of one of the external interfaces and construct
56 * a UUID from it */
57 ret = afs_get_MAC_address(afs_uuid.node);
58 if (ret < 0)
59 return ret;
60
61 getnstimeofday(&ts);
62 uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10;
63 uuidtime += ts.tv_nsec / 100;
64 uuidtime += AFS_UUID_TO_UNIX_TIME;
65 afs_uuid.time_low = uuidtime;
66 afs_uuid.time_mid = uuidtime >> 32;
67 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
68 afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
69
70 get_random_bytes(&clockseq, 2);
71 afs_uuid.clock_seq_low = clockseq;
72 afs_uuid.clock_seq_hi_and_reserved =
73 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
74 afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
75
76 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
77 afs_uuid.time_low,
78 afs_uuid.time_mid,
79 afs_uuid.time_hi_and_version,
80 afs_uuid.clock_seq_hi_and_reserved,
81 afs_uuid.clock_seq_low,
82 afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2],
83 afs_uuid.node[3], afs_uuid.node[4], afs_uuid.node[5]);
84
85 return 0;
86}
87
66/* 88/*
67 * initialise the AFS client FS module 89 * initialise the AFS client FS module
68 */ 90 */
69static int __init afs_init(void) 91static int __init afs_init(void)
70{ 92{
71 int loop, ret; 93 int ret;
72 94
73 printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n"); 95 printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
74 96
75 /* initialise the callback hash table */ 97 ret = afs_get_client_UUID();
76 spin_lock_init(&afs_cb_hash_lock); 98 if (ret < 0)
77 for (loop = AFS_CB_HASH_COUNT - 1; loop >= 0; loop--) 99 return ret;
78 INIT_LIST_HEAD(&afs_cb_hash_tbl[loop]);
79 100
80 /* register the /proc stuff */ 101 /* register the /proc stuff */
81 ret = afs_proc_init(); 102 ret = afs_proc_init();
@@ -87,70 +108,56 @@ static int __init afs_init(void)
87 ret = cachefs_register_netfs(&afs_cache_netfs, 108 ret = cachefs_register_netfs(&afs_cache_netfs,
88 &afs_cache_cell_index_def); 109 &afs_cache_cell_index_def);
89 if (ret < 0) 110 if (ret < 0)
90 goto error;
91#endif
92
93#ifdef CONFIG_KEYS_TURNED_OFF
94 ret = afs_key_register();
95 if (ret < 0)
96 goto error_cache; 111 goto error_cache;
97#endif 112#endif
98 113
99 /* initialise the cell DB */ 114 /* initialise the cell DB */
100 ret = afs_cell_init(rootcell); 115 ret = afs_cell_init(rootcell);
101 if (ret < 0) 116 if (ret < 0)
102 goto error_keys; 117 goto error_cell_init;
103 118
104 /* start the timeout daemon */ 119 /* initialise the VL update process */
105 ret = afs_kafstimod_start(); 120 ret = afs_vlocation_update_init();
106 if (ret < 0) 121 if (ret < 0)
107 goto error_keys; 122 goto error_vl_update_init;
108 123
109 /* start the async operation daemon */ 124 /* initialise the callback update process */
110 ret = afs_kafsasyncd_start(); 125 ret = afs_callback_update_init();
111 if (ret < 0)
112 goto error_kafstimod;
113 126
114 /* create the RxRPC transport */ 127 /* create the RxRPC transport */
115 ret = rxrpc_create_transport(7001, &afs_transport); 128 ret = afs_open_socket();
116 if (ret < 0) 129 if (ret < 0)
117 goto error_kafsasyncd; 130 goto error_open_socket;
118
119 afs_transport->peer_ops = &afs_peer_ops;
120 131
121 /* register the filesystems */ 132 /* register the filesystems */
122 ret = afs_fs_init(); 133 ret = afs_fs_init();
123 if (ret < 0) 134 if (ret < 0)
124 goto error_transport; 135 goto error_fs;
125 136
126 return ret; 137 return ret;
127 138
128 error_transport: 139error_fs:
129 rxrpc_put_transport(afs_transport); 140 afs_close_socket();
130 error_kafsasyncd: 141error_open_socket:
131 afs_kafsasyncd_stop(); 142error_vl_update_init:
132 error_kafstimod: 143error_cell_init:
133 afs_kafstimod_stop();
134 error_keys:
135#ifdef CONFIG_KEYS_TURNED_OFF
136 afs_key_unregister();
137 error_cache:
138#endif
139#ifdef AFS_CACHING_SUPPORT 144#ifdef AFS_CACHING_SUPPORT
140 cachefs_unregister_netfs(&afs_cache_netfs); 145 cachefs_unregister_netfs(&afs_cache_netfs);
141 error: 146error_cache:
142#endif 147#endif
148 afs_callback_update_kill();
149 afs_vlocation_purge();
143 afs_cell_purge(); 150 afs_cell_purge();
144 afs_proc_cleanup(); 151 afs_proc_cleanup();
145 printk(KERN_ERR "kAFS: failed to register: %d\n", ret); 152 printk(KERN_ERR "kAFS: failed to register: %d\n", ret);
146 return ret; 153 return ret;
147} /* end afs_init() */ 154}
148 155
149/* XXX late_initcall is kludgy, but the only alternative seems to create 156/* XXX late_initcall is kludgy, but the only alternative seems to create
150 * a transport upon the first mount, which is worse. Or is it? 157 * a transport upon the first mount, which is worse. Or is it?
151 */ 158 */
152late_initcall(afs_init); /* must be called after net/ to create socket */ 159late_initcall(afs_init); /* must be called after net/ to create socket */
153/*****************************************************************************/ 160
154/* 161/*
155 * clean up on module removal 162 * clean up on module removal
156 */ 163 */
@@ -159,127 +166,16 @@ static void __exit afs_exit(void)
159 printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n"); 166 printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 unregistering.\n");
160 167
161 afs_fs_exit(); 168 afs_fs_exit();
162 rxrpc_put_transport(afs_transport); 169 afs_close_socket();
163 afs_kafstimod_stop(); 170 afs_purge_servers();
164 afs_kafsasyncd_stop(); 171 afs_callback_update_kill();
172 afs_vlocation_purge();
173 flush_scheduled_work();
165 afs_cell_purge(); 174 afs_cell_purge();
166#ifdef CONFIG_KEYS_TURNED_OFF
167 afs_key_unregister();
168#endif
169#ifdef AFS_CACHING_SUPPORT 175#ifdef AFS_CACHING_SUPPORT
170 cachefs_unregister_netfs(&afs_cache_netfs); 176 cachefs_unregister_netfs(&afs_cache_netfs);
171#endif 177#endif
172 afs_proc_cleanup(); 178 afs_proc_cleanup();
173
174} /* end afs_exit() */
175
176module_exit(afs_exit);
177
178/*****************************************************************************/
179/*
180 * notification that new peer record is being added
181 * - called from krxsecd
182 * - return an error to induce an abort
183 * - mustn't sleep (caller holds an rwlock)
184 */
185static int afs_adding_peer(struct rxrpc_peer *peer)
186{
187 struct afs_server *server;
188 int ret;
189
190 _debug("kAFS: Adding new peer %08x\n", ntohl(peer->addr.s_addr));
191
192 /* determine which server the peer resides in (if any) */
193 ret = afs_server_find_by_peer(peer, &server);
194 if (ret < 0)
195 return ret; /* none that we recognise, so abort */
196
197 _debug("Server %p{u=%d}\n", server, atomic_read(&server->usage));
198
199 _debug("Cell %p{u=%d}\n",
200 server->cell, atomic_read(&server->cell->usage));
201
202 /* cross-point the structs under a global lock */
203 spin_lock(&afs_server_peer_lock);
204 peer->user = server;
205 server->peer = peer;
206 spin_unlock(&afs_server_peer_lock);
207
208 afs_put_server(server);
209
210 return 0;
211} /* end afs_adding_peer() */
212
213/*****************************************************************************/
214/*
215 * notification that a peer record is being discarded
216 * - called from krxiod or krxsecd
217 */
218static void afs_discarding_peer(struct rxrpc_peer *peer)
219{
220 struct afs_server *server;
221
222 _enter("%p",peer);
223
224 _debug("Discarding peer %08x (rtt=%lu.%lumS)\n",
225 ntohl(peer->addr.s_addr),
226 (long) (peer->rtt / 1000),
227 (long) (peer->rtt % 1000));
228
229 /* uncross-point the structs under a global lock */
230 spin_lock(&afs_server_peer_lock);
231 server = peer->user;
232 if (server) {
233 peer->user = NULL;
234 server->peer = NULL;
235 }
236 spin_unlock(&afs_server_peer_lock);
237
238 _leave("");
239
240} /* end afs_discarding_peer() */
241
242/*****************************************************************************/
243/*
244 * clear the dead space between task_struct and kernel stack
245 * - called by supplying -finstrument-functions to gcc
246 */
247#if 0
248void __cyg_profile_func_enter (void *this_fn, void *call_site)
249__attribute__((no_instrument_function));
250
251void __cyg_profile_func_enter (void *this_fn, void *call_site)
252{
253 asm volatile(" movl %%esp,%%edi \n"
254 " andl %0,%%edi \n"
255 " addl %1,%%edi \n"
256 " movl %%esp,%%ecx \n"
257 " subl %%edi,%%ecx \n"
258 " shrl $2,%%ecx \n"
259 " movl $0xedededed,%%eax \n"
260 " rep stosl \n"
261 :
262 : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
263 : "eax", "ecx", "edi", "memory", "cc"
264 );
265} 179}
266 180
267void __cyg_profile_func_exit(void *this_fn, void *call_site) 181module_exit(afs_exit);
268__attribute__((no_instrument_function));
269
270void __cyg_profile_func_exit(void *this_fn, void *call_site)
271{
272 asm volatile(" movl %%esp,%%edi \n"
273 " andl %0,%%edi \n"
274 " addl %1,%%edi \n"
275 " movl %%esp,%%ecx \n"
276 " subl %%edi,%%ecx \n"
277 " shrl $2,%%ecx \n"
278 " movl $0xdadadada,%%eax \n"
279 " rep stosl \n"
280 :
281 : "i"(~(THREAD_SIZE - 1)), "i"(sizeof(struct thread_info))
282 : "eax", "ecx", "edi", "memory", "cc"
283 );
284}
285#endif
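
A usage note on the debugging hooks wired together above: module_param_named() exposes afs_debug as a parameter named "debug" with S_IWUSR set, so the AFS_DEBUG_KENTER, AFS_DEBUG_KLEAVE and AFS_DEBUG_KDEBUG bits defined in internal.h can be flipped at runtime as well as at load time. On a modular build this should surface as /sys/module/kafs/parameters/debug (assuming the usual kafs module name; the path is inferred, not taken from this patch), so writing, say, 5 enables the function-entry and general debug messages while leaving the function-exit traces off.
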
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index e4fce66d76e0..cdb9792d8161 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -1,6 +1,6 @@
1/* misc.c: miscellaneous bits 1/* miscellaneous bits
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -12,19 +12,20 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/errno.h> 14#include <linux/errno.h>
15#include "errors.h"
16#include "internal.h" 15#include "internal.h"
16#include "afs_fs.h"
17 17
18/*****************************************************************************/
19/* 18/*
20 * convert an AFS abort code to a Linux error number 19 * convert an AFS abort code to a Linux error number
21 */ 20 */
22int afs_abort_to_error(int abortcode) 21int afs_abort_to_error(u32 abort_code)
23{ 22{
24 switch (abortcode) { 23 switch (abort_code) {
24 case 13: return -EACCES;
25 case 30: return -EROFS;
25 case VSALVAGE: return -EIO; 26 case VSALVAGE: return -EIO;
26 case VNOVNODE: return -ENOENT; 27 case VNOVNODE: return -ENOENT;
27 case VNOVOL: return -ENXIO; 28 case VNOVOL: return -ENOMEDIUM;
28 case VVOLEXISTS: return -EEXIST; 29 case VVOLEXISTS: return -EEXIST;
29 case VNOSERVICE: return -EIO; 30 case VNOSERVICE: return -EIO;
30 case VOFFLINE: return -ENOENT; 31 case VOFFLINE: return -ENOENT;
@@ -33,7 +34,24 @@ int afs_abort_to_error(int abortcode)
33 case VOVERQUOTA: return -EDQUOT; 34 case VOVERQUOTA: return -EDQUOT;
34 case VBUSY: return -EBUSY; 35 case VBUSY: return -EBUSY;
35 case VMOVED: return -ENXIO; 36 case VMOVED: return -ENXIO;
36 default: return -EIO; 37 case 0x2f6df0c: return -EACCES;
38 case 0x2f6df0f: return -EBUSY;
39 case 0x2f6df10: return -EEXIST;
40 case 0x2f6df11: return -EXDEV;
41 case 0x2f6df13: return -ENOTDIR;
42 case 0x2f6df14: return -EISDIR;
43 case 0x2f6df15: return -EINVAL;
44 case 0x2f6df1a: return -EFBIG;
45 case 0x2f6df1b: return -ENOSPC;
46 case 0x2f6df1d: return -EROFS;
47 case 0x2f6df1e: return -EMLINK;
48 case 0x2f6df20: return -EDOM;
49 case 0x2f6df21: return -ERANGE;
50 case 0x2f6df22: return -EDEADLK;
51 case 0x2f6df23: return -ENAMETOOLONG;
52 case 0x2f6df24: return -ENOLCK;
53 case 0x2f6df26: return -ENOTEMPTY;
54 case 0x2f6df78: return -EDQUOT;
55 default: return -EREMOTEIO;
37 } 56 }
38 57}
39} /* end afs_abort_to_error() */
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 68495f0de7b3..b905ae37f912 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -1,4 +1,4 @@
1/* mntpt.c: mountpoint management 1/* mountpoint management
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -18,10 +18,6 @@
18#include <linux/mount.h> 18#include <linux/mount.h>
19#include <linux/namei.h> 19#include <linux/namei.h>
20#include <linux/mnt_namespace.h> 20#include <linux/mnt_namespace.h>
21#include "super.h"
22#include "cell.h"
23#include "volume.h"
24#include "vnode.h"
25#include "internal.h" 21#include "internal.h"
26 22
27 23
@@ -30,6 +26,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
30 struct nameidata *nd); 26 struct nameidata *nd);
31static int afs_mntpt_open(struct inode *inode, struct file *file); 27static int afs_mntpt_open(struct inode *inode, struct file *file);
32static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); 28static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd);
29static void afs_mntpt_expiry_timed_out(struct work_struct *work);
33 30
34const struct file_operations afs_mntpt_file_operations = { 31const struct file_operations afs_mntpt_file_operations = {
35 .open = afs_mntpt_open, 32 .open = afs_mntpt_open,
@@ -43,24 +40,19 @@ const struct inode_operations afs_mntpt_inode_operations = {
43}; 40};
44 41
45static LIST_HEAD(afs_vfsmounts); 42static LIST_HEAD(afs_vfsmounts);
43static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
46 44
47static void afs_mntpt_expiry_timed_out(struct afs_timer *timer); 45unsigned long afs_mntpt_expiry_timeout = 10 * 60;
48 46
49struct afs_timer_ops afs_mntpt_expiry_timer_ops = {
50 .timed_out = afs_mntpt_expiry_timed_out,
51};
52
53struct afs_timer afs_mntpt_expiry_timer;
54
55unsigned long afs_mntpt_expiry_timeout = 20;
56
57/*****************************************************************************/
58/* 47/*
59 * check a symbolic link to see whether it actually encodes a mountpoint 48 * check a symbolic link to see whether it actually encodes a mountpoint
60 * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately 49 * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
61 */ 50 */
62int afs_mntpt_check_symlink(struct afs_vnode *vnode) 51int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
63{ 52{
53 struct file file = {
54 .private_data = key,
55 };
64 struct page *page; 56 struct page *page;
65 size_t size; 57 size_t size;
66 char *buf; 58 char *buf;
@@ -69,7 +61,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
69 _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique); 61 _enter("{%u,%u}", vnode->fid.vnode, vnode->fid.unique);
70 62
71 /* read the contents of the symlink into the pagecache */ 63 /* read the contents of the symlink into the pagecache */
72 page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, NULL); 64 page = read_mapping_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, &file);
73 if (IS_ERR(page)) { 65 if (IS_ERR(page)) {
74 ret = PTR_ERR(page); 66 ret = PTR_ERR(page);
75 goto out; 67 goto out;
@@ -85,7 +77,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
85 77
86 /* examine the symlink's contents */ 78 /* examine the symlink's contents */
87 size = vnode->status.size; 79 size = vnode->status.size;
88 _debug("symlink to %*.*s", size, (int) size, buf); 80 _debug("symlink to %*.*s", (int) size, (int) size, buf);
89 81
90 if (size > 2 && 82 if (size > 2 &&
91 (buf[0] == '%' || buf[0] == '#') && 83 (buf[0] == '%' || buf[0] == '#') &&
@@ -93,22 +85,20 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode)
93 ) { 85 ) {
94 _debug("symlink is a mountpoint"); 86 _debug("symlink is a mountpoint");
95 spin_lock(&vnode->lock); 87 spin_lock(&vnode->lock);
96 vnode->flags |= AFS_VNODE_MOUNTPOINT; 88 set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
97 spin_unlock(&vnode->lock); 89 spin_unlock(&vnode->lock);
98 } 90 }
99 91
100 ret = 0; 92 ret = 0;
101 93
102 out_free: 94out_free:
103 kunmap(page); 95 kunmap(page);
104 page_cache_release(page); 96 page_cache_release(page);
105 out: 97out:
106 _leave(" = %d", ret); 98 _leave(" = %d", ret);
107 return ret; 99 return ret;
100}
108 101
109} /* end afs_mntpt_check_symlink() */
110
111/*****************************************************************************/
112/* 102/*
113 * no valid lookup procedure on this sort of dir 103 * no valid lookup procedure on this sort of dir
114 */ 104 */
@@ -116,7 +106,7 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
116 struct dentry *dentry, 106 struct dentry *dentry,
117 struct nameidata *nd) 107 struct nameidata *nd)
118{ 108{
119 kenter("%p,%p{%p{%s},%s}", 109 _enter("%p,%p{%p{%s},%s}",
120 dir, 110 dir,
121 dentry, 111 dentry,
122 dentry->d_parent, 112 dentry->d_parent,
@@ -125,15 +115,14 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir,
125 dentry->d_name.name); 115 dentry->d_name.name);
126 116
127 return ERR_PTR(-EREMOTE); 117 return ERR_PTR(-EREMOTE);
128} /* end afs_mntpt_lookup() */ 118}
129 119
130/*****************************************************************************/
131/* 120/*
132 * no valid open procedure on this sort of dir 121 * no valid open procedure on this sort of dir
133 */ 122 */
134static int afs_mntpt_open(struct inode *inode, struct file *file) 123static int afs_mntpt_open(struct inode *inode, struct file *file)
135{ 124{
136 kenter("%p,%p{%p{%s},%s}", 125 _enter("%p,%p{%p{%s},%s}",
137 inode, file, 126 inode, file,
138 file->f_path.dentry->d_parent, 127 file->f_path.dentry->d_parent,
139 file->f_path.dentry->d_parent ? 128 file->f_path.dentry->d_parent ?
@@ -142,9 +131,8 @@ static int afs_mntpt_open(struct inode *inode, struct file *file)
142 file->f_path.dentry->d_name.name); 131 file->f_path.dentry->d_name.name);
143 132
144 return -EREMOTE; 133 return -EREMOTE;
145} /* end afs_mntpt_open() */ 134}
146 135
147/*****************************************************************************/
148/* 136/*
149 * create a vfsmount to be automounted 137 * create a vfsmount to be automounted
150 */ 138 */
@@ -157,7 +145,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
157 char *buf, *devname = NULL, *options = NULL; 145 char *buf, *devname = NULL, *options = NULL;
158 int ret; 146 int ret;
159 147
160 kenter("{%s}", mntpt->d_name.name); 148 _enter("{%s}", mntpt->d_name.name);
161 149
162 BUG_ON(!mntpt->d_inode); 150 BUG_ON(!mntpt->d_inode);
163 151
@@ -201,79 +189,108 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
201 strcat(options, ",rwpath"); 189 strcat(options, ",rwpath");
202 190
203 /* try and do the mount */ 191 /* try and do the mount */
204 kdebug("--- attempting mount %s -o %s ---", devname, options); 192 _debug("--- attempting mount %s -o %s ---", devname, options);
205 mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options); 193 mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
206 kdebug("--- mount result %p ---", mnt); 194 _debug("--- mount result %p ---", mnt);
207 195
208 free_page((unsigned long) devname); 196 free_page((unsigned long) devname);
209 free_page((unsigned long) options); 197 free_page((unsigned long) options);
210 kleave(" = %p", mnt); 198 _leave(" = %p", mnt);
211 return mnt; 199 return mnt;
212 200
213 error: 201error:
214 if (page) 202 if (page)
215 page_cache_release(page); 203 page_cache_release(page);
216 if (devname) 204 if (devname)
217 free_page((unsigned long) devname); 205 free_page((unsigned long) devname);
218 if (options) 206 if (options)
219 free_page((unsigned long) options); 207 free_page((unsigned long) options);
220 kleave(" = %d", ret); 208 _leave(" = %d", ret);
221 return ERR_PTR(ret); 209 return ERR_PTR(ret);
222} /* end afs_mntpt_do_automount() */ 210}
223 211
224/*****************************************************************************/
225/* 212/*
226 * follow a link from a mountpoint directory, thus causing it to be mounted 213 * follow a link from a mountpoint directory, thus causing it to be mounted
227 */ 214 */
228static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) 215static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd)
229{ 216{
230 struct vfsmount *newmnt; 217 struct vfsmount *newmnt;
231 struct dentry *old_dentry;
232 int err; 218 int err;
233 219
234 kenter("%p{%s},{%s:%p{%s}}", 220 _enter("%p{%s},{%s:%p{%s},}",
235 dentry, 221 dentry,
236 dentry->d_name.name, 222 dentry->d_name.name,
237 nd->mnt->mnt_devname, 223 nd->mnt->mnt_devname,
238 dentry, 224 dentry,
239 nd->dentry->d_name.name); 225 nd->dentry->d_name.name);
240 226
241 newmnt = afs_mntpt_do_automount(dentry); 227 dput(nd->dentry);
228 nd->dentry = dget(dentry);
229
230 newmnt = afs_mntpt_do_automount(nd->dentry);
242 if (IS_ERR(newmnt)) { 231 if (IS_ERR(newmnt)) {
243 path_release(nd); 232 path_release(nd);
244 return (void *)newmnt; 233 return (void *)newmnt;
245 } 234 }
246 235
247 old_dentry = nd->dentry; 236 mntget(newmnt);
248 nd->dentry = dentry; 237 err = do_add_mount(newmnt, nd, MNT_SHRINKABLE, &afs_vfsmounts);
249 err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts); 238 switch (err) {
250 nd->dentry = old_dentry; 239 case 0:
251 240 mntput(nd->mnt);
252 path_release(nd); 241 dput(nd->dentry);
253
254 if (!err) {
255 mntget(newmnt);
256 nd->mnt = newmnt; 242 nd->mnt = newmnt;
257 dget(newmnt->mnt_root); 243 nd->dentry = dget(newmnt->mnt_root);
258 nd->dentry = newmnt->mnt_root; 244 schedule_delayed_work(&afs_mntpt_expiry_timer,
245 afs_mntpt_expiry_timeout * HZ);
246 break;
247 case -EBUSY:
248 /* someone else made a mount here whilst we were busy */
249 while (d_mountpoint(nd->dentry) &&
250 follow_down(&nd->mnt, &nd->dentry))
251 ;
252 err = 0;
253 default:
254 mntput(newmnt);
255 break;
259 } 256 }
260 257
261 kleave(" = %d", err); 258 _leave(" = %d", err);
262 return ERR_PTR(err); 259 return ERR_PTR(err);
263} /* end afs_mntpt_follow_link() */ 260}
264 261
265/*****************************************************************************/
266/* 262/*
267 * handle mountpoint expiry timer going off 263 * handle mountpoint expiry timer going off
268 */ 264 */
269static void afs_mntpt_expiry_timed_out(struct afs_timer *timer) 265static void afs_mntpt_expiry_timed_out(struct work_struct *work)
270{ 266{
271 kenter(""); 267 _enter("");
272 268
273 mark_mounts_for_expiry(&afs_vfsmounts); 269 if (!list_empty(&afs_vfsmounts)) {
270 mark_mounts_for_expiry(&afs_vfsmounts);
271 schedule_delayed_work(&afs_mntpt_expiry_timer,
272 afs_mntpt_expiry_timeout * HZ);
273 }
274
275 _leave("");
276}
274 277
275 afs_kafstimod_add_timer(&afs_mntpt_expiry_timer, 278/*
276 afs_mntpt_expiry_timeout * HZ); 279 * kill the AFS mountpoint timer if it's still running
280 */
281void afs_mntpt_kill_timer(void)
282{
283 _enter("");
277 284
278 kleave(""); 285 ASSERT(list_empty(&afs_vfsmounts));
279} /* end afs_mntpt_expiry_timed_out() */ 286 cancel_delayed_work(&afs_mntpt_expiry_timer);
287 flush_scheduled_work();
288}
289
290/*
291 * begin unmount by attempting to remove all automounted mountpoints we added
292 */
293void afs_umount_begin(struct vfsmount *vfsmnt, int flags)
294{
295 shrink_submounts(vfsmnt, &afs_vfsmounts);
296}
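
For context on the symlink test in afs_mntpt_check_symlink() above: by AFS convention a mount point is a symlink whose body has the form "#cell:volume." or "%cell:volume." (or "#volume." for a volume in the caller's own cell) — hence the checks for a minimum length and a leading '#' or '%'. A '%' prefix conventionally forces the read-write volume rather than a read-only replica, and the name is terminated by a '.'.
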
diff --git a/fs/afs/mount.h b/fs/afs/mount.h
deleted file mode 100644
index 9d2f46ec549f..000000000000
--- a/fs/afs/mount.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/* mount.h: mount parameters
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_MOUNT_H
13#define _LINUX_AFS_MOUNT_H
14
15struct afs_mountdata {
16 const char *volume; /* name of volume */
17 const char *cell; /* name of cell containing volume */
18 const char *cache; /* name of cache block device */
19 size_t nservers; /* number of server addresses listed */
20 uint32_t servers[10]; /* IP addresses of servers in this cell */
21};
22
23#endif /* _LINUX_AFS_MOUNT_H */
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index ae6b85b1e484..d5601f617cdb 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -1,4 +1,4 @@
1/* proc.c: /proc interface for AFS 1/* /proc interface for AFS
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -13,8 +13,6 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/proc_fs.h> 14#include <linux/proc_fs.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include "cell.h"
17#include "volume.h"
18#include <asm/uaccess.h> 16#include <asm/uaccess.h>
19#include "internal.h" 17#include "internal.h"
20 18
@@ -130,7 +128,6 @@ static const struct file_operations afs_proc_cell_servers_fops = {
130 .release = afs_proc_cell_servers_release, 128 .release = afs_proc_cell_servers_release,
131}; 129};
132 130
133/*****************************************************************************/
134/* 131/*
135 * initialise the /proc/fs/afs/ directory 132 * initialise the /proc/fs/afs/ directory
136 */ 133 */
@@ -142,47 +139,43 @@ int afs_proc_init(void)
142 139
143 proc_afs = proc_mkdir("fs/afs", NULL); 140 proc_afs = proc_mkdir("fs/afs", NULL);
144 if (!proc_afs) 141 if (!proc_afs)
145 goto error; 142 goto error_dir;
146 proc_afs->owner = THIS_MODULE; 143 proc_afs->owner = THIS_MODULE;
147 144
148 p = create_proc_entry("cells", 0, proc_afs); 145 p = create_proc_entry("cells", 0, proc_afs);
149 if (!p) 146 if (!p)
150 goto error_proc; 147 goto error_cells;
151 p->proc_fops = &afs_proc_cells_fops; 148 p->proc_fops = &afs_proc_cells_fops;
152 p->owner = THIS_MODULE; 149 p->owner = THIS_MODULE;
153 150
154 p = create_proc_entry("rootcell", 0, proc_afs); 151 p = create_proc_entry("rootcell", 0, proc_afs);
155 if (!p) 152 if (!p)
156 goto error_cells; 153 goto error_rootcell;
157 p->proc_fops = &afs_proc_rootcell_fops; 154 p->proc_fops = &afs_proc_rootcell_fops;
158 p->owner = THIS_MODULE; 155 p->owner = THIS_MODULE;
159 156
160 _leave(" = 0"); 157 _leave(" = 0");
161 return 0; 158 return 0;
162 159
163 error_cells: 160error_rootcell:
164 remove_proc_entry("cells", proc_afs); 161 remove_proc_entry("cells", proc_afs);
165 error_proc: 162error_cells:
166 remove_proc_entry("fs/afs", NULL); 163 remove_proc_entry("fs/afs", NULL);
167 error: 164error_dir:
168 _leave(" = -ENOMEM"); 165 _leave(" = -ENOMEM");
169 return -ENOMEM; 166 return -ENOMEM;
167}
170 168
171} /* end afs_proc_init() */
172
173/*****************************************************************************/
174/* 169/*
175 * clean up the /proc/fs/afs/ directory 170 * clean up the /proc/fs/afs/ directory
176 */ 171 */
177void afs_proc_cleanup(void) 172void afs_proc_cleanup(void)
178{ 173{
174 remove_proc_entry("rootcell", proc_afs);
179 remove_proc_entry("cells", proc_afs); 175 remove_proc_entry("cells", proc_afs);
180
181 remove_proc_entry("fs/afs", NULL); 176 remove_proc_entry("fs/afs", NULL);
177}
182 178
183} /* end afs_proc_cleanup() */
184
185/*****************************************************************************/
186/* 179/*
187 * open "/proc/fs/afs/cells" which provides a summary of extant cells 180 * open "/proc/fs/afs/cells" which provides a summary of extant cells
188 */ 181 */
@@ -199,9 +192,8 @@ static int afs_proc_cells_open(struct inode *inode, struct file *file)
199 m->private = PDE(inode)->data; 192 m->private = PDE(inode)->data;
200 193
201 return 0; 194 return 0;
202} /* end afs_proc_cells_open() */ 195}
203 196
204/*****************************************************************************/
205/* 197/*
206 * set up the iterator to start reading from the cells list and return the 198 * set up the iterator to start reading from the cells list and return the
207 * first item 199 * first item
@@ -225,9 +217,8 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
225 break; 217 break;
226 218
227 return _p != &afs_proc_cells ? _p : NULL; 219 return _p != &afs_proc_cells ? _p : NULL;
228} /* end afs_proc_cells_start() */ 220}
229 221
230/*****************************************************************************/
231/* 222/*
232 * move to next cell in cells list 223 * move to next cell in cells list
233 */ 224 */
@@ -241,19 +232,16 @@ static void *afs_proc_cells_next(struct seq_file *p, void *v, loff_t *pos)
241 _p = v == (void *) 1 ? afs_proc_cells.next : _p->next; 232 _p = v == (void *) 1 ? afs_proc_cells.next : _p->next;
242 233
243 return _p != &afs_proc_cells ? _p : NULL; 234 return _p != &afs_proc_cells ? _p : NULL;
244} /* end afs_proc_cells_next() */ 235}
245 236
246/*****************************************************************************/
247/* 237/*
248 * clean up after reading from the cells list 238 * clean up after reading from the cells list
249 */ 239 */
250static void afs_proc_cells_stop(struct seq_file *p, void *v) 240static void afs_proc_cells_stop(struct seq_file *p, void *v)
251{ 241{
252 up_read(&afs_proc_cells_sem); 242 up_read(&afs_proc_cells_sem);
243}
253 244
254} /* end afs_proc_cells_stop() */
255
256/*****************************************************************************/
257/* 245/*
258 * display a header line followed by a load of cell lines 246 * display a header line followed by a load of cell lines
259 */ 247 */
@@ -261,19 +249,18 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
261{ 249{
262 struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link); 250 struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
263 251
264 /* display header on line 1 */
265 if (v == (void *) 1) { 252 if (v == (void *) 1) {
253 /* display header on line 1 */
266 seq_puts(m, "USE NAME\n"); 254 seq_puts(m, "USE NAME\n");
267 return 0; 255 return 0;
268 } 256 }
269 257
270 /* display one cell per line on subsequent lines */ 258 /* display one cell per line on subsequent lines */
271 seq_printf(m, "%3d %s\n", atomic_read(&cell->usage), cell->name); 259 seq_printf(m, "%3d %s\n",
272 260 atomic_read(&cell->usage), cell->name);
273 return 0; 261 return 0;
274} /* end afs_proc_cells_show() */ 262}
275 263
276/*****************************************************************************/
277/* 264/*
278 * handle writes to /proc/fs/afs/cells 265 * handle writes to /proc/fs/afs/cells
279 * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]" 266 * - to add cells: echo "add <cellname> <IP>[:<IP>][:<IP>]"
@@ -326,30 +313,32 @@ static ssize_t afs_proc_cells_write(struct file *file, const char __user *buf,
326 313
327 if (strcmp(kbuf, "add") == 0) { 314 if (strcmp(kbuf, "add") == 0) {
328 struct afs_cell *cell; 315 struct afs_cell *cell;
329 ret = afs_cell_create(name, args, &cell); 316
330 if (ret < 0) 317 cell = afs_cell_create(name, args);
318 if (IS_ERR(cell)) {
319 ret = PTR_ERR(cell);
331 goto done; 320 goto done;
321 }
332 322
323 afs_put_cell(cell);
333 printk("kAFS: Added new cell '%s'\n", name); 324 printk("kAFS: Added new cell '%s'\n", name);
334 } 325 } else {
335 else {
336 goto inval; 326 goto inval;
337 } 327 }
338 328
339 ret = size; 329 ret = size;
340 330
341 done: 331done:
342 kfree(kbuf); 332 kfree(kbuf);
343 _leave(" = %d", ret); 333 _leave(" = %d", ret);
344 return ret; 334 return ret;
345 335
346 inval: 336inval:
347 ret = -EINVAL; 337 ret = -EINVAL;
348 printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n"); 338 printk("kAFS: Invalid Command on /proc/fs/afs/cells file\n");
349 goto done; 339 goto done;
350} /* end afs_proc_cells_write() */ 340}
351 341
352/*****************************************************************************/
353/* 342/*
354 * Stubs for /proc/fs/afs/rootcell 343 * Stubs for /proc/fs/afs/rootcell
355 */ 344 */
@@ -369,7 +358,6 @@ static ssize_t afs_proc_rootcell_read(struct file *file, char __user *buf,
369 return 0; 358 return 0;
370} 359}
371 360
372/*****************************************************************************/
373/* 361/*
374 * handle writes to /proc/fs/afs/rootcell 362 * handle writes to /proc/fs/afs/rootcell
375 * - to initialize rootcell: echo "cell.name:192.168.231.14" 363 * - to initialize rootcell: echo "cell.name:192.168.231.14"
@@ -407,14 +395,13 @@ static ssize_t afs_proc_rootcell_write(struct file *file,
407 if (ret >= 0) 395 if (ret >= 0)
408 ret = size; /* consume everything, always */ 396 ret = size; /* consume everything, always */
409 397
410 infault: 398infault:
411 kfree(kbuf); 399 kfree(kbuf);
412 nomem: 400nomem:
413 _leave(" = %d", ret); 401 _leave(" = %d", ret);
414 return ret; 402 return ret;
415} /* end afs_proc_rootcell_write() */ 403}
416 404
417/*****************************************************************************/
418/* 405/*
419 * initialise /proc/fs/afs/<cell>/ 406 * initialise /proc/fs/afs/<cell>/
420 */ 407 */
@@ -426,25 +413,25 @@ int afs_proc_cell_setup(struct afs_cell *cell)
426 413
427 cell->proc_dir = proc_mkdir(cell->name, proc_afs); 414 cell->proc_dir = proc_mkdir(cell->name, proc_afs);
428 if (!cell->proc_dir) 415 if (!cell->proc_dir)
429 return -ENOMEM; 416 goto error_dir;
430 417
431 p = create_proc_entry("servers", 0, cell->proc_dir); 418 p = create_proc_entry("servers", 0, cell->proc_dir);
432 if (!p) 419 if (!p)
433 goto error_proc; 420 goto error_servers;
434 p->proc_fops = &afs_proc_cell_servers_fops; 421 p->proc_fops = &afs_proc_cell_servers_fops;
435 p->owner = THIS_MODULE; 422 p->owner = THIS_MODULE;
436 p->data = cell; 423 p->data = cell;
437 424
438 p = create_proc_entry("vlservers", 0, cell->proc_dir); 425 p = create_proc_entry("vlservers", 0, cell->proc_dir);
439 if (!p) 426 if (!p)
440 goto error_servers; 427 goto error_vlservers;
441 p->proc_fops = &afs_proc_cell_vlservers_fops; 428 p->proc_fops = &afs_proc_cell_vlservers_fops;
442 p->owner = THIS_MODULE; 429 p->owner = THIS_MODULE;
443 p->data = cell; 430 p->data = cell;
444 431
445 p = create_proc_entry("volumes", 0, cell->proc_dir); 432 p = create_proc_entry("volumes", 0, cell->proc_dir);
446 if (!p) 433 if (!p)
447 goto error_vlservers; 434 goto error_volumes;
448 p->proc_fops = &afs_proc_cell_volumes_fops; 435 p->proc_fops = &afs_proc_cell_volumes_fops;
449 p->owner = THIS_MODULE; 436 p->owner = THIS_MODULE;
450 p->data = cell; 437 p->data = cell;
@@ -452,17 +439,17 @@ int afs_proc_cell_setup(struct afs_cell *cell)
452 _leave(" = 0"); 439 _leave(" = 0");
453 return 0; 440 return 0;
454 441
455 error_vlservers: 442error_volumes:
456 remove_proc_entry("vlservers", cell->proc_dir); 443 remove_proc_entry("vlservers", cell->proc_dir);
457 error_servers: 444error_vlservers:
458 remove_proc_entry("servers", cell->proc_dir); 445 remove_proc_entry("servers", cell->proc_dir);
459 error_proc: 446error_servers:
460 remove_proc_entry(cell->name, proc_afs); 447 remove_proc_entry(cell->name, proc_afs);
448error_dir:
461 _leave(" = -ENOMEM"); 449 _leave(" = -ENOMEM");
462 return -ENOMEM; 450 return -ENOMEM;
463} /* end afs_proc_cell_setup() */ 451}
464 452
465/*****************************************************************************/
466/* 453/*
467 * remove /proc/fs/afs/<cell>/ 454 * remove /proc/fs/afs/<cell>/
468 */ 455 */
@@ -476,9 +463,8 @@ void afs_proc_cell_remove(struct afs_cell *cell)
476 remove_proc_entry(cell->name, proc_afs); 463 remove_proc_entry(cell->name, proc_afs);
477 464
478 _leave(""); 465 _leave("");
479} /* end afs_proc_cell_remove() */ 466}
480 467
481/*****************************************************************************/
482/* 468/*
483 * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant volumes 469 * open "/proc/fs/afs/<cell>/volumes" which provides a summary of extant volumes
484 */ 470 */
@@ -488,7 +474,7 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
488 struct seq_file *m; 474 struct seq_file *m;
489 int ret; 475 int ret;
490 476
491 cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); 477 cell = PDE(inode)->data;
492 if (!cell) 478 if (!cell)
493 return -ENOENT; 479 return -ENOENT;
494 480
@@ -500,25 +486,16 @@ static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file)
500 m->private = cell; 486 m->private = cell;
501 487
502 return 0; 488 return 0;
503} /* end afs_proc_cell_volumes_open() */ 489}
504 490
505/*****************************************************************************/
506/* 491/*
507 * close the file and release the ref to the cell 492 * close the file and release the ref to the cell
508 */ 493 */
509static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file) 494static int afs_proc_cell_volumes_release(struct inode *inode, struct file *file)
510{ 495{
511 struct afs_cell *cell = PDE(inode)->data; 496 return seq_release(inode, file);
512 int ret; 497}
513
514 ret = seq_release(inode,file);
515
516 afs_put_cell(cell);
517
518 return ret;
519} /* end afs_proc_cell_volumes_release() */
520 498
521/*****************************************************************************/
522/* 499/*
523 * set up the iterator to start reading from the cell's volume list and return the 500 * set up the iterator to start reading from the cell's volume list and return the
524 * first item 501 * first item
@@ -545,9 +522,8 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
545 break; 522 break;
546 523
547 return _p != &cell->vl_list ? _p : NULL; 524 return _p != &cell->vl_list ? _p : NULL;
548} /* end afs_proc_cell_volumes_start() */ 525}
549 526
550/*****************************************************************************/
551/* 527/*
552 * move to the next volume in the cell's volume list 528 * move to the next volume in the cell's volume list
553 */ 529 */
@@ -562,12 +538,11 @@ static void *afs_proc_cell_volumes_next(struct seq_file *p, void *v,
562 (*_pos)++; 538 (*_pos)++;
563 539
564 _p = v; 540 _p = v;
565 _p = v == (void *) 1 ? cell->vl_list.next : _p->next; 541 _p = (v == (void *) 1) ? cell->vl_list.next : _p->next;
566 542
567 return _p != &cell->vl_list ? _p : NULL; 543 return (_p != &cell->vl_list) ? _p : NULL;
568} /* end afs_proc_cell_volumes_next() */ 544}
569 545
570/*****************************************************************************/
571/* 546/*
572 * clean up after reading from the volume list 547 * clean up after reading from the volume list
573 */ 548 */
@@ -576,10 +551,18 @@ static void afs_proc_cell_volumes_stop(struct seq_file *p, void *v)
576 struct afs_cell *cell = p->private; 551 struct afs_cell *cell = p->private;
577 552
578 up_read(&cell->vl_sem); 553 up_read(&cell->vl_sem);
554}
579 555
580} /* end afs_proc_cell_volumes_stop() */ 556const char afs_vlocation_states[][4] = {
557 [AFS_VL_NEW] = "New",
558 [AFS_VL_CREATING] = "Crt",
559 [AFS_VL_VALID] = "Val",
560 [AFS_VL_NO_VOLUME] = "NoV",
561 [AFS_VL_UPDATING] = "Upd",
562 [AFS_VL_VOLUME_DELETED] = "Del",
563 [AFS_VL_UNCERTAIN] = "Unc",
564};
581 565
582/*****************************************************************************/
583/* 566/*
584 * display a header line followed by a load of volume lines 567 * display a header line followed by a load of volume lines
585 */ 568 */
@@ -590,23 +573,22 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
590 573
591 /* display header on line 1 */ 574 /* display header on line 1 */
592 if (v == (void *) 1) { 575 if (v == (void *) 1) {
593 seq_puts(m, "USE VLID[0] VLID[1] VLID[2] NAME\n"); 576 seq_puts(m, "USE STT VLID[0] VLID[1] VLID[2] NAME\n");
594 return 0; 577 return 0;
595 } 578 }
596 579
597 /* display one cell per line on subsequent lines */ 580 /* display one cell per line on subsequent lines */
598 seq_printf(m, "%3d %08x %08x %08x %s\n", 581 seq_printf(m, "%3d %s %08x %08x %08x %s\n",
599 atomic_read(&vlocation->usage), 582 atomic_read(&vlocation->usage),
583 afs_vlocation_states[vlocation->state],
600 vlocation->vldb.vid[0], 584 vlocation->vldb.vid[0],
601 vlocation->vldb.vid[1], 585 vlocation->vldb.vid[1],
602 vlocation->vldb.vid[2], 586 vlocation->vldb.vid[2],
603 vlocation->vldb.name 587 vlocation->vldb.name);
604 );
605 588
606 return 0; 589 return 0;
607} /* end afs_proc_cell_volumes_show() */ 590}
608 591
609/*****************************************************************************/
610/* 592/*
611 * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume 593 * open "/proc/fs/afs/<cell>/vlservers" which provides a list of volume
612 * location servers 594 * location servers
@@ -617,11 +599,11 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
617 struct seq_file *m; 599 struct seq_file *m;
618 int ret; 600 int ret;
619 601
620 cell = afs_get_cell_maybe((struct afs_cell**)&PDE(inode)->data); 602 cell = PDE(inode)->data;
621 if (!cell) 603 if (!cell)
622 return -ENOENT; 604 return -ENOENT;
623 605
624 ret = seq_open(file,&afs_proc_cell_vlservers_ops); 606 ret = seq_open(file, &afs_proc_cell_vlservers_ops);
625 if (ret<0) 607 if (ret<0)
626 return ret; 608 return ret;
627 609
@@ -629,26 +611,17 @@ static int afs_proc_cell_vlservers_open(struct inode *inode, struct file *file)
629 m->private = cell; 611 m->private = cell;
630 612
631 return 0; 613 return 0;
632} /* end afs_proc_cell_vlservers_open() */ 614}
633 615
634/*****************************************************************************/
635/* 616/*
636 * close the file and release the ref to the cell 617 * close the file and release the ref to the cell
637 */ 618 */
638static int afs_proc_cell_vlservers_release(struct inode *inode, 619static int afs_proc_cell_vlservers_release(struct inode *inode,
639 struct file *file) 620 struct file *file)
640{ 621{
641 struct afs_cell *cell = PDE(inode)->data; 622 return seq_release(inode, file);
642 int ret; 623}
643
644 ret = seq_release(inode,file);
645
646 afs_put_cell(cell);
647
648 return ret;
649} /* end afs_proc_cell_vlservers_release() */
650 624
651/*****************************************************************************/
652/* 625/*
653 * set up the iterator to start reading from the cell's VL server list and return the 626 * set up the iterator to start reading from the cell's VL server list and return the
654 * first item 627 * first item
@@ -672,9 +645,8 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
672 return NULL; 645 return NULL;
673 646
674 return &cell->vl_addrs[pos]; 647 return &cell->vl_addrs[pos];
675} /* end afs_proc_cell_vlservers_start() */ 648}
676 649
677/*****************************************************************************/
678/* 650/*
679 * move to the next VL server address in the cell's list 651 * move to the next VL server address in the cell's list
680 */ 652 */
@@ -692,9 +664,8 @@ static void *afs_proc_cell_vlservers_next(struct seq_file *p, void *v,
692 return NULL; 664 return NULL;
693 665
694 return &cell->vl_addrs[pos]; 666 return &cell->vl_addrs[pos];
695} /* end afs_proc_cell_vlservers_next() */ 667}
696 668
697/*****************************************************************************/
698/* 669/*
699 * clean up after reading from the address list 670 * clean up after reading from the address list
700 */ 671 */
@@ -703,10 +674,8 @@ static void afs_proc_cell_vlservers_stop(struct seq_file *p, void *v)
703 struct afs_cell *cell = p->private; 674 struct afs_cell *cell = p->private;
704 675
705 up_read(&cell->vl_sem); 676 up_read(&cell->vl_sem);
677}
706 678
707} /* end afs_proc_cell_vlservers_stop() */
708
709/*****************************************************************************/
710/* 679/*
711 * display a header line followed by a load of vlserver address lines 680 * display a header line followed by a load of vlserver address lines
712 */ 681 */
@@ -722,11 +691,9 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
722 691
723 /* display one cell per line on subsequent lines */ 692 /* display one cell per line on subsequent lines */
724 seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr)); 693 seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr));
725
726 return 0; 694 return 0;
727} /* end afs_proc_cell_vlservers_show() */ 695}
728 696
729/*****************************************************************************/
730/* 697/*
731 * open "/proc/fs/afs/<cell>/servers" which provides a summary of active 698 * open "/proc/fs/afs/<cell>/servers" which provides a summary of active
732 * servers 699 * servers
@@ -737,7 +704,7 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
737 struct seq_file *m; 704 struct seq_file *m;
738 int ret; 705 int ret;
739 706
740 cell = afs_get_cell_maybe((struct afs_cell **) &PDE(inode)->data); 707 cell = PDE(inode)->data;
741 if (!cell) 708 if (!cell)
742 return -ENOENT; 709 return -ENOENT;
743 710
@@ -747,34 +714,24 @@ static int afs_proc_cell_servers_open(struct inode *inode, struct file *file)
747 714
748 m = file->private_data; 715 m = file->private_data;
749 m->private = cell; 716 m->private = cell;
750
751 return 0; 717 return 0;
752} /* end afs_proc_cell_servers_open() */ 718}
753 719
754/*****************************************************************************/
755/* 720/*
756 * close the file and release the ref to the cell 721 * close the file and release the ref to the cell
757 */ 722 */
758static int afs_proc_cell_servers_release(struct inode *inode, 723static int afs_proc_cell_servers_release(struct inode *inode,
759 struct file *file) 724 struct file *file)
760{ 725{
761 struct afs_cell *cell = PDE(inode)->data; 726 return seq_release(inode, file);
762 int ret; 727}
763
764 ret = seq_release(inode, file);
765
766 afs_put_cell(cell);
767
768 return ret;
769} /* end afs_proc_cell_servers_release() */
770 728
771/*****************************************************************************/
772/* 729/*
773 * set up the iterator to start reading from the cell's server list and return the 730 * set up the iterator to start reading from the cell's server list and return the
774 * first item 731 * first item
775 */ 732 */
776static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos) 733static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
777 __acquires(m->private->sv_lock) 734 __acquires(m->private->servers_lock)
778{ 735{
779 struct list_head *_p; 736 struct list_head *_p;
780 struct afs_cell *cell = m->private; 737 struct afs_cell *cell = m->private;
@@ -783,7 +740,7 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
783 _enter("cell=%p pos=%Ld", cell, *_pos); 740 _enter("cell=%p pos=%Ld", cell, *_pos);
784 741
785 /* lock the list against modification */ 742 /* lock the list against modification */
786 read_lock(&cell->sv_lock); 743 read_lock(&cell->servers_lock);
787 744
788 /* allow for the header line */ 745 /* allow for the header line */
789 if (!pos) 746 if (!pos)
@@ -791,14 +748,13 @@ static void *afs_proc_cell_servers_start(struct seq_file *m, loff_t *_pos)
791 pos--; 748 pos--;
792 749
793 /* find the n'th element in the list */ 750 /* find the n'th element in the list */
794 list_for_each(_p, &cell->sv_list) 751 list_for_each(_p, &cell->servers)
795 if (!pos--) 752 if (!pos--)
796 break; 753 break;
797 754
798 return _p != &cell->sv_list ? _p : NULL; 755 return _p != &cell->servers ? _p : NULL;
799} /* end afs_proc_cell_servers_start() */ 756}
800 757
801/*****************************************************************************/
802/* 758/*
803 * move to the next server in the cell's server list 759 * move to the next server in the cell's server list
804 */ 760 */
@@ -813,25 +769,22 @@ static void *afs_proc_cell_servers_next(struct seq_file *p, void *v,
813 (*_pos)++; 769 (*_pos)++;
814 770
815 _p = v; 771 _p = v;
816 _p = v == (void *) 1 ? cell->sv_list.next : _p->next; 772 _p = v == (void *) 1 ? cell->servers.next : _p->next;
817 773
818 return _p != &cell->sv_list ? _p : NULL; 774 return _p != &cell->servers ? _p : NULL;
819} /* end afs_proc_cell_servers_next() */ 775}
820 776
821/*****************************************************************************/
822/* 777/*
823 * clean up after reading from the server list 778 * clean up after reading from the server list
824 */ 779 */
825static void afs_proc_cell_servers_stop(struct seq_file *p, void *v) 780static void afs_proc_cell_servers_stop(struct seq_file *p, void *v)
826 __releases(p->private->sv_lock) 781 __releases(p->private->servers_lock)
827{ 782{
828 struct afs_cell *cell = p->private; 783 struct afs_cell *cell = p->private;
829 784
830 read_unlock(&cell->sv_lock); 785 read_unlock(&cell->servers_lock);
831 786}
832} /* end afs_proc_cell_servers_stop() */
833 787
834/*****************************************************************************/
835/* 788/*
836 * display a header line followed by a load of server lines 789 * display a header line followed by a load of server lines
837 */ 790 */
@@ -849,10 +802,7 @@ static int afs_proc_cell_servers_show(struct seq_file *m, void *v)
849 /* display one cell per line on subsequent lines */ 802 /* display one cell per line on subsequent lines */
850 sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr)); 803 sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr));
851 seq_printf(m, "%3d %-15.15s %5d\n", 804 seq_printf(m, "%3d %-15.15s %5d\n",
852 atomic_read(&server->usage), 805 atomic_read(&server->usage), ipaddr, server->fs_state);
853 ipaddr,
854 server->fs_state
855 );
856 806
857 return 0; 807 return 0;
858} /* end afs_proc_cell_servers_show() */ 808}
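
The proc.c conversion above leans on the kernel's seq_file iterator pattern throughout: a start/next/stop/show quartet bound to the /proc entry through seq_open(). A minimal sketch of that wiring follows, with illustrative example_* names (not part of the patch), using the same (void *) 1 header-row sentinel the AFS code uses:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/seq_file.h>

static LIST_HEAD(example_list);
static DECLARE_RWSEM(example_sem);

/* take the lock and find the element at *_pos; (void *) 1 is the header row */
static void *example_start(struct seq_file *m, loff_t *_pos)
{
	struct list_head *_p;
	loff_t pos = *_pos;

	down_read(&example_sem);

	if (!pos)
		return (void *) 1;
	pos--;

	list_for_each(_p, &example_list)
		if (!pos--)
			break;
	return _p != &example_list ? _p : NULL;
}

/* step to the following element, stepping off the header sentinel first */
static void *example_next(struct seq_file *m, void *v, loff_t *_pos)
{
	struct list_head *_p = v;

	(*_pos)++;
	_p = (v == (void *) 1) ? example_list.next : _p->next;
	return _p != &example_list ? _p : NULL;
}

/* drop the lock taken in ->start() */
static void example_stop(struct seq_file *m, void *v)
{
	up_read(&example_sem);
}

/* emit one line; the sentinel produces the column header */
static int example_show(struct seq_file *m, void *v)
{
	if (v == (void *) 1) {
		seq_puts(m, "USE NAME\n");
		return 0;
	}
	/* a real implementation would list_entry(v, ...) and seq_printf() here */
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_start,
	.next	= example_next,
	.stop	= example_stop,
	.show	= example_show,
};

static int example_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &example_seq_ops);
}

The sentinel lets ->show() print a column header without a real list element behind it, and ->stop() is guaranteed to run, so it is the safe place to drop the lock taken in ->start().
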
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
new file mode 100644
index 000000000000..e7b047328a39
--- /dev/null
+++ b/fs/afs/rxrpc.c
@@ -0,0 +1,782 @@
1/* Maintain an RxRPC server socket to do AFS communications through
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <net/sock.h>
13#include <net/af_rxrpc.h>
14#include <rxrpc/packet.h>
15#include "internal.h"
16#include "afs_cm.h"
17
18static struct socket *afs_socket; /* my RxRPC socket */
19static struct workqueue_struct *afs_async_calls;
20static atomic_t afs_outstanding_calls;
21static atomic_t afs_outstanding_skbs;
22
23static void afs_wake_up_call_waiter(struct afs_call *);
24static int afs_wait_for_call_to_complete(struct afs_call *);
25static void afs_wake_up_async_call(struct afs_call *);
26static int afs_dont_wait_for_call_to_complete(struct afs_call *);
27static void afs_process_async_call(struct work_struct *);
28static void afs_rx_interceptor(struct sock *, unsigned long, struct sk_buff *);
29static int afs_deliver_cm_op_id(struct afs_call *, struct sk_buff *, bool);
30
31/* synchronous call management */
32const struct afs_wait_mode afs_sync_call = {
33 .rx_wakeup = afs_wake_up_call_waiter,
34 .wait = afs_wait_for_call_to_complete,
35};
36
37/* asynchronous call management */
38const struct afs_wait_mode afs_async_call = {
39 .rx_wakeup = afs_wake_up_async_call,
40 .wait = afs_dont_wait_for_call_to_complete,
41};
42
43/* asynchronous incoming call management */
44static const struct afs_wait_mode afs_async_incoming_call = {
45 .rx_wakeup = afs_wake_up_async_call,
46};
47
48/* asynchronous incoming call initial processing */
49static const struct afs_call_type afs_RXCMxxxx = {
50 .name = "CB.xxxx",
51 .deliver = afs_deliver_cm_op_id,
52 .abort_to_error = afs_abort_to_error,
53};
54
55static void afs_collect_incoming_call(struct work_struct *);
56
57static struct sk_buff_head afs_incoming_calls;
58static DECLARE_WORK(afs_collect_incoming_call_work, afs_collect_incoming_call);
59
60/*
61 * open an RxRPC socket and bind it to be a server for callback notifications
62 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
63 */
64int afs_open_socket(void)
65{
66 struct sockaddr_rxrpc srx;
67 struct socket *socket;
68 int ret;
69
70 _enter("");
71
72 skb_queue_head_init(&afs_incoming_calls);
73
74 afs_async_calls = create_singlethread_workqueue("kafsd");
75 if (!afs_async_calls) {
76 _leave(" = -ENOMEM [wq]");
77 return -ENOMEM;
78 }
79
80 ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
81 if (ret < 0) {
82 destroy_workqueue(afs_async_calls);
83 _leave(" = %d [socket]", ret);
84 return ret;
85 }
86
87 socket->sk->sk_allocation = GFP_NOFS;
88
89 /* bind the callback manager's address to make this a server socket */
90 srx.srx_family = AF_RXRPC;
91 srx.srx_service = CM_SERVICE;
92 srx.transport_type = SOCK_DGRAM;
93 srx.transport_len = sizeof(srx.transport.sin);
94 srx.transport.sin.sin_family = AF_INET;
95 srx.transport.sin.sin_port = htons(AFS_CM_PORT);
96 memset(&srx.transport.sin.sin_addr, 0,
97 sizeof(srx.transport.sin.sin_addr));
98
99 ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
100 if (ret < 0) {
101 sock_release(socket);
102 _leave(" = %d [bind]", ret);
103 return ret;
104 }
105
106 rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
107
108 afs_socket = socket;
109 _leave(" = 0");
110 return 0;
111}
112
113/*
114 * close the RxRPC socket AFS was using
115 */
116void afs_close_socket(void)
117{
118 _enter("");
119
120 sock_release(afs_socket);
121
122 _debug("dework");
123 destroy_workqueue(afs_async_calls);
124
125 ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
126 ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
127 _leave("");
128}
129
130/*
131 * note that the data in a socket buffer is now delivered and that the buffer
132 * should be freed
133 */
134static void afs_data_delivered(struct sk_buff *skb)
135{
136 if (!skb) {
137 _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs));
138 dump_stack();
139 } else {
140 _debug("DLVR %p{%u} [%d]",
141 skb, skb->mark, atomic_read(&afs_outstanding_skbs));
142 if (atomic_dec_return(&afs_outstanding_skbs) == -1)
143 BUG();
144 rxrpc_kernel_data_delivered(skb);
145 }
146}
147
148/*
149 * free a socket buffer
150 */
151static void afs_free_skb(struct sk_buff *skb)
152{
153 if (!skb) {
154 _debug("FREE NULL [%d]", atomic_read(&afs_outstanding_skbs));
155 dump_stack();
156 } else {
157 _debug("FREE %p{%u} [%d]",
158 skb, skb->mark, atomic_read(&afs_outstanding_skbs));
159 if (atomic_dec_return(&afs_outstanding_skbs) == -1)
160 BUG();
161 rxrpc_kernel_free_skb(skb);
162 }
163}
164
165/*
166 * free a call
167 */
168static void afs_free_call(struct afs_call *call)
169{
170 _debug("DONE %p{%s} [%d]",
171 call, call->type->name, atomic_read(&afs_outstanding_calls));
172 if (atomic_dec_return(&afs_outstanding_calls) == -1)
173 BUG();
174
175 ASSERTCMP(call->rxcall, ==, NULL);
176 ASSERT(!work_pending(&call->async_work));
177 ASSERT(skb_queue_empty(&call->rx_queue));
178 ASSERT(call->type->name != NULL);
179
180 kfree(call->request);
181 kfree(call);
182}
183
184/*
185 * allocate a call with flat request and reply buffers
186 */
187struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
188 size_t request_size, size_t reply_size)
189{
190 struct afs_call *call;
191
192 call = kzalloc(sizeof(*call), GFP_NOFS);
193 if (!call)
194 goto nomem_call;
195
196 _debug("CALL %p{%s} [%d]",
197 call, type->name, atomic_read(&afs_outstanding_calls));
198 atomic_inc(&afs_outstanding_calls);
199
200 call->type = type;
201 call->request_size = request_size;
202 call->reply_max = reply_size;
203
204 if (request_size) {
205 call->request = kmalloc(request_size, GFP_NOFS);
206 if (!call->request)
207 goto nomem_free;
208 }
209
210 if (reply_size) {
211 call->buffer = kmalloc(reply_size, GFP_NOFS);
212 if (!call->buffer)
213 goto nomem_free;
214 }
215
216 init_waitqueue_head(&call->waitq);
217 skb_queue_head_init(&call->rx_queue);
218 return call;
219
220nomem_free:
221 afs_free_call(call);
222nomem_call:
223 return NULL;
224}
225
226/*
227 * clean up a call with flat buffer
228 */
229void afs_flat_call_destructor(struct afs_call *call)
230{
231 _enter("");
232
233 kfree(call->request);
234 call->request = NULL;
235 kfree(call->buffer);
236 call->buffer = NULL;
237}
238
239/*
240 * initiate a call
241 */
242int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
243 const struct afs_wait_mode *wait_mode)
244{
245 struct sockaddr_rxrpc srx;
246 struct rxrpc_call *rxcall;
247 struct msghdr msg;
248 struct kvec iov[1];
249 int ret;
250
251 _enter("%x,{%d},", addr->s_addr, ntohs(call->port));
252
253 ASSERT(call->type != NULL);
254 ASSERT(call->type->name != NULL);
255
256 _debug("MAKE %p{%s} [%d]",
257 call, call->type->name, atomic_read(&afs_outstanding_calls));
258
259 call->wait_mode = wait_mode;
260 INIT_WORK(&call->async_work, afs_process_async_call);
261
262 memset(&srx, 0, sizeof(srx));
263 srx.srx_family = AF_RXRPC;
264 srx.srx_service = call->service_id;
265 srx.transport_type = SOCK_DGRAM;
266 srx.transport_len = sizeof(srx.transport.sin);
267 srx.transport.sin.sin_family = AF_INET;
268 srx.transport.sin.sin_port = call->port;
269 memcpy(&srx.transport.sin.sin_addr, addr, 4);
270
271 /* create a call */
272 rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
273 (unsigned long) call, gfp);
274 call->key = NULL;
275 if (IS_ERR(rxcall)) {
276 ret = PTR_ERR(rxcall);
277 goto error_kill_call;
278 }
279
280 call->rxcall = rxcall;
281
282 /* send the request */
283 iov[0].iov_base = call->request;
284 iov[0].iov_len = call->request_size;
285
286 msg.msg_name = NULL;
287 msg.msg_namelen = 0;
288 msg.msg_iov = (struct iovec *) iov;
289 msg.msg_iovlen = 1;
290 msg.msg_control = NULL;
291 msg.msg_controllen = 0;
292 msg.msg_flags = 0;
293
294 /* have to change the state *before* sending the last packet as RxRPC
295 * might give us the reply before it returns from sending the
296 * request */
297 call->state = AFS_CALL_AWAIT_REPLY;
298 ret = rxrpc_kernel_send_data(rxcall, &msg, call->request_size);
299 if (ret < 0)
300 goto error_do_abort;
301
302 /* at this point, an async call may no longer exist as it may have
303 * already completed */
304 return wait_mode->wait(call);
305
306error_do_abort:
307 rxrpc_kernel_abort_call(rxcall, RX_USER_ABORT);
308 rxrpc_kernel_end_call(rxcall);
309 call->rxcall = NULL;
310error_kill_call:
311 call->type->destructor(call);
312 afs_free_call(call);
313 _leave(" = %d", ret);
314 return ret;
315}
316
317/*
318 * handles intercepted messages that were arriving in the socket's Rx queue
319 * - called with the socket receive queue lock held to ensure message ordering
320 * - called with softirqs disabled
321 */
322static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID,
323 struct sk_buff *skb)
324{
325 struct afs_call *call = (struct afs_call *) user_call_ID;
326
327 _enter("%p,,%u", call, skb->mark);
328
329 _debug("ICPT %p{%u} [%d]",
330 skb, skb->mark, atomic_read(&afs_outstanding_skbs));
331
332 ASSERTCMP(sk, ==, afs_socket->sk);
333 atomic_inc(&afs_outstanding_skbs);
334
335 if (!call) {
336		/* it's an incoming call for our callback service */
337 skb_queue_tail(&afs_incoming_calls, skb);
338 schedule_work(&afs_collect_incoming_call_work);
339 } else {
340 /* route the messages directly to the appropriate call */
341 skb_queue_tail(&call->rx_queue, skb);
342 call->wait_mode->rx_wakeup(call);
343 }
344
345 _leave("");
346}
347
348/*
349 * deliver messages to a call
350 */
351static void afs_deliver_to_call(struct afs_call *call)
352{
353 struct sk_buff *skb;
354 bool last;
355 u32 abort_code;
356 int ret;
357
358 _enter("");
359
360 while ((call->state == AFS_CALL_AWAIT_REPLY ||
361 call->state == AFS_CALL_AWAIT_OP_ID ||
362 call->state == AFS_CALL_AWAIT_REQUEST ||
363 call->state == AFS_CALL_AWAIT_ACK) &&
364 (skb = skb_dequeue(&call->rx_queue))) {
365 switch (skb->mark) {
366 case RXRPC_SKB_MARK_DATA:
367 _debug("Rcv DATA");
368 last = rxrpc_kernel_is_data_last(skb);
369 ret = call->type->deliver(call, skb, last);
370 switch (ret) {
371 case 0:
372 if (last &&
373 call->state == AFS_CALL_AWAIT_REPLY)
374 call->state = AFS_CALL_COMPLETE;
375 break;
376 case -ENOTCONN:
377 abort_code = RX_CALL_DEAD;
378 goto do_abort;
379 case -ENOTSUPP:
380 abort_code = RX_INVALID_OPERATION;
381 goto do_abort;
382 default:
383 abort_code = RXGEN_CC_UNMARSHAL;
384 if (call->state != AFS_CALL_AWAIT_REPLY)
385 abort_code = RXGEN_SS_UNMARSHAL;
386 do_abort:
387 rxrpc_kernel_abort_call(call->rxcall,
388 abort_code);
389 call->error = ret;
390 call->state = AFS_CALL_ERROR;
391 break;
392 }
393 afs_data_delivered(skb);
394 skb = NULL;
395 continue;
396 case RXRPC_SKB_MARK_FINAL_ACK:
397 _debug("Rcv ACK");
398 call->state = AFS_CALL_COMPLETE;
399 break;
400 case RXRPC_SKB_MARK_BUSY:
401 _debug("Rcv BUSY");
402 call->error = -EBUSY;
403 call->state = AFS_CALL_BUSY;
404 break;
405 case RXRPC_SKB_MARK_REMOTE_ABORT:
406 abort_code = rxrpc_kernel_get_abort_code(skb);
407 call->error = call->type->abort_to_error(abort_code);
408 call->state = AFS_CALL_ABORTED;
409 _debug("Rcv ABORT %u -> %d", abort_code, call->error);
410 break;
411 case RXRPC_SKB_MARK_NET_ERROR:
412 call->error = -rxrpc_kernel_get_error_number(skb);
413 call->state = AFS_CALL_ERROR;
414 _debug("Rcv NET ERROR %d", call->error);
415 break;
416 case RXRPC_SKB_MARK_LOCAL_ERROR:
417 call->error = -rxrpc_kernel_get_error_number(skb);
418 call->state = AFS_CALL_ERROR;
419 _debug("Rcv LOCAL ERROR %d", call->error);
420 break;
421 default:
422 BUG();
423 break;
424 }
425
426 afs_free_skb(skb);
427 }
428
429 /* make sure the queue is empty if the call is done with (we might have
430 * aborted the call early because of an unmarshalling error) */
431 if (call->state >= AFS_CALL_COMPLETE) {
432 while ((skb = skb_dequeue(&call->rx_queue)))
433 afs_free_skb(skb);
434 if (call->incoming) {
435 rxrpc_kernel_end_call(call->rxcall);
436 call->rxcall = NULL;
437 call->type->destructor(call);
438 afs_free_call(call);
439 }
440 }
441
442 _leave("");
443}
444
445/*
446 * wait synchronously for a call to complete
447 */
448static int afs_wait_for_call_to_complete(struct afs_call *call)
449{
450 struct sk_buff *skb;
451 int ret;
452
453 DECLARE_WAITQUEUE(myself, current);
454
455 _enter("");
456
457 add_wait_queue(&call->waitq, &myself);
458 for (;;) {
459 set_current_state(TASK_INTERRUPTIBLE);
460
461 /* deliver any messages that are in the queue */
462 if (!skb_queue_empty(&call->rx_queue)) {
463 __set_current_state(TASK_RUNNING);
464 afs_deliver_to_call(call);
465 continue;
466 }
467
468 ret = call->error;
469 if (call->state >= AFS_CALL_COMPLETE)
470 break;
471 ret = -EINTR;
472 if (signal_pending(current))
473 break;
474 schedule();
475 }
476
477 remove_wait_queue(&call->waitq, &myself);
478 __set_current_state(TASK_RUNNING);
479
480 /* kill the call */
481 if (call->state < AFS_CALL_COMPLETE) {
482 _debug("call incomplete");
483 rxrpc_kernel_abort_call(call->rxcall, RX_CALL_DEAD);
484 while ((skb = skb_dequeue(&call->rx_queue)))
485 afs_free_skb(skb);
486 }
487
488 _debug("call complete");
489 rxrpc_kernel_end_call(call->rxcall);
490 call->rxcall = NULL;
491 call->type->destructor(call);
492 afs_free_call(call);
493 _leave(" = %d", ret);
494 return ret;
495}
496
497/*
498 * wake up a waiting call
499 */
500static void afs_wake_up_call_waiter(struct afs_call *call)
501{
502 wake_up(&call->waitq);
503}
504
505/*
506 * wake up an asynchronous call
507 */
508static void afs_wake_up_async_call(struct afs_call *call)
509{
510 _enter("");
511 queue_work(afs_async_calls, &call->async_work);
512}
513
514/*
515 * put a call into asynchronous mode
516 * - mustn't touch the call descriptor as the call may have completed by the
517 * time we get here
518 */
519static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
520{
521 _enter("");
522 return -EINPROGRESS;
523}
524
525/*
526 * delete an asynchronous call
527 */
528static void afs_delete_async_call(struct work_struct *work)
529{
530 struct afs_call *call =
531 container_of(work, struct afs_call, async_work);
532
533 _enter("");
534
535 afs_free_call(call);
536
537 _leave("");
538}
539
540/*
541 * perform processing on an asynchronous call
542 * - on a multiple-thread workqueue this work item may try to run on several
543 * CPUs at the same time
544 */
545static void afs_process_async_call(struct work_struct *work)
546{
547 struct afs_call *call =
548 container_of(work, struct afs_call, async_work);
549
550 _enter("");
551
552 if (!skb_queue_empty(&call->rx_queue))
553 afs_deliver_to_call(call);
554
555 if (call->state >= AFS_CALL_COMPLETE && call->wait_mode) {
556 if (call->wait_mode->async_complete)
557 call->wait_mode->async_complete(call->reply,
558 call->error);
559 call->reply = NULL;
560
561 /* kill the call */
562 rxrpc_kernel_end_call(call->rxcall);
563 call->rxcall = NULL;
564 if (call->type->destructor)
565 call->type->destructor(call);
566
567 /* we can't just delete the call because the work item may be
568 * queued */
569 PREPARE_WORK(&call->async_work, afs_delete_async_call);
570 queue_work(afs_async_calls, &call->async_work);
571 }
572
573 _leave("");
574}
575
576/*
577 * empty a socket buffer into a flat reply buffer
578 */
579void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
580{
581 size_t len = skb->len;
582
583 if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0)
584 BUG();
585 call->reply_size += len;
586}
587
588/*
589 * accept the backlog of incoming calls
590 */
591static void afs_collect_incoming_call(struct work_struct *work)
592{
593 struct rxrpc_call *rxcall;
594 struct afs_call *call = NULL;
595 struct sk_buff *skb;
596
597 while ((skb = skb_dequeue(&afs_incoming_calls))) {
598 _debug("new call");
599
600 /* don't need the notification */
601 afs_free_skb(skb);
602
603 if (!call) {
604 call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
605 if (!call) {
606 rxrpc_kernel_reject_call(afs_socket);
607 return;
608 }
609
610 INIT_WORK(&call->async_work, afs_process_async_call);
611 call->wait_mode = &afs_async_incoming_call;
612 call->type = &afs_RXCMxxxx;
613 init_waitqueue_head(&call->waitq);
614 skb_queue_head_init(&call->rx_queue);
615 call->state = AFS_CALL_AWAIT_OP_ID;
616
617 _debug("CALL %p{%s} [%d]",
618 call, call->type->name,
619 atomic_read(&afs_outstanding_calls));
620 atomic_inc(&afs_outstanding_calls);
621 }
622
623 rxcall = rxrpc_kernel_accept_call(afs_socket,
624 (unsigned long) call);
625 if (!IS_ERR(rxcall)) {
626 call->rxcall = rxcall;
627 call = NULL;
628 }
629 }
630
631 if (call)
632 afs_free_call(call);
633}
634
635/*
636 * grab the operation ID from an incoming cache manager call
637 */
638static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb,
639 bool last)
640{
641 size_t len = skb->len;
642 void *oibuf = (void *) &call->operation_ID;
643
644 _enter("{%u},{%zu},%d", call->offset, len, last);
645
646 ASSERTCMP(call->offset, <, 4);
647
648 /* the operation ID forms the first four bytes of the request data */
649 len = min_t(size_t, len, 4 - call->offset);
650 if (skb_copy_bits(skb, 0, oibuf + call->offset, len) < 0)
651 BUG();
652 if (!pskb_pull(skb, len))
653 BUG();
654 call->offset += len;
655
656 if (call->offset < 4) {
657 if (last) {
658 _leave(" = -EBADMSG [op ID short]");
659 return -EBADMSG;
660 }
661 _leave(" = 0 [incomplete]");
662 return 0;
663 }
664
665 call->state = AFS_CALL_AWAIT_REQUEST;
666
667 /* ask the cache manager to route the call (it'll change the call type
668 * if successful) */
669 if (!afs_cm_incoming_call(call))
670 return -ENOTSUPP;
671
672	/* pass responsibility for the remainder of this message off to the
673 * cache manager op */
674 return call->type->deliver(call, skb, last);
675}
676
677/*
678 * send an empty reply
679 */
680void afs_send_empty_reply(struct afs_call *call)
681{
682 struct msghdr msg;
683 struct iovec iov[1];
684
685 _enter("");
686
687 iov[0].iov_base = NULL;
688 iov[0].iov_len = 0;
689 msg.msg_name = NULL;
690 msg.msg_namelen = 0;
691 msg.msg_iov = iov;
692 msg.msg_iovlen = 0;
693 msg.msg_control = NULL;
694 msg.msg_controllen = 0;
695 msg.msg_flags = 0;
696
697 call->state = AFS_CALL_AWAIT_ACK;
698 switch (rxrpc_kernel_send_data(call->rxcall, &msg, 0)) {
699 case 0:
700 _leave(" [replied]");
701 return;
702
703 case -ENOMEM:
704 _debug("oom");
705 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
706 default:
707 rxrpc_kernel_end_call(call->rxcall);
708 call->rxcall = NULL;
709 call->type->destructor(call);
710 afs_free_call(call);
711 _leave(" [error]");
712 return;
713 }
714}
715
716/*
717 * send a simple reply
718 */
719void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
720{
721 struct msghdr msg;
722 struct iovec iov[1];
723
724 _enter("");
725
726 iov[0].iov_base = (void *) buf;
727 iov[0].iov_len = len;
728 msg.msg_name = NULL;
729 msg.msg_namelen = 0;
730 msg.msg_iov = iov;
731 msg.msg_iovlen = 1;
732 msg.msg_control = NULL;
733 msg.msg_controllen = 0;
734 msg.msg_flags = 0;
735
736 call->state = AFS_CALL_AWAIT_ACK;
737 switch (rxrpc_kernel_send_data(call->rxcall, &msg, len)) {
738 case 0:
739 _leave(" [replied]");
740 return;
741
742 case -ENOMEM:
743 _debug("oom");
744 rxrpc_kernel_abort_call(call->rxcall, RX_USER_ABORT);
745 default:
746 rxrpc_kernel_end_call(call->rxcall);
747 call->rxcall = NULL;
748 call->type->destructor(call);
749 afs_free_call(call);
750 _leave(" [error]");
751 return;
752 }
753}
754
755/*
756 * extract a piece of data from the received data socket buffers
757 */
758int afs_extract_data(struct afs_call *call, struct sk_buff *skb,
759 bool last, void *buf, size_t count)
760{
761 size_t len = skb->len;
762
763 _enter("{%u},{%zu},%d,,%zu", call->offset, len, last, count);
764
765 ASSERTCMP(call->offset, <, count);
766
767 len = min_t(size_t, len, count - call->offset);
768 if (skb_copy_bits(skb, 0, buf + call->offset, len) < 0 ||
769 !pskb_pull(skb, len))
770 BUG();
771 call->offset += len;
772
773 if (call->offset < count) {
774 if (last) {
775			_leave(" = -EBADMSG [%u < %zu]", call->offset, count);
776 return -EBADMSG;
777 }
778 _leave(" = -EAGAIN");
779 return -EAGAIN;
780 }
781 return 0;
782}
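
A protocol module drives this file through an afs_call_type plus the afs_alloc_flat_call()/afs_make_call() pair defined above. The sketch below shows a hypothetical synchronous caller; the EXAMPLE_* constants and the 8-byte request layout are made up for illustration (the real users are fsclient.c and vlclient.c):

/* in a protocol module that #includes "internal.h" */

#define EXAMPLE_SERVICE_ID	52	/* hypothetical service */
#define EXAMPLE_PORT		7003	/* hypothetical UDP port */
#define EXAMPLE_OPERATION	100	/* hypothetical RPC operation ID */

static int example_deliver(struct afs_call *call, struct sk_buff *skb,
			   bool last)
{
	/* accumulate the reply into the flat buffer */
	afs_transfer_reply(call, skb);
	if (!last)
		return 0;

	if (call->reply_size != call->reply_max)
		return -EBADMSG;

	/* unmarshal the words in call->buffer here */
	return 0;
}

static const struct afs_call_type example_call_type = {
	.name		= "EX.Example",
	.deliver	= example_deliver,
	.abort_to_error	= afs_abort_to_error,
	.destructor	= afs_flat_call_destructor,
};

static int example_call(struct in_addr *addr, struct key *key, u32 arg)
{
	struct afs_call *call;
	__be32 *bp;

	/* 8-byte request, 4-byte reply */
	call = afs_alloc_flat_call(&example_call_type, 8, 4);
	if (!call)
		return -ENOMEM;

	call->key = key;
	call->service_id = EXAMPLE_SERVICE_ID;
	call->port = htons(EXAMPLE_PORT);

	/* marshal the request */
	bp = call->request;
	*bp++ = htonl(EXAMPLE_OPERATION);
	*bp++ = htonl(arg);

	/* blocks until the reply has been delivered or the call failed */
	return afs_make_call(addr, call, GFP_NOFS, &afs_sync_call);
}

Because afs_wait_for_call_to_complete() frees the call before returning, any reply data must be unmarshalled inside ->deliver(), not by the caller afterwards.
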
diff --git a/fs/afs/security.c b/fs/afs/security.c
new file mode 100644
index 000000000000..f9f424d80458
--- /dev/null
+++ b/fs/afs/security.c
@@ -0,0 +1,356 @@
1/* AFS security handling
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/init.h>
13#include <linux/slab.h>
14#include <linux/fs.h>
15#include <linux/ctype.h>
16#include <keys/rxrpc-type.h>
17#include "internal.h"
18
19/*
20 * get a key
21 */
22struct key *afs_request_key(struct afs_cell *cell)
23{
24 struct key *key;
25
26 _enter("{%x}", key_serial(cell->anonymous_key));
27
28 _debug("key %s", cell->anonymous_key->description);
29 key = request_key(&key_type_rxrpc, cell->anonymous_key->description,
30 NULL);
31 if (IS_ERR(key)) {
32 if (PTR_ERR(key) != -ENOKEY) {
33 _leave(" = %ld", PTR_ERR(key));
34 return key;
35 }
36
37 /* act as anonymous user */
38 _leave(" = {%x} [anon]", key_serial(cell->anonymous_key));
39 return key_get(cell->anonymous_key);
40 } else {
41 /* act as authorised user */
42 _leave(" = {%x} [auth]", key_serial(key));
43 return key;
44 }
45}
46
47/*
48 * dispose of a permits list
49 */
50void afs_zap_permits(struct rcu_head *rcu)
51{
52 struct afs_permits *permits =
53 container_of(rcu, struct afs_permits, rcu);
54 int loop;
55
56 _enter("{%d}", permits->count);
57
58 for (loop = permits->count - 1; loop >= 0; loop--)
59 key_put(permits->permits[loop].key);
60 kfree(permits);
61}
62
63/*
64 * dispose of a permits list in which all the key pointers have been copied
65 */
66static void afs_dispose_of_permits(struct rcu_head *rcu)
67{
68 struct afs_permits *permits =
69 container_of(rcu, struct afs_permits, rcu);
70
71 _enter("{%d}", permits->count);
72
73 kfree(permits);
74}
75
76/*
77 * get the authorising vnode - this is the specified inode itself if it's a
78 * directory or it's the parent directory if the specified inode is a file or
79 * symlink
80 * - the caller must release the ref on the inode
81 */
82static struct afs_vnode *afs_get_auth_inode(struct afs_vnode *vnode,
83 struct key *key)
84{
85 struct afs_vnode *auth_vnode;
86 struct inode *auth_inode;
87
88 _enter("");
89
90 if (S_ISDIR(vnode->vfs_inode.i_mode)) {
91 auth_inode = igrab(&vnode->vfs_inode);
92 ASSERT(auth_inode != NULL);
93 } else {
94 auth_inode = afs_iget(vnode->vfs_inode.i_sb, key,
95 &vnode->status.parent, NULL, NULL);
96 if (IS_ERR(auth_inode))
97 return ERR_PTR(PTR_ERR(auth_inode));
98 }
99
100 auth_vnode = AFS_FS_I(auth_inode);
101 _leave(" = {%x}", auth_vnode->fid.vnode);
102 return auth_vnode;
103}
104
105/*
106 * clear the permit cache on a directory vnode
107 */
108void afs_clear_permits(struct afs_vnode *vnode)
109{
110 struct afs_permits *permits;
111
112 _enter("{%x}", vnode->fid.vnode);
113
114 mutex_lock(&vnode->permits_lock);
115 permits = vnode->permits;
116 rcu_assign_pointer(vnode->permits, NULL);
117 mutex_unlock(&vnode->permits_lock);
118
119 if (permits)
120 call_rcu(&permits->rcu, afs_zap_permits);
121 _leave("");
122}
123
124/*
125 * add the result obtained for a vnode to its or its parent directory's cache
126 * for the key used to access it
127 */
128void afs_cache_permit(struct afs_vnode *vnode, struct key *key, long acl_order)
129{
130 struct afs_permits *permits, *xpermits;
131 struct afs_permit *permit;
132 struct afs_vnode *auth_vnode;
133 int count, loop;
134
135 _enter("{%x},%x,%lx", vnode->fid.vnode, key_serial(key), acl_order);
136
137 auth_vnode = afs_get_auth_inode(vnode, key);
138 if (IS_ERR(auth_vnode)) {
139 _leave(" [get error %ld]", PTR_ERR(auth_vnode));
140 return;
141 }
142
143 mutex_lock(&auth_vnode->permits_lock);
144
145 /* guard against a rename being detected whilst we waited for the
146 * lock */
147 if (memcmp(&auth_vnode->fid, &vnode->status.parent,
148 sizeof(struct afs_fid)) != 0) {
149 _debug("renamed");
150 goto out_unlock;
151 }
152
153 /* have to be careful as the directory's callback may be broken between
154 * us receiving the status we're trying to cache and us getting the
155 * lock to update the cache for the status */
156 if (auth_vnode->acl_order - acl_order > 0) {
157 _debug("ACL changed?");
158 goto out_unlock;
159 }
160
161 /* always update the anonymous mask */
162 _debug("anon access %x", vnode->status.anon_access);
163 auth_vnode->status.anon_access = vnode->status.anon_access;
164 if (key == vnode->volume->cell->anonymous_key)
165 goto out_unlock;
166
167 xpermits = auth_vnode->permits;
168 count = 0;
169 if (xpermits) {
170 /* see if the permit is already in the list
171 * - if it is then we just amend the list
172 */
173 count = xpermits->count;
174 permit = xpermits->permits;
175 for (loop = count; loop > 0; loop--) {
176 if (permit->key == key) {
177 permit->access_mask =
178 vnode->status.caller_access;
179 goto out_unlock;
180 }
181 permit++;
182 }
183 }
184
185 permits = kmalloc(sizeof(*permits) + sizeof(*permit) * (count + 1),
186 GFP_NOFS);
187 if (!permits)
188 goto out_unlock;
189
190	if (xpermits)
191		memcpy(permits->permits, xpermits->permits, count * sizeof(struct afs_permit));
192
193 _debug("key %x access %x",
194 key_serial(key), vnode->status.caller_access);
195 permits->permits[count].access_mask = vnode->status.caller_access;
196 permits->permits[count].key = key_get(key);
197 permits->count = count + 1;
198
199 rcu_assign_pointer(auth_vnode->permits, permits);
200 if (xpermits)
201 call_rcu(&xpermits->rcu, afs_dispose_of_permits);
202
203out_unlock:
204 mutex_unlock(&auth_vnode->permits_lock);
205 iput(&auth_vnode->vfs_inode);
206 _leave("");
207}
208
209/*
210 * check with the fileserver to see if the directory or parent directory is
211 * permitted to be accessed with this authorisation, and if so, what access it
212 * is granted
213 */
214static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
215 afs_access_t *_access)
216{
217 struct afs_permits *permits;
218 struct afs_permit *permit;
219 struct afs_vnode *auth_vnode;
220 bool valid;
221 int loop, ret;
222
223 _enter("");
224
225 auth_vnode = afs_get_auth_inode(vnode, key);
226 if (IS_ERR(auth_vnode)) {
227 *_access = 0;
228 _leave(" = %ld", PTR_ERR(auth_vnode));
229 return PTR_ERR(auth_vnode);
230 }
231
232 ASSERT(S_ISDIR(auth_vnode->vfs_inode.i_mode));
233
234 /* check the permits to see if we've got one yet */
235 if (key == auth_vnode->volume->cell->anonymous_key) {
236 _debug("anon");
237 *_access = auth_vnode->status.anon_access;
238 valid = true;
239 } else {
240 valid = false;
241 rcu_read_lock();
242 permits = rcu_dereference(auth_vnode->permits);
243 if (permits) {
244 permit = permits->permits;
245 for (loop = permits->count; loop > 0; loop--) {
246 if (permit->key == key) {
247 _debug("found in cache");
248 *_access = permit->access_mask;
249 valid = true;
250 break;
251 }
252 permit++;
253 }
254 }
255 rcu_read_unlock();
256 }
257
258 if (!valid) {
259 /* check the status on the file we're actually interested in
260 * (the post-processing will cache the result on auth_vnode) */
261 _debug("no valid permit");
262
263 set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
264 ret = afs_vnode_fetch_status(vnode, auth_vnode, key);
265 if (ret < 0) {
266 iput(&auth_vnode->vfs_inode);
267 *_access = 0;
268 _leave(" = %d", ret);
269 return ret;
270 }
271 }
272
273 *_access = vnode->status.caller_access;
274 iput(&auth_vnode->vfs_inode);
275 _leave(" = 0 [access %x]", *_access);
276 return 0;
277}
278
279/*
280 * check the permissions on an AFS file
281 * - AFS ACLs are attached to directories only, and a file is controlled by its
282 * parent directory's ACL
283 */
284int afs_permission(struct inode *inode, int mask, struct nameidata *nd)
285{
286 struct afs_vnode *vnode = AFS_FS_I(inode);
287 afs_access_t access;
288 struct key *key;
289 int ret;
290
291 _enter("{{%x:%x},%lx},%x,",
292 vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
293
294 key = afs_request_key(vnode->volume->cell);
295 if (IS_ERR(key)) {
296 _leave(" = %ld [key]", PTR_ERR(key));
297 return PTR_ERR(key);
298 }
299
300 /* if the promise has expired, we need to check the server again */
301 if (!vnode->cb_promised) {
302 _debug("not promised");
303 ret = afs_vnode_fetch_status(vnode, NULL, key);
304 if (ret < 0)
305 goto error;
306 _debug("new promise [fl=%lx]", vnode->flags);
307 }
308
309 /* check the permits to see if we've got one yet */
310 ret = afs_check_permit(vnode, key, &access);
311 if (ret < 0)
312 goto error;
313
314 /* interpret the access mask */
315 _debug("REQ %x ACC %x on %s",
316 mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
317
318 if (S_ISDIR(inode->i_mode)) {
319 if (mask & MAY_EXEC) {
320 if (!(access & AFS_ACE_LOOKUP))
321 goto permission_denied;
322 } else if (mask & MAY_READ) {
323 if (!(access & AFS_ACE_READ))
324 goto permission_denied;
325 } else if (mask & MAY_WRITE) {
326 if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
327 AFS_ACE_INSERT | /* create, mkdir, symlink, rename to */
328 AFS_ACE_WRITE))) /* chmod */
329 goto permission_denied;
330 } else {
331 BUG();
332 }
333 } else {
334 if (!(access & AFS_ACE_LOOKUP))
335 goto permission_denied;
336 if (mask & (MAY_EXEC | MAY_READ)) {
337 if (!(access & AFS_ACE_READ))
338 goto permission_denied;
339 } else if (mask & MAY_WRITE) {
340 if (!(access & AFS_ACE_WRITE))
341 goto permission_denied;
342 }
343 }
344
345 key_put(key);
346 ret = generic_permission(inode, mask, NULL);
347 _leave(" = %d", ret);
348 return ret;
349
350permission_denied:
351 ret = -EACCES;
352error:
353 key_put(key);
354 _leave(" = %d", ret);
355 return ret;
356}
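
The permit cache above is the classic RCU copy-and-publish idiom: readers walk the array locklessly, writers build a replacement under the mutex, publish it with rcu_assign_pointer() and reclaim the old copy after a grace period. Stripped to its bones, with illustrative example_* names:

#include <linux/key.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_cache {
	struct rcu_head rcu;
	int count;
	struct key *keys[];
};

static void example_reclaim(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_cache, rcu));
}

/* reader: no locks, only an RCU read-side critical section */
static bool example_lookup(struct example_cache **cachep, struct key *key)
{
	struct example_cache *cache;
	bool found = false;
	int i;

	rcu_read_lock();
	cache = rcu_dereference(*cachep);
	for (i = 0; cache && i < cache->count; i++) {
		if (cache->keys[i] == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* writer: copy, extend, publish, then defer freeing of the old copy */
static int example_add(struct example_cache **cachep, struct mutex *lock,
		       struct key *key)
{
	struct example_cache *old, *new;
	int count;

	mutex_lock(lock);
	old = *cachep;	/* stable: we hold the writer-side mutex */
	count = old ? old->count : 0;

	new = kmalloc(sizeof(*new) + sizeof(struct key *) * (count + 1),
		      GFP_NOFS);
	if (!new) {
		mutex_unlock(lock);
		return -ENOMEM;
	}
	if (old)
		memcpy(new->keys, old->keys, count * sizeof(struct key *));
	new->keys[count] = key_get(key);
	new->count = count + 1;

	rcu_assign_pointer(*cachep, new);
	mutex_unlock(lock);

	if (old)
		call_rcu(&old->rcu, example_reclaim);
	return 0;
}

Note call_rcu() rather than kfree(): a reader may still be traversing the old array when the writer unlocks, so freeing must wait out the grace period.
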
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 44aff81dc6a7..96bb23b476a2 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -1,6 +1,6 @@
1/* server.c: AFS server record management 1/* AFS server record management
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -11,489 +11,314 @@
11 11
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <rxrpc/peer.h>
15#include <rxrpc/connection.h>
16#include "volume.h"
17#include "cell.h"
18#include "server.h"
19#include "transport.h"
20#include "vlclient.h"
21#include "kafstimod.h"
22#include "internal.h" 14#include "internal.h"
23 15
24DEFINE_SPINLOCK(afs_server_peer_lock); 16unsigned afs_server_timeout = 10; /* server timeout in seconds */
25 17
26#define FS_SERVICE_ID 1 /* AFS Volume Location Service ID */ 18static void afs_reap_server(struct work_struct *);
27#define VL_SERVICE_ID 52 /* AFS Volume Location Service ID */
28 19
29static void __afs_server_timeout(struct afs_timer *timer) 20/* tree of all the servers, indexed by IP address */
21static struct rb_root afs_servers = RB_ROOT;
22static DEFINE_RWLOCK(afs_servers_lock);
23
24/* LRU list of all the servers not currently in use */
25static LIST_HEAD(afs_server_graveyard);
26static DEFINE_SPINLOCK(afs_server_graveyard_lock);
27static DECLARE_DELAYED_WORK(afs_server_reaper, afs_reap_server);
28
29/*
30 * install a server record in the master tree
31 */
32static int afs_install_server(struct afs_server *server)
30{ 33{
31 struct afs_server *server = 34 struct afs_server *xserver;
32 list_entry(timer, struct afs_server, timeout); 35 struct rb_node **pp, *p;
36 int ret;
33 37
34 _debug("SERVER TIMEOUT [%p{u=%d}]", 38 _enter("%p", server);
35 server, atomic_read(&server->usage));
36 39
37 afs_server_do_timeout(server); 40 write_lock(&afs_servers_lock);
38} 41
42 ret = -EEXIST;
43 pp = &afs_servers.rb_node;
44 p = NULL;
45 while (*pp) {
46 p = *pp;
47 _debug("- consider %p", p);
48 xserver = rb_entry(p, struct afs_server, master_rb);
49 if (server->addr.s_addr < xserver->addr.s_addr)
50 pp = &(*pp)->rb_left;
51 else if (server->addr.s_addr > xserver->addr.s_addr)
52 pp = &(*pp)->rb_right;
53 else
54 goto error;
55 }
39 56
40static const struct afs_timer_ops afs_server_timer_ops = { 57 rb_link_node(&server->master_rb, p, pp);
41 .timed_out = __afs_server_timeout, 58 rb_insert_color(&server->master_rb, &afs_servers);
42}; 59 ret = 0;
60
61error:
62 write_unlock(&afs_servers_lock);
63 return ret;
64}
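
afs_install_server() above is the insert half of a standard keyed rb-tree; the read side walks the identical comparisons under the read lock instead of the write lock. An illustrative lookup sketch, assuming the afs_servers tree and the master_rb/addr fields introduced in this hunk (a real caller would also need to take a ref on the server before dropping the lock):

/* illustrative only: find a server by address in the afs_servers tree */
static struct afs_server *example_find_server(__be32 s_addr)
{
	struct afs_server *server = NULL;
	struct rb_node *p;

	read_lock(&afs_servers_lock);
	p = afs_servers.rb_node;
	while (p) {
		server = rb_entry(p, struct afs_server, master_rb);
		if (s_addr < server->addr.s_addr) {
			p = p->rb_left;
			server = NULL;
		} else if (s_addr > server->addr.s_addr) {
			p = p->rb_right;
			server = NULL;
		} else {
			break;
		}
	}
	read_unlock(&afs_servers_lock);
	return server;	/* NULL if nothing matched */
}
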
43 65
44/*****************************************************************************/
45/* 66/*
46 * lookup a server record in a cell 67 * allocate a new server record
47 * - TODO: search the cell's server list
48 */ 68 */
49int afs_server_lookup(struct afs_cell *cell, const struct in_addr *addr, 69static struct afs_server *afs_alloc_server(struct afs_cell *cell,
50 struct afs_server **_server) 70 const struct in_addr *addr)
51{ 71{
52 struct afs_server *server, *active, *zombie; 72 struct afs_server *server;
53 int loop;
54 73
55 _enter("%p,%08x,", cell, ntohl(addr->s_addr)); 74 _enter("");
56 75
57 /* allocate and initialise a server record */
58 server = kzalloc(sizeof(struct afs_server), GFP_KERNEL); 76 server = kzalloc(sizeof(struct afs_server), GFP_KERNEL);
59 if (!server) { 77 if (server) {
60 _leave(" = -ENOMEM"); 78 atomic_set(&server->usage, 1);
61 return -ENOMEM; 79 server->cell = cell;
80
81 INIT_LIST_HEAD(&server->link);
82 INIT_LIST_HEAD(&server->grave);
83 init_rwsem(&server->sem);
84 spin_lock_init(&server->fs_lock);
85 server->fs_vnodes = RB_ROOT;
86 server->cb_promises = RB_ROOT;
87 spin_lock_init(&server->cb_lock);
88 init_waitqueue_head(&server->cb_break_waitq);
89 INIT_DELAYED_WORK(&server->cb_break_work,
90 afs_dispatch_give_up_callbacks);
91
92 memcpy(&server->addr, addr, sizeof(struct in_addr));
93 server->addr.s_addr = addr->s_addr;
62 } 94 }
63 95
64 atomic_set(&server->usage, 1); 96 _leave(" = %p{%d}", server, atomic_read(&server->usage));
65 97 return server;
66 INIT_LIST_HEAD(&server->link); 98}
67 init_rwsem(&server->sem);
68 INIT_LIST_HEAD(&server->fs_callq);
69 spin_lock_init(&server->fs_lock);
70 INIT_LIST_HEAD(&server->cb_promises);
71 spin_lock_init(&server->cb_lock);
72
73 for (loop = 0; loop < AFS_SERVER_CONN_LIST_SIZE; loop++)
74 server->fs_conn_cnt[loop] = 4;
75 99
76 memcpy(&server->addr, addr, sizeof(struct in_addr)); 100/*
77 server->addr.s_addr = addr->s_addr; 101 * get an FS-server record for a cell
102 */
103struct afs_server *afs_lookup_server(struct afs_cell *cell,
104 const struct in_addr *addr)
105{
106 struct afs_server *server, *candidate;
78 107
79 afs_timer_init(&server->timeout, &afs_server_timer_ops); 108 _enter("%p,"NIPQUAD_FMT, cell, NIPQUAD(addr->s_addr));
80 109
81 /* add to the cell */ 110 /* quick scan of the list to see if we already have the server */
82 write_lock(&cell->sv_lock); 111 read_lock(&cell->servers_lock);
83 112
84 /* check the active list */ 113 list_for_each_entry(server, &cell->servers, link) {
85 list_for_each_entry(active, &cell->sv_list, link) { 114 if (server->addr.s_addr == addr->s_addr)
86 if (active->addr.s_addr == addr->s_addr) 115 goto found_server_quickly;
87 goto use_active_server;
88 } 116 }
117 read_unlock(&cell->servers_lock);
89 118
90 /* check the inactive list */ 119 candidate = afs_alloc_server(cell, addr);
91 spin_lock(&cell->sv_gylock); 120 if (!candidate) {
92 list_for_each_entry(zombie, &cell->sv_graveyard, link) { 121 _leave(" = -ENOMEM");
93 if (zombie->addr.s_addr == addr->s_addr) 122 return ERR_PTR(-ENOMEM);
94 goto resurrect_server;
95 } 123 }
96 spin_unlock(&cell->sv_gylock);
97 124
98 afs_get_cell(cell); 125 write_lock(&cell->servers_lock);
99 server->cell = cell;
100 list_add_tail(&server->link, &cell->sv_list);
101 126
102 write_unlock(&cell->sv_lock); 127 /* check the cell's server list again */
128 list_for_each_entry(server, &cell->servers, link) {
129 if (server->addr.s_addr == addr->s_addr)
130 goto found_server;
131 }
103 132
104 *_server = server; 133 _debug("new");
105 _leave(" = 0 (%p)", server); 134 server = candidate;
106 return 0; 135 if (afs_install_server(server) < 0)
136 goto server_in_two_cells;
107 137
108 /* found a matching active server */ 138 afs_get_cell(cell);
109 use_active_server: 139 list_add_tail(&server->link, &cell->servers);
110 _debug("active server"); 140
111 afs_get_server(active); 141 write_unlock(&cell->servers_lock);
112 write_unlock(&cell->sv_lock); 142 _leave(" = %p{%d}", server, atomic_read(&server->usage));
143 return server;
144
145 /* found a matching server quickly */
146found_server_quickly:
147 _debug("found quickly");
148 afs_get_server(server);
149 read_unlock(&cell->servers_lock);
150no_longer_unused:
151 if (!list_empty(&server->grave)) {
152 spin_lock(&afs_server_graveyard_lock);
153 list_del_init(&server->grave);
154 spin_unlock(&afs_server_graveyard_lock);
155 }
156 _leave(" = %p{%d}", server, atomic_read(&server->usage));
157 return server;
158
159 /* found a matching server on the second pass */
160found_server:
161 _debug("found");
162 afs_get_server(server);
163 write_unlock(&cell->servers_lock);
164 kfree(candidate);
165 goto no_longer_unused;
166
167 /* found a server that seems to be in two cells */
168server_in_two_cells:
169 write_unlock(&cell->servers_lock);
170 kfree(candidate);
171 printk(KERN_NOTICE "kAFS:"
172 " Server "NIPQUAD_FMT" appears to be in two cells\n",
173 NIPQUAD(*addr));
174 _leave(" = -EEXIST");
175 return ERR_PTR(-EEXIST);
176}
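
afs_lookup_server() is the classic optimistic-allocation pattern: scan under the read lock, allocate with no lock held (kzalloc may sleep), then rescan under the write lock in case another mount raced in. Reduced to its skeleton, with find/alloc/install standing in for the list walk, afs_alloc_server() and afs_install_server() (sketch only):

    obj = find(key);                /* fast path, under the read lock */
    if (obj)
            return obj;

    candidate = alloc(key);         /* may sleep; no lock held */

    write_lock(&lock);
    obj = find(key);                /* recheck: another thread may have won */
    if (!obj) {
            install(candidate);
            obj = candidate;
            candidate = NULL;
    }
    write_unlock(&lock);
    kfree(candidate);               /* kfree(NULL) is a no-op */
    return obj;
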
113 177
114 kfree(server); 178/*
179 * look up a server by its IP address
180 */
181struct afs_server *afs_find_server(const struct in_addr *_addr)
182{
183 struct afs_server *server = NULL;
184 struct rb_node *p;
185 struct in_addr addr = *_addr;
115 186
116 *_server = active; 187 _enter(NIPQUAD_FMT, NIPQUAD(addr.s_addr));
117 _leave(" = 0 (%p)", active);
118 return 0;
119 188
120 /* found a matching server in the graveyard, so resurrect it and 189 read_lock(&afs_servers_lock);
121 * dispose of the new record */
122 resurrect_server:
123 _debug("resurrecting server");
124 190
125 list_move_tail(&zombie->link, &cell->sv_list); 191 p = afs_servers.rb_node;
126 afs_get_server(zombie); 192 while (p) {
127 afs_kafstimod_del_timer(&zombie->timeout); 193 server = rb_entry(p, struct afs_server, master_rb);
128 spin_unlock(&cell->sv_gylock);
129 write_unlock(&cell->sv_lock);
130 194
131 kfree(server); 195 _debug("- consider %p", p);
132 196
133 *_server = zombie; 197 if (addr.s_addr < server->addr.s_addr) {
134 _leave(" = 0 (%p)", zombie); 198 p = p->rb_left;
135 return 0; 199 } else if (addr.s_addr > server->addr.s_addr) {
200 p = p->rb_right;
201 } else {
202 afs_get_server(server);
203 goto found;
204 }
205 }
136 206
137} /* end afs_server_lookup() */ 207 server = NULL;
208found:
209 read_unlock(&afs_servers_lock);
210 ASSERTIFCMP(server, server->addr.s_addr, ==, addr.s_addr);
211 _leave(" = %p", server);
212 return server;
213}
138 214
139/*****************************************************************************/
140/* 215/*
141 * destroy a server record 216 * destroy a server record
142 * - removes from the cell list 217 * - removes from the cell list
143 */ 218 */
144void afs_put_server(struct afs_server *server) 219void afs_put_server(struct afs_server *server)
145{ 220{
146 struct afs_cell *cell;
147
148 if (!server) 221 if (!server)
149 return; 222 return;
150 223
151 _enter("%p", server); 224 _enter("%p{%d}", server, atomic_read(&server->usage));
152
153 cell = server->cell;
154 225
155 /* sanity check */ 226 _debug("PUT SERVER %d", atomic_read(&server->usage));
156 BUG_ON(atomic_read(&server->usage) <= 0);
157 227
158 /* to prevent a race, the decrement and the dequeue must be effectively 228 ASSERTCMP(atomic_read(&server->usage), >, 0);
159 * atomic */
160 write_lock(&cell->sv_lock);
161 229
162 if (likely(!atomic_dec_and_test(&server->usage))) { 230 if (likely(!atomic_dec_and_test(&server->usage))) {
163 write_unlock(&cell->sv_lock);
164 _leave(""); 231 _leave("");
165 return; 232 return;
166 } 233 }
167 234
168 spin_lock(&cell->sv_gylock); 235 afs_flush_callback_breaks(server);
169 list_move_tail(&server->link, &cell->sv_graveyard);
170 236
171 /* time out in 10 secs */ 237 spin_lock(&afs_server_graveyard_lock);
172 afs_kafstimod_add_timer(&server->timeout, 10 * HZ); 238 if (atomic_read(&server->usage) == 0) {
173 239 list_move_tail(&server->grave, &afs_server_graveyard);
174 spin_unlock(&cell->sv_gylock); 240 server->time_of_death = get_seconds();
175 write_unlock(&cell->sv_lock); 241 schedule_delayed_work(&afs_server_reaper,
176 242 afs_server_timeout * HZ);
177 _leave(" [killed]"); 243 }
178} /* end afs_put_server() */ 244 spin_unlock(&afs_server_graveyard_lock);
245 _leave(" [dead]");
246}
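
Note the recheck of the usage count under the graveyard lock: between atomic_dec_and_test() succeeding and the lock being taken, afs_lookup_server() may have found and resurrected the record. Condensed, the two halves pair up like this (paraphrased from the code above and from afs_lookup_server()):

    /* put side: bury only if still unused once the lock is held */
    spin_lock(&afs_server_graveyard_lock);
    if (atomic_read(&server->usage) == 0)
            list_move_tail(&server->grave, &afs_server_graveyard);
    spin_unlock(&afs_server_graveyard_lock);

    /* lookup side: dig a reused record back out */
    if (!list_empty(&server->grave)) {
            spin_lock(&afs_server_graveyard_lock);
            list_del_init(&server->grave);
            spin_unlock(&afs_server_graveyard_lock);
    }
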
179 247
180/*****************************************************************************/
181/* 248/*
182 * timeout server record 249 * destroy a dead server
183 * - removes from the cell's graveyard if the usage count is zero
184 */ 250 */
185void afs_server_do_timeout(struct afs_server *server) 251static void afs_destroy_server(struct afs_server *server)
186{ 252{
187 struct rxrpc_peer *peer;
188 struct afs_cell *cell;
189 int loop;
190
191 _enter("%p", server); 253 _enter("%p", server);
192 254
193 cell = server->cell; 255 ASSERTCMP(server->fs_vnodes.rb_node, ==, NULL);
194 256 ASSERTCMP(server->cb_promises.rb_node, ==, NULL);
195 BUG_ON(atomic_read(&server->usage) < 0); 257 ASSERTCMP(server->cb_break_head, ==, server->cb_break_tail);
196 258 ASSERTCMP(atomic_read(&server->cb_break_n), ==, 0);
197 /* remove from graveyard if still dead */
198 spin_lock(&cell->vl_gylock);
199 if (atomic_read(&server->usage) == 0)
200 list_del_init(&server->link);
201 else
202 server = NULL;
203 spin_unlock(&cell->vl_gylock);
204
205 if (!server) {
206 _leave("");
207 return; /* resurrected */
208 }
209
210 /* we can now destroy it properly */
211 afs_put_cell(cell);
212
213 /* uncross-point the structs under a global lock */
214 spin_lock(&afs_server_peer_lock);
215 peer = server->peer;
216 if (peer) {
217 server->peer = NULL;
218 peer->user = NULL;
219 }
220 spin_unlock(&afs_server_peer_lock);
221
222 /* finish cleaning up the server */
223 for (loop = AFS_SERVER_CONN_LIST_SIZE - 1; loop >= 0; loop--)
224 if (server->fs_conn[loop])
225 rxrpc_put_connection(server->fs_conn[loop]);
226
227 if (server->vlserver)
228 rxrpc_put_connection(server->vlserver);
229 259
260 afs_put_cell(server->cell);
230 kfree(server); 261 kfree(server);
262}
231 263
232 _leave(" [destroyed]");
233} /* end afs_server_do_timeout() */
234
235/*****************************************************************************/
236/* 264/*
237 * get a callslot on a connection to the fileserver on the specified server 265 * reap dead server records
238 */ 266 */
239int afs_server_request_callslot(struct afs_server *server, 267static void afs_reap_server(struct work_struct *work)
240 struct afs_server_callslot *callslot)
241{ 268{
242 struct afs_server_callslot *pcallslot; 269 LIST_HEAD(corpses);
243 struct rxrpc_connection *conn; 270 struct afs_server *server;
244 int nconn, ret; 271 unsigned long delay, expiry;
245 272 time_t now;
 246 _enter("%p,", server); 273
247 274 now = get_seconds();
248 INIT_LIST_HEAD(&callslot->link); 275 spin_lock(&afs_server_graveyard_lock);
249 callslot->task = current; 276
250 callslot->conn = NULL; 277 while (!list_empty(&afs_server_graveyard)) {
251 callslot->nconn = -1; 278 server = list_entry(afs_server_graveyard.next,
252 callslot->ready = 0; 279 struct afs_server, grave);
253 280
254 ret = 0; 281 /* the queue is ordered most dead first */
255 conn = NULL; 282 expiry = server->time_of_death + afs_server_timeout;
256 283 if (expiry > now) {
257 /* get hold of a callslot first */ 284 delay = (expiry - now) * HZ;
258 spin_lock(&server->fs_lock); 285 if (!schedule_delayed_work(&afs_server_reaper, delay)) {
259 286 cancel_delayed_work(&afs_server_reaper);
 260 /* resurrect the server if its death timeout has expired */ 286 cancel_delayed_work(&afs_server_reaper);
261 if (server->fs_state) { 288 delay);
262 if (time_before(jiffies, server->fs_dead_jif)) { 289 }
263 ret = server->fs_state; 290 break;
264 spin_unlock(&server->fs_lock);
265 _leave(" = %d [still dead]", ret);
266 return ret;
267 } 291 }
268 292
269 server->fs_state = 0; 293 write_lock(&server->cell->servers_lock);
270 } 294 write_lock(&afs_servers_lock);
271 295 if (atomic_read(&server->usage) > 0) {
272 /* try and find a connection that has spare callslots */ 296 list_del_init(&server->grave);
273 for (nconn = 0; nconn < AFS_SERVER_CONN_LIST_SIZE; nconn++) { 297 } else {
274 if (server->fs_conn_cnt[nconn] > 0) { 298 list_move_tail(&server->grave, &corpses);
275 server->fs_conn_cnt[nconn]--; 299 list_del_init(&server->link);
276 spin_unlock(&server->fs_lock); 300 rb_erase(&server->master_rb, &afs_servers);
277 callslot->nconn = nconn;
278 goto obtained_slot;
279 } 301 }
302 write_unlock(&afs_servers_lock);
303 write_unlock(&server->cell->servers_lock);
280 } 304 }
281 305
282 /* none were available - wait interruptibly for one to become 306 spin_unlock(&afs_server_graveyard_lock);
283 * available */
284 set_current_state(TASK_INTERRUPTIBLE);
285 list_add_tail(&callslot->link, &server->fs_callq);
286 spin_unlock(&server->fs_lock);
287
288 while (!callslot->ready && !signal_pending(current)) {
289 schedule();
290 set_current_state(TASK_INTERRUPTIBLE);
291 }
292
293 set_current_state(TASK_RUNNING);
294
295 /* even if we were interrupted we may still be queued */
296 if (!callslot->ready) {
297 spin_lock(&server->fs_lock);
298 list_del_init(&callslot->link);
299 spin_unlock(&server->fs_lock);
300 }
301
302 nconn = callslot->nconn;
303 307
304 /* if interrupted, we must release any slot we also got before 308 /* now reap the corpses we've extracted */
305 * returning an error */ 309 while (!list_empty(&corpses)) {
306 if (signal_pending(current)) { 310 server = list_entry(corpses.next, struct afs_server, grave);
307 ret = -EINTR; 311 list_del(&server->grave);
308 goto error_release; 312 afs_destroy_server(server);
309 } 313 }
314}
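
One subtlety in the re-arm path above: schedule_delayed_work() returns 0 if the work item is already queued and does not shorten an existing timer, hence the cancel-and-requeue dance when the head of the graveyard needs an earlier wakeup. The delay arithmetic, with hypothetical numbers (kernel context):

    time_t time_of_death = 1000, now = 1004;     /* seconds */
    unsigned long expiry = time_of_death + 10;   /* afs_server_timeout = 10 */
    unsigned long delay = (expiry - now) * HZ;   /* re-arm 6 seconds out */
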
310 315
311 /* if we were woken up with an error, then pass that error back to the
 312 * caller */
313 if (nconn < 0) {
314 _leave(" = %d", callslot->errno);
315 return callslot->errno;
316 }
317
318 /* were we given a connection directly? */
319 if (callslot->conn) {
320 /* yes - use it */
321 _leave(" = 0 (nc=%d)", nconn);
322 return 0;
323 }
324
325 /* got a callslot, but no connection */
326 obtained_slot:
327
328 /* need to get hold of the RxRPC connection */
329 down_write(&server->sem);
330
331 /* quick check to see if there's an outstanding error */
332 ret = server->fs_state;
333 if (ret)
334 goto error_release_upw;
335
336 if (server->fs_conn[nconn]) {
337 /* reuse an existing connection */
338 rxrpc_get_connection(server->fs_conn[nconn]);
339 callslot->conn = server->fs_conn[nconn];
340 }
341 else {
342 /* create a new connection */
343 ret = rxrpc_create_connection(afs_transport,
344 htons(7000),
345 server->addr.s_addr,
346 FS_SERVICE_ID,
347 NULL,
348 &server->fs_conn[nconn]);
349
350 if (ret < 0)
351 goto error_release_upw;
352
353 callslot->conn = server->fs_conn[0];
354 rxrpc_get_connection(callslot->conn);
355 }
356
357 up_write(&server->sem);
358
359 _leave(" = 0");
360 return 0;
361
362 /* handle an error occurring */
363 error_release_upw:
364 up_write(&server->sem);
365
366 error_release:
367 /* either release the callslot or pass it along to another deserving
368 * task */
369 spin_lock(&server->fs_lock);
370
371 if (nconn < 0) {
372 /* no callslot allocated */
373 }
374 else if (list_empty(&server->fs_callq)) {
375 /* no one waiting */
376 server->fs_conn_cnt[nconn]++;
377 spin_unlock(&server->fs_lock);
378 }
379 else {
380 /* someone's waiting - dequeue them and wake them up */
381 pcallslot = list_entry(server->fs_callq.next,
382 struct afs_server_callslot, link);
383 list_del_init(&pcallslot->link);
384
385 pcallslot->errno = server->fs_state;
386 if (!pcallslot->errno) {
387 /* pass them out callslot details */
388 callslot->conn = xchg(&pcallslot->conn,
389 callslot->conn);
390 pcallslot->nconn = nconn;
391 callslot->nconn = nconn = -1;
392 }
393 pcallslot->ready = 1;
394 wake_up_process(pcallslot->task);
395 spin_unlock(&server->fs_lock);
396 }
397
398 rxrpc_put_connection(callslot->conn);
399 callslot->conn = NULL;
400
401 _leave(" = %d", ret);
402 return ret;
403
404} /* end afs_server_request_callslot() */
405
406/*****************************************************************************/
407/*
408 * release a callslot back to the server
409 * - transfers the RxRPC connection to the next pending callslot if possible
410 */
411void afs_server_release_callslot(struct afs_server *server,
412 struct afs_server_callslot *callslot)
413{
414 struct afs_server_callslot *pcallslot;
415
416 _enter("{ad=%08x,cnt=%u},{%d}",
417 ntohl(server->addr.s_addr),
418 server->fs_conn_cnt[callslot->nconn],
419 callslot->nconn);
420
421 BUG_ON(callslot->nconn < 0);
422
423 spin_lock(&server->fs_lock);
424
425 if (list_empty(&server->fs_callq)) {
426 /* no one waiting */
427 server->fs_conn_cnt[callslot->nconn]++;
428 spin_unlock(&server->fs_lock);
429 }
430 else {
431 /* someone's waiting - dequeue them and wake them up */
432 pcallslot = list_entry(server->fs_callq.next,
433 struct afs_server_callslot, link);
434 list_del_init(&pcallslot->link);
435
436 pcallslot->errno = server->fs_state;
437 if (!pcallslot->errno) {
438 /* pass them out callslot details */
439 callslot->conn = xchg(&pcallslot->conn, callslot->conn);
440 pcallslot->nconn = callslot->nconn;
441 callslot->nconn = -1;
442 }
443
444 pcallslot->ready = 1;
445 wake_up_process(pcallslot->task);
446 spin_unlock(&server->fs_lock);
447 }
448
449 rxrpc_put_connection(callslot->conn);
450
451 _leave("");
452} /* end afs_server_release_callslot() */
453
454/*****************************************************************************/
455/* 316/*
456 * get a handle to a connection to the vlserver (volume location) on the 317 * discard all the server records for rmmod
457 * specified server
458 */ 318 */
459int afs_server_get_vlconn(struct afs_server *server, 319void __exit afs_purge_servers(void)
460 struct rxrpc_connection **_conn)
461{ 320{
462 struct rxrpc_connection *conn; 321 afs_server_timeout = 0;
463 int ret; 322 cancel_delayed_work(&afs_server_reaper);
464 323 schedule_delayed_work(&afs_server_reaper, 0);
465 _enter("%p,", server); 324}
466
467 ret = 0;
468 conn = NULL;
469 down_read(&server->sem);
470
471 if (server->vlserver) {
472 /* reuse an existing connection */
473 rxrpc_get_connection(server->vlserver);
474 conn = server->vlserver;
475 up_read(&server->sem);
476 }
477 else {
478 /* create a new connection */
479 up_read(&server->sem);
480 down_write(&server->sem);
481 if (!server->vlserver) {
482 ret = rxrpc_create_connection(afs_transport,
483 htons(7003),
484 server->addr.s_addr,
485 VL_SERVICE_ID,
486 NULL,
487 &server->vlserver);
488 }
489 if (ret == 0) {
490 rxrpc_get_connection(server->vlserver);
491 conn = server->vlserver;
492 }
493 up_write(&server->sem);
494 }
495
496 *_conn = conn;
497 _leave(" = %d", ret);
498 return ret;
499} /* end afs_server_get_vlconn() */
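
afs_purge_servers() drains everything for module unload: with afs_server_timeout zeroed, the reaper's expiry test can never defer, so one zero-delay kick moves every graveyard entry to the corpse list. The caller presumably still has to wait for the work item before the module text goes away; a plausible exit ordering, with a hypothetical exit-function name (sketch):

    static void __exit afs_exit(void)
    {
            afs_purge_servers();
            flush_scheduled_work();   /* let the reaper finish before unload */
            /* ... remaining teardown ... */
    }
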
diff --git a/fs/afs/server.h b/fs/afs/server.h
deleted file mode 100644
index c3d24115578f..000000000000
--- a/fs/afs/server.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/* server.h: AFS server record
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_SERVER_H
13#define _LINUX_AFS_SERVER_H
14
15#include "types.h"
16#include "kafstimod.h"
17#include <rxrpc/peer.h>
18#include <linux/rwsem.h>
19
20extern spinlock_t afs_server_peer_lock;
21
22/*****************************************************************************/
23/*
24 * AFS server record
25 */
26struct afs_server
27{
28 atomic_t usage;
29 struct afs_cell *cell; /* cell in which server resides */
30 struct list_head link; /* link in cell's server list */
31 struct rw_semaphore sem; /* access lock */
32 struct afs_timer timeout; /* graveyard timeout */
33 struct in_addr addr; /* server address */
34 struct rxrpc_peer *peer; /* peer record for this server */
35 struct rxrpc_connection *vlserver; /* connection to the volume location service */
36
37 /* file service access */
38#define AFS_SERVER_CONN_LIST_SIZE 2
39 struct rxrpc_connection *fs_conn[AFS_SERVER_CONN_LIST_SIZE]; /* FS connections */
40 unsigned fs_conn_cnt[AFS_SERVER_CONN_LIST_SIZE]; /* per conn call count */
41 struct list_head fs_callq; /* queue of processes waiting to make a call */
42 spinlock_t fs_lock; /* access lock */
43 int fs_state; /* 0 or reason FS currently marked dead (-errno) */
44 unsigned fs_rtt; /* FS round trip time */
45 unsigned long fs_act_jif; /* time at which last activity occurred */
46 unsigned long fs_dead_jif; /* time at which no longer to be considered dead */
47
48 /* callback promise management */
49 struct list_head cb_promises; /* as yet unbroken promises from this server */
50 spinlock_t cb_lock; /* access lock */
51};
52
53extern int afs_server_lookup(struct afs_cell *cell,
54 const struct in_addr *addr,
55 struct afs_server **_server);
56
57#define afs_get_server(S) do { atomic_inc(&(S)->usage); } while(0)
58
59extern void afs_put_server(struct afs_server *server);
60extern void afs_server_do_timeout(struct afs_server *server);
61
62extern int afs_server_find_by_peer(const struct rxrpc_peer *peer,
63 struct afs_server **_server);
64
65extern int afs_server_get_vlconn(struct afs_server *server,
66 struct rxrpc_connection **_conn);
67
68static inline
69struct afs_server *afs_server_get_from_peer(struct rxrpc_peer *peer)
70{
71 struct afs_server *server;
72
73 spin_lock(&afs_server_peer_lock);
74 server = peer->user;
75 if (server)
76 afs_get_server(server);
77 spin_unlock(&afs_server_peer_lock);
78
79 return server;
80}
81
82/*****************************************************************************/
83/*
84 * AFS server callslot grant record
85 */
86struct afs_server_callslot
87{
88 struct list_head link; /* link in server's list */
89 struct task_struct *task; /* process waiting to make call */
90 struct rxrpc_connection *conn; /* connection to use (or NULL on error) */
91 short nconn; /* connection slot number (-1 on error) */
92 char ready; /* T when ready */
93 int errno; /* error number if nconn==-1 */
94};
95
96extern int afs_server_request_callslot(struct afs_server *server,
97 struct afs_server_callslot *callslot);
98
99extern void afs_server_release_callslot(struct afs_server *server,
100 struct afs_server_callslot *callslot);
101
102#endif /* _LINUX_AFS_SERVER_H */
diff --git a/fs/afs/super.c b/fs/afs/super.c
index eb7e32349da3..cebd03c91f57 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -1,5 +1,6 @@
1/* 1/* AFS superblock handling
2 * Copyright (c) 2002 Red Hat, Inc. All rights reserved. 2 *
3 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
3 * 4 *
4 * This software may be freely redistributed under the terms of the 5 * This software may be freely redistributed under the terms of the
5 * GNU General Public License. 6 * GNU General Public License.
@@ -9,7 +10,7 @@
9 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 10 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10 * 11 *
11 * Authors: David Howells <dhowells@redhat.com> 12 * Authors: David Howells <dhowells@redhat.com>
12 * David Woodhouse <dwmw2@cambridge.redhat.com> 13 * David Woodhouse <dwmw2@redhat.com>
13 * 14 *
14 */ 15 */
15 16
@@ -19,22 +20,10 @@
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/fs.h> 21#include <linux/fs.h>
21#include <linux/pagemap.h> 22#include <linux/pagemap.h>
22#include "vnode.h"
23#include "volume.h"
24#include "cell.h"
25#include "cmservice.h"
26#include "fsclient.h"
27#include "super.h"
28#include "internal.h" 23#include "internal.h"
29 24
30#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */ 25#define AFS_FS_MAGIC 0x6B414653 /* 'kAFS' */
31 26
32struct afs_mount_params {
33 int rwpath;
34 struct afs_cell *default_cell;
35 struct afs_volume *volume;
36};
37
38static void afs_i_init_once(void *foo, struct kmem_cache *cachep, 27static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
39 unsigned long flags); 28 unsigned long flags);
40 29
@@ -62,13 +51,13 @@ static const struct super_operations afs_super_ops = {
62 .drop_inode = generic_delete_inode, 51 .drop_inode = generic_delete_inode,
63 .destroy_inode = afs_destroy_inode, 52 .destroy_inode = afs_destroy_inode,
64 .clear_inode = afs_clear_inode, 53 .clear_inode = afs_clear_inode,
54 .umount_begin = afs_umount_begin,
65 .put_super = afs_put_super, 55 .put_super = afs_put_super,
66}; 56};
67 57
68static struct kmem_cache *afs_inode_cachep; 58static struct kmem_cache *afs_inode_cachep;
69static atomic_t afs_count_active_inodes; 59static atomic_t afs_count_active_inodes;
70 60
71/*****************************************************************************/
72/* 61/*
73 * initialise the filesystem 62 * initialise the filesystem
74 */ 63 */
@@ -78,8 +67,6 @@ int __init afs_fs_init(void)
78 67
79 _enter(""); 68 _enter("");
80 69
81 afs_timer_init(&afs_mntpt_expiry_timer, &afs_mntpt_expiry_timer_ops);
82
83 /* create ourselves an inode cache */ 70 /* create ourselves an inode cache */
84 atomic_set(&afs_count_active_inodes, 0); 71 atomic_set(&afs_count_active_inodes, 0);
85 72
@@ -99,20 +86,22 @@ int __init afs_fs_init(void)
99 ret = register_filesystem(&afs_fs_type); 86 ret = register_filesystem(&afs_fs_type);
100 if (ret < 0) { 87 if (ret < 0) {
101 kmem_cache_destroy(afs_inode_cachep); 88 kmem_cache_destroy(afs_inode_cachep);
102 kleave(" = %d", ret); 89 _leave(" = %d", ret);
103 return ret; 90 return ret;
104 } 91 }
105 92
106 kleave(" = 0"); 93 _leave(" = 0");
107 return 0; 94 return 0;
108} /* end afs_fs_init() */ 95}
109 96
110/*****************************************************************************/
111/* 97/*
112 * clean up the filesystem 98 * clean up the filesystem
113 */ 99 */
114void __exit afs_fs_exit(void) 100void __exit afs_fs_exit(void)
115{ 101{
102 _enter("");
103
104 afs_mntpt_kill_timer();
116 unregister_filesystem(&afs_fs_type); 105 unregister_filesystem(&afs_fs_type);
117 106
118 if (atomic_read(&afs_count_active_inodes) != 0) { 107 if (atomic_read(&afs_count_active_inodes) != 0) {
@@ -122,10 +111,9 @@ void __exit afs_fs_exit(void)
122 } 111 }
123 112
124 kmem_cache_destroy(afs_inode_cachep); 113 kmem_cache_destroy(afs_inode_cachep);
114 _leave("");
115}
125 116
126} /* end afs_fs_exit() */
127
128/*****************************************************************************/
129/* 117/*
130 * check that an argument has a value 118 * check that an argument has a value
131 */ 119 */
@@ -136,9 +124,8 @@ static int want_arg(char **_value, const char *option)
136 return 0; 124 return 0;
137 } 125 }
138 return 1; 126 return 1;
139} /* end want_arg() */ 127}
140 128
141/*****************************************************************************/
142/* 129/*
143 * check that there's no subsequent value 130 * check that there's no subsequent value
144 */ 131 */
@@ -150,18 +137,17 @@ static int want_no_value(char *const *_value, const char *option)
150 return 0; 137 return 0;
151 } 138 }
152 return 1; 139 return 1;
153} /* end want_no_value() */ 140}
154 141
155/*****************************************************************************/
156/* 142/*
157 * parse the mount options 143 * parse the mount options
158 * - this function has been shamelessly adapted from the ext3 fs which 144 * - this function has been shamelessly adapted from the ext3 fs which
159 * shamelessly adapted it from the msdos fs 145 * shamelessly adapted it from the msdos fs
160 */ 146 */
161static int afs_super_parse_options(struct afs_mount_params *params, 147static int afs_parse_options(struct afs_mount_params *params,
162 char *options, 148 char *options, const char **devname)
163 const char **devname)
164{ 149{
150 struct afs_cell *cell;
165 char *key, *value; 151 char *key, *value;
166 int ret; 152 int ret;
167 153
@@ -170,51 +156,135 @@ static int afs_super_parse_options(struct afs_mount_params *params,
170 options[PAGE_SIZE - 1] = 0; 156 options[PAGE_SIZE - 1] = 0;
171 157
172 ret = 0; 158 ret = 0;
173 while ((key = strsep(&options, ",")) != 0) 159 while ((key = strsep(&options, ","))) {
174 {
175 value = strchr(key, '='); 160 value = strchr(key, '=');
176 if (value) 161 if (value)
177 *value++ = 0; 162 *value++ = 0;
178 163
179 printk("kAFS: KEY: %s, VAL:%s\n", key, value ?: "-"); 164 _debug("kAFS: KEY: %s, VAL:%s", key, value ?: "-");
180 165
181 if (strcmp(key, "rwpath") == 0) { 166 if (strcmp(key, "rwpath") == 0) {
182 if (!want_no_value(&value, "rwpath")) 167 if (!want_no_value(&value, "rwpath"))
183 return -EINVAL; 168 return -EINVAL;
184 params->rwpath = 1; 169 params->rwpath = 1;
185 continue; 170 } else if (strcmp(key, "vol") == 0) {
186 }
187 else if (strcmp(key, "vol") == 0) {
188 if (!want_arg(&value, "vol")) 171 if (!want_arg(&value, "vol"))
189 return -EINVAL; 172 return -EINVAL;
190 *devname = value; 173 *devname = value;
191 continue; 174 } else if (strcmp(key, "cell") == 0) {
192 }
193 else if (strcmp(key, "cell") == 0) {
194 if (!want_arg(&value, "cell")) 175 if (!want_arg(&value, "cell"))
195 return -EINVAL; 176 return -EINVAL;
196 afs_put_cell(params->default_cell); 177 cell = afs_cell_lookup(value, strlen(value));
197 ret = afs_cell_lookup(value, 178 if (IS_ERR(cell))
198 strlen(value), 179 return PTR_ERR(cell);
199 &params->default_cell); 180 afs_put_cell(params->cell);
200 if (ret < 0) 181 params->cell = cell;
201 return -EINVAL; 182 } else {
202 continue; 183 printk("kAFS: Unknown mount option: '%s'\n", key);
184 ret = -EINVAL;
185 goto error;
203 } 186 }
204
205 printk("kAFS: Unknown mount option: '%s'\n", key);
206 ret = -EINVAL;
207 goto error;
208 } 187 }
209 188
210 ret = 0; 189 ret = 0;
211 190error:
212 error:
213 _leave(" = %d", ret); 191 _leave(" = %d", ret);
214 return ret; 192 return ret;
215} /* end afs_super_parse_options() */ 193}
194
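
strsep() consumes the option buffer destructively, writing a NUL over each ',' as it goes (and the '=' is overwritten by hand), which is why the options page is terminated before parsing. The splitting idiom in isolation (sketch):

    char *opts = options, *key, *value;

    while ((key = strsep(&opts, ","))) {
            value = strchr(key, '=');
            if (value)
                    *value++ = '\0';
            /* e.g. "cell=example.com" -> key "cell", value "example.com" */
    }
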
195/*
196 * parse a device name to get cell name, volume name, volume type and R/W
197 * selector
198 * - this can be one of the following:
199 * "%[cell:]volume[.]" R/W volume
200 * "#[cell:]volume[.]" R/O or R/W volume (rwpath=0),
201 * or R/W (rwpath=1) volume
202 * "%[cell:]volume.readonly" R/O volume
203 * "#[cell:]volume.readonly" R/O volume
204 * "%[cell:]volume.backup" Backup volume
205 * "#[cell:]volume.backup" Backup volume
206 */
207static int afs_parse_device_name(struct afs_mount_params *params,
208 const char *name)
209{
210 struct afs_cell *cell;
211 const char *cellname, *suffix;
212 int cellnamesz;
213
214 _enter(",%s", name);
215
216 if (!name) {
217 printk(KERN_ERR "kAFS: no volume name specified\n");
218 return -EINVAL;
219 }
220
221 if ((name[0] != '%' && name[0] != '#') || !name[1]) {
222 printk(KERN_ERR "kAFS: unparsable volume name\n");
223 return -EINVAL;
224 }
225
226 /* determine the type of volume we're looking for */
227 params->type = AFSVL_ROVOL;
228 params->force = false;
229 if (params->rwpath || name[0] == '%') {
230 params->type = AFSVL_RWVOL;
231 params->force = true;
232 }
233 name++;
234
235 /* split the cell name out if there is one */
236 params->volname = strchr(name, ':');
237 if (params->volname) {
238 cellname = name;
239 cellnamesz = params->volname - name;
240 params->volname++;
241 } else {
242 params->volname = name;
243 cellname = NULL;
244 cellnamesz = 0;
245 }
246
247 /* the volume type is further affected by a possible suffix */
248 suffix = strrchr(params->volname, '.');
249 if (suffix) {
250 if (strcmp(suffix, ".readonly") == 0) {
251 params->type = AFSVL_ROVOL;
252 params->force = true;
253 } else if (strcmp(suffix, ".backup") == 0) {
254 params->type = AFSVL_BACKVOL;
255 params->force = true;
256 } else if (suffix[1] == 0) {
257 } else {
258 suffix = NULL;
259 }
260 }
261
262 params->volnamesz = suffix ?
263 suffix - params->volname : strlen(params->volname);
264
265 _debug("cell %*.*s [%p]",
266 cellnamesz, cellnamesz, cellname ?: "", params->cell);
267
268 /* lookup the cell record */
269 if (cellname || !params->cell) {
270 cell = afs_cell_lookup(cellname, cellnamesz);
271 if (IS_ERR(cell)) {
272 printk(KERN_ERR "kAFS: unable to lookup cell '%s'\n",
273 cellname ?: "");
274 return PTR_ERR(cell);
275 }
276 afs_put_cell(params->cell);
277 params->cell = cell;
278 }
279
280 _debug("CELL:%s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
281 params->cell->name, params->cell,
282 params->volnamesz, params->volnamesz, params->volname,
283 suffix ?: "-", params->type, params->force ? " FORCE" : "");
284
285 return 0;
286}
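
For reference, some worked inputs against the rules above (example.com is a hypothetical cell; rwpath=0 unless stated):

    /* "%example.com:root.cell"          -> R/W volume, force = true       */
    /* "#example.com:root.cell."         -> prefer R/O, may fall back R/W  */
    /* "#example.com:root.cell.readonly" -> R/O volume, force = true       */
    /* "#example.com:root.cell.backup"   -> backup volume, force = true    */
    /* (with rwpath=1, a leading '#' behaves like '%')                     */
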
216 287
217/*****************************************************************************/
218/* 288/*
219 * check a superblock to see if it's the one we're looking for 289 * check a superblock to see if it's the one we're looking for
220 */ 290 */
@@ -224,13 +294,12 @@ static int afs_test_super(struct super_block *sb, void *data)
224 struct afs_super_info *as = sb->s_fs_info; 294 struct afs_super_info *as = sb->s_fs_info;
225 295
226 return as->volume == params->volume; 296 return as->volume == params->volume;
227} /* end afs_test_super() */ 297}
228 298
229/*****************************************************************************/
230/* 299/*
231 * fill in the superblock 300 * fill in the superblock
232 */ 301 */
233static int afs_fill_super(struct super_block *sb, void *data, int silent) 302static int afs_fill_super(struct super_block *sb, void *data)
234{ 303{
235 struct afs_mount_params *params = data; 304 struct afs_mount_params *params = data;
236 struct afs_super_info *as = NULL; 305 struct afs_super_info *as = NULL;
@@ -239,7 +308,7 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
239 struct inode *inode = NULL; 308 struct inode *inode = NULL;
240 int ret; 309 int ret;
241 310
242 kenter(""); 311 _enter("");
243 312
244 /* allocate a superblock info record */ 313 /* allocate a superblock info record */
245 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); 314 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
@@ -262,9 +331,9 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
262 fid.vid = as->volume->vid; 331 fid.vid = as->volume->vid;
263 fid.vnode = 1; 332 fid.vnode = 1;
264 fid.unique = 1; 333 fid.unique = 1;
265 ret = afs_iget(sb, &fid, &inode); 334 inode = afs_iget(sb, params->key, &fid, NULL, NULL);
266 if (ret < 0) 335 if (IS_ERR(inode))
267 goto error; 336 goto error_inode;
268 337
269 ret = -ENOMEM; 338 ret = -ENOMEM;
270 root = d_alloc_root(inode); 339 root = d_alloc_root(inode);
@@ -273,21 +342,23 @@ static int afs_fill_super(struct super_block *sb, void *data, int silent)
273 342
274 sb->s_root = root; 343 sb->s_root = root;
275 344
276 kleave(" = 0"); 345 _leave(" = 0");
277 return 0; 346 return 0;
278 347
279 error: 348error_inode:
349 ret = PTR_ERR(inode);
350 inode = NULL;
351error:
280 iput(inode); 352 iput(inode);
281 afs_put_volume(as->volume); 353 afs_put_volume(as->volume);
282 kfree(as); 354 kfree(as);
283 355
284 sb->s_fs_info = NULL; 356 sb->s_fs_info = NULL;
285 357
286 kleave(" = %d", ret); 358 _leave(" = %d", ret);
287 return ret; 359 return ret;
288} /* end afs_fill_super() */ 360}
289 361
290/*****************************************************************************/
291/* 362/*
292 * get an AFS superblock 363 * get an AFS superblock
293 * - TODO: don't use get_sb_nodev(), but rather call sget() directly 364 * - TODO: don't use get_sb_nodev(), but rather call sget() directly
@@ -300,69 +371,80 @@ static int afs_get_sb(struct file_system_type *fs_type,
300{ 371{
301 struct afs_mount_params params; 372 struct afs_mount_params params;
302 struct super_block *sb; 373 struct super_block *sb;
374 struct afs_volume *vol;
375 struct key *key;
303 int ret; 376 int ret;
304 377
305 _enter(",,%s,%p", dev_name, options); 378 _enter(",,%s,%p", dev_name, options);
306 379
307 memset(&params, 0, sizeof(params)); 380 memset(&params, 0, sizeof(params));
308 381
309 /* start the cache manager */ 382 /* parse the options and device name */
310 ret = afscm_start();
311 if (ret < 0) {
312 _leave(" = %d", ret);
313 return ret;
314 }
315
316 /* parse the options */
317 if (options) { 383 if (options) {
318 ret = afs_super_parse_options(&params, options, &dev_name); 384 ret = afs_parse_options(&params, options, &dev_name);
319 if (ret < 0) 385 if (ret < 0)
320 goto error; 386 goto error;
321 if (!dev_name) {
322 printk("kAFS: no volume name specified\n");
323 ret = -EINVAL;
324 goto error;
325 }
326 } 387 }
327 388
328 /* parse the device name */ 389
329 ret = afs_volume_lookup(dev_name, 390 ret = afs_parse_device_name(&params, dev_name);
330 params.default_cell,
331 params.rwpath,
332 &params.volume);
333 if (ret < 0) 391 if (ret < 0)
334 goto error; 392 goto error;
335 393
336 /* allocate a deviceless superblock */ 394 /* try and do the mount securely */
337 sb = sget(fs_type, afs_test_super, set_anon_super, &params); 395 key = afs_request_key(params.cell);
338 if (IS_ERR(sb)) 396 if (IS_ERR(key)) {
397 _leave(" = %ld [key]", PTR_ERR(key));
398 ret = PTR_ERR(key);
339 goto error; 399 goto error;
400 }
401 params.key = key;
340 402
 341 sb->s_flags = flags; 403 /* look up the volume record */
404 vol = afs_volume_lookup(&params);
405 if (IS_ERR(vol)) {
406 ret = PTR_ERR(vol);
407 goto error;
408 }
409 params.volume = vol;
342 410
343 ret = afs_fill_super(sb, &params, flags & MS_SILENT ? 1 : 0); 411 /* allocate a deviceless superblock */
344 if (ret < 0) { 412 sb = sget(fs_type, afs_test_super, set_anon_super, &params);
345 up_write(&sb->s_umount); 413 if (IS_ERR(sb)) {
346 deactivate_super(sb); 414 ret = PTR_ERR(sb);
347 goto error; 415 goto error;
348 } 416 }
349 sb->s_flags |= MS_ACTIVE;
350 simple_set_mnt(mnt, sb);
351 417
418 if (!sb->s_root) {
419 /* initial superblock/root creation */
420 _debug("create");
421 sb->s_flags = flags;
422 ret = afs_fill_super(sb, &params);
423 if (ret < 0) {
424 up_write(&sb->s_umount);
425 deactivate_super(sb);
426 goto error;
427 }
428 sb->s_flags |= MS_ACTIVE;
429 } else {
430 _debug("reuse");
431 ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
432 }
433
434 simple_set_mnt(mnt, sb);
352 afs_put_volume(params.volume); 435 afs_put_volume(params.volume);
353 afs_put_cell(params.default_cell); 436 afs_put_cell(params.cell);
354 _leave(" = 0 [%p]", 0, sb); 437 _leave(" = 0 [%p]", sb);
355 return 0; 438 return 0;
356 439
357 error: 440error:
358 afs_put_volume(params.volume); 441 afs_put_volume(params.volume);
359 afs_put_cell(params.default_cell); 442 afs_put_cell(params.cell);
360 afscm_stop(); 443 key_put(params.key);
361 _leave(" = %d", ret); 444 _leave(" = %d", ret);
362 return ret; 445 return ret;
363} /* end afs_get_sb() */ 446}
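
The sb->s_root test distinguishes a freshly allocated superblock from one that sget() matched via afs_test_super(), so mounting the same volume twice now shares a superblock instead of building a second one. Condensed:

    sb = sget(fs_type, afs_test_super, set_anon_super, &params);
    if (!sb->s_root) {
            /* new superblock for this volume: fill it in, mark active */
    } else {
            /* same volume already mounted: reuse the existing superblock */
    }
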
364 447
365/*****************************************************************************/
366/* 448/*
367 * finish the unmounting process on the superblock 449 * finish the unmounting process on the superblock
368 */ 450 */
@@ -373,35 +455,30 @@ static void afs_put_super(struct super_block *sb)
373 _enter(""); 455 _enter("");
374 456
375 afs_put_volume(as->volume); 457 afs_put_volume(as->volume);
376 afscm_stop();
377 458
378 _leave(""); 459 _leave("");
379} /* end afs_put_super() */ 460}
380 461
381/*****************************************************************************/
382/* 462/*
383 * initialise an inode cache slab element prior to any use 463 * initialise an inode cache slab element prior to any use
384 */ 464 */
385static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep, 465static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
386 unsigned long flags) 466 unsigned long flags)
387{ 467{
388 struct afs_vnode *vnode = (struct afs_vnode *) _vnode; 468 struct afs_vnode *vnode = _vnode;
389 469
390 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == 470 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
391 SLAB_CTOR_CONSTRUCTOR) { 471 SLAB_CTOR_CONSTRUCTOR) {
392 memset(vnode, 0, sizeof(*vnode)); 472 memset(vnode, 0, sizeof(*vnode));
393 inode_init_once(&vnode->vfs_inode); 473 inode_init_once(&vnode->vfs_inode);
394 init_waitqueue_head(&vnode->update_waitq); 474 init_waitqueue_head(&vnode->update_waitq);
475 mutex_init(&vnode->permits_lock);
476 mutex_init(&vnode->validate_lock);
395 spin_lock_init(&vnode->lock); 477 spin_lock_init(&vnode->lock);
396 INIT_LIST_HEAD(&vnode->cb_link); 478 INIT_WORK(&vnode->cb_broken_work, afs_broken_callback_work);
397 INIT_LIST_HEAD(&vnode->cb_hash_link);
398 afs_timer_init(&vnode->cb_timeout,
399 &afs_vnode_cb_timed_out_ops);
400 } 479 }
480}
401 481
402} /* end afs_i_init_once() */
403
404/*****************************************************************************/
405/* 482/*
406 * allocate an AFS inode struct from our slab cache 483 * allocate an AFS inode struct from our slab cache
407 */ 484 */
@@ -409,8 +486,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
409{ 486{
410 struct afs_vnode *vnode; 487 struct afs_vnode *vnode;
411 488
412 vnode = (struct afs_vnode *) 489 vnode = kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
413 kmem_cache_alloc(afs_inode_cachep, GFP_KERNEL);
414 if (!vnode) 490 if (!vnode)
415 return NULL; 491 return NULL;
416 492
@@ -421,21 +497,25 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
421 497
422 vnode->volume = NULL; 498 vnode->volume = NULL;
423 vnode->update_cnt = 0; 499 vnode->update_cnt = 0;
424 vnode->flags = 0; 500 vnode->flags = 1 << AFS_VNODE_UNSET;
501 vnode->cb_promised = false;
425 502
426 return &vnode->vfs_inode; 503 return &vnode->vfs_inode;
427} /* end afs_alloc_inode() */ 504}
428 505
429/*****************************************************************************/
430/* 506/*
431 * destroy an AFS inode struct 507 * destroy an AFS inode struct
432 */ 508 */
433static void afs_destroy_inode(struct inode *inode) 509static void afs_destroy_inode(struct inode *inode)
434{ 510{
511 struct afs_vnode *vnode = AFS_FS_I(inode);
512
435 _enter("{%lu}", inode->i_ino); 513 _enter("{%lu}", inode->i_ino);
436 514
437 kmem_cache_free(afs_inode_cachep, AFS_FS_I(inode)); 515 _debug("DESTROY INODE %p", inode);
438 516
439 atomic_dec(&afs_count_active_inodes); 517 ASSERTCMP(vnode->server, ==, NULL);
440 518
441} /* end afs_destroy_inode() */ 519 kmem_cache_free(afs_inode_cachep, vnode);
520 atomic_dec(&afs_count_active_inodes);
521}
diff --git a/fs/afs/super.h b/fs/afs/super.h
deleted file mode 100644
index 32de8cc6fae8..000000000000
--- a/fs/afs/super.h
+++ /dev/null
@@ -1,45 +0,0 @@
1/* super.h: AFS filesystem internal private data
2 *
3 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
4 *
5 * This software may be freely redistributed under the terms of the
6 * GNU General Public License.
7 *
8 * You should have received a copy of the GNU General Public License
9 * along with this program; if not, write to the Free Software
10 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
11 *
12 * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
13 * David Howells <dhowells@redhat.com>
14 *
15 */
16
17#ifndef _LINUX_AFS_SUPER_H
18#define _LINUX_AFS_SUPER_H
19
20#include <linux/fs.h>
21#include "server.h"
22
23#ifdef __KERNEL__
24
25/*****************************************************************************/
26/*
27 * AFS superblock private data
28 * - there's one superblock per volume
29 */
30struct afs_super_info
31{
32 struct afs_volume *volume; /* volume record */
33 char rwparent; /* T if parent is R/W AFS volume */
34};
35
36static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
37{
38 return sb->s_fs_info;
39}
40
41extern struct file_system_type afs_fs_type;
42
43#endif /* __KERNEL__ */
44
45#endif /* _LINUX_AFS_SUPER_H */
diff --git a/fs/afs/transport.h b/fs/afs/transport.h
deleted file mode 100644
index 7013ae6ccc8c..000000000000
--- a/fs/afs/transport.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/* transport.h: AFS transport management
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_TRANSPORT_H
13#define _LINUX_AFS_TRANSPORT_H
14
15#include "types.h"
16#include <rxrpc/transport.h>
17
18/* the cache manager transport endpoint */
19extern struct rxrpc_transport *afs_transport;
20
21#endif /* _LINUX_AFS_TRANSPORT_H */
diff --git a/fs/afs/types.h b/fs/afs/types.h
deleted file mode 100644
index b1a2367c7587..000000000000
--- a/fs/afs/types.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/* types.h: AFS types
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_TYPES_H
13#define _LINUX_AFS_TYPES_H
14
15#ifdef __KERNEL__
16#include <rxrpc/types.h>
17#endif /* __KERNEL__ */
18
19typedef unsigned afs_volid_t;
20typedef unsigned afs_vnodeid_t;
21typedef unsigned long long afs_dataversion_t;
22
23typedef enum {
24 AFSVL_RWVOL, /* read/write volume */
25 AFSVL_ROVOL, /* read-only volume */
26 AFSVL_BACKVOL, /* backup volume */
27} __attribute__((packed)) afs_voltype_t;
28
29typedef enum {
30 AFS_FTYPE_INVALID = 0,
31 AFS_FTYPE_FILE = 1,
32 AFS_FTYPE_DIR = 2,
33 AFS_FTYPE_SYMLINK = 3,
34} afs_file_type_t;
35
36#ifdef __KERNEL__
37
38struct afs_cell;
39struct afs_vnode;
40
41/*****************************************************************************/
42/*
43 * AFS file identifier
44 */
45struct afs_fid
46{
47 afs_volid_t vid; /* volume ID */
48 afs_vnodeid_t vnode; /* file index within volume */
49 unsigned unique; /* unique ID number (file index version) */
50};
51
52/*****************************************************************************/
53/*
54 * AFS callback notification
55 */
56typedef enum {
57 AFSCM_CB_UNTYPED = 0, /* no type set on CB break */
58 AFSCM_CB_EXCLUSIVE = 1, /* CB exclusive to CM [not implemented] */
59 AFSCM_CB_SHARED = 2, /* CB shared by other CM's */
60 AFSCM_CB_DROPPED = 3, /* CB promise cancelled by file server */
61} afs_callback_type_t;
62
63struct afs_callback
64{
65 struct afs_server *server; /* server that made the promise */
66 struct afs_fid fid; /* file identifier */
67 unsigned version; /* callback version */
68 unsigned expiry; /* time at which expires */
69 afs_callback_type_t type; /* type of callback */
70};
71
72#define AFSCBMAX 50
73
74/*****************************************************************************/
75/*
76 * AFS volume information
77 */
78struct afs_volume_info
79{
80 afs_volid_t vid; /* volume ID */
81 afs_voltype_t type; /* type of this volume */
82 afs_volid_t type_vids[5]; /* volume ID's for possible types for this vol */
83
84 /* list of fileservers serving this volume */
85 size_t nservers; /* number of entries used in servers[] */
86 struct {
87 struct in_addr addr; /* fileserver address */
88 } servers[8];
89};
90
91/*****************************************************************************/
92/*
93 * AFS file status information
94 */
95struct afs_file_status
96{
97 unsigned if_version; /* interface version */
98#define AFS_FSTATUS_VERSION 1
99
100 afs_file_type_t type; /* file type */
101 unsigned nlink; /* link count */
102 size_t size; /* file size */
103 afs_dataversion_t version; /* current data version */
104 unsigned author; /* author ID */
105 unsigned owner; /* owner ID */
106 unsigned caller_access; /* access rights for authenticated caller */
107 unsigned anon_access; /* access rights for unauthenticated caller */
108 umode_t mode; /* UNIX mode */
109 struct afs_fid parent; /* parent file ID */
110 time_t mtime_client; /* last time client changed data */
111 time_t mtime_server; /* last time server changed data */
112};
113
114/*****************************************************************************/
115/*
116 * AFS volume synchronisation information
117 */
118struct afs_volsync
119{
120 time_t creation; /* volume creation time */
121};
122
123#endif /* __KERNEL__ */
124
125#endif /* _LINUX_AFS_TYPES_H */
diff --git a/fs/afs/use-rtnetlink.c b/fs/afs/use-rtnetlink.c
new file mode 100644
index 000000000000..82f0daa28970
--- /dev/null
+++ b/fs/afs/use-rtnetlink.c
@@ -0,0 +1,473 @@
1/* RTNETLINK client
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/netlink.h>
12#include <linux/rtnetlink.h>
13#include <linux/if_addr.h>
14#include <linux/if_arp.h>
15#include <linux/inetdevice.h>
16#include <net/netlink.h>
17#include "internal.h"
18
19struct afs_rtm_desc {
20 struct socket *nlsock;
21 struct afs_interface *bufs;
22 u8 *mac;
23 size_t nbufs;
24 size_t maxbufs;
25 void *data;
26 ssize_t datalen;
27 size_t datamax;
28 int msg_seq;
29 unsigned mac_index;
30 bool wantloopback;
31 int (*parse)(struct afs_rtm_desc *, struct nlmsghdr *);
32};
33
34/*
35 * parse an RTM_GETADDR response
36 */
37static int afs_rtm_getaddr_parse(struct afs_rtm_desc *desc,
38 struct nlmsghdr *nlhdr)
39{
40 struct afs_interface *this;
41 struct ifaddrmsg *ifa;
42 struct rtattr *rtattr;
43 const char *name;
44 size_t len;
45
46 ifa = (struct ifaddrmsg *) NLMSG_DATA(nlhdr);
47
48 _enter("{ix=%d,af=%d}", ifa->ifa_index, ifa->ifa_family);
49
50 if (ifa->ifa_family != AF_INET) {
51 _leave(" = 0 [family %d]", ifa->ifa_family);
52 return 0;
53 }
54 if (desc->nbufs >= desc->maxbufs) {
55 _leave(" = 0 [max %zu/%zu]", desc->nbufs, desc->maxbufs);
56 return 0;
57 }
58
59 this = &desc->bufs[desc->nbufs];
60
61 this->index = ifa->ifa_index;
62 this->netmask.s_addr = inet_make_mask(ifa->ifa_prefixlen);
63 this->mtu = 0;
64
65 rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifaddrmsg));
66 len = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifaddrmsg));
67
68 name = "unknown";
69 for (; RTA_OK(rtattr, len); rtattr = RTA_NEXT(rtattr, len)) {
70 switch (rtattr->rta_type) {
71 case IFA_ADDRESS:
72 memcpy(&this->address, RTA_DATA(rtattr), 4);
73 break;
74 case IFA_LABEL:
75 name = RTA_DATA(rtattr);
76 break;
77 }
78 }
79
80 _debug("%s: "NIPQUAD_FMT"/"NIPQUAD_FMT,
81 name, NIPQUAD(this->address), NIPQUAD(this->netmask));
82
83 desc->nbufs++;
84 _leave(" = 0");
85 return 0;
86}
87
88/*
89 * parse an RTM_GETLINK response for MTUs
90 */
91static int afs_rtm_getlink_if_parse(struct afs_rtm_desc *desc,
92 struct nlmsghdr *nlhdr)
93{
94 struct afs_interface *this;
95 struct ifinfomsg *ifi;
96 struct rtattr *rtattr;
97 const char *name;
98 size_t len, loop;
99
100 ifi = (struct ifinfomsg *) NLMSG_DATA(nlhdr);
101
102 _enter("{ix=%d}", ifi->ifi_index);
103
104 for (loop = 0; loop < desc->nbufs; loop++) {
105 this = &desc->bufs[loop];
106 if (this->index == ifi->ifi_index)
107 goto found;
108 }
109
110 _leave(" = 0 [no match]");
111 return 0;
112
113found:
114 if (ifi->ifi_type == ARPHRD_LOOPBACK && !desc->wantloopback) {
115 _leave(" = 0 [loopback]");
116 return 0;
117 }
118
119 rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifinfomsg));
120 len = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifinfomsg));
121
122 name = "unknown";
123 for (; RTA_OK(rtattr, len); rtattr = RTA_NEXT(rtattr, len)) {
124 switch (rtattr->rta_type) {
125 case IFLA_MTU:
126 memcpy(&this->mtu, RTA_DATA(rtattr), 4);
127 break;
128 case IFLA_IFNAME:
129 name = RTA_DATA(rtattr);
130 break;
131 }
132 }
133
134 _debug("%s: "NIPQUAD_FMT"/"NIPQUAD_FMT" mtu %u",
135 name, NIPQUAD(this->address), NIPQUAD(this->netmask),
136 this->mtu);
137
138 _leave(" = 0");
139 return 0;
140}
141
142/*
143 * parse an RTM_GETLINK response for the MAC address belonging to the lowest
144 * non-internal interface
145 */
146static int afs_rtm_getlink_mac_parse(struct afs_rtm_desc *desc,
147 struct nlmsghdr *nlhdr)
148{
149 struct ifinfomsg *ifi;
150 struct rtattr *rtattr;
151 const char *name;
152 size_t remain, len;
153 bool set;
154
155 ifi = (struct ifinfomsg *) NLMSG_DATA(nlhdr);
156
157 _enter("{ix=%d}", ifi->ifi_index);
158
159 if (ifi->ifi_index >= desc->mac_index) {
160 _leave(" = 0 [high]");
161 return 0;
162 }
163 if (ifi->ifi_type == ARPHRD_LOOPBACK) {
164 _leave(" = 0 [loopback]");
165 return 0;
166 }
167
168 rtattr = NLMSG_DATA(nlhdr) + NLMSG_ALIGN(sizeof(struct ifinfomsg));
169 remain = NLMSG_PAYLOAD(nlhdr, sizeof(struct ifinfomsg));
170
171 name = "unknown";
172 set = false;
173 for (; RTA_OK(rtattr, remain); rtattr = RTA_NEXT(rtattr, remain)) {
174 switch (rtattr->rta_type) {
175 case IFLA_ADDRESS:
176 len = RTA_PAYLOAD(rtattr);
177 memcpy(desc->mac, RTA_DATA(rtattr),
178 min_t(size_t, len, 6));
179 desc->mac_index = ifi->ifi_index;
180 set = true;
181 break;
182 case IFLA_IFNAME:
183 name = RTA_DATA(rtattr);
184 break;
185 }
186 }
187
188 if (set)
189 _debug("%s: %02x:%02x:%02x:%02x:%02x:%02x",
190 name,
191 desc->mac[0], desc->mac[1], desc->mac[2],
192 desc->mac[3], desc->mac[4], desc->mac[5]);
193
194 _leave(" = 0");
195 return 0;
196}
197
198/*
199 * read the rtnetlink response and pass to parsing routine
200 */
201static int afs_read_rtm(struct afs_rtm_desc *desc)
202{
203 struct nlmsghdr *nlhdr, tmphdr;
204 struct msghdr msg;
205 struct kvec iov[1];
206 void *data;
207 bool last = false;
208 int len, ret, remain;
209
210 _enter("");
211
212 do {
213 /* first of all peek to see how big the packet is */
214 memset(&msg, 0, sizeof(msg));
215 iov[0].iov_base = &tmphdr;
216 iov[0].iov_len = sizeof(tmphdr);
217 len = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
218 sizeof(tmphdr), MSG_PEEK | MSG_TRUNC);
219 if (len < 0) {
220 _leave(" = %d [peek]", len);
221 return len;
222 }
223 if (len == 0)
224 continue;
225 if (len < sizeof(tmphdr) || len < NLMSG_PAYLOAD(&tmphdr, 0)) {
226 _leave(" = -EMSGSIZE");
227 return -EMSGSIZE;
228 }
229
230 if (desc->datamax < len) {
231 kfree(desc->data);
232 desc->data = NULL;
233 data = kmalloc(len, GFP_KERNEL);
234 if (!data)
235 return -ENOMEM;
236 desc->data = data;
237 }
238 desc->datamax = len;
239
240 /* read all the data from this packet */
241 iov[0].iov_base = desc->data;
242 iov[0].iov_len = desc->datamax;
243 desc->datalen = kernel_recvmsg(desc->nlsock, &msg, iov, 1,
244 desc->datamax, 0);
245 if (desc->datalen < 0) {
246 _leave(" = %ld [recv]", desc->datalen);
247 return desc->datalen;
248 }
249
250 nlhdr = desc->data;
251
252 /* check if the header is valid */
253 if (!NLMSG_OK(nlhdr, desc->datalen) ||
254 nlhdr->nlmsg_type == NLMSG_ERROR) {
255 _leave(" = -EIO");
256 return -EIO;
257 }
258
259 /* see if this is the last message */
260 if (nlhdr->nlmsg_type == NLMSG_DONE ||
261 !(nlhdr->nlmsg_flags & NLM_F_MULTI))
262 last = true;
263
264 /* parse the bits we got this time */
265 nlmsg_for_each_msg(nlhdr, desc->data, desc->datalen, remain) {
266 ret = desc->parse(desc, nlhdr);
267 if (ret < 0) {
268 _leave(" = %d [parse]", ret);
269 return ret;
270 }
271 }
272
273 } while (!last);
274
275 _leave(" = 0");
276 return 0;
277}
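
afs_read_rtm() sizes its buffer by peeking: with MSG_PEEK | MSG_TRUNC the datagram stays queued but the returned length is the message's true size rather than the bytes copied. A userspace equivalent of the idiom (sketch; assumes Linux recv() semantics on datagram/netlink sockets):

    #include <sys/socket.h>
    #include <stdlib.h>

    /* read one datagram of unknown size; caller frees *bufp */
    static ssize_t recv_whole(int fd, void **bufp)
    {
            char tmp;
            ssize_t len = recv(fd, &tmp, 1, MSG_PEEK | MSG_TRUNC);

            if (len <= 0)
                    return len;
            *bufp = malloc(len);
            if (!*bufp)
                    return -1;
            return recv(fd, *bufp, len, 0);   /* dequeue for real */
    }
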
278
279/*
280 * list the interface bound addresses to get the address and netmask
281 */
282static int afs_rtm_getaddr(struct afs_rtm_desc *desc)
283{
284 struct msghdr msg;
285 struct kvec iov[1];
286 int ret;
287
288 struct {
289 struct nlmsghdr nl_msg __attribute__((aligned(NLMSG_ALIGNTO)));
290 struct ifaddrmsg addr_msg __attribute__((aligned(NLMSG_ALIGNTO)));
291 } request;
292
293 _enter("");
294
295 memset(&request, 0, sizeof(request));
296
297 request.nl_msg.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
298 request.nl_msg.nlmsg_type = RTM_GETADDR;
299 request.nl_msg.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
300 request.nl_msg.nlmsg_seq = desc->msg_seq++;
301 request.nl_msg.nlmsg_pid = 0;
302
303 memset(&msg, 0, sizeof(msg));
304 iov[0].iov_base = &request;
305 iov[0].iov_len = sizeof(request);
306
307 ret = kernel_sendmsg(desc->nlsock, &msg, iov, 1, iov[0].iov_len);
308 _leave(" = %d", ret);
309 return ret;
310}
311
312/*
313 * list the interface link statuses to get the MTUs
314 */
315static int afs_rtm_getlink(struct afs_rtm_desc *desc)
316{
317 struct msghdr msg;
318 struct kvec iov[1];
319 int ret;
320
321 struct {
322 struct nlmsghdr nl_msg __attribute__((aligned(NLMSG_ALIGNTO)));
323 struct ifinfomsg link_msg __attribute__((aligned(NLMSG_ALIGNTO)));
324 } request;
325
326 _enter("");
327
328 memset(&request, 0, sizeof(request));
329
330 request.nl_msg.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
331 request.nl_msg.nlmsg_type = RTM_GETLINK;
332 request.nl_msg.nlmsg_flags = NLM_F_REQUEST | NLM_F_ROOT;
333 request.nl_msg.nlmsg_seq = desc->msg_seq++;
334 request.nl_msg.nlmsg_pid = 0;
335
336 memset(&msg, 0, sizeof(msg));
337 iov[0].iov_base = &request;
338 iov[0].iov_len = sizeof(request);
339
340 ret = kernel_sendmsg(desc->nlsock, &msg, iov, 1, iov[0].iov_len);
341 _leave(" = %d", ret);
342 return ret;
343}
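
Both requests are full-table dumps despite the differing flags: in <linux/netlink.h>, NLM_F_DUMP is defined in terms of NLM_F_ROOT:

    #define NLM_F_DUMP (NLM_F_ROOT | NLM_F_MATCH)
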
344
345/*
346 * cull any interface records for which there isn't an MTU value
347 */
348static void afs_cull_interfaces(struct afs_rtm_desc *desc)
349{
350 struct afs_interface *bufs = desc->bufs;
351 size_t nbufs = desc->nbufs;
352 int loop, point = 0;
353
354 _enter("{%zu}", nbufs);
355
356 for (loop = 0; loop < nbufs; loop++) {
357 if (desc->bufs[loop].mtu != 0) {
358 if (loop != point) {
359 ASSERTCMP(loop, >, point);
360 bufs[point] = bufs[loop];
361 }
362 point++;
363 }
364 }
365
366 desc->nbufs = point;
367 _leave(" [%zu/%zu]", desc->nbufs, nbufs);
368}
369
370/*
371 * get a list of this system's interface IPv4 addresses, netmasks and MTUs
372 * - returns the number of interface records in the buffer
373 */
374int afs_get_ipv4_interfaces(struct afs_interface *bufs, size_t maxbufs,
375 bool wantloopback)
376{
377 struct afs_rtm_desc desc;
378 int ret, loop;
379
380 _enter("");
381
382 memset(&desc, 0, sizeof(desc));
383 desc.bufs = bufs;
384 desc.maxbufs = maxbufs;
385 desc.wantloopback = wantloopback;
386
387 ret = sock_create_kern(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE,
388 &desc.nlsock);
389 if (ret < 0) {
390 _leave(" = %d [sock]", ret);
391 return ret;
392 }
393
394 /* issue RTM_GETADDR */
395 desc.parse = afs_rtm_getaddr_parse;
396 ret = afs_rtm_getaddr(&desc);
397 if (ret < 0)
398 goto error;
399 ret = afs_read_rtm(&desc);
400 if (ret < 0)
401 goto error;
402
403 /* issue RTM_GETLINK */
404 desc.parse = afs_rtm_getlink_if_parse;
405 ret = afs_rtm_getlink(&desc);
406 if (ret < 0)
407 goto error;
408 ret = afs_read_rtm(&desc);
409 if (ret < 0)
410 goto error;
411
412 afs_cull_interfaces(&desc);
413 ret = desc.nbufs;
414
415 for (loop = 0; loop < ret; loop++)
416 _debug("[%d] "NIPQUAD_FMT"/"NIPQUAD_FMT" mtu %u",
417 bufs[loop].index,
418 NIPQUAD(bufs[loop].address),
419 NIPQUAD(bufs[loop].netmask),
420 bufs[loop].mtu);
421
422error:
423 kfree(desc.data);
424 sock_release(desc.nlsock);
425 _leave(" = %d", ret);
426 return ret;
427}
428
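Putting the pieces together: the RTM_GETADDR pass creates one record per bound IPv4 address, the RTM_GETLINK pass fills in each record's MTU by interface index, and afs_cull_interfaces() then discards any record whose MTU was never filled in. A sketch of a caller, with the buffer size and the use_interface() consumer both hypothetical:

	struct afs_interface ifs[32];
	int n, i;

	n = afs_get_ipv4_interfaces(ifs, ARRAY_SIZE(ifs), false);
	if (n < 0)
		return n;			/* netlink or allocation error */
	for (i = 0; i < n; i++)
		use_interface(&ifs[i]);		/* index, address, netmask, MTU */
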
429/*
430 * get a MAC address from a random ethernet interface that has a real one
431 * - the buffer should be 6 bytes in size
432 */
433int afs_get_MAC_address(u8 mac[6])
434{
435 struct afs_rtm_desc desc;
436 int ret;
437
438 _enter("");
439
440 memset(&desc, 0, sizeof(desc));
441 desc.mac = mac;
442 desc.mac_index = UINT_MAX;
443
444 ret = sock_create_kern(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE,
445 &desc.nlsock);
446 if (ret < 0) {
447 _leave(" = %d [sock]", ret);
448 return ret;
449 }
450
451 /* issue RTM_GETLINK */
452 desc.parse = afs_rtm_getlink_mac_parse;
453 ret = afs_rtm_getlink(&desc);
454 if (ret < 0)
455 goto error;
456 ret = afs_read_rtm(&desc);
457 if (ret < 0)
458 goto error;
459
460 if (desc.mac_index < UINT_MAX) {
461 /* got a MAC address */
462 _debug("[%d] %02x:%02x:%02x:%02x:%02x:%02x",
463 desc.mac_index,
464 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
465 } else {
466 ret = -ENONET;
467 }
468
469error:
470 sock_release(desc.nlsock);
471 _leave(" = %d", ret);
472 return ret;
473}
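
Note that if no link yields a hardware address, desc.mac_index stays at UINT_MAX and the function fails with -ENONET. A caller that must have a six-byte node identifier either way might fall back to a random one; this fallback is purely illustrative, not something this patch does:

	u8 mac[6];

	if (afs_get_MAC_address(mac) < 0)
		get_random_bytes(mac, sizeof(mac));	/* no NIC found: improvise an ID */
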
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 7b0e3192ee39..36c1306e09e0 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -1,4 +1,4 @@
1/* vlclient.c: AFS Volume Location Service client 1/* AFS Volume Location Service client
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -11,247 +11,76 @@
11 11
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <rxrpc/rxrpc.h>
15#include <rxrpc/transport.h>
16#include <rxrpc/connection.h>
17#include <rxrpc/call.h>
18#include "server.h"
19#include "volume.h"
20#include "vlclient.h"
21#include "kafsasyncd.h"
22#include "kafstimod.h"
23#include "errors.h"
24#include "internal.h" 14#include "internal.h"
25 15
26#define VLGETENTRYBYID 503 /* AFS Get Cache Entry By ID operation ID */
27#define VLGETENTRYBYNAME 504 /* AFS Get Cache Entry By Name operation ID */
28#define VLPROBE 514 /* AFS Probe Volume Location Service operation ID */
29
30static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call);
31static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call);
32
33/*****************************************************************************/
34/* 16/*
35 * map afs VL abort codes to/from Linux error codes 17 * map volume locator abort codes to error codes
36 * - called with call->lock held
37 */ 18 */
38static void afs_rxvl_aemap(struct rxrpc_call *call) 19static int afs_vl_abort_to_error(u32 abort_code)
39{ 20{
40 int err; 21 _enter("%u", abort_code);
41 22
42 _enter("{%u,%u,%d}", 23 switch (abort_code) {
43 call->app_err_state, call->app_abort_code, call->app_errno); 24 case AFSVL_IDEXIST: return -EEXIST;
44 25 case AFSVL_IO: return -EREMOTEIO;
45 switch (call->app_err_state) { 26 case AFSVL_NAMEEXIST: return -EEXIST;
46 case RXRPC_ESTATE_LOCAL_ABORT: 27 case AFSVL_CREATEFAIL: return -EREMOTEIO;
47 call->app_abort_code = -call->app_errno; 28 case AFSVL_NOENT: return -ENOMEDIUM;
48 return; 29 case AFSVL_EMPTY: return -ENOMEDIUM;
49 30 case AFSVL_ENTDELETED: return -ENOMEDIUM;
50 case RXRPC_ESTATE_PEER_ABORT: 31 case AFSVL_BADNAME: return -EINVAL;
51 switch (call->app_abort_code) { 32 case AFSVL_BADINDEX: return -EINVAL;
52 case AFSVL_IDEXIST: err = -EEXIST; break; 33 case AFSVL_BADVOLTYPE: return -EINVAL;
53 case AFSVL_IO: err = -EREMOTEIO; break; 34 case AFSVL_BADSERVER: return -EINVAL;
54 case AFSVL_NAMEEXIST: err = -EEXIST; break; 35 case AFSVL_BADPARTITION: return -EINVAL;
55 case AFSVL_CREATEFAIL: err = -EREMOTEIO; break; 36 case AFSVL_REPSFULL: return -EFBIG;
56 case AFSVL_NOENT: err = -ENOMEDIUM; break; 37 case AFSVL_NOREPSERVER: return -ENOENT;
57 case AFSVL_EMPTY: err = -ENOMEDIUM; break; 38 case AFSVL_DUPREPSERVER: return -EEXIST;
58 case AFSVL_ENTDELETED: err = -ENOMEDIUM; break; 39 case AFSVL_RWNOTFOUND: return -ENOENT;
59 case AFSVL_BADNAME: err = -EINVAL; break; 40 case AFSVL_BADREFCOUNT: return -EINVAL;
60 case AFSVL_BADINDEX: err = -EINVAL; break; 41 case AFSVL_SIZEEXCEEDED: return -EINVAL;
61 case AFSVL_BADVOLTYPE: err = -EINVAL; break; 42 case AFSVL_BADENTRY: return -EINVAL;
62 case AFSVL_BADSERVER: err = -EINVAL; break; 43 case AFSVL_BADVOLIDBUMP: return -EINVAL;
63 case AFSVL_BADPARTITION: err = -EINVAL; break; 44 case AFSVL_IDALREADYHASHED: return -EINVAL;
64 case AFSVL_REPSFULL: err = -EFBIG; break; 45 case AFSVL_ENTRYLOCKED: return -EBUSY;
65 case AFSVL_NOREPSERVER: err = -ENOENT; break; 46 case AFSVL_BADVOLOPER: return -EBADRQC;
66 case AFSVL_DUPREPSERVER: err = -EEXIST; break; 47 case AFSVL_BADRELLOCKTYPE: return -EINVAL;
67 case AFSVL_RWNOTFOUND: err = -ENOENT; break; 48 case AFSVL_RERELEASE: return -EREMOTEIO;
68 case AFSVL_BADREFCOUNT: err = -EINVAL; break; 49 case AFSVL_BADSERVERFLAG: return -EINVAL;
69 case AFSVL_SIZEEXCEEDED: err = -EINVAL; break; 50 case AFSVL_PERM: return -EACCES;
70 case AFSVL_BADENTRY: err = -EINVAL; break; 51 case AFSVL_NOMEM: return -EREMOTEIO;
71 case AFSVL_BADVOLIDBUMP: err = -EINVAL; break;
72 case AFSVL_IDALREADYHASHED: err = -EINVAL; break;
73 case AFSVL_ENTRYLOCKED: err = -EBUSY; break;
74 case AFSVL_BADVOLOPER: err = -EBADRQC; break;
75 case AFSVL_BADRELLOCKTYPE: err = -EINVAL; break;
76 case AFSVL_RERELEASE: err = -EREMOTEIO; break;
77 case AFSVL_BADSERVERFLAG: err = -EINVAL; break;
78 case AFSVL_PERM: err = -EACCES; break;
79 case AFSVL_NOMEM: err = -EREMOTEIO; break;
80 default:
81 err = afs_abort_to_error(call->app_abort_code);
82 break;
83 }
84 call->app_errno = err;
85 return;
86
87 default: 52 default:
88 return; 53 return afs_abort_to_error(abort_code);
89 } 54 }
90} /* end afs_rxvl_aemap() */ 55}
91 56
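The per-call error state machine on the left (afs_rxvl_aemap(), driven off call->app_err_state) is replaced by a pure abort-code translator that each call type below plugs in through its .abort_to_error hook. Translation becomes a plain function call:

	err = afs_vl_abort_to_error(AFSVL_ENTRYLOCKED);	/* yields -EBUSY */

Codes not specific to the VL service still fall through to the generic afs_abort_to_error().
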
92#if 0
93/*****************************************************************************/
94/* 57/*
95 * probe a volume location server to see if it is still alive -- unused 58 * deliver reply data to a VL.GetEntryByXXX call
96 */ 59 */
97static int afs_rxvl_probe(struct afs_server *server, int alloc_flags) 60static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call,
61 struct sk_buff *skb, bool last)
98{ 62{
99 struct rxrpc_connection *conn; 63 struct afs_cache_vlocation *entry;
100 struct rxrpc_call *call; 64 __be32 *bp;
101 struct kvec piov[1]; 65 u32 tmp;
102 size_t sent; 66 int loop;
103 int ret;
104 __be32 param[1];
105
106 DECLARE_WAITQUEUE(myself, current);
107
108 /* get hold of the vlserver connection */
109 ret = afs_server_get_vlconn(server, &conn);
110 if (ret < 0)
111 goto out;
112
113 /* create a call through that connection */
114 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
115 if (ret < 0) {
116 printk("kAFS: Unable to create call: %d\n", ret);
117 goto out_put_conn;
118 }
119 call->app_opcode = VLPROBE;
120
121 /* we want to get event notifications from the call */
122 add_wait_queue(&call->waitq, &myself);
123
124 /* marshall the parameters */
125 param[0] = htonl(VLPROBE);
126 piov[0].iov_len = sizeof(param);
127 piov[0].iov_base = param;
128
129 /* send the parameters to the server */
130 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET,
131 alloc_flags, 0, &sent);
132 if (ret < 0)
133 goto abort;
134
135 /* wait for the reply to completely arrive */
136 for (;;) {
137 set_current_state(TASK_INTERRUPTIBLE);
138 if (call->app_call_state != RXRPC_CSTATE_CLNT_RCV_REPLY ||
139 signal_pending(current))
140 break;
141 schedule();
142 }
143 set_current_state(TASK_RUNNING);
144
145 ret = -EINTR;
146 if (signal_pending(current))
147 goto abort;
148
149 switch (call->app_call_state) {
150 case RXRPC_CSTATE_ERROR:
151 ret = call->app_errno;
152 goto out_unwait;
153
154 case RXRPC_CSTATE_CLNT_GOT_REPLY:
155 ret = 0;
156 goto out_unwait;
157
158 default:
159 BUG();
160 }
161
162 abort:
163 set_current_state(TASK_UNINTERRUPTIBLE);
164 rxrpc_call_abort(call, ret);
165 schedule();
166
167 out_unwait:
168 set_current_state(TASK_RUNNING);
169 remove_wait_queue(&call->waitq, &myself);
170 rxrpc_put_call(call);
171 out_put_conn:
172 rxrpc_put_connection(conn);
173 out:
174 return ret;
175 67
176} /* end afs_rxvl_probe() */ 68 _enter(",,%u", last);
177#endif
178 69
179/*****************************************************************************/ 70 afs_transfer_reply(call, skb);
180/* 71 if (!last)
181 * look up a volume location database entry by name 72 return 0;
182 */
183int afs_rxvl_get_entry_by_name(struct afs_server *server,
184 const char *volname,
185 unsigned volnamesz,
186 struct afs_cache_vlocation *entry)
187{
188 DECLARE_WAITQUEUE(myself, current);
189
190 struct rxrpc_connection *conn;
191 struct rxrpc_call *call;
192 struct kvec piov[3];
193 unsigned tmp;
194 size_t sent;
195 int ret, loop;
196 __be32 *bp, param[2], zero;
197
198 _enter(",%*.*s,%u,", volnamesz, volnamesz, volname, volnamesz);
199
200 memset(entry, 0, sizeof(*entry));
201
202 /* get hold of the vlserver connection */
203 ret = afs_server_get_vlconn(server, &conn);
204 if (ret < 0)
205 goto out;
206
207 /* create a call through that connection */
208 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
209 if (ret < 0) {
210 printk("kAFS: Unable to create call: %d\n", ret);
211 goto out_put_conn;
212 }
213 call->app_opcode = VLGETENTRYBYNAME;
214 73
215 /* we want to get event notifications from the call */ 74 if (call->reply_size != call->reply_max)
216 add_wait_queue(&call->waitq, &myself); 75 return -EBADMSG;
217 76
218 /* marshall the parameters */ 77 /* unmarshall the reply once we've received all of it */
219 piov[1].iov_len = volnamesz; 78 entry = call->reply;
220 piov[1].iov_base = (char *) volname; 79 bp = call->buffer;
221
222 zero = 0;
223 piov[2].iov_len = (4 - (piov[1].iov_len & 3)) & 3;
224 piov[2].iov_base = &zero;
225
226 param[0] = htonl(VLGETENTRYBYNAME);
227 param[1] = htonl(piov[1].iov_len);
228
229 piov[0].iov_len = sizeof(param);
230 piov[0].iov_base = param;
231
232 /* send the parameters to the server */
233 ret = rxrpc_call_write_data(call, 3, piov, RXRPC_LAST_PACKET, GFP_NOFS,
234 0, &sent);
235 if (ret < 0)
236 goto abort;
237
238 /* wait for the reply to completely arrive */
239 bp = rxrpc_call_alloc_scratch(call, 384);
240
241 ret = rxrpc_call_read_data(call, bp, 384,
242 RXRPC_CALL_READ_BLOCK |
243 RXRPC_CALL_READ_ALL);
244 if (ret < 0) {
245 if (ret == -ECONNABORTED) {
246 ret = call->app_errno;
247 goto out_unwait;
248 }
249 goto abort;
250 }
251 80
252 /* unmarshall the reply */
253 for (loop = 0; loop < 64; loop++) 81 for (loop = 0; loop < 64; loop++)
254 entry->name[loop] = ntohl(*bp++); 82 entry->name[loop] = ntohl(*bp++);
83 entry->name[loop] = 0;
255 bp++; /* final NUL */ 84 bp++; /* final NUL */
256 85
257 bp++; /* type */ 86 bp++; /* type */
@@ -264,6 +93,7 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server,
264 93
265 for (loop = 0; loop < 8; loop++) { 94 for (loop = 0; loop < 8; loop++) {
266 tmp = ntohl(*bp++); 95 tmp = ntohl(*bp++);
96 entry->srvtmask[loop] = 0;
267 if (tmp & AFS_VLSF_RWVOL) 97 if (tmp & AFS_VLSF_RWVOL)
268 entry->srvtmask[loop] |= AFS_VOL_VTM_RW; 98 entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
269 if (tmp & AFS_VLSF_ROVOL) 99 if (tmp & AFS_VLSF_ROVOL)
@@ -279,417 +109,110 @@ int afs_rxvl_get_entry_by_name(struct afs_server *server,
279 bp++; /* clone ID */ 109 bp++; /* clone ID */
280 110
281 tmp = ntohl(*bp++); /* flags */ 111 tmp = ntohl(*bp++); /* flags */
112 entry->vidmask = 0;
282 if (tmp & AFS_VLF_RWEXISTS) 113 if (tmp & AFS_VLF_RWEXISTS)
283 entry->vidmask |= AFS_VOL_VTM_RW; 114 entry->vidmask |= AFS_VOL_VTM_RW;
284 if (tmp & AFS_VLF_ROEXISTS) 115 if (tmp & AFS_VLF_ROEXISTS)
285 entry->vidmask |= AFS_VOL_VTM_RO; 116 entry->vidmask |= AFS_VOL_VTM_RO;
286 if (tmp & AFS_VLF_BACKEXISTS) 117 if (tmp & AFS_VLF_BACKEXISTS)
287 entry->vidmask |= AFS_VOL_VTM_BAK; 118 entry->vidmask |= AFS_VOL_VTM_BAK;
288
289 ret = -ENOMEDIUM;
290 if (!entry->vidmask) 119 if (!entry->vidmask)
291 goto abort; 120 return -EBADMSG;
292
293 /* success */
294 entry->rtime = get_seconds();
295 ret = 0;
296
297 out_unwait:
298 set_current_state(TASK_RUNNING);
299 remove_wait_queue(&call->waitq, &myself);
300 rxrpc_put_call(call);
301 out_put_conn:
302 rxrpc_put_connection(conn);
303 out:
304 _leave(" = %d", ret);
305 return ret;
306
307 abort:
308 set_current_state(TASK_UNINTERRUPTIBLE);
309 rxrpc_call_abort(call, ret);
310 schedule();
311 goto out_unwait;
312} /* end afs_rxvl_get_entry_by_name() */
313
314/*****************************************************************************/
315/*
316 * look up a volume location database entry by ID
317 */
318int afs_rxvl_get_entry_by_id(struct afs_server *server,
319 afs_volid_t volid,
320 afs_voltype_t voltype,
321 struct afs_cache_vlocation *entry)
322{
323 DECLARE_WAITQUEUE(myself, current);
324
325 struct rxrpc_connection *conn;
326 struct rxrpc_call *call;
327 struct kvec piov[1];
328 unsigned tmp;
329 size_t sent;
330 int ret, loop;
331 __be32 *bp, param[3];
332
333 _enter(",%x,%d,", volid, voltype);
334
335 memset(entry, 0, sizeof(*entry));
336
337 /* get hold of the vlserver connection */
338 ret = afs_server_get_vlconn(server, &conn);
339 if (ret < 0)
340 goto out;
341
342 /* create a call through that connection */
343 ret = rxrpc_create_call(conn, NULL, NULL, afs_rxvl_aemap, &call);
344 if (ret < 0) {
345 printk("kAFS: Unable to create call: %d\n", ret);
346 goto out_put_conn;
347 }
348 call->app_opcode = VLGETENTRYBYID;
349
350 /* we want to get event notifications from the call */
351 add_wait_queue(&call->waitq, &myself);
352
353 /* marshall the parameters */
354 param[0] = htonl(VLGETENTRYBYID);
355 param[1] = htonl(volid);
356 param[2] = htonl(voltype);
357
358 piov[0].iov_len = sizeof(param);
359 piov[0].iov_base = param;
360
361 /* send the parameters to the server */
362 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
363 0, &sent);
364 if (ret < 0)
365 goto abort;
366
367 /* wait for the reply to completely arrive */
368 bp = rxrpc_call_alloc_scratch(call, 384);
369
370 ret = rxrpc_call_read_data(call, bp, 384,
371 RXRPC_CALL_READ_BLOCK |
372 RXRPC_CALL_READ_ALL);
373 if (ret < 0) {
374 if (ret == -ECONNABORTED) {
375 ret = call->app_errno;
376 goto out_unwait;
377 }
378 goto abort;
379 }
380
381 /* unmarshall the reply */
382 for (loop = 0; loop < 64; loop++)
383 entry->name[loop] = ntohl(*bp++);
384 bp++; /* final NUL */
385 121
386 bp++; /* type */ 122 _leave(" = 0 [done]");
387 entry->nservers = ntohl(*bp++); 123 return 0;
388 124}
389 for (loop = 0; loop < 8; loop++)
390 entry->servers[loop].s_addr = *bp++;
391
392 bp += 8; /* partition IDs */
393 125
394 for (loop = 0; loop < 8; loop++) {
395 tmp = ntohl(*bp++);
396 if (tmp & AFS_VLSF_RWVOL)
397 entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
398 if (tmp & AFS_VLSF_ROVOL)
399 entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
400 if (tmp & AFS_VLSF_BACKVOL)
401 entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
402 }
403
404 entry->vid[0] = ntohl(*bp++);
405 entry->vid[1] = ntohl(*bp++);
406 entry->vid[2] = ntohl(*bp++);
407
408 bp++; /* clone ID */
409
410 tmp = ntohl(*bp++); /* flags */
411 if (tmp & AFS_VLF_RWEXISTS)
412 entry->vidmask |= AFS_VOL_VTM_RW;
413 if (tmp & AFS_VLF_ROEXISTS)
414 entry->vidmask |= AFS_VOL_VTM_RO;
415 if (tmp & AFS_VLF_BACKEXISTS)
416 entry->vidmask |= AFS_VOL_VTM_BAK;
417
418 ret = -ENOMEDIUM;
419 if (!entry->vidmask)
420 goto abort;
421
422#if 0 /* TODO: remove */
423 entry->nservers = 3;
424 entry->servers[0].s_addr = htonl(0xac101249);
425 entry->servers[1].s_addr = htonl(0xac101243);
426 entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
427
428 entry->srvtmask[0] = AFS_VOL_VTM_RO;
429 entry->srvtmask[1] = AFS_VOL_VTM_RO;
430 entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
431#endif
432
433 /* success */
434 entry->rtime = get_seconds();
435 ret = 0;
436
437 out_unwait:
438 set_current_state(TASK_RUNNING);
439 remove_wait_queue(&call->waitq, &myself);
440 rxrpc_put_call(call);
441 out_put_conn:
442 rxrpc_put_connection(conn);
443 out:
444 _leave(" = %d", ret);
445 return ret;
446
447 abort:
448 set_current_state(TASK_UNINTERRUPTIBLE);
449 rxrpc_call_abort(call, ret);
450 schedule();
451 goto out_unwait;
452} /* end afs_rxvl_get_entry_by_id() */
453
454/*****************************************************************************/
455/* 126/*
456 * look up a volume location database entry by ID asynchronously 127 * VL.GetEntryByName operation type
457 */ 128 */
458int afs_rxvl_get_entry_by_id_async(struct afs_async_op *op, 129static const struct afs_call_type afs_RXVLGetEntryByName = {
459 afs_volid_t volid, 130 .name = "VL.GetEntryByName",
460 afs_voltype_t voltype) 131 .deliver = afs_deliver_vl_get_entry_by_xxx,
461{ 132 .abort_to_error = afs_vl_abort_to_error,
462 struct rxrpc_connection *conn; 133 .destructor = afs_flat_call_destructor,
463 struct rxrpc_call *call; 134};
464 struct kvec piov[1];
465 size_t sent;
466 int ret;
467 __be32 param[3];
468
469 _enter(",%x,%d,", volid, voltype);
470
471 /* get hold of the vlserver connection */
472 ret = afs_server_get_vlconn(op->server, &conn);
473 if (ret < 0) {
474 _leave(" = %d", ret);
475 return ret;
476 }
477
478 /* create a call through that connection */
479 ret = rxrpc_create_call(conn,
480 afs_rxvl_get_entry_by_id_attn,
481 afs_rxvl_get_entry_by_id_error,
482 afs_rxvl_aemap,
483 &op->call);
484 rxrpc_put_connection(conn);
485
486 if (ret < 0) {
487 printk("kAFS: Unable to create call: %d\n", ret);
488 _leave(" = %d", ret);
489 return ret;
490 }
491 135
492 op->call->app_opcode = VLGETENTRYBYID; 136/*
493 op->call->app_user = op; 137 * VL.GetEntryById operation type
494 138 */
495 call = op->call; 139static const struct afs_call_type afs_RXVLGetEntryById = {
496 rxrpc_get_call(call); 140 .name = "VL.GetEntryById",
497 141 .deliver = afs_deliver_vl_get_entry_by_xxx,
498 /* send event notifications from the call to kafsasyncd */ 142 .abort_to_error = afs_vl_abort_to_error,
499 afs_kafsasyncd_begin_op(op); 143 .destructor = afs_flat_call_destructor,
500 144};
501 /* marshall the parameters */
502 param[0] = htonl(VLGETENTRYBYID);
503 param[1] = htonl(volid);
504 param[2] = htonl(voltype);
505
506 piov[0].iov_len = sizeof(param);
507 piov[0].iov_base = param;
508
509 /* allocate result read buffer in scratch space */
510 call->app_scr_ptr = rxrpc_call_alloc_scratch(op->call, 384);
511
512 /* send the parameters to the server */
513 ret = rxrpc_call_write_data(call, 1, piov, RXRPC_LAST_PACKET, GFP_NOFS,
514 0, &sent);
515 if (ret < 0) {
516 rxrpc_call_abort(call, ret); /* handle from kafsasyncd */
517 ret = 0;
518 goto out;
519 }
520
521 /* wait for the reply to completely arrive */
522 ret = rxrpc_call_read_data(call, call->app_scr_ptr, 384, 0);
523 switch (ret) {
524 case 0:
525 case -EAGAIN:
526 case -ECONNABORTED:
527 ret = 0;
528 break; /* all handled by kafsasyncd */
529
530 default:
531 rxrpc_call_abort(call, ret); /* make kafsasyncd handle it */
532 ret = 0;
533 break;
534 }
535
536 out:
537 rxrpc_put_call(call);
538 _leave(" = %d", ret);
539 return ret;
540
541} /* end afs_rxvl_get_entry_by_id_async() */
542 145
543/*****************************************************************************/
544/* 146/*
545 * attend to the asynchronous get VLDB entry by ID 147 * dispatch a get volume entry by name operation
546 */ 148 */
547int afs_rxvl_get_entry_by_id_async2(struct afs_async_op *op, 149int afs_vl_get_entry_by_name(struct in_addr *addr,
548 struct afs_cache_vlocation *entry) 150 struct key *key,
151 const char *volname,
152 struct afs_cache_vlocation *entry,
153 const struct afs_wait_mode *wait_mode)
549{ 154{
155 struct afs_call *call;
156 size_t volnamesz, reqsz, padsz;
550 __be32 *bp; 157 __be32 *bp;
551 __u32 tmp;
552 int loop, ret;
553
554 _enter("{op=%p cst=%u}", op, op->call->app_call_state);
555
556 memset(entry, 0, sizeof(*entry));
557
558 if (op->call->app_call_state == RXRPC_CSTATE_COMPLETE) {
559 /* operation finished */
560 afs_kafsasyncd_terminate_op(op);
561
562 bp = op->call->app_scr_ptr;
563
564 /* unmarshall the reply */
565 for (loop = 0; loop < 64; loop++)
566 entry->name[loop] = ntohl(*bp++);
567 bp++; /* final NUL */
568
569 bp++; /* type */
570 entry->nservers = ntohl(*bp++);
571
572 for (loop = 0; loop < 8; loop++)
573 entry->servers[loop].s_addr = *bp++;
574
575 bp += 8; /* partition IDs */
576
577 for (loop = 0; loop < 8; loop++) {
578 tmp = ntohl(*bp++);
579 if (tmp & AFS_VLSF_RWVOL)
580 entry->srvtmask[loop] |= AFS_VOL_VTM_RW;
581 if (tmp & AFS_VLSF_ROVOL)
582 entry->srvtmask[loop] |= AFS_VOL_VTM_RO;
583 if (tmp & AFS_VLSF_BACKVOL)
584 entry->srvtmask[loop] |= AFS_VOL_VTM_BAK;
585 }
586
587 entry->vid[0] = ntohl(*bp++);
588 entry->vid[1] = ntohl(*bp++);
589 entry->vid[2] = ntohl(*bp++);
590
591 bp++; /* clone ID */
592
593 tmp = ntohl(*bp++); /* flags */
594 if (tmp & AFS_VLF_RWEXISTS)
595 entry->vidmask |= AFS_VOL_VTM_RW;
596 if (tmp & AFS_VLF_ROEXISTS)
597 entry->vidmask |= AFS_VOL_VTM_RO;
598 if (tmp & AFS_VLF_BACKEXISTS)
599 entry->vidmask |= AFS_VOL_VTM_BAK;
600
601 ret = -ENOMEDIUM;
602 if (!entry->vidmask) {
603 rxrpc_call_abort(op->call, ret);
604 goto done;
605 }
606
607#if 0 /* TODO: remove */
608 entry->nservers = 3;
609 entry->servers[0].s_addr = htonl(0xac101249);
610 entry->servers[1].s_addr = htonl(0xac101243);
611 entry->servers[2].s_addr = htonl(0xac10125b /*0xac10125b*/);
612
613 entry->srvtmask[0] = AFS_VOL_VTM_RO;
614 entry->srvtmask[1] = AFS_VOL_VTM_RO;
615 entry->srvtmask[2] = AFS_VOL_VTM_RO | AFS_VOL_VTM_RW;
616#endif
617
618 /* success */
619 entry->rtime = get_seconds();
620 ret = 0;
621 goto done;
622 }
623 158
624 if (op->call->app_call_state == RXRPC_CSTATE_ERROR) { 159 _enter("");
625 /* operation error */
626 ret = op->call->app_errno;
627 goto done;
628 }
629 160
630 _leave(" = -EAGAIN"); 161 volnamesz = strlen(volname);
631 return -EAGAIN; 162 padsz = (4 - (volnamesz & 3)) & 3;
163 reqsz = 8 + volnamesz + padsz;
632 164
633 done: 165 call = afs_alloc_flat_call(&afs_RXVLGetEntryByName, reqsz, 384);
634 rxrpc_put_call(op->call); 166 if (!call)
635 op->call = NULL; 167 return -ENOMEM;
636 _leave(" = %d", ret);
637 return ret;
638} /* end afs_rxvl_get_entry_by_id_async2() */
639 168
640/*****************************************************************************/ 169 call->key = key;
641/* 170 call->reply = entry;
642 * handle attention events on an async get-entry-by-ID op 171 call->service_id = VL_SERVICE;
643 * - called from krxiod 172 call->port = htons(AFS_VL_PORT);
644 */
645static void afs_rxvl_get_entry_by_id_attn(struct rxrpc_call *call)
646{
647 struct afs_async_op *op = call->app_user;
648
649 _enter("{op=%p cst=%u}", op, call->app_call_state);
650
651 switch (call->app_call_state) {
652 case RXRPC_CSTATE_COMPLETE:
653 afs_kafsasyncd_attend_op(op);
654 break;
655 case RXRPC_CSTATE_CLNT_RCV_REPLY:
656 if (call->app_async_read)
657 break;
658 case RXRPC_CSTATE_CLNT_GOT_REPLY:
659 if (call->app_read_count == 0)
660 break;
661 printk("kAFS: Reply bigger than expected"
662 " {cst=%u asyn=%d mark=%Zu rdy=%Zu pr=%u%s}",
663 call->app_call_state,
664 call->app_async_read,
665 call->app_mark,
666 call->app_ready_qty,
667 call->pkt_rcv_count,
668 call->app_last_rcv ? " last" : "");
669
670 rxrpc_call_abort(call, -EBADMSG);
671 break;
672 default:
673 BUG();
674 }
675 173
676 _leave(""); 174 /* marshall the parameters */
175 bp = call->request;
176 *bp++ = htonl(VLGETENTRYBYNAME);
177 *bp++ = htonl(volnamesz);
178 memcpy(bp, volname, volnamesz);
179 if (padsz > 0)
180 memset((void *) bp + volnamesz, 0, padsz);
677 181
678} /* end afs_rxvl_get_entry_by_id_attn() */ 182 /* initiate the call */
183 return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
184}
679 185
680/*****************************************************************************/
681/* 186/*
682 * handle error events on an async get-entry-by-ID op 187 * dispatch a get volume entry by ID operation
683 * - called from krxiod
684 */ 188 */
685static void afs_rxvl_get_entry_by_id_error(struct rxrpc_call *call) 189int afs_vl_get_entry_by_id(struct in_addr *addr,
190 struct key *key,
191 afs_volid_t volid,
192 afs_voltype_t voltype,
193 struct afs_cache_vlocation *entry,
194 const struct afs_wait_mode *wait_mode)
686{ 195{
687 struct afs_async_op *op = call->app_user; 196 struct afs_call *call;
197 __be32 *bp;
688 198
689 _enter("{op=%p cst=%u}", op, call->app_call_state); 199 _enter("");
690 200
691 afs_kafsasyncd_attend_op(op); 201 call = afs_alloc_flat_call(&afs_RXVLGetEntryById, 12, 384);
202 if (!call)
203 return -ENOMEM;
692 204
693 _leave(""); 205 call->key = key;
206 call->reply = entry;
207 call->service_id = VL_SERVICE;
208 call->port = htons(AFS_VL_PORT);
694 209
695} /* end afs_rxvl_get_entry_by_id_error() */ 210 /* marshall the parameters */
211 bp = call->request;
212 *bp++ = htonl(VLGETENTRYBYID);
213 *bp++ = htonl(volid);
214 *bp = htonl(voltype);
215
216 /* initiate the call */
217 return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
218}
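
Both dispatchers share the same flat-call shape: afs_alloc_flat_call() sizes the request buffer plus a 384-byte reply buffer big enough for a VLDB entry, the parameters are marshalled big-endian into call->request, and afs_make_call() fires the RPC under the chosen wait mode. In the form used by vlocation.c below, a synchronous lookup reads:

	struct afs_cache_vlocation vldb;
	struct in_addr addr = cell->vl_addrs[cell->vl_curr_svix];
	int ret;

	ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, &vldb,
				       &afs_sync_call);
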
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 782ee7c600ca..74cce174882a 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -1,6 +1,6 @@
1/* vlocation.c: volume location management 1/* AFS volume location management
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -12,131 +12,61 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h>
16#include <linux/fs.h>
17#include <linux/pagemap.h>
18#include "volume.h"
19#include "cell.h"
20#include "cmservice.h"
21#include "fsclient.h"
22#include "vlclient.h"
23#include "kafstimod.h"
24#include <rxrpc/connection.h>
25#include "internal.h" 15#include "internal.h"
26 16
27#define AFS_VLDB_TIMEOUT HZ*1000 17unsigned afs_vlocation_timeout = 10; /* volume location timeout in seconds */
18unsigned afs_vlocation_update_timeout = 10 * 60;
28 19
29static void afs_vlocation_update_timer(struct afs_timer *timer); 20static void afs_vlocation_reaper(struct work_struct *);
30static void afs_vlocation_update_attend(struct afs_async_op *op); 21static void afs_vlocation_updater(struct work_struct *);
31static void afs_vlocation_update_discard(struct afs_async_op *op);
32static void __afs_put_vlocation(struct afs_vlocation *vlocation);
33 22
34static void __afs_vlocation_timeout(struct afs_timer *timer) 23static LIST_HEAD(afs_vlocation_updates);
35{ 24static LIST_HEAD(afs_vlocation_graveyard);
36 struct afs_vlocation *vlocation = 25static DEFINE_SPINLOCK(afs_vlocation_updates_lock);
37 list_entry(timer, struct afs_vlocation, timeout); 26static DEFINE_SPINLOCK(afs_vlocation_graveyard_lock);
38 27static DECLARE_DELAYED_WORK(afs_vlocation_reap, afs_vlocation_reaper);
39 _debug("VL TIMEOUT [%s{u=%d}]", 28static DECLARE_DELAYED_WORK(afs_vlocation_update, afs_vlocation_updater);
40 vlocation->vldb.name, atomic_read(&vlocation->usage)); 29static struct workqueue_struct *afs_vlocation_update_worker;
41
42 afs_vlocation_do_timeout(vlocation);
43}
44
45static const struct afs_timer_ops afs_vlocation_timer_ops = {
46 .timed_out = __afs_vlocation_timeout,
47};
48 30
49static const struct afs_timer_ops afs_vlocation_update_timer_ops = {
50 .timed_out = afs_vlocation_update_timer,
51};
52
53static const struct afs_async_op_ops afs_vlocation_update_op_ops = {
54 .attend = afs_vlocation_update_attend,
55 .discard = afs_vlocation_update_discard,
56};
57
58static LIST_HEAD(afs_vlocation_update_pendq); /* queue of VLs awaiting update */
59static struct afs_vlocation *afs_vlocation_update; /* VL currently being updated */
60static DEFINE_SPINLOCK(afs_vlocation_update_lock); /* lock guarding update queue */
61
62#ifdef AFS_CACHING_SUPPORT
63static cachefs_match_val_t afs_vlocation_cache_match(void *target,
64 const void *entry);
65static void afs_vlocation_cache_update(void *source, void *entry);
66
67struct cachefs_index_def afs_vlocation_cache_index_def = {
68 .name = "vldb",
69 .data_size = sizeof(struct afs_cache_vlocation),
70 .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
71 .match = afs_vlocation_cache_match,
72 .update = afs_vlocation_cache_update,
73};
74#endif
75
76/*****************************************************************************/
77/* 31/*
78 * iterate through the VL servers in a cell until one of them admits knowing 32 * iterate through the VL servers in a cell until one of them admits knowing
79 * about the volume in question 33 * about the volume in question
80 * - caller must have cell->vl_sem write-locked
81 */ 34 */
82static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation, 35static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
83 const char *name, 36 struct key *key,
84 unsigned namesz,
85 struct afs_cache_vlocation *vldb) 37 struct afs_cache_vlocation *vldb)
86{ 38{
87 struct afs_server *server = NULL; 39 struct afs_cell *cell = vl->cell;
88 struct afs_cell *cell = vlocation->cell; 40 struct in_addr addr;
89 int count, ret; 41 int count, ret;
90 42
91 _enter("%s,%*.*s,%u", cell->name, namesz, namesz, name, namesz); 43 _enter("%s,%s", cell->name, vl->vldb.name);
92 44
45 down_write(&vl->cell->vl_sem);
93 ret = -ENOMEDIUM; 46 ret = -ENOMEDIUM;
94 for (count = cell->vl_naddrs; count > 0; count--) { 47 for (count = cell->vl_naddrs; count > 0; count--) {
95 _debug("CellServ[%hu]: %08x", 48 addr = cell->vl_addrs[cell->vl_curr_svix];
96 cell->vl_curr_svix, 49
97 cell->vl_addrs[cell->vl_curr_svix].s_addr); 50 _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
98
99 /* try and create a server */
100 ret = afs_server_lookup(cell,
101 &cell->vl_addrs[cell->vl_curr_svix],
102 &server);
103 switch (ret) {
104 case 0:
105 break;
106 case -ENOMEM:
107 case -ENONET:
108 goto out;
109 default:
110 goto rotate;
111 }
112 51
113 /* attempt to access the VL server */ 52 /* attempt to access the VL server */
114 ret = afs_rxvl_get_entry_by_name(server, name, namesz, vldb); 53 ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
54 &afs_sync_call);
115 switch (ret) { 55 switch (ret) {
116 case 0: 56 case 0:
117 afs_put_server(server);
118 goto out; 57 goto out;
119 case -ENOMEM: 58 case -ENOMEM:
120 case -ENONET: 59 case -ENONET:
121 case -ENETUNREACH: 60 case -ENETUNREACH:
122 case -EHOSTUNREACH: 61 case -EHOSTUNREACH:
123 case -ECONNREFUSED: 62 case -ECONNREFUSED:
124 down_write(&server->sem);
125 if (server->vlserver) {
126 rxrpc_put_connection(server->vlserver);
127 server->vlserver = NULL;
128 }
129 up_write(&server->sem);
130 afs_put_server(server);
131 if (ret == -ENOMEM || ret == -ENONET) 63 if (ret == -ENOMEM || ret == -ENONET)
132 goto out; 64 goto out;
133 goto rotate; 65 goto rotate;
134 case -ENOMEDIUM: 66 case -ENOMEDIUM:
135 afs_put_server(server);
136 goto out; 67 goto out;
137 default: 68 default:
138 afs_put_server(server); 69 ret = -EIO;
139 ret = -ENOMEDIUM;
140 goto rotate; 70 goto rotate;
141 } 71 }
142 72
@@ -146,76 +76,66 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vlocation,
146 cell->vl_curr_svix %= cell->vl_naddrs; 76 cell->vl_curr_svix %= cell->vl_naddrs;
147 } 77 }
148 78
149 out: 79out:
80 up_write(&vl->cell->vl_sem);
150 _leave(" = %d", ret); 81 _leave(" = %d", ret);
151 return ret; 82 return ret;
83}
152 84
153} /* end afs_vlocation_access_vl_by_name() */
154
155/*****************************************************************************/
156/* 85/*
157 * iterate through the VL servers in a cell until one of them admits knowing 86 * iterate through the VL servers in a cell until one of them admits knowing
158 * about the volume in question 87 * about the volume in question
159 * - caller must have cell->vl_sem write-locked
160 */ 88 */
161static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation, 89static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
90 struct key *key,
162 afs_volid_t volid, 91 afs_volid_t volid,
163 afs_voltype_t voltype, 92 afs_voltype_t voltype,
164 struct afs_cache_vlocation *vldb) 93 struct afs_cache_vlocation *vldb)
165{ 94{
166 struct afs_server *server = NULL; 95 struct afs_cell *cell = vl->cell;
167 struct afs_cell *cell = vlocation->cell; 96 struct in_addr addr;
168 int count, ret; 97 int count, ret;
169 98
170 _enter("%s,%x,%d,", cell->name, volid, voltype); 99 _enter("%s,%x,%d,", cell->name, volid, voltype);
171 100
101 down_write(&vl->cell->vl_sem);
172 ret = -ENOMEDIUM; 102 ret = -ENOMEDIUM;
173 for (count = cell->vl_naddrs; count > 0; count--) { 103 for (count = cell->vl_naddrs; count > 0; count--) {
174 _debug("CellServ[%hu]: %08x", 104 addr = cell->vl_addrs[cell->vl_curr_svix];
175 cell->vl_curr_svix, 105
176 cell->vl_addrs[cell->vl_curr_svix].s_addr); 106 _debug("CellServ[%hu]: %08x", cell->vl_curr_svix, addr.s_addr);
177
178 /* try and create a server */
179 ret = afs_server_lookup(cell,
180 &cell->vl_addrs[cell->vl_curr_svix],
181 &server);
182 switch (ret) {
183 case 0:
184 break;
185 case -ENOMEM:
186 case -ENONET:
187 goto out;
188 default:
189 goto rotate;
190 }
191 107
192 /* attempt to access the VL server */ 108 /* attempt to access the VL server */
193 ret = afs_rxvl_get_entry_by_id(server, volid, voltype, vldb); 109 ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
110 &afs_sync_call);
194 switch (ret) { 111 switch (ret) {
195 case 0: 112 case 0:
196 afs_put_server(server);
197 goto out; 113 goto out;
198 case -ENOMEM: 114 case -ENOMEM:
199 case -ENONET: 115 case -ENONET:
200 case -ENETUNREACH: 116 case -ENETUNREACH:
201 case -EHOSTUNREACH: 117 case -EHOSTUNREACH:
202 case -ECONNREFUSED: 118 case -ECONNREFUSED:
203 down_write(&server->sem);
204 if (server->vlserver) {
205 rxrpc_put_connection(server->vlserver);
206 server->vlserver = NULL;
207 }
208 up_write(&server->sem);
209 afs_put_server(server);
210 if (ret == -ENOMEM || ret == -ENONET) 119 if (ret == -ENOMEM || ret == -ENONET)
211 goto out; 120 goto out;
212 goto rotate; 121 goto rotate;
122 case -EBUSY:
123 vl->upd_busy_cnt++;
124 if (vl->upd_busy_cnt <= 3) {
125 if (vl->upd_busy_cnt > 1) {
126 /* second+ BUSY - sleep a little bit */
127 set_current_state(TASK_UNINTERRUPTIBLE);
128 schedule_timeout(1);
129 __set_current_state(TASK_RUNNING);
130 }
131 continue;
132 }
133 break;
213 case -ENOMEDIUM: 134 case -ENOMEDIUM:
214 afs_put_server(server); 135 vl->upd_rej_cnt++;
215 goto out; 136 goto rotate;
216 default: 137 default:
217 afs_put_server(server); 138 ret = -EIO;
218 ret = -ENOMEDIUM;
219 goto rotate; 139 goto rotate;
220 } 140 }
221 141
@@ -223,729 +143,580 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vlocation,
223 rotate: 143 rotate:
224 cell->vl_curr_svix++; 144 cell->vl_curr_svix++;
225 cell->vl_curr_svix %= cell->vl_naddrs; 145 cell->vl_curr_svix %= cell->vl_naddrs;
146 vl->upd_busy_cnt = 0;
226 } 147 }
227 148
228 out: 149out:
150 if (ret < 0 && vl->upd_rej_cnt > 0) {
151 printk(KERN_NOTICE "kAFS:"
152 " Active volume no longer valid '%s'\n",
153 vl->vldb.name);
154 vl->valid = 0;
155 ret = -ENOMEDIUM;
156 }
157
158 up_write(&vl->cell->vl_sem);
229 _leave(" = %d", ret); 159 _leave(" = %d", ret);
230 return ret; 160 return ret;
161}
231 162
232} /* end afs_vlocation_access_vl_by_id() */
233
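Note the new -EBUSY handling in the by-ID loop above: a busy VL server is retried up to three times before the loop rotates to the next address, with a one-jiffy back-off from the second attempt onwards:

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(1);		/* back off, then retry the same server */
	__set_current_state(TASK_RUNNING);

upd_busy_cnt is reset whenever the rotation moves on, and upd_rej_cnt counts -ENOMEDIUM rejections so that a record no server will vouch for can be marked invalid on the way out.
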
234/*****************************************************************************/
235/* 163/*
236 * lookup volume location 164 * allocate a volume location record
237 * - caller must have cell->vol_sem write-locked
238 * - iterate through the VL servers in a cell until one of them admits knowing
239 * about the volume in question
240 * - lookup in the local cache if not able to find on the VL server
241 * - insert/update in the local cache if did get a VL response
242 */ 165 */
243int afs_vlocation_lookup(struct afs_cell *cell, 166static struct afs_vlocation *afs_vlocation_alloc(struct afs_cell *cell,
244 const char *name, 167 const char *name,
245 unsigned namesz, 168 size_t namesz)
246 struct afs_vlocation **_vlocation)
247{ 169{
248 struct afs_cache_vlocation vldb; 170 struct afs_vlocation *vl;
249 struct afs_vlocation *vlocation; 171
250 afs_voltype_t voltype; 172 vl = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
251 afs_volid_t vid; 173 if (vl) {
252 int active = 0, ret; 174 vl->cell = cell;
253 175 vl->state = AFS_VL_NEW;
254 _enter("{%s},%*.*s,%u,", cell->name, namesz, namesz, name, namesz); 176 atomic_set(&vl->usage, 1);
255 177 INIT_LIST_HEAD(&vl->link);
256 if (namesz > sizeof(vlocation->vldb.name)) { 178 INIT_LIST_HEAD(&vl->grave);
257 _leave(" = -ENAMETOOLONG"); 179 INIT_LIST_HEAD(&vl->update);
258 return -ENAMETOOLONG; 180 init_waitqueue_head(&vl->waitq);
259 } 181 spin_lock_init(&vl->lock);
260 182 memcpy(vl->vldb.name, name, namesz);
261 /* search the cell's active list first */
262 list_for_each_entry(vlocation, &cell->vl_list, link) {
263 if (namesz < sizeof(vlocation->vldb.name) &&
264 vlocation->vldb.name[namesz] != '\0')
265 continue;
266
267 if (memcmp(vlocation->vldb.name, name, namesz) == 0)
268 goto found_in_memory;
269 }
270
271 /* search the cell's graveyard list second */
272 spin_lock(&cell->vl_gylock);
273 list_for_each_entry(vlocation, &cell->vl_graveyard, link) {
274 if (namesz < sizeof(vlocation->vldb.name) &&
275 vlocation->vldb.name[namesz] != '\0')
276 continue;
277
278 if (memcmp(vlocation->vldb.name, name, namesz) == 0)
279 goto found_in_graveyard;
280 }
281 spin_unlock(&cell->vl_gylock);
282
283 /* not in the cell's in-memory lists - create a new record */
284 vlocation = kzalloc(sizeof(struct afs_vlocation), GFP_KERNEL);
285 if (!vlocation)
286 return -ENOMEM;
287
288 atomic_set(&vlocation->usage, 1);
289 INIT_LIST_HEAD(&vlocation->link);
290 rwlock_init(&vlocation->lock);
291 memcpy(vlocation->vldb.name, name, namesz);
292
293 afs_timer_init(&vlocation->timeout, &afs_vlocation_timer_ops);
294 afs_timer_init(&vlocation->upd_timer, &afs_vlocation_update_timer_ops);
295 afs_async_op_init(&vlocation->upd_op, &afs_vlocation_update_op_ops);
296
297 afs_get_cell(cell);
298 vlocation->cell = cell;
299
300 list_add_tail(&vlocation->link, &cell->vl_list);
301
302#ifdef AFS_CACHING_SUPPORT
303 /* we want to store it in the cache, plus it might already be
304 * encached */
305 cachefs_acquire_cookie(cell->cache,
306 &afs_volume_cache_index_def,
307 vlocation,
308 &vlocation->cache);
309
310 if (vlocation->valid)
311 goto found_in_cache;
312#endif
313
314 /* try to look up an unknown volume in the cell VL databases by name */
315 ret = afs_vlocation_access_vl_by_name(vlocation, name, namesz, &vldb);
316 if (ret < 0) {
317 printk("kAFS: failed to locate '%*.*s' in cell '%s'\n",
318 namesz, namesz, name, cell->name);
319 goto error;
320 } 183 }
321 184
322 goto found_on_vlserver; 185 _leave(" = %p", vl);
323 186 return vl;
324 found_in_graveyard: 187}
325 /* found in the graveyard - resurrect */
326 _debug("found in graveyard");
327 atomic_inc(&vlocation->usage);
328 list_move_tail(&vlocation->link, &cell->vl_list);
329 spin_unlock(&cell->vl_gylock);
330
331 afs_kafstimod_del_timer(&vlocation->timeout);
332 goto active;
333
334 found_in_memory:
335 /* found in memory - check to see if it's active */
336 _debug("found in memory");
337 atomic_inc(&vlocation->usage);
338 188
339 active: 189/*
340 active = 1; 190 * update record if we found it in the cache
191 */
192static int afs_vlocation_update_record(struct afs_vlocation *vl,
193 struct key *key,
194 struct afs_cache_vlocation *vldb)
195{
196 afs_voltype_t voltype;
197 afs_volid_t vid;
198 int ret;
341 199
342#ifdef AFS_CACHING_SUPPORT
343 found_in_cache:
344#endif
345 /* try to look up a cached volume in the cell VL databases by ID */ 200 /* try to look up a cached volume in the cell VL databases by ID */
346 _debug("found in cache");
347
348 _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }", 201 _debug("Locally Cached: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
349 vlocation->vldb.name, 202 vl->vldb.name,
350 vlocation->vldb.vidmask, 203 vl->vldb.vidmask,
351 ntohl(vlocation->vldb.servers[0].s_addr), 204 ntohl(vl->vldb.servers[0].s_addr),
352 vlocation->vldb.srvtmask[0], 205 vl->vldb.srvtmask[0],
353 ntohl(vlocation->vldb.servers[1].s_addr), 206 ntohl(vl->vldb.servers[1].s_addr),
354 vlocation->vldb.srvtmask[1], 207 vl->vldb.srvtmask[1],
355 ntohl(vlocation->vldb.servers[2].s_addr), 208 ntohl(vl->vldb.servers[2].s_addr),
356 vlocation->vldb.srvtmask[2] 209 vl->vldb.srvtmask[2]);
357 );
358 210
359 _debug("Vids: %08x %08x %08x", 211 _debug("Vids: %08x %08x %08x",
360 vlocation->vldb.vid[0], 212 vl->vldb.vid[0],
361 vlocation->vldb.vid[1], 213 vl->vldb.vid[1],
362 vlocation->vldb.vid[2]); 214 vl->vldb.vid[2]);
363 215
364 if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { 216 if (vl->vldb.vidmask & AFS_VOL_VTM_RW) {
365 vid = vlocation->vldb.vid[0]; 217 vid = vl->vldb.vid[0];
366 voltype = AFSVL_RWVOL; 218 voltype = AFSVL_RWVOL;
367 } 219 } else if (vl->vldb.vidmask & AFS_VOL_VTM_RO) {
368 else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) { 220 vid = vl->vldb.vid[1];
369 vid = vlocation->vldb.vid[1];
370 voltype = AFSVL_ROVOL; 221 voltype = AFSVL_ROVOL;
371 } 222 } else if (vl->vldb.vidmask & AFS_VOL_VTM_BAK) {
372 else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { 223 vid = vl->vldb.vid[2];
373 vid = vlocation->vldb.vid[2];
374 voltype = AFSVL_BACKVOL; 224 voltype = AFSVL_BACKVOL;
375 } 225 } else {
376 else {
377 BUG(); 226 BUG();
378 vid = 0; 227 vid = 0;
379 voltype = 0; 228 voltype = 0;
380 } 229 }
381 230
382 ret = afs_vlocation_access_vl_by_id(vlocation, vid, voltype, &vldb); 231 /* contact the server to make sure the volume is still available
232 * - TODO: need to handle disconnected operation here
233 */
234 ret = afs_vlocation_access_vl_by_id(vl, key, vid, voltype, vldb);
383 switch (ret) { 235 switch (ret) {
384 /* net error */ 236 /* net error */
385 default: 237 default:
386 printk("kAFS: failed to volume '%*.*s' (%x) up in '%s': %d\n", 238 printk(KERN_WARNING "kAFS:"
387 namesz, namesz, name, vid, cell->name, ret); 239 " failed to update volume '%s' (%x) up in '%s': %d\n",
388 goto error; 240 vl->vldb.name, vid, vl->cell->name, ret);
241 _leave(" = %d", ret);
242 return ret;
389 243
390 /* pulled from local cache into memory */ 244 /* pulled from local cache into memory */
391 case 0: 245 case 0:
392 goto found_on_vlserver; 246 _leave(" = 0");
247 return 0;
393 248
394 /* uh oh... looks like the volume got deleted */ 249 /* uh oh... looks like the volume got deleted */
395 case -ENOMEDIUM: 250 case -ENOMEDIUM:
396 printk("kAFS: volume '%*.*s' (%x) does not exist '%s'\n", 251 printk(KERN_ERR "kAFS:"
397 namesz, namesz, name, vid, cell->name); 252 " volume '%s' (%x) does not exist '%s'\n",
253 vl->vldb.name, vid, vl->cell->name);
398 254
399 /* TODO: make existing record unavailable */ 255 /* TODO: make existing record unavailable */
400 goto error; 256 _leave(" = %d", ret);
257 return ret;
401 } 258 }
259}
402 260
403 found_on_vlserver: 261/*
404 _debug("Done VL Lookup: %*.*s %02x { %08x(%x) %08x(%x) %08x(%x) }", 262 * apply the update to a VL record
405 namesz, namesz, name, 263 */
406 vldb.vidmask, 264static void afs_vlocation_apply_update(struct afs_vlocation *vl,
407 ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0], 265 struct afs_cache_vlocation *vldb)
408 ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1], 266{
409 ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2] 267 _debug("Done VL Lookup: %s %02x { %08x(%x) %08x(%x) %08x(%x) }",
410 ); 268 vldb->name, vldb->vidmask,
411 269 ntohl(vldb->servers[0].s_addr), vldb->srvtmask[0],
412 _debug("Vids: %08x %08x %08x", vldb.vid[0], vldb.vid[1], vldb.vid[2]); 270 ntohl(vldb->servers[1].s_addr), vldb->srvtmask[1],
271 ntohl(vldb->servers[2].s_addr), vldb->srvtmask[2]);
413 272
414 if ((namesz < sizeof(vlocation->vldb.name) && 273 _debug("Vids: %08x %08x %08x",
415 vlocation->vldb.name[namesz] != '\0') || 274 vldb->vid[0], vldb->vid[1], vldb->vid[2]);
416 memcmp(vldb.name, name, namesz) != 0)
417 printk("kAFS: name of volume '%*.*s' changed to '%s' on server\n",
418 namesz, namesz, name, vldb.name);
419 275
420 memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb)); 276 if (strcmp(vldb->name, vl->vldb.name) != 0)
277 printk(KERN_NOTICE "kAFS:"
278 " name of volume '%s' changed to '%s' on server\n",
279 vl->vldb.name, vldb->name);
421 280
422 afs_kafstimod_add_timer(&vlocation->upd_timer, 10 * HZ); 281 vl->vldb = *vldb;
423 282
424#ifdef AFS_CACHING_SUPPORT 283#ifdef AFS_CACHING_SUPPORT
425 /* update volume entry in local cache */ 284 /* update volume entry in local cache */
426 cachefs_update_cookie(vlocation->cache); 285 cachefs_update_cookie(vl->cache);
427#endif
428
429 *_vlocation = vlocation;
430 _leave(" = 0 (%p)",vlocation);
431 return 0;
432
433 error:
434 if (vlocation) {
435 if (active) {
436 __afs_put_vlocation(vlocation);
437 }
438 else {
439 list_del(&vlocation->link);
440#ifdef AFS_CACHING_SUPPORT
441 cachefs_relinquish_cookie(vlocation->cache, 0);
442#endif 286#endif
443 afs_put_cell(vlocation->cell); 287}
444 kfree(vlocation);
445 }
446 }
447
448 _leave(" = %d", ret);
449 return ret;
450} /* end afs_vlocation_lookup() */
451 288
452/*****************************************************************************/
453/* 289/*
454 * finish using a volume location record 290 * fill in a volume location record, consulting the cache and the VL server
455 * - caller must have cell->vol_sem write-locked 291 * both
456 */ 292 */
457static void __afs_put_vlocation(struct afs_vlocation *vlocation) 293static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
294 struct key *key)
458{ 295{
459 struct afs_cell *cell; 296 struct afs_cache_vlocation vldb;
297 int ret;
460 298
461 if (!vlocation) 299 _enter("");
462 return;
463 300
464 _enter("%s", vlocation->vldb.name); 301 ASSERTCMP(vl->valid, ==, 0);
465 302
466 cell = vlocation->cell; 303 memset(&vldb, 0, sizeof(vldb));
467 304
468 /* sanity check */ 305 /* see if we have an in-cache copy (will set vl->valid if there is) */
469 BUG_ON(atomic_read(&vlocation->usage) <= 0); 306#ifdef AFS_CACHING_SUPPORT
307 cachefs_acquire_cookie(cell->cache,
308 &afs_volume_cache_index_def,
309 vlocation,
310 &vl->cache);
311#endif
470 312
471 spin_lock(&cell->vl_gylock); 313 if (vl->valid) {
472 if (likely(!atomic_dec_and_test(&vlocation->usage))) { 314 /* try to update a known volume in the cell VL databases by
473 spin_unlock(&cell->vl_gylock); 315 * ID as the name may have changed */
474 _leave(""); 316 _debug("found in cache");
475 return; 317 ret = afs_vlocation_update_record(vl, key, &vldb);
318 } else {
319 /* try to look up an unknown volume in the cell VL databases by
320 * name */
321 ret = afs_vlocation_access_vl_by_name(vl, key, &vldb);
322 if (ret < 0) {
323 printk("kAFS: failed to locate '%s' in cell '%s'\n",
324 vl->vldb.name, vl->cell->name);
325 return ret;
326 }
476 } 327 }
477 328
478 /* move to graveyard queue */ 329 afs_vlocation_apply_update(vl, &vldb);
479 list_move_tail(&vlocation->link,&cell->vl_graveyard); 330 _leave(" = 0");
480 331 return 0;
481 /* remove from pending timeout queue (refcounted if actually being 332}
482 * updated) */
483 list_del_init(&vlocation->upd_op.link);
484
485 /* time out in 10 secs */
486 afs_kafstimod_del_timer(&vlocation->upd_timer);
487 afs_kafstimod_add_timer(&vlocation->timeout, 10 * HZ);
488
489 spin_unlock(&cell->vl_gylock);
490
491 _leave(" [killed]");
492} /* end __afs_put_vlocation() */
493
494/*****************************************************************************/
495/*
496 * finish using a volume location record
497 */
498void afs_put_vlocation(struct afs_vlocation *vlocation)
499{
500 if (vlocation) {
501 struct afs_cell *cell = vlocation->cell;
502
503 down_write(&cell->vl_sem);
504 __afs_put_vlocation(vlocation);
505 up_write(&cell->vl_sem);
506 }
507} /* end afs_put_vlocation() */
508 333
509/*****************************************************************************/
510/* 334/*
511 * timeout vlocation record 335 * queue a vlocation record for updates
512 * - removes from the cell's graveyard if the usage count is zero
513 */ 336 */
514void afs_vlocation_do_timeout(struct afs_vlocation *vlocation) 337void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
515{ 338{
516 struct afs_cell *cell; 339 struct afs_vlocation *xvl;
517 340
518 _enter("%s", vlocation->vldb.name); 341 /* wait at least 10 minutes before updating... */
342 vl->update_at = get_seconds() + afs_vlocation_update_timeout;
519 343
520 cell = vlocation->cell; 344 spin_lock(&afs_vlocation_updates_lock);
521 345
522 BUG_ON(atomic_read(&vlocation->usage) < 0); 346 if (!list_empty(&afs_vlocation_updates)) {
523 347 /* ... but wait at least 1 second more than the newest record
524 /* remove from graveyard if still dead */ 348 * already queued so that we don't spam the VL server suddenly
525 spin_lock(&cell->vl_gylock); 349 * with lots of requests
526 if (atomic_read(&vlocation->usage) == 0) 350 */
527 list_del_init(&vlocation->link); 351 xvl = list_entry(afs_vlocation_updates.prev,
528 else 352 struct afs_vlocation, update);
529 vlocation = NULL; 353 if (vl->update_at <= xvl->update_at)
530 spin_unlock(&cell->vl_gylock); 354 vl->update_at = xvl->update_at + 1;
531 355 } else {
532 if (!vlocation) { 356 queue_delayed_work(afs_vlocation_update_worker,
533 _leave(""); 357 &afs_vlocation_update,
534 return; /* resurrected */ 358 afs_vlocation_update_timeout * HZ);
535 } 359 }
536 360
537 /* we can now destroy it properly */ 361 list_add_tail(&vl->update, &afs_vlocation_updates);
538#ifdef AFS_CACHING_SUPPORT 362 spin_unlock(&afs_vlocation_updates_lock);
539 cachefs_relinquish_cookie(vlocation->cache, 0); 363}
540#endif
541 afs_put_cell(cell);
542
543 kfree(vlocation);
544
545 _leave(" [destroyed]");
546} /* end afs_vlocation_do_timeout() */
547 364
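The updates list doubles as a rate limiter: each record is scheduled afs_vlocation_update_timeout (ten minutes) ahead, but never less than one second after the newest entry already queued, so a burst of fresh lookups cannot stampede the VL servers. The key lines from afs_vlocation_queue_for_updates() above:

	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
	xvl = list_entry(afs_vlocation_updates.prev,
			 struct afs_vlocation, update);
	if (vl->update_at <= xvl->update_at)
		vl->update_at = xvl->update_at + 1;

The delayed work item is only armed when the list was empty; otherwise the run already queued will pick the new record up in order.
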
548/*****************************************************************************/
549/* 365/*
550 * send an update operation to the currently selected server 366 * lookup volume location
367 * - iterate through the VL servers in a cell until one of them admits knowing
368 * about the volume in question
369 * - lookup in the local cache if not able to find on the VL server
370 * - insert/update in the local cache if did get a VL response
551 */ 371 */
552static int afs_vlocation_update_begin(struct afs_vlocation *vlocation) 372struct afs_vlocation *afs_vlocation_lookup(struct afs_cell *cell,
373 struct key *key,
374 const char *name,
375 size_t namesz)
553{ 376{
554 afs_voltype_t voltype; 377 struct afs_vlocation *vl;
555 afs_volid_t vid;
556 int ret; 378 int ret;
557 379
558 _enter("%s{ufs=%u ucs=%u}", 380 _enter("{%s},{%x},%*.*s,%zu",
559 vlocation->vldb.name, 381 cell->name, key_serial(key),
560 vlocation->upd_first_svix, 382 (int) namesz, (int) namesz, name, namesz);
561 vlocation->upd_curr_svix);
562 383
563 /* try to look up a cached volume in the cell VL databases by ID */ 384 if (namesz > sizeof(vl->vldb.name)) {
564 if (vlocation->vldb.vidmask & AFS_VOL_VTM_RW) { 385 _leave(" = -ENAMETOOLONG");
565 vid = vlocation->vldb.vid[0]; 386 return ERR_PTR(-ENAMETOOLONG);
566 voltype = AFSVL_RWVOL;
567 }
568 else if (vlocation->vldb.vidmask & AFS_VOL_VTM_RO) {
569 vid = vlocation->vldb.vid[1];
570 voltype = AFSVL_ROVOL;
571 } 387 }
572 else if (vlocation->vldb.vidmask & AFS_VOL_VTM_BAK) { 388
573 vid = vlocation->vldb.vid[2]; 389 /* see if we have an in-memory copy first */
574 voltype = AFSVL_BACKVOL; 390 down_write(&cell->vl_sem);
391 spin_lock(&cell->vl_lock);
392 list_for_each_entry(vl, &cell->vl_list, link) {
393 if (vl->vldb.name[namesz] != '\0')
394 continue;
395 if (memcmp(vl->vldb.name, name, namesz) == 0)
396 goto found_in_memory;
575 } 397 }
576 else { 398 spin_unlock(&cell->vl_lock);
577 BUG(); 399
578 vid = 0; 400 /* not in the cell's in-memory lists - create a new record */
579 voltype = 0; 401 vl = afs_vlocation_alloc(cell, name, namesz);
402 if (!vl) {
403 up_write(&cell->vl_sem);
404 return ERR_PTR(-ENOMEM);
580 } 405 }
581 406
582 /* contact the chosen server */ 407 afs_get_cell(cell);
583 ret = afs_server_lookup(
584 vlocation->cell,
585 &vlocation->cell->vl_addrs[vlocation->upd_curr_svix],
586 &vlocation->upd_op.server);
587 408
588 switch (ret) { 409 list_add_tail(&vl->link, &cell->vl_list);
589 case 0: 410 vl->state = AFS_VL_CREATING;
590 break; 411 up_write(&cell->vl_sem);
591 case -ENOMEM:
592 case -ENONET:
593 default:
594 _leave(" = %d", ret);
595 return ret;
596 }
597 412
598 /* initiate the update operation */ 413fill_in_record:
599 ret = afs_rxvl_get_entry_by_id_async(&vlocation->upd_op, vid, voltype); 414 ret = afs_vlocation_fill_in_record(vl, key);
600 if (ret < 0) { 415 if (ret < 0)
601 _leave(" = %d", ret); 416 goto error_abandon;
602 return ret; 417 spin_lock(&vl->lock);
418 vl->state = AFS_VL_VALID;
419 wake_up(&vl->waitq);
420 spin_unlock(&vl->lock);
421
422 /* schedule for regular updates */
423 afs_vlocation_queue_for_updates(vl);
424 goto success;
425
426found_in_memory:
427 /* found in memory */
428 _debug("found in memory");
429 atomic_inc(&vl->usage);
430 spin_unlock(&cell->vl_lock);
431 if (!list_empty(&vl->grave)) {
432 spin_lock(&afs_vlocation_graveyard_lock);
433 list_del_init(&vl->grave);
434 spin_unlock(&afs_vlocation_graveyard_lock);
603 } 435 }
436 up_write(&cell->vl_sem);
437
438 /* see if it was an abandoned record that we might try filling in */
439 spin_lock(&vl->lock);
440 while (vl->state != AFS_VL_VALID) {
441 afs_vlocation_state_t state = vl->state;
442
443 _debug("invalid [state %d]", state);
444
445 if ((state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME)) {
446 vl->state = AFS_VL_CREATING;
447 spin_unlock(&vl->lock);
448 goto fill_in_record;
449 }
450
451 /* must now wait for creation or update by someone else to
452 * complete */
453 _debug("wait");
604 454
455 spin_unlock(&vl->lock);
456 ret = wait_event_interruptible(
457 vl->waitq,
458 vl->state == AFS_VL_NEW ||
459 vl->state == AFS_VL_VALID ||
460 vl->state == AFS_VL_NO_VOLUME);
461 if (ret < 0)
462 goto error;
463 spin_lock(&vl->lock);
464 }
465 spin_unlock(&vl->lock);
466
467success:
468 _leave(" = %p",vl);
469 return vl;
470
471error_abandon:
472 spin_lock(&vl->lock);
473 vl->state = AFS_VL_NEW;
474 wake_up(&vl->waitq);
475 spin_unlock(&vl->lock);
476error:
477 ASSERT(vl != NULL);
478 afs_put_vlocation(vl);
605 _leave(" = %d", ret); 479 _leave(" = %d", ret);
606 return ret; 480 return ERR_PTR(ret);
607} /* end afs_vlocation_update_begin() */ 481}
608 482
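afs_vlocation_lookup() now serialises concurrent lookups through a small state machine on vl->state: the first caller marks the record AFS_VL_CREATING and fills it in, while later callers sleep until it becomes AFS_VL_VALID, or until it drops back to AFS_VL_NEW or AFS_VL_NO_VOLUME and one of them claims the retry:

	ret = wait_event_interruptible(vl->waitq,
				       vl->state == AFS_VL_NEW ||
				       vl->state == AFS_VL_VALID ||
				       vl->state == AFS_VL_NO_VOLUME);

This supersedes the old requirement that the caller hold cell->vol_sem write-locked across the entire lookup.
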
609/*****************************************************************************/
610/* 483/*
611 * abandon updating a VL record 484 * finish using a volume location record
612 * - does not restart the update timer
613 */ 485 */
614static void afs_vlocation_update_abandon(struct afs_vlocation *vlocation, 486void afs_put_vlocation(struct afs_vlocation *vl)
615 afs_vlocation_upd_t state,
616 int ret)
617{ 487{
618 _enter("%s,%u", vlocation->vldb.name, state); 488 if (!vl)
619 489 return;
620 if (ret < 0)
621 printk("kAFS: Abandoning VL update '%s': %d\n",
622 vlocation->vldb.name, ret);
623
624 /* discard the server record */
625 afs_put_server(vlocation->upd_op.server);
626 vlocation->upd_op.server = NULL;
627 490
628 spin_lock(&afs_vlocation_update_lock); 491 _enter("%s", vl->vldb.name);
629 afs_vlocation_update = NULL;
630 vlocation->upd_state = state;
631 492
632 /* TODO: start updating next VL record on pending list */ 493 ASSERTCMP(atomic_read(&vl->usage), >, 0);
633 494
634 spin_unlock(&afs_vlocation_update_lock); 495 if (likely(!atomic_dec_and_test(&vl->usage))) {
496 _leave("");
497 return;
498 }
635 499
636 _leave(""); 500 spin_lock(&afs_vlocation_graveyard_lock);
637} /* end afs_vlocation_update_abandon() */ 501 if (atomic_read(&vl->usage) == 0) {
502 _debug("buried");
503 list_move_tail(&vl->grave, &afs_vlocation_graveyard);
504 vl->time_of_death = get_seconds();
505 schedule_delayed_work(&afs_vlocation_reap,
506 afs_vlocation_timeout * HZ);
507
508 /* suspend updates on this record */
509 if (!list_empty(&vl->update)) {
510 spin_lock(&afs_vlocation_updates_lock);
511 list_del_init(&vl->update);
512 spin_unlock(&afs_vlocation_updates_lock);
513 }
514 }
515 spin_unlock(&afs_vlocation_graveyard_lock);
516 _leave(" [killed?]");
517}
638 518
639/*****************************************************************************/
640/* 519/*
641 * handle periodic update timeouts and busy retry timeouts 520 * destroy a dead volume location record
642 * - called from kafstimod
643 */ 521 */
644static void afs_vlocation_update_timer(struct afs_timer *timer) 522static void afs_vlocation_destroy(struct afs_vlocation *vl)
645{ 523{
646 struct afs_vlocation *vlocation = 524 _enter("%p", vl);
647 list_entry(timer, struct afs_vlocation, upd_timer);
648 int ret;
649 525
650 _enter("%s", vlocation->vldb.name); 526#ifdef AFS_CACHING_SUPPORT
527 cachefs_relinquish_cookie(vl->cache, 0);
528#endif
651 529
652 /* only update if not in the graveyard (defend against putting too) */ 530 afs_put_cell(vl->cell);
653 spin_lock(&vlocation->cell->vl_gylock); 531 kfree(vl);
532}
654 533
655 if (!atomic_read(&vlocation->usage)) 534/*
656 goto out_unlock1; 535 * reap dead volume location records
536 */
537static void afs_vlocation_reaper(struct work_struct *work)
538{
539 LIST_HEAD(corpses);
540 struct afs_vlocation *vl;
541 unsigned long delay, expiry;
542 time_t now;
657 543
658 spin_lock(&afs_vlocation_update_lock); 544 _enter("");
659 545
660 /* if we were woken up due to EBUSY sleep then restart immediately if 546 now = get_seconds();
661 * possible or else jump to front of pending queue */ 547 spin_lock(&afs_vlocation_graveyard_lock);
662 if (vlocation->upd_state == AFS_VLUPD_BUSYSLEEP) { 548
663 if (afs_vlocation_update) { 549 while (!list_empty(&afs_vlocation_graveyard)) {
664 list_add(&vlocation->upd_op.link, 550 vl = list_entry(afs_vlocation_graveyard.next,
665 &afs_vlocation_update_pendq); 551 struct afs_vlocation, grave);
552
553 _debug("check %p", vl);
554
555 /* the queue is ordered most dead first */
556 expiry = vl->time_of_death + afs_vlocation_timeout;
557 if (expiry > now) {
558 delay = (expiry - now) * HZ;
559 _debug("delay %lu", delay);
560 if (!schedule_delayed_work(&afs_vlocation_reap,
561 delay)) {
562 cancel_delayed_work(&afs_vlocation_reap);
563 schedule_delayed_work(&afs_vlocation_reap,
564 delay);
565 }
566 break;
666 } 567 }
667 else { 568
668 afs_get_vlocation(vlocation); 569 spin_lock(&vl->cell->vl_lock);
669 afs_vlocation_update = vlocation; 570 if (atomic_read(&vl->usage) > 0) {
670 vlocation->upd_state = AFS_VLUPD_INPROGRESS; 571 _debug("no reap");
572 list_del_init(&vl->grave);
573 } else {
574 _debug("reap");
575 list_move_tail(&vl->grave, &corpses);
576 list_del_init(&vl->link);
671 } 577 }
672 goto out_unlock2; 578 spin_unlock(&vl->cell->vl_lock);
673 } 579 }
674 580
675 /* put on pending queue if there's already another update in progress */ 581 spin_unlock(&afs_vlocation_graveyard_lock);
676 if (afs_vlocation_update) {
677 vlocation->upd_state = AFS_VLUPD_PENDING;
678 list_add_tail(&vlocation->upd_op.link,
679 &afs_vlocation_update_pendq);
680 goto out_unlock2;
681 }
682 582
683 /* hold a ref on it while actually updating */ 583 /* now reap the corpses we've extracted */
684 afs_get_vlocation(vlocation); 584 while (!list_empty(&corpses)) {
685 afs_vlocation_update = vlocation; 585 vl = list_entry(corpses.next, struct afs_vlocation, grave);
686 vlocation->upd_state = AFS_VLUPD_INPROGRESS; 586 list_del(&vl->grave);
687 587 afs_vlocation_destroy(vl);
688 spin_unlock(&afs_vlocation_update_lock);
689 spin_unlock(&vlocation->cell->vl_gylock);
690
691 /* okay... we can start the update */
692 _debug("BEGIN VL UPDATE [%s]", vlocation->vldb.name);
693 vlocation->upd_first_svix = vlocation->cell->vl_curr_svix;
694 vlocation->upd_curr_svix = vlocation->upd_first_svix;
695 vlocation->upd_rej_cnt = 0;
696 vlocation->upd_busy_cnt = 0;
697
698 ret = afs_vlocation_update_begin(vlocation);
699 if (ret < 0) {
700 afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret);
701 afs_kafstimod_add_timer(&vlocation->upd_timer,
702 AFS_VLDB_TIMEOUT);
703 afs_put_vlocation(vlocation);
704 } 588 }
705 589
706 _leave(""); 590 _leave("");
707 return; 591}
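
afs_put_vlocation() and afs_vlocation_reaper() together implement deferred destruction: the final put parks the record in the graveyard with a death timestamp, the reaper destroys it once afs_vlocation_timeout has elapsed, and a lookup in the interim can resurrect it by taking it back off the grave list. The subtle part is on the put side: the usage count must be re-checked under the graveyard lock, because a lookup may have revived the record between the decrement and the lock. A userspace sketch of that put-side handling with C11 atomics follows; names are illustrative and the list manipulation is elided.

/* Sketch of the put side of the graveyard pattern; names illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <time.h>

struct vl_record {
	atomic_int	usage;
	time_t		time_of_death;
	/* ... linkage into the graveyard list would go here ... */
};

static pthread_mutex_t graveyard_lock = PTHREAD_MUTEX_INITIALIZER;

static void vl_bury(struct vl_record *vl)
{
	/* record the time of death; the kernel code also moves the
	 * record onto the graveyard list here and kicks the reaper */
	vl->time_of_death = time(NULL);
}

static void vl_put(struct vl_record *vl)
{
	if (atomic_fetch_sub(&vl->usage, 1) != 1)
		return;		/* not the last reference */

	pthread_mutex_lock(&graveyard_lock);
	/* re-check: a concurrent lookup may already hold a new ref */
	if (atomic_load(&vl->usage) == 0)
		vl_bury(vl);
	pthread_mutex_unlock(&graveyard_lock);
}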
708 592
709 out_unlock2: 593/*
710 spin_unlock(&afs_vlocation_update_lock); 594 * initialise the VL update process
711 out_unlock1: 595 */
712 spin_unlock(&vlocation->cell->vl_gylock); 596int __init afs_vlocation_update_init(void)
713 _leave(""); 597{
714 return; 598 afs_vlocation_update_worker =
599 create_singlethread_workqueue("kafs_vlupdated");
600 return afs_vlocation_update_worker ? 0 : -ENOMEM;
601}
715 602
716} /* end afs_vlocation_update_timer() */ 603/*
604 * discard all the volume location records for rmmod
605 */
606void __exit afs_vlocation_purge(void)
607{
608 afs_vlocation_timeout = 0;
609
610 spin_lock(&afs_vlocation_updates_lock);
611 list_del_init(&afs_vlocation_updates);
612 spin_unlock(&afs_vlocation_updates_lock);
613 cancel_delayed_work(&afs_vlocation_update);
614 queue_delayed_work(afs_vlocation_update_worker,
615 &afs_vlocation_update, 0);
616 destroy_workqueue(afs_vlocation_update_worker);
617
618 cancel_delayed_work(&afs_vlocation_reap);
619 schedule_delayed_work(&afs_vlocation_reap, 0);
620}
717 621
718/*****************************************************************************/
719/* 622/*
720 * attend to an update operation upon which an event happened 623 * update a volume location
721 * - called in kafsasyncd context
722 */ 624 */
723static void afs_vlocation_update_attend(struct afs_async_op *op) 625static void afs_vlocation_updater(struct work_struct *work)
724{ 626{
725 struct afs_cache_vlocation vldb; 627 struct afs_cache_vlocation vldb;
726 struct afs_vlocation *vlocation = 628 struct afs_vlocation *vl, *xvl;
727 list_entry(op, struct afs_vlocation, upd_op); 629 time_t now;
728 unsigned tmp; 630 long timeout;
729 int ret; 631 int ret;
730 632
731 _enter("%s", vlocation->vldb.name); 633 _enter("");
732
733 ret = afs_rxvl_get_entry_by_id_async2(op, &vldb);
734 switch (ret) {
735 case -EAGAIN:
736 _leave(" [unfinished]");
737 return;
738
739 case 0:
740 _debug("END VL UPDATE: %d\n", ret);
741 vlocation->valid = 1;
742
743 _debug("Done VL Lookup: %02x { %08x(%x) %08x(%x) %08x(%x) }",
744 vldb.vidmask,
745 ntohl(vldb.servers[0].s_addr), vldb.srvtmask[0],
746 ntohl(vldb.servers[1].s_addr), vldb.srvtmask[1],
747 ntohl(vldb.servers[2].s_addr), vldb.srvtmask[2]
748 );
749
750 _debug("Vids: %08x %08x %08x",
751 vldb.vid[0], vldb.vid[1], vldb.vid[2]);
752
753 afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0);
754
755 down_write(&vlocation->cell->vl_sem);
756
757 /* actually update the cache */
758 if (strncmp(vldb.name, vlocation->vldb.name,
759 sizeof(vlocation->vldb.name)) != 0)
760 printk("kAFS: name of volume '%s'"
761 " changed to '%s' on server\n",
762 vlocation->vldb.name, vldb.name);
763
764 memcpy(&vlocation->vldb, &vldb, sizeof(vlocation->vldb));
765
766#if 0
767 /* TODO update volume entry in local cache */
768#endif
769
770 up_write(&vlocation->cell->vl_sem);
771
772 if (ret < 0)
773 printk("kAFS: failed to update local cache: %d\n", ret);
774
775 afs_kafstimod_add_timer(&vlocation->upd_timer,
776 AFS_VLDB_TIMEOUT);
777 afs_put_vlocation(vlocation);
778 _leave(" [found]");
779 return;
780
781 case -ENOMEDIUM:
782 vlocation->upd_rej_cnt++;
783 goto try_next;
784
785 /* the server is locked - retry in a very short while */
786 case -EBUSY:
787 vlocation->upd_busy_cnt++;
788 if (vlocation->upd_busy_cnt > 3)
789 goto try_next; /* too many retries */
790
791 afs_vlocation_update_abandon(vlocation,
792 AFS_VLUPD_BUSYSLEEP, 0);
793 afs_kafstimod_add_timer(&vlocation->upd_timer, HZ / 2);
794 afs_put_vlocation(vlocation);
795 _leave(" [busy]");
796 return;
797
798 case -ENETUNREACH:
799 case -EHOSTUNREACH:
800 case -ECONNREFUSED:
801 case -EREMOTEIO:
802 /* record bad vlserver info in the cell too
803 * - TODO: use down_write_trylock() if available
804 */
805 if (vlocation->upd_curr_svix == vlocation->cell->vl_curr_svix)
806 vlocation->cell->vl_curr_svix =
807 vlocation->cell->vl_curr_svix %
808 vlocation->cell->vl_naddrs;
809
810 case -EBADRQC:
811 case -EINVAL:
812 case -EACCES:
813 case -EBADMSG:
814 goto try_next;
815
816 default:
817 goto abandon;
818 }
819
820 /* try contacting the next server */
821 try_next:
822 vlocation->upd_busy_cnt = 0;
823
824 /* discard the server record */
825 afs_put_server(vlocation->upd_op.server);
826 vlocation->upd_op.server = NULL;
827 634
828 tmp = vlocation->cell->vl_naddrs; 635 now = get_seconds();
829 if (tmp == 0)
830 goto abandon;
831 636
832 vlocation->upd_curr_svix++; 637 /* find a record to update */
833 if (vlocation->upd_curr_svix >= tmp) 638 spin_lock(&afs_vlocation_updates_lock);
834 vlocation->upd_curr_svix = 0; 639 for (;;) {
835 if (vlocation->upd_first_svix >= tmp) 640 if (list_empty(&afs_vlocation_updates)) {
836 vlocation->upd_first_svix = tmp - 1; 641 spin_unlock(&afs_vlocation_updates_lock);
642 _leave(" [nothing]");
643 return;
644 }
837 645
838 /* move to the next server */ 646 vl = list_entry(afs_vlocation_updates.next,
839 if (vlocation->upd_curr_svix != vlocation->upd_first_svix) { 647 struct afs_vlocation, update);
840 afs_vlocation_update_begin(vlocation); 648 if (atomic_read(&vl->usage) > 0)
841 _leave(" [next]"); 649 break;
842 return; 650 list_del_init(&vl->update);
843 } 651 }
844 652
845 /* run out of servers to try - was the volume rejected? */ 653 timeout = vl->update_at - now;
846 if (vlocation->upd_rej_cnt > 0) { 654 if (timeout > 0) {
847 printk("kAFS: Active volume no longer valid '%s'\n", 655 queue_delayed_work(afs_vlocation_update_worker,
848 vlocation->vldb.name); 656 &afs_vlocation_update, timeout * HZ);
849 vlocation->valid = 0; 657 spin_unlock(&afs_vlocation_updates_lock);
850 afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, 0); 658 _leave(" [nothing]");
851 afs_kafstimod_add_timer(&vlocation->upd_timer,
852 AFS_VLDB_TIMEOUT);
853 afs_put_vlocation(vlocation);
854 _leave(" [invalidated]");
855 return; 659 return;
856 } 660 }
857 661
858 /* abandon the update */ 662 list_del_init(&vl->update);
859 abandon: 663 atomic_inc(&vl->usage);
860 afs_vlocation_update_abandon(vlocation, AFS_VLUPD_SLEEP, ret); 664 spin_unlock(&afs_vlocation_updates_lock);
861 afs_kafstimod_add_timer(&vlocation->upd_timer, HZ * 10);
862 afs_put_vlocation(vlocation);
863 _leave(" [abandoned]");
864
865} /* end afs_vlocation_update_attend() */
866
867/*****************************************************************************/
868/*
869 * deal with an update operation being discarded
870 * - called in kafsasyncd context when it's dying due to rmmod
871 * - the call has already been aborted and put()'d
872 */
873static void afs_vlocation_update_discard(struct afs_async_op *op)
874{
875 struct afs_vlocation *vlocation =
876 list_entry(op, struct afs_vlocation, upd_op);
877 665
878 _enter("%s", vlocation->vldb.name); 666 /* we can now perform the update */
667 _debug("update %s", vl->vldb.name);
668 vl->state = AFS_VL_UPDATING;
669 vl->upd_rej_cnt = 0;
670 vl->upd_busy_cnt = 0;
879 671
880 afs_put_server(op->server); 672 ret = afs_vlocation_update_record(vl, NULL, &vldb);
881 op->server = NULL; 673 spin_lock(&vl->lock);
674 switch (ret) {
675 case 0:
676 afs_vlocation_apply_update(vl, &vldb);
677 vl->state = AFS_VL_VALID;
678 wake_up(&vl->waitq);
679 break;
680 case -ENOMEDIUM:
681 vl->state = AFS_VL_VOLUME_DELETED;
682 break;
683 default:
684 vl->state = AFS_VL_UNCERTAIN;
685 break;
686 }
687 spin_unlock(&vl->lock);
882 688
883 afs_put_vlocation(vlocation); 689 /* and then reschedule */
690 _debug("reschedule");
691 vl->update_at = get_seconds() + afs_vlocation_update_timeout;
884 692
885 _leave(""); 693 spin_lock(&afs_vlocation_updates_lock);
886} /* end afs_vlocation_update_discard() */
887 694
888/*****************************************************************************/ 695 if (!list_empty(&afs_vlocation_updates)) {
889/* 696 /* next update in 10 minutes, but wait at least 1 second more
890 * match a VLDB record stored in the cache 697 * than the newest record already queued so that we don't spam
891 * - may also load target from entry 698 * the VL server suddenly with lots of requests
892 */ 699 */
893#ifdef AFS_CACHING_SUPPORT 700 xvl = list_entry(afs_vlocation_updates.prev,
894static cachefs_match_val_t afs_vlocation_cache_match(void *target, 701 struct afs_vlocation, update);
895 const void *entry) 702 if (vl->update_at <= xvl->update_at)
896{ 703 vl->update_at = xvl->update_at + 1;
897 const struct afs_cache_vlocation *vldb = entry; 704 xvl = list_entry(afs_vlocation_updates.next,
898 struct afs_vlocation *vlocation = target; 705 struct afs_vlocation, update);
899 706 timeout = xvl->update_at - now;
900 _enter("{%s},{%s}", vlocation->vldb.name, vldb->name); 707 if (timeout < 0)
901 708 timeout = 0;
902 if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0 709 } else {
903 ) { 710 timeout = afs_vlocation_update_timeout;
904 if (!vlocation->valid ||
905 vlocation->vldb.rtime == vldb->rtime
906 ) {
907 vlocation->vldb = *vldb;
908 vlocation->valid = 1;
909 _leave(" = SUCCESS [c->m]");
910 return CACHEFS_MATCH_SUCCESS;
911 }
912 /* need to update cache if cached info differs */
913 else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
914 /* delete if VIDs for this name differ */
915 if (memcmp(&vlocation->vldb.vid,
916 &vldb->vid,
917 sizeof(vldb->vid)) != 0) {
918 _leave(" = DELETE");
919 return CACHEFS_MATCH_SUCCESS_DELETE;
920 }
921
922 _leave(" = UPDATE");
923 return CACHEFS_MATCH_SUCCESS_UPDATE;
924 }
925 else {
926 _leave(" = SUCCESS");
927 return CACHEFS_MATCH_SUCCESS;
928 }
929 } 711 }
930 712
931 _leave(" = FAILED"); 713 ASSERT(list_empty(&vl->update));
932 return CACHEFS_MATCH_FAILED;
933} /* end afs_vlocation_cache_match() */
934#endif
935
936/*****************************************************************************/
937/*
938 * update a VLDB record stored in the cache
939 */
940#ifdef AFS_CACHING_SUPPORT
941static void afs_vlocation_cache_update(void *source, void *entry)
942{
943 struct afs_cache_vlocation *vldb = entry;
944 struct afs_vlocation *vlocation = source;
945 714
946 _enter(""); 715 list_add_tail(&vl->update, &afs_vlocation_updates);
947
948 *vldb = vlocation->vldb;
949 716
950} /* end afs_vlocation_cache_update() */ 717 _debug("timeout %ld", timeout);
951#endif 718 queue_delayed_work(afs_vlocation_update_worker,
719 &afs_vlocation_update, timeout * HZ);
720 spin_unlock(&afs_vlocation_updates_lock);
721 afs_put_vlocation(vl);
722}
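
The requeueing logic at the end of afs_vlocation_updater() spaces records out: the nominal period is ten minutes, but a record is never queued at or before the newest entry already on the list, so a batch of volumes mounted together will not all hit the VL server in the same instant. The rule reduces to a small pure function; this is a sketch, and the queue representation is assumed.

/* Sketch of the anti-burst spacing rule used when requeueing a record. */
#include <time.h>

#define UPDATE_PERIOD	(10 * 60)	/* nominal update interval, seconds */

static time_t next_update_at(time_t now, time_t newest_queued_at,
			     int queue_nonempty)
{
	time_t at = now + UPDATE_PERIOD;

	if (queue_nonempty && at <= newest_queued_at)
		at = newest_queued_at + 1;	/* keep >= 1s of spacing */
	return at;
}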
diff --git a/fs/afs/vnode.c b/fs/afs/vnode.c
index cf62da5d7825..a1904ab8426a 100644
--- a/fs/afs/vnode.c
+++ b/fs/afs/vnode.c
@@ -1,6 +1,6 @@
1/* vnode.c: AFS vnode management 1/* AFS vnode management
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -14,142 +14,237 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/pagemap.h>
18#include "volume.h"
19#include "cell.h"
20#include "cmservice.h"
21#include "fsclient.h"
22#include "vlclient.h"
23#include "vnode.h"
24#include "internal.h" 17#include "internal.h"
25 18
26static void afs_vnode_cb_timed_out(struct afs_timer *timer); 19#if 0
20static noinline bool dump_tree_aux(struct rb_node *node, struct rb_node *parent,
21 int depth, char lr)
22{
23 struct afs_vnode *vnode;
24 bool bad = false;
25
26 if (!node)
27 return false;
28
29 if (node->rb_left)
30 bad = dump_tree_aux(node->rb_left, node, depth + 2, '/');
31
32 vnode = rb_entry(node, struct afs_vnode, cb_promise);
33 _debug("%c %*.*s%c%p {%d}",
34 rb_is_red(node) ? 'R' : 'B',
35 depth, depth, "", lr,
36 vnode, vnode->cb_expires_at);
37 if (rb_parent(node) != parent) {
38 printk("BAD: %p != %p\n", rb_parent(node), parent);
39 bad = true;
40 }
27 41
28struct afs_timer_ops afs_vnode_cb_timed_out_ops = { 42 if (node->rb_right)
29 .timed_out = afs_vnode_cb_timed_out, 43 bad |= dump_tree_aux(node->rb_right, node, depth + 2, '\\');
30};
31 44
32#ifdef AFS_CACHING_SUPPORT 45 return bad;
33static cachefs_match_val_t afs_vnode_cache_match(void *target, 46}
34 const void *entry);
35static void afs_vnode_cache_update(void *source, void *entry);
36 47
37struct cachefs_index_def afs_vnode_cache_index_def = { 48static noinline void dump_tree(const char *name, struct afs_server *server)
38 .name = "vnode", 49{
39 .data_size = sizeof(struct afs_cache_vnode), 50 _enter("%s", name);
40 .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 4 }, 51 if (dump_tree_aux(server->cb_promises.rb_node, NULL, 0, '-'))
41 .match = afs_vnode_cache_match, 52 BUG();
42 .update = afs_vnode_cache_update, 53}
43};
44#endif 54#endif
45 55
46/*****************************************************************************/
47/* 56/*
48 * handle a callback timing out 57 * insert a vnode into the backing server's vnode tree
49 * TODO: retain a ref to vnode struct for an outstanding callback timeout
50 */ 58 */
51static void afs_vnode_cb_timed_out(struct afs_timer *timer) 59static void afs_install_vnode(struct afs_vnode *vnode,
60 struct afs_server *server)
52{ 61{
53 struct afs_server *oldserver; 62 struct afs_server *old_server = vnode->server;
54 struct afs_vnode *vnode; 63 struct afs_vnode *xvnode;
64 struct rb_node *parent, **p;
55 65
56 vnode = list_entry(timer, struct afs_vnode, cb_timeout); 66 _enter("%p,%p", vnode, server);
57 67
58 _enter("%p", vnode); 68 if (old_server) {
69 spin_lock(&old_server->fs_lock);
70 rb_erase(&vnode->server_rb, &old_server->fs_vnodes);
71 spin_unlock(&old_server->fs_lock);
72 }
59 73
60 /* set the changed flag in the vnode and release the server */ 74 afs_get_server(server);
61 spin_lock(&vnode->lock); 75 vnode->server = server;
76 afs_put_server(old_server);
77
78 /* insert into the server's vnode tree in FID order */
79 spin_lock(&server->fs_lock);
80
81 parent = NULL;
82 p = &server->fs_vnodes.rb_node;
83 while (*p) {
84 parent = *p;
85 xvnode = rb_entry(parent, struct afs_vnode, server_rb);
86 if (vnode->fid.vid < xvnode->fid.vid)
87 p = &(*p)->rb_left;
88 else if (vnode->fid.vid > xvnode->fid.vid)
89 p = &(*p)->rb_right;
90 else if (vnode->fid.vnode < xvnode->fid.vnode)
91 p = &(*p)->rb_left;
92 else if (vnode->fid.vnode > xvnode->fid.vnode)
93 p = &(*p)->rb_right;
94 else if (vnode->fid.unique < xvnode->fid.unique)
95 p = &(*p)->rb_left;
96 else if (vnode->fid.unique > xvnode->fid.unique)
97 p = &(*p)->rb_right;
98 else
99 BUG(); /* can't happen unless afs_iget() malfunctions */
100 }
101
102 rb_link_node(&vnode->server_rb, parent, p);
103 rb_insert_color(&vnode->server_rb, &server->fs_vnodes);
62 104
63 oldserver = xchg(&vnode->cb_server, NULL); 105 spin_unlock(&server->fs_lock);
64 if (oldserver) { 106 _leave("");
65 vnode->flags |= AFS_VNODE_CHANGED; 107}
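
The insertion loop in afs_install_vnode() open-codes a lexicographic comparison on the FID triple (vid, vnode, unique); duplicate FIDs are impossible by construction, hence the BUG(). The same ordering written as a standalone comparator, with a minimal stand-in struct layout:

/* Standalone form of the three-field FID comparison above. */
struct fid {
	unsigned	vid;
	unsigned	vnode;
	unsigned	unique;
};

static int fid_cmp(const struct fid *a, const struct fid *b)
{
	if (a->vid != b->vid)
		return a->vid < b->vid ? -1 : 1;
	if (a->vnode != b->vnode)
		return a->vnode < b->vnode ? -1 : 1;
	if (a->unique != b->unique)
		return a->unique < b->unique ? -1 : 1;
	return 0;	/* equal FIDs correspond to the BUG() path above */
}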
66 108
67 spin_lock(&afs_cb_hash_lock); 109/*
68 list_del_init(&vnode->cb_hash_link); 110 * insert a vnode into the promising server's update/expiration tree
69 spin_unlock(&afs_cb_hash_lock); 111 * - caller must hold vnode->lock
112 */
113static void afs_vnode_note_promise(struct afs_vnode *vnode,
114 struct afs_server *server)
115{
116 struct afs_server *old_server;
117 struct afs_vnode *xvnode;
118 struct rb_node *parent, **p;
70 119
71 spin_lock(&oldserver->cb_lock); 120 _enter("%p,%p", vnode, server);
72 list_del_init(&vnode->cb_link); 121
73 spin_unlock(&oldserver->cb_lock); 122 ASSERT(server != NULL);
123
124 old_server = vnode->server;
125 if (vnode->cb_promised) {
126 if (server == old_server &&
127 vnode->cb_expires == vnode->cb_expires_at) {
128 _leave(" [no change]");
129 return;
130 }
131
132 spin_lock(&old_server->cb_lock);
133 if (vnode->cb_promised) {
134 _debug("delete");
135 rb_erase(&vnode->cb_promise, &old_server->cb_promises);
136 vnode->cb_promised = false;
137 }
138 spin_unlock(&old_server->cb_lock);
74 } 139 }
75 140
76 spin_unlock(&vnode->lock); 141 if (vnode->server != server)
142 afs_install_vnode(vnode, server);
143
144 vnode->cb_expires_at = vnode->cb_expires;
145 _debug("PROMISE on %p {%lu}",
146 vnode, (unsigned long) vnode->cb_expires_at);
147
148 /* abuse an RB-tree to hold the expiration order (we may have multiple
149 * items with the same expiration time) */
150 spin_lock(&server->cb_lock);
151
152 parent = NULL;
153 p = &server->cb_promises.rb_node;
154 while (*p) {
155 parent = *p;
156 xvnode = rb_entry(parent, struct afs_vnode, cb_promise);
157 if (vnode->cb_expires_at < xvnode->cb_expires_at)
158 p = &(*p)->rb_left;
159 else
160 p = &(*p)->rb_right;
161 }
77 162
78 afs_put_server(oldserver); 163 rb_link_node(&vnode->cb_promise, parent, p);
164 rb_insert_color(&vnode->cb_promise, &server->cb_promises);
165 vnode->cb_promised = true;
79 166
167 spin_unlock(&server->cb_lock);
80 _leave(""); 168 _leave("");
81} /* end afs_vnode_cb_timed_out() */ 169}
82 170
83/*****************************************************************************/
84/* 171/*
85 * finish off updating the recorded status of a file 172 * handle remote file deletion by discarding the callback promise
173 */
174static void afs_vnode_deleted_remotely(struct afs_vnode *vnode)
175{
176 struct afs_server *server;
177
178 set_bit(AFS_VNODE_DELETED, &vnode->flags);
179
180 server = vnode->server;
181 if (vnode->cb_promised) {
182 spin_lock(&server->cb_lock);
183 if (vnode->cb_promised) {
184 rb_erase(&vnode->cb_promise, &server->cb_promises);
185 vnode->cb_promised = false;
186 }
187 spin_unlock(&server->cb_lock);
188 }
189
190 spin_lock(&vnode->server->fs_lock);
191 rb_erase(&vnode->server_rb, &vnode->server->fs_vnodes);
192 spin_unlock(&vnode->server->fs_lock);
193
194 vnode->server = NULL;
195 afs_put_server(server);
196}
197
198/*
199 * finish off updating the recorded status of a file after a successful
200 * operation completion
86 * - starts callback expiry timer 201 * - starts callback expiry timer
87 * - adds to server's callback list 202 * - adds to server's callback list
88 */ 203 */
89static void afs_vnode_finalise_status_update(struct afs_vnode *vnode, 204void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
90 struct afs_server *server, 205 struct afs_server *server)
91 int ret)
92{ 206{
93 struct afs_server *oldserver = NULL; 207 struct afs_server *oldserver = NULL;
94 208
95 _enter("%p,%p,%d", vnode, server, ret); 209 _enter("%p,%p", vnode, server);
96 210
97 spin_lock(&vnode->lock); 211 spin_lock(&vnode->lock);
212 clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
213 afs_vnode_note_promise(vnode, server);
214 vnode->update_cnt--;
215 ASSERTCMP(vnode->update_cnt, >=, 0);
216 spin_unlock(&vnode->lock);
217
218 wake_up_all(&vnode->update_waitq);
219 afs_put_server(oldserver);
220 _leave("");
221}
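
update_cnt counts the operations currently in flight that will refresh this vnode's status; both the success and failure paths decrement it and wake update_waitq so that waiters in afs_vnode_fetch_status() can re-examine the flags. A userspace sketch of that bookkeeping with a condition variable, simplified so that waiters just wait for the count to drain (the kernel waiters actually recheck the vnode flags); names are illustrative.

/* Sketch of the update_cnt bookkeeping; names illustrative. */
#include <pthread.h>

struct vnode_status {
	pthread_mutex_t	lock;
	pthread_cond_t	updated;	/* stands in for update_waitq */
	int		update_cnt;	/* ops in flight that refresh status */
};

static void op_begin(struct vnode_status *v)
{
	pthread_mutex_lock(&v->lock);
	v->update_cnt++;
	pthread_mutex_unlock(&v->lock);
}

/* Called on both the success and failure paths, as above. */
static void op_end(struct vnode_status *v)
{
	pthread_mutex_lock(&v->lock);
	v->update_cnt--;		/* must never go negative */
	pthread_cond_broadcast(&v->updated);
	pthread_mutex_unlock(&v->lock);
}

static void wait_for_updates(struct vnode_status *v)
{
	pthread_mutex_lock(&v->lock);
	while (v->update_cnt > 0)
		pthread_cond_wait(&v->updated, &v->lock);
	pthread_mutex_unlock(&v->lock);
}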
98 222
99 vnode->flags &= ~AFS_VNODE_CHANGED; 223/*
224 * finish off updating the recorded status of a file after an operation failed
225 */
226static void afs_vnode_status_update_failed(struct afs_vnode *vnode, int ret)
227{
228 _enter("%p,%d", vnode, ret);
100 229
101 if (ret == 0) { 230 spin_lock(&vnode->lock);
102 /* adjust the callback timeout appropriately */
103 afs_kafstimod_add_timer(&vnode->cb_timeout,
104 vnode->cb_expiry * HZ);
105
106 spin_lock(&afs_cb_hash_lock);
107 list_move_tail(&vnode->cb_hash_link,
108 &afs_cb_hash(server, &vnode->fid));
109 spin_unlock(&afs_cb_hash_lock);
110
111 /* swap ref to old callback server with that for new callback
112 * server */
113 oldserver = xchg(&vnode->cb_server, server);
114 if (oldserver != server) {
115 if (oldserver) {
116 spin_lock(&oldserver->cb_lock);
117 list_del_init(&vnode->cb_link);
118 spin_unlock(&oldserver->cb_lock);
119 }
120 231
121 afs_get_server(server); 232 clear_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
122 spin_lock(&server->cb_lock);
123 list_add_tail(&vnode->cb_link, &server->cb_promises);
124 spin_unlock(&server->cb_lock);
125 }
126 else {
127 /* same server */
128 oldserver = NULL;
129 }
130 }
131 else if (ret == -ENOENT) {
132 /* the file was deleted - clear the callback timeout */
133 oldserver = xchg(&vnode->cb_server, NULL);
134 afs_kafstimod_del_timer(&vnode->cb_timeout);
135 233
234 if (ret == -ENOENT) {
235 /* the file was deleted on the server */
136 _debug("got NOENT from server - marking file deleted"); 236 _debug("got NOENT from server - marking file deleted");
137 vnode->flags |= AFS_VNODE_DELETED; 237 afs_vnode_deleted_remotely(vnode);
138 } 238 }
139 239
140 vnode->update_cnt--; 240 vnode->update_cnt--;
141 241 ASSERTCMP(vnode->update_cnt, >=, 0);
142 spin_unlock(&vnode->lock); 242 spin_unlock(&vnode->lock);
143 243
144 wake_up_all(&vnode->update_waitq); 244 wake_up_all(&vnode->update_waitq);
145
146 afs_put_server(oldserver);
147
148 _leave(""); 245 _leave("");
246}
149 247
150} /* end afs_vnode_finalise_status_update() */
151
152/*****************************************************************************/
153/* 248/*
154 * fetch file status from the volume 249 * fetch file status from the volume
155 * - don't issue a fetch if: 250 * - don't issue a fetch if:
@@ -157,9 +252,11 @@ static void afs_vnode_finalise_status_update(struct afs_vnode *vnode,
157 * - there are any outstanding ops that will fetch the status 252 * - there are any outstanding ops that will fetch the status
158 * - TODO implement local caching 253 * - TODO implement local caching
159 */ 254 */
160int afs_vnode_fetch_status(struct afs_vnode *vnode) 255int afs_vnode_fetch_status(struct afs_vnode *vnode,
256 struct afs_vnode *auth_vnode, struct key *key)
161{ 257{
162 struct afs_server *server; 258 struct afs_server *server;
259 unsigned long acl_order;
163 int ret; 260 int ret;
164 261
165 DECLARE_WAITQUEUE(myself, current); 262 DECLARE_WAITQUEUE(myself, current);
@@ -168,38 +265,49 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
168 vnode->volume->vlocation->vldb.name, 265 vnode->volume->vlocation->vldb.name,
169 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); 266 vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
170 267
171 if (!(vnode->flags & AFS_VNODE_CHANGED) && vnode->cb_server) { 268 if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
269 vnode->cb_promised) {
172 _leave(" [unchanged]"); 270 _leave(" [unchanged]");
173 return 0; 271 return 0;
174 } 272 }
175 273
176 if (vnode->flags & AFS_VNODE_DELETED) { 274 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
177 _leave(" [deleted]"); 275 _leave(" [deleted]");
178 return -ENOENT; 276 return -ENOENT;
179 } 277 }
180 278
279 acl_order = 0;
280 if (auth_vnode)
281 acl_order = auth_vnode->acl_order;
282
181 spin_lock(&vnode->lock); 283 spin_lock(&vnode->lock);
182 284
183 if (!(vnode->flags & AFS_VNODE_CHANGED)) { 285 if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
286 vnode->cb_promised) {
184 spin_unlock(&vnode->lock); 287 spin_unlock(&vnode->lock);
185 _leave(" [unchanged]"); 288 _leave(" [unchanged]");
186 return 0; 289 return 0;
187 } 290 }
188 291
292 ASSERTCMP(vnode->update_cnt, >=, 0);
293
189 if (vnode->update_cnt > 0) { 294 if (vnode->update_cnt > 0) {
190 /* someone else started a fetch */ 295 /* someone else started a fetch */
296 _debug("wait on fetch %d", vnode->update_cnt);
297
191 set_current_state(TASK_UNINTERRUPTIBLE); 298 set_current_state(TASK_UNINTERRUPTIBLE);
299 ASSERT(myself.func != NULL);
192 add_wait_queue(&vnode->update_waitq, &myself); 300 add_wait_queue(&vnode->update_waitq, &myself);
193 301
194 /* wait for the status to be updated */ 302 /* wait for the status to be updated */
195 for (;;) { 303 for (;;) {
196 if (!(vnode->flags & AFS_VNODE_CHANGED)) 304 if (!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags))
197 break; 305 break;
198 if (vnode->flags & AFS_VNODE_DELETED) 306 if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
199 break; 307 break;
200 308
201 /* it got updated and invalidated all before we saw 309 /* check to see if it got updated and invalidated all
202 * it */ 310 * before we saw it */
203 if (vnode->update_cnt == 0) { 311 if (vnode->update_cnt == 0) {
204 remove_wait_queue(&vnode->update_waitq, 312 remove_wait_queue(&vnode->update_waitq,
205 &myself); 313 &myself);
@@ -219,10 +327,11 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
219 spin_unlock(&vnode->lock); 327 spin_unlock(&vnode->lock);
220 set_current_state(TASK_RUNNING); 328 set_current_state(TASK_RUNNING);
221 329
222 return vnode->flags & AFS_VNODE_DELETED ? -ENOENT : 0; 330 return test_bit(AFS_VNODE_DELETED, &vnode->flags) ?
331 -ENOENT : 0;
223 } 332 }
224 333
225 get_anyway: 334get_anyway:
226 /* okay... we're going to have to initiate the op */ 335 /* okay... we're going to have to initiate the op */
227 vnode->update_cnt++; 336 vnode->update_cnt++;
228 337
@@ -232,39 +341,60 @@ int afs_vnode_fetch_status(struct afs_vnode *vnode)
232 * vnode */ 341 * vnode */
233 do { 342 do {
234 /* pick a server to query */ 343 /* pick a server to query */
235 ret = afs_volume_pick_fileserver(vnode->volume, &server); 344 server = afs_volume_pick_fileserver(vnode);
236 if (ret<0) 345 if (IS_ERR(server))
237 return ret; 346 goto no_server;
238 347
239 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); 348 _debug("USING SERVER: %p{%08x}",
349 server, ntohl(server->addr.s_addr));
240 350
241 ret = afs_rxfs_fetch_file_status(server, vnode, NULL); 351 ret = afs_fs_fetch_file_status(server, key, vnode, NULL,
352 &afs_sync_call);
242 353
243 } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); 354 } while (!afs_volume_release_fileserver(vnode, server, ret));
244 355
245 /* adjust the flags */ 356 /* adjust the flags */
246 afs_vnode_finalise_status_update(vnode, server, ret); 357 if (ret == 0) {
358 _debug("adjust");
359 if (auth_vnode)
360 afs_cache_permit(vnode, key, acl_order);
361 afs_vnode_finalise_status_update(vnode, server);
362 afs_put_server(server);
363 } else {
364 _debug("failed [%d]", ret);
365 afs_vnode_status_update_failed(vnode, ret);
366 }
247 367
248 _leave(" = %d", ret); 368 ASSERTCMP(vnode->update_cnt, >=, 0);
369
370 _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
249 return ret; 371 return ret;
250} /* end afs_vnode_fetch_status() */
251 372
252/*****************************************************************************/ 373no_server:
374 spin_lock(&vnode->lock);
375 vnode->update_cnt--;
376 ASSERTCMP(vnode->update_cnt, >=, 0);
377 spin_unlock(&vnode->lock);
378 _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
379 return PTR_ERR(server);
380}
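
Every operation in this file follows the same do/while shape: pick a fileserver (afs_volume_pick_fileserver() prefers the one the vnode already uses), issue the RPC, and let afs_volume_release_fileserver() decide from the error whether to rotate to the next server. Below is a self-contained, simplified model of that loop; the error classification and server bookkeeping here are a sketch, not the full policy.

/* Simplified model of the pick/issue/release fileserver retry loop. */
#include <errno.h>
#include <stdio.h>

#define NSERVERS 3

/* simulated per-server health: 0 = usable, else last fatal error */
static int fs_state[NSERVERS];

/* Pretend RPC: servers 0 and 1 are unreachable, server 2 works. */
static int issue_rpc(int srv)
{
	return srv < 2 ? -EHOSTUNREACH : 0;
}

/* Returns nonzero when the loop should stop (success or an error not
 * attributable to the server); on a network-ish error, marks the
 * server dead so the next pick skips it, and asks for a retry. */
static int release_server(int srv, int error)
{
	switch (error) {
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ECONNREFUSED:
		fs_state[srv] = error;
		return 0;	/* try the next server */
	default:
		return 1;	/* done */
	}
}

static int pick_server(void)
{
	for (int i = 0; i < NSERVERS; i++)
		if (fs_state[i] == 0)
			return i;
	return -1;
}

int main(void)
{
	int srv, ret;

	do {
		srv = pick_server();
		if (srv < 0)
			return 1;	/* ran out of usable servers */
		ret = issue_rpc(srv);
	} while (!release_server(srv, ret));

	printf("operation finished on server %d: %d\n", srv, ret);
	return 0;
}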
381
253/* 382/*
254 * fetch file data from the volume 383 * fetch file data from the volume
255 * - TODO implement caching and server failover 384 * - TODO implement caching
256 */ 385 */
257int afs_vnode_fetch_data(struct afs_vnode *vnode, 386int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
258 struct afs_rxfs_fetch_descriptor *desc) 387 off_t offset, size_t length, struct page *page)
259{ 388{
260 struct afs_server *server; 389 struct afs_server *server;
261 int ret; 390 int ret;
262 391
263 _enter("%s,{%u,%u,%u}", 392 _enter("%s{%u,%u,%u},%x,,,",
264 vnode->volume->vlocation->vldb.name, 393 vnode->volume->vlocation->vldb.name,
265 vnode->fid.vid, 394 vnode->fid.vid,
266 vnode->fid.vnode, 395 vnode->fid.vnode,
267 vnode->fid.unique); 396 vnode->fid.unique,
397 key_serial(key));
268 398
269 /* this op will fetch the status */ 399 /* this op will fetch the status */
270 spin_lock(&vnode->lock); 400 spin_lock(&vnode->lock);
@@ -275,120 +405,351 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode,
275 * vnode */ 405 * vnode */
276 do { 406 do {
277 /* pick a server to query */ 407 /* pick a server to query */
278 ret = afs_volume_pick_fileserver(vnode->volume, &server); 408 server = afs_volume_pick_fileserver(vnode);
279 if (ret < 0) 409 if (IS_ERR(server))
280 return ret; 410 goto no_server;
281 411
282 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr)); 412 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
283 413
284 ret = afs_rxfs_fetch_file_data(server, vnode, desc, NULL); 414 ret = afs_fs_fetch_data(server, key, vnode, offset, length,
415 page, &afs_sync_call);
285 416
286 } while (!afs_volume_release_fileserver(vnode->volume, server, ret)); 417 } while (!afs_volume_release_fileserver(vnode, server, ret));
287 418
288 /* adjust the flags */ 419 /* adjust the flags */
289 afs_vnode_finalise_status_update(vnode, server, ret); 420 if (ret == 0) {
421 afs_vnode_finalise_status_update(vnode, server);
422 afs_put_server(server);
423 } else {
424 afs_vnode_status_update_failed(vnode, ret);
425 }
290 426
291 _leave(" = %d", ret); 427 _leave(" = %d", ret);
292 return ret; 428 return ret;
293 429
294} /* end afs_vnode_fetch_data() */ 430no_server:
431 spin_lock(&vnode->lock);
432 vnode->update_cnt--;
433 ASSERTCMP(vnode->update_cnt, >=, 0);
434 spin_unlock(&vnode->lock);
435 return PTR_ERR(server);
436}
295 437
296/*****************************************************************************/
297/* 438/*
298 * break any outstanding callback on a vnode 439 * make a file or a directory
 299 * - only relevant to server that issued it 439 * make a file or a directory
300 */ 440 */
301int afs_vnode_give_up_callback(struct afs_vnode *vnode) 441int afs_vnode_create(struct afs_vnode *vnode, struct key *key,
442 const char *name, umode_t mode, struct afs_fid *newfid,
443 struct afs_file_status *newstatus,
444 struct afs_callback *newcb, struct afs_server **_server)
302{ 445{
303 struct afs_server *server; 446 struct afs_server *server;
304 int ret; 447 int ret;
305 448
306 _enter("%s,{%u,%u,%u}", 449 _enter("%s{%u,%u,%u},%x,%s,,",
307 vnode->volume->vlocation->vldb.name, 450 vnode->volume->vlocation->vldb.name,
308 vnode->fid.vid, 451 vnode->fid.vid,
309 vnode->fid.vnode, 452 vnode->fid.vnode,
310 vnode->fid.unique); 453 vnode->fid.unique,
311 454 key_serial(key),
312 spin_lock(&afs_cb_hash_lock); 455 name);
313 list_del_init(&vnode->cb_hash_link);
314 spin_unlock(&afs_cb_hash_lock);
315 456
316 /* set the changed flag in the vnode and release the server */ 457 /* this op will fetch the status on the directory we're creating in */
317 spin_lock(&vnode->lock); 458 spin_lock(&vnode->lock);
459 vnode->update_cnt++;
460 spin_unlock(&vnode->lock);
318 461
319 afs_kafstimod_del_timer(&vnode->cb_timeout); 462 do {
463 /* pick a server to query */
464 server = afs_volume_pick_fileserver(vnode);
465 if (IS_ERR(server))
466 goto no_server;
467
468 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
320 469
321 server = xchg(&vnode->cb_server, NULL); 470 ret = afs_fs_create(server, key, vnode, name, mode, newfid,
322 if (server) { 471 newstatus, newcb, &afs_sync_call);
323 vnode->flags |= AFS_VNODE_CHANGED;
324 472
325 spin_lock(&server->cb_lock); 473 } while (!afs_volume_release_fileserver(vnode, server, ret));
326 list_del_init(&vnode->cb_link); 474
327 spin_unlock(&server->cb_lock); 475 /* adjust the flags */
476 if (ret == 0) {
477 afs_vnode_finalise_status_update(vnode, server);
478 *_server = server;
479 } else {
480 afs_vnode_status_update_failed(vnode, ret);
481 *_server = NULL;
328 } 482 }
329 483
484 _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
485 return ret;
486
487no_server:
488 spin_lock(&vnode->lock);
489 vnode->update_cnt--;
490 ASSERTCMP(vnode->update_cnt, >=, 0);
330 spin_unlock(&vnode->lock); 491 spin_unlock(&vnode->lock);
492 _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
493 return PTR_ERR(server);
494}
331 495
332 ret = 0; 496/*
333 if (server) { 497 * remove a file or directory
334 ret = afs_rxfs_give_up_callback(server, vnode); 498 */
499int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name,
500 bool isdir)
501{
502 struct afs_server *server;
503 int ret;
504
505 _enter("%s{%u,%u,%u},%x,%s",
506 vnode->volume->vlocation->vldb.name,
507 vnode->fid.vid,
508 vnode->fid.vnode,
509 vnode->fid.unique,
510 key_serial(key),
511 name);
512
513 /* this op will fetch the status on the directory we're removing from */
514 spin_lock(&vnode->lock);
515 vnode->update_cnt++;
516 spin_unlock(&vnode->lock);
517
518 do {
519 /* pick a server to query */
520 server = afs_volume_pick_fileserver(vnode);
521 if (IS_ERR(server))
522 goto no_server;
523
524 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
525
526 ret = afs_fs_remove(server, key, vnode, name, isdir,
527 &afs_sync_call);
528
529 } while (!afs_volume_release_fileserver(vnode, server, ret));
530
531 /* adjust the flags */
532 if (ret == 0) {
533 afs_vnode_finalise_status_update(vnode, server);
335 afs_put_server(server); 534 afs_put_server(server);
535 } else {
536 afs_vnode_status_update_failed(vnode, ret);
336 } 537 }
337 538
338 _leave(" = %d", ret); 539 _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
339 return ret; 540 return ret;
340} /* end afs_vnode_give_up_callback() */
341 541
342/*****************************************************************************/ 542no_server:
543 spin_lock(&vnode->lock);
544 vnode->update_cnt--;
545 ASSERTCMP(vnode->update_cnt, >=, 0);
546 spin_unlock(&vnode->lock);
547 _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
548 return PTR_ERR(server);
549}
550
343/* 551/*
344 * match a vnode record stored in the cache 552 * create a hard link
345 */ 553 */
 346#ifdef AFS_CACHING_SUPPORT 554int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode,
347static cachefs_match_val_t afs_vnode_cache_match(void *target, 555 struct key *key, const char *name)
348 const void *entry)
349{ 556{
350 const struct afs_cache_vnode *cvnode = entry; 557 struct afs_server *server;
351 struct afs_vnode *vnode = target; 558 int ret;
352 559
353 _enter("{%x,%x,%Lx},{%x,%x,%Lx}", 560 _enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s",
561 dvnode->volume->vlocation->vldb.name,
562 dvnode->fid.vid,
563 dvnode->fid.vnode,
564 dvnode->fid.unique,
565 vnode->volume->vlocation->vldb.name,
566 vnode->fid.vid,
354 vnode->fid.vnode, 567 vnode->fid.vnode,
355 vnode->fid.unique, 568 vnode->fid.unique,
356 vnode->status.version, 569 key_serial(key),
357 cvnode->vnode_id, 570 name);
358 cvnode->vnode_unique, 571
 359 cvnode->data_version); 572 /* this op will fetch the status on the directory we're adding to */
360 573 spin_lock(&vnode->lock);
361 if (vnode->fid.vnode != cvnode->vnode_id) { 574 vnode->update_cnt++;
362 _leave(" = FAILED"); 575 spin_unlock(&vnode->lock);
363 return CACHEFS_MATCH_FAILED; 576 spin_lock(&dvnode->lock);
577 dvnode->update_cnt++;
578 spin_unlock(&dvnode->lock);
579
580 do {
581 /* pick a server to query */
582 server = afs_volume_pick_fileserver(dvnode);
583 if (IS_ERR(server))
584 goto no_server;
585
586 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
587
588 ret = afs_fs_link(server, key, dvnode, vnode, name,
589 &afs_sync_call);
590
591 } while (!afs_volume_release_fileserver(dvnode, server, ret));
592
593 /* adjust the flags */
594 if (ret == 0) {
595 afs_vnode_finalise_status_update(vnode, server);
596 afs_vnode_finalise_status_update(dvnode, server);
597 afs_put_server(server);
598 } else {
599 afs_vnode_status_update_failed(vnode, ret);
600 afs_vnode_status_update_failed(dvnode, ret);
364 } 601 }
365 602
366 if (vnode->fid.unique != cvnode->vnode_unique || 603 _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
367 vnode->status.version != cvnode->data_version) { 604 return ret;
368 _leave(" = DELETE"); 605
369 return CACHEFS_MATCH_SUCCESS_DELETE; 606no_server:
607 spin_lock(&vnode->lock);
608 vnode->update_cnt--;
609 ASSERTCMP(vnode->update_cnt, >=, 0);
610 spin_unlock(&vnode->lock);
611 spin_lock(&dvnode->lock);
612 dvnode->update_cnt--;
613 ASSERTCMP(dvnode->update_cnt, >=, 0);
614 spin_unlock(&dvnode->lock);
615 _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
616 return PTR_ERR(server);
617}
618
619/*
620 * create a symbolic link
621 */
622int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key,
623 const char *name, const char *content,
624 struct afs_fid *newfid,
625 struct afs_file_status *newstatus,
626 struct afs_server **_server)
627{
628 struct afs_server *server;
629 int ret;
630
631 _enter("%s{%u,%u,%u},%x,%s,%s,,,",
632 vnode->volume->vlocation->vldb.name,
633 vnode->fid.vid,
634 vnode->fid.vnode,
635 vnode->fid.unique,
636 key_serial(key),
637 name, content);
638
639 /* this op will fetch the status on the directory we're creating in */
640 spin_lock(&vnode->lock);
641 vnode->update_cnt++;
642 spin_unlock(&vnode->lock);
643
644 do {
645 /* pick a server to query */
646 server = afs_volume_pick_fileserver(vnode);
647 if (IS_ERR(server))
648 goto no_server;
649
650 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
651
652 ret = afs_fs_symlink(server, key, vnode, name, content,
653 newfid, newstatus, &afs_sync_call);
654
655 } while (!afs_volume_release_fileserver(vnode, server, ret));
656
657 /* adjust the flags */
658 if (ret == 0) {
659 afs_vnode_finalise_status_update(vnode, server);
660 *_server = server;
661 } else {
662 afs_vnode_status_update_failed(vnode, ret);
663 *_server = NULL;
370 } 664 }
371 665
372 _leave(" = SUCCESS"); 666 _leave(" = %d [cnt %d]", ret, vnode->update_cnt);
373 return CACHEFS_MATCH_SUCCESS; 667 return ret;
374} /* end afs_vnode_cache_match() */ 668
375#endif 669no_server:
670 spin_lock(&vnode->lock);
671 vnode->update_cnt--;
672 ASSERTCMP(vnode->update_cnt, >=, 0);
673 spin_unlock(&vnode->lock);
674 _leave(" = %ld [cnt %d]", PTR_ERR(server), vnode->update_cnt);
675 return PTR_ERR(server);
676}
376 677
377/*****************************************************************************/
378/* 678/*
379 * update a vnode record stored in the cache 679 * rename a file
380 */ 680 */
381#ifdef AFS_CACHING_SUPPORT 681int afs_vnode_rename(struct afs_vnode *orig_dvnode,
382static void afs_vnode_cache_update(void *source, void *entry) 682 struct afs_vnode *new_dvnode,
683 struct key *key,
684 const char *orig_name,
685 const char *new_name)
383{ 686{
384 struct afs_cache_vnode *cvnode = entry; 687 struct afs_server *server;
385 struct afs_vnode *vnode = source; 688 int ret;
386 689
387 _enter(""); 690 _enter("%s{%u,%u,%u},%s{%u,%u,%u},%x,%s,%s",
691 orig_dvnode->volume->vlocation->vldb.name,
692 orig_dvnode->fid.vid,
693 orig_dvnode->fid.vnode,
694 orig_dvnode->fid.unique,
695 new_dvnode->volume->vlocation->vldb.name,
696 new_dvnode->fid.vid,
697 new_dvnode->fid.vnode,
698 new_dvnode->fid.unique,
699 key_serial(key),
700 orig_name,
701 new_name);
702
703 /* this op will fetch the status on both the directories we're dealing
704 * with */
705 spin_lock(&orig_dvnode->lock);
706 orig_dvnode->update_cnt++;
707 spin_unlock(&orig_dvnode->lock);
708 if (new_dvnode != orig_dvnode) {
709 spin_lock(&new_dvnode->lock);
710 new_dvnode->update_cnt++;
711 spin_unlock(&new_dvnode->lock);
712 }
388 713
389 cvnode->vnode_id = vnode->fid.vnode; 714 do {
390 cvnode->vnode_unique = vnode->fid.unique; 715 /* pick a server to query */
391 cvnode->data_version = vnode->status.version; 716 server = afs_volume_pick_fileserver(orig_dvnode);
717 if (IS_ERR(server))
718 goto no_server;
392 719
393} /* end afs_vnode_cache_update() */ 720 _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
394#endif 721
722 ret = afs_fs_rename(server, key, orig_dvnode, orig_name,
723 new_dvnode, new_name, &afs_sync_call);
724
725 } while (!afs_volume_release_fileserver(orig_dvnode, server, ret));
726
727 /* adjust the flags */
728 if (ret == 0) {
729 afs_vnode_finalise_status_update(orig_dvnode, server);
730 if (new_dvnode != orig_dvnode)
731 afs_vnode_finalise_status_update(new_dvnode, server);
732 afs_put_server(server);
733 } else {
734 afs_vnode_status_update_failed(orig_dvnode, ret);
735 if (new_dvnode != orig_dvnode)
736 afs_vnode_status_update_failed(new_dvnode, ret);
737 }
738
739 _leave(" = %d [cnt %d]", ret, orig_dvnode->update_cnt);
740 return ret;
741
742no_server:
743 spin_lock(&orig_dvnode->lock);
744 orig_dvnode->update_cnt--;
745 ASSERTCMP(orig_dvnode->update_cnt, >=, 0);
746 spin_unlock(&orig_dvnode->lock);
747 if (new_dvnode != orig_dvnode) {
748 spin_lock(&new_dvnode->lock);
749 new_dvnode->update_cnt--;
750 ASSERTCMP(new_dvnode->update_cnt, >=, 0);
751 spin_unlock(&new_dvnode->lock);
752 }
753 _leave(" = %ld [cnt %d]", PTR_ERR(server), orig_dvnode->update_cnt);
754 return PTR_ERR(server);
755}
diff --git a/fs/afs/vnode.h b/fs/afs/vnode.h
deleted file mode 100644
index b86a97102e8b..000000000000
--- a/fs/afs/vnode.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/* vnode.h: AFS vnode record
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_VNODE_H
13#define _LINUX_AFS_VNODE_H
14
15#include <linux/fs.h>
16#include "server.h"
17#include "kafstimod.h"
18#include "cache.h"
19
20#ifdef __KERNEL__
21
22struct afs_rxfs_fetch_descriptor;
23
24/*****************************************************************************/
25/*
26 * vnode catalogue entry
27 */
28struct afs_cache_vnode
29{
30 afs_vnodeid_t vnode_id; /* vnode ID */
31 unsigned vnode_unique; /* vnode ID uniquifier */
32 afs_dataversion_t data_version; /* data version */
33};
34
35#ifdef AFS_CACHING_SUPPORT
36extern struct cachefs_index_def afs_vnode_cache_index_def;
37#endif
38
39/*****************************************************************************/
40/*
41 * AFS inode private data
42 */
43struct afs_vnode
44{
45 struct inode vfs_inode; /* the VFS's inode record */
46
47 struct afs_volume *volume; /* volume on which vnode resides */
48 struct afs_fid fid; /* the file identifier for this inode */
49 struct afs_file_status status; /* AFS status info for this file */
50#ifdef AFS_CACHING_SUPPORT
51 struct cachefs_cookie *cache; /* caching cookie */
52#endif
53
54 wait_queue_head_t update_waitq; /* status fetch waitqueue */
55 unsigned update_cnt; /* number of outstanding ops that will update the
56 * status */
57 spinlock_t lock; /* waitqueue/flags lock */
58 unsigned flags;
59#define AFS_VNODE_CHANGED 0x00000001 /* set if vnode reported changed by callback */
60#define AFS_VNODE_DELETED 0x00000002 /* set if vnode deleted on server */
61#define AFS_VNODE_MOUNTPOINT 0x00000004 /* set if vnode is a mountpoint symlink */
62
63 /* outstanding callback notification on this file */
64 struct afs_server *cb_server; /* server that made the current promise */
65 struct list_head cb_link; /* link in server's promises list */
66 struct list_head cb_hash_link; /* link in master callback hash */
67 struct afs_timer cb_timeout; /* timeout on promise */
68 unsigned cb_version; /* callback version */
69 unsigned cb_expiry; /* callback expiry time */
70 afs_callback_type_t cb_type; /* type of callback */
71};
72
73static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
74{
75 return container_of(inode,struct afs_vnode,vfs_inode);
76}
77
78static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
79{
80 return &vnode->vfs_inode;
81}
82
83extern int afs_vnode_fetch_status(struct afs_vnode *vnode);
84
85extern int afs_vnode_fetch_data(struct afs_vnode *vnode,
86 struct afs_rxfs_fetch_descriptor *desc);
87
88extern int afs_vnode_give_up_callback(struct afs_vnode *vnode);
89
90extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
91
92#endif /* __KERNEL__ */
93
94#endif /* _LINUX_AFS_VNODE_H */
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 768c6dbd323a..dd160cada45d 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -1,6 +1,6 @@
1/* volume.c: AFS volume management 1/* AFS volume management
2 * 2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -15,35 +15,10 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/fs.h> 16#include <linux/fs.h>
17#include <linux/pagemap.h> 17#include <linux/pagemap.h>
18#include "volume.h"
19#include "vnode.h"
20#include "cell.h"
21#include "cache.h"
22#include "cmservice.h"
23#include "fsclient.h"
24#include "vlclient.h"
25#include "internal.h" 18#include "internal.h"
26 19
27#ifdef __KDEBUG
28static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" }; 20static const char *afs_voltypes[] = { "R/W", "R/O", "BAK" };
29#endif
30
31#ifdef AFS_CACHING_SUPPORT
32static cachefs_match_val_t afs_volume_cache_match(void *target,
33 const void *entry);
34static void afs_volume_cache_update(void *source, void *entry);
35
36struct cachefs_index_def afs_volume_cache_index_def = {
37 .name = "volume",
38 .data_size = sizeof(struct afs_cache_vhash),
39 .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 1 },
40 .keys[1] = { CACHEFS_INDEX_KEYS_BIN, 1 },
41 .match = afs_volume_cache_match,
42 .update = afs_volume_cache_update,
43};
44#endif
45 21
46/*****************************************************************************/
47/* 22/*
48 * lookup a volume by name 23 * lookup a volume by name
49 * - this can be one of the following: 24 * - this can be one of the following:
@@ -66,118 +41,52 @@ struct cachefs_index_def afs_volume_cache_index_def = {
66 * - Rule 3: If parent volume is R/W, then only mount R/W volume unless 41 * - Rule 3: If parent volume is R/W, then only mount R/W volume unless
67 * explicitly told otherwise 42 * explicitly told otherwise
68 */ 43 */
69int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath, 44struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
70 struct afs_volume **_volume)
71{ 45{
72 struct afs_vlocation *vlocation = NULL; 46 struct afs_vlocation *vlocation = NULL;
73 struct afs_volume *volume = NULL; 47 struct afs_volume *volume = NULL;
74 afs_voltype_t type; 48 struct afs_server *server = NULL;
75 const char *cellname, *volname, *suffix;
76 char srvtmask; 49 char srvtmask;
77 int force, ret, loop, cellnamesz, volnamesz; 50 int ret, loop;
78
79 _enter("%s,,%d,", name, rwpath);
80
81 if (!name || (name[0] != '%' && name[0] != '#') || !name[1]) {
82 printk("kAFS: unparsable volume name\n");
83 return -EINVAL;
84 }
85
86 /* determine the type of volume we're looking for */
87 force = 0;
88 type = AFSVL_ROVOL;
89
90 if (rwpath || name[0] == '%') {
91 type = AFSVL_RWVOL;
92 force = 1;
93 }
94
95 suffix = strrchr(name, '.');
96 if (suffix) {
97 if (strcmp(suffix, ".readonly") == 0) {
98 type = AFSVL_ROVOL;
99 force = 1;
100 }
101 else if (strcmp(suffix, ".backup") == 0) {
102 type = AFSVL_BACKVOL;
103 force = 1;
104 }
105 else if (suffix[1] == 0) {
106 }
107 else {
108 suffix = NULL;
109 }
110 }
111 51
112 /* split the cell and volume names */ 52 _enter("{%*.*s,%d}",
113 name++; 53 params->volnamesz, params->volnamesz, params->volname, params->rwpath);
114 volname = strchr(name, ':');
115 if (volname) {
116 cellname = name;
117 cellnamesz = volname - name;
118 volname++;
119 }
120 else {
121 volname = name;
122 cellname = NULL;
123 cellnamesz = 0;
124 }
125
126 volnamesz = suffix ? suffix - volname : strlen(volname);
127
128 _debug("CELL:%*.*s [%p] VOLUME:%*.*s SUFFIX:%s TYPE:%d%s",
129 cellnamesz, cellnamesz, cellname ?: "", cell,
130 volnamesz, volnamesz, volname, suffix ?: "-",
131 type,
132 force ? " FORCE" : "");
133
134 /* lookup the cell record */
135 if (cellname || !cell) {
136 ret = afs_cell_lookup(cellname, cellnamesz, &cell);
137 if (ret<0) {
138 printk("kAFS: unable to lookup cell '%s'\n",
139 cellname ?: "");
140 goto error;
141 }
142 }
143 else {
144 afs_get_cell(cell);
145 }
146 54
147 /* lookup the volume location record */ 55 /* lookup the volume location record */
148 ret = afs_vlocation_lookup(cell, volname, volnamesz, &vlocation); 56 vlocation = afs_vlocation_lookup(params->cell, params->key,
149 if (ret < 0) 57 params->volname, params->volnamesz);
58 if (IS_ERR(vlocation)) {
59 ret = PTR_ERR(vlocation);
60 vlocation = NULL;
150 goto error; 61 goto error;
62 }
151 63
152 /* make the final decision on the type we want */ 64 /* make the final decision on the type we want */
153 ret = -ENOMEDIUM; 65 ret = -ENOMEDIUM;
154 if (force && !(vlocation->vldb.vidmask & (1 << type))) 66 if (params->force && !(vlocation->vldb.vidmask & (1 << params->type)))
155 goto error; 67 goto error;
156 68
157 srvtmask = 0; 69 srvtmask = 0;
158 for (loop = 0; loop < vlocation->vldb.nservers; loop++) 70 for (loop = 0; loop < vlocation->vldb.nservers; loop++)
159 srvtmask |= vlocation->vldb.srvtmask[loop]; 71 srvtmask |= vlocation->vldb.srvtmask[loop];
160 72
161 if (force) { 73 if (params->force) {
162 if (!(srvtmask & (1 << type))) 74 if (!(srvtmask & (1 << params->type)))
163 goto error; 75 goto error;
164 } 76 } else if (srvtmask & AFS_VOL_VTM_RO) {
165 else if (srvtmask & AFS_VOL_VTM_RO) { 77 params->type = AFSVL_ROVOL;
166 type = AFSVL_ROVOL; 78 } else if (srvtmask & AFS_VOL_VTM_RW) {
167 } 79 params->type = AFSVL_RWVOL;
168 else if (srvtmask & AFS_VOL_VTM_RW) { 80 } else {
169 type = AFSVL_RWVOL;
170 }
171 else {
172 goto error; 81 goto error;
173 } 82 }
174 83
175 down_write(&cell->vl_sem); 84 down_write(&params->cell->vl_sem);
176 85
177 /* is the volume already active? */ 86 /* is the volume already active? */
178 if (vlocation->vols[type]) { 87 if (vlocation->vols[params->type]) {
179 /* yes - re-use it */ 88 /* yes - re-use it */
180 volume = vlocation->vols[type]; 89 volume = vlocation->vols[params->type];
181 afs_get_volume(volume); 90 afs_get_volume(volume);
182 goto success; 91 goto success;
183 } 92 }
@@ -191,23 +100,24 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
191 goto error_up; 100 goto error_up;
192 101
193 atomic_set(&volume->usage, 1); 102 atomic_set(&volume->usage, 1);
194 volume->type = type; 103 volume->type = params->type;
195 volume->type_force = force; 104 volume->type_force = params->force;
196 volume->cell = cell; 105 volume->cell = params->cell;
197 volume->vid = vlocation->vldb.vid[type]; 106 volume->vid = vlocation->vldb.vid[params->type];
198 107
199 init_rwsem(&volume->server_sem); 108 init_rwsem(&volume->server_sem);
200 109
201 /* look up all the applicable server records */ 110 /* look up all the applicable server records */
202 for (loop = 0; loop < 8; loop++) { 111 for (loop = 0; loop < 8; loop++) {
203 if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) { 112 if (vlocation->vldb.srvtmask[loop] & (1 << volume->type)) {
204 ret = afs_server_lookup( 113 server = afs_lookup_server(
205 volume->cell, 114 volume->cell, &vlocation->vldb.servers[loop]);
206 &vlocation->vldb.servers[loop], 115 if (IS_ERR(server)) {
207 &volume->servers[volume->nservers]); 116 ret = PTR_ERR(server);
208 if (ret < 0)
209 goto error_discard; 117 goto error_discard;
118 }
210 119
120 volume->servers[volume->nservers] = server;
211 volume->nservers++; 121 volume->nservers++;
212 } 122 }
213 } 123 }
@@ -223,35 +133,34 @@ int afs_volume_lookup(const char *name, struct afs_cell *cell, int rwpath,
223 afs_get_vlocation(vlocation); 133 afs_get_vlocation(vlocation);
224 volume->vlocation = vlocation; 134 volume->vlocation = vlocation;
225 135
226 vlocation->vols[type] = volume; 136 vlocation->vols[volume->type] = volume;
227 137
228 success: 138success:
229 _debug("kAFS selected %s volume %08x", 139 _debug("kAFS selected %s volume %08x",
230 afs_voltypes[volume->type], volume->vid); 140 afs_voltypes[volume->type], volume->vid);
231 *_volume = volume; 141 up_write(&params->cell->vl_sem);
232 ret = 0; 142 afs_put_vlocation(vlocation);
143 _leave(" = %p", volume);
144 return volume;
233 145
234 /* clean up */ 146 /* clean up */
235 error_up: 147error_up:
236 up_write(&cell->vl_sem); 148 up_write(&params->cell->vl_sem);
237 error: 149error:
238 afs_put_vlocation(vlocation); 150 afs_put_vlocation(vlocation);
239 afs_put_cell(cell); 151 _leave(" = %d", ret);
240 152 return ERR_PTR(ret);
241 _leave(" = %d (%p)", ret, volume);
242 return ret;
243 153
244 error_discard: 154error_discard:
245 up_write(&cell->vl_sem); 155 up_write(&params->cell->vl_sem);
246 156
247 for (loop = volume->nservers - 1; loop >= 0; loop--) 157 for (loop = volume->nservers - 1; loop >= 0; loop--)
248 afs_put_server(volume->servers[loop]); 158 afs_put_server(volume->servers[loop]);
249 159
250 kfree(volume); 160 kfree(volume);
251 goto error; 161 goto error;
252} /* end afs_volume_lookup() */ 162}
253 163
254/*****************************************************************************/
255/* 164/*
256 * destroy a volume record 165 * destroy a volume record
257 */ 166 */
@@ -265,10 +174,9 @@ void afs_put_volume(struct afs_volume *volume)
265 174
266 _enter("%p", volume); 175 _enter("%p", volume);
267 176
268 vlocation = volume->vlocation; 177 ASSERTCMP(atomic_read(&volume->usage), >, 0);
269 178
270 /* sanity check */ 179 vlocation = volume->vlocation;
271 BUG_ON(atomic_read(&volume->usage) <= 0);
272 180
273 /* to prevent a race, the decrement and the dequeue must be effectively 181 /* to prevent a race, the decrement and the dequeue must be effectively
274 * atomic */ 182 * atomic */
@@ -296,21 +204,27 @@ void afs_put_volume(struct afs_volume *volume)
296 kfree(volume); 204 kfree(volume);
297 205
298 _leave(" [destroyed]"); 206 _leave(" [destroyed]");
299} /* end afs_put_volume() */ 207}
300 208
301/*****************************************************************************/
302/* 209/*
303 * pick a server to use when trying to access this volume 210
304 * - returns with an elevated usage count on the server chosen 211 * - returns with an elevated usage count on the server chosen
305 */ 212 */
306int afs_volume_pick_fileserver(struct afs_volume *volume, 213struct afs_server *afs_volume_pick_fileserver(struct afs_vnode *vnode)
307 struct afs_server **_server)
308{ 214{
215 struct afs_volume *volume = vnode->volume;
309 struct afs_server *server; 216 struct afs_server *server;
310 int ret, state, loop; 217 int ret, state, loop;
311 218
312 _enter("%s", volume->vlocation->vldb.name); 219 _enter("%s", volume->vlocation->vldb.name);
313 220
221 /* stick with the server we're already using if we can */
222 if (vnode->server && vnode->server->fs_state == 0) {
223 afs_get_server(vnode->server);
224 _leave(" = %p [current]", vnode->server);
225 return vnode->server;
226 }
227
314 down_read(&volume->server_sem); 228 down_read(&volume->server_sem);
315 229
316 /* handle the no-server case */ 230 /* handle the no-server case */
@@ -318,7 +232,7 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
318 ret = volume->rjservers ? -ENOMEDIUM : -ESTALE; 232 ret = volume->rjservers ? -ENOMEDIUM : -ESTALE;
319 up_read(&volume->server_sem); 233 up_read(&volume->server_sem);
320 _leave(" = %d [no servers]", ret); 234 _leave(" = %d [no servers]", ret);
321 return ret; 235 return ERR_PTR(ret);
322 } 236 }
323 237
324 /* basically, just search the list for the first live server and use 238 /* basically, just search the list for the first live server and use
@@ -328,15 +242,16 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
328 server = volume->servers[loop]; 242 server = volume->servers[loop];
329 state = server->fs_state; 243 state = server->fs_state;
330 244
245 _debug("consider %d [%d]", loop, state);
246
331 switch (state) { 247 switch (state) {
332 /* found an apparently healthy server */ 248 /* found an apparently healthy server */
333 case 0: 249 case 0:
334 afs_get_server(server); 250 afs_get_server(server);
335 up_read(&volume->server_sem); 251 up_read(&volume->server_sem);
336 *_server = server; 252 _leave(" = %p (picked %08x)",
337 _leave(" = 0 (picked %08x)", 253 server, ntohl(server->addr.s_addr));
338 ntohl(server->addr.s_addr)); 254 return server;
339 return 0;
340 255
341 case -ENETUNREACH: 256 case -ENETUNREACH:
342 if (ret == 0) 257 if (ret == 0)
@@ -372,20 +287,21 @@ int afs_volume_pick_fileserver(struct afs_volume *volume,
372 */ 287 */
373 up_read(&volume->server_sem); 288 up_read(&volume->server_sem);
374 _leave(" = %d", ret); 289 _leave(" = %d", ret);
375 return ret; 290 return ERR_PTR(ret);
376} /* end afs_volume_pick_fileserver() */ 291}
377 292
378/*****************************************************************************/
379/* 293/*
380 * release a server after use 294 * release a server after use
381 * - releases the ref on the server struct that was acquired by picking 295 * - releases the ref on the server struct that was acquired by picking
382 * - records result of using a particular server to access a volume 296 * - records result of using a particular server to access a volume
383 * - return 0 to try again, 1 if okay or if the error should be issued 297
298 * - the caller must release the server struct if result was 0
384 */ 299 */
385int afs_volume_release_fileserver(struct afs_volume *volume, 300int afs_volume_release_fileserver(struct afs_vnode *vnode,
386 struct afs_server *server, 301 struct afs_server *server,
387 int result) 302 int result)
388{ 303{
304 struct afs_volume *volume = vnode->volume;
389 unsigned loop; 305 unsigned loop;
390 306
391 _enter("%s,%08x,%d", 307 _enter("%s,%08x,%d",
@@ -396,14 +312,16 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
396 /* success */ 312 /* success */
397 case 0: 313 case 0:
398 server->fs_act_jif = jiffies; 314 server->fs_act_jif = jiffies;
399 break; 315 server->fs_state = 0;
316 _leave("");
317 return 1;
400 318
401 /* the fileserver denied all knowledge of the volume */ 319 /* the fileserver denied all knowledge of the volume */
402 case -ENOMEDIUM: 320 case -ENOMEDIUM:
403 server->fs_act_jif = jiffies; 321 server->fs_act_jif = jiffies;
404 down_write(&volume->server_sem); 322 down_write(&volume->server_sem);
405 323
406 /* first, find where the server is in the active list (if it 324 /* firstly, find where the server is in the active list (if it
407 * is) */ 325 * is) */
408 for (loop = 0; loop < volume->nservers; loop++) 326 for (loop = 0; loop < volume->nservers; loop++)
409 if (volume->servers[loop] == server) 327 if (volume->servers[loop] == server)
@@ -441,6 +359,7 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
441 case -ENETUNREACH: 359 case -ENETUNREACH:
442 case -EHOSTUNREACH: 360 case -EHOSTUNREACH:
443 case -ECONNREFUSED: 361 case -ECONNREFUSED:
362 case -ETIME:
444 case -ETIMEDOUT: 363 case -ETIMEDOUT:
445 case -EREMOTEIO: 364 case -EREMOTEIO:
446 /* mark the server as dead 365 /* mark the server as dead
@@ -460,60 +379,17 @@ int afs_volume_release_fileserver(struct afs_volume *volume,
460 server->fs_act_jif = jiffies; 379 server->fs_act_jif = jiffies;
461 case -ENOMEM: 380 case -ENOMEM:
462 case -ENONET: 381 case -ENONET:
463 break; 382 /* tell the caller to accept the result */
383 afs_put_server(server);
384 _leave(" [local failure]");
385 return 1;
464 } 386 }
465 387
466 /* tell the caller to accept the result */
467 afs_put_server(server);
468 _leave("");
469 return 1;
470
471 /* tell the caller to loop around and try the next server */ 388 /* tell the caller to loop around and try the next server */
472 try_next_server_upw: 389try_next_server_upw:
473 up_write(&volume->server_sem); 390 up_write(&volume->server_sem);
474 try_next_server: 391try_next_server:
475 afs_put_server(server); 392 afs_put_server(server);
476 _leave(" [try next server]"); 393 _leave(" [try next server]");
477 return 0; 394 return 0;
478 395}
479} /* end afs_volume_release_fileserver() */
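A minimal sketch of the retry loop a caller builds from the pick/release contract above; afs_do_operation() is a hypothetical stand-in for a real fileserver RPC, assumed here only for illustration:

static int afs_vnode_fileserver_op(struct afs_vnode *vnode)
{
	struct afs_server *server;
	int ret;

	do {
		/* comes back with an elevated usage count, or an ERR_PTR */
		server = afs_volume_pick_fileserver(vnode);
		if (IS_ERR(server))
			return PTR_ERR(server);

		/* hypothetical RPC helper, assumed for illustration */
		ret = afs_do_operation(vnode, server);

		/* 0 means that server was marked bad: pick another */
	} while (afs_volume_release_fileserver(vnode, server, ret) == 0);

	return ret;
}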
480
481/*****************************************************************************/
482/*
483 * match a volume hash record stored in the cache
484 */
485#ifdef AFS_CACHING_SUPPORT
486static cachefs_match_val_t afs_volume_cache_match(void *target,
487 const void *entry)
488{
489 const struct afs_cache_vhash *vhash = entry;
490 struct afs_volume *volume = target;
491
492 _enter("{%u},{%u}", volume->type, vhash->vtype);
493
494 if (volume->type == vhash->vtype) {
495 _leave(" = SUCCESS");
496 return CACHEFS_MATCH_SUCCESS;
497 }
498
499 _leave(" = FAILED");
500 return CACHEFS_MATCH_FAILED;
501} /* end afs_volume_cache_match() */
502#endif
503
504/*****************************************************************************/
505/*
506 * update a volume hash record stored in the cache
507 */
508#ifdef AFS_CACHING_SUPPORT
509static void afs_volume_cache_update(void *source, void *entry)
510{
511 struct afs_cache_vhash *vhash = entry;
512 struct afs_volume *volume = source;
513
514 _enter("");
515
516 vhash->vtype = volume->type;
517
518} /* end afs_volume_cache_update() */
519#endif
diff --git a/fs/afs/volume.h b/fs/afs/volume.h
deleted file mode 100644
index bfdcf19ba3f3..000000000000
--- a/fs/afs/volume.h
+++ /dev/null
@@ -1,140 +0,0 @@
1/* volume.h: AFS volume management
2 *
3 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_AFS_VOLUME_H
13#define _LINUX_AFS_VOLUME_H
14
15#include "types.h"
16#include "fsclient.h"
17#include "kafstimod.h"
18#include "kafsasyncd.h"
19#include "cache.h"
20
21typedef enum {
22 AFS_VLUPD_SLEEP, /* sleeping waiting for update timer to fire */
23 AFS_VLUPD_PENDING, /* on pending queue */
24 AFS_VLUPD_INPROGRESS, /* op in progress */
25 AFS_VLUPD_BUSYSLEEP, /* sleeping because server returned EBUSY */
26
27} __attribute__((packed)) afs_vlocation_upd_t;
28
29/*****************************************************************************/
30/*
31 * entry in the cached volume location catalogue
32 */
33struct afs_cache_vlocation
34{
35 uint8_t name[64]; /* volume name (lowercase, padded with NULs) */
36 uint8_t nservers; /* number of entries used in servers[] */
37 uint8_t vidmask; /* voltype mask for vid[] */
38 uint8_t srvtmask[8]; /* voltype masks for servers[] */
39#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
40#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
41#define AFS_VOL_VTM_BAK 0x04 /* backup version of the volume is available (on this server) */
42
43 afs_volid_t vid[3]; /* volume IDs for R/W, R/O and Bak volumes */
44 struct in_addr servers[8]; /* fileserver addresses */
45 time_t rtime; /* last retrieval time */
46};
47
48#ifdef AFS_CACHING_SUPPORT
49extern struct cachefs_index_def afs_vlocation_cache_index_def;
50#endif
51
52/*****************************************************************************/
53/*
54 * volume -> vnode hash table entry
55 */
56struct afs_cache_vhash
57{
58 afs_voltype_t vtype; /* which volume variation */
59 uint8_t hash_bucket; /* which hash bucket this represents */
60} __attribute__((packed));
61
62#ifdef AFS_CACHING_SUPPORT
63extern struct cachefs_index_def afs_volume_cache_index_def;
64#endif
65
66/*****************************************************************************/
67/*
68 * AFS volume location record
69 */
70struct afs_vlocation
71{
72 atomic_t usage;
73 struct list_head link; /* link in cell volume location list */
74 struct afs_timer timeout; /* decaching timer */
75 struct afs_cell *cell; /* cell to which volume belongs */
76#ifdef AFS_CACHING_SUPPORT
77 struct cachefs_cookie *cache; /* caching cookie */
78#endif
79 struct afs_cache_vlocation vldb; /* volume information DB record */
80 struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
81 rwlock_t lock; /* access lock */
82 unsigned long read_jif; /* time at which last read from vlserver */
83 struct afs_timer upd_timer; /* update timer */
84 struct afs_async_op upd_op; /* update operation */
85 afs_vlocation_upd_t upd_state; /* update state */
86 unsigned short upd_first_svix; /* first server index during update */
87 unsigned short upd_curr_svix; /* current server index during update */
88 unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
89 unsigned short upd_busy_cnt; /* EBUSY count during update */
90 unsigned short valid; /* T if valid */
91};
92
93extern int afs_vlocation_lookup(struct afs_cell *cell,
94 const char *name,
95 unsigned namesz,
96 struct afs_vlocation **_vlocation);
97
98#define afs_get_vlocation(V) do { atomic_inc(&(V)->usage); } while(0)
99
100extern void afs_put_vlocation(struct afs_vlocation *vlocation);
101extern void afs_vlocation_do_timeout(struct afs_vlocation *vlocation);
102
103/*****************************************************************************/
104/*
105 * AFS volume access record
106 */
107struct afs_volume
108{
109 atomic_t usage;
110 struct afs_cell *cell; /* cell to which belongs (unrefd ptr) */
111 struct afs_vlocation *vlocation; /* volume location */
112#ifdef AFS_CACHING_SUPPORT
113 struct cachefs_cookie *cache; /* caching cookie */
114#endif
115 afs_volid_t vid; /* volume ID */
116 afs_voltype_t type; /* type of volume */
117 char type_force; /* force volume type (suppress R/O -> R/W) */
118 unsigned short nservers; /* number of server slots filled */
119 unsigned short rjservers; /* number of servers discarded due to -ENOMEDIUM */
120 struct afs_server *servers[8]; /* servers on which volume resides (ordered) */
121 struct rw_semaphore server_sem; /* lock for accessing current server */
122};
123
124extern int afs_volume_lookup(const char *name,
125 struct afs_cell *cell,
126 int rwpath,
127 struct afs_volume **_volume);
128
129#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
130
131extern void afs_put_volume(struct afs_volume *volume);
132
133extern int afs_volume_pick_fileserver(struct afs_volume *volume,
134 struct afs_server **_server);
135
136extern int afs_volume_release_fileserver(struct afs_volume *volume,
137 struct afs_server *server,
138 int result);
139
140#endif /* _LINUX_AFS_VOLUME_H */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 8b1c5d8bf4ef..c68b055fa26e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -266,6 +266,23 @@ static int do_siocgstamp(unsigned int fd, unsigned int cmd, unsigned long arg)
266 return err; 266 return err;
267} 267}
268 268
269static int do_siocgstampns(unsigned int fd, unsigned int cmd, unsigned long arg)
270{
271 struct compat_timespec __user *up = compat_ptr(arg);
272 struct timespec kts;
273 mm_segment_t old_fs = get_fs();
274 int err;
275
276 set_fs(KERNEL_DS);
277 err = sys_ioctl(fd, cmd, (unsigned long)&kts);
278 set_fs(old_fs);
279 if (!err) {
280 err = put_user(kts.tv_sec, &up->tv_sec);
281 err |= __put_user(kts.tv_nsec, &up->tv_nsec);
282 }
283 return err;
284}
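The compat wrapper above keeps the following sort of 32-bit userspace code working; a minimal sketch, assuming sockfd is a socket that has already received at least one packet:

#include <stdio.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

/* print the receive timestamp of the last packet seen on sockfd */
static void print_rx_stamp(int sockfd)
{
	struct timespec ts;

	if (ioctl(sockfd, SIOCGSTAMPNS, &ts) == 0)
		printf("rx at %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("SIOCGSTAMPNS");
}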
285
269struct ifmap32 { 286struct ifmap32 {
270 compat_ulong_t mem_start; 287 compat_ulong_t mem_start;
271 compat_ulong_t mem_end; 288 compat_ulong_t mem_end;
@@ -2437,6 +2454,7 @@ HANDLE_IOCTL(SIOCBRDELIF, dev_ifsioc)
2437/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */ 2454/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
2438HANDLE_IOCTL(SIOCRTMSG, ret_einval) 2455HANDLE_IOCTL(SIOCRTMSG, ret_einval)
2439HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp) 2456HANDLE_IOCTL(SIOCGSTAMP, do_siocgstamp)
2457HANDLE_IOCTL(SIOCGSTAMPNS, do_siocgstampns)
2440#endif 2458#endif
2441#ifdef CONFIG_BLOCK 2459#ifdef CONFIG_BLOCK
2442HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo) 2460HANDLE_IOCTL(HDIO_GETGEO, hdio_getgeo)
diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c
index e3aa2253c850..fe9186312d7c 100644
--- a/fs/ecryptfs/netlink.c
+++ b/fs/ecryptfs/netlink.c
@@ -97,7 +97,7 @@ out:
97 */ 97 */
98static int ecryptfs_process_nl_response(struct sk_buff *skb) 98static int ecryptfs_process_nl_response(struct sk_buff *skb)
99{ 99{
100 struct nlmsghdr *nlh = (struct nlmsghdr*)skb->data; 100 struct nlmsghdr *nlh = nlmsg_hdr(skb);
101 struct ecryptfs_message *msg = NLMSG_DATA(nlh); 101 struct ecryptfs_message *msg = NLMSG_DATA(nlh);
102 int rc; 102 int rc;
103 103
@@ -181,7 +181,7 @@ receive:
181 "rc = [%d]\n", rc); 181 "rc = [%d]\n", rc);
182 return; 182 return;
183 } 183 }
184 nlh = (struct nlmsghdr *)skb->data; 184 nlh = nlmsg_hdr(skb);
185 if (!NLMSG_OK(nlh, skb->len)) { 185 if (!NLMSG_OK(nlh, skb->len)) {
186 ecryptfs_printk(KERN_ERR, "Received corrupt netlink " 186 ecryptfs_printk(KERN_ERR, "Received corrupt netlink "
187 "message\n"); 187 "message\n");
@@ -229,7 +229,7 @@ int ecryptfs_init_netlink(void)
229 229
230 ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0, 230 ecryptfs_nl_sock = netlink_kernel_create(NETLINK_ECRYPTFS, 0,
231 ecryptfs_receive_nl_message, 231 ecryptfs_receive_nl_message,
232 THIS_MODULE); 232 NULL, THIS_MODULE);
233 if (!ecryptfs_nl_sock) { 233 if (!ecryptfs_nl_sock) {
234 rc = -EIO; 234 rc = -EIO;
235 ecryptfs_printk(KERN_ERR, "Failed to create netlink socket\n"); 235 ecryptfs_printk(KERN_ERR, "Failed to create netlink socket\n");
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 23029f42ae8c..1d3b7a9fc828 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -670,6 +670,13 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) {
670 return ret; 670 return ret;
671 } 671 }
672 672
673 /* and a UBI volume */
674 if (jffs2_ubivol(c)) {
675 ret = jffs2_ubivol_setup(c);
676 if (ret)
677 return ret;
678 }
679
673 return ret; 680 return ret;
674} 681}
675 682
@@ -688,4 +695,9 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
688 if (jffs2_nor_wbuf_flash(c)) { 695 if (jffs2_nor_wbuf_flash(c)) {
689 jffs2_nor_wbuf_flash_cleanup(c); 696 jffs2_nor_wbuf_flash_cleanup(c);
690 } 697 }
698
699 /* and a UBI volume */
700 if (jffs2_ubivol(c)) {
701 jffs2_ubivol_cleanup(c);
702 }
691} 703}
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 2379c7e88735..80daea96bbc2 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -96,6 +96,9 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
96#define jffs2_nor_wbuf_flash(c) (0) 96#define jffs2_nor_wbuf_flash(c) (0)
97#define jffs2_nor_wbuf_flash_setup(c) (0) 97#define jffs2_nor_wbuf_flash_setup(c) (0)
98#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) 98#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
99#define jffs2_ubivol(c) (0)
100#define jffs2_ubivol_setup(c) (0)
101#define jffs2_ubivol_cleanup(c) do {} while (0)
99 102
100#else /* NAND and/or ECC'd NOR support present */ 103#else /* NAND and/or ECC'd NOR support present */
101 104
@@ -131,6 +134,9 @@ void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
131#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) 134#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH)
132int jffs2_dataflash_setup(struct jffs2_sb_info *c); 135int jffs2_dataflash_setup(struct jffs2_sb_info *c);
133void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); 136void jffs2_dataflash_cleanup(struct jffs2_sb_info *c);
137#define jffs2_ubivol(c) (c->mtd->type == MTD_UBIVOLUME)
138int jffs2_ubivol_setup(struct jffs2_sb_info *c);
139void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
134 140
135#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE)) 141#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
136int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); 142int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index dafcd4102401..c556e85a565c 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1209,3 +1209,27 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1209void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { 1209void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1210 kfree(c->wbuf); 1210 kfree(c->wbuf);
1211} 1211}
1212
1213int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1214 c->cleanmarker_size = 0;
1215
1216 if (c->mtd->writesize == 1)
1217 /* We do not need write-buffer */
1218 return 0;
1219
1220 init_rwsem(&c->wbuf_sem);
1221
1222 c->wbuf_pagesize = c->mtd->writesize;
1223 c->wbuf_ofs = 0xFFFFFFFF;
1224 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1225 if (!c->wbuf)
1226 return -ENOMEM;
1227
1228 printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size);
1229
1230 return 0;
1231}
1232
1233void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
1234 kfree(c->wbuf);
1235}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index f27e5378caf2..a0c8667caa72 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -27,6 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/highmem.h> 29#include <linux/highmem.h>
30#include <linux/swap.h>
30 31
31#define MLOG_MASK_PREFIX ML_DISK_ALLOC 32#define MLOG_MASK_PREFIX ML_DISK_ALLOC
32#include <cluster/masklog.h> 33#include <cluster/masklog.h>
@@ -34,6 +35,7 @@
34#include "ocfs2.h" 35#include "ocfs2.h"
35 36
36#include "alloc.h" 37#include "alloc.h"
38#include "aops.h"
37#include "dlmglue.h" 39#include "dlmglue.h"
38#include "extent_map.h" 40#include "extent_map.h"
39#include "inode.h" 41#include "inode.h"
@@ -47,63 +49,243 @@
47 49
48#include "buffer_head_io.h" 50#include "buffer_head_io.h"
49 51
50static int ocfs2_extent_contig(struct inode *inode, 52static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
51 struct ocfs2_extent_rec *ext,
52 u64 blkno);
53 53
54static int ocfs2_create_new_meta_bhs(struct ocfs2_super *osb, 54/*
55 handle_t *handle, 55 * Structures which describe a path through a btree, and functions to
56 struct inode *inode, 56 * manipulate them.
57 int wanted, 57 *
58 struct ocfs2_alloc_context *meta_ac, 58 * The idea here is to be as generic as possible with the tree
59 struct buffer_head *bhs[]); 59 * manipulation code.
60 */
61struct ocfs2_path_item {
62 struct buffer_head *bh;
63 struct ocfs2_extent_list *el;
64};
60 65
61static int ocfs2_add_branch(struct ocfs2_super *osb, 66#define OCFS2_MAX_PATH_DEPTH 5
62 handle_t *handle,
63 struct inode *inode,
64 struct buffer_head *fe_bh,
65 struct buffer_head *eb_bh,
66 struct buffer_head *last_eb_bh,
67 struct ocfs2_alloc_context *meta_ac);
68 67
69static int ocfs2_shift_tree_depth(struct ocfs2_super *osb, 68struct ocfs2_path {
70 handle_t *handle, 69 int p_tree_depth;
71 struct inode *inode, 70 struct ocfs2_path_item p_node[OCFS2_MAX_PATH_DEPTH];
72 struct buffer_head *fe_bh, 71};
73 struct ocfs2_alloc_context *meta_ac,
74 struct buffer_head **ret_new_eb_bh);
75 72
76static int ocfs2_do_insert_extent(struct ocfs2_super *osb, 73#define path_root_bh(_path) ((_path)->p_node[0].bh)
77 handle_t *handle, 74#define path_root_el(_path) ((_path)->p_node[0].el)
78 struct inode *inode, 75#define path_leaf_bh(_path) ((_path)->p_node[(_path)->p_tree_depth].bh)
79 struct buffer_head *fe_bh, 76#define path_leaf_el(_path) ((_path)->p_node[(_path)->p_tree_depth].el)
80 u64 blkno, 77#define path_num_items(_path) ((_path)->p_tree_depth + 1)
81 u32 new_clusters);
82 78
83static int ocfs2_find_branch_target(struct ocfs2_super *osb, 79/*
84 struct inode *inode, 80 * Reset the actual path elements so that we can re-use the structure
85 struct buffer_head *fe_bh, 81 * to build another path. Generally, this involves freeing the buffer
86 struct buffer_head **target_bh); 82 * heads.
83 */
84static void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
85{
86 int i, start = 0, depth = 0;
87 struct ocfs2_path_item *node;
87 88
88static int ocfs2_find_new_last_ext_blk(struct ocfs2_super *osb, 89 if (keep_root)
89 struct inode *inode, 90 start = 1;
90 struct ocfs2_dinode *fe, 91
91 unsigned int new_i_clusters, 92 for(i = start; i < path_num_items(path); i++) {
92 struct buffer_head *old_last_eb, 93 node = &path->p_node[i];
93 struct buffer_head **new_last_eb); 94
95 brelse(node->bh);
96 node->bh = NULL;
97 node->el = NULL;
98 }
99
100 /*
101 * Tree depth may change during truncate, or insert. If we're
102 * keeping the root extent list, then make sure that our path
103 * structure reflects the proper depth.
104 */
105 if (keep_root)
106 depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
107
108 path->p_tree_depth = depth;
109}
110
111static void ocfs2_free_path(struct ocfs2_path *path)
112{
113 if (path) {
114 ocfs2_reinit_path(path, 0);
115 kfree(path);
116 }
117}
118
119/*
120 * Make the *dest path the same as src and re-initialize src path to
121 * have a root only.
122 */
123static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
124{
125 int i;
126
127 BUG_ON(path_root_bh(dest) != path_root_bh(src));
128
129 for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
130 brelse(dest->p_node[i].bh);
131
132 dest->p_node[i].bh = src->p_node[i].bh;
133 dest->p_node[i].el = src->p_node[i].el;
134
135 src->p_node[i].bh = NULL;
136 src->p_node[i].el = NULL;
137 }
138}
139
140/*
141 * Insert an extent block at given index.
142 *
143 * This will not take an additional reference on eb_bh.
144 */
145static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
146 struct buffer_head *eb_bh)
147{
148 struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;
149
150 /*
151 * Right now, no root bh is an extent block, so this helps
152 * catch code errors with dinode trees. The assertion can be
153 * safely removed if we ever need to insert extent block
154 * structures at the root.
155 */
156 BUG_ON(index == 0);
157
158 path->p_node[index].bh = eb_bh;
159 path->p_node[index].el = &eb->h_list;
160}
161
162static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
163 struct ocfs2_extent_list *root_el)
164{
165 struct ocfs2_path *path;
166
167 BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);
168
169 path = kzalloc(sizeof(*path), GFP_NOFS);
170 if (path) {
171 path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
172 get_bh(root_bh);
173 path_root_bh(path) = root_bh;
174 path_root_el(path) = root_el;
175 }
176
177 return path;
178}
179
180/*
181 * Allocate and initialize a new path based on a disk inode tree.
182 */
183static struct ocfs2_path *ocfs2_new_inode_path(struct buffer_head *di_bh)
184{
185 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
186 struct ocfs2_extent_list *el = &di->id2.i_list;
187
188 return ocfs2_new_path(di_bh, el);
189}
190
191/*
192 * Convenience function to journal all components in a path.
193 */
194static int ocfs2_journal_access_path(struct inode *inode, handle_t *handle,
195 struct ocfs2_path *path)
196{
197 int i, ret = 0;
198
199 if (!path)
200 goto out;
201
202 for(i = 0; i < path_num_items(path); i++) {
203 ret = ocfs2_journal_access(handle, inode, path->p_node[i].bh,
204 OCFS2_JOURNAL_ACCESS_WRITE);
205 if (ret < 0) {
206 mlog_errno(ret);
207 goto out;
208 }
209 }
210
211out:
212 return ret;
213}
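Taken together, the helpers above suggest an allocate/walk/release pattern; a minimal sketch (example_read_leaf() is hypothetical, and ocfs2_find_path() is defined further down in this patch):

static int example_read_leaf(struct inode *inode,
			     struct buffer_head *di_bh, u32 cpos)
{
	int ret;
	struct ocfs2_path *path;

	path = ocfs2_new_inode_path(di_bh);	/* takes its own ref on di_bh */
	if (!path)
		return -ENOMEM;

	ret = ocfs2_find_path(inode, path, cpos);
	if (!ret) {
		struct ocfs2_extent_list *el = path_leaf_el(path);

		/* ... inspect el, the leaf list covering cpos ... */
	}

	ocfs2_free_path(path);		/* brelse()s every level held */
	return ret;
}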
214
215enum ocfs2_contig_type {
216 CONTIG_NONE = 0,
217 CONTIG_LEFT,
218 CONTIG_RIGHT
219};
94 220
95static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
96 221
97static int ocfs2_extent_contig(struct inode *inode, 222/*
98 struct ocfs2_extent_rec *ext, 223 * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
99 u64 blkno) 224 * ocfs2_extent_contig only work properly against leaf nodes!
225 */
226static int ocfs2_block_extent_contig(struct super_block *sb,
227 struct ocfs2_extent_rec *ext,
228 u64 blkno)
229{
230 u64 blk_end = le64_to_cpu(ext->e_blkno);
231
232 blk_end += ocfs2_clusters_to_blocks(sb,
233 le16_to_cpu(ext->e_leaf_clusters));
234
235 return blkno == blk_end;
236}
237
238static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
239 struct ocfs2_extent_rec *right)
240{
241 u32 left_range;
242
243 left_range = le32_to_cpu(left->e_cpos) +
244 le16_to_cpu(left->e_leaf_clusters);
245
246 return (left_range == le32_to_cpu(right->e_cpos));
247}
248
249static enum ocfs2_contig_type
250 ocfs2_extent_contig(struct inode *inode,
251 struct ocfs2_extent_rec *ext,
252 struct ocfs2_extent_rec *insert_rec)
100{ 253{
101 return blkno == (le64_to_cpu(ext->e_blkno) + 254 u64 blkno = le64_to_cpu(insert_rec->e_blkno);
102 ocfs2_clusters_to_blocks(inode->i_sb, 255
103 le32_to_cpu(ext->e_clusters))); 256 if (ocfs2_extents_adjacent(ext, insert_rec) &&
257 ocfs2_block_extent_contig(inode->i_sb, ext, blkno))
258 return CONTIG_RIGHT;
259
260 blkno = le64_to_cpu(ext->e_blkno);
261 if (ocfs2_extents_adjacent(insert_rec, ext) &&
262 ocfs2_block_extent_contig(inode->i_sb, insert_rec, blkno))
263 return CONTIG_LEFT;
264
265 return CONTIG_NONE;
104} 266}
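A worked example, assuming 8 blocks per cluster (illustrative numbers):

/*
 * A leaf record ext = { e_cpos 10, e_leaf_clusters 5, e_blkno 100 }
 * ends at cluster 10 + 5 = 15 and at block 100 + 5 * 8 = 140.  An
 * insert_rec with e_cpos 15 and e_blkno 140 therefore passes both
 * ocfs2_extents_adjacent(ext, insert_rec) and
 * ocfs2_block_extent_contig(), so ocfs2_extent_contig() reports
 * CONTIG_RIGHT; the mirrored layout reports CONTIG_LEFT, and any
 * other arrangement CONTIG_NONE.
 */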
105 267
106/* 268/*
269 * NOTE: We can have pretty much any combination of contiguousness and
270 * appending.
271 *
272 * The usefulness of APPEND_TAIL is more in that it lets us know that
273 * we'll have to update the path to that leaf.
274 */
275enum ocfs2_append_type {
276 APPEND_NONE = 0,
277 APPEND_TAIL,
278};
279
280struct ocfs2_insert_type {
281 enum ocfs2_append_type ins_appending;
282 enum ocfs2_contig_type ins_contig;
283 int ins_contig_index;
284 int ins_free_records;
285 int ins_tree_depth;
286};
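Concretely, with made-up workloads:

/*
 * Appending one cluster that physically follows the current rightmost
 * extent would classify as ins_appending = APPEND_TAIL with
 * ins_contig = CONTIG_RIGHT, so the insert degenerates to widening
 * the last leaf record.  Filling a hole in the middle of the file
 * with non-adjacent blocks would be APPEND_NONE / CONTIG_NONE and
 * would consume one of the ins_free_records.
 */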
287
288/*
107 * How many free extents have we got before we need more meta data? 289 * How many free extents have we got before we need more meta data?
108 */ 290 */
109int ocfs2_num_free_extents(struct ocfs2_super *osb, 291int ocfs2_num_free_extents(struct ocfs2_super *osb,
@@ -242,6 +424,28 @@ bail:
242} 424}
243 425
244/* 426/*
427 * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
428 *
429 * Returns the sum of the rightmost extent rec logical offset and
430 * cluster count.
431 *
432 * ocfs2_add_branch() uses this to determine what logical cluster
433 * value should be populated into the leftmost new branch records.
434 *
435 * ocfs2_shift_tree_depth() uses this to determine the # clusters
436 * value for the new topmost tree record.
437 */
438static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
439{
440 int i;
441
442 i = le16_to_cpu(el->l_next_free_rec) - 1;
443
444 return le32_to_cpu(el->l_recs[i].e_cpos) +
445 ocfs2_rec_clusters(el, &el->l_recs[i]);
446}
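For example, with made-up values:

/*
 * If the rightmost record is { e_cpos = 100, 8 clusters }, the sum is
 * 108: the first logical cluster past everything the list covers, and
 * therefore the e_cpos that the new, empty branch records must carry.
 */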
447
448/*
245 * Add an entire tree branch to our inode. eb_bh is the extent block 449 * Add an entire tree branch to our inode. eb_bh is the extent block
246 * to start at, if we don't want to start the branch at the dinode 450 * to start at, if we don't want to start the branch at the dinode
247 * structure. 451 * structure.
@@ -250,7 +454,7 @@ bail:
250 * for the new last extent block. 454 * for the new last extent block.
251 * 455 *
252 * the new branch will be 'empty' in the sense that every block will 456 * the new branch will be 'empty' in the sense that every block will
253 * contain a single record with e_clusters == 0. 457 * contain a single record with cluster count == 0.
254 */ 458 */
255static int ocfs2_add_branch(struct ocfs2_super *osb, 459static int ocfs2_add_branch(struct ocfs2_super *osb,
256 handle_t *handle, 460 handle_t *handle,
@@ -268,6 +472,7 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
268 struct ocfs2_extent_block *eb; 472 struct ocfs2_extent_block *eb;
269 struct ocfs2_extent_list *eb_el; 473 struct ocfs2_extent_list *eb_el;
270 struct ocfs2_extent_list *el; 474 struct ocfs2_extent_list *el;
475 u32 new_cpos;
271 476
272 mlog_entry_void(); 477 mlog_entry_void();
273 478
@@ -302,6 +507,9 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
302 goto bail; 507 goto bail;
303 } 508 }
304 509
510 eb = (struct ocfs2_extent_block *)last_eb_bh->b_data;
511 new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
512
305 /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be 513 /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
306 * linked with the rest of the tree. 514 * linked with the rest of the tree.
307 * conversely, new_eb_bhs[0] is the new bottommost leaf. 515
@@ -330,9 +538,18 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
330 eb->h_next_leaf_blk = 0; 538 eb->h_next_leaf_blk = 0;
331 eb_el->l_tree_depth = cpu_to_le16(i); 539 eb_el->l_tree_depth = cpu_to_le16(i);
332 eb_el->l_next_free_rec = cpu_to_le16(1); 540 eb_el->l_next_free_rec = cpu_to_le16(1);
333 eb_el->l_recs[0].e_cpos = fe->i_clusters; 541 /*
542 * This actually counts as an empty extent as
543 * the cluster count == 0
544 */
545 eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
334 eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno); 546 eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
335 eb_el->l_recs[0].e_clusters = cpu_to_le32(0); 547 /*
548 * eb_el isn't always an interior node, but even leaf
549 * nodes want a zero'd flags and reserved field so
550 * this gets the whole 32 bits regardless of use.
551 */
552 eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
336 if (!eb_el->l_tree_depth) 553 if (!eb_el->l_tree_depth)
337 new_last_eb_blk = le64_to_cpu(eb->h_blkno); 554 new_last_eb_blk = le64_to_cpu(eb->h_blkno);
338 555
@@ -376,8 +593,8 @@ static int ocfs2_add_branch(struct ocfs2_super *osb,
376 * either be on the fe, or the extent block passed in. */ 593 * either be on the fe, or the extent block passed in. */
377 i = le16_to_cpu(el->l_next_free_rec); 594 i = le16_to_cpu(el->l_next_free_rec);
378 el->l_recs[i].e_blkno = cpu_to_le64(next_blkno); 595 el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
379 el->l_recs[i].e_cpos = fe->i_clusters; 596 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
380 el->l_recs[i].e_clusters = 0; 597 el->l_recs[i].e_int_clusters = 0;
381 le16_add_cpu(&el->l_next_free_rec, 1); 598 le16_add_cpu(&el->l_next_free_rec, 1);
382 599
383 /* fe needs a new last extent block pointer, as does the 600 /* fe needs a new last extent block pointer, as does the
@@ -425,6 +642,7 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
425 struct buffer_head **ret_new_eb_bh) 642 struct buffer_head **ret_new_eb_bh)
426{ 643{
427 int status, i; 644 int status, i;
645 u32 new_clusters;
428 struct buffer_head *new_eb_bh = NULL; 646 struct buffer_head *new_eb_bh = NULL;
429 struct ocfs2_dinode *fe; 647 struct ocfs2_dinode *fe;
430 struct ocfs2_extent_block *eb; 648 struct ocfs2_extent_block *eb;
@@ -461,11 +679,8 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
461 /* copy the fe data into the new extent block */ 679 /* copy the fe data into the new extent block */
462 eb_el->l_tree_depth = fe_el->l_tree_depth; 680 eb_el->l_tree_depth = fe_el->l_tree_depth;
463 eb_el->l_next_free_rec = fe_el->l_next_free_rec; 681 eb_el->l_next_free_rec = fe_el->l_next_free_rec;
464 for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++) { 682 for(i = 0; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
465 eb_el->l_recs[i].e_cpos = fe_el->l_recs[i].e_cpos; 683 eb_el->l_recs[i] = fe_el->l_recs[i];
466 eb_el->l_recs[i].e_clusters = fe_el->l_recs[i].e_clusters;
467 eb_el->l_recs[i].e_blkno = fe_el->l_recs[i].e_blkno;
468 }
469 684
470 status = ocfs2_journal_dirty(handle, new_eb_bh); 685 status = ocfs2_journal_dirty(handle, new_eb_bh);
471 if (status < 0) { 686 if (status < 0) {
@@ -480,16 +695,15 @@ static int ocfs2_shift_tree_depth(struct ocfs2_super *osb,
480 goto bail; 695 goto bail;
481 } 696 }
482 697
698 new_clusters = ocfs2_sum_rightmost_rec(eb_el);
699
483 /* update fe now */ 700 /* update fe now */
484 le16_add_cpu(&fe_el->l_tree_depth, 1); 701 le16_add_cpu(&fe_el->l_tree_depth, 1);
485 fe_el->l_recs[0].e_cpos = 0; 702 fe_el->l_recs[0].e_cpos = 0;
486 fe_el->l_recs[0].e_blkno = eb->h_blkno; 703 fe_el->l_recs[0].e_blkno = eb->h_blkno;
487 fe_el->l_recs[0].e_clusters = fe->i_clusters; 704 fe_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
488 for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++) { 705 for(i = 1; i < le16_to_cpu(fe_el->l_next_free_rec); i++)
489 fe_el->l_recs[i].e_cpos = 0; 706 memset(&fe_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
490 fe_el->l_recs[i].e_clusters = 0;
491 fe_el->l_recs[i].e_blkno = 0;
492 }
493 fe_el->l_next_free_rec = cpu_to_le16(1); 707 fe_el->l_next_free_rec = cpu_to_le16(1);
494 708
495 /* If this is our 1st tree depth shift, then last_eb_blk 709 /* If this is our 1st tree depth shift, then last_eb_blk
@@ -515,199 +729,6 @@ bail:
515} 729}
516 730
517/* 731/*
518 * Expects the tree to already have room in the rightmost leaf for the
519 * extent. Updates all the extent blocks (and the dinode) on the way
520 * down.
521 */
522static int ocfs2_do_insert_extent(struct ocfs2_super *osb,
523 handle_t *handle,
524 struct inode *inode,
525 struct buffer_head *fe_bh,
526 u64 start_blk,
527 u32 new_clusters)
528{
529 int status, i, num_bhs = 0;
530 u64 next_blkno;
531 u16 next_free;
532 struct buffer_head **eb_bhs = NULL;
533 struct ocfs2_dinode *fe;
534 struct ocfs2_extent_block *eb;
535 struct ocfs2_extent_list *el;
536
537 mlog_entry_void();
538
539 status = ocfs2_journal_access(handle, inode, fe_bh,
540 OCFS2_JOURNAL_ACCESS_WRITE);
541 if (status < 0) {
542 mlog_errno(status);
543 goto bail;
544 }
545
546 fe = (struct ocfs2_dinode *) fe_bh->b_data;
547 el = &fe->id2.i_list;
548 if (el->l_tree_depth) {
549 /* This is another operation where we want to be
550 * careful about our tree updates. An error here means
551 * none of the previous changes we made should roll
552 * forward. As a result, we have to record the buffers
553 * for this part of the tree in an array and reserve a
554 * journal write to them before making any changes. */
555 num_bhs = le16_to_cpu(fe->id2.i_list.l_tree_depth);
556 eb_bhs = kcalloc(num_bhs, sizeof(struct buffer_head *),
557 GFP_KERNEL);
558 if (!eb_bhs) {
559 status = -ENOMEM;
560 mlog_errno(status);
561 goto bail;
562 }
563
564 i = 0;
565 while(el->l_tree_depth) {
566 next_free = le16_to_cpu(el->l_next_free_rec);
567 if (next_free == 0) {
568 ocfs2_error(inode->i_sb,
569 "Dinode %llu has a bad extent list",
570 (unsigned long long)OCFS2_I(inode)->ip_blkno);
571 status = -EIO;
572 goto bail;
573 }
574 next_blkno = le64_to_cpu(el->l_recs[next_free - 1].e_blkno);
575
576 BUG_ON(i >= num_bhs);
577 status = ocfs2_read_block(osb, next_blkno, &eb_bhs[i],
578 OCFS2_BH_CACHED, inode);
579 if (status < 0) {
580 mlog_errno(status);
581 goto bail;
582 }
583 eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;
584 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
585 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb,
586 eb);
587 status = -EIO;
588 goto bail;
589 }
590
591 status = ocfs2_journal_access(handle, inode, eb_bhs[i],
592 OCFS2_JOURNAL_ACCESS_WRITE);
593 if (status < 0) {
594 mlog_errno(status);
595 goto bail;
596 }
597
598 el = &eb->h_list;
599 i++;
600 /* When we leave this loop, eb_bhs[num_bhs - 1] will
601 * hold the bottom-most leaf extent block. */
602 }
603 BUG_ON(el->l_tree_depth);
604
605 el = &fe->id2.i_list;
606 /* If we have tree depth, then the fe update is
607 * trivial, and we want to switch el out for the
608 * bottom-most leaf in order to update it with the
609 * actual extent data below. */
610 next_free = le16_to_cpu(el->l_next_free_rec);
611 if (next_free == 0) {
612 ocfs2_error(inode->i_sb,
613 "Dinode %llu has a bad extent list",
614 (unsigned long long)OCFS2_I(inode)->ip_blkno);
615 status = -EIO;
616 goto bail;
617 }
618 le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
619 new_clusters);
620 /* (num_bhs - 1) to avoid the leaf */
621 for(i = 0; i < (num_bhs - 1); i++) {
622 eb = (struct ocfs2_extent_block *) eb_bhs[i]->b_data;
623 el = &eb->h_list;
624
625 /* finally, make our actual change to the
626 * intermediate extent blocks. */
627 next_free = le16_to_cpu(el->l_next_free_rec);
628 le32_add_cpu(&el->l_recs[next_free - 1].e_clusters,
629 new_clusters);
630
631 status = ocfs2_journal_dirty(handle, eb_bhs[i]);
632 if (status < 0)
633 mlog_errno(status);
634 }
635 BUG_ON(i != (num_bhs - 1));
636 /* note that the leaf block wasn't touched in
637 * the loop above */
638 eb = (struct ocfs2_extent_block *) eb_bhs[num_bhs - 1]->b_data;
639 el = &eb->h_list;
640 BUG_ON(el->l_tree_depth);
641 }
642
643 /* yay, we can finally add the actual extent now! */
644 i = le16_to_cpu(el->l_next_free_rec) - 1;
645 if (le16_to_cpu(el->l_next_free_rec) &&
646 ocfs2_extent_contig(inode, &el->l_recs[i], start_blk)) {
647 le32_add_cpu(&el->l_recs[i].e_clusters, new_clusters);
648 } else if (le16_to_cpu(el->l_next_free_rec) &&
649 (le32_to_cpu(el->l_recs[i].e_clusters) == 0)) {
650 /* having an empty extent at eof is legal. */
651 if (el->l_recs[i].e_cpos != fe->i_clusters) {
652 ocfs2_error(inode->i_sb,
653 "Dinode %llu trailing extent is bad: "
654 "cpos (%u) != number of clusters (%u)",
655 (unsigned long long)OCFS2_I(inode)->ip_blkno,
656 le32_to_cpu(el->l_recs[i].e_cpos),
657 le32_to_cpu(fe->i_clusters));
658 status = -EIO;
659 goto bail;
660 }
661 el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
662 el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
663 } else {
664 /* No contiguous record, or no empty record at eof, so
665 * we add a new one. */
666
667 BUG_ON(le16_to_cpu(el->l_next_free_rec) >=
668 le16_to_cpu(el->l_count));
669 i = le16_to_cpu(el->l_next_free_rec);
670
671 el->l_recs[i].e_blkno = cpu_to_le64(start_blk);
672 el->l_recs[i].e_clusters = cpu_to_le32(new_clusters);
673 el->l_recs[i].e_cpos = fe->i_clusters;
674 le16_add_cpu(&el->l_next_free_rec, 1);
675 }
676
677 /*
678 * extent_map errors are not fatal, so they are ignored outside
679 * of flushing the thing.
680 */
681 status = ocfs2_extent_map_append(inode, &el->l_recs[i],
682 new_clusters);
683 if (status) {
684 mlog_errno(status);
685 ocfs2_extent_map_drop(inode, le32_to_cpu(fe->i_clusters));
686 }
687
688 status = ocfs2_journal_dirty(handle, fe_bh);
689 if (status < 0)
690 mlog_errno(status);
691 if (fe->id2.i_list.l_tree_depth) {
692 status = ocfs2_journal_dirty(handle, eb_bhs[num_bhs - 1]);
693 if (status < 0)
694 mlog_errno(status);
695 }
696
697 status = 0;
698bail:
699 if (eb_bhs) {
700 for (i = 0; i < num_bhs; i++)
701 if (eb_bhs[i])
702 brelse(eb_bhs[i]);
703 kfree(eb_bhs);
704 }
705
706 mlog_exit(status);
707 return status;
708}
709
710/*
711 * Should only be called when there is no space left in any of the 732 * Should only be called when there is no space left in any of the
712 * leaf nodes. What we want to do is find the lowest tree depth 733 * leaf nodes. What we want to do is find the lowest tree depth
713 * non-leaf extent block with room for new records. There are three 734 * non-leaf extent block with room for new records. There are three
@@ -807,53 +828,1548 @@ bail:
807 return status; 828 return status;
808} 829}
809 830
810/* the caller needs to update fe->i_clusters */ 831/*
811int ocfs2_insert_extent(struct ocfs2_super *osb, 832 * This is only valid for leaf nodes, which are the only ones that can
812 handle_t *handle, 833 * have empty extents anyway.
813 struct inode *inode, 834 */
814 struct buffer_head *fe_bh, 835static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
815 u64 start_blk,
816 u32 new_clusters,
817 struct ocfs2_alloc_context *meta_ac)
818{ 836{
819 int status, i, shift; 837 return !rec->e_leaf_clusters;
820 struct buffer_head *last_eb_bh = NULL; 838}
839
840/*
841 * This function will discard the rightmost extent record.
842 */
843static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
844{
845 int next_free = le16_to_cpu(el->l_next_free_rec);
846 int count = le16_to_cpu(el->l_count);
847 unsigned int num_bytes;
848
849 BUG_ON(!next_free);
850 /* This will cause us to go off the end of our extent list. */
851 BUG_ON(next_free >= count);
852
853 num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
854
855 memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
856}
857
858static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
859 struct ocfs2_extent_rec *insert_rec)
860{
861 int i, insert_index, next_free, has_empty, num_bytes;
862 u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
863 struct ocfs2_extent_rec *rec;
864
865 next_free = le16_to_cpu(el->l_next_free_rec);
866 has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);
867
868 BUG_ON(!next_free);
869
870 /* The tree code before us didn't allow enough room in the leaf. */
871 if (el->l_next_free_rec == el->l_count && !has_empty)
872 BUG();
873
874 /*
875 * The easiest way to approach this is to just remove the
876 * empty extent and temporarily decrement next_free.
877 */
878 if (has_empty) {
879 /*
880 * If next_free was 1 (only an empty extent), this
881 * loop won't execute, which is fine. We still want
882 * the decrement above to happen.
883 */
884 for(i = 0; i < (next_free - 1); i++)
885 el->l_recs[i] = el->l_recs[i+1];
886
887 next_free--;
888 }
889
890 /*
891 * Figure out what the new record index should be.
892 */
893 for(i = 0; i < next_free; i++) {
894 rec = &el->l_recs[i];
895
896 if (insert_cpos < le32_to_cpu(rec->e_cpos))
897 break;
898 }
899 insert_index = i;
900
901 mlog(0, "ins %u: index %d, has_empty %d, next_free %d, count %d\n",
902 insert_cpos, insert_index, has_empty, next_free, le16_to_cpu(el->l_count));
903
904 BUG_ON(insert_index < 0);
905 BUG_ON(insert_index >= le16_to_cpu(el->l_count));
906 BUG_ON(insert_index > next_free);
907
908 /*
909 * No need to memmove if we're just adding to the tail.
910 */
911 if (insert_index != next_free) {
912 BUG_ON(next_free >= le16_to_cpu(el->l_count));
913
914 num_bytes = next_free - insert_index;
915 num_bytes *= sizeof(struct ocfs2_extent_rec);
916 memmove(&el->l_recs[insert_index + 1],
917 &el->l_recs[insert_index],
918 num_bytes);
919 }
920
921 /*
922 * Either we had an empty extent and need to re-increment, or
923 * there was no empty extent on a non-full rightmost leaf node,
924 * in which case we still need to increment.
925 */
926 next_free++;
927 el->l_next_free_rec = cpu_to_le16(next_free);
928 /*
929 * Make sure none of the math above just messed up our tree.
930 */
931 BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));
932
933 el->l_recs[insert_index] = *insert_rec;
934
935}
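A worked example with illustrative records:

/*
 * Inserting { e_cpos 8 } into a leaf holding
 *
 *	[ empty, {cpos 0}, {cpos 4}, {cpos 12} ]	next_free = 4
 *
 * first drops the empty record (next_free = 3), then computes
 * insert_index = 2 (the first record with e_cpos > 8), memmoves
 * {cpos 12} one slot right and stores the new record:
 *
 *	[ {cpos 0}, {cpos 4}, {cpos 8}, {cpos 12} ]	next_free = 4
 */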
936
937/*
938 * Create an empty extent record.
939 *
940 * l_next_free_rec may be updated.
941 *
942 * If an empty extent already exists do nothing.
943 */
944static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
945{
946 int next_free = le16_to_cpu(el->l_next_free_rec);
947
948 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
949
950 if (next_free == 0)
951 goto set_and_inc;
952
953 if (ocfs2_is_empty_extent(&el->l_recs[0]))
954 return;
955
956 mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
957 "Asked to create an empty extent in a full list:\n"
958 "count = %u, tree depth = %u",
959 le16_to_cpu(el->l_count),
960 le16_to_cpu(el->l_tree_depth));
961
962 ocfs2_shift_records_right(el);
963
964set_and_inc:
965 le16_add_cpu(&el->l_next_free_rec, 1);
966 memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
967}
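For instance, with illustrative records:

/*
 * A leaf holding [ {cpos 0, 4 clusters}, {cpos 4, 2 clusters} ] with
 * next_free = 2 becomes
 *
 *	[ empty, {cpos 0, 4 clusters}, {cpos 4, 2 clusters} ]
 *
 * with next_free = 3.  The rotation code below relies on l_recs[0]
 * being that empty record.
 */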
968
969/*
970 * For a rotation which involves two leaf nodes, the "root node" is
971 * the lowest level tree node which contains a path to both leafs. This
972 * resulting set of information can be used to form a complete "subtree"
973 *
974 * This function is passed two full paths from the dinode down to a
975 * pair of adjacent leaves. Its task is to figure out which path
976 * index contains the subtree root - this can be the root index itself
977 * in a worst-case rotation.
978 *
979 * The array index of the subtree root is passed back.
980 */
981static int ocfs2_find_subtree_root(struct inode *inode,
982 struct ocfs2_path *left,
983 struct ocfs2_path *right)
984{
985 int i = 0;
986
987 /*
988 * Check that the caller passed in two paths from the same tree.
989 */
990 BUG_ON(path_root_bh(left) != path_root_bh(right));
991
992 do {
993 i++;
994
995 /*
996 * The caller didn't pass two adjacent paths.
997 */
998 mlog_bug_on_msg(i > left->p_tree_depth,
999 "Inode %lu, left depth %u, right depth %u\n"
1000 "left leaf blk %llu, right leaf blk %llu\n",
1001 inode->i_ino, left->p_tree_depth,
1002 right->p_tree_depth,
1003 (unsigned long long)path_leaf_bh(left)->b_blocknr,
1004 (unsigned long long)path_leaf_bh(right)->b_blocknr);
1005 } while (left->p_node[i].bh->b_blocknr ==
1006 right->p_node[i].bh->b_blocknr);
1007
1008 return i - 1;
1009}
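With made-up block numbers, for a tree of depth 3:

/*
 *	left:  root -> 200 -> 300 -> 410
 *	right: root -> 200 -> 310 -> 420
 *
 * The walk sees the first divergence at index 2, so the function
 * returns 1: the index of block 200, the lowest node whose subtree
 * contains both leaves.
 */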
1010
1011typedef void (path_insert_t)(void *, struct buffer_head *);
1012
1013/*
1014 * Traverse a btree path in search of cpos, starting at root_el.
1015 *
1016 * This code can be called with a cpos larger than the tree, in which
1017 * case it will return the rightmost path.
1018 */
1019static int __ocfs2_find_path(struct inode *inode,
1020 struct ocfs2_extent_list *root_el, u32 cpos,
1021 path_insert_t *func, void *data)
1022{
1023 int i, ret = 0;
1024 u32 range;
1025 u64 blkno;
821 struct buffer_head *bh = NULL; 1026 struct buffer_head *bh = NULL;
822 struct ocfs2_dinode *fe;
823 struct ocfs2_extent_block *eb; 1027 struct ocfs2_extent_block *eb;
824 struct ocfs2_extent_list *el; 1028 struct ocfs2_extent_list *el;
1029 struct ocfs2_extent_rec *rec;
1030 struct ocfs2_inode_info *oi = OCFS2_I(inode);
825 1031
826 mlog_entry_void(); 1032 el = root_el;
1033 while (el->l_tree_depth) {
1034 if (le16_to_cpu(el->l_next_free_rec) == 0) {
1035 ocfs2_error(inode->i_sb,
1036 "Inode %llu has empty extent list at "
1037 "depth %u\n",
1038 (unsigned long long)oi->ip_blkno,
1039 le16_to_cpu(el->l_tree_depth));
1040 ret = -EROFS;
1041 goto out;
827 1042
828 mlog(0, "add %u clusters starting at block %llu to inode %llu\n", 1043 }
829 new_clusters, (unsigned long long)start_blk,
830 (unsigned long long)OCFS2_I(inode)->ip_blkno);
831 1044
832 fe = (struct ocfs2_dinode *) fe_bh->b_data; 1045 for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
833 el = &fe->id2.i_list; 1046 rec = &el->l_recs[i];
1047
1048 /*
1049 * In the case that cpos is off the allocation
1050 * tree, this should just wind up returning the
1051 * rightmost record.
1052 */
1053 range = le32_to_cpu(rec->e_cpos) +
1054 ocfs2_rec_clusters(el, rec);
1055 if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
1056 break;
1057 }
834 1058
835 if (el->l_tree_depth) { 1059 blkno = le64_to_cpu(el->l_recs[i].e_blkno);
836 /* jump to end of tree */ 1060 if (blkno == 0) {
837 status = ocfs2_read_block(osb, le64_to_cpu(fe->i_last_eb_blk), 1061 ocfs2_error(inode->i_sb,
838 &last_eb_bh, OCFS2_BH_CACHED, inode); 1062 "Inode %llu has bad blkno in extent list "
839 if (status < 0) { 1063 "at depth %u (index %d)\n",
840 mlog_exit(status); 1064 (unsigned long long)oi->ip_blkno,
841 goto bail; 1065 le16_to_cpu(el->l_tree_depth), i);
1066 ret = -EROFS;
1067 goto out;
842 } 1068 }
843 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; 1069
1070 brelse(bh);
1071 bh = NULL;
1072 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno,
1073 &bh, OCFS2_BH_CACHED, inode);
1074 if (ret) {
1075 mlog_errno(ret);
1076 goto out;
1077 }
1078
1079 eb = (struct ocfs2_extent_block *) bh->b_data;
844 el = &eb->h_list; 1080 el = &eb->h_list;
1081 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
1082 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
1083 ret = -EIO;
1084 goto out;
1085 }
1086
1087 if (le16_to_cpu(el->l_next_free_rec) >
1088 le16_to_cpu(el->l_count)) {
1089 ocfs2_error(inode->i_sb,
1090 "Inode %llu has bad count in extent list "
1091 "at block %llu (next free=%u, count=%u)\n",
1092 (unsigned long long)oi->ip_blkno,
1093 (unsigned long long)bh->b_blocknr,
1094 le16_to_cpu(el->l_next_free_rec),
1095 le16_to_cpu(el->l_count));
1096 ret = -EROFS;
1097 goto out;
1098 }
1099
1100 if (func)
1101 func(data, bh);
1102 }
1103
1104out:
1105 /*
1106 * Catch any trailing bh that the loop didn't handle.
1107 */
1108 brelse(bh);
1109
1110 return ret;
1111}
1112
1113/*
1114 * Given an initialized path (that is, it has a valid root extent
1115 * list), this function will traverse the btree in search of the path
1116 * which would contain cpos.
1117 *
1118 * The path traveled is recorded in the path structure.
1119 *
1120 * Note that this will not do any comparisons on leaf node extent
1121 * records, so it will work fine in the case that we just added a tree
1122 * branch.
1123 */
1124struct find_path_data {
1125 int index;
1126 struct ocfs2_path *path;
1127};
1128static void find_path_ins(void *data, struct buffer_head *bh)
1129{
1130 struct find_path_data *fp = data;
1131
1132 get_bh(bh);
1133 ocfs2_path_insert_eb(fp->path, fp->index, bh);
1134 fp->index++;
1135}
1136static int ocfs2_find_path(struct inode *inode, struct ocfs2_path *path,
1137 u32 cpos)
1138{
1139 struct find_path_data data;
1140
1141 data.index = 1;
1142 data.path = path;
1143 return __ocfs2_find_path(inode, path_root_el(path), cpos,
1144 find_path_ins, &data);
1145}
1146
1147static void find_leaf_ins(void *data, struct buffer_head *bh)
1148{
1149 struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
1150 struct ocfs2_extent_list *el = &eb->h_list;
1151 struct buffer_head **ret = data;
1152
1153 /* We want to retain only the leaf block. */
1154 if (le16_to_cpu(el->l_tree_depth) == 0) {
1155 get_bh(bh);
1156 *ret = bh;
1157 }
1158}
1159/*
1160 * Find the leaf block in the tree which would contain cpos. No
1161 * checking of the actual leaf is done.
1162 *
1163 * Some callers want to call this instead of allocating a path structure
1164 * and calling ocfs2_find_path().
1165 *
1166 * This function doesn't handle non-btree extent lists.
1167 */
1168int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
1169 u32 cpos, struct buffer_head **leaf_bh)
1170{
1171 int ret;
1172 struct buffer_head *bh = NULL;
1173
1174 ret = __ocfs2_find_path(inode, root_el, cpos, find_leaf_ins, &bh);
1175 if (ret) {
1176 mlog_errno(ret);
1177 goto out;
1178 }
1179
1180 *leaf_bh = bh;
1181out:
1182 return ret;
1183}
1184
1185/*
1186 * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
1187 *
1188 * Basically, we've moved stuff around at the bottom of the tree and
1189 * we need to fix up the extent records above the changes to reflect
1190 * the new changes.
1191 *
1192 * left_rec: the record on the left.
1193 * left_child_el: is the child list pointed to by left_rec
1194 * right_rec: the record to the right of left_rec
1195 * right_child_el: is the child list pointed to by right_rec
1196 *
1197 * By definition, this only works on interior nodes.
1198 */
1199static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
1200 struct ocfs2_extent_list *left_child_el,
1201 struct ocfs2_extent_rec *right_rec,
1202 struct ocfs2_extent_list *right_child_el)
1203{
1204 u32 left_clusters, right_end;
1205
1206 /*
1207 * Interior nodes never have holes. Their cpos is the cpos of
1208 * the leftmost record in their child list. Their cluster
1209 * count covers the full theoretical range of their child list
1210 * - the range between their cpos and the cpos of the record
1211 * immediately to their right.
1212 */
1213 left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
1214 left_clusters -= le32_to_cpu(left_rec->e_cpos);
1215 left_rec->e_int_clusters = cpu_to_le32(left_clusters);
1216
1217 /*
1218 * Calculate the rightmost cluster count boundary before
1219 * moving cpos - we will need to adjust clusters after
1220 * updating e_cpos to keep the same highest cluster count.
1221 */
1222 right_end = le32_to_cpu(right_rec->e_cpos);
1223 right_end += le32_to_cpu(right_rec->e_int_clusters);
1224
1225 right_rec->e_cpos = left_rec->e_cpos;
1226 le32_add_cpu(&right_rec->e_cpos, left_clusters);
1227
1228 right_end -= le32_to_cpu(right_rec->e_cpos);
1229 right_rec->e_int_clusters = cpu_to_le32(right_end);
1230}
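A worked example with made-up cluster ranges:

/*
 * Say left_rec covered [0, 16) and right_rec covered [16, 32), and a
 * rotation moved records so that the right child's leftmost record
 * now begins at cpos 12.  Then left_clusters = 12 - 0, so left_rec
 * shrinks to [0, 12); right_end = 16 + 16 = 32 is computed first,
 * right_rec->e_cpos becomes 12, and right_rec->e_int_clusters ends up
 * 32 - 12 = 20, i.e. right_rec now covers [12, 32).  The parents'
 * total coverage is unchanged.
 */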
1231
1232/*
1233 * Adjust the adjacent root node records involved in a
1234 * rotation. left_el_blkno is passed in as a key so that we can easily
1235 * find its index in the root list.
1236 */
1237static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
1238 struct ocfs2_extent_list *left_el,
1239 struct ocfs2_extent_list *right_el,
1240 u64 left_el_blkno)
1241{
1242 int i;
1243
1244 BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
1245 le16_to_cpu(left_el->l_tree_depth));
1246
1247 for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
1248 if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
1249 break;
1250 }
1251
1252 /*
1253 * The path walking code should have never returned a root and
1254 * two paths which are not adjacent.
1255 */
1256 BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
1257
1258 ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
1259 &root_el->l_recs[i + 1], right_el);
1260}
1261
1262/*
1263 * We've changed a leaf block (in right_path) and need to reflect that
1264 * change back up the subtree.
1265 *
1266 * This happens in multiple places:
1267 * - When we've moved an extent record from the left path leaf to the right
1268 * path leaf to make room for an empty extent in the left path leaf.
1269 * - When our insert into the right path leaf is at the leftmost edge
1270 * and requires an update of the path immediately to its left. This
1271 * can occur at the end of some types of rotation and appending inserts.
1272 */
1273static void ocfs2_complete_edge_insert(struct inode *inode, handle_t *handle,
1274 struct ocfs2_path *left_path,
1275 struct ocfs2_path *right_path,
1276 int subtree_index)
1277{
1278 int ret, i, idx;
1279 struct ocfs2_extent_list *el, *left_el, *right_el;
1280 struct ocfs2_extent_rec *left_rec, *right_rec;
1281 struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
1282
1283 /*
1284 * Update the counts and position values within all the
1285 * interior nodes to reflect the leaf rotation we just did.
1286 *
1287 * The root node is handled below the loop.
1288 *
1289 * We begin the loop with right_el and left_el pointing to the
1290 * leaf lists and work our way up.
1291 *
1292 * NOTE: within this loop, left_el and right_el always refer
1293 * to the *child* lists.
1294 */
1295 left_el = path_leaf_el(left_path);
1296 right_el = path_leaf_el(right_path);
1297 for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
1298 mlog(0, "Adjust records at index %u\n", i);
1299
1300 /*
1301 * One nice property of knowing that all of these
1302 * nodes are below the root is that we only deal with
1303 * the leftmost right node record and the rightmost
1304 * left node record.
1305 */
1306 el = left_path->p_node[i].el;
1307 idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
1308 left_rec = &el->l_recs[idx];
1309
1310 el = right_path->p_node[i].el;
1311 right_rec = &el->l_recs[0];
1312
1313 ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
1314 right_el);
1315
1316 ret = ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
1317 if (ret)
1318 mlog_errno(ret);
1319
1320 ret = ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
1321 if (ret)
1322 mlog_errno(ret);
1323
1324 /*
1325 * Setup our list pointers now so that the current
1326 * parents become children in the next iteration.
1327 */
1328 left_el = left_path->p_node[i].el;
1329 right_el = right_path->p_node[i].el;
1330 }
1331
1332 /*
1333 * At the root node, adjust the two adjacent records which
1334 * begin our path to the leaves.
1335 */
1336
1337 el = left_path->p_node[subtree_index].el;
1338 left_el = left_path->p_node[subtree_index + 1].el;
1339 right_el = right_path->p_node[subtree_index + 1].el;
1340
1341 ocfs2_adjust_root_records(el, left_el, right_el,
1342 left_path->p_node[subtree_index + 1].bh->b_blocknr);
1343
1344 root_bh = left_path->p_node[subtree_index].bh;
1345
1346 ret = ocfs2_journal_dirty(handle, root_bh);
1347 if (ret)
1348 mlog_errno(ret);
1349}
1350
1351static int ocfs2_rotate_subtree_right(struct inode *inode,
1352 handle_t *handle,
1353 struct ocfs2_path *left_path,
1354 struct ocfs2_path *right_path,
1355 int subtree_index)
1356{
1357 int ret, i;
1358 struct buffer_head *right_leaf_bh;
1359 struct buffer_head *left_leaf_bh = NULL;
1360 struct buffer_head *root_bh;
1361 struct ocfs2_extent_list *right_el, *left_el;
1362 struct ocfs2_extent_rec move_rec;
1363
1364 left_leaf_bh = path_leaf_bh(left_path);
1365 left_el = path_leaf_el(left_path);
1366
1367 if (left_el->l_next_free_rec != left_el->l_count) {
1368 ocfs2_error(inode->i_sb,
1369 "Inode %llu has non-full interior leaf node %llu"
1370 "(next free = %u)",
1371 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1372 (unsigned long long)left_leaf_bh->b_blocknr,
1373 le16_to_cpu(left_el->l_next_free_rec));
1374 return -EROFS;
1375 }
1376
1377 /*
1378 * This extent block may already have an empty record, so we
1379 * return early if so.
1380 */
1381 if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
1382 return 0;
1383
1384 root_bh = left_path->p_node[subtree_index].bh;
1385 BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
1386
1387 ret = ocfs2_journal_access(handle, inode, root_bh,
1388 OCFS2_JOURNAL_ACCESS_WRITE);
1389 if (ret) {
1390 mlog_errno(ret);
1391 goto out;
1392 }
1393
1394 for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
1395 ret = ocfs2_journal_access(handle, inode,
1396 right_path->p_node[i].bh,
1397 OCFS2_JOURNAL_ACCESS_WRITE);
1398 if (ret) {
1399 mlog_errno(ret);
1400 goto out;
1401 }
1402
1403 ret = ocfs2_journal_access(handle, inode,
1404 left_path->p_node[i].bh,
1405 OCFS2_JOURNAL_ACCESS_WRITE);
1406 if (ret) {
1407 mlog_errno(ret);
1408 goto out;
1409 }
1410 }
1411
1412 right_leaf_bh = path_leaf_bh(right_path);
1413 right_el = path_leaf_el(right_path);
1414
1415 /* This is a code error, not a disk corruption. */
1416 mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
1417 "because rightmost leaf block %llu is empty\n",
1418 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1419 (unsigned long long)right_leaf_bh->b_blocknr);
1420
1421 ocfs2_create_empty_extent(right_el);
1422
1423 ret = ocfs2_journal_dirty(handle, right_leaf_bh);
1424 if (ret) {
1425 mlog_errno(ret);
1426 goto out;
1427 }
1428
1429 /* Do the copy now. */
1430 i = le16_to_cpu(left_el->l_next_free_rec) - 1;
1431 move_rec = left_el->l_recs[i];
1432 right_el->l_recs[0] = move_rec;
1433
1434 /*
1435 * Clear out the record we just copied and shift everything
1436 * over, leaving an empty extent in the left leaf.
1437 *
1438 * We temporarily subtract from next_free_rec so that the
1439 * shift will lose the tail record (which is now defunct).
1440 */
1441 le16_add_cpu(&left_el->l_next_free_rec, -1);
1442 ocfs2_shift_records_right(left_el);
1443 memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
1444 le16_add_cpu(&left_el->l_next_free_rec, 1);
1445
1446 ret = ocfs2_journal_dirty(handle, left_leaf_bh);
1447 if (ret) {
1448 mlog_errno(ret);
1449 goto out;
1450 }
1451
1452 ocfs2_complete_edge_insert(inode, handle, left_path, right_path,
1453 subtree_index);
1454
1455out:
1456 return ret;
1457}
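Net effect of a successful pass: the left leaf's tail record has become record 0 of the right leaf, the left leaf is left holding an empty extent in slot 0, and ocfs2_complete_edge_insert() has fixed the interior records to match; this is exactly the state the rotation loop in ocfs2_rotate_tree_right() relies on before moving one subtree to the left.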
1458
1459/*
1460 * Given a full path, determine what cpos value would return us a path
1461 * containing the leaf immediately to the left of the current one.
1462 *
1463 * Will return zero if the path passed in is already the leftmost path.
1464 */
1465static int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
1466 struct ocfs2_path *path, u32 *cpos)
1467{
1468 int i, j, ret = 0;
1469 u64 blkno;
1470 struct ocfs2_extent_list *el;
1471
1472 BUG_ON(path->p_tree_depth == 0);
1473
1474 *cpos = 0;
1475
1476 blkno = path_leaf_bh(path)->b_blocknr;
1477
1478 /* Start at the tree node just above the leaf and work our way up. */
1479 i = path->p_tree_depth - 1;
1480 while (i >= 0) {
1481 el = path->p_node[i].el;
1482
1483 /*
1484 * Find the extent record just before the one in our
1485 * path.
1486 */
1487 for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
1488 if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
1489 if (j == 0) {
1490 if (i == 0) {
1491 /*
1492 * We've determined that the
1493 * path specified is already
1494 * the leftmost one - return a
1495 * cpos of zero.
1496 */
1497 goto out;
1498 }
1499 /*
1500 * The leftmost record points to our
1501 * leaf - we need to travel up the
1502 * tree one level.
1503 */
1504 goto next_node;
1505 }
1506
1507 *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
1508 *cpos = *cpos + ocfs2_rec_clusters(el,
1509 &el->l_recs[j - 1]);
1510 *cpos = *cpos - 1;
1511 goto out;
1512 }
1513 }
1514
1515 /*
1516 * If we got here, we never found a valid node where
1517 * the tree indicated one should be.
1518 */
1519 ocfs2_error(sb,
1520 "Invalid extent tree at extent block %llu\n",
1521 (unsigned long long)blkno);
1522 ret = -EROFS;
1523 goto out;
1524
1525next_node:
1526 blkno = path->p_node[i].bh->b_blocknr;
1527 i--;
1528 }
1529
1530out:
1531 return ret;
1532}
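As a worked example: if the record immediately to the left of ours covers cpos 100 for 8 clusters, the function returns 100 + 8 - 1 = 107, the last cluster that record covers; feeding 107 to ocfs2_find_path() is then guaranteed to descend into the leaf immediately to our left.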
1533
1534static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
1535 struct ocfs2_path *path)
1536{
1537 int credits = (path->p_tree_depth - subtree_depth) * 2 + 1;
1538
1539 if (handle->h_buffer_credits < credits)
1540 return ocfs2_extend_trans(handle, credits);
1541
1542 return 0;
1543}
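The estimate charges two buffers per level between the subtree root and the leaves (one on each of the two paths) plus one for the subtree root block itself; for example, p_tree_depth == 3 with subtree_depth == 1 asks for (3 - 1) * 2 + 1 = 5 credits.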
1544
1545/*
1546 * Trap the case where we're inserting into the theoretical range past
1547 * the _actual_ left leaf range. Otherwise, we'll rotate a record
1548 * whose cpos is less than ours into the right leaf.
1549 *
1550 * It's only necessary to look at the rightmost record of the left
1551 * leaf because the logic that calls us should ensure that the
1552 * theoretical ranges in the path components above the leaves are
1553 * correct.
1554 */
1555static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
1556 u32 insert_cpos)
1557{
1558 struct ocfs2_extent_list *left_el;
1559 struct ocfs2_extent_rec *rec;
1560 int next_free;
1561
1562 left_el = path_leaf_el(left_path);
1563 next_free = le16_to_cpu(left_el->l_next_free_rec);
1564 rec = &left_el->l_recs[next_free - 1];
1565
1566 if (insert_cpos > le32_to_cpu(rec->e_cpos))
1567 return 1;
1568 return 0;
1569}
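For instance, if the left leaf's rightmost record begins at cpos 200 and insert_cpos is 210, rotating would shift that record (cpos 200 < 210) into the right leaf, leaving it to the right of the new record; returning 1 here makes the caller stop rotating and hand the left path back for a post-insert fixup instead.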
1570
1571/*
1572 * Rotate all the records in a btree right one record, starting at insert_cpos.
1573 *
1574 * The path to the rightmost leaf should be passed in.
1575 *
1576 * The array is assumed to be large enough to hold an entire path (tree depth).
1577 *
1578 * Upon successful return from this function:
1579 *
1580 * - The 'right_path' array will contain a path to the leaf block
1581 * whose range contains e_cpos.
1582 * - That leaf block will have a single empty extent in list index 0.
1583 * - In the case that the rotation requires a post-insert update,
1584 * *ret_left_path will contain a valid path which can be passed to
1585 * ocfs2_insert_path().
1586 */
1587static int ocfs2_rotate_tree_right(struct inode *inode,
1588 handle_t *handle,
1589 u32 insert_cpos,
1590 struct ocfs2_path *right_path,
1591 struct ocfs2_path **ret_left_path)
1592{
1593 int ret, start;
1594 u32 cpos;
1595 struct ocfs2_path *left_path = NULL;
1596
1597 *ret_left_path = NULL;
1598
1599 left_path = ocfs2_new_path(path_root_bh(right_path),
1600 path_root_el(right_path));
1601 if (!left_path) {
1602 ret = -ENOMEM;
1603 mlog_errno(ret);
1604 goto out;
1605 }
1606
1607 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path, &cpos);
1608 if (ret) {
1609 mlog_errno(ret);
1610 goto out;
1611 }
1612
1613 mlog(0, "Insert: %u, first left path cpos: %u\n", insert_cpos, cpos);
1614
1615 /*
1616 * What we want to do here is:
1617 *
1618 * 1) Start with the rightmost path.
1619 *
1620 * 2) Determine a path to the leaf block directly to the left
1621 * of that leaf.
1622 *
1623 * 3) Determine the 'subtree root' - the lowest level tree node
1624 * which contains a path to both leaves.
1625 *
1626 * 4) Rotate the subtree.
1627 *
1628 * 5) Find the next subtree by considering the left path to be
1629 * the new right path.
1630 *
1631 * The check at the top of this while loop also accepts
1632 * insert_cpos == cpos because cpos is only a _theoretical_
1633 * value to get us the left path - insert_cpos might very well
1634 * be filling that hole.
1635 *
1636 * Stop at a cpos of '0' because we either started at the
1637 * leftmost branch (i.e., a tree with one branch and a
1638 * rotation inside of it), or we've gone as far as we can in
1639 * rotating subtrees.
1640 */
1641 while (cpos && insert_cpos <= cpos) {
1642 mlog(0, "Rotating a tree: ins. cpos: %u, left path cpos: %u\n",
1643 insert_cpos, cpos);
1644
1645 ret = ocfs2_find_path(inode, left_path, cpos);
1646 if (ret) {
1647 mlog_errno(ret);
1648 goto out;
1649 }
1650
1651 mlog_bug_on_msg(path_leaf_bh(left_path) ==
1652 path_leaf_bh(right_path),
1653 "Inode %lu: error during insert of %u "
1654 "(left path cpos %u) results in two identical "
1655 "paths ending at %llu\n",
1656 inode->i_ino, insert_cpos, cpos,
1657 (unsigned long long)
1658 path_leaf_bh(left_path)->b_blocknr);
1659
1660 if (ocfs2_rotate_requires_path_adjustment(left_path,
1661 insert_cpos)) {
1662 mlog(0, "Path adjustment required\n");
1663
1664 /*
1665 * We've rotated the tree as much as we
1666 * should. The rest is up to
1667 * ocfs2_insert_path() to complete, after the
1668 * record insertion. We indicate this
1669 * situation by returning the left path.
1670 *
1671 * The reason we don't adjust the records here
1672 * before the record insert is that an error
1673 * later might break the rule where a parent
1674 * record e_cpos will reflect the actual
1675 * e_cpos of the 1st nonempty record of the
1676 * child list.
1677 */
1678 *ret_left_path = left_path;
1679 goto out_ret_path;
1680 }
1681
1682 start = ocfs2_find_subtree_root(inode, left_path, right_path);
1683
1684 mlog(0, "Subtree root at index %d (blk %llu, depth %d)\n",
1685 start,
1686 (unsigned long long) right_path->p_node[start].bh->b_blocknr,
1687 right_path->p_tree_depth);
1688
1689 ret = ocfs2_extend_rotate_transaction(handle, start,
1690 right_path);
1691 if (ret) {
1692 mlog_errno(ret);
1693 goto out;
1694 }
1695
1696 ret = ocfs2_rotate_subtree_right(inode, handle, left_path,
1697 right_path, start);
1698 if (ret) {
1699 mlog_errno(ret);
1700 goto out;
1701 }
1702
1703 /*
1704 * There is no need to re-read the next right path
1705 * as we know that it'll be our current left
1706 * path. Optimize by copying values instead.
1707 */
1708 ocfs2_mv_path(right_path, left_path);
1709
1710 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
1711 &cpos);
1712 if (ret) {
1713 mlog_errno(ret);
1714 goto out;
1715 }
1716 }
1717
1718out:
1719 ocfs2_free_path(left_path);
1720
1721out_ret_path:
1722 return ret;
1723}
1724
1725/*
1726 * Do the final bits of extent record insertion at the target leaf
1727 * list. If this leaf is part of an allocation tree, it is assumed
1728 * that the tree above has been prepared.
1729 */
1730static void ocfs2_insert_at_leaf(struct ocfs2_extent_rec *insert_rec,
1731 struct ocfs2_extent_list *el,
1732 struct ocfs2_insert_type *insert,
1733 struct inode *inode)
1734{
1735 int i = insert->ins_contig_index;
1736 unsigned int range;
1737 struct ocfs2_extent_rec *rec;
1738
1739 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
1740
1741 /*
1742 * Contiguous insert - either left or right.
1743 */
1744 if (insert->ins_contig != CONTIG_NONE) {
1745 rec = &el->l_recs[i];
1746 if (insert->ins_contig == CONTIG_LEFT) {
1747 rec->e_blkno = insert_rec->e_blkno;
1748 rec->e_cpos = insert_rec->e_cpos;
1749 }
1750 le16_add_cpu(&rec->e_leaf_clusters,
1751 le16_to_cpu(insert_rec->e_leaf_clusters));
1752 return;
1753 }
1754
1755 /*
1756 * Handle insert into an empty leaf.
1757 */
1758 if (le16_to_cpu(el->l_next_free_rec) == 0 ||
1759 ((le16_to_cpu(el->l_next_free_rec) == 1) &&
1760 ocfs2_is_empty_extent(&el->l_recs[0]))) {
1761 el->l_recs[0] = *insert_rec;
1762 el->l_next_free_rec = cpu_to_le16(1);
1763 return;
1764 }
1765
1766 /*
1767 * Appending insert.
1768 */
1769 if (insert->ins_appending == APPEND_TAIL) {
1770 i = le16_to_cpu(el->l_next_free_rec) - 1;
1771 rec = &el->l_recs[i];
1772 range = le32_to_cpu(rec->e_cpos)
1773 + le16_to_cpu(rec->e_leaf_clusters);
1774 BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
1775
1776 mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
1777 le16_to_cpu(el->l_count),
1778 "inode %lu, depth %u, count %u, next free %u, "
1779 "rec.cpos %u, rec.clusters %u, "
1780 "insert.cpos %u, insert.clusters %u\n",
1781 inode->i_ino,
1782 le16_to_cpu(el->l_tree_depth),
1783 le16_to_cpu(el->l_count),
1784 le16_to_cpu(el->l_next_free_rec),
1785 le32_to_cpu(el->l_recs[i].e_cpos),
1786 le16_to_cpu(el->l_recs[i].e_leaf_clusters),
1787 le32_to_cpu(insert_rec->e_cpos),
1788 le16_to_cpu(insert_rec->e_leaf_clusters));
1789 i++;
1790 el->l_recs[i] = *insert_rec;
1791 le16_add_cpu(&el->l_next_free_rec, 1);
1792 return;
1793 }
1794
1795 /*
1796 * Ok, we have to rotate.
1797 *
1798 * At this point, it is safe to assume that inserting into an
1799 * empty leaf and appending to a leaf have both been handled
1800 * above.
1801 *
1802 * This leaf needs to have space, either by the empty 1st
1803 * extent record, or by virtue of an l_next_free_rec < l_count.
1804 */
1805 ocfs2_rotate_leaf(el, insert_rec);
1806}
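Read top to bottom, the function is a four-way dispatch. A hedged host-endian summary of just the control flow (hypothetical helper; contig, appending, next_free and first_is_empty stand in for the insert-type fields and leaf state the real code reads):

	#include <stdint.h>

	enum leaf_case { LEAF_CONTIG, LEAF_EMPTY, LEAF_APPEND, LEAF_ROTATE };

	static enum leaf_case classify(int contig, int appending,
				       uint16_t next_free, int first_is_empty)
	{
		if (contig)			/* CONTIG_LEFT or CONTIG_RIGHT */
			return LEAF_CONTIG;	/* grow an existing record */
		if (next_free == 0 || (next_free == 1 && first_is_empty))
			return LEAF_EMPTY;	/* insert takes over slot 0 */
		if (appending)			/* APPEND_TAIL */
			return LEAF_APPEND;	/* new record after the tail */
		return LEAF_ROTATE;		/* ocfs2_rotate_leaf() makes room */
	}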
1807
1808static inline void ocfs2_update_dinode_clusters(struct inode *inode,
1809 struct ocfs2_dinode *di,
1810 u32 clusters)
1811{
1812 le32_add_cpu(&di->i_clusters, clusters);
1813 spin_lock(&OCFS2_I(inode)->ip_lock);
1814 OCFS2_I(inode)->ip_clusters = le32_to_cpu(di->i_clusters);
1815 spin_unlock(&OCFS2_I(inode)->ip_lock);
1816}
1817
1818static int ocfs2_append_rec_to_path(struct inode *inode, handle_t *handle,
1819 struct ocfs2_extent_rec *insert_rec,
1820 struct ocfs2_path *right_path,
1821 struct ocfs2_path **ret_left_path)
1822{
1823 int ret, i, next_free;
1824 struct buffer_head *bh;
1825 struct ocfs2_extent_list *el;
1826 struct ocfs2_path *left_path = NULL;
1827
1828 *ret_left_path = NULL;
1829
1830 /*
1831 * This shouldn't happen for non-trees. The extent rec cluster
1832 * count manipulation below only works for interior nodes.
1833 */
1834 BUG_ON(right_path->p_tree_depth == 0);
1835
1836 /*
1837 * If our appending insert is at the leftmost edge of a leaf,
1838 * then we might need to update the rightmost records of the
1839 * neighboring path.
1840 */
1841 el = path_leaf_el(right_path);
1842 next_free = le16_to_cpu(el->l_next_free_rec);
1843 if (next_free == 0 ||
1844 (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
1845 u32 left_cpos;
1846
1847 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, right_path,
1848 &left_cpos);
1849 if (ret) {
1850 mlog_errno(ret);
1851 goto out;
1852 }
1853
1854 mlog(0, "Append may need a left path update. cpos: %u, "
1855 "left_cpos: %u\n", le32_to_cpu(insert_rec->e_cpos),
1856 left_cpos);
1857
1858 /*
1859 * No need to worry if the append is already in the
1860 * leftmost leaf.
1861 */
1862 if (left_cpos) {
1863 left_path = ocfs2_new_path(path_root_bh(right_path),
1864 path_root_el(right_path));
1865 if (!left_path) {
1866 ret = -ENOMEM;
1867 mlog_errno(ret);
1868 goto out;
1869 }
1870
1871 ret = ocfs2_find_path(inode, left_path, left_cpos);
1872 if (ret) {
1873 mlog_errno(ret);
1874 goto out;
1875 }
1876
1877 /*
1878 * ocfs2_insert_path() will pass the left_path to the
1879 * journal for us.
1880 */
1881 }
1882 }
1883
1884 ret = ocfs2_journal_access_path(inode, handle, right_path);
1885 if (ret) {
1886 mlog_errno(ret);
1887 goto out;
1888 }
1889
1890 el = path_root_el(right_path);
1891 bh = path_root_bh(right_path);
1892 i = 0;
1893 while (1) {
1894 struct ocfs2_extent_rec *rec;
1895
1896 next_free = le16_to_cpu(el->l_next_free_rec);
1897 if (next_free == 0) {
1898 ocfs2_error(inode->i_sb,
1899 "Dinode %llu has a bad extent list",
1900 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1901 ret = -EIO;
1902 goto out;
1903 }
1904
1905 rec = &el->l_recs[next_free - 1];
1906
1907 rec->e_int_clusters = insert_rec->e_cpos;
1908 le32_add_cpu(&rec->e_int_clusters,
1909 le16_to_cpu(insert_rec->e_leaf_clusters));
1910 le32_add_cpu(&rec->e_int_clusters,
1911 -le32_to_cpu(rec->e_cpos));
1912
1913 ret = ocfs2_journal_dirty(handle, bh);
1914 if (ret)
1915 mlog_errno(ret);
1916
1917 /* Don't touch the leaf node */
1918 if (++i >= right_path->p_tree_depth)
1919 break;
1920
1921 bh = right_path->p_node[i].bh;
1922 el = right_path->p_node[i].el;
1923 }
1924
1925 *ret_left_path = left_path;
1926 ret = 0;
1927out:
1928 if (ret != 0)
1929 ocfs2_free_path(left_path);
1930
1931 return ret;
1932}
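The interior update in the loop is just "stretch the rightmost record to cover the appended extent": for a parent record at e_cpos 100, an append of 10 clusters at cpos 250 yields e_int_clusters = 250 + 10 - 100 = 160.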
1933
1934/*
1935 * This function only does inserts on an allocation b-tree. For dinode
1936 * lists, ocfs2_insert_at_leaf() is called directly.
1937 *
1938 * right_path is the path we want to do the actual insert
1939 * in. left_path should only be passed in if we need to update that
1940 * portion of the tree after an edge insert.
1941 */
1942static int ocfs2_insert_path(struct inode *inode,
1943 handle_t *handle,
1944 struct ocfs2_path *left_path,
1945 struct ocfs2_path *right_path,
1946 struct ocfs2_extent_rec *insert_rec,
1947 struct ocfs2_insert_type *insert)
1948{
1949 int ret, subtree_index;
1950 struct buffer_head *leaf_bh = path_leaf_bh(right_path);
1951 struct ocfs2_extent_list *el;
1952
1953 /*
1954 * Pass both paths to the journal. The majority of inserts
1955 * will be touching all components anyway.
1956 */
1957 ret = ocfs2_journal_access_path(inode, handle, right_path);
1958 if (ret < 0) {
1959 mlog_errno(ret);
1960 goto out;
1961 }
1962
1963 if (left_path) {
1964 int credits = handle->h_buffer_credits;
1965
1966 /*
1967 * There's a chance that left_path got passed back to
1968 * us without being accounted for in the
1969 * journal. Extend our transaction here to be sure we
1970 * can change those blocks.
1971 */
1972 credits += left_path->p_tree_depth;
1973
1974 ret = ocfs2_extend_trans(handle, credits);
1975 if (ret < 0) {
1976 mlog_errno(ret);
1977 goto out;
1978 }
1979
1980 ret = ocfs2_journal_access_path(inode, handle, left_path);
1981 if (ret < 0) {
1982 mlog_errno(ret);
1983 goto out;
1984 }
1985 }
1986
1987 el = path_leaf_el(right_path);
1988
1989 ocfs2_insert_at_leaf(insert_rec, el, insert, inode);
1990 ret = ocfs2_journal_dirty(handle, leaf_bh);
1991 if (ret)
1992 mlog_errno(ret);
1993
1994 if (left_path) {
1995 /*
1996 * The rotate code has indicated that we need to fix
1997 * up portions of the tree after the insert.
1998 *
1999 * XXX: Should we extend the transaction here?
2000 */
2001 subtree_index = ocfs2_find_subtree_root(inode, left_path,
2002 right_path);
2003 ocfs2_complete_edge_insert(inode, handle, left_path,
2004 right_path, subtree_index);
2005 }
2006
2007 ret = 0;
2008out:
2009 return ret;
2010}
2011
2012static int ocfs2_do_insert_extent(struct inode *inode,
2013 handle_t *handle,
2014 struct buffer_head *di_bh,
2015 struct ocfs2_extent_rec *insert_rec,
2016 struct ocfs2_insert_type *type)
2017{
2018 int ret, rotate = 0;
2019 u32 cpos;
2020 struct ocfs2_path *right_path = NULL;
2021 struct ocfs2_path *left_path = NULL;
2022 struct ocfs2_dinode *di;
2023 struct ocfs2_extent_list *el;
2024
2025 di = (struct ocfs2_dinode *) di_bh->b_data;
2026 el = &di->id2.i_list;
2027
2028 ret = ocfs2_journal_access(handle, inode, di_bh,
2029 OCFS2_JOURNAL_ACCESS_WRITE);
2030 if (ret) {
2031 mlog_errno(ret);
2032 goto out;
2033 }
2034
2035 if (le16_to_cpu(el->l_tree_depth) == 0) {
2036 ocfs2_insert_at_leaf(insert_rec, el, type, inode);
2037 goto out_update_clusters;
2038 }
2039
2040 right_path = ocfs2_new_inode_path(di_bh);
2041 if (!right_path) {
2042 ret = -ENOMEM;
2043 mlog_errno(ret);
2044 goto out;
2045 }
2046
2047 /*
2048 * Determine the path to start with. Rotations need the
2049 * rightmost path, everything else can go directly to the
2050 * target leaf.
2051 */
2052 cpos = le32_to_cpu(insert_rec->e_cpos);
2053 if (type->ins_appending == APPEND_NONE &&
2054 type->ins_contig == CONTIG_NONE) {
2055 rotate = 1;
2056 cpos = UINT_MAX;
2057 }
2058
2059 ret = ocfs2_find_path(inode, right_path, cpos);
2060 if (ret) {
2061 mlog_errno(ret);
2062 goto out;
2063 }
2064
2065 /*
2066 * Rotations and appends need special treatment - they modify
2067 * parts of the tree above them.
2068 *
2069 * Both might pass back a path immediately to the left of the
2070 * one being inserted to. This will cause
2071 * ocfs2_insert_path() to modify the rightmost records of
2072 * left_path to account for an edge insert.
2073 *
2074 * XXX: When modifying this code, keep in mind that an insert
2075 * can wind up skipping both of these special cases...
2076 */
2077 if (rotate) {
2078 ret = ocfs2_rotate_tree_right(inode, handle,
2079 le32_to_cpu(insert_rec->e_cpos),
2080 right_path, &left_path);
2081 if (ret) {
2082 mlog_errno(ret);
2083 goto out;
2084 }
2085 } else if (type->ins_appending == APPEND_TAIL
2086 && type->ins_contig != CONTIG_LEFT) {
2087 ret = ocfs2_append_rec_to_path(inode, handle, insert_rec,
2088 right_path, &left_path);
2089 if (ret) {
2090 mlog_errno(ret);
2091 goto out;
2092 }
2093 }
2094
2095 ret = ocfs2_insert_path(inode, handle, left_path, right_path,
2096 insert_rec, type);
2097 if (ret) {
2098 mlog_errno(ret);
2099 goto out;
2100 }
2101
2102out_update_clusters:
2103 ocfs2_update_dinode_clusters(inode, di,
2104 le16_to_cpu(insert_rec->e_leaf_clusters));
2105
2106 ret = ocfs2_journal_dirty(handle, di_bh);
2107 if (ret)
2108 mlog_errno(ret);
2109
2110out:
2111 ocfs2_free_path(left_path);
2112 ocfs2_free_path(right_path);
2113
2114 return ret;
2115}
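Note the cpos = UINT_MAX trick above: no extent can start there, so ocfs2_find_path() is forced all the way down the rightmost branch, which is where a right rotation has to begin; every other insert type descends straight to the leaf that will receive the record.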
2116
2117static void ocfs2_figure_contig_type(struct inode *inode,
2118 struct ocfs2_insert_type *insert,
2119 struct ocfs2_extent_list *el,
2120 struct ocfs2_extent_rec *insert_rec)
2121{
2122 int i;
2123 enum ocfs2_contig_type contig_type = CONTIG_NONE;
2124
2125 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
2126
2127 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2128 contig_type = ocfs2_extent_contig(inode, &el->l_recs[i],
2129 insert_rec);
2130 if (contig_type != CONTIG_NONE) {
2131 insert->ins_contig_index = i;
2132 break;
2133 }
2134 }
2135 insert->ins_contig = contig_type;
2136}
2137
2138/*
2139 * This should only be called against the rightmost leaf extent list.
2140 *
2141 * ocfs2_figure_appending_type() will figure out whether we'll have to
2142 * insert at the tail of the rightmost leaf.
2143 *
2144 * This should also work against the dinode list for trees with 0
2145 * depth. If we consider the dinode list to be the rightmost leaf node
2146 * then the logic here makes sense.
2147 */
2148static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
2149 struct ocfs2_extent_list *el,
2150 struct ocfs2_extent_rec *insert_rec)
2151{
2152 int i;
2153 u32 cpos = le32_to_cpu(insert_rec->e_cpos);
2154 struct ocfs2_extent_rec *rec;
2155
2156 insert->ins_appending = APPEND_NONE;
2157
2158 BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
2159
2160 if (!el->l_next_free_rec)
2161 goto set_tail_append;
2162
2163 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
2164 /* Were all records empty? */
2165 if (le16_to_cpu(el->l_next_free_rec) == 1)
2166 goto set_tail_append;
845 } 2167 }
846 2168
847 /* Can we allocate without adding/shifting tree bits? */
848 i = le16_to_cpu(el->l_next_free_rec) - 1; 2169 i = le16_to_cpu(el->l_next_free_rec) - 1;
849 if (le16_to_cpu(el->l_next_free_rec) == 0 2170 rec = &el->l_recs[i];
850 || (le16_to_cpu(el->l_next_free_rec) < le16_to_cpu(el->l_count)) 2171
851 || le32_to_cpu(el->l_recs[i].e_clusters) == 0 2172 if (cpos >=
852 || ocfs2_extent_contig(inode, &el->l_recs[i], start_blk)) 2173 (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
853 goto out_add; 2174 goto set_tail_append;
2175
2176 return;
2177
2178set_tail_append:
2179 insert->ins_appending = APPEND_TAIL;
2180}
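The tail-append test itself is one comparison. A hedged host-endian sketch (hypothetical simplified record; the kernel reads the little-endian fields through le32_to_cpu()/le16_to_cpu() as above):

	#include <stdbool.h>
	#include <stdint.h>

	struct rec { uint32_t cpos; uint16_t leaf_clusters; };

	static bool is_tail_append(const struct rec *rightmost,
				   uint32_t ins_cpos)
	{
		/* Appending iff the new extent starts at or past the end
		 * of the current rightmost record's range. */
		return ins_cpos >= rightmost->cpos + rightmost->leaf_clusters;
	}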
2181
2182/*
2183 * Helper function called at the beginning of an insert.
2184 *
2185 * This computes a few things that are commonly used in the process of
2186 * inserting into the btree:
2187 * - Whether the new extent is contiguous with an existing one.
2188 * - The current tree depth.
2189 * - Whether the insert is an appending one.
2190 * - The total # of free records in the tree.
2191 *
2192 * All of the information is stored on the ocfs2_insert_type
2193 * structure.
2194 */
2195static int ocfs2_figure_insert_type(struct inode *inode,
2196 struct buffer_head *di_bh,
2197 struct buffer_head **last_eb_bh,
2198 struct ocfs2_extent_rec *insert_rec,
2199 struct ocfs2_insert_type *insert)
2200{
2201 int ret;
2202 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2203 struct ocfs2_extent_block *eb;
2204 struct ocfs2_extent_list *el;
2205 struct ocfs2_path *path = NULL;
2206 struct buffer_head *bh = NULL;
2207
2208 el = &di->id2.i_list;
2209 insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
2210
2211 if (el->l_tree_depth) {
2212 /*
2213 * If we have tree depth, we read in the
2214 * rightmost extent block ahead of time as
2215 * ocfs2_figure_insert_type() and ocfs2_add_branch()
2216 * may want it later.
2217 */
2218 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
2219 le64_to_cpu(di->i_last_eb_blk), &bh,
2220 OCFS2_BH_CACHED, inode);
2221 if (ret) {
2222 mlog_exit(ret);
2223 goto out;
2224 }
2225 eb = (struct ocfs2_extent_block *) bh->b_data;
2226 el = &eb->h_list;
2227 }
2228
2229 /*
2230 * Unless we have a contiguous insert, we'll need to know if
2231 * there is room left in our allocation tree for another
2232 * extent record.
2233 *
2234 * XXX: This test is simplistic, we can search for empty
2235 * extent records too.
2236 */
2237 insert->ins_free_records = le16_to_cpu(el->l_count) -
2238 le16_to_cpu(el->l_next_free_rec);
2239
2240 if (!insert->ins_tree_depth) {
2241 ocfs2_figure_contig_type(inode, insert, el, insert_rec);
2242 ocfs2_figure_appending_type(insert, el, insert_rec);
2243 return 0;
2244 }
2245
2246 path = ocfs2_new_inode_path(di_bh);
2247 if (!path) {
2248 ret = -ENOMEM;
2249 mlog_errno(ret);
2250 goto out;
2251 }
2252
2253 /*
2254 * In the case that we're inserting past what the tree
2255 * currently accounts for, ocfs2_find_path() will return for
2256 * us the rightmost tree path. This is accounted for below in
2257 * the appending code.
2258 */
2259 ret = ocfs2_find_path(inode, path, le32_to_cpu(insert_rec->e_cpos));
2260 if (ret) {
2261 mlog_errno(ret);
2262 goto out;
2263 }
2264
2265 el = path_leaf_el(path);
2266
2267 /*
2268 * Now that we have the path, there are two things we want to determine:
2269 * 1) Contiguousness (also set contig_index if this is so)
2270 *
2271 * 2) Are we doing an append? We can trivially break this up
2272 * into two types of appends: simple record append, or a
2273 * rotate inside the tail leaf.
2274 */
2275 ocfs2_figure_contig_type(inode, insert, el, insert_rec);
2276
2277 /*
2278 * The insert code isn't quite ready to deal with all cases of
2279 * left contiguousness. Specifically, if it's an insert into
2280 * the 1st record in a leaf, it will require the adjustment of
2281 * cluster count on the last record of the path directly to its
2282 * left. For now, just catch that case and fool the layers
2283 * above us. This works just fine for tree_depth == 0, which
2284 * is why we allow that above.
2285 */
2286 if (insert->ins_contig == CONTIG_LEFT &&
2287 insert->ins_contig_index == 0)
2288 insert->ins_contig = CONTIG_NONE;
2289
2290 /*
2291 * Ok, so we can simply compare against last_eb to figure out
2292 * whether the path doesn't exist. This will only happen in
2293 * the case that we're doing a tail append, so maybe we can
2294 * take advantage of that information somehow.
2295 */
2296 if (le64_to_cpu(di->i_last_eb_blk) == path_leaf_bh(path)->b_blocknr) {
2297 /*
2298 * Ok, ocfs2_find_path() returned us the rightmost
2299 * tree path. This might be an appending insert. There are
2300 * two cases:
2301 * 1) We're doing a true append at the tail:
2302 * -This might even be off the end of the leaf
2303 * 2) We're "appending" by rotating in the tail
2304 */
2305 ocfs2_figure_appending_type(insert, el, insert_rec);
2306 }
2307
2308out:
2309 ocfs2_free_path(path);
2310
2311 if (ret == 0)
2312 *last_eb_bh = bh;
2313 else
2314 brelse(bh);
2315 return ret;
2316}
2317
2318/*
2319 * Insert an extent into an inode btree.
2320 *
2321 * The caller needs to update fe->i_clusters
2322 */
2323int ocfs2_insert_extent(struct ocfs2_super *osb,
2324 handle_t *handle,
2325 struct inode *inode,
2326 struct buffer_head *fe_bh,
2327 u32 cpos,
2328 u64 start_blk,
2329 u32 new_clusters,
2330 struct ocfs2_alloc_context *meta_ac)
2331{
2332 int status, shift;
2333 struct buffer_head *last_eb_bh = NULL;
2334 struct buffer_head *bh = NULL;
2335 struct ocfs2_insert_type insert = {0, };
2336 struct ocfs2_extent_rec rec;
2337
2338 mlog(0, "add %u clusters at position %u to inode %llu\n",
2339 new_clusters, cpos, (unsigned long long)OCFS2_I(inode)->ip_blkno);
2340
2341 mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
2342 (OCFS2_I(inode)->ip_clusters != cpos),
2343 "Device %s, asking for sparse allocation: inode %llu, "
2344 "cpos %u, clusters %u\n",
2345 osb->dev_str,
2346 (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos,
2347 OCFS2_I(inode)->ip_clusters);
2348
2349 memset(&rec, 0, sizeof(rec));
2350 rec.e_cpos = cpu_to_le32(cpos);
2351 rec.e_blkno = cpu_to_le64(start_blk);
2352 rec.e_leaf_clusters = cpu_to_le16(new_clusters);
2353
2354 status = ocfs2_figure_insert_type(inode, fe_bh, &last_eb_bh, &rec,
2355 &insert);
2356 if (status < 0) {
2357 mlog_errno(status);
2358 goto bail;
2359 }
854 2360
855 mlog(0, "ocfs2_allocate_extent: couldn't do a simple add, traversing " 2361 mlog(0, "Insert.appending: %u, Insert.Contig: %u, "
856 "tree now.\n"); 2362 "Insert.contig_index: %d, Insert.free_records: %d, "
2363 "Insert.tree_depth: %d\n",
2364 insert.ins_appending, insert.ins_contig, insert.ins_contig_index,
2365 insert.ins_free_records, insert.ins_tree_depth);
2366
2367 /*
2368 * Avoid growing the tree unless we're out of records and the
2369 * insert type requires one.
2370 */
2371 if (insert.ins_contig != CONTIG_NONE || insert.ins_free_records)
2372 goto out_add;
857 2373
858 shift = ocfs2_find_branch_target(osb, inode, fe_bh, &bh); 2374 shift = ocfs2_find_branch_target(osb, inode, fe_bh, &bh);
859 if (shift < 0) { 2375 if (shift < 0) {
@@ -866,13 +2382,9 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
866 * and didn't find room for any more extents - we need to add 2382 * and didn't find room for any more extents - we need to add
867 * another tree level */ 2383 * another tree level */
868 if (shift) { 2384 if (shift) {
869 /* if we hit a leaf, we'd better be empty :) */
870 BUG_ON(le16_to_cpu(el->l_next_free_rec) !=
871 le16_to_cpu(el->l_count));
872 BUG_ON(bh); 2385 BUG_ON(bh);
873 mlog(0, "ocfs2_allocate_extent: need to shift tree depth " 2386 mlog(0, "need to shift tree depth "
874 "(current = %u)\n", 2387 "(current = %d)\n", insert.ins_tree_depth);
875 le16_to_cpu(fe->id2.i_list.l_tree_depth));
876 2388
877 /* ocfs2_shift_tree_depth will return us a buffer with 2389 /* ocfs2_shift_tree_depth will return us a buffer with
878 * the new extent block (so we can pass that to 2390 * the new extent block (so we can pass that to
@@ -883,15 +2395,16 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
883 mlog_errno(status); 2395 mlog_errno(status);
884 goto bail; 2396 goto bail;
885 } 2397 }
2398 insert.ins_tree_depth++;
886 /* Special case: we have room now if we shifted from 2399 /* Special case: we have room now if we shifted from
887 * tree_depth 0 */ 2400 * tree_depth 0 */
888 if (fe->id2.i_list.l_tree_depth == cpu_to_le16(1)) 2401 if (insert.ins_tree_depth == 1)
889 goto out_add; 2402 goto out_add;
890 } 2403 }
891 2404
892 /* call ocfs2_add_branch to add the final part of the tree with 2405 /* call ocfs2_add_branch to add the final part of the tree with
893 * the new data. */ 2406 * the new data. */
894 mlog(0, "ocfs2_allocate_extent: add branch. bh = %p\n", bh); 2407 mlog(0, "add branch. bh = %p\n", bh);
895 status = ocfs2_add_branch(osb, handle, inode, fe_bh, bh, last_eb_bh, 2408 status = ocfs2_add_branch(osb, handle, inode, fe_bh, bh, last_eb_bh,
896 meta_ac); 2409 meta_ac);
897 if (status < 0) { 2410 if (status < 0) {
@@ -900,11 +2413,12 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
900 } 2413 }
901 2414
902out_add: 2415out_add:
903 /* Finally, we can add clusters. */ 2416 /* Finally, we can add clusters. This might rotate the tree for us. */
904 status = ocfs2_do_insert_extent(osb, handle, inode, fe_bh, 2417 status = ocfs2_do_insert_extent(inode, handle, fe_bh, &rec, &insert);
905 start_blk, new_clusters);
906 if (status < 0) 2418 if (status < 0)
907 mlog_errno(status); 2419 mlog_errno(status);
2420 else
2421 ocfs2_extent_map_insert_rec(inode, &rec);
908 2422
909bail: 2423bail:
910 if (bh) 2424 if (bh)
@@ -1447,168 +2961,389 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb)
1447 * block will be deleted, and if it will, what the new last extent 2961 * block will be deleted, and if it will, what the new last extent
1448 * block will be so we can update its h_next_leaf_blk field, as well 2962 * block will be so we can update its h_next_leaf_blk field, as well
1449 * as the dinode's i_last_eb_blk */ 2963 * as the dinode's i_last_eb_blk */
1450static int ocfs2_find_new_last_ext_blk(struct ocfs2_super *osb, 2964static int ocfs2_find_new_last_ext_blk(struct inode *inode,
1451 struct inode *inode, 2965 unsigned int clusters_to_del,
1452 struct ocfs2_dinode *fe, 2966 struct ocfs2_path *path,
1453 u32 new_i_clusters,
1454 struct buffer_head *old_last_eb,
1455 struct buffer_head **new_last_eb) 2967 struct buffer_head **new_last_eb)
1456{ 2968{
1457 int i, status = 0; 2969 int next_free, ret = 0;
1458 u64 block = 0; 2970 u32 cpos;
2971 struct ocfs2_extent_rec *rec;
1459 struct ocfs2_extent_block *eb; 2972 struct ocfs2_extent_block *eb;
1460 struct ocfs2_extent_list *el; 2973 struct ocfs2_extent_list *el;
1461 struct buffer_head *bh = NULL; 2974 struct buffer_head *bh = NULL;
1462 2975
1463 *new_last_eb = NULL; 2976 *new_last_eb = NULL;
1464 2977
1465 if (!OCFS2_IS_VALID_DINODE(fe)) {
1466 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1467 status = -EIO;
1468 goto bail;
1469 }
1470
1471 /* we have no tree, so of course, no last_eb. */ 2978 /* we have no tree, so of course, no last_eb. */
1472 if (!fe->id2.i_list.l_tree_depth) 2979 if (!path->p_tree_depth)
1473 goto bail; 2980 goto out;
1474 2981
1475 /* trunc to zero special case - this makes tree_depth = 0 2982 /* trunc to zero special case - this makes tree_depth = 0
1476 * regardless of what it is. */ 2983 * regardless of what it is. */
1477 if (!new_i_clusters) 2984 if (OCFS2_I(inode)->ip_clusters == clusters_to_del)
1478 goto bail; 2985 goto out;
1479 2986
1480 eb = (struct ocfs2_extent_block *) old_last_eb->b_data; 2987 el = path_leaf_el(path);
1481 el = &(eb->h_list);
1482 BUG_ON(!el->l_next_free_rec); 2988 BUG_ON(!el->l_next_free_rec);
1483 2989
1484 /* Make sure that this guy will actually be empty after we 2990 /*
1485 * clear away the data. */ 2991 * Make sure that this extent list will actually be empty
1486 if (le32_to_cpu(el->l_recs[0].e_cpos) < new_i_clusters) 2992 * after we clear away the data. We can shortcut out if
1487 goto bail; 2993 * there's more than one non-empty extent in the
2994 * list. Otherwise, a check of the remaining extent is
2995 * necessary.
2996 */
2997 next_free = le16_to_cpu(el->l_next_free_rec);
2998 rec = NULL;
2999 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
3000 if (next_free > 2)
3001 goto out;
1488 3002
1489 /* Ok, at this point, we know that last_eb will definitely 3003 /* We may have a valid extent in index 1, check it. */
1490 * change, so lets traverse the tree and find the second to 3004 if (next_free == 2)
1491 * last extent block. */ 3005 rec = &el->l_recs[1];
1492 el = &(fe->id2.i_list); 3006
1493 /* go down the tree, */ 3007 /*
1494 do { 3008 * Fall through - no more nonempty extents, so we want
1495 for(i = (le16_to_cpu(el->l_next_free_rec) - 1); i >= 0; i--) { 3009 * to delete this leaf.
1496 if (le32_to_cpu(el->l_recs[i].e_cpos) < 3010 */
1497 new_i_clusters) { 3011 } else {
1498 block = le64_to_cpu(el->l_recs[i].e_blkno); 3012 if (next_free > 1)
1499 break; 3013 goto out;
1500 } 3014
3015 rec = &el->l_recs[0];
3016 }
3017
3018 if (rec) {
3019 /*
3020 * Check if we'll only be trimming off the end of this
3021 * cluster.
3022 */
3023 if (le16_to_cpu(rec->e_leaf_clusters) > clusters_to_del)
3024 goto out;
3025 }
3026
3027 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb, path, &cpos);
3028 if (ret) {
3029 mlog_errno(ret);
3030 goto out;
3031 }
3032
3033 ret = ocfs2_find_leaf(inode, path_root_el(path), cpos, &bh);
3034 if (ret) {
3035 mlog_errno(ret);
3036 goto out;
3037 }
3038
3039 eb = (struct ocfs2_extent_block *) bh->b_data;
3040 el = &eb->h_list;
3041 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
3042 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
3043 ret = -EROFS;
3044 goto out;
3045 }
3046
3047 *new_last_eb = bh;
3048 get_bh(*new_last_eb);
3049 mlog(0, "returning block %llu, (cpos: %u)\n",
3050 (unsigned long long)le64_to_cpu(eb->h_blkno), cpos);
3051out:
3052 brelse(bh);
3053
3054 return ret;
3055}
3056
3057/*
3058 * Trim some clusters off the rightmost edge of a tree. Only called
3059 * during truncate.
3060 *
3061 * The caller needs to:
3062 * - start journaling of each path component.
3063 * - compute and fully set up any new last ext block
3064 */
3065static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
3066 handle_t *handle, struct ocfs2_truncate_context *tc,
3067 u32 clusters_to_del, u64 *delete_start)
3068{
3069 int ret, i, index = path->p_tree_depth;
3070 u32 new_edge = 0;
3071 u64 deleted_eb = 0;
3072 struct buffer_head *bh;
3073 struct ocfs2_extent_list *el;
3074 struct ocfs2_extent_rec *rec;
3075
3076 *delete_start = 0;
3077
3078 while (index >= 0) {
3079 bh = path->p_node[index].bh;
3080 el = path->p_node[index].el;
3081
3082 mlog(0, "traveling tree (index = %d, block = %llu)\n",
3083 index, (unsigned long long)bh->b_blocknr);
3084
3085 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
3086
3087 if (index !=
3088 (path->p_tree_depth - le16_to_cpu(el->l_tree_depth))) {
3089 ocfs2_error(inode->i_sb,
3090 "Inode %lu has invalid ext. block %llu",
3091 inode->i_ino,
3092 (unsigned long long)bh->b_blocknr);
3093 ret = -EROFS;
3094 goto out;
1501 } 3095 }
1502 BUG_ON(i < 0);
1503 3096
1504 if (bh) { 3097find_tail_record:
1505 brelse(bh); 3098 i = le16_to_cpu(el->l_next_free_rec) - 1;
1506 bh = NULL; 3099 rec = &el->l_recs[i];
3100
3101 mlog(0, "Extent list before: record %d: (%u, %u, %llu), "
3102 "next = %u\n", i, le32_to_cpu(rec->e_cpos),
3103 ocfs2_rec_clusters(el, rec),
3104 (unsigned long long)le64_to_cpu(rec->e_blkno),
3105 le16_to_cpu(el->l_next_free_rec));
3106
3107 BUG_ON(ocfs2_rec_clusters(el, rec) < clusters_to_del);
3108
3109 if (le16_to_cpu(el->l_tree_depth) == 0) {
3110 /*
3111 * If the leaf block contains a single empty
3112 * extent and no records, we can just remove
3113 * the block.
3114 */
3115 if (i == 0 && ocfs2_is_empty_extent(rec)) {
3116 memset(rec, 0,
3117 sizeof(struct ocfs2_extent_rec));
3118 el->l_next_free_rec = cpu_to_le16(0);
3119
3120 goto delete;
3121 }
3122
3123 /*
3124 * Remove any empty extents by shifting things
3125 * left. That should make life much easier on
3126 * the code below. This condition is rare
3127 * enough that we shouldn't see a performance
3128 * hit.
3129 */
3130 if (ocfs2_is_empty_extent(&el->l_recs[0])) {
3131 le16_add_cpu(&el->l_next_free_rec, -1);
3132
3133 for(i = 0;
3134 i < le16_to_cpu(el->l_next_free_rec); i++)
3135 el->l_recs[i] = el->l_recs[i + 1];
3136
3137 memset(&el->l_recs[i], 0,
3138 sizeof(struct ocfs2_extent_rec));
3139
3140 /*
3141 * We've modified our extent list. The
3142 * simplest way to handle this change
3143 * is to begin the search from the
3144 * start again.
3145 */
3146 goto find_tail_record;
3147 }
3148
3149 le16_add_cpu(&rec->e_leaf_clusters, -clusters_to_del);
3150
3151 /*
3152 * We'll use "new_edge" on our way back up the
3153 * tree to know what our rightmost cpos is.
3154 */
3155 new_edge = le16_to_cpu(rec->e_leaf_clusters);
3156 new_edge += le32_to_cpu(rec->e_cpos);
3157
3158 /*
3159 * The caller will use this to delete data blocks.
3160 */
3161 *delete_start = le64_to_cpu(rec->e_blkno)
3162 + ocfs2_clusters_to_blocks(inode->i_sb,
3163 le16_to_cpu(rec->e_leaf_clusters));
3164
3165 /*
3166 * If it's now empty, remove this record.
3167 */
3168 if (le16_to_cpu(rec->e_leaf_clusters) == 0) {
3169 memset(rec, 0,
3170 sizeof(struct ocfs2_extent_rec));
3171 le16_add_cpu(&el->l_next_free_rec, -1);
3172 }
3173 } else {
3174 if (le64_to_cpu(rec->e_blkno) == deleted_eb) {
3175 memset(rec, 0,
3176 sizeof(struct ocfs2_extent_rec));
3177 le16_add_cpu(&el->l_next_free_rec, -1);
3178
3179 goto delete;
3180 }
3181
3182 /* Can this actually happen? */
3183 if (le16_to_cpu(el->l_next_free_rec) == 0)
3184 goto delete;
3185
3186 /*
3187 * We never actually deleted any clusters
3188 * because our leaf was empty. There's no
3189 * reason to adjust the rightmost edge then.
3190 */
3191 if (new_edge == 0)
3192 goto delete;
3193
3194 rec->e_int_clusters = cpu_to_le32(new_edge);
3195 le32_add_cpu(&rec->e_int_clusters,
3196 -le32_to_cpu(rec->e_cpos));
3197
3198 /*
3199 * A deleted child record should have been
3200 * caught above.
3201 */
3202 BUG_ON(le32_to_cpu(rec->e_int_clusters) == 0);
1507 } 3203 }
1508 3204
1509 status = ocfs2_read_block(osb, block, &bh, OCFS2_BH_CACHED, 3205delete:
1510 inode); 3206 ret = ocfs2_journal_dirty(handle, bh);
1511 if (status < 0) { 3207 if (ret) {
1512 mlog_errno(status); 3208 mlog_errno(ret);
1513 goto bail; 3209 goto out;
1514 } 3210 }
1515 eb = (struct ocfs2_extent_block *) bh->b_data; 3211
1516 el = &eb->h_list; 3212 mlog(0, "extent list container %llu, after: record %d: "
1517 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { 3213 "(%u, %u, %llu), next = %u.\n",
1518 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); 3214 (unsigned long long)bh->b_blocknr, i,
1519 status = -EIO; 3215 le32_to_cpu(rec->e_cpos), ocfs2_rec_clusters(el, rec),
1520 goto bail; 3216 (unsigned long long)le64_to_cpu(rec->e_blkno),
3217 le16_to_cpu(el->l_next_free_rec));
3218
3219 /*
3220 * We must be careful to only attempt delete of an
3221 * extent block (and not the root inode block).
3222 */
3223 if (index > 0 && le16_to_cpu(el->l_next_free_rec) == 0) {
3224 struct ocfs2_extent_block *eb =
3225 (struct ocfs2_extent_block *)bh->b_data;
3226
3227 /*
3228 * Save this for use when processing the
3229 * parent block.
3230 */
3231 deleted_eb = le64_to_cpu(eb->h_blkno);
3232
3233 mlog(0, "deleting this extent block.\n");
3234
3235 ocfs2_remove_from_cache(inode, bh);
3236
3237 BUG_ON(ocfs2_rec_clusters(el, &el->l_recs[0]));
3238 BUG_ON(le32_to_cpu(el->l_recs[0].e_cpos));
3239 BUG_ON(le64_to_cpu(el->l_recs[0].e_blkno));
3240
3241 if (le16_to_cpu(eb->h_suballoc_slot) == 0) {
3242 /*
3243 * This code only understands how to
3244 * lock the suballocator in slot 0,
3245 * which is fine because allocation is
3246 * only ever done out of that
3247 * suballocator too. A future version
3248 * might change that however, so avoid
3249 * a free if we don't know how to
3250 * handle it. This way an fs incompat
3251 * bit will not be necessary.
3252 */
3253 ret = ocfs2_free_extent_block(handle,
3254 tc->tc_ext_alloc_inode,
3255 tc->tc_ext_alloc_bh,
3256 eb);
3257
3258 /* An error here is not fatal. */
3259 if (ret < 0)
3260 mlog_errno(ret);
3261 }
3262 } else {
3263 deleted_eb = 0;
1521 } 3264 }
1522 } while (el->l_tree_depth);
1523 3265
1524 *new_last_eb = bh; 3266 index--;
1525 get_bh(*new_last_eb); 3267 }
1526 mlog(0, "returning block %llu\n",
1527 (unsigned long long)le64_to_cpu(eb->h_blkno));
1528bail:
1529 if (bh)
1530 brelse(bh);
1531 3268
1532 return status; 3269 ret = 0;
3270out:
3271 return ret;
1533} 3272}
1534 3273
1535static int ocfs2_do_truncate(struct ocfs2_super *osb, 3274static int ocfs2_do_truncate(struct ocfs2_super *osb,
1536 unsigned int clusters_to_del, 3275 unsigned int clusters_to_del,
1537 struct inode *inode, 3276 struct inode *inode,
1538 struct buffer_head *fe_bh, 3277 struct buffer_head *fe_bh,
1539 struct buffer_head *old_last_eb_bh,
1540 handle_t *handle, 3278 handle_t *handle,
1541 struct ocfs2_truncate_context *tc) 3279 struct ocfs2_truncate_context *tc,
3280 struct ocfs2_path *path)
1542{ 3281{
1543 int status, i, depth; 3282 int status;
1544 struct ocfs2_dinode *fe; 3283 struct ocfs2_dinode *fe;
1545 struct ocfs2_extent_block *eb;
1546 struct ocfs2_extent_block *last_eb = NULL; 3284 struct ocfs2_extent_block *last_eb = NULL;
1547 struct ocfs2_extent_list *el; 3285 struct ocfs2_extent_list *el;
1548 struct buffer_head *eb_bh = NULL;
1549 struct buffer_head *last_eb_bh = NULL; 3286 struct buffer_head *last_eb_bh = NULL;
1550 u64 next_eb = 0;
1551 u64 delete_blk = 0; 3287 u64 delete_blk = 0;
1552 3288
1553 fe = (struct ocfs2_dinode *) fe_bh->b_data; 3289 fe = (struct ocfs2_dinode *) fe_bh->b_data;
1554 3290
1555 status = ocfs2_find_new_last_ext_blk(osb, 3291 status = ocfs2_find_new_last_ext_blk(inode, clusters_to_del,
1556 inode, 3292 path, &last_eb_bh);
1557 fe,
1558 le32_to_cpu(fe->i_clusters) -
1559 clusters_to_del,
1560 old_last_eb_bh,
1561 &last_eb_bh);
1562 if (status < 0) { 3293 if (status < 0) {
1563 mlog_errno(status); 3294 mlog_errno(status);
1564 goto bail; 3295 goto bail;
1565 } 3296 }
1566 if (last_eb_bh)
1567 last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
1568 3297
1569 status = ocfs2_journal_access(handle, inode, fe_bh, 3298 /*
1570 OCFS2_JOURNAL_ACCESS_WRITE); 3299 * Each component will be touched, so we might as well journal
3300 * here to avoid having to handle errors later.
3301 */
3302 status = ocfs2_journal_access_path(inode, handle, path);
1571 if (status < 0) { 3303 if (status < 0) {
1572 mlog_errno(status); 3304 mlog_errno(status);
1573 goto bail; 3305 goto bail;
1574 } 3306 }
3307
3308 if (last_eb_bh) {
3309 status = ocfs2_journal_access(handle, inode, last_eb_bh,
3310 OCFS2_JOURNAL_ACCESS_WRITE);
3311 if (status < 0) {
3312 mlog_errno(status);
3313 goto bail;
3314 }
3315
3316 last_eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
3317 }
3318
1575 el = &(fe->id2.i_list); 3319 el = &(fe->id2.i_list);
1576 3320
3321 /*
3322 * Lower levels depend on this never happening, but it's best
3323 * to check it up here before changing the tree.
3324 */
3325 if (el->l_tree_depth && el->l_recs[0].e_int_clusters == 0) {
3326 ocfs2_error(inode->i_sb,
3327 "Inode %lu has an empty extent record, depth %u\n",
3328 inode->i_ino, le16_to_cpu(el->l_tree_depth));
3329 status = -EROFS;
3330 goto bail;
3331 }
3332
1577 spin_lock(&OCFS2_I(inode)->ip_lock); 3333 spin_lock(&OCFS2_I(inode)->ip_lock);
1578 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - 3334 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) -
1579 clusters_to_del; 3335 clusters_to_del;
1580 spin_unlock(&OCFS2_I(inode)->ip_lock); 3336 spin_unlock(&OCFS2_I(inode)->ip_lock);
1581 le32_add_cpu(&fe->i_clusters, -clusters_to_del); 3337 le32_add_cpu(&fe->i_clusters, -clusters_to_del);
1582 fe->i_mtime = cpu_to_le64(CURRENT_TIME.tv_sec);
1583 fe->i_mtime_nsec = cpu_to_le32(CURRENT_TIME.tv_nsec);
1584
1585 i = le16_to_cpu(el->l_next_free_rec) - 1;
1586
1587 BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del);
1588 le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del);
1589 /* tree depth zero, we can just delete the clusters, otherwise
1590 * we need to record the offset of the next level extent block
1591 * as we may overwrite it. */
1592 if (!el->l_tree_depth)
1593 delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
1594 + ocfs2_clusters_to_blocks(osb->sb,
1595 le32_to_cpu(el->l_recs[i].e_clusters));
1596 else
1597 next_eb = le64_to_cpu(el->l_recs[i].e_blkno);
1598 3338
1599 if (!el->l_recs[i].e_clusters) { 3339 status = ocfs2_trim_tree(inode, path, handle, tc,
1600 /* if we deleted the whole extent record, then clear 3340 clusters_to_del, &delete_blk);
1601 * out the other fields and update the extent 3341 if (status) {
1602 * list. For depth > 0 trees, we've already recorded 3342 mlog_errno(status);
1603 * the extent block in 'next_eb' */ 3343 goto bail;
1604 el->l_recs[i].e_cpos = 0;
1605 el->l_recs[i].e_blkno = 0;
1606 BUG_ON(!el->l_next_free_rec);
1607 le16_add_cpu(&el->l_next_free_rec, -1);
1608 } 3344 }
1609 3345
1610 depth = le16_to_cpu(el->l_tree_depth); 3346 if (le32_to_cpu(fe->i_clusters) == 0) {
1611 if (!fe->i_clusters) {
1612 /* trunc to zero is a special case. */ 3347 /* trunc to zero is a special case. */
1613 el->l_tree_depth = 0; 3348 el->l_tree_depth = 0;
1614 fe->i_last_eb_blk = 0; 3349 fe->i_last_eb_blk = 0;
@@ -1625,12 +3360,6 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
1625 /* If there will be a new last extent block, then by 3360 /* If there will be a new last extent block, then by
1626 * definition, there cannot be any leaves to the right of 3361 * definition, there cannot be any leaves to the right of
1627 * it. */ 3362 * it. */
1628 status = ocfs2_journal_access(handle, inode, last_eb_bh,
1629 OCFS2_JOURNAL_ACCESS_WRITE);
1630 if (status < 0) {
1631 mlog_errno(status);
1632 goto bail;
1633 }
1634 last_eb->h_next_leaf_blk = 0; 3363 last_eb->h_next_leaf_blk = 0;
1635 status = ocfs2_journal_dirty(handle, last_eb_bh); 3364 status = ocfs2_journal_dirty(handle, last_eb_bh);
1636 if (status < 0) { 3365 if (status < 0) {
@@ -1639,123 +3368,247 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb,
1639 } 3368 }
1640 } 3369 }
1641 3370
1642 /* if our tree depth > 0, update all the tree blocks below us. */ 3371 if (delete_blk) {
1643 while (depth) { 3372 status = ocfs2_truncate_log_append(osb, handle, delete_blk,
1644 mlog(0, "traveling tree (depth = %d, next_eb = %llu)\n", 3373 clusters_to_del);
1645 depth, (unsigned long long)next_eb);
1646 status = ocfs2_read_block(osb, next_eb, &eb_bh,
1647 OCFS2_BH_CACHED, inode);
1648 if (status < 0) { 3374 if (status < 0) {
1649 mlog_errno(status); 3375 mlog_errno(status);
1650 goto bail; 3376 goto bail;
1651 } 3377 }
1652 eb = (struct ocfs2_extent_block *)eb_bh->b_data; 3378 }
1653 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { 3379 status = 0;
1654 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); 3380bail:
1655 status = -EIO; 3381
1656 goto bail; 3382 mlog_exit(status);
3383 return status;
3384}
3385
3386static int ocfs2_writeback_zero_func(handle_t *handle, struct buffer_head *bh)
3387{
3388 set_buffer_uptodate(bh);
3389 mark_buffer_dirty(bh);
3390 return 0;
3391}
3392
3393static int ocfs2_ordered_zero_func(handle_t *handle, struct buffer_head *bh)
3394{
3395 set_buffer_uptodate(bh);
3396 mark_buffer_dirty(bh);
3397 return ocfs2_journal_dirty_data(handle, bh);
3398}
3399
3400static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t isize,
3401 struct page **pages, int numpages,
3402 u64 phys, handle_t *handle)
3403{
3404 int i, ret, partial = 0;
3405 void *kaddr;
3406 struct page *page;
3407 unsigned int from, to = PAGE_CACHE_SIZE;
3408 struct super_block *sb = inode->i_sb;
3409
3410 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
3411
3412 if (numpages == 0)
3413 goto out;
3414
3415 from = isize & (PAGE_CACHE_SIZE - 1); /* 1st page offset */
3416 if (PAGE_CACHE_SHIFT > OCFS2_SB(sb)->s_clustersize_bits) {
3417 /*
3418 * Since 'from' has been capped to a value below page
3419 * size, this calculation won't be able to overflow
3420 * 'to'
3421 */
3422 to = ocfs2_align_bytes_to_clusters(sb, from);
3423
3424 /*
3425 * The truncate tail in this case should never contain
3426 * more than one page. The loop below also
3427 * assumes this.
3428 */
3429 BUG_ON(numpages != 1);
3430 }
3431
3432 for(i = 0; i < numpages; i++) {
3433 page = pages[i];
3434
3435 BUG_ON(from > PAGE_CACHE_SIZE);
3436 BUG_ON(to > PAGE_CACHE_SIZE);
3437
3438 ret = ocfs2_map_page_blocks(page, &phys, inode, from, to, 0);
3439 if (ret)
3440 mlog_errno(ret);
3441
3442 kaddr = kmap_atomic(page, KM_USER0);
3443 memset(kaddr + from, 0, to - from);
3444 kunmap_atomic(kaddr, KM_USER0);
3445
3446 /*
3447 * Need to set the buffers we zero'd into uptodate
3448 * here if they aren't - ocfs2_map_page_blocks()
3449 * might've skipped some
3450 */
3451 if (ocfs2_should_order_data(inode)) {
3452 ret = walk_page_buffers(handle,
3453 page_buffers(page),
3454 from, to, &partial,
3455 ocfs2_ordered_zero_func);
3456 if (ret < 0)
3457 mlog_errno(ret);
3458 } else {
3459 ret = walk_page_buffers(handle, page_buffers(page),
3460 from, to, &partial,
3461 ocfs2_writeback_zero_func);
3462 if (ret < 0)
3463 mlog_errno(ret);
1657 } 3464 }
1658 el = &(eb->h_list);
1659 3465
1660 status = ocfs2_journal_access(handle, inode, eb_bh, 3466 if (!partial)
1661 OCFS2_JOURNAL_ACCESS_WRITE); 3467 SetPageUptodate(page);
1662 if (status < 0) { 3468
1663 mlog_errno(status); 3469 flush_dcache_page(page);
1664 goto bail; 3470
3471 /*
3472 * Every page after the 1st one should be completely zero'd.
3473 */
3474 from = 0;
3475 }
3476out:
3477 if (pages) {
3478 for (i = 0; i < numpages; i++) {
3479 page = pages[i];
3480 unlock_page(page);
3481 mark_page_accessed(page);
3482 page_cache_release(page);
1665 } 3483 }
3484 }
3485}
1666 3486
1667 BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0); 3487static int ocfs2_grab_eof_pages(struct inode *inode, loff_t isize, struct page **pages,
1668 BUG_ON(depth != (le16_to_cpu(el->l_tree_depth) + 1)); 3488 int *num, u64 *phys)
3489{
3490 int i, numpages = 0, ret = 0;
3491 unsigned int csize = OCFS2_SB(inode->i_sb)->s_clustersize;
3492 unsigned int ext_flags;
3493 struct super_block *sb = inode->i_sb;
3494 struct address_space *mapping = inode->i_mapping;
3495 unsigned long index;
3496 u64 next_cluster_bytes;
3497
3498 BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
3499
3500 /* Cluster boundary, so we don't need to grab any pages. */
3501 if ((isize & (csize - 1)) == 0)
3502 goto out;
1669 3503
1670 i = le16_to_cpu(el->l_next_free_rec) - 1; 3504 ret = ocfs2_extent_map_get_blocks(inode, isize >> sb->s_blocksize_bits,
3505 phys, NULL, &ext_flags);
3506 if (ret) {
3507 mlog_errno(ret);
3508 goto out;
3509 }
1671 3510
1672 mlog(0, "extent block %llu, before: record %d: " 3511 /* Tail is a hole. */
1673 "(%u, %u, %llu), next = %u\n", 3512 if (*phys == 0)
1674 (unsigned long long)le64_to_cpu(eb->h_blkno), i, 3513 goto out;
1675 le32_to_cpu(el->l_recs[i].e_cpos),
1676 le32_to_cpu(el->l_recs[i].e_clusters),
1677 (unsigned long long)le64_to_cpu(el->l_recs[i].e_blkno),
1678 le16_to_cpu(el->l_next_free_rec));
1679 3514
1680 BUG_ON(le32_to_cpu(el->l_recs[i].e_clusters) < clusters_to_del); 3515 /* Tail is marked as unwritten, we can count on write to zero
1681 le32_add_cpu(&el->l_recs[i].e_clusters, -clusters_to_del); 3516 * in that case. */
1682 3517 if (ext_flags & OCFS2_EXT_UNWRITTEN)
1683 next_eb = le64_to_cpu(el->l_recs[i].e_blkno); 3518 goto out;
1684 /* bottom-most block requires us to delete data.*/
1685 if (!el->l_tree_depth)
1686 delete_blk = le64_to_cpu(el->l_recs[i].e_blkno)
1687 + ocfs2_clusters_to_blocks(osb->sb,
1688 le32_to_cpu(el->l_recs[i].e_clusters));
1689 if (!el->l_recs[i].e_clusters) {
1690 el->l_recs[i].e_cpos = 0;
1691 el->l_recs[i].e_blkno = 0;
1692 BUG_ON(!el->l_next_free_rec);
1693 le16_add_cpu(&el->l_next_free_rec, -1);
1694 }
1695 mlog(0, "extent block %llu, after: record %d: "
1696 "(%u, %u, %llu), next = %u\n",
1697 (unsigned long long)le64_to_cpu(eb->h_blkno), i,
1698 le32_to_cpu(el->l_recs[i].e_cpos),
1699 le32_to_cpu(el->l_recs[i].e_clusters),
1700 (unsigned long long)le64_to_cpu(el->l_recs[i].e_blkno),
1701 le16_to_cpu(el->l_next_free_rec));
1702 3519
1703 status = ocfs2_journal_dirty(handle, eb_bh); 3520 next_cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, isize);
1704 if (status < 0) { 3521 index = isize >> PAGE_CACHE_SHIFT;
1705 mlog_errno(status); 3522 do {
1706 goto bail; 3523 pages[numpages] = grab_cache_page(mapping, index);
3524 if (!pages[numpages]) {
3525 ret = -ENOMEM;
3526 mlog_errno(ret);
3527 goto out;
1707 } 3528 }
1708 3529
1709 if (!el->l_next_free_rec) { 3530 numpages++;
1710 mlog(0, "deleting this extent block.\n"); 3531 index++;
1711 3532 } while (index < (next_cluster_bytes >> PAGE_CACHE_SHIFT));
1712 ocfs2_remove_from_cache(inode, eb_bh);
1713 3533
1714 BUG_ON(el->l_recs[0].e_clusters); 3534out:
1715 BUG_ON(el->l_recs[0].e_cpos); 3535 if (ret != 0) {
1716 BUG_ON(el->l_recs[0].e_blkno); 3536 if (pages) {
1717 if (eb->h_suballoc_slot == 0) { 3537 for (i = 0; i < numpages; i++) {
1718 /* 3538 if (pages[i]) {
1719 * This code only understands how to 3539 unlock_page(pages[i]);
1720 * lock the suballocator in slot 0, 3540 page_cache_release(pages[i]);
1721 * which is fine because allocation is
1722 * only ever done out of that
1723 * suballocator too. A future version
1724 * might change that however, so avoid
1725 * a free if we don't know how to
1726 * handle it. This way an fs incompat
1727 * bit will not be necessary.
1728 */
1729 status = ocfs2_free_extent_block(handle,
1730 tc->tc_ext_alloc_inode,
1731 tc->tc_ext_alloc_bh,
1732 eb);
1733 if (status < 0) {
1734 mlog_errno(status);
1735 goto bail;
1736 } 3541 }
1737 } 3542 }
1738 } 3543 }
1739 brelse(eb_bh); 3544 numpages = 0;
1740 eb_bh = NULL;
1741 depth--;
1742 } 3545 }
1743 3546
1744 BUG_ON(!delete_blk); 3547 *num = numpages;
1745 status = ocfs2_truncate_log_append(osb, handle, delete_blk, 3548
1746 clusters_to_del); 3549 return ret;
1747 if (status < 0) { 3550}
1748 mlog_errno(status); 3551
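
The do/while loop in ocfs2_grab_eof_pages() above grabs every page from the one containing i_size up to (but not including) the next cluster boundary. A hedged userspace model of that index range, with assumed sizes:

#include <stdio.h>

#define PAGE_SHIFT_DEMO   12              /* 4 KiB pages, illustrative */
#define CLUSTER_SIZE_DEMO (64UL * 1024)   /* 64 KiB clusters, illustrative */

int main(void)
{
	unsigned long long isize = 150000; /* hypothetical new i_size */
	unsigned long long next_cluster =
		(isize + CLUSTER_SIZE_DEMO - 1) & ~(CLUSTER_SIZE_DEMO - 1);
	unsigned long first = isize >> PAGE_SHIFT_DEMO;
	unsigned long end = next_cluster >> PAGE_SHIFT_DEMO; /* exclusive */

	/* If i_size already sits on a cluster boundary there is
	 * nothing to zero and no pages are grabbed. */
	if ((isize & (CLUSTER_SIZE_DEMO - 1)) == 0)
		end = first;

	printf("grab pages [%lu, %lu), %lu page(s)\n",
	       first, end, end - first);
	return 0;
}

Here isize lands in cluster 2, so pages 36 through 47 are grabbed, zeroed, and written out.
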
1749 goto bail; 3552/*
3553 * Zero the area past i_size but still within an allocated
3554 * cluster. This avoids exposing nonzero data on subsequent file
3555 * extends.
3556 *
3557 * We need to call this before i_size is updated on the inode because
3558 * otherwise block_write_full_page() will skip writeout of pages past
3559 * i_size. The new_i_size parameter is passed for this reason.
3560 */
3561int ocfs2_zero_tail_for_truncate(struct inode *inode, handle_t *handle,
3562 u64 new_i_size)
3563{
3564 int ret, numpages;
3565 loff_t endbyte;
3566 struct page **pages = NULL;
3567 u64 phys;
3568
3569 /*
3570 * File systems which don't support sparse files zero on every
3571 * extend.
3572 */
3573 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
3574 return 0;
3575
3576 pages = kcalloc(ocfs2_pages_per_cluster(inode->i_sb),
3577 sizeof(struct page *), GFP_NOFS);
3578 if (pages == NULL) {
3579 ret = -ENOMEM;
3580 mlog_errno(ret);
3581 goto out;
1750 } 3582 }
1751 status = 0; 3583
1752bail: 3584 ret = ocfs2_grab_eof_pages(inode, new_i_size, pages, &numpages, &phys);
1753 if (!status) 3585 if (ret) {
1754 ocfs2_extent_map_trunc(inode, le32_to_cpu(fe->i_clusters)); 3586 mlog_errno(ret);
1755 else 3587 goto out;
1756 ocfs2_extent_map_drop(inode, 0); 3588 }
1757 mlog_exit(status); 3589
1758 return status; 3590 if (numpages == 0)
3591 goto out;
3592
3593 ocfs2_zero_cluster_pages(inode, new_i_size, pages, numpages, phys,
3594 handle);
3595
3596 /*
3597 * Initiate writeout of the pages we zero'd here. We don't
3598 * wait on them - the truncate_inode_pages() call later will
3599 * do that for us.
3600 */
3601 endbyte = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
3602 ret = do_sync_mapping_range(inode->i_mapping, new_i_size,
3603 endbyte - 1, SYNC_FILE_RANGE_WRITE);
3604 if (ret)
3605 mlog_errno(ret);
3606
3607out:
3608 if (pages)
3609 kfree(pages);
3610
3611 return ret;
1759} 3612}
1760 3613
1761/* 3614/*
@@ -1770,82 +3623,90 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
1770 struct ocfs2_truncate_context *tc) 3623 struct ocfs2_truncate_context *tc)
1771{ 3624{
1772 int status, i, credits, tl_sem = 0; 3625 int status, i, credits, tl_sem = 0;
1773 u32 clusters_to_del, target_i_clusters; 3626 u32 clusters_to_del, new_highest_cpos, range;
1774 u64 last_eb = 0;
1775 struct ocfs2_dinode *fe;
1776 struct ocfs2_extent_block *eb;
1777 struct ocfs2_extent_list *el; 3627 struct ocfs2_extent_list *el;
1778 struct buffer_head *last_eb_bh;
1779 handle_t *handle = NULL; 3628 handle_t *handle = NULL;
1780 struct inode *tl_inode = osb->osb_tl_inode; 3629 struct inode *tl_inode = osb->osb_tl_inode;
3630 struct ocfs2_path *path = NULL;
1781 3631
1782 mlog_entry_void(); 3632 mlog_entry_void();
1783 3633
1784 down_write(&OCFS2_I(inode)->ip_alloc_sem); 3634 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1785 3635
1786 target_i_clusters = ocfs2_clusters_for_bytes(osb->sb, 3636 new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
1787 i_size_read(inode)); 3637 i_size_read(inode));
1788 3638
1789 last_eb_bh = tc->tc_last_eb_bh; 3639 path = ocfs2_new_inode_path(fe_bh);
1790 tc->tc_last_eb_bh = NULL; 3640 if (!path) {
3641 status = -ENOMEM;
3642 mlog_errno(status);
3643 goto bail;
3644 }
1791 3645
1792 fe = (struct ocfs2_dinode *) fe_bh->b_data; 3646 ocfs2_extent_map_trunc(inode, new_highest_cpos);
1793 3647
1794 if (fe->id2.i_list.l_tree_depth) {
1795 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
1796 el = &eb->h_list;
1797 } else
1798 el = &fe->id2.i_list;
1799 last_eb = le64_to_cpu(fe->i_last_eb_blk);
1800start: 3648start:
1801 mlog(0, "ocfs2_commit_truncate: fe->i_clusters = %u, " 3649 /*
1802 "last_eb = %llu, fe->i_last_eb_blk = %llu, " 3650 * Check that we still have allocation to delete.
1803 "fe->id2.i_list.l_tree_depth = %u last_eb_bh = %p\n", 3651 */
1804 le32_to_cpu(fe->i_clusters), (unsigned long long)last_eb, 3652 if (OCFS2_I(inode)->ip_clusters == 0) {
1805 (unsigned long long)le64_to_cpu(fe->i_last_eb_blk), 3653 status = 0;
1806 le16_to_cpu(fe->id2.i_list.l_tree_depth), last_eb_bh); 3654 goto bail;
1807 3655 }
1808 if (last_eb != le64_to_cpu(fe->i_last_eb_blk)) {
1809 mlog(0, "last_eb changed!\n");
1810 BUG_ON(!fe->id2.i_list.l_tree_depth);
1811 last_eb = le64_to_cpu(fe->i_last_eb_blk);
1812 /* i_last_eb_blk may have changed, read it if
1813 * necessary. We don't have to worry about the
1814 * truncate to zero case here (where there becomes no
1815 * last_eb) because we never loop back after our work
1816 * is done. */
1817 if (last_eb_bh) {
1818 brelse(last_eb_bh);
1819 last_eb_bh = NULL;
1820 }
1821 3656
1822 status = ocfs2_read_block(osb, last_eb, 3657 /*
1823 &last_eb_bh, OCFS2_BH_CACHED, 3658 * Truncate always works against the rightmost tree branch.
1824 inode); 3659 */
1825 if (status < 0) { 3660 status = ocfs2_find_path(inode, path, UINT_MAX);
1826 mlog_errno(status); 3661 if (status) {
1827 goto bail; 3662 mlog_errno(status);
1828 } 3663 goto bail;
1829 eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; 3664 }
1830 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) { 3665
1831 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb); 3666 mlog(0, "inode->ip_clusters = %u, tree_depth = %u\n",
1832 status = -EIO; 3667 OCFS2_I(inode)->ip_clusters, path->p_tree_depth);
1833 goto bail; 3668
1834 } 3669 /*
1835 el = &(eb->h_list); 3670 * By now, el will point to the extent list on the bottom most
3671 * portion of this tree. Only the tail record is considered in
3672 * each pass.
3673 *
3674 * We handle the following cases, in order:
3675 * - empty extent: delete the remaining branch
3676 * - remove the entire record
3677 * - remove a partial record
3678 * - no record needs to be removed (truncate has completed)
3679 */
3680 el = path_leaf_el(path);
3681 if (le16_to_cpu(el->l_next_free_rec) == 0) {
3682 ocfs2_error(inode->i_sb,
3683 "Inode %llu has empty extent block at %llu\n",
3684 (unsigned long long)OCFS2_I(inode)->ip_blkno,
3685 (unsigned long long)path_leaf_bh(path)->b_blocknr);
3686 status = -EROFS;
3687 goto bail;
1836 } 3688 }
1837 3689
1838 /* by now, el will point to the extent list on the bottom most
1839 * portion of this tree. */
1840 i = le16_to_cpu(el->l_next_free_rec) - 1; 3690 i = le16_to_cpu(el->l_next_free_rec) - 1;
1841 if (le32_to_cpu(el->l_recs[i].e_cpos) >= target_i_clusters) 3691 range = le32_to_cpu(el->l_recs[i].e_cpos) +
1842 clusters_to_del = le32_to_cpu(el->l_recs[i].e_clusters); 3692 ocfs2_rec_clusters(el, &el->l_recs[i]);
1843 else 3693 if (i == 0 && ocfs2_is_empty_extent(&el->l_recs[i])) {
1844 clusters_to_del = (le32_to_cpu(el->l_recs[i].e_clusters) + 3694 clusters_to_del = 0;
3695 } else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
3696 clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
3697 } else if (range > new_highest_cpos) {
3698 clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
1845 le32_to_cpu(el->l_recs[i].e_cpos)) - 3699 le32_to_cpu(el->l_recs[i].e_cpos)) -
1846 target_i_clusters; 3700 new_highest_cpos;
3701 } else {
3702 status = 0;
3703 goto bail;
3704 }
1847 3705
1848 mlog(0, "clusters_to_del = %u in this pass\n", clusters_to_del); 3706 mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
3707 clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
3708
3709 BUG_ON(clusters_to_del == 0);
1849 3710
1850 mutex_lock(&tl_inode->i_mutex); 3711 mutex_lock(&tl_inode->i_mutex);
1851 tl_sem = 1; 3712 tl_sem = 1;
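
The clusters_to_del selection above is plain interval arithmetic against the tail record; a compact model with made-up record values (the empty-extent case is elided):

#include <stdio.h>

/* Simplified stand-in for the tail extent record. */
struct rec { unsigned cpos; unsigned clusters; };

/* How many clusters of 'r' to delete when truncating down to
 * new_highest cpos; 0 means this pass has nothing to remove.
 * Mirrors the three live cases in the if/else chain above. */
static unsigned clusters_to_del(struct rec r, unsigned new_highest)
{
	unsigned range = r.cpos + r.clusters;

	if (r.cpos >= new_highest)
		return r.clusters;          /* remove the whole record */
	if (range > new_highest)
		return range - new_highest; /* remove a partial record */
	return 0;                           /* truncate has completed */
}

int main(void)
{
	struct rec tail = { 100, 50 }; /* hypothetical: cpos 100, 50 clusters */

	printf("%u\n", clusters_to_del(tail, 80));  /* 50: whole record */
	printf("%u\n", clusters_to_del(tail, 120)); /* 30: partial */
	printf("%u\n", clusters_to_del(tail, 160)); /* 0: done */
	return 0;
}
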
@@ -1861,7 +3722,8 @@ start:
1861 } 3722 }
1862 3723
1863 credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del, 3724 credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
1864 fe, el); 3725 (struct ocfs2_dinode *)fe_bh->b_data,
3726 el);
1865 handle = ocfs2_start_trans(osb, credits); 3727 handle = ocfs2_start_trans(osb, credits);
1866 if (IS_ERR(handle)) { 3728 if (IS_ERR(handle)) {
1867 status = PTR_ERR(handle); 3729 status = PTR_ERR(handle);
@@ -1870,13 +3732,8 @@ start:
1870 goto bail; 3732 goto bail;
1871 } 3733 }
1872 3734
1873 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 3735 status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
1874 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); 3736 tc, path);
1875 if (status < 0)
1876 mlog_errno(status);
1877
1878 status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh,
1879 last_eb_bh, handle, tc);
1880 if (status < 0) { 3737 if (status < 0) {
1881 mlog_errno(status); 3738 mlog_errno(status);
1882 goto bail; 3739 goto bail;
@@ -1888,9 +3745,14 @@ start:
1888 ocfs2_commit_trans(osb, handle); 3745 ocfs2_commit_trans(osb, handle);
1889 handle = NULL; 3746 handle = NULL;
1890 3747
1891 BUG_ON(le32_to_cpu(fe->i_clusters) < target_i_clusters); 3748 ocfs2_reinit_path(path, 1);
1892 if (le32_to_cpu(fe->i_clusters) > target_i_clusters) 3749
1893 goto start; 3750 /*
3751 * The check above will catch the case where we've truncated
3752 * away all allocation.
3753 */
3754 goto start;
3755
1894bail: 3756bail:
1895 up_write(&OCFS2_I(inode)->ip_alloc_sem); 3757 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1896 3758
@@ -1902,8 +3764,7 @@ bail:
1902 if (handle) 3764 if (handle)
1903 ocfs2_commit_trans(osb, handle); 3765 ocfs2_commit_trans(osb, handle);
1904 3766
1905 if (last_eb_bh) 3767 ocfs2_free_path(path);
1906 brelse(last_eb_bh);
1907 3768
1908 /* This will drop the ext_alloc cluster lock for us */ 3769 /* This will drop the ext_alloc cluster lock for us */
1909 ocfs2_free_truncate_context(tc); 3770 ocfs2_free_truncate_context(tc);
@@ -1912,7 +3773,6 @@ bail:
1912 return status; 3773 return status;
1913} 3774}
1914 3775
1915
1916/* 3776/*
1917 * Expects the inode to already be locked. This will figure out which 3777 * Expects the inode to already be locked. This will figure out which
1918 * inodes need to be locked and will put them on the returned truncate 3778 * inodes need to be locked and will put them on the returned truncate
@@ -1923,7 +3783,7 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
1923 struct buffer_head *fe_bh, 3783 struct buffer_head *fe_bh,
1924 struct ocfs2_truncate_context **tc) 3784 struct ocfs2_truncate_context **tc)
1925{ 3785{
1926 int status, metadata_delete; 3786 int status, metadata_delete, i;
1927 unsigned int new_i_clusters; 3787 unsigned int new_i_clusters;
1928 struct ocfs2_dinode *fe; 3788 struct ocfs2_dinode *fe;
1929 struct ocfs2_extent_block *eb; 3789 struct ocfs2_extent_block *eb;
@@ -1944,21 +3804,6 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
1944 "%llu\n", fe->i_clusters, new_i_clusters, 3804 "%llu\n", fe->i_clusters, new_i_clusters,
1945 (unsigned long long)fe->i_size); 3805 (unsigned long long)fe->i_size);
1946 3806
1947 if (le32_to_cpu(fe->i_clusters) <= new_i_clusters) {
1948 ocfs2_error(inode->i_sb, "Dinode %llu has cluster count "
1949 "%u and size %llu whereas struct inode has "
1950 "cluster count %u and size %llu which caused an "
1951 "invalid truncate to %u clusters.",
1952 (unsigned long long)le64_to_cpu(fe->i_blkno),
1953 le32_to_cpu(fe->i_clusters),
1954 (unsigned long long)le64_to_cpu(fe->i_size),
1955 OCFS2_I(inode)->ip_clusters, i_size_read(inode),
1956 new_i_clusters);
1957 mlog_meta_lvb(ML_ERROR, &OCFS2_I(inode)->ip_meta_lockres);
1958 status = -EIO;
1959 goto bail;
1960 }
1961
1962 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL); 3807 *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
1963 if (!(*tc)) { 3808 if (!(*tc)) {
1964 status = -ENOMEM; 3809 status = -ENOMEM;
@@ -1986,7 +3831,15 @@ int ocfs2_prepare_truncate(struct ocfs2_super *osb,
1986 goto bail; 3831 goto bail;
1987 } 3832 }
1988 el = &(eb->h_list); 3833 el = &(eb->h_list);
1989 if (le32_to_cpu(el->l_recs[0].e_cpos) >= new_i_clusters) 3834
3835 i = 0;
3836 if (ocfs2_is_empty_extent(&el->l_recs[0]))
3837 i = 1;
3838 /*
3839 * XXX: Should we check that next_free_rec contains
3840 * the extent?
3841 */
3842 if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_i_clusters)
1990 metadata_delete = 1; 3843 metadata_delete = 1;
1991 } 3844 }
1992 3845
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 0b82e8044325..fbcb5934a081 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -31,7 +31,8 @@ int ocfs2_insert_extent(struct ocfs2_super *osb,
31 handle_t *handle, 31 handle_t *handle,
32 struct inode *inode, 32 struct inode *inode,
33 struct buffer_head *fe_bh, 33 struct buffer_head *fe_bh,
34 u64 blkno, 34 u32 cpos,
35 u64 start_blk,
35 u32 new_clusters, 36 u32 new_clusters,
36 struct ocfs2_alloc_context *meta_ac); 37 struct ocfs2_alloc_context *meta_ac);
37int ocfs2_num_free_extents(struct ocfs2_super *osb, 38int ocfs2_num_free_extents(struct ocfs2_super *osb,
@@ -70,6 +71,8 @@ struct ocfs2_truncate_context {
70 struct buffer_head *tc_last_eb_bh; 71 struct buffer_head *tc_last_eb_bh;
71}; 72};
72 73
74int ocfs2_zero_tail_for_truncate(struct inode *inode, handle_t *handle,
75 u64 new_i_size);
73int ocfs2_prepare_truncate(struct ocfs2_super *osb, 76int ocfs2_prepare_truncate(struct ocfs2_super *osb,
74 struct inode *inode, 77 struct inode *inode,
75 struct buffer_head *fe_bh, 78 struct buffer_head *fe_bh,
@@ -79,4 +82,26 @@ int ocfs2_commit_truncate(struct ocfs2_super *osb,
79 struct buffer_head *fe_bh, 82 struct buffer_head *fe_bh,
80 struct ocfs2_truncate_context *tc); 83 struct ocfs2_truncate_context *tc);
81 84
85int ocfs2_find_leaf(struct inode *inode, struct ocfs2_extent_list *root_el,
86 u32 cpos, struct buffer_head **leaf_bh);
87
88/*
89 * Helper function to look at the # of clusters in an extent record.
90 */
91static inline unsigned int ocfs2_rec_clusters(struct ocfs2_extent_list *el,
92 struct ocfs2_extent_rec *rec)
93{
94 /*
95 * Cluster count in extent records is slightly different
96 * between interior nodes and leaf nodes. This is to support
97 * unwritten extents which need a flags field in leaf node
98 * records, thus shrinking the available space for a clusters
99 * field.
100 */
101 if (el->l_tree_depth)
102 return le32_to_cpu(rec->e_int_clusters);
103 else
104 return le16_to_cpu(rec->e_leaf_clusters);
105}
106
82#endif /* OCFS2_ALLOC_H */ 107#endif /* OCFS2_ALLOC_H */
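
A toy model of the distinction ocfs2_rec_clusters() papers over: leaf records trade cluster-count width for a flags field, while interior records keep the full 32 bits. The struct here is purely illustrative, not the on-disk format:

#include <stdio.h>
#include <stdint.h>

/* Toy record, loosely mirroring the split between e_int_clusters
 * (interior nodes) and e_leaf_clusters + flags (leaf nodes). */
struct toy_rec {
	uint32_t int_clusters;  /* meaningful when tree_depth > 0 */
	uint16_t leaf_clusters; /* meaningful at depth 0 ... */
	uint8_t  flags;         /* ... where flags (e.g. unwritten) live */
};

static unsigned rec_clusters(int tree_depth, const struct toy_rec *r)
{
	/* Same dispatch as ocfs2_rec_clusters(): interior records
	 * carry a wider count than leaf records. */
	return tree_depth ? r->int_clusters : r->leaf_clusters;
}

int main(void)
{
	struct toy_rec leaf = { 0, 8, 0x01 };
	struct toy_rec node = { 4096, 0, 0 };

	printf("leaf: %u clusters, interior: %u clusters\n",
	       rec_clusters(0, &leaf), rec_clusters(1, &node));
	return 0;
}
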
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 875c11443817..56963e6c46c0 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -24,6 +24,8 @@
24#include <linux/highmem.h> 24#include <linux/highmem.h>
25#include <linux/pagemap.h> 25#include <linux/pagemap.h>
26#include <asm/byteorder.h> 26#include <asm/byteorder.h>
27#include <linux/swap.h>
28#include <linux/pipe_fs_i.h>
27 29
28#define MLOG_MASK_PREFIX ML_FILE_IO 30#define MLOG_MASK_PREFIX ML_FILE_IO
29#include <cluster/masklog.h> 31#include <cluster/masklog.h>
@@ -37,6 +39,7 @@
37#include "file.h" 39#include "file.h"
38#include "inode.h" 40#include "inode.h"
39#include "journal.h" 41#include "journal.h"
42#include "suballoc.h"
40#include "super.h" 43#include "super.h"
41#include "symlink.h" 44#include "symlink.h"
42 45
@@ -134,7 +137,9 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
134 struct buffer_head *bh_result, int create) 137 struct buffer_head *bh_result, int create)
135{ 138{
136 int err = 0; 139 int err = 0;
140 unsigned int ext_flags;
137 u64 p_blkno, past_eof; 141 u64 p_blkno, past_eof;
142 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
138 143
139 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode, 144 mlog_entry("(0x%p, %llu, 0x%p, %d)\n", inode,
140 (unsigned long long)iblock, bh_result, create); 145 (unsigned long long)iblock, bh_result, create);
@@ -149,17 +154,8 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
149 goto bail; 154 goto bail;
150 } 155 }
151 156
152 /* this can happen if another node truncs after our extend! */ 157 err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, NULL,
153 spin_lock(&OCFS2_I(inode)->ip_lock); 158 &ext_flags);
154 if (iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
155 OCFS2_I(inode)->ip_clusters))
156 err = -EIO;
157 spin_unlock(&OCFS2_I(inode)->ip_lock);
158 if (err)
159 goto bail;
160
161 err = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno,
162 NULL);
163 if (err) { 159 if (err) {
164 mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, " 160 mlog(ML_ERROR, "Error %d from get_blocks(0x%p, %llu, 1, "
165 "%llu, NULL)\n", err, inode, (unsigned long long)iblock, 161 "%llu, NULL)\n", err, inode, (unsigned long long)iblock,
@@ -167,22 +163,39 @@ static int ocfs2_get_block(struct inode *inode, sector_t iblock,
167 goto bail; 163 goto bail;
168 } 164 }
169 165
170 map_bh(bh_result, inode->i_sb, p_blkno); 166 /*
171 167 * ocfs2 never allocates in this function - the only time we
172 if (bh_result->b_blocknr == 0) { 168 * need to use BH_New is when we're extending i_size on a file
173 err = -EIO; 169 * system which doesn't support holes, in which case BH_New
174 mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n", 170 * allows block_prepare_write() to zero.
175 (unsigned long long)iblock, 171 */
176 (unsigned long long)p_blkno, 172 mlog_bug_on_msg(create && p_blkno == 0 && ocfs2_sparse_alloc(osb),
177 (unsigned long long)OCFS2_I(inode)->ip_blkno); 173 "ino %lu, iblock %llu\n", inode->i_ino,
178 } 174 (unsigned long long)iblock);
175
176 /* Treat the unwritten extent as a hole for zeroing purposes. */
177 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
178 map_bh(bh_result, inode->i_sb, p_blkno);
179
180 if (!ocfs2_sparse_alloc(osb)) {
181 if (p_blkno == 0) {
182 err = -EIO;
183 mlog(ML_ERROR,
184 "iblock = %llu p_blkno = %llu blkno=(%llu)\n",
185 (unsigned long long)iblock,
186 (unsigned long long)p_blkno,
187 (unsigned long long)OCFS2_I(inode)->ip_blkno);
188 mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters);
189 dump_stack();
190 }
179 191
180 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 192 past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
181 mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino, 193 mlog(0, "Inode %lu, past_eof = %llu\n", inode->i_ino,
182 (unsigned long long)past_eof); 194 (unsigned long long)past_eof);
183 195
184 if (create && (iblock >= past_eof)) 196 if (create && (iblock >= past_eof))
185 set_buffer_new(bh_result); 197 set_buffer_new(bh_result);
198 }
186 199
187bail: 200bail:
188 if (err < 0) 201 if (err < 0)
@@ -276,8 +289,11 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
276 return ret; 289 return ret;
277} 290}
278 291
279/* This can also be called from ocfs2_write_zero_page() which has done 292/*
280 * its own cluster locking. */ 293 * This is called from ocfs2_write_zero_page() which has handled its
294 * own cluster locking and has ensured allocation exists for those
295 * blocks to be written.
296 */
281int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, 297int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
282 unsigned from, unsigned to) 298 unsigned from, unsigned to)
283{ 299{
@@ -292,44 +308,17 @@ int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page,
292 return ret; 308 return ret;
293} 309}
294 310
295/*
296 * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called
297 * from loopback. It must be able to perform its own locking around
298 * ocfs2_get_block().
299 */
300static int ocfs2_prepare_write(struct file *file, struct page *page,
301 unsigned from, unsigned to)
302{
303 struct inode *inode = page->mapping->host;
304 int ret;
305
306 mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
307
308 ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
309 if (ret != 0) {
310 mlog_errno(ret);
311 goto out;
312 }
313
314 ret = ocfs2_prepare_write_nolock(inode, page, from, to);
315
316 ocfs2_meta_unlock(inode, 0);
317out:
318 mlog_exit(ret);
319 return ret;
320}
321
322/* Taken from ext3. We don't necessarily need the full blown 311/* Taken from ext3. We don't necessarily need the full blown
323 * functionality yet, but IMHO it's better to cut and paste the whole 312 * functionality yet, but IMHO it's better to cut and paste the whole
324 * thing so we can avoid introducing our own bugs (and easily pick up 313 * thing so we can avoid introducing our own bugs (and easily pick up
325 * their fixes when they happen) --Mark */ 314 * their fixes when they happen) --Mark */
326static int walk_page_buffers( handle_t *handle, 315int walk_page_buffers( handle_t *handle,
327 struct buffer_head *head, 316 struct buffer_head *head,
328 unsigned from, 317 unsigned from,
329 unsigned to, 318 unsigned to,
330 int *partial, 319 int *partial,
331 int (*fn)( handle_t *handle, 320 int (*fn)( handle_t *handle,
332 struct buffer_head *bh)) 321 struct buffer_head *bh))
333{ 322{
334 struct buffer_head *bh; 323 struct buffer_head *bh;
335 unsigned block_start, block_end; 324 unsigned block_start, block_end;
@@ -388,95 +377,6 @@ out:
388 return handle; 377 return handle;
389} 378}
390 379
391static int ocfs2_commit_write(struct file *file, struct page *page,
392 unsigned from, unsigned to)
393{
394 int ret;
395 struct buffer_head *di_bh = NULL;
396 struct inode *inode = page->mapping->host;
397 handle_t *handle = NULL;
398 struct ocfs2_dinode *di;
399
400 mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to);
401
402 /* NOTE: ocfs2_file_aio_write has ensured that it's safe for
403 * us to continue here without rechecking the I/O against
404 * changed inode values.
405 *
406 * 1) We're currently holding the inode alloc lock, so no
407 * nodes can change it underneath us.
408 *
409 * 2) We've had to take the metadata lock at least once
410 * already to check for extending writes, suid removal, etc.
411 * The meta data update code then ensures that we don't get a
412 * stale inode allocation image (i_size, i_clusters, etc).
413 */
414
415 ret = ocfs2_meta_lock_with_page(inode, &di_bh, 1, page);
416 if (ret != 0) {
417 mlog_errno(ret);
418 goto out;
419 }
420
421 ret = ocfs2_data_lock_with_page(inode, 1, page);
422 if (ret != 0) {
423 mlog_errno(ret);
424 goto out_unlock_meta;
425 }
426
427 handle = ocfs2_start_walk_page_trans(inode, page, from, to);
428 if (IS_ERR(handle)) {
429 ret = PTR_ERR(handle);
430 goto out_unlock_data;
431 }
432
433 /* Mark our buffer early. We'd rather catch this error up here
434 * as opposed to after a successful commit_write which would
435 * require us to set back inode->i_size. */
436 ret = ocfs2_journal_access(handle, inode, di_bh,
437 OCFS2_JOURNAL_ACCESS_WRITE);
438 if (ret < 0) {
439 mlog_errno(ret);
440 goto out_commit;
441 }
442
443 /* might update i_size */
444 ret = generic_commit_write(file, page, from, to);
445 if (ret < 0) {
446 mlog_errno(ret);
447 goto out_commit;
448 }
449
450 di = (struct ocfs2_dinode *)di_bh->b_data;
451
452 /* ocfs2_mark_inode_dirty() is too heavy to use here. */
453 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
454 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
455 di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
456
457 inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
458 di->i_size = cpu_to_le64((u64)i_size_read(inode));
459
460 ret = ocfs2_journal_dirty(handle, di_bh);
461 if (ret < 0) {
462 mlog_errno(ret);
463 goto out_commit;
464 }
465
466out_commit:
467 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
468out_unlock_data:
469 ocfs2_data_unlock(inode, 1);
470out_unlock_meta:
471 ocfs2_meta_unlock(inode, 1);
472out:
473 if (di_bh)
474 brelse(di_bh);
475
476 mlog_exit(ret);
477 return ret;
478}
479
480static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) 380static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
481{ 381{
482 sector_t status; 382 sector_t status;
@@ -499,8 +399,7 @@ static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
499 down_read(&OCFS2_I(inode)->ip_alloc_sem); 399 down_read(&OCFS2_I(inode)->ip_alloc_sem);
500 } 400 }
501 401
502 err = ocfs2_extent_map_get_blocks(inode, block, 1, &p_blkno, 402 err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL);
503 NULL);
504 403
505 if (!INODE_JOURNAL(inode)) { 404 if (!INODE_JOURNAL(inode)) {
506 up_read(&OCFS2_I(inode)->ip_alloc_sem); 405 up_read(&OCFS2_I(inode)->ip_alloc_sem);
@@ -540,8 +439,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
540 struct buffer_head *bh_result, int create) 439 struct buffer_head *bh_result, int create)
541{ 440{
542 int ret; 441 int ret;
543 u64 p_blkno, inode_blocks; 442 u64 p_blkno, inode_blocks, contig_blocks;
544 int contig_blocks; 443 unsigned int ext_flags;
545 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; 444 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
546 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; 445 unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
547 446
@@ -549,33 +448,20 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
549 * nicely aligned and of the right size, so there's no need 448 * nicely aligned and of the right size, so there's no need
550 * for us to check any of that. */ 449 * for us to check any of that. */
551 450
552 spin_lock(&OCFS2_I(inode)->ip_lock); 451 inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
553 inode_blocks = ocfs2_clusters_to_blocks(inode->i_sb,
554 OCFS2_I(inode)->ip_clusters);
555
556 /*
557 * For a read which begins past the end of file, we return a hole.
558 */
559 if (!create && (iblock >= inode_blocks)) {
560 spin_unlock(&OCFS2_I(inode)->ip_lock);
561 ret = 0;
562 goto bail;
563 }
564 452
565 /* 453 /*
566 * Any write past EOF is not allowed because we'd be extending. 454 * Any write past EOF is not allowed because we'd be extending.
567 */ 455 */
568 if (create && (iblock + max_blocks) > inode_blocks) { 456 if (create && (iblock + max_blocks) > inode_blocks) {
569 spin_unlock(&OCFS2_I(inode)->ip_lock);
570 ret = -EIO; 457 ret = -EIO;
571 goto bail; 458 goto bail;
572 } 459 }
573 spin_unlock(&OCFS2_I(inode)->ip_lock);
574 460
575 /* This figures out the size of the next contiguous block, and 461 /* This figures out the size of the next contiguous block, and
576 * our logical offset */ 462 * our logical offset */
577 ret = ocfs2_extent_map_get_blocks(inode, iblock, 1, &p_blkno, 463 ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
578 &contig_blocks); 464 &contig_blocks, &ext_flags);
579 if (ret) { 465 if (ret) {
580 mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", 466 mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n",
581 (unsigned long long)iblock); 467 (unsigned long long)iblock);
@@ -583,7 +469,37 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
583 goto bail; 469 goto bail;
584 } 470 }
585 471
586 map_bh(bh_result, inode->i_sb, p_blkno); 472 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno) {
473 ocfs2_error(inode->i_sb,
474 "Inode %llu has a hole at block %llu\n",
475 (unsigned long long)OCFS2_I(inode)->ip_blkno,
476 (unsigned long long)iblock);
477 ret = -EROFS;
478 goto bail;
479 }
480
481 /*
482 * get_more_blocks() expects us to describe a hole by clearing
483 * the mapped bit on bh_result().
484 *
485 * Consider an unwritten extent as a hole.
486 */
487 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
488 map_bh(bh_result, inode->i_sb, p_blkno);
489 else {
490 /*
491 * ocfs2_prepare_inode_for_write() should have caught
492 * the case where we'd be filling a hole and triggered
493 * a buffered write instead.
494 */
495 if (create) {
496 ret = -EIO;
497 mlog_errno(ret);
498 goto bail;
499 }
500
501 clear_buffer_mapped(bh_result);
502 }
587 503
588 /* make sure we don't map more than max_blocks blocks here as 504 /* make sure we don't map more than max_blocks blocks here as
589 that's all the kernel will handle at this point. */ 505 that's all the kernel will handle at this point. */
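
The hole handling in ocfs2_direct_IO_get_blocks() above follows the direct-io contract: map the buffer only for real, written blocks, and leave it unmapped so a hole (or unwritten extent) reads back as zeros. A minimal model of that decision; the flag value is invented:

#include <stdio.h>

#define EXT_UNWRITTEN 0x1 /* illustrative flag, mirroring OCFS2_EXT_UNWRITTEN */

/* Returns 1 if the buffer should be mapped to p_blkno, 0 if it
 * should be left unmapped so get_more_blocks() sees a hole. */
static int should_map(unsigned long long p_blkno, unsigned ext_flags)
{
	return p_blkno != 0 && !(ext_flags & EXT_UNWRITTEN);
}

int main(void)
{
	printf("%d %d %d\n",
	       should_map(1234, 0),              /* real extent: map */
	       should_map(0, 0),                 /* hole: don't map */
	       should_map(1234, EXT_UNWRITTEN)); /* unwritten: treat as hole */
	return 0;
}
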
@@ -606,12 +522,17 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
606 void *private) 522 void *private)
607{ 523{
608 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 524 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
525 int level;
609 526
610 /* this io's submitter should not have unlocked this before we could */ 527 /* this io's submitter should not have unlocked this before we could */
611 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 528 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
529
612 ocfs2_iocb_clear_rw_locked(iocb); 530 ocfs2_iocb_clear_rw_locked(iocb);
613 up_read(&inode->i_alloc_sem); 531
614 ocfs2_rw_unlock(inode, 0); 532 level = ocfs2_iocb_rw_locked_level(iocb);
533 if (!level)
534 up_read(&inode->i_alloc_sem);
535 ocfs2_rw_unlock(inode, level);
615} 536}
616 537
617/* 538/*
@@ -647,23 +568,27 @@ static ssize_t ocfs2_direct_IO(int rw,
647 568
648 mlog_entry_void(); 569 mlog_entry_void();
649 570
650 /* 571 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
651 * We get PR data locks even for O_DIRECT. This allows 572 /*
652 * concurrent O_DIRECT I/O but doesn't let O_DIRECT with 573 * We get PR data locks even for O_DIRECT. This
653 * extending and buffered zeroing writes race. If they did 574 * allows concurrent O_DIRECT I/O but doesn't let
654 * race then the buffered zeroing could be written back after 575 * O_DIRECT with extending and buffered zeroing writes
655 * the O_DIRECT I/O. It's one thing to tell people not to mix 576 * race. If they did race then the buffered zeroing
656 * buffered and O_DIRECT writes, but expecting them to 577 * could be written back after the O_DIRECT I/O. It's
657 * understand that file extension is also an implicit buffered 578 * one thing to tell people not to mix buffered and
658 * write is too much. By getting the PR we force writeback of 579 * O_DIRECT writes, but expecting them to understand
659 * the buffered zeroing before proceeding. 580 * that file extension is also an implicit buffered
660 */ 581 * write is too much. By getting the PR we force
661 ret = ocfs2_data_lock(inode, 0); 582 * writeback of the buffered zeroing before
662 if (ret < 0) { 583 * proceeding.
663 mlog_errno(ret); 584 */
664 goto out; 585 ret = ocfs2_data_lock(inode, 0);
586 if (ret < 0) {
587 mlog_errno(ret);
588 goto out;
589 }
590 ocfs2_data_unlock(inode, 0);
665 } 591 }
666 ocfs2_data_unlock(inode, 0);
667 592
668 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 593 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
669 inode->i_sb->s_bdev, iov, offset, 594 inode->i_sb->s_bdev, iov, offset,
@@ -675,11 +600,715 @@ out:
675 return ret; 600 return ret;
676} 601}
677 602
603static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
604 u32 cpos,
605 unsigned int *start,
606 unsigned int *end)
607{
608 unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE;
609
610 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) {
611 unsigned int cpp;
612
613 cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits);
614
615 cluster_start = cpos % cpp;
616 cluster_start = cluster_start << osb->s_clustersize_bits;
617
618 cluster_end = cluster_start + osb->s_clustersize;
619 }
620
621 BUG_ON(cluster_start > PAGE_SIZE);
622 BUG_ON(cluster_end > PAGE_SIZE);
623
624 if (start)
625 *start = cluster_start;
626 if (end)
627 *end = cluster_end;
628}
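
When several clusters share a page, ocfs2_figure_cluster_boundaries() above locates the target cluster's byte range inside the page. A self-contained sketch with illustrative shift values:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12 /* 4 KiB page, illustrative */
#define CSIZE_BITS_DEMO 10 /* 1 KiB clusters, illustrative */

int main(void)
{
	unsigned cpos = 13; /* hypothetical logical cluster offset */
	unsigned start = 0, end = 1u << PAGE_SHIFT_DEMO;

	if (PAGE_SHIFT_DEMO > CSIZE_BITS_DEMO) {
		/* clusters per page */
		unsigned cpp = 1u << (PAGE_SHIFT_DEMO - CSIZE_BITS_DEMO);

		start = (cpos % cpp) << CSIZE_BITS_DEMO;
		end = start + (1u << CSIZE_BITS_DEMO);
	}

	printf("cluster %u occupies bytes [%u, %u) of its page\n",
	       cpos, start, end);
	return 0;
}

Cluster 13 is the second of four clusters in its page, so it occupies bytes [1024, 2048); in the common case of clusters at least as large as pages, the range stays [0, PAGE_SIZE).
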
629
630/*
631 * 'from' and 'to' are the region in the page to avoid zeroing.
632 *
633 * If pagesize > clustersize, this function will avoid zeroing outside
634 * of the cluster boundary.
635 *
636 * from == to == 0 is code for "zero the entire cluster region"
637 */
638static void ocfs2_clear_page_regions(struct page *page,
639 struct ocfs2_super *osb, u32 cpos,
640 unsigned from, unsigned to)
641{
642 void *kaddr;
643 unsigned int cluster_start, cluster_end;
644
645 ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
646
647 kaddr = kmap_atomic(page, KM_USER0);
648
649 if (from || to) {
650 if (from > cluster_start)
651 memset(kaddr + cluster_start, 0, from - cluster_start);
652 if (to < cluster_end)
653 memset(kaddr + to, 0, cluster_end - to);
654 } else {
655 memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
656 }
657
658 kunmap_atomic(kaddr, KM_USER0);
659}
660
661/*
662 * Some of this taken from block_prepare_write(). We already have our
663 * mapping by now though, and the entire write will be allocating or
664 * it won't, so not much need to use BH_New.
665 *
666 * This will also skip zeroing, which is handled externally.
667 */
668int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
669 struct inode *inode, unsigned int from,
670 unsigned int to, int new)
671{
672 int ret = 0;
673 struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
674 unsigned int block_end, block_start;
675 unsigned int bsize = 1 << inode->i_blkbits;
676
677 if (!page_has_buffers(page))
678 create_empty_buffers(page, bsize, 0);
679
680 head = page_buffers(page);
681 for (bh = head, block_start = 0; bh != head || !block_start;
682 bh = bh->b_this_page, block_start += bsize) {
683 block_end = block_start + bsize;
684
685 /*
686 * Ignore blocks outside of our i/o range -
687 * they may belong to unallocated clusters.
688 */
689 if (block_start >= to || block_end <= from) {
690 if (PageUptodate(page))
691 set_buffer_uptodate(bh);
692 continue;
693 }
694
695 /*
696 * For an allocating write with cluster size >= page
697 * size, we always write the entire page.
698 */
699
700 if (buffer_new(bh))
701 clear_buffer_new(bh);
702
703 if (!buffer_mapped(bh)) {
704 map_bh(bh, inode->i_sb, *p_blkno);
705 unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
706 }
707
708 if (PageUptodate(page)) {
709 if (!buffer_uptodate(bh))
710 set_buffer_uptodate(bh);
711 } else if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
712 (block_start < from || block_end > to)) {
713 ll_rw_block(READ, 1, &bh);
714 *wait_bh++=bh;
715 }
716
717 *p_blkno = *p_blkno + 1;
718 }
719
720 /*
721 * If we issued read requests - let them complete.
722 */
723 while(wait_bh > wait) {
724 wait_on_buffer(*--wait_bh);
725 if (!buffer_uptodate(*wait_bh))
726 ret = -EIO;
727 }
728
729 if (ret == 0 || !new)
730 return ret;
731
732 /*
733 * If we get -EIO above, zero out any newly allocated blocks
734 * to avoid exposing stale data.
735 */
736 bh = head;
737 block_start = 0;
738 do {
739 void *kaddr;
740
741 block_end = block_start + bsize;
742 if (block_end <= from)
743 goto next_bh;
744 if (block_start >= to)
745 break;
746
747 kaddr = kmap_atomic(page, KM_USER0);
748 memset(kaddr+block_start, 0, bh->b_size);
749 flush_dcache_page(page);
750 kunmap_atomic(kaddr, KM_USER0);
751 set_buffer_uptodate(bh);
752 mark_buffer_dirty(bh);
753
754next_bh:
755 block_start = block_end;
756 bh = bh->b_this_page;
757 } while (bh != head);
758
759 return ret;
760}
761
762/*
763 * This will copy user data from the buffer page in the splice
764 * context.
765 *
766 * For now, we ignore SPLICE_F_MOVE as that would require some extra
767 * communication out all the way to ocfs2_write().
768 */
769int ocfs2_map_and_write_splice_data(struct inode *inode,
770 struct ocfs2_write_ctxt *wc, u64 *p_blkno,
771 unsigned int *ret_from, unsigned int *ret_to)
772{
773 int ret;
774 unsigned int to, from, cluster_start, cluster_end;
775 char *src, *dst;
776 struct ocfs2_splice_write_priv *sp = wc->w_private;
777 struct pipe_buffer *buf = sp->s_buf;
778 unsigned long bytes, src_from;
779 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
780
781 ocfs2_figure_cluster_boundaries(osb, wc->w_cpos, &cluster_start,
782 &cluster_end);
783
784 from = sp->s_offset;
785 src_from = sp->s_buf_offset;
786 bytes = wc->w_count;
787
788 if (wc->w_large_pages) {
789 /*
790 * For cluster size < page size, we have to
791 * calculate pos within the cluster and obey
792 * the rightmost boundary.
793 */
794 bytes = min(bytes, (unsigned long)(osb->s_clustersize
795 - (wc->w_pos & (osb->s_clustersize - 1))));
796 }
797 to = from + bytes;
798
799 if (wc->w_this_page_new)
800 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
801 cluster_start, cluster_end, 1);
802 else
803 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
804 from, to, 0);
805 if (ret) {
806 mlog_errno(ret);
807 goto out;
808 }
809
810 BUG_ON(from > PAGE_CACHE_SIZE);
811 BUG_ON(to > PAGE_CACHE_SIZE);
812 BUG_ON(from > osb->s_clustersize);
813 BUG_ON(to > osb->s_clustersize);
814
815 src = buf->ops->map(sp->s_pipe, buf, 1);
816 dst = kmap_atomic(wc->w_this_page, KM_USER1);
817 memcpy(dst + from, src + src_from, bytes);
818 kunmap_atomic(wc->w_this_page, KM_USER1);
819 buf->ops->unmap(sp->s_pipe, buf, src);
820
821 wc->w_finished_copy = 1;
822
823 *ret_from = from;
824 *ret_to = to;
825out:
826
827 return bytes ? (unsigned int)bytes : ret;
828}
829
830/*
831 * This will copy user data from the iovec in the buffered write
832 * context.
833 */
834int ocfs2_map_and_write_user_data(struct inode *inode,
835 struct ocfs2_write_ctxt *wc, u64 *p_blkno,
836 unsigned int *ret_from, unsigned int *ret_to)
837{
838 int ret;
839 unsigned int to, from, cluster_start, cluster_end;
840 unsigned long bytes, src_from;
841 char *dst;
842 struct ocfs2_buffered_write_priv *bp = wc->w_private;
843 const struct iovec *cur_iov = bp->b_cur_iov;
844 char __user *buf;
845 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
846
847 ocfs2_figure_cluster_boundaries(osb, wc->w_cpos, &cluster_start,
848 &cluster_end);
849
850 buf = cur_iov->iov_base + bp->b_cur_off;
851 src_from = (unsigned long)buf & ~PAGE_CACHE_MASK;
852
853 from = wc->w_pos & (PAGE_CACHE_SIZE - 1);
854
855 /*
856 * This is a lot of comparisons, but it reads quite
857 * easily, which is important here.
858 */
859 /* Stay within the src page */
860 bytes = PAGE_SIZE - src_from;
861 /* Stay within the vector */
862 bytes = min(bytes,
863 (unsigned long)(cur_iov->iov_len - bp->b_cur_off));
864 /* Stay within count */
865 bytes = min(bytes, (unsigned long)wc->w_count);
866 /*
867 * For clustersize > page size, just stay within
868 * target page, otherwise we have to calculate pos
869 * within the cluster and obey the rightmost
870 * boundary.
871 */
872 if (wc->w_large_pages) {
873 /*
874 * For cluster size < page size, we have to
875 * calculate pos within the cluster and obey
876 * the rightmost boundary.
877 */
878 bytes = min(bytes, (unsigned long)(osb->s_clustersize
879 - (wc->w_pos & (osb->s_clustersize - 1))));
880 } else {
881 /*
882 * cluster size > page size is the most common
883 * case - we just stay within the target page
884 * boundary.
885 */
886 bytes = min(bytes, PAGE_CACHE_SIZE - from);
887 }
888
889 to = from + bytes;
890
891 if (wc->w_this_page_new)
892 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
893 cluster_start, cluster_end, 1);
894 else
895 ret = ocfs2_map_page_blocks(wc->w_this_page, p_blkno, inode,
896 from, to, 0);
897 if (ret) {
898 mlog_errno(ret);
899 goto out;
900 }
901
902 BUG_ON(from > PAGE_CACHE_SIZE);
903 BUG_ON(to > PAGE_CACHE_SIZE);
904 BUG_ON(from > osb->s_clustersize);
905 BUG_ON(to > osb->s_clustersize);
906
907 dst = kmap(wc->w_this_page);
908 memcpy(dst + from, bp->b_src_buf + src_from, bytes);
909 kunmap(wc->w_this_page);
910
911 /*
912 * XXX: This is slow, but simple. The caller of
913 * ocfs2_buffered_write_cluster() is responsible for
914 * passing through the iovecs, so it's difficult to
915 * predict what our next step is in here after our
916 * initial write. A future version should be pushing
917 * that iovec manipulation further down.
918 *
919 * By setting this, we indicate that a copy from user
920 * data was done, and subsequent calls for this
921 * cluster will skip copying more data.
922 */
923 wc->w_finished_copy = 1;
924
925 *ret_from = from;
926 *ret_to = to;
927out:
928
929 return bytes ? (unsigned int)bytes : ret;
930}
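
The bytes computation in ocfs2_map_and_write_user_data() above is a chain of upper bounds: stay within the source page, the current iovec, the remaining count, and (when clusters are smaller than pages) the cluster. A compact model with invented values:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* All illustrative values, not taken from a real write. */
	unsigned long page_size = 4096, cluster_size = 1024;
	unsigned long src_from = 3000; /* offset in the source user page */
	unsigned long iov_left = 9000; /* bytes left in current iovec */
	unsigned long count = 500;     /* bytes left in this write */
	unsigned long pos = 1536;      /* file position */

	unsigned long bytes = page_size - src_from;  /* stay in src page */
	bytes = min_ul(bytes, iov_left);             /* stay in the iovec */
	bytes = min_ul(bytes, count);                /* stay within count */
	/* with clusters smaller than pages, also stay in the cluster */
	bytes = min_ul(bytes, cluster_size - (pos & (cluster_size - 1)));

	printf("copy %lu bytes this pass\n", bytes);
	return 0;
}
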
931
932/*
933 * Map, fill and write a page to disk.
934 *
935 * The work of copying data is done via callback. Newly allocated
936 * pages which don't take user data will be zero'd (set 'new' to
937 * indicate an allocating write)
938 *
939 * Returns a negative error code or the number of bytes copied into
940 * the page.
941 */
942int ocfs2_write_data_page(struct inode *inode, handle_t *handle,
943 u64 *p_blkno, struct page *page,
944 struct ocfs2_write_ctxt *wc, int new)
945{
946 int ret, copied = 0;
947 unsigned int from = 0, to = 0;
948 unsigned int cluster_start, cluster_end;
949 unsigned int zero_from = 0, zero_to = 0;
950
951 ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), wc->w_cpos,
952 &cluster_start, &cluster_end);
953
954 if ((wc->w_pos >> PAGE_CACHE_SHIFT) == page->index
955 && !wc->w_finished_copy) {
956
957 wc->w_this_page = page;
958 wc->w_this_page_new = new;
959 ret = wc->w_write_data_page(inode, wc, p_blkno, &from, &to);
960 if (ret < 0) {
961 mlog_errno(ret);
962 goto out;
963 }
964
965 copied = ret;
966
967 zero_from = from;
968 zero_to = to;
969 if (new) {
970 from = cluster_start;
971 to = cluster_end;
972 }
973 } else {
974 /*
975 * If we haven't allocated the new page yet, we
976 * shouldn't be writing it out without copying user
977 * data. This is likely a math error from the caller.
978 */
979 BUG_ON(!new);
980
981 from = cluster_start;
982 to = cluster_end;
983
984 ret = ocfs2_map_page_blocks(page, p_blkno, inode,
985 cluster_start, cluster_end, 1);
986 if (ret) {
987 mlog_errno(ret);
988 goto out;
989 }
990 }
991
992 /*
993 * Parts of newly allocated pages need to be zero'd.
994 *
995 * Above, we have also rewritten 'to' and 'from' - as far as
996 * the rest of the function is concerned, the entire cluster
997 * range inside of a page needs to be written.
998 *
999 * We can skip this if the page is up to date - it's already
1000 * been zero'd from being read in as a hole.
1001 */
1002 if (new && !PageUptodate(page))
1003 ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
1004 wc->w_cpos, zero_from, zero_to);
1005
1006 flush_dcache_page(page);
1007
1008 if (ocfs2_should_order_data(inode)) {
1009 ret = walk_page_buffers(handle,
1010 page_buffers(page),
1011 from, to, NULL,
1012 ocfs2_journal_dirty_data);
1013 if (ret < 0)
1014 mlog_errno(ret);
1015 }
1016
1017 /*
1018 * We don't use generic_commit_write() because we need to
1019 * handle our own i_size update.
1020 */
1021 ret = block_commit_write(page, from, to);
1022 if (ret)
1023 mlog_errno(ret);
1024out:
1025
1026 return copied ? copied : ret;
1027}
1028
1029/*
1030 * Do the actual write of some data into an inode. Optionally allocate
1031 * in order to fulfill the write.
1032 *
1033 * cpos is the logical cluster offset within the file to write at
1034 *
1035 * 'phys' is the physical mapping of that offset. a 'phys' value of
1036 * zero indicates that allocation is required. In this case, data_ac
1037 * and meta_ac should be valid (meta_ac can be null if metadata
1038 * allocation isn't required).
1039 */
1040static ssize_t ocfs2_write(struct file *file, u32 phys, handle_t *handle,
1041 struct buffer_head *di_bh,
1042 struct ocfs2_alloc_context *data_ac,
1043 struct ocfs2_alloc_context *meta_ac,
1044 struct ocfs2_write_ctxt *wc)
1045{
1046 int ret, i, numpages = 1, new;
1047 unsigned int copied = 0;
1048 u32 tmp_pos;
1049 u64 v_blkno, p_blkno;
1050 struct address_space *mapping = file->f_mapping;
1051 struct inode *inode = mapping->host;
1052 unsigned long index, start;
1053 struct page **cpages;
1054
1055 new = phys == 0 ? 1 : 0;
1056
1057 /*
1058 * Figure out how many pages we'll be manipulating here. For
 1059 * a non-allocating write, we just change the one
 1060 * page. Otherwise, we'll need a whole cluster's worth.
1061 */
1062 if (new)
1063 numpages = ocfs2_pages_per_cluster(inode->i_sb);
1064
1065 cpages = kzalloc(sizeof(*cpages) * numpages, GFP_NOFS);
1066 if (!cpages) {
1067 ret = -ENOMEM;
1068 mlog_errno(ret);
1069 return ret;
1070 }
1071
1072 /*
1073 * Fill our page array first. That way we've grabbed enough so
1074 * that we can zero and flush if we error after adding the
1075 * extent.
1076 */
1077 if (new) {
1078 start = ocfs2_align_clusters_to_page_index(inode->i_sb,
1079 wc->w_cpos);
1080 v_blkno = ocfs2_clusters_to_blocks(inode->i_sb, wc->w_cpos);
1081 } else {
1082 start = wc->w_pos >> PAGE_CACHE_SHIFT;
1083 v_blkno = wc->w_pos >> inode->i_sb->s_blocksize_bits;
1084 }
1085
1086 for(i = 0; i < numpages; i++) {
1087 index = start + i;
1088
1089 cpages[i] = grab_cache_page(mapping, index);
1090 if (!cpages[i]) {
1091 ret = -ENOMEM;
1092 mlog_errno(ret);
1093 goto out;
1094 }
1095 }
1096
1097 if (new) {
1098 /*
1099 * This is safe to call with the page locks - it won't take
1100 * any additional semaphores or cluster locks.
1101 */
1102 tmp_pos = wc->w_cpos;
1103 ret = ocfs2_do_extend_allocation(OCFS2_SB(inode->i_sb), inode,
1104 &tmp_pos, 1, di_bh, handle,
1105 data_ac, meta_ac, NULL);
1106 /*
1107 * This shouldn't happen because we must have already
1108 * calculated the correct meta data allocation required. The
1109 * internal tree allocation code should know how to increase
1110 * transaction credits itself.
1111 *
1112 * If need be, we could handle -EAGAIN for a
1113 * RESTART_TRANS here.
1114 */
1115 mlog_bug_on_msg(ret == -EAGAIN,
1116 "Inode %llu: EAGAIN return during allocation.\n",
1117 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1118 if (ret < 0) {
1119 mlog_errno(ret);
1120 goto out;
1121 }
1122 }
1123
1124 ret = ocfs2_extent_map_get_blocks(inode, v_blkno, &p_blkno, NULL,
1125 NULL);
1126 if (ret < 0) {
1127
1128 /*
1129 * XXX: Should we go readonly here?
1130 */
1131
1132 mlog_errno(ret);
1133 goto out;
1134 }
1135
1136 BUG_ON(p_blkno == 0);
1137
1138 for(i = 0; i < numpages; i++) {
1139 ret = ocfs2_write_data_page(inode, handle, &p_blkno, cpages[i],
1140 wc, new);
1141 if (ret < 0) {
1142 mlog_errno(ret);
1143 goto out;
1144 }
1145
1146 copied += ret;
1147 }
1148
1149out:
1150 for(i = 0; i < numpages; i++) {
1151 unlock_page(cpages[i]);
1152 mark_page_accessed(cpages[i]);
1153 page_cache_release(cpages[i]);
1154 }
1155 kfree(cpages);
1156
1157 return copied ? copied : ret;
1158}
1159
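
For the page-array sizing in ocfs2_write() above: one page for a non-allocating write, a full cluster's worth otherwise. A sketch of ocfs2_pages_per_cluster()-style arithmetic under assumed sizes:

#include <stdio.h>

#define PAGE_SHIFT_DEMO 12 /* illustrative */

static int pages_per_cluster(int clustersize_bits)
{
	/* One page if the cluster fits in a page, otherwise the
	 * number of pages a cluster spans. */
	if (clustersize_bits <= PAGE_SHIFT_DEMO)
		return 1;
	return 1 << (clustersize_bits - PAGE_SHIFT_DEMO);
}

int main(void)
{
	int new = 1; /* allocating write? */
	int numpages = new ? pages_per_cluster(16) : 1; /* 64 KiB clusters */

	printf("manipulating %d page(s)\n", numpages);
	return 0;
}
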
1160static void ocfs2_write_ctxt_init(struct ocfs2_write_ctxt *wc,
1161 struct ocfs2_super *osb, loff_t pos,
1162 size_t count, ocfs2_page_writer *cb,
1163 void *cb_priv)
1164{
1165 wc->w_count = count;
1166 wc->w_pos = pos;
1167 wc->w_cpos = wc->w_pos >> osb->s_clustersize_bits;
1168 wc->w_finished_copy = 0;
1169
1170 if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits))
1171 wc->w_large_pages = 1;
1172 else
1173 wc->w_large_pages = 0;
1174
1175 wc->w_write_data_page = cb;
1176 wc->w_private = cb_priv;
1177}
1178
1179/*
1180 * Write a cluster to an inode. The cluster may not be allocated yet,
1181 * in which case it will be. This only exists for buffered writes -
1182 * O_DIRECT takes a more "traditional" path through the kernel.
1183 *
1184 * The caller is responsible for incrementing pos, written counts, etc
1185 *
1186 * For file systems that don't support sparse files, pre-allocation
1187 * and page zeroing up until cpos should be done prior to this
1188 * function call.
1189 *
1190 * Callers should be holding i_sem, and the rw cluster lock.
1191 *
1192 * Returns the number of user bytes written, or less than zero for
1193 * error.
1194 */
1195ssize_t ocfs2_buffered_write_cluster(struct file *file, loff_t pos,
1196 size_t count, ocfs2_page_writer *actor,
1197 void *priv)
1198{
1199 int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
1200 ssize_t written = 0;
1201 u32 phys;
1202 struct inode *inode = file->f_mapping->host;
1203 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1204 struct buffer_head *di_bh = NULL;
1205 struct ocfs2_dinode *di;
1206 struct ocfs2_alloc_context *data_ac = NULL;
1207 struct ocfs2_alloc_context *meta_ac = NULL;
1208 handle_t *handle;
1209 struct ocfs2_write_ctxt wc;
1210
1211 ocfs2_write_ctxt_init(&wc, osb, pos, count, actor, priv);
1212
1213 ret = ocfs2_meta_lock(inode, &di_bh, 1);
1214 if (ret) {
1215 mlog_errno(ret);
1216 goto out;
1217 }
1218 di = (struct ocfs2_dinode *)di_bh->b_data;
1219
1220 /*
1221 * Take alloc sem here to prevent concurrent lookups. That way
1222 * the mapping, zeroing and tree manipulation within
1223 * ocfs2_write() will be safe against ->readpage(). This
1224 * should also serve to lock out allocation from a shared
1225 * writeable region.
1226 */
1227 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1228
1229 ret = ocfs2_get_clusters(inode, wc.w_cpos, &phys, NULL, NULL);
1230 if (ret) {
1231 mlog_errno(ret);
1232 goto out_meta;
1233 }
1234
1235 /* phys == 0 means that allocation is required. */
1236 if (phys == 0) {
1237 ret = ocfs2_lock_allocators(inode, di, 1, &data_ac, &meta_ac);
1238 if (ret) {
1239 mlog_errno(ret);
1240 goto out_meta;
1241 }
1242
1243 credits = ocfs2_calc_extend_credits(inode->i_sb, di, 1);
1244 }
1245
1246 ret = ocfs2_data_lock(inode, 1);
1247 if (ret) {
1248 mlog_errno(ret);
1249 goto out_meta;
1250 }
1251
1252 handle = ocfs2_start_trans(osb, credits);
1253 if (IS_ERR(handle)) {
1254 ret = PTR_ERR(handle);
1255 mlog_errno(ret);
1256 goto out_data;
1257 }
1258
1259 written = ocfs2_write(file, phys, handle, di_bh, data_ac,
1260 meta_ac, &wc);
1261 if (written < 0) {
1262 ret = written;
1263 mlog_errno(ret);
1264 goto out_commit;
1265 }
1266
1267 ret = ocfs2_journal_access(handle, inode, di_bh,
1268 OCFS2_JOURNAL_ACCESS_WRITE);
1269 if (ret) {
1270 mlog_errno(ret);
1271 goto out_commit;
1272 }
1273
1274 pos += written;
1275 if (pos > inode->i_size) {
1276 i_size_write(inode, pos);
1277 mark_inode_dirty(inode);
1278 }
1279 inode->i_blocks = ocfs2_inode_sector_count(inode);
1280 di->i_size = cpu_to_le64((u64)i_size_read(inode));
1281 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1282 di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
1283 di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
1284
1285 ret = ocfs2_journal_dirty(handle, di_bh);
1286 if (ret)
1287 mlog_errno(ret);
1288
1289out_commit:
1290 ocfs2_commit_trans(osb, handle);
1291
1292out_data:
1293 ocfs2_data_unlock(inode, 1);
1294
1295out_meta:
1296 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1297 ocfs2_meta_unlock(inode, 1);
1298
1299out:
1300 brelse(di_bh);
1301 if (data_ac)
1302 ocfs2_free_alloc_context(data_ac);
1303 if (meta_ac)
1304 ocfs2_free_alloc_context(meta_ac);
1305
1306 return written ? written : ret;
1307}
1308
678const struct address_space_operations ocfs2_aops = { 1309const struct address_space_operations ocfs2_aops = {
679 .readpage = ocfs2_readpage, 1310 .readpage = ocfs2_readpage,
680 .writepage = ocfs2_writepage, 1311 .writepage = ocfs2_writepage,
681 .prepare_write = ocfs2_prepare_write,
682 .commit_write = ocfs2_commit_write,
683 .bmap = ocfs2_bmap, 1312 .bmap = ocfs2_bmap,
684 .sync_page = block_sync_page, 1313 .sync_page = block_sync_page,
685 .direct_IO = ocfs2_direct_IO, 1314 .direct_IO = ocfs2_direct_IO,
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index f446a15eab88..45821d479b5a 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -30,12 +30,83 @@ handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
30 unsigned from, 30 unsigned from,
31 unsigned to); 31 unsigned to);
32 32
33int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
34 struct inode *inode, unsigned int from,
35 unsigned int to, int new);
36
37int walk_page_buffers( handle_t *handle,
38 struct buffer_head *head,
39 unsigned from,
40 unsigned to,
41 int *partial,
42 int (*fn)( handle_t *handle,
43 struct buffer_head *bh));
44
45struct ocfs2_write_ctxt;
46typedef int (ocfs2_page_writer)(struct inode *, struct ocfs2_write_ctxt *,
47 u64 *, unsigned int *, unsigned int *);
48
49ssize_t ocfs2_buffered_write_cluster(struct file *file, loff_t pos,
50 size_t count, ocfs2_page_writer *actor,
51 void *priv);
52
53struct ocfs2_write_ctxt {
54 size_t w_count;
55 loff_t w_pos;
56 u32 w_cpos;
57 unsigned int w_finished_copy;
58
59 /* This is true if page_size > cluster_size */
60 unsigned int w_large_pages;
61
62 /* Filler callback and private data */
63 ocfs2_page_writer *w_write_data_page;
64 void *w_private;
65
66 /* Only valid for the filler callback */
67 struct page *w_this_page;
68 unsigned int w_this_page_new;
69};
70
71struct ocfs2_buffered_write_priv {
72 char *b_src_buf;
73 const struct iovec *b_cur_iov; /* Current iovec */
74 size_t b_cur_off; /* Offset in the
75 * current iovec */
76};
77int ocfs2_map_and_write_user_data(struct inode *inode,
78 struct ocfs2_write_ctxt *wc,
79 u64 *p_blkno,
80 unsigned int *ret_from,
81 unsigned int *ret_to);
82
83struct ocfs2_splice_write_priv {
84 struct splice_desc *s_sd;
85 struct pipe_buffer *s_buf;
86 struct pipe_inode_info *s_pipe;
87 /* Neither offset value is ever larger than one page */
88 unsigned int s_offset;
89 unsigned int s_buf_offset;
90};
91int ocfs2_map_and_write_splice_data(struct inode *inode,
92 struct ocfs2_write_ctxt *wc,
93 u64 *p_blkno,
94 unsigned int *ret_from,
95 unsigned int *ret_to);
96
33/* all ocfs2_dio_end_io()'s fault */ 97/* all ocfs2_dio_end_io()'s fault */
34#define ocfs2_iocb_is_rw_locked(iocb) \ 98#define ocfs2_iocb_is_rw_locked(iocb) \
35 test_bit(0, (unsigned long *)&iocb->private) 99 test_bit(0, (unsigned long *)&iocb->private)
36#define ocfs2_iocb_set_rw_locked(iocb) \ 100static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
37 set_bit(0, (unsigned long *)&iocb->private) 101{
102 set_bit(0, (unsigned long *)&iocb->private);
103 if (level)
104 set_bit(1, (unsigned long *)&iocb->private);
105 else
106 clear_bit(1, (unsigned long *)&iocb->private);
107}
38#define ocfs2_iocb_clear_rw_locked(iocb) \ 108#define ocfs2_iocb_clear_rw_locked(iocb) \
39 clear_bit(0, (unsigned long *)&iocb->private) 109 clear_bit(0, (unsigned long *)&iocb->private)
40 110#define ocfs2_iocb_rw_locked_level(iocb) \
111 test_bit(1, (unsigned long *)&iocb->private)
41#endif /* OCFS2_FILE_H */ 112#endif /* OCFS2_FILE_H */
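
The hunk above turns ocfs2_iocb_set_rw_locked from a macro into an inline so that iocb->private carries two bits of state: bit 0 records that the rw lock is held, and bit 1 records the level it was taken at, which ocfs2_iocb_rw_locked_level() can later query. A minimal user-space sketch of the same two-bit encoding; the helper names below are illustrative, not from the kernel:

	#include <stdio.h>

	#define RW_LOCKED_BIT 0	/* bit 0: the rw lock is held */
	#define RW_LEVEL_BIT  1	/* bit 1: lock level (0 = read, 1 = write) */

	static void set_rw_locked(unsigned long *priv, int level)
	{
		*priv |= 1UL << RW_LOCKED_BIT;
		if (level)
			*priv |= 1UL << RW_LEVEL_BIT;
		else
			*priv &= ~(1UL << RW_LEVEL_BIT);
	}

	static int rw_locked_level(unsigned long priv)
	{
		return (priv >> RW_LEVEL_BIT) & 1;
	}

	int main(void)
	{
		unsigned long priv = 0;

		set_rw_locked(&priv, 1);	/* take the lock at write level */
		printf("locked=%lu level=%d\n",
		       (priv >> RW_LOCKED_BIT) & 1UL, rw_locked_level(priv));
		return 0;
	}
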
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 4705d659fe57..bbacf7da48a4 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -46,6 +46,7 @@
46#include <linux/kernel.h> 46#include <linux/kernel.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <linux/workqueue.h> 48#include <linux/workqueue.h>
49#include <linux/reboot.h>
49 50
50#include "heartbeat.h" 51#include "heartbeat.h"
51#include "nodemanager.h" 52#include "nodemanager.h"
@@ -72,7 +73,9 @@ static void o2quo_fence_self(void)
72 /* panic spins with interrupts enabled. with preempt 73 /* panic spins with interrupts enabled. with preempt
73 * threads can still schedule, etc, etc */ 74 * threads can still schedule, etc, etc */
74 o2hb_stop_all_regions(); 75 o2hb_stop_all_regions();
75 panic("ocfs2 is very sorry to be fencing this system by panicing\n"); 76
77 printk("ocfs2 is very sorry to be fencing this system by restarting\n");
78 emergency_restart();
76} 79}
77 80
78/* Indicate that a timeout occurred on a heartbeat region write. The 81
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 4dae5df5e467..9606111fe89d 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -38,6 +38,9 @@
38 * locking semantics of the file system using the protocol. It should 38 * locking semantics of the file system using the protocol. It should
39 * be somewhere else, I'm sure, but right now it isn't. 39 * be somewhere else, I'm sure, but right now it isn't.
40 * 40 *
41 * New in version 8:
42 * - Replace delete inode votes with a cluster lock
43 *
41 * New in version 7: 44 * New in version 7:
42 * - DLM join domain includes the live nodemap 45 * - DLM join domain includes the live nodemap
43 * 46 *
@@ -57,7 +60,7 @@
57 * - full 64 bit i_size in the metadata lock lvbs 60 * - full 64 bit i_size in the metadata lock lvbs
58 * - introduction of "rw" lock and pushing meta/data locking down 61 * - introduction of "rw" lock and pushing meta/data locking down
59 */ 62 */
60#define O2NET_PROTOCOL_VERSION 7ULL 63#define O2NET_PROTOCOL_VERSION 8ULL
61struct o2net_handshake { 64struct o2net_handshake {
62 __be64 protocol_version; 65 __be64 protocol_version;
63 __be64 connector_id; 66 __be64 connector_id;
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 66821e178167..67e6866a2a4f 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -358,15 +358,17 @@ int ocfs2_do_extend_dir(struct super_block *sb,
358{ 358{
359 int status; 359 int status;
360 int extend; 360 int extend;
361 u64 p_blkno; 361 u64 p_blkno, v_blkno;
362 362
363 spin_lock(&OCFS2_I(dir)->ip_lock); 363 spin_lock(&OCFS2_I(dir)->ip_lock);
364 extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters)); 364 extend = (i_size_read(dir) == ocfs2_clusters_to_bytes(sb, OCFS2_I(dir)->ip_clusters));
365 spin_unlock(&OCFS2_I(dir)->ip_lock); 365 spin_unlock(&OCFS2_I(dir)->ip_lock);
366 366
367 if (extend) { 367 if (extend) {
368 status = ocfs2_do_extend_allocation(OCFS2_SB(sb), dir, 1, 368 u32 offset = OCFS2_I(dir)->ip_clusters;
369 parent_fe_bh, handle, 369
370 status = ocfs2_do_extend_allocation(OCFS2_SB(sb), dir, &offset,
371 1, parent_fe_bh, handle,
370 data_ac, meta_ac, NULL); 372 data_ac, meta_ac, NULL);
371 BUG_ON(status == -EAGAIN); 373 BUG_ON(status == -EAGAIN);
372 if (status < 0) { 374 if (status < 0) {
@@ -375,9 +377,8 @@ int ocfs2_do_extend_dir(struct super_block *sb,
375 } 377 }
376 } 378 }
377 379
378 status = ocfs2_extent_map_get_blocks(dir, (dir->i_blocks >> 380 v_blkno = ocfs2_blocks_for_bytes(sb, i_size_read(dir));
379 (sb->s_blocksize_bits - 9)), 381 status = ocfs2_extent_map_get_blocks(dir, v_blkno, &p_blkno, NULL, NULL);
380 1, &p_blkno, NULL);
381 if (status < 0) { 382 if (status < 0) {
382 mlog_errno(status); 383 mlog_errno(status);
383 goto bail; 384 goto bail;
@@ -486,7 +487,7 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
486 487
487 dir_i_size += dir->i_sb->s_blocksize; 488 dir_i_size += dir->i_sb->s_blocksize;
488 i_size_write(dir, dir_i_size); 489 i_size_write(dir, dir_i_size);
489 dir->i_blocks = ocfs2_align_bytes_to_sectors(dir_i_size); 490 dir->i_blocks = ocfs2_inode_sector_count(dir);
490 status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh); 491 status = ocfs2_mark_inode_dirty(handle, dir, parent_fe_bh);
491 if (status < 0) { 492 if (status < 0) {
492 mlog_errno(status); 493 mlog_errno(status);
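
The i_blocks change above stops deriving the sector count from i_size and instead bases it on the inode's allocated clusters. A hedged sketch of what a clusters-to-sectors helper like ocfs2_inode_sector_count() might reduce to, assuming a power-of-two cluster size; the standalone function and its arguments are illustrative:

	#include <stdio.h>

	static unsigned long long inode_sector_count(unsigned int clusters,
						     unsigned int clustersize_bits)
	{
		/* 512-byte sectors per cluster == 1 << (clustersize_bits - 9) */
		return (unsigned long long)clusters << (clustersize_bits - 9);
	}

	int main(void)
	{
		/* 3 clusters of 4 KiB (clustersize_bits == 12) -> 24 sectors */
		printf("%llu\n", inode_sector_count(3, 12));
		return 0;
	}
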
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index c558442a0b44..d836b98dd99a 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -430,11 +430,10 @@ redo_bucket:
430 430
431 dlm_lockres_put(res); 431 dlm_lockres_put(res);
432 432
433 cond_resched_lock(&dlm->spinlock);
434
435 if (dropped) 433 if (dropped)
436 goto redo_bucket; 434 goto redo_bucket;
437 } 435 }
436 cond_resched_lock(&dlm->spinlock);
438 num += n; 437 num += n;
439 mlog(0, "%s: touched %d lockreses in bucket %d " 438 mlog(0, "%s: touched %d lockreses in bucket %d "
440 "(tot=%d)\n", dlm->name, n, i, num); 439 "(tot=%d)\n", dlm->name, n, i, num);
@@ -1035,7 +1034,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1035{ 1034{
1036 int status = 0, tmpstat, node; 1035 int status = 0, tmpstat, node;
1037 struct domain_join_ctxt *ctxt; 1036 struct domain_join_ctxt *ctxt;
1038 enum dlm_query_join_response response; 1037 enum dlm_query_join_response response = JOIN_DISALLOW;
1039 1038
1040 mlog_entry("%p", dlm); 1039 mlog_entry("%p", dlm);
1041 1040
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 6d4a83d50152..c1807a42c49f 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -611,6 +611,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
611 } 611 }
612 } while (status != 0); 612 } while (status != 0);
613 613
614 spin_lock(&dlm_reco_state_lock);
614 switch (ndata->state) { 615 switch (ndata->state) {
615 case DLM_RECO_NODE_DATA_INIT: 616 case DLM_RECO_NODE_DATA_INIT:
616 case DLM_RECO_NODE_DATA_FINALIZE_SENT: 617 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
@@ -641,6 +642,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
641 ndata->node_num, dead_node); 642 ndata->node_num, dead_node);
642 break; 643 break;
643 } 644 }
645 spin_unlock(&dlm_reco_state_lock);
644 } 646 }
645 647
646 mlog(0, "done requesting all lock info\n"); 648 mlog(0, "done requesting all lock info\n");
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index e335541727f9..27e43b0c0eae 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -225,11 +225,17 @@ static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
225 .flags = 0, 225 .flags = 0,
226}; 226};
227 227
228static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
229 .get_osb = ocfs2_get_inode_osb,
230 .flags = 0,
231};
232
228static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres) 233static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
229{ 234{
230 return lockres->l_type == OCFS2_LOCK_TYPE_META || 235 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
231 lockres->l_type == OCFS2_LOCK_TYPE_DATA || 236 lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
232 lockres->l_type == OCFS2_LOCK_TYPE_RW; 237 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
238 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
233} 239}
234 240
235static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres) 241static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
@@ -373,6 +379,9 @@ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
373 case OCFS2_LOCK_TYPE_DATA: 379 case OCFS2_LOCK_TYPE_DATA:
374 ops = &ocfs2_inode_data_lops; 380 ops = &ocfs2_inode_data_lops;
375 break; 381 break;
382 case OCFS2_LOCK_TYPE_OPEN:
383 ops = &ocfs2_inode_open_lops;
384 break;
376 default: 385 default:
377 mlog_bug_on_msg(1, "type: %d\n", type); 386 mlog_bug_on_msg(1, "type: %d\n", type);
378 ops = NULL; /* thanks, gcc */ 387 ops = NULL; /* thanks, gcc */
@@ -1129,6 +1138,12 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
1129 goto bail; 1138 goto bail;
1130 } 1139 }
1131 1140
1141 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1142 if (ret) {
1143 mlog_errno(ret);
1144 goto bail;
1145 }
1146
1132bail: 1147bail:
1133 mlog_exit(ret); 1148 mlog_exit(ret);
1134 return ret; 1149 return ret;
@@ -1182,6 +1197,99 @@ void ocfs2_rw_unlock(struct inode *inode, int write)
1182 mlog_exit_void(); 1197 mlog_exit_void();
1183} 1198}
1184 1199
1200/*
1201 * ocfs2_open_lock always get PR mode lock.
1202 */
1203int ocfs2_open_lock(struct inode *inode)
1204{
1205 int status = 0;
1206 struct ocfs2_lock_res *lockres;
1207 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1208
1209 BUG_ON(!inode);
1210
1211 mlog_entry_void();
1212
1213 mlog(0, "inode %llu take PRMODE open lock\n",
1214 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1215
1216 if (ocfs2_mount_local(osb))
1217 goto out;
1218
1219 lockres = &OCFS2_I(inode)->ip_open_lockres;
1220
1221 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1222 LKM_PRMODE, 0, 0);
1223 if (status < 0)
1224 mlog_errno(status);
1225
1226out:
1227 mlog_exit(status);
1228 return status;
1229}
1230
1231int ocfs2_try_open_lock(struct inode *inode, int write)
1232{
1233 int status = 0, level;
1234 struct ocfs2_lock_res *lockres;
1235 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1236
1237 BUG_ON(!inode);
1238
1239 mlog_entry_void();
1240
1241 mlog(0, "inode %llu try to take %s open lock\n",
1242 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1243 write ? "EXMODE" : "PRMODE");
1244
1245 if (ocfs2_mount_local(osb))
1246 goto out;
1247
1248 lockres = &OCFS2_I(inode)->ip_open_lockres;
1249
1250 level = write ? LKM_EXMODE : LKM_PRMODE;
1251
1252 /*
1253 * The file system may already be holding a PRMODE/EXMODE open lock.
1254 * Since we pass LKM_NOQUEUE, the request won't block waiting on
1255 * other nodes and the -EAGAIN will indicate to the caller that
1256 * this inode is still in use.
1257 */
1258 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1259 level, LKM_NOQUEUE, 0);
1260
1261out:
1262 mlog_exit(status);
1263 return status;
1264}
1265
1266/*
1267 * ocfs2_open_unlock unlocks both PR and EX mode open locks.
1268 */
1269void ocfs2_open_unlock(struct inode *inode)
1270{
1271 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1272 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1273
1274 mlog_entry_void();
1275
1276 mlog(0, "inode %llu drop open lock\n",
1277 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1278
1279 if (ocfs2_mount_local(osb))
1280 goto out;
1281
1282 if (lockres->l_ro_holders)
1283 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1284 LKM_PRMODE);
1285 if (lockres->l_ex_holders)
1286 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1287 LKM_EXMODE);
1288
1289out:
1290 mlog_exit_void();
1291}
1292
1185int ocfs2_data_lock_full(struct inode *inode, 1293int ocfs2_data_lock_full(struct inode *inode,
1186 int write, 1294 int write,
1187 int arg_flags) 1295 int arg_flags)
@@ -1387,8 +1495,7 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1387 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters) 1495 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1388 inode->i_blocks = 0; 1496 inode->i_blocks = 0;
1389 else 1497 else
1390 inode->i_blocks = 1498 inode->i_blocks = ocfs2_inode_sector_count(inode);
1391 ocfs2_align_bytes_to_sectors(i_size_read(inode));
1392 1499
1393 inode->i_uid = be32_to_cpu(lvb->lvb_iuid); 1500 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1394 inode->i_gid = be32_to_cpu(lvb->lvb_igid); 1501 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
@@ -1479,12 +1586,15 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1479{ 1586{
1480 int status = 0; 1587 int status = 0;
1481 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1588 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1482 struct ocfs2_lock_res *lockres = NULL; 1589 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1483 struct ocfs2_dinode *fe; 1590 struct ocfs2_dinode *fe;
1484 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 1591 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1485 1592
1486 mlog_entry_void(); 1593 mlog_entry_void();
1487 1594
1595 if (ocfs2_mount_local(osb))
1596 goto bail;
1597
1488 spin_lock(&oi->ip_lock); 1598 spin_lock(&oi->ip_lock);
1489 if (oi->ip_flags & OCFS2_INODE_DELETED) { 1599 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1490 mlog(0, "Orphaned inode %llu was deleted while we " 1600 mlog(0, "Orphaned inode %llu was deleted while we "
@@ -1496,22 +1606,16 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1496 } 1606 }
1497 spin_unlock(&oi->ip_lock); 1607 spin_unlock(&oi->ip_lock);
1498 1608
1499 if (!ocfs2_mount_local(osb)) { 1609 if (!ocfs2_should_refresh_lock_res(lockres))
1500 lockres = &oi->ip_meta_lockres; 1610 goto bail;
1501
1502 if (!ocfs2_should_refresh_lock_res(lockres))
1503 goto bail;
1504 }
1505 1611
1506 /* This will discard any caching information we might have had 1612 /* This will discard any caching information we might have had
1507 * for the inode metadata. */ 1613 * for the inode metadata. */
1508 ocfs2_metadata_cache_purge(inode); 1614 ocfs2_metadata_cache_purge(inode);
1509 1615
1510 /* will do nothing for inode types that don't use the extent
1511 * map (directories, bitmap files, etc) */
1512 ocfs2_extent_map_trunc(inode, 0); 1616 ocfs2_extent_map_trunc(inode, 0);
1513 1617
1514 if (lockres && ocfs2_meta_lvb_is_trustable(inode, lockres)) { 1618 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1515 mlog(0, "Trusting LVB on inode %llu\n", 1619 mlog(0, "Trusting LVB on inode %llu\n",
1516 (unsigned long long)oi->ip_blkno); 1620 (unsigned long long)oi->ip_blkno);
1517 ocfs2_refresh_inode_from_lvb(inode); 1621 ocfs2_refresh_inode_from_lvb(inode);
@@ -1558,8 +1662,7 @@ static int ocfs2_meta_lock_update(struct inode *inode,
1558 1662
1559 status = 0; 1663 status = 0;
1560bail_refresh: 1664bail_refresh:
1561 if (lockres) 1665 ocfs2_complete_lock_res_refresh(lockres, status);
1562 ocfs2_complete_lock_res_refresh(lockres, status);
1563bail: 1666bail:
1564 mlog_exit(status); 1667 mlog_exit(status);
1565 return status; 1668 return status;
@@ -1630,7 +1733,6 @@ int ocfs2_meta_lock_full(struct inode *inode,
1630 wait_event(osb->recovery_event, 1733 wait_event(osb->recovery_event,
1631 ocfs2_node_map_is_empty(osb, &osb->recovery_map)); 1734 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1632 1735
1633 acquired = 0;
1634 lockres = &OCFS2_I(inode)->ip_meta_lockres; 1736 lockres = &OCFS2_I(inode)->ip_meta_lockres;
1635 level = ex ? LKM_EXMODE : LKM_PRMODE; 1737 level = ex ? LKM_EXMODE : LKM_PRMODE;
1636 dlm_flags = 0; 1738 dlm_flags = 0;
@@ -2458,13 +2560,20 @@ int ocfs2_drop_inode_locks(struct inode *inode)
2458 * ocfs2_clear_inode has done it for us. */ 2560 * ocfs2_clear_inode has done it for us. */
2459 2561
2460 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), 2562 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2461 &OCFS2_I(inode)->ip_data_lockres); 2563 &OCFS2_I(inode)->ip_open_lockres);
2462 if (err < 0) 2564 if (err < 0)
2463 mlog_errno(err); 2565 mlog_errno(err);
2464 2566
2465 status = err; 2567 status = err;
2466 2568
2467 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), 2569 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2570 &OCFS2_I(inode)->ip_data_lockres);
2571 if (err < 0)
2572 mlog_errno(err);
2573 if (err < 0 && !status)
2574 status = err;
2575
2576 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2468 &OCFS2_I(inode)->ip_meta_lockres); 2577 &OCFS2_I(inode)->ip_meta_lockres);
2469 if (err < 0) 2578 if (err < 0)
2470 mlog_errno(err); 2579 mlog_errno(err);
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index c343fca68cf1..59cb566e7983 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -80,6 +80,9 @@ void ocfs2_data_unlock(struct inode *inode,
80 int write); 80 int write);
81int ocfs2_rw_lock(struct inode *inode, int write); 81int ocfs2_rw_lock(struct inode *inode, int write);
82void ocfs2_rw_unlock(struct inode *inode, int write); 82void ocfs2_rw_unlock(struct inode *inode, int write);
83int ocfs2_open_lock(struct inode *inode);
84int ocfs2_try_open_lock(struct inode *inode, int write);
85void ocfs2_open_unlock(struct inode *inode);
83int ocfs2_meta_lock_atime(struct inode *inode, 86int ocfs2_meta_lock_atime(struct inode *inode,
84 struct vfsmount *vfsmnt, 87 struct vfsmount *vfsmnt,
85 int *level); 88 int *level);
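
The open lock declared above replaces the delete-inode votes noted in the version 8 protocol bump earlier: every node holds a shared PR claim on an inode while it is in use, so a delete path can probe for remote users with a non-blocking EX request. A sketch of how a caller might key on the -EAGAIN result; can_wipe_inode() is an illustrative name, not part of this patch:

	/* Sketch only: probe whether any other node still has this inode open.
	 * ocfs2_try_open_lock() passes LKM_NOQUEUE, so a conflicting PR holder
	 * elsewhere in the cluster makes the EX request fail with -EAGAIN
	 * instead of blocking. */
	static int can_wipe_inode(struct inode *inode)
	{
		int status = ocfs2_try_open_lock(inode, 1);	/* 1 => EXMODE */

		if (status == -EAGAIN)
			return 0;	/* still open somewhere; defer the wipe */
		if (status < 0)
			return status;	/* genuine locking error */
		return 1;		/* no remote opens; safe to wipe */
	}
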
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 80ac69f11d9f..ba2b2ab1c6e4 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -3,8 +3,7 @@
3 * 3 *
4 * extent_map.c 4 * extent_map.c
5 * 5 *
6 * In-memory extent map for OCFS2. Man, this code was prettier in 6 * Block/Cluster mapping functions
7 * the library.
8 * 7 *
9 * Copyright (C) 2004 Oracle. All rights reserved. 8 * Copyright (C) 2004 Oracle. All rights reserved.
10 * 9 *
@@ -26,1016 +25,528 @@
26#include <linux/fs.h> 25#include <linux/fs.h>
27#include <linux/init.h> 26#include <linux/init.h>
28#include <linux/types.h> 27#include <linux/types.h>
29#include <linux/slab.h>
30#include <linux/rbtree.h>
31 28
32#define MLOG_MASK_PREFIX ML_EXTENT_MAP 29#define MLOG_MASK_PREFIX ML_EXTENT_MAP
33#include <cluster/masklog.h> 30#include <cluster/masklog.h>
34 31
35#include "ocfs2.h" 32#include "ocfs2.h"
36 33
34#include "alloc.h"
37#include "extent_map.h" 35#include "extent_map.h"
38#include "inode.h" 36#include "inode.h"
39#include "super.h" 37#include "super.h"
40 38
41#include "buffer_head_io.h" 39#include "buffer_head_io.h"
42 40
43
44/* 41/*
45 * SUCK SUCK SUCK 42 * The extent caching implementation is intentionally trivial.
46 * Our headers are so bad that struct ocfs2_extent_map is in ocfs.h
47 */
48
49struct ocfs2_extent_map_entry {
50 struct rb_node e_node;
51 int e_tree_depth;
52 struct ocfs2_extent_rec e_rec;
53};
54
55struct ocfs2_em_insert_context {
56 int need_left;
57 int need_right;
58 struct ocfs2_extent_map_entry *new_ent;
59 struct ocfs2_extent_map_entry *old_ent;
60 struct ocfs2_extent_map_entry *left_ent;
61 struct ocfs2_extent_map_entry *right_ent;
62};
63
64static struct kmem_cache *ocfs2_em_ent_cachep = NULL;
65
66
67static struct ocfs2_extent_map_entry *
68ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
69 u32 cpos, u32 clusters,
70 struct rb_node ***ret_p,
71 struct rb_node **ret_parent);
72static int ocfs2_extent_map_insert(struct inode *inode,
73 struct ocfs2_extent_rec *rec,
74 int tree_depth);
75static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em,
76 struct ocfs2_extent_map_entry *ent);
77static int ocfs2_extent_map_find_leaf(struct inode *inode,
78 u32 cpos, u32 clusters,
79 struct ocfs2_extent_list *el);
80static int ocfs2_extent_map_lookup_read(struct inode *inode,
81 u32 cpos, u32 clusters,
82 struct ocfs2_extent_map_entry **ret_ent);
83static int ocfs2_extent_map_try_insert(struct inode *inode,
84 struct ocfs2_extent_rec *rec,
85 int tree_depth,
86 struct ocfs2_em_insert_context *ctxt);
87
88/* returns 1 only if the rec contains all the given clusters -- that is that
89 * rec's cpos is <= the cluster cpos and that the rec endpoint (cpos +
90 * clusters) is >= the argument's endpoint */
91static int ocfs2_extent_rec_contains_clusters(struct ocfs2_extent_rec *rec,
92 u32 cpos, u32 clusters)
93{
94 if (le32_to_cpu(rec->e_cpos) > cpos)
95 return 0;
96 if (cpos + clusters > le32_to_cpu(rec->e_cpos) +
97 le32_to_cpu(rec->e_clusters))
98 return 0;
99 return 1;
100}
101
102
103/*
104 * Find an entry in the tree that intersects the region passed in.
105 * Note that this will find straddled intervals, it is up to the
106 * callers to enforce any boundary conditions.
107 *
108 * Callers must hold ip_lock. This lookup is not guaranteed to return
109 * a tree_depth 0 match, and as such can race inserts if the lock
110 * were not held.
111 * 43 *
112 * The rb_node garbage lets insertion share the search. Trivial 44 * We only cache a small number of extents stored directly on the
113 * callers pass NULL. 45 * inode, so linear order operations are acceptable. If we ever want
46 * to increase the size of the extent map, then these algorithms must
47 * get smarter.
114 */ 48 */
115static struct ocfs2_extent_map_entry * 49
116ocfs2_extent_map_lookup(struct ocfs2_extent_map *em, 50void ocfs2_extent_map_init(struct inode *inode)
117 u32 cpos, u32 clusters,
118 struct rb_node ***ret_p,
119 struct rb_node **ret_parent)
120{ 51{
121 struct rb_node **p = &em->em_extents.rb_node; 52 struct ocfs2_inode_info *oi = OCFS2_I(inode);
122 struct rb_node *parent = NULL;
123 struct ocfs2_extent_map_entry *ent = NULL;
124
125 while (*p)
126 {
127 parent = *p;
128 ent = rb_entry(parent, struct ocfs2_extent_map_entry,
129 e_node);
130 if ((cpos + clusters) <= le32_to_cpu(ent->e_rec.e_cpos)) {
131 p = &(*p)->rb_left;
132 ent = NULL;
133 } else if (cpos >= (le32_to_cpu(ent->e_rec.e_cpos) +
134 le32_to_cpu(ent->e_rec.e_clusters))) {
135 p = &(*p)->rb_right;
136 ent = NULL;
137 } else
138 break;
139 }
140 53
141 if (ret_p != NULL) 54 oi->ip_extent_map.em_num_items = 0;
142 *ret_p = p; 55 INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
143 if (ret_parent != NULL)
144 *ret_parent = parent;
145 return ent;
146} 56}
147 57
148/* 58static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
149 * Find the leaf containing the interval we want. While we're on our 59 unsigned int cpos,
150 * way down the tree, fill in every record we see at any depth, because 60 struct ocfs2_extent_map_item **ret_emi)
151 * we might want it later.
152 *
153 * Note that this code is run without ip_lock. That's because it
154 * sleeps while reading. If someone is also filling the extent list at
155 * the same time we are, we might have to restart.
156 */
157static int ocfs2_extent_map_find_leaf(struct inode *inode,
158 u32 cpos, u32 clusters,
159 struct ocfs2_extent_list *el)
160{ 61{
161 int i, ret; 62 unsigned int range;
162 struct buffer_head *eb_bh = NULL; 63 struct ocfs2_extent_map_item *emi;
163 u64 blkno;
164 u32 rec_end;
165 struct ocfs2_extent_block *eb;
166 struct ocfs2_extent_rec *rec;
167
168 /*
169 * The bh data containing the el cannot change here, because
170 * we hold alloc_sem. So we can do this without other
171 * locks.
172 */
173 while (el->l_tree_depth)
174 {
175 blkno = 0;
176 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
177 rec = &el->l_recs[i];
178 rec_end = (le32_to_cpu(rec->e_cpos) +
179 le32_to_cpu(rec->e_clusters));
180
181 ret = -EBADR;
182 if (rec_end > OCFS2_I(inode)->ip_clusters) {
183 mlog_errno(ret);
184 ocfs2_error(inode->i_sb,
185 "Extent %d at e_blkno %llu of inode %llu goes past ip_clusters of %u\n",
186 i,
187 (unsigned long long)le64_to_cpu(rec->e_blkno),
188 (unsigned long long)OCFS2_I(inode)->ip_blkno,
189 OCFS2_I(inode)->ip_clusters);
190 goto out_free;
191 }
192
193 if (rec_end <= cpos) {
194 ret = ocfs2_extent_map_insert(inode, rec,
195 le16_to_cpu(el->l_tree_depth));
196 if (ret && (ret != -EEXIST)) {
197 mlog_errno(ret);
198 goto out_free;
199 }
200 continue;
201 }
202 if ((cpos + clusters) <= le32_to_cpu(rec->e_cpos)) {
203 ret = ocfs2_extent_map_insert(inode, rec,
204 le16_to_cpu(el->l_tree_depth));
205 if (ret && (ret != -EEXIST)) {
206 mlog_errno(ret);
207 goto out_free;
208 }
209 continue;
210 }
211 64
212 /* 65 *ret_emi = NULL;
213 * We've found a record that matches our
214 * interval. We don't insert it because we're
215 * about to traverse it.
216 */
217
218 /* Check to see if we're straddling */
219 ret = -ESRCH;
220 if (!ocfs2_extent_rec_contains_clusters(rec,
221 cpos,
222 clusters)) {
223 mlog_errno(ret);
224 goto out_free;
225 }
226 66
227 /* 67 list_for_each_entry(emi, &em->em_list, ei_list) {
228 * If we've already found a record, the el has 68 range = emi->ei_cpos + emi->ei_clusters;
229 * two records covering the same interval.
230 * EEEK!
231 */
232 ret = -EBADR;
233 if (blkno) {
234 mlog_errno(ret);
235 ocfs2_error(inode->i_sb,
236 "Multiple extents for (cpos = %u, clusters = %u) on inode %llu; e_blkno %llu and rec %d at e_blkno %llu\n",
237 cpos, clusters,
238 (unsigned long long)OCFS2_I(inode)->ip_blkno,
239 (unsigned long long)blkno, i,
240 (unsigned long long)le64_to_cpu(rec->e_blkno));
241 goto out_free;
242 }
243 69
244 blkno = le64_to_cpu(rec->e_blkno); 70 if (cpos >= emi->ei_cpos && cpos < range) {
245 } 71 list_move(&emi->ei_list, &em->em_list);
246 72
247 /* 73 *ret_emi = emi;
248 * We don't support holes, and we're still up 74 break;
249 * in the branches, so we'd better have found someone
250 */
251 ret = -EBADR;
252 if (!blkno) {
253 ocfs2_error(inode->i_sb,
254 "No record found for (cpos = %u, clusters = %u) on inode %llu\n",
255 cpos, clusters,
256 (unsigned long long)OCFS2_I(inode)->ip_blkno);
257 mlog_errno(ret);
258 goto out_free;
259 }
260
261 if (eb_bh) {
262 brelse(eb_bh);
263 eb_bh = NULL;
264 }
265 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
266 blkno, &eb_bh, OCFS2_BH_CACHED,
267 inode);
268 if (ret) {
269 mlog_errno(ret);
270 goto out_free;
271 }
272 eb = (struct ocfs2_extent_block *)eb_bh->b_data;
273 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
274 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
275 ret = -EIO;
276 goto out_free;
277 } 75 }
278 el = &eb->h_list;
279 } 76 }
77}
280 78
281 BUG_ON(el->l_tree_depth); 79static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
282 80 unsigned int *phys, unsigned int *len,
283 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) { 81 unsigned int *flags)
284 rec = &el->l_recs[i]; 82{
285 83 unsigned int coff;
286 if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) > 84 struct ocfs2_inode_info *oi = OCFS2_I(inode);
287 OCFS2_I(inode)->ip_clusters) { 85 struct ocfs2_extent_map_item *emi;
288 ret = -EBADR; 86
289 mlog_errno(ret); 87 spin_lock(&oi->ip_lock);
290 ocfs2_error(inode->i_sb, 88
291 "Extent %d at e_blkno %llu of inode %llu goes past ip_clusters of %u\n", 89 __ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
292 i, 90 if (emi) {
293 (unsigned long long)le64_to_cpu(rec->e_blkno), 91 coff = cpos - emi->ei_cpos;
294 (unsigned long long)OCFS2_I(inode)->ip_blkno, 92 *phys = emi->ei_phys + coff;
295 OCFS2_I(inode)->ip_clusters); 93 if (len)
296 return ret; 94 *len = emi->ei_clusters - coff;
297 } 95 if (flags)
298 96 *flags = emi->ei_flags;
299 ret = ocfs2_extent_map_insert(inode, rec,
300 le16_to_cpu(el->l_tree_depth));
301 if (ret && (ret != -EEXIST)) {
302 mlog_errno(ret);
303 goto out_free;
304 }
305 } 97 }
306 98
307 ret = 0; 99 spin_unlock(&oi->ip_lock);
308 100
309out_free: 101 if (emi == NULL)
310 if (eb_bh) 102 return -ENOENT;
311 brelse(eb_bh);
312 103
313 return ret; 104 return 0;
314} 105}
315 106
316/* 107/*
317 * This lookup actually will read from disk. It has one invariant: 108 * Forget about all clusters equal to or greater than cpos.
318 * It will never re-traverse blocks. This means that all inserts should
319 * be new regions or more granular regions (both allowed by insert).
320 */ 109 */
321static int ocfs2_extent_map_lookup_read(struct inode *inode, 110void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
322 u32 cpos,
323 u32 clusters,
324 struct ocfs2_extent_map_entry **ret_ent)
325{ 111{
326 int ret; 112 struct list_head *p, *n;
327 u64 blkno; 113 struct ocfs2_extent_map_item *emi;
328 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map; 114 struct ocfs2_inode_info *oi = OCFS2_I(inode);
329 struct ocfs2_extent_map_entry *ent; 115 struct ocfs2_extent_map *em = &oi->ip_extent_map;
330 struct buffer_head *bh = NULL; 116 LIST_HEAD(tmp_list);
331 struct ocfs2_extent_block *eb; 117 unsigned int range;
332 struct ocfs2_dinode *di; 118
333 struct ocfs2_extent_list *el; 119 spin_lock(&oi->ip_lock);
334 120 list_for_each_safe(p, n, &em->em_list) {
335 spin_lock(&OCFS2_I(inode)->ip_lock); 121 emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);
336 ent = ocfs2_extent_map_lookup(em, cpos, clusters, NULL, NULL); 122
337 if (ent) { 123 if (emi->ei_cpos >= cpos) {
338 if (!ent->e_tree_depth) { 124 /* Full truncate of this record. */
339 spin_unlock(&OCFS2_I(inode)->ip_lock); 125 list_move(&emi->ei_list, &tmp_list);
340 *ret_ent = ent; 126 BUG_ON(em->em_num_items == 0);
341 return 0; 127 em->em_num_items--;
342 } 128 continue;
343 blkno = le64_to_cpu(ent->e_rec.e_blkno);
344 spin_unlock(&OCFS2_I(inode)->ip_lock);
345
346 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), blkno, &bh,
347 OCFS2_BH_CACHED, inode);
348 if (ret) {
349 mlog_errno(ret);
350 if (bh)
351 brelse(bh);
352 return ret;
353 } 129 }
354 eb = (struct ocfs2_extent_block *)bh->b_data;
355 if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
356 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, eb);
357 brelse(bh);
358 return -EIO;
359 }
360 el = &eb->h_list;
361 } else {
362 spin_unlock(&OCFS2_I(inode)->ip_lock);
363 130
364 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), 131 range = emi->ei_cpos + emi->ei_clusters;
365 OCFS2_I(inode)->ip_blkno, &bh, 132 if (range > cpos) {
366 OCFS2_BH_CACHED, inode); 133 /* Partial truncate */
367 if (ret) { 134 emi->ei_clusters = cpos - emi->ei_cpos;
368 mlog_errno(ret);
369 if (bh)
370 brelse(bh);
371 return ret;
372 } 135 }
373 di = (struct ocfs2_dinode *)bh->b_data;
374 if (!OCFS2_IS_VALID_DINODE(di)) {
375 brelse(bh);
376 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, di);
377 return -EIO;
378 }
379 el = &di->id2.i_list;
380 }
381
382 ret = ocfs2_extent_map_find_leaf(inode, cpos, clusters, el);
383 brelse(bh);
384 if (ret) {
385 mlog_errno(ret);
386 return ret;
387 } 136 }
137 spin_unlock(&oi->ip_lock);
388 138
389 ent = ocfs2_extent_map_lookup(em, cpos, clusters, NULL, NULL); 139 list_for_each_safe(p, n, &tmp_list) {
390 if (!ent) { 140 emi = list_entry(p, struct ocfs2_extent_map_item, ei_list);
391 ret = -ESRCH; 141 list_del(&emi->ei_list);
392 mlog_errno(ret); 142 kfree(emi);
393 return ret;
394 } 143 }
395
396 /* FIXME: Make sure this isn't a corruption */
397 BUG_ON(ent->e_tree_depth);
398
399 *ret_ent = ent;
400
401 return 0;
402} 144}
403 145
404/* 146/*
405 * Callers must hold ip_lock. This can insert pieces of the tree, 147 * Is any part of emi2 contained within emi1
406 * thus racing lookup if the lock weren't held.
407 */ 148 */
408static int ocfs2_extent_map_insert_entry(struct ocfs2_extent_map *em, 149static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
409 struct ocfs2_extent_map_entry *ent) 150 struct ocfs2_extent_map_item *emi2)
410{ 151{
411 struct rb_node **p, *parent; 152 unsigned int range1, range2;
412 struct ocfs2_extent_map_entry *old_ent;
413 153
414 old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(ent->e_rec.e_cpos), 154 /*
415 le32_to_cpu(ent->e_rec.e_clusters), 155 * Check if logical start of emi2 is inside emi1
416 &p, &parent); 156 */
417 if (old_ent) 157 range1 = emi1->ei_cpos + emi1->ei_clusters;
418 return -EEXIST; 158 if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
159 return 1;
419 160
420 rb_link_node(&ent->e_node, parent, p); 161 /*
421 rb_insert_color(&ent->e_node, &em->em_extents); 162 * Check if logical end of emi2 is inside emi1
163 */
164 range2 = emi2->ei_cpos + emi2->ei_clusters;
165 if (range2 > emi1->ei_cpos && range2 <= range1)
166 return 1;
422 167
423 return 0; 168 return 0;
424} 169}
425 170
171static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
172 struct ocfs2_extent_map_item *src)
173{
174 dest->ei_cpos = src->ei_cpos;
175 dest->ei_phys = src->ei_phys;
176 dest->ei_clusters = src->ei_clusters;
177 dest->ei_flags = src->ei_flags;
178}
426 179
427/* 180/*
428 * Simple rule: on any return code other than -EAGAIN, anything left 181 * Try to merge emi with ins. Returns 1 if merge succeeds, zero
429 * in the insert_context will be freed. 182 * otherwise.
430 *
431 * Simple rule #2: A return code of -EEXIST from this function or
432 * its calls to ocfs2_extent_map_insert_entry() signifies that another
433 * thread beat us to the insert. It is not an actual error, but it
434 * tells the caller we have no more work to do.
435 */ 183 */
436static int ocfs2_extent_map_try_insert(struct inode *inode, 184static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
437 struct ocfs2_extent_rec *rec, 185 struct ocfs2_extent_map_item *ins)
438 int tree_depth,
439 struct ocfs2_em_insert_context *ctxt)
440{ 186{
441 int ret;
442 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
443 struct ocfs2_extent_map_entry *old_ent;
444
445 ctxt->need_left = 0;
446 ctxt->need_right = 0;
447 ctxt->old_ent = NULL;
448
449 spin_lock(&OCFS2_I(inode)->ip_lock);
450 ret = ocfs2_extent_map_insert_entry(em, ctxt->new_ent);
451 if (!ret) {
452 ctxt->new_ent = NULL;
453 goto out_unlock;
454 }
455
456 /* Since insert_entry failed, the map MUST have old_ent */
457 old_ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos),
458 le32_to_cpu(rec->e_clusters),
459 NULL, NULL);
460
461 BUG_ON(!old_ent);
462
463 if (old_ent->e_tree_depth < tree_depth) {
464 /* Another thread beat us to the lower tree_depth */
465 ret = -EEXIST;
466 goto out_unlock;
467 }
468
469 if (old_ent->e_tree_depth == tree_depth) {
470 /*
471 * Another thread beat us to this tree_depth.
472 * Let's make sure we agree with that thread (the
473 * extent_rec should be identical).
474 */
475 if (!memcmp(rec, &old_ent->e_rec,
476 sizeof(struct ocfs2_extent_rec)))
477 ret = 0;
478 else
479 /* FIXME: Should this be ESRCH/EBADR??? */
480 ret = -EEXIST;
481
482 goto out_unlock;
483 }
484
485 /* 187 /*
486 * We do it in this order specifically so that no actual tree 188 * Handle contiguousness
487 * changes occur until we have all the pieces we need. We
488 * don't want malloc failures to leave an inconsistent tree.
489 * Whenever we drop the lock, another process could be
490 * inserting. Also note that, if another process just beat us
491 * to an insert, we might not need the same pieces we needed
492 * the first go round. In the end, the pieces we need will
493 * be used, and the pieces we don't will be freed.
494 */ 189 */
495 ctxt->need_left = !!(le32_to_cpu(rec->e_cpos) > 190 if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
496 le32_to_cpu(old_ent->e_rec.e_cpos)); 191 ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
497 ctxt->need_right = !!((le32_to_cpu(old_ent->e_rec.e_cpos) + 192 ins->ei_flags == emi->ei_flags) {
498 le32_to_cpu(old_ent->e_rec.e_clusters)) > 193 emi->ei_clusters += ins->ei_clusters;
499 (le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters))); 194 return 1;
500 ret = -EAGAIN; 195 } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
501 if (ctxt->need_left) { 196 (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
502 if (!ctxt->left_ent) 197 ins->ei_flags == emi->ei_flags) {
503 goto out_unlock; 198 emi->ei_phys = ins->ei_phys;
504 *(ctxt->left_ent) = *old_ent; 199 emi->ei_cpos = ins->ei_cpos;
505 ctxt->left_ent->e_rec.e_clusters = 200 emi->ei_clusters += ins->ei_clusters;
506 cpu_to_le32(le32_to_cpu(rec->e_cpos) - 201 return 1;
507 le32_to_cpu(ctxt->left_ent->e_rec.e_cpos));
508 }
509 if (ctxt->need_right) {
510 if (!ctxt->right_ent)
511 goto out_unlock;
512 *(ctxt->right_ent) = *old_ent;
513 ctxt->right_ent->e_rec.e_cpos =
514 cpu_to_le32(le32_to_cpu(rec->e_cpos) +
515 le32_to_cpu(rec->e_clusters));
516 ctxt->right_ent->e_rec.e_clusters =
517 cpu_to_le32((le32_to_cpu(old_ent->e_rec.e_cpos) +
518 le32_to_cpu(old_ent->e_rec.e_clusters)) -
519 le32_to_cpu(ctxt->right_ent->e_rec.e_cpos));
520 }
521
522 rb_erase(&old_ent->e_node, &em->em_extents);
523 /* Now that he's erased, set him up for deletion */
524 ctxt->old_ent = old_ent;
525
526 if (ctxt->need_left) {
527 ret = ocfs2_extent_map_insert_entry(em,
528 ctxt->left_ent);
529 if (ret)
530 goto out_unlock;
531 ctxt->left_ent = NULL;
532 } 202 }
533 203
534 if (ctxt->need_right) { 204 /*
535 ret = ocfs2_extent_map_insert_entry(em, 205 * Overlapping extents - this shouldn't happen unless we've
536 ctxt->right_ent); 206 * split an extent to change its flags. That is exceedingly
537 if (ret) 207 * rare, so there's no sense in trying to optimize it yet.
538 goto out_unlock; 208 */
539 ctxt->right_ent = NULL; 209 if (ocfs2_ei_is_contained(emi, ins) ||
210 ocfs2_ei_is_contained(ins, emi)) {
211 ocfs2_copy_emi_fields(emi, ins);
212 return 1;
540 } 213 }
541 214
542 ret = ocfs2_extent_map_insert_entry(em, ctxt->new_ent); 215 /* No merge was possible. */
543 216 return 0;
544 if (!ret)
545 ctxt->new_ent = NULL;
546
547out_unlock:
548 spin_unlock(&OCFS2_I(inode)->ip_lock);
549
550 return ret;
551} 217}
552 218
553 219/*
554static int ocfs2_extent_map_insert(struct inode *inode, 220 * In order to reduce complexity on the caller, this insert function
555 struct ocfs2_extent_rec *rec, 221 * is intentionally liberal in what it will accept.
556 int tree_depth) 222 *
223 * The only rule is that the truncate call *must* be used whenever
224 * records have been deleted. This avoids inserting overlapping
225 * records with different physical mappings.
226 */
227void ocfs2_extent_map_insert_rec(struct inode *inode,
228 struct ocfs2_extent_rec *rec)
557{ 229{
558 int ret; 230 struct ocfs2_inode_info *oi = OCFS2_I(inode);
559 struct ocfs2_em_insert_context ctxt = {0, }; 231 struct ocfs2_extent_map *em = &oi->ip_extent_map;
560 232 struct ocfs2_extent_map_item *emi, *new_emi = NULL;
561 if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) > 233 struct ocfs2_extent_map_item ins;
562 OCFS2_I(inode)->ip_map.em_clusters) { 234
563 ret = -EBADR; 235 ins.ei_cpos = le32_to_cpu(rec->e_cpos);
564 mlog_errno(ret); 236 ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
565 return ret; 237 le64_to_cpu(rec->e_blkno));
238 ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
239 ins.ei_flags = rec->e_flags;
240
241search:
242 spin_lock(&oi->ip_lock);
243
244 list_for_each_entry(emi, &em->em_list, ei_list) {
245 if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
246 list_move(&emi->ei_list, &em->em_list);
247 spin_unlock(&oi->ip_lock);
248 goto out;
249 }
566 } 250 }
567 251
568 /* Zero e_clusters means a truncated tail record. It better be EOF */ 252 /*
569 if (!rec->e_clusters) { 253 * No item could be merged.
570 if ((le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters)) != 254 *
571 OCFS2_I(inode)->ip_map.em_clusters) { 255 * Either allocate and add a new item, or overwrite the least recently
572 ret = -EBADR; 256 * inserted.
573 mlog_errno(ret); 257 */
574 ocfs2_error(inode->i_sb,
575 "Zero e_clusters on non-tail extent record at e_blkno %llu on inode %llu\n",
576 (unsigned long long)le64_to_cpu(rec->e_blkno),
577 (unsigned long long)OCFS2_I(inode)->ip_blkno);
578 return ret;
579 }
580 258
581 /* Ignore the truncated tail */ 259 if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
582 return 0; 260 if (new_emi == NULL) {
583 } 261 spin_unlock(&oi->ip_lock);
584 262
585 ret = -ENOMEM; 263 new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
586 ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, 264 if (new_emi == NULL)
587 GFP_NOFS); 265 goto out;
588 if (!ctxt.new_ent) {
589 mlog_errno(ret);
590 return ret;
591 }
592 266
593 ctxt.new_ent->e_rec = *rec; 267 goto search;
594 ctxt.new_ent->e_tree_depth = tree_depth;
595
596 do {
597 ret = -ENOMEM;
598 if (ctxt.need_left && !ctxt.left_ent) {
599 ctxt.left_ent =
600 kmem_cache_alloc(ocfs2_em_ent_cachep,
601 GFP_NOFS);
602 if (!ctxt.left_ent)
603 break;
604 }
605 if (ctxt.need_right && !ctxt.right_ent) {
606 ctxt.right_ent =
607 kmem_cache_alloc(ocfs2_em_ent_cachep,
608 GFP_NOFS);
609 if (!ctxt.right_ent)
610 break;
611 } 268 }
612 269
613 ret = ocfs2_extent_map_try_insert(inode, rec, 270 ocfs2_copy_emi_fields(new_emi, &ins);
614 tree_depth, &ctxt); 271 list_add(&new_emi->ei_list, &em->em_list);
615 } while (ret == -EAGAIN); 272 em->em_num_items++;
616 273 new_emi = NULL;
617 if ((ret < 0) && (ret != -EEXIST)) 274 } else {
618 mlog_errno(ret); 275 BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
276 emi = list_entry(em->em_list.prev,
277 struct ocfs2_extent_map_item, ei_list);
278 list_move(&emi->ei_list, &em->em_list);
279 ocfs2_copy_emi_fields(emi, &ins);
280 }
619 281
620 if (ctxt.left_ent) 282 spin_unlock(&oi->ip_lock);
621 kmem_cache_free(ocfs2_em_ent_cachep, ctxt.left_ent);
622 if (ctxt.right_ent)
623 kmem_cache_free(ocfs2_em_ent_cachep, ctxt.right_ent);
624 if (ctxt.old_ent)
625 kmem_cache_free(ocfs2_em_ent_cachep, ctxt.old_ent);
626 if (ctxt.new_ent)
627 kmem_cache_free(ocfs2_em_ent_cachep, ctxt.new_ent);
628 283
629 return ret; 284out:
285 if (new_emi)
286 kfree(new_emi);
630} 287}
631 288
632/* 289/*
632 * Append this record to the tail of the extent map. It must be 290 * Return the first index within el which contains an extent start
634 * tree_depth 0. The record might be an extension of an existing 291 * larger than v_cluster.
635 * record, and as such that needs to be handled. eg:
636 *
637 * Existing record in the extent map:
638 *
639 * cpos = 10, len = 10
640 * |---------|
641 *
642 * New Record:
643 *
644 * cpos = 10, len = 20
645 * |------------------|
646 *
647 * The passed record is the new on-disk record. The new_clusters value
648 * is how many clusters were added to the file. If the append is a
649 * contiguous append, the new_clusters has been added to
650 * rec->e_clusters. If the append is an entirely new extent, then
651 * rec->e_clusters is == new_clusters.
652 */ 292 */
653int ocfs2_extent_map_append(struct inode *inode, 293static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
654 struct ocfs2_extent_rec *rec, 294 u32 v_cluster)
655 u32 new_clusters)
656{ 295{
657 int ret; 296 int i;
658 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map; 297 struct ocfs2_extent_rec *rec;
659 struct ocfs2_extent_map_entry *ent;
660 struct ocfs2_extent_rec *old;
661
662 BUG_ON(!new_clusters);
663 BUG_ON(le32_to_cpu(rec->e_clusters) < new_clusters);
664 298
665 if (em->em_clusters < OCFS2_I(inode)->ip_clusters) { 299 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
666 /* 300 rec = &el->l_recs[i];
667 * Size changed underneath us on disk. Drop any
668 * straddling records and update our idea of
669 * i_clusters
670 */
671 ocfs2_extent_map_drop(inode, em->em_clusters - 1);
672 em->em_clusters = OCFS2_I(inode)->ip_clusters;
673 }
674 301
675 mlog_bug_on_msg((le32_to_cpu(rec->e_cpos) + 302 if (v_cluster < le32_to_cpu(rec->e_cpos))
676 le32_to_cpu(rec->e_clusters)) != 303 break;
677 (em->em_clusters + new_clusters),
678 "Inode %llu:\n"
679 "rec->e_cpos = %u + rec->e_clusters = %u = %u\n"
680 "em->em_clusters = %u + new_clusters = %u = %u\n",
681 (unsigned long long)OCFS2_I(inode)->ip_blkno,
682 le32_to_cpu(rec->e_cpos), le32_to_cpu(rec->e_clusters),
683 le32_to_cpu(rec->e_cpos) + le32_to_cpu(rec->e_clusters),
684 em->em_clusters, new_clusters,
685 em->em_clusters + new_clusters);
686
687 em->em_clusters += new_clusters;
688
689 ret = -ENOENT;
690 if (le32_to_cpu(rec->e_clusters) > new_clusters) {
691 /* This is a contiguous append */
692 ent = ocfs2_extent_map_lookup(em, le32_to_cpu(rec->e_cpos), 1,
693 NULL, NULL);
694 if (ent) {
695 old = &ent->e_rec;
696 BUG_ON((le32_to_cpu(rec->e_cpos) +
697 le32_to_cpu(rec->e_clusters)) !=
698 (le32_to_cpu(old->e_cpos) +
699 le32_to_cpu(old->e_clusters) +
700 new_clusters));
701 if (ent->e_tree_depth == 0) {
702 BUG_ON(le32_to_cpu(old->e_cpos) !=
703 le32_to_cpu(rec->e_cpos));
704 BUG_ON(le64_to_cpu(old->e_blkno) !=
705 le64_to_cpu(rec->e_blkno));
706 ret = 0;
707 }
708 /*
709 * Let non-leafs fall through as -ENOENT to
710 * force insertion of the new leaf.
711 */
712 le32_add_cpu(&old->e_clusters, new_clusters);
713 }
714 } 304 }
715 305
716 if (ret == -ENOENT) 306 return i;
717 ret = ocfs2_extent_map_insert(inode, rec, 0);
718 if (ret < 0)
719 mlog_errno(ret);
720 return ret;
721} 307}
722 308
723#if 0
724/* Code here is included but defined out as it completes the extent
725 * map api and may be used in the future. */
726
727/* 309/*
728 * Look up the record containing this cluster offset. This record is 310 * Figure out the size of a hole which starts at v_cluster within the given
729 * part of the extent map. Do not free it. Any changes you make to 311 * extent list.
730 * it will reflect in the extent map. So, if your last extent
731 * is (cpos = 10, clusters = 10) and you truncate the file by 5
732 * clusters, you can do:
733 * 312 *
734 * ret = ocfs2_extent_map_get_rec(em, orig_size - 5, &rec); 313 * If there is no more allocation past v_cluster, we return the maximum
735 * rec->e_clusters -= 5; 314 * cluster size minus v_cluster.
736 * 315 *
737 * The lookup does not read from disk. If the map isn't filled in for 316 * If we have in-inode extents, then el points to the dinode list and
738 * an entry, you won't find it. 317 * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
739 * 318 * containing el.
740 * Also note that the returned record is valid until alloc_sem is
741 * dropped. After that, truncate and extend can happen. Caveat Emptor.
742 */ 319 */
743int ocfs2_extent_map_get_rec(struct inode *inode, u32 cpos, 320static int ocfs2_figure_hole_clusters(struct inode *inode,
744 struct ocfs2_extent_rec **rec, 321 struct ocfs2_extent_list *el,
745 int *tree_depth) 322 struct buffer_head *eb_bh,
323 u32 v_cluster,
324 u32 *num_clusters)
746{ 325{
747 int ret = -ENOENT; 326 int ret, i;
748 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map; 327 struct buffer_head *next_eb_bh = NULL;
749 struct ocfs2_extent_map_entry *ent; 328 struct ocfs2_extent_block *eb, *next_eb;
750 329
751 *rec = NULL; 330 i = ocfs2_search_for_hole_index(el, v_cluster);
752 331
753 if (cpos >= OCFS2_I(inode)->ip_clusters) 332 if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
754 return -EINVAL; 333 eb = (struct ocfs2_extent_block *)eb_bh->b_data;
755 334
756 if (cpos >= em->em_clusters) {
757 /* 335 /*
758 * Size changed underneath us on disk. Drop any 336 * Check the next leaf for any extents.
759 * straddling records and update our idea of
760 * i_clusters
761 */ 337 */
762 ocfs2_extent_map_drop(inode, em->em_clusters - 1);
763 em->em_clusters = OCFS2_I(inode)->ip_clusters ;
764 }
765
766 ent = ocfs2_extent_map_lookup(&OCFS2_I(inode)->ip_map, cpos, 1,
767 NULL, NULL);
768 338
769 if (ent) { 339 if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
770 *rec = &ent->e_rec; 340 goto no_more_extents;
771 if (tree_depth)
772 *tree_depth = ent->e_tree_depth;
773 ret = 0;
774 }
775 341
776 return ret; 342 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
777} 343 le64_to_cpu(eb->h_next_leaf_blk),
344 &next_eb_bh, OCFS2_BH_CACHED, inode);
345 if (ret) {
346 mlog_errno(ret);
347 goto out;
348 }
349 next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
778 350
779int ocfs2_extent_map_get_clusters(struct inode *inode, 351 if (!OCFS2_IS_VALID_EXTENT_BLOCK(next_eb)) {
780 u32 v_cpos, int count, 352 ret = -EROFS;
781 u32 *p_cpos, int *ret_count) 353 OCFS2_RO_ON_INVALID_EXTENT_BLOCK(inode->i_sb, next_eb);
782{ 354 goto out;
783 int ret; 355 }
784 u32 coff, ccount;
785 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
786 struct ocfs2_extent_map_entry *ent = NULL;
787 356
788 *p_cpos = ccount = 0; 357 el = &next_eb->h_list;
789 358
790 if ((v_cpos + count) > OCFS2_I(inode)->ip_clusters) 359 i = ocfs2_search_for_hole_index(el, v_cluster);
791 return -EINVAL; 360 }
792 361
793 if ((v_cpos + count) > em->em_clusters) { 362no_more_extents:
363 if (i == le16_to_cpu(el->l_next_free_rec)) {
794 /* 364 /*
795 * Size changed underneath us on disk. Drop any 365 * We're at the end of our existing allocation. Just
796 * straddling records and update our idea of 366 * return the maximum number of clusters we could
797 * i_clusters 367 * possibly allocate.
798 */ 368 */
799 ocfs2_extent_map_drop(inode, em->em_clusters - 1); 369 *num_clusters = UINT_MAX - v_cluster;
800 em->em_clusters = OCFS2_I(inode)->ip_clusters; 370 } else {
371 *num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
801 } 372 }
802 373
374 ret = 0;
375out:
376 brelse(next_eb_bh);
377 return ret;
378}
803 379
804 ret = ocfs2_extent_map_lookup_read(inode, v_cpos, count, &ent); 380/*
805 if (ret) 381 * Return the index of the extent record which contains cluster #v_cluster.
806 return ret; 382 * -1 is returned if it was not found.
383 *
384 * Should work fine on interior and exterior nodes.
385 */
386static int ocfs2_search_extent_list(struct ocfs2_extent_list *el,
387 u32 v_cluster)
388{
389 int ret = -1;
390 int i;
391 struct ocfs2_extent_rec *rec;
392 u32 rec_end, rec_start, clusters;
807 393
808 if (ent) { 394 for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
809 /* We should never find ourselves straddling an interval */ 395 rec = &el->l_recs[i];
810 if (!ocfs2_extent_rec_contains_clusters(&ent->e_rec,
811 v_cpos,
812 count))
813 return -ESRCH;
814 396
815 coff = v_cpos - le32_to_cpu(ent->e_rec.e_cpos); 397 rec_start = le32_to_cpu(rec->e_cpos);
816 *p_cpos = ocfs2_blocks_to_clusters(inode->i_sb, 398 clusters = ocfs2_rec_clusters(el, rec);
817 le64_to_cpu(ent->e_rec.e_blkno)) +
818 coff;
819 399
820 if (ret_count) 400 rec_end = rec_start + clusters;
821 *ret_count = le32_to_cpu(ent->e_rec.e_clusters) - coff;
822 401
823 return 0; 402 if (v_cluster >= rec_start && v_cluster < rec_end) {
403 ret = i;
404 break;
405 }
824 } 406 }
825 407
826 408 return ret;
827 return -ENOENT;
828} 409}
829 410
830#endif /* 0 */ 411int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
831 412 u32 *p_cluster, u32 *num_clusters,
832int ocfs2_extent_map_get_blocks(struct inode *inode, 413 unsigned int *extent_flags)
833 u64 v_blkno, int count,
834 u64 *p_blkno, int *ret_count)
835{ 414{
836 int ret; 415 int ret, i;
837 u64 boff; 416 unsigned int flags = 0;
838 u32 cpos, clusters; 417 struct buffer_head *di_bh = NULL;
839 int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); 418 struct buffer_head *eb_bh = NULL;
840 struct ocfs2_extent_map_entry *ent = NULL; 419 struct ocfs2_dinode *di;
841 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map; 420 struct ocfs2_extent_block *eb;
421 struct ocfs2_extent_list *el;
842 struct ocfs2_extent_rec *rec; 422 struct ocfs2_extent_rec *rec;
423 u32 coff;
843 424
844 *p_blkno = 0; 425 ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
845 426 num_clusters, extent_flags);
846 cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno); 427 if (ret == 0)
847 clusters = ocfs2_blocks_to_clusters(inode->i_sb, 428 goto out;
848 (u64)count + bpc - 1);
849 if ((cpos + clusters) > OCFS2_I(inode)->ip_clusters) {
850 ret = -EINVAL;
851 mlog_errno(ret);
852 return ret;
853 }
854
855 if ((cpos + clusters) > em->em_clusters) {
856 /*
857 * Size changed underneath us on disk. Drop any
858 * straddling records and update our idea of
859 * i_clusters
860 */
861 ocfs2_extent_map_drop(inode, em->em_clusters - 1);
862 em->em_clusters = OCFS2_I(inode)->ip_clusters;
863 }
864 429
865 ret = ocfs2_extent_map_lookup_read(inode, cpos, clusters, &ent); 430 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb), OCFS2_I(inode)->ip_blkno,
431 &di_bh, OCFS2_BH_CACHED, inode);
866 if (ret) { 432 if (ret) {
867 mlog_errno(ret); 433 mlog_errno(ret);
868 return ret; 434 goto out;
869 } 435 }
870 436
871 if (ent) 437 di = (struct ocfs2_dinode *) di_bh->b_data;
872 { 438 el = &di->id2.i_list;
873 rec = &ent->e_rec;
874 439
875 /* We should never find ourselves straddling an interval */ 440 if (el->l_tree_depth) {
876 if (!ocfs2_extent_rec_contains_clusters(rec, cpos, clusters)) { 441 ret = ocfs2_find_leaf(inode, el, v_cluster, &eb_bh);
877 ret = -ESRCH; 442 if (ret) {
878 mlog_errno(ret); 443 mlog_errno(ret);
879 return ret; 444 goto out;
880 } 445 }
881 446
882 boff = ocfs2_clusters_to_blocks(inode->i_sb, cpos - 447 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
883 le32_to_cpu(rec->e_cpos)); 448 el = &eb->h_list;
884 boff += (v_blkno & (u64)(bpc - 1));
885 *p_blkno = le64_to_cpu(rec->e_blkno) + boff;
886 449
887 if (ret_count) { 450 if (el->l_tree_depth) {
888 *ret_count = ocfs2_clusters_to_blocks(inode->i_sb, 451 ocfs2_error(inode->i_sb,
889 le32_to_cpu(rec->e_clusters)) - boff; 452 "Inode %lu has non zero tree depth in "
453 "leaf block %llu\n", inode->i_ino,
454 (unsigned long long)eb_bh->b_blocknr);
455 ret = -EROFS;
456 goto out;
890 } 457 }
891
892 return 0;
893 } 458 }
894 459
895 return -ENOENT; 460 i = ocfs2_search_extent_list(el, v_cluster);
896} 461 if (i == -1) {
897 462 /*
898int ocfs2_extent_map_init(struct inode *inode) 463 * A hole was found. Return some canned values that
899{ 464 * callers can key on. If asked for, num_clusters will
900 struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map; 465 * be populated with the size of the hole.
901 466 */
902 	em->em_extents = RB_ROOT;
903 	em->em_clusters = 0;
904
905 	return 0;
906}
907
908/* Needs the lock */
909static void __ocfs2_extent_map_drop(struct inode *inode,
910 				    u32 new_clusters,
911 				    struct rb_node **free_head,
912 				    struct ocfs2_extent_map_entry **tail_ent)
913{
914 	struct rb_node *node, *next;
915 	struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
916 	struct ocfs2_extent_map_entry *ent;
917
918 	*free_head = NULL;
919
920 	ent = NULL;
921 	node = rb_last(&em->em_extents);
922 	while (node)
923 	{
924 		next = rb_prev(node);
925
926 		ent = rb_entry(node, struct ocfs2_extent_map_entry,
927 			       e_node);
928 		if (le32_to_cpu(ent->e_rec.e_cpos) < new_clusters)
929 			break;
930
931 		rb_erase(&ent->e_node, &em->em_extents);
932
933 		node->rb_right = *free_head;
934 		*free_head = node;
935
936 		ent = NULL;
937 		node = next;
938 	}
939
940 	/* Do we have an entry straddling new_clusters? */
941 	if (tail_ent) {
942 		if (ent &&
943 		    ((le32_to_cpu(ent->e_rec.e_cpos) +
944 		      le32_to_cpu(ent->e_rec.e_clusters)) > new_clusters))
945 			*tail_ent = ent;
946 		else
947 			*tail_ent = NULL;
948 	}
949}
950
951static void __ocfs2_extent_map_drop_cleanup(struct rb_node *free_head)
952{
953 	struct rb_node *node;
954 	struct ocfs2_extent_map_entry *ent;
955
956 	while (free_head) {
957 		node = free_head;
958 		free_head = node->rb_right;
959
960 		ent = rb_entry(node, struct ocfs2_extent_map_entry,
961 			       e_node);
962 		kmem_cache_free(ocfs2_em_ent_cachep, ent);
963 	}
964}
965
966/*
967 * Remove all entries past new_clusters, inclusive of an entry that
968 * contains new_clusters.  This is effectively a cache forget.
969 *
970 * If you want to also clip the last extent by some number of clusters,
971 * you need to call ocfs2_extent_map_trunc().
972 * This code does not check or modify ip_clusters.
973 */
974int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters)
975{
976 	struct rb_node *free_head = NULL;
977 	struct ocfs2_extent_map *em = &OCFS2_I(inode)->ip_map;
978 	struct ocfs2_extent_map_entry *ent;
979
980 	spin_lock(&OCFS2_I(inode)->ip_lock);
981
982 	__ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
983
984 	if (ent) {
985 		rb_erase(&ent->e_node, &em->em_extents);
986 		ent->e_node.rb_right = free_head;
987 		free_head = &ent->e_node;
988 	}
989
990 	spin_unlock(&OCFS2_I(inode)->ip_lock);
991
992 	if (free_head)
993 		__ocfs2_extent_map_drop_cleanup(free_head);
994
995 	return 0;
996}
997
998/*
999 * Remove all entries past new_clusters and also clip any extent
1000 * straddling new_clusters, if there is one. This does not check
1001 * or modify ip_clusters
1002 */
1003int ocfs2_extent_map_trunc(struct inode *inode, u32 new_clusters)
1004{
1005 	struct rb_node *free_head = NULL;
1006 	struct ocfs2_extent_map_entry *ent = NULL;
1007
1008 	spin_lock(&OCFS2_I(inode)->ip_lock);
1009
1010 	__ocfs2_extent_map_drop(inode, new_clusters, &free_head, &ent);
1011
1012 	if (ent)
1013 		ent->e_rec.e_clusters = cpu_to_le32(new_clusters -
1014 					le32_to_cpu(ent->e_rec.e_cpos));
1015
1016 	OCFS2_I(inode)->ip_map.em_clusters = new_clusters;
1017
1018 	spin_unlock(&OCFS2_I(inode)->ip_lock);
1019
1020 	if (free_head)
1021 		__ocfs2_extent_map_drop_cleanup(free_head);
1022
1023 	return 0;
1024}
1025
1026int __init init_ocfs2_extent_maps(void)
1027{
1028 	ocfs2_em_ent_cachep =
1029 		kmem_cache_create("ocfs2_em_ent",
1030 				  sizeof(struct ocfs2_extent_map_entry),
1031 				  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
1032 	if (!ocfs2_em_ent_cachep)
1033 		return -ENOMEM;
1034
1035 	return 0;
1036}
1037
1038void exit_ocfs2_extent_maps(void)
1039{
1040 	kmem_cache_destroy(ocfs2_em_ent_cachep);
1041}
467 		*p_cluster = 0;
468 		if (num_clusters) {
469 			ret = ocfs2_figure_hole_clusters(inode, el, eb_bh,
470 							 v_cluster,
471 							 num_clusters);
472 			if (ret) {
473 				mlog_errno(ret);
474 				goto out;
475 			}
476 		}
477 	} else {
478 		rec = &el->l_recs[i];
479
480 		BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
481
482 		if (!rec->e_blkno) {
483 			ocfs2_error(inode->i_sb, "Inode %lu has bad extent "
484 				    "record (%u, %u, 0)", inode->i_ino,
485 				    le32_to_cpu(rec->e_cpos),
486 				    ocfs2_rec_clusters(el, rec));
487 			ret = -EROFS;
488 			goto out;
489 		}
490
491 		coff = v_cluster - le32_to_cpu(rec->e_cpos);
492
493 		*p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
494 						      le64_to_cpu(rec->e_blkno));
495 		*p_cluster = *p_cluster + coff;
496
497 		if (num_clusters)
498 			*num_clusters = ocfs2_rec_clusters(el, rec) - coff;
499
500 		flags = rec->e_flags;
501
502 		ocfs2_extent_map_insert_rec(inode, rec);
503 	}
504
505 	if (extent_flags)
506 		*extent_flags = flags;
507
508out:
509 	brelse(di_bh);
510 	brelse(eb_bh);
511 	return ret;
512}
513
514/*
515 * This expects alloc_sem to be held.  The allocation cannot change at
516 * all while the map is in the process of being updated.
517 */
518int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
519 				u64 *ret_count, unsigned int *extent_flags)
520{
521 	int ret;
522 	int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
523 	u32 cpos, num_clusters, p_cluster;
524 	u64 boff = 0;
525
526 	cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);
527
528 	ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
529 				 extent_flags);
530 	if (ret) {
531 		mlog_errno(ret);
532 		goto out;
533 	}
534
535 	/*
536 	 * p_cluster == 0 indicates a hole.
537 	 */
538 	if (p_cluster) {
539 		boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
540 		boff += (v_blkno & (u64)(bpc - 1));
541 	}
542
543 	*p_blkno = boff;
544
545 	if (ret_count) {
546 		*ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
547 		*ret_count -= v_blkno & (u64)(bpc - 1);
548 	}
549
550out:
551 	return ret;
552}
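
The new ocfs2_extent_map_get_blocks() above reduces block mapping to cluster arithmetic: convert the virtual block to its virtual cluster, map that cluster, then re-apply the block offset within the cluster. A minimal userspace sketch of the same arithmetic, assuming 4 KB clusters and 512-byte blocks (ocfs2 derives both sizes from the superblock; the names here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CLUSTER_BITS 12				/* assumed 4 KB clusters */
#define BLOCK_BITS 9				/* 512-byte blocks */
#define BPC (1u << (CLUSTER_BITS - BLOCK_BITS))	/* blocks per cluster */

/* Map a virtual block to a physical block, given the physical cluster
 * its virtual cluster maps to. p_cluster == 0 denotes a hole, matching
 * the convention ocfs2_get_clusters() uses. */
static uint64_t map_block(uint64_t v_blkno, uint32_t p_cluster)
{
	if (!p_cluster)
		return 0;			/* hole: nothing on disk */
	uint64_t boff = (uint64_t)p_cluster * BPC;	/* cluster start */
	boff += v_blkno & (BPC - 1);		/* offset inside cluster */
	return boff;
}

int main(void)
{
	/* Virtual block 19 lies in virtual cluster 2 (19 / 8); if that
	 * cluster maps to physical cluster 100, the block is
	 * 100 * 8 + 3 = 803. */
	printf("%llu\n", (unsigned long long)map_block(19, 100));
	return 0;
}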
diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h
index fa3745efa886..de91e3e41a22 100644
--- a/fs/ocfs2/extent_map.h
+++ b/fs/ocfs2/extent_map.h
@@ -25,22 +25,29 @@
25#ifndef _EXTENT_MAP_H 25#ifndef _EXTENT_MAP_H
26#define _EXTENT_MAP_H 26#define _EXTENT_MAP_H
27 27
28int init_ocfs2_extent_maps(void); 28struct ocfs2_extent_map_item {
29void exit_ocfs2_extent_maps(void); 29 unsigned int ei_cpos;
30 unsigned int ei_phys;
31 unsigned int ei_clusters;
32 unsigned int ei_flags;
30 33
31/* 34 struct list_head ei_list;
32 * EVERY CALL here except _init, _trunc, and _drop expects alloc_sem 35};
33 * to be held. The allocation cannot change at all while the map is 36
34 * in the process of being updated. 37#define OCFS2_MAX_EXTENT_MAP_ITEMS 3
35 */ 38struct ocfs2_extent_map {
36int ocfs2_extent_map_init(struct inode *inode); 39 unsigned int em_num_items;
37int ocfs2_extent_map_append(struct inode *inode, 40 struct list_head em_list;
38 struct ocfs2_extent_rec *rec, 41};
39 u32 new_clusters); 42
40int ocfs2_extent_map_get_blocks(struct inode *inode, 43void ocfs2_extent_map_init(struct inode *inode);
41 u64 v_blkno, int count, 44void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cluster);
42 u64 *p_blkno, int *ret_count); 45void ocfs2_extent_map_insert_rec(struct inode *inode,
43int ocfs2_extent_map_drop(struct inode *inode, u32 new_clusters); 46 struct ocfs2_extent_rec *rec);
44int ocfs2_extent_map_trunc(struct inode *inode, u32 new_clusters); 47
48int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster,
49 u32 *num_clusters, unsigned int *extent_flags);
50int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
51 u64 *ret_count, unsigned int *extent_flags);
45 52
46#endif /* _EXTENT_MAP_H */ 53#endif /* _EXTENT_MAP_H */
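
The ocfs2_extent_map_item/ocfs2_extent_map pair above replaces the old rb-tree cache with a bounded most-recently-used list of at most three extents. A hedged sketch of how such a tiny MRU cache behaves, with the kernel's list_head simplified to an array (all names here are hypothetical):

#include <stdint.h>
#include <string.h>

#define MAX_ITEMS 3	/* mirrors OCFS2_MAX_EXTENT_MAP_ITEMS */

struct em_item {
	uint32_t cpos;		/* first virtual cluster covered */
	uint32_t phys;		/* first physical cluster */
	uint32_t clusters;	/* length of the extent */
};

struct em_cache {
	int n;
	struct em_item items[MAX_ITEMS];	/* items[0] is most recent */
};

/* Look up a virtual cluster; on a hit, promote the item to the front. */
static int em_lookup(struct em_cache *em, uint32_t v, struct em_item *out)
{
	for (int i = 0; i < em->n; i++) {
		struct em_item it = em->items[i];
		if (v >= it.cpos && v < it.cpos + it.clusters) {
			/* MRU promote: shift items[0..i-1] down one. */
			memmove(&em->items[1], &em->items[0],
				i * sizeof(it));
			em->items[0] = it;
			*out = it;
			return 1;
		}
	}
	return 0;
}

/* Insert a freshly read record, evicting the least recent on overflow. */
static void em_insert(struct em_cache *em, struct em_item it)
{
	int keep = em->n < MAX_ITEMS ? em->n : MAX_ITEMS - 1;

	memmove(&em->items[1], &em->items[0], keep * sizeof(it));
	em->items[0] = it;
	if (em->n < MAX_ITEMS)
		em->n++;
}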
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f2cd3bf9efb2..520a2a6d7670 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -33,6 +33,7 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/pipe_fs_i.h> 34#include <linux/pipe_fs_i.h>
35#include <linux/mount.h> 35#include <linux/mount.h>
36#include <linux/writeback.h>
36 37
37#define MLOG_MASK_PREFIX ML_INODE 38#define MLOG_MASK_PREFIX ML_INODE
38#include <cluster/masklog.h> 39#include <cluster/masklog.h>
@@ -215,7 +216,7 @@ int ocfs2_set_inode_size(handle_t *handle,
215 216
216 mlog_entry_void(); 217 mlog_entry_void();
217 i_size_write(inode, new_i_size); 218 i_size_write(inode, new_i_size);
218 inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size); 219 inode->i_blocks = ocfs2_inode_sector_count(inode);
219 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 220 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
220 221
221 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); 222 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
@@ -261,6 +262,7 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
261{ 262{
262 int status; 263 int status;
263 handle_t *handle; 264 handle_t *handle;
265 struct ocfs2_dinode *di;
264 266
265 mlog_entry_void(); 267 mlog_entry_void();
266 268
@@ -274,12 +276,39 @@ static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
274 goto out; 276 goto out;
275 } 277 }
276 278
277 status = ocfs2_set_inode_size(handle, inode, fe_bh, new_i_size); 279 status = ocfs2_journal_access(handle, inode, fe_bh,
280 OCFS2_JOURNAL_ACCESS_WRITE);
281 if (status < 0) {
282 mlog_errno(status);
283 goto out_commit;
284 }
285
286 /*
287 * Do this before setting i_size.
288 */
289 status = ocfs2_zero_tail_for_truncate(inode, handle, new_i_size);
290 if (status) {
291 mlog_errno(status);
292 goto out_commit;
293 }
294
295 i_size_write(inode, new_i_size);
296 inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
297 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
298
299 di = (struct ocfs2_dinode *) fe_bh->b_data;
300 di->i_size = cpu_to_le64(new_i_size);
301 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
302 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
303
304 status = ocfs2_journal_dirty(handle, fe_bh);
278 if (status < 0) 305 if (status < 0)
279 mlog_errno(status); 306 mlog_errno(status);
280 307
308out_commit:
281 ocfs2_commit_trans(osb, handle); 309 ocfs2_commit_trans(osb, handle);
282out: 310out:
311
283 mlog_exit(status); 312 mlog_exit(status);
284 return status; 313 return status;
285} 314}
@@ -342,19 +371,6 @@ static int ocfs2_truncate_file(struct inode *inode,
342 mlog_errno(status); 371 mlog_errno(status);
343 goto bail; 372 goto bail;
344 } 373 }
345 ocfs2_data_unlock(inode, 1);
346
347 if (le32_to_cpu(fe->i_clusters) ==
348 ocfs2_clusters_for_bytes(osb->sb, new_i_size)) {
349 mlog(0, "fe->i_clusters = %u, so we do a simple truncate\n",
350 fe->i_clusters);
351 /* No allocation change is required, so lets fast path
352 * this truncate. */
353 status = ocfs2_simple_size_update(inode, di_bh, new_i_size);
354 if (status < 0)
355 mlog_errno(status);
356 goto bail;
357 }
358 374
359 /* alright, we're going to need to do a full blown alloc size 375 /* alright, we're going to need to do a full blown alloc size
360 * change. Orphan the inode so that recovery can complete the 376 * change. Orphan the inode so that recovery can complete the
@@ -363,22 +379,25 @@ static int ocfs2_truncate_file(struct inode *inode,
363 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size); 379 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
364 if (status < 0) { 380 if (status < 0) {
365 mlog_errno(status); 381 mlog_errno(status);
366 goto bail; 382 goto bail_unlock_data;
367 } 383 }
368 384
369 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc); 385 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
370 if (status < 0) { 386 if (status < 0) {
371 mlog_errno(status); 387 mlog_errno(status);
372 goto bail; 388 goto bail_unlock_data;
373 } 389 }
374 390
375 status = ocfs2_commit_truncate(osb, inode, di_bh, tc); 391 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
376 if (status < 0) { 392 if (status < 0) {
377 mlog_errno(status); 393 mlog_errno(status);
378 goto bail; 394 goto bail_unlock_data;
379 } 395 }
380 396
381 /* TODO: orphan dir cleanup here. */ 397 /* TODO: orphan dir cleanup here. */
398bail_unlock_data:
399 ocfs2_data_unlock(inode, 1);
400
382bail: 401bail:
383 402
384 mlog_exit(status); 403 mlog_exit(status);
@@ -397,6 +416,7 @@ bail:
397 */ 416 */
398int ocfs2_do_extend_allocation(struct ocfs2_super *osb, 417int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
399 struct inode *inode, 418 struct inode *inode,
419 u32 *logical_offset,
400 u32 clusters_to_add, 420 u32 clusters_to_add,
401 struct buffer_head *fe_bh, 421 struct buffer_head *fe_bh,
402 handle_t *handle, 422 handle_t *handle,
@@ -460,18 +480,14 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
460 block = ocfs2_clusters_to_blocks(osb->sb, bit_off); 480 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
461 mlog(0, "Allocating %u clusters at block %u for inode %llu\n", 481 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
462 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno); 482 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
463 status = ocfs2_insert_extent(osb, handle, inode, fe_bh, block, 483 status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
464 num_bits, meta_ac); 484 *logical_offset, block, num_bits,
485 meta_ac);
465 if (status < 0) { 486 if (status < 0) {
466 mlog_errno(status); 487 mlog_errno(status);
467 goto leave; 488 goto leave;
468 } 489 }
469 490
470 le32_add_cpu(&fe->i_clusters, num_bits);
471 spin_lock(&OCFS2_I(inode)->ip_lock);
472 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
473 spin_unlock(&OCFS2_I(inode)->ip_lock);
474
475 status = ocfs2_journal_dirty(handle, fe_bh); 491 status = ocfs2_journal_dirty(handle, fe_bh);
476 if (status < 0) { 492 if (status < 0) {
477 mlog_errno(status); 493 mlog_errno(status);
@@ -479,6 +495,7 @@ int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
479 } 495 }
480 496
481 clusters_to_add -= num_bits; 497 clusters_to_add -= num_bits;
498 *logical_offset += num_bits;
482 499
483 if (clusters_to_add) { 500 if (clusters_to_add) {
484 mlog(0, "need to alloc once more, clusters = %u, wanted = " 501 mlog(0, "need to alloc once more, clusters = %u, wanted = "
@@ -494,14 +511,87 @@ leave:
494 return status; 511 return status;
495} 512}
496 513
514/*
515 * For a given allocation, determine which allocators will need to be
516 * accessed, and lock them, reserving the appropriate number of bits.
517 *
518 * Called from ocfs2_extend_allocation() for file systems which don't
519 * support holes, and from ocfs2_write() for file systems which
520 * understand sparse inodes.
521 */
522int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
523 u32 clusters_to_add,
524 struct ocfs2_alloc_context **data_ac,
525 struct ocfs2_alloc_context **meta_ac)
526{
527 int ret, num_free_extents;
528 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
529
530 *meta_ac = NULL;
531 *data_ac = NULL;
532
533 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
534 "clusters_to_add = %u\n",
535 (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
536 le32_to_cpu(di->i_clusters), clusters_to_add);
537
538 num_free_extents = ocfs2_num_free_extents(osb, inode, di);
539 if (num_free_extents < 0) {
540 ret = num_free_extents;
541 mlog_errno(ret);
542 goto out;
543 }
544
545 /*
546 * Sparse allocation file systems need to be more conservative
547 * with reserving room for expansion - the actual allocation
548 * happens while we've got a journal handle open so re-taking
549 * a cluster lock (because we ran out of room for another
550 * extent) will violate ordering rules.
551 *
552 * Most of the time we'll only be seeing this 1 cluster at a time
553 * anyway.
554 */
555 if (!num_free_extents ||
556 (ocfs2_sparse_alloc(osb) && num_free_extents < clusters_to_add)) {
557 ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
558 if (ret < 0) {
559 if (ret != -ENOSPC)
560 mlog_errno(ret);
561 goto out;
562 }
563 }
564
565 ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
566 if (ret < 0) {
567 if (ret != -ENOSPC)
568 mlog_errno(ret);
569 goto out;
570 }
571
572out:
573 if (ret) {
574 if (*meta_ac) {
575 ocfs2_free_alloc_context(*meta_ac);
576 *meta_ac = NULL;
577 }
578
579 /*
580 * We cannot have an error and a non-null *data_ac.
581 */
582 }
583
584 return ret;
585}
586
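
The reservation policy in ocfs2_lock_allocators() above boils down to one predicate: reserve metadata when the extent list is full, and, on sparse-capable volumes, whenever the worst case of one extent record per new cluster might not fit, since the cluster lock cannot be retaken while a journal handle is open. A condensed model of just that decision (a sketch, not the kernel code):

#include <stdbool.h>

/* Sketch of the metadata-reservation predicate: reserve when no free
 * extent records remain, or -- on sparse volumes, where we cannot
 * retake cluster locks mid-transaction -- whenever one record per new
 * cluster might overflow the extent list. */
static bool need_meta_reservation(bool sparse_alloc, int num_free_extents,
				  unsigned int clusters_to_add)
{
	if (num_free_extents == 0)
		return true;
	if (sparse_alloc && (unsigned int)num_free_extents < clusters_to_add)
		return true;
	return false;
}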
497static int ocfs2_extend_allocation(struct inode *inode, 587static int ocfs2_extend_allocation(struct inode *inode,
498 u32 clusters_to_add) 588 u32 clusters_to_add)
499{ 589{
500 int status = 0; 590 int status = 0;
501 int restart_func = 0; 591 int restart_func = 0;
502 int drop_alloc_sem = 0; 592 int drop_alloc_sem = 0;
503 int credits, num_free_extents; 593 int credits;
504 u32 prev_clusters; 594 u32 prev_clusters, logical_start;
505 struct buffer_head *bh = NULL; 595 struct buffer_head *bh = NULL;
506 struct ocfs2_dinode *fe = NULL; 596 struct ocfs2_dinode *fe = NULL;
507 handle_t *handle = NULL; 597 handle_t *handle = NULL;
@@ -512,6 +602,12 @@ static int ocfs2_extend_allocation(struct inode *inode,
512 602
513 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add); 603 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
514 604
605 /*
606 * This function only exists for file systems which don't
607 * support holes.
608 */
609 BUG_ON(ocfs2_sparse_alloc(osb));
610
515 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh, 611 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
516 OCFS2_BH_CACHED, inode); 612 OCFS2_BH_CACHED, inode);
517 if (status < 0) { 613 if (status < 0) {
@@ -526,39 +622,11 @@ static int ocfs2_extend_allocation(struct inode *inode,
526 goto leave; 622 goto leave;
527 } 623 }
528 624
625 logical_start = OCFS2_I(inode)->ip_clusters;
626
529restart_all: 627restart_all:
530 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters); 628 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
531 629
532 mlog(0, "extend inode %llu, i_size = %lld, fe->i_clusters = %u, "
533 "clusters_to_add = %u\n",
534 (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
535 fe->i_clusters, clusters_to_add);
536
537 num_free_extents = ocfs2_num_free_extents(osb,
538 inode,
539 fe);
540 if (num_free_extents < 0) {
541 status = num_free_extents;
542 mlog_errno(status);
543 goto leave;
544 }
545
546 if (!num_free_extents) {
547 status = ocfs2_reserve_new_metadata(osb, fe, &meta_ac);
548 if (status < 0) {
549 if (status != -ENOSPC)
550 mlog_errno(status);
551 goto leave;
552 }
553 }
554
555 status = ocfs2_reserve_clusters(osb, clusters_to_add, &data_ac);
556 if (status < 0) {
557 if (status != -ENOSPC)
558 mlog_errno(status);
559 goto leave;
560 }
561
562 /* blocks people in read/write from reading our allocation 630 /* blocks people in read/write from reading our allocation
563 * until we're done changing it. We depend on i_mutex to block 631 * until we're done changing it. We depend on i_mutex to block
564 * other extend/truncate calls while we're here. Ordering wrt 632 * other extend/truncate calls while we're here. Ordering wrt
@@ -566,6 +634,13 @@ restart_all:
566 down_write(&OCFS2_I(inode)->ip_alloc_sem); 634 down_write(&OCFS2_I(inode)->ip_alloc_sem);
567 drop_alloc_sem = 1; 635 drop_alloc_sem = 1;
568 636
637 status = ocfs2_lock_allocators(inode, fe, clusters_to_add, &data_ac,
638 &meta_ac);
639 if (status) {
640 mlog_errno(status);
641 goto leave;
642 }
643
569 credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add); 644 credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
570 handle = ocfs2_start_trans(osb, credits); 645 handle = ocfs2_start_trans(osb, credits);
571 if (IS_ERR(handle)) { 646 if (IS_ERR(handle)) {
@@ -590,6 +665,7 @@ restarted_transaction:
590 665
591 status = ocfs2_do_extend_allocation(osb, 666 status = ocfs2_do_extend_allocation(osb,
592 inode, 667 inode,
668 &logical_start,
593 clusters_to_add, 669 clusters_to_add,
594 bh, 670 bh,
595 handle, 671 handle,
@@ -778,7 +854,7 @@ static int ocfs2_extend_file(struct inode *inode,
778 size_t tail_to_skip) 854 size_t tail_to_skip)
779{ 855{
780 int ret = 0; 856 int ret = 0;
781 u32 clusters_to_add; 857 u32 clusters_to_add = 0;
782 858
783 BUG_ON(!tail_to_skip && !di_bh); 859 BUG_ON(!tail_to_skip && !di_bh);
784 860
@@ -790,6 +866,11 @@ static int ocfs2_extend_file(struct inode *inode,
790 goto out; 866 goto out;
791 BUG_ON(new_i_size < i_size_read(inode)); 867 BUG_ON(new_i_size < i_size_read(inode));
792 868
869 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
870 BUG_ON(tail_to_skip != 0);
871 goto out_update_size;
872 }
873
793 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) - 874 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
794 OCFS2_I(inode)->ip_clusters; 875 OCFS2_I(inode)->ip_clusters;
795 876
@@ -825,6 +906,7 @@ static int ocfs2_extend_file(struct inode *inode,
825 goto out_unlock; 906 goto out_unlock;
826 } 907 }
827 908
909out_update_size:
828 if (!tail_to_skip) { 910 if (!tail_to_skip) {
829 /* We're being called from ocfs2_setattr() which wants 911 /* We're being called from ocfs2_setattr() which wants
830 * us to update i_size */ 912 * us to update i_size */
@@ -834,7 +916,8 @@ static int ocfs2_extend_file(struct inode *inode,
834 } 916 }
835 917
836out_unlock: 918out_unlock:
837 ocfs2_data_unlock(inode, 1); 919 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
920 ocfs2_data_unlock(inode, 1);
838 921
839out: 922out:
840 return ret; 923 return ret;
@@ -972,7 +1055,8 @@ int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
972 1055
973 ret = ocfs2_meta_lock(inode, NULL, 0); 1056 ret = ocfs2_meta_lock(inode, NULL, 0);
974 if (ret) { 1057 if (ret) {
975 mlog_errno(ret); 1058 if (ret != -ENOENT)
1059 mlog_errno(ret);
976 goto out; 1060 goto out;
977 } 1061 }
978 1062
@@ -1035,10 +1119,49 @@ out:
1035 return ret; 1119 return ret;
1036} 1120}
1037 1121
1122/*
1123 * Will look for holes and unwritten extents in the range starting at
1124 * pos for count bytes (inclusive).
1125 */
1126static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1127 size_t count)
1128{
1129 int ret = 0;
1130 unsigned int extent_flags;
1131 u32 cpos, clusters, extent_len, phys_cpos;
1132 struct super_block *sb = inode->i_sb;
1133
1134 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1135 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1136
1137 while (clusters) {
1138 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1139 &extent_flags);
1140 if (ret < 0) {
1141 mlog_errno(ret);
1142 goto out;
1143 }
1144
1145 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1146 ret = 1;
1147 break;
1148 }
1149
1150 if (extent_len > clusters)
1151 extent_len = clusters;
1152
1153 clusters -= extent_len;
1154 cpos += extent_len;
1155 }
1156out:
1157 return ret;
1158}
1159
1038static int ocfs2_prepare_inode_for_write(struct dentry *dentry, 1160static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1039 loff_t *ppos, 1161 loff_t *ppos,
1040 size_t count, 1162 size_t count,
1041 int appending) 1163 int appending,
1164 int *direct_io)
1042{ 1165{
1043 int ret = 0, meta_level = appending; 1166 int ret = 0, meta_level = appending;
1044 struct inode *inode = dentry->d_inode; 1167 struct inode *inode = dentry->d_inode;
@@ -1089,6 +1212,49 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1089 } else { 1212 } else {
1090 saved_pos = *ppos; 1213 saved_pos = *ppos;
1091 } 1214 }
1215
1216 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
1217 loff_t end = saved_pos + count;
1218
1219 /*
1220 * Skip the O_DIRECT checks if we don't need
1221 * them.
1222 */
1223 if (!direct_io || !(*direct_io))
1224 break;
1225
1226 /*
1227 * Allowing concurrent direct writes means
1228 * i_size changes wouldn't be synchronized, so
1229 * one node could wind up truncating another
1230 * node's writes.
1231 */
1232 if (end > i_size_read(inode)) {
1233 *direct_io = 0;
1234 break;
1235 }
1236
1237 /*
1238 * We don't fill holes during direct io, so
1239 * check for them here. If any are found, the
1240 * caller will have to retake some cluster
1241 * locks and initiate the io as buffered.
1242 */
1243 ret = ocfs2_check_range_for_holes(inode, saved_pos,
1244 count);
1245 if (ret == 1) {
1246 *direct_io = 0;
1247 ret = 0;
1248 } else if (ret < 0)
1249 mlog_errno(ret);
1250 break;
1251 }
1252
1253 /*
1254 * The rest of this loop is concerned with legacy file
1255 * systems which don't support sparse files.
1256 */
1257
1092 newsize = count + saved_pos; 1258 newsize = count + saved_pos;
1093 1259
1094 mlog(0, "pos=%lld newsize=%lld cursize=%lld\n", 1260 mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
@@ -1141,55 +1307,264 @@ out:
1141 return ret; 1307 return ret;
1142} 1308}
1143 1309
1310static inline void
1311ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
1312{
1313 const struct iovec *iov = *iovp;
1314 size_t base = *basep;
1315
1316 do {
1317 int copy = min(bytes, iov->iov_len - base);
1318
1319 bytes -= copy;
1320 base += copy;
1321 if (iov->iov_len == base) {
1322 iov++;
1323 base = 0;
1324 }
1325 } while (bytes);
1326 *iovp = iov;
1327 *basep = base;
1328}
1329
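
ocfs2_set_next_iovec() above is pure bookkeeping: consume a byte count across an iovec array so that (iov, base) ends up pointing at the first unconsumed byte. The equivalent walk in userspace, using struct iovec from <sys/uio.h> (a sketch, not the kernel helper):

#include <stddef.h>
#include <sys/uio.h>

/* Advance (*iovp, *basep) by `bytes` across an iovec array. The caller
 * guarantees the array holds at least `bytes` more data. */
static void advance_iovec(const struct iovec **iovp, size_t *basep,
			  size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t base = *basep;

	while (bytes) {
		size_t copy = iov->iov_len - base;

		if (copy > bytes)
			copy = bytes;
		bytes -= copy;
		base += copy;
		if (base == iov->iov_len) {	/* segment exhausted */
			iov++;
			base = 0;
		}
	}
	*iovp = iov;
	*basep = base;
}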
1330static struct page * ocfs2_get_write_source(struct ocfs2_buffered_write_priv *bp,
1331 const struct iovec *cur_iov,
1332 size_t iov_offset)
1333{
1334 int ret;
1335 char *buf;
1336 struct page *src_page = NULL;
1337
1338 buf = cur_iov->iov_base + iov_offset;
1339
1340 if (!segment_eq(get_fs(), KERNEL_DS)) {
1341 /*
1342 * Pull in the user page. We want to do this outside
1343 * of the meta data locks in order to preserve locking
1344 * order in case of page fault.
1345 */
1346 ret = get_user_pages(current, current->mm,
1347 (unsigned long)buf & PAGE_CACHE_MASK, 1,
1348 0, 0, &src_page, NULL);
1349 if (ret == 1)
1350 bp->b_src_buf = kmap(src_page);
1351 else
1352 src_page = ERR_PTR(-EFAULT);
1353 } else {
1354 bp->b_src_buf = buf;
1355 }
1356
1357 return src_page;
1358}
1359
1360static void ocfs2_put_write_source(struct ocfs2_buffered_write_priv *bp,
1361 struct page *page)
1362{
1363 if (page) {
1364 kunmap(page);
1365 page_cache_release(page);
1366 }
1367}
1368
1369static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
1370 const struct iovec *iov,
1371 unsigned long nr_segs,
1372 size_t count,
1373 ssize_t o_direct_written)
1374{
1375 int ret = 0;
1376 ssize_t copied, total = 0;
1377 size_t iov_offset = 0;
1378 const struct iovec *cur_iov = iov;
1379 struct ocfs2_buffered_write_priv bp;
1380 struct page *page;
1381
1382 /*
1383 * handle partial DIO write. Adjust cur_iov if needed.
1384 */
1385 ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);
1386
1387 do {
1388 bp.b_cur_off = iov_offset;
1389 bp.b_cur_iov = cur_iov;
1390
1391 page = ocfs2_get_write_source(&bp, cur_iov, iov_offset);
1392 if (IS_ERR(page)) {
1393 ret = PTR_ERR(page);
1394 goto out;
1395 }
1396
1397 copied = ocfs2_buffered_write_cluster(file, *ppos, count,
1398 ocfs2_map_and_write_user_data,
1399 &bp);
1400
1401 ocfs2_put_write_source(&bp, page);
1402
1403 if (copied < 0) {
1404 mlog_errno(copied);
1405 ret = copied;
1406 goto out;
1407 }
1408
1409 total += copied;
1410 *ppos = *ppos + copied;
1411 count -= copied;
1412
1413 ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);
1414 } while(count);
1415
1416out:
1417 return total ? total : ret;
1418}
1419
1420static int ocfs2_check_iovec(const struct iovec *iov, size_t *counted,
1421 unsigned long *nr_segs)
1422{
1423 size_t ocount; /* original count */
1424 unsigned long seg;
1425
1426 ocount = 0;
1427 for (seg = 0; seg < *nr_segs; seg++) {
1428 const struct iovec *iv = &iov[seg];
1429
1430 /*
1431 * If any segment has a negative length, or the cumulative
1432 * length ever wraps negative then return -EINVAL.
1433 */
1434 ocount += iv->iov_len;
1435 if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
1436 return -EINVAL;
1437 if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
1438 continue;
1439 if (seg == 0)
1440 return -EFAULT;
1441 *nr_segs = seg;
1442 ocount -= iv->iov_len; /* This segment is no good */
1443 break;
1444 }
1445
1446 *counted = ocount;
1447 return 0;
1448}
1449
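
The `(ssize_t)(ocount | iv->iov_len) < 0` test in ocfs2_check_iovec() folds two failure modes into one branch: if either the running total or the current segment length has its sign bit set, the bitwise OR is negative too. A small standalone demonstration:

#include <stdio.h>
#include <sys/types.h>

/* If either value has its top bit set -- a "negative" segment length or
 * a total that wrapped past SSIZE_MAX -- the OR of the two is negative. */
static int len_invalid(size_t total, size_t seg_len)
{
	return (ssize_t)(total | seg_len) < 0;
}

int main(void)
{
	printf("%d\n", len_invalid(100, 200));		/* 0: both sane */
	printf("%d\n", len_invalid(100, (size_t)-5));	/* 1: bogus length */
	return 0;
}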
1144static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, 1450static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1145 const struct iovec *iov, 1451 const struct iovec *iov,
1146 unsigned long nr_segs, 1452 unsigned long nr_segs,
1147 loff_t pos) 1453 loff_t pos)
1148{ 1454{
1149 int ret, rw_level, have_alloc_sem = 0; 1455 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1150 struct file *filp = iocb->ki_filp; 1456 int can_do_direct, sync = 0;
1151 struct inode *inode = filp->f_path.dentry->d_inode; 1457 ssize_t written = 0;
1152 int appending = filp->f_flags & O_APPEND ? 1 : 0; 1458 size_t ocount; /* original count */
1153 1459 size_t count; /* after file limit checks */
1154 mlog_entry("(0x%p, %u, '%.*s')\n", filp, 1460 loff_t *ppos = &iocb->ki_pos;
1461 struct file *file = iocb->ki_filp;
1462 struct inode *inode = file->f_path.dentry->d_inode;
1463
1464 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1155 (unsigned int)nr_segs, 1465 (unsigned int)nr_segs,
1156 filp->f_path.dentry->d_name.len, 1466 file->f_path.dentry->d_name.len,
1157 filp->f_path.dentry->d_name.name); 1467 file->f_path.dentry->d_name.name);
1158 1468
1159 /* happy write of zero bytes */
1160 if (iocb->ki_left == 0) 1469 if (iocb->ki_left == 0)
1161 return 0; 1470 return 0;
1162 1471
1472 ret = ocfs2_check_iovec(iov, &ocount, &nr_segs);
1473 if (ret)
1474 return ret;
1475
1476 count = ocount;
1477
1478 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1479
1480 appending = file->f_flags & O_APPEND ? 1 : 0;
1481 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1482
1163 mutex_lock(&inode->i_mutex); 1483 mutex_lock(&inode->i_mutex);
1484
1485relock:
1164 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ 1486 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1165 if (filp->f_flags & O_DIRECT) { 1487 if (direct_io) {
1166 have_alloc_sem = 1;
1167 down_read(&inode->i_alloc_sem); 1488 down_read(&inode->i_alloc_sem);
1489 have_alloc_sem = 1;
1168 } 1490 }
1169 1491
1170 /* concurrent O_DIRECT writes are allowed */ 1492 /* concurrent O_DIRECT writes are allowed */
1171 rw_level = (filp->f_flags & O_DIRECT) ? 0 : 1; 1493 rw_level = !direct_io;
1172 ret = ocfs2_rw_lock(inode, rw_level); 1494 ret = ocfs2_rw_lock(inode, rw_level);
1173 if (ret < 0) { 1495 if (ret < 0) {
1174 rw_level = -1;
1175 mlog_errno(ret); 1496 mlog_errno(ret);
1176 goto out; 1497 goto out_sems;
1177 } 1498 }
1178 1499
1179 ret = ocfs2_prepare_inode_for_write(filp->f_path.dentry, &iocb->ki_pos, 1500 can_do_direct = direct_io;
1180 iocb->ki_left, appending); 1501 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1502 iocb->ki_left, appending,
1503 &can_do_direct);
1181 if (ret < 0) { 1504 if (ret < 0) {
1182 mlog_errno(ret); 1505 mlog_errno(ret);
1183 goto out; 1506 goto out;
1184 } 1507 }
1185 1508
1186 /* communicate with ocfs2_dio_end_io */ 1509 /*
1187 ocfs2_iocb_set_rw_locked(iocb); 1510 * We can't complete the direct I/O as requested, fall back to
1511 * buffered I/O.
1512 */
1513 if (direct_io && !can_do_direct) {
1514 ocfs2_rw_unlock(inode, rw_level);
1515 up_read(&inode->i_alloc_sem);
1516
1517 have_alloc_sem = 0;
1518 rw_level = -1;
1188 1519
1189 ret = generic_file_aio_write_nolock(iocb, iov, nr_segs, iocb->ki_pos); 1520 direct_io = 0;
1521 sync = 1;
1522 goto relock;
1523 }
1524
1525 if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
1526 sync = 1;
1527
1528 /*
1529 * XXX: Is it ok to execute these checks a second time?
1530 */
1531 ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
1532 if (ret)
1533 goto out;
1534
1535 /*
1536 * Set pos so that sync_page_range_nolock() below understands
1537 * where to start from. We might've moved it around via the
1538 * calls above. The range we want to actually sync starts from
1539 * *ppos here.
1540 *
1541 */
1542 pos = *ppos;
1543
1544 /* communicate with ocfs2_dio_end_io */
1545 ocfs2_iocb_set_rw_locked(iocb, rw_level);
1546
1547 if (direct_io) {
1548 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
1549 ppos, count, ocount);
1550 if (written < 0) {
1551 ret = written;
1552 goto out_dio;
1553 }
1554 } else {
1555 written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
1556 count, written);
1557 if (written < 0) {
1558 ret = written;
1559 if (ret != -EFAULT && ret != -ENOSPC)
1560 mlog_errno(ret);
1561 goto out;
1562 }
1563 }
1190 1564
1565out_dio:
1191 /* buffered aio wouldn't have proper lock coverage today */ 1566 /* buffered aio wouldn't have proper lock coverage today */
1192 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT)); 1567 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
1193 1568
1194 /* 1569 /*
1195 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io 1570 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
@@ -1207,13 +1582,102 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1207 } 1582 }
1208 1583
1209out: 1584out:
1585 if (rw_level != -1)
1586 ocfs2_rw_unlock(inode, rw_level);
1587
1588out_sems:
1210 if (have_alloc_sem) 1589 if (have_alloc_sem)
1211 up_read(&inode->i_alloc_sem); 1590 up_read(&inode->i_alloc_sem);
1212 if (rw_level != -1) 1591
1213 ocfs2_rw_unlock(inode, rw_level); 1592 if (written > 0 && sync) {
1593 ssize_t err;
1594
1595 err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
1596 if (err < 0)
1597 written = err;
1598 }
1599
1214 mutex_unlock(&inode->i_mutex); 1600 mutex_unlock(&inode->i_mutex);
1215 1601
1216 mlog_exit(ret); 1602 mlog_exit(ret);
1603 return written ? written : ret;
1604}
1605
1606static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
1607 struct pipe_buffer *buf,
1608 struct splice_desc *sd)
1609{
1610 int ret, count, total = 0;
1611 ssize_t copied = 0;
1612 struct ocfs2_splice_write_priv sp;
1613
1614 ret = buf->ops->pin(pipe, buf);
1615 if (ret)
1616 goto out;
1617
1618 sp.s_sd = sd;
1619 sp.s_buf = buf;
1620 sp.s_pipe = pipe;
1621 sp.s_offset = sd->pos & ~PAGE_CACHE_MASK;
1622 sp.s_buf_offset = buf->offset;
1623
1624 count = sd->len;
1625 if (count + sp.s_offset > PAGE_CACHE_SIZE)
1626 count = PAGE_CACHE_SIZE - sp.s_offset;
1627
1628 do {
1629 /*
1630 * splice wants us to copy up to one page at a
1631 * time. For pagesize > cluster size, this means we
1632 * might enter ocfs2_buffered_write_cluster() more
1633 * than once, so keep track of our progress here.
1634 */
1635 copied = ocfs2_buffered_write_cluster(sd->file,
1636 (loff_t)sd->pos + total,
1637 count,
1638 ocfs2_map_and_write_splice_data,
1639 &sp);
1640 if (copied < 0) {
1641 mlog_errno(copied);
1642 ret = copied;
1643 goto out;
1644 }
1645
1646 count -= copied;
1647 sp.s_offset += copied;
1648 sp.s_buf_offset += copied;
1649 total += copied;
1650 } while (count);
1651
1652 ret = 0;
1653out:
1654
1655 return total ? total : ret;
1656}
1657
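
The per-pass cap in the splice actor above is the space left in the page that sd->pos falls in: count = min(sd->len, PAGE_CACHE_SIZE - offset-in-page). A worked example of that arithmetic, assuming 4 KB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumption; PAGE_CACHE_SIZE in the patch */

/* Bytes the splice actor may copy in one pass: never beyond the end of
 * the destination page that `pos` lands in. */
static uint32_t splice_chunk(uint64_t pos, uint32_t len)
{
	uint32_t offset = pos & (PAGE_SIZE - 1);

	return len + offset > PAGE_SIZE ? PAGE_SIZE - offset : len;
}

int main(void)
{
	/* Writing 4096 bytes starting 100 bytes into a page: only 3996
	 * fit this pass; the remainder is handled on the next loop. */
	printf("%u\n", splice_chunk(100, 4096));
	return 0;
}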
1658static ssize_t __ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1659 struct file *out,
1660 loff_t *ppos,
1661 size_t len,
1662 unsigned int flags)
1663{
1664 int ret, err;
1665 struct address_space *mapping = out->f_mapping;
1666 struct inode *inode = mapping->host;
1667
1668 ret = __splice_from_pipe(pipe, out, ppos, len, flags,
1669 ocfs2_splice_write_actor);
1670 if (ret > 0) {
1671 *ppos += ret;
1672
1673 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
1674 err = generic_osync_inode(inode, mapping,
1675 OSYNC_METADATA|OSYNC_DATA);
1676 if (err)
1677 ret = err;
1678 }
1679 }
1680
1217 return ret; 1681 return ret;
1218} 1682}
1219 1683
@@ -1239,14 +1703,15 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1239 goto out; 1703 goto out;
1240 } 1704 }
1241 1705
1242 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0); 1706 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
1707 NULL);
1243 if (ret < 0) { 1708 if (ret < 0) {
1244 mlog_errno(ret); 1709 mlog_errno(ret);
1245 goto out_unlock; 1710 goto out_unlock;
1246 } 1711 }
1247 1712
1248 /* ok, we're done with i_size and alloc work */ 1713 /* ok, we're done with i_size and alloc work */
1249 ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags); 1714 ret = __ocfs2_file_splice_write(pipe, out, ppos, len, flags);
1250 1715
1251out_unlock: 1716out_unlock:
1252 ocfs2_rw_unlock(inode, 1); 1717 ocfs2_rw_unlock(inode, 1);
@@ -1323,7 +1788,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
1323 } 1788 }
1324 rw_level = 0; 1789 rw_level = 0;
1325 /* communicate with ocfs2_dio_end_io */ 1790 /* communicate with ocfs2_dio_end_io */
1326 ocfs2_iocb_set_rw_locked(iocb); 1791 ocfs2_iocb_set_rw_locked(iocb, rw_level);
1327 } 1792 }
1328 1793
1329 /* 1794 /*
diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h
index cc973f01f6ce..2c4460fced52 100644
--- a/fs/ocfs2/file.h
+++ b/fs/ocfs2/file.h
@@ -39,12 +39,17 @@ enum ocfs2_alloc_restarted {
39}; 39};
40int ocfs2_do_extend_allocation(struct ocfs2_super *osb, 40int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
41 struct inode *inode, 41 struct inode *inode,
42 u32 *cluster_start,
42 u32 clusters_to_add, 43 u32 clusters_to_add,
43 struct buffer_head *fe_bh, 44 struct buffer_head *fe_bh,
44 handle_t *handle, 45 handle_t *handle,
45 struct ocfs2_alloc_context *data_ac, 46 struct ocfs2_alloc_context *data_ac,
46 struct ocfs2_alloc_context *meta_ac, 47 struct ocfs2_alloc_context *meta_ac,
47 enum ocfs2_alloc_restarted *reason); 48 enum ocfs2_alloc_restarted *reason);
49int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
50 u32 clusters_to_add,
51 struct ocfs2_alloc_context **data_ac,
52 struct ocfs2_alloc_context **meta_ac);
48int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); 53int ocfs2_setattr(struct dentry *dentry, struct iattr *attr);
49int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, 54int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
50 struct kstat *stat); 55 struct kstat *stat);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 28ab56f2b98c..21a605079c62 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -89,24 +89,6 @@ void ocfs2_set_inode_flags(struct inode *inode)
89 inode->i_flags |= S_DIRSYNC; 89 inode->i_flags |= S_DIRSYNC;
90} 90}
91 91
92struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb,
93 u64 blkno,
94 int delete_vote)
95{
96 struct ocfs2_find_inode_args args;
97
98 /* ocfs2_ilookup_for_vote should *only* be called from the
99 * vote thread */
100 BUG_ON(current != osb->vote_task);
101
102 args.fi_blkno = blkno;
103 args.fi_flags = OCFS2_FI_FLAG_NOWAIT;
104 if (delete_vote)
105 args.fi_flags |= OCFS2_FI_FLAG_DELETE;
106 args.fi_ino = ino_from_blkno(osb->sb, blkno);
107 return ilookup5(osb->sb, args.fi_ino, ocfs2_find_actor, &args);
108}
109
110struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, int flags) 92struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, int flags)
111{ 93{
112 struct inode *inode = NULL; 94 struct inode *inode = NULL;
@@ -182,28 +164,6 @@ static int ocfs2_find_actor(struct inode *inode, void *opaque)
182 if (oi->ip_blkno != args->fi_blkno) 164 if (oi->ip_blkno != args->fi_blkno)
183 goto bail; 165 goto bail;
184 166
185 /* OCFS2_FI_FLAG_NOWAIT is *only* set from
186 * ocfs2_ilookup_for_vote which won't create an inode for one
187 * that isn't found. The vote thread which doesn't want to get
188 * an inode which is in the process of going away - otherwise
189 * the call to __wait_on_freeing_inode in find_inode_fast will
190 * cause it to deadlock on an inode which may be waiting on a
191 * vote (or lock release) in delete_inode */
192 if ((args->fi_flags & OCFS2_FI_FLAG_NOWAIT) &&
193 (inode->i_state & (I_FREEING|I_CLEAR))) {
194 /* As stated above, we're not going to return an
195 * inode. In the case of a delete vote, the voting
196 * code is going to signal the other node to go
197 * ahead. Mark that state here, so this freeing inode
198 * has the state when it gets to delete_inode. */
199 if (args->fi_flags & OCFS2_FI_FLAG_DELETE) {
200 spin_lock(&oi->ip_lock);
201 ocfs2_mark_inode_remotely_deleted(inode);
202 spin_unlock(&oi->ip_lock);
203 }
204 goto bail;
205 }
206
207 ret = 1; 167 ret = 1;
208bail: 168bail:
209 mlog_exit(ret); 169 mlog_exit(ret);
@@ -261,6 +221,9 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
261 goto bail; 221 goto bail;
262 } 222 }
263 223
224 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
225 OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
226
264 inode->i_version = 1; 227 inode->i_version = 1;
265 inode->i_generation = le32_to_cpu(fe->i_generation); 228 inode->i_generation = le32_to_cpu(fe->i_generation);
266 inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev)); 229 inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
@@ -272,8 +235,7 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
272 if (S_ISLNK(inode->i_mode) && !fe->i_clusters) 235 if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
273 inode->i_blocks = 0; 236 inode->i_blocks = 0;
274 else 237 else
275 inode->i_blocks = 238 inode->i_blocks = ocfs2_inode_sector_count(inode);
276 ocfs2_align_bytes_to_sectors(le64_to_cpu(fe->i_size));
277 inode->i_mapping->a_ops = &ocfs2_aops; 239 inode->i_mapping->a_ops = &ocfs2_aops;
278 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); 240 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
279 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); 241 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
@@ -288,10 +250,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
288 (unsigned long long)OCFS2_I(inode)->ip_blkno, 250 (unsigned long long)OCFS2_I(inode)->ip_blkno,
289 (unsigned long long)fe->i_blkno); 251 (unsigned long long)fe->i_blkno);
290 252
291 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
292 OCFS2_I(inode)->ip_orphaned_slot = OCFS2_INVALID_SLOT;
293 OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
294
295 inode->i_nlink = le16_to_cpu(fe->i_links_count); 253 inode->i_nlink = le16_to_cpu(fe->i_links_count);
296 254
297 if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL)) 255 if (fe->i_flags & cpu_to_le32(OCFS2_SYSTEM_FL))
@@ -347,6 +305,9 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
347 305
348 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres, 306 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_meta_lockres,
349 OCFS2_LOCK_TYPE_META, 0, inode); 307 OCFS2_LOCK_TYPE_META, 0, inode);
308
309 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
310 OCFS2_LOCK_TYPE_OPEN, 0, inode);
350 } 311 }
351 312
352 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres, 313 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_rw_lockres,
@@ -421,7 +382,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
421 * cluster lock before trusting anything anyway. 382 * cluster lock before trusting anything anyway.
422 */ 383 */
423 can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE) 384 can_lock = !(args->fi_flags & OCFS2_FI_FLAG_SYSFILE)
424 && !(args->fi_flags & OCFS2_FI_FLAG_NOLOCK) 385 && !(args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY)
425 && !ocfs2_mount_local(osb); 386 && !ocfs2_mount_local(osb);
426 387
427 /* 388 /*
@@ -438,7 +399,17 @@ static int ocfs2_read_locked_inode(struct inode *inode,
438 OCFS2_LOCK_TYPE_META, 399 OCFS2_LOCK_TYPE_META,
439 generation, inode); 400 generation, inode);
440 401
402 ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_open_lockres,
403 OCFS2_LOCK_TYPE_OPEN,
404 0, inode);
405
441 if (can_lock) { 406 if (can_lock) {
407 status = ocfs2_open_lock(inode);
408 if (status) {
409 make_bad_inode(inode);
410 mlog_errno(status);
411 return status;
412 }
442 status = ocfs2_meta_lock(inode, NULL, 0); 413 status = ocfs2_meta_lock(inode, NULL, 0);
443 if (status) { 414 if (status) {
444 make_bad_inode(inode); 415 make_bad_inode(inode);
@@ -447,6 +418,14 @@ static int ocfs2_read_locked_inode(struct inode *inode,
447 } 418 }
448 } 419 }
449 420
421 if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
422 status = ocfs2_try_open_lock(inode, 0);
423 if (status) {
424 make_bad_inode(inode);
425 return status;
426 }
427 }
428
450 status = ocfs2_read_block(osb, args->fi_blkno, &bh, 0, 429 status = ocfs2_read_block(osb, args->fi_blkno, &bh, 0,
451 can_lock ? inode : NULL); 430 can_lock ? inode : NULL);
452 if (status < 0) { 431 if (status < 0) {
@@ -507,50 +486,56 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
507 struct buffer_head *fe_bh) 486 struct buffer_head *fe_bh)
508{ 487{
509 int status = 0; 488 int status = 0;
510 handle_t *handle = NULL;
511 struct ocfs2_truncate_context *tc = NULL; 489 struct ocfs2_truncate_context *tc = NULL;
512 struct ocfs2_dinode *fe; 490 struct ocfs2_dinode *fe;
491 handle_t *handle = NULL;
513 492
514 mlog_entry_void(); 493 mlog_entry_void();
515 494
516 fe = (struct ocfs2_dinode *) fe_bh->b_data; 495 fe = (struct ocfs2_dinode *) fe_bh->b_data;
517 496
518 /* zero allocation, zero truncate :) */ 497 if (fe->i_clusters) {
519 if (!fe->i_clusters) 498 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
520 goto bail; 499 if (IS_ERR(handle)) {
500 status = PTR_ERR(handle);
501 mlog_errno(status);
502 goto out;
503 }
521 504
522 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); 505 status = ocfs2_journal_access(handle, inode, fe_bh,
523 if (IS_ERR(handle)) { 506 OCFS2_JOURNAL_ACCESS_WRITE);
524 status = PTR_ERR(handle); 507 if (status < 0) {
525 handle = NULL; 508 mlog_errno(status);
526 mlog_errno(status); 509 goto out;
527 goto bail; 510 }
528 }
529 511
530 status = ocfs2_set_inode_size(handle, inode, fe_bh, 0ULL); 512 i_size_write(inode, 0);
531 if (status < 0) {
532 mlog_errno(status);
533 goto bail;
534 }
535 513
536 ocfs2_commit_trans(osb, handle); 514 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
537 handle = NULL; 515 if (status < 0) {
516 mlog_errno(status);
517 goto out;
518 }
538 519
539 status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc); 520 ocfs2_commit_trans(osb, handle);
540 if (status < 0) { 521 handle = NULL;
541 mlog_errno(status);
542 goto bail;
543 }
544 522
545 status = ocfs2_commit_truncate(osb, inode, fe_bh, tc); 523 status = ocfs2_prepare_truncate(osb, inode, fe_bh, &tc);
546 if (status < 0) { 524 if (status < 0) {
547 mlog_errno(status); 525 mlog_errno(status);
548 goto bail; 526 goto out;
527 }
528
529 status = ocfs2_commit_truncate(osb, inode, fe_bh, tc);
530 if (status < 0) {
531 mlog_errno(status);
532 goto out;
533 }
549 } 534 }
550bail: 535
536out:
551 if (handle) 537 if (handle)
552 ocfs2_commit_trans(osb, handle); 538 ocfs2_commit_trans(osb, handle);
553
554 mlog_exit(status); 539 mlog_exit(status);
555 return status; 540 return status;
556} 541}
@@ -678,10 +663,10 @@ static int ocfs2_wipe_inode(struct inode *inode,
678 struct inode *orphan_dir_inode = NULL; 663 struct inode *orphan_dir_inode = NULL;
679 struct buffer_head *orphan_dir_bh = NULL; 664 struct buffer_head *orphan_dir_bh = NULL;
680 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 665 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
666 struct ocfs2_dinode *di;
681 667
682 /* We've already voted on this so it should be readonly - no 668 di = (struct ocfs2_dinode *) di_bh->b_data;
683 * spinlock needed. */ 669 orphaned_slot = le16_to_cpu(di->i_orphaned_slot);
684 orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
685 670
686 status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot); 671 status = ocfs2_check_orphan_recovery_state(osb, orphaned_slot);
687 if (status) 672 if (status)
@@ -839,11 +824,20 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
839 goto bail; 824 goto bail;
840 } 825 }
841 826
842 status = ocfs2_request_delete_vote(inode); 827 /*
843 /* -EBUSY means that other nodes are still using the 828 * This is how ocfs2 determines whether an inode is still live
844 * inode. We're done here though, so avoid doing anything on 829 * within the cluster. Every node takes a shared read lock on
845 * disk and let them worry about deleting it. */ 830 * the inode open lock in ocfs2_read_locked_inode(). When we
846 if (status == -EBUSY) { 831 * get to ->delete_inode(), each node tries to convert it's
832 * lock to an exclusive. Trylocks are serialized by the inode
833 * meta data lock. If the upconvert suceeds, we know the inode
834 * is no longer live and can be deleted.
835 *
836 * Though we call this with the meta data lock held, the
837 * trylock keeps us from ABBA deadlock.
838 */
839 status = ocfs2_try_open_lock(inode, 1);
840 if (status == -EAGAIN) {
847 status = 0; 841 status = 0;
848 mlog(0, "Skipping delete of %llu because it is in use on" 842 mlog(0, "Skipping delete of %llu because it is in use on"
849 "other nodes\n", (unsigned long long)oi->ip_blkno); 843 "other nodes\n", (unsigned long long)oi->ip_blkno);
@@ -854,21 +848,10 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
854 goto bail; 848 goto bail;
855 } 849 }
856 850
857 spin_lock(&oi->ip_lock); 851 *wipe = 1;
858 if (oi->ip_orphaned_slot == OCFS2_INVALID_SLOT) { 852 mlog(0, "Inode %llu is ok to wipe from orphan dir %u\n",
859 /* Nobody knew which slot this inode was orphaned 853 (unsigned long long)oi->ip_blkno,
860 * into. This may happen during node death and 854 le16_to_cpu(di->i_orphaned_slot));
861 * recovery knows how to clean it up so we can safely
862 * ignore this inode for now on. */
863 mlog(0, "Nobody knew where inode %llu was orphaned!\n",
864 (unsigned long long)oi->ip_blkno);
865 } else {
866 *wipe = 1;
867
868 mlog(0, "Inode %llu is ok to wipe from orphan dir %d\n",
869 (unsigned long long)oi->ip_blkno, oi->ip_orphaned_slot);
870 }
871 spin_unlock(&oi->ip_lock);
872 855
873bail: 856bail:
874 return status; 857 return status;
@@ -1001,11 +984,16 @@ void ocfs2_clear_inode(struct inode *inode)
1001 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, 984 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL,
1002 "Inode=%lu\n", inode->i_ino); 985 "Inode=%lu\n", inode->i_ino);
1003 986
987 /* The open lock replaced the old delete_inode vote; we took it
988 * earlier, so now it is time to drop the PR and EX open locks. */
989 ocfs2_open_unlock(inode);
990
1004 /* Do these before all the other work so that we don't bounce 991 /* Do these before all the other work so that we don't bounce
1005 * the vote thread while waiting to destroy the locks. */ 992 * the vote thread while waiting to destroy the locks. */
1006 ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres); 993 ocfs2_mark_lockres_freeing(&oi->ip_rw_lockres);
1007 ocfs2_mark_lockres_freeing(&oi->ip_meta_lockres); 994 ocfs2_mark_lockres_freeing(&oi->ip_meta_lockres);
1008 ocfs2_mark_lockres_freeing(&oi->ip_data_lockres); 995 ocfs2_mark_lockres_freeing(&oi->ip_data_lockres);
996 ocfs2_mark_lockres_freeing(&oi->ip_open_lockres);
1009 997
1010 /* We very well may get a clear_inode before all of an inode's 998 /* We very well may get a clear_inode before all of an inode's
1011 * metadata has hit disk. Of course, we can't drop any cluster 999 * metadata has hit disk. Of course, we can't drop any cluster
@@ -1020,8 +1008,7 @@ void ocfs2_clear_inode(struct inode *inode)
1020 "Clear inode of %llu, inode has io markers\n", 1008 "Clear inode of %llu, inode has io markers\n",
1021 (unsigned long long)oi->ip_blkno); 1009 (unsigned long long)oi->ip_blkno);
1022 1010
1023 ocfs2_extent_map_drop(inode, 0); 1011 ocfs2_extent_map_trunc(inode, 0);
1024 ocfs2_extent_map_init(inode);
1025 1012
1026 status = ocfs2_drop_inode_locks(inode); 1013 status = ocfs2_drop_inode_locks(inode);
1027 if (status < 0) 1014 if (status < 0)
@@ -1030,6 +1017,7 @@ void ocfs2_clear_inode(struct inode *inode)
1030 ocfs2_lock_res_free(&oi->ip_rw_lockres); 1017 ocfs2_lock_res_free(&oi->ip_rw_lockres);
1031 ocfs2_lock_res_free(&oi->ip_meta_lockres); 1018 ocfs2_lock_res_free(&oi->ip_meta_lockres);
1032 ocfs2_lock_res_free(&oi->ip_data_lockres); 1019 ocfs2_lock_res_free(&oi->ip_data_lockres);
1020 ocfs2_lock_res_free(&oi->ip_open_lockres);
1033 1021
1034 ocfs2_metadata_cache_purge(inode); 1022 ocfs2_metadata_cache_purge(inode);
1035 1023
@@ -1086,9 +1074,6 @@ void ocfs2_drop_inode(struct inode *inode)
1086 mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n", 1074 mlog(0, "Drop inode %llu, nlink = %u, ip_flags = 0x%x\n",
1087 (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags); 1075 (unsigned long long)oi->ip_blkno, inode->i_nlink, oi->ip_flags);
1088 1076
1089 /* Testing ip_orphaned_slot here wouldn't work because we may
1090 * not have gotten a delete_inode vote from any other nodes
1091 * yet. */
1092 if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED) 1077 if (oi->ip_flags & OCFS2_INODE_MAYBE_ORPHANED)
1093 generic_delete_inode(inode); 1078 generic_delete_inode(inode);
1094 else 1079 else
@@ -1121,8 +1106,8 @@ struct buffer_head *ocfs2_bread(struct inode *inode,
1121 return NULL; 1106 return NULL;
1122 } 1107 }
1123 1108
1124 tmperr = ocfs2_extent_map_get_blocks(inode, block, 1, 1109 tmperr = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
1125 &p_blkno, NULL); 1110 NULL);
1126 if (tmperr < 0) { 1111 if (tmperr < 0) {
1127 mlog_errno(tmperr); 1112 mlog_errno(tmperr);
1128 goto fail; 1113 goto fail;
@@ -1259,7 +1244,7 @@ void ocfs2_refresh_inode(struct inode *inode,
1259 if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0) 1244 if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
1260 inode->i_blocks = 0; 1245 inode->i_blocks = 0;
1261 else 1246 else
1262 inode->i_blocks = ocfs2_align_bytes_to_sectors(i_size_read(inode)); 1247 inode->i_blocks = ocfs2_inode_sector_count(inode);
1263 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); 1248 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
1264 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); 1249 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
1265 inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); 1250 inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 1a7dd2945b34..03ae075869ee 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -26,6 +26,8 @@
26#ifndef OCFS2_INODE_H 26#ifndef OCFS2_INODE_H
27#define OCFS2_INODE_H 27#define OCFS2_INODE_H
28 28
29#include "extent_map.h"
30
29/* OCFS2 Inode Private Data */ 31/* OCFS2 Inode Private Data */
30struct ocfs2_inode_info 32struct ocfs2_inode_info
31{ 33{
@@ -34,6 +36,7 @@ struct ocfs2_inode_info
34 struct ocfs2_lock_res ip_rw_lockres; 36 struct ocfs2_lock_res ip_rw_lockres;
35 struct ocfs2_lock_res ip_meta_lockres; 37 struct ocfs2_lock_res ip_meta_lockres;
36 struct ocfs2_lock_res ip_data_lockres; 38 struct ocfs2_lock_res ip_data_lockres;
39 struct ocfs2_lock_res ip_open_lockres;
37 40
38 /* protects allocation changes on this inode. */ 41 /* protects allocation changes on this inode. */
39 struct rw_semaphore ip_alloc_sem; 42 struct rw_semaphore ip_alloc_sem;
@@ -42,9 +45,7 @@ struct ocfs2_inode_info
42 spinlock_t ip_lock; 45 spinlock_t ip_lock;
43 u32 ip_open_count; 46 u32 ip_open_count;
44 u32 ip_clusters; 47 u32 ip_clusters;
45 struct ocfs2_extent_map ip_map;
46 struct list_head ip_io_markers; 48 struct list_head ip_io_markers;
47 int ip_orphaned_slot;
48 49
49 struct mutex ip_io_mutex; 50 struct mutex ip_io_mutex;
50 51
@@ -64,6 +65,8 @@ struct ocfs2_inode_info
64 65
65 struct ocfs2_caching_info ip_metadata_cache; 66 struct ocfs2_caching_info ip_metadata_cache;
66 67
68 struct ocfs2_extent_map ip_extent_map;
69
67 struct inode vfs_inode; 70 struct inode vfs_inode;
68}; 71};
69 72
@@ -117,14 +120,9 @@ void ocfs2_delete_inode(struct inode *inode);
117void ocfs2_drop_inode(struct inode *inode); 120void ocfs2_drop_inode(struct inode *inode);
118 121
119/* Flags for ocfs2_iget() */ 122/* Flags for ocfs2_iget() */
120#define OCFS2_FI_FLAG_NOWAIT 0x1 123#define OCFS2_FI_FLAG_SYSFILE 0x4
121#define OCFS2_FI_FLAG_DELETE 0x2 124#define OCFS2_FI_FLAG_ORPHAN_RECOVERY 0x8
122#define OCFS2_FI_FLAG_SYSFILE 0x4
123#define OCFS2_FI_FLAG_NOLOCK 0x8
124struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, int flags); 125struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, int flags);
125struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb,
126 u64 blkno,
127 int delete_vote);
128int ocfs2_inode_init_private(struct inode *inode); 126int ocfs2_inode_init_private(struct inode *inode);
129int ocfs2_inode_revalidate(struct dentry *dentry); 127int ocfs2_inode_revalidate(struct dentry *dentry);
130int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, 128int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
@@ -144,4 +142,11 @@ int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb);
144 142
145void ocfs2_set_inode_flags(struct inode *inode); 143void ocfs2_set_inode_flags(struct inode *inode);
146 144
145static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
146{
147 int c_to_s_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits - 9;
148
149 return (blkcnt_t)(OCFS2_I(inode)->ip_clusters << c_to_s_bits);
150}
151
147#endif /* OCFS2_INODE_H */ 152#endif /* OCFS2_INODE_H */
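
ocfs2_inode_sector_count() above converts allocated clusters to 512-byte sectors with a single shift of (clustersize_bits - 9). For instance, with 4 KB clusters (clustersize_bits = 12) an inode holding 10 clusters reports 10 << 3 = 80 sectors. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ocfs2_inode_sector_count(): clusters to 512-byte
 * sectors via a shift of (clustersize_bits - 9). */
static uint64_t sector_count(uint32_t clusters, int clustersize_bits)
{
	return (uint64_t)clusters << (clustersize_bits - 9);
}

int main(void)
{
	/* 10 clusters of 4 KB = 40960 bytes = 80 sectors. */
	printf("%llu\n", (unsigned long long)sector_count(10, 12));
	return 0;
}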
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 825cb0ae1b4c..5a8a90d1c787 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -649,29 +649,20 @@ bail:
649static int ocfs2_force_read_journal(struct inode *inode) 649static int ocfs2_force_read_journal(struct inode *inode)
650{ 650{
651 int status = 0; 651 int status = 0;
652 int i, p_blocks; 652 int i;
653 u64 v_blkno, p_blkno; 653 u64 v_blkno, p_blkno, p_blocks, num_blocks;
654#define CONCURRENT_JOURNAL_FILL 32 654#define CONCURRENT_JOURNAL_FILL 32ULL
655 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; 655 struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
656 656
657 mlog_entry_void(); 657 mlog_entry_void();
658 658
659 BUG_ON(inode->i_blocks !=
660 ocfs2_align_bytes_to_sectors(i_size_read(inode)));
661
662 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); 659 memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
663 660
664 mlog(0, "Force reading %llu blocks\n", 661 num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, inode->i_size);
665 (unsigned long long)(inode->i_blocks >>
666 (inode->i_sb->s_blocksize_bits - 9)));
667
668 v_blkno = 0; 662 v_blkno = 0;
669 while (v_blkno < 663 while (v_blkno < num_blocks) {
670 (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))) {
671
672 status = ocfs2_extent_map_get_blocks(inode, v_blkno, 664 status = ocfs2_extent_map_get_blocks(inode, v_blkno,
673 1, &p_blkno, 665 &p_blkno, &p_blocks, NULL);
674 &p_blocks);
675 if (status < 0) { 666 if (status < 0) {
676 mlog_errno(status); 667 mlog_errno(status);
677 goto bail; 668 goto bail;
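
The reworked ocfs2_force_read_journal() loop above asks the extent map for each contiguous physical run and submits at most CONCURRENT_JOURNAL_FILL buffers per pass. A userspace model of that batching, with hypothetical map/read callbacks standing in for ocfs2_extent_map_get_blocks() and the buffer reads:

#include <stdint.h>

#define BATCH 32	/* mirrors CONCURRENT_JOURNAL_FILL */

/* map() fills in the physical block for a virtual block plus the length
 * of the contiguous run starting there; read_blocks() reads n blocks. */
typedef void (*map_fn)(uint64_t v, uint64_t *p, uint64_t *run);
typedef void (*read_fn)(uint64_t p, uint64_t n);

static void force_read(uint64_t num_blocks, map_fn map, read_fn read_blocks)
{
	uint64_t v = 0, p, run;

	while (v < num_blocks) {
		map(v, &p, &run);
		if (run > BATCH)		/* cap each submission */
			run = BATCH;
		if (run > num_blocks - v)	/* don't run off the end */
			run = num_blocks - v;
		read_blocks(p, run);
		v += run;
	}
}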
@@ -1306,7 +1297,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
1306 continue; 1297 continue;
1307 1298
1308 iter = ocfs2_iget(osb, le64_to_cpu(de->inode), 1299 iter = ocfs2_iget(osb, le64_to_cpu(de->inode),
1309 OCFS2_FI_FLAG_NOLOCK); 1300 OCFS2_FI_FLAG_ORPHAN_RECOVERY);
1310 if (IS_ERR(iter)) 1301 if (IS_ERR(iter))
1311 continue; 1302 continue;
1312 1303
@@ -1418,7 +1409,6 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb,
1418 /* Set the proper information to get us going into 1409 /* Set the proper information to get us going into
1419 * ocfs2_delete_inode. */ 1410 * ocfs2_delete_inode. */
1420 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; 1411 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
1421 oi->ip_orphaned_slot = slot;
1422 spin_unlock(&oi->ip_lock); 1412 spin_unlock(&oi->ip_lock);
1423 1413
1424 iput(inode); 1414 iput(inode);
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index d026b4f27757..3db5de4506da 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -390,7 +390,7 @@ static inline int ocfs2_calc_tree_trunc_credits(struct super_block *sb,
390 /* We may be deleting metadata blocks, so metadata alloc dinode + 390 /* We may be deleting metadata blocks, so metadata alloc dinode +
391 one desc. block for each possible delete. */ 391 one desc. block for each possible delete. */
392 if (tree_depth && next_free == 1 && 392 if (tree_depth && next_free == 1 &&
393 le32_to_cpu(last_el->l_recs[i].e_clusters) == clusters_to_del) 393 ocfs2_rec_clusters(last_el, &last_el->l_recs[i]) == clusters_to_del)
394 credits += 1 + tree_depth; 394 credits += 1 + tree_depth;
395 395
396 /* update to the truncate log. */ 396 /* update to the truncate log. */
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 51b020447683..af01158b39f5 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -85,8 +85,11 @@ int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
85 int ret = 0, lock_level = 0; 85 int ret = 0, lock_level = 0;
86 struct ocfs2_super *osb = OCFS2_SB(file->f_dentry->d_inode->i_sb); 86 struct ocfs2_super *osb = OCFS2_SB(file->f_dentry->d_inode->i_sb);
87 87
88 /* We don't want to support shared writable mappings yet. */ 88 /*
89 if (!ocfs2_mount_local(osb) && 89 * Only support shared writeable mmap for local mounts which
90 * don't know about holes.
91 */
92 if ((!ocfs2_mount_local(osb) || ocfs2_sparse_alloc(osb)) &&
90 ((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE)) && 93 ((vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_MAYSHARE)) &&
91 ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) { 94 ((vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_MAYWRITE))) {
92 mlog(0, "disallow shared writable mmaps %lx\n", vma->vm_flags); 95 mlog(0, "disallow shared writable mmaps %lx\n", vma->vm_flags);
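
Note the mmap change tightens rather than relaxes the old test: shared writable mappings were previously allowed on any local mount, but a sparse volume can contain holes that a write fault would have to fill, so those are now refused as well. The guard reduces to the predicate below (a restatement of the hunk; the helper name is invented for illustration):

    /* hypothetical helper restating the new condition */
    static int ocfs2_mmap_shared_write_ok(struct ocfs2_super *osb)
    {
            /* only local mounts on non-sparse volumes may share-write mmap */
            return ocfs2_mount_local(osb) && !ocfs2_sparse_alloc(osb);
    }
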
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 28dd757ff67d..2bcf353fd7c5 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -175,8 +175,6 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
175 175
176 inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0); 176 inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0);
177 if (IS_ERR(inode)) { 177 if (IS_ERR(inode)) {
178 mlog(ML_ERROR, "Unable to create inode %llu\n",
179 (unsigned long long)blkno);
180 ret = ERR_PTR(-EACCES); 178 ret = ERR_PTR(-EACCES);
181 goto bail_unlock; 179 goto bail_unlock;
182 } 180 }
@@ -189,7 +187,6 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
189 * unlink. */ 187 * unlink. */
190 spin_lock(&oi->ip_lock); 188 spin_lock(&oi->ip_lock);
191 oi->ip_flags &= ~OCFS2_INODE_MAYBE_ORPHANED; 189 oi->ip_flags &= ~OCFS2_INODE_MAYBE_ORPHANED;
192 oi->ip_orphaned_slot = OCFS2_INVALID_SLOT;
193 spin_unlock(&oi->ip_lock); 190 spin_unlock(&oi->ip_lock);
194 191
195bail_add: 192bail_add:
@@ -288,7 +285,7 @@ static int ocfs2_fill_new_dir(struct ocfs2_super *osb,
288 285
289 i_size_write(inode, inode->i_sb->s_blocksize); 286 i_size_write(inode, inode->i_sb->s_blocksize);
290 inode->i_nlink = 2; 287 inode->i_nlink = 2;
291 inode->i_blocks = ocfs2_align_bytes_to_sectors(inode->i_sb->s_blocksize); 288 inode->i_blocks = ocfs2_inode_sector_count(inode);
292 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh); 289 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
293 if (status < 0) { 290 if (status < 0) {
294 mlog_errno(status); 291 mlog_errno(status);
@@ -1486,8 +1483,7 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
1486 struct buffer_head **bhs = NULL; 1483 struct buffer_head **bhs = NULL;
1487 const char *c; 1484 const char *c;
1488 struct super_block *sb = osb->sb; 1485 struct super_block *sb = osb->sb;
1489 u64 p_blkno; 1486 u64 p_blkno, p_blocks;
1490 int p_blocks;
1491 int virtual, blocks, status, i, bytes_left; 1487 int virtual, blocks, status, i, bytes_left;
1492 1488
1493 bytes_left = i_size_read(inode) + 1; 1489 bytes_left = i_size_read(inode) + 1;
@@ -1514,8 +1510,8 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
1514 goto bail; 1510 goto bail;
1515 } 1511 }
1516 1512
1517 status = ocfs2_extent_map_get_blocks(inode, 0, 1, &p_blkno, 1513 status = ocfs2_extent_map_get_blocks(inode, 0, &p_blkno, &p_blocks,
1518 &p_blocks); 1514 NULL);
1519 if (status < 0) { 1515 if (status < 0) {
1520 mlog_errno(status); 1516 mlog_errno(status);
1521 goto bail; 1517 goto bail;
@@ -1674,8 +1670,11 @@ static int ocfs2_symlink(struct inode *dir,
1674 inode->i_rdev = 0; 1670 inode->i_rdev = 0;
1675 newsize = l - 1; 1671 newsize = l - 1;
1676 if (l > ocfs2_fast_symlink_chars(sb)) { 1672 if (l > ocfs2_fast_symlink_chars(sb)) {
1673 u32 offset = 0;
1674
1677 inode->i_op = &ocfs2_symlink_inode_operations; 1675 inode->i_op = &ocfs2_symlink_inode_operations;
1678 status = ocfs2_do_extend_allocation(osb, inode, 1, new_fe_bh, 1676 status = ocfs2_do_extend_allocation(osb, inode, &offset, 1,
1677 new_fe_bh,
1679 handle, data_ac, NULL, 1678 handle, data_ac, NULL,
1680 NULL); 1679 NULL);
1681 if (status < 0) { 1680 if (status < 0) {
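
ocfs2_do_extend_allocation() now takes the logical cluster offset by reference as its third argument; judging by the converted call sites, the callee advances it past whatever it allocates so that repeated calls walk forward through the file. The symlink path simply starts at zero:

    u32 offset = 0; /* logical cluster to allocate at; assumed to be
                     * advanced by the callee on success */

    status = ocfs2_do_extend_allocation(osb, inode, &offset, 1, new_fe_bh,
                                        handle, data_ac, NULL, NULL);
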
@@ -1689,7 +1688,7 @@ static int ocfs2_symlink(struct inode *dir,
1689 goto bail; 1688 goto bail;
1690 } 1689 }
1691 i_size_write(inode, newsize); 1690 i_size_write(inode, newsize);
1692 inode->i_blocks = ocfs2_align_bytes_to_sectors(newsize); 1691 inode->i_blocks = ocfs2_inode_sector_count(inode);
1693 } else { 1692 } else {
1694 inode->i_op = &ocfs2_fast_symlink_inode_operations; 1693 inode->i_op = &ocfs2_fast_symlink_inode_operations;
1695 memcpy((char *) fe->id2.i_symlink, symname, l); 1694 memcpy((char *) fe->id2.i_symlink, symname, l);
@@ -2222,9 +2221,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
2222 /* Record which orphan dir our inode now resides 2221 /* Record which orphan dir our inode now resides
2223 * in. delete_inode will use this to determine which orphan 2222 * in. delete_inode will use this to determine which orphan
2224 * dir to lock. */ 2223 * dir to lock. */
2225 spin_lock(&OCFS2_I(inode)->ip_lock); 2224 fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
2226 OCFS2_I(inode)->ip_orphaned_slot = osb->slot_num;
2227 spin_unlock(&OCFS2_I(inode)->ip_lock);
2228 2225
2229 mlog(0, "Inode %llu orphaned in slot %d\n", 2226 mlog(0, "Inode %llu orphaned in slot %d\n",
2230 (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num); 2227 (unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index db8e77cd35d3..82cc92dcf8a6 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -46,11 +46,6 @@
46#include "endian.h" 46#include "endian.h"
47#include "ocfs2_lockid.h" 47#include "ocfs2_lockid.h"
48 48
49struct ocfs2_extent_map {
50 u32 em_clusters;
51 struct rb_root em_extents;
52};
53
54/* Most user visible OCFS2 inodes will have very few pieces of 49/* Most user visible OCFS2 inodes will have very few pieces of
55 * metadata, but larger files (including bitmaps, etc) must be taken 50 * metadata, but larger files (including bitmaps, etc) must be taken
56 * into account when designing an access scheme. We allow a small 51 * into account when designing an access scheme. We allow a small
@@ -303,6 +298,13 @@ static inline int ocfs2_should_order_data(struct inode *inode)
303 return 1; 298 return 1;
304} 299}
305 300
301static inline int ocfs2_sparse_alloc(struct ocfs2_super *osb)
302{
303 if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
304 return 1;
305 return 0;
306}
307
306/* set / clear functions because cluster events can make these happen 308/* set / clear functions because cluster events can make these happen
307 * in parallel so we want the transitions to be atomic. this also 309 * in parallel so we want the transitions to be atomic. this also
308 * means that any future flags osb_flags must be protected by spinlock 310 * means that any future flags osb_flags must be protected by spinlock
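
ocfs2_sparse_alloc() is a plain feature-bit test; callers use it to gate every path that must cope with holes. A typical gate, mirroring the mmap.c hunk above:

    if (ocfs2_sparse_alloc(osb)) {
            /* SPARSE_ALLOC volumes may contain holes: unmapped ranges
             * read as zeroes, so consult the extent map first */
    } else {
            /* legacy format: everything below i_size is allocated */
    }
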
@@ -461,6 +463,49 @@ static inline unsigned long ocfs2_align_bytes_to_sectors(u64 bytes)
461 return (unsigned long)((bytes + 511) >> 9); 463 return (unsigned long)((bytes + 511) >> 9);
462} 464}
463 465
466static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
467 unsigned long pg_index)
468{
469 u32 clusters = pg_index;
470 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
471
472 if (unlikely(PAGE_CACHE_SHIFT > cbits))
473 clusters = pg_index << (PAGE_CACHE_SHIFT - cbits);
474 else if (PAGE_CACHE_SHIFT < cbits)
475 clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT);
476
477 return clusters;
478}
479
480/*
481 * Find the 1st page index which covers the given clusters.
482 */
483static inline unsigned long ocfs2_align_clusters_to_page_index(struct super_block *sb,
484 u32 clusters)
485{
486 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
487 unsigned long index = clusters;
488
489 if (PAGE_CACHE_SHIFT > cbits) {
490 index = clusters >> (PAGE_CACHE_SHIFT - cbits);
491 } else if (PAGE_CACHE_SHIFT < cbits) {
492 index = clusters << (cbits - PAGE_CACHE_SHIFT);
493 }
494
495 return index;
496}
497
498static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
499{
500 unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
501 unsigned int pages_per_cluster = 1;
502
503 if (PAGE_CACHE_SHIFT < cbits)
504 pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT);
505
506 return pages_per_cluster;
507}
508
464#define ocfs2_set_bit ext2_set_bit 509#define ocfs2_set_bit ext2_set_bit
465#define ocfs2_clear_bit ext2_clear_bit 510#define ocfs2_clear_bit ext2_clear_bit
466#define ocfs2_test_bit ext2_test_bit 511#define ocfs2_test_bit ext2_test_bit
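
The three conversion helpers above just shift by the difference between the page and cluster sizes, in whichever direction is larger. A standalone arithmetic check with 4 KiB pages and 8 KiB clusters (values chosen purely for illustration; kernel types stripped):

    #include <stdio.h>

    #define PAGE_SHIFT_EX 12   /* 4 KiB pages */
    #define CLUSTER_BITS_EX 13 /* 8 KiB clusters */

    int main(void)
    {
            unsigned long pg_index = 6;
            /* page smaller than cluster: shift down, as in
             * ocfs2_page_index_to_clusters() */
            unsigned int cluster = pg_index >> (CLUSTER_BITS_EX - PAGE_SHIFT_EX);
            /* first page covering that cluster: shift back up, as in
             * ocfs2_align_clusters_to_page_index() */
            unsigned long first_pg = (unsigned long)cluster
                                     << (CLUSTER_BITS_EX - PAGE_SHIFT_EX);
            unsigned int per_cluster = 1u << (CLUSTER_BITS_EX - PAGE_SHIFT_EX);

            /* prints: page 6 -> cluster 3; cluster 3 starts at page 6; 2 pages/cluster */
            printf("page %lu -> cluster %u; cluster %u starts at page %lu; %u pages/cluster\n",
                   pg_index, cluster, cluster, first_pg, per_cluster);
            return 0;
    }
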
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index e61e218f5e0b..71306479c68f 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -86,7 +86,8 @@
86 OCFS2_SB(sb)->s_feature_incompat &= ~(mask) 86 OCFS2_SB(sb)->s_feature_incompat &= ~(mask)
87 87
88#define OCFS2_FEATURE_COMPAT_SUPP OCFS2_FEATURE_COMPAT_BACKUP_SB 88#define OCFS2_FEATURE_COMPAT_SUPP OCFS2_FEATURE_COMPAT_BACKUP_SB
89#define OCFS2_FEATURE_INCOMPAT_SUPP OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT 89#define OCFS2_FEATURE_INCOMPAT_SUPP (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \
90 | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
90#define OCFS2_FEATURE_RO_COMPAT_SUPP 0 91#define OCFS2_FEATURE_RO_COMPAT_SUPP 0
91 92
92/* 93/*
@@ -155,6 +156,12 @@
155#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */ 156#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */
156 157
157/* 158/*
159 * Extent record flags (e_node.leaf.flags)
160 */
161#define OCFS2_EXT_UNWRITTEN (0x01) /* Extent is allocated but
162 * unwritten */
163
164/*
158 * ioctl commands 165 * ioctl commands
159 */ 166 */
160#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long) 167#define OCFS2_IOC_GETFLAGS _IOR('f', 1, long)
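
OCFS2_EXT_UNWRITTEN marks a leaf extent that has disk space reserved but has never been written, so its contents are undefined and must read back as zeroes. A sketch of how a read path would honour it (buf and len are illustrative only):

    if (rec->e_flags & OCFS2_EXT_UNWRITTEN) {
            /* allocated but unwritten: zero-fill instead of reading disk */
            memset(buf, 0, len);
    } else {
            /* ordinary extent: read from the blocks at rec->e_blkno */
    }
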
@@ -282,10 +289,21 @@ static unsigned char ocfs2_type_by_mode[S_IFMT >> S_SHIFT] = {
282/* 289/*
283 * On disk extent record for OCFS2 290 * On disk extent record for OCFS2
284 * It describes a range of clusters on disk. 291 * It describes a range of clusters on disk.
292 *
293 * Length fields are divided into interior and leaf node versions.
294 * This leaves room for a flags field (OCFS2_EXT_*) in the leaf nodes.
285 */ 295 */
286struct ocfs2_extent_rec { 296struct ocfs2_extent_rec {
287/*00*/ __le32 e_cpos; /* Offset into the file, in clusters */ 297/*00*/ __le32 e_cpos; /* Offset into the file, in clusters */
288 __le32 e_clusters; /* Clusters covered by this extent */ 298 union {
299 __le32 e_int_clusters; /* Clusters covered by all children */
300 struct {
301 __le16 e_leaf_clusters; /* Clusters covered by this
302 extent */
303 __u8 e_reserved1;
304 __u8 e_flags; /* Extent flags */
305 };
306 };
289 __le64 e_blkno; /* Physical disk offset, in blocks */ 307 __le64 e_blkno; /* Physical disk offset, in blocks */
290/*10*/ 308/*10*/
291}; 309};
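
Because interior and leaf records now size their cluster counts differently, lengths must be read through a depth-aware accessor, which is what the journal.h hunk above calls as ocfs2_rec_clusters(). A plausible shape for it, reconstructed from the union layout (treat the exact body as an assumption):

    static inline u32 ocfs2_rec_clusters(struct ocfs2_extent_list *el,
                                         struct ocfs2_extent_rec *rec)
    {
            /* interior nodes keep the full 32-bit count; leaves gave up
             * the top 16 bits to e_reserved1 and e_flags */
            if (el->l_tree_depth)
                    return le32_to_cpu(rec->e_int_clusters);
            return le16_to_cpu(rec->e_leaf_clusters);
    }
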
@@ -311,7 +329,10 @@ struct ocfs2_extent_list {
311/*00*/ __le16 l_tree_depth; /* Extent tree depth from this 329/*00*/ __le16 l_tree_depth; /* Extent tree depth from this
312 point. 0 means data extents 330 point. 0 means data extents
313 hang directly off this 331 hang directly off this
314 header (a leaf) */ 332 header (a leaf)
333 NOTE: The high 8 bits cannot be
334 used - tree_depth is never that big.
335 */
315 __le16 l_count; /* Number of extent records */ 336 __le16 l_count; /* Number of extent records */
316 __le16 l_next_free_rec; /* Next unused extent slot */ 337 __le16 l_next_free_rec; /* Next unused extent slot */
317 __le16 l_reserved1; 338 __le16 l_reserved1;
@@ -446,7 +467,9 @@ struct ocfs2_dinode {
446 __le32 i_ctime_nsec; 467 __le32 i_ctime_nsec;
447 __le32 i_mtime_nsec; 468 __le32 i_mtime_nsec;
448 __le32 i_attr; 469 __le32 i_attr;
449 __le32 i_reserved1; 470 __le16 i_orphaned_slot; /* Only valid when OCFS2_ORPHANED_FL
471 was set in i_flags */
472 __le16 i_reserved1;
450/*70*/ __le64 i_reserved2[8]; 473/*70*/ __le64 i_reserved2[8];
451/*B8*/ union { 474/*B8*/ union {
452 __le64 i_pad1; /* Generic way to refer to this 475 __le64 i_pad1; /* Generic way to refer to this
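
Moving the orphaned-slot number into the dinode is what lets the whole delete-vote machinery in vote.c below disappear: instead of asking other nodes which slot an inode was orphaned in, recovery can read it straight off disk. A hedged sketch of that read-back (the flag test follows the field's own comment):

    /* i_orphaned_slot is only meaningful while OCFS2_ORPHANED_FL is set */
    if (le32_to_cpu(fe->i_flags) & OCFS2_ORPHANED_FL) {
            u16 slot = le16_to_cpu(fe->i_orphaned_slot);
            /* ... lock and scan the orphan directory for that slot ... */
    }
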
diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h
index 4d5d5655c185..4ca02b1c38ac 100644
--- a/fs/ocfs2/ocfs2_lockid.h
+++ b/fs/ocfs2/ocfs2_lockid.h
@@ -44,6 +44,7 @@ enum ocfs2_lock_type {
44 OCFS2_LOCK_TYPE_RENAME, 44 OCFS2_LOCK_TYPE_RENAME,
45 OCFS2_LOCK_TYPE_RW, 45 OCFS2_LOCK_TYPE_RW,
46 OCFS2_LOCK_TYPE_DENTRY, 46 OCFS2_LOCK_TYPE_DENTRY,
47 OCFS2_LOCK_TYPE_OPEN,
47 OCFS2_NUM_LOCK_TYPES 48 OCFS2_NUM_LOCK_TYPES
48}; 49};
49 50
@@ -69,6 +70,9 @@ static inline char ocfs2_lock_type_char(enum ocfs2_lock_type type)
69 case OCFS2_LOCK_TYPE_DENTRY: 70 case OCFS2_LOCK_TYPE_DENTRY:
70 c = 'N'; 71 c = 'N';
71 break; 72 break;
73 case OCFS2_LOCK_TYPE_OPEN:
74 c = 'O';
75 break;
72 default: 76 default:
73 c = '\0'; 77 c = '\0';
74 } 78 }
@@ -85,6 +89,7 @@ static char *ocfs2_lock_type_strings[] = {
85 * important job it does, anyway. */ 89 * important job it does, anyway. */
86 [OCFS2_LOCK_TYPE_RW] = "Write/Read", 90 [OCFS2_LOCK_TYPE_RW] = "Write/Read",
87 [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", 91 [OCFS2_LOCK_TYPE_DENTRY] = "Dentry",
92 [OCFS2_LOCK_TYPE_OPEN] = "Open",
88}; 93};
89 94
90static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type) 95static inline const char *ocfs2_lock_type_string(enum ocfs2_lock_type type)
diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c
index 2d3ac32cb74e..d921a28329dc 100644
--- a/fs/ocfs2/slot_map.c
+++ b/fs/ocfs2/slot_map.c
@@ -197,7 +197,7 @@ int ocfs2_init_slot_info(struct ocfs2_super *osb)
197 goto bail; 197 goto bail;
198 } 198 }
199 199
200 status = ocfs2_extent_map_get_blocks(inode, 0ULL, 1, &blkno, NULL); 200 status = ocfs2_extent_map_get_blocks(inode, 0ULL, &blkno, NULL, NULL);
201 if (status < 0) { 201 if (status < 0) {
202 mlog_errno(status); 202 mlog_errno(status);
203 goto bail; 203 goto bail;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 6dbb11762759..0da655ae5d6f 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -381,8 +381,7 @@ static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
381 le32_to_cpu(fe->i_clusters))); 381 le32_to_cpu(fe->i_clusters)));
382 spin_unlock(&OCFS2_I(alloc_inode)->ip_lock); 382 spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
383 i_size_write(alloc_inode, le64_to_cpu(fe->i_size)); 383 i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
384 alloc_inode->i_blocks = 384 alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
385 ocfs2_align_bytes_to_sectors(i_size_read(alloc_inode));
386 385
387 status = 0; 386 status = 0;
388bail: 387bail:
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 6534f92424dd..5c9e8243691f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -806,9 +806,6 @@ static int __init ocfs2_init(void)
806 806
807 ocfs2_print_version(); 807 ocfs2_print_version();
808 808
809 if (init_ocfs2_extent_maps())
810 return -ENOMEM;
811
812 status = init_ocfs2_uptodate_cache(); 809 status = init_ocfs2_uptodate_cache();
813 if (status < 0) { 810 if (status < 0) {
814 mlog_errno(status); 811 mlog_errno(status);
@@ -837,7 +834,6 @@ leave:
837 if (status < 0) { 834 if (status < 0) {
838 ocfs2_free_mem_caches(); 835 ocfs2_free_mem_caches();
839 exit_ocfs2_uptodate_cache(); 836 exit_ocfs2_uptodate_cache();
840 exit_ocfs2_extent_maps();
841 } 837 }
842 838
843 mlog_exit(status); 839 mlog_exit(status);
@@ -863,8 +859,6 @@ static void __exit ocfs2_exit(void)
863 859
864 unregister_filesystem(&ocfs2_fs_type); 860 unregister_filesystem(&ocfs2_fs_type);
865 861
866 exit_ocfs2_extent_maps();
867
868 exit_ocfs2_uptodate_cache(); 862 exit_ocfs2_uptodate_cache();
869 863
870 mlog_exit_void(); 864 mlog_exit_void();
@@ -963,6 +957,7 @@ static void ocfs2_inode_init_once(void *data,
963 ocfs2_lock_res_init_once(&oi->ip_rw_lockres); 957 ocfs2_lock_res_init_once(&oi->ip_rw_lockres);
964 ocfs2_lock_res_init_once(&oi->ip_meta_lockres); 958 ocfs2_lock_res_init_once(&oi->ip_meta_lockres);
965 ocfs2_lock_res_init_once(&oi->ip_data_lockres); 959 ocfs2_lock_res_init_once(&oi->ip_data_lockres);
960 ocfs2_lock_res_init_once(&oi->ip_open_lockres);
966 961
967 ocfs2_metadata_cache_init(&oi->vfs_inode); 962 ocfs2_metadata_cache_init(&oi->vfs_inode);
968 963
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index f30e63b9910c..4f82a2f0efef 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -63,17 +63,10 @@ struct ocfs2_msg_hdr
63 __be32 h_node_num; /* node sending this particular message. */ 63 __be32 h_node_num; /* node sending this particular message. */
64}; 64};
65 65
66/* OCFS2_MAX_FILENAME_LEN is 255 characters, but we want to align this
67 * for the network. */
68#define OCFS2_VOTE_FILENAME_LEN 256
69struct ocfs2_vote_msg 66struct ocfs2_vote_msg
70{ 67{
71 struct ocfs2_msg_hdr v_hdr; 68 struct ocfs2_msg_hdr v_hdr;
72 union { 69 __be32 v_reserved1;
73 __be32 v_generic1;
74 __be32 v_orphaned_slot; /* Used during delete votes */
75 __be32 v_nlink; /* Used during unlink votes */
76 } md1; /* Message type dependant 1 */
77}; 70};
78 71
79/* Responses are given these values to maintain backwards 72/* Responses are given these values to maintain backwards
@@ -86,7 +79,6 @@ struct ocfs2_response_msg
86{ 79{
87 struct ocfs2_msg_hdr r_hdr; 80 struct ocfs2_msg_hdr r_hdr;
88 __be32 r_response; 81 __be32 r_response;
89 __be32 r_orphaned_slot;
90}; 82};
91 83
92struct ocfs2_vote_work { 84struct ocfs2_vote_work {
@@ -96,7 +88,6 @@ struct ocfs2_vote_work {
96 88
97enum ocfs2_vote_request { 89enum ocfs2_vote_request {
98 OCFS2_VOTE_REQ_INVALID = 0, 90 OCFS2_VOTE_REQ_INVALID = 0,
99 OCFS2_VOTE_REQ_DELETE,
100 OCFS2_VOTE_REQ_MOUNT, 91 OCFS2_VOTE_REQ_MOUNT,
101 OCFS2_VOTE_REQ_UMOUNT, 92 OCFS2_VOTE_REQ_UMOUNT,
102 OCFS2_VOTE_REQ_LAST 93 OCFS2_VOTE_REQ_LAST
@@ -151,135 +142,23 @@ static void ocfs2_process_umount_request(struct ocfs2_super *osb,
151 ocfs2_node_map_set_bit(osb, &osb->umount_map, node_num); 142 ocfs2_node_map_set_bit(osb, &osb->umount_map, node_num);
152} 143}
153 144
154void ocfs2_mark_inode_remotely_deleted(struct inode *inode)
155{
156 struct ocfs2_inode_info *oi = OCFS2_I(inode);
157
158 assert_spin_locked(&oi->ip_lock);
159 /* We set the SKIP_DELETE flag on the inode so we don't try to
160 * delete it in delete_inode ourselves, thus avoiding
161 * unecessary lock pinging. If the other node failed to wipe
162 * the inode as a result of a crash, then recovery will pick
163 * up the slack. */
164 oi->ip_flags |= OCFS2_INODE_DELETED|OCFS2_INODE_SKIP_DELETE;
165}
166
167static int ocfs2_process_delete_request(struct inode *inode,
168 int *orphaned_slot)
169{
170 int response = OCFS2_RESPONSE_BUSY;
171
172 mlog(0, "DELETE vote on inode %lu, read lnk_cnt = %u, slot = %d\n",
173 inode->i_ino, inode->i_nlink, *orphaned_slot);
174
175 spin_lock(&OCFS2_I(inode)->ip_lock);
176
177 /* Whatever our vote response is, we want to make sure that
178 * the orphaned slot is recorded properly on this node *and*
179 * on the requesting node. Technically, if the requesting node
180 * did not know which slot the inode is orphaned in but we
181 * respond with BUSY he doesn't actually need the orphaned
182 * slot, but it doesn't hurt to do it here anyway. */
183 if ((*orphaned_slot) != OCFS2_INVALID_SLOT) {
184 mlog_bug_on_msg(OCFS2_I(inode)->ip_orphaned_slot !=
185 OCFS2_INVALID_SLOT &&
186 OCFS2_I(inode)->ip_orphaned_slot !=
187 (*orphaned_slot),
188 "Inode %llu: This node thinks it's "
189 "orphaned in slot %d, messaged it's in %d\n",
190 (unsigned long long)OCFS2_I(inode)->ip_blkno,
191 OCFS2_I(inode)->ip_orphaned_slot,
192 *orphaned_slot);
193
194 mlog(0, "Setting orphaned slot for inode %llu to %d\n",
195 (unsigned long long)OCFS2_I(inode)->ip_blkno,
196 *orphaned_slot);
197
198 OCFS2_I(inode)->ip_orphaned_slot = *orphaned_slot;
199 } else {
200 mlog(0, "Sending back orphaned slot %d for inode %llu\n",
201 OCFS2_I(inode)->ip_orphaned_slot,
202 (unsigned long long)OCFS2_I(inode)->ip_blkno);
203
204 *orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
205 }
206
207 /* vote no if the file is still open. */
208 if (OCFS2_I(inode)->ip_open_count) {
209 mlog(0, "open count = %u\n",
210 OCFS2_I(inode)->ip_open_count);
211 spin_unlock(&OCFS2_I(inode)->ip_lock);
212 goto done;
213 }
214 spin_unlock(&OCFS2_I(inode)->ip_lock);
215
216 /* directories are a bit ugly... What if someone is sitting in
217 * it? We want to make sure the inode is removed completely as
218 * a result of the iput in process_vote. */
219 if (S_ISDIR(inode->i_mode) && (atomic_read(&inode->i_count) != 1)) {
220 mlog(0, "i_count = %u\n", atomic_read(&inode->i_count));
221 goto done;
222 }
223
224 if (filemap_fdatawrite(inode->i_mapping)) {
225 mlog(ML_ERROR, "Could not sync inode %llu for delete!\n",
226 (unsigned long long)OCFS2_I(inode)->ip_blkno);
227 goto done;
228 }
229 sync_mapping_buffers(inode->i_mapping);
230 truncate_inode_pages(inode->i_mapping, 0);
231 ocfs2_extent_map_trunc(inode, 0);
232
233 spin_lock(&OCFS2_I(inode)->ip_lock);
234 /* double check open count - someone might have raced this
235 * thread into ocfs2_file_open while we were writing out
236 * data. If we're to allow a wipe of this inode now, we *must*
237 * hold the spinlock until we've marked it. */
238 if (OCFS2_I(inode)->ip_open_count) {
239 mlog(0, "Raced to wipe! open count = %u\n",
240 OCFS2_I(inode)->ip_open_count);
241 spin_unlock(&OCFS2_I(inode)->ip_lock);
242 goto done;
243 }
244
245 /* Mark the inode as being wiped from disk. */
246 ocfs2_mark_inode_remotely_deleted(inode);
247 spin_unlock(&OCFS2_I(inode)->ip_lock);
248
249 /* Not sure this is necessary anymore. */
250 d_prune_aliases(inode);
251
252 /* If we get here, then we're voting 'yes', so commit the
253 * delete on our side. */
254 response = OCFS2_RESPONSE_OK;
255done:
256 return response;
257}
258
259static void ocfs2_process_vote(struct ocfs2_super *osb, 145static void ocfs2_process_vote(struct ocfs2_super *osb,
260 struct ocfs2_vote_msg *msg) 146 struct ocfs2_vote_msg *msg)
261{ 147{
262 int net_status, vote_response; 148 int net_status, vote_response;
263 int orphaned_slot = 0; 149 unsigned int node_num;
264 unsigned int node_num, generation;
265 u64 blkno; 150 u64 blkno;
266 enum ocfs2_vote_request request; 151 enum ocfs2_vote_request request;
267 struct inode *inode = NULL;
268 struct ocfs2_msg_hdr *hdr = &msg->v_hdr; 152 struct ocfs2_msg_hdr *hdr = &msg->v_hdr;
269 struct ocfs2_response_msg response; 153 struct ocfs2_response_msg response;
270 154
271 /* decode the network mumbo jumbo into local variables. */ 155 /* decode the network mumbo jumbo into local variables. */
272 request = be32_to_cpu(hdr->h_request); 156 request = be32_to_cpu(hdr->h_request);
273 blkno = be64_to_cpu(hdr->h_blkno); 157 blkno = be64_to_cpu(hdr->h_blkno);
274 generation = be32_to_cpu(hdr->h_generation);
275 node_num = be32_to_cpu(hdr->h_node_num); 158 node_num = be32_to_cpu(hdr->h_node_num);
276 if (request == OCFS2_VOTE_REQ_DELETE)
277 orphaned_slot = be32_to_cpu(msg->md1.v_orphaned_slot);
278 159
279 mlog(0, "processing vote: request = %u, blkno = %llu, " 160 mlog(0, "processing vote: request = %u, blkno = %llu, node_num = %u\n",
280 "generation = %u, node_num = %u, priv1 = %u\n", request, 161 request, (unsigned long long)blkno, node_num);
281 (unsigned long long)blkno, generation, node_num,
282 be32_to_cpu(msg->md1.v_generic1));
283 162
284 if (!ocfs2_is_valid_vote_request(request)) { 163 if (!ocfs2_is_valid_vote_request(request)) {
285 mlog(ML_ERROR, "Invalid vote request %d from node %u\n", 164 mlog(ML_ERROR, "Invalid vote request %d from node %u\n",
@@ -302,52 +181,6 @@ static void ocfs2_process_vote(struct ocfs2_super *osb,
302 break; 181 break;
303 } 182 }
304 183
305 /* We cannot process the remaining message types before we're
306 * fully mounted. It's perfectly safe however to send a 'yes'
307 * response as we can't possibly have any of the state they're
308 * asking us to modify yet. */
309 if (atomic_read(&osb->vol_state) == VOLUME_INIT)
310 goto respond;
311
312 /* If we get here, then the request is against an inode. */
313 inode = ocfs2_ilookup_for_vote(osb, blkno,
314 request == OCFS2_VOTE_REQ_DELETE);
315
316 /* Not finding the inode is perfectly valid - it means we're
317 * not interested in what the other node is about to do to it
318 * so in those cases we automatically respond with an
319 * affirmative. Cluster locking ensures that we won't race
320 * interest in the inode with this vote request. */
321 if (!inode)
322 goto respond;
323
324 /* Check generation values. It's possible for us to get a
325 * request against a stale inode. If so then we proceed as if
326 * we had not found an inode in the first place. */
327 if (inode->i_generation != generation) {
328 mlog(0, "generation passed %u != inode generation = %u, "
329 "ip_flags = %x, ip_blkno = %llu, msg %llu, i_count = %u, "
330 "message type = %u\n", generation, inode->i_generation,
331 OCFS2_I(inode)->ip_flags,
332 (unsigned long long)OCFS2_I(inode)->ip_blkno,
333 (unsigned long long)blkno, atomic_read(&inode->i_count),
334 request);
335 iput(inode);
336 inode = NULL;
337 goto respond;
338 }
339
340 switch (request) {
341 case OCFS2_VOTE_REQ_DELETE:
342 vote_response = ocfs2_process_delete_request(inode,
343 &orphaned_slot);
344 break;
345 default:
346 mlog(ML_ERROR, "node %u, invalid request: %u\n",
347 node_num, request);
348 vote_response = OCFS2_RESPONSE_BAD_MSG;
349 }
350
351respond: 184respond:
352 /* Response struture is small so we just put it on the stack 185 /* Response struture is small so we just put it on the stack
353 * and stuff it inline. */ 186 * and stuff it inline. */
@@ -357,7 +190,6 @@ respond:
357 response.r_hdr.h_generation = hdr->h_generation; 190 response.r_hdr.h_generation = hdr->h_generation;
358 response.r_hdr.h_node_num = cpu_to_be32(osb->node_num); 191 response.r_hdr.h_node_num = cpu_to_be32(osb->node_num);
359 response.r_response = cpu_to_be32(vote_response); 192 response.r_response = cpu_to_be32(vote_response);
360 response.r_orphaned_slot = cpu_to_be32(orphaned_slot);
361 193
362 net_status = o2net_send_message(OCFS2_MESSAGE_TYPE_RESPONSE, 194 net_status = o2net_send_message(OCFS2_MESSAGE_TYPE_RESPONSE,
363 osb->net_key, 195 osb->net_key,
@@ -373,9 +205,6 @@ respond:
373 && net_status != -ENOTCONN) 205 && net_status != -ENOTCONN)
374 mlog(ML_ERROR, "message to node %u fails with error %d!\n", 206 mlog(ML_ERROR, "message to node %u fails with error %d!\n",
375 node_num, net_status); 207 node_num, net_status);
376
377 if (inode)
378 iput(inode);
379} 208}
380 209
381static void ocfs2_vote_thread_do_work(struct ocfs2_super *osb) 210static void ocfs2_vote_thread_do_work(struct ocfs2_super *osb)
@@ -634,8 +463,7 @@ bail:
634static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb, 463static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
635 u64 blkno, 464 u64 blkno,
636 unsigned int generation, 465 unsigned int generation,
637 enum ocfs2_vote_request type, 466 enum ocfs2_vote_request type)
638 u32 priv)
639{ 467{
640 struct ocfs2_vote_msg *request; 468 struct ocfs2_vote_msg *request;
641 struct ocfs2_msg_hdr *hdr; 469 struct ocfs2_msg_hdr *hdr;
@@ -651,8 +479,6 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb,
651 hdr->h_request = cpu_to_be32(type); 479 hdr->h_request = cpu_to_be32(type);
652 hdr->h_blkno = cpu_to_be64(blkno); 480 hdr->h_blkno = cpu_to_be64(blkno);
653 hdr->h_generation = cpu_to_be32(generation); 481 hdr->h_generation = cpu_to_be32(generation);
654
655 request->md1.v_generic1 = cpu_to_be32(priv);
656 } 482 }
657 483
658 return request; 484 return request;
@@ -664,7 +490,7 @@ static int ocfs2_do_request_vote(struct ocfs2_super *osb,
664 struct ocfs2_vote_msg *request, 490 struct ocfs2_vote_msg *request,
665 struct ocfs2_net_response_cb *callback) 491 struct ocfs2_net_response_cb *callback)
666{ 492{
667 int status, response; 493 int status, response = -EBUSY;
668 unsigned int response_id; 494 unsigned int response_id;
669 struct ocfs2_msg_hdr *hdr; 495 struct ocfs2_msg_hdr *hdr;
670 496
@@ -686,109 +512,12 @@ bail:
686 return status; 512 return status;
687} 513}
688 514
689static int ocfs2_request_vote(struct inode *inode,
690 struct ocfs2_vote_msg *request,
691 struct ocfs2_net_response_cb *callback)
692{
693 int status;
694 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
695
696 if (ocfs2_inode_is_new(inode))
697 return 0;
698
699 status = -EAGAIN;
700 while (status == -EAGAIN) {
701 if (!(osb->s_mount_opt & OCFS2_MOUNT_NOINTR) &&
702 signal_pending(current))
703 return -ERESTARTSYS;
704
705 status = ocfs2_super_lock(osb, 0);
706 if (status < 0) {
707 mlog_errno(status);
708 break;
709 }
710
711 status = 0;
712 if (!ocfs2_node_map_is_only(osb, &osb->mounted_map,
713 osb->node_num))
714 status = ocfs2_do_request_vote(osb, request, callback);
715
716 ocfs2_super_unlock(osb, 0);
717 }
718 return status;
719}
720
721static void ocfs2_delete_response_cb(void *priv,
722 struct ocfs2_response_msg *resp)
723{
724 int orphaned_slot, node;
725 struct inode *inode = priv;
726
727 orphaned_slot = be32_to_cpu(resp->r_orphaned_slot);
728 node = be32_to_cpu(resp->r_hdr.h_node_num);
729 mlog(0, "node %d tells us that inode %llu is orphaned in slot %d\n",
730 node, (unsigned long long)OCFS2_I(inode)->ip_blkno,
731 orphaned_slot);
732
733 /* The other node may not actually know which slot the inode
734 * is orphaned in. */
735 if (orphaned_slot == OCFS2_INVALID_SLOT)
736 return;
737
738 /* Ok, the responding node knows which slot this inode is
739 * orphaned in. We verify that the information is correct and
740 * then record this in the inode. ocfs2_delete_inode will use
741 * this information to determine which lock to take. */
742 spin_lock(&OCFS2_I(inode)->ip_lock);
743 mlog_bug_on_msg(OCFS2_I(inode)->ip_orphaned_slot != orphaned_slot &&
744 OCFS2_I(inode)->ip_orphaned_slot
745 != OCFS2_INVALID_SLOT, "Inode %llu: Node %d says it's "
746 "orphaned in slot %d, we think it's in %d\n",
747 (unsigned long long)OCFS2_I(inode)->ip_blkno,
748 be32_to_cpu(resp->r_hdr.h_node_num),
749 orphaned_slot, OCFS2_I(inode)->ip_orphaned_slot);
750
751 OCFS2_I(inode)->ip_orphaned_slot = orphaned_slot;
752 spin_unlock(&OCFS2_I(inode)->ip_lock);
753}
754
755int ocfs2_request_delete_vote(struct inode *inode)
756{
757 int orphaned_slot, status;
758 struct ocfs2_net_response_cb delete_cb;
759 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
760 struct ocfs2_vote_msg *request;
761
762 spin_lock(&OCFS2_I(inode)->ip_lock);
763 orphaned_slot = OCFS2_I(inode)->ip_orphaned_slot;
764 spin_unlock(&OCFS2_I(inode)->ip_lock);
765
766 delete_cb.rc_cb = ocfs2_delete_response_cb;
767 delete_cb.rc_priv = inode;
768
769 mlog(0, "Inode %llu, we start thinking orphaned slot is %d\n",
770 (unsigned long long)OCFS2_I(inode)->ip_blkno, orphaned_slot);
771
772 status = -ENOMEM;
773 request = ocfs2_new_vote_request(osb, OCFS2_I(inode)->ip_blkno,
774 inode->i_generation,
775 OCFS2_VOTE_REQ_DELETE, orphaned_slot);
776 if (request) {
777 status = ocfs2_request_vote(inode, request, &delete_cb);
778
779 kfree(request);
780 }
781
782 return status;
783}
784
785int ocfs2_request_mount_vote(struct ocfs2_super *osb) 515int ocfs2_request_mount_vote(struct ocfs2_super *osb)
786{ 516{
787 int status; 517 int status;
788 struct ocfs2_vote_msg *request = NULL; 518 struct ocfs2_vote_msg *request = NULL;
789 519
790 request = ocfs2_new_vote_request(osb, 0ULL, 0, 520 request = ocfs2_new_vote_request(osb, 0ULL, 0, OCFS2_VOTE_REQ_MOUNT);
791 OCFS2_VOTE_REQ_MOUNT, 0);
792 if (!request) { 521 if (!request) {
793 status = -ENOMEM; 522 status = -ENOMEM;
794 goto bail; 523 goto bail;
@@ -821,8 +550,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb)
821 int status; 550 int status;
822 struct ocfs2_vote_msg *request = NULL; 551 struct ocfs2_vote_msg *request = NULL;
823 552
824 request = ocfs2_new_vote_request(osb, 0ULL, 0, 553 request = ocfs2_new_vote_request(osb, 0ULL, 0, OCFS2_VOTE_REQ_UMOUNT);
825 OCFS2_VOTE_REQ_UMOUNT, 0);
826 if (!request) { 554 if (!request) {
827 status = -ENOMEM; 555 status = -ENOMEM;
828 goto bail; 556 goto bail;
@@ -969,7 +697,6 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg,
969 be32_to_cpu(work->w_msg.v_hdr.h_generation)); 697 be32_to_cpu(work->w_msg.v_hdr.h_generation));
970 mlog(0, "h_node_num = %u\n", 698 mlog(0, "h_node_num = %u\n",
971 be32_to_cpu(work->w_msg.v_hdr.h_node_num)); 699 be32_to_cpu(work->w_msg.v_hdr.h_node_num));
972 mlog(0, "v_generic1 = %u\n", be32_to_cpu(work->w_msg.md1.v_generic1));
973 700
974 spin_lock(&osb->vote_task_lock); 701 spin_lock(&osb->vote_task_lock);
975 list_add_tail(&work->w_list, &osb->vote_list); 702 list_add_tail(&work->w_list, &osb->vote_list);
diff --git a/fs/ocfs2/vote.h b/fs/ocfs2/vote.h
index 53ebc1c69e56..9ea46f62de31 100644
--- a/fs/ocfs2/vote.h
+++ b/fs/ocfs2/vote.h
@@ -38,14 +38,11 @@ static inline void ocfs2_kick_vote_thread(struct ocfs2_super *osb)
38 wake_up(&osb->vote_event); 38 wake_up(&osb->vote_event);
39} 39}
40 40
41int ocfs2_request_delete_vote(struct inode *inode);
42int ocfs2_request_mount_vote(struct ocfs2_super *osb); 41int ocfs2_request_mount_vote(struct ocfs2_super *osb);
43int ocfs2_request_umount_vote(struct ocfs2_super *osb); 42int ocfs2_request_umount_vote(struct ocfs2_super *osb);
44int ocfs2_register_net_handlers(struct ocfs2_super *osb); 43int ocfs2_register_net_handlers(struct ocfs2_super *osb);
45void ocfs2_unregister_net_handlers(struct ocfs2_super *osb); 44void ocfs2_unregister_net_handlers(struct ocfs2_super *osb);
46 45
47void ocfs2_mark_inode_remotely_deleted(struct inode *inode);
48
49void ocfs2_remove_node_from_vote_queues(struct ocfs2_super *osb, 46void ocfs2_remove_node_from_vote_queues(struct ocfs2_super *osb,
50 int node_num); 47 int node_num);
51#endif 48#endif
diff --git a/fs/sync.c b/fs/sync.c
index d0feff61e6aa..5cb9e7e43383 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -239,13 +239,11 @@ out:
239/* 239/*
240 * `endbyte' is inclusive 240 * `endbyte' is inclusive
241 */ 241 */
242int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte, 242int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
243 unsigned int flags) 243 loff_t endbyte, unsigned int flags)
244{ 244{
245 int ret; 245 int ret;
246 struct address_space *mapping;
247 246
248 mapping = file->f_mapping;
249 if (!mapping) { 247 if (!mapping) {
250 ret = -EINVAL; 248 ret = -EINVAL;
251 goto out; 249 goto out;
@@ -275,4 +273,4 @@ int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
275out: 273out:
276 return ret; 274 return ret;
277} 275}
278EXPORT_SYMBOL_GPL(do_sync_file_range); 276EXPORT_SYMBOL_GPL(do_sync_mapping_range);
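
The sync.c rename generalizes the helper from a struct file to a bare address_space, so callers that only hold a mapping can use it. A former file-based caller adapts by passing file->f_mapping; the compatibility wrapper below is illustrative, not part of the patch:

    static int sync_file_range_compat(struct file *file, loff_t offset,
                                      loff_t endbyte, unsigned int flags)
    {
            /* exactly what do_sync_file_range() used to do internally */
            return do_sync_mapping_range(file->f_mapping, offset, endbyte,
                                         flags);
    }
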