/*
 *  linux/fs/nfs/inode.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  nfs inode and superblock handling functions
 *
 *  Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
 *  experimental NFS changes. Modularisation taken straight from SYS5 fs.
 *
 *  Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
 *  J.S.Peatfield@damtp.cam.ac.uk
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/metrics.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/lockd/bind.h>
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/nfs_xdr.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "iostat.h"
#include "internal.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

#define NFS_64_BIT_INODE_NUMBERS_ENABLED	1

/* Default is to see 64-bit inode numbers */
static int enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;

static void nfs_invalidate_inode(struct inode *);
static int nfs_update_inode(struct inode *, struct nfs_fattr *);

static struct kmem_cache * nfs_inode_cachep;

/*
 * Map the 64-bit fileid carried in @fattr to the unsigned long value
 * used as the local inode number / hash.
 */
static inline unsigned long nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
{
	return nfs_fileid_to_ino_t(fattr->fileid);
}

/**
 * nfs_compat_user_ino64 - returns the user-visible inode number
 * @fileid: 64-bit fileid
 *
 * When the boot parameter nfs.enable_ino64 is zero, squash the 64-bit
 * fileid down to 32 bits by XORing in its upper half; otherwise hand
 * the fileid back unchanged.
 */
u64 nfs_compat_user_ino64(u64 fileid)
{
	int squashed;

	if (enable_ino64)
		return fileid;

	squashed = fileid;
	if (sizeof(squashed) < sizeof(fileid))
		squashed ^= fileid >> (sizeof(fileid) - sizeof(squashed)) * 8;
	return squashed;
}

/*
 * Write back the inode's outstanding COMMIT work.  For a synchronous
 * writeback, wait for in-flight data before committing.  On failure the
 * inode is re-marked dirty so the VM will retry later.
 */
int nfs_write_inode(struct inode *inode, int sync)
{
	int status;

	if (!sync) {
		status = nfs_commit_inode(inode, 0);
	} else {
		status = filemap_fdatawait(inode->i_mapping);
		if (status == 0)
			status = nfs_commit_inode(inode, FLUSH_SYNC);
	}
	if (status < 0) {
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		return status;
	}
	return 0;
}

void nfs_clear_inode(struct inode *inode)
{
	/*
	 * The following should never happen...
	 */
	BUG_ON(nfs_have_writebacks(inode));
	BUG_ON(!list_empty(&NFS_I(inode)->open_files));
	nfs_zap_acl_cache(inode);
	nfs_access_zap_cache(inode);
}

/**
 * nfs_sync_mapping - helper to flush all mmapped dirty data to disk
 *
 * Unmaps userspace mappings first so further dirtying faults again,
 * then writes back and waits on the page cache.  Returns 0 on success
 * or the first error encountered.
 */
int nfs_sync_mapping(struct address_space *mapping)
{
	int status;

	if (mapping->nrpages == 0)
		return 0;

	unmap_mapping_range(mapping, 0, 0, 0);
	status = filemap_write_and_wait(mapping);
	if (status == 0)
		status = nfs_wb_all(mapping->host);
	return status;
}

/*
 * Invalidate the local caches.  Caller holds inode->i_lock.
 */
static void nfs_zap_caches_locked(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	unsigned long invalid = NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS
			| NFS_INO_INVALID_ACL | NFS_INO_REVAL_PAGECACHE;
	int mode = inode->i_mode;

	nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);

	/* Restart the attribute cache timeout from the minimum */
	nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
	nfsi->attrtimeo_timestamp = jiffies;

	memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
	/* Cached data is only meaningful for files, dirs and symlinks */
	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
		invalid |= NFS_INO_INVALID_DATA;
	nfsi->cache_validity |= invalid;
}

/*
 * nfs_zap_caches - invalidate the attribute and data caches
 *
 * Locked wrapper around nfs_zap_caches_locked().
 */
void nfs_zap_caches(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	nfs_zap_caches_locked(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Mark the inode's cached data invalid, but only if the mapping
 * actually holds pages worth invalidating.
 */
void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
{
	if (mapping->nrpages == 0)
		return;
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
	spin_unlock(&inode->i_lock);
}

/*
 * Drop any cached ACL for @inode via the per-protocol hook (NFSv2 has
 * none), then clear the INVALID_ACL bit.
 */
void nfs_zap_acl_cache(struct inode *inode)
{
	void (*zap)(struct inode *) = NFS_PROTO(inode)->clear_acl_cache;

	if (zap != NULL)
		zap(inode);
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
	spin_unlock(&inode->i_lock);
}

/*
 * Mark the cached atime invalid so it is refetched from the server
 * before being trusted again.
 */
void nfs_invalidate_atime(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);
}

/*
 * Invalidate, but do not unhash, the inode.
 * NB: must be called with inode->i_lock held!
 */
static void nfs_invalidate_inode(struct inode *inode)
{
	/* Flag stale first so nfs_find_actor() refuses to reuse it */
	set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
	nfs_zap_caches_locked(inode);
}

/* Lookup key handed to iget5_locked(): file handle plus attributes */
struct nfs_find_desc {
	struct nfs_fh		*fh;	/* file handle to match / copy */
	struct nfs_fattr	*fattr;	/* supplies the fileid to match */
};

/*
 * In NFSv3 we can have 64bit inode numbers. In order to support
 * this, and re-exported directories (also seen in NFSv2)
 * we are forced to allow 2 different inodes to have the same
 * i_ino.
 *
 * Match an inode against the (fileid, fh) pair in @opaque; never match
 * bad or stale inodes, forcing a fresh one to be created instead.
 */
static int
nfs_find_actor(struct inode *inode, void *opaque)
{
	struct nfs_find_desc *desc = opaque;

	if (NFS_FILEID(inode) != desc->fattr->fileid)
		return 0;
	if (nfs_compare_fh(NFS_FH(inode), desc->fh) != 0)
		return 0;
	return !(is_bad_inode(inode) || NFS_STALE(inode));
}

/*
 * iget5_locked() init callback: stamp a newly allocated inode with the
 * fileid and file handle from the lookup descriptor.
 */
static int
nfs_init_locked(struct inode *inode, void *opaque)
{
	struct nfs_find_desc *desc = opaque;

	set_nfs_fileid(inode, desc->fattr->fileid);
	nfs_copy_fh(NFS_FH(inode), desc->fh);
	return 0;
}

/* Don't use READDIRPLUS on directories that we believe are too large */
#define NFS_LIMIT_READDIRPLUS (8*PAGE_SIZE)

/*
 * This is our front-end to iget that looks up inodes by file handle
 * instead of inode number.
 *
 * Returns the inode with an elevated reference count on success, or an
 * ERR_PTR() on failure.  A freshly created inode is fully initialised
 * from @fattr before unlock_new_inode() publishes it; an existing one
 * is refreshed from @fattr instead.
 */
struct inode *
nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
{
	struct nfs_find_desc desc = {
		.fh	= fh,
		.fattr	= fattr
	};
	struct inode *inode = ERR_PTR(-ENOENT);
	unsigned long hash;

	/* Without valid attributes we cannot instantiate anything */
	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
		goto out_no_inode;

	if (!fattr->nlink) {
		printk("NFS: Buggy server - nlink == 0!\n");
		goto out_no_inode;
	}

	hash = nfs_fattr_to_ino_t(fattr);

	inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc);
	if (inode == NULL) {
		inode = ERR_PTR(-ENOMEM);
		goto out_no_inode;
	}

	if (inode->i_state & I_NEW) {
		struct nfs_inode *nfsi = NFS_I(inode);
		unsigned long now = jiffies;

		/* We set i_ino for the few things that still rely on it,
		 * such as stat(2) */
		inode->i_ino = hash;

		/* We can't support update_atime(), since the server will reset it */
		inode->i_flags |= S_NOATIME|S_NOCMTIME;
		inode->i_mode = fattr->mode;
		/* Why so? Because we want revalidate for devices/FIFOs, and
		 * that's precisely what we have in nfs_file_inode_operations.
		 */
		inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
		if (S_ISREG(inode->i_mode)) {
			inode->i_fop = &nfs_file_operations;
			inode->i_data.a_ops = &nfs_file_aops;
			inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
		} else if (S_ISDIR(inode->i_mode)) {
			inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
			inode->i_fop = &nfs_dir_operations;
			/* Only advise READDIRPLUS for small directories */
			if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
			    && fattr->size <= NFS_LIMIT_READDIRPLUS)
				set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
			/* Deal with crossing mountpoints */
			if (!nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
				if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
					inode->i_op = &nfs_referral_inode_operations;
				else
					inode->i_op = &nfs_mountpoint_inode_operations;
				inode->i_fop = NULL;
				set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags);
			}
		} else if (S_ISLNK(inode->i_mode))
			inode->i_op = &nfs_symlink_inode_operations;
		else
			init_special_inode(inode, inode->i_mode, fattr->rdev);

		/* Seed the attribute cache from the server's reply */
		nfsi->read_cache_jiffies = fattr->time_start;
		nfsi->attr_gencount = fattr->gencount;
		inode->i_atime = fattr->atime;
		inode->i_mtime = fattr->mtime;
		inode->i_ctime = fattr->ctime;
		if (fattr->valid & NFS_ATTR_FATTR_V4)
			nfsi->change_attr = fattr->change_attr;
		inode->i_size = nfs_size_to_loff_t(fattr->size);
		inode->i_nlink = fattr->nlink;
		inode->i_uid = fattr->uid;
		inode->i_gid = fattr->gid;
		if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
			/*
			 * report the blocks in 512byte units
			 */
			inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
		} else {
			inode->i_blocks = fattr->du.nfs2.blocks;
		}
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
		nfsi->attrtimeo_timestamp = now;
		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
		nfsi->access_cache = RB_ROOT;

		unlock_new_inode(inode);
	} else
		nfs_refresh_inode(inode, fattr);
	dprintk("NFS: nfs_fhget(%s/%Ld ct=%d)\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		atomic_read(&inode->i_count));

out:
	return inode;

out_no_inode:
	dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
	goto out;
}

#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE)

/*
 * nfs_setattr - change the attributes of a dentry's inode on the server
 *
 * Trims @attr down to what actually changes, flushes dirty data first
 * (so size and time updates are consistent), returns any delegation
 * before ownership/mode changes, then issues the SETATTR RPC and folds
 * the returned attributes back into the inode cache.
 */
int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct nfs_fattr fattr;
	int error;

	nfs_inc_stats(inode, NFSIOS_VFSSETATTR);

	/* skip mode change if it's just for clearing setuid/setgid */
	if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
		attr->ia_valid &= ~ATTR_MODE;

	if (attr->ia_valid & ATTR_SIZE) {
		/* Only regular files can truncate; drop no-op resizes */
		if (!S_ISREG(inode->i_mode) || attr->ia_size == i_size_read(inode))
			attr->ia_valid &= ~ATTR_SIZE;
	}

	/* Optimization: if the end result is no change, don't RPC */
	attr->ia_valid &= NFS_VALID_ATTRS;
	if ((attr->ia_valid & ~ATTR_FILE) == 0)
		return 0;

	/* Write all dirty data */
	if (S_ISREG(inode->i_mode)) {
		filemap_write_and_wait(inode->i_mapping);
		nfs_wb_all(inode);
	}
	/*
	 * Return any delegations if we're going to change ACLs
	 */
	if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
		nfs_inode_return_delegation(inode);
	error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr);
	if (error == 0)
		nfs_refresh_inode(inode, &fattr);
	return error;
}

/**
 * nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * This is a copy of the common vmtruncate, but with the locking
 * corrected to take into account the fact that NFS requires
 * inode->i_size to be updated under the inode->i_lock.
 *
 * Returns 0 on success, -ETXTBSY for an in-use swapfile, or -EFBIG
 * (after raising SIGXFSZ where the rlimit applies) when @offset
 * exceeds a file-size limit.
 */
static int nfs_vmtruncate(struct inode * inode, loff_t offset)
{
	if (i_size_read(inode) < offset) {
		/* Extending the file: only size limits need checking */
		unsigned long limit;

		limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
		if (limit != RLIM_INFINITY && offset > limit)
			goto out_sig;
		if (offset > inode->i_sb->s_maxbytes)
			goto out_big;
		spin_lock(&inode->i_lock);
		i_size_write(inode, offset);
		spin_unlock(&inode->i_lock);
	} else {
		struct address_space *mapping = inode->i_mapping;

		/*
		 * truncation of in-use swapfiles is disallowed - it would
		 * cause subsequent swapout to scribble on the now-freed
		 * blocks.
		 */
		if (IS_SWAPFILE(inode))
			return -ETXTBSY;
		spin_lock(&inode->i_lock);
		i_size_write(inode, offset);
		spin_unlock(&inode->i_lock);

		/*
		 * unmap_mapping_range is called twice, first simply for
		 * efficiency so that truncate_inode_pages does fewer
		 * single-page unmaps.  However after this first call, and
		 * before truncate_inode_pages finishes, it is possible for
		 * private pages to be COWed, which remain after
		 * truncate_inode_pages finishes, hence the second
		 * unmap_mapping_range call must be made for correctness.
		 */
		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(mapping, offset);
		unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
	}
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out_big:
	return -EFBIG;
}

/**
 * nfs_setattr_update_inode - Update inode metadata after a setattr call.
 * @inode: pointer to struct inode
 * @attr: pointer to struct iattr
 *
 * Note: we do this in the *proc.c in order to ensure that
 *       it works for things like exclusive creates too.
 */
void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
{
	if (attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) {
		spin_lock(&inode->i_lock);
		if (attr->ia_valid & ATTR_MODE)
			/* Only the permission bits change; file type stays */
			inode->i_mode = (attr->ia_mode & S_IALLUGO) |
					(inode->i_mode & ~S_IALLUGO);
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		/* Ownership/mode changes invalidate cached access and ACLs */
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
		spin_unlock(&inode->i_lock);
	}
	if (attr->ia_valid & ATTR_SIZE) {
		nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
		nfs_vmtruncate(inode, attr->ia_size);
	}
}

/*
 * nfs_getattr - VFS getattr: revalidate cached attributes, fill @stat
 *
 * Dirty pages are flushed first so the c/mtime reported to userspace
 * reflects outstanding writes; a full revalidation is forced only when
 * the caller can actually observe atime.
 */
int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
	int err;

	/*
	 * Flush out writes to the server in order to update c/mtime.
	 *
	 * Hold the i_mutex to suspend application writes temporarily;
	 * this prevents long-running writing applications from blocking
	 * nfs_wb_nocommit.
	 */
	if (S_ISREG(inode->i_mode)) {
		mutex_lock(&inode->i_mutex);
		nfs_wb_nocommit(inode);
		mutex_unlock(&inode->i_mutex);
	}

	/*
	 * We may force a getattr if the user cares about atime.
	 *
	 * Note that we only have to check the vfsmount flags here:
	 *  - NFS always sets S_NOATIME by so checking it would give a
	 *    bogus result
	 *  - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
	 *    no point in checking those.
	 */
 	if ((mnt->mnt_flags & MNT_NOATIME) ||
 	    ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		need_atime = 0;

	if (need_atime)
		err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
	else
		err = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (!err) {
		generic_fillattr(inode, stat);
		/* Report the possibly 32-bit-squashed inode number */
		stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
	}
	return err;
}

/*
 * Allocate and initialise an open context holding references to the
 * dentry, vfsmount and RPC credential.  Returns NULL on allocation
 * failure.  The context starts with a single reference.
 */
static struct nfs_open_context *alloc_nfs_open_context(struct vfsmount *mnt, struct dentry *dentry, struct rpc_cred *cred)
{
	struct nfs_open_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (ctx == NULL)
		return NULL;

	ctx->path.dentry = dget(dentry);
	ctx->path.mnt = mntget(mnt);
	ctx->cred = get_rpccred(cred);
	ctx->state = NULL;
	ctx->lockowner = current->files;
	ctx->flags = 0;
	ctx->error = 0;
	ctx->dir_cookie = 0;
	atomic_set(&ctx->count, 1);
	return ctx;
}

/* Take an extra reference on @ctx (NULL-safe); returns @ctx */
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
	if (ctx != NULL)
		atomic_inc(&ctx->count);
	return ctx;
}

/*
 * Drop a reference on @ctx.  When the last reference goes, the context
 * is unhashed from the inode's open_files list (under i_lock, via
 * atomic_dec_and_lock), any NFSv4 open state is closed — synchronously
 * if @wait is set — and the resources are released.
 */
static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
{
	struct inode *inode;

	if (ctx == NULL)
		return;

	inode = ctx->path.dentry->d_inode;
	/* Only takes i_lock when the count actually hits zero */
	if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
		return;
	list_del(&ctx->list);
	spin_unlock(&inode->i_lock);
	if (ctx->state != NULL) {
		if (wait)
			nfs4_close_sync(&ctx->path, ctx->state, ctx->mode);
		else
			nfs4_close_state(&ctx->path, ctx->state, ctx->mode);
	}
	if (ctx->cred != NULL)
		put_rpccred(ctx->cred);
	path_put(&ctx->path);
	kfree(ctx);
}

/* Drop a reference; any final NFSv4 close happens asynchronously */
void put_nfs_open_context(struct nfs_open_context *ctx)
{
	__put_nfs_open_context(ctx, 0);
}

/* Drop a reference; wait for any final NFSv4 close to complete */
static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
{
	__put_nfs_open_context(ctx, 1);
}

/*
 * Ensure that mmap has a recent RPC credential for use when writing out
 * shared pages
 */
static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	filp->private_data = get_nfs_open_context(ctx);
	spin_lock(&inode->i_lock);
	list_add(&ctx->list, &nfsi->open_files);
	spin_unlock(&inode->i_lock);
}

/*
 * Given an inode, search for an open context with the desired characteristics
 */
struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_open_context *pos, *ctx = NULL;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &nfsi->open_files, list) {
		if (cred != NULL && pos->cred != cred)
			continue;
		if ((pos->mode & mode) == mode) {
			ctx = get_nfs_open_context(pos);
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	return ctx;
}

/*
 * Detach the open context from @filp on close and drop the file's
 * reference synchronously (waiting for any NFSv4 close).
 */
static void nfs_file_clear_open_context(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (ctx) {
		filp->private_data = NULL;
		spin_lock(&inode->i_lock);
		/* Re-queue at the tail of open_files before the final put */
		list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
		spin_unlock(&inode->i_lock);
		put_nfs_open_context_sync(ctx);
	}
}

/*
 * These allocate and release file read/write context information.
 *
 * nfs_open() looks up the caller's RPC credential, builds an open
 * context for the file and attaches it; the local reference is dropped
 * once the file holds its own.
 */
int nfs_open(struct inode *inode, struct file *filp)
{
	struct rpc_cred *cred = rpc_lookup_cred();
	struct nfs_open_context *ctx;

	if (IS_ERR(cred))
		return PTR_ERR(cred);

	ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
	put_rpccred(cred);
	if (ctx == NULL)
		return -ENOMEM;

	ctx->mode = filp->f_mode;
	nfs_file_set_open_context(filp, ctx);
	put_nfs_open_context(ctx);
	return 0;
}

/* Release the open context attached to @filp by nfs_open() */
int nfs_release(struct inode *inode, struct file *filp)
{
	nfs_file_clear_open_context(filp);
	return 0;
}

/*
 * This function is called whenever some part of NFS notices that
 * the cached attributes have to be refreshed.
 *
 * Issues a GETATTR to the server and merges the reply into the inode
 * via nfs_refresh_inode().  An -ESTALE reply zaps the caches and, for
 * non-directories, marks the inode stale.  Returns 0 on success or a
 * negative errno.
 */
int
__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
	int		 status = -ESTALE;
	struct nfs_fattr fattr;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
		inode->i_sb->s_id, (long long)NFS_FILEID(inode));

	/* Bad or already-stale inodes are not worth an RPC */
	if (is_bad_inode(inode))
		goto out;
	if (NFS_STALE(inode))
		goto out;

	nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
	status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), &fattr);
	if (status != 0) {
		dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
			 inode->i_sb->s_id,
			 (long long)NFS_FILEID(inode), status);
		if (status == -ESTALE) {
			nfs_zap_caches(inode);
			/* A stale directory may just have been moved server-side */
			if (!S_ISDIR(inode->i_mode))
				set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
		}
		goto out;
	}

	status = nfs_refresh_inode(inode, &fattr);
	if (status) {
		dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
			 inode->i_sb->s_id,
			 (long long)NFS_FILEID(inode), status);
		goto out;
	}

	if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);

	dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n",
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode));

 out:
	return status;
}

/*
 * Return non-zero if the inode's attribute cache has expired.  Holding
 * a read delegation means the cache is always considered fresh.
 */
int nfs_attribute_timeout(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegation(inode, FMODE_READ))
		return 0;
	/*
	 * Special case: if the attribute timeout is set to 0, then always
	 * 		 treat the cache as having expired (unless holding
	 * 		 a delegation).
	 */
	if (nfsi->attrtimeo == 0)
		return 1;
	return !time_in_range(jiffies, nfsi->read_cache_jiffies,
			nfsi->read_cache_jiffies + nfsi->attrtimeo);
}

/**
 * nfs_revalidate_inode - Revalidate the inode attributes
 * @server - pointer to nfs_server struct
 * @inode - pointer to inode struct
 *
 * Updates inode attribute information by retrieving the data from the server.
 * Skips the RPC entirely while the attribute cache is still valid.
 */
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
	int cache_valid = !(NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATTR)
			&& !nfs_attribute_timeout(inode);

	if (cache_valid)
		return NFS_STALE(inode) ? -ESTALE : 0;
	return __nfs_revalidate_inode(server, inode);
}

/*
 * Throw away cached pages for @inode (caller must have flushed dirty
 * data first).  Directory cookie verifiers are reset too, since cached
 * READDIR cookies are no longer meaningful.
 */
static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int status;

	if (mapping->nrpages != 0) {
		status = invalidate_inode_pages2(mapping);
		if (status < 0)
			return status;
	}

	spin_lock(&inode->i_lock);
	nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
	if (S_ISDIR(inode->i_mode))
		memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
	spin_unlock(&inode->i_lock);

	nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
	dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
			inode->i_sb->s_id, (long long)NFS_FILEID(inode));
	return 0;
}

/*
 * Locked variant: takes i_mutex, syncs the mapping and then invalidates
 * it, but only if the data cache is still flagged invalid.
 */
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
	int status = 0;

	mutex_lock(&inode->i_mutex);
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_DATA) {
		status = nfs_sync_mapping(mapping);
		if (status == 0)
			status = nfs_invalidate_mapping_nolock(inode, mapping);
	}
	mutex_unlock(&inode->i_mutex);
	return status;
}

/**
 * nfs_revalidate_mapping_nolock - Revalidate the pagecache
 * @inode - pointer to host inode
 * @mapping - pointer to mapping
 *
 * Revalidates the attributes first if they may have gone stale, then
 * invalidates the page cache if the data is flagged invalid.
 */
int nfs_revalidate_mapping_nolock(struct inode *inode, struct address_space *mapping)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int status = 0;

	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
			|| nfs_attribute_timeout(inode)
			|| NFS_STALE(inode))
		status = __nfs_revalidate_inode(NFS_SERVER(inode), inode);

	if (status >= 0 && (nfsi->cache_validity & NFS_INO_INVALID_DATA))
		status = nfs_invalidate_mapping_nolock(inode, mapping);
	return status;
}

/**
 * nfs_revalidate_mapping - Revalidate the pagecache
 * @inode - pointer to host inode
 * @mapping - pointer to mapping
 *
 * This version of the function will take the inode->i_mutex and attempt to
 * flush out all dirty data if it needs to invalidate the page cache.
 */
int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int status = 0;

	if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
			|| nfs_attribute_timeout(inode)
			|| NFS_STALE(inode))
		status = __nfs_revalidate_inode(NFS_SERVER(inode), inode);

	if (status >= 0 && (nfsi->cache_validity & NFS_INO_INVALID_DATA))
		status = nfs_invalidate_mapping(inode, mapping);
	return status;
}

/*
 * nfs_wcc_update_inode - apply weak cache consistency (WCC) data
 *
 * If the pre-operation attributes in @fattr match what we have cached,
 * ours was the only change, so the post-operation values can be applied
 * directly without invalidating the attribute cache.
 */
static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if ((fattr->valid & NFS_ATTR_WCC_V4) != 0 &&
			nfsi->change_attr == fattr->pre_change_attr) {
		nfsi->change_attr = fattr->change_attr;
		/* A directory's change attr moving implies its contents did too */
		if (S_ISDIR(inode->i_mode))
			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
	}
	/* If we have atomic WCC data, we may update some attributes */
	if ((fattr->valid & NFS_ATTR_WCC) != 0) {
		if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
		if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
			if (S_ISDIR(inode->i_mode))
				nfsi->cache_validity |= NFS_INO_INVALID_DATA;
		}
		/* Only trust the new size when no pages are cached locally */
		if (i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size) &&
		    nfsi->npages == 0)
			i_size_write(inode, nfs_size_to_loff_t(fattr->size));
	}
}

/**
 * nfs_check_inode_attributes - verify consistency of the inode attribute cache
 * @inode - pointer to inode
 * @fattr - updated attributes
 *
 * Verifies the attribute cache. If we have just changed the attributes,
 * so that fattr carries weak cache consistency data, then it may
 * also update the ctime/mtime/change_attribute.
 *
 * Returns -EIO if the inode's identity (fileid or file type) changed
 * behind our back; 0 otherwise, with any mismatches accumulated into
 * cache_validity flags.
 */
static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t cur_size, new_isize;
	unsigned long invalid = 0;


	/* Has the inode gone and changed behind our back? */
	if (nfsi->fileid != fattr->fileid
			|| (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
		return -EIO;
	}

	/* NFSv4 change attribute mismatch invalidates the whole page cache */
	if ((fattr->valid & NFS_ATTR_FATTR_V4) != 0 &&
			nfsi->change_attr != fattr->change_attr)
		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;

	/* Verify a few of the more important attributes */
	if (!timespec_equal(&inode->i_mtime, &fattr->mtime))
		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;

	/* A size change only matters if we hold no dirty/cached pages */
	cur_size = i_size_read(inode);
 	new_isize = nfs_size_to_loff_t(fattr->size);
	if (cur_size != new_isize && nfsi->npages == 0)
		invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;

	/* Have any file permissions changed? */
	if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
			|| inode->i_uid != fattr->uid
			|| inode->i_gid != fattr->gid)
		invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;

	/* Has the link count changed? */
	if (inode->i_nlink != fattr->nlink)
		invalid |= NFS_INO_INVALID_ATTR;

	if (!timespec_equal(&inode->i_atime, &fattr->atime))
		invalid |= NFS_INO_INVALID_ATIME;

	if (invalid != 0)
		nfsi->cache_validity |= invalid;

	/* The cache is considered fresh as of the request's start time */
	nfsi->read_cache_jiffies = fattr->time_start;
	return 0;
}

static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
{
	return timespec_co<span class="hl com">/* fastlane.c: Driver for Phase5&apos;s Fastlane SCSI Controller.</span>
<span class="hl com"> *</span>
<span class="hl com"> * Copyright (C) 1996 Jesper Skov (jskov&#64;cygnus.co.uk)</span>
<span class="hl com"> *</span>
<span class="hl com"> * This driver is based on the CyberStorm driver, hence the occasional</span>
<span class="hl com"> * reference to CyberStorm.</span>
<span class="hl com"> *</span>
<span class="hl com"> * Betatesting &amp; crucial adjustments by</span>
<span class="hl com"> *        Patrik Rak (prak3264&#64;ss1000.ms.mff.cuni.cz)</span>
<span class="hl com"> *</span>
<span class="hl com"> */</span>

<span class="hl com">/* TODO:</span>
<span class="hl com"> *</span>
<span class="hl com"> * o According to the doc from laire, it is required to reset the DMA when</span>
<span class="hl com"> *   the transfer is done. ATM we reset DMA just before every new </span>
<span class="hl com"> *   dma_init_(read|write).</span>
<span class="hl com"> *</span>
<span class="hl com"> * 1) Figure out how to make a cleaner merge with the sparc driver with regard</span>
<span class="hl com"> *    to the caches and the Sparc MMU mapping.</span>
<span class="hl com"> * 2) Make as few routines required outside the generic driver. A lot of the</span>
<span class="hl com"> *    routines in this file used to be inline!</span>
<span class="hl com"> */</span>

<span class="hl ppc">#include &lt;linux/module.h&gt;</span>

<span class="hl ppc">#include &lt;linux/init.h&gt;</span>
<span class="hl ppc">#include &lt;linux/kernel.h&gt;</span>
<span class="hl ppc">#include &lt;linux/delay.h&gt;</span>
<span class="hl ppc">#include &lt;linux/types.h&gt;</span>
<span class="hl ppc">#include &lt;linux/string.h&gt;</span>
<span class="hl ppc">#include &lt;linux/slab.h&gt;</span>
<span class="hl ppc">#include &lt;linux/blkdev.h&gt;</span>
<span class="hl ppc">#include &lt;linux/proc_fs.h&gt;</span>
<span class="hl ppc">#include &lt;linux/stat.h&gt;</span>
<span class="hl ppc">#include &lt;linux/interrupt.h&gt;</span>

<span class="hl ppc">#include</span> <span class="hl pps">&quot;scsi.h&quot;</span><span class="hl ppc"></span>
<span class="hl ppc">#include &lt;scsi/scsi_host.h&gt;</span>
<span class="hl ppc">#include</span> <span class="hl pps">&quot;NCR53C9x.h&quot;</span><span class="hl ppc"></span>

<span class="hl ppc">#include &lt;linux/zorro.h&gt;</span>
<span class="hl ppc">#include &lt;asm/irq.h&gt;</span>

<span class="hl ppc">#include &lt;asm/amigaints.h&gt;</span>
<span class="hl ppc">#include &lt;asm/amigahw.h&gt;</span>

<span class="hl ppc">#include &lt;asm/pgtable.h&gt;</span>

<span class="hl com">/* Such day has just come... */</span>
<span class="hl ppc">#if 0</span>
<span class="hl com">/* Let this defined unless you really need to enable DMA IRQ one day */</span>
<span class="hl ppc">#define NODMAIRQ</span>
<span class="hl ppc">#endif</span>

<span class="hl com">/* The controller registers can be found in the Z2 config area at these</span>
<span class="hl com"> * offsets:</span>
<span class="hl com"> */</span>
<span class="hl ppc">#define FASTLANE_ESP_ADDR 0x1000001</span>
<span class="hl ppc">#define FASTLANE_DMA_ADDR 0x1000041</span>


<span class="hl com">/* The Fastlane DMA interface */</span>
<span class="hl kwb">struct</span> fastlane_dma_registers <span class="hl opt">{</span>
	<span class="hl kwc">volatile</span> <span class="hl kwb">unsigned char</span> cond_reg<span class="hl opt">;</span>	<span class="hl com">/* DMA status  (ro) [0x0000] */</span>
<span class="hl ppc">#define ctrl_reg  cond_reg</span>			<span class="hl com">/* DMA control (wo) [0x0000] */</span><span class="hl ppc"></span>
	<span class="hl kwb">unsigned char</span> dmapad1<span class="hl opt">[</span><span class="hl num">0x3f</span><span class="hl opt">];</span>
	<span class="hl kwc">volatile</span> <span class="hl kwb">unsigned char</span> clear_strobe<span class="hl opt">;</span>    <span class="hl com">/* DMA clear   (wo) [0x0040] */</span>
<span class="hl opt">};</span>


<span class="hl com">/* DMA status bits */</span>
<span class="hl ppc">#define FASTLANE_DMA_MINT  0x80</span>
<span class="hl ppc">#define FASTLANE_DMA_IACT  0x40</span>
<span class="hl ppc">#define FASTLANE_DMA_CREQ  0x20</span>

<span class="hl com">/* DMA control bits */</span>
<span class="hl ppc">#define FASTLANE_DMA_FCODE 0xa0</span>
<span class="hl ppc">#define FASTLANE_DMA_MASK  0xf3</span>
<span class="hl ppc">#define FASTLANE_DMA_LED   0x10</span>	<span class="hl com">/* HD led control 1 = on */</span><span class="hl ppc"></span>
<span class="hl ppc">#define FASTLANE_DMA_WRITE 0x08</span> <span class="hl com">/* 1 = write */</span><span class="hl ppc"></span>
<span class="hl ppc">#define FASTLANE_DMA_ENABLE 0x04</span> <span class="hl com">/* Enable DMA */</span><span class="hl ppc"></span>
<span class="hl ppc">#define FASTLANE_DMA_EDI   0x02</span>	<span class="hl com">/* Enable DMA IRQ ? */</span><span class="hl ppc"></span>
<span class="hl ppc">#define FASTLANE_DMA_ESI   0x01</span>	<span class="hl com">/* Enable SCSI IRQ */</span><span class="hl ppc"></span>

<span class="hl kwb">static int</span>  <span class="hl kwd">dma_bytes_sent</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">,</span> <span class="hl kwb">int</span> fifo_count<span class="hl opt">);</span>
<span class="hl kwb">static int</span>  <span class="hl kwd">dma_can_transfer</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">,</span> Scsi_Cmnd <span class="hl opt">*</span>sp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_dump_state</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_init_read</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">,</span> __u32 addr<span class="hl opt">,</span> <span class="hl kwb">int</span> length<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_init_write</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">,</span> __u32 vaddr<span class="hl opt">,</span> <span class="hl kwb">int</span> length<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_ints_off</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_ints_on</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static int</span>  <span class="hl kwd">dma_irq_p</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_irq_exit</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_led_off</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_led_on</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static int</span>  <span class="hl kwd">dma_ports_p</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">);</span>
<span class="hl kwb">static void</span> <span class="hl kwd">dma_setup</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">,</span> __u32 addr<span class="hl opt">,</span> <span class="hl kwb">int</span> count<span class="hl opt">,</span> <span class="hl kwb">int</span> write<span class="hl opt">);</span>

<span class="hl kwb">static unsigned char</span> ctrl_data <span class="hl opt">=</span> <span class="hl num">0</span><span class="hl opt">;</span>	<span class="hl com">/* Keep backup of the stuff written</span>
<span class="hl com">				 * to ctrl_reg. Always write a copy</span>
<span class="hl com">				 * to this register when writing to</span>
<span class="hl com">				 * the hardware register!</span>
<span class="hl com">				 */</span>

<span class="hl kwb">static</span> <span class="hl kwc">volatile</span> <span class="hl kwb">unsigned char</span> cmd_buffer<span class="hl opt">[</span><span class="hl num">16</span><span class="hl opt">];</span>
				<span class="hl com">/* This is where all commands are put</span>
<span class="hl com">				 * before they are transferred to the ESP chip</span>
<span class="hl com">				 * via PIO.</span>
<span class="hl com">				 */</span>

<span class="hl kwb">static</span> <span class="hl kwc">inline</span> <span class="hl kwb">void</span> <span class="hl kwd">dma_clear</span><span class="hl opt">(</span><span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">)</span>
<span class="hl opt">{</span>
	<span class="hl kwb">struct</span> fastlane_dma_registers <span class="hl opt">*</span>dregs <span class="hl opt">=</span>
		<span class="hl opt">(</span><span class="hl kwb">struct</span> fastlane_dma_registers <span class="hl opt">*) (</span>esp<span class="hl opt">-&gt;</span>dregs<span class="hl opt">);</span>
	<span class="hl kwb">unsigned long</span> <span class="hl opt">*</span>t<span class="hl opt">;</span>

	ctrl_data <span class="hl opt">= (</span>ctrl_data <span class="hl opt">&amp;</span> FASTLANE_DMA_MASK<span class="hl opt">);</span>
	dregs<span class="hl opt">-&gt;</span>ctrl_reg <span class="hl opt">=</span> ctrl_data<span class="hl opt">;</span>

	t <span class="hl opt">= (</span><span class="hl kwb">unsigned long</span> <span class="hl opt">*)(</span>esp<span class="hl opt">-&gt;</span>edev<span class="hl opt">);</span>

	dregs<span class="hl opt">-&gt;</span>clear_strobe <span class="hl opt">=</span> <span class="hl num">0</span><span class="hl opt">;</span>
	<span class="hl opt">*</span>t <span class="hl opt">=</span> <span class="hl num">0</span> <span class="hl opt">;</span>
<span class="hl opt">}</span>

<span class="hl com">/***************************************************************** Detection */</span>
<span class="hl kwb">int</span> __init <span class="hl kwd">fastlane_esp_detect</span><span class="hl opt">(</span>Scsi_Host_Template <span class="hl opt">*</span>tpnt<span class="hl opt">)</span>
<span class="hl opt">{</span>
	<span class="hl kwb">struct</span> NCR_ESP <span class="hl opt">*</span>esp<span class="hl opt">;</span>
	<span class="hl kwb">struct</span> zorro_dev <span class="hl opt">*</span>z <span class="hl opt">=</span> NULL<span class="hl opt">;</span>
	<span class="hl kwb">unsigned long</span> address<span class="hl opt">;</span>

	<span class="hl kwa">if</span> <span class="hl opt">((</span>z <span class="hl opt">=</span> <span class="hl kwd">zorro_find_device</span><span class="hl opt">(</span>ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060<span class="hl opt">,</span> z<span class="hl opt">))) {</span>
	    <span class="hl kwb">unsigned long</span> board <span class="hl opt">=</span> z<span class="hl opt">-&gt;</span>resource<span class="hl opt">.</span>start<span class="hl opt">;</span>
	    <span class="hl kwa">if</span> <span class="hl opt">(</span><span class="hl kwd">request_mem_region</span><span class="hl opt">(</span>board<span class="hl opt">+</span>FASTLANE_ESP_ADDR<span class="hl opt">,</span>
				   <span class="hl kwa">sizeof</span><span class="hl opt">(</span><span class="hl kwb">struct</span> ESP_regs<span class="hl opt">),</span> <span class="hl str">&quot;NCR53C9x&quot;</span><span class="hl opt">)) {</span>
		<span class="hl com">/* Check if this is really a fastlane controller. The problem</span>
<span class="hl com">		 * is that also the cyberstorm and blizzard controllers use</span>
<span class="hl com">		 * this ID value. Fortunately only Fastlane maps in Z3 space</span>
<span class="hl com">		 */</span>
		<span class="hl kwa">if</span> <span class="hl opt">(</span>board <span class="hl opt">&lt;</span> <span class="hl num">0x1000000</span><span class="hl opt">) {</span>
			<span class="hl kwa">goto</span> err_release<span class="hl opt">;</span>
		<span class="hl opt">}</span>
		esp <span class="hl opt">=</span> <span class="hl kwd">esp_allocate</span><span class="hl opt">(</span>tpnt<span class="hl opt">, (</span><span class="hl kwb">void</span> <span class="hl opt">*)</span>board<span class="hl opt">+</span>FASTLANE_ESP_ADDR<span class="hl opt">);</span>

		<span class="hl com">/* Do command transfer with programmed I/O */</span>
		esp<span class="hl opt">-&gt;</span>do_pio_cmds <span class="hl opt">=</span> <span class="hl num">1</span><span class="hl opt">;</span>

		<span class="hl com">/* Required functions */</span>
		esp<span class="hl opt">-&gt;</span>dma_bytes_sent <span class="hl opt">= &amp;</span>dma_bytes_sent<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_can_transfer <span class="hl opt">= &amp;</span>dma_can_transfer<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_dump_state <span class="hl opt">= &amp;</span>dma_dump_state<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_init_read <span class="hl opt">= &amp;</span>dma_init_read<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_init_write <span class="hl opt">= &amp;</span>dma_init_write<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_ints_off <span class="hl opt">= &amp;</span>dma_ints_off<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_ints_on <span class="hl opt">= &amp;</span>dma_ints_on<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_irq_p <span class="hl opt">= &amp;</span>dma_irq_p<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_ports_p <span class="hl opt">= &amp;</span>dma_ports_p<span class="hl opt">;</span>
		esp<span class="hl opt">-&gt;</span>dma_setup <span class="hl opt">= &amp;</span>dma_setup<span class