/*
 * lib/textsearch.c	Generic text search interface
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 * 		Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * ==========================================================================
 *
 * INTRODUCTION
 *
 *   The textsearch infrastructure provides text searching facilities for
 *   both linear and non-linear data. Individual search algorithms are
 *   implemented in modules and chosen by the user.
 *
 * ARCHITECTURE
 *
 *      User
 *     +----------------+
 *     |        finish()|<--------------(6)-----------------+
 *     |get_next_block()|<--------------(5)---------------+ |
 *     |                |                     Algorithm   | |
 *     |                |                    +------------------------------+
 *     |                |                    |  init()   find()   destroy() |
 *     |                |                    +------------------------------+
 *     |                |       Core API           ^       ^          ^
 *     |                |      +---------------+  (2)     (4)        (8)
 *     |             (1)|----->| prepare()     |---+       |          |
 *     |             (3)|----->| find()/next() |-----------+          |
 *     |             (7)|----->| destroy()     |----------------------+
 *     +----------------+      +---------------+
 *  
 *   (1) User configures a search by calling _prepare() specifying the
 *       search parameters such as the pattern and algorithm name.
 *   (2) Core requests the algorithm to allocate and initialize a search
 *       configuration according to the specified parameters.
 *   (3) User starts the search(es) by calling _find() or _next() to
 *       fetch subsequent occurrences. A state variable is provided
 *       to the algorithm to store persistent variables.
 *   (4) Core eventually resets the search offset and forwards the find()
 *       request to the algorithm.
 *   (5) Algorithm calls get_next_block() provided by the user continuously
 *       to fetch the data to be searched, block by block.
 *   (6) Algorithm invokes finish() after the last call to get_next_block
 *       to clean up any leftovers from get_next_block. (Optional)
 *   (7) User destroys the configuration by calling _destroy().
 *   (8) Core notifies the algorithm to destroy algorithm specific
 *       allocations. (Optional)
 *
 * USAGE
 *
 *   Before a search can be performed, a configuration must be created
 *   by calling textsearch_prepare() specifying the searching algorithm,
 *   the pattern to look for and flags. The TS_IGNORECASE flag enables
 *   case insensitive matching; depending on the algorithm it may slow
 *   the search down, so use it at your own risk.
 *   The returned configuration may then be used an arbitrary number
 *   of times and even in parallel as long as a separate struct
 *   ts_state variable is provided to every instance.
 *
 *   The actual search is performed by either calling
 *   textsearch_find_continuous() for linear data or by providing your own
 *   get_next_block() implementation and calling textsearch_find(). Both
 *   functions return the position of the first occurrence of the pattern
 *   or UINT_MAX if no match was found. Subsequent occurrences can be
 *   found by calling textsearch_next() regardless of the linearity of
 *   the data.
 *
 *   Once you're done using a configuration, it must be given back via
 *   textsearch_destroy().
 *
 * EXAMPLE
 *
 *   unsigned int pos;
 *   int err;
 *   struct ts_config *conf;
 *   struct ts_state state;
 *   const char *pattern = "chicken";
 *   const char *example = "We dance the funky chicken";
 *
 *   conf = textsearch_prepare("kmp", pattern, strlen(pattern),
 *                             GFP_KERNEL, TS_AUTOLOAD);
 *   if (IS_ERR(conf)) {
 *       err = PTR_ERR(conf);
 *       goto errout;
 *   }
 *
 *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
 *   if (pos != UINT_MAX)
 *       panic("Oh my god, dancing chickens at %u\n", pos);
 *
 *   textsearch_destroy(conf);
 * ==========================================================================
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/err.h>
#include <linux/textsearch.h>

static LIST_HEAD(ts_ops);
static DEFINE_SPINLOCK(ts_mod_lock);

/* Look up a registered search algorithm by name under the RCU read
 * lock; on success a reference on the algorithm's owning module is
 * taken which the caller must drop again with module_put().
 */
static inline struct ts_ops *lookup_ts_algo(const char *name)
{
	struct ts_ops *o;

	rcu_read_lock();
	list_for_each_entry_rcu(o, &ts_ops, list) {
		if (!strcmp(name, o->name)) {
			if (!try_module_get(o->owner))
				o = NULL;
			rcu_read_unlock();
			return o;
		}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * textsearch_register - register a textsearch module
 * @ops: operations lookup table
 *
 * This function must be called by textsearch modules to announce
 * their presence. The specified @ops must have %name set to a
 * unique identifier and the callbacks find(), init(), get_pattern(),
 * and get_pattern_len() must be implemented.
 *
 * Returns 0 or -EEXIST if another module has already registered
 * with the same name.
 */
int textsearch_register(struct ts_ops *ops)
{
	int err = -EEXIST;
	struct ts_ops *o;

	if (ops->name == NULL || ops->find == NULL || ops->init == NULL ||
	    ops->get_pattern == NULL || ops->get_pattern_len == NULL)
		return -EINVAL;

	spin_lock(&ts_mod_lock);
	list_for_each_entry(o, &ts_ops, list) {
		if (!strcmp(ops->name, o->name))
			goto errout;
	}

	list_add_tail_rcu(&ops->list, &ts_ops);
	err = 0;
errout:
	spin_unlock(&ts_mod_lock);
	return err;
}

/**
 * textsearch_unregister - unregister a textsearch module
 * @ops: operations lookup table
 *
 * This function must be called by textsearch modules to announce
 * their disappearance, for example when the module gets unloaded.
 * The &ops parameter must be the same as the one during the
 * registration.
 *
 * Returns 0 on success or -ENOENT if no matching textsearch
 * registration was found.
 */
int textsearch_unregister(struct ts_ops *ops)
{
	int err = 0;
	struct ts_ops *o;

	spin_lock(&ts_mod_lock);
	list_for_each_entry(o, &ts_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&ts_mod_lock);
	return err;
}
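
/*
 * Example (sketch): a minimal algorithm module built on the two calls
 * above. The "noop" algorithm, its callbacks and the module init/exit
 * names are hypothetical and only illustrate the registration protocol;
 * real algorithms (ts_kmp.c, ts_bm.c, ts_fsm.c) do actual work in their
 * find() callback. alloc_ts_config() and ts_config_priv() are the
 * helpers from <linux/textsearch.h>.
 *
 *   struct ts_noop {
 *           unsigned int    pattern_len;
 *           u8              pattern[0];
 *   };
 *
 *   static unsigned int noop_find(struct ts_config *conf,
 *                                 struct ts_state *state)
 *   {
 *           return UINT_MAX;                  /+ never matches +/
 *   }
 *
 *   static struct ts_config *noop_init(const void *pattern, unsigned int len,
 *                                      gfp_t gfp_mask, int flags)
 *   {
 *           struct ts_config *conf;
 *           struct ts_noop *noop;
 *
 *           conf = alloc_ts_config(sizeof(*noop) + len, gfp_mask);
 *           if (IS_ERR(conf))
 *                   return conf;
 *
 *           noop = ts_config_priv(conf);
 *           noop->pattern_len = len;
 *           memcpy(noop->pattern, pattern, len);
 *           return conf;
 *   }
 *
 *   static void *noop_get_pattern(struct ts_config *conf)
 *   {
 *           return ((struct ts_noop *) ts_config_priv(conf))->pattern;
 *   }
 *
 *   static unsigned int noop_get_pattern_len(struct ts_config *conf)
 *   {
 *           return ((struct ts_noop *) ts_config_priv(conf))->pattern_len;
 *   }
 *
 *   static struct ts_ops noop_ops = {
 *           .name            = "noop",
 *           .find            = noop_find,
 *           .init            = noop_init,
 *           .get_pattern     = noop_get_pattern,
 *           .get_pattern_len = noop_get_pattern_len,
 *           .owner           = THIS_MODULE,
 *           .list            = LIST_HEAD_INIT(noop_ops.list)
 *   };
 *
 *   static int __init ts_noop_mod_init(void)
 *   {
 *           return textsearch_register(&noop_ops);
 *   }
 *
 *   static void __exit ts_noop_mod_exit(void)
 *   {
 *           textsearch_unregister(&noop_ops);
 *   }
 *
 *   module_init(ts_noop_mod_init);
 *   module_exit(ts_noop_mod_exit);
 */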

struct ts_linear_state
{
	unsigned int	len;
	const void	*data;
};

static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
				    struct ts_config *conf,
				    struct ts_state *state)
{
	struct ts_linear_state *st = (struct ts_linear_state *) state->cb;

	if (likely(consumed < st->len)) {
		*dst = st->data + consumed;
		return st->len - consumed;
	}

	return 0;
}

/**
 * textsearch_find_continuous - search a pattern in continuous/linear data
 * @conf: search configuration
 * @state: search state
 * @data: data to search in
 * @len: length of data
 *
 * A simplified version of textsearch_find() for continuous/linear data.
 * Call textsearch_next() to retrieve subsequent matches.
 *
 * Returns the position of first occurrence of the pattern or
 * %UINT_MAX if no occurrence was found.
 */ 
unsigned int textsearch_find_continuous(struct ts_config *conf,
					struct ts_state *state,
					const void *data, unsigned int len)
{
	struct ts_linear_state *st = (struct ts_linear_state *) state->cb;

	conf->get_next_block = get_linear_data;
	st->data = data;
	st->len = len;

	return textsearch_find(conf, state);
}
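
/*
 * Example (sketch): searching non-linear data by providing a private
 * get_next_block() implementation instead of the linear helper above.
 * The two-fragment layout and all names used here (frag_state,
 * frag_next_block, buf0/buf1, handle_match) are made up for
 * illustration; real users such as skb_find_text() walk their own data
 * structures here. The private state must fit into state->cb.
 *
 *   struct frag_state {
 *           const u8        *frag[2];
 *           unsigned int    len[2];
 *   };
 *
 *   static unsigned int frag_next_block(unsigned int consumed,
 *                                       const u8 **dst,
 *                                       struct ts_config *conf,
 *                                       struct ts_state *state)
 *   {
 *           struct frag_state *st = (struct frag_state *) state->cb;
 *
 *           if (consumed < st->len[0]) {
 *                   *dst = st->frag[0] + consumed;
 *                   return st->len[0] - consumed;
 *           }
 *           consumed -= st->len[0];
 *           if (consumed < st->len[1]) {
 *                   *dst = st->frag[1] + consumed;
 *                   return st->len[1] - consumed;
 *           }
 *           return 0;
 *   }
 *
 *   ...
 *   struct frag_state *st = (struct frag_state *) state.cb;
 *
 *   st->frag[0] = buf0; st->len[0] = len0;
 *   st->frag[1] = buf1; st->len[1] = len1;
 *   conf->get_next_block = frag_next_block;
 *
 *   for (pos = textsearch_find(conf, &state); pos != UINT_MAX;
 *        pos = textsearch_next(conf, &state))
 *           handle_match(pos);
 */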

/**
 * textsearch_prepare - Prepare a search
 * @algo: name of search algorithm
 * @pattern: pattern data
 * @len: length of pattern
 * @gfp_mask: allocation mask
 * @flags: search flags
 *
 * Looks up the search algorithm module and creates a new textsearch
 * configuration for the specified pattern. Upon completion all
 * necessary refcnts are held and the configuration must be given back
 * using textsearch_destroy() after usage.
 *
 * Note: The format of the pattern may not be compatible between
 *       the various search algorithms.
 *
 * Returns a new textsearch configuration according to the specified
 * parameters or an ERR_PTR(). If a zero length pattern is passed, this
 * function returns ERR_PTR(-EINVAL).
 */
struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
				     unsigned int len, gfp_t gfp_mask, int flags)
{
	int err = -ENOENT;
	struct ts_config *conf;
	struct ts_ops *ops;
	
	if (len == 0)
		return ERR_PTR(-EINVAL);

	ops = lookup_ts_algo(algo);
#ifdef CONFIG_MODULES
	/*
	 * Why not always autoload, you may ask. Some users are
	 * in a situation where requesting a module may deadlock,
	 * especially when the module is located on an NFS mount.
	 */
	if (ops == NULL && flags & TS_AUTOLOAD) {
		request_module("ts_%s", algo);
		ops = lookup_ts_algo(algo);
	}
#endif

	if (ops == NULL)
		goto errout;

	conf = ops->init(pattern, len, gfp_mask, flags);
	if (IS_ERR(conf)) {
		err = PTR_ERR(conf);
		goto errout;
	}

	conf->ops = ops;
	return conf;

errout:
	if (ops)
		module_put(ops->owner);
		
	return ERR_PTR(err);
}
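
/*
 * Example (sketch): case insensitive matching. TS_IGNORECASE is passed
 * through to the algorithm via @flags; the cost of honouring it depends
 * on the algorithm chosen. The pattern, the haystack/haystack_len
 * variables and the error handling below are illustrative only.
 *
 *   conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL,
 *                             TS_AUTOLOAD | TS_IGNORECASE);
 *   if (IS_ERR(conf))
 *           return PTR_ERR(conf);
 *
 *   pos = textsearch_find_continuous(conf, &state, haystack, haystack_len);
 *   ...
 *   textsearch_destroy(conf);
 */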

/**
 * textsearch_destroy - destroy a search configuration
 * @conf: search configuration
 *
 * Releases all references of the configuration and frees
 * up the memory.
 */
void textsearch_destroy(struct ts_config *conf)
{
	if (conf->ops) {
		if (conf->ops->destroy)
			conf->ops->destroy(conf);
		module_put(conf->ops->owner);
	}

	kfree(conf);
}

EXPORT_SYMBOL(textsearch_register);
EXPORT_SYMBOL(textsearch_unregister);
EXPORT_SYMBOL(textsearch_prepare);
EXPORT_SYMBOL(textsearch_find_continuous);
EXPORT_SYMBOL(textsearch_destroy);