Diffstat (limited to 'mm')

 -rw-r--r--  mm/Makefile      |   2
 -rw-r--r--  mm/allocpercpu.c | 129
 -rw-r--r--  mm/slab.c        | 124
 -rw-r--r--  mm/slob.c        |  45
 4 files changed, 130 insertions, 170 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 9dd824c11eeb..60c56c0b5e10 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,4 +23,4 @@ obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-
+obj-$(CONFIG_SMP) += allocpercpu.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
new file mode 100644
index 000000000000..eaa9abeea536
--- /dev/null
+++ b/mm/allocpercpu.c
@@ -0,0 +1,129 @@
+/*
+ * linux/mm/allocpercpu.c
+ *
+ * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+/**
+ * percpu_depopulate - depopulate per-cpu data for given cpu
+ * @__pdata: per-cpu data to depopulate
+ * @cpu: depopulate per-cpu data for this cpu
+ *
+ * Depopulating per-cpu data for a cpu going offline would be a typical
+ * use case. You need to register a cpu hotplug handler for that purpose.
+ */
+void percpu_depopulate(void *__pdata, int cpu)
+{
+	struct percpu_data *pdata = __percpu_disguise(__pdata);
+	if (pdata->ptrs[cpu]) {
+		kfree(pdata->ptrs[cpu]);
+		pdata->ptrs[cpu] = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(percpu_depopulate);
+
+/**
+ * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
+ * @__pdata: per-cpu data to depopulate
+ * @mask: depopulate per-cpu data for cpu's selected through mask bits
+ */
+void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+{
+	int cpu;
+	for_each_cpu_mask(cpu, *mask)
+		percpu_depopulate(__pdata, cpu);
+}
+EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
+
+/**
+ * percpu_populate - populate per-cpu data for given cpu
+ * @__pdata: per-cpu data to populate further
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @cpu: populate per-data for this cpu
+ *
+ * Populating per-cpu data for a cpu coming online would be a typical
+ * use case. You need to register a cpu hotplug handler for that purpose.
+ * Per-cpu object is populated with zeroed buffer.
+ */
+void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
+{
+	struct percpu_data *pdata = __percpu_disguise(__pdata);
+	int node = cpu_to_node(cpu);
+
+	BUG_ON(pdata->ptrs[cpu]);
+	if (node_online(node)) {
+		/* FIXME: kzalloc_node(size, gfp, node) */
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
+		if (pdata->ptrs[cpu])
+			memset(pdata->ptrs[cpu], 0, size);
+	} else
+		pdata->ptrs[cpu] = kzalloc(size, gfp);
+	return pdata->ptrs[cpu];
+}
+EXPORT_SYMBOL_GPL(percpu_populate);
+
+/**
+ * percpu_populate_mask - populate per-cpu data for more cpu's
+ * @__pdata: per-cpu data to populate further
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @mask: populate per-cpu data for cpu's selected through mask bits
+ *
+ * Per-cpu objects are populated with zeroed buffers.
+ */
+int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
+			   cpumask_t *mask)
+{
+	cpumask_t populated = CPU_MASK_NONE;
+	int cpu;
+
+	for_each_cpu_mask(cpu, *mask)
+		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
+			__percpu_depopulate_mask(__pdata, &populated);
+			return -ENOMEM;
+		} else
+			cpu_set(cpu, populated);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__percpu_populate_mask);
+
+/**
+ * percpu_alloc_mask - initial setup of per-cpu data
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @mask: populate per-data for cpu's selected through mask bits
+ *
+ * Populating per-cpu data for all online cpu's would be a typical use case,
+ * which is simplified by the percpu_alloc() wrapper.
+ * Per-cpu objects are populated with zeroed buffers.
+ */
+void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+{
+	void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
+	void *__pdata = __percpu_disguise(pdata);
+
+	if (unlikely(!pdata))
+		return NULL;
+	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+		return __pdata;
+	kfree(pdata);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+
+/**
+ * percpu_free - final cleanup of per-cpu data
+ * @__pdata: object to clean up
+ *
+ * We simply clean up any per-cpu object left. No need for the client to
+ * track and specify through a bis mask which per-cpu objects are to free.
+ */
+void percpu_free(void *__pdata)
+{
+	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	kfree(__percpu_disguise(__pdata));
+}
+EXPORT_SYMBOL_GPL(percpu_free);
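
For context, a minimal usage sketch of the interface this new file exports, assuming the alloc_percpu()/free_percpu()/per_cpu_ptr() wrappers from <linux/percpu.h> that front __percpu_alloc_mask() and percpu_free() in this era of the kernel; the hit_counter structure and the counters_* helpers below are illustrative names only and are not part of the patch.

/*
 * Hedged usage sketch (not part of the patch): obtaining, touching and
 * releasing per-cpu storage through the wrappers built on this code.
 */
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct hit_counter {
	unsigned long hits;		/* one copy per populated cpu */
};

static struct hit_counter *counters;	/* disguised pointer from alloc_percpu() */

static int counters_init(void)
{
	/* Allocates and zeroes one struct hit_counter for each online cpu. */
	counters = alloc_percpu(struct hit_counter);
	return counters ? 0 : -ENOMEM;
}

static void counters_hit(void)
{
	/* per_cpu_ptr() undoes the pointer disguise for the chosen cpu. */
	per_cpu_ptr(counters, get_cpu())->hits++;
	put_cpu();
}

static void counters_exit(void)
{
	/* free_percpu() ends up in percpu_free(), releasing every populated copy. */
	free_percpu(counters);
}
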
diff --git a/mm/slab.c b/mm/slab.c
index 619337a5cb2b..13b5050f84cc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3440,130 +3440,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 EXPORT_SYMBOL(__kmalloc_track_caller);
 #endif
 
-#ifdef CONFIG_SMP
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-void percpu_depopulate(void *__pdata, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	if (pdata->ptrs[cpu]) {
-		kfree(pdata->ptrs[cpu]);
-		pdata->ptrs[cpu] = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(percpu_depopulate);
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
-{
-	int cpu;
-	for_each_cpu_mask(cpu, *mask)
-		percpu_depopulate(__pdata, cpu);
-}
-EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	int node = cpu_to_node(cpu);
-
-	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
-		pdata->ptrs[cpu] = kzalloc(size, gfp);
-	return pdata->ptrs[cpu];
-}
-EXPORT_SYMBOL_GPL(percpu_populate);
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-			   cpumask_t *mask)
-{
-	cpumask_t populated = CPU_MASK_NONE;
-	int cpu;
-
-	for_each_cpu_mask(cpu, *mask)
-		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
-			return -ENOMEM;
-		} else
-			cpu_set(cpu, populated);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__percpu_populate_mask);
-
-/**
- * percpu_alloc_mask - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
- *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
- */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
-{
-	void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
-	void *__pdata = __percpu_disguise(pdata);
-
-	if (unlikely(!pdata))
-		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
-		return __pdata;
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
-
-/**
- * percpu_free - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void percpu_free(void *__pdata)
-{
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
-	kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(percpu_free);
-#endif /* CONFIG_SMP */
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
diff --git a/mm/slob.c b/mm/slob.c
index 7b52b20b9607..4c28a421b270 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -343,48 +343,3 @@ void kmem_cache_init(void)
 atomic_t slab_reclaim_pages = ATOMIC_INIT(0);
 EXPORT_SYMBOL(slab_reclaim_pages);
 
-#ifdef CONFIG_SMP
-
-void *__alloc_percpu(size_t size)
-{
-	int i;
-	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
-
-	if (!pdata)
-		return NULL;
-
-	for_each_possible_cpu(i) {
-		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
-		if (!pdata->ptrs[i])
-			goto unwind_oom;
-		memset(pdata->ptrs[i], 0, size);
-	}
-
-	/* Catch derefs w/o wrappers */
-	return (void *) (~(unsigned long) pdata);
-
-unwind_oom:
-	while (--i >= 0) {
-		if (!cpu_possible(i))
-			continue;
-		kfree(pdata->ptrs[i]);
-	}
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL(__alloc_percpu);
-
-void
-free_percpu(const void *objp)
-{
-	int i;
-	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
-
-	for_each_possible_cpu(i)
-		kfree(p->ptrs[i]);
-
-	kfree(p);
-}
-EXPORT_SYMBOL(free_percpu);
-
-#endif