diff options
| author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-20 18:37:56 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-20 18:37:56 -0400 |
| commit | d9eaec9e295a84a80b663996d0489fcff3a1dca9 (patch) | |
| tree | 85cfc09bb5f0eb42d3be7dfbddaad31353307796 | |
| parent | cee4cca740d209bcb4b9857baa2253d5ba4e3fbe (diff) | |
| parent | 41757106b9ca7867dafb2404d618f947b4786fd7 (diff) | |
Merge branch 'audit.b21' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current
* 'audit.b21' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current: (25 commits)
[PATCH] make set_loginuid obey audit_enabled
[PATCH] log more info for directory entry change events
[PATCH] fix AUDIT_FILTER_PREPEND handling
[PATCH] validate rule fields' types
[PATCH] audit: path-based rules
[PATCH] Audit of POSIX Message Queue Syscalls v.2
[PATCH] fix se_sen audit filter
[PATCH] deprecate AUDIT_POSSBILE
[PATCH] inline more audit helpers
[PATCH] proc_loginuid_write() uses simple_strtoul() on non-terminated array
[PATCH] update of IPC audit record cleanup
[PATCH] minor audit updates
[PATCH] fix audit_krule_to_{rule,data} return values
[PATCH] add filtering by ppid
[PATCH] log ppid
[PATCH] collect sid of those who send signals to auditd
[PATCH] execve argument logging
[PATCH] fix deadlocks in AUDIT_LIST/AUDIT_LIST_RULES
[PATCH] audit_panic() is audit-internal
[PATCH] inotify (5/5): update kernel documentation
...
Manual fixup of conflict in include/linux/inotify.h
| -rw-r--r-- | Documentation/filesystems/inotify.txt | 130 | ||||
| -rw-r--r-- | fs/Kconfig | 24 | ||||
| -rw-r--r-- | fs/Makefile | 1 | ||||
| -rw-r--r-- | fs/exec.c | 6 | ||||
| -rw-r--r-- | fs/inotify.c | 991 | ||||
| -rw-r--r-- | fs/inotify_user.c | 719 | ||||
| -rw-r--r-- | fs/namei.c | 2 | ||||
| -rw-r--r-- | fs/open.c | 4 | ||||
| -rw-r--r-- | fs/proc/base.c | 5 | ||||
| -rw-r--r-- | fs/xattr.c | 4 | ||||
| -rw-r--r-- | include/linux/audit.h | 99 | ||||
| -rw-r--r-- | include/linux/fsnotify.h | 32 | ||||
| -rw-r--r-- | include/linux/inotify.h | 108 | ||||
| -rw-r--r-- | include/linux/sched.h | 2 | ||||
| -rw-r--r-- | init/Kconfig | 3 | ||||
| -rw-r--r-- | ipc/mqueue.c | 22 | ||||
| -rw-r--r-- | ipc/msg.c | 9 | ||||
| -rw-r--r-- | ipc/sem.c | 8 | ||||
| -rw-r--r-- | ipc/shm.c | 2 | ||||
| -rw-r--r-- | kernel/audit.c | 205 | ||||
| -rw-r--r-- | kernel/audit.h | 61 | ||||
| -rw-r--r-- | kernel/auditfilter.c | 899 | ||||
| -rw-r--r-- | kernel/auditsc.c | 648 | ||||
| -rw-r--r-- | kernel/signal.c | 2 | ||||
| -rw-r--r-- | kernel/sysctl.c | 4 | ||||
| -rw-r--r-- | kernel/user.c | 2 | ||||
| -rw-r--r-- | security/selinux/ss/services.c | 2 |
27 files changed, 2961 insertions, 1033 deletions
diff --git a/Documentation/filesystems/inotify.txt b/Documentation/filesystems/inotify.txt index 6d501903f68e..59a919f16144 100644 --- a/Documentation/filesystems/inotify.txt +++ b/Documentation/filesystems/inotify.txt | |||
| @@ -69,17 +69,135 @@ Prototypes: | |||
| 69 | int inotify_rm_watch (int fd, __u32 mask); | 69 | int inotify_rm_watch (int fd, __u32 mask); |
| 70 | 70 | ||
| 71 | 71 | ||
| 72 | (iii) Internal Kernel Implementation | 72 | (iii) Kernel Interface |
| 73 | 73 | ||
| 74 | Each inotify instance is associated with an inotify_device structure. | 74 | Inotify's kernel API consists of a set of functions for managing watches and an |
| 75 | event callback. | ||
| 76 | |||
| 77 | To use the kernel API, you must first initialize an inotify instance with a set | ||
| 78 | of inotify_operations. You are given an opaque inotify_handle, which you use | ||
| 79 | for any further calls to inotify. | ||
| 80 | |||
| 81 | struct inotify_handle *ih = inotify_init(my_event_handler); | ||
| 82 | |||
| 83 | You must provide a function for processing events and a function for destroying | ||
| 84 | the inotify watch. | ||
| 85 | |||
| 86 | void handle_event(struct inotify_watch *watch, u32 wd, u32 mask, | ||
| 87 | u32 cookie, const char *name, struct inode *inode) | ||
| 88 | |||
| 89 | watch - the pointer to the inotify_watch that triggered this call | ||
| 90 | wd - the watch descriptor | ||
| 91 | mask - describes the event that occurred | ||
| 92 | cookie - an identifier for synchronizing events | ||
| 93 | name - the dentry name for affected files in a directory-based event | ||
| 94 | inode - the affected inode in a directory-based event | ||
| 95 | |||
| 96 | void destroy_watch(struct inotify_watch *watch) | ||
| 97 | |||
| 98 | You may add watches by providing a pre-allocated and initialized inotify_watch | ||
| 99 | structure and specifying the inode to watch along with an inotify event mask. | ||
| 100 | You must pin the inode during the call. You will likely wish to embed the | ||
| 101 | inotify_watch structure in a structure of your own which contains other | ||
| 102 | information about the watch. Once you add an inotify watch, it is immediately | ||
| 103 | subject to removal depending on filesystem events. You must grab a reference if | ||
| 104 | you depend on the watch hanging around after the call. | ||
| 105 | |||
| 106 | inotify_init_watch(&my_watch->iwatch); | ||
| 107 | inotify_get_watch(&my_watch->iwatch); // optional | ||
| 108 | s32 wd = inotify_add_watch(ih, &my_watch->iwatch, inode, mask); | ||
| 109 | inotify_put_watch(&my_watch->iwatch); // optional | ||
| 110 | |||
| 111 | You may use the watch descriptor (wd) or the address of the inotify_watch for | ||
| 112 | other inotify operations. You must not directly read or manipulate data in the | ||
| 113 | inotify_watch. Additionally, you must not call inotify_add_watch() more than | ||
| 114 | once for a given inotify_watch structure, unless you have first called either | ||
| 115 | inotify_rm_watch() or inotify_rm_wd(). | ||
| 116 | |||
| 117 | To determine if you have already registered a watch for a given inode, you may | ||
| 118 | call inotify_find_watch(), which gives you both the wd and the watch pointer for | ||
| 119 | the inotify_watch, or an error if the watch does not exist. | ||
| 120 | |||
| 121 | wd = inotify_find_watch(ih, inode, &watchp); | ||
| 122 | |||
| 123 | You may use container_of() on the watch pointer to access your own data | ||
| 124 | associated with a given watch. When an existing watch is found, | ||
| 125 | inotify_find_watch() bumps the refcount before releasing its locks. You must | ||
| 126 | put that reference with: | ||
| 127 | |||
| 128 | put_inotify_watch(watchp); | ||
| 129 | |||
| 130 | Call inotify_find_update_watch() to update the event mask for an existing watch. | ||
| 131 | inotify_find_update_watch() returns the wd of the updated watch, or an error if | ||
| 132 | the watch does not exist. | ||
| 133 | |||
| 134 | wd = inotify_find_update_watch(ih, inode, mask); | ||
| 135 | |||
| 136 | An existing watch may be removed by calling either inotify_rm_watch() or | ||
| 137 | inotify_rm_wd(). | ||
| 138 | |||
| 139 | int ret = inotify_rm_watch(ih, &my_watch->iwatch); | ||
| 140 | int ret = inotify_rm_wd(ih, wd); | ||
| 141 | |||
| 142 | A watch may be removed while executing your event handler with the following: | ||
| 143 | |||
| 144 | inotify_remove_watch_locked(ih, iwatch); | ||
| 145 | |||
| 146 | Call inotify_destroy() to remove all watches from your inotify instance and | ||
| 147 | release it. If there are no outstanding references, inotify_destroy() will call | ||
| 148 | your destroy_watch op for each watch. | ||
| 149 | |||
| 150 | inotify_destroy(ih); | ||
| 151 | |||
| 152 | When inotify removes a watch, it sends an IN_IGNORED event to your callback. | ||
| 153 | You may use this event as an indication to free the watch memory. Note that | ||
| 154 | inotify may remove a watch due to filesystem events, as well as by your request. | ||
| 155 | If you use IN_ONESHOT, inotify will remove the watch after the first event, at | ||
| 156 | which point you may call the final inotify_put_watch. | ||
| 157 | |||
| 158 | (iv) Kernel Interface Prototypes | ||
| 159 | |||
| 160 | struct inotify_handle *inotify_init(struct inotify_operations *ops); | ||
| 161 | |||
| 162 | inotify_init_watch(struct inotify_watch *watch); | ||
| 163 | |||
| 164 | s32 inotify_add_watch(struct inotify_handle *ih, | ||
| 165 | struct inotify_watch *watch, | ||
| 166 | struct inode *inode, u32 mask); | ||
| 167 | |||
| 168 | s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, | ||
| 169 | struct inotify_watch **watchp); | ||
| 170 | |||
| 171 | s32 inotify_find_update_watch(struct inotify_handle *ih, | ||
| 172 | struct inode *inode, u32 mask); | ||
| 173 | |||
| 174 | int inotify_rm_wd(struct inotify_handle *ih, u32 wd); | ||
| 175 | |||
| 176 | int inotify_rm_watch(struct inotify_handle *ih, | ||
| 177 | struct inotify_watch *watch); | ||
| 178 | |||
| 179 | void inotify_remove_watch_locked(struct inotify_handle *ih, | ||
| 180 | struct inotify_watch *watch); | ||
| 181 | |||
| 182 | void inotify_destroy(struct inotify_handle *ih); | ||
| 183 | |||
| 184 | void get_inotify_watch(struct inotify_watch *watch); | ||
| 185 | void put_inotify_watch(struct inotify_watch *watch); | ||
| 186 | |||
| 187 | |||
| 188 | (v) Internal Kernel Implementation | ||
| 189 | |||
| 190 | Each inotify instance is represented by an inotify_handle structure. | ||
| 191 | Inotify's userspace consumers also have an inotify_device which is | ||
| 192 | associated with the inotify_handle, and on which events are queued. | ||
| 75 | 193 | ||
| 76 | Each watch is associated with an inotify_watch structure. Watches are chained | 194 | Each watch is associated with an inotify_watch structure. Watches are chained |
| 77 | off of each associated device and each associated inode. | 195 | off of each associated inotify_handle and each associated inode. |
| 78 | 196 | ||
| 79 | See fs/inotify.c for the locking and lifetime rules. | 197 | See fs/inotify.c and fs/inotify_user.c for the locking and lifetime rules. |
| 80 | 198 | ||
| 81 | 199 | ||
| 82 | (iv) Rationale | 200 | (vi) Rationale |
| 83 | 201 | ||
| 84 | Q: What is the design decision behind not tying the watch to the open fd of | 202 | Q: What is the design decision behind not tying the watch to the open fd of |
| 85 | the watched object? | 203 | the watched object? |
| @@ -145,7 +263,7 @@ A: The poor user-space interface is the second biggest problem with dnotify. | |||
| 145 | file descriptor-based one that allows basic file I/O and poll/select. | 263 | file descriptor-based one that allows basic file I/O and poll/select. |
| 146 | Obtaining the fd and managing the watches could have been done either via a | 264 | Obtaining the fd and managing the watches could have been done either via a |
| 147 | device file or a family of new system calls. We decided to implement a | 265 | device file or a family of new system calls. We decided to implement a |
| 148 | family of system calls because that is the preffered approach for new kernel | 266 | family of system calls because that is the preferred approach for new kernel |
| 149 | interfaces. The only real difference was whether we wanted to use open(2) | 267 | interfaces. The only real difference was whether we wanted to use open(2) |
| 150 | and ioctl(2) or a couple of new system calls. System calls beat ioctls. | 268 | and ioctl(2) or a couple of new system calls. System calls beat ioctls. |
| 151 | 269 | ||
diff --git a/fs/Kconfig b/fs/Kconfig index 572cc435a1bb..20f9b557732d 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
| @@ -393,18 +393,30 @@ config INOTIFY | |||
| 393 | bool "Inotify file change notification support" | 393 | bool "Inotify file change notification support" |
| 394 | default y | 394 | default y |
| 395 | ---help--- | 395 | ---help--- |
| 396 | Say Y here to enable inotify support and the associated system | 396 | Say Y here to enable inotify support. Inotify is a file change |
| 397 | calls. Inotify is a file change notification system and a | 397 | notification system and a replacement for dnotify. Inotify fixes |
| 398 | replacement for dnotify. Inotify fixes numerous shortcomings in | 398 | numerous shortcomings in dnotify and introduces several new features |
| 399 | dnotify and introduces several new features. It allows monitoring | 399 | including multiple file events, one-shot support, and unmount |
| 400 | of both files and directories via a single open fd. Other features | ||
| 401 | include multiple file events, one-shot support, and unmount | ||
| 402 | notification. | 400 | notification. |
| 403 | 401 | ||
| 404 | For more information, see Documentation/filesystems/inotify.txt | 402 | For more information, see Documentation/filesystems/inotify.txt |
| 405 | 403 | ||
| 406 | If unsure, say Y. | 404 | If unsure, say Y. |
| 407 | 405 | ||
| 406 | config INOTIFY_USER | ||
| 407 | bool "Inotify support for userspace" | ||
| 408 | depends on INOTIFY | ||
| 409 | default y | ||
| 410 | ---help--- | ||
| 411 | Say Y here to enable inotify support for userspace, including the | ||
| 412 | associated system calls. Inotify allows monitoring of both files and | ||
| 413 | directories via a single open fd. Events are read from the file | ||
| 414 | descriptor, which is also select()- and poll()-able. | ||
| 415 | |||
| 416 | For more information, see Documentation/filesystems/inotify.txt | ||
| 417 | |||
| 418 | If unsure, say Y. | ||
| 419 | |||
| 408 | config QUOTA | 420 | config QUOTA |
| 409 | bool "Quota support" | 421 | bool "Quota support" |
| 410 | help | 422 | help |
diff --git a/fs/Makefile b/fs/Makefile index 078d3d1191a5..d0ea6bfccf29 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
| @@ -13,6 +13,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ | |||
| 13 | ioprio.o pnode.o drop_caches.o splice.o sync.o | 13 | ioprio.o pnode.o drop_caches.o splice.o sync.o |
| 14 | 14 | ||
| 15 | obj-$(CONFIG_INOTIFY) += inotify.o | 15 | obj-$(CONFIG_INOTIFY) += inotify.o |
| 16 | obj-$(CONFIG_INOTIFY_USER) += inotify_user.o | ||
| 16 | obj-$(CONFIG_EPOLL) += eventpoll.o | 17 | obj-$(CONFIG_EPOLL) += eventpoll.o |
| 17 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o | 18 | obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o |
| 18 | 19 | ||
| @@ -49,6 +49,7 @@ | |||
| 49 | #include <linux/rmap.h> | 49 | #include <linux/rmap.h> |
| 50 | #include <linux/acct.h> | 50 | #include <linux/acct.h> |
| 51 | #include <linux/cn_proc.h> | 51 | #include <linux/cn_proc.h> |
| 52 | #include <linux/audit.h> | ||
| 52 | 53 | ||
| 53 | #include <asm/uaccess.h> | 54 | #include <asm/uaccess.h> |
| 54 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
| @@ -1085,6 +1086,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
| 1085 | /* kernel module loader fixup */ | 1086 | /* kernel module loader fixup */ |
| 1086 | /* so we don't try to load run modprobe in kernel space. */ | 1087 | /* so we don't try to load run modprobe in kernel space. */ |
| 1087 | set_fs(USER_DS); | 1088 | set_fs(USER_DS); |
| 1089 | |||
| 1090 | retval = audit_bprm(bprm); | ||
| 1091 | if (retval) | ||
| 1092 | return retval; | ||
| 1093 | |||
| 1088 | retval = -ENOENT; | 1094 | retval = -ENOENT; |
| 1089 | for (try=0; try<2; try++) { | 1095 | for (try=0; try<2; try++) { |
| 1090 | read_lock(&binfmt_lock); | 1096 | read_lock(&binfmt_lock); |
diff --git a/fs/inotify.c b/fs/inotify.c index 732ec4bd5774..723836a1f718 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
| @@ -5,7 +5,10 @@ | |||
| 5 | * John McCutchan <ttb@tentacle.dhs.org> | 5 | * John McCutchan <ttb@tentacle.dhs.org> |
| 6 | * Robert Love <rml@novell.com> | 6 | * Robert Love <rml@novell.com> |
| 7 | * | 7 | * |
| 8 | * Kernel API added by: Amy Griffis <amy.griffis@hp.com> | ||
| 9 | * | ||
| 8 | * Copyright (C) 2005 John McCutchan | 10 | * Copyright (C) 2005 John McCutchan |
| 11 | * Copyright 2006 Hewlett-Packard Development Company, L.P. | ||
| 9 | * | 12 | * |
| 10 | * This program is free software; you can redistribute it and/or modify it | 13 | * This program is free software; you can redistribute it and/or modify it |
| 11 | * under the terms of the GNU General Public License as published by the | 14 | * under the terms of the GNU General Public License as published by the |
| @@ -20,35 +23,17 @@ | |||
| 20 | 23 | ||
| 21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 22 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
| 25 | #include <linux/idr.h> | 27 | #include <linux/idr.h> |
| 26 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 27 | #include <linux/fs.h> | 29 | #include <linux/fs.h> |
| 28 | #include <linux/file.h> | ||
| 29 | #include <linux/mount.h> | ||
| 30 | #include <linux/namei.h> | ||
| 31 | #include <linux/poll.h> | ||
| 32 | #include <linux/init.h> | 30 | #include <linux/init.h> |
| 33 | #include <linux/list.h> | 31 | #include <linux/list.h> |
| 34 | #include <linux/writeback.h> | 32 | #include <linux/writeback.h> |
| 35 | #include <linux/inotify.h> | 33 | #include <linux/inotify.h> |
| 36 | #include <linux/syscalls.h> | ||
| 37 | |||
| 38 | #include <asm/ioctls.h> | ||
| 39 | 34 | ||
| 40 | static atomic_t inotify_cookie; | 35 | static atomic_t inotify_cookie; |
| 41 | 36 | ||
| 42 | static kmem_cache_t *watch_cachep __read_mostly; | ||
| 43 | static kmem_cache_t *event_cachep __read_mostly; | ||
| 44 | |||
| 45 | static struct vfsmount *inotify_mnt __read_mostly; | ||
| 46 | |||
| 47 | /* these are configurable via /proc/sys/fs/inotify/ */ | ||
| 48 | int inotify_max_user_instances __read_mostly; | ||
| 49 | int inotify_max_user_watches __read_mostly; | ||
| 50 | int inotify_max_queued_events __read_mostly; | ||
| 51 | |||
| 52 | /* | 37 | /* |
| 53 | * Lock ordering: | 38 | * Lock ordering: |
| 54 | * | 39 | * |
| @@ -56,327 +41,108 @@ int inotify_max_queued_events __read_mostly; | |||
| 56 | * iprune_mutex (synchronize shrink_icache_memory()) | 41 | * iprune_mutex (synchronize shrink_icache_memory()) |
| 57 | * inode_lock (protects the super_block->s_inodes list) | 42 | * inode_lock (protects the super_block->s_inodes list) |
| 58 | * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) | 43 | * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) |
| 59 | * inotify_dev->mutex (protects inotify_device and watches->d_list) | 44 | * inotify_handle->mutex (protects inotify_handle and watches->h_list) |
| 45 | * | ||
| 46 | * The inode->inotify_mutex and inotify_handle->mutex are held during execution | ||
| 47 | * of a caller's event handler. Thus, the caller must not hold any locks | ||
| 48 | * taken in their event handler while calling any of the published inotify | ||
| 49 | * interfaces. | ||
| 60 | */ | 50 | */ |
| 61 | 51 | ||
| 62 | /* | 52 | /* |
| 63 | * Lifetimes of the three main data structures--inotify_device, inode, and | 53 | * Lifetimes of the three main data structures--inotify_handle, inode, and |
| 64 | * inotify_watch--are managed by reference count. | 54 | * inotify_watch--are managed by reference count. |
| 65 | * | 55 | * |
| 66 | * inotify_device: Lifetime is from inotify_init() until release. Additional | 56 | * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). |
| 67 | * references can bump the count via get_inotify_dev() and drop the count via | 57 | * Additional references can bump the count via get_inotify_handle() and drop |
| 68 | * put_inotify_dev(). | 58 | * the count via put_inotify_handle(). |
| 69 | * | 59 | * |
| 70 | * inotify_watch: Lifetime is from create_watch() to destory_watch(). | 60 | * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() |
| 71 | * Additional references can bump the count via get_inotify_watch() and drop | 61 | * to remove_watch_no_event(). Additional references can bump the count via |
| 72 | * the count via put_inotify_watch(). | 62 | * get_inotify_watch() and drop the count via put_inotify_watch(). The caller |
| 63 | * is responsible for the final put after receiving IN_IGNORED, or when using | ||
| 64 | * IN_ONESHOT after receiving the first event. Inotify does the final put if | ||
| 65 | * inotify_destroy() is called. | ||
| 73 | * | 66 | * |
| 74 | * inode: Pinned so long as the inode is associated with a watch, from | 67 | * inode: Pinned so long as the inode is associated with a watch, from |
| 75 | * create_watch() to put_inotify_watch(). | 68 | * inotify_add_watch() to the final put_inotify_watch(). |
| 76 | */ | 69 | */ |
| 77 | 70 | ||
| 78 | /* | 71 | /* |
| 79 | * struct inotify_device - represents an inotify instance | 72 | * struct inotify_handle - represents an inotify instance |
| 80 | * | 73 | * |
| 81 | * This structure is protected by the mutex 'mutex'. | 74 | * This structure is protected by the mutex 'mutex'. |
| 82 | */ | 75 | */ |
| 83 | struct inotify_device { | 76 | struct inotify_handle { |
| 84 | wait_queue_head_t wq; /* wait queue for i/o */ | ||
| 85 | struct idr idr; /* idr mapping wd -> watch */ | 77 | struct idr idr; /* idr mapping wd -> watch */ |
| 86 | struct mutex mutex; /* protects this bad boy */ | 78 | struct mutex mutex; /* protects this bad boy */ |
| 87 | struct list_head events; /* list of queued events */ | ||
| 88 | struct list_head watches; /* list of watches */ | 79 | struct list_head watches; /* list of watches */ |
| 89 | atomic_t count; /* reference count */ | 80 | atomic_t count; /* reference count */ |
| 90 | struct user_struct *user; /* user who opened this dev */ | ||
| 91 | unsigned int queue_size; /* size of the queue (bytes) */ | ||
| 92 | unsigned int event_count; /* number of pending events */ | ||
| 93 | unsigned int max_events; /* maximum number of events */ | ||
| 94 | u32 last_wd; /* the last wd allocated */ | 81 | u32 last_wd; /* the last wd allocated */ |
| 82 | const struct inotify_operations *in_ops; /* inotify caller operations */ | ||
| 95 | }; | 83 | }; |
| 96 | 84 | ||
| 97 | /* | 85 | static inline void get_inotify_handle(struct inotify_handle *ih) |
| 98 | * struct inotify_kernel_event - An inotify event, originating from a watch and | ||
| 99 | * queued for user-space. A list of these is attached to each instance of the | ||
| 100 | * device. In read(), this list is walked and all events that can fit in the | ||
| 101 | * buffer are returned. | ||
| 102 | * | ||
| 103 | * Protected by dev->mutex of the device in which we are queued. | ||
| 104 | */ | ||
| 105 | struct inotify_kernel_event { | ||
| 106 | struct inotify_event event; /* the user-space event */ | ||
| 107 | struct list_head list; /* entry in inotify_device's list */ | ||
| 108 | char *name; /* filename, if any */ | ||
| 109 | }; | ||
| 110 | |||
| 111 | /* | ||
| 112 | * struct inotify_watch - represents a watch request on a specific inode | ||
| 113 | * | ||
| 114 | * d_list is protected by dev->mutex of the associated watch->dev. | ||
| 115 | * i_list and mask are protected by inode->inotify_mutex of the associated inode. | ||
| 116 | * dev, inode, and wd are never written to once the watch is created. | ||
| 117 | */ | ||
| 118 | struct inotify_watch { | ||
| 119 | struct list_head d_list; /* entry in inotify_device's list */ | ||
| 120 | struct list_head i_list; /* entry in inode's list */ | ||
| 121 | atomic_t count; /* reference count */ | ||
| 122 | struct inotify_device *dev; /* associated device */ | ||
| 123 | struct inode *inode; /* associated inode */ | ||
| 124 | s32 wd; /* watch descriptor */ | ||
| 125 | u32 mask; /* event mask for this watch */ | ||
| 126 | }; | ||
| 127 | |||
| 128 | #ifdef CONFIG_SYSCTL | ||
| 129 | |||
| 130 | #include <linux/sysctl.h> | ||
| 131 | |||
| 132 | static int zero; | ||
| 133 | |||
| 134 | ctl_table inotify_table[] = { | ||
| 135 | { | ||
| 136 | .ctl_name = INOTIFY_MAX_USER_INSTANCES, | ||
| 137 | .procname = "max_user_instances", | ||
| 138 | .data = &inotify_max_user_instances, | ||
| 139 | .maxlen = sizeof(int), | ||
| 140 | .mode = 0644, | ||
| 141 | .proc_handler = &proc_dointvec_minmax, | ||
| 142 | .strategy = &sysctl_intvec, | ||
| 143 | .extra1 = &zero, | ||
| 144 | }, | ||
| 145 | { | ||
| 146 | .ctl_name = INOTIFY_MAX_USER_WATCHES, | ||
| 147 | .procname = "max_user_watches", | ||
| 148 | .data = &inotify_max_user_watches, | ||
| 149 | .maxlen = sizeof(int), | ||
| 150 | .mode = 0644, | ||
| 151 | .proc_handler = &proc_dointvec_minmax, | ||
| 152 | .strategy = &sysctl_intvec, | ||
| 153 | .extra1 = &zero, | ||
| 154 | }, | ||
| 155 | { | ||
| 156 | .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, | ||
| 157 | .procname = "max_queued_events", | ||
| 158 | .data = &inotify_max_queued_events, | ||
| 159 | .maxlen = sizeof(int), | ||
| 160 | .mode = 0644, | ||
| 161 | .proc_handler = &proc_dointvec_minmax, | ||
| 162 | .strategy = &sysctl_intvec, | ||
| 163 | .extra1 = &zero | ||
| 164 | }, | ||
| 165 | { .ctl_name = 0 } | ||
| 166 | }; | ||
| 167 | #endif /* CONFIG_SYSCTL */ | ||
| 168 | |||
| 169 | static inline void get_inotify_dev(struct inotify_device *dev) | ||
| 170 | { | 86 | { |
| 171 | atomic_inc(&dev->count); | 87 | atomic_inc(&ih->count); |
| 172 | } | 88 | } |
| 173 | 89 | ||
| 174 | static inline void put_inotify_dev(struct inotify_device *dev) | 90 | static inline void put_inotify_handle(struct inotify_handle *ih) |
| 175 | { | 91 | { |
| 176 | if (atomic_dec_and_test(&dev->count)) { | 92 | if (atomic_dec_and_test(&ih->count)) { |
| 177 | atomic_dec(&dev->user->inotify_devs); | 93 | idr_destroy(&ih->idr); |
| 178 | free_uid(dev->user); | 94 | kfree(ih); |
| 179 | idr_destroy(&dev->idr); | ||
| 180 | kfree(dev); | ||
| 181 | } | 95 | } |
| 182 | } | 96 | } |
| 183 | 97 | ||
| 184 | static inline void get_inotify_watch(struct inotify_watch *watch) | 98 | /** |
| 99 | * get_inotify_watch - grab a reference to an inotify_watch | ||
| 100 | * @watch: watch to grab | ||
| 101 | */ | ||
| 102 | void get_inotify_watch(struct inotify_watch *watch) | ||
| 185 | { | 103 | { |
| 186 | atomic_inc(&watch->count); | 104 | atomic_inc(&watch->count); |
| 187 | } | 105 | } |
| 106 | EXPORT_SYMBOL_GPL(get_inotify_watch); | ||
| 188 | 107 | ||
| 189 | /* | 108 | /** |
| 190 | * put_inotify_watch - decrements the ref count on a given watch. cleans up | 109 | * put_inotify_watch - decrements the ref count on a given watch. cleans up |
| 191 | * the watch and its references if the count reaches zero. | 110 | * watch references if the count reaches zero. inotify_watch is freed by |
| 111 | * inotify callers via the destroy_watch() op. | ||
| 112 | * @watch: watch to release | ||
| 192 | */ | 113 | */ |
| 193 | static inline void put_inotify_watch(struct inotify_watch *watch) | 114 | void put_inotify_watch(struct inotify_watch *watch) |
| 194 | { | 115 | { |
| 195 | if (atomic_dec_and_test(&watch->count)) { | 116 | if (atomic_dec_and_test(&watch->count)) { |
| 196 | put_inotify_dev(watch->dev); | 117 | struct inotify_handle *ih = watch->ih; |
| 197 | iput(watch->inode); | ||
| 198 | kmem_cache_free(watch_cachep, watch); | ||
| 199 | } | ||
| 200 | } | ||
| 201 | |||
| 202 | /* | ||
| 203 | * kernel_event - create a new kernel event with the given parameters | ||
| 204 | * | ||
| 205 | * This function can sleep. | ||
| 206 | */ | ||
| 207 | static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | ||
| 208 | const char *name) | ||
| 209 | { | ||
| 210 | struct inotify_kernel_event *kevent; | ||
| 211 | |||
| 212 | kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); | ||
| 213 | if (unlikely(!kevent)) | ||
| 214 | return NULL; | ||
| 215 | |||
| 216 | /* we hand this out to user-space, so zero it just in case */ | ||
| 217 | memset(&kevent->event, 0, sizeof(struct inotify_event)); | ||
| 218 | |||
| 219 | kevent->event.wd = wd; | ||
| 220 | kevent->event.mask = mask; | ||
| 221 | kevent->event.cookie = cookie; | ||
| 222 | |||
| 223 | INIT_LIST_HEAD(&kevent->list); | ||
| 224 | |||
| 225 | if (name) { | ||
| 226 | size_t len, rem, event_size = sizeof(struct inotify_event); | ||
| 227 | |||
| 228 | /* | ||
| 229 | * We need to pad the filename so as to properly align an | ||
| 230 | * array of inotify_event structures. Because the structure is | ||
| 231 | * small and the common case is a small filename, we just round | ||
| 232 | * up to the next multiple of the structure's sizeof. This is | ||
| 233 | * simple and safe for all architectures. | ||
| 234 | */ | ||
| 235 | len = strlen(name) + 1; | ||
| 236 | rem = event_size - len; | ||
| 237 | if (len > event_size) { | ||
| 238 | rem = event_size - (len % event_size); | ||
| 239 | if (len % event_size == 0) | ||
| 240 | rem = 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | kevent->name = kmalloc(len + rem, GFP_KERNEL); | ||
| 244 | if (unlikely(!kevent->name)) { | ||
| 245 | kmem_cache_free(event_cachep, kevent); | ||
| 246 | return NULL; | ||
| 247 | } | ||
| 248 | memcpy(kevent->name, name, len); | ||
| 249 | if (rem) | ||
| 250 | memset(kevent->name + len, 0, rem); | ||
| 251 | kevent->event.len = len + rem; | ||
| 252 | } else { | ||
| 253 | kevent->event.len = 0; | ||
| 254 | kevent->name = NULL; | ||
| 255 | } | ||
| 256 | |||
| 257 | return kevent; | ||
| 258 | } | ||
| 259 | |||
| 260 | /* | ||
| 261 | * inotify_dev_get_event - return the next event in the given dev's queue | ||
| 262 | * | ||
| 263 | * Caller must hold dev->mutex. | ||
| 264 | */ | ||
| 265 | static inline struct inotify_kernel_event * | ||
| 266 | inotify_dev_get_event(struct inotify_device *dev) | ||
| 267 | { | ||
| 268 | return list_entry(dev->events.next, struct inotify_kernel_event, list); | ||
| 269 | } | ||
| 270 | |||
| 271 | /* | ||
| 272 | * inotify_dev_queue_event - add a new event to the given device | ||
| 273 | * | ||
| 274 | * Caller must hold dev->mutex. Can sleep (calls kernel_event()). | ||
| 275 | */ | ||
| 276 | static void inotify_dev_queue_event(struct inotify_device *dev, | ||
| 277 | struct inotify_watch *watch, u32 mask, | ||
| 278 | u32 cookie, const char *name) | ||
| 279 | { | ||
| 280 | struct inotify_kernel_event *kevent, *last; | ||
| 281 | |||
| 282 | /* coalescing: drop this event if it is a dupe of the previous */ | ||
| 283 | last = inotify_dev_get_event(dev); | ||
| 284 | if (last && last->event.mask == mask && last->event.wd == watch->wd && | ||
| 285 | last->event.cookie == cookie) { | ||
| 286 | const char *lastname = last->name; | ||
| 287 | |||
| 288 | if (!name && !lastname) | ||
| 289 | return; | ||
| 290 | if (name && lastname && !strcmp(lastname, name)) | ||
| 291 | return; | ||
| 292 | } | ||
| 293 | |||
| 294 | /* the queue overflowed and we already sent the Q_OVERFLOW event */ | ||
| 295 | if (unlikely(dev->event_count > dev->max_events)) | ||
| 296 | return; | ||
| 297 | |||
| 298 | /* if the queue overflows, we need to notify user space */ | ||
| 299 | if (unlikely(dev->event_count == dev->max_events)) | ||
| 300 | kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); | ||
| 301 | else | ||
| 302 | kevent = kernel_event(watch->wd, mask, cookie, name); | ||
| 303 | |||
| 304 | if (unlikely(!kevent)) | ||
| 305 | return; | ||
| 306 | |||
| 307 | /* queue the event and wake up anyone waiting */ | ||
| 308 | dev->event_count++; | ||
| 309 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; | ||
| 310 | list_add_tail(&kevent->list, &dev->events); | ||
| 311 | wake_up_interruptible(&dev->wq); | ||
| 312 | } | ||
| 313 | |||
| 314 | /* | ||
| 315 | * remove_kevent - cleans up and ultimately frees the given kevent | ||
| 316 | * | ||
| 317 | * Caller must hold dev->mutex. | ||
| 318 | */ | ||
| 319 | static void remove_kevent(struct inotify_device *dev, | ||
| 320 | struct inotify_kernel_event *kevent) | ||
| 321 | { | ||
| 322 | list_del(&kevent->list); | ||
| 323 | |||
| 324 | dev->event_count--; | ||
| 325 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; | ||
| 326 | |||
| 327 | kfree(kevent->name); | ||
| 328 | kmem_cache_free(event_cachep, kevent); | ||
| 329 | } | ||
| 330 | 118 | ||
| 331 | /* | 119 | iput(watch->inode); |
| 332 | * inotify_dev_event_dequeue - destroy an event on the given device | 120 | ih->in_ops->destroy_watch(watch); |
| 333 | * | 121 | put_inotify_handle(ih); |
| 334 | * Caller must hold dev->mutex. | ||
| 335 | */ | ||
| 336 | static void inotify_dev_event_dequeue(struct inotify_device *dev) | ||
| 337 | { | ||
| 338 | if (!list_empty(&dev->events)) { | ||
| 339 | struct inotify_kernel_event *kevent; | ||
| 340 | kevent = inotify_dev_get_event(dev); | ||
| 341 | remove_kevent(dev, kevent); | ||
| 342 | } | 122 | } |
| 343 | } | 123 | } |
| 124 | EXPORT_SYMBOL_GPL(put_inotify_watch); | ||
| 344 | 125 | ||
| 345 | /* | 126 | /* |
| 346 | * inotify_dev_get_wd - returns the next WD for use by the given dev | 127 | * inotify_handle_get_wd - returns the next WD for use by the given handle |
| 347 | * | 128 | * |
| 348 | * Callers must hold dev->mutex. This function can sleep. | 129 | * Callers must hold ih->mutex. This function can sleep. |
| 349 | */ | 130 | */ |
| 350 | static int inotify_dev_get_wd(struct inotify_device *dev, | 131 | static int inotify_handle_get_wd(struct inotify_handle *ih, |
| 351 | struct inotify_watch *watch) | 132 | struct inotify_watch *watch) |
| 352 | { | 133 | { |
| 353 | int ret; | 134 | int ret; |
| 354 | 135 | ||
| 355 | do { | 136 | do { |
| 356 | if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL))) | 137 | if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) |
| 357 | return -ENOSPC; | 138 | return -ENOSPC; |
| 358 | ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd); | 139 | ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); |
| 359 | } while (ret == -EAGAIN); | 140 | } while (ret == -EAGAIN); |
| 360 | 141 | ||
| 361 | return ret; | 142 | if (likely(!ret)) |
| 362 | } | 143 | ih->last_wd = watch->wd; |
| 363 | 144 | ||
| 364 | /* | 145 | return ret; |
| 365 | * find_inode - resolve a user-given path to a specific inode and return a nd | ||
| 366 | */ | ||
| 367 | static int find_inode(const char __user *dirname, struct nameidata *nd, | ||
| 368 | unsigned flags) | ||
| 369 | { | ||
| 370 | int error; | ||
| 371 | |||
| 372 | error = __user_walk(dirname, flags, nd); | ||
| 373 | if (error) | ||
| 374 | return error; | ||
| 375 | /* you can only watch an inode if you have read permissions on it */ | ||
| 376 | error = vfs_permission(nd, MAY_READ); | ||
| 377 | if (error) | ||
| 378 | path_release(nd); | ||
| 379 | return error; | ||
| 380 | } | 146 | } |
| 381 | 147 | ||
| 382 | /* | 148 | /* |
| @@ -422,67 +188,18 @@ static void set_dentry_child_flags(struct inode *inode, int watched) | |||
| 422 | } | 188 | } |
| 423 | 189 | ||
| 424 | /* | 190 | /* |
| 425 | * create_watch - creates a watch on the given device. | 191 | * inotify_find_handle - find the watch associated with the given inode and |
| 426 | * | 192 | * handle |
| 427 | * Callers must hold dev->mutex. Calls inotify_dev_get_wd() so may sleep. | ||
| 428 | * Both 'dev' and 'inode' (by way of nameidata) need to be pinned. | ||
| 429 | */ | ||
| 430 | static struct inotify_watch *create_watch(struct inotify_device *dev, | ||
| 431 | u32 mask, struct inode *inode) | ||
| 432 | { | ||
| 433 | struct inotify_watch *watch; | ||
| 434 | int ret; | ||
| 435 | |||
| 436 | if (atomic_read(&dev->user->inotify_watches) >= | ||
| 437 | inotify_max_user_watches) | ||
| 438 | return ERR_PTR(-ENOSPC); | ||
| 439 | |||
| 440 | watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); | ||
| 441 | if (unlikely(!watch)) | ||
| 442 | return ERR_PTR(-ENOMEM); | ||
| 443 | |||
| 444 | ret = inotify_dev_get_wd(dev, watch); | ||
| 445 | if (unlikely(ret)) { | ||
| 446 | kmem_cache_free(watch_cachep, watch); | ||
| 447 | return ERR_PTR(ret); | ||
| 448 | } | ||
| 449 | |||
| 450 | dev->last_wd = watch->wd; | ||
| 451 | watch->mask = mask; | ||
| 452 | atomic_set(&watch->count, 0); | ||
| 453 | INIT_LIST_HEAD(&watch->d_list); | ||
| 454 | INIT_LIST_HEAD(&watch->i_list); | ||
| 455 | |||
| 456 | /* save a reference to device and bump the count to make it official */ | ||
| 457 | get_inotify_dev(dev); | ||
| 458 | watch->dev = dev; | ||
| 459 | |||
| 460 | /* | ||
| 461 | * Save a reference to the inode and bump the ref count to make it | ||
| 462 | * official. We hold a reference to nameidata, which makes this safe. | ||
| 463 | */ | ||
| 464 | watch->inode = igrab(inode); | ||
| 465 | |||
| 466 | /* bump our own count, corresponding to our entry in dev->watches */ | ||
| 467 | get_inotify_watch(watch); | ||
| 468 | |||
| 469 | atomic_inc(&dev->user->inotify_watches); | ||
| 470 | |||
| 471 | return watch; | ||
| 472 | } | ||
| 473 | |||
| 474 | /* | ||
| 475 | * inotify_find_dev - find the watch associated with the given inode and dev | ||
| 476 | * | 193 | * |
| 477 | * Callers must hold inode->inotify_mutex. | 194 | * Callers must hold inode->inotify_mutex. |
| 478 | */ | 195 | */ |
| 479 | static struct inotify_watch *inode_find_dev(struct inode *inode, | 196 | static struct inotify_watch *inode_find_handle(struct inode *inode, |
| 480 | struct inotify_device *dev) | 197 | struct inotify_handle *ih) |
| 481 | { | 198 | { |
| 482 | struct inotify_watch *watch; | 199 | struct inotify_watch *watch; |
| 483 | 200 | ||
| 484 | list_for_each_entry(watch, &inode->inotify_watches, i_list) { | 201 | list_for_each_entry(watch, &inode->inotify_watches, i_list) { |
| 485 | if (watch->dev == dev) | 202 | if (watch->ih == ih) |
| 486 | return watch; | 203 | return watch; |
| 487 | } | 204 | } |
| 488 | 205 | ||
| @@ -490,40 +207,40 @@ static struct inotify_watch *inode_find_dev(struct inode *inode, | |||
| 490 | } | 207 | } |
| 491 | 208 | ||
| 492 | /* | 209 | /* |
| 493 | * remove_watch_no_event - remove_watch() without the IN_IGNORED event. | 210 | * remove_watch_no_event - remove watch without the IN_IGNORED event. |
| 211 | * | ||
| 212 | * Callers must hold both inode->inotify_mutex and ih->mutex. | ||
| 494 | */ | 213 | */ |
| 495 | static void remove_watch_no_event(struct inotify_watch *watch, | 214 | static void remove_watch_no_event(struct inotify_watch *watch, |
| 496 | struct inotify_device *dev) | 215 | struct inotify_handle *ih) |
| 497 | { | 216 | { |
| 498 | list_del(&watch->i_list); | 217 | list_del(&watch->i_list); |
| 499 | list_del(&watch->d_list); | 218 | list_del(&watch->h_list); |
| 500 | 219 | ||
| 501 | if (!inotify_inode_watched(watch->inode)) | 220 | if (!inotify_inode_watched(watch->inode)) |
| 502 | set_dentry_child_flags(watch->inode, 0); | 221 | set_dentry_child_flags(watch->inode, 0); |
| 503 | 222 | ||
| 504 | atomic_dec(&dev->user->inotify_watches); | 223 | idr_remove(&ih->idr, watch->wd); |
| 505 | idr_remove(&dev->idr, watch->wd); | ||
| 506 | put_inotify_watch(watch); | ||
| 507 | } | 224 | } |
| 508 | 225 | ||
| 509 | /* | 226 | /** |
| 510 | * remove_watch - Remove a watch from both the device and the inode. Sends | 227 | * inotify_remove_watch_locked - Remove a watch from both the handle and the |
| 511 | * the IN_IGNORED event to the given device signifying that the inode is no | 228 | * inode. Sends the IN_IGNORED event signifying that the inode is no longer |
| 512 | * longer watched. | 229 | * watched. May be invoked from a caller's event handler. |
| 513 | * | 230 | * @ih: inotify handle associated with watch |
| 514 | * Callers must hold both inode->inotify_mutex and dev->mutex. We drop a | 231 | * @watch: watch to remove |
| 515 | * reference to the inode before returning. | ||
| 516 | * | 232 | * |
| 517 | * The inode is not iput() so as to remain atomic. If the inode needs to be | 233 | * Callers must hold both inode->inotify_mutex and ih->mutex. |
| 518 | * iput(), the call returns one. Otherwise, it returns zero. | ||
| 519 | */ | 234 | */ |
| 520 | static void remove_watch(struct inotify_watch *watch,struct inotify_device *dev) | 235 | void inotify_remove_watch_locked(struct inotify_handle *ih, |
| 236 | struct inotify_watch *watch) | ||
| 521 | { | 237 | { |
| 522 | inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL); | 238 | remove_watch_no_event(watch, ih); |
| 523 | remove_watch_no_event(watch, dev); | 239 | ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); |
| 524 | } | 240 | } |
| 241 | EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); | ||
| 525 | 242 | ||
| 526 | /* Kernel API */ | 243 | /* Kernel API for producing events */ |
| 527 | 244 | ||
| 528 | /* | 245 | /* |
| 529 | * inotify_d_instantiate - instantiate dcache entry for inode | 246 | * inotify_d_instantiate - instantiate dcache entry for inode |
| @@ -563,9 +280,10 @@ void inotify_d_move(struct dentry *entry) | |||
| 563 | * @mask: event mask describing this event | 280 | * @mask: event mask describing this event |
| 564 | * @cookie: cookie for synchronization, or zero | 281 | * @cookie: cookie for synchronization, or zero |
| 565 | * @name: filename, if any | 282 | * @name: filename, if any |
| 283 | * @n_inode: inode associated with name | ||
| 566 | */ | 284 | */ |
| 567 | void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, | 285 | void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, |
| 568 | const char *name) | 286 | const char *name, struct inode *n_inode) |
| 569 | { | 287 | { |
| 570 | struct inotify_watch *watch, *next; | 288 | struct inotify_watch *watch, *next; |
| 571 | 289 | ||
| @@ -576,14 +294,13 @@ void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, | |||
| 576 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 294 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
| 577 | u32 watch_mask = watch->mask; | 295 | u32 watch_mask = watch->mask; |
| 578 | if (watch_mask & mask) { | 296 | if (watch_mask & mask) { |
| 579 | struct inotify_device *dev = watch->dev; | 297 | struct inotify_handle *ih= watch->ih; |
| 580 | get_inotify_watch(watch); | 298 | mutex_lock(&ih->mutex); |
| 581 | mutex_lock(&dev->mutex); | ||
| 582 | inotify_dev_queue_event(dev, watch, mask, cookie, name); | ||
| 583 | if (watch_mask & IN_ONESHOT) | 299 | if (watch_mask & IN_ONESHOT) |
| 584 | remove_watch_no_event(watch, dev); | 300 | remove_watch_no_event(watch, ih); |
| 585 | mutex_unlock(&dev->mutex); | 301 | ih->in_ops->handle_event(watch, watch->wd, mask, cookie, |
| 586 | put_inotify_watch(watch); | 302 | name, n_inode); |
| 303 | mutex_unlock(&ih->mutex); | ||
| 587 | } | 304 | } |
| 588 | } | 305 | } |
| 589 | mutex_unlock(&inode->inotify_mutex); | 306 | mutex_unlock(&inode->inotify_mutex); |
| @@ -613,7 +330,8 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, | |||
| 613 | if (inotify_inode_watched(inode)) { | 330 | if (inotify_inode_watched(inode)) { |
| 614 | dget(parent); | 331 | dget(parent); |
| 615 | spin_unlock(&dentry->d_lock); | 332 | spin_unlock(&dentry->d_lock); |
| 616 | inotify_inode_queue_event(inode, mask, cookie, name); | 333 | inotify_inode_queue_event(inode, mask, cookie, name, |
| 334 | dentry->d_inode); | ||
| 617 | dput(parent); | 335 | dput(parent); |
| 618 | } else | 336 | } else |
| 619 | spin_unlock(&dentry->d_lock); | 337 | spin_unlock(&dentry->d_lock); |
| @@ -665,7 +383,7 @@ void inotify_unmount_inodes(struct list_head *list) | |||
| 665 | 383 | ||
| 666 | need_iput_tmp = need_iput; | 384 | need_iput_tmp = need_iput; |
| 667 | need_iput = NULL; | 385 | need_iput = NULL; |
| 668 | /* In case the remove_watch() drops a reference. */ | 386 | /* In case inotify_remove_watch_locked() drops a reference. */ |
| 669 | if (inode != need_iput_tmp) | 387 | if (inode != need_iput_tmp) |
| 670 | __iget(inode); | 388 | __iget(inode); |
| 671 | else | 389 | else |
| @@ -694,11 +412,12 @@ void inotify_unmount_inodes(struct list_head *list) | |||
| 694 | mutex_lock(&inode->inotify_mutex); | 412 | mutex_lock(&inode->inotify_mutex); |
| 695 | watches = &inode->inotify_watches; | 413 | watches = &inode->inotify_watches; |
| 696 | list_for_each_entry_safe(watch, next_w, watches, i_list) { | 414 | list_for_each_entry_safe(watch, next_w, watches, i_list) { |
| 697 | struct inotify_device *dev = watch->dev; | 415 | struct inotify_handle *ih= watch->ih; |
| 698 | mutex_lock(&dev->mutex); | 416 | mutex_lock(&ih->mutex); |
| 699 | inotify_dev_queue_event(dev, watch, IN_UNMOUNT,0,NULL); | 417 | ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, |
| 700 | remove_watch(watch, dev); | 418 | NULL, NULL); |
| 701 | mutex_unlock(&dev->mutex); | 419 | inotify_remove_watch_locked(ih, watch); |
| 420 | mutex_unlock(&ih->mutex); | ||
| 702 | } | 421 | } |
| 703 | mutex_unlock(&inode->inotify_mutex); | 422 | mutex_unlock(&inode->inotify_mutex); |
| 704 | iput(inode); | 423 | iput(inode); |
| @@ -718,432 +437,292 @@ void inotify_inode_is_dead(struct inode *inode) | |||
| 718 | 437 | ||
| 719 | mutex_lock(&inode->inotify_mutex); | 438 | mutex_lock(&inode->inotify_mutex); |
| 720 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { | 439 | list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { |
| 721 | struct inotify_device *dev = watch->dev; | 440 | struct inotify_handle *ih = watch->ih; |
| 722 | mutex_lock(&dev->mutex); | 441 | mutex_lock(&ih->mutex); |
| 723 | remove_watch(watch, dev); | 442 | inotify_remove_watch_locked(ih, watch); |
| 724 | mutex_unlock(&dev->mutex); | 443 | mutex_unlock(&ih->mutex); |
| 725 | } | 444 | } |
| 726 | mutex_unlock(&inode->inotify_mutex); | 445 | mutex_unlock(&inode->inotify_mutex); |
| 727 | } | 446 | } |
| 728 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); | 447 | EXPORT_SYMBOL_GPL(inotify_inode_is_dead); |
| 729 | 448 | ||
| 730 | /* Device Interface */ | 449 | /* Kernel Consumer API */ |
| 731 | 450 | ||
| 732 | static unsigned int inotify_poll(struct file *file, poll_table *wait) | 451 | /** |
| 452 | * inotify_init - allocate and initialize an inotify instance | ||
| 453 | * @ops: caller's inotify operations | ||
| 454 | */ | ||
| 455 | struct inotify_handle *inotify_init(const struct inotify_operations *ops) | ||
| 733 | { | 456 | { |
| 734 | struct inotify_device *dev = file->private_data; | 457 | struct inotify_handle *ih; |
| 735 | int ret = 0; | ||
| 736 | 458 | ||
| 737 | poll_wait(file, &dev->wq, wait); | 459 | ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); |
| 738 | mutex_lock(&dev->mutex); | 460 | if (unlikely(!ih)) |
| 739 | if (!list_empty(&dev->events)) | 461 | return ERR_PTR(-ENOMEM); |
| 740 | ret = POLLIN | POLLRDNORM; | ||
| 741 | mutex_unlock(&dev->mutex); | ||
| 742 | 462 | ||
| 743 | return ret; | 463 | idr_init(&ih->idr); |
| 464 | INIT_LIST_HEAD(&ih->watches); | ||
| 465 | mutex_init(&ih->mutex); | ||
| 466 | ih->last_wd = 0; | ||
| 467 | ih->in_ops = ops; | ||
| 468 | atomic_set(&ih->count, 0); | ||
| 469 | get_inotify_handle(ih); | ||
| 470 | |||
| 471 | return ih; | ||
| 744 | } | 472 | } |
| 473 | EXPORT_SYMBOL_GPL(inotify_init); | ||
| 745 | 474 | ||
| 746 | static ssize_t inotify_read(struct file *file, char __user *buf, | 475 | /** |
| 747 | size_t count, loff_t *pos) | 476 | * inotify_init_watch - initialize an inotify watch |
| 477 | * @watch: watch to initialize | ||
| 478 | */ | ||
| 479 | void inotify_init_watch(struct inotify_watch *watch) | ||
| 748 | { | 480 | { |
| 749 | size_t event_size = sizeof (struct inotify_event); | 481 | INIT_LIST_HEAD(&watch->h_list); |
| 750 | struct inotify_device *dev; | 482 | INIT_LIST_HEAD(&watch->i_list); |
| 751 | char __user *start; | 483 | atomic_set(&watch->count, 0); |
| 752 | int ret; | 484 | get_inotify_watch(watch); /* initial get */ |
| 753 | DEFINE_WAIT(wait); | ||
| 754 | |||
| 755 | start = buf; | ||
| 756 | dev = file->private_data; | ||
| 757 | |||
| 758 | while (1) { | ||
| 759 | int events; | ||
| 760 | |||
| 761 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); | ||
| 762 | |||
| 763 | mutex_lock(&dev->mutex); | ||
| 764 | events = !list_empty(&dev->events); | ||
| 765 | mutex_unlock(&dev->mutex); | ||
| 766 | if (events) { | ||
| 767 | ret = 0; | ||
| 768 | break; | ||
| 769 | } | ||
| 770 | |||
| 771 | if (file->f_flags & O_NONBLOCK) { | ||
| 772 | ret = -EAGAIN; | ||
| 773 | break; | ||
| 774 | } | ||
| 775 | |||
| 776 | if (signal_pending(current)) { | ||
| 777 | ret = -EINTR; | ||
| 778 | break; | ||
| 779 | } | ||
| 780 | |||
| 781 | schedule(); | ||
| 782 | } | ||
| 783 | |||
| 784 | finish_wait(&dev->wq, &wait); | ||
| 785 | if (ret) | ||
| 786 | return ret; | ||
| 787 | |||
| 788 | mutex_lock(&dev->mutex); | ||
| 789 | while (1) { | ||
| 790 | struct inotify_kernel_event *kevent; | ||
| 791 | |||
| 792 | ret = buf - start; | ||
| 793 | if (list_empty(&dev->events)) | ||
| 794 | break; | ||
| 795 | |||
| 796 | kevent = inotify_dev_get_event(dev); | ||
| 797 | if (event_size + kevent->event.len > count) | ||
| 798 | break; | ||
| 799 | |||
| 800 | if (copy_to_user(buf, &kevent->event, event_size)) { | ||
| 801 | ret = -EFAULT; | ||
| 802 | break; | ||
| 803 | } | ||
| 804 | buf += event_size; | ||
| 805 | count -= event_size; | ||
| 806 | |||
| 807 | if (kevent->name) { | ||
| 808 | if (copy_to_user(buf, kevent->name, kevent->event.len)){ | ||
| 809 | ret = -EFAULT; | ||
| 810 | break; | ||
| 811 | } | ||
| 812 | buf += kevent->event.len; | ||
| 813 | count -= kevent->event.len; | ||
| 814 | } | ||
| 815 | |||
| 816 | remove_kevent(dev, kevent); | ||
| 817 | } | ||
| 818 | mutex_unlock(&dev->mutex); | ||
| 819 | |||
| 820 | return ret; | ||
| 821 | } | 485 | } |
| 486 | EXPORT_SYMBOL_GPL(inotify_init_watch); | ||
| 822 | 487 | ||
| 823 | static int inotify_release(struct inode *ignored, struct file *file) | 488 | /** |
| 489 | * inotify_destroy - clean up and destroy an inotify instance | ||
| 490 | * @ih: inotify handle | ||
| 491 | */ | ||
| 492 | void inotify_destroy(struct inotify_handle *ih) | ||
| 824 | { | 493 | { |
| 825 | struct inotify_device *dev = file->private_data; | ||
| 826 | |||
| 827 | /* | 494 | /* |
| 828 | * Destroy all of the watches on this device. Unfortunately, not very | 495 | * Destroy all of the watches for this handle. Unfortunately, not very |
| 829 | * pretty. We cannot do a simple iteration over the list, because we | 496 | * pretty. We cannot do a simple iteration over the list, because we |
| 830 | * do not know the inode until we iterate to the watch. But we need to | 497 | * do not know the inode until we iterate to the watch. But we need to |
| 831 | * hold inode->inotify_mutex before dev->mutex. The following works. | 498 | * hold inode->inotify_mutex before ih->mutex. The following works. |
| 832 | */ | 499 | */ |
| 833 | while (1) { | 500 | while (1) { |
| 834 | struct inotify_watch *watch; | 501 | struct inotify_watch *watch; |
| 835 | struct list_head *watches; | 502 | struct list_head *watches; |
| 836 | struct inode *inode; | 503 | struct inode *inode; |
| 837 | 504 | ||
| 838 | mutex_lock(&dev->mutex); | 505 | mutex_lock(&ih->mutex); |
| 839 | watches = &dev->watches; | 506 | watches = &ih->watches; |
| 840 | if (list_empty(watches)) { | 507 | if (list_empty(watches)) { |
| 841 | mutex_unlock(&dev->mutex); | 508 | mutex_unlock(&ih->mutex); |
| 842 | break; | 509 | break; |
| 843 | } | 510 | } |
| 844 | watch = list_entry(watches->next, struct inotify_watch, d_list); | 511 | watch = list_entry(watches->next, struct inotify_watch, h_list); |
| 845 | get_inotify_watch(watch); | 512 | get_inotify_watch(watch); |
| 846 | mutex_unlock(&dev->mutex); | 513 | mutex_unlock(&ih->mutex); |
| 847 | 514 | ||
| 848 | inode = watch->inode; | 515 | inode = watch->inode; |
| 849 | mutex_lock(&inode->inotify_mutex); | 516 | mutex_lock(&inode->inotify_mutex); |
| 850 | mutex_lock(&dev->mutex); | 517 | mutex_lock(&ih->mutex); |
| 851 | 518 | ||
| 852 | /* make sure we didn't race with another list removal */ | 519 | /* make sure we didn't race with another list removal */ |
| 853 | if (likely(idr_find(&dev->idr, watch->wd))) | 520 | if (likely(idr_find(&ih->idr, watch->wd))) { |
| 854 | remove_watch_no_event(watch, dev); | 521 | remove_watch_no_event(watch, ih); |
| 522 | put_inotify_watch(watch); | ||
| 523 | } | ||
| 855 | 524 | ||
| 856 | mutex_unlock(&dev->mutex); | 525 | mutex_unlock(&ih->mutex); |
| 857 | mutex_unlock(&inode->inotify_mutex); | 526 | mutex_unlock(&inode->inotify_mutex); |
| 858 | put_inotify_watch(watch); | 527 | put_inotify_watch(watch); |
| 859 | } | 528 | } |
| 860 | 529 | ||
| 861 | /* destroy all of the events on this device */ | 530 | /* free this handle: the put matching the get in inotify_init() */ |
| 862 | mutex_lock(&dev->mutex); | 531 | put_inotify_handle(ih); |
| 863 | while (!list_empty(&dev->events)) | ||
| 864 | inotify_dev_event_dequeue(dev); | ||
| 865 | mutex_unlock(&dev->mutex); | ||
| 866 | |||
| 867 | /* free this device: the put matching the get in inotify_init() */ | ||
| 868 | put_inotify_dev(dev); | ||
| 869 | |||
| 870 | return 0; | ||
| 871 | } | 532 | } |
| 533 | EXPORT_SYMBOL_GPL(inotify_destroy); | ||
| 872 | 534 | ||
| 873 | /* | 535 | /** |
| 874 | * inotify_ignore - remove a given wd from this inotify instance. | 536 | * inotify_find_watch - find an existing watch for an (ih,inode) pair |
| 537 | * @ih: inotify handle | ||
| 538 | * @inode: inode to watch | ||
| 539 | * @watchp: pointer to existing inotify_watch | ||
| 875 | * | 540 | * |
| 876 | * Can sleep. | 541 | * Caller must pin given inode (via nameidata). |
| 877 | */ | 542 | */ |
| 878 | static int inotify_ignore(struct inotify_device *dev, s32 wd) | 543 | s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, |
| 544 | struct inotify_watch **watchp) | ||
| 879 | { | 545 | { |
| 880 | struct inotify_watch *watch; | 546 | struct inotify_watch *old; |
| 881 | struct inode *inode; | 547 | int ret = -ENOENT; |
| 882 | |||
| 883 | mutex_lock(&dev->mutex); | ||
| 884 | watch = idr_find(&dev->idr, wd); | ||
| 885 | if (unlikely(!watch)) { | ||
| 886 | mutex_unlock(&dev->mutex); | ||
| 887 | return -EINVAL; | ||
| 888 | } | ||
| 889 | get_inotify_watch(watch); | ||
| 890 | inode = watch->inode; | ||
| 891 | mutex_unlock(&dev->mutex); | ||
| 892 | 548 | ||
| 893 | mutex_lock(&inode->inotify_mutex); | 549 | mutex_lock(&inode->inotify_mutex); |
| 894 | mutex_lock(&dev->mutex); | 550 | mutex_lock(&ih->mutex); |
| 895 | 551 | ||
| 896 | /* make sure that we did not race */ | 552 | old = inode_find_handle(inode, ih); |
| 897 | if (likely(idr_find(&dev->idr, wd) == watch)) | 553 | if (unlikely(old)) { |
| 898 | remove_watch(watch, dev); | 554 | get_inotify_watch(old); /* caller must put watch */ |
| 555 | *watchp = old; | ||
| 556 | ret = old->wd; | ||
| 557 | } | ||
| 899 | 558 | ||
| 900 | mutex_unlock(&dev->mutex); | 559 | mutex_unlock(&ih->mutex); |
| 901 | mutex_unlock(&inode->inotify_mutex); | 560 | mutex_unlock(&inode->inotify_mutex); |
| 902 | put_inotify_watch(watch); | ||
| 903 | 561 | ||
| 904 | return 0; | 562 | return ret; |
| 905 | } | 563 | } |
| 564 | EXPORT_SYMBOL_GPL(inotify_find_watch); | ||
| 906 | 565 | ||
| 907 | static long inotify_ioctl(struct file *file, unsigned int cmd, | 566 | /** |
| 908 | unsigned long arg) | 567 | * inotify_find_update_watch - find and update the mask of an existing watch |
| 568 | * @ih: inotify handle | ||
| 569 | * @inode: inode's watch to update | ||
| 570 | * @mask: mask of events to watch | ||
| 571 | * | ||
| 572 | * Caller must pin given inode (via nameidata). | ||
| 573 | */ | ||
| 574 | s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, | ||
| 575 | u32 mask) | ||
| 909 | { | 576 | { |
| 910 | struct inotify_device *dev; | 577 | struct inotify_watch *old; |
| 911 | void __user *p; | 578 | int mask_add = 0; |
| 912 | int ret = -ENOTTY; | 579 | int ret; |
| 913 | |||
| 914 | dev = file->private_data; | ||
| 915 | p = (void __user *) arg; | ||
| 916 | |||
| 917 | switch (cmd) { | ||
| 918 | case FIONREAD: | ||
| 919 | ret = put_user(dev->queue_size, (int __user *) p); | ||
| 920 | break; | ||
| 921 | } | ||
| 922 | |||
| 923 | return ret; | ||
| 924 | } | ||
| 925 | 580 | ||
| 926 | static const struct file_operations inotify_fops = { | 581 | if (mask & IN_MASK_ADD) |
| 927 | .poll = inotify_poll, | 582 | mask_add = 1; |
| 928 | .read = inotify_read, | ||
| 929 | .release = inotify_release, | ||
| 930 | .unlocked_ioctl = inotify_ioctl, | ||
| 931 | .compat_ioctl = inotify_ioctl, | ||
| 932 | }; | ||
| 933 | 583 | ||
| 934 | asmlinkage long sys_inotify_init(void) | 584 | /* don't allow invalid bits: we don't want flags set */ |
| 935 | { | 585 | mask &= IN_ALL_EVENTS | IN_ONESHOT; |
| 936 | struct inotify_device *dev; | 586 | if (unlikely(!mask)) |
| 937 | struct user_struct *user; | 587 | return -EINVAL; |
| 938 | struct file *filp; | ||
| 939 | int fd, ret; | ||
| 940 | |||
| 941 | fd = get_unused_fd(); | ||
| 942 | if (fd < 0) | ||
| 943 | return fd; | ||
| 944 | |||
| 945 | filp = get_empty_filp(); | ||
| 946 | if (!filp) { | ||
| 947 | ret = -ENFILE; | ||
| 948 | goto out_put_fd; | ||
| 949 | } | ||
| 950 | 588 | ||
| 951 | user = get_uid(current->user); | 589 | mutex_lock(&inode->inotify_mutex); |
| 952 | if (unlikely(atomic_read(&user->inotify_devs) >= | 590 | mutex_lock(&ih->mutex); |
| 953 | inotify_max_user_instances)) { | ||
| 954 | ret = -EMFILE; | ||
| 955 | goto out_free_uid; | ||
| 956 | } | ||
| 957 | 591 | ||
| 958 | dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); | 592 | /* |
| 959 | if (unlikely(!dev)) { | 593 | * Handle the case of re-adding a watch on an (inode,ih) pair that we |
| 960 | ret = -ENOMEM; | 594 | * are already watching. We just update the mask and return its wd. |
| 961 | goto out_free_uid; | 595 | */ |
| 596 | old = inode_find_handle(inode, ih); | ||
| 597 | if (unlikely(!old)) { | ||
| 598 | ret = -ENOENT; | ||
| 599 | goto out; | ||
| 962 | } | 600 | } |
| 963 | 601 | ||
| 964 | filp->f_op = &inotify_fops; | 602 | if (mask_add) |
| 965 | filp->f_vfsmnt = mntget(inotify_mnt); | 603 | old->mask |= mask; |
| 966 | filp->f_dentry = dget(inotify_mnt->mnt_root); | 604 | else |
| 967 | filp->f_mapping = filp->f_dentry->d_inode->i_mapping; | 605 | old->mask = mask; |
| 968 | filp->f_mode = FMODE_READ; | 606 | ret = old->wd; |
| 969 | filp->f_flags = O_RDONLY; | 607 | out: |
| 970 | filp->private_data = dev; | 608 | mutex_unlock(&ih->mutex); |
| 971 | 609 | mutex_unlock(&inode->inotify_mutex); | |
| 972 | idr_init(&dev->idr); | ||
| 973 | INIT_LIST_HEAD(&dev->events); | ||
| 974 | INIT_LIST_HEAD(&dev->watches); | ||
| 975 | init_waitqueue_head(&dev->wq); | ||
| 976 | mutex_init(&dev->mutex); | ||
| 977 | dev->event_count = 0; | ||
| 978 | dev->queue_size = 0; | ||
| 979 | dev->max_events = inotify_max_queued_events; | ||
| 980 | dev->user = user; | ||
| 981 | dev->last_wd = 0; | ||
| 982 | atomic_set(&dev->count, 0); | ||
| 983 | |||
| 984 | get_inotify_dev(dev); | ||
| 985 | atomic_inc(&user->inotify_devs); | ||
| 986 | fd_install(fd, filp); | ||
| 987 | |||
| 988 | return fd; | ||
| 989 | out_free_uid: | ||
| 990 | free_uid(user); | ||
| 991 | put_filp(filp); | ||
| 992 | out_put_fd: | ||
| 993 | put_unused_fd(fd); | ||
| 994 | return ret; | 610 | return ret; |
| 995 | } | 611 | } |
| 612 | EXPORT_SYMBOL_GPL(inotify_find_update_watch); | ||
| 996 | 613 | ||
| 997 | asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask) | 614 | /** |
| 615 | * inotify_add_watch - add a watch to an inotify instance | ||
| 616 | * @ih: inotify handle | ||
| 617 | * @watch: caller allocated watch structure | ||
| 618 | * @inode: inode to watch | ||
| 619 | * @mask: mask of events to watch | ||
| 620 | * | ||
| 621 | * Caller must pin given inode (via nameidata). | ||
| 622 | * Caller must ensure it only calls inotify_add_watch() once per watch. | ||
| 623 | * Calls inotify_handle_get_wd() so may sleep. | ||
| 624 | */ | ||
| 625 | s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, | ||
| 626 | struct inode *inode, u32 mask) | ||
| 998 | { | 627 | { |
| 999 | struct inotify_watch *watch, *old; | 628 | int ret = 0; |
| 1000 | struct inode *inode; | ||
| 1001 | struct inotify_device *dev; | ||
| 1002 | struct nameidata nd; | ||
| 1003 | struct file *filp; | ||
| 1004 | int ret, fput_needed; | ||
| 1005 | int mask_add = 0; | ||
| 1006 | unsigned flags = 0; | ||
| 1007 | |||
| 1008 | filp = fget_light(fd, &fput_needed); | ||
| 1009 | if (unlikely(!filp)) | ||
| 1010 | return -EBADF; | ||
| 1011 | |||
| 1012 | /* verify that this is indeed an inotify instance */ | ||
| 1013 | if (unlikely(filp->f_op != &inotify_fops)) { | ||
| 1014 | ret = -EINVAL; | ||
| 1015 | goto fput_and_out; | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | if (!(mask & IN_DONT_FOLLOW)) | ||
| 1019 | flags |= LOOKUP_FOLLOW; | ||
| 1020 | if (mask & IN_ONLYDIR) | ||
| 1021 | flags |= LOOKUP_DIRECTORY; | ||
| 1022 | |||
| 1023 | ret = find_inode(path, &nd, flags); | ||
| 1024 | if (unlikely(ret)) | ||
| 1025 | goto fput_and_out; | ||
| 1026 | 629 | ||
| 1027 | /* inode held in place by reference to nd; dev by fget on fd */ | 630 | /* don't allow invalid bits: we don't want flags set */ |
| 1028 | inode = nd.dentry->d_inode; | 631 | mask &= IN_ALL_EVENTS | IN_ONESHOT; |
| 1029 | dev = filp->private_data; | 632 | if (unlikely(!mask)) |
| 633 | return -EINVAL; | ||
| 634 | watch->mask = mask; | ||
| 1030 | 635 | ||
| 1031 | mutex_lock(&inode->inotify_mutex); | 636 | mutex_lock(&inode->inotify_mutex); |
| 1032 | mutex_lock(&dev->mutex); | 637 | mutex_lock(&ih->mutex); |
| 1033 | |||
| 1034 | if (mask & IN_MASK_ADD) | ||
| 1035 | mask_add = 1; | ||
| 1036 | 638 | ||
| 1037 | /* don't let user-space set invalid bits: we don't want flags set */ | 639 | /* Initialize a new watch */ |
| 1038 | mask &= IN_ALL_EVENTS | IN_ONESHOT; | 640 | ret = inotify_handle_get_wd(ih, watch); |
| 1039 | if (unlikely(!mask)) { | 641 | if (unlikely(ret)) |
| 1040 | ret = -EINVAL; | ||
| 1041 | goto out; | 642 | goto out; |
| 1042 | } | 643 | ret = watch->wd; |
| 644 | |||
| 645 | /* save a reference to handle and bump the count to make it official */ | ||
| 646 | get_inotify_handle(ih); | ||
| 647 | watch->ih = ih; | ||
| 1043 | 648 | ||
| 1044 | /* | 649 | /* |
| 1045 | * Handle the case of re-adding a watch on an (inode,dev) pair that we | 650 | * Save a reference to the inode and bump the ref count to make it |
| 1046 | * are already watching. We just update the mask and return its wd. | 651 | * official. We hold a reference to nameidata, which makes this safe. |
| 1047 | */ | 652 | */ |
| 1048 | old = inode_find_dev(inode, dev); | 653 | watch->inode = igrab(inode); |
| 1049 | if (unlikely(old)) { | ||
| 1050 | if (mask_add) | ||
| 1051 | old->mask |= mask; | ||
| 1052 | else | ||
| 1053 | old->mask = mask; | ||
| 1054 | ret = old->wd; | ||
| 1055 | goto out; | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | watch = create_watch(dev, mask, inode); | ||
| 1059 | if (unlikely(IS_ERR(watch))) { | ||
| 1060 | ret = PTR_ERR(watch); | ||
| 1061 | goto out; | ||
| 1062 | } | ||
| 1063 | 654 | ||
| 1064 | if (!inotify_inode_watched(inode)) | 655 | if (!inotify_inode_watched(inode)) |
| 1065 | set_dentry_child_flags(inode, 1); | 656 | set_dentry_child_flags(inode, 1); |
| 1066 | 657 | ||
| 1067 | /* Add the watch to the device's and the inode's list */ | 658 | /* Add the watch to the handle's and the inode's list */ |
| 1068 | list_add(&watch->d_list, &dev->watches); | 659 | list_add(&watch->h_list, &ih->watches); |
| 1069 | list_add(&watch->i_list, &inode->inotify_watches); | 660 | list_add(&watch->i_list, &inode->inotify_watches); |
| 1070 | ret = watch->wd; | ||
| 1071 | out: | 661 | out: |
| 1072 | mutex_unlock(&dev->mutex); | 662 | mutex_unlock(&ih->mutex); |
| 1073 | mutex_unlock(&inode->inotify_mutex); | 663 | mutex_unlock(&inode->inotify_mutex); |
| 1074 | path_release(&nd); | ||
| 1075 | fput_and_out: | ||
| 1076 | fput_light(filp, fput_needed); | ||
| 1077 | return ret; | 664 | return ret; |
| 1078 | } | 665 | } |
| 666 | EXPORT_SYMBOL_GPL(inotify_add_watch); | ||
| 1079 | 667 | ||
/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Looks up @wd in the handle's idr, pins the watch, then re-takes the
 * locks in the canonical order (inode->inotify_mutex before ih->mutex)
 * and removes the watch if it is still registered.
 *
 * Returns 0 on success (including the case where another thread removed
 * the watch first) or -EINVAL if @wd is not a valid descriptor.
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	/* pin the watch and its inode across the unlock/relock window */
	get_inotify_watch(watch);
	inode = watch->inode;
	mutex_unlock(&ih->mutex);

	/*
	 * Lock ordering demands inode->inotify_mutex first, which is why
	 * we had to drop ih->mutex above and re-acquire both here.
	 */
	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race with another removal */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	put_inotify_watch(watch);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
| 1103 | 704 | ||
| 1104 | static struct super_block * | 705 | /** |
| 1105 | inotify_get_sb(struct file_system_type *fs_type, int flags, | 706 | * inotify_rm_watch - remove a watch from an inotify instance |
| 1106 | const char *dev_name, void *data) | 707 | * @ih: inotify handle |
| 708 | * @watch: watch to remove | ||
| 709 | * | ||
| 710 | * Can sleep. | ||
| 711 | */ | ||
| 712 | int inotify_rm_watch(struct inotify_handle *ih, | ||
| 713 | struct inotify_watch *watch) | ||
| 1107 | { | 714 | { |
| 1108 | return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA); | 715 | return inotify_rm_wd(ih, watch->wd); |
| 1109 | } | 716 | } |
| 1110 | 717 | EXPORT_SYMBOL_GPL(inotify_rm_watch); | |
| 1111 | static struct file_system_type inotify_fs_type = { | ||
| 1112 | .name = "inotifyfs", | ||
| 1113 | .get_sb = inotify_get_sb, | ||
| 1114 | .kill_sb = kill_anon_super, | ||
| 1115 | }; | ||
| 1116 | 718 | ||
| 1117 | /* | 719 | /* |
| 1118 | * inotify_setup - Our initialization function. Note that we cannnot return | 720 | * inotify_setup - core initialization function |
| 1119 | * error because we have compiled-in VFS hooks. So an (unlikely) failure here | ||
| 1120 | * must result in panic(). | ||
| 1121 | */ | 721 | */ |
| 1122 | static int __init inotify_setup(void) | 722 | static int __init inotify_setup(void) |
| 1123 | { | 723 | { |
| 1124 | int ret; | ||
| 1125 | |||
| 1126 | ret = register_filesystem(&inotify_fs_type); | ||
| 1127 | if (unlikely(ret)) | ||
| 1128 | panic("inotify: register_filesystem returned %d!\n", ret); | ||
| 1129 | |||
| 1130 | inotify_mnt = kern_mount(&inotify_fs_type); | ||
| 1131 | if (IS_ERR(inotify_mnt)) | ||
| 1132 | panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); | ||
| 1133 | |||
| 1134 | inotify_max_queued_events = 16384; | ||
| 1135 | inotify_max_user_instances = 128; | ||
| 1136 | inotify_max_user_watches = 8192; | ||
| 1137 | |||
| 1138 | atomic_set(&inotify_cookie, 0); | 724 | atomic_set(&inotify_cookie, 0); |
| 1139 | 725 | ||
| 1140 | watch_cachep = kmem_cache_create("inotify_watch_cache", | ||
| 1141 | sizeof(struct inotify_watch), | ||
| 1142 | 0, SLAB_PANIC, NULL, NULL); | ||
| 1143 | event_cachep = kmem_cache_create("inotify_event_cache", | ||
| 1144 | sizeof(struct inotify_kernel_event), | ||
| 1145 | 0, SLAB_PANIC, NULL, NULL); | ||
| 1146 | |||
| 1147 | return 0; | 726 | return 0; |
| 1148 | } | 727 | } |
| 1149 | 728 | ||
diff --git a/fs/inotify_user.c b/fs/inotify_user.c new file mode 100644 index 000000000000..9e9931e2badd --- /dev/null +++ b/fs/inotify_user.c | |||
| @@ -0,0 +1,719 @@ | |||
| 1 | /* | ||
| 2 | * fs/inotify_user.c - inotify support for userspace | ||
| 3 | * | ||
| 4 | * Authors: | ||
| 5 | * John McCutchan <ttb@tentacle.dhs.org> | ||
| 6 | * Robert Love <rml@novell.com> | ||
| 7 | * | ||
| 8 | * Copyright (C) 2005 John McCutchan | ||
| 9 | * Copyright 2006 Hewlett-Packard Development Company, L.P. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify it | ||
| 12 | * under the terms of the GNU General Public License as published by the | ||
| 13 | * Free Software Foundation; either version 2, or (at your option) any | ||
| 14 | * later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, but | ||
| 17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 19 | * General Public License for more details. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/fs.h> | ||
| 26 | #include <linux/file.h> | ||
| 27 | #include <linux/mount.h> | ||
| 28 | #include <linux/namei.h> | ||
| 29 | #include <linux/poll.h> | ||
| 30 | #include <linux/init.h> | ||
| 31 | #include <linux/list.h> | ||
| 32 | #include <linux/inotify.h> | ||
| 33 | #include <linux/syscalls.h> | ||
| 34 | |||
| 35 | #include <asm/ioctls.h> | ||
| 36 | |||
| 37 | static kmem_cache_t *watch_cachep __read_mostly; | ||
| 38 | static kmem_cache_t *event_cachep __read_mostly; | ||
| 39 | |||
| 40 | static struct vfsmount *inotify_mnt __read_mostly; | ||
| 41 | |||
| 42 | /* these are configurable via /proc/sys/fs/inotify/ */ | ||
| 43 | int inotify_max_user_instances __read_mostly; | ||
| 44 | int inotify_max_user_watches __read_mostly; | ||
| 45 | int inotify_max_queued_events __read_mostly; | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Lock ordering: | ||
| 49 | * | ||
| 50 | * inotify_dev->up_mutex (ensures we don't re-add the same watch) | ||
| 51 | * inode->inotify_mutex (protects inode's watch list) | ||
| 52 | * inotify_handle->mutex (protects inotify_handle's watch list) | ||
| 53 | * inotify_dev->ev_mutex (protects device's event queue) | ||
| 54 | */ | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Lifetimes of the main data structures: | ||
| 58 | * | ||
| 59 | * inotify_device: Lifetime is managed by reference count, from | ||
| 60 | * sys_inotify_init() until release. Additional references can bump the count | ||
| 61 | * via get_inotify_dev() and drop the count via put_inotify_dev(). | ||
| 62 | * | ||
| 63 | * inotify_user_watch: Lifetime is from create_watch() to the receipt of an | ||
| 64 | * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the | ||
| 65 | * first event, or to inotify_destroy(). | ||
| 66 | */ | ||
| 67 | |||
/*
 * struct inotify_device - represents an inotify instance (one per
 * sys_inotify_init() fd).
 *
 * The event queue is protected by ev_mutex; watch updates are
 * serialized by up_mutex.  Lifetime is managed by the 'count'
 * reference count (get_inotify_dev()/put_inotify_dev()).
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct mutex		ev_mutex;	/* protects event queue */
	struct mutex		up_mutex;	/* synchronizes watch updates */
	struct list_head	events;		/* list of queued events */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	struct inotify_handle	*ih;		/* inotify handle */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};
| 85 | |||
/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space. A list of these is attached to each instance of the
 * device. In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->ev_mutex of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any (kmalloc'd, padded) */
};
| 99 | |||
/*
 * struct inotify_user_watch - our version of an inotify_watch, we add
 * a reference to the associated inotify_device (taken in create_watch()
 * and dropped in free_inotify_user_watch()).
 */
struct inotify_user_watch {
	struct inotify_device	*dev;	/* associated device */
	struct inotify_watch	wdata;	/* inotify watch data */
};
| 108 | |||
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

/* shared lower bound (0) for all three tunables */
static int zero;

/* /proc/sys/fs/inotify/ tunables; defaults are assigned at module init */
ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
| 149 | |||
/* take a reference on an inotify instance */
static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}
| 154 | |||
/*
 * Drop a reference on an inotify instance; on the final put, uncharge
 * the owning user's instance quota and free the device.
 */
static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}
| 163 | |||
| 164 | /* | ||
| 165 | * free_inotify_user_watch - cleans up the watch and its references | ||
| 166 | */ | ||
| 167 | static void free_inotify_user_watch(struct inotify_watch *w) | ||
| 168 | { | ||
| 169 | struct inotify_user_watch *watch; | ||
| 170 | struct inotify_device *dev; | ||
| 171 | |||
| 172 | watch = container_of(w, struct inotify_user_watch, wdata); | ||
| 173 | dev = watch->dev; | ||
| 174 | |||
| 175 | atomic_dec(&dev->user->inotify_watches); | ||
| 176 | put_inotify_dev(dev); | ||
| 177 | kmem_cache_free(watch_cachep, watch); | ||
| 178 | } | ||
| 179 | |||
| 180 | /* | ||
| 181 | * kernel_event - create a new kernel event with the given parameters | ||
| 182 | * | ||
| 183 | * This function can sleep. | ||
| 184 | */ | ||
| 185 | static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, | ||
| 186 | const char *name) | ||
| 187 | { | ||
| 188 | struct inotify_kernel_event *kevent; | ||
| 189 | |||
| 190 | kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL); | ||
| 191 | if (unlikely(!kevent)) | ||
| 192 | return NULL; | ||
| 193 | |||
| 194 | /* we hand this out to user-space, so zero it just in case */ | ||
| 195 | memset(&kevent->event, 0, sizeof(struct inotify_event)); | ||
| 196 | |||
| 197 | kevent->event.wd = wd; | ||
| 198 | kevent->event.mask = mask; | ||
| 199 | kevent->event.cookie = cookie; | ||
| 200 | |||
| 201 | INIT_LIST_HEAD(&kevent->list); | ||
| 202 | |||
| 203 | if (name) { | ||
| 204 | size_t len, rem, event_size = sizeof(struct inotify_event); | ||
| 205 | |||
| 206 | /* | ||
| 207 | * We need to pad the filename so as to properly align an | ||
| 208 | * array of inotify_event structures. Because the structure is | ||
| 209 | * small and the common case is a small filename, we just round | ||
| 210 | * up to the next multiple of the structure's sizeof. This is | ||
| 211 | * simple and safe for all architectures. | ||
| 212 | */ | ||
| 213 | len = strlen(name) + 1; | ||
| 214 | rem = event_size - len; | ||
| 215 | if (len > event_size) { | ||
| 216 | rem = event_size - (len % event_size); | ||
| 217 | if (len % event_size == 0) | ||
| 218 | rem = 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | kevent->name = kmalloc(len + rem, GFP_KERNEL); | ||
| 222 | if (unlikely(!kevent->name)) { | ||
| 223 | kmem_cache_free(event_cachep, kevent); | ||
| 224 | return NULL; | ||
| 225 | } | ||
| 226 | memcpy(kevent->name, name, len); | ||
| 227 | if (rem) | ||
| 228 | memset(kevent->name + len, 0, rem); | ||
| 229 | kevent->event.len = len + rem; | ||
| 230 | } else { | ||
| 231 | kevent->event.len = 0; | ||
| 232 | kevent->name = NULL; | ||
| 233 | } | ||
| 234 | |||
| 235 | return kevent; | ||
| 236 | } | ||
| 237 | |||
/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->ev_mutex.
 *
 * NOTE: no emptiness check is performed; if dev->events is empty the
 * returned pointer aliases the list head and must not be dereferenced.
 * Callers must check list_empty() first.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}
| 248 | |||
| 249 | /* | ||
| 250 | * inotify_dev_queue_event - event handler registered with core inotify, adds | ||
| 251 | * a new event to the given device | ||
| 252 | * | ||
| 253 | * Can sleep (calls kernel_event()). | ||
| 254 | */ | ||
| 255 | static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, | ||
| 256 | u32 cookie, const char *name, | ||
| 257 | struct inode *ignored) | ||
| 258 | { | ||
| 259 | struct inotify_user_watch *watch; | ||
| 260 | struct inotify_device *dev; | ||
| 261 | struct inotify_kernel_event *kevent, *last; | ||
| 262 | |||
| 263 | watch = container_of(w, struct inotify_user_watch, wdata); | ||
| 264 | dev = watch->dev; | ||
| 265 | |||
| 266 | mutex_lock(&dev->ev_mutex); | ||
| 267 | |||
| 268 | /* we can safely put the watch as we don't reference it while | ||
| 269 | * generating the event | ||
| 270 | */ | ||
| 271 | if (mask & IN_IGNORED || mask & IN_ONESHOT) | ||
| 272 | put_inotify_watch(w); /* final put */ | ||
| 273 | |||
| 274 | /* coalescing: drop this event if it is a dupe of the previous */ | ||
| 275 | last = inotify_dev_get_event(dev); | ||
| 276 | if (last && last->event.mask == mask && last->event.wd == wd && | ||
| 277 | last->event.cookie == cookie) { | ||
| 278 | const char *lastname = last->name; | ||
| 279 | |||
| 280 | if (!name && !lastname) | ||
| 281 | goto out; | ||
| 282 | if (name && lastname && !strcmp(lastname, name)) | ||
| 283 | goto out; | ||
| 284 | } | ||
| 285 | |||
| 286 | /* the queue overflowed and we already sent the Q_OVERFLOW event */ | ||
| 287 | if (unlikely(dev->event_count > dev->max_events)) | ||
| 288 | goto out; | ||
| 289 | |||
| 290 | /* if the queue overflows, we need to notify user space */ | ||
| 291 | if (unlikely(dev->event_count == dev->max_events)) | ||
| 292 | kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); | ||
| 293 | else | ||
| 294 | kevent = kernel_event(wd, mask, cookie, name); | ||
| 295 | |||
| 296 | if (unlikely(!kevent)) | ||
| 297 | goto out; | ||
| 298 | |||
| 299 | /* queue the event and wake up anyone waiting */ | ||
| 300 | dev->event_count++; | ||
| 301 | dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; | ||
| 302 | list_add_tail(&kevent->list, &dev->events); | ||
| 303 | wake_up_interruptible(&dev->wq); | ||
| 304 | |||
| 305 | out: | ||
| 306 | mutex_unlock(&dev->ev_mutex); | ||
| 307 | } | ||
| 308 | |||
| 309 | /* | ||
| 310 | * remove_kevent - cleans up and ultimately frees the given kevent | ||
| 311 | * | ||
| 312 | * Caller must hold dev->ev_mutex. | ||
| 313 | */ | ||
| 314 | static void remove_kevent(struct inotify_device *dev, | ||
| 315 | struct inotify_kernel_event *kevent) | ||
| 316 | { | ||
| 317 | list_del(&kevent->list); | ||
| 318 | |||
| 319 | dev->event_count--; | ||
| 320 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; | ||
| 321 | |||
| 322 | kfree(kevent->name); | ||
| 323 | kmem_cache_free(event_cachep, kevent); | ||
| 324 | } | ||
| 325 | |||
| 326 | /* | ||
| 327 | * inotify_dev_event_dequeue - destroy an event on the given device | ||
| 328 | * | ||
| 329 | * Caller must hold dev->ev_mutex. | ||
| 330 | */ | ||
| 331 | static void inotify_dev_event_dequeue(struct inotify_device *dev) | ||
| 332 | { | ||
| 333 | if (!list_empty(&dev->events)) { | ||
| 334 | struct inotify_kernel_event *kevent; | ||
| 335 | kevent = inotify_dev_get_event(dev); | ||
| 336 | remove_kevent(dev, kevent); | ||
| 337 | } | ||
| 338 | } | ||
| 339 | |||
/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 *
 * On success (return 0) the caller owns the nameidata reference and must
 * drop it with path_release().  On failure the reference has already
 * been released here.
 */
static int find_inode(const char __user *dirname, struct nameidata *nd,
		      unsigned flags)
{
	int error;

	error = __user_walk(dirname, flags, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = vfs_permission(nd, MAY_READ);
	if (error)
		path_release(nd);
	return error;
}
| 357 | |||
/*
 * create_watch - creates a watch on the given device.
 *
 * Allocates an inotify_user_watch, charges it against the opening
 * user's watch quota, and registers it with the core via
 * inotify_add_watch().  Returns the new watch descriptor or a
 * negative errno.
 *
 * Callers must hold dev->up_mutex.
 */
static int create_watch(struct inotify_device *dev, struct inode *inode,
			u32 mask)
{
	struct inotify_user_watch *watch;
	int ret;

	/* enforce the per-user limit on the number of watches */
	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return -ENOSPC;

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return -ENOMEM;

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	atomic_inc(&dev->user->inotify_watches);

	inotify_init_watch(&watch->wdata);
	ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask);
	if (ret < 0)
		/* undoes the quota charge and device reference above */
		free_inotify_user_watch(&watch->wdata);

	return ret;
}
| 390 | |||
| 391 | /* Device Interface */ | ||
| 392 | |||
| 393 | static unsigned int inotify_poll(struct file *file, poll_table *wait) | ||
| 394 | { | ||
| 395 | struct inotify_device *dev = file->private_data; | ||
| 396 | int ret = 0; | ||
| 397 | |||
| 398 | poll_wait(file, &dev->wq, wait); | ||
| 399 | mutex_lock(&dev->ev_mutex); | ||
| 400 | if (!list_empty(&dev->events)) | ||
| 401 | ret = POLLIN | POLLRDNORM; | ||
| 402 | mutex_unlock(&dev->ev_mutex); | ||
| 403 | |||
| 404 | return ret; | ||
| 405 | } | ||
| 406 | |||
/*
 * inotify_read - read() handler: block until at least one event is
 * queued (unless O_NONBLOCK), then copy out as many whole events
 * (header plus padded name) as fit in the user buffer.
 *
 * Returns the number of bytes copied, or -EAGAIN/-EINTR/-EFAULT.
 * Note: if the buffer cannot hold even the first event, 0 is returned.
 */
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof (struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	/* wait loop: every exit path sets ret before breaking */
	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&dev->ev_mutex);
		events = !list_empty(&dev->events);
		mutex_unlock(&dev->ev_mutex);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	mutex_lock(&dev->ev_mutex);
	while (1) {
		struct inotify_kernel_event *kevent;

		/* bytes copied so far; returned when we stop */
		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		/* stop when the next event would not fit in the buffer */
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)){
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		/* event fully copied out: dequeue and free it */
		remove_kevent(dev, kevent);
	}
	mutex_unlock(&dev->ev_mutex);

	return ret;
}
| 483 | |||
/*
 * inotify_release - last close of an inotify instance fd: destroy all
 * watches, drain the event queue, and drop the fd's device reference.
 */
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/* tear down the watches first; this may queue final IN_IGNORED events */
	inotify_destroy(dev->ih);

	/* destroy all of the events on this device */
	mutex_lock(&dev->ev_mutex);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	mutex_unlock(&dev->ev_mutex);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}
| 501 | |||
| 502 | static long inotify_ioctl(struct file *file, unsigned int cmd, | ||
| 503 | unsigned long arg) | ||
| 504 | { | ||
| 505 | struct inotify_device *dev; | ||
| 506 | void __user *p; | ||
| 507 | int ret = -ENOTTY; | ||
| 508 | |||
| 509 | dev = file->private_data; | ||
| 510 | p = (void __user *) arg; | ||
| 511 | |||
| 512 | switch (cmd) { | ||
| 513 | case FIONREAD: | ||
| 514 | ret = put_user(dev->queue_size, (int __user *) p); | ||
| 515 | break; | ||
| 516 | } | ||
| 517 | |||
| 518 | return ret; | ||
| 519 | } | ||
| 520 | |||
/* file operations backing every inotify instance fd */
static const struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

/* callbacks handed to the inotify core in sys_inotify_init() */
static const struct inotify_operations inotify_user_ops = {
	.handle_event	= inotify_dev_queue_event,
	.destroy_watch	= free_inotify_user_watch,
};
| 533 | |||
/*
 * sys_inotify_init - create a new inotify instance and return a file
 * descriptor referring to it.
 *
 * Returns the new fd, or a negative errno:
 *   -ENFILE  no free file structures
 *   -EMFILE  per-user instance limit reached
 *   -ENOMEM  allocation failure
 */
asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct inotify_handle *ih;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	/* charge the instance against the opening user's quota */
	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	/* register this instance with the inotify core */
	ih = inotify_init(&inotify_user_ops);
	if (unlikely(IS_ERR(ih))) {
		ret = PTR_ERR(ih);
		goto out_free_dev;
	}
	dev->ih = ih;

	/* back the fd with an anonymous file on the inotify pseudo-fs */
	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	INIT_LIST_HEAD(&dev->events);
	init_waitqueue_head(&dev->wq);
	mutex_init(&dev->ev_mutex);
	mutex_init(&dev->up_mutex);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	/* this get is matched by the put in inotify_release() */
	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_dev:
	kfree(dev);
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
| 604 | |||
/*
 * sys_inotify_add_watch - add a watch for @path with event @mask to the
 * inotify instance referred to by @fd; if a watch on that inode already
 * exists, it is updated instead.
 *
 * Returns the watch descriptor or a negative errno.
 */
asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	unsigned flags = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* translate IN_* lookup modifiers into path-walk flags */
	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = find_inode(path, &nd, flags);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	/* up_mutex serializes updates so the same watch isn't added twice */
	mutex_lock(&dev->up_mutex);
	ret = inotify_find_update_watch(dev->ih, inode, mask);
	if (ret == -ENOENT)
		ret = create_watch(dev, inode, mask);
	mutex_unlock(&dev->up_mutex);

	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
| 648 | |||
| 649 | asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) | ||
| 650 | { | ||
| 651 | struct file *filp; | ||
| 652 | struct inotify_device *dev; | ||
| 653 | int ret, fput_needed; | ||
| 654 | |||
| 655 | filp = fget_light(fd, &fput_needed); | ||
| 656 | if (unlikely(!filp)) | ||
| 657 | return -EBADF; | ||
| 658 | |||
| 659 | /* verify that this is indeed an inotify instance */ | ||
| 660 | if (unlikely(filp->f_op != &inotify_fops)) { | ||
| 661 | ret = -EINVAL; | ||
| 662 | goto out; | ||
| 663 | } | ||
| 664 | |||
| 665 | dev = filp->private_data; | ||
| 666 | |||
| 667 | /* we free our watch data when we get IN_IGNORED */ | ||
| 668 | ret = inotify_rm_wd(dev->ih, wd); | ||
| 669 | |||
| 670 | out: | ||
| 671 | fput_light(filp, fput_needed); | ||
| 672 | return ret; | ||
| 673 | } | ||
| 674 | |||
/* superblock for the pseudo filesystem backing inotify fds */
static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
    .name           = "inotifyfs",
    .get_sb         = inotify_get_sb,
    .kill_sb        = kill_anon_super,
};
| 687 | |||
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot
 * return error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init inotify_user_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	/* default limits, tunable via /proc/sys/fs/inotify/ */
	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_user_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}
| 718 | |||
| 719 | module_init(inotify_user_setup); | ||
diff --git a/fs/namei.c b/fs/namei.c index d6e2ee251736..184fe4acf824 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1127,7 +1127,7 @@ out: | |||
| 1127 | if (likely(retval == 0)) { | 1127 | if (likely(retval == 0)) { |
| 1128 | if (unlikely(current->audit_context && nd && nd->dentry && | 1128 | if (unlikely(current->audit_context && nd && nd->dentry && |
| 1129 | nd->dentry->d_inode)) | 1129 | nd->dentry->d_inode)) |
| 1130 | audit_inode(name, nd->dentry->d_inode, flags); | 1130 | audit_inode(name, nd->dentry->d_inode); |
| 1131 | } | 1131 | } |
| 1132 | out_fail: | 1132 | out_fail: |
| 1133 | return retval; | 1133 | return retval; |
| @@ -633,7 +633,7 @@ asmlinkage long sys_fchmod(unsigned int fd, mode_t mode) | |||
| 633 | dentry = file->f_dentry; | 633 | dentry = file->f_dentry; |
| 634 | inode = dentry->d_inode; | 634 | inode = dentry->d_inode; |
| 635 | 635 | ||
| 636 | audit_inode(NULL, inode, 0); | 636 | audit_inode(NULL, inode); |
| 637 | 637 | ||
| 638 | err = -EROFS; | 638 | err = -EROFS; |
| 639 | if (IS_RDONLY(inode)) | 639 | if (IS_RDONLY(inode)) |
| @@ -786,7 +786,7 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group) | |||
| 786 | if (file) { | 786 | if (file) { |
| 787 | struct dentry * dentry; | 787 | struct dentry * dentry; |
| 788 | dentry = file->f_dentry; | 788 | dentry = file->f_dentry; |
| 789 | audit_inode(NULL, dentry->d_inode, 0); | 789 | audit_inode(NULL, dentry->d_inode); |
| 790 | error = chown_common(dentry, user, group); | 790 | error = chown_common(dentry, user, group); |
| 791 | fput(file); | 791 | fput(file); |
| 792 | } | 792 | } |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 6cc77dc3f3ff..6afff725a8c9 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -1019,8 +1019,8 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, | |||
| 1019 | if (current != task) | 1019 | if (current != task) |
| 1020 | return -EPERM; | 1020 | return -EPERM; |
| 1021 | 1021 | ||
| 1022 | if (count > PAGE_SIZE) | 1022 | if (count >= PAGE_SIZE) |
| 1023 | count = PAGE_SIZE; | 1023 | count = PAGE_SIZE - 1; |
| 1024 | 1024 | ||
| 1025 | if (*ppos != 0) { | 1025 | if (*ppos != 0) { |
| 1026 | /* No partial writes. */ | 1026 | /* No partial writes. */ |
| @@ -1033,6 +1033,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, | |||
| 1033 | if (copy_from_user(page, buf, count)) | 1033 | if (copy_from_user(page, buf, count)) |
| 1034 | goto out_free_page; | 1034 | goto out_free_page; |
| 1035 | 1035 | ||
| 1036 | page[count] = '\0'; | ||
| 1036 | loginuid = simple_strtoul(page, &tmp, 10); | 1037 | loginuid = simple_strtoul(page, &tmp, 10); |
| 1037 | if (tmp == page) { | 1038 | if (tmp == page) { |
| 1038 | length = -EINVAL; | 1039 | length = -EINVAL; |
diff --git a/fs/xattr.c b/fs/xattr.c index e416190f5e9c..c32f15b5f60f 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
| @@ -242,7 +242,7 @@ sys_fsetxattr(int fd, char __user *name, void __user *value, | |||
| 242 | if (!f) | 242 | if (!f) |
| 243 | return error; | 243 | return error; |
| 244 | dentry = f->f_dentry; | 244 | dentry = f->f_dentry; |
| 245 | audit_inode(NULL, dentry->d_inode, 0); | 245 | audit_inode(NULL, dentry->d_inode); |
| 246 | error = setxattr(dentry, name, value, size, flags); | 246 | error = setxattr(dentry, name, value, size, flags); |
| 247 | fput(f); | 247 | fput(f); |
| 248 | return error; | 248 | return error; |
| @@ -469,7 +469,7 @@ sys_fremovexattr(int fd, char __user *name) | |||
| 469 | if (!f) | 469 | if (!f) |
| 470 | return error; | 470 | return error; |
| 471 | dentry = f->f_dentry; | 471 | dentry = f->f_dentry; |
| 472 | audit_inode(NULL, dentry->d_inode, 0); | 472 | audit_inode(NULL, dentry->d_inode); |
| 473 | error = removexattr(dentry, name); | 473 | error = removexattr(dentry, name); |
| 474 | fput(f); | 474 | fput(f); |
| 475 | return error; | 475 | return error; |
diff --git a/include/linux/audit.h b/include/linux/audit.h index 14259f6db5bc..e051ff9c5b50 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -82,7 +82,12 @@ | |||
| 82 | #define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ | 82 | #define AUDIT_CONFIG_CHANGE 1305 /* Audit system configuration change */ |
| 83 | #define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ | 83 | #define AUDIT_SOCKADDR 1306 /* sockaddr copied as syscall arg */ |
| 84 | #define AUDIT_CWD 1307 /* Current working directory */ | 84 | #define AUDIT_CWD 1307 /* Current working directory */ |
| 85 | #define AUDIT_EXECVE 1309 /* execve arguments */ | ||
| 85 | #define AUDIT_IPC_SET_PERM 1311 /* IPC new permissions record type */ | 86 | #define AUDIT_IPC_SET_PERM 1311 /* IPC new permissions record type */ |
| 87 | #define AUDIT_MQ_OPEN 1312 /* POSIX MQ open record type */ | ||
| 88 | #define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */ | ||
| 89 | #define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */ | ||
| 90 | #define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ | ||
| 86 | 91 | ||
| 87 | #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ | 92 | #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ |
| 88 | #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ | 93 | #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ |
| @@ -150,6 +155,7 @@ | |||
| 150 | #define AUDIT_SE_TYPE 15 /* security label type */ | 155 | #define AUDIT_SE_TYPE 15 /* security label type */ |
| 151 | #define AUDIT_SE_SEN 16 /* security label sensitivity label */ | 156 | #define AUDIT_SE_SEN 16 /* security label sensitivity label */ |
| 152 | #define AUDIT_SE_CLR 17 /* security label clearance label */ | 157 | #define AUDIT_SE_CLR 17 /* security label clearance label */ |
| 158 | #define AUDIT_PPID 18 | ||
| 153 | 159 | ||
| 154 | /* These are ONLY useful when checking | 160 | /* These are ONLY useful when checking |
| 155 | * at syscall exit time (AUDIT_AT_EXIT). */ | 161 | * at syscall exit time (AUDIT_AT_EXIT). */ |
| @@ -158,6 +164,7 @@ | |||
| 158 | #define AUDIT_INODE 102 | 164 | #define AUDIT_INODE 102 |
| 159 | #define AUDIT_EXIT 103 | 165 | #define AUDIT_EXIT 103 |
| 160 | #define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */ | 166 | #define AUDIT_SUCCESS 104 /* exit >= 0; value ignored */ |
| 167 | #define AUDIT_WATCH 105 | ||
| 161 | 168 | ||
| 162 | #define AUDIT_ARG0 200 | 169 | #define AUDIT_ARG0 200 |
| 163 | #define AUDIT_ARG1 (AUDIT_ARG0+1) | 170 | #define AUDIT_ARG1 (AUDIT_ARG0+1) |
| @@ -277,12 +284,16 @@ struct audit_rule { /* for AUDIT_LIST, AUDIT_ADD, and AUDIT_DEL */ | |||
| 277 | struct audit_sig_info { | 284 | struct audit_sig_info { |
| 278 | uid_t uid; | 285 | uid_t uid; |
| 279 | pid_t pid; | 286 | pid_t pid; |
| 287 | char ctx[0]; | ||
| 280 | }; | 288 | }; |
| 281 | 289 | ||
| 282 | struct audit_buffer; | 290 | struct audit_buffer; |
| 283 | struct audit_context; | 291 | struct audit_context; |
| 284 | struct inode; | 292 | struct inode; |
| 285 | struct netlink_skb_parms; | 293 | struct netlink_skb_parms; |
| 294 | struct linux_binprm; | ||
| 295 | struct mq_attr; | ||
| 296 | struct mqstat; | ||
| 286 | 297 | ||
| 287 | #define AUDITSC_INVALID 0 | 298 | #define AUDITSC_INVALID 0 |
| 288 | #define AUDITSC_SUCCESS 1 | 299 | #define AUDITSC_SUCCESS 1 |
| @@ -297,15 +308,19 @@ extern void audit_syscall_entry(int arch, | |||
| 297 | int major, unsigned long a0, unsigned long a1, | 308 | int major, unsigned long a0, unsigned long a1, |
| 298 | unsigned long a2, unsigned long a3); | 309 | unsigned long a2, unsigned long a3); |
| 299 | extern void audit_syscall_exit(int failed, long return_code); | 310 | extern void audit_syscall_exit(int failed, long return_code); |
| 300 | extern void audit_getname(const char *name); | 311 | extern void __audit_getname(const char *name); |
| 301 | extern void audit_putname(const char *name); | 312 | extern void audit_putname(const char *name); |
| 302 | extern void __audit_inode(const char *name, const struct inode *inode, unsigned flags); | 313 | extern void __audit_inode(const char *name, const struct inode *inode); |
| 303 | extern void __audit_inode_child(const char *dname, const struct inode *inode, | 314 | extern void __audit_inode_child(const char *dname, const struct inode *inode, |
| 304 | unsigned long pino); | 315 | unsigned long pino); |
| 305 | static inline void audit_inode(const char *name, const struct inode *inode, | 316 | static inline void audit_getname(const char *name) |
| 306 | unsigned flags) { | 317 | { |
| 307 | if (unlikely(current->audit_context)) | 318 | if (unlikely(current->audit_context)) |
| 308 | __audit_inode(name, inode, flags); | 319 | __audit_getname(name); |
| 320 | } | ||
| 321 | static inline void audit_inode(const char *name, const struct inode *inode) { | ||
| 322 | if (unlikely(current->audit_context)) | ||
| 323 | __audit_inode(name, inode); | ||
| 309 | } | 324 | } |
| 310 | static inline void audit_inode_child(const char *dname, | 325 | static inline void audit_inode_child(const char *dname, |
| 311 | const struct inode *inode, | 326 | const struct inode *inode, |
| @@ -320,13 +335,61 @@ extern void auditsc_get_stamp(struct audit_context *ctx, | |||
| 320 | struct timespec *t, unsigned int *serial); | 335 | struct timespec *t, unsigned int *serial); |
| 321 | extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); | 336 | extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); |
| 322 | extern uid_t audit_get_loginuid(struct audit_context *ctx); | 337 | extern uid_t audit_get_loginuid(struct audit_context *ctx); |
| 323 | extern int audit_ipc_obj(struct kern_ipc_perm *ipcp); | 338 | extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp); |
| 324 | extern int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, struct kern_ipc_perm *ipcp); | 339 | extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode); |
| 340 | extern int audit_bprm(struct linux_binprm *bprm); | ||
| 325 | extern int audit_socketcall(int nargs, unsigned long *args); | 341 | extern int audit_socketcall(int nargs, unsigned long *args); |
| 326 | extern int audit_sockaddr(int len, void *addr); | 342 | extern int audit_sockaddr(int len, void *addr); |
| 327 | extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); | 343 | extern int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt); |
| 328 | extern void audit_signal_info(int sig, struct task_struct *t); | ||
| 329 | extern int audit_set_macxattr(const char *name); | 344 | extern int audit_set_macxattr(const char *name); |
| 345 | extern int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr); | ||
| 346 | extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout); | ||
| 347 | extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout); | ||
| 348 | extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification); | ||
| 349 | extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); | ||
| 350 | |||
| 351 | static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp) | ||
| 352 | { | ||
| 353 | if (unlikely(current->audit_context)) | ||
| 354 | return __audit_ipc_obj(ipcp); | ||
| 355 | return 0; | ||
| 356 | } | ||
| 357 | static inline int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) | ||
| 358 | { | ||
| 359 | if (unlikely(current->audit_context)) | ||
| 360 | return __audit_ipc_set_perm(qbytes, uid, gid, mode); | ||
| 361 | return 0; | ||
| 362 | } | ||
| 363 | static inline int audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) | ||
| 364 | { | ||
| 365 | if (unlikely(current->audit_context)) | ||
| 366 | return __audit_mq_open(oflag, mode, u_attr); | ||
| 367 | return 0; | ||
| 368 | } | ||
| 369 | static inline int audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec __user *u_abs_timeout) | ||
| 370 | { | ||
| 371 | if (unlikely(current->audit_context)) | ||
| 372 | return __audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); | ||
| 373 | return 0; | ||
| 374 | } | ||
| 375 | static inline int audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout) | ||
| 376 | { | ||
| 377 | if (unlikely(current->audit_context)) | ||
| 378 | return __audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); | ||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | static inline int audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) | ||
| 382 | { | ||
| 383 | if (unlikely(current->audit_context)) | ||
| 384 | return __audit_mq_notify(mqdes, u_notification); | ||
| 385 | return 0; | ||
| 386 | } | ||
| 387 | static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) | ||
| 388 | { | ||
| 389 | if (unlikely(current->audit_context)) | ||
| 390 | return __audit_mq_getsetattr(mqdes, mqstat); | ||
| 391 | return 0; | ||
| 392 | } | ||
| 330 | #else | 393 | #else |
| 331 | #define audit_alloc(t) ({ 0; }) | 394 | #define audit_alloc(t) ({ 0; }) |
| 332 | #define audit_free(t) do { ; } while (0) | 395 | #define audit_free(t) do { ; } while (0) |
| @@ -334,19 +397,24 @@ extern int audit_set_macxattr(const char *name); | |||
| 334 | #define audit_syscall_exit(f,r) do { ; } while (0) | 397 | #define audit_syscall_exit(f,r) do { ; } while (0) |
| 335 | #define audit_getname(n) do { ; } while (0) | 398 | #define audit_getname(n) do { ; } while (0) |
| 336 | #define audit_putname(n) do { ; } while (0) | 399 | #define audit_putname(n) do { ; } while (0) |
| 337 | #define __audit_inode(n,i,f) do { ; } while (0) | 400 | #define __audit_inode(n,i) do { ; } while (0) |
| 338 | #define __audit_inode_child(d,i,p) do { ; } while (0) | 401 | #define __audit_inode_child(d,i,p) do { ; } while (0) |
| 339 | #define audit_inode(n,i,f) do { ; } while (0) | 402 | #define audit_inode(n,i) do { ; } while (0) |
| 340 | #define audit_inode_child(d,i,p) do { ; } while (0) | 403 | #define audit_inode_child(d,i,p) do { ; } while (0) |
| 341 | #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) | 404 | #define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) |
| 342 | #define audit_get_loginuid(c) ({ -1; }) | 405 | #define audit_get_loginuid(c) ({ -1; }) |
| 343 | #define audit_ipc_obj(i) ({ 0; }) | 406 | #define audit_ipc_obj(i) ({ 0; }) |
| 344 | #define audit_ipc_set_perm(q,u,g,m,i) ({ 0; }) | 407 | #define audit_ipc_set_perm(q,u,g,m) ({ 0; }) |
| 408 | #define audit_bprm(p) ({ 0; }) | ||
| 345 | #define audit_socketcall(n,a) ({ 0; }) | 409 | #define audit_socketcall(n,a) ({ 0; }) |
| 346 | #define audit_sockaddr(len, addr) ({ 0; }) | 410 | #define audit_sockaddr(len, addr) ({ 0; }) |
| 347 | #define audit_avc_path(dentry, mnt) ({ 0; }) | 411 | #define audit_avc_path(dentry, mnt) ({ 0; }) |
| 348 | #define audit_signal_info(s,t) do { ; } while (0) | ||
| 349 | #define audit_set_macxattr(n) do { ; } while (0) | 412 | #define audit_set_macxattr(n) do { ; } while (0) |
| 413 | #define audit_mq_open(o,m,a) ({ 0; }) | ||
| 414 | #define audit_mq_timedsend(d,l,p,t) ({ 0; }) | ||
| 415 | #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) | ||
| 416 | #define audit_mq_notify(d,n) ({ 0; }) | ||
| 417 | #define audit_mq_getsetattr(d,s) ({ 0; }) | ||
| 350 | #endif | 418 | #endif |
| 351 | 419 | ||
| 352 | #ifdef CONFIG_AUDIT | 420 | #ifdef CONFIG_AUDIT |
| @@ -364,8 +432,11 @@ extern void audit_log_end(struct audit_buffer *ab); | |||
| 364 | extern void audit_log_hex(struct audit_buffer *ab, | 432 | extern void audit_log_hex(struct audit_buffer *ab, |
| 365 | const unsigned char *buf, | 433 | const unsigned char *buf, |
| 366 | size_t len); | 434 | size_t len); |
| 367 | extern void audit_log_untrustedstring(struct audit_buffer *ab, | 435 | extern const char * audit_log_untrustedstring(struct audit_buffer *ab, |
| 368 | const char *string); | 436 | const char *string); |
| 437 | extern const char * audit_log_n_untrustedstring(struct audit_buffer *ab, | ||
| 438 | size_t n, | ||
| 439 | const char *string); | ||
| 369 | extern void audit_log_d_path(struct audit_buffer *ab, | 440 | extern void audit_log_d_path(struct audit_buffer *ab, |
| 370 | const char *prefix, | 441 | const char *prefix, |
| 371 | struct dentry *dentry, | 442 | struct dentry *dentry, |
| @@ -383,8 +454,8 @@ extern int audit_receive_filter(int type, int pid, int uid, int seq, | |||
| 383 | #define audit_log_end(b) do { ; } while (0) | 454 | #define audit_log_end(b) do { ; } while (0) |
| 384 | #define audit_log_hex(a,b,l) do { ; } while (0) | 455 | #define audit_log_hex(a,b,l) do { ; } while (0) |
| 385 | #define audit_log_untrustedstring(a,s) do { ; } while (0) | 456 | #define audit_log_untrustedstring(a,s) do { ; } while (0) |
| 457 | #define audit_log_n_untrustedstring(a,n,s) do { ; } while (0) | ||
| 386 | #define audit_log_d_path(b,p,d,v) do { ; } while (0) | 458 | #define audit_log_d_path(b,p,d,v) do { ; } while (0) |
| 387 | #define audit_panic(m) do { ; } while (0) | ||
| 388 | #endif | 459 | #endif |
| 389 | #endif | 460 | #endif |
| 390 | #endif | 461 | #endif |
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 11438eff4d44..cc5dec70c32c 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
| @@ -54,19 +54,20 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
| 54 | 54 | ||
| 55 | if (isdir) | 55 | if (isdir) |
| 56 | isdir = IN_ISDIR; | 56 | isdir = IN_ISDIR; |
| 57 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name); | 57 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name, |
| 58 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name); | 58 | source); |
| 59 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name, | ||
| 60 | source); | ||
| 59 | 61 | ||
| 60 | if (target) { | 62 | if (target) { |
| 61 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL); | 63 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); |
| 62 | inotify_inode_is_dead(target); | 64 | inotify_inode_is_dead(target); |
| 63 | } | 65 | } |
| 64 | 66 | ||
| 65 | if (source) { | 67 | if (source) { |
| 66 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL); | 68 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); |
| 67 | } | 69 | } |
| 68 | audit_inode_child(old_name, source, old_dir->i_ino); | 70 | audit_inode_child(new_name, source, new_dir->i_ino); |
| 69 | audit_inode_child(new_name, target, new_dir->i_ino); | ||
| 70 | } | 71 | } |
| 71 | 72 | ||
| 72 | /* | 73 | /* |
| @@ -85,7 +86,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | |||
| 85 | */ | 86 | */ |
| 86 | static inline void fsnotify_inoderemove(struct inode *inode) | 87 | static inline void fsnotify_inoderemove(struct inode *inode) |
| 87 | { | 88 | { |
| 88 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL); | 89 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); |
| 89 | inotify_inode_is_dead(inode); | 90 | inotify_inode_is_dead(inode); |
| 90 | } | 91 | } |
| 91 | 92 | ||
| @@ -95,7 +96,8 @@ static inline void fsnotify_inoderemove(struct inode *inode) | |||
| 95 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | 96 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) |
| 96 | { | 97 | { |
| 97 | inode_dir_notify(inode, DN_CREATE); | 98 | inode_dir_notify(inode, DN_CREATE); |
| 98 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name); | 99 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, |
| 100 | dentry->d_inode); | ||
| 99 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); | 101 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); |
| 100 | } | 102 | } |
| 101 | 103 | ||
| @@ -106,7 +108,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
| 106 | { | 108 | { |
| 107 | inode_dir_notify(inode, DN_CREATE); | 109 | inode_dir_notify(inode, DN_CREATE); |
| 108 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, | 110 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, |
| 109 | dentry->d_name.name); | 111 | dentry->d_name.name, dentry->d_inode); |
| 110 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); | 112 | audit_inode_child(dentry->d_name.name, dentry->d_inode, inode->i_ino); |
| 111 | } | 113 | } |
| 112 | 114 | ||
| @@ -123,7 +125,7 @@ static inline void fsnotify_access(struct dentry *dentry) | |||
| 123 | 125 | ||
| 124 | dnotify_parent(dentry, DN_ACCESS); | 126 | dnotify_parent(dentry, DN_ACCESS); |
| 125 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | 127 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); |
| 126 | inotify_inode_queue_event(inode, mask, 0, NULL); | 128 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
| 127 | } | 129 | } |
| 128 | 130 | ||
| 129 | /* | 131 | /* |
| @@ -139,7 +141,7 @@ static inline void fsnotify_modify(struct dentry *dentry) | |||
| 139 | 141 | ||
| 140 | dnotify_parent(dentry, DN_MODIFY); | 142 | dnotify_parent(dentry, DN_MODIFY); |
| 141 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | 143 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); |
| 142 | inotify_inode_queue_event(inode, mask, 0, NULL); | 144 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
| 143 | } | 145 | } |
| 144 | 146 | ||
| 145 | /* | 147 | /* |
| @@ -154,7 +156,7 @@ static inline void fsnotify_open(struct dentry *dentry) | |||
| 154 | mask |= IN_ISDIR; | 156 | mask |= IN_ISDIR; |
| 155 | 157 | ||
| 156 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | 158 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); |
| 157 | inotify_inode_queue_event(inode, mask, 0, NULL); | 159 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
| 158 | } | 160 | } |
| 159 | 161 | ||
| 160 | /* | 162 | /* |
| @@ -172,7 +174,7 @@ static inline void fsnotify_close(struct file *file) | |||
| 172 | mask |= IN_ISDIR; | 174 | mask |= IN_ISDIR; |
| 173 | 175 | ||
| 174 | inotify_dentry_parent_queue_event(dentry, mask, 0, name); | 176 | inotify_dentry_parent_queue_event(dentry, mask, 0, name); |
| 175 | inotify_inode_queue_event(inode, mask, 0, NULL); | 177 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
| 176 | } | 178 | } |
| 177 | 179 | ||
| 178 | /* | 180 | /* |
| @@ -187,7 +189,7 @@ static inline void fsnotify_xattr(struct dentry *dentry) | |||
| 187 | mask |= IN_ISDIR; | 189 | mask |= IN_ISDIR; |
| 188 | 190 | ||
| 189 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | 191 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); |
| 190 | inotify_inode_queue_event(inode, mask, 0, NULL); | 192 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
| 191 | } | 193 | } |
| 192 | 194 | ||
| 193 | /* | 195 | /* |
| @@ -234,7 +236,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) | |||
| 234 | if (in_mask) { | 236 | if (in_mask) { |
| 235 | if (S_ISDIR(inode->i_mode)) | 237 | if (S_ISDIR(inode->i_mode)) |
| 236 | in_mask |= IN_ISDIR; | 238 | in_mask |= IN_ISDIR; |
| 237 | inotify_inode_queue_event(inode, in_mask, 0, NULL); | 239 | inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); |
| 238 | inotify_dentry_parent_queue_event(dentry, in_mask, 0, | 240 | inotify_dentry_parent_queue_event(dentry, in_mask, 0, |
| 239 | dentry->d_name.name); | 241 | dentry->d_name.name); |
| 240 | } | 242 | } |
diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 71aa1553ef38..d4f48c6402e6 100644 --- a/include/linux/inotify.h +++ b/include/linux/inotify.h | |||
| @@ -68,18 +68,65 @@ struct inotify_event { | |||
| 68 | #include <linux/dcache.h> | 68 | #include <linux/dcache.h> |
| 69 | #include <linux/fs.h> | 69 | #include <linux/fs.h> |
| 70 | 70 | ||
| 71 | /* | ||
| 72 | * struct inotify_watch - represents a watch request on a specific inode | ||
| 73 | * | ||
| 74 | * h_list is protected by ih->mutex of the associated inotify_handle. | ||
| 75 | * i_list, mask are protected by inode->inotify_mutex of the associated inode. | ||
| 76 | * ih, inode, and wd are never written to once the watch is created. | ||
| 77 | * | ||
| 78 | * Callers must use the established inotify interfaces to access inotify_watch | ||
| 79 | * contents. The content of this structure is private to the inotify | ||
| 80 | * implementation. | ||
| 81 | */ | ||
| 82 | struct inotify_watch { | ||
| 83 | struct list_head h_list; /* entry in inotify_handle's list */ | ||
| 84 | struct list_head i_list; /* entry in inode's list */ | ||
| 85 | atomic_t count; /* reference count */ | ||
| 86 | struct inotify_handle *ih; /* associated inotify handle */ | ||
| 87 | struct inode *inode; /* associated inode */ | ||
| 88 | __s32 wd; /* watch descriptor */ | ||
| 89 | __u32 mask; /* event mask for this watch */ | ||
| 90 | }; | ||
| 91 | |||
| 92 | struct inotify_operations { | ||
| 93 | void (*handle_event)(struct inotify_watch *, u32, u32, u32, | ||
| 94 | const char *, struct inode *); | ||
| 95 | void (*destroy_watch)(struct inotify_watch *); | ||
| 96 | }; | ||
| 97 | |||
| 71 | #ifdef CONFIG_INOTIFY | 98 | #ifdef CONFIG_INOTIFY |
| 72 | 99 | ||
| 100 | /* Kernel API for producing events */ | ||
| 101 | |||
| 73 | extern void inotify_d_instantiate(struct dentry *, struct inode *); | 102 | extern void inotify_d_instantiate(struct dentry *, struct inode *); |
| 74 | extern void inotify_d_move(struct dentry *); | 103 | extern void inotify_d_move(struct dentry *); |
| 75 | extern void inotify_inode_queue_event(struct inode *, __u32, __u32, | 104 | extern void inotify_inode_queue_event(struct inode *, __u32, __u32, |
| 76 | const char *); | 105 | const char *, struct inode *); |
| 77 | extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32, | 106 | extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32, |
| 78 | const char *); | 107 | const char *); |
| 79 | extern void inotify_unmount_inodes(struct list_head *); | 108 | extern void inotify_unmount_inodes(struct list_head *); |
| 80 | extern void inotify_inode_is_dead(struct inode *); | 109 | extern void inotify_inode_is_dead(struct inode *); |
| 81 | extern u32 inotify_get_cookie(void); | 110 | extern u32 inotify_get_cookie(void); |
| 82 | 111 | ||
| 112 | /* Kernel Consumer API */ | ||
| 113 | |||
| 114 | extern struct inotify_handle *inotify_init(const struct inotify_operations *); | ||
| 115 | extern void inotify_init_watch(struct inotify_watch *); | ||
| 116 | extern void inotify_destroy(struct inotify_handle *); | ||
| 117 | extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *, | ||
| 118 | struct inotify_watch **); | ||
| 119 | extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *, | ||
| 120 | u32); | ||
| 121 | extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *, | ||
| 122 | struct inode *, __u32); | ||
| 123 | extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *); | ||
| 124 | extern int inotify_rm_wd(struct inotify_handle *, __u32); | ||
| 125 | extern void inotify_remove_watch_locked(struct inotify_handle *, | ||
| 126 | struct inotify_watch *); | ||
| 127 | extern void get_inotify_watch(struct inotify_watch *); | ||
| 128 | extern void put_inotify_watch(struct inotify_watch *); | ||
| 129 | |||
| 83 | #else | 130 | #else |
| 84 | 131 | ||
| 85 | static inline void inotify_d_instantiate(struct dentry *dentry, | 132 | static inline void inotify_d_instantiate(struct dentry *dentry, |
| @@ -93,7 +140,8 @@ static inline void inotify_d_move(struct dentry *dentry) | |||
| 93 | 140 | ||
| 94 | static inline void inotify_inode_queue_event(struct inode *inode, | 141 | static inline void inotify_inode_queue_event(struct inode *inode, |
| 95 | __u32 mask, __u32 cookie, | 142 | __u32 mask, __u32 cookie, |
| 96 | const char *filename) | 143 | const char *filename, |
| 144 | struct inode *n_inode) | ||
| 97 | { | 145 | { |
| 98 | } | 146 | } |
| 99 | 147 | ||
| @@ -116,6 +164,62 @@ static inline u32 inotify_get_cookie(void) | |||
| 116 | return 0; | 164 | return 0; |
| 117 | } | 165 | } |
| 118 | 166 | ||
| 167 | static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops) | ||
| 168 | { | ||
| 169 | return ERR_PTR(-EOPNOTSUPP); | ||
| 170 | } | ||
| 171 | |||
| 172 | static inline void inotify_init_watch(struct inotify_watch *watch) | ||
| 173 | { | ||
| 174 | } | ||
| 175 | |||
| 176 | static inline void inotify_destroy(struct inotify_handle *ih) | ||
| 177 | { | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, | ||
| 181 | struct inotify_watch **watchp) | ||
| 182 | { | ||
| 183 | return -EOPNOTSUPP; | ||
| 184 | } | ||
| 185 | |||
| 186 | static inline __s32 inotify_find_update_watch(struct inotify_handle *ih, | ||
| 187 | struct inode *inode, u32 mask) | ||
| 188 | { | ||
| 189 | return -EOPNOTSUPP; | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline __s32 inotify_add_watch(struct inotify_handle *ih, | ||
| 193 | struct inotify_watch *watch, | ||
| 194 | struct inode *inode, __u32 mask) | ||
| 195 | { | ||
| 196 | return -EOPNOTSUPP; | ||
| 197 | } | ||
| 198 | |||
| 199 | static inline int inotify_rm_watch(struct inotify_handle *ih, | ||
| 200 | struct inotify_watch *watch) | ||
| 201 | { | ||
| 202 | return -EOPNOTSUPP; | ||
| 203 | } | ||
| 204 | |||
| 205 | static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd) | ||
| 206 | { | ||
| 207 | return -EOPNOTSUPP; | ||
| 208 | } | ||
| 209 | |||
| 210 | static inline void inotify_remove_watch_locked(struct inotify_handle *ih, | ||
| 211 | struct inotify_watch *watch) | ||
| 212 | { | ||
| 213 | } | ||
| 214 | |||
| 215 | static inline void get_inotify_watch(struct inotify_watch *watch) | ||
| 216 | { | ||
| 217 | } | ||
| 218 | |||
| 219 | static inline void put_inotify_watch(struct inotify_watch *watch) | ||
| 220 | { | ||
| 221 | } | ||
| 222 | |||
| 119 | #endif /* CONFIG_INOTIFY */ | 223 | #endif /* CONFIG_INOTIFY */ |
| 120 | 224 | ||
| 121 | #endif /* __KERNEL __ */ | 225 | #endif /* __KERNEL __ */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 701b8cbceb05..267f15257040 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -494,7 +494,7 @@ struct user_struct { | |||
| 494 | atomic_t processes; /* How many processes does this user have? */ | 494 | atomic_t processes; /* How many processes does this user have? */ |
| 495 | atomic_t files; /* How many open files does this user have? */ | 495 | atomic_t files; /* How many open files does this user have? */ |
| 496 | atomic_t sigpending; /* How many pending signals does this user have? */ | 496 | atomic_t sigpending; /* How many pending signals does this user have? */ |
| 497 | #ifdef CONFIG_INOTIFY | 497 | #ifdef CONFIG_INOTIFY_USER |
| 498 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ | 498 | atomic_t inotify_watches; /* How many inotify watches does this user have? */ |
| 499 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ | 499 | atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ |
| 500 | #endif | 500 | #endif |
diff --git a/init/Kconfig b/init/Kconfig index a7697787946a..df864a358221 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
| @@ -182,7 +182,8 @@ config AUDITSYSCALL | |||
| 182 | help | 182 | help |
| 183 | Enable low-overhead system-call auditing infrastructure that | 183 | Enable low-overhead system-call auditing infrastructure that |
| 184 | can be used independently or with another kernel subsystem, | 184 | can be used independently or with another kernel subsystem, |
| 185 | such as SELinux. | 185 | such as SELinux. To use audit's filesystem watch feature, please |
| 186 | ensure that INOTIFY is configured. | ||
| 186 | 187 | ||
| 187 | config IKCONFIG | 188 | config IKCONFIG |
| 188 | bool "Kernel .config support" | 189 | bool "Kernel .config support" |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 41ecbd440fed..1511714a9585 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
| @@ -8,6 +8,8 @@ | |||
| 8 | * Lockless receive & send, fd based notify: | 8 | * Lockless receive & send, fd based notify: |
| 9 | * Manfred Spraul (manfred@colorfullife.com) | 9 | * Manfred Spraul (manfred@colorfullife.com) |
| 10 | * | 10 | * |
| 11 | * Audit: George Wilson (ltcgcw@us.ibm.com) | ||
| 12 | * | ||
| 11 | * This file is released under the GPL. | 13 | * This file is released under the GPL. |
| 12 | */ | 14 | */ |
| 13 | 15 | ||
| @@ -24,6 +26,7 @@ | |||
| 24 | #include <linux/skbuff.h> | 26 | #include <linux/skbuff.h> |
| 25 | #include <linux/netlink.h> | 27 | #include <linux/netlink.h> |
| 26 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
| 29 | #include <linux/audit.h> | ||
| 27 | #include <linux/signal.h> | 30 | #include <linux/signal.h> |
| 28 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
| 29 | 32 | ||
| @@ -657,6 +660,10 @@ asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, | |||
| 657 | char *name; | 660 | char *name; |
| 658 | int fd, error; | 661 | int fd, error; |
| 659 | 662 | ||
| 663 | error = audit_mq_open(oflag, mode, u_attr); | ||
| 664 | if (error != 0) | ||
| 665 | return error; | ||
| 666 | |||
| 660 | if (IS_ERR(name = getname(u_name))) | 667 | if (IS_ERR(name = getname(u_name))) |
| 661 | return PTR_ERR(name); | 668 | return PTR_ERR(name); |
| 662 | 669 | ||
| @@ -814,6 +821,10 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, | |||
| 814 | long timeout; | 821 | long timeout; |
| 815 | int ret; | 822 | int ret; |
| 816 | 823 | ||
| 824 | ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); | ||
| 825 | if (ret != 0) | ||
| 826 | return ret; | ||
| 827 | |||
| 817 | if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) | 828 | if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) |
| 818 | return -EINVAL; | 829 | return -EINVAL; |
| 819 | 830 | ||
| @@ -896,6 +907,10 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, | |||
| 896 | struct mqueue_inode_info *info; | 907 | struct mqueue_inode_info *info; |
| 897 | struct ext_wait_queue wait; | 908 | struct ext_wait_queue wait; |
| 898 | 909 | ||
| 910 | ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); | ||
| 911 | if (ret != 0) | ||
| 912 | return ret; | ||
| 913 | |||
| 899 | timeout = prepare_timeout(u_abs_timeout); | 914 | timeout = prepare_timeout(u_abs_timeout); |
| 900 | 915 | ||
| 901 | ret = -EBADF; | 916 | ret = -EBADF; |
| @@ -975,6 +990,10 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, | |||
| 975 | struct mqueue_inode_info *info; | 990 | struct mqueue_inode_info *info; |
| 976 | struct sk_buff *nc; | 991 | struct sk_buff *nc; |
| 977 | 992 | ||
| 993 | ret = audit_mq_notify(mqdes, u_notification); | ||
| 994 | if (ret != 0) | ||
| 995 | return ret; | ||
| 996 | |||
| 978 | nc = NULL; | 997 | nc = NULL; |
| 979 | sock = NULL; | 998 | sock = NULL; |
| 980 | if (u_notification != NULL) { | 999 | if (u_notification != NULL) { |
| @@ -1115,6 +1134,9 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, | |||
| 1115 | omqstat = info->attr; | 1134 | omqstat = info->attr; |
| 1116 | omqstat.mq_flags = filp->f_flags & O_NONBLOCK; | 1135 | omqstat.mq_flags = filp->f_flags & O_NONBLOCK; |
| 1117 | if (u_mqstat) { | 1136 | if (u_mqstat) { |
| 1137 | ret = audit_mq_getsetattr(mqdes, &mqstat); | ||
| 1138 | if (ret != 0) | ||
| 1139 | goto out; | ||
| 1118 | if (mqstat.mq_flags & O_NONBLOCK) | 1140 | if (mqstat.mq_flags & O_NONBLOCK) |
| 1119 | filp->f_flags |= O_NONBLOCK; | 1141 | filp->f_flags |= O_NONBLOCK; |
| 1120 | else | 1142 | else |
| @@ -454,6 +454,11 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
| 454 | err = audit_ipc_obj(ipcp); | 454 | err = audit_ipc_obj(ipcp); |
| 455 | if (err) | 455 | if (err) |
| 456 | goto out_unlock_up; | 456 | goto out_unlock_up; |
| 457 | if (cmd==IPC_SET) { | ||
| 458 | err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode); | ||
| 459 | if (err) | ||
| 460 | goto out_unlock_up; | ||
| 461 | } | ||
| 457 | 462 | ||
| 458 | err = -EPERM; | 463 | err = -EPERM; |
| 459 | if (current->euid != ipcp->cuid && | 464 | if (current->euid != ipcp->cuid && |
| @@ -468,10 +473,6 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
| 468 | switch (cmd) { | 473 | switch (cmd) { |
| 469 | case IPC_SET: | 474 | case IPC_SET: |
| 470 | { | 475 | { |
| 471 | err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode, ipcp); | ||
| 472 | if (err) | ||
| 473 | goto out_unlock_up; | ||
| 474 | |||
| 475 | err = -EPERM; | 476 | err = -EPERM; |
| 476 | if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) | 477 | if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) |
| 477 | goto out_unlock_up; | 478 | goto out_unlock_up; |
| @@ -828,6 +828,11 @@ static int semctl_down(int semid, int semnum, int cmd, int version, union semun | |||
| 828 | if (err) | 828 | if (err) |
| 829 | goto out_unlock; | 829 | goto out_unlock; |
| 830 | 830 | ||
| 831 | if (cmd == IPC_SET) { | ||
| 832 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); | ||
| 833 | if (err) | ||
| 834 | goto out_unlock; | ||
| 835 | } | ||
| 831 | if (current->euid != ipcp->cuid && | 836 | if (current->euid != ipcp->cuid && |
| 832 | current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { | 837 | current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { |
| 833 | err=-EPERM; | 838 | err=-EPERM; |
| @@ -844,9 +849,6 @@ static int semctl_down(int semid, int semnum, int cmd, int version, union semun | |||
| 844 | err = 0; | 849 | err = 0; |
| 845 | break; | 850 | break; |
| 846 | case IPC_SET: | 851 | case IPC_SET: |
| 847 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode, ipcp); | ||
| 848 | if (err) | ||
| 849 | goto out_unlock; | ||
| 850 | ipcp->uid = setbuf.uid; | 852 | ipcp->uid = setbuf.uid; |
| 851 | ipcp->gid = setbuf.gid; | 853 | ipcp->gid = setbuf.gid; |
| 852 | ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | 854 | ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
| @@ -643,7 +643,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
| 643 | err = audit_ipc_obj(&(shp->shm_perm)); | 643 | err = audit_ipc_obj(&(shp->shm_perm)); |
| 644 | if (err) | 644 | if (err) |
| 645 | goto out_unlock_up; | 645 | goto out_unlock_up; |
| 646 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm)); | 646 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); |
| 647 | if (err) | 647 | if (err) |
| 648 | goto out_unlock_up; | 648 | goto out_unlock_up; |
| 649 | err=-EPERM; | 649 | err=-EPERM; |
diff --git a/kernel/audit.c b/kernel/audit.c index df57b493e1cb..7dfac7031bd7 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
| @@ -56,6 +56,7 @@ | |||
| 56 | #include <linux/skbuff.h> | 56 | #include <linux/skbuff.h> |
| 57 | #include <linux/netlink.h> | 57 | #include <linux/netlink.h> |
| 58 | #include <linux/selinux.h> | 58 | #include <linux/selinux.h> |
| 59 | #include <linux/inotify.h> | ||
| 59 | 60 | ||
| 60 | #include "audit.h" | 61 | #include "audit.h" |
| 61 | 62 | ||
| @@ -89,6 +90,7 @@ static int audit_backlog_wait_overflow = 0; | |||
| 89 | /* The identity of the user shutting down the audit system. */ | 90 | /* The identity of the user shutting down the audit system. */ |
| 90 | uid_t audit_sig_uid = -1; | 91 | uid_t audit_sig_uid = -1; |
| 91 | pid_t audit_sig_pid = -1; | 92 | pid_t audit_sig_pid = -1; |
| 93 | u32 audit_sig_sid = 0; | ||
| 92 | 94 | ||
| 93 | /* Records can be lost in several ways: | 95 | /* Records can be lost in several ways: |
| 94 | 0) [suppressed in audit_alloc] | 96 | 0) [suppressed in audit_alloc] |
| @@ -102,6 +104,12 @@ static atomic_t audit_lost = ATOMIC_INIT(0); | |||
| 102 | /* The netlink socket. */ | 104 | /* The netlink socket. */ |
| 103 | static struct sock *audit_sock; | 105 | static struct sock *audit_sock; |
| 104 | 106 | ||
| 107 | /* Inotify handle. */ | ||
| 108 | struct inotify_handle *audit_ih; | ||
| 109 | |||
| 110 | /* Hash for inode-based rules */ | ||
| 111 | struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; | ||
| 112 | |||
| 105 | /* The audit_freelist is a list of pre-allocated audit buffers (if more | 113 | /* The audit_freelist is a list of pre-allocated audit buffers (if more |
| 106 | * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of | 114 | * than AUDIT_MAXFREE are in use, the audit buffer is freed instead of |
| 107 | * being placed on the freelist). */ | 115 | * being placed on the freelist). */ |
| @@ -114,10 +122,8 @@ static struct task_struct *kauditd_task; | |||
| 114 | static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); | 122 | static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait); |
| 115 | static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); | 123 | static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait); |
| 116 | 124 | ||
| 117 | /* The netlink socket is only to be read by 1 CPU, which lets us assume | 125 | /* Serialize requests from userspace. */ |
| 118 | * that list additions and deletions never happen simultaneously in | 126 | static DEFINE_MUTEX(audit_cmd_mutex); |
| 119 | * auditsc.c */ | ||
| 120 | DEFINE_MUTEX(audit_netlink_mutex); | ||
| 121 | 127 | ||
| 122 | /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting | 128 | /* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting |
| 123 | * audit records. Since printk uses a 1024 byte buffer, this buffer | 129 | * audit records. Since printk uses a 1024 byte buffer, this buffer |
| @@ -250,7 +256,7 @@ static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid) | |||
| 250 | "audit_rate_limit=%d old=%d by auid=%u", | 256 | "audit_rate_limit=%d old=%d by auid=%u", |
| 251 | limit, old, loginuid); | 257 | limit, old, loginuid); |
| 252 | audit_rate_limit = limit; | 258 | audit_rate_limit = limit; |
| 253 | return old; | 259 | return 0; |
| 254 | } | 260 | } |
| 255 | 261 | ||
| 256 | static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid) | 262 | static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid) |
| @@ -273,7 +279,7 @@ static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid) | |||
| 273 | "audit_backlog_limit=%d old=%d by auid=%u", | 279 | "audit_backlog_limit=%d old=%d by auid=%u", |
| 274 | limit, old, loginuid); | 280 | limit, old, loginuid); |
| 275 | audit_backlog_limit = limit; | 281 | audit_backlog_limit = limit; |
| 276 | return old; | 282 | return 0; |
| 277 | } | 283 | } |
| 278 | 284 | ||
| 279 | static int audit_set_enabled(int state, uid_t loginuid, u32 sid) | 285 | static int audit_set_enabled(int state, uid_t loginuid, u32 sid) |
| @@ -299,7 +305,7 @@ static int audit_set_enabled(int state, uid_t loginuid, u32 sid) | |||
| 299 | "audit_enabled=%d old=%d by auid=%u", | 305 | "audit_enabled=%d old=%d by auid=%u", |
| 300 | state, old, loginuid); | 306 | state, old, loginuid); |
| 301 | audit_enabled = state; | 307 | audit_enabled = state; |
| 302 | return old; | 308 | return 0; |
| 303 | } | 309 | } |
| 304 | 310 | ||
| 305 | static int audit_set_failure(int state, uid_t loginuid, u32 sid) | 311 | static int audit_set_failure(int state, uid_t loginuid, u32 sid) |
| @@ -327,7 +333,7 @@ static int audit_set_failure(int state, uid_t loginuid, u32 sid) | |||
| 327 | "audit_failure=%d old=%d by auid=%u", | 333 | "audit_failure=%d old=%d by auid=%u", |
| 328 | state, old, loginuid); | 334 | state, old, loginuid); |
| 329 | audit_failure = state; | 335 | audit_failure = state; |
| 330 | return old; | 336 | return 0; |
| 331 | } | 337 | } |
| 332 | 338 | ||
| 333 | static int kauditd_thread(void *dummy) | 339 | static int kauditd_thread(void *dummy) |
| @@ -363,9 +369,52 @@ static int kauditd_thread(void *dummy) | |||
| 363 | remove_wait_queue(&kauditd_wait, &wait); | 369 | remove_wait_queue(&kauditd_wait, &wait); |
| 364 | } | 370 | } |
| 365 | } | 371 | } |
| 372 | } | ||
| 373 | |||
| 374 | int audit_send_list(void *_dest) | ||
| 375 | { | ||
| 376 | struct audit_netlink_list *dest = _dest; | ||
| 377 | int pid = dest->pid; | ||
| 378 | struct sk_buff *skb; | ||
| 379 | |||
| 380 | /* wait for parent to finish and send an ACK */ | ||
| 381 | mutex_lock(&audit_cmd_mutex); | ||
| 382 | mutex_unlock(&audit_cmd_mutex); | ||
| 383 | |||
| 384 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | ||
| 385 | netlink_unicast(audit_sock, skb, pid, 0); | ||
| 386 | |||
| 387 | kfree(dest); | ||
| 388 | |||
| 366 | return 0; | 389 | return 0; |
| 367 | } | 390 | } |
| 368 | 391 | ||
| 392 | struct sk_buff *audit_make_reply(int pid, int seq, int type, int done, | ||
| 393 | int multi, void *payload, int size) | ||
| 394 | { | ||
| 395 | struct sk_buff *skb; | ||
| 396 | struct nlmsghdr *nlh; | ||
| 397 | int len = NLMSG_SPACE(size); | ||
| 398 | void *data; | ||
| 399 | int flags = multi ? NLM_F_MULTI : 0; | ||
| 400 | int t = done ? NLMSG_DONE : type; | ||
| 401 | |||
| 402 | skb = alloc_skb(len, GFP_KERNEL); | ||
| 403 | if (!skb) | ||
| 404 | return NULL; | ||
| 405 | |||
| 406 | nlh = NLMSG_PUT(skb, pid, seq, t, size); | ||
| 407 | nlh->nlmsg_flags = flags; | ||
| 408 | data = NLMSG_DATA(nlh); | ||
| 409 | memcpy(data, payload, size); | ||
| 410 | return skb; | ||
| 411 | |||
| 412 | nlmsg_failure: /* Used by NLMSG_PUT */ | ||
| 413 | if (skb) | ||
| 414 | kfree_skb(skb); | ||
| 415 | return NULL; | ||
| 416 | } | ||
| 417 | |||
| 369 | /** | 418 | /** |
| 370 | * audit_send_reply - send an audit reply message via netlink | 419 | * audit_send_reply - send an audit reply message via netlink |
| 371 | * @pid: process id to send reply to | 420 | * @pid: process id to send reply to |
| @@ -383,29 +432,13 @@ void audit_send_reply(int pid, int seq, int type, int done, int multi, | |||
| 383 | void *payload, int size) | 432 | void *payload, int size) |
| 384 | { | 433 | { |
| 385 | struct sk_buff *skb; | 434 | struct sk_buff *skb; |
| 386 | struct nlmsghdr *nlh; | 435 | skb = audit_make_reply(pid, seq, type, done, multi, payload, size); |
| 387 | int len = NLMSG_SPACE(size); | ||
| 388 | void *data; | ||
| 389 | int flags = multi ? NLM_F_MULTI : 0; | ||
| 390 | int t = done ? NLMSG_DONE : type; | ||
| 391 | |||
| 392 | skb = alloc_skb(len, GFP_KERNEL); | ||
| 393 | if (!skb) | 436 | if (!skb) |
| 394 | return; | 437 | return; |
| 395 | |||
| 396 | nlh = NLMSG_PUT(skb, pid, seq, t, size); | ||
| 397 | nlh->nlmsg_flags = flags; | ||
| 398 | data = NLMSG_DATA(nlh); | ||
| 399 | memcpy(data, payload, size); | ||
| 400 | |||
| 401 | /* Ignore failure. It'll only happen if the sender goes away, | 438 | /* Ignore failure. It'll only happen if the sender goes away, |
| 402 | because our timeout is set to infinite. */ | 439 | because our timeout is set to infinite. */ |
| 403 | netlink_unicast(audit_sock, skb, pid, 0); | 440 | netlink_unicast(audit_sock, skb, pid, 0); |
| 404 | return; | 441 | return; |
| 405 | |||
| 406 | nlmsg_failure: /* Used by NLMSG_PUT */ | ||
| 407 | if (skb) | ||
| 408 | kfree_skb(skb); | ||
| 409 | } | 442 | } |
| 410 | 443 | ||
| 411 | /* | 444 | /* |
| @@ -451,7 +484,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 451 | struct audit_buffer *ab; | 484 | struct audit_buffer *ab; |
| 452 | u16 msg_type = nlh->nlmsg_type; | 485 | u16 msg_type = nlh->nlmsg_type; |
| 453 | uid_t loginuid; /* loginuid of sender */ | 486 | uid_t loginuid; /* loginuid of sender */ |
| 454 | struct audit_sig_info sig_data; | 487 | struct audit_sig_info *sig_data; |
| 488 | char *ctx; | ||
| 489 | u32 len; | ||
| 455 | 490 | ||
| 456 | err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type); | 491 | err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type); |
| 457 | if (err) | 492 | if (err) |
| @@ -503,12 +538,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 503 | if (status_get->mask & AUDIT_STATUS_PID) { | 538 | if (status_get->mask & AUDIT_STATUS_PID) { |
| 504 | int old = audit_pid; | 539 | int old = audit_pid; |
| 505 | if (sid) { | 540 | if (sid) { |
| 506 | char *ctx = NULL; | 541 | if ((err = selinux_ctxid_to_string( |
| 507 | u32 len; | ||
| 508 | int rc; | ||
| 509 | if ((rc = selinux_ctxid_to_string( | ||
| 510 | sid, &ctx, &len))) | 542 | sid, &ctx, &len))) |
| 511 | return rc; | 543 | return err; |
| 512 | else | 544 | else |
| 513 | audit_log(NULL, GFP_KERNEL, | 545 | audit_log(NULL, GFP_KERNEL, |
| 514 | AUDIT_CONFIG_CHANGE, | 546 | AUDIT_CONFIG_CHANGE, |
| @@ -523,10 +555,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 523 | audit_pid = status_get->pid; | 555 | audit_pid = status_get->pid; |
| 524 | } | 556 | } |
| 525 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) | 557 | if (status_get->mask & AUDIT_STATUS_RATE_LIMIT) |
| 526 | audit_set_rate_limit(status_get->rate_limit, | 558 | err = audit_set_rate_limit(status_get->rate_limit, |
| 527 | loginuid, sid); | 559 | loginuid, sid); |
| 528 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) | 560 | if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT) |
| 529 | audit_set_backlog_limit(status_get->backlog_limit, | 561 | err = audit_set_backlog_limit(status_get->backlog_limit, |
| 530 | loginuid, sid); | 562 | loginuid, sid); |
| 531 | break; | 563 | break; |
| 532 | case AUDIT_USER: | 564 | case AUDIT_USER: |
| @@ -544,8 +576,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 544 | "user pid=%d uid=%u auid=%u", | 576 | "user pid=%d uid=%u auid=%u", |
| 545 | pid, uid, loginuid); | 577 | pid, uid, loginuid); |
| 546 | if (sid) { | 578 | if (sid) { |
| 547 | char *ctx = NULL; | ||
| 548 | u32 len; | ||
| 549 | if (selinux_ctxid_to_string( | 579 | if (selinux_ctxid_to_string( |
| 550 | sid, &ctx, &len)) { | 580 | sid, &ctx, &len)) { |
| 551 | audit_log_format(ab, | 581 | audit_log_format(ab, |
| @@ -584,10 +614,21 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 584 | loginuid, sid); | 614 | loginuid, sid); |
| 585 | break; | 615 | break; |
| 586 | case AUDIT_SIGNAL_INFO: | 616 | case AUDIT_SIGNAL_INFO: |
| 587 | sig_data.uid = audit_sig_uid; | 617 | err = selinux_ctxid_to_string(audit_sig_sid, &ctx, &len); |
| 588 | sig_data.pid = audit_sig_pid; | 618 | if (err) |
| 619 | return err; | ||
| 620 | sig_data = kmalloc(sizeof(*sig_data) + len, GFP_KERNEL); | ||
| 621 | if (!sig_data) { | ||
| 622 | kfree(ctx); | ||
| 623 | return -ENOMEM; | ||
| 624 | } | ||
| 625 | sig_data->uid = audit_sig_uid; | ||
| 626 | sig_data->pid = audit_sig_pid; | ||
| 627 | memcpy(sig_data->ctx, ctx, len); | ||
| 628 | kfree(ctx); | ||
| 589 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, | 629 | audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, |
| 590 | 0, 0, &sig_data, sizeof(sig_data)); | 630 | 0, 0, sig_data, sizeof(*sig_data) + len); |
| 631 | kfree(sig_data); | ||
| 591 | break; | 632 | break; |
| 592 | default: | 633 | default: |
| 593 | err = -EINVAL; | 634 | err = -EINVAL; |
| @@ -629,20 +670,30 @@ static void audit_receive(struct sock *sk, int length) | |||
| 629 | struct sk_buff *skb; | 670 | struct sk_buff *skb; |
| 630 | unsigned int qlen; | 671 | unsigned int qlen; |
| 631 | 672 | ||
| 632 | mutex_lock(&audit_netlink_mutex); | 673 | mutex_lock(&audit_cmd_mutex); |
| 633 | 674 | ||
| 634 | for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { | 675 | for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) { |
| 635 | skb = skb_dequeue(&sk->sk_receive_queue); | 676 | skb = skb_dequeue(&sk->sk_receive_queue); |
| 636 | audit_receive_skb(skb); | 677 | audit_receive_skb(skb); |
| 637 | kfree_skb(skb); | 678 | kfree_skb(skb); |
| 638 | } | 679 | } |
| 639 | mutex_unlock(&audit_netlink_mutex); | 680 | mutex_unlock(&audit_cmd_mutex); |
| 640 | } | 681 | } |
| 641 | 682 | ||
| 683 | #ifdef CONFIG_AUDITSYSCALL | ||
| 684 | static const struct inotify_operations audit_inotify_ops = { | ||
| 685 | .handle_event = audit_handle_ievent, | ||
| 686 | .destroy_watch = audit_free_parent, | ||
| 687 | }; | ||
| 688 | #endif | ||
| 642 | 689 | ||
| 643 | /* Initialize audit support at boot time. */ | 690 | /* Initialize audit support at boot time. */ |
| 644 | static int __init audit_init(void) | 691 | static int __init audit_init(void) |
| 645 | { | 692 | { |
| 693 | #ifdef CONFIG_AUDITSYSCALL | ||
| 694 | int i; | ||
| 695 | #endif | ||
| 696 | |||
| 646 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", | 697 | printk(KERN_INFO "audit: initializing netlink socket (%s)\n", |
| 647 | audit_default ? "enabled" : "disabled"); | 698 | audit_default ? "enabled" : "disabled"); |
| 648 | audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive, | 699 | audit_sock = netlink_kernel_create(NETLINK_AUDIT, 0, audit_receive, |
| @@ -661,6 +712,16 @@ static int __init audit_init(void) | |||
| 661 | selinux_audit_set_callback(&selinux_audit_rule_update); | 712 | selinux_audit_set_callback(&selinux_audit_rule_update); |
| 662 | 713 | ||
| 663 | audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); | 714 | audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized"); |
| 715 | |||
| 716 | #ifdef CONFIG_AUDITSYSCALL | ||
| 717 | audit_ih = inotify_init(&audit_inotify_ops); | ||
| 718 | if (IS_ERR(audit_ih)) | ||
| 719 | audit_panic("cannot initialize inotify handle"); | ||
| 720 | |||
| 721 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) | ||
| 722 | INIT_LIST_HEAD(&audit_inode_hash[i]); | ||
| 723 | #endif | ||
| 724 | |||
| 664 | return 0; | 725 | return 0; |
| 665 | } | 726 | } |
| 666 | __initcall(audit_init); | 727 | __initcall(audit_init); |
| @@ -690,10 +751,12 @@ static void audit_buffer_free(struct audit_buffer *ab) | |||
| 690 | kfree_skb(ab->skb); | 751 | kfree_skb(ab->skb); |
| 691 | 752 | ||
| 692 | spin_lock_irqsave(&audit_freelist_lock, flags); | 753 | spin_lock_irqsave(&audit_freelist_lock, flags); |
| 693 | if (++audit_freelist_count > AUDIT_MAXFREE) | 754 | if (audit_freelist_count > AUDIT_MAXFREE) |
| 694 | kfree(ab); | 755 | kfree(ab); |
| 695 | else | 756 | else { |
| 757 | audit_freelist_count++; | ||
| 696 | list_add(&ab->list, &audit_freelist); | 758 | list_add(&ab->list, &audit_freelist); |
| 759 | } | ||
| 697 | spin_unlock_irqrestore(&audit_freelist_lock, flags); | 760 | spin_unlock_irqrestore(&audit_freelist_lock, flags); |
| 698 | } | 761 | } |
| 699 | 762 | ||
| @@ -988,28 +1051,76 @@ void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf, | |||
| 988 | skb_put(skb, len << 1); /* new string is twice the old string */ | 1051 | skb_put(skb, len << 1); /* new string is twice the old string */ |
| 989 | } | 1052 | } |
| 990 | 1053 | ||
| 1054 | /* | ||
| 1055 | * Format a string of no more than slen characters into the audit buffer, | ||
| 1056 | * enclosed in quote marks. | ||
| 1057 | */ | ||
| 1058 | static void audit_log_n_string(struct audit_buffer *ab, size_t slen, | ||
| 1059 | const char *string) | ||
| 1060 | { | ||
| 1061 | int avail, new_len; | ||
| 1062 | unsigned char *ptr; | ||
| 1063 | struct sk_buff *skb; | ||
| 1064 | |||
| 1065 | BUG_ON(!ab->skb); | ||
| 1066 | skb = ab->skb; | ||
| 1067 | avail = skb_tailroom(skb); | ||
| 1068 | new_len = slen + 3; /* enclosing quotes + null terminator */ | ||
| 1069 | if (new_len > avail) { | ||
| 1070 | avail = audit_expand(ab, new_len); | ||
| 1071 | if (!avail) | ||
| 1072 | return; | ||
| 1073 | } | ||
| 1074 | ptr = skb->tail; | ||
| 1075 | *ptr++ = '"'; | ||
| 1076 | memcpy(ptr, string, slen); | ||
| 1077 | ptr += slen; | ||
| 1078 | *ptr++ = '"'; | ||
| 1079 | *ptr = 0; | ||
| 1080 | skb_put(skb, slen + 2); /* don't include null terminator */ | ||
| 1081 | } | ||
| 1082 | |||
| 991 | /** | 1083 | /** |
| 992 | * audit_log_unstrustedstring - log a string that may contain random characters | 1084 | * audit_log_n_unstrustedstring - log a string that may contain random characters |
| 993 | * @ab: audit_buffer | 1085 | * @ab: audit_buffer |
| 1086 | * @len: lenth of string (not including trailing null) | ||
| 994 | * @string: string to be logged | 1087 | * @string: string to be logged |
| 995 | * | 1088 | * |
| 996 | * This code will escape a string that is passed to it if the string | 1089 | * This code will escape a string that is passed to it if the string |
| 997 | * contains a control character, unprintable character, double quote mark, | 1090 | * contains a control character, unprintable character, double quote mark, |
| 998 | * or a space. Unescaped strings will start and end with a double quote mark. | 1091 | * or a space. Unescaped strings will start and end with a double quote mark. |
| 999 | * Strings that are escaped are printed in hex (2 digits per char). | 1092 | * Strings that are escaped are printed in hex (2 digits per char). |
| 1093 | * | ||
| 1094 | * The caller specifies the number of characters in the string to log, which may | ||
| 1095 | * or may not be the entire string. | ||
| 1000 | */ | 1096 | */ |
| 1001 | void audit_log_untrustedstring(struct audit_buffer *ab, const char *string) | 1097 | const char *audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len, |
| 1098 | const char *string) | ||
| 1002 | { | 1099 | { |
| 1003 | const unsigned char *p = string; | 1100 | const unsigned char *p = string; |
| 1004 | 1101 | ||
| 1005 | while (*p) { | 1102 | while (*p) { |
| 1006 | if (*p == '"' || *p < 0x21 || *p > 0x7f) { | 1103 | if (*p == '"' || *p < 0x21 || *p > 0x7f) { |
| 1007 | audit_log_hex(ab, string, strlen(string)); | 1104 | audit_log_hex(ab, string, len); |
| 1008 | return; | 1105 | return string + len + 1; |
| 1009 | } | 1106 | } |
| 1010 | p++; | 1107 | p++; |
| 1011 | } | 1108 | } |
| 1012 | audit_log_format(ab, "\"%s\"", string); | 1109 | audit_log_n_string(ab, len, string); |
| 1110 | return p + 1; | ||
| 1111 | } | ||
| 1112 | |||
| 1113 | /** | ||
| 1114 | * audit_log_unstrustedstring - log a string that may contain random characters | ||
| 1115 | * @ab: audit_buffer | ||
| 1116 | * @string: string to be logged | ||
| 1117 | * | ||
| 1118 | * Same as audit_log_n_unstrustedstring(), except that strlen is used to | ||
| 1119 | * determine string length. | ||
| 1120 | */ | ||
| 1121 | const char *audit_log_untrustedstring(struct audit_buffer *ab, const char *string) | ||
| 1122 | { | ||
| 1123 | return audit_log_n_untrustedstring(ab, strlen(string), string); | ||
| 1013 | } | 1124 | } |
| 1014 | 1125 | ||
| 1015 | /* This is a helper-function to print the escaped d_path */ | 1126 | /* This is a helper-function to print the escaped d_path */ |
diff --git a/kernel/audit.h b/kernel/audit.h index 6f733920fd32..8323e4132a33 100644 --- a/kernel/audit.h +++ b/kernel/audit.h | |||
| @@ -19,9 +19,9 @@ | |||
| 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| 20 | */ | 20 | */ |
| 21 | 21 | ||
| 22 | #include <linux/mutex.h> | ||
| 23 | #include <linux/fs.h> | 22 | #include <linux/fs.h> |
| 24 | #include <linux/audit.h> | 23 | #include <linux/audit.h> |
| 24 | #include <linux/skbuff.h> | ||
| 25 | 25 | ||
| 26 | /* 0 = no checking | 26 | /* 0 = no checking |
| 27 | 1 = put_count checking | 27 | 1 = put_count checking |
| @@ -53,6 +53,18 @@ enum audit_state { | |||
| 53 | }; | 53 | }; |
| 54 | 54 | ||
| 55 | /* Rule lists */ | 55 | /* Rule lists */ |
| 56 | struct audit_parent; | ||
| 57 | |||
| 58 | struct audit_watch { | ||
| 59 | atomic_t count; /* reference count */ | ||
| 60 | char *path; /* insertion path */ | ||
| 61 | dev_t dev; /* associated superblock device */ | ||
| 62 | unsigned long ino; /* associated inode number */ | ||
| 63 | struct audit_parent *parent; /* associated parent */ | ||
| 64 | struct list_head wlist; /* entry in parent->watches list */ | ||
| 65 | struct list_head rules; /* associated rules */ | ||
| 66 | }; | ||
| 67 | |||
| 56 | struct audit_field { | 68 | struct audit_field { |
| 57 | u32 type; | 69 | u32 type; |
| 58 | u32 val; | 70 | u32 val; |
| @@ -70,6 +82,9 @@ struct audit_krule { | |||
| 70 | u32 buflen; /* for data alloc on list rules */ | 82 | u32 buflen; /* for data alloc on list rules */ |
| 71 | u32 field_count; | 83 | u32 field_count; |
| 72 | struct audit_field *fields; | 84 | struct audit_field *fields; |
| 85 | struct audit_field *inode_f; /* quick access to an inode field */ | ||
| 86 | struct audit_watch *watch; /* associated watch */ | ||
| 87 | struct list_head rlist; /* entry in audit_watch.rules list */ | ||
| 73 | }; | 88 | }; |
| 74 | 89 | ||
| 75 | struct audit_entry { | 90 | struct audit_entry { |
| @@ -78,15 +93,53 @@ struct audit_entry { | |||
| 78 | struct audit_krule rule; | 93 | struct audit_krule rule; |
| 79 | }; | 94 | }; |
| 80 | 95 | ||
| 81 | |||
| 82 | extern int audit_pid; | 96 | extern int audit_pid; |
| 83 | extern int audit_comparator(const u32 left, const u32 op, const u32 right); | ||
| 84 | 97 | ||
| 98 | #define AUDIT_INODE_BUCKETS 32 | ||
| 99 | extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; | ||
| 100 | |||
| 101 | static inline int audit_hash_ino(u32 ino) | ||
| 102 | { | ||
| 103 | return (ino & (AUDIT_INODE_BUCKETS-1)); | ||
| 104 | } | ||
| 105 | |||
| 106 | extern int audit_comparator(const u32 left, const u32 op, const u32 right); | ||
| 107 | extern int audit_compare_dname_path(const char *dname, const char *path, | ||
| 108 | int *dirlen); | ||
| 109 | extern struct sk_buff * audit_make_reply(int pid, int seq, int type, | ||
| 110 | int done, int multi, | ||
| 111 | void *payload, int size); | ||
| 85 | extern void audit_send_reply(int pid, int seq, int type, | 112 | extern void audit_send_reply(int pid, int seq, int type, |
| 86 | int done, int multi, | 113 | int done, int multi, |
| 87 | void *payload, int size); | 114 | void *payload, int size); |
| 88 | extern void audit_log_lost(const char *message); | 115 | extern void audit_log_lost(const char *message); |
| 89 | extern void audit_panic(const char *message); | 116 | extern void audit_panic(const char *message); |
| 90 | extern struct mutex audit_netlink_mutex; | ||
| 91 | 117 | ||
| 118 | struct audit_netlink_list { | ||
| 119 | int pid; | ||
| 120 | struct sk_buff_head q; | ||
| 121 | }; | ||
| 122 | |||
| 123 | int audit_send_list(void *); | ||
| 124 | |||
| 125 | struct inotify_watch; | ||
| 126 | extern void audit_free_parent(struct inotify_watch *); | ||
| 127 | extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32, | ||
| 128 | const char *, struct inode *); | ||
| 92 | extern int selinux_audit_rule_update(void); | 129 | extern int selinux_audit_rule_update(void); |
| 130 | |||
| 131 | #ifdef CONFIG_AUDITSYSCALL | ||
| 132 | extern void __audit_signal_info(int sig, struct task_struct *t); | ||
| 133 | static inline void audit_signal_info(int sig, struct task_struct *t) | ||
| 134 | { | ||
| 135 | if (unlikely(audit_pid && t->tgid == audit_pid)) | ||
| 136 | __audit_signal_info(sig, t); | ||
| 137 | } | ||
| 138 | extern enum audit_state audit_filter_inodes(struct task_struct *, | ||
| 139 | struct audit_context *); | ||
| 140 | extern void audit_set_auditable(struct audit_context *); | ||
| 141 | #else | ||
| 142 | #define audit_signal_info(s,t) | ||
| 143 | #define audit_filter_inodes(t,c) AUDIT_DISABLED | ||
| 144 | #define audit_set_auditable(c) | ||
| 145 | #endif | ||
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 7c134906d689..4c99d2c586ed 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
| @@ -22,13 +22,59 @@ | |||
| 22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 23 | #include <linux/audit.h> | 23 | #include <linux/audit.h> |
| 24 | #include <linux/kthread.h> | 24 | #include <linux/kthread.h> |
| 25 | #include <linux/mutex.h> | ||
| 26 | #include <linux/fs.h> | ||
| 27 | #include <linux/namei.h> | ||
| 25 | #include <linux/netlink.h> | 28 | #include <linux/netlink.h> |
| 29 | #include <linux/sched.h> | ||
| 30 | #include <linux/inotify.h> | ||
| 26 | #include <linux/selinux.h> | 31 | #include <linux/selinux.h> |
| 27 | #include "audit.h" | 32 | #include "audit.h" |
| 28 | 33 | ||
| 29 | /* There are three lists of rules -- one to search at task creation | 34 | /* |
| 30 | * time, one to search at syscall entry time, and another to search at | 35 | * Locking model: |
| 31 | * syscall exit time. */ | 36 | * |
| 37 | * audit_filter_mutex: | ||
| 38 | * Synchronizes writes and blocking reads of audit's filterlist | ||
| 39 | * data. Rcu is used to traverse the filterlist and access | ||
| 40 | * contents of structs audit_entry, audit_watch and opaque | ||
| 41 | * selinux rules during filtering. If modified, these structures | ||
| 42 | * must be copied and replace their counterparts in the filterlist. | ||
| 43 | * An audit_parent struct is not accessed during filtering, so may | ||
| 44 | * be written directly provided audit_filter_mutex is held. | ||
| 45 | */ | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Reference counting: | ||
| 49 | * | ||
| 50 | * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED | ||
| 51 | * event. Each audit_watch holds a reference to its associated parent. | ||
| 52 | * | ||
| 53 | * audit_watch: if added to lists, lifetime is from audit_init_watch() to | ||
| 54 | * audit_remove_watch(). Additionally, an audit_watch may exist | ||
| 55 | * temporarily to assist in searching existing filter data. Each | ||
| 56 | * audit_krule holds a reference to its associated watch. | ||
| 57 | */ | ||
| 58 | |||
| 59 | struct audit_parent { | ||
| 60 | struct list_head ilist; /* entry in inotify registration list */ | ||
| 61 | struct list_head watches; /* associated watches */ | ||
| 62 | struct inotify_watch wdata; /* inotify watch data */ | ||
| 63 | unsigned flags; /* status flags */ | ||
| 64 | }; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * audit_parent status flags: | ||
| 68 | * | ||
| 69 | * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to | ||
| 70 | * a filesystem event to ensure we're adding audit watches to a valid parent. | ||
| 71 | * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot | ||
| 72 | * receive them while we have nameidata, but must be used for IN_MOVE_SELF which | ||
| 73 | * we can receive while holding nameidata. | ||
| 74 | */ | ||
| 75 | #define AUDIT_PARENT_INVALID 0x001 | ||
| 76 | |||
| 77 | /* Audit filter lists, defined in <linux/audit.h> */ | ||
| 32 | struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { | 78 | struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { |
| 33 | LIST_HEAD_INIT(audit_filter_list[0]), | 79 | LIST_HEAD_INIT(audit_filter_list[0]), |
| 34 | LIST_HEAD_INIT(audit_filter_list[1]), | 80 | LIST_HEAD_INIT(audit_filter_list[1]), |
| @@ -41,9 +87,53 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { | |||
| 41 | #endif | 87 | #endif |
| 42 | }; | 88 | }; |
| 43 | 89 | ||
| 90 | static DEFINE_MUTEX(audit_filter_mutex); | ||
| 91 | |||
| 92 | /* Inotify handle */ | ||
| 93 | extern struct inotify_handle *audit_ih; | ||
| 94 | |||
| 95 | /* Inotify events we care about. */ | ||
| 96 | #define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF | ||
| 97 | |||
| 98 | void audit_free_parent(struct inotify_watch *i_watch) | ||
| 99 | { | ||
| 100 | struct audit_parent *parent; | ||
| 101 | |||
| 102 | parent = container_of(i_watch, struct audit_parent, wdata); | ||
| 103 | WARN_ON(!list_empty(&parent->watches)); | ||
| 104 | kfree(parent); | ||
| 105 | } | ||
| 106 | |||
| 107 | static inline void audit_get_watch(struct audit_watch *watch) | ||
| 108 | { | ||
| 109 | atomic_inc(&watch->count); | ||
| 110 | } | ||
| 111 | |||
| 112 | static void audit_put_watch(struct audit_watch *watch) | ||
| 113 | { | ||
| 114 | if (atomic_dec_and_test(&watch->count)) { | ||
| 115 | WARN_ON(watch->parent); | ||
| 116 | WARN_ON(!list_empty(&watch->rules)); | ||
| 117 | kfree(watch->path); | ||
| 118 | kfree(watch); | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | static void audit_remove_watch(struct audit_watch *watch) | ||
| 123 | { | ||
| 124 | list_del(&watch->wlist); | ||
| 125 | put_inotify_watch(&watch->parent->wdata); | ||
| 126 | watch->parent = NULL; | ||
| 127 | audit_put_watch(watch); /* match initial get */ | ||
| 128 | } | ||
| 129 | |||
| 44 | static inline void audit_free_rule(struct audit_entry *e) | 130 | static inline void audit_free_rule(struct audit_entry *e) |
| 45 | { | 131 | { |
| 46 | int i; | 132 | int i; |
| 133 | |||
| 134 | /* some rules don't have associated watches */ | ||
| 135 | if (e->rule.watch) | ||
| 136 | audit_put_watch(e->rule.watch); | ||
| 47 | if (e->rule.fields) | 137 | if (e->rule.fields) |
| 48 | for (i = 0; i < e->rule.field_count; i++) { | 138 | for (i = 0; i < e->rule.field_count; i++) { |
| 49 | struct audit_field *f = &e->rule.fields[i]; | 139 | struct audit_field *f = &e->rule.fields[i]; |
| @@ -60,6 +150,50 @@ static inline void audit_free_rule_rcu(struct rcu_head *head) | |||
| 60 | audit_free_rule(e); | 150 | audit_free_rule(e); |
| 61 | } | 151 | } |
| 62 | 152 | ||
| 153 | /* Initialize a parent watch entry. */ | ||
| 154 | static struct audit_parent *audit_init_parent(struct nameidata *ndp) | ||
| 155 | { | ||
| 156 | struct audit_parent *parent; | ||
| 157 | s32 wd; | ||
| 158 | |||
| 159 | parent = kzalloc(sizeof(*parent), GFP_KERNEL); | ||
| 160 | if (unlikely(!parent)) | ||
| 161 | return ERR_PTR(-ENOMEM); | ||
| 162 | |||
| 163 | INIT_LIST_HEAD(&parent->watches); | ||
| 164 | parent->flags = 0; | ||
| 165 | |||
| 166 | inotify_init_watch(&parent->wdata); | ||
| 167 | /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */ | ||
| 168 | get_inotify_watch(&parent->wdata); | ||
| 169 | wd = inotify_add_watch(audit_ih, &parent->wdata, ndp->dentry->d_inode, | ||
| 170 | AUDIT_IN_WATCH); | ||
| 171 | if (wd < 0) { | ||
| 172 | audit_free_parent(&parent->wdata); | ||
| 173 | return ERR_PTR(wd); | ||
| 174 | } | ||
| 175 | |||
| 176 | return parent; | ||
| 177 | } | ||
| 178 | |||
| 179 | /* Initialize a watch entry. */ | ||
| 180 | static struct audit_watch *audit_init_watch(char *path) | ||
| 181 | { | ||
| 182 | struct audit_watch *watch; | ||
| 183 | |||
| 184 | watch = kzalloc(sizeof(*watch), GFP_KERNEL); | ||
| 185 | if (unlikely(!watch)) | ||
| 186 | return ERR_PTR(-ENOMEM); | ||
| 187 | |||
| 188 | INIT_LIST_HEAD(&watch->rules); | ||
| 189 | atomic_set(&watch->count, 1); | ||
| 190 | watch->path = path; | ||
| 191 | watch->dev = (dev_t)-1; | ||
| 192 | watch->ino = (unsigned long)-1; | ||
| 193 | |||
| 194 | return watch; | ||
| 195 | } | ||
| 196 | |||
| 63 | /* Initialize an audit filterlist entry. */ | 197 | /* Initialize an audit filterlist entry. */ |
| 64 | static inline struct audit_entry *audit_init_entry(u32 field_count) | 198 | static inline struct audit_entry *audit_init_entry(u32 field_count) |
| 65 | { | 199 | { |
| @@ -107,6 +241,43 @@ static char *audit_unpack_string(void **bufp, size_t *remain, size_t len) | |||
| 107 | return str; | 241 | return str; |
| 108 | } | 242 | } |
| 109 | 243 | ||
| 244 | /* Translate an inode field to kernel respresentation. */ | ||
| 245 | static inline int audit_to_inode(struct audit_krule *krule, | ||
| 246 | struct audit_field *f) | ||
| 247 | { | ||
| 248 | if (krule->listnr != AUDIT_FILTER_EXIT || | ||
| 249 | krule->watch || krule->inode_f) | ||
| 250 | return -EINVAL; | ||
| 251 | |||
| 252 | krule->inode_f = f; | ||
| 253 | return 0; | ||
| 254 | } | ||
| 255 | |||
| 256 | /* Translate a watch string to kernel respresentation. */ | ||
| 257 | static int audit_to_watch(struct audit_krule *krule, char *path, int len, | ||
| 258 | u32 op) | ||
| 259 | { | ||
| 260 | struct audit_watch *watch; | ||
| 261 | |||
| 262 | if (!audit_ih) | ||
| 263 | return -EOPNOTSUPP; | ||
| 264 | |||
| 265 | if (path[0] != '/' || path[len-1] == '/' || | ||
| 266 | krule->listnr != AUDIT_FILTER_EXIT || | ||
| 267 | op & ~AUDIT_EQUAL || | ||
| 268 | krule->inode_f || krule->watch) /* 1 inode # per rule, for hash */ | ||
| 269 | return -EINVAL; | ||
| 270 | |||
| 271 | watch = audit_init_watch(path); | ||
| 272 | if (unlikely(IS_ERR(watch))) | ||
| 273 | return PTR_ERR(watch); | ||
| 274 | |||
| 275 | audit_get_watch(watch); | ||
| 276 | krule->watch = watch; | ||
| 277 | |||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | |||
| 110 | /* Common user-space to kernel rule translation. */ | 281 | /* Common user-space to kernel rule translation. */ |
| 111 | static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) | 282 | static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) |
| 112 | { | 283 | { |
| @@ -128,8 +299,11 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) | |||
| 128 | #endif | 299 | #endif |
| 129 | ; | 300 | ; |
| 130 | } | 301 | } |
| 131 | if (rule->action != AUDIT_NEVER && rule->action != AUDIT_POSSIBLE && | 302 | if (unlikely(rule->action == AUDIT_POSSIBLE)) { |
| 132 | rule->action != AUDIT_ALWAYS) | 303 | printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n"); |
| 304 | goto exit_err; | ||
| 305 | } | ||
| 306 | if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS) | ||
| 133 | goto exit_err; | 307 | goto exit_err; |
| 134 | if (rule->field_count > AUDIT_MAX_FIELDS) | 308 | if (rule->field_count > AUDIT_MAX_FIELDS) |
| 135 | goto exit_err; | 309 | goto exit_err; |
| @@ -158,6 +332,7 @@ exit_err: | |||
| 158 | static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) | 332 | static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) |
| 159 | { | 333 | { |
| 160 | struct audit_entry *entry; | 334 | struct audit_entry *entry; |
| 335 | struct audit_field *f; | ||
| 161 | int err = 0; | 336 | int err = 0; |
| 162 | int i; | 337 | int i; |
| 163 | 338 | ||
| @@ -172,14 +347,37 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) | |||
| 172 | f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS); | 347 | f->type = rule->fields[i] & ~(AUDIT_NEGATE|AUDIT_OPERATORS); |
| 173 | f->val = rule->values[i]; | 348 | f->val = rule->values[i]; |
| 174 | 349 | ||
| 175 | if (f->type & AUDIT_UNUSED_BITS || | 350 | err = -EINVAL; |
| 176 | f->type == AUDIT_SE_USER || | 351 | switch(f->type) { |
| 177 | f->type == AUDIT_SE_ROLE || | 352 | default: |
| 178 | f->type == AUDIT_SE_TYPE || | ||
| 179 | f->type == AUDIT_SE_SEN || | ||
| 180 | f->type == AUDIT_SE_CLR) { | ||
| 181 | err = -EINVAL; | ||
| 182 | goto exit_free; | 353 | goto exit_free; |
| 354 | case AUDIT_PID: | ||
| 355 | case AUDIT_UID: | ||
| 356 | case AUDIT_EUID: | ||
| 357 | case AUDIT_SUID: | ||
| 358 | case AUDIT_FSUID: | ||
| 359 | case AUDIT_GID: | ||
| 360 | case AUDIT_EGID: | ||
| 361 | case AUDIT_SGID: | ||
| 362 | case AUDIT_FSGID: | ||
| 363 | case AUDIT_LOGINUID: | ||
| 364 | case AUDIT_PERS: | ||
| 365 | case AUDIT_ARCH: | ||
| 366 | case AUDIT_MSGTYPE: | ||
| 367 | case AUDIT_DEVMAJOR: | ||
| 368 | case AUDIT_DEVMINOR: | ||
| 369 | case AUDIT_EXIT: | ||
| 370 | case AUDIT_SUCCESS: | ||
| 371 | case AUDIT_ARG0: | ||
| 372 | case AUDIT_ARG1: | ||
| 373 | case AUDIT_ARG2: | ||
| 374 | case AUDIT_ARG3: | ||
| 375 | break; | ||
| 376 | case AUDIT_INODE: | ||
| 377 | err = audit_to_inode(&entry->rule, f); | ||
| 378 | if (err) | ||
| 379 | goto exit_free; | ||
| 380 | break; | ||
| 183 | } | 381 | } |
| 184 | 382 | ||
| 185 | entry->rule.vers_ops = (f->op & AUDIT_OPERATORS) ? 2 : 1; | 383 | entry->rule.vers_ops = (f->op & AUDIT_OPERATORS) ? 2 : 1; |
| @@ -196,6 +394,18 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule) | |||
| 196 | } | 394 | } |
| 197 | } | 395 | } |
| 198 | 396 | ||
| 397 | f = entry->rule.inode_f; | ||
| 398 | if (f) { | ||
| 399 | switch(f->op) { | ||
| 400 | case AUDIT_NOT_EQUAL: | ||
| 401 | entry->rule.inode_f = NULL; | ||
| 402 | case AUDIT_EQUAL: | ||
| 403 | break; | ||
| 404 | default: | ||
| 405 | goto exit_free; | ||
| 406 | } | ||
| 407 | } | ||
| 408 | |||
| 199 | exit_nofree: | 409 | exit_nofree: |
| 200 | return entry; | 410 | return entry; |
| 201 | 411 | ||
| @@ -210,6 +420,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 210 | { | 420 | { |
| 211 | int err = 0; | 421 | int err = 0; |
| 212 | struct audit_entry *entry; | 422 | struct audit_entry *entry; |
| 423 | struct audit_field *f; | ||
| 213 | void *bufp; | 424 | void *bufp; |
| 214 | size_t remain = datasz - sizeof(struct audit_rule_data); | 425 | size_t remain = datasz - sizeof(struct audit_rule_data); |
| 215 | int i; | 426 | int i; |
| @@ -235,6 +446,29 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 235 | f->se_str = NULL; | 446 | f->se_str = NULL; |
| 236 | f->se_rule = NULL; | 447 | f->se_rule = NULL; |
| 237 | switch(f->type) { | 448 | switch(f->type) { |
| 449 | case AUDIT_PID: | ||
| 450 | case AUDIT_UID: | ||
| 451 | case AUDIT_EUID: | ||
| 452 | case AUDIT_SUID: | ||
| 453 | case AUDIT_FSUID: | ||
| 454 | case AUDIT_GID: | ||
| 455 | case AUDIT_EGID: | ||
| 456 | case AUDIT_SGID: | ||
| 457 | case AUDIT_FSGID: | ||
| 458 | case AUDIT_LOGINUID: | ||
| 459 | case AUDIT_PERS: | ||
| 460 | case AUDIT_ARCH: | ||
| 461 | case AUDIT_MSGTYPE: | ||
| 462 | case AUDIT_PPID: | ||
| 463 | case AUDIT_DEVMAJOR: | ||
| 464 | case AUDIT_DEVMINOR: | ||
| 465 | case AUDIT_EXIT: | ||
| 466 | case AUDIT_SUCCESS: | ||
| 467 | case AUDIT_ARG0: | ||
| 468 | case AUDIT_ARG1: | ||
| 469 | case AUDIT_ARG2: | ||
| 470 | case AUDIT_ARG3: | ||
| 471 | break; | ||
| 238 | case AUDIT_SE_USER: | 472 | case AUDIT_SE_USER: |
| 239 | case AUDIT_SE_ROLE: | 473 | case AUDIT_SE_ROLE: |
| 240 | case AUDIT_SE_TYPE: | 474 | case AUDIT_SE_TYPE: |
| @@ -260,6 +494,37 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data, | |||
| 260 | } else | 494 | } else |
| 261 | f->se_str = str; | 495 | f->se_str = str; |
| 262 | break; | 496 | break; |
| 497 | case AUDIT_WATCH: | ||
| 498 | str = audit_unpack_string(&bufp, &remain, f->val); | ||
| 499 | if (IS_ERR(str)) | ||
| 500 | goto exit_free; | ||
| 501 | entry->rule.buflen += f->val; | ||
| 502 | |||
| 503 | err = audit_to_watch(&entry->rule, str, f->val, f->op); | ||
| 504 | if (err) { | ||
| 505 | kfree(str); | ||
| 506 | goto exit_free; | ||
| 507 | } | ||
| 508 | break; | ||
| 509 | case AUDIT_INODE: | ||
| 510 | err = audit_to_inode(&entry->rule, f); | ||
| 511 | if (err) | ||
| 512 | goto exit_free; | ||
| 513 | break; | ||
| 514 | default: | ||
| 515 | goto exit_free; | ||
| 516 | } | ||
| 517 | } | ||
| 518 | |||
| 519 | f = entry->rule.inode_f; | ||
| 520 | if (f) { | ||
| 521 | switch(f->op) { | ||
| 522 | case AUDIT_NOT_EQUAL: | ||
| 523 | entry->rule.inode_f = NULL; | ||
| 524 | case AUDIT_EQUAL: | ||
| 525 | break; | ||
| 526 | default: | ||
| 527 | goto exit_free; | ||
| 263 | } | 528 | } |
| 264 | } | 529 | } |
| 265 | 530 | ||
| @@ -291,7 +556,7 @@ static struct audit_rule *audit_krule_to_rule(struct audit_krule *krule) | |||
| 291 | 556 | ||
| 292 | rule = kmalloc(sizeof(*rule), GFP_KERNEL); | 557 | rule = kmalloc(sizeof(*rule), GFP_KERNEL); |
| 293 | if (unlikely(!rule)) | 558 | if (unlikely(!rule)) |
| 294 | return ERR_PTR(-ENOMEM); | 559 | return NULL; |
| 295 | memset(rule, 0, sizeof(*rule)); | 560 | memset(rule, 0, sizeof(*rule)); |
| 296 | 561 | ||
| 297 | rule->flags = krule->flags | krule->listnr; | 562 | rule->flags = krule->flags | krule->listnr; |
| @@ -322,7 +587,7 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) | |||
| 322 | 587 | ||
| 323 | data = kmalloc(sizeof(*data) + krule->buflen, GFP_KERNEL); | 588 | data = kmalloc(sizeof(*data) + krule->buflen, GFP_KERNEL); |
| 324 | if (unlikely(!data)) | 589 | if (unlikely(!data)) |
| 325 | return ERR_PTR(-ENOMEM); | 590 | return NULL; |
| 326 | memset(data, 0, sizeof(*data)); | 591 | memset(data, 0, sizeof(*data)); |
| 327 | 592 | ||
| 328 | data->flags = krule->flags | krule->listnr; | 593 | data->flags = krule->flags | krule->listnr; |
| @@ -343,6 +608,10 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule) | |||
| 343 | data->buflen += data->values[i] = | 608 | data->buflen += data->values[i] = |
| 344 | audit_pack_string(&bufp, f->se_str); | 609 | audit_pack_string(&bufp, f->se_str); |
| 345 | break; | 610 | break; |
| 611 | case AUDIT_WATCH: | ||
| 612 | data->buflen += data->values[i] = | ||
| 613 | audit_pack_string(&bufp, krule->watch->path); | ||
| 614 | break; | ||
| 346 | default: | 615 | default: |
| 347 | data->values[i] = f->val; | 616 | data->values[i] = f->val; |
| 348 | } | 617 | } |
| @@ -378,6 +647,10 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) | |||
| 378 | if (strcmp(a->fields[i].se_str, b->fields[i].se_str)) | 647 | if (strcmp(a->fields[i].se_str, b->fields[i].se_str)) |
| 379 | return 1; | 648 | return 1; |
| 380 | break; | 649 | break; |
| 650 | case AUDIT_WATCH: | ||
| 651 | if (strcmp(a->watch->path, b->watch->path)) | ||
| 652 | return 1; | ||
| 653 | break; | ||
| 381 | default: | 654 | default: |
| 382 | if (a->fields[i].val != b->fields[i].val) | 655 | if (a->fields[i].val != b->fields[i].val) |
| 383 | return 1; | 656 | return 1; |
| @@ -391,6 +664,32 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b) | |||
| 391 | return 0; | 664 | return 0; |
| 392 | } | 665 | } |
| 393 | 666 | ||
| 667 | /* Duplicate the given audit watch. The new watch's rules list is initialized | ||
| 668 | * to an empty list and wlist is undefined. */ | ||
| 669 | static struct audit_watch *audit_dupe_watch(struct audit_watch *old) | ||
| 670 | { | ||
| 671 | char *path; | ||
| 672 | struct audit_watch *new; | ||
| 673 | |||
| 674 | path = kstrdup(old->path, GFP_KERNEL); | ||
| 675 | if (unlikely(!path)) | ||
| 676 | return ERR_PTR(-ENOMEM); | ||
| 677 | |||
| 678 | new = audit_init_watch(path); | ||
| 679 | if (unlikely(IS_ERR(new))) { | ||
| 680 | kfree(path); | ||
| 681 | goto out; | ||
| 682 | } | ||
| 683 | |||
| 684 | new->dev = old->dev; | ||
| 685 | new->ino = old->ino; | ||
| 686 | get_inotify_watch(&old->parent->wdata); | ||
| 687 | new->parent = old->parent; | ||
| 688 | |||
| 689 | out: | ||
| 690 | return new; | ||
| 691 | } | ||
| 692 | |||
| 394 | /* Duplicate selinux field information. The se_rule is opaque, so must be | 693 | /* Duplicate selinux field information. The se_rule is opaque, so must be |
| 395 | * re-initialized. */ | 694 | * re-initialized. */ |
| 396 | static inline int audit_dupe_selinux_field(struct audit_field *df, | 695 | static inline int audit_dupe_selinux_field(struct audit_field *df, |
| @@ -422,8 +721,11 @@ static inline int audit_dupe_selinux_field(struct audit_field *df, | |||
| 422 | /* Duplicate an audit rule. This will be a deep copy with the exception | 721 | /* Duplicate an audit rule. This will be a deep copy with the exception |
| 423 | * of the watch - that pointer is carried over. The selinux specific fields | 722 | * of the watch - that pointer is carried over. The selinux specific fields |
| 424 | * will be updated in the copy. The point is to be able to replace the old | 723 | * will be updated in the copy. The point is to be able to replace the old |
| 425 | * rule with the new rule in the filterlist, then free the old rule. */ | 724 | * rule with the new rule in the filterlist, then free the old rule. |
| 426 | static struct audit_entry *audit_dupe_rule(struct audit_krule *old) | 725 | * The rlist element is undefined; list manipulations are handled apart from |
| 726 | * the initial copy. */ | ||
| 727 | static struct audit_entry *audit_dupe_rule(struct audit_krule *old, | ||
| 728 | struct audit_watch *watch) | ||
| 427 | { | 729 | { |
| 428 | u32 fcount = old->field_count; | 730 | u32 fcount = old->field_count; |
| 429 | struct audit_entry *entry; | 731 | struct audit_entry *entry; |
| @@ -442,6 +744,8 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old) | |||
| 442 | for (i = 0; i < AUDIT_BITMASK_SIZE; i++) | 744 | for (i = 0; i < AUDIT_BITMASK_SIZE; i++) |
| 443 | new->mask[i] = old->mask[i]; | 745 | new->mask[i] = old->mask[i]; |
| 444 | new->buflen = old->buflen; | 746 | new->buflen = old->buflen; |
| 747 | new->inode_f = old->inode_f; | ||
| 748 | new->watch = NULL; | ||
| 445 | new->field_count = old->field_count; | 749 | new->field_count = old->field_count; |
| 446 | memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount); | 750 | memcpy(new->fields, old->fields, sizeof(struct audit_field) * fcount); |
| 447 | 751 | ||
| @@ -463,68 +767,409 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old) | |||
| 463 | } | 767 | } |
| 464 | } | 768 | } |
| 465 | 769 | ||
| 770 | if (watch) { | ||
| 771 | audit_get_watch(watch); | ||
| 772 | new->watch = watch; | ||
| 773 | } | ||
| 774 | |||
| 466 | return entry; | 775 | return entry; |
| 467 | } | 776 | } |
| 468 | 777 | ||
| 469 | /* Add rule to given filterlist if not a duplicate. Protected by | 778 | /* Update inode info in audit rules based on filesystem event. */ |
| 470 | * audit_netlink_mutex. */ | 779 | static void audit_update_watch(struct audit_parent *parent, |
| 780 | const char *dname, dev_t dev, | ||
| 781 | unsigned long ino, unsigned invalidating) | ||
| 782 | { | ||
| 783 | struct audit_watch *owatch, *nwatch, *nextw; | ||
| 784 | struct audit_krule *r, *nextr; | ||
| 785 | struct audit_entry *oentry, *nentry; | ||
| 786 | struct audit_buffer *ab; | ||
| 787 | |||
| 788 | mutex_lock(&audit_filter_mutex); | ||
| 789 | list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) { | ||
| 790 | if (audit_compare_dname_path(dname, owatch->path, NULL)) | ||
| 791 | continue; | ||
| 792 | |||
| 793 | /* If the update involves invalidating rules, do the inode-based | ||
| 794 | * filtering now, so we don't omit records. */ | ||
| 795 | if (invalidating && | ||
| 796 | audit_filter_inodes(current, current->audit_context) == AUDIT_RECORD_CONTEXT) | ||
| 797 | audit_set_auditable(current->audit_context); | ||
| 798 | |||
| 799 | nwatch = audit_dupe_watch(owatch); | ||
| 800 | if (unlikely(IS_ERR(nwatch))) { | ||
| 801 | mutex_unlock(&audit_filter_mutex); | ||
| 802 | audit_panic("error updating watch, skipping"); | ||
| 803 | return; | ||
| 804 | } | ||
| 805 | nwatch->dev = dev; | ||
| 806 | nwatch->ino = ino; | ||
| 807 | |||
| 808 | list_for_each_entry_safe(r, nextr, &owatch->rules, rlist) { | ||
| 809 | |||
| 810 | oentry = container_of(r, struct audit_entry, rule); | ||
| 811 | list_del(&oentry->rule.rlist); | ||
| 812 | list_del_rcu(&oentry->list); | ||
| 813 | |||
| 814 | nentry = audit_dupe_rule(&oentry->rule, nwatch); | ||
| 815 | if (unlikely(IS_ERR(nentry))) | ||
| 816 | audit_panic("error updating watch, removing"); | ||
| 817 | else { | ||
| 818 | int h = audit_hash_ino((u32)ino); | ||
| 819 | list_add(&nentry->rule.rlist, &nwatch->rules); | ||
| 820 | list_add_rcu(&nentry->list, &audit_inode_hash[h]); | ||
| 821 | } | ||
| 822 | |||
| 823 | call_rcu(&oentry->rcu, audit_free_rule_rcu); | ||
| 824 | } | ||
| 825 | |||
| 826 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); | ||
| 827 | audit_log_format(ab, "audit updated rules specifying watch="); | ||
| 828 | audit_log_untrustedstring(ab, owatch->path); | ||
| 829 | audit_log_format(ab, " with dev=%u ino=%lu\n", dev, ino); | ||
| 830 | audit_log_end(ab); | ||
| 831 | |||
| 832 | audit_remove_watch(owatch); | ||
| 833 | goto add_watch_to_parent; /* event applies to a single watch */ | ||
| 834 | } | ||
| 835 | mutex_unlock(&audit_filter_mutex); | ||
| 836 | return; | ||
| 837 | |||
| 838 | add_watch_to_parent: | ||
| 839 | list_add(&nwatch->wlist, &parent->watches); | ||
| 840 | mutex_unlock(&audit_filter_mutex); | ||
| 841 | return; | ||
| 842 | } | ||
| 843 | |||
| 844 | /* Remove all watches & rules associated with a parent that is going away. */ | ||
| 845 | static void audit_remove_parent_watches(struct audit_parent *parent) | ||
| 846 | { | ||
| 847 | struct audit_watch *w, *nextw; | ||
| 848 | struct audit_krule *r, *nextr; | ||
| 849 | struct audit_entry *e; | ||
| 850 | |||
| 851 | mutex_lock(&audit_filter_mutex); | ||
| 852 | parent->flags |= AUDIT_PARENT_INVALID; | ||
| 853 | list_for_each_entry_safe(w, nextw, &parent->watches, wlist) { | ||
| 854 | list_for_each_entry_safe(r, nextr, &w->rules, rlist) { | ||
| 855 | e = container_of(r, struct audit_entry, rule); | ||
| 856 | list_del(&r->rlist); | ||
| 857 | list_del_rcu(&e->list); | ||
| 858 | call_rcu(&e->rcu, audit_free_rule_rcu); | ||
| 859 | |||
| 860 | audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE, | ||
| 861 | "audit implicitly removed rule from list=%d\n", | ||
| 862 | AUDIT_FILTER_EXIT); | ||
| 863 | } | ||
| 864 | audit_remove_watch(w); | ||
| 865 | } | ||
| 866 | mutex_unlock(&audit_filter_mutex); | ||
| 867 | } | ||
| 868 | |||
| 869 | /* Unregister inotify watches for parents on in_list. | ||
| 870 | * Generates an IN_IGNORED event. */ | ||
| 871 | static void audit_inotify_unregister(struct list_head *in_list) | ||
| 872 | { | ||
| 873 | struct audit_parent *p, *n; | ||
| 874 | |||
| 875 | list_for_each_entry_safe(p, n, in_list, ilist) { | ||
| 876 | list_del(&p->ilist); | ||
| 877 | inotify_rm_watch(audit_ih, &p->wdata); | ||
| 878 | /* the put matching the get in audit_do_del_rule() */ | ||
| 879 | put_inotify_watch(&p->wdata); | ||
| 880 | } | ||
| 881 | } | ||
| 882 | |||
| 883 | /* Find an existing audit rule. | ||
| 884 | * Caller must hold audit_filter_mutex to prevent stale rule data. */ | ||
| 885 | static struct audit_entry *audit_find_rule(struct audit_entry *entry, | ||
| 886 | struct list_head *list) | ||
| 887 | { | ||
| 888 | struct audit_entry *e, *found = NULL; | ||
| 889 | int h; | ||
| 890 | |||
| 891 | if (entry->rule.watch) { | ||
| 892 | /* we don't know the inode number, so must walk entire hash */ | ||
| 893 | for (h = 0; h < AUDIT_INODE_BUCKETS; h++) { | ||
| 894 | list = &audit_inode_hash[h]; | ||
| 895 | list_for_each_entry(e, list, list) | ||
| 896 | if (!audit_compare_rule(&entry->rule, &e->rule)) { | ||
| 897 | found = e; | ||
| 898 | goto out; | ||
| 899 | } | ||
| 900 | } | ||
| 901 | goto out; | ||
| 902 | } | ||
| 903 | |||
| 904 | list_for_each_entry(e, list, list) | ||
| 905 | if (!audit_compare_rule(&entry->rule, &e->rule)) { | ||
| 906 | found = e; | ||
| 907 | goto out; | ||
| 908 | } | ||
| 909 | |||
| 910 | out: | ||
| 911 | return found; | ||
| 912 | } | ||
| 913 | |||
| 914 | /* Get path information necessary for adding watches. */ | ||
| 915 | static int audit_get_nd(char *path, struct nameidata **ndp, | ||
| 916 | struct nameidata **ndw) | ||
| 917 | { | ||
| 918 | struct nameidata *ndparent, *ndwatch; | ||
| 919 | int err; | ||
| 920 | |||
| 921 | ndparent = kmalloc(sizeof(*ndparent), GFP_KERNEL); | ||
| 922 | if (unlikely(!ndparent)) | ||
| 923 | return -ENOMEM; | ||
| 924 | |||
| 925 | ndwatch = kmalloc(sizeof(*ndwatch), GFP_KERNEL); | ||
| 926 | if (unlikely(!ndwatch)) { | ||
| 927 | kfree(ndparent); | ||
| 928 | return -ENOMEM; | ||
| 929 | } | ||
| 930 | |||
| 931 | err = path_lookup(path, LOOKUP_PARENT, ndparent); | ||
| 932 | if (err) { | ||
| 933 | kfree(ndparent); | ||
| 934 | kfree(ndwatch); | ||
| 935 | return err; | ||
| 936 | } | ||
| 937 | |||
| 938 | err = path_lookup(path, 0, ndwatch); | ||
| 939 | if (err) { | ||
| 940 | kfree(ndwatch); | ||
| 941 | ndwatch = NULL; | ||
| 942 | } | ||
| 943 | |||
| 944 | *ndp = ndparent; | ||
| 945 | *ndw = ndwatch; | ||
| 946 | |||
| 947 | return 0; | ||
| 948 | } | ||
| 949 | |||
| 950 | /* Release resources used for watch path information. */ | ||
| 951 | static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw) | ||
| 952 | { | ||
| 953 | if (ndp) { | ||
| 954 | path_release(ndp); | ||
| 955 | kfree(ndp); | ||
| 956 | } | ||
| 957 | if (ndw) { | ||
| 958 | path_release(ndw); | ||
| 959 | kfree(ndw); | ||
| 960 | } | ||
| 961 | } | ||
| 962 | |||
| 963 | /* Associate the given rule with an existing parent inotify_watch. | ||
| 964 | * Caller must hold audit_filter_mutex. */ | ||
| 965 | static void audit_add_to_parent(struct audit_krule *krule, | ||
| 966 | struct audit_parent *parent) | ||
| 967 | { | ||
| 968 | struct audit_watch *w, *watch = krule->watch; | ||
| 969 | int watch_found = 0; | ||
| 970 | |||
| 971 | list_for_each_entry(w, &parent->watches, wlist) { | ||
| 972 | if (strcmp(watch->path, w->path)) | ||
| 973 | continue; | ||
| 974 | |||
| 975 | watch_found = 1; | ||
| 976 | |||
| 977 | /* put krule's and initial refs to temporary watch */ | ||
| 978 | audit_put_watch(watch); | ||
| 979 | audit_put_watch(watch); | ||
| 980 | |||
| 981 | audit_get_watch(w); | ||
| 982 | krule->watch = watch = w; | ||
| 983 | break; | ||
| 984 | } | ||
| 985 | |||
| 986 | if (!watch_found) { | ||
| 987 | get_inotify_watch(&parent->wdata); | ||
| 988 | watch->parent = parent; | ||
| 989 | |||
| 990 | list_add(&watch->wlist, &parent->watches); | ||
| 991 | } | ||
| 992 | list_add(&krule->rlist, &watch->rules); | ||
| 993 | } | ||
| 994 | |||
| 995 | /* Find a matching watch entry, or add this one. | ||
| 996 | * Caller must hold audit_filter_mutex. */ | ||
| 997 | static int audit_add_watch(struct audit_krule *krule, struct nameidata *ndp, | ||
| 998 | struct nameidata *ndw) | ||
| 999 | { | ||
| 1000 | struct audit_watch *watch = krule->watch; | ||
| 1001 | struct inotify_watch *i_watch; | ||
| 1002 | struct audit_parent *parent; | ||
| 1003 | int ret = 0; | ||
| 1004 | |||
| 1005 | /* update watch filter fields */ | ||
| 1006 | if (ndw) { | ||
| 1007 | watch->dev = ndw->dentry->d_inode->i_sb->s_dev; | ||
| 1008 | watch->ino = ndw->dentry->d_inode->i_ino; | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | /* The audit_filter_mutex must not be held during inotify calls because | ||
| 1012 | * we hold it during inotify event callback processing. If an existing | ||
| 1013 | * inotify watch is found, inotify_find_watch() grabs a reference before | ||
| 1014 | * returning. | ||
| 1015 | */ | ||
| 1016 | mutex_unlock(&audit_filter_mutex); | ||
| 1017 | |||
| 1018 | if (inotify_find_watch(audit_ih, ndp->dentry->d_inode, &i_watch) < 0) { | ||
| 1019 | parent = audit_init_parent(ndp); | ||
| 1020 | if (IS_ERR(parent)) { | ||
| 1021 | /* caller expects mutex locked */ | ||
| 1022 | mutex_lock(&audit_filter_mutex); | ||
| 1023 | return PTR_ERR(parent); | ||
| 1024 | } | ||
| 1025 | } else | ||
| 1026 | parent = container_of(i_watch, struct audit_parent, wdata); | ||
| 1027 | |||
| 1028 | mutex_lock(&audit_filter_mutex); | ||
| 1029 | |||
| 1030 | /* parent was moved before we took audit_filter_mutex */ | ||
| 1031 | if (parent->flags & AUDIT_PARENT_INVALID) | ||
| 1032 | ret = -ENOENT; | ||
| 1033 | else | ||
| 1034 | audit_add_to_parent(krule, parent); | ||
| 1035 | |||
| 1036 | /* match get in audit_init_parent or inotify_find_watch */ | ||
| 1037 | put_inotify_watch(&parent->wdata); | ||
| 1038 | return ret; | ||
| 1039 | } | ||
| 1040 | |||
| 1041 | /* Add rule to given filterlist if not a duplicate. */ | ||
| 471 | static inline int audit_add_rule(struct audit_entry *entry, | 1042 | static inline int audit_add_rule(struct audit_entry *entry, |
| 472 | struct list_head *list) | 1043 | struct list_head *list) |
| 473 | { | 1044 | { |
| 474 | struct audit_entry *e; | 1045 | struct audit_entry *e; |
| 1046 | struct audit_field *inode_f = entry->rule.inode_f; | ||
| 1047 | struct audit_watch *watch = entry->rule.watch; | ||
| 1048 | struct nameidata *ndp, *ndw; | ||
| 1049 | int h, err, putnd_needed = 0; | ||
| 1050 | |||
| 1051 | if (inode_f) { | ||
| 1052 | h = audit_hash_ino(inode_f->val); | ||
| 1053 | list = &audit_inode_hash[h]; | ||
| 1054 | } | ||
| 475 | 1055 | ||
| 476 | /* Do not use the _rcu iterator here, since this is the only | 1056 | mutex_lock(&audit_filter_mutex); |
| 477 | * addition routine. */ | 1057 | e = audit_find_rule(entry, list); |
| 478 | list_for_each_entry(e, list, list) { | 1058 | mutex_unlock(&audit_filter_mutex); |
| 479 | if (!audit_compare_rule(&entry->rule, &e->rule)) | 1059 | if (e) { |
| 480 | return -EEXIST; | 1060 | err = -EEXIST; |
| 1061 | goto error; | ||
| 1062 | } | ||
| 1063 | |||
| 1064 | /* Avoid calling path_lookup under audit_filter_mutex. */ | ||
| 1065 | if (watch) { | ||
| 1066 | err = audit_get_nd(watch->path, &ndp, &ndw); | ||
| 1067 | if (err) | ||
| 1068 | goto error; | ||
| 1069 | putnd_needed = 1; | ||
| 1070 | } | ||
| 1071 | |||
| 1072 | mutex_lock(&audit_filter_mutex); | ||
| 1073 | if (watch) { | ||
| 1074 | /* audit_filter_mutex is dropped and re-taken during this call */ | ||
| 1075 | err = audit_add_watch(&entry->rule, ndp, ndw); | ||
| 1076 | if (err) { | ||
| 1077 | mutex_unlock(&audit_filter_mutex); | ||
| 1078 | goto error; | ||
| 1079 | } | ||
| 1080 | h = audit_hash_ino((u32)watch->ino); | ||
| 1081 | list = &audit_inode_hash[h]; | ||
| 481 | } | 1082 | } |
| 482 | 1083 | ||
| 483 | if (entry->rule.flags & AUDIT_FILTER_PREPEND) { | 1084 | if (entry->rule.flags & AUDIT_FILTER_PREPEND) { |
| 484 | list_add_rcu(&entry->list, list); | 1085 | list_add_rcu(&entry->list, list); |
| 1086 | entry->rule.flags &= ~AUDIT_FILTER_PREPEND; | ||
| 485 | } else { | 1087 | } else { |
| 486 | list_add_tail_rcu(&entry->list, list); | 1088 | list_add_tail_rcu(&entry->list, list); |
| 487 | } | 1089 | } |
| 1090 | mutex_unlock(&audit_filter_mutex); | ||
| 488 | 1091 | ||
| 489 | return 0; | 1092 | if (putnd_needed) |
| 1093 | audit_put_nd(ndp, ndw); | ||
| 1094 | |||
| 1095 | return 0; | ||
| 1096 | |||
| 1097 | error: | ||
| 1098 | if (putnd_needed) | ||
| 1099 | audit_put_nd(ndp, ndw); | ||
| 1100 | if (watch) | ||
| 1101 | audit_put_watch(watch); /* tmp watch, matches initial get */ | ||
| 1102 | return err; | ||
| 490 | } | 1103 | } |
| 491 | 1104 | ||
| 492 | /* Remove an existing rule from filterlist. Protected by | 1105 | /* Remove an existing rule from filterlist. */ |
| 493 | * audit_netlink_mutex. */ | ||
| 494 | static inline int audit_del_rule(struct audit_entry *entry, | 1106 | static inline int audit_del_rule(struct audit_entry *entry, |
| 495 | struct list_head *list) | 1107 | struct list_head *list) |
| 496 | { | 1108 | { |
| 497 | struct audit_entry *e; | 1109 | struct audit_entry *e; |
| 1110 | struct audit_field *inode_f = entry->rule.inode_f; | ||
| 1111 | struct audit_watch *watch, *tmp_watch = entry->rule.watch; | ||
| 1112 | LIST_HEAD(inotify_list); | ||
| 1113 | int h, ret = 0; | ||
| 1114 | |||
| 1115 | if (inode_f) { | ||
| 1116 | h = audit_hash_ino(inode_f->val); | ||
| 1117 | list = &audit_inode_hash[h]; | ||
| 1118 | } | ||
| 498 | 1119 | ||
| 499 | /* Do not use the _rcu iterator here, since this is the only | 1120 | mutex_lock(&audit_filter_mutex); |
| 500 | * deletion routine. */ | 1121 | e = audit_find_rule(entry, list); |
| 501 | list_for_each_entry(e, list, list) { | 1122 | if (!e) { |
| 502 | if (!audit_compare_rule(&entry->rule, &e->rule)) { | 1123 | mutex_unlock(&audit_filter_mutex); |
| 503 | list_del_rcu(&e->list); | 1124 | ret = -ENOENT; |
| 504 | call_rcu(&e->rcu, audit_free_rule_rcu); | 1125 | goto out; |
| 505 | return 0; | 1126 | } |
| 1127 | |||
| 1128 | watch = e->rule.watch; | ||
| 1129 | if (watch) { | ||
| 1130 | struct audit_parent *parent = watch->parent; | ||
| 1131 | |||
| 1132 | list_del(&e->rule.rlist); | ||
| 1133 | |||
| 1134 | if (list_empty(&watch->rules)) { | ||
| 1135 | audit_remove_watch(watch); | ||
| 1136 | |||
| 1137 | if (list_empty(&parent->watches)) { | ||
| 1138 | /* Put parent on the inotify un-registration | ||
| 1139 | * list. Grab a reference before releasing | ||
| 1140 | * audit_filter_mutex, to be released in | ||
| 1141 | * audit_inotify_unregister(). */ | ||
| 1142 | list_add(&parent->ilist, &inotify_list); | ||
| 1143 | get_inotify_watch(&parent->wdata); | ||
| 1144 | } | ||
| 506 | } | 1145 | } |
| 507 | } | 1146 | } |
| 508 | return -ENOENT; /* No matching rule */ | 1147 | |
| 1148 | list_del_rcu(&e->list); | ||
| 1149 | call_rcu(&e->rcu, audit_free_rule_rcu); | ||
| 1150 | |||
| 1151 | mutex_unlock(&audit_filter_mutex); | ||
| 1152 | |||
| 1153 | if (!list_empty(&inotify_list)) | ||
| 1154 | audit_inotify_unregister(&inotify_list); | ||
| 1155 | |||
| 1156 | out: | ||
| 1157 | if (tmp_watch) | ||
| 1158 | audit_put_watch(tmp_watch); /* match initial get */ | ||
| 1159 | |||
| 1160 | return ret; | ||
| 509 | } | 1161 | } |
| 510 | 1162 | ||
| 511 | /* List rules using struct audit_rule. Exists for backward | 1163 | /* List rules using struct audit_rule. Exists for backward |
| 512 | * compatibility with userspace. */ | 1164 | * compatibility with userspace. */ |
| 513 | static int audit_list(void *_dest) | 1165 | static void audit_list(int pid, int seq, struct sk_buff_head *q) |
| 514 | { | 1166 | { |
| 515 | int pid, seq; | 1167 | struct sk_buff *skb; |
| 516 | int *dest = _dest; | ||
| 517 | struct audit_entry *entry; | 1168 | struct audit_entry *entry; |
| 518 | int i; | 1169 | int i; |
| 519 | 1170 | ||
| 520 | pid = dest[0]; | 1171 | /* This is a blocking read, so use audit_filter_mutex instead of rcu |
| 521 | seq = dest[1]; | 1172 | * iterator to sync with list writers. */ |
| 522 | kfree(dest); | ||
| 523 | |||
| 524 | mutex_lock(&audit_netlink_mutex); | ||
| 525 | |||
| 526 | /* The *_rcu iterators not needed here because we are | ||
| 527 | always called with audit_netlink_mutex held. */ | ||
| 528 | for (i=0; i<AUDIT_NR_FILTERS; i++) { | 1173 | for (i=0; i<AUDIT_NR_FILTERS; i++) { |
| 529 | list_for_each_entry(entry, &audit_filter_list[i], list) { | 1174 | list_for_each_entry(entry, &audit_filter_list[i], list) { |
| 530 | struct audit_rule *rule; | 1175 | struct audit_rule *rule; |
| @@ -532,33 +1177,41 @@ static int audit_list(void *_dest) | |||
| 532 | rule = audit_krule_to_rule(&entry->rule); | 1177 | rule = audit_krule_to_rule(&entry->rule); |
| 533 | if (unlikely(!rule)) | 1178 | if (unlikely(!rule)) |
| 534 | break; | 1179 | break; |
| 535 | audit_send_reply(pid, seq, AUDIT_LIST, 0, 1, | 1180 | skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1, |
| 536 | rule, sizeof(*rule)); | 1181 | rule, sizeof(*rule)); |
| 1182 | if (skb) | ||
| 1183 | skb_queue_tail(q, skb); | ||
| 537 | kfree(rule); | 1184 | kfree(rule); |
| 538 | } | 1185 | } |
| 539 | } | 1186 | } |
| 540 | audit_send_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0); | 1187 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) { |
| 541 | 1188 | list_for_each_entry(entry, &audit_inode_hash[i], list) { | |
| 542 | mutex_unlock(&audit_netlink_mutex); | 1189 | struct audit_rule *rule; |
| 543 | return 0; | 1190 | |
| 1191 | rule = audit_krule_to_rule(&entry->rule); | ||
| 1192 | if (unlikely(!rule)) | ||
| 1193 | break; | ||
| 1194 | skb = audit_make_reply(pid, seq, AUDIT_LIST, 0, 1, | ||
| 1195 | rule, sizeof(*rule)); | ||
| 1196 | if (skb) | ||
| 1197 | skb_queue_tail(q, skb); | ||
| 1198 | kfree(rule); | ||
| 1199 | } | ||
| 1200 | } | ||
| 1201 | skb = audit_make_reply(pid, seq, AUDIT_LIST, 1, 1, NULL, 0); | ||
| 1202 | if (skb) | ||
| 1203 | skb_queue_tail(q, skb); | ||
| 544 | } | 1204 | } |
| 545 | 1205 | ||
| 546 | /* List rules using struct audit_rule_data. */ | 1206 | /* List rules using struct audit_rule_data. */ |
| 547 | static int audit_list_rules(void *_dest) | 1207 | static void audit_list_rules(int pid, int seq, struct sk_buff_head *q) |
| 548 | { | 1208 | { |
| 549 | int pid, seq; | 1209 | struct sk_buff *skb; |
| 550 | int *dest = _dest; | ||
| 551 | struct audit_entry *e; | 1210 | struct audit_entry *e; |
| 552 | int i; | 1211 | int i; |
| 553 | 1212 | ||
| 554 | pid = dest[0]; | 1213 | /* This is a blocking read, so use audit_filter_mutex instead of rcu |
| 555 | seq = dest[1]; | 1214 | * iterator to sync with list writers. */ |
| 556 | kfree(dest); | ||
| 557 | |||
| 558 | mutex_lock(&audit_netlink_mutex); | ||
| 559 | |||
| 560 | /* The *_rcu iterators not needed here because we are | ||
| 561 | always called with audit_netlink_mutex held. */ | ||
| 562 | for (i=0; i<AUDIT_NR_FILTERS; i++) { | 1215 | for (i=0; i<AUDIT_NR_FILTERS; i++) { |
| 563 | list_for_each_entry(e, &audit_filter_list[i], list) { | 1216 | list_for_each_entry(e, &audit_filter_list[i], list) { |
| 564 | struct audit_rule_data *data; | 1217 | struct audit_rule_data *data; |
| @@ -566,15 +1219,30 @@ static int audit_list_rules(void *_dest) | |||
| 566 | data = audit_krule_to_data(&e->rule); | 1219 | data = audit_krule_to_data(&e->rule); |
| 567 | if (unlikely(!data)) | 1220 | if (unlikely(!data)) |
| 568 | break; | 1221 | break; |
| 569 | audit_send_reply(pid, seq, AUDIT_LIST_RULES, 0, 1, | 1222 | skb = audit_make_reply(pid, seq, AUDIT_LIST_RULES, 0, 1, |
| 570 | data, sizeof(*data)); | 1223 | data, sizeof(*data) + data->buflen); |
| 1224 | if (skb) | ||
| 1225 | skb_queue_tail(q, skb); | ||
| 571 | kfree(data); | 1226 | kfree(data); |
| 572 | } | 1227 | } |
| 573 | } | 1228 | } |
| 574 | audit_send_reply(pid, seq, AUDIT_LIST_RULES, 1, 1, NULL, 0); | 1229 | for (i=0; i< AUDIT_INODE_BUCKETS; i++) { |
| 1230 | list_for_each_entry(e, &audit_inode_hash[i], list) { | ||
| 1231 | struct audit_rule_data *data; | ||
| 575 | 1232 | ||
| 576 | mutex_unlock(&audit_netlink_mutex); | 1233 | data = audit_krule_to_data(&e->rule); |
| 577 | return 0; | 1234 | if (unlikely(!data)) |
| 1235 | break; | ||
| 1236 | skb = audit_make_reply(pid, seq, AUDIT_LIST_RULES, 0, 1, | ||
| 1237 | data, sizeof(*data) + data->buflen); | ||
| 1238 | if (skb) | ||
| 1239 | skb_queue_tail(q, skb); | ||
| 1240 | kfree(data); | ||
| 1241 | } | ||
| 1242 | } | ||
| 1243 | skb = audit_make_reply(pid, seq, AUDIT_LIST_RULES, 1, 1, NULL, 0); | ||
| 1244 | if (skb) | ||
| 1245 | skb_queue_tail(q, skb); | ||
| 578 | } | 1246 | } |
| 579 | 1247 | ||
| 580 | /** | 1248 | /** |
| @@ -592,7 +1260,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, | |||
| 592 | size_t datasz, uid_t loginuid, u32 sid) | 1260 | size_t datasz, uid_t loginuid, u32 sid) |
| 593 | { | 1261 | { |
| 594 | struct task_struct *tsk; | 1262 | struct task_struct *tsk; |
| 595 | int *dest; | 1263 | struct audit_netlink_list *dest; |
| 596 | int err = 0; | 1264 | int err = 0; |
| 597 | struct audit_entry *entry; | 1265 | struct audit_entry *entry; |
| 598 | 1266 | ||
| @@ -605,18 +1273,22 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, | |||
| 605 | * happen if we're actually running in the context of auditctl | 1273 | * happen if we're actually running in the context of auditctl |
| 606 | * trying to _send_ the stuff */ | 1274 | * trying to _send_ the stuff */ |
| 607 | 1275 | ||
| 608 | dest = kmalloc(2 * sizeof(int), GFP_KERNEL); | 1276 | dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); |
| 609 | if (!dest) | 1277 | if (!dest) |
| 610 | return -ENOMEM; | 1278 | return -ENOMEM; |
| 611 | dest[0] = pid; | 1279 | dest->pid = pid; |
| 612 | dest[1] = seq; | 1280 | skb_queue_head_init(&dest->q); |
| 613 | 1281 | ||
| 1282 | mutex_lock(&audit_filter_mutex); | ||
| 614 | if (type == AUDIT_LIST) | 1283 | if (type == AUDIT_LIST) |
| 615 | tsk = kthread_run(audit_list, dest, "audit_list"); | 1284 | audit_list(pid, seq, &dest->q); |
| 616 | else | 1285 | else |
| 617 | tsk = kthread_run(audit_list_rules, dest, | 1286 | audit_list_rules(pid, seq, &dest->q); |
| 618 | "audit_list_rules"); | 1287 | mutex_unlock(&audit_filter_mutex); |
| 1288 | |||
| 1289 | tsk = kthread_run(audit_send_list, dest, "audit_send_list"); | ||
| 619 | if (IS_ERR(tsk)) { | 1290 | if (IS_ERR(tsk)) { |
| 1291 | skb_queue_purge(&dest->q); | ||
| 620 | kfree(dest); | 1292 | kfree(dest); |
| 621 | err = PTR_ERR(tsk); | 1293 | err = PTR_ERR(tsk); |
| 622 | } | 1294 | } |
| @@ -632,6 +1304,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, | |||
| 632 | 1304 | ||
| 633 | err = audit_add_rule(entry, | 1305 | err = audit_add_rule(entry, |
| 634 | &audit_filter_list[entry->rule.listnr]); | 1306 | &audit_filter_list[entry->rule.listnr]); |
| 1307 | |||
| 635 | if (sid) { | 1308 | if (sid) { |
| 636 | char *ctx = NULL; | 1309 | char *ctx = NULL; |
| 637 | u32 len; | 1310 | u32 len; |
| @@ -712,7 +1385,43 @@ int audit_comparator(const u32 left, const u32 op, const u32 right) | |||
| 712 | return 0; | 1385 | return 0; |
| 713 | } | 1386 | } |
| 714 | 1387 | ||
| 1388 | /* Compare given dentry name with last component in given path, | ||
| 1389 | * return of 0 indicates a match. */ | ||
| 1390 | int audit_compare_dname_path(const char *dname, const char *path, | ||
| 1391 | int *dirlen) | ||
| 1392 | { | ||
| 1393 | int dlen, plen; | ||
| 1394 | const char *p; | ||
| 715 | 1395 | ||
| 1396 | if (!dname || !path) | ||
| 1397 | return 1; | ||
| 1398 | |||
| 1399 | dlen = strlen(dname); | ||
| 1400 | plen = strlen(path); | ||
| 1401 | if (plen < dlen) | ||
| 1402 | return 1; | ||
| 1403 | |||
| 1404 | /* disregard trailing slashes */ | ||
| 1405 | p = path + plen - 1; | ||
| 1406 | while ((*p == '/') && (p > path)) | ||
| 1407 | p--; | ||
| 1408 | |||
| 1409 | /* find last path component */ | ||
| 1410 | p = p - dlen + 1; | ||
| 1411 | if (p < path) | ||
| 1412 | return 1; | ||
| 1413 | else if (p > path) { | ||
| 1414 | if (*--p != '/') | ||
| 1415 | return 1; | ||
| 1416 | else | ||
| 1417 | p++; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | /* return length of path's directory component */ | ||
| 1421 | if (dirlen) | ||
| 1422 | *dirlen = p - path; | ||
| 1423 | return strncmp(p, dname, dlen); | ||
| 1424 | } | ||
| 716 | 1425 | ||
| 717 | static int audit_filter_user_rules(struct netlink_skb_parms *cb, | 1426 | static int audit_filter_user_rules(struct netlink_skb_parms *cb, |
| 718 | struct audit_krule *rule, | 1427 | struct audit_krule *rule, |
| @@ -744,7 +1453,6 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb, | |||
| 744 | } | 1453 | } |
| 745 | switch (rule->action) { | 1454 | switch (rule->action) { |
| 746 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; | 1455 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; |
| 747 | case AUDIT_POSSIBLE: *state = AUDIT_BUILD_CONTEXT; break; | ||
| 748 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; | 1456 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; |
| 749 | } | 1457 | } |
| 750 | return 1; | 1458 | return 1; |
| @@ -826,32 +1534,65 @@ static inline int audit_rule_has_selinux(struct audit_krule *rule) | |||
| 826 | int selinux_audit_rule_update(void) | 1534 | int selinux_audit_rule_update(void) |
| 827 | { | 1535 | { |
| 828 | struct audit_entry *entry, *n, *nentry; | 1536 | struct audit_entry *entry, *n, *nentry; |
| 1537 | struct audit_watch *watch; | ||
| 829 | int i, err = 0; | 1538 | int i, err = 0; |
| 830 | 1539 | ||
| 831 | /* audit_netlink_mutex synchronizes the writers */ | 1540 | /* audit_filter_mutex synchronizes the writers */ |
| 832 | mutex_lock(&audit_netlink_mutex); | 1541 | mutex_lock(&audit_filter_mutex); |
| 833 | 1542 | ||
| 834 | for (i = 0; i < AUDIT_NR_FILTERS; i++) { | 1543 | for (i = 0; i < AUDIT_NR_FILTERS; i++) { |
| 835 | list_for_each_entry_safe(entry, n, &audit_filter_list[i], list) { | 1544 | list_for_each_entry_safe(entry, n, &audit_filter_list[i], list) { |
| 836 | if (!audit_rule_has_selinux(&entry->rule)) | 1545 | if (!audit_rule_has_selinux(&entry->rule)) |
| 837 | continue; | 1546 | continue; |
| 838 | 1547 | ||
| 839 | nentry = audit_dupe_rule(&entry->rule); | 1548 | watch = entry->rule.watch; |
| 1549 | nentry = audit_dupe_rule(&entry->rule, watch); | ||
| 840 | if (unlikely(IS_ERR(nentry))) { | 1550 | if (unlikely(IS_ERR(nentry))) { |
| 841 | /* save the first error encountered for the | 1551 | /* save the first error encountered for the |
| 842 | * return value */ | 1552 | * return value */ |
| 843 | if (!err) | 1553 | if (!err) |
| 844 | err = PTR_ERR(nentry); | 1554 | err = PTR_ERR(nentry); |
| 845 | audit_panic("error updating selinux filters"); | 1555 | audit_panic("error updating selinux filters"); |
| 1556 | if (watch) | ||
| 1557 | list_del(&entry->rule.rlist); | ||
| 846 | list_del_rcu(&entry->list); | 1558 | list_del_rcu(&entry->list); |
| 847 | } else { | 1559 | } else { |
| 1560 | if (watch) { | ||
| 1561 | list_add(&nentry->rule.rlist, | ||
| 1562 | &watch->rules); | ||
| 1563 | list_del(&entry->rule.rlist); | ||
| 1564 | } | ||
| 848 | list_replace_rcu(&entry->list, &nentry->list); | 1565 | list_replace_rcu(&entry->list, &nentry->list); |
| 849 | } | 1566 | } |
| 850 | call_rcu(&entry->rcu, audit_free_rule_rcu); | 1567 | call_rcu(&entry->rcu, audit_free_rule_rcu); |
| 851 | } | 1568 | } |
| 852 | } | 1569 | } |
| 853 | 1570 | ||
| 854 | mutex_unlock(&audit_netlink_mutex); | 1571 | mutex_unlock(&audit_filter_mutex); |
| 855 | 1572 | ||
| 856 | return err; | 1573 | return err; |
| 857 | } | 1574 | } |
| 1575 | |||
| 1576 | /* Update watch data in audit rules based on inotify events. */ | ||
| 1577 | void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask, | ||
| 1578 | u32 cookie, const char *dname, struct inode *inode) | ||
| 1579 | { | ||
| 1580 | struct audit_parent *parent; | ||
| 1581 | |||
| 1582 | parent = container_of(i_watch, struct audit_parent, wdata); | ||
| 1583 | |||
| 1584 | if (mask & (IN_CREATE|IN_MOVED_TO) && inode) | ||
| 1585 | audit_update_watch(parent, dname, inode->i_sb->s_dev, | ||
| 1586 | inode->i_ino, 0); | ||
| 1587 | else if (mask & (IN_DELETE|IN_MOVED_FROM)) | ||
| 1588 | audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1); | ||
| 1589 | /* inotify automatically removes the watch and sends IN_IGNORED */ | ||
| 1590 | else if (mask & (IN_DELETE_SELF|IN_UNMOUNT)) | ||
| 1591 | audit_remove_parent_watches(parent); | ||
| 1592 | /* inotify does not remove the watch, so remove it manually */ | ||
| 1593 | else if(mask & IN_MOVE_SELF) { | ||
| 1594 | audit_remove_parent_watches(parent); | ||
| 1595 | inotify_remove_watch_locked(audit_ih, i_watch); | ||
| 1596 | } else if (mask & IN_IGNORED) | ||
| 1597 | put_inotify_watch(i_watch); | ||
| 1598 | } | ||
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 1c03a4ed1b27..b097ccb4eb7e 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. | 4 | * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. |
| 5 | * Copyright 2005 Hewlett-Packard Development Company, L.P. | 5 | * Copyright 2005 Hewlett-Packard Development Company, L.P. |
| 6 | * Copyright (C) 2005 IBM Corporation | 6 | * Copyright (C) 2005, 2006 IBM Corporation |
| 7 | * All Rights Reserved. | 7 | * All Rights Reserved. |
| 8 | * | 8 | * |
| 9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
| @@ -29,6 +29,9 @@ | |||
| 29 | * this file -- see entry.S) is based on a GPL'd patch written by | 29 | * this file -- see entry.S) is based on a GPL'd patch written by |
| 30 | * okir@suse.de and Copyright 2003 SuSE Linux AG. | 30 | * okir@suse.de and Copyright 2003 SuSE Linux AG. |
| 31 | * | 31 | * |
| 32 | * POSIX message queue support added by George Wilson <ltcgcw@us.ibm.com>, | ||
| 33 | * 2006. | ||
| 34 | * | ||
| 32 | * The support of additional filter rules compares (>, <, >=, <=) was | 35 | * The support of additional filter rules compares (>, <, >=, <=) was |
| 33 | * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. | 36 | * added by Dustin Kirkland <dustin.kirkland@us.ibm.com>, 2005. |
| 34 | * | 37 | * |
| @@ -49,6 +52,7 @@ | |||
| 49 | #include <linux/module.h> | 52 | #include <linux/module.h> |
| 50 | #include <linux/mount.h> | 53 | #include <linux/mount.h> |
| 51 | #include <linux/socket.h> | 54 | #include <linux/socket.h> |
| 55 | #include <linux/mqueue.h> | ||
| 52 | #include <linux/audit.h> | 56 | #include <linux/audit.h> |
| 53 | #include <linux/personality.h> | 57 | #include <linux/personality.h> |
| 54 | #include <linux/time.h> | 58 | #include <linux/time.h> |
| @@ -59,6 +63,8 @@ | |||
| 59 | #include <linux/list.h> | 63 | #include <linux/list.h> |
| 60 | #include <linux/tty.h> | 64 | #include <linux/tty.h> |
| 61 | #include <linux/selinux.h> | 65 | #include <linux/selinux.h> |
| 66 | #include <linux/binfmts.h> | ||
| 67 | #include <linux/syscalls.h> | ||
| 62 | 68 | ||
| 63 | #include "audit.h" | 69 | #include "audit.h" |
| 64 | 70 | ||
| @@ -76,6 +82,9 @@ extern int audit_enabled; | |||
| 76 | * path_lookup. */ | 82 | * path_lookup. */ |
| 77 | #define AUDIT_NAMES_RESERVED 7 | 83 | #define AUDIT_NAMES_RESERVED 7 |
| 78 | 84 | ||
| 85 | /* Indicates that audit should log the full pathname. */ | ||
| 86 | #define AUDIT_NAME_FULL -1 | ||
| 87 | |||
| 79 | /* When fs/namei.c:getname() is called, we store the pointer in name and | 88 | /* When fs/namei.c:getname() is called, we store the pointer in name and |
| 80 | * we don't let putname() free it (instead we free all of the saved | 89 | * we don't let putname() free it (instead we free all of the saved |
| 81 | * pointers at syscall exit time). | 90 | * pointers at syscall exit time). |
| @@ -83,8 +92,9 @@ extern int audit_enabled; | |||
| 83 | * Further, in fs/namei.c:path_lookup() we store the inode and device. */ | 92 | * Further, in fs/namei.c:path_lookup() we store the inode and device. */ |
| 84 | struct audit_names { | 93 | struct audit_names { |
| 85 | const char *name; | 94 | const char *name; |
| 95 | int name_len; /* number of name's characters to log */ | ||
| 96 | unsigned name_put; /* call __putname() for this name */ | ||
| 86 | unsigned long ino; | 97 | unsigned long ino; |
| 87 | unsigned long pino; | ||
| 88 | dev_t dev; | 98 | dev_t dev; |
| 89 | umode_t mode; | 99 | umode_t mode; |
| 90 | uid_t uid; | 100 | uid_t uid; |
| @@ -100,6 +110,33 @@ struct audit_aux_data { | |||
| 100 | 110 | ||
| 101 | #define AUDIT_AUX_IPCPERM 0 | 111 | #define AUDIT_AUX_IPCPERM 0 |
| 102 | 112 | ||
| 113 | struct audit_aux_data_mq_open { | ||
| 114 | struct audit_aux_data d; | ||
| 115 | int oflag; | ||
| 116 | mode_t mode; | ||
| 117 | struct mq_attr attr; | ||
| 118 | }; | ||
| 119 | |||
| 120 | struct audit_aux_data_mq_sendrecv { | ||
| 121 | struct audit_aux_data d; | ||
| 122 | mqd_t mqdes; | ||
| 123 | size_t msg_len; | ||
| 124 | unsigned int msg_prio; | ||
| 125 | struct timespec abs_timeout; | ||
| 126 | }; | ||
| 127 | |||
| 128 | struct audit_aux_data_mq_notify { | ||
| 129 | struct audit_aux_data d; | ||
| 130 | mqd_t mqdes; | ||
| 131 | struct sigevent notification; | ||
| 132 | }; | ||
| 133 | |||
| 134 | struct audit_aux_data_mq_getsetattr { | ||
| 135 | struct audit_aux_data d; | ||
| 136 | mqd_t mqdes; | ||
| 137 | struct mq_attr mqstat; | ||
| 138 | }; | ||
| 139 | |||
| 103 | struct audit_aux_data_ipcctl { | 140 | struct audit_aux_data_ipcctl { |
| 104 | struct audit_aux_data d; | 141 | struct audit_aux_data d; |
| 105 | struct ipc_perm p; | 142 | struct ipc_perm p; |
| @@ -110,6 +147,13 @@ struct audit_aux_data_ipcctl { | |||
| 110 | u32 osid; | 147 | u32 osid; |
| 111 | }; | 148 | }; |
| 112 | 149 | ||
| 150 | struct audit_aux_data_execve { | ||
| 151 | struct audit_aux_data d; | ||
| 152 | int argc; | ||
| 153 | int envc; | ||
| 154 | char mem[0]; | ||
| 155 | }; | ||
| 156 | |||
| 113 | struct audit_aux_data_socketcall { | 157 | struct audit_aux_data_socketcall { |
| 114 | struct audit_aux_data d; | 158 | struct audit_aux_data d; |
| 115 | int nargs; | 159 | int nargs; |
| @@ -148,7 +192,7 @@ struct audit_context { | |||
| 148 | struct audit_aux_data *aux; | 192 | struct audit_aux_data *aux; |
| 149 | 193 | ||
| 150 | /* Save things to print about task_struct */ | 194 | /* Save things to print about task_struct */ |
| 151 | pid_t pid; | 195 | pid_t pid, ppid; |
| 152 | uid_t uid, euid, suid, fsuid; | 196 | uid_t uid, euid, suid, fsuid; |
| 153 | gid_t gid, egid, sgid, fsgid; | 197 | gid_t gid, egid, sgid, fsgid; |
| 154 | unsigned long personality; | 198 | unsigned long personality; |
| @@ -160,12 +204,13 @@ struct audit_context { | |||
| 160 | #endif | 204 | #endif |
| 161 | }; | 205 | }; |
| 162 | 206 | ||
| 163 | 207 | /* Determine if any context name data matches a rule's watch data */ | |
| 164 | /* Compare a task_struct with an audit_rule. Return 1 on match, 0 | 208 | /* Compare a task_struct with an audit_rule. Return 1 on match, 0 |
| 165 | * otherwise. */ | 209 | * otherwise. */ |
| 166 | static int audit_filter_rules(struct task_struct *tsk, | 210 | static int audit_filter_rules(struct task_struct *tsk, |
| 167 | struct audit_krule *rule, | 211 | struct audit_krule *rule, |
| 168 | struct audit_context *ctx, | 212 | struct audit_context *ctx, |
| 213 | struct audit_names *name, | ||
| 169 | enum audit_state *state) | 214 | enum audit_state *state) |
| 170 | { | 215 | { |
| 171 | int i, j, need_sid = 1; | 216 | int i, j, need_sid = 1; |
| @@ -179,6 +224,10 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 179 | case AUDIT_PID: | 224 | case AUDIT_PID: |
| 180 | result = audit_comparator(tsk->pid, f->op, f->val); | 225 | result = audit_comparator(tsk->pid, f->op, f->val); |
| 181 | break; | 226 | break; |
| 227 | case AUDIT_PPID: | ||
| 228 | if (ctx) | ||
| 229 | result = audit_comparator(ctx->ppid, f->op, f->val); | ||
| 230 | break; | ||
| 182 | case AUDIT_UID: | 231 | case AUDIT_UID: |
| 183 | result = audit_comparator(tsk->uid, f->op, f->val); | 232 | result = audit_comparator(tsk->uid, f->op, f->val); |
| 184 | break; | 233 | break; |
| @@ -224,7 +273,10 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 224 | } | 273 | } |
| 225 | break; | 274 | break; |
| 226 | case AUDIT_DEVMAJOR: | 275 | case AUDIT_DEVMAJOR: |
| 227 | if (ctx) { | 276 | if (name) |
| 277 | result = audit_comparator(MAJOR(name->dev), | ||
| 278 | f->op, f->val); | ||
| 279 | else if (ctx) { | ||
| 228 | for (j = 0; j < ctx->name_count; j++) { | 280 | for (j = 0; j < ctx->name_count; j++) { |
| 229 | if (audit_comparator(MAJOR(ctx->names[j].dev), f->op, f->val)) { | 281 | if (audit_comparator(MAJOR(ctx->names[j].dev), f->op, f->val)) { |
| 230 | ++result; | 282 | ++result; |
| @@ -234,7 +286,10 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 234 | } | 286 | } |
| 235 | break; | 287 | break; |
| 236 | case AUDIT_DEVMINOR: | 288 | case AUDIT_DEVMINOR: |
| 237 | if (ctx) { | 289 | if (name) |
| 290 | result = audit_comparator(MINOR(name->dev), | ||
| 291 | f->op, f->val); | ||
| 292 | else if (ctx) { | ||
| 238 | for (j = 0; j < ctx->name_count; j++) { | 293 | for (j = 0; j < ctx->name_count; j++) { |
| 239 | if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) { | 294 | if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) { |
| 240 | ++result; | 295 | ++result; |
| @@ -244,16 +299,22 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 244 | } | 299 | } |
| 245 | break; | 300 | break; |
| 246 | case AUDIT_INODE: | 301 | case AUDIT_INODE: |
| 247 | if (ctx) { | 302 | if (name) |
| 303 | result = (name->ino == f->val); | ||
| 304 | else if (ctx) { | ||
| 248 | for (j = 0; j < ctx->name_count; j++) { | 305 | for (j = 0; j < ctx->name_count; j++) { |
| 249 | if (audit_comparator(ctx->names[j].ino, f->op, f->val) || | 306 | if (audit_comparator(ctx->names[j].ino, f->op, f->val)) { |
| 250 | audit_comparator(ctx->names[j].pino, f->op, f->val)) { | ||
| 251 | ++result; | 307 | ++result; |
| 252 | break; | 308 | break; |
| 253 | } | 309 | } |
| 254 | } | 310 | } |
| 255 | } | 311 | } |
| 256 | break; | 312 | break; |
| 313 | case AUDIT_WATCH: | ||
| 314 | if (name && rule->watch->ino != (unsigned long)-1) | ||
| 315 | result = (name->dev == rule->watch->dev && | ||
| 316 | name->ino == rule->watch->ino); | ||
| 317 | break; | ||
| 257 | case AUDIT_LOGINUID: | 318 | case AUDIT_LOGINUID: |
| 258 | result = 0; | 319 | result = 0; |
| 259 | if (ctx) | 320 | if (ctx) |
| @@ -294,7 +355,6 @@ static int audit_filter_rules(struct task_struct *tsk, | |||
| 294 | } | 355 | } |
| 295 | switch (rule->action) { | 356 | switch (rule->action) { |
| 296 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; | 357 | case AUDIT_NEVER: *state = AUDIT_DISABLED; break; |
| 297 | case AUDIT_POSSIBLE: *state = AUDIT_BUILD_CONTEXT; break; | ||
| 298 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; | 358 | case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; |
| 299 | } | 359 | } |
| 300 | return 1; | 360 | return 1; |
| @@ -311,7 +371,7 @@ static enum audit_state audit_filter_task(struct task_struct *tsk) | |||
| 311 | 371 | ||
| 312 | rcu_read_lock(); | 372 | rcu_read_lock(); |
| 313 | list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { | 373 | list_for_each_entry_rcu(e, &audit_filter_list[AUDIT_FILTER_TASK], list) { |
| 314 | if (audit_filter_rules(tsk, &e->rule, NULL, &state)) { | 374 | if (audit_filter_rules(tsk, &e->rule, NULL, NULL, &state)) { |
| 315 | rcu_read_unlock(); | 375 | rcu_read_unlock(); |
| 316 | return state; | 376 | return state; |
| 317 | } | 377 | } |
| @@ -341,8 +401,47 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, | |||
| 341 | int bit = AUDIT_BIT(ctx->major); | 401 | int bit = AUDIT_BIT(ctx->major); |
| 342 | 402 | ||
| 343 | list_for_each_entry_rcu(e, list, list) { | 403 | list_for_each_entry_rcu(e, list, list) { |
| 344 | if ((e->rule.mask[word] & bit) == bit | 404 | if ((e->rule.mask[word] & bit) == bit && |
| 345 | && audit_filter_rules(tsk, &e->rule, ctx, &state)) { | 405 | audit_filter_rules(tsk, &e->rule, ctx, NULL, |
| 406 | &state)) { | ||
| 407 | rcu_read_unlock(); | ||
| 408 | return state; | ||
| 409 | } | ||
| 410 | } | ||
| 411 | } | ||
| 412 | rcu_read_unlock(); | ||
| 413 | return AUDIT_BUILD_CONTEXT; | ||
| 414 | } | ||
| 415 | |||
| 416 | /* At syscall exit time, this filter is called if any audit_names[] have been | ||
| 417 | * collected during syscall processing. We only check rules in sublists at hash | ||
| 418 | * buckets applicable to the inode numbers in audit_names[]. | ||
| 419 | * Regarding audit_state, same rules apply as for audit_filter_syscall(). | ||
| 420 | */ | ||
| 421 | enum audit_state audit_filter_inodes(struct task_struct *tsk, | ||
| 422 | struct audit_context *ctx) | ||
| 423 | { | ||
| 424 | int i; | ||
| 425 | struct audit_entry *e; | ||
| 426 | enum audit_state state; | ||
| 427 | |||
| 428 | if (audit_pid && tsk->tgid == audit_pid) | ||
| 429 | return AUDIT_DISABLED; | ||
| 430 | |||
| 431 | rcu_read_lock(); | ||
| 432 | for (i = 0; i < ctx->name_count; i++) { | ||
| 433 | int word = AUDIT_WORD(ctx->major); | ||
| 434 | int bit = AUDIT_BIT(ctx->major); | ||
| 435 | struct audit_names *n = &ctx->names[i]; | ||
| 436 | int h = audit_hash_ino((u32)n->ino); | ||
| 437 | struct list_head *list = &audit_inode_hash[h]; | ||
| 438 | |||
| 439 | if (list_empty(list)) | ||
| 440 | continue; | ||
| 441 | |||
| 442 | list_for_each_entry_rcu(e, list, list) { | ||
| 443 | if ((e->rule.mask[word] & bit) == bit && | ||
| 444 | audit_filter_rules(tsk, &e->rule, ctx, n, &state)) { | ||
| 346 | rcu_read_unlock(); | 445 | rcu_read_unlock(); |
| 347 | return state; | 446 | return state; |
| 348 | } | 447 | } |
| @@ -352,6 +451,11 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, | |||
| 352 | return AUDIT_BUILD_CONTEXT; | 451 | return AUDIT_BUILD_CONTEXT; |
| 353 | } | 452 | } |
| 354 | 453 | ||
| 454 | void audit_set_auditable(struct audit_context *ctx) | ||
| 455 | { | ||
| 456 | ctx->auditable = 1; | ||
| 457 | } | ||
| 458 | |||
| 355 | static inline struct audit_context *audit_get_context(struct task_struct *tsk, | 459 | static inline struct audit_context *audit_get_context(struct task_struct *tsk, |
| 356 | int return_valid, | 460 | int return_valid, |
| 357 | int return_code) | 461 | int return_code) |
| @@ -365,12 +469,22 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk, | |||
| 365 | 469 | ||
| 366 | if (context->in_syscall && !context->auditable) { | 470 | if (context->in_syscall && !context->auditable) { |
| 367 | enum audit_state state; | 471 | enum audit_state state; |
| 472 | |||
| 368 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); | 473 | state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]); |
| 474 | if (state == AUDIT_RECORD_CONTEXT) { | ||
| 475 | context->auditable = 1; | ||
| 476 | goto get_context; | ||
| 477 | } | ||
| 478 | |||
| 479 | state = audit_filter_inodes(tsk, context); | ||
| 369 | if (state == AUDIT_RECORD_CONTEXT) | 480 | if (state == AUDIT_RECORD_CONTEXT) |
| 370 | context->auditable = 1; | 481 | context->auditable = 1; |
| 482 | |||
| 371 | } | 483 | } |
| 372 | 484 | ||
| 485 | get_context: | ||
| 373 | context->pid = tsk->pid; | 486 | context->pid = tsk->pid; |
| 487 | context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ | ||
| 374 | context->uid = tsk->uid; | 488 | context->uid = tsk->uid; |
| 375 | context->gid = tsk->gid; | 489 | context->gid = tsk->gid; |
| 376 | context->euid = tsk->euid; | 490 | context->euid = tsk->euid; |
| @@ -413,7 +527,7 @@ static inline void audit_free_names(struct audit_context *context) | |||
| 413 | #endif | 527 | #endif |
| 414 | 528 | ||
| 415 | for (i = 0; i < context->name_count; i++) { | 529 | for (i = 0; i < context->name_count; i++) { |
| 416 | if (context->names[i].name) | 530 | if (context->names[i].name && context->names[i].name_put) |
| 417 | __putname(context->names[i].name); | 531 | __putname(context->names[i].name); |
| 418 | } | 532 | } |
| 419 | context->name_count = 0; | 533 | context->name_count = 0; |
| @@ -606,7 +720,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 606 | tty = "(none)"; | 720 | tty = "(none)"; |
| 607 | audit_log_format(ab, | 721 | audit_log_format(ab, |
| 608 | " a0=%lx a1=%lx a2=%lx a3=%lx items=%d" | 722 | " a0=%lx a1=%lx a2=%lx a3=%lx items=%d" |
| 609 | " pid=%d auid=%u uid=%u gid=%u" | 723 | " ppid=%d pid=%d auid=%u uid=%u gid=%u" |
| 610 | " euid=%u suid=%u fsuid=%u" | 724 | " euid=%u suid=%u fsuid=%u" |
| 611 | " egid=%u sgid=%u fsgid=%u tty=%s", | 725 | " egid=%u sgid=%u fsgid=%u tty=%s", |
| 612 | context->argv[0], | 726 | context->argv[0], |
| @@ -614,6 +728,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 614 | context->argv[2], | 728 | context->argv[2], |
| 615 | context->argv[3], | 729 | context->argv[3], |
| 616 | context->name_count, | 730 | context->name_count, |
| 731 | context->ppid, | ||
| 617 | context->pid, | 732 | context->pid, |
| 618 | context->loginuid, | 733 | context->loginuid, |
| 619 | context->uid, | 734 | context->uid, |
| @@ -630,11 +745,48 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 630 | continue; /* audit_panic has been called */ | 745 | continue; /* audit_panic has been called */ |
| 631 | 746 | ||
| 632 | switch (aux->type) { | 747 | switch (aux->type) { |
| 748 | case AUDIT_MQ_OPEN: { | ||
| 749 | struct audit_aux_data_mq_open *axi = (void *)aux; | ||
| 750 | audit_log_format(ab, | ||
| 751 | "oflag=0x%x mode=%#o mq_flags=0x%lx mq_maxmsg=%ld " | ||
| 752 | "mq_msgsize=%ld mq_curmsgs=%ld", | ||
| 753 | axi->oflag, axi->mode, axi->attr.mq_flags, | ||
| 754 | axi->attr.mq_maxmsg, axi->attr.mq_msgsize, | ||
| 755 | axi->attr.mq_curmsgs); | ||
| 756 | break; } | ||
| 757 | |||
| 758 | case AUDIT_MQ_SENDRECV: { | ||
| 759 | struct audit_aux_data_mq_sendrecv *axi = (void *)aux; | ||
| 760 | audit_log_format(ab, | ||
| 761 | "mqdes=%d msg_len=%zd msg_prio=%u " | ||
| 762 | "abs_timeout_sec=%ld abs_timeout_nsec=%ld", | ||
| 763 | axi->mqdes, axi->msg_len, axi->msg_prio, | ||
| 764 | axi->abs_timeout.tv_sec, axi->abs_timeout.tv_nsec); | ||
| 765 | break; } | ||
| 766 | |||
| 767 | case AUDIT_MQ_NOTIFY: { | ||
| 768 | struct audit_aux_data_mq_notify *axi = (void *)aux; | ||
| 769 | audit_log_format(ab, | ||
| 770 | "mqdes=%d sigev_signo=%d", | ||
| 771 | axi->mqdes, | ||
| 772 | axi->notification.sigev_signo); | ||
| 773 | break; } | ||
| 774 | |||
| 775 | case AUDIT_MQ_GETSETATTR: { | ||
| 776 | struct audit_aux_data_mq_getsetattr *axi = (void *)aux; | ||
| 777 | audit_log_format(ab, | ||
| 778 | "mqdes=%d mq_flags=0x%lx mq_maxmsg=%ld mq_msgsize=%ld " | ||
| 779 | "mq_curmsgs=%ld ", | ||
| 780 | axi->mqdes, | ||
| 781 | axi->mqstat.mq_flags, axi->mqstat.mq_maxmsg, | ||
| 782 | axi->mqstat.mq_msgsize, axi->mqstat.mq_curmsgs); | ||
| 783 | break; } | ||
| 784 | |||
| 633 | case AUDIT_IPC: { | 785 | case AUDIT_IPC: { |
| 634 | struct audit_aux_data_ipcctl *axi = (void *)aux; | 786 | struct audit_aux_data_ipcctl *axi = (void *)aux; |
| 635 | audit_log_format(ab, | 787 | audit_log_format(ab, |
| 636 | " qbytes=%lx iuid=%u igid=%u mode=%x", | 788 | "ouid=%u ogid=%u mode=%x", |
| 637 | axi->qbytes, axi->uid, axi->gid, axi->mode); | 789 | axi->uid, axi->gid, axi->mode); |
| 638 | if (axi->osid != 0) { | 790 | if (axi->osid != 0) { |
| 639 | char *ctx = NULL; | 791 | char *ctx = NULL; |
| 640 | u32 len; | 792 | u32 len; |
| @@ -652,19 +804,18 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 652 | case AUDIT_IPC_SET_PERM: { | 804 | case AUDIT_IPC_SET_PERM: { |
| 653 | struct audit_aux_data_ipcctl *axi = (void *)aux; | 805 | struct audit_aux_data_ipcctl *axi = (void *)aux; |
| 654 | audit_log_format(ab, | 806 | audit_log_format(ab, |
| 655 | " new qbytes=%lx new iuid=%u new igid=%u new mode=%x", | 807 | "qbytes=%lx ouid=%u ogid=%u mode=%x", |
| 656 | axi->qbytes, axi->uid, axi->gid, axi->mode); | 808 | axi->qbytes, axi->uid, axi->gid, axi->mode); |
| 657 | if (axi->osid != 0) { | 809 | break; } |
| 658 | char *ctx = NULL; | 810 | |
| 659 | u32 len; | 811 | case AUDIT_EXECVE: { |
| 660 | if (selinux_ctxid_to_string( | 812 | struct audit_aux_data_execve *axi = (void *)aux; |
| 661 | axi->osid, &ctx, &len)) { | 813 | int i; |
| 662 | audit_log_format(ab, " osid=%u", | 814 | const char *p; |
| 663 | axi->osid); | 815 | for (i = 0, p = axi->mem; i < axi->argc; i++) { |
| 664 | call_panic = 1; | 816 | audit_log_format(ab, "a%d=", i); |
| 665 | } else | 817 | p = audit_log_untrustedstring(ab, p); |
| 666 | audit_log_format(ab, " obj=%s", ctx); | 818 | audit_log_format(ab, "\n"); |
| 667 | kfree(ctx); | ||
| 668 | } | 819 | } |
| 669 | break; } | 820 | break; } |
| 670 | 821 | ||
| @@ -700,8 +851,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 700 | } | 851 | } |
| 701 | } | 852 | } |
| 702 | for (i = 0; i < context->name_count; i++) { | 853 | for (i = 0; i < context->name_count; i++) { |
| 703 | unsigned long ino = context->names[i].ino; | 854 | struct audit_names *n = &context->names[i]; |
| 704 | unsigned long pino = context->names[i].pino; | ||
| 705 | 855 | ||
| 706 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); | 856 | ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); |
| 707 | if (!ab) | 857 | if (!ab) |
| @@ -709,33 +859,47 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
| 709 | 859 | ||
| 710 | audit_log_format(ab, "item=%d", i); | 860 | audit_log_format(ab, "item=%d", i); |
| 711 | 861 | ||
| 712 | audit_log_format(ab, " name="); | 862 | if (n->name) { |
| 713 | if (context->names[i].name) | 863 | switch(n->name_len) { |
| 714 | audit_log_untrustedstring(ab, context->names[i].name); | 864 | case AUDIT_NAME_FULL: |
| 715 | else | 865 | /* log the full path */ |
| 716 | audit_log_format(ab, "(null)"); | 866 | audit_log_format(ab, " name="); |
| 717 | 867 | audit_log_untrustedstring(ab, n->name); | |
| 718 | if (pino != (unsigned long)-1) | 868 | break; |
| 719 | audit_log_format(ab, " parent=%lu", pino); | 869 | case 0: |
| 720 | if (ino != (unsigned long)-1) | 870 | /* name was specified as a relative path and the |
| 721 | audit_log_format(ab, " inode=%lu", ino); | 871 | * directory component is the cwd */ |
| 722 | if ((pino != (unsigned long)-1) || (ino != (unsigned long)-1)) | 872 | audit_log_d_path(ab, " name=", context->pwd, |
| 723 | audit_log_format(ab, " dev=%02x:%02x mode=%#o" | 873 | context->pwdmnt); |
| 724 | " ouid=%u ogid=%u rdev=%02x:%02x", | 874 | break; |
| 725 | MAJOR(context->names[i].dev), | 875 | default: |
| 726 | MINOR(context->names[i].dev), | 876 | /* log the name's directory component */ |
| 727 | context->names[i].mode, | 877 | audit_log_format(ab, " name="); |
| 728 | context->names[i].uid, | 878 | audit_log_n_untrustedstring(ab, n->name_len, |
| 729 | context->names[i].gid, | 879 | n->name); |
| 730 | MAJOR(context->names[i].rdev), | 880 | } |
| 731 | MINOR(context->names[i].rdev)); | 881 | } else |
| 732 | if (context->names[i].osid != 0) { | 882 | audit_log_format(ab, " name=(null)"); |
| 883 | |||
| 884 | if (n->ino != (unsigned long)-1) { | ||
| 885 | audit_log_format(ab, " inode=%lu" | ||
| 886 | " dev=%02x:%02x mode=%#o" | ||
| 887 | " ouid=%u ogid=%u rdev=%02x:%02x", | ||
| 888 | n->ino, | ||
| 889 | MAJOR(n->dev), | ||
| 890 | MINOR(n->dev), | ||
| 891 | n->mode, | ||
| 892 | n->uid, | ||
| 893 | n->gid, | ||
| 894 | MAJOR(n->rdev), | ||
| 895 | MINOR(n->rdev)); | ||
| 896 | } | ||
| 897 | if (n->osid != 0) { | ||
| 733 | char *ctx = NULL; | 898 | char *ctx = NULL; |
| 734 | u32 len; | 899 | u32 len; |
| 735 | if (selinux_ctxid_to_string( | 900 | if (selinux_ctxid_to_string( |
| 736 | context->names[i].osid, &ctx, &len)) { | 901 | n->osid, &ctx, &len)) { |
| 737 | audit_log_format(ab, " osid=%u", | 902 | audit_log_format(ab, " osid=%u", n->osid); |
| 738 | context->names[i].osid); | ||
| 739 | call_panic = 2; | 903 | call_panic = 2; |
| 740 | } else | 904 | } else |
| 741 | audit_log_format(ab, " obj=%s", ctx); | 905 | audit_log_format(ab, " obj=%s", ctx); |
| @@ -908,11 +1072,11 @@ void audit_syscall_exit(int valid, long return_code) | |||
| 908 | * Add a name to the list of audit names for this context. | 1072 | * Add a name to the list of audit names for this context. |
| 909 | * Called from fs/namei.c:getname(). | 1073 | * Called from fs/namei.c:getname(). |
| 910 | */ | 1074 | */ |
| 911 | void audit_getname(const char *name) | 1075 | void __audit_getname(const char *name) |
| 912 | { | 1076 | { |
| 913 | struct audit_context *context = current->audit_context; | 1077 | struct audit_context *context = current->audit_context; |
| 914 | 1078 | ||
| 915 | if (!context || IS_ERR(name) || !name) | 1079 | if (IS_ERR(name) || !name) |
| 916 | return; | 1080 | return; |
| 917 | 1081 | ||
| 918 | if (!context->in_syscall) { | 1082 | if (!context->in_syscall) { |
| @@ -925,6 +1089,8 @@ void audit_getname(const char *name) | |||
| 925 | } | 1089 | } |
| 926 | BUG_ON(context->name_count >= AUDIT_NAMES); | 1090 | BUG_ON(context->name_count >= AUDIT_NAMES); |
| 927 | context->names[context->name_count].name = name; | 1091 | context->names[context->name_count].name = name; |
| 1092 | context->names[context->name_count].name_len = AUDIT_NAME_FULL; | ||
| 1093 | context->names[context->name_count].name_put = 1; | ||
| 928 | context->names[context->name_count].ino = (unsigned long)-1; | 1094 | context->names[context->name_count].ino = (unsigned long)-1; |
| 929 | ++context->name_count; | 1095 | ++context->name_count; |
| 930 | if (!context->pwd) { | 1096 | if (!context->pwd) { |
| @@ -991,11 +1157,10 @@ static void audit_inode_context(int idx, const struct inode *inode) | |||
| 991 | * audit_inode - store the inode and device from a lookup | 1157 | * audit_inode - store the inode and device from a lookup |
| 992 | * @name: name being audited | 1158 | * @name: name being audited |
| 993 | * @inode: inode being audited | 1159 | * @inode: inode being audited |
| 994 | * @flags: lookup flags (as used in path_lookup()) | ||
| 995 | * | 1160 | * |
| 996 | * Called from fs/namei.c:path_lookup(). | 1161 | * Called from fs/namei.c:path_lookup(). |
| 997 | */ | 1162 | */ |
| 998 | void __audit_inode(const char *name, const struct inode *inode, unsigned flags) | 1163 | void __audit_inode(const char *name, const struct inode *inode) |
| 999 | { | 1164 | { |
| 1000 | int idx; | 1165 | int idx; |
| 1001 | struct audit_context *context = current->audit_context; | 1166 | struct audit_context *context = current->audit_context; |
| @@ -1021,20 +1186,13 @@ void __audit_inode(const char *name, const struct inode *inode, unsigned flags) | |||
| 1021 | ++context->ino_count; | 1186 | ++context->ino_count; |
| 1022 | #endif | 1187 | #endif |
| 1023 | } | 1188 | } |
| 1189 | context->names[idx].ino = inode->i_ino; | ||
| 1024 | context->names[idx].dev = inode->i_sb->s_dev; | 1190 | context->names[idx].dev = inode->i_sb->s_dev; |
| 1025 | context->names[idx].mode = inode->i_mode; | 1191 | context->names[idx].mode = inode->i_mode; |
| 1026 | context->names[idx].uid = inode->i_uid; | 1192 | context->names[idx].uid = inode->i_uid; |
| 1027 | context->names[idx].gid = inode->i_gid; | 1193 | context->names[idx].gid = inode->i_gid; |
| 1028 | context->names[idx].rdev = inode->i_rdev; | 1194 | context->names[idx].rdev = inode->i_rdev; |
| 1029 | audit_inode_context(idx, inode); | 1195 | audit_inode_context(idx, inode); |
| 1030 | if ((flags & LOOKUP_PARENT) && (strcmp(name, "/") != 0) && | ||
| 1031 | (strcmp(name, ".") != 0)) { | ||
| 1032 | context->names[idx].ino = (unsigned long)-1; | ||
| 1033 | context->names[idx].pino = inode->i_ino; | ||
| 1034 | } else { | ||
| 1035 | context->names[idx].ino = inode->i_ino; | ||
| 1036 | context->names[idx].pino = (unsigned long)-1; | ||
| 1037 | } | ||
| 1038 | } | 1196 | } |
| 1039 | 1197 | ||
| 1040 | /** | 1198 | /** |
| @@ -1056,51 +1214,40 @@ void __audit_inode_child(const char *dname, const struct inode *inode, | |||
| 1056 | { | 1214 | { |
| 1057 | int idx; | 1215 | int idx; |
| 1058 | struct audit_context *context = current->audit_context; | 1216 | struct audit_context *context = current->audit_context; |
| 1217 | const char *found_name = NULL; | ||
| 1218 | int dirlen = 0; | ||
| 1059 | 1219 | ||
| 1060 | if (!context->in_syscall) | 1220 | if (!context->in_syscall) |
| 1061 | return; | 1221 | return; |
| 1062 | 1222 | ||
| 1063 | /* determine matching parent */ | 1223 | /* determine matching parent */ |
| 1064 | if (dname) | 1224 | if (!dname) |
| 1065 | for (idx = 0; idx < context->name_count; idx++) | 1225 | goto update_context; |
| 1066 | if (context->names[idx].pino == pino) { | 1226 | for (idx = 0; idx < context->name_count; idx++) |
| 1067 | const char *n; | 1227 | if (context->names[idx].ino == pino) { |
| 1068 | const char *name = context->names[idx].name; | 1228 | const char *name = context->names[idx].name; |
| 1069 | int dlen = strlen(dname); | 1229 | |
| 1070 | int nlen = name ? strlen(name) : 0; | 1230 | if (!name) |
| 1071 | 1231 | continue; | |
| 1072 | if (nlen < dlen) | 1232 | |
| 1073 | continue; | 1233 | if (audit_compare_dname_path(dname, name, &dirlen) == 0) { |
| 1074 | 1234 | context->names[idx].name_len = dirlen; | |
| 1075 | /* disregard trailing slashes */ | 1235 | found_name = name; |
| 1076 | n = name + nlen - 1; | 1236 | break; |
| 1077 | while ((*n == '/') && (n > name)) | ||
| 1078 | n--; | ||
| 1079 | |||
| 1080 | /* find last path component */ | ||
| 1081 | n = n - dlen + 1; | ||
| 1082 | if (n < name) | ||
| 1083 | continue; | ||
| 1084 | else if (n > name) { | ||
| 1085 | if (*--n != '/') | ||
| 1086 | continue; | ||
| 1087 | else | ||
| 1088 | n++; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | if (strncmp(n, dname, dlen) == 0) | ||
| 1092 | goto update_context; | ||
| 1093 | } | 1237 | } |
| 1238 | } | ||
| 1094 | 1239 | ||
| 1095 | /* catch-all in case match not found */ | 1240 | update_context: |
| 1096 | idx = context->name_count++; | 1241 | idx = context->name_count++; |
| 1097 | context->names[idx].name = NULL; | ||
| 1098 | context->names[idx].pino = pino; | ||
| 1099 | #if AUDIT_DEBUG | 1242 | #if AUDIT_DEBUG |
| 1100 | context->ino_count++; | 1243 | context->ino_count++; |
| 1101 | #endif | 1244 | #endif |
| 1245 | /* Re-use the name belonging to the slot for a matching parent directory. | ||
| 1246 | * All names for this context are relinquished in audit_free_names() */ | ||
| 1247 | context->names[idx].name = found_name; | ||
| 1248 | context->names[idx].name_len = AUDIT_NAME_FULL; | ||
| 1249 | context->names[idx].name_put = 0; /* don't call __putname() */ | ||
| 1102 | 1250 | ||
| 1103 | update_context: | ||
| 1104 | if (inode) { | 1251 | if (inode) { |
| 1105 | context->names[idx].ino = inode->i_ino; | 1252 | context->names[idx].ino = inode->i_ino; |
| 1106 | context->names[idx].dev = inode->i_sb->s_dev; | 1253 | context->names[idx].dev = inode->i_sb->s_dev; |
| @@ -1109,7 +1256,8 @@ update_context: | |||
| 1109 | context->names[idx].gid = inode->i_gid; | 1256 | context->names[idx].gid = inode->i_gid; |
| 1110 | context->names[idx].rdev = inode->i_rdev; | 1257 | context->names[idx].rdev = inode->i_rdev; |
| 1111 | audit_inode_context(idx, inode); | 1258 | audit_inode_context(idx, inode); |
| 1112 | } | 1259 | } else |
| 1260 | context->names[idx].ino = (unsigned long)-1; | ||
| 1113 | } | 1261 | } |
| 1114 | 1262 | ||
| 1115 | /** | 1263 | /** |
| @@ -1142,18 +1290,23 @@ void auditsc_get_stamp(struct audit_context *ctx, | |||
| 1142 | */ | 1290 | */ |
| 1143 | int audit_set_loginuid(struct task_struct *task, uid_t loginuid) | 1291 | int audit_set_loginuid(struct task_struct *task, uid_t loginuid) |
| 1144 | { | 1292 | { |
| 1145 | if (task->audit_context) { | 1293 | struct audit_context *context = task->audit_context; |
| 1146 | struct audit_buffer *ab; | 1294 | |
| 1147 | 1295 | if (context) { | |
| 1148 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); | 1296 | /* Only log if audit is enabled */ |
| 1149 | if (ab) { | 1297 | if (context->in_syscall) { |
| 1150 | audit_log_format(ab, "login pid=%d uid=%u " | 1298 | struct audit_buffer *ab; |
| 1151 | "old auid=%u new auid=%u", | 1299 | |
| 1152 | task->pid, task->uid, | 1300 | ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); |
| 1153 | task->audit_context->loginuid, loginuid); | 1301 | if (ab) { |
| 1154 | audit_log_end(ab); | 1302 | audit_log_format(ab, "login pid=%d uid=%u " |
| 1303 | "old auid=%u new auid=%u", | ||
| 1304 | task->pid, task->uid, | ||
| 1305 | context->loginuid, loginuid); | ||
| 1306 | audit_log_end(ab); | ||
| 1307 | } | ||
| 1155 | } | 1308 | } |
| 1156 | task->audit_context->loginuid = loginuid; | 1309 | context->loginuid = loginuid; |
| 1157 | } | 1310 | } |
| 1158 | return 0; | 1311 | return 0; |
| 1159 | } | 1312 | } |
| @@ -1170,16 +1323,193 @@ uid_t audit_get_loginuid(struct audit_context *ctx) | |||
| 1170 | } | 1323 | } |
| 1171 | 1324 | ||
| 1172 | /** | 1325 | /** |
| 1173 | * audit_ipc_obj - record audit data for ipc object | 1326 | * __audit_mq_open - record audit data for a POSIX MQ open |
| 1174 | * @ipcp: ipc permissions | 1327 | * @oflag: open flag |
| 1328 | * @mode: mode bits | ||
| 1329 | * @u_attr: queue attributes | ||
| 1175 | * | 1330 | * |
| 1176 | * Returns 0 for success or NULL context or < 0 on error. | 1331 | * Returns 0 for success or NULL context or < 0 on error. |
| 1177 | */ | 1332 | */ |
| 1178 | int audit_ipc_obj(struct kern_ipc_perm *ipcp) | 1333 | int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr) |
| 1179 | { | 1334 | { |
| 1180 | struct audit_aux_data_ipcctl *ax; | 1335 | struct audit_aux_data_mq_open *ax; |
| 1336 | struct audit_context *context = current->audit_context; | ||
| 1337 | |||
| 1338 | if (!audit_enabled) | ||
| 1339 | return 0; | ||
| 1340 | |||
| 1341 | if (likely(!context)) | ||
| 1342 | return 0; | ||
| 1343 | |||
| 1344 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | ||
| 1345 | if (!ax) | ||
| 1346 | return -ENOMEM; | ||
| 1347 | |||
| 1348 | if (u_attr != NULL) { | ||
| 1349 | if (copy_from_user(&ax->attr, u_attr, sizeof(ax->attr))) { | ||
| 1350 | kfree(ax); | ||
| 1351 | return -EFAULT; | ||
| 1352 | } | ||
| 1353 | } else | ||
| 1354 | memset(&ax->attr, 0, sizeof(ax->attr)); | ||
| 1355 | |||
| 1356 | ax->oflag = oflag; | ||
| 1357 | ax->mode = mode; | ||
| 1358 | |||
| 1359 | ax->d.type = AUDIT_MQ_OPEN; | ||
| 1360 | ax->d.next = context->aux; | ||
| 1361 | context->aux = (void *)ax; | ||
| 1362 | return 0; | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | /** | ||
| 1366 | * __audit_mq_timedsend - record audit data for a POSIX MQ timed send | ||
| 1367 | * @mqdes: MQ descriptor | ||
| 1368 | * @msg_len: Message length | ||
| 1369 | * @msg_prio: Message priority | ||
| 1370 | * @abs_timeout: Message timeout in absolute time | ||
| 1371 | * | ||
| 1372 | * Returns 0 for success or NULL context or < 0 on error. | ||
| 1373 | */ | ||
| 1374 | int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, | ||
| 1375 | const struct timespec __user *u_abs_timeout) | ||
| 1376 | { | ||
| 1377 | struct audit_aux_data_mq_sendrecv *ax; | ||
| 1378 | struct audit_context *context = current->audit_context; | ||
| 1379 | |||
| 1380 | if (!audit_enabled) | ||
| 1381 | return 0; | ||
| 1382 | |||
| 1383 | if (likely(!context)) | ||
| 1384 | return 0; | ||
| 1385 | |||
| 1386 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | ||
| 1387 | if (!ax) | ||
| 1388 | return -ENOMEM; | ||
| 1389 | |||
| 1390 | if (u_abs_timeout != NULL) { | ||
| 1391 | if (copy_from_user(&ax->abs_timeout, u_abs_timeout, sizeof(ax->abs_timeout))) { | ||
| 1392 | kfree(ax); | ||
| 1393 | return -EFAULT; | ||
| 1394 | } | ||
| 1395 | } else | ||
| 1396 | memset(&ax->abs_timeout, 0, sizeof(ax->abs_timeout)); | ||
| 1397 | |||
| 1398 | ax->mqdes = mqdes; | ||
| 1399 | ax->msg_len = msg_len; | ||
| 1400 | ax->msg_prio = msg_prio; | ||
| 1401 | |||
| 1402 | ax->d.type = AUDIT_MQ_SENDRECV; | ||
| 1403 | ax->d.next = context->aux; | ||
| 1404 | context->aux = (void *)ax; | ||
| 1405 | return 0; | ||
| 1406 | } | ||
| 1407 | |||
| 1408 | /** | ||
| 1409 | * __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive | ||
| 1410 | * @mqdes: MQ descriptor | ||
| 1411 | * @msg_len: Message length | ||
| 1412 | * @msg_prio: Message priority | ||
| 1413 | * @abs_timeout: Message timeout in absolute time | ||
| 1414 | * | ||
| 1415 | * Returns 0 for success or NULL context or < 0 on error. | ||
| 1416 | */ | ||
| 1417 | int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, | ||
| 1418 | unsigned int __user *u_msg_prio, | ||
| 1419 | const struct timespec __user *u_abs_timeout) | ||
| 1420 | { | ||
| 1421 | struct audit_aux_data_mq_sendrecv *ax; | ||
| 1422 | struct audit_context *context = current->audit_context; | ||
| 1423 | |||
| 1424 | if (!audit_enabled) | ||
| 1425 | return 0; | ||
| 1426 | |||
| 1427 | if (likely(!context)) | ||
| 1428 | return 0; | ||
| 1429 | |||
| 1430 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | ||
| 1431 | if (!ax) | ||
| 1432 | return -ENOMEM; | ||
| 1433 | |||
| 1434 | if (u_msg_prio != NULL) { | ||
| 1435 | if (get_user(ax->msg_prio, u_msg_prio)) { | ||
| 1436 | kfree(ax); | ||
| 1437 | return -EFAULT; | ||
| 1438 | } | ||
| 1439 | } else | ||
| 1440 | ax->msg_prio = 0; | ||
| 1441 | |||
| 1442 | if (u_abs_timeout != NULL) { | ||
| 1443 | if (copy_from_user(&ax->abs_timeout, u_abs_timeout, sizeof(ax->abs_timeout))) { | ||
| 1444 | kfree(ax); | ||
| 1445 | return -EFAULT; | ||
| 1446 | } | ||
| 1447 | } else | ||
| 1448 | memset(&ax->abs_timeout, 0, sizeof(ax->abs_timeout)); | ||
| 1449 | |||
| 1450 | ax->mqdes = mqdes; | ||
| 1451 | ax->msg_len = msg_len; | ||
| 1452 | |||
| 1453 | ax->d.type = AUDIT_MQ_SENDRECV; | ||
| 1454 | ax->d.next = context->aux; | ||
| 1455 | context->aux = (void *)ax; | ||
| 1456 | return 0; | ||
| 1457 | } | ||
| 1458 | |||
| 1459 | /** | ||
| 1460 | * __audit_mq_notify - record audit data for a POSIX MQ notify | ||
| 1461 | * @mqdes: MQ descriptor | ||
| 1462 | * @u_notification: Notification event | ||
| 1463 | * | ||
| 1464 | * Returns 0 for success or NULL context or < 0 on error. | ||
| 1465 | */ | ||
| 1466 | |||
| 1467 | int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification) | ||
| 1468 | { | ||
| 1469 | struct audit_aux_data_mq_notify *ax; | ||
| 1470 | struct audit_context *context = current->audit_context; | ||
| 1471 | |||
| 1472 | if (!audit_enabled) | ||
| 1473 | return 0; | ||
| 1474 | |||
| 1475 | if (likely(!context)) | ||
| 1476 | return 0; | ||
| 1477 | |||
| 1478 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | ||
| 1479 | if (!ax) | ||
| 1480 | return -ENOMEM; | ||
| 1481 | |||
| 1482 | if (u_notification != NULL) { | ||
| 1483 | if (copy_from_user(&ax->notification, u_notification, sizeof(ax->notification))) { | ||
| 1484 | kfree(ax); | ||
| 1485 | return -EFAULT; | ||
| 1486 | } | ||
| 1487 | } else | ||
| 1488 | memset(&ax->notification, 0, sizeof(ax->notification)); | ||
| 1489 | |||
| 1490 | ax->mqdes = mqdes; | ||
| 1491 | |||
| 1492 | ax->d.type = AUDIT_MQ_NOTIFY; | ||
| 1493 | ax->d.next = context->aux; | ||
| 1494 | context->aux = (void *)ax; | ||
| 1495 | return 0; | ||
| 1496 | } | ||
| 1497 | |||
| 1498 | /** | ||
| 1499 | * __audit_mq_getsetattr - record audit data for a POSIX MQ get/set attribute | ||
| 1500 | * @mqdes: MQ descriptor | ||
| 1501 | * @mqstat: MQ flags | ||
| 1502 | * | ||
| 1503 | * Returns 0 for success or NULL context or < 0 on error. | ||
| 1504 | */ | ||
| 1505 | int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) | ||
| 1506 | { | ||
| 1507 | struct audit_aux_data_mq_getsetattr *ax; | ||
| 1181 | struct audit_context *context = current->audit_context; | 1508 | struct audit_context *context = current->audit_context; |
| 1182 | 1509 | ||
| 1510 | if (!audit_enabled) | ||
| 1511 | return 0; | ||
| 1512 | |||
| 1183 | if (likely(!context)) | 1513 | if (likely(!context)) |
| 1184 | return 0; | 1514 | return 0; |
| 1185 | 1515 | ||
| @@ -1187,6 +1517,30 @@ int audit_ipc_obj(struct kern_ipc_perm *ipcp) | |||
| 1187 | if (!ax) | 1517 | if (!ax) |
| 1188 | return -ENOMEM; | 1518 | return -ENOMEM; |
| 1189 | 1519 | ||
| 1520 | ax->mqdes = mqdes; | ||
| 1521 | ax->mqstat = *mqstat; | ||
| 1522 | |||
| 1523 | ax->d.type = AUDIT_MQ_GETSETATTR; | ||
| 1524 | ax->d.next = context->aux; | ||
| 1525 | context->aux = (void *)ax; | ||
| 1526 | return 0; | ||
| 1527 | } | ||
| 1528 | |||
| 1529 | /** | ||
| 1530 | * audit_ipc_obj - record audit data for ipc object | ||
| 1531 | * @ipcp: ipc permissions | ||
| 1532 | * | ||
| 1533 | * Returns 0 for success or NULL context or < 0 on error. | ||
| 1534 | */ | ||
| 1535 | int __audit_ipc_obj(struct kern_ipc_perm *ipcp) | ||
| 1536 | { | ||
| 1537 | struct audit_aux_data_ipcctl *ax; | ||
| 1538 | struct audit_context *context = current->audit_context; | ||
| 1539 | |||
| 1540 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | ||
| 1541 | if (!ax) | ||
| 1542 | return -ENOMEM; | ||
| 1543 | |||
| 1190 | ax->uid = ipcp->uid; | 1544 | ax->uid = ipcp->uid; |
| 1191 | ax->gid = ipcp->gid; | 1545 | ax->gid = ipcp->gid; |
| 1192 | ax->mode = ipcp->mode; | 1546 | ax->mode = ipcp->mode; |
| @@ -1207,14 +1561,11 @@ int audit_ipc_obj(struct kern_ipc_perm *ipcp) | |||
| 1207 | * | 1561 | * |
| 1208 | * Returns 0 for success or NULL context or < 0 on error. | 1562 | * Returns 0 for success or NULL context or < 0 on error. |
| 1209 | */ | 1563 | */ |
| 1210 | int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, struct kern_ipc_perm *ipcp) | 1564 | int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) |
| 1211 | { | 1565 | { |
| 1212 | struct audit_aux_data_ipcctl *ax; | 1566 | struct audit_aux_data_ipcctl *ax; |
| 1213 | struct audit_context *context = current->audit_context; | 1567 | struct audit_context *context = current->audit_context; |
| 1214 | 1568 | ||
| 1215 | if (likely(!context)) | ||
| 1216 | return 0; | ||
| 1217 | |||
| 1218 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); | 1569 | ax = kmalloc(sizeof(*ax), GFP_ATOMIC); |
| 1219 | if (!ax) | 1570 | if (!ax) |
| 1220 | return -ENOMEM; | 1571 | return -ENOMEM; |
| @@ -1223,7 +1574,6 @@ int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, | |||
| 1223 | ax->uid = uid; | 1574 | ax->uid = uid; |
| 1224 | ax->gid = gid; | 1575 | ax->gid = gid; |
| 1225 | ax->mode = mode; | 1576 | ax->mode = mode; |
| 1226 | selinux_get_ipc_sid(ipcp, &ax->osid); | ||
| 1227 | 1577 | ||
| 1228 | ax->d.type = AUDIT_IPC_SET_PERM; | 1578 | ax->d.type = AUDIT_IPC_SET_PERM; |
| 1229 | ax->d.next = context->aux; | 1579 | ax->d.next = context->aux; |
| @@ -1231,6 +1581,39 @@ int audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode, | |||
| 1231 | return 0; | 1581 | return 0; |
| 1232 | } | 1582 | } |
| 1233 | 1583 | ||
| 1584 | int audit_bprm(struct linux_binprm *bprm) | ||
| 1585 | { | ||
| 1586 | struct audit_aux_data_execve *ax; | ||
| 1587 | struct audit_context *context = current->audit_context; | ||
| 1588 | unsigned long p, next; | ||
| 1589 | void *to; | ||
| 1590 | |||
| 1591 | if (likely(!audit_enabled || !context)) | ||
| 1592 | return 0; | ||
| 1593 | |||
| 1594 | ax = kmalloc(sizeof(*ax) + PAGE_SIZE * MAX_ARG_PAGES - bprm->p, | ||
| 1595 | GFP_KERNEL); | ||
| 1596 | if (!ax) | ||
| 1597 | return -ENOMEM; | ||
| 1598 | |||
| 1599 | ax->argc = bprm->argc; | ||
| 1600 | ax->envc = bprm->envc; | ||
| 1601 | for (p = bprm->p, to = ax->mem; p < MAX_ARG_PAGES*PAGE_SIZE; p = next) { | ||
| 1602 | struct page *page = bprm->page[p / PAGE_SIZE]; | ||
| 1603 | void *kaddr = kmap(page); | ||
| 1604 | next = (p + PAGE_SIZE) & ~(PAGE_SIZE - 1); | ||
| 1605 | memcpy(to, kaddr + (p & (PAGE_SIZE - 1)), next - p); | ||
| 1606 | to += next - p; | ||
| 1607 | kunmap(page); | ||
| 1608 | } | ||
| 1609 | |||
| 1610 | ax->d.type = AUDIT_EXECVE; | ||
| 1611 | ax->d.next = context->aux; | ||
| 1612 | context->aux = (void *)ax; | ||
| 1613 | return 0; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | |||
| 1234 | /** | 1617 | /** |
| 1235 | * audit_socketcall - record audit data for sys_socketcall | 1618 | * audit_socketcall - record audit data for sys_socketcall |
| 1236 | * @nargs: number of args | 1619 | * @nargs: number of args |
| @@ -1325,19 +1708,20 @@ int audit_avc_path(struct dentry *dentry, struct vfsmount *mnt) | |||
| 1325 | * If the audit subsystem is being terminated, record the task (pid) | 1708 | * If the audit subsystem is being terminated, record the task (pid) |
| 1326 | * and uid that is doing that. | 1709 | * and uid that is doing that. |
| 1327 | */ | 1710 | */ |
| 1328 | void audit_signal_info(int sig, struct task_struct *t) | 1711 | void __audit_signal_info(int sig, struct task_struct *t) |
| 1329 | { | 1712 | { |
| 1330 | extern pid_t audit_sig_pid; | 1713 | extern pid_t audit_sig_pid; |
| 1331 | extern uid_t audit_sig_uid; | 1714 | extern uid_t audit_sig_uid; |
| 1332 | 1715 | extern u32 audit_sig_sid; | |
| 1333 | if (unlikely(audit_pid && t->tgid == audit_pid)) { | 1716 | |
| 1334 | if (sig == SIGTERM || sig == SIGHUP) { | 1717 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) { |
| 1335 | struct audit_context *ctx = current->audit_context; | 1718 | struct task_struct *tsk = current; |
| 1336 | audit_sig_pid = current->pid; | 1719 | struct audit_context *ctx = tsk->audit_context; |
| 1337 | if (ctx) | 1720 | audit_sig_pid = tsk->pid; |
| 1338 | audit_sig_uid = ctx->loginuid; | 1721 | if (ctx) |
| 1339 | else | 1722 | audit_sig_uid = ctx->loginuid; |
| 1340 | audit_sig_uid = current->uid; | 1723 | else |
| 1341 | } | 1724 | audit_sig_uid = tsk->uid; |
| 1725 | selinux_get_task_sid(tsk, &audit_sig_sid); | ||
| 1342 | } | 1726 | } |
| 1343 | } | 1727 | } |
diff --git a/kernel/signal.c b/kernel/signal.c index e5f8aea78ffe..1b3c921737e2 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -23,12 +23,12 @@ | |||
| 23 | #include <linux/syscalls.h> | 23 | #include <linux/syscalls.h> |
| 24 | #include <linux/ptrace.h> | 24 | #include <linux/ptrace.h> |
| 25 | #include <linux/signal.h> | 25 | #include <linux/signal.h> |
| 26 | #include <linux/audit.h> | ||
| 27 | #include <linux/capability.h> | 26 | #include <linux/capability.h> |
| 28 | #include <asm/param.h> | 27 | #include <asm/param.h> |
| 29 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
| 30 | #include <asm/unistd.h> | 29 | #include <asm/unistd.h> |
| 31 | #include <asm/siginfo.h> | 30 | #include <asm/siginfo.h> |
| 31 | #include "audit.h" /* audit_signal_info() */ | ||
| 32 | 32 | ||
| 33 | /* | 33 | /* |
| 34 | * SLAB caches for signal bits. | 34 | * SLAB caches for signal bits. |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e82726faeeff..0d656e61621d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -150,7 +150,7 @@ extern ctl_table random_table[]; | |||
| 150 | #ifdef CONFIG_UNIX98_PTYS | 150 | #ifdef CONFIG_UNIX98_PTYS |
| 151 | extern ctl_table pty_table[]; | 151 | extern ctl_table pty_table[]; |
| 152 | #endif | 152 | #endif |
| 153 | #ifdef CONFIG_INOTIFY | 153 | #ifdef CONFIG_INOTIFY_USER |
| 154 | extern ctl_table inotify_table[]; | 154 | extern ctl_table inotify_table[]; |
| 155 | #endif | 155 | #endif |
| 156 | 156 | ||
| @@ -1028,7 +1028,7 @@ static ctl_table fs_table[] = { | |||
| 1028 | .mode = 0644, | 1028 | .mode = 0644, |
| 1029 | .proc_handler = &proc_doulongvec_minmax, | 1029 | .proc_handler = &proc_doulongvec_minmax, |
| 1030 | }, | 1030 | }, |
| 1031 | #ifdef CONFIG_INOTIFY | 1031 | #ifdef CONFIG_INOTIFY_USER |
| 1032 | { | 1032 | { |
| 1033 | .ctl_name = FS_INOTIFY, | 1033 | .ctl_name = FS_INOTIFY, |
| 1034 | .procname = "inotify", | 1034 | .procname = "inotify", |
diff --git a/kernel/user.c b/kernel/user.c index 2116642f42c6..4b1eb745afa1 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -140,7 +140,7 @@ struct user_struct * alloc_uid(uid_t uid) | |||
| 140 | atomic_set(&new->processes, 0); | 140 | atomic_set(&new->processes, 0); |
| 141 | atomic_set(&new->files, 0); | 141 | atomic_set(&new->files, 0); |
| 142 | atomic_set(&new->sigpending, 0); | 142 | atomic_set(&new->sigpending, 0); |
| 143 | #ifdef CONFIG_INOTIFY | 143 | #ifdef CONFIG_INOTIFY_USER |
| 144 | atomic_set(&new->inotify_watches, 0); | 144 | atomic_set(&new->inotify_watches, 0); |
| 145 | atomic_set(&new->inotify_devs, 0); | 145 | atomic_set(&new->inotify_devs, 0); |
| 146 | #endif | 146 | #endif |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index c284dbb8b8c0..e9548bc049e1 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
| @@ -1980,7 +1980,7 @@ int selinux_audit_rule_match(u32 ctxid, u32 field, u32 op, | |||
| 1980 | break; | 1980 | break; |
| 1981 | case AUDIT_SE_SEN: | 1981 | case AUDIT_SE_SEN: |
| 1982 | case AUDIT_SE_CLR: | 1982 | case AUDIT_SE_CLR: |
| 1983 | level = (op == AUDIT_SE_SEN ? | 1983 | level = (field == AUDIT_SE_SEN ? |
| 1984 | &ctxt->range.level[0] : &ctxt->range.level[1]); | 1984 | &ctxt->range.level[0] : &ctxt->range.level[1]); |
| 1985 | switch (op) { | 1985 | switch (op) { |
| 1986 | case AUDIT_EQUAL: | 1986 | case AUDIT_EQUAL: |
