about summary refs log tree commit diff stats
path: root/security
diff options
context:
space:
mode:
authorJames Morris <jmorris@namei.org>2009-09-29 17:47:33 -0400
committerJames Morris <jmorris@namei.org>2009-09-29 17:47:33 -0400
commit1669b049db50fc7f1d4e694fb115a0f408c63fce (patch)
tree9b3b90b5cbff9b8f30ecf0b2a44896ce8bef0c20 /security
parent7f366784f5c2b8fc0658b5b374f4c63ee42c789f (diff)
parent17d857be649a21ca90008c6dc425d849fa83db5c (diff)
Merge branch 'master' into next
Diffstat (limited to 'security')
-rw-r--r--security/Kconfig30
-rw-r--r--security/device_cgroup.c3
-rw-r--r--security/integrity/ima/ima_fs.c4
-rw-r--r--security/keys/gc.c78
-rw-r--r--security/keys/key.c4
-rw-r--r--security/keys/keyctl.c3
-rw-r--r--security/keys/keyring.c24
-rw-r--r--security/lsm_audit.c2
-rw-r--r--security/min_addr.c4
-rw-r--r--security/selinux/avc.c41
-rw-r--r--security/selinux/exports.c6
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/smack/smack_lsm.c8
-rw-r--r--security/smack/smackfs.c6
14 files changed, 154 insertions(+), 61 deletions(-)
diff --git a/security/Kconfig b/security/Kconfig
index 4c865345caa0..fb363cd81cf6 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -113,6 +113,36 @@ config SECURITY_ROOTPLUG
113 113
114 If you are unsure how to answer this question, answer N. 114 If you are unsure how to answer this question, answer N.
115 115
116config INTEL_TXT
117 bool "Enable Intel(R) Trusted Execution Technology (Intel(R) TXT)"
118 depends on HAVE_INTEL_TXT
119 help
120 This option enables support for booting the kernel with the
121 Trusted Boot (tboot) module. This will utilize
122 Intel(R) Trusted Execution Technology to perform a measured launch
123 of the kernel. If the system does not support Intel(R) TXT, this
124 will have no effect.
125
126 Intel TXT will provide higher assurance of system configuration and
127 initial state as well as data reset protection. This is used to
128 create a robust initial kernel measurement and verification, which
129 helps to ensure that kernel security mechanisms are functioning
130 correctly. This level of protection requires a root of trust outside
131 of the kernel itself.
132
133 Intel TXT also helps solve real end user concerns about having
134 confidence that their hardware is running the VMM or kernel that
135 it was configured with, especially since they may be responsible for
136 providing such assurances to VMs and services running on it.
137
138 See <http://www.intel.com/technology/security/> for more information
139 about Intel(R) TXT.
140 See <http://tboot.sourceforge.net> for more information about tboot.
141 See Documentation/intel_txt.txt for a description of how to enable
142 Intel TXT support in a kernel boot.
143
144 If you are unsure as to whether this is required, answer N.
145
116config LSM_MMAP_MIN_ADDR 146config LSM_MMAP_MIN_ADDR
117 int "Low address space for LSM to protect from user allocation" 147 int "Low address space for LSM to protect from user allocation"
118 depends on SECURITY && SECURITY_SELINUX 148 depends on SECURITY && SECURITY_SELINUX
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index b8186bac8b7e..6cf8fd2b79e8 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -61,7 +61,8 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
61struct cgroup_subsys devices_subsys; 61struct cgroup_subsys devices_subsys;
62 62
63static int devcgroup_can_attach(struct cgroup_subsys *ss, 63static int devcgroup_can_attach(struct cgroup_subsys *ss,
64 struct cgroup *new_cgroup, struct task_struct *task) 64 struct cgroup *new_cgroup, struct task_struct *task,
65 bool threadgroup)
65{ 66{
66 if (current != task && !capable(CAP_SYS_ADMIN)) 67 if (current != task && !capable(CAP_SYS_ADMIN))
67 return -EPERM; 68 return -EPERM;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 6bfc7eaebfda..8e9777b76405 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -146,7 +146,7 @@ static int ima_measurements_show(struct seq_file *m, void *v)
146 return 0; 146 return 0;
147} 147}
148 148
149static struct seq_operations ima_measurments_seqops = { 149static const struct seq_operations ima_measurments_seqops = {
150 .start = ima_measurements_start, 150 .start = ima_measurements_start,
151 .next = ima_measurements_next, 151 .next = ima_measurements_next,
152 .stop = ima_measurements_stop, 152 .stop = ima_measurements_stop,
@@ -221,7 +221,7 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
221 return 0; 221 return 0;
222} 222}
223 223
224static struct seq_operations ima_ascii_measurements_seqops = { 224static const struct seq_operations ima_ascii_measurements_seqops = {
225 .start = ima_measurements_start, 225 .start = ima_measurements_start,
226 .next = ima_measurements_next, 226 .next = ima_measurements_next,
227 .stop = ima_measurements_stop, 227 .stop = ima_measurements_stop,
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 1e616aef55fd..4770be375ffe 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -26,8 +26,10 @@ static void key_garbage_collector(struct work_struct *);
26static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0); 26static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
27static DECLARE_WORK(key_gc_work, key_garbage_collector); 27static DECLARE_WORK(key_gc_work, key_garbage_collector);
28static key_serial_t key_gc_cursor; /* the last key the gc considered */ 28static key_serial_t key_gc_cursor; /* the last key the gc considered */
29static bool key_gc_again;
29static unsigned long key_gc_executing; 30static unsigned long key_gc_executing;
30static time_t key_gc_next_run = LONG_MAX; 31static time_t key_gc_next_run = LONG_MAX;
32static time_t key_gc_new_timer;
31 33
32/* 34/*
33 * Schedule a garbage collection run 35 * Schedule a garbage collection run
@@ -40,9 +42,7 @@ void key_schedule_gc(time_t gc_at)
40 42
41 kenter("%ld", gc_at - now); 43 kenter("%ld", gc_at - now);
42 44
43 gc_at += key_gc_delay; 45 if (gc_at <= now) {
44
45 if (now >= gc_at) {
46 schedule_work(&key_gc_work); 46 schedule_work(&key_gc_work);
47 } else if (gc_at < key_gc_next_run) { 47 } else if (gc_at < key_gc_next_run) {
48 expires = jiffies + (gc_at - now) * HZ; 48 expires = jiffies + (gc_at - now) * HZ;
@@ -112,16 +112,18 @@ static void key_garbage_collector(struct work_struct *work)
112 struct rb_node *rb; 112 struct rb_node *rb;
113 key_serial_t cursor; 113 key_serial_t cursor;
114 struct key *key, *xkey; 114 struct key *key, *xkey;
115 time_t new_timer = LONG_MAX, limit; 115 time_t new_timer = LONG_MAX, limit, now;
116 116
117 kenter(""); 117 now = current_kernel_time().tv_sec;
118 kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
118 119
119 if (test_and_set_bit(0, &key_gc_executing)) { 120 if (test_and_set_bit(0, &key_gc_executing)) {
120 key_schedule_gc(current_kernel_time().tv_sec); 121 key_schedule_gc(current_kernel_time().tv_sec + 1);
122 kleave(" [busy; deferring]");
121 return; 123 return;
122 } 124 }
123 125
124 limit = current_kernel_time().tv_sec; 126 limit = now;
125 if (limit > key_gc_delay) 127 if (limit > key_gc_delay)
126 limit -= key_gc_delay; 128 limit -= key_gc_delay;
127 else 129 else
@@ -129,12 +131,19 @@ static void key_garbage_collector(struct work_struct *work)
129 131
130 spin_lock(&key_serial_lock); 132 spin_lock(&key_serial_lock);
131 133
132 if (RB_EMPTY_ROOT(&key_serial_tree)) 134 if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
133 goto reached_the_end; 135 spin_unlock(&key_serial_lock);
136 clear_bit(0, &key_gc_executing);
137 return;
138 }
134 139
135 cursor = key_gc_cursor; 140 cursor = key_gc_cursor;
136 if (cursor < 0) 141 if (cursor < 0)
137 cursor = 0; 142 cursor = 0;
143 if (cursor > 0)
144 new_timer = key_gc_new_timer;
145 else
146 key_gc_again = false;
138 147
139 /* find the first key above the cursor */ 148 /* find the first key above the cursor */
140 key = NULL; 149 key = NULL;
@@ -160,35 +169,50 @@ static void key_garbage_collector(struct work_struct *work)
160 169
161 /* trawl through the keys looking for keyrings */ 170 /* trawl through the keys looking for keyrings */
162 for (;;) { 171 for (;;) {
163 if (key->expiry > 0 && key->expiry < new_timer) 172 if (key->expiry > limit && key->expiry < new_timer) {
173 kdebug("will expire %x in %ld",
174 key_serial(key), key->expiry - limit);
164 new_timer = key->expiry; 175 new_timer = key->expiry;
176 }
165 177
166 if (key->type == &key_type_keyring && 178 if (key->type == &key_type_keyring &&
167 key_gc_keyring(key, limit)) { 179 key_gc_keyring(key, limit))
168 /* the gc ate our lock */ 180 /* the gc had to release our lock so that the keyring
169 schedule_work(&key_gc_work); 181 * could be modified, so we have to get it again */
170 goto no_unlock; 182 goto gc_released_our_lock;
171 }
172 183
173 rb = rb_next(&key->serial_node); 184 rb = rb_next(&key->serial_node);
174 if (!rb) { 185 if (!rb)
175 key_gc_cursor = 0; 186 goto reached_the_end;
176 break;
177 }
178 key = rb_entry(rb, struct key, serial_node); 187 key = rb_entry(rb, struct key, serial_node);
179 } 188 }
180 189
181out: 190gc_released_our_lock:
182 spin_unlock(&key_serial_lock); 191 kdebug("gc_released_our_lock");
183no_unlock: 192 key_gc_new_timer = new_timer;
193 key_gc_again = true;
184 clear_bit(0, &key_gc_executing); 194 clear_bit(0, &key_gc_executing);
185 if (new_timer < LONG_MAX) 195 schedule_work(&key_gc_work);
186 key_schedule_gc(new_timer); 196 kleave(" [continue]");
187
188 kleave("");
189 return; 197 return;
190 198
199 /* when we reach the end of the run, we set the timer for the next one */
191reached_the_end: 200reached_the_end:
201 kdebug("reached_the_end");
202 spin_unlock(&key_serial_lock);
203 key_gc_new_timer = new_timer;
192 key_gc_cursor = 0; 204 key_gc_cursor = 0;
193 goto out; 205 clear_bit(0, &key_gc_executing);
206
207 if (key_gc_again) {
208 /* there may have been a key that expired whilst we were
209 * scanning, so if we discarded any links we should do another
210 * scan */
211 new_timer = now + 1;
212 key_schedule_gc(new_timer);
213 } else if (new_timer < LONG_MAX) {
214 new_timer += key_gc_delay;
215 key_schedule_gc(new_timer);
216 }
217 kleave(" [end]");
194} 218}
diff --git a/security/keys/key.c b/security/keys/key.c
index 08531ad0f252..e50d264c9ad1 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -500,7 +500,7 @@ int key_negate_and_link(struct key *key,
500 set_bit(KEY_FLAG_INSTANTIATED, &key->flags); 500 set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
501 now = current_kernel_time(); 501 now = current_kernel_time();
502 key->expiry = now.tv_sec + timeout; 502 key->expiry = now.tv_sec + timeout;
503 key_schedule_gc(key->expiry); 503 key_schedule_gc(key->expiry + key_gc_delay);
504 504
505 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) 505 if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
506 awaken = 1; 506 awaken = 1;
@@ -909,7 +909,7 @@ void key_revoke(struct key *key)
909 time = now.tv_sec; 909 time = now.tv_sec;
910 if (key->revoked_at == 0 || key->revoked_at > time) { 910 if (key->revoked_at == 0 || key->revoked_at > time) {
911 key->revoked_at = time; 911 key->revoked_at = time;
912 key_schedule_gc(key->revoked_at); 912 key_schedule_gc(key->revoked_at + key_gc_delay);
913 } 913 }
914 914
915 up_write(&key->sem); 915 up_write(&key->sem);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 74c968524592..2fb28efc5326 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1115,7 +1115,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
1115 } 1115 }
1116 1116
1117 key->expiry = expiry; 1117 key->expiry = expiry;
1118 key_schedule_gc(key->expiry); 1118 key_schedule_gc(key->expiry + key_gc_delay);
1119 1119
1120 up_write(&key->sem); 1120 up_write(&key->sem);
1121 key_put(key); 1121 key_put(key);
@@ -1319,6 +1319,7 @@ long keyctl_session_to_parent(void)
1319already_same: 1319already_same:
1320 ret = 0; 1320 ret = 0;
1321not_permitted: 1321not_permitted:
1322 write_unlock_irq(&tasklist_lock);
1322 put_cred(cred); 1323 put_cred(cred);
1323 return ret; 1324 return ret;
1324 1325
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index ac977f661a79..8ec02746ca99 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1019,18 +1019,18 @@ void keyring_gc(struct key *keyring, time_t limit)
1019 struct key *key; 1019 struct key *key;
1020 int loop, keep, max; 1020 int loop, keep, max;
1021 1021
1022 kenter("%x", key_serial(keyring)); 1022 kenter("{%x,%s}", key_serial(keyring), keyring->description);
1023 1023
1024 down_write(&keyring->sem); 1024 down_write(&keyring->sem);
1025 1025
1026 klist = keyring->payload.subscriptions; 1026 klist = keyring->payload.subscriptions;
1027 if (!klist) 1027 if (!klist)
1028 goto just_return; 1028 goto no_klist;
1029 1029
1030 /* work out how many subscriptions we're keeping */ 1030 /* work out how many subscriptions we're keeping */
1031 keep = 0; 1031 keep = 0;
1032 for (loop = klist->nkeys - 1; loop >= 0; loop--) 1032 for (loop = klist->nkeys - 1; loop >= 0; loop--)
1033 if (!key_is_dead(klist->keys[loop], limit)); 1033 if (!key_is_dead(klist->keys[loop], limit))
1034 keep++; 1034 keep++;
1035 1035
1036 if (keep == klist->nkeys) 1036 if (keep == klist->nkeys)
@@ -1041,7 +1041,7 @@ void keyring_gc(struct key *keyring, time_t limit)
1041 new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *), 1041 new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
1042 GFP_KERNEL); 1042 GFP_KERNEL);
1043 if (!new) 1043 if (!new)
1044 goto just_return; 1044 goto nomem;
1045 new->maxkeys = max; 1045 new->maxkeys = max;
1046 new->nkeys = 0; 1046 new->nkeys = 0;
1047 new->delkey = 0; 1047 new->delkey = 0;
@@ -1081,7 +1081,21 @@ void keyring_gc(struct key *keyring, time_t limit)
1081discard_new: 1081discard_new:
1082 new->nkeys = keep; 1082 new->nkeys = keep;
1083 keyring_clear_rcu_disposal(&new->rcu); 1083 keyring_clear_rcu_disposal(&new->rcu);
1084 up_write(&keyring->sem);
1085 kleave(" [discard]");
1086 return;
1087
1084just_return: 1088just_return:
1085 up_write(&keyring->sem); 1089 up_write(&keyring->sem);
1086 kleave(" [no]"); 1090 kleave(" [no dead]");
1091 return;
1092
1093no_klist:
1094 up_write(&keyring->sem);
1095 kleave(" [no_klist]");
1096 return;
1097
1098nomem:
1099 up_write(&keyring->sem);
1100 kleave(" [oom]");
1087} 1101}
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 500aad0ebd6a..3bb90b6f1dd3 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -187,7 +187,7 @@ static inline void print_ipv6_addr(struct audit_buffer *ab,
187 char *name1, char *name2) 187 char *name1, char *name2)
188{ 188{
189 if (!ipv6_addr_any(addr)) 189 if (!ipv6_addr_any(addr))
190 audit_log_format(ab, " %s=%pI6", name1, addr); 190 audit_log_format(ab, " %s=%pI6c", name1, addr);
191 if (port) 191 if (port)
192 audit_log_format(ab, " %s=%d", name2, ntohs(port)); 192 audit_log_format(ab, " %s=%d", name2, ntohs(port));
193} 193}
diff --git a/security/min_addr.c b/security/min_addr.c
index 14cc7b3b8d03..c844eed7915d 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -28,12 +28,12 @@ static void update_mmap_min_addr(void)
28 * sysctl handler which just sets dac_mmap_min_addr = the new value and then 28 * sysctl handler which just sets dac_mmap_min_addr = the new value and then
29 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly 29 * calls update_mmap_min_addr() so non MAP_FIXED hints get rounded properly
30 */ 30 */
31int mmap_min_addr_handler(struct ctl_table *table, int write, struct file *filp, 31int mmap_min_addr_handler(struct ctl_table *table, int write,
32 void __user *buffer, size_t *lenp, loff_t *ppos) 32 void __user *buffer, size_t *lenp, loff_t *ppos)
33{ 33{
34 int ret; 34 int ret;
35 35
36 ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos); 36 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
37 37
38 update_mmap_min_addr(); 38 update_mmap_min_addr();
39 39
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index e3d19014259b..b4b5da1c0a42 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -709,18 +709,16 @@ out:
709} 709}
710 710
711/** 711/**
712 * avc_ss_reset - Flush the cache and revalidate migrated permissions. 712 * avc_flush - Flush the cache
713 * @seqno: policy sequence number
714 */ 713 */
715int avc_ss_reset(u32 seqno) 714static void avc_flush(void)
716{ 715{
717 struct avc_callback_node *c;
718 int i, rc = 0, tmprc;
719 unsigned long flag;
720 struct avc_node *node;
721 struct hlist_head *head; 716 struct hlist_head *head;
722 struct hlist_node *next; 717 struct hlist_node *next;
718 struct avc_node *node;
723 spinlock_t *lock; 719 spinlock_t *lock;
720 unsigned long flag;
721 int i;
724 722
725 for (i = 0; i < AVC_CACHE_SLOTS; i++) { 723 for (i = 0; i < AVC_CACHE_SLOTS; i++) {
726 head = &avc_cache.slots[i]; 724 head = &avc_cache.slots[i];
@@ -737,6 +735,18 @@ int avc_ss_reset(u32 seqno)
737 rcu_read_unlock(); 735 rcu_read_unlock();
738 spin_unlock_irqrestore(lock, flag); 736 spin_unlock_irqrestore(lock, flag);
739 } 737 }
738}
739
740/**
741 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
742 * @seqno: policy sequence number
743 */
744int avc_ss_reset(u32 seqno)
745{
746 struct avc_callback_node *c;
747 int rc = 0, tmprc;
748
749 avc_flush();
740 750
741 for (c = avc_callbacks; c; c = c->next) { 751 for (c = avc_callbacks; c; c = c->next) {
742 if (c->events & AVC_CALLBACK_RESET) { 752 if (c->events & AVC_CALLBACK_RESET) {
@@ -858,6 +868,19 @@ u32 avc_policy_seqno(void)
858 868
859void avc_disable(void) 869void avc_disable(void)
860{ 870{
861 if (avc_node_cachep) 871 /*
862 kmem_cache_destroy(avc_node_cachep); 872 * If you are looking at this because you have realized that we are
873 * not destroying the avc_node_cachep it might be easy to fix, but
874 * I don't know the memory barrier semantics well enough to know. It's
875 * possible that some other task dereferenced security_ops when
876 * it still pointed to selinux operations. If that is the case it's
877 * possible that it is about to use the avc and is about to need the
878 * avc_node_cachep. I know I could wrap the security.c security_ops call
879 * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
880 * the cache and get that memory back.
881 */
882 if (avc_node_cachep) {
883 avc_flush();
884 /* kmem_cache_destroy(avc_node_cachep); */
885 }
863} 886}
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index c73aeaa008e8..c0a454aee1e0 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -63,3 +63,9 @@ void selinux_secmark_refcount_dec(void)
63 atomic_dec(&selinux_secmark_refcount); 63 atomic_dec(&selinux_secmark_refcount);
64} 64}
65EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec); 65EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
66
67bool selinux_is_enabled(void)
68{
69 return selinux_enabled;
70}
71EXPORT_SYMBOL_GPL(selinux_is_enabled);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 417f7c994522..bb230d5d7085 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2411,7 +2411,7 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
2411 /* Wake up the parent if it is waiting so that it can recheck 2411 /* Wake up the parent if it is waiting so that it can recheck
2412 * wait permission to the new task SID. */ 2412 * wait permission to the new task SID. */
2413 read_lock(&tasklist_lock); 2413 read_lock(&tasklist_lock);
2414 wake_up_interruptible(&current->real_parent->signal->wait_chldexit); 2414 __wake_up_parent(current, current->real_parent);
2415 read_unlock(&tasklist_lock); 2415 read_unlock(&tasklist_lock);
2416} 2416}
2417 2417
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index acae7ef4092d..c33b6bb9b6dd 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -30,17 +30,11 @@
30#include <net/netlabel.h> 30#include <net/netlabel.h>
31#include <net/cipso_ipv4.h> 31#include <net/cipso_ipv4.h>
32#include <linux/audit.h> 32#include <linux/audit.h>
33#include <linux/magic.h>
33#include "smack.h" 34#include "smack.h"
34 35
35#define task_security(task) (task_cred_xxx((task), security)) 36#define task_security(task) (task_cred_xxx((task), security))
36 37
37/*
38 * I hope these are the hokeyist lines of code in the module. Casey.
39 */
40#define DEVPTS_SUPER_MAGIC 0x1cd1
41#define SOCKFS_MAGIC 0x534F434B
42#define TMPFS_MAGIC 0x01021994
43
44/** 38/**
45 * smk_fetch - Fetch the smack label from a file. 39 * smk_fetch - Fetch the smack label from a file.
46 * @ip: a pointer to the inode 40 * @ip: a pointer to the inode
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f83a80980726..aeead7585093 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -187,7 +187,7 @@ static void load_seq_stop(struct seq_file *s, void *v)
187 /* No-op */ 187 /* No-op */
188} 188}
189 189
190static struct seq_operations load_seq_ops = { 190static const struct seq_operations load_seq_ops = {
191 .start = load_seq_start, 191 .start = load_seq_start,
192 .next = load_seq_next, 192 .next = load_seq_next,
193 .show = load_seq_show, 193 .show = load_seq_show,
@@ -503,7 +503,7 @@ static void cipso_seq_stop(struct seq_file *s, void *v)
503 /* No-op */ 503 /* No-op */
504} 504}
505 505
506static struct seq_operations cipso_seq_ops = { 506static const struct seq_operations cipso_seq_ops = {
507 .start = cipso_seq_start, 507 .start = cipso_seq_start,
508 .stop = cipso_seq_stop, 508 .stop = cipso_seq_stop,
509 .next = cipso_seq_next, 509 .next = cipso_seq_next,
@@ -697,7 +697,7 @@ static void netlbladdr_seq_stop(struct seq_file *s, void *v)
697 /* No-op */ 697 /* No-op */
698} 698}
699 699
700static struct seq_operations netlbladdr_seq_ops = { 700static const struct seq_operations netlbladdr_seq_ops = {
701 .start = netlbladdr_seq_start, 701 .start = netlbladdr_seq_start,
702 .stop = netlbladdr_seq_stop, 702 .stop = netlbladdr_seq_stop,
703 .next = netlbladdr_seq_next, 703 .next = netlbladdr_seq_next,