Diffstat (limited to 'ipc')
-rw-r--r--	ipc/compat.c	|   2
-rw-r--r--	ipc/compat_mq.c	|   2
-rw-r--r--	ipc/ipc_sysctl.c	|  14
-rw-r--r--	ipc/mq_sysctl.c	|  12
-rw-r--r--	ipc/msg.c	| 188
-rw-r--r--	ipc/sem.c	| 171
-rw-r--r--	ipc/shm.c	|  23
-rw-r--r--	ipc/util.c	|  12
-rw-r--r--	ipc/util.h	|  10
9 files changed, 224 insertions(+), 210 deletions(-)
diff --git a/ipc/compat.c b/ipc/compat.c
index 45d035d4cedc..b5ef4f7946dc 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
 #include <linux/ptrace.h>
 
 #include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "util.h"
 
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index 90d29f59cac6..ef6f91cc4490 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -12,7 +12,7 @@
 #include <linux/mqueue.h>
 #include <linux/syscalls.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 struct compat_mq_attr {
 	compat_long_t mq_flags;	/* message queue flags */
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 998d31b230f1..c3f0326e98db 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -18,7 +18,7 @@
 #include <linux/msg.h>
 #include "util.h"
 
-static void *get_ipc(ctl_table *table)
+static void *get_ipc(struct ctl_table *table)
 {
 	char *which = table->data;
 	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
@@ -27,7 +27,7 @@ static void *get_ipc(ctl_table *table)
 }
 
 #ifdef CONFIG_PROC_SYSCTL
-static int proc_ipc_dointvec(ctl_table *table, int write,
+static int proc_ipc_dointvec(struct ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
@@ -38,7 +38,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
 	return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
 }
 
-static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
@@ -49,7 +49,7 @@ static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
 	return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
 }
 
-static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
+static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ipc_namespace *ns = current->nsproxy->ipc_ns;
@@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
 	return err;
 }
 
-static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
@@ -85,7 +85,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
 	return rc;
 }
 
-static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
+static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
 	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
@@ -119,7 +119,7 @@ static void ipc_auto_callback(int val)
 	}
 }
 
-static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table ipc_table;
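Note: every hunk in this file (and in ipc/mq_sysctl.c below) makes the same mechanical change: the bare `ctl_table` spelling becomes `struct ctl_table`. The bare name only compiles because include/linux/sysctl.h still carries a compatibility typedef, roughly:

	/* legacy alias in sysctl.h; kept only until the last users are gone */
	typedef struct ctl_table ctl_table;

Spelling the struct out makes the declarations self-describing and lets that typedef be retired eventually; there is no behavior change.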
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 5bb8bfe67149..68d4e953762c 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -14,7 +14,7 @@
 #include <linux/sysctl.h>
 
 #ifdef CONFIG_PROC_SYSCTL
-static void *get_mq(ctl_table *table)
+static void *get_mq(struct ctl_table *table)
 {
 	char *which = table->data;
 	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
@@ -22,7 +22,7 @@ static void *get_mq(ctl_table *table)
 	return which;
 }
 
-static int proc_mq_dointvec(ctl_table *table, int write,
+static int proc_mq_dointvec(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table mq_table;
@@ -32,7 +32,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
 	return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
 }
 
-static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table mq_table;
@@ -53,7 +53,7 @@ static int msg_max_limit_max = HARD_MSGMAX;
 static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
 static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
 
-static ctl_table mq_sysctls[] = {
+static struct ctl_table mq_sysctls[] = {
 	{
 		.procname	= "queues_max",
 		.data		= &init_ipc_ns.mq_queues_max,
@@ -100,7 +100,7 @@ static ctl_table mq_sysctls[] = {
 	{}
 };
 
-static ctl_table mq_sysctl_dir[] = {
+static struct ctl_table mq_sysctl_dir[] = {
 	{
 		.procname	= "mqueue",
 		.mode		= 0555,
@@ -109,7 +109,7 @@ static ctl_table mq_sysctl_dir[] = {
 	{}
 };
 
-static ctl_table mq_sysctl_root[] = {
+static struct ctl_table mq_sysctl_root[] = {
 	{
 		.procname	= "fs",
 		.mode		= 0555,
diff --git a/ipc/msg.c b/ipc/msg.c
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -39,12 +39,10 @@
 #include <linux/ipc_namespace.h>
 
 #include <asm/current.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include "util.h"
 
-/*
- * one msg_receiver structure for each sleeping receiver:
- */
+/* one msg_receiver structure for each sleeping receiver */
 struct msg_receiver {
 	struct list_head	r_list;
 	struct task_struct	*r_tsk;
@@ -53,6 +51,12 @@ struct msg_receiver {
 	long			r_msgtype;
 	long			r_maxsize;
 
+	/*
+	 * Mark r_msg volatile so that the compiler
+	 * does not try to get smart and optimize
+	 * it. We rely on this for the lockless
+	 * receive algorithm.
+	 */
 	struct msg_msg		*volatile r_msg;
 };
 
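Note: the comment block added to struct msg_receiver documents an existing constraint rather than new behavior. In the lockless receive path, do_msgrcv() reloads r_msg after waking without retaking the queue lock, racing against a pipelined sender storing into the same field; the volatile qualifier forces a real load each time. A condensed sketch of the reader side (simplified from do_msgrcv(), not a verbatim excerpt):

	schedule();
	/* lockless receive: a racing sender may already have set r_msg */
	msg = (struct msg_msg *)msr_d.r_msg;	/* must be an actual load */
	if (msg != ERR_PTR(-EAGAIN))
		goto out_unlock;	/* message was handed over directly */

Without volatile, the compiler would be free to reuse a stale value cached in a register.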
@@ -70,75 +74,6 @@
 
 #define msg_ids(ns)	((ns)->ids[IPC_MSG_IDS])
 
-static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
-static int newque(struct ipc_namespace *, struct ipc_params *);
-#ifdef CONFIG_PROC_FS
-static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
-#endif
-
-/*
- * Scale msgmni with the available lowmem size: the memory dedicated to msg
- * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
- * Also take into account the number of nsproxies created so far.
- * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
- */
-void recompute_msgmni(struct ipc_namespace *ns)
-{
-	struct sysinfo i;
-	unsigned long allowed;
-	int nb_ns;
-
-	si_meminfo(&i);
-	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
-		/ MSGMNB;
-	nb_ns = atomic_read(&nr_ipc_ns);
-	allowed /= nb_ns;
-
-	if (allowed < MSGMNI) {
-		ns->msg_ctlmni = MSGMNI;
-		return;
-	}
-
-	if (allowed > IPCMNI / nb_ns) {
-		ns->msg_ctlmni = IPCMNI / nb_ns;
-		return;
-	}
-
-	ns->msg_ctlmni = allowed;
-}
-
-void msg_init_ns(struct ipc_namespace *ns)
-{
-	ns->msg_ctlmax = MSGMAX;
-	ns->msg_ctlmnb = MSGMNB;
-
-	recompute_msgmni(ns);
-
-	atomic_set(&ns->msg_bytes, 0);
-	atomic_set(&ns->msg_hdrs, 0);
-	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
-}
-
-#ifdef CONFIG_IPC_NS
-void msg_exit_ns(struct ipc_namespace *ns)
-{
-	free_ipcs(ns, &msg_ids(ns), freeque);
-	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
-}
-#endif
-
-void __init msg_init(void)
-{
-	msg_init_ns(&init_ipc_ns);
-
-	printk(KERN_INFO "msgmni has been set to %d\n",
-		init_ipc_ns.msg_ctlmni);
-
-	ipc_init_proc_interface("sysvipc/msg",
-		" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
-		IPC_MSG_IDS, sysvipc_msg_proc_show);
-}
-
 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
 {
 	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
@@ -227,7 +162,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
 {
 	mss->tsk = current;
-	current->state = TASK_INTERRUPTIBLE;
+	__set_current_state(TASK_INTERRUPTIBLE);
 	list_add_tail(&mss->list, &msq->q_senders);
 }
 
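Note: the open-coded `current->state = TASK_INTERRUPTIBLE;` stores here and in do_msgrcv() below are converted to the standard helper. For reference, set_current_state() is the variant that adds a memory barrier after the store, while __set_current_state() is the plain store; both call sites here run under the ipc object lock before the matching schedule(), so the barrier-less form suffices. The canonical kernel wait loop, for contrast (generic idiom, not from this patch):

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* store + smp barrier */
		if (condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);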
@@ -306,15 +241,14 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops msg_ops;
+	static const struct ipc_ops msg_ops = {
+		.getnew = newque,
+		.associate = msg_security,
+	};
 	struct ipc_params msg_params;
 
 	ns = current->nsproxy->ipc_ns;
 
-	msg_ops.getnew = newque;
-	msg_ops.associate = msg_security;
-	msg_ops.more_checks = NULL;
-
 	msg_params.key = key;
 	msg_params.flg = msgflg;
 
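Note: msgget() here, and semget()/shmget() below, stop filling a stack copy of struct ipc_ops on every syscall and instead use a function-local static const table with designated initializers. The table is emitted once into .rodata, members left out (such as .more_checks for msg) are implicitly NULL, and the added const is what forces the `const struct ipc_ops *` signature changes in ipc/util.c and ipc/util.h at the end of this diff. The general shape of the idiom (illustrative only, names made up):

	static const struct my_ops ops = {
		.getnew    = my_getnew,	/* unnamed members default to NULL/0 */
		.associate = my_associate,
	};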
@@ -612,23 +546,22 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
 
 static int testmsg(struct msg_msg *msg, long type, int mode)
 {
-	switch (mode)
-	{
-		case SEARCH_ANY:
-		case SEARCH_NUMBER:
+	switch (mode) {
+	case SEARCH_ANY:
+	case SEARCH_NUMBER:
+		return 1;
+	case SEARCH_LESSEQUAL:
+		if (msg->m_type <= type)
 			return 1;
-		case SEARCH_LESSEQUAL:
-			if (msg->m_type <= type)
-				return 1;
-			break;
-		case SEARCH_EQUAL:
-			if (msg->m_type == type)
-				return 1;
-			break;
-		case SEARCH_NOTEQUAL:
-			if (msg->m_type != type)
-				return 1;
-			break;
+		break;
+	case SEARCH_EQUAL:
+		if (msg->m_type == type)
+			return 1;
+		break;
+	case SEARCH_NOTEQUAL:
+		if (msg->m_type != type)
+			return 1;
+		break;
 	}
 	return 0;
 }
@@ -978,7 +911,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		else
 			msr_d.r_maxsize = bufsz;
 		msr_d.r_msg = ERR_PTR(-EAGAIN);
-		current->state = TASK_INTERRUPTIBLE;
+		__set_current_state(TASK_INTERRUPTIBLE);
 
 		ipc_unlock_object(&msq->q_perm);
 		rcu_read_unlock();
@@ -1056,6 +989,57 @@ SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
 	return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
 }
 
+/*
+ * Scale msgmni with the available lowmem size: the memory dedicated to msg
+ * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
+ * Also take into account the number of nsproxies created so far.
+ * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
+ */
+void recompute_msgmni(struct ipc_namespace *ns)
+{
+	struct sysinfo i;
+	unsigned long allowed;
+	int nb_ns;
+
+	si_meminfo(&i);
+	allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
+		/ MSGMNB;
+	nb_ns = atomic_read(&nr_ipc_ns);
+	allowed /= nb_ns;
+
+	if (allowed < MSGMNI) {
+		ns->msg_ctlmni = MSGMNI;
+		return;
+	}
+
+	if (allowed > IPCMNI / nb_ns) {
+		ns->msg_ctlmni = IPCMNI / nb_ns;
+		return;
+	}
+
+	ns->msg_ctlmni = allowed;
+}
+
+void msg_init_ns(struct ipc_namespace *ns)
+{
+	ns->msg_ctlmax = MSGMAX;
+	ns->msg_ctlmnb = MSGMNB;
+
+	recompute_msgmni(ns);
+
+	atomic_set(&ns->msg_bytes, 0);
+	atomic_set(&ns->msg_hdrs, 0);
+	ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
+}
+
+#ifdef CONFIG_IPC_NS
+void msg_exit_ns(struct ipc_namespace *ns)
+{
+	free_ipcs(ns, &msg_ids(ns), freeque);
+	idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
+}
+#endif
+
 #ifdef CONFIG_PROC_FS
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 {
@@ -1080,3 +1064,15 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
 		msq->q_ctime);
 }
 #endif
+
+void __init msg_init(void)
+{
+	msg_init_ns(&init_ipc_ns);
+
+	printk(KERN_INFO "msgmni has been set to %d\n",
+		init_ipc_ns.msg_ctlmni);
+
+	ipc_init_proc_interface("sysvipc/msg",
+		" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
+		IPC_MSG_IDS, sysvipc_msg_proc_show);
+}
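Note: recompute_msgmni(), msg_init_ns(), msg_exit_ns() and msg_init() are moved verbatim toward the bottom of msg.c so the forward declarations at the top of the file can be dropped; the logic is untouched. To make the scaling formula concrete, a worked example using this era's defaults (assumption: MSG_MEM_SCALE = 32, MSGMNB = 16384, MSGMNI = 16, IPCMNI = 32768) on a machine with 1 GiB of lowmem and a single ipc namespace:

	allowed = (2^30 / 32) / 16384 / 1 = 2048
	16 <= 2048 <= 32768/1, so msg_ctlmni = 2048

With 128 MiB of lowmem the same arithmetic yields 256; below 8 MiB the result would drop under MSGMNI and be clamped up to 16.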
diff --git a/ipc/sem.c b/ipc/sem.c
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -47,8 +47,7 @@
  *   Thus: Perfect SMP scaling between independent semaphore arrays.
  *         If multiple semaphores in one array are used, then cache line
  *         trashing on the semaphore array spinlock will limit the scaling.
- * - semncnt and semzcnt are calculated on demand in count_semncnt() and
- *   count_semzcnt()
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
 * - the task that performs a successful semop() scans the list of all
 *   sleeping tasks and completes any pending operations that can be fulfilled.
 *   Semaphores are actively given to waiting tasks (necessary for FIFO).
@@ -87,7 +86,7 @@
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include "util.h"
 
 /* One semaphore structure for each semaphore in the system. */
@@ -110,6 +109,7 @@ struct sem_queue {
 	int			pid;	 /* process id of requesting process */
 	int			status;	 /* completion status of operation */
 	struct sembuf		*sops;	 /* array of pending operations */
+	struct sembuf		*blocking; /* the operation that blocked */
 	int			nsops;	 /* number of operations */
 	int			alter;	 /* does *sops alter the array? */
 };
@@ -160,7 +160,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 *	sem_array.pending{_alter,_cont},
 *	sem_array.sem_undo: global sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
- *	
+ *
 *	sem_array.sem_base[i].pending_{const,alter}:
 *		global or semaphore sem_lock() for read/write
 */
@@ -564,7 +564,11 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops sem_ops;
+	static const struct ipc_ops sem_ops = {
+		.getnew = newary,
+		.associate = sem_security,
+		.more_checks = sem_more_checks,
+	};
 	struct ipc_params sem_params;
 
 	ns = current->nsproxy->ipc_ns;
@@ -572,10 +576,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	if (nsems < 0 || nsems > ns->sc_semmsl)
 		return -EINVAL;
 
-	sem_ops.getnew = newary;
-	sem_ops.associate = sem_security;
-	sem_ops.more_checks = sem_more_checks;
-
 	sem_params.key = key;
 	sem_params.flg = semflg;
 	sem_params.u.nsems = nsems;
@@ -586,21 +586,23 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 /**
  * perform_atomic_semop - Perform (if possible) a semaphore operation
  * @sma: semaphore array
- * @sops: array with operations that should be checked
- * @nsops: number of operations
- * @un: undo array
- * @pid: pid that did the change
+ * @q: struct sem_queue that describes the operation
  *
  * Returns 0 if the operation was possible.
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
-static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
-				int nsops, struct sem_undo *un, int pid)
+static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
 {
-	int result, sem_op;
+	int result, sem_op, nsops, pid;
 	struct sembuf *sop;
 	struct sem *curr;
+	struct sembuf *sops;
+	struct sem_undo *un;
+
+	sops = q->sops;
+	nsops = q->nsops;
+	un = q->undo;
 
 	for (sop = sops; sop < sops + nsops; sop++) {
 		curr = sma->sem_base + sop->sem_num;
@@ -628,6 +630,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 	}
 
 	sop--;
+	pid = q->pid;
 	while (sop >= sops) {
 		sma->sem_base[sop->sem_num].sempid = pid;
 		sop--;
@@ -640,6 +643,8 @@ out_of_range:
 	goto undo;
 
 would_block:
+	q->blocking = sop;
+
 	if (sop->sem_flg & IPC_NOWAIT)
 		result = -EAGAIN;
 	else
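Note: perform_atomic_semop() now receives the whole struct sem_queue rather than four fields pulled out of it, which is what allows the would_block path to record the operation that stalled in the new q->blocking member; count_semcnt() further down depends on that record. The call sites shrink accordingly:

	/* before */
	error = perform_atomic_semop(sma, q->sops, q->nsops,
				     q->undo, q->pid);
	/* after */
	error = perform_atomic_semop(sma, q);

The one caller without a ready-made queue, semtimedop(), now fills in its on-stack queue before the first attempt instead of only when it has to sleep.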
@@ -780,8 +785,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 		q = container_of(walk, struct sem_queue, list);
 		walk = walk->next;
 
-		error = perform_atomic_semop(sma, q->sops, q->nsops,
-					 q->undo, q->pid);
+		error = perform_atomic_semop(sma, q);
 
 		if (error <= 0) {
 			/* operation completed, remove from queue & wakeup */
@@ -893,8 +897,7 @@ again:
 		if (semnum != -1 && sma->sem_base[semnum].semval == 0)
 			break;
 
-		error = perform_atomic_semop(sma, q->sops, q->nsops,
-					 q->undo, q->pid);
+		error = perform_atomic_semop(sma, q);
 
 		/* Does q->sleeper still need to sleep? */
 		if (error > 0)
@@ -989,65 +992,74 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
 		set_semotime(sma, sops);
 }
 
-/* The following counts are associated to each semaphore:
- *   semncnt        number of tasks waiting on semval being nonzero
- *   semzcnt        number of tasks waiting on semval being zero
- * This model assumes that a task waits on exactly one semaphore.
- * Since semaphore operations are to be performed atomically, tasks actually
- * wait on a whole sequence of semaphores simultaneously.
- * The counts we return here are a rough approximation, but still
- * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
+/*
+ * check_qop: Test if a queued operation sleeps on the semaphore semnum
  */
-static int count_semncnt(struct sem_array *sma, ushort semnum)
+static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
+			bool count_zero)
 {
-	int semncnt;
-	struct sem_queue *q;
+	struct sembuf *sop = q->blocking;
 
-	semncnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		BUG_ON(sops->sem_num != semnum);
-		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
-			semncnt++;
-	}
+	/*
+	 * Linux always (since 0.99.10) reported a task as sleeping on all
+	 * semaphores. This violates SUS, therefore it was changed to the
+	 * standard compliant behavior.
+	 * Give the administrators a chance to notice that an application
+	 * might misbehave because it relies on the Linux behavior.
+	 */
+	pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
+			"The task %s (%d) triggered the difference, watch for misbehavior.\n",
+			current->comm, task_pid_nr(current));
 
-	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op < 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semncnt++;
-	}
-	return semncnt;
+	if (sop->sem_num != semnum)
+		return 0;
+
+	if (count_zero && sop->sem_op == 0)
+		return 1;
+	if (!count_zero && sop->sem_op < 0)
+		return 1;
+
+	return 0;
 }
 
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
+/* The following counts are associated to each semaphore:
+ *   semncnt        number of tasks waiting on semval being nonzero
+ *   semzcnt        number of tasks waiting on semval being zero
+ *
+ * Per definition, a task waits only on the semaphore of the first semop
+ * that cannot proceed, even if additional operation would block, too.
+ */
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+			bool count_zero)
 {
-	int semzcnt;
+	struct list_head *l;
 	struct sem_queue *q;
+	int semcnt;
+
+	semcnt = 0;
+	/* First: check the simple operations. They are easy to evaluate */
+	if (count_zero)
+		l = &sma->sem_base[semnum].pending_const;
+	else
+		l = &sma->sem_base[semnum].pending_alter;
 
-	semzcnt = 0;
-	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-		struct sembuf *sops = q->sops;
-		BUG_ON(sops->sem_num != semnum);
-		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
-			semzcnt++;
+	list_for_each_entry(q, l, list) {
+		/* all task on a per-semaphore list sleep on exactly
+		 * that semaphore
+		 */
+		semcnt++;
 	}
 
-	list_for_each_entry(q, &sma->pending_const, list) {
-		struct sembuf *sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op == 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semzcnt++;
+	/* Then: check the complex operations. */
+	list_for_each_entry(q, &sma->pending_alter, list) {
+		semcnt += check_qop(sma, semnum, q, count_zero);
 	}
-	return semzcnt;
+	if (count_zero) {
+		list_for_each_entry(q, &sma->pending_const, list) {
+			semcnt += check_qop(sma, semnum, q, count_zero);
+		}
+	}
+	return semcnt;
 }
 
 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
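Note: this hunk carries the only user-visible change of the series. The old count_semncnt()/count_semzcnt() counted a sleeping task once for every matching entry in its sops[] array; count_semcnt() counts it only for q->blocking, the first operation that could not proceed, which is what SUS requires. A hypothetical illustration, with both semaphores currently at 0:

	struct sembuf sops[2] = {
		{ .sem_num = 0, .sem_op = -1 },	/* first op blocks */
		{ .sem_num = 1, .sem_op = -1 },
	};
	semop(semid, sops, 2);
	/* before: GETNCNT reports 1 for semaphore 0 and for semaphore 1 */
	/* after:  GETNCNT reports 1 for semaphore 0 only */

hence the pr_info_once() that warns administrators the accounting changed in 3.16.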
@@ -1161,7 +1173,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		err = security_sem_semctl(NULL, cmd);
 		if (err)
 			return err;
-		
+
 		memset(&seminfo, 0, sizeof(seminfo));
 		seminfo.semmni = ns->sc_semmni;
 		seminfo.semmns = ns->sc_semmns;
@@ -1181,7 +1193,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		}
 		max_id = ipc_get_maxid(&sem_ids(ns));
 		up_read(&sem_ids(ns).rwsem);
-		if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) 
+		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0 : max_id;
 	}
@@ -1449,10 +1461,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->sempid;
 		goto out_unlock;
 	case GETNCNT:
-		err = count_semncnt(sma, semnum);
+		err = count_semcnt(sma, semnum, 0);
 		goto out_unlock;
 	case GETZCNT:
-		err = count_semzcnt(sma, semnum);
+		err = count_semcnt(sma, semnum, 1);
 		goto out_unlock;
 	}
 
@@ -1866,8 +1878,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	if (un && un->semid == -1)
 		goto out_unlock_free;
 
-	error = perform_atomic_semop(sma, sops, nsops, un,
-					task_tgid_vnr(current));
+	queue.sops = sops;
+	queue.nsops = nsops;
+	queue.undo = un;
+	queue.pid = task_tgid_vnr(current);
+	queue.alter = alter;
+
+	error = perform_atomic_semop(sma, &queue);
 	if (error == 0) {
 		/* If the operation was successful, then do
 		 * the required updates.
@@ -1883,12 +1900,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	/* We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
 	 */
-
-	queue.sops = sops;
-	queue.nsops = nsops;
-	queue.undo = un;
-	queue.pid = task_tgid_vnr(current);
-	queue.alter = alter;
 
 	if (nsops == 1) {
 		struct sem *curr;
@@ -2016,7 +2027,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 			return error;
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
-	} else 
+	} else
 		tsk->sysvsem.undo_list = NULL;
 
 	return 0;
diff --git a/ipc/shm.c b/ipc/shm.c
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -43,7 +43,7 @@
 #include <linux/mount.h>
 #include <linux/ipc_namespace.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "util.h"
 
@@ -493,7 +493,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	if (size < SHMMIN || size > ns->shm_ctlmax)
 		return -EINVAL;
 
-	if (ns->shm_tot + numpages > ns->shm_ctlall)
+	if (numpages << PAGE_SHIFT < size)
+		return -ENOSPC;
+
+	if (ns->shm_tot + numpages < ns->shm_tot ||
+			ns->shm_tot + numpages > ns->shm_ctlall)
 		return -ENOSPC;
 
 	shp = ipc_rcu_alloc(sizeof(*shp));
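Note: both additions to newseg() are integer-overflow guards. numpages is the page-rounded size, so if shifting it back up with `numpages << PAGE_SHIFT` produces something smaller than size, the rounding itself wrapped. The second test is the standard unsigned-wraparound idiom: for unsigned values, a + b < a holds exactly when the sum wrapped past the type's maximum. A minimal sketch of that idiom (illustrative):

	unsigned long total = ns->shm_tot + numpages;
	if (total < ns->shm_tot)	/* sum wrapped around */
		return -ENOSPC;

Without it, an absurdly large size request could slip under the shm_ctlall limit with a wrapped sum.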
@@ -609,15 +613,15 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
 {
 	struct ipc_namespace *ns;
-	struct ipc_ops shm_ops;
+	static const struct ipc_ops shm_ops = {
+		.getnew = newseg,
+		.associate = shm_security,
+		.more_checks = shm_more_checks,
+	};
 	struct ipc_params shm_params;
 
 	ns = current->nsproxy->ipc_ns;
 
-	shm_ops.getnew = newseg;
-	shm_ops.associate = shm_security;
-	shm_ops.more_checks = shm_more_checks;
-
 	shm_params.key = key;
 	shm_params.flg = shmflg;
 	shm_params.u.size = size;
@@ -694,7 +698,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
 	out.shmmin	= in->shmmin;
 	out.shmmni	= in->shmmni;
 	out.shmseg	= in->shmseg;
-	out.shmall	= in->shmall; 
+	out.shmall	= in->shmall;
 
 	return copy_to_user(buf, &out, sizeof(out));
 }
@@ -1160,6 +1164,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
 	down_write(&current->mm->mmap_sem);
 	if (addr && !(shmflg & SHM_REMAP)) {
 		err = -EINVAL;
+		if (addr + size < addr)
+			goto invalid;
+
 		if (find_vma_intersection(current->mm, addr, addr + size))
 			goto invalid;
 		/*
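Note: the same wraparound concern, now on the attach side. With a caller-chosen addr near the top of the address space, addr + size can wrap, and find_vma_intersection(mm, addr, addr + size) would then be asked about an inverted, effectively empty range and report no conflict. Hypothetical 32-bit example:

	addr = 0xfff00000, size = 0x00200000
	addr + size wraps to 0x00100000 -> intersection check sees nothing

The new test rejects such a request with -EINVAL before any mapping is attempted.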
diff --git a/ipc/util.c b/ipc/util.c
index 2eb0d1eaa312..27d74e69fd57 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -183,7 +183,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 * ipc_findkey	- find a key in an ipc identifier set
 * @ids: ipc identifier set
 * @key: key to find
- *	
+ *
 * Returns the locked pointer to the ipc structure if found or NULL
 * otherwise. If key is found ipc points to the owning ipc structure
 *
@@ -317,7 +317,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 * when the key is IPC_PRIVATE.
 */
 static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params)
+		const struct ipc_ops *ops, struct ipc_params *params)
 {
 	int err;
 
@@ -344,7 +344,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
 */
 static int ipc_check_perms(struct ipc_namespace *ns,
 			   struct kern_ipc_perm *ipcp,
-			   struct ipc_ops *ops,
+			   const struct ipc_ops *ops,
 			   struct ipc_params *params)
 {
 	int err;
@@ -375,7 +375,7 @@ static int ipc_check_perms(struct ipc_namespace *ns,
 * On success, the ipc id is returned.
 */
 static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params)
+		const struct ipc_ops *ops, struct ipc_params *params)
 {
 	struct kern_ipc_perm *ipcp;
 	int flg = params->flg;
@@ -538,7 +538,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
 	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
 		granted_mode >>= 3;
 	/* is there some bit set in requested_mode but not in granted_mode? */
-	if ((requested_mode & ~granted_mode & 0007) && 
+	if ((requested_mode & ~granted_mode & 0007) &&
 	    !ns_capable(ns->user_ns, CAP_IPC_OWNER))
 		return -1;
 
@@ -678,7 +678,7 @@ out:
 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
 */
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params)
+		const struct ipc_ops *ops, struct ipc_params *params)
 {
 	if (params->key == IPC_PRIVATE)
 		return ipcget_new(ns, ids, ops, params);
diff --git a/ipc/util.h b/ipc/util.h
index 9c47d6f6c7b4..1a5a0fcd099c 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -78,9 +78,9 @@ struct ipc_params {
 *	. routine to call for an extra check if needed
 */
 struct ipc_ops {
-	int (*getnew) (struct ipc_namespace *, struct ipc_params *);
-	int (*associate) (struct kern_ipc_perm *, int);
-	int (*more_checks) (struct kern_ipc_perm *, struct ipc_params *);
+	int (*getnew)(struct ipc_namespace *, struct ipc_params *);
+	int (*associate)(struct kern_ipc_perm *, int);
+	int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *);
 };
 
 struct seq_file;
@@ -142,7 +142,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
 			struct ipc64_perm *perm, int extra_perm);
 
 #ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
-/* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
+/* On IA-64, we always use the "64-bit version" of the IPC structures.  */
 # define ipc_parse_version(cmd)	IPC_64
 #else
 int ipc_parse_version(int *cmd);
@@ -201,7 +201,7 @@ static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
 
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
 int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params);
+		const struct ipc_ops *ops, struct ipc_params *params);
 void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
 		void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));
 #endif