path: root/kernel
author	Mikulas Patocka <mpatocka@redhat.com>	2014-04-14 16:58:55 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-04-26 20:15:34 -0400
commit	98afe6dfdef0ef9df6e21cdd9d977bfc6147b0a9 (patch)
tree	8597f71a2954e658adb0907a316c50fcce140695 /kernel
parent	f512eefd5cde0ad21bd99bbfe4dc70b62805838e (diff)
user namespace: fix incorrect memory barriers
commit e79323bd87808fdfbc68ce6c5371bd224d9672ee upstream.

smp_read_barrier_depends() can be used if there is a data dependency between the readers - i.e. if the read operation after the barrier uses an address that was obtained from the read operation before the barrier.

In this file there is only a control dependency, no data dependency, so the use of smp_read_barrier_depends() is incorrect. The code could fail in the following way:

* the cpu predicts that idx < extents is true and starts executing the body of the for loop
* the cpu fetches map->extent[0].first and map->extent[0].count
* the cpu fetches map->nr_extents
* the cpu verifies that idx < extents is true, so it commits the instructions in the body of the for loop

The problem is that in this scenario the cpu read map->extent[0].first before it read map->nr_extents. We need a full read memory barrier (smp_rmb()) to prevent that reordering.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
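Illustration (not part of the commit): the barrier pairing the fix relies on can be sketched outside the kernel. The snippet below is a minimal, hypothetical example - struct ext_map, publish() and lookup() are made-up names, and C11 atomic fences stand in for the kernel's smp_wmb()/smp_rmb() macros; it is not the uid_gid_map code being patched. The writer fills an extent and only then publishes the count behind a write barrier; the reader loads the count, issues a full read barrier, and only then walks the array, which is the ordering the patch restores on the read side.

#include <stdatomic.h>
#include <stdio.h>

struct extent  { unsigned first, count; };
struct ext_map {
	_Atomic unsigned nr_extents;	/* published element count        */
	struct extent    extent[5];	/* data guarded by that count      */
};

static struct ext_map map;

/* Writer: fill the new slot, then publish the count behind a write barrier. */
static void publish(unsigned first, unsigned count)
{
	unsigned n = atomic_load_explicit(&map.nr_extents, memory_order_relaxed);

	map.extent[n].first = first;
	map.extent[n].count = count;
	atomic_thread_fence(memory_order_release);	/* stands in for smp_wmb() */
	atomic_store_explicit(&map.nr_extents, n + 1, memory_order_relaxed);
}

/*
 * Reader: load the count, then a full read barrier, then walk the array.
 * A dependency-only barrier would not order these two reads, because the
 * loop bound is only a control dependency, not an address dependency.
 */
static int lookup(unsigned id)
{
	unsigned extents = atomic_load_explicit(&map.nr_extents, memory_order_relaxed);

	atomic_thread_fence(memory_order_acquire);	/* stands in for smp_rmb() */
	for (unsigned idx = 0; idx < extents; idx++) {
		unsigned first = map.extent[idx].first;
		unsigned last  = first + map.extent[idx].count - 1;

		if (id >= first && id <= last)
			return (int)idx;
	}
	return -1;
}

int main(void)
{
	publish(1000, 100);
	printf("id 1042 -> extent %d\n", lookup(1042));	/* prints extent 0 */
	return 0;
}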
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/user_namespace.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 9064b919a406..9bea1d7dd21f 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -148,7 +148,7 @@ static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -172,7 +172,7 @@ static u32 map_id_down(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].first;
 		last = first + map->extent[idx].count - 1;
@@ -195,7 +195,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
 
 	/* Find the matching extent */
 	extents = map->nr_extents;
-	smp_read_barrier_depends();
+	smp_rmb();
 	for (idx = 0; idx < extents; idx++) {
 		first = map->extent[idx].lower_first;
 		last = first + map->extent[idx].count - 1;
@@ -611,9 +611,8 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	 * were written before the count of the extents.
 	 *
 	 * To achieve this smp_wmb() is used on guarantee the write
-	 * order and smp_read_barrier_depends() is guaranteed that we
-	 * don't have crazy architectures returning stale data.
-	 *
+	 * order and smp_rmb() is guaranteed that we don't have crazy
+	 * architectures returning stale data.
 	 */
 	mutex_lock(&id_map_mutex);
 