path: root/net
author	NeilBrown <neilb@suse.de>	2013-06-12 22:53:42 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2013-07-01 17:42:53 -0400
commit	f9e1aedc6c79f18bb56caa3735b94217c4ec1e0c (patch)
tree	41ccb179f3d71ded6d035f3bc292f752c32f6995 /net
parent	d08d32e6e5c0fee127d3b20d70f5b59d8af0a261 (diff)
sunrpc/cache: remove races with queuing an upcall.
We currently queue an upcall after setting CACHE_PENDING, and dequeue after
clearing CACHE_PENDING.  So a request should only be present when
CACHE_PENDING is set.

However we don't combine the test and the enqueue/dequeue in a protected
region, so it is possible (if unlikely) for a race to result in a request
being queued without CACHE_PENDING set, or a request being absent despite
CACHE_PENDING.

So: include a test for CACHE_PENDING inside the regions of enqueue and
dequeue where queue_lock is held, and abort the operation if the value is
not as expected.

Also remove the early 'return' from cache_dequeue() to ensure that it
always removes all entries: as there is no locking between setting
CACHE_PENDING and calling sunrpc_cache_pipe_upcall, it is not
inconceivable for some other thread to clear CACHE_PENDING and then
someone else to set it and call sunrpc_cache_pipe_upcall, both before the
original thread completed the call.

With this it is perfectly safe and correct to:
 - call cache_dequeue() if and only if we have just cleared CACHE_PENDING
 - call sunrpc_cache_pipe_upcall() (via cache_make_upcall) if and only if
   we have just set CACHE_PENDING.

Reported-by: Bodo Stroesser <bstroesser@ts.fujitsu.com>
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Bodo Stroesser <bstroesser@ts.fujitsu.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
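For illustration only, here is a minimal userspace model (not the kernel code) of the discipline the commit message describes: a request is queued only by the thread that has just set CACHE_PENDING, dequeued only by the thread that has just cleared it, and both paths re-check the flag while holding the queue lock and abort if it has changed under them. All names below (pending, queue_lock, request_queued, make_upcall, dequeue) are illustrative stand-ins rather than the sunrpc symbols; the sketch builds with cc -pthread.

/*
 * Userspace sketch of "re-check the flag under the queue lock".
 * pending stands in for CACHE_PENDING, queue_lock for queue_lock,
 * request_queued for the presence of an upcall request on the queue.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;                  /* stands in for CACHE_PENDING */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool request_queued;                  /* stands in for the upcall queue */

/* Enqueue side: only queue if the flag is still set while we hold the lock. */
static int make_upcall(void)
{
	int ret = 0;

	pthread_mutex_lock(&queue_lock);
	if (atomic_load(&pending))
		request_queued = true;
	else
		ret = -1;            /* lost a race; caller frees its request */
	pthread_mutex_unlock(&queue_lock);
	return ret;
}

/* Dequeue side: give up if the flag was set again before we got the lock. */
static void dequeue(void)
{
	pthread_mutex_lock(&queue_lock);
	if (!atomic_load(&pending))
		request_queued = false;
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	/* "Just set" the flag, then queue: mirrors test_and_set_bit + upcall. */
	if (!atomic_exchange(&pending, true))
		make_upcall();

	/* "Just cleared" the flag, then dequeue: mirrors test_and_clear_bit. */
	if (atomic_exchange(&pending, false))
		dequeue();

	printf("request_queued = %d\n", request_queued);
	return 0;
}

The point of the re-check under the lock is that setting or clearing the flag and manipulating the queue are otherwise not atomic with respect to each other; that window is exactly what the patch closes in cache_dequeue() and sunrpc_cache_pipe_upcall().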
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/cache.c	40
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 3b3f14fc02c5..8e964ae8d7b5 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1036,23 +1036,32 @@ static int cache_release(struct inode *inode, struct file *filp,
 
 static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
 {
-	struct cache_queue *cq;
+	struct cache_queue *cq, *tmp;
+	struct cache_request *cr;
+	struct list_head dequeued;
+
+	INIT_LIST_HEAD(&dequeued);
 	spin_lock(&queue_lock);
-	list_for_each_entry(cq, &detail->queue, list)
+	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
 		if (!cq->reader) {
-			struct cache_request *cr = container_of(cq, struct cache_request, q);
+			cr = container_of(cq, struct cache_request, q);
 			if (cr->item != ch)
 				continue;
+			if (test_bit(CACHE_PENDING, &ch->flags))
+				/* Lost a race and it is pending again */
+				break;
 			if (cr->readers != 0)
 				continue;
-			list_del(&cr->q.list);
-			spin_unlock(&queue_lock);
-			cache_put(cr->item, detail);
-			kfree(cr->buf);
-			kfree(cr);
-			return;
+			list_move(&cr->q.list, &dequeued);
 		}
 	spin_unlock(&queue_lock);
+	while (!list_empty(&dequeued)) {
+		cr = list_entry(dequeued.next, struct cache_request, q.list);
+		list_del(&cr->q.list);
+		cache_put(cr->item, detail);
+		kfree(cr->buf);
+		kfree(cr);
+	}
 }
 
 /*
@@ -1166,6 +1175,7 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
 
 	char *buf;
 	struct cache_request *crq;
+	int ret = 0;
 
 	if (!detail->cache_request)
 		return -EINVAL;
@@ -1191,10 +1201,18 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
 	crq->len = 0;
 	crq->readers = 0;
 	spin_lock(&queue_lock);
-	list_add_tail(&crq->q.list, &detail->queue);
+	if (test_bit(CACHE_PENDING, &h->flags))
+		list_add_tail(&crq->q.list, &detail->queue);
+	else
+		/* Lost a race, no longer PENDING, so don't enqueue */
+		ret = -EAGAIN;
 	spin_unlock(&queue_lock);
 	wake_up(&queue_wait);
-	return 0;
+	if (ret == -EAGAIN) {
+		kfree(buf);
+		kfree(crq);
+	}
+	return ret;
 }
 EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);
 