summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDongli Zhang <dongli.zhang@oracle.com>2019-02-24 10:17:27 -0500
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2019-02-24 10:17:56 -0500
commit4a8c31a1c6f526ec96a35e613f2a71e26ffbd7dd (patch)
tree78b6973f8ef763634ab1a235adacd896f43e6ba0
parent5b8e432dbb0e20bd8e99033f4a0bfa0d38c0e08e (diff)
xen/blkback: rework connect_ring() to avoid inconsistent xenstore 'ring-page-order' set by malicious blkfront
The xenstore 'ring-page-order' is used globally for each blkback queue and therefore should be read from xenstore only once. However, it is obtained in read_per_ring_refs() which might be called multiple times during the initialization of each blkback queue. If the blkfront is malicious and 'ring-page-order' is set to a different value by blkfront each time before blkback reads it, this may end up at the "WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));" in xen_blkif_disconnect() when the frontend is destroyed. This patch reworks connect_ring() to read the xenstore 'ring-page-order' only once. Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-rw-r--r--drivers/block/xen-blkback/xenbus.c72
1 file changed, 43 insertions, 29 deletions
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4aadac772e5..24896ffb04ed 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
926 int err, i, j; 926 int err, i, j;
927 struct xen_blkif *blkif = ring->blkif; 927 struct xen_blkif *blkif = ring->blkif;
928 struct xenbus_device *dev = blkif->be->dev; 928 struct xenbus_device *dev = blkif->be->dev;
929 unsigned int ring_page_order, nr_grefs, evtchn; 929 unsigned int nr_grefs, evtchn;
930 930
931 err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u", 931 err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
932 &evtchn); 932 &evtchn);
@@ -936,43 +936,42 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
936 return err; 936 return err;
937 } 937 }
938 938
939 err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u", 939 nr_grefs = blkif->nr_ring_pages;
940 &ring_page_order); 940
941 if (err != 1) { 941 if (unlikely(!nr_grefs)) {
942 err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]); 942 WARN_ON(true);
943 return -EINVAL;
944 }
945
946 for (i = 0; i < nr_grefs; i++) {
947 char ring_ref_name[RINGREF_NAME_LEN];
948
949 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
950 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
951 "%u", &ring_ref[i]);
952
943 if (err != 1) { 953 if (err != 1) {
954 if (nr_grefs == 1)
955 break;
956
944 err = -EINVAL; 957 err = -EINVAL;
945 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); 958 xenbus_dev_fatal(dev, err, "reading %s/%s",
959 dir, ring_ref_name);
946 return err; 960 return err;
947 } 961 }
948 nr_grefs = 1; 962 }
949 } else { 963
950 unsigned int i; 964 if (err != 1) {
965 WARN_ON(nr_grefs != 1);
951 966
952 if (ring_page_order > xen_blkif_max_ring_order) { 967 err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
968 &ring_ref[0]);
969 if (err != 1) {
953 err = -EINVAL; 970 err = -EINVAL;
954 xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d", 971 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
955 dir, ring_page_order,
956 xen_blkif_max_ring_order);
957 return err; 972 return err;
958 } 973 }
959
960 nr_grefs = 1 << ring_page_order;
961 for (i = 0; i < nr_grefs; i++) {
962 char ring_ref_name[RINGREF_NAME_LEN];
963
964 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
965 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
966 "%u", &ring_ref[i]);
967 if (err != 1) {
968 err = -EINVAL;
969 xenbus_dev_fatal(dev, err, "reading %s/%s",
970 dir, ring_ref_name);
971 return err;
972 }
973 }
974 } 974 }
975 blkif->nr_ring_pages = nr_grefs;
976 975
977 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { 976 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
978 req = kzalloc(sizeof(*req), GFP_KERNEL); 977 req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1031,6 +1030,7 @@ static int connect_ring(struct backend_info *be)
1031 size_t xspathsize; 1030 size_t xspathsize;
1032 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ 1031 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
1033 unsigned int requested_num_queues = 0; 1032 unsigned int requested_num_queues = 0;
1033 unsigned int ring_page_order;
1034 1034
1035 pr_debug("%s %s\n", __func__, dev->otherend); 1035 pr_debug("%s %s\n", __func__, dev->otherend);
1036 1036
@@ -1076,6 +1076,20 @@ static int connect_ring(struct backend_info *be)
1076 blkif->nr_rings, blkif->blk_protocol, protocol, 1076 blkif->nr_rings, blkif->blk_protocol, protocol,
1077 pers_grants ? "persistent grants" : ""); 1077 pers_grants ? "persistent grants" : "");
1078 1078
1079 ring_page_order = xenbus_read_unsigned(dev->otherend,
1080 "ring-page-order", 0);
1081
1082 if (ring_page_order > xen_blkif_max_ring_order) {
1083 err = -EINVAL;
1084 xenbus_dev_fatal(dev, err,
1085 "requested ring page order %d exceed max:%d",
1086 ring_page_order,
1087 xen_blkif_max_ring_order);
1088 return err;
1089 }
1090
1091 blkif->nr_ring_pages = 1 << ring_page_order;
1092
1079 if (blkif->nr_rings == 1) 1093 if (blkif->nr_rings == 1)
1080 return read_per_ring_refs(&blkif->rings[0], dev->otherend); 1094 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
1081 else { 1095 else {