Diffstat (limited to 'drivers/s390/block/scm_blk_cluster.c')
 -rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 69
 1 file changed, 47 insertions(+), 22 deletions(-)
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
 	scmrq->cluster.state = CLUSTER_NONE;
 }
 
-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
 {
 	unsigned long firstA, lastA, firstB, lastB;
 
-	firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
-	lastA = (((u64) blk_rq_pos(A->request) << 9) +
-		blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+	lastA = (((u64) blk_rq_pos(A) << 9) +
+		blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
 
-	firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
-	lastB = (((u64) blk_rq_pos(B->request) << 9) +
-		blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+	lastB = (((u64) blk_rq_pos(B) << 9) +
+		blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
 
 	return (firstB <= lastA && firstA <= lastB);
 }
 
 bool scm_reserve_cluster(struct scm_request *scmrq)
 {
+	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_request *iter;
+	int pos, add = 1;
 
 	if (write_cluster_size == 0)
 		return true;
 
 	spin_lock(&bdev->lock);
 	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-		if (clusters_intersect(scmrq, iter) &&
-		    (rq_data_dir(scmrq->request) == WRITE ||
-		     rq_data_dir(iter->request) == WRITE)) {
-			spin_unlock(&bdev->lock);
-			return false;
+		if (iter == scmrq) {
+			/*
+			 * We don't have to use clusters_intersect here, since
+			 * cluster requests are always started separately.
+			 */
+			add = 0;
+			continue;
+		}
+		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+			if (clusters_intersect(req, iter->request[pos]) &&
+			    (rq_data_dir(req) == WRITE ||
+			     rq_data_dir(iter->request[pos]) == WRITE)) {
+				spin_unlock(&bdev->lock);
+				return false;
+			}
 		}
 	}
-	list_add(&scmrq->cluster.list, &bdev->cluster_list);
+	if (add)
+		list_add(&scmrq->cluster.list, &bdev->cluster_list);
 	spin_unlock(&bdev->lock);
 
 	return true;
@@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
 	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
 }
 
-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct request *req = scmrq->request;
-	struct aidaw *aidaw = scmrq->aidaw;
+	struct request *req = scmrq->request[0];
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 	int i = 0;
 	u64 addr;
@@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
-		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
 		msb->flags = MSB_FLAG_IDA;
-		msb->data_addr = (u64) aidaw;
 		msb->blk_count = write_cluster_size;
 
 		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 						CLUSTER_SIZE))
 			msb->blk_count = 2 * write_cluster_size;
 
+		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+		if (!aidaw)
+			return -ENOMEM;
+
+		scmrq->aob->request.msb_count = 1;
+		msb->data_addr = (u64) aidaw;
 		for (i = 0; i < msb->blk_count; i++) {
 			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
 			aidaw++;
@@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 
 		break;
 	case CLUSTER_WRITE:
+		aidaw = (void *) msb->data_addr;
 		msb->oc = MSB_OC_WRITE;
 
 		for (addr = msb->scm_addr;
@@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		}
 		break;
 	}
+	return 0;
 }
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
 {
-	if (rq_data_dir(scmrq->request) == READ)
+	int pos = scmrq->aob->request.msb_count;
+
+	if (rq_data_dir(scmrq->request[pos]) == READ)
 		return false;
 
-	return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
 }
 
 /* Called with queue lock held. */
 void scm_initiate_cluster_request(struct scm_request *scmrq)
 {
-	scm_prepare_cluster_request(scmrq);
+	if (scm_prepare_cluster_request(scmrq))
+		goto requeue;
 	if (eadm_start_aob(scmrq->aob))
-		scm_request_requeue(scmrq);
+		goto requeue;
+	return;
+requeue:
+	scm_request_requeue(scmrq);
 }
 
 bool scm_test_cluster_request(struct scm_request *scmrq)
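
For reference: the rejection test that scm_reserve_cluster() applies to every request packed into an in-flight scm_request reduces to the closed-interval intersection on cluster indices computed by clusters_intersect(). A minimal standalone sketch of that arithmetic follows; the CLUSTER_SIZE value and the helper name ranges_share_cluster are hypothetical, chosen only to illustrate the check, and are not part of the patch above.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical cluster size; the driver derives the real one from write_cluster_size pages. */
#define CLUSTER_SIZE (64 * 1024UL)

/*
 * posA/posB are start positions in 512-byte sectors (as blk_rq_pos() returns),
 * bytesA/bytesB are request lengths in bytes (as blk_rq_bytes() returns).
 */
static bool ranges_share_cluster(uint64_t posA, uint64_t bytesA,
				 uint64_t posB, uint64_t bytesB)
{
	uint64_t firstA = (posA << 9) / CLUSTER_SIZE;
	uint64_t lastA = ((posA << 9) + bytesA - 1) / CLUSTER_SIZE;
	uint64_t firstB = (posB << 9) / CLUSTER_SIZE;
	uint64_t lastB = ((posB << 9) + bytesB - 1) / CLUSTER_SIZE;

	/* Closed intervals [firstA, lastA] and [firstB, lastB] intersect iff: */
	return firstB <= lastA && firstA <= lastB;
}

With the 64 KiB value above, two 4 KiB writes at byte offsets 0 and 60 KiB both map to cluster 0, so the second is held back until the first completes; a read overlapping a read is never rejected, since the reservation only matters when at least one side is a write.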