author		Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 20:30:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-11 20:30:55 -0500
commit		27afc5dbda52ee3dbcd0bda7375c917c6936b470 (patch)
tree		47591400f85590d48fa71bbfa50e0707e20e4bd0 /drivers/s390
parent		70e71ca0af244f48a5dcf56dc435243792e3a495 (diff)
parent		351997810131565fe62aec2c366deccbf6bda3f4 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The most notable change for this pull request is the ftrace rework from
  Heiko. It brings a small performance improvement and the ground work to
  support a new gcc option to replace the mcount blocks with a single nop.

  Two new s390 specific system calls are added to emulate user space mmio
  for PCI, an artifact of how PCI memory is accessed.

  Two patches for the memory management with changes to common code. For
  KVM, mm_forbids_zeropage is added, which disables the empty zero page
  for an mm that is used by a KVM process. And an optimization,
  pmdp_get_and_clear_full, is added analogous to ptep_get_and_clear_full.

  Some micro optimizations for the cmpxchg and the spinlock code.

  And as usual, bug fixes and cleanups"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (46 commits)
  s390/cputime: fix 31-bit compile
  s390/scm_block: make the number of reqs per HW req configurable
  s390/scm_block: handle multiple requests in one HW request
  s390/scm_block: allocate aidaw pages only when necessary
  s390/scm_block: use mempool to manage aidaw requests
  s390/eadm: change timeout value
  s390/mm: fix memory leak of ptlock in pmd_free_tlb
  s390: use local symbol names in entry[64].S
  s390/ptrace: always include vector registers in core files
  s390/simd: clear vector register pointer on fork/clone
  s390: translate cputime magic constants to macros
  s390/idle: convert open coded idle time seqcount
  s390/idle: add missing irq off lockdep annotation
  s390/debug: avoid function call for debug_sprintf_*
  s390/kprobes: fix instruction copy for out of line execution
  s390: remove diag 44 calls from cpu_relax()
  s390/dasd: retry partition detection
  s390/dasd: fix list corruption for sleep_on requests
  s390/dasd: fix infinite term I/O loop
  s390/dasd: remove unused code
  ...
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/block/dasd.c		 31
-rw-r--r--	drivers/s390/block/dasd_genhd.c		 26
-rw-r--r--	drivers/s390/block/scm_blk.c		222
-rw-r--r--	drivers/s390/block/scm_blk.h		  6
-rw-r--r--	drivers/s390/block/scm_blk_cluster.c	 69
-rw-r--r--	drivers/s390/char/Kconfig		 10
-rw-r--r--	drivers/s390/char/sclp_async.c		  3
-rw-r--r--	drivers/s390/cio/eadm_sch.c		  2
8 files changed, 278 insertions, 91 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 329db997ee66..4abf11965484 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1377,6 +1377,20 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 				      "I/O error, retry");
 			break;
 		case -EINVAL:
+			/*
+			 * device not valid so no I/O could be running
+			 * handle CQR as termination successful
+			 */
+			cqr->status = DASD_CQR_CLEARED;
+			cqr->stopclk = get_tod_clock();
+			cqr->starttime = 0;
+			/* no retries for invalid devices */
+			cqr->retries = -1;
+			DBF_DEV_EVENT(DBF_ERR, device, "%s",
+				      "EINVAL, handle as terminated");
+			/* fake rc to success */
+			rc = 0;
+			break;
 		case -EBUSY:
 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
 				      "device busy, retry later");
@@ -1683,11 +1697,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
 	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_CLEARED;
-		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
-			cqr->callback_data = DASD_SLEEPON_END_TAG;
 		dasd_device_clear_timer(device);
 		wake_up(&dasd_flush_wq);
-		wake_up(&generic_waitq);
 		dasd_schedule_device_bh(device);
 		return;
 	}
@@ -2326,21 +2337,11 @@ retry:
 			return -EAGAIN;
 
 		/* normal recovery for basedev IO */
-		if (__dasd_sleep_on_erp(cqr)) {
+		if (__dasd_sleep_on_erp(cqr))
+			/* handle erp first */
 			goto retry;
-			/* remember that ERP was needed */
-			rc = 1;
-			/* skip processing for active cqr */
-			if (cqr->status != DASD_CQR_TERMINATED &&
-			    cqr->status != DASD_CQR_NEED_ERP)
-				break;
-		}
 	}
 
-	/* start ERP requests in upper loop */
-	if (rc)
-		goto retry;
-
 	return 0;
 }
 
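The new -EINVAL branch above stamps the request with get_tod_clock() before faking a successful termination. As a side note, here is a minimal sketch of how such raw TOD timestamps are usually turned into wall-clock units; the startclk field and the helper name are assumptions of this illustration, not part of the patch:

/*
 * Illustration only: dasd timestamps such as cqr->stopclk hold raw s390
 * TOD clock values.  Bit 51 of the TOD clock advances once per
 * microsecond, so a delta shifted right by 12 yields microseconds.
 */
static inline unsigned long long cqr_service_time_us(const struct dasd_ccw_req *cqr)
{
	/* assumes startclk was recorded when the request was started */
	return (cqr->stopclk - cqr->startclk) >> 12;
}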
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f224d59c4b6b..90f39f79f5d7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,15 +99,37 @@ void dasd_gendisk_free(struct dasd_block *block)
 int dasd_scan_partitions(struct dasd_block *block)
 {
 	struct block_device *bdev;
+	int retry, rc;
 
+	retry = 5;
 	bdev = bdget_disk(block->gdp, 0);
-	if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
+	if (!bdev) {
+		DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
+			      "scan partitions error, bdget returned NULL");
 		return -ENODEV;
+	}
+
+	rc = blkdev_get(bdev, FMODE_READ, NULL);
+	if (rc < 0) {
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+			      "scan partitions error, blkdev_get returned %d",
+			      rc);
+		return -ENODEV;
+	}
 	/*
 	 * See fs/partition/check.c:register_disk,rescan_partitions
 	 * Can't call rescan_partitions directly. Use ioctl.
 	 */
-	ioctl_by_bdev(bdev, BLKRRPART, 0);
+	rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+	while (rc == -EBUSY && retry > 0) {
+		schedule();
+		rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
+		retry--;
+		DBF_DEV_EVENT(DBF_ERR, block->base,
+			      "scan partitions error, retry %d rc %d",
+			      retry, rc);
+	}
+
 	/*
 	 * Since the matching blkdev_put call to the blkdev_get in
 	 * this function is not called before dasd_destroy_partitions
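The retry loop added above simply re-issues the BLKRRPART ioctl while the partition code reports -EBUSY. Condensed into a stand-alone helper (the function name is made up for illustration, the calls are the same ones used in the patch), the pattern looks like this:

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/sched.h>

/* Sketch of the bounded-retry pattern used above; not part of the patch. */
static int rescan_partitions_with_retry(struct block_device *bdev, int retries)
{
	int rc = ioctl_by_bdev(bdev, BLKRRPART, 0);

	while (rc == -EBUSY && retries-- > 0) {
		schedule();	/* give the competing opener a chance to finish */
		rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
	}
	return rc;
}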
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab39629..75d9896deccb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
@@ -20,13 +21,18 @@
 
 debug_info_t *scm_debug;
 static int scm_major;
+static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
 static atomic_t nr_devices = ATOMIC_INIT(0);
 module_param(nr_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
 
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
 MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("scm:scmdev*");
@@ -36,8 +42,8 @@ static void __scm_free_rq(struct scm_request *scmrq)
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 
 	free_page((unsigned long) scmrq->aob);
-	free_page((unsigned long) scmrq->aidaw);
 	__scm_free_rq_cluster(scmrq);
+	kfree(scmrq->request);
 	kfree(aobrq);
 }
 
@@ -53,6 +59,8 @@ static void scm_free_rqs(void)
 		__scm_free_rq(scmrq);
 	}
 	spin_unlock_irq(&list_lock);
+
+	mempool_destroy(aidaw_pool);
 }
 
 static int __scm_alloc_rq(void)
@@ -65,17 +73,17 @@ static int __scm_alloc_rq(void)
 		return -ENOMEM;
 
 	scmrq = (void *) aobrq->data;
-	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob || !scmrq->aidaw) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	if (!scmrq->aob)
+		goto free;
 
-	if (__scm_alloc_rq_cluster(scmrq)) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	if (__scm_alloc_rq_cluster(scmrq))
+		goto free;
 
 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
@@ -83,12 +91,19 @@ static int __scm_alloc_rq(void)
 	spin_unlock_irq(&list_lock);
 
 	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
 }
 
 static int scm_alloc_rqs(unsigned int nrqs)
 {
 	int ret = 0;
 
+	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
+	if (!aidaw_pool)
+		return -ENOMEM;
+
 	while (nrqs-- && !ret)
 		ret = __scm_alloc_rq();
 
@@ -112,6 +127,18 @@ out:
 static void scm_request_done(struct scm_request *scmrq)
 {
 	unsigned long flags;
+	struct msb *msb;
+	u64 aidaw;
+	int i;
+
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
+		msb = &scmrq->aob->msb[i];
+		aidaw = msb->data_addr;
+
+		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+		    IS_ALIGNED(aidaw, PAGE_SIZE))
+			mempool_free(virt_to_page(aidaw), aidaw_pool);
+	}
 
 	spin_lock_irqsave(&list_lock, flags);
 	list_add(&scmrq->list, &inactive_requests);
@@ -123,48 +150,90 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-static void scm_request_prepare(struct scm_request *scmrq)
+static inline struct aidaw *scm_aidaw_alloc(void)
+{
+	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
+
+	return page ? page_address(page) : NULL;
+}
+
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
+static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scmrq->aidaw;
-	struct msb *msb = &scmrq->aob->msb[0];
+	int pos = scmrq->aob->request.msb_count;
+	struct msb *msb = &scmrq->aob->msb[pos];
+	struct request *req = scmrq->request[pos];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
+	if (!aidaw)
+		return -ENOMEM;
+
 	msb->bs = MSB_BS_4K;
-	scmrq->aob->request.msb_count = 1;
-	msb->scm_addr = scmdev->address +
-		((u64) blk_rq_pos(scmrq->request) << 9);
-	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
-		MSB_OC_READ : MSB_OC_WRITE;
+	scmrq->aob->request.msb_count++;
+	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
+	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
 	msb->flags |= MSB_FLAG_IDA;
 	msb->data_addr = (u64) aidaw;
 
-	rq_for_each_segment(bv, scmrq->request, iter) {
+	rq_for_each_segment(bv, req, iter) {
 		WARN_ON(bv.bv_offset);
 		msb->blk_count += bv.bv_len >> 12;
 		aidaw->data_addr = (u64) page_address(bv.bv_page);
 		aidaw++;
 	}
+
+	scmrq->next_aidaw = aidaw;
+	return 0;
+}
+
+static inline void scm_request_set(struct scm_request *scmrq,
+				   struct request *req)
+{
+	scmrq->request[scmrq->aob->request.msb_count] = req;
 }
 
 static inline void scm_request_init(struct scm_blk_dev *bdev,
-				    struct scm_request *scmrq,
-				    struct request *req)
+				    struct scm_request *scmrq)
 {
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
 	memset(aob, 0, sizeof(*aob));
-	memset(scmrq->aidaw, 0, PAGE_SIZE);
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
 	aob->request.data = (u64) aobrq;
-	scmrq->request = req;
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 	scm_request_cluster_init(scmrq);
 }
 
@@ -180,9 +249,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
 void scm_request_requeue(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_requeue_request(bdev->rq, scmrq->request);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_requeue_request(bdev->rq, scmrq->request[i]);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 	scm_ensure_queue_restart(bdev);
@@ -191,20 +263,41 @@ void scm_request_requeue(struct scm_request *scmrq)
 void scm_request_finish(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
+	int i;
 
 	scm_release_cluster(scmrq);
-	blk_end_request_all(scmrq->request, scmrq->error);
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
+		blk_end_request_all(scmrq->request[i], scmrq->error);
+
 	atomic_dec(&bdev->queued_reqs);
 	scm_request_done(scmrq);
 }
 
+static int scm_request_start(struct scm_request *scmrq)
+{
+	struct scm_blk_dev *bdev = scmrq->bdev;
+	int ret;
+
+	atomic_inc(&bdev->queued_reqs);
+	if (!scmrq->aob->request.msb_count) {
+		scm_request_requeue(scmrq);
+		return -EINVAL;
+	}
+
+	ret = eadm_start_aob(scmrq->aob);
+	if (ret) {
+		SCM_LOG(5, "no subchannel");
+		scm_request_requeue(scmrq);
+	}
+	return ret;
+}
+
 static void scm_blk_request(struct request_queue *rq)
 {
 	struct scm_device *scmdev = rq->queuedata;
 	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
-	struct scm_request *scmrq;
+	struct scm_request *scmrq = NULL;
 	struct request *req;
-	int ret;
 
 	while ((req = blk_peek_request(rq))) {
 		if (req->cmd_type != REQ_TYPE_FS) {
@@ -214,39 +307,64 @@ static void scm_blk_request(struct request_queue *rq)
 			continue;
 		}
 
-		if (!scm_permit_request(bdev, req)) {
-			scm_ensure_queue_restart(bdev);
-			return;
-		}
-		scmrq = scm_request_fetch();
+		if (!scm_permit_request(bdev, req))
+			goto out;
+
 		if (!scmrq) {
-			SCM_LOG(5, "no request");
-			scm_ensure_queue_restart(bdev);
-			return;
+			scmrq = scm_request_fetch();
+			if (!scmrq) {
+				SCM_LOG(5, "no request");
+				goto out;
+			}
+			scm_request_init(bdev, scmrq);
 		}
-		scm_request_init(bdev, scmrq, req);
+		scm_request_set(scmrq, req);
+
 		if (!scm_reserve_cluster(scmrq)) {
 			SCM_LOG(5, "cluster busy");
+			scm_request_set(scmrq, NULL);
+			if (scmrq->aob->request.msb_count)
+				goto out;
+
 			scm_request_done(scmrq);
 			return;
 		}
+
 		if (scm_need_cluster_request(scmrq)) {
-			atomic_inc(&bdev->queued_reqs);
-			blk_start_request(req);
-			scm_initiate_cluster_request(scmrq);
-			return;
+			if (scmrq->aob->request.msb_count) {
+				/* Start cluster requests separately. */
+				scm_request_set(scmrq, NULL);
+				if (scm_request_start(scmrq))
+					return;
+			} else {
+				atomic_inc(&bdev->queued_reqs);
+				blk_start_request(req);
+				scm_initiate_cluster_request(scmrq);
+			}
+			scmrq = NULL;
+			continue;
+		}
+
+		if (scm_request_prepare(scmrq)) {
+			SCM_LOG(5, "aidaw alloc failed");
+			scm_request_set(scmrq, NULL);
+			goto out;
 		}
-		scm_request_prepare(scmrq);
-		atomic_inc(&bdev->queued_reqs);
 		blk_start_request(req);
 
-		ret = eadm_start_aob(scmrq->aob);
-		if (ret) {
-			SCM_LOG(5, "no subchannel");
-			scm_request_requeue(scmrq);
-			return;
-		}
+		if (scmrq->aob->request.msb_count < nr_requests_per_io)
+			continue;
+
+		if (scm_request_start(scmrq))
+			return;
+
+		scmrq = NULL;
 	}
+out:
+	if (scmrq)
+		scm_request_start(scmrq);
+	else
+		scm_ensure_queue_restart(bdev);
 }
 
 static void __scmrq_log_error(struct scm_request *scmrq)
@@ -443,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 	spin_unlock_irqrestore(&bdev->lock, flags);
 }
 
+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return scm_cluster_size_valid();
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;
 
-	if (!scm_cluster_size_valid())
+	if (!scm_blk_params_valid())
 		goto out;
 
 	ret = register_blkdev(0, "scm");
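The aidaw indirection pages now come from a page mempool that is created in scm_alloc_rqs() and drained again in scm_request_done(). A minimal stand-alone sketch of that page-mempool pattern (all names here are invented for the example, only the mempool API calls match the patch):

#include <linux/mempool.h>
#include <linux/mm.h>
#include <linux/gfp.h>

static mempool_t *example_pool;

static int example_pool_init(unsigned int min_pages)
{
	/* order 0: one page per element, at least min_pages held in reserve */
	example_pool = mempool_create_page_pool(min_pages, 0);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_page_get(void)
{
	/* GFP_ATOMIC: may be called from the request function with locks held */
	struct page *page = mempool_alloc(example_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

static void example_page_put(void *addr)
{
	mempool_free(virt_to_page(addr), example_pool);
}

static void example_pool_exit(void)
{
	mempool_destroy(example_pool);
}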
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5..09218cdc5129 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -30,8 +30,8 @@ struct scm_blk_dev {
 
 struct scm_request {
 	struct scm_blk_dev *bdev;
-	struct request *request;
-	struct aidaw *aidaw;
+	struct aidaw *next_aidaw;
+	struct request **request;
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;
@@ -55,6 +55,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
 
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
+
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
 
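A worked example of what next_aidaw and scm_aidaw_bytes() buy, to make the header change easier to follow:

/*
 * Worked example for next_aidaw/scm_aidaw_bytes(), assuming a 4K
 * PAGE_SIZE and the 16-byte aidaw layout from asm/eadm.h (both are
 * assumptions of this note, not statements from the header above):
 *
 *   next_aidaw sits 128 bytes before the end of its page
 *     -> 128 / 16 = 8 free aidaw slots
 *     -> 8 * 4096 = 32 KiB of payload can still be described before
 *        scm_aidaw_fetch() has to take a fresh page from the mempool
 *
 * scm_request_init() seeds next_aidaw with the unused tail of the aob
 * page (&aob->msb[nr_requests_per_io]), so small requests need no extra
 * aidaw page at all.
 */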
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
 	scmrq->cluster.state = CLUSTER_NONE;
 }
 
-static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
+static bool clusters_intersect(struct request *A, struct request *B)
 {
 	unsigned long firstA, lastA, firstB, lastB;
 
-	firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
-	lastA = (((u64) blk_rq_pos(A->request) << 9) +
-		 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;
+	firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
+	lastA = (((u64) blk_rq_pos(A) << 9) +
+		 blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
 
-	firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
-	lastB = (((u64) blk_rq_pos(B->request) << 9) +
-		 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;
+	firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
+	lastB = (((u64) blk_rq_pos(B) << 9) +
+		 blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
 
 	return (firstB <= lastA && firstA <= lastB);
 }
 
 bool scm_reserve_cluster(struct scm_request *scmrq)
 {
+	struct request *req = scmrq->request[scmrq->aob->request.msb_count];
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_request *iter;
+	int pos, add = 1;
 
 	if (write_cluster_size == 0)
 		return true;
 
 	spin_lock(&bdev->lock);
 	list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
-		if (clusters_intersect(scmrq, iter) &&
-		    (rq_data_dir(scmrq->request) == WRITE ||
-		     rq_data_dir(iter->request) == WRITE)) {
-			spin_unlock(&bdev->lock);
-			return false;
+		if (iter == scmrq) {
+			/*
+			 * We don't have to use clusters_intersect here, since
+			 * cluster requests are always started separately.
+			 */
+			add = 0;
+			continue;
+		}
+		for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
+			if (clusters_intersect(req, iter->request[pos]) &&
+			    (rq_data_dir(req) == WRITE ||
+			     rq_data_dir(iter->request[pos]) == WRITE)) {
+				spin_unlock(&bdev->lock);
+				return false;
+			}
 		}
 	}
-	list_add(&scmrq->cluster.list, &bdev->cluster_list);
+	if (add)
+		list_add(&scmrq->cluster.list, &bdev->cluster_list);
 	spin_unlock(&bdev->lock);
 
 	return true;
@@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
 	blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
 }
 
-static void scm_prepare_cluster_request(struct scm_request *scmrq)
+static int scm_prepare_cluster_request(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct request *req = scmrq->request;
-	struct aidaw *aidaw = scmrq->aidaw;
+	struct request *req = scmrq->request[0];
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 	int i = 0;
 	u64 addr;
@@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
-		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
 		msb->flags = MSB_FLAG_IDA;
-		msb->data_addr = (u64) aidaw;
 		msb->blk_count = write_cluster_size;
 
 		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 			     CLUSTER_SIZE))
 			msb->blk_count = 2 * write_cluster_size;
 
+		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+		if (!aidaw)
+			return -ENOMEM;
+
+		scmrq->aob->request.msb_count = 1;
+		msb->data_addr = (u64) aidaw;
 		for (i = 0; i < msb->blk_count; i++) {
 			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
 			aidaw++;
@@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 
 		break;
 	case CLUSTER_WRITE:
+		aidaw = (void *) msb->data_addr;
 		msb->oc = MSB_OC_WRITE;
 
 		for (addr = msb->scm_addr;
@@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
 		}
 		break;
 	}
+	return 0;
 }
 
 bool scm_need_cluster_request(struct scm_request *scmrq)
 {
-	if (rq_data_dir(scmrq->request) == READ)
+	int pos = scmrq->aob->request.msb_count;
+
+	if (rq_data_dir(scmrq->request[pos]) == READ)
 		return false;
 
-	return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
+	return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
}
 
 /* Called with queue lock held. */
 void scm_initiate_cluster_request(struct scm_request *scmrq)
 {
-	scm_prepare_cluster_request(scmrq);
+	if (scm_prepare_cluster_request(scmrq))
+		goto requeue;
 	if (eadm_start_aob(scmrq->aob))
-		scm_request_requeue(scmrq);
+		goto requeue;
+	return;
+requeue:
+	scm_request_requeue(scmrq);
 }
 
 bool scm_test_cluster_request(struct scm_request *scmrq)
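clusters_intersect() above is the standard closed-interval overlap test applied to cluster indices. A tiny self-contained version of the same check, with made-up example values:

/* Same interval test as clusters_intersect(), on precomputed indices. */
static bool ranges_overlap(unsigned long firstA, unsigned long lastA,
			   unsigned long firstB, unsigned long lastB)
{
	return firstB <= lastA && firstA <= lastB;
}

/*
 * ranges_overlap(3, 4, 4, 6) -> true:  both requests touch cluster 4,
 *                                      so a write must be serialized.
 * ranges_overlap(3, 4, 5, 6) -> false: disjoint clusters, no conflict.
 */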
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index db2cb1f8a1b5..a5c6f7e157aa 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,16 @@ config SCLP_ASYNC
 	  want for inform other people about your kernel panics,
 	  need this feature and intend to run your kernel in LPAR.
 
+config SCLP_ASYNC_ID
+	string "Component ID for Call Home"
+	depends on SCLP_ASYNC
+	default "000000000"
+	help
+	  The Component ID for Call Home is used to identify the correct
+	  problem reporting queue the call home records should be sent to.
+
+	  If your are unsure, please use the default value "000000000".
+
 config HMC_DRV
 	def_tristate m
 	prompt "Support for file transfers from HMC drive CD/DVD-ROM"
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 5f9f929e891c..19c25427f27f 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -137,7 +137,8 @@ static int sclp_async_send_wait(char *message)
 	 * Retain Queue
 	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
 	 */
-	strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
+	strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
+		sizeof(sccb->evbuf.comp_id));
 	sccb->evbuf.header.length = sizeof(sccb->evbuf);
 	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
 	sccb->header.function_code = SCLP_NORMAL_WRITE;
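The comp_id destination here is a fixed-width field of the event buffer rather than a C string, which is why strncpy() bounded by sizeof() is used: it copies at most the field width and NUL-pads shorter sources, without guaranteeing a terminator when the source fills the field. A stand-alone sketch of that idiom (the field width below is an assumption for the example, not taken from the driver):

#include <linux/string.h>

struct evbuf_example {
	char comp_id[9];	/* width assumed for illustration */
};

static void fill_comp_id(struct evbuf_example *ev, const char *id)
{
	/* copies at most 9 bytes; shorter ids are padded with NUL bytes */
	strncpy(ev->comp_id, id, sizeof(ev->comp_id));
}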
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 37f0834300ea..bee8c11cd086 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -31,7 +31,7 @@
 MODULE_DESCRIPTION("driver for s390 eadm subchannels");
 MODULE_LICENSE("GPL");
 
-#define EADM_TIMEOUT (5 * HZ)
+#define EADM_TIMEOUT (7 * HZ)
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(eadm_list);
 
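EADM_TIMEOUT is a jiffies value (HZ jiffies per second), so this change raises the eadm request timeout from 5 to 7 seconds. A minimal sketch of how such a macro is typically armed; this is an illustration, not the driver's actual timer code:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_arm_timeout(struct timer_list *timer)
{
	/* deadline is "now" plus 7 * HZ jiffies, i.e. roughly 7 seconds */
	mod_timer(timer, jiffies + EADM_TIMEOUT);
}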