author	Sebastian Ott <sebott@linux.vnet.ibm.com>	2012-08-28 10:50:38 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-09-26 09:45:01 -0400
commit	f30664e2c85c7804f07c636bbe99f35e0b2d4c76 (patch)
tree	17b6c655cd9f7eddc8980e05c7f378ff774cac0d /drivers/s390/block
parent	2e73c2cf78f797f3ff299ca39b210bceb40ab804 (diff)
s390: add scm block driver
Block device driver for Storage Class Memory (SCM). This driver
provides a block device interface for each available SCM increment.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/block')
-rw-r--r--	drivers/s390/block/Kconfig	 11
-rw-r--r--	drivers/s390/block/Makefile	  3
-rw-r--r--	drivers/s390/block/scm_blk.c	414
-rw-r--r--	drivers/s390/block/scm_blk.h	 79
-rw-r--r--	drivers/s390/block/scm_drv.c	 90
5 files changed, 597 insertions, 0 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8e477bb1f3f6..18178b64e89a 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -70,3 +70,14 @@ config DASD_EER
 	  This driver provides a character device interface to the
 	  DASD extended error reporting. This is only needed if you want to
 	  use applications written for the EER facility.
+
+config SCM_BLOCK
+	def_tristate m
+	prompt "Support for Storage Class Memory"
+	depends on S390 && BLOCK && EADM_SCH && SCM_BUS
+	help
+	  Block device driver for Storage Class Memory (SCM). This driver
+	  provides a block device interface for each available SCM increment.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called scm_block.
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
index 0a89e080b389..b64e2b32c753 100644
--- a/drivers/s390/block/Makefile
+++ b/drivers/s390/block/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
 obj-$(CONFIG_DASD_FBA)  += dasd_fba_mod.o
 obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
 obj-$(CONFIG_DCSSBLK) += dcssblk.o
+
+scm_block-objs := scm_drv.o scm_blk.o
+obj-$(CONFIG_SCM_BLOCK) += scm_block.o
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
new file mode 100644
index 000000000000..634ad58cbef6
--- /dev/null
+++ b/drivers/s390/block/scm_blk.c
@@ -0,0 +1,414 @@
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

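/*
 * Requests are preallocated: each consists of an aob_rq_header with an
 * embedded struct scm_request plus one page each for the AOB and the
 * aidaw list. Unused requests are kept on the inactive_requests list.
 */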
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	free_page((unsigned long) scmrq->aidaw);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);
}

static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob || !scmrq->aidaw) {
		__scm_free_rq(scmrq);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static int scm_open(struct block_device *blkdev, fmode_t mode)
{
	return scm_get_ref();
}

static int scm_release(struct gendisk *gendisk, fmode_t mode)
{
	scm_put_ref();
	return 0;
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
	.open = scm_open,
	.release = scm_release,
};

static void scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	struct aidaw *aidaw = scmrq->aidaw;
	struct msb *msb = &scmrq->aob->msb[0];
	struct req_iterator iter;
	struct bio_vec *bv;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count = 1;
	msb->scm_addr = scmdev->address +
		((u64) blk_rq_pos(scmrq->request) << 9);
	msb->oc = (rq_data_dir(scmrq->request) == READ) ?
		MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

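	/*
	 * Set up one indirect data address word per 4K block: each aidaw
	 * entry points at the page backing the corresponding segment.
	 */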
	rq_for_each_segment(bv, scmrq->request, iter) {
		WARN_ON(bv->bv_offset);
		msb->blk_count += bv->bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv->bv_page);
		aidaw++;
	}
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq,
				    struct request *req)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(aob, 0, sizeof(*aob));
	memset(scmrq->aidaw, 0, PAGE_SIZE);
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->request = req;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	blk_requeue_request(bdev->rq, scmrq->request);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

static void scm_request_finish(struct scm_request *scmrq)
{
	blk_end_request_all(scmrq->request, scmrq->error);
	scm_request_done(scmrq);
}

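/*
 * Strategy function for the request queue; called with the queue lock
 * (bdev->rq_lock) held. Fetch a preallocated scm_request for each
 * pending block layer request and kick off the I/O via an AOB.
 */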
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq;
	struct request *req;
	int ret;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS)
			continue;

		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			scm_ensure_queue_restart(bdev);
			return;
		}
		scm_request_init(bdev, scmrq, req);
		scm_request_prepare(scmrq);
		blk_start_request(req);

		ret = scm_start_aob(scmrq->aob);
		if (ret) {
			SCM_LOG(5, "no subchannel");
			scm_request_requeue(scmrq);
			return;
		}
		atomic_inc(&bdev->queued_reqs);
	}
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

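/*
 * Completion callback (scm_driver.handler), run in interrupt context:
 * record the result, queue the finished request and defer the rest of
 * the work to the tasklet.
 */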
void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

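/*
 * Tasklet: complete finished requests, retrying failed ones up to
 * scmrq->retries times. bdev->lock is dropped while a request is
 * handled so that scm_blk_irq() can queue new completions meanwhile.
 */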
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			if (scm_start_aob(scmrq->aob)) {
				spin_lock_irqsave(&bdev->rq_lock, flags);
				scm_request_requeue(scmrq);
				spin_unlock_irqrestore(&bdev->rq_lock, flags);
			}
			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}
		scm_request_finish(scmrq);
		atomic_dec(&bdev->queued_reqs);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);
	/* Look out for more requests. */
	blk_run_queue(bdev->rq);
}

int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
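	/*
	 * The aidaw list of a request must fit into a single page, which
	 * caps the number of 4K blocks (and thus segments) per request.
	 */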
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

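	/*
	 * Build the disk name: devindex 0..25 maps to scma..scmz and
	 * 26..701 to scmaa..scmzz, e.g. devindex 27 yields "scmab".
	 */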
	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

static int __init scm_blk_init(void)
{
	int ret;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	if (scm_alloc_rqs(nr_requests))
		goto out_unreg;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug)
		goto out_free;

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
out_unreg:
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
new file mode 100644
index 000000000000..5aba5612588f
--- /dev/null
+++ b/drivers/s390/block/scm_blk.h
@@ -0,0 +1,79 @@
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5

struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	struct list_head finished_requests;
};

struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;
	struct aidaw *aidaw;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
};

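/* Map a struct scm_request back to its enclosing aob_rq_header. */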
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

int scm_drv_init(void);
void scm_drv_cleanup(void);

extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)

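/* Dump a buffer to the debug feature, split into buf_size sized chunks. */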
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (level > scm_debug->level)
		return;
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}

static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c
new file mode 100644
index 000000000000..fce711a63060
--- /dev/null
+++ b/drivers/s390/block/scm_drv.c
@@ -0,0 +1,90 @@
/*
 * Device driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static void notify(struct scm_device *scmdev)
{
	pr_info("%lu: The capabilities of the SCM increment changed\n",
		(unsigned long) scmdev->address);
	SCM_LOG(2, "State changed");
	SCM_LOG_STATE(2, scmdev);
}

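/*
 * Probe callback: allocate the block device representation and set up
 * a gendisk for SCM increments that are in a good operational state.
 */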
static int scm_probe(struct scm_device *scmdev)
{
	struct scm_blk_dev *bdev;
	int ret;

	SCM_LOG(2, "probe");
	SCM_LOG_STATE(2, scmdev);

	if (scmdev->attrs.oper_state != OP_STATE_GOOD)
		return -EINVAL;

	bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	spin_lock_irq(&scmdev->lock);
	dev_set_drvdata(&scmdev->dev, bdev);
	spin_unlock_irq(&scmdev->lock);

	ret = scm_blk_dev_setup(bdev, scmdev);
	if (ret) {
		spin_lock_irq(&scmdev->lock);
		dev_set_drvdata(&scmdev->dev, NULL);
		spin_unlock_irq(&scmdev->lock);
		kfree(bdev);
		goto out;
	}

out:
	return ret;
}

static int scm_remove(struct scm_device *scmdev)
{
	struct scm_blk_dev *bdev;

	spin_lock_irq(&scmdev->lock);
	bdev = dev_get_drvdata(&scmdev->dev);
	dev_set_drvdata(&scmdev->dev, NULL);
	spin_unlock_irq(&scmdev->lock);
	scm_blk_dev_cleanup(bdev);
	kfree(bdev);

	return 0;
}

static struct scm_driver scm_drv = {
	.drv = {
		.name = "scm_block",
		.owner = THIS_MODULE,
	},
	.notify = notify,
	.probe = scm_probe,
	.remove = scm_remove,
	.handler = scm_blk_irq,
};

int __init scm_drv_init(void)
{
	return scm_driver_register(&scm_drv);
}

void scm_drv_cleanup(void)
{
	scm_driver_unregister(&scm_drv);
}