aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mm/cache-v4wb.S
blob: df8368afa102771bb99c19045abac26abb42e656 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

	.data
@ Virtual address of the current cache flush window.  __flush_whole_cache
@ toggles this between two CACHE_DSIZE-sized halves of the FLUSH_BASE
@ region so that consecutive whole-cache flushes always read addresses
@ that are guaranteed not to be resident in the cache yet.
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	@ Clean the whole D cache by reading one cache-size worth of data
	@ from the dedicated flush area: each load evicts (and thereby
	@ writes back) one potentially-dirty line.  Alternate between the
	@ two halves of the flush window so back-to-back flushes never
	@ re-read lines that are already cached.
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE		@ flip to the other half
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE		@ r2 = end of flush window
1:	ldr	r3, [r1], #CACHE_DLINESIZE	@ touch one line per iteration
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	@ Flush the 512-byte minicache the same way, via its own window.
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #CACHE_DLINESIZE
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.  For ranges of CACHE_DLIMIT bytes or more it is
 *	cheaper to flush the whole D cache instead (see the timing table
 *	above CACHE_DLIMIT).
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE	@ advance one cache line
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1			@ convert (addr, size) into the
						@ (start, end) pair expected by
						@ the coherency walk below
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr


/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1	@ range starts mid-line?
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry (preserve the
						@ bytes before 'start')
	tst	r1, #CACHE_DLINESIZE - 1	@ range ends mid-line?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry (preserve the
						@ bytes from 'end' onwards)
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *	Entries stay valid in the cache afterwards.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	@ Pure alias: the coherent-range walk already performs the required
	@ clean + invalidate of each line, plus a write buffer drain.
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range		@ dir == TO_DEVICE: clean only
	bcs	v4wb_dma_inv_range		@ dir >  TO_DEVICE (unsigned):
						@ device writes, so invalidate
	b	v4wb_dma_flush_range		@ dir <  TO_DEVICE: bidirectional,
						@ clean and invalidate
ENDPROC(v4wb_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	Intentionally a no-op here: all maintenance for this cache type
 *	is performed up front in v4wb_dma_map_area above.
 */
ENTRY(v4wb_dma_unmap_area)
	mov	pc, lr
ENDPROC(v4wb_dma_unmap_area)

	__INITDATA

	@ Method table for this cache type.  NOTE(review): the entry order
	@ presumably must match struct cpu_cache_fns exactly — verify
	@ against asm/cacheflush.h before reordering anything here.
	.type	v4wb_cache_fns, #object
ENTRY(v4wb_cache_fns)
	.long	v4wb_flush_kern_cache_all
	.long	v4wb_flush_user_cache_all
	.long	v4wb_flush_user_cache_range
	.long	v4wb_coherent_kern_range
	.long	v4wb_coherent_user_range
	.long	v4wb_flush_kern_dcache_area
	.long	v4wb_dma_map_area
	.long	v4wb_dma_unmap_area
	.long	v4wb_dma_flush_range
	.size	v4wb_cache_fns, . - v4wb_cache_fns
_SET_CONF_IND 0x04000000 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev) { return container_of(vdev, struct virtio_ccw_device, vdev); } static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag) { unsigned long flags; __u32 ret; spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); if (vcdev->err) ret = 0; else ret = vcdev->curr_io & flag; spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); return ret; } static int ccw_io_helper(struct virtio_ccw_device *vcdev, __u32 intparm) { int ret; unsigned long flags; int flag = intparm & VIRTIO_CCW_INTPARM_MASK; spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags); ret = ccw_device_start(vcdev->cdev, vcdev->ccw, intparm, 0, 0); if (!ret) vcdev->curr_io |= flag; spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags); wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0); return ret ? ret : vcdev->err; } static inline long do_kvm_notify(struct subchannel_id schid, unsigned long queue_index) { register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; register struct subchannel_id __schid asm("2") = schid; register unsigned long __index asm("3") = queue_index; register long __rc asm("2"); asm volatile ("diag 2,4,0x500\n" : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index) : "memory", "cc"); return __rc; } static void virtio_ccw_kvm_notify(struct virtqueue *vq) { struct virtio_ccw_vq_info *info = vq->priv; struct virtio_ccw_device *vcdev; struct subchannel_id schid; vcdev = to_vc_device(info->vq->vdev); ccw_device_get_schid(vcdev->cdev, &schid); do_kvm_notify(schid, virtqueue_get_queue_index(vq)); } static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, int index) { vcdev->config_block->index = index; vcdev->ccw->cmd_code = CCW_CMD_READ_VQ_CONF; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(struct vq_config_block); vcdev->ccw->cda = (__u32)(unsigned long)(vcdev->config_block); ccw_io_helper(vcdev, 
VIRTIO_CCW_DOING_READ_VQ_CONF); return vcdev->config_block->num; } static void virtio_ccw_del_vq(struct virtqueue *vq) { struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); struct virtio_ccw_vq_info *info = vq->priv; unsigned long flags; unsigned long size; int ret; unsigned int index = virtqueue_get_queue_index(vq); /* Remove from our list. */ spin_lock_irqsave(&vcdev->lock, flags); list_del(&info->node); spin_unlock_irqrestore(&vcdev->lock, flags); /* Release from host. */ info->info_block->queue = 0; info->info_block->align = 0; info->info_block->index = index; info->info_block->num = 0; vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(*info->info_block); vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | index); /* * -ENODEV isn't considered an error: The device is gone anyway. * This may happen on device detach. */ if (ret && (ret != -ENODEV)) dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d", ret, index); vring_del_virtqueue(vq); size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); free_pages_exact(info->queue, size); kfree(info->info_block); kfree(info); } static void virtio_ccw_del_vqs(struct virtio_device *vdev) { struct virtqueue *vq, *n; list_for_each_entry_safe(vq, n, &vdev->vqs, list) virtio_ccw_del_vq(vq); } static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev, int i, vq_callback_t *callback, const char *name) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); int err; struct virtqueue *vq; struct virtio_ccw_vq_info *info; unsigned long size; unsigned long flags; /* Allocate queue. 
*/ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL); if (!info) { dev_warn(&vcdev->cdev->dev, "no info\n"); err = -ENOMEM; goto out_err; } info->info_block = kzalloc(sizeof(*info->info_block), GFP_DMA | GFP_KERNEL); if (!info->info_block) { dev_warn(&vcdev->cdev->dev, "no info block\n"); err = -ENOMEM; goto out_err; } info->num = virtio_ccw_read_vq_conf(vcdev, i); size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN)); info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); if (info->queue == NULL) { dev_warn(&vcdev->cdev->dev, "no queue\n"); err = -ENOMEM; goto out_err; } vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev, true, info->queue, virtio_ccw_kvm_notify, callback, name); if (!vq) { /* For now, we fail if we can't get the requested size. */ dev_warn(&vcdev->cdev->dev, "no vq\n"); err = -ENOMEM; free_pages_exact(info->queue, size); goto out_err; } info->vq = vq; vq->priv = info; /* Register it with the host. */ info->info_block->queue = (__u64)info->queue; info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN; info->info_block->index = i; info->info_block->num = info->num; vcdev->ccw->cmd_code = CCW_CMD_SET_VQ; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(*info->info_block); vcdev->ccw->cda = (__u32)(unsigned long)(info->info_block); err = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_VQ | i); if (err) { dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n"); free_pages_exact(info->queue, size); info->vq = NULL; vq->priv = NULL; goto out_err; } /* Save it to our list. 
*/ spin_lock_irqsave(&vcdev->lock, flags); list_add(&info->node, &vcdev->virtqueues); spin_unlock_irqrestore(&vcdev->lock, flags); return vq; out_err: if (info) kfree(info->info_block); kfree(info); return ERR_PTR(err); } static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; int ret, i; for (i = 0; i < nvqs; ++i) { vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i]); if (IS_ERR(vqs[i])) { ret = PTR_ERR(vqs[i]); vqs[i] = NULL; goto out; } } ret = -ENOMEM; /* We need a data area under 2G to communicate. */ indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL); if (!indicatorp) goto out; *indicatorp = (unsigned long) &vcdev->indicators; /* Register queue indicators with host. */ vcdev->indicators = 0; vcdev->ccw->cmd_code = CCW_CMD_SET_IND; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(vcdev->indicators); vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_IND); if (ret) goto out; /* Register indicators2 with host for config changes */ *indicatorp = (unsigned long) &vcdev->indicators2; vcdev->indicators2 = 0; vcdev->ccw->cmd_code = CCW_CMD_SET_CONF_IND; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(vcdev->indicators2); vcdev->ccw->cda = (__u32)(unsigned long) indicatorp; ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_SET_CONF_IND); if (ret) goto out; kfree(indicatorp); return 0; out: kfree(indicatorp); virtio_ccw_del_vqs(vdev); return ret; } static void virtio_ccw_reset(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); /* Zero status bits. */ vcdev->status = 0; /* Send a reset ccw on device. 
*/ vcdev->ccw->cmd_code = CCW_CMD_VDEV_RESET; vcdev->ccw->flags = 0; vcdev->ccw->count = 0; vcdev->ccw->cda = 0; ccw_io_helper(vcdev, VIRTIO_CCW_DOING_RESET); } static u32 virtio_ccw_get_features(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); struct virtio_feature_desc features; int ret; /* Read the feature bits from the host. */ /* TODO: Features > 32 bits */ features.index = 0; vcdev->ccw->cmd_code = CCW_CMD_READ_FEAT; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(features); vcdev->ccw->cda = vcdev->area; ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_FEAT); if (ret) return 0; memcpy(&features, (void *)(unsigned long)vcdev->area, sizeof(features)); return le32_to_cpu(features.features); } static void virtio_ccw_finalize_features(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); struct virtio_feature_desc features; int i; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); for (i = 0; i < sizeof(*vdev->features) / sizeof(features.features); i++) { int highbits = i % 2 ? 32 : 0; features.index = i; features.features = cpu_to_le32(vdev->features[i / 2] >> highbits); memcpy((void *)(unsigned long)vcdev->area, &features, sizeof(features)); /* Write the feature bits to the host. */ vcdev->ccw->cmd_code = CCW_CMD_WRITE_FEAT; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(features); vcdev->ccw->cda = vcdev->area; ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_FEAT); } } static void virtio_ccw_get_config(struct virtio_device *vdev, unsigned int offset, void *buf, unsigned len) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); int ret; /* Read the config area from the host. 
*/ vcdev->ccw->cmd_code = CCW_CMD_READ_CONF; vcdev->ccw->flags = 0; vcdev->ccw->count = offset + len; vcdev->ccw->cda = vcdev->area; ret = ccw_io_helper(vcdev, VIRTIO_CCW_DOING_READ_CONFIG); if (ret) return; memcpy(vcdev->config, (void *)(unsigned long)vcdev->area, sizeof(vcdev->config)); memcpy(buf, &vcdev->config[offset], len); } static void virtio_ccw_set_config(struct virtio_device *vdev, unsigned int offset, const void *buf, unsigned len) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); memcpy(&vcdev->config[offset], buf, len); /* Write the config area to the host. */ memcpy((void *)(unsigned long)vcdev->area, vcdev->config, sizeof(vcdev->config)); vcdev->ccw->cmd_code = CCW_CMD_WRITE_CONF; vcdev->ccw->flags = 0; vcdev->ccw->count = offset + len; vcdev->ccw->cda = vcdev->area; ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_CONFIG); } static u8 virtio_ccw_get_status(struct virtio_device *vdev) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); return vcdev->status; } static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); /* Write the status to the host. 
*/ vcdev->status = status; memcpy((void *)(unsigned long)vcdev->area, &status, sizeof(status)); vcdev->ccw->cmd_code = CCW_CMD_WRITE_STATUS; vcdev->ccw->flags = 0; vcdev->ccw->count = sizeof(status); vcdev->ccw->cda = vcdev->area; ccw_io_helper(vcdev, VIRTIO_CCW_DOING_WRITE_STATUS); } static struct virtio_config_ops virtio_ccw_config_ops = { .get_features = virtio_ccw_get_features, .finalize_features = virtio_ccw_finalize_features, .get = virtio_ccw_get_config, .set = virtio_ccw_set_config, .get_status = virtio_ccw_get_status, .set_status = virtio_ccw_set_status, .reset = virtio_ccw_reset, .find_vqs = virtio_ccw_find_vqs, .del_vqs = virtio_ccw_del_vqs, }; /* * ccw bus driver related functions */ static void virtio_ccw_release_dev(struct device *_d) { struct virtio_device *dev = container_of(_d, struct virtio_device, dev); struct virtio_ccw_device *vcdev = to_vc_device(dev); kfree((void *)(unsigned long)vcdev->area); kfree(vcdev->config_block); kfree(vcdev->ccw); kfree(vcdev); } static int irb_is_error(struct irb *irb) { if (scsw_cstat(&irb->scsw) != 0) return 1; if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) return 1; if (scsw_cc(&irb->scsw) != 0) return 1; return 0; } static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev, int index) { struct virtio_ccw_vq_info *info; unsigned long flags; struct virtqueue *vq; vq = NULL; spin_lock_irqsave(&vcdev->lock, flags); list_for_each_entry(info, &vcdev->virtqueues, node) { if (virtqueue_get_queue_index(info->vq) == index) { vq = info->vq; break; } } spin_unlock_irqrestore(&vcdev->lock, flags); return vq; } static void virtio_ccw_int_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK; struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); int i; struct virtqueue *vq; struct virtio_driver *drv; /* Check if it's a notification from the host. 
*/ if ((intparm == 0) && (scsw_stctl(&irb->scsw) == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { /* OK */ } if (irb_is_error(irb)) vcdev->err = -EIO; /* XXX - use real error */ if (vcdev->curr_io & activity) { switch (activity) { case VIRTIO_CCW_DOING_READ_FEAT: case VIRTIO_CCW_DOING_WRITE_FEAT: case VIRTIO_CCW_DOING_READ_CONFIG: case VIRTIO_CCW_DOING_WRITE_CONFIG: case VIRTIO_CCW_DOING_WRITE_STATUS: case VIRTIO_CCW_DOING_SET_VQ: case VIRTIO_CCW_DOING_SET_IND: case VIRTIO_CCW_DOING_SET_CONF_IND: case VIRTIO_CCW_DOING_RESET: case VIRTIO_CCW_DOING_READ_VQ_CONF: vcdev->curr_io &= ~activity; wake_up(&vcdev->wait_q); break; default: /* don't know what to do... */ dev_warn(&cdev->dev, "Suspicious activity '%08x'\n", activity); WARN_ON(1); break; } } for_each_set_bit(i, &vcdev->indicators, sizeof(vcdev->indicators) * BITS_PER_BYTE) { /* The bit clear must happen before the vring kick. */ clear_bit(i, &vcdev->indicators); barrier(); vq = virtio_ccw_vq_by_ind(vcdev, i); vring_interrupt(0, vq); } if (test_bit(0, &vcdev->indicators2)) { drv = container_of(vcdev->vdev.dev.driver, struct virtio_driver, driver); if (drv && drv->config_changed) drv->config_changed(&vcdev->vdev); clear_bit(0, &vcdev->indicators2); } } /* * We usually want to autoonline all devices, but give the admin * a way to exempt devices from this. 
*/ #define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \ (8*sizeof(long))) static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS]; static char *no_auto = ""; module_param(no_auto, charp, 0444); MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined"); static int virtio_ccw_check_autoonline(struct ccw_device *cdev) { struct ccw_dev_id id; ccw_device_get_id(cdev, &id); if (test_bit(id.devno, devs_no_auto[id.ssid])) return 0; return 1; } static void virtio_ccw_auto_online(void *data, async_cookie_t cookie) { struct ccw_device *cdev = data; int ret; ret = ccw_device_set_online(cdev); if (ret) dev_warn(&cdev->dev, "Failed to set online: %d\n", ret); } static int virtio_ccw_probe(struct ccw_device *cdev) { cdev->handler = virtio_ccw_int_handler; if (virtio_ccw_check_autoonline(cdev)) async_schedule(virtio_ccw_auto_online, cdev); return 0; } static void virtio_ccw_remove(struct ccw_device *cdev) { struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); if (cdev->online) { unregister_virtio_device(&vcdev->vdev); dev_set_drvdata(&cdev->dev, NULL); } cdev->handler = NULL; } static int virtio_ccw_offline(struct ccw_device *cdev) { struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); unregister_virtio_device(&vcdev->vdev); dev_set_drvdata(&cdev->dev, NULL); return 0; } /* Area needs to be big enough to fit status, features or configuration. 
*/ #define VIRTIO_AREA_SIZE VIRTIO_CCW_CONFIG_SIZE /* biggest possible use */ static int virtio_ccw_online(struct ccw_device *cdev) { int ret; struct virtio_ccw_device *vcdev; vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL); if (!vcdev) { dev_warn(&cdev->dev, "Could not get memory for virtio\n"); ret = -ENOMEM; goto out_free; } vcdev->area = (__u32)(unsigned long)kzalloc(VIRTIO_AREA_SIZE, GFP_DMA | GFP_KERNEL); if (!vcdev->area) { dev_warn(&cdev->dev, "Cound not get memory for virtio\n"); ret = -ENOMEM; goto out_free; } vcdev->config_block = kzalloc(sizeof(*vcdev->config_block), GFP_DMA | GFP_KERNEL); if (!vcdev->config_block) { ret = -ENOMEM; goto out_free; } vcdev->ccw = kzalloc(sizeof(*vcdev->ccw), GFP_DMA | GFP_KERNEL); if (!vcdev->ccw) { ret = -ENOMEM; goto out_free; } vcdev->vdev.dev.parent = &cdev->dev; vcdev->vdev.dev.release = virtio_ccw_release_dev; vcdev->vdev.config = &virtio_ccw_config_ops; vcdev->cdev = cdev; init_waitqueue_head(&vcdev->wait_q); INIT_LIST_HEAD(&vcdev->virtqueues); spin_lock_init(&vcdev->lock); dev_set_drvdata(&cdev->dev, vcdev); vcdev->vdev.id.vendor = cdev->id.cu_type; vcdev->vdev.id.device = cdev->id.cu_model; ret = register_virtio_device(&vcdev->vdev); if (ret) { dev_warn(&cdev->dev, "Failed to register virtio device: %d\n", ret); goto out_put; } return 0; out_put: dev_set_drvdata(&cdev->dev, NULL); put_device(&vcdev->vdev.dev); return ret; out_free: if (vcdev) { kfree((void *)(unsigned long)vcdev->area); kfree(vcdev->config_block); kfree(vcdev->ccw); } kfree(vcdev); return ret; } static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) { /* TODO: Check whether we need special handling here. 
*/ return 0; } static struct ccw_device_id virtio_ids[] = { { CCW_DEVICE(0x3832, 0) }, {}, }; MODULE_DEVICE_TABLE(ccw, virtio_ids); static struct ccw_driver virtio_ccw_driver = { .driver = { .owner = THIS_MODULE, .name = "virtio_ccw", }, .ids = virtio_ids, .probe = virtio_ccw_probe, .remove = virtio_ccw_remove, .set_offline = virtio_ccw_offline, .set_online = virtio_ccw_online, .notify = virtio_ccw_cio_notify, .int_class = IOINT_VIR, }; static int __init pure_hex(char **cp, unsigned int *val, int min_digit, int max_digit, int max_val) { int diff; diff = 0; *val = 0; while (diff <= max_digit) { int value = hex_to_bin(**cp); if (value < 0) break; *val = *val * 16 + value; (*cp)++; diff++; } if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) return 1; return 0; } static int __init parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, unsigned int *devno) { char *str_work; int rc, ret; rc = 1; if (*str == '\0') goto out; str_work = str; ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); if (ret || (str_work[0] != '.')) goto out; str_work++; ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); if (ret || (str_work[0] != '.')) goto out; str_work++; ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); if (ret || (str_work[0] != '\0')) goto out; rc = 0; out: return rc; } static void __init no_auto_parse(void) { unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; char *parm, *str; int rc; str = no_auto; while ((parm = strsep(&str, ","))) { rc = parse_busid(strsep(&parm, "-"), &from_cssid, &from_ssid, &from); if (rc) continue; if (parm != NULL) { rc = parse_busid(parm, &to_cssid, &to_ssid, &to); if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) rc = -EINVAL; } else { to_cssid = from_cssid; to_ssid = from_ssid; to = from; } if (rc) continue; while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) && (from <= to))) { set_bit(from, devs_no_auto[from_ssid]); from++; if (from > __MAX_SUBCHANNEL) { from_ssid++; from = 
0; } } } } static int __init virtio_ccw_init(void) { /* parse no_auto string before we do anything further */ no_auto_parse(); return ccw_driver_register(&virtio_ccw_driver); } module_init(virtio_ccw_init); static void __exit virtio_ccw_exit(void) { ccw_driver_unregister(&virtio_ccw_driver); } module_exit(virtio_ccw_exit);