diff options
Diffstat (limited to 'drivers')
38 files changed, 7972 insertions, 2743 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 503d82569449..6d9d7fab77f5 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
@@ -15,6 +15,8 @@ obj-$(CONFIG_ACPI) += acpi/ | |||
15 | obj-$(CONFIG_PNP) += pnp/ | 15 | obj-$(CONFIG_PNP) += pnp/ |
16 | obj-$(CONFIG_ARM_AMBA) += amba/ | 16 | obj-$(CONFIG_ARM_AMBA) += amba/ |
17 | 17 | ||
18 | obj-$(CONFIG_XEN) += xen/ | ||
19 | |||
18 | # char/ comes before serial/ etc so that the VT console is the boot-time | 20 | # char/ comes before serial/ etc so that the VT console is the boot-time |
19 | # default. | 21 | # default. |
20 | obj-y += char/ | 22 | obj-y += char/ |
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 88a6fc7fd271..58f1338981bc 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/jiffies.h> | 40 | #include <linux/jiffies.h> |
41 | #include <linux/kmod.h> | 41 | #include <linux/kmod.h> |
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/reboot.h> | ||
43 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
44 | 45 | ||
45 | #include <acpi/acpi_bus.h> | 46 | #include <acpi/acpi_bus.h> |
@@ -59,7 +60,6 @@ | |||
59 | #define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 | 60 | #define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 |
60 | #define ACPI_THERMAL_NOTIFY_HOT 0xF1 | 61 | #define ACPI_THERMAL_NOTIFY_HOT 0xF1 |
61 | #define ACPI_THERMAL_MODE_ACTIVE 0x00 | 62 | #define ACPI_THERMAL_MODE_ACTIVE 0x00 |
62 | #define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff" | ||
63 | 63 | ||
64 | #define ACPI_THERMAL_MAX_ACTIVE 10 | 64 | #define ACPI_THERMAL_MAX_ACTIVE 10 |
65 | #define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 | 65 | #define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65 |
@@ -419,26 +419,6 @@ static int acpi_thermal_get_devices(struct acpi_thermal *tz) | |||
419 | return 0; | 419 | return 0; |
420 | } | 420 | } |
421 | 421 | ||
422 | static int acpi_thermal_call_usermode(char *path) | ||
423 | { | ||
424 | char *argv[2] = { NULL, NULL }; | ||
425 | char *envp[3] = { NULL, NULL, NULL }; | ||
426 | |||
427 | |||
428 | if (!path) | ||
429 | return -EINVAL; | ||
430 | |||
431 | argv[0] = path; | ||
432 | |||
433 | /* minimal command environment */ | ||
434 | envp[0] = "HOME=/"; | ||
435 | envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; | ||
436 | |||
437 | call_usermodehelper(argv[0], argv, envp, 0); | ||
438 | |||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | static int acpi_thermal_critical(struct acpi_thermal *tz) | 422 | static int acpi_thermal_critical(struct acpi_thermal *tz) |
443 | { | 423 | { |
444 | if (!tz || !tz->trips.critical.flags.valid) | 424 | if (!tz || !tz->trips.critical.flags.valid) |
@@ -456,7 +436,7 @@ static int acpi_thermal_critical(struct acpi_thermal *tz) | |||
456 | acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL, | 436 | acpi_bus_generate_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL, |
457 | tz->trips.critical.flags.enabled); | 437 | tz->trips.critical.flags.enabled); |
458 | 438 | ||
459 | acpi_thermal_call_usermode(ACPI_THERMAL_PATH_POWEROFF); | 439 | orderly_poweroff(true); |
460 | 440 | ||
461 | return 0; | 441 | return 0; |
462 | } | 442 | } |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 8f65b88cf711..a4a311992408 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -427,4 +427,13 @@ config XILINX_SYSACE | |||
427 | help | 427 | help |
428 | Include support for the Xilinx SystemACE CompactFlash interface | 428 | Include support for the Xilinx SystemACE CompactFlash interface |
429 | 429 | ||
430 | config XEN_BLKDEV_FRONTEND | ||
431 | tristate "Xen virtual block device support" | ||
432 | depends on XEN | ||
433 | default y | ||
434 | help | ||
435 | This driver implements the front-end of the Xen virtual | ||
436 | block device driver. It communicates with a back-end driver | ||
437 | in another domain which drives the actual block device. | ||
438 | |||
430 | endif # BLK_DEV | 439 | endif # BLK_DEV |
diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 9ee08ab4ffa8..3e31532df0ed 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile | |||
@@ -29,3 +29,4 @@ obj-$(CONFIG_VIODASD) += viodasd.o | |||
29 | obj-$(CONFIG_BLK_DEV_SX8) += sx8.o | 29 | obj-$(CONFIG_BLK_DEV_SX8) += sx8.o |
30 | obj-$(CONFIG_BLK_DEV_UB) += ub.o | 30 | obj-$(CONFIG_BLK_DEV_UB) += ub.o |
31 | 31 | ||
32 | obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c new file mode 100644 index 000000000000..6746c29181f8 --- /dev/null +++ b/drivers/block/xen-blkfront.c | |||
@@ -0,0 +1,988 @@ | |||
1 | /* | ||
2 | * blkfront.c | ||
3 | * | ||
4 | * XenLinux virtual block device driver. | ||
5 | * | ||
6 | * Copyright (c) 2003-2004, Keir Fraser & Steve Hand | ||
7 | * Modifications by Mark A. Williamson are (c) Intel Research Cambridge | ||
8 | * Copyright (c) 2004, Christian Limpach | ||
9 | * Copyright (c) 2004, Andrew Warfield | ||
10 | * Copyright (c) 2005, Christopher Clark | ||
11 | * Copyright (c) 2005, XenSource Ltd | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License version 2 | ||
15 | * as published by the Free Software Foundation; or, when distributed | ||
16 | * separately from the Linux kernel or incorporated into other | ||
17 | * software packages, subject to the following license: | ||
18 | * | ||
19 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
20 | * of this source file (the "Software"), to deal in the Software without | ||
21 | * restriction, including without limitation the rights to use, copy, modify, | ||
22 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
23 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
24 | * the following conditions: | ||
25 | * | ||
26 | * The above copyright notice and this permission notice shall be included in | ||
27 | * all copies or substantial portions of the Software. | ||
28 | * | ||
29 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
30 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
31 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
32 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
33 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
34 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
35 | * IN THE SOFTWARE. | ||
36 | */ | ||
37 | |||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/blkdev.h> | ||
40 | #include <linux/module.h> | ||
41 | |||
42 | #include <xen/xenbus.h> | ||
43 | #include <xen/grant_table.h> | ||
44 | #include <xen/events.h> | ||
45 | #include <xen/page.h> | ||
46 | |||
47 | #include <xen/interface/grant_table.h> | ||
48 | #include <xen/interface/io/blkif.h> | ||
49 | |||
50 | #include <asm/xen/hypervisor.h> | ||
51 | |||
52 | enum blkif_state { | ||
53 | BLKIF_STATE_DISCONNECTED, | ||
54 | BLKIF_STATE_CONNECTED, | ||
55 | BLKIF_STATE_SUSPENDED, | ||
56 | }; | ||
57 | |||
58 | struct blk_shadow { | ||
59 | struct blkif_request req; | ||
60 | unsigned long request; | ||
61 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
62 | }; | ||
63 | |||
64 | static struct block_device_operations xlvbd_block_fops; | ||
65 | |||
66 | #define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) | ||
67 | |||
68 | /* | ||
69 | * We have one of these per vbd, whether ide, scsi or 'other'. They | ||
70 | * hang in private_data off the gendisk structure. We may end up | ||
71 | * putting all kinds of interesting stuff here :-) | ||
72 | */ | ||
73 | struct blkfront_info | ||
74 | { | ||
75 | struct xenbus_device *xbdev; | ||
76 | dev_t dev; | ||
77 | struct gendisk *gd; | ||
78 | int vdevice; | ||
79 | blkif_vdev_t handle; | ||
80 | enum blkif_state connected; | ||
81 | int ring_ref; | ||
82 | struct blkif_front_ring ring; | ||
83 | unsigned int evtchn, irq; | ||
84 | struct request_queue *rq; | ||
85 | struct work_struct work; | ||
86 | struct gnttab_free_callback callback; | ||
87 | struct blk_shadow shadow[BLK_RING_SIZE]; | ||
88 | unsigned long shadow_free; | ||
89 | int feature_barrier; | ||
90 | |||
91 | /** | ||
92 | * The number of people holding this device open. We won't allow a | ||
93 | * hot-unplug unless this is 0. | ||
94 | */ | ||
95 | int users; | ||
96 | }; | ||
97 | |||
98 | static DEFINE_SPINLOCK(blkif_io_lock); | ||
99 | |||
100 | #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ | ||
101 | (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) | ||
102 | #define GRANT_INVALID_REF 0 | ||
103 | |||
104 | #define PARTS_PER_DISK 16 | ||
105 | |||
106 | #define BLKIF_MAJOR(dev) ((dev)>>8) | ||
107 | #define BLKIF_MINOR(dev) ((dev) & 0xff) | ||
108 | |||
109 | #define DEV_NAME "xvd" /* name in /dev */ | ||
110 | |||
111 | /* Information about our VBDs. */ | ||
112 | #define MAX_VBDS 64 | ||
113 | static LIST_HEAD(vbds_list); | ||
114 | |||
115 | static int get_id_from_freelist(struct blkfront_info *info) | ||
116 | { | ||
117 | unsigned long free = info->shadow_free; | ||
118 | BUG_ON(free > BLK_RING_SIZE); | ||
119 | info->shadow_free = info->shadow[free].req.id; | ||
120 | info->shadow[free].req.id = 0x0fffffee; /* debug */ | ||
121 | return free; | ||
122 | } | ||
123 | |||
124 | static void add_id_to_freelist(struct blkfront_info *info, | ||
125 | unsigned long id) | ||
126 | { | ||
127 | info->shadow[id].req.id = info->shadow_free; | ||
128 | info->shadow[id].request = 0; | ||
129 | info->shadow_free = id; | ||
130 | } | ||
131 | |||
132 | static void blkif_restart_queue_callback(void *arg) | ||
133 | { | ||
134 | struct blkfront_info *info = (struct blkfront_info *)arg; | ||
135 | schedule_work(&info->work); | ||
136 | } | ||
137 | |||
138 | /* | ||
139 | * blkif_queue_request | ||
140 | * | ||
141 | * request block io | ||
142 | * | ||
143 | * id: for guest use only. | ||
144 | * operation: BLKIF_OP_{READ,WRITE,PROBE} | ||
145 | * buffer: buffer to read/write into. this should be a | ||
146 | * virtual address in the guest os. | ||
147 | */ | ||
148 | static int blkif_queue_request(struct request *req) | ||
149 | { | ||
150 | struct blkfront_info *info = req->rq_disk->private_data; | ||
151 | unsigned long buffer_mfn; | ||
152 | struct blkif_request *ring_req; | ||
153 | struct bio *bio; | ||
154 | struct bio_vec *bvec; | ||
155 | int idx; | ||
156 | unsigned long id; | ||
157 | unsigned int fsect, lsect; | ||
158 | int ref; | ||
159 | grant_ref_t gref_head; | ||
160 | |||
161 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) | ||
162 | return 1; | ||
163 | |||
164 | if (gnttab_alloc_grant_references( | ||
165 | BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) { | ||
166 | gnttab_request_free_callback( | ||
167 | &info->callback, | ||
168 | blkif_restart_queue_callback, | ||
169 | info, | ||
170 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
171 | return 1; | ||
172 | } | ||
173 | |||
174 | /* Fill out a communications ring structure. */ | ||
175 | ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | ||
176 | id = get_id_from_freelist(info); | ||
177 | info->shadow[id].request = (unsigned long)req; | ||
178 | |||
179 | ring_req->id = id; | ||
180 | ring_req->sector_number = (blkif_sector_t)req->sector; | ||
181 | ring_req->handle = info->handle; | ||
182 | |||
183 | ring_req->operation = rq_data_dir(req) ? | ||
184 | BLKIF_OP_WRITE : BLKIF_OP_READ; | ||
185 | if (blk_barrier_rq(req)) | ||
186 | ring_req->operation = BLKIF_OP_WRITE_BARRIER; | ||
187 | |||
188 | ring_req->nr_segments = 0; | ||
189 | rq_for_each_bio (bio, req) { | ||
190 | bio_for_each_segment (bvec, bio, idx) { | ||
191 | BUG_ON(ring_req->nr_segments | ||
192 | == BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
193 | buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page)); | ||
194 | fsect = bvec->bv_offset >> 9; | ||
195 | lsect = fsect + (bvec->bv_len >> 9) - 1; | ||
196 | /* install a grant reference. */ | ||
197 | ref = gnttab_claim_grant_reference(&gref_head); | ||
198 | BUG_ON(ref == -ENOSPC); | ||
199 | |||
200 | gnttab_grant_foreign_access_ref( | ||
201 | ref, | ||
202 | info->xbdev->otherend_id, | ||
203 | buffer_mfn, | ||
204 | rq_data_dir(req) ); | ||
205 | |||
206 | info->shadow[id].frame[ring_req->nr_segments] = | ||
207 | mfn_to_pfn(buffer_mfn); | ||
208 | |||
209 | ring_req->seg[ring_req->nr_segments] = | ||
210 | (struct blkif_request_segment) { | ||
211 | .gref = ref, | ||
212 | .first_sect = fsect, | ||
213 | .last_sect = lsect }; | ||
214 | |||
215 | ring_req->nr_segments++; | ||
216 | } | ||
217 | } | ||
218 | |||
219 | info->ring.req_prod_pvt++; | ||
220 | |||
221 | /* Keep a private copy so we can reissue requests when recovering. */ | ||
222 | info->shadow[id].req = *ring_req; | ||
223 | |||
224 | gnttab_free_grant_references(gref_head); | ||
225 | |||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | |||
230 | static inline void flush_requests(struct blkfront_info *info) | ||
231 | { | ||
232 | int notify; | ||
233 | |||
234 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify); | ||
235 | |||
236 | if (notify) | ||
237 | notify_remote_via_irq(info->irq); | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * do_blkif_request | ||
242 | * read a block; request is in a request queue | ||
243 | */ | ||
244 | static void do_blkif_request(request_queue_t *rq) | ||
245 | { | ||
246 | struct blkfront_info *info = NULL; | ||
247 | struct request *req; | ||
248 | int queued; | ||
249 | |||
250 | pr_debug("Entered do_blkif_request\n"); | ||
251 | |||
252 | queued = 0; | ||
253 | |||
254 | while ((req = elv_next_request(rq)) != NULL) { | ||
255 | info = req->rq_disk->private_data; | ||
256 | if (!blk_fs_request(req)) { | ||
257 | end_request(req, 0); | ||
258 | continue; | ||
259 | } | ||
260 | |||
261 | if (RING_FULL(&info->ring)) | ||
262 | goto wait; | ||
263 | |||
264 | pr_debug("do_blk_req %p: cmd %p, sec %lx, " | ||
265 | "(%u/%li) buffer:%p [%s]\n", | ||
266 | req, req->cmd, (unsigned long)req->sector, | ||
267 | req->current_nr_sectors, | ||
268 | req->nr_sectors, req->buffer, | ||
269 | rq_data_dir(req) ? "write" : "read"); | ||
270 | |||
271 | |||
272 | blkdev_dequeue_request(req); | ||
273 | if (blkif_queue_request(req)) { | ||
274 | blk_requeue_request(rq, req); | ||
275 | wait: | ||
276 | /* Avoid pointless unplugs. */ | ||
277 | blk_stop_queue(rq); | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | queued++; | ||
282 | } | ||
283 | |||
284 | if (queued != 0) | ||
285 | flush_requests(info); | ||
286 | } | ||
287 | |||
288 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | ||
289 | { | ||
290 | request_queue_t *rq; | ||
291 | |||
292 | rq = blk_init_queue(do_blkif_request, &blkif_io_lock); | ||
293 | if (rq == NULL) | ||
294 | return -1; | ||
295 | |||
296 | elevator_init(rq, "noop"); | ||
297 | |||
298 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | ||
299 | blk_queue_hardsect_size(rq, sector_size); | ||
300 | blk_queue_max_sectors(rq, 512); | ||
301 | |||
302 | /* Each segment in a request is up to an aligned page in size. */ | ||
303 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | ||
304 | blk_queue_max_segment_size(rq, PAGE_SIZE); | ||
305 | |||
306 | /* Ensure a merged request will fit in a single I/O ring slot. */ | ||
307 | blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
308 | blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
309 | |||
310 | /* Make sure buffer addresses are sector-aligned. */ | ||
311 | blk_queue_dma_alignment(rq, 511); | ||
312 | |||
313 | gd->queue = rq; | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | |||
319 | static int xlvbd_barrier(struct blkfront_info *info) | ||
320 | { | ||
321 | int err; | ||
322 | |||
323 | err = blk_queue_ordered(info->rq, | ||
324 | info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, | ||
325 | NULL); | ||
326 | |||
327 | if (err) | ||
328 | return err; | ||
329 | |||
330 | printk(KERN_INFO "blkfront: %s: barriers %s\n", | ||
331 | info->gd->disk_name, | ||
332 | info->feature_barrier ? "enabled" : "disabled"); | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | |||
337 | static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, | ||
338 | int vdevice, u16 vdisk_info, u16 sector_size, | ||
339 | struct blkfront_info *info) | ||
340 | { | ||
341 | struct gendisk *gd; | ||
342 | int nr_minors = 1; | ||
343 | int err = -ENODEV; | ||
344 | |||
345 | BUG_ON(info->gd != NULL); | ||
346 | BUG_ON(info->rq != NULL); | ||
347 | |||
348 | if ((minor % PARTS_PER_DISK) == 0) | ||
349 | nr_minors = PARTS_PER_DISK; | ||
350 | |||
351 | gd = alloc_disk(nr_minors); | ||
352 | if (gd == NULL) | ||
353 | goto out; | ||
354 | |||
355 | if (nr_minors > 1) | ||
356 | sprintf(gd->disk_name, "%s%c", DEV_NAME, | ||
357 | 'a' + minor / PARTS_PER_DISK); | ||
358 | else | ||
359 | sprintf(gd->disk_name, "%s%c%d", DEV_NAME, | ||
360 | 'a' + minor / PARTS_PER_DISK, | ||
361 | minor % PARTS_PER_DISK); | ||
362 | |||
363 | gd->major = XENVBD_MAJOR; | ||
364 | gd->first_minor = minor; | ||
365 | gd->fops = &xlvbd_block_fops; | ||
366 | gd->private_data = info; | ||
367 | gd->driverfs_dev = &(info->xbdev->dev); | ||
368 | set_capacity(gd, capacity); | ||
369 | |||
370 | if (xlvbd_init_blk_queue(gd, sector_size)) { | ||
371 | del_gendisk(gd); | ||
372 | goto out; | ||
373 | } | ||
374 | |||
375 | info->rq = gd->queue; | ||
376 | info->gd = gd; | ||
377 | |||
378 | if (info->feature_barrier) | ||
379 | xlvbd_barrier(info); | ||
380 | |||
381 | if (vdisk_info & VDISK_READONLY) | ||
382 | set_disk_ro(gd, 1); | ||
383 | |||
384 | if (vdisk_info & VDISK_REMOVABLE) | ||
385 | gd->flags |= GENHD_FL_REMOVABLE; | ||
386 | |||
387 | if (vdisk_info & VDISK_CDROM) | ||
388 | gd->flags |= GENHD_FL_CD; | ||
389 | |||
390 | return 0; | ||
391 | |||
392 | out: | ||
393 | return err; | ||
394 | } | ||
395 | |||
396 | static void kick_pending_request_queues(struct blkfront_info *info) | ||
397 | { | ||
398 | if (!RING_FULL(&info->ring)) { | ||
399 | /* Re-enable calldowns. */ | ||
400 | blk_start_queue(info->rq); | ||
401 | /* Kick things off immediately. */ | ||
402 | do_blkif_request(info->rq); | ||
403 | } | ||
404 | } | ||
405 | |||
406 | static void blkif_restart_queue(struct work_struct *work) | ||
407 | { | ||
408 | struct blkfront_info *info = container_of(work, struct blkfront_info, work); | ||
409 | |||
410 | spin_lock_irq(&blkif_io_lock); | ||
411 | if (info->connected == BLKIF_STATE_CONNECTED) | ||
412 | kick_pending_request_queues(info); | ||
413 | spin_unlock_irq(&blkif_io_lock); | ||
414 | } | ||
415 | |||
416 | static void blkif_free(struct blkfront_info *info, int suspend) | ||
417 | { | ||
418 | /* Prevent new requests being issued until we fix things up. */ | ||
419 | spin_lock_irq(&blkif_io_lock); | ||
420 | info->connected = suspend ? | ||
421 | BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; | ||
422 | /* No more blkif_request(). */ | ||
423 | if (info->rq) | ||
424 | blk_stop_queue(info->rq); | ||
425 | /* No more gnttab callback work. */ | ||
426 | gnttab_cancel_free_callback(&info->callback); | ||
427 | spin_unlock_irq(&blkif_io_lock); | ||
428 | |||
429 | /* Flush gnttab callback work. Must be done with no locks held. */ | ||
430 | flush_scheduled_work(); | ||
431 | |||
432 | /* Free resources associated with old device channel. */ | ||
433 | if (info->ring_ref != GRANT_INVALID_REF) { | ||
434 | gnttab_end_foreign_access(info->ring_ref, 0, | ||
435 | (unsigned long)info->ring.sring); | ||
436 | info->ring_ref = GRANT_INVALID_REF; | ||
437 | info->ring.sring = NULL; | ||
438 | } | ||
439 | if (info->irq) | ||
440 | unbind_from_irqhandler(info->irq, info); | ||
441 | info->evtchn = info->irq = 0; | ||
442 | |||
443 | } | ||
444 | |||
445 | static void blkif_completion(struct blk_shadow *s) | ||
446 | { | ||
447 | int i; | ||
448 | for (i = 0; i < s->req.nr_segments; i++) | ||
449 | gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL); | ||
450 | } | ||
451 | |||
452 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) | ||
453 | { | ||
454 | struct request *req; | ||
455 | struct blkif_response *bret; | ||
456 | RING_IDX i, rp; | ||
457 | unsigned long flags; | ||
458 | struct blkfront_info *info = (struct blkfront_info *)dev_id; | ||
459 | int uptodate; | ||
460 | |||
461 | spin_lock_irqsave(&blkif_io_lock, flags); | ||
462 | |||
463 | if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { | ||
464 | spin_unlock_irqrestore(&blkif_io_lock, flags); | ||
465 | return IRQ_HANDLED; | ||
466 | } | ||
467 | |||
468 | again: | ||
469 | rp = info->ring.sring->rsp_prod; | ||
470 | rmb(); /* Ensure we see queued responses up to 'rp'. */ | ||
471 | |||
472 | for (i = info->ring.rsp_cons; i != rp; i++) { | ||
473 | unsigned long id; | ||
474 | int ret; | ||
475 | |||
476 | bret = RING_GET_RESPONSE(&info->ring, i); | ||
477 | id = bret->id; | ||
478 | req = (struct request *)info->shadow[id].request; | ||
479 | |||
480 | blkif_completion(&info->shadow[id]); | ||
481 | |||
482 | add_id_to_freelist(info, id); | ||
483 | |||
484 | uptodate = (bret->status == BLKIF_RSP_OKAY); | ||
485 | switch (bret->operation) { | ||
486 | case BLKIF_OP_WRITE_BARRIER: | ||
487 | if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { | ||
488 | printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", | ||
489 | info->gd->disk_name); | ||
490 | uptodate = -EOPNOTSUPP; | ||
491 | info->feature_barrier = 0; | ||
492 | xlvbd_barrier(info); | ||
493 | } | ||
494 | /* fall through */ | ||
495 | case BLKIF_OP_READ: | ||
496 | case BLKIF_OP_WRITE: | ||
497 | if (unlikely(bret->status != BLKIF_RSP_OKAY)) | ||
498 | dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " | ||
499 | "request: %x\n", bret->status); | ||
500 | |||
501 | ret = end_that_request_first(req, uptodate, | ||
502 | req->hard_nr_sectors); | ||
503 | BUG_ON(ret); | ||
504 | end_that_request_last(req, uptodate); | ||
505 | break; | ||
506 | default: | ||
507 | BUG(); | ||
508 | } | ||
509 | } | ||
510 | |||
511 | info->ring.rsp_cons = i; | ||
512 | |||
513 | if (i != info->ring.req_prod_pvt) { | ||
514 | int more_to_do; | ||
515 | RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); | ||
516 | if (more_to_do) | ||
517 | goto again; | ||
518 | } else | ||
519 | info->ring.sring->rsp_event = i + 1; | ||
520 | |||
521 | kick_pending_request_queues(info); | ||
522 | |||
523 | spin_unlock_irqrestore(&blkif_io_lock, flags); | ||
524 | |||
525 | return IRQ_HANDLED; | ||
526 | } | ||
527 | |||
528 | |||
529 | static int setup_blkring(struct xenbus_device *dev, | ||
530 | struct blkfront_info *info) | ||
531 | { | ||
532 | struct blkif_sring *sring; | ||
533 | int err; | ||
534 | |||
535 | info->ring_ref = GRANT_INVALID_REF; | ||
536 | |||
537 | sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL); | ||
538 | if (!sring) { | ||
539 | xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring"); | ||
540 | return -ENOMEM; | ||
541 | } | ||
542 | SHARED_RING_INIT(sring); | ||
543 | FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); | ||
544 | |||
545 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); | ||
546 | if (err < 0) { | ||
547 | free_page((unsigned long)sring); | ||
548 | info->ring.sring = NULL; | ||
549 | goto fail; | ||
550 | } | ||
551 | info->ring_ref = err; | ||
552 | |||
553 | err = xenbus_alloc_evtchn(dev, &info->evtchn); | ||
554 | if (err) | ||
555 | goto fail; | ||
556 | |||
557 | err = bind_evtchn_to_irqhandler(info->evtchn, | ||
558 | blkif_interrupt, | ||
559 | IRQF_SAMPLE_RANDOM, "blkif", info); | ||
560 | if (err <= 0) { | ||
561 | xenbus_dev_fatal(dev, err, | ||
562 | "bind_evtchn_to_irqhandler failed"); | ||
563 | goto fail; | ||
564 | } | ||
565 | info->irq = err; | ||
566 | |||
567 | return 0; | ||
568 | fail: | ||
569 | blkif_free(info, 0); | ||
570 | return err; | ||
571 | } | ||
572 | |||
573 | |||
574 | /* Common code used when first setting up, and when resuming. */ | ||
575 | static int talk_to_backend(struct xenbus_device *dev, | ||
576 | struct blkfront_info *info) | ||
577 | { | ||
578 | const char *message = NULL; | ||
579 | struct xenbus_transaction xbt; | ||
580 | int err; | ||
581 | |||
582 | /* Create shared ring, alloc event channel. */ | ||
583 | err = setup_blkring(dev, info); | ||
584 | if (err) | ||
585 | goto out; | ||
586 | |||
587 | again: | ||
588 | err = xenbus_transaction_start(&xbt); | ||
589 | if (err) { | ||
590 | xenbus_dev_fatal(dev, err, "starting transaction"); | ||
591 | goto destroy_blkring; | ||
592 | } | ||
593 | |||
594 | err = xenbus_printf(xbt, dev->nodename, | ||
595 | "ring-ref", "%u", info->ring_ref); | ||
596 | if (err) { | ||
597 | message = "writing ring-ref"; | ||
598 | goto abort_transaction; | ||
599 | } | ||
600 | err = xenbus_printf(xbt, dev->nodename, | ||
601 | "event-channel", "%u", info->evtchn); | ||
602 | if (err) { | ||
603 | message = "writing event-channel"; | ||
604 | goto abort_transaction; | ||
605 | } | ||
606 | |||
607 | err = xenbus_transaction_end(xbt, 0); | ||
608 | if (err) { | ||
609 | if (err == -EAGAIN) | ||
610 | goto again; | ||
611 | xenbus_dev_fatal(dev, err, "completing transaction"); | ||
612 | goto destroy_blkring; | ||
613 | } | ||
614 | |||
615 | xenbus_switch_state(dev, XenbusStateInitialised); | ||
616 | |||
617 | return 0; | ||
618 | |||
619 | abort_transaction: | ||
620 | xenbus_transaction_end(xbt, 1); | ||
621 | if (message) | ||
622 | xenbus_dev_fatal(dev, err, "%s", message); | ||
623 | destroy_blkring: | ||
624 | blkif_free(info, 0); | ||
625 | out: | ||
626 | return err; | ||
627 | } | ||
628 | |||
629 | |||
630 | /** | ||
631 | * Entry point to this code when a new device is created. Allocate the basic | ||
632 | * structures and the ring buffer for communication with the backend, and | ||
633 | * inform the backend of the appropriate details for those. Switch to | ||
634 | * Initialised state. | ||
635 | */ | ||
636 | static int blkfront_probe(struct xenbus_device *dev, | ||
637 | const struct xenbus_device_id *id) | ||
638 | { | ||
639 | int err, vdevice, i; | ||
640 | struct blkfront_info *info; | ||
641 | |||
642 | /* FIXME: Use dynamic device id if this is not set. */ | ||
643 | err = xenbus_scanf(XBT_NIL, dev->nodename, | ||
644 | "virtual-device", "%i", &vdevice); | ||
645 | if (err != 1) { | ||
646 | xenbus_dev_fatal(dev, err, "reading virtual-device"); | ||
647 | return err; | ||
648 | } | ||
649 | |||
650 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
651 | if (!info) { | ||
652 | xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); | ||
653 | return -ENOMEM; | ||
654 | } | ||
655 | |||
656 | info->xbdev = dev; | ||
657 | info->vdevice = vdevice; | ||
658 | info->connected = BLKIF_STATE_DISCONNECTED; | ||
659 | INIT_WORK(&info->work, blkif_restart_queue); | ||
660 | |||
661 | for (i = 0; i < BLK_RING_SIZE; i++) | ||
662 | info->shadow[i].req.id = i+1; | ||
663 | info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; | ||
664 | |||
665 | /* Front end dir is a number, which is used as the id. */ | ||
666 | info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); | ||
667 | dev->dev.driver_data = info; | ||
668 | |||
669 | err = talk_to_backend(dev, info); | ||
670 | if (err) { | ||
671 | kfree(info); | ||
672 | dev->dev.driver_data = NULL; | ||
673 | return err; | ||
674 | } | ||
675 | |||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | |||
680 | static int blkif_recover(struct blkfront_info *info) | ||
681 | { | ||
682 | int i; | ||
683 | struct blkif_request *req; | ||
684 | struct blk_shadow *copy; | ||
685 | int j; | ||
686 | |||
687 | /* Stage 1: Make a safe copy of the shadow state. */ | ||
688 | copy = kmalloc(sizeof(info->shadow), GFP_KERNEL); | ||
689 | if (!copy) | ||
690 | return -ENOMEM; | ||
691 | memcpy(copy, info->shadow, sizeof(info->shadow)); | ||
692 | |||
693 | /* Stage 2: Set up free list. */ | ||
694 | memset(&info->shadow, 0, sizeof(info->shadow)); | ||
695 | for (i = 0; i < BLK_RING_SIZE; i++) | ||
696 | info->shadow[i].req.id = i+1; | ||
697 | info->shadow_free = info->ring.req_prod_pvt; | ||
698 | info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; | ||
699 | |||
700 | /* Stage 3: Find pending requests and requeue them. */ | ||
701 | for (i = 0; i < BLK_RING_SIZE; i++) { | ||
702 | /* Not in use? */ | ||
703 | if (copy[i].request == 0) | ||
704 | continue; | ||
705 | |||
706 | /* Grab a request slot and copy shadow state into it. */ | ||
707 | req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); | ||
708 | *req = copy[i].req; | ||
709 | |||
710 | /* We get a new request id, and must reset the shadow state. */ | ||
711 | req->id = get_id_from_freelist(info); | ||
712 | memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i])); | ||
713 | |||
714 | /* Rewrite any grant references invalidated by susp/resume. */ | ||
715 | for (j = 0; j < req->nr_segments; j++) | ||
716 | gnttab_grant_foreign_access_ref( | ||
717 | req->seg[j].gref, | ||
718 | info->xbdev->otherend_id, | ||
719 | pfn_to_mfn(info->shadow[req->id].frame[j]), | ||
720 | rq_data_dir( | ||
721 | (struct request *) | ||
722 | info->shadow[req->id].request)); | ||
723 | info->shadow[req->id].req = *req; | ||
724 | |||
725 | info->ring.req_prod_pvt++; | ||
726 | } | ||
727 | |||
728 | kfree(copy); | ||
729 | |||
730 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | ||
731 | |||
732 | spin_lock_irq(&blkif_io_lock); | ||
733 | |||
734 | /* Now safe for us to use the shared ring */ | ||
735 | info->connected = BLKIF_STATE_CONNECTED; | ||
736 | |||
737 | /* Send off requeued requests */ | ||
738 | flush_requests(info); | ||
739 | |||
740 | /* Kick any other new requests queued since we resumed */ | ||
741 | kick_pending_request_queues(info); | ||
742 | |||
743 | spin_unlock_irq(&blkif_io_lock); | ||
744 | |||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | /** | ||
749 | * We are reconnecting to the backend, due to a suspend/resume, or a backend | ||
750 | * driver restart. We tear down our blkif structure and recreate it, but | ||
751 | * leave the device-layer structures intact so that this is transparent to the | ||
752 | * rest of the kernel. | ||
753 | */ | ||
754 | static int blkfront_resume(struct xenbus_device *dev) | ||
755 | { | ||
756 | struct blkfront_info *info = dev->dev.driver_data; | ||
757 | int err; | ||
758 | |||
759 | dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); | ||
760 | |||
761 | blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); | ||
762 | |||
763 | err = talk_to_backend(dev, info); | ||
764 | if (info->connected == BLKIF_STATE_SUSPENDED && !err) | ||
765 | err = blkif_recover(info); | ||
766 | |||
767 | return err; | ||
768 | } | ||
769 | |||
770 | |||
771 | /* | ||
772 | * Invoked when the backend is finally 'ready' (and has told produced | ||
773 | * the details about the physical device - #sectors, size, etc). | ||
774 | */ | ||
775 | static void blkfront_connect(struct blkfront_info *info) | ||
776 | { | ||
777 | unsigned long long sectors; | ||
778 | unsigned long sector_size; | ||
779 | unsigned int binfo; | ||
780 | int err; | ||
781 | |||
782 | if ((info->connected == BLKIF_STATE_CONNECTED) || | ||
783 | (info->connected == BLKIF_STATE_SUSPENDED) ) | ||
784 | return; | ||
785 | |||
786 | dev_dbg(&info->xbdev->dev, "%s:%s.\n", | ||
787 | __func__, info->xbdev->otherend); | ||
788 | |||
789 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | ||
790 | "sectors", "%llu", §ors, | ||
791 | "info", "%u", &binfo, | ||
792 | "sector-size", "%lu", §or_size, | ||
793 | NULL); | ||
794 | if (err) { | ||
795 | xenbus_dev_fatal(info->xbdev, err, | ||
796 | "reading backend fields at %s", | ||
797 | info->xbdev->otherend); | ||
798 | return; | ||
799 | } | ||
800 | |||
801 | err = xenbus_gather(XBT_NIL, info->xbdev->otherend, | ||
802 | "feature-barrier", "%lu", &info->feature_barrier, | ||
803 | NULL); | ||
804 | if (err) | ||
805 | info->feature_barrier = 0; | ||
806 | |||
807 | err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice), | ||
808 | sectors, info->vdevice, | ||
809 | binfo, sector_size, info); | ||
810 | if (err) { | ||
811 | xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", | ||
812 | info->xbdev->otherend); | ||
813 | return; | ||
814 | } | ||
815 | |||
816 | xenbus_switch_state(info->xbdev, XenbusStateConnected); | ||
817 | |||
818 | /* Kick pending requests. */ | ||
819 | spin_lock_irq(&blkif_io_lock); | ||
820 | info->connected = BLKIF_STATE_CONNECTED; | ||
821 | kick_pending_request_queues(info); | ||
822 | spin_unlock_irq(&blkif_io_lock); | ||
823 | |||
824 | add_disk(info->gd); | ||
825 | } | ||
826 | |||
827 | /** | ||
828 | * Handle the change of state of the backend to Closing. We must delete our | ||
829 | * device-layer structures now, to ensure that writes are flushed through to | ||
830 | * the backend. Once is this done, we can switch to Closed in | ||
831 | * acknowledgement. | ||
832 | */ | ||
833 | static void blkfront_closing(struct xenbus_device *dev) | ||
834 | { | ||
835 | struct blkfront_info *info = dev->dev.driver_data; | ||
836 | unsigned long flags; | ||
837 | |||
838 | dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename); | ||
839 | |||
840 | if (info->rq == NULL) | ||
841 | goto out; | ||
842 | |||
843 | spin_lock_irqsave(&blkif_io_lock, flags); | ||
844 | |||
845 | del_gendisk(info->gd); | ||
846 | |||
847 | /* No more blkif_request(). */ | ||
848 | blk_stop_queue(info->rq); | ||
849 | |||
850 | /* No more gnttab callback work. */ | ||
851 | gnttab_cancel_free_callback(&info->callback); | ||
852 | spin_unlock_irqrestore(&blkif_io_lock, flags); | ||
853 | |||
854 | /* Flush gnttab callback work. Must be done with no locks held. */ | ||
855 | flush_scheduled_work(); | ||
856 | |||
857 | blk_cleanup_queue(info->rq); | ||
858 | info->rq = NULL; | ||
859 | |||
860 | out: | ||
861 | xenbus_frontend_closed(dev); | ||
862 | } | ||
863 | |||
864 | /** | ||
865 | * Callback received when the backend's state changes. | ||
866 | */ | ||
867 | static void backend_changed(struct xenbus_device *dev, | ||
868 | enum xenbus_state backend_state) | ||
869 | { | ||
870 | struct blkfront_info *info = dev->dev.driver_data; | ||
871 | struct block_device *bd; | ||
872 | |||
873 | dev_dbg(&dev->dev, "blkfront:backend_changed.\n"); | ||
874 | |||
875 | switch (backend_state) { | ||
876 | case XenbusStateInitialising: | ||
877 | case XenbusStateInitWait: | ||
878 | case XenbusStateInitialised: | ||
879 | case XenbusStateUnknown: | ||
880 | case XenbusStateClosed: | ||
881 | break; | ||
882 | |||
883 | case XenbusStateConnected: | ||
884 | blkfront_connect(info); | ||
885 | break; | ||
886 | |||
887 | case XenbusStateClosing: | ||
888 | bd = bdget(info->dev); | ||
889 | if (bd == NULL) | ||
890 | xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); | ||
891 | |||
892 | mutex_lock(&bd->bd_mutex); | ||
893 | if (info->users > 0) | ||
894 | xenbus_dev_error(dev, -EBUSY, | ||
895 | "Device in use; refusing to close"); | ||
896 | else | ||
897 | blkfront_closing(dev); | ||
898 | mutex_unlock(&bd->bd_mutex); | ||
899 | bdput(bd); | ||
900 | break; | ||
901 | } | ||
902 | } | ||
903 | |||
904 | static int blkfront_remove(struct xenbus_device *dev) | ||
905 | { | ||
906 | struct blkfront_info *info = dev->dev.driver_data; | ||
907 | |||
908 | dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename); | ||
909 | |||
910 | blkif_free(info, 0); | ||
911 | |||
912 | kfree(info); | ||
913 | |||
914 | return 0; | ||
915 | } | ||
916 | |||
917 | static int blkif_open(struct inode *inode, struct file *filep) | ||
918 | { | ||
919 | struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; | ||
920 | info->users++; | ||
921 | return 0; | ||
922 | } | ||
923 | |||
924 | static int blkif_release(struct inode *inode, struct file *filep) | ||
925 | { | ||
926 | struct blkfront_info *info = inode->i_bdev->bd_disk->private_data; | ||
927 | info->users--; | ||
928 | if (info->users == 0) { | ||
929 | /* Check whether we have been instructed to close. We will | ||
930 | have ignored this request initially, as the device was | ||
931 | still mounted. */ | ||
932 | struct xenbus_device *dev = info->xbdev; | ||
933 | enum xenbus_state state = xenbus_read_driver_state(dev->otherend); | ||
934 | |||
935 | if (state == XenbusStateClosing) | ||
936 | blkfront_closing(dev); | ||
937 | } | ||
938 | return 0; | ||
939 | } | ||
940 | |||
941 | static struct block_device_operations xlvbd_block_fops = | ||
942 | { | ||
943 | .owner = THIS_MODULE, | ||
944 | .open = blkif_open, | ||
945 | .release = blkif_release, | ||
946 | }; | ||
947 | |||
948 | |||
949 | static struct xenbus_device_id blkfront_ids[] = { | ||
950 | { "vbd" }, | ||
951 | { "" } | ||
952 | }; | ||
953 | |||
954 | static struct xenbus_driver blkfront = { | ||
955 | .name = "vbd", | ||
956 | .owner = THIS_MODULE, | ||
957 | .ids = blkfront_ids, | ||
958 | .probe = blkfront_probe, | ||
959 | .remove = blkfront_remove, | ||
960 | .resume = blkfront_resume, | ||
961 | .otherend_changed = backend_changed, | ||
962 | }; | ||
963 | |||
964 | static int __init xlblk_init(void) | ||
965 | { | ||
966 | if (!is_running_on_xen()) | ||
967 | return -ENODEV; | ||
968 | |||
969 | if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { | ||
970 | printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n", | ||
971 | XENVBD_MAJOR, DEV_NAME); | ||
972 | return -ENODEV; | ||
973 | } | ||
974 | |||
975 | return xenbus_register_frontend(&blkfront); | ||
976 | } | ||
977 | module_init(xlblk_init); | ||
978 | |||
979 | |||
980 | static void xlblk_exit(void) | ||
981 | { | ||
982 | return xenbus_unregister_driver(&blkfront); | ||
983 | } | ||
984 | module_exit(xlblk_exit); | ||
985 | |||
986 | MODULE_DESCRIPTION("Xen virtual block device frontend"); | ||
987 | MODULE_LICENSE("GPL"); | ||
988 | MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR); | ||
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index d8d7125529c4..9e8f21410d2d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -372,39 +372,6 @@ config ISTALLION | |||
372 | To compile this driver as a module, choose M here: the | 372 | To compile this driver as a module, choose M here: the |
373 | module will be called istallion. | 373 | module will be called istallion. |
374 | 374 | ||
375 | config SERIAL_DEC | ||
376 | bool "DECstation serial support" | ||
377 | depends on MACH_DECSTATION | ||
378 | default y | ||
379 | help | ||
380 | This selects whether you want to be asked about drivers for | ||
381 | DECstation serial ports. | ||
382 | |||
383 | Note that the answer to this question won't directly affect the | ||
384 | kernel: saying N will just cause the configurator to skip all | ||
385 | the questions about DECstation serial ports. | ||
386 | |||
387 | config SERIAL_DEC_CONSOLE | ||
388 | bool "Support for console on a DECstation serial port" | ||
389 | depends on SERIAL_DEC | ||
390 | default y | ||
391 | help | ||
392 | If you say Y here, it will be possible to use a serial port as the | ||
393 | system console (the system console is the device which receives all | ||
394 | kernel messages and warnings and which allows logins in single user | ||
395 | mode). Note that the firmware uses ttyS0 as the serial console on | ||
396 | the Maxine and ttyS2 on the others. | ||
397 | |||
398 | If unsure, say Y. | ||
399 | |||
400 | config ZS | ||
401 | bool "Z85C30 Serial Support" | ||
402 | depends on SERIAL_DEC | ||
403 | default y | ||
404 | help | ||
405 | Documentation on the Zilog 85C350 serial communications controller | ||
406 | is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf> | ||
407 | |||
408 | config A2232 | 375 | config A2232 |
409 | tristate "Commodore A2232 serial support (EXPERIMENTAL)" | 376 | tristate "Commodore A2232 serial support (EXPERIMENTAL)" |
410 | depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP | 377 | depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP |
@@ -637,6 +604,14 @@ config HVC_BEAT | |||
637 | help | 604 | help |
638 | Toshiba's Cell Reference Set Beat Console device driver | 605 | Toshiba's Cell Reference Set Beat Console device driver |
639 | 606 | ||
607 | config HVC_XEN | ||
608 | bool "Xen Hypervisor Console support" | ||
609 | depends on XEN | ||
610 | select HVC_DRIVER | ||
611 | default y | ||
612 | help | ||
613 | Xen virtual console device driver | ||
614 | |||
640 | config HVCS | 615 | config HVCS |
641 | tristate "IBM Hypervisor Virtual Console Server support" | 616 | tristate "IBM Hypervisor Virtual Console Server support" |
642 | depends on PPC_PSERIES | 617 | depends on PPC_PSERIES |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index f2996a95eb07..8852b8d643cf 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -48,6 +48,7 @@ obj-$(CONFIG_HVC_ISERIES) += hvc_iseries.o | |||
48 | obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o | 48 | obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o |
49 | obj-$(CONFIG_HVC_BEAT) += hvc_beat.o | 49 | obj-$(CONFIG_HVC_BEAT) += hvc_beat.o |
50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o | 50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o |
51 | obj-$(CONFIG_HVC_XEN) += hvc_xen.o | ||
51 | obj-$(CONFIG_RAW_DRIVER) += raw.o | 52 | obj-$(CONFIG_RAW_DRIVER) += raw.o |
52 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 53 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
53 | obj-$(CONFIG_MSPEC) += mspec.o | 54 | obj-$(CONFIG_MSPEC) += mspec.o |
diff --git a/drivers/char/decserial.c b/drivers/char/decserial.c deleted file mode 100644 index 8ea2bea2b183..000000000000 --- a/drivers/char/decserial.c +++ /dev/null | |||
@@ -1,67 +0,0 @@ | |||
1 | /* | ||
2 | * sercons.c | ||
3 | * choose the right serial device at boot time | ||
4 | * | ||
5 | * triemer 6-SEP-1998 | ||
6 | * sercons.c is designed to allow the three different kinds | ||
7 | * of serial devices under the decstation world to co-exist | ||
8 | * in the same kernel. The idea here is to abstract | ||
9 | * the pieces of the drivers that are common to this file | ||
10 | * so that they do not clash at compile time and runtime. | ||
11 | * | ||
12 | * HK 16-SEP-1998 v0.002 | ||
13 | * removed the PROM console as this is not a real serial | ||
14 | * device. Added support for PROM console in drivers/char/tty_io.c | ||
15 | * instead. Although it may work to enable more than one | ||
16 | * console device I strongly recommend to use only one. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <asm/dec/machtype.h> | ||
21 | |||
22 | #ifdef CONFIG_ZS | ||
23 | extern int zs_init(void); | ||
24 | #endif | ||
25 | |||
26 | #ifdef CONFIG_SERIAL_CONSOLE | ||
27 | |||
28 | #ifdef CONFIG_ZS | ||
29 | extern void zs_serial_console_init(void); | ||
30 | #endif | ||
31 | |||
32 | #endif | ||
33 | |||
34 | /* rs_init - starts up the serial interface - | ||
35 | handle normal case of starting up the serial interface */ | ||
36 | |||
37 | #ifdef CONFIG_SERIAL | ||
38 | |||
39 | int __init rs_init(void) | ||
40 | { | ||
41 | #ifdef CONFIG_ZS | ||
42 | if (IOASIC) | ||
43 | return zs_init(); | ||
44 | #endif | ||
45 | return -ENXIO; | ||
46 | } | ||
47 | |||
48 | __initcall(rs_init); | ||
49 | |||
50 | #endif | ||
51 | |||
52 | #ifdef CONFIG_SERIAL_CONSOLE | ||
53 | |||
54 | /* serial_console_init handles the special case of starting | ||
55 | * up the console on the serial port | ||
56 | */ | ||
57 | static int __init decserial_console_init(void) | ||
58 | { | ||
59 | #ifdef CONFIG_ZS | ||
60 | if (IOASIC) | ||
61 | zs_serial_console_init(); | ||
62 | #endif | ||
63 | return 0; | ||
64 | } | ||
65 | console_initcall(decserial_console_init); | ||
66 | |||
67 | #endif | ||
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c new file mode 100644 index 000000000000..dd68f8541c2d --- /dev/null +++ b/drivers/char/hvc_xen.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * xen console driver interface to hvc_console.c | ||
3 | * | ||
4 | * (c) 2007 Gerd Hoffmann <kraxel@suse.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/console.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/types.h> | ||
26 | |||
27 | #include <asm/xen/hypervisor.h> | ||
28 | #include <xen/page.h> | ||
29 | #include <xen/events.h> | ||
30 | #include <xen/interface/io/console.h> | ||
31 | #include <xen/hvc-console.h> | ||
32 | |||
33 | #include "hvc_console.h" | ||
34 | |||
35 | #define HVC_COOKIE 0x58656e /* "Xen" in hex */ | ||
36 | |||
37 | static struct hvc_struct *hvc; | ||
38 | static int xencons_irq; | ||
39 | |||
40 | /* ------------------------------------------------------------------ */ | ||
41 | |||
42 | static inline struct xencons_interface *xencons_interface(void) | ||
43 | { | ||
44 | return mfn_to_virt(xen_start_info->console.domU.mfn); | ||
45 | } | ||
46 | |||
47 | static inline void notify_daemon(void) | ||
48 | { | ||
49 | /* Use evtchn: this is called early, before irq is set up. */ | ||
50 | notify_remote_via_evtchn(xen_start_info->console.domU.evtchn); | ||
51 | } | ||
52 | |||
53 | static int write_console(uint32_t vtermno, const char *data, int len) | ||
54 | { | ||
55 | struct xencons_interface *intf = xencons_interface(); | ||
56 | XENCONS_RING_IDX cons, prod; | ||
57 | int sent = 0; | ||
58 | |||
59 | cons = intf->out_cons; | ||
60 | prod = intf->out_prod; | ||
61 | mb(); /* update queue values before going on */ | ||
62 | BUG_ON((prod - cons) > sizeof(intf->out)); | ||
63 | |||
64 | while ((sent < len) && ((prod - cons) < sizeof(intf->out))) | ||
65 | intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++]; | ||
66 | |||
67 | wmb(); /* write ring before updating pointer */ | ||
68 | intf->out_prod = prod; | ||
69 | |||
70 | notify_daemon(); | ||
71 | return sent; | ||
72 | } | ||
73 | |||
74 | static int read_console(uint32_t vtermno, char *buf, int len) | ||
75 | { | ||
76 | struct xencons_interface *intf = xencons_interface(); | ||
77 | XENCONS_RING_IDX cons, prod; | ||
78 | int recv = 0; | ||
79 | |||
80 | cons = intf->in_cons; | ||
81 | prod = intf->in_prod; | ||
82 | mb(); /* get pointers before reading ring */ | ||
83 | BUG_ON((prod - cons) > sizeof(intf->in)); | ||
84 | |||
85 | while (cons != prod && recv < len) | ||
86 | buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)]; | ||
87 | |||
88 | mb(); /* read ring before consuming */ | ||
89 | intf->in_cons = cons; | ||
90 | |||
91 | notify_daemon(); | ||
92 | return recv; | ||
93 | } | ||
94 | |||
95 | static struct hv_ops hvc_ops = { | ||
96 | .get_chars = read_console, | ||
97 | .put_chars = write_console, | ||
98 | }; | ||
99 | |||
100 | static int __init xen_init(void) | ||
101 | { | ||
102 | struct hvc_struct *hp; | ||
103 | |||
104 | if (!is_running_on_xen()) | ||
105 | return 0; | ||
106 | |||
107 | xencons_irq = bind_evtchn_to_irq(xen_start_info->console.domU.evtchn); | ||
108 | if (xencons_irq < 0) | ||
109 | xencons_irq = 0 /* NO_IRQ */; | ||
110 | hp = hvc_alloc(HVC_COOKIE, xencons_irq, &hvc_ops, 256); | ||
111 | if (IS_ERR(hp)) | ||
112 | return PTR_ERR(hp); | ||
113 | |||
114 | hvc = hp; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static void __exit xen_fini(void) | ||
119 | { | ||
120 | if (hvc) | ||
121 | hvc_remove(hvc); | ||
122 | } | ||
123 | |||
124 | static int xen_cons_init(void) | ||
125 | { | ||
126 | if (!is_running_on_xen()) | ||
127 | return 0; | ||
128 | |||
129 | hvc_instantiate(HVC_COOKIE, 0, &hvc_ops); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | module_init(xen_init); | ||
134 | module_exit(xen_fini); | ||
135 | console_initcall(xen_cons_init); | ||
136 | |||
137 | static void xenboot_write_console(struct console *console, const char *string, | ||
138 | unsigned len) | ||
139 | { | ||
140 | unsigned int linelen, off = 0; | ||
141 | const char *pos; | ||
142 | |||
143 | while (off < len && NULL != (pos = strchr(string+off, '\n'))) { | ||
144 | linelen = pos-string+off; | ||
145 | if (off + linelen > len) | ||
146 | break; | ||
147 | write_console(0, string+off, linelen); | ||
148 | write_console(0, "\r\n", 2); | ||
149 | off += linelen + 1; | ||
150 | } | ||
151 | if (off < len) | ||
152 | write_console(0, string+off, len-off); | ||
153 | } | ||
154 | |||
155 | struct console xenboot_console = { | ||
156 | .name = "xenboot", | ||
157 | .write = xenboot_write_console, | ||
158 | .flags = CON_PRINTBUFFER | CON_BOOT, | ||
159 | }; | ||
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index dbb22403979f..3d90fc002097 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c | |||
@@ -1770,7 +1770,8 @@ static int call_critical_overtemp(void) | |||
1770 | "PATH=/sbin:/usr/sbin:/bin:/usr/bin", | 1770 | "PATH=/sbin:/usr/sbin:/bin:/usr/bin", |
1771 | NULL }; | 1771 | NULL }; |
1772 | 1772 | ||
1773 | return call_usermodehelper(critical_overtemp_path, argv, envp, 0); | 1773 | return call_usermodehelper(critical_overtemp_path, |
1774 | argv, envp, UMH_WAIT_EXEC); | ||
1774 | } | 1775 | } |
1775 | 1776 | ||
1776 | 1777 | ||
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index e18d265d5d33..516d943227e2 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c | |||
@@ -80,7 +80,8 @@ int wf_critical_overtemp(void) | |||
80 | "PATH=/sbin:/usr/sbin:/bin:/usr/bin", | 80 | "PATH=/sbin:/usr/sbin:/bin:/usr/bin", |
81 | NULL }; | 81 | NULL }; |
82 | 82 | ||
83 | return call_usermodehelper(critical_overtemp_path, argv, envp, 0); | 83 | return call_usermodehelper(critical_overtemp_path, |
84 | argv, envp, UMH_WAIT_EXEC); | ||
84 | } | 85 | } |
85 | EXPORT_SYMBOL_GPL(wf_critical_overtemp); | 86 | EXPORT_SYMBOL_GPL(wf_critical_overtemp); |
86 | 87 | ||
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 3d65917a1bbb..8fe81e1807e0 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c | |||
@@ -623,6 +623,7 @@ int dm_create_persistent(struct exception_store *store) | |||
623 | 623 | ||
624 | ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); | 624 | ps->metadata_wq = create_singlethread_workqueue("ksnaphd"); |
625 | if (!ps->metadata_wq) { | 625 | if (!ps->metadata_wq) { |
626 | kfree(ps); | ||
626 | DMERR("couldn't start header metadata update thread"); | 627 | DMERR("couldn't start header metadata update thread"); |
627 | return -ENOMEM; | 628 | return -ENOMEM; |
628 | } | 629 | } |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 43d03178064d..5fb659f8b20e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -2486,6 +2486,18 @@ source "drivers/atm/Kconfig" | |||
2486 | 2486 | ||
2487 | source "drivers/s390/net/Kconfig" | 2487 | source "drivers/s390/net/Kconfig" |
2488 | 2488 | ||
2489 | config XEN_NETDEV_FRONTEND | ||
2490 | tristate "Xen network device frontend driver" | ||
2491 | depends on XEN | ||
2492 | default y | ||
2493 | help | ||
2494 | The network device frontend driver allows the kernel to | ||
2495 | access network devices exported exported by a virtual | ||
2496 | machine containing a physical network device driver. The | ||
2497 | frontend driver is intended for unprivileged guest domains; | ||
2498 | if you are compiling a kernel for a Xen guest, you almost | ||
2499 | certainly want to enable this. | ||
2500 | |||
2489 | config ISERIES_VETH | 2501 | config ISERIES_VETH |
2490 | tristate "iSeries Virtual Ethernet driver support" | 2502 | tristate "iSeries Virtual Ethernet driver support" |
2491 | depends on PPC_ISERIES | 2503 | depends on PPC_ISERIES |
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index eb4167622a6a..0e286ab8855a 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
@@ -127,6 +127,8 @@ obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o | |||
127 | obj-$(CONFIG_SLIP) += slip.o | 127 | obj-$(CONFIG_SLIP) += slip.o |
128 | obj-$(CONFIG_SLHC) += slhc.o | 128 | obj-$(CONFIG_SLHC) += slhc.o |
129 | 129 | ||
130 | obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o | ||
131 | |||
130 | obj-$(CONFIG_DUMMY) += dummy.o | 132 | obj-$(CONFIG_DUMMY) += dummy.o |
131 | obj-$(CONFIG_IFB) += ifb.o | 133 | obj-$(CONFIG_IFB) += ifb.o |
132 | obj-$(CONFIG_MACVLAN) += macvlan.o | 134 | obj-$(CONFIG_MACVLAN) += macvlan.o |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 84aa2117c0ee..355c6cf3d112 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -320,7 +320,7 @@ static int eppconfig(struct baycom_state *bc) | |||
320 | sprintf(portarg, "%ld", bc->pdev->port->base); | 320 | sprintf(portarg, "%ld", bc->pdev->port->base); |
321 | printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg); | 321 | printk(KERN_DEBUG "%s: %s -s -p %s -m %s\n", bc_drvname, eppconfig_path, portarg, modearg); |
322 | 322 | ||
323 | return call_usermodehelper(eppconfig_path, argv, envp, 1); | 323 | return call_usermodehelper(eppconfig_path, argv, envp, UMH_WAIT_PROC); |
324 | } | 324 | } |
325 | 325 | ||
326 | /* ---------------------------------------------------------------------- */ | 326 | /* ---------------------------------------------------------------------- */ |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c new file mode 100644 index 000000000000..489f69c5d6ca --- /dev/null +++ b/drivers/net/xen-netfront.c | |||
@@ -0,0 +1,1863 @@ | |||
1 | /* | ||
2 | * Virtual network driver for conversing with remote driver backends. | ||
3 | * | ||
4 | * Copyright (c) 2002-2005, K A Fraser | ||
5 | * Copyright (c) 2005, XenSource Ltd | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version 2 | ||
9 | * as published by the Free Software Foundation; or, when distributed | ||
10 | * separately from the Linux kernel or incorporated into other | ||
11 | * software packages, subject to the following license: | ||
12 | * | ||
13 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
14 | * of this source file (the "Software"), to deal in the Software without | ||
15 | * restriction, including without limitation the rights to use, copy, modify, | ||
16 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
17 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
18 | * the following conditions: | ||
19 | * | ||
20 | * The above copyright notice and this permission notice shall be included in | ||
21 | * all copies or substantial portions of the Software. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
24 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
25 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
26 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
27 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
28 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
29 | * IN THE SOFTWARE. | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/netdevice.h> | ||
35 | #include <linux/etherdevice.h> | ||
36 | #include <linux/skbuff.h> | ||
37 | #include <linux/ethtool.h> | ||
38 | #include <linux/if_ether.h> | ||
39 | #include <linux/tcp.h> | ||
40 | #include <linux/udp.h> | ||
41 | #include <linux/moduleparam.h> | ||
42 | #include <linux/mm.h> | ||
43 | #include <net/ip.h> | ||
44 | |||
45 | #include <xen/xenbus.h> | ||
46 | #include <xen/events.h> | ||
47 | #include <xen/page.h> | ||
48 | #include <xen/grant_table.h> | ||
49 | |||
50 | #include <xen/interface/io/netif.h> | ||
51 | #include <xen/interface/memory.h> | ||
52 | #include <xen/interface/grant_table.h> | ||
53 | |||
54 | static struct ethtool_ops xennet_ethtool_ops; | ||
55 | |||
56 | struct netfront_cb { | ||
57 | struct page *page; | ||
58 | unsigned offset; | ||
59 | }; | ||
60 | |||
61 | #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) | ||
62 | |||
63 | #define RX_COPY_THRESHOLD 256 | ||
64 | |||
65 | #define GRANT_INVALID_REF 0 | ||
66 | |||
67 | #define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) | ||
68 | #define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) | ||
69 | #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) | ||
70 | |||
71 | struct netfront_info { | ||
72 | struct list_head list; | ||
73 | struct net_device *netdev; | ||
74 | |||
75 | struct net_device_stats stats; | ||
76 | |||
77 | struct xen_netif_tx_front_ring tx; | ||
78 | struct xen_netif_rx_front_ring rx; | ||
79 | |||
80 | spinlock_t tx_lock; | ||
81 | spinlock_t rx_lock; | ||
82 | |||
83 | unsigned int evtchn; | ||
84 | |||
85 | /* Receive-ring batched refills. */ | ||
86 | #define RX_MIN_TARGET 8 | ||
87 | #define RX_DFL_MIN_TARGET 64 | ||
88 | #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) | ||
89 | unsigned rx_min_target, rx_max_target, rx_target; | ||
90 | struct sk_buff_head rx_batch; | ||
91 | |||
92 | struct timer_list rx_refill_timer; | ||
93 | |||
94 | /* | ||
95 | * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries | ||
96 | * are linked from tx_skb_freelist through skb_entry.link. | ||
97 | * | ||
98 | * NB. Freelist index entries are always going to be less than | ||
99 | * PAGE_OFFSET, whereas pointers to skbs will always be equal or | ||
100 | * greater than PAGE_OFFSET: we use this property to distinguish | ||
101 | * them. | ||
102 | */ | ||
103 | union skb_entry { | ||
104 | struct sk_buff *skb; | ||
105 | unsigned link; | ||
106 | } tx_skbs[NET_TX_RING_SIZE]; | ||
107 | grant_ref_t gref_tx_head; | ||
108 | grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; | ||
109 | unsigned tx_skb_freelist; | ||
110 | |||
111 | struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; | ||
112 | grant_ref_t gref_rx_head; | ||
113 | grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; | ||
114 | |||
115 | struct xenbus_device *xbdev; | ||
116 | int tx_ring_ref; | ||
117 | int rx_ring_ref; | ||
118 | |||
119 | unsigned long rx_pfn_array[NET_RX_RING_SIZE]; | ||
120 | struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; | ||
121 | struct mmu_update rx_mmu[NET_RX_RING_SIZE]; | ||
122 | }; | ||
123 | |||
124 | struct netfront_rx_info { | ||
125 | struct xen_netif_rx_response rx; | ||
126 | struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * Access macros for acquiring freeing slots in tx_skbs[]. | ||
131 | */ | ||
132 | |||
133 | static void add_id_to_freelist(unsigned *head, union skb_entry *list, | ||
134 | unsigned short id) | ||
135 | { | ||
136 | list[id].link = *head; | ||
137 | *head = id; | ||
138 | } | ||
139 | |||
140 | static unsigned short get_id_from_freelist(unsigned *head, | ||
141 | union skb_entry *list) | ||
142 | { | ||
143 | unsigned int id = *head; | ||
144 | *head = list[id].link; | ||
145 | return id; | ||
146 | } | ||
147 | |||
148 | static int xennet_rxidx(RING_IDX idx) | ||
149 | { | ||
150 | return idx & (NET_RX_RING_SIZE - 1); | ||
151 | } | ||
152 | |||
153 | static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np, | ||
154 | RING_IDX ri) | ||
155 | { | ||
156 | int i = xennet_rxidx(ri); | ||
157 | struct sk_buff *skb = np->rx_skbs[i]; | ||
158 | np->rx_skbs[i] = NULL; | ||
159 | return skb; | ||
160 | } | ||
161 | |||
162 | static grant_ref_t xennet_get_rx_ref(struct netfront_info *np, | ||
163 | RING_IDX ri) | ||
164 | { | ||
165 | int i = xennet_rxidx(ri); | ||
166 | grant_ref_t ref = np->grant_rx_ref[i]; | ||
167 | np->grant_rx_ref[i] = GRANT_INVALID_REF; | ||
168 | return ref; | ||
169 | } | ||
170 | |||
171 | #ifdef CONFIG_SYSFS | ||
172 | static int xennet_sysfs_addif(struct net_device *netdev); | ||
173 | static void xennet_sysfs_delif(struct net_device *netdev); | ||
174 | #else /* !CONFIG_SYSFS */ | ||
175 | #define xennet_sysfs_addif(dev) (0) | ||
176 | #define xennet_sysfs_delif(dev) do { } while (0) | ||
177 | #endif | ||
178 | |||
179 | static int xennet_can_sg(struct net_device *dev) | ||
180 | { | ||
181 | return dev->features & NETIF_F_SG; | ||
182 | } | ||
183 | |||
184 | |||
185 | static void rx_refill_timeout(unsigned long data) | ||
186 | { | ||
187 | struct net_device *dev = (struct net_device *)data; | ||
188 | netif_rx_schedule(dev); | ||
189 | } | ||
190 | |||
191 | static int netfront_tx_slot_available(struct netfront_info *np) | ||
192 | { | ||
193 | return ((np->tx.req_prod_pvt - np->tx.rsp_cons) < | ||
194 | (TX_MAX_TARGET - MAX_SKB_FRAGS - 2)); | ||
195 | } | ||
196 | |||
197 | static void xennet_maybe_wake_tx(struct net_device *dev) | ||
198 | { | ||
199 | struct netfront_info *np = netdev_priv(dev); | ||
200 | |||
201 | if (unlikely(netif_queue_stopped(dev)) && | ||
202 | netfront_tx_slot_available(np) && | ||
203 | likely(netif_running(dev))) | ||
204 | netif_wake_queue(dev); | ||
205 | } | ||
206 | |||
207 | static void xennet_alloc_rx_buffers(struct net_device *dev) | ||
208 | { | ||
209 | unsigned short id; | ||
210 | struct netfront_info *np = netdev_priv(dev); | ||
211 | struct sk_buff *skb; | ||
212 | struct page *page; | ||
213 | int i, batch_target, notify; | ||
214 | RING_IDX req_prod = np->rx.req_prod_pvt; | ||
215 | struct xen_memory_reservation reservation; | ||
216 | grant_ref_t ref; | ||
217 | unsigned long pfn; | ||
218 | void *vaddr; | ||
219 | int nr_flips; | ||
220 | struct xen_netif_rx_request *req; | ||
221 | |||
222 | if (unlikely(!netif_carrier_ok(dev))) | ||
223 | return; | ||
224 | |||
225 | /* | ||
226 | * Allocate skbuffs greedily, even though we batch updates to the | ||
227 | * receive ring. This creates a less bursty demand on the memory | ||
228 | * allocator, so should reduce the chance of failed allocation requests | ||
229 | * both for ourself and for other kernel subsystems. | ||
230 | */ | ||
231 | batch_target = np->rx_target - (req_prod - np->rx.rsp_cons); | ||
232 | for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) { | ||
233 | skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD, | ||
234 | GFP_ATOMIC | __GFP_NOWARN); | ||
235 | if (unlikely(!skb)) | ||
236 | goto no_skb; | ||
237 | |||
238 | page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); | ||
239 | if (!page) { | ||
240 | kfree_skb(skb); | ||
241 | no_skb: | ||
242 | /* Any skbuffs queued for refill? Force them out. */ | ||
243 | if (i != 0) | ||
244 | goto refill; | ||
245 | /* Could not allocate any skbuffs. Try again later. */ | ||
246 | mod_timer(&np->rx_refill_timer, | ||
247 | jiffies + (HZ/10)); | ||
248 | break; | ||
249 | } | ||
250 | |||
251 | skb_shinfo(skb)->frags[0].page = page; | ||
252 | skb_shinfo(skb)->nr_frags = 1; | ||
253 | __skb_queue_tail(&np->rx_batch, skb); | ||
254 | } | ||
255 | |||
256 | /* Is the batch large enough to be worthwhile? */ | ||
257 | if (i < (np->rx_target/2)) { | ||
258 | if (req_prod > np->rx.sring->req_prod) | ||
259 | goto push; | ||
260 | return; | ||
261 | } | ||
262 | |||
263 | /* Adjust our fill target if we risked running out of buffers. */ | ||
264 | if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) && | ||
265 | ((np->rx_target *= 2) > np->rx_max_target)) | ||
266 | np->rx_target = np->rx_max_target; | ||
267 | |||
268 | refill: | ||
269 | for (nr_flips = i = 0; ; i++) { | ||
270 | skb = __skb_dequeue(&np->rx_batch); | ||
271 | if (skb == NULL) | ||
272 | break; | ||
273 | |||
274 | skb->dev = dev; | ||
275 | |||
276 | id = xennet_rxidx(req_prod + i); | ||
277 | |||
278 | BUG_ON(np->rx_skbs[id]); | ||
279 | np->rx_skbs[id] = skb; | ||
280 | |||
281 | ref = gnttab_claim_grant_reference(&np->gref_rx_head); | ||
282 | BUG_ON((signed short)ref < 0); | ||
283 | np->grant_rx_ref[id] = ref; | ||
284 | |||
285 | pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page); | ||
286 | vaddr = page_address(skb_shinfo(skb)->frags[0].page); | ||
287 | |||
288 | req = RING_GET_REQUEST(&np->rx, req_prod + i); | ||
289 | gnttab_grant_foreign_access_ref(ref, | ||
290 | np->xbdev->otherend_id, | ||
291 | pfn_to_mfn(pfn), | ||
292 | 0); | ||
293 | |||
294 | req->id = id; | ||
295 | req->gref = ref; | ||
296 | } | ||
297 | |||
298 | if (nr_flips != 0) { | ||
299 | reservation.extent_start = np->rx_pfn_array; | ||
300 | reservation.nr_extents = nr_flips; | ||
301 | reservation.extent_order = 0; | ||
302 | reservation.address_bits = 0; | ||
303 | reservation.domid = DOMID_SELF; | ||
304 | |||
305 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | ||
306 | /* After all PTEs have been zapped, flush the TLB. */ | ||
307 | np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = | ||
308 | UVMF_TLB_FLUSH|UVMF_ALL; | ||
309 | |||
310 | /* Give away a batch of pages. */ | ||
311 | np->rx_mcl[i].op = __HYPERVISOR_memory_op; | ||
312 | np->rx_mcl[i].args[0] = XENMEM_decrease_reservation; | ||
313 | np->rx_mcl[i].args[1] = (unsigned long)&reservation; | ||
314 | |||
315 | /* Zap PTEs and give away pages in one big | ||
316 | * multicall. */ | ||
317 | (void)HYPERVISOR_multicall(np->rx_mcl, i+1); | ||
318 | |||
319 | /* Check return status of HYPERVISOR_memory_op(). */ | ||
320 | if (unlikely(np->rx_mcl[i].result != i)) | ||
321 | panic("Unable to reduce memory reservation\n"); | ||
322 | } else { | ||
323 | if (HYPERVISOR_memory_op(XENMEM_decrease_reservation, | ||
324 | &reservation) != i) | ||
325 | panic("Unable to reduce memory reservation\n"); | ||
326 | } | ||
327 | } else { | ||
328 | wmb(); /* barrier so backend seens requests */ | ||
329 | } | ||
330 | |||
331 | /* Above is a suitable barrier to ensure backend will see requests. */ | ||
332 | np->rx.req_prod_pvt = req_prod + i; | ||
333 | push: | ||
334 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify); | ||
335 | if (notify) | ||
336 | notify_remote_via_irq(np->netdev->irq); | ||
337 | } | ||
338 | |||
339 | static int xennet_open(struct net_device *dev) | ||
340 | { | ||
341 | struct netfront_info *np = netdev_priv(dev); | ||
342 | |||
343 | memset(&np->stats, 0, sizeof(np->stats)); | ||
344 | |||
345 | spin_lock_bh(&np->rx_lock); | ||
346 | if (netif_carrier_ok(dev)) { | ||
347 | xennet_alloc_rx_buffers(dev); | ||
348 | np->rx.sring->rsp_event = np->rx.rsp_cons + 1; | ||
349 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | ||
350 | netif_rx_schedule(dev); | ||
351 | } | ||
352 | spin_unlock_bh(&np->rx_lock); | ||
353 | |||
354 | xennet_maybe_wake_tx(dev); | ||
355 | |||
356 | return 0; | ||
357 | } | ||
358 | |||
/*
 * Reclaim completed transmit slots.  For every response the backend has
 * produced: end and release the grant, return the ring id to the tx
 * freelist and free the skb.  Called from the transmit path under
 * np->tx_lock (see xennet_start_xmit); presumably also from the
 * interrupt path — NOTE(review): other callers are outside this chunk.
 */
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			/* NETIF_RSP_NULL slots carry no completed buffer. */
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			/*
			 * The backend claims it is done with this buffer but
			 * still holds the grant: protocol violation, bail.
			 */
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb(); /* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
	/* Loop again if new responses raced with the rsp_event update. */

	xennet_maybe_wake_tx(dev);
}
414 | |||
/*
 * Queue the remainder of an skb after the first tx request: any extra
 * page-sized chunks of the linear header, then one request per paged
 * fragment.  Each chunk takes a tx id, a grant reference and a slot on
 * the tx ring; each holds its own reference on the skb (skb_get) so the
 * completion path can release them independently.  'tx' points at the
 * request for the chunk currently being described; its size/flags are
 * finalized as the next chunk is started.
 */
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		/* Close out the current request at the page boundary... */
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		/* ...and open a new one for the next chunk of header. */
		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		/* Tentative size: the rest of the header; trimmed above if
		 * another page boundary is crossed. */
		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	/* Publish all newly built requests (pushed by the caller). */
	np->tx.req_prod_pvt = prod;
}
480 | |||
481 | static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
482 | { | ||
483 | unsigned short id; | ||
484 | struct netfront_info *np = netdev_priv(dev); | ||
485 | struct xen_netif_tx_request *tx; | ||
486 | struct xen_netif_extra_info *extra; | ||
487 | char *data = skb->data; | ||
488 | RING_IDX i; | ||
489 | grant_ref_t ref; | ||
490 | unsigned long mfn; | ||
491 | int notify; | ||
492 | int frags = skb_shinfo(skb)->nr_frags; | ||
493 | unsigned int offset = offset_in_page(data); | ||
494 | unsigned int len = skb_headlen(skb); | ||
495 | |||
496 | frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE; | ||
497 | if (unlikely(frags > MAX_SKB_FRAGS + 1)) { | ||
498 | printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n", | ||
499 | frags); | ||
500 | dump_stack(); | ||
501 | goto drop; | ||
502 | } | ||
503 | |||
504 | spin_lock_irq(&np->tx_lock); | ||
505 | |||
506 | if (unlikely(!netif_carrier_ok(dev) || | ||
507 | (frags > 1 && !xennet_can_sg(dev)) || | ||
508 | netif_needs_gso(dev, skb))) { | ||
509 | spin_unlock_irq(&np->tx_lock); | ||
510 | goto drop; | ||
511 | } | ||
512 | |||
513 | i = np->tx.req_prod_pvt; | ||
514 | |||
515 | id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); | ||
516 | np->tx_skbs[id].skb = skb; | ||
517 | |||
518 | tx = RING_GET_REQUEST(&np->tx, i); | ||
519 | |||
520 | tx->id = id; | ||
521 | ref = gnttab_claim_grant_reference(&np->gref_tx_head); | ||
522 | BUG_ON((signed short)ref < 0); | ||
523 | mfn = virt_to_mfn(data); | ||
524 | gnttab_grant_foreign_access_ref( | ||
525 | ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly); | ||
526 | tx->gref = np->grant_tx_ref[id] = ref; | ||
527 | tx->offset = offset; | ||
528 | tx->size = len; | ||
529 | extra = NULL; | ||
530 | |||
531 | tx->flags = 0; | ||
532 | if (skb->ip_summed == CHECKSUM_PARTIAL) | ||
533 | /* local packet? */ | ||
534 | tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; | ||
535 | else if (skb->ip_summed == CHECKSUM_UNNECESSARY) | ||
536 | /* remote but checksummed. */ | ||
537 | tx->flags |= NETTXF_data_validated; | ||
538 | |||
539 | if (skb_shinfo(skb)->gso_size) { | ||
540 | struct xen_netif_extra_info *gso; | ||
541 | |||
542 | gso = (struct xen_netif_extra_info *) | ||
543 | RING_GET_REQUEST(&np->tx, ++i); | ||
544 | |||
545 | if (extra) | ||
546 | extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; | ||
547 | else | ||
548 | tx->flags |= NETTXF_extra_info; | ||
549 | |||
550 | gso->u.gso.size = skb_shinfo(skb)->gso_size; | ||
551 | gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; | ||
552 | gso->u.gso.pad = 0; | ||
553 | gso->u.gso.features = 0; | ||
554 | |||
555 | gso->type = XEN_NETIF_EXTRA_TYPE_GSO; | ||
556 | gso->flags = 0; | ||
557 | extra = gso; | ||
558 | } | ||
559 | |||
560 | np->tx.req_prod_pvt = i + 1; | ||
561 | |||
562 | xennet_make_frags(skb, dev, tx); | ||
563 | tx->size = skb->len; | ||
564 | |||
565 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify); | ||
566 | if (notify) | ||
567 | notify_remote_via_irq(np->netdev->irq); | ||
568 | |||
569 | xennet_tx_buf_gc(dev); | ||
570 | |||
571 | if (!netfront_tx_slot_available(np)) | ||
572 | netif_stop_queue(dev); | ||
573 | |||
574 | spin_unlock_irq(&np->tx_lock); | ||
575 | |||
576 | np->stats.tx_bytes += skb->len; | ||
577 | np->stats.tx_packets++; | ||
578 | |||
579 | return 0; | ||
580 | |||
581 | drop: | ||
582 | np->stats.tx_dropped++; | ||
583 | dev_kfree_skb(skb); | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int xennet_close(struct net_device *dev) | ||
588 | { | ||
589 | struct netfront_info *np = netdev_priv(dev); | ||
590 | netif_stop_queue(np->netdev); | ||
591 | return 0; | ||
592 | } | ||
593 | |||
594 | static struct net_device_stats *xennet_get_stats(struct net_device *dev) | ||
595 | { | ||
596 | struct netfront_info *np = netdev_priv(dev); | ||
597 | return &np->stats; | ||
598 | } | ||
599 | |||
600 | static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb, | ||
601 | grant_ref_t ref) | ||
602 | { | ||
603 | int new = xennet_rxidx(np->rx.req_prod_pvt); | ||
604 | |||
605 | BUG_ON(np->rx_skbs[new]); | ||
606 | np->rx_skbs[new] = skb; | ||
607 | np->grant_rx_ref[new] = ref; | ||
608 | RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new; | ||
609 | RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref; | ||
610 | np->rx.req_prod_pvt++; | ||
611 | } | ||
612 | |||
/*
 * Consume the chain of extra-info slots that follows a response with
 * NETRXF_extra_info set, copying each recognised one into extras[] by
 * type.  The skb/grant that was queued for each consumed slot is
 * re-posted to the backend via xennet_move_rx_slot().  Returns 0, or a
 * negative errno if the chain is truncated or malformed (np->rx.rsp_cons
 * is still advanced past everything consumed).
 */
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		/* Chain claims more slots than the backend produced. */
		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		/* The slot's buffer carried no data; recycle it. */
		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
656 | |||
/*
 * Gather all responses belonging to one packet (the head response in
 * rinfo->rx plus any NETRXF_more_data continuations up to 'rp'), ending
 * each slot's grant and queueing its skb on 'list'.  Extra-info slots
 * are parsed into rinfo->extras first.  Returns 0 on success; on error
 * np->rx.rsp_cons is advanced past the whole (bad) packet so the caller
 * can resynchronise.
 */
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	/* A packet small enough to be copied may use one extra slot. */
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		/* Negative status is an error code; bound-check the data. */
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		/* Continuation promised but not yet produced. */
		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	/* On error, skip past the entire packet's worth of responses. */
	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}
736 | |||
737 | static int xennet_set_skb_gso(struct sk_buff *skb, | ||
738 | struct xen_netif_extra_info *gso) | ||
739 | { | ||
740 | if (!gso->u.gso.size) { | ||
741 | if (net_ratelimit()) | ||
742 | printk(KERN_WARNING "GSO size must not be zero.\n"); | ||
743 | return -EINVAL; | ||
744 | } | ||
745 | |||
746 | /* Currently only TCPv4 S.O. is supported. */ | ||
747 | if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { | ||
748 | if (net_ratelimit()) | ||
749 | printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | |||
753 | skb_shinfo(skb)->gso_size = gso->u.gso.size; | ||
754 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | ||
755 | |||
756 | /* Header must be checked, and gso_segs computed. */ | ||
757 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | ||
758 | skb_shinfo(skb)->gso_segs = 0; | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
/*
 * Attach each continuation buffer gathered by xennet_get_responses() to
 * 'skb' as a paged fragment, stealing frag 0's page from each nskb and
 * then freeing the now-pageless nskb.  Consumes one ring response per
 * list entry; returns the updated consumer index (caller stores it back
 * into np->rx.rsp_cons).
 */
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		/* Page ownership moved to 'skb'; drop the donor skb. */
		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}
793 | |||
794 | static int skb_checksum_setup(struct sk_buff *skb) | ||
795 | { | ||
796 | struct iphdr *iph; | ||
797 | unsigned char *th; | ||
798 | int err = -EPROTO; | ||
799 | |||
800 | if (skb->protocol != htons(ETH_P_IP)) | ||
801 | goto out; | ||
802 | |||
803 | iph = (void *)skb->data; | ||
804 | th = skb->data + 4 * iph->ihl; | ||
805 | if (th >= skb_tail_pointer(skb)) | ||
806 | goto out; | ||
807 | |||
808 | skb->csum_start = th - skb->head; | ||
809 | switch (iph->protocol) { | ||
810 | case IPPROTO_TCP: | ||
811 | skb->csum_offset = offsetof(struct tcphdr, check); | ||
812 | break; | ||
813 | case IPPROTO_UDP: | ||
814 | skb->csum_offset = offsetof(struct udphdr, check); | ||
815 | break; | ||
816 | default: | ||
817 | if (net_ratelimit()) | ||
818 | printk(KERN_ERR "Attempting to checksum a non-" | ||
819 | "TCP/UDP packet, dropping a protocol" | ||
820 | " %d packet", iph->protocol); | ||
821 | goto out; | ||
822 | } | ||
823 | |||
824 | if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) | ||
825 | goto out; | ||
826 | |||
827 | err = 0; | ||
828 | |||
829 | out: | ||
830 | return err; | ||
831 | } | ||
832 | |||
833 | static int handle_incoming_queue(struct net_device *dev, | ||
834 | struct sk_buff_head *rxq) | ||
835 | { | ||
836 | struct netfront_info *np = netdev_priv(dev); | ||
837 | int packets_dropped = 0; | ||
838 | struct sk_buff *skb; | ||
839 | |||
840 | while ((skb = __skb_dequeue(rxq)) != NULL) { | ||
841 | struct page *page = NETFRONT_SKB_CB(skb)->page; | ||
842 | void *vaddr = page_address(page); | ||
843 | unsigned offset = NETFRONT_SKB_CB(skb)->offset; | ||
844 | |||
845 | memcpy(skb->data, vaddr + offset, | ||
846 | skb_headlen(skb)); | ||
847 | |||
848 | if (page != skb_shinfo(skb)->frags[0].page) | ||
849 | __free_page(page); | ||
850 | |||
851 | /* Ethernet work: Delayed to here as it peeks the header. */ | ||
852 | skb->protocol = eth_type_trans(skb, dev); | ||
853 | |||
854 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
855 | if (skb_checksum_setup(skb)) { | ||
856 | kfree_skb(skb); | ||
857 | packets_dropped++; | ||
858 | np->stats.rx_errors++; | ||
859 | continue; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | np->stats.rx_packets++; | ||
864 | np->stats.rx_bytes += skb->len; | ||
865 | |||
866 | /* Pass it up. */ | ||
867 | netif_receive_skb(skb); | ||
868 | dev->last_rx = jiffies; | ||
869 | } | ||
870 | |||
871 | return packets_dropped; | ||
872 | } | ||
873 | |||
/*
 * Receive poll handler (pre-NAPI-struct API): drain up to the budget's
 * worth of responses from the rx ring, reassemble multi-slot packets,
 * deliver them via handle_incoming_queue(), refill the ring and decide
 * whether more polling is needed.  Returns nonzero if more work remains.
 */
static int xennet_poll(struct net_device *dev, int *pbudget)
{
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done, budget, more_to_do = 1;
	struct sk_buff_head rxq;	/* good packets, ready to deliver */
	struct sk_buff_head errq;	/* buffers from failed packets */
	struct sk_buff_head tmpq;	/* in-progress packet's buffers */
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		spin_unlock(&np->rx_lock);
		return 0;
	}

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	budget = *pbudget;
	if (budget > dev->quota)
		budget = dev->quota;
	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			/* Salvage nothing: park the buffers for freeing. */
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			np->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				/* Requeue head so the whole packet errors. */
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		/* Remember the source page; header copy happens later. */
		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		/* Up to RX_COPY_THRESHOLD bytes go in the linear area;
		 * anything beyond stays in the page as frag 0. */
		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	while ((skb = __skb_dequeue(&errq)))
		kfree_skb(skb);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	*pbudget -= work_done;
	dev->quota -= work_done;

	if (work_done < budget) {
		/* Re-check with interrupts off so we can't miss an event
		 * between the final check and completing the poll. */
		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return more_to_do;
}
1025 | |||
1026 | static int xennet_change_mtu(struct net_device *dev, int mtu) | ||
1027 | { | ||
1028 | int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN; | ||
1029 | |||
1030 | if (mtu > max) | ||
1031 | return -EINVAL; | ||
1032 | dev->mtu = mtu; | ||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
1036 | static void xennet_release_tx_bufs(struct netfront_info *np) | ||
1037 | { | ||
1038 | struct sk_buff *skb; | ||
1039 | int i; | ||
1040 | |||
1041 | for (i = 0; i < NET_TX_RING_SIZE; i++) { | ||
1042 | /* Skip over entries which are actually freelist references */ | ||
1043 | if ((unsigned long)np->tx_skbs[i].skb < PAGE_OFFSET) | ||
1044 | continue; | ||
1045 | |||
1046 | skb = np->tx_skbs[i].skb; | ||
1047 | gnttab_end_foreign_access_ref(np->grant_tx_ref[i], | ||
1048 | GNTMAP_readonly); | ||
1049 | gnttab_release_grant_reference(&np->gref_tx_head, | ||
1050 | np->grant_tx_ref[i]); | ||
1051 | np->grant_tx_ref[i] = GRANT_INVALID_REF; | ||
1052 | add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i); | ||
1053 | dev_kfree_skb_irq(skb); | ||
1054 | } | ||
1055 | } | ||
1056 | |||
/*
 * Reclaim outstanding receive buffers on teardown.
 *
 * NOTE: this function currently warns and returns immediately — the
 * code below it is the old page-flipping recovery path and is
 * unreachable until it is reworked for the copying receiver.
 */
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
			 __func__);
	return;

	/* --- Unreachable flip-mode cleanup below --- */
	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		/* mfn == 0: the backend never completed the transfer. */
		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 0, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	while ((skb = __skb_dequeue(&free_list)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock_bh(&np->rx_lock);
}
1133 | |||
1134 | static void xennet_uninit(struct net_device *dev) | ||
1135 | { | ||
1136 | struct netfront_info *np = netdev_priv(dev); | ||
1137 | xennet_release_tx_bufs(np); | ||
1138 | xennet_release_rx_bufs(np); | ||
1139 | gnttab_free_grant_references(np->gref_tx_head); | ||
1140 | gnttab_free_grant_references(np->gref_rx_head); | ||
1141 | } | ||
1142 | |||
/*
 * Allocate and initialise the net_device and its netfront_info private
 * data for one xenbus device: locks, rx batch queue and fill targets,
 * the refill timer, the tx/rx bookkeeping arrays, the grant-reference
 * pools, and the net_device operations.  The device starts with carrier
 * off until the backend connects.  Returns the netdev or an ERR_PTR.
 */
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	/* Timer used to retry rx refills after allocation failure. */
	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i].link = i+1;
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop            = xennet_close;
	netdev->get_stats       = xennet_get_stats;
	netdev->poll            = xennet_poll;
	netdev->uninit          = xennet_uninit;
	netdev->change_mtu	= xennet_change_mtu;
	netdev->weight          = 64;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	/* Carrier stays off until the backend is connected. */
	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
1225 | |||
1226 | /** | ||
1227 | * Entry point to this code when a new device is created. Allocate the basic | ||
1228 | * structures and the ring buffers for communication with the backend, and | ||
1229 | * inform the backend of the appropriate details for those. | ||
1230 | */ | ||
1231 | static int __devinit netfront_probe(struct xenbus_device *dev, | ||
1232 | const struct xenbus_device_id *id) | ||
1233 | { | ||
1234 | int err; | ||
1235 | struct net_device *netdev; | ||
1236 | struct netfront_info *info; | ||
1237 | |||
1238 | netdev = xennet_create_dev(dev); | ||
1239 | if (IS_ERR(netdev)) { | ||
1240 | err = PTR_ERR(netdev); | ||
1241 | xenbus_dev_fatal(dev, err, "creating netdev"); | ||
1242 | return err; | ||
1243 | } | ||
1244 | |||
1245 | info = netdev_priv(netdev); | ||
1246 | dev->dev.driver_data = info; | ||
1247 | |||
1248 | err = register_netdev(info->netdev); | ||
1249 | if (err) { | ||
1250 | printk(KERN_WARNING "%s: register_netdev err=%d\n", | ||
1251 | __func__, err); | ||
1252 | goto fail; | ||
1253 | } | ||
1254 | |||
1255 | err = xennet_sysfs_addif(info->netdev); | ||
1256 | if (err) { | ||
1257 | unregister_netdev(info->netdev); | ||
1258 | printk(KERN_WARNING "%s: add sysfs failed err=%d\n", | ||
1259 | __func__, err); | ||
1260 | goto fail; | ||
1261 | } | ||
1262 | |||
1263 | return 0; | ||
1264 | |||
1265 | fail: | ||
1266 | free_netdev(netdev); | ||
1267 | dev->dev.driver_data = NULL; | ||
1268 | return err; | ||
1269 | } | ||
1270 | |||
1271 | static void xennet_end_access(int ref, void *page) | ||
1272 | { | ||
1273 | /* This frees the page as a side-effect */ | ||
1274 | if (ref != GRANT_INVALID_REF) | ||
1275 | gnttab_end_foreign_access(ref, 0, (unsigned long)page); | ||
1276 | } | ||
1277 | |||
1278 | static void xennet_disconnect_backend(struct netfront_info *info) | ||
1279 | { | ||
1280 | /* Stop old i/f to prevent errors whilst we rebuild the state. */ | ||
1281 | spin_lock_bh(&info->rx_lock); | ||
1282 | spin_lock_irq(&info->tx_lock); | ||
1283 | netif_carrier_off(info->netdev); | ||
1284 | spin_unlock_irq(&info->tx_lock); | ||
1285 | spin_unlock_bh(&info->rx_lock); | ||
1286 | |||
1287 | if (info->netdev->irq) | ||
1288 | unbind_from_irqhandler(info->netdev->irq, info->netdev); | ||
1289 | info->evtchn = info->netdev->irq = 0; | ||
1290 | |||
1291 | /* End access and free the pages */ | ||
1292 | xennet_end_access(info->tx_ring_ref, info->tx.sring); | ||
1293 | xennet_end_access(info->rx_ring_ref, info->rx.sring); | ||
1294 | |||
1295 | info->tx_ring_ref = GRANT_INVALID_REF; | ||
1296 | info->rx_ring_ref = GRANT_INVALID_REF; | ||
1297 | info->tx.sring = NULL; | ||
1298 | info->rx.sring = NULL; | ||
1299 | } | ||
1300 | |||
1301 | /** | ||
1302 | * We are reconnecting to the backend, due to a suspend/resume, or a backend | ||
1303 | * driver restart. We tear down our netif structure and recreate it, but | ||
1304 | * leave the device-layer structures intact so that this is transparent to the | ||
1305 | * rest of the kernel. | ||
1306 | */ | ||
1307 | static int netfront_resume(struct xenbus_device *dev) | ||
1308 | { | ||
1309 | struct netfront_info *info = dev->dev.driver_data; | ||
1310 | |||
1311 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | ||
1312 | |||
1313 | xennet_disconnect_backend(info); | ||
1314 | return 0; | ||
1315 | } | ||
1316 | |||
1317 | static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) | ||
1318 | { | ||
1319 | char *s, *e, *macstr; | ||
1320 | int i; | ||
1321 | |||
1322 | macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); | ||
1323 | if (IS_ERR(macstr)) | ||
1324 | return PTR_ERR(macstr); | ||
1325 | |||
1326 | for (i = 0; i < ETH_ALEN; i++) { | ||
1327 | mac[i] = simple_strtoul(s, &e, 16); | ||
1328 | if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { | ||
1329 | kfree(macstr); | ||
1330 | return -ENOENT; | ||
1331 | } | ||
1332 | s = e+1; | ||
1333 | } | ||
1334 | |||
1335 | kfree(macstr); | ||
1336 | return 0; | ||
1337 | } | ||
1338 | |||
1339 | static irqreturn_t xennet_interrupt(int irq, void *dev_id) | ||
1340 | { | ||
1341 | struct net_device *dev = dev_id; | ||
1342 | struct netfront_info *np = netdev_priv(dev); | ||
1343 | unsigned long flags; | ||
1344 | |||
1345 | spin_lock_irqsave(&np->tx_lock, flags); | ||
1346 | |||
1347 | if (likely(netif_carrier_ok(dev))) { | ||
1348 | xennet_tx_buf_gc(dev); | ||
1349 | /* Under tx_lock: protects access to rx shared-ring indexes. */ | ||
1350 | if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) | ||
1351 | netif_rx_schedule(dev); | ||
1352 | } | ||
1353 | |||
1354 | spin_unlock_irqrestore(&np->tx_lock, flags); | ||
1355 | |||
1356 | return IRQ_HANDLED; | ||
1357 | } | ||
1358 | |||
1359 | static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info) | ||
1360 | { | ||
1361 | struct xen_netif_tx_sring *txs; | ||
1362 | struct xen_netif_rx_sring *rxs; | ||
1363 | int err; | ||
1364 | struct net_device *netdev = info->netdev; | ||
1365 | |||
1366 | info->tx_ring_ref = GRANT_INVALID_REF; | ||
1367 | info->rx_ring_ref = GRANT_INVALID_REF; | ||
1368 | info->rx.sring = NULL; | ||
1369 | info->tx.sring = NULL; | ||
1370 | netdev->irq = 0; | ||
1371 | |||
1372 | err = xen_net_read_mac(dev, netdev->dev_addr); | ||
1373 | if (err) { | ||
1374 | xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); | ||
1375 | goto fail; | ||
1376 | } | ||
1377 | |||
1378 | txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_KERNEL); | ||
1379 | if (!txs) { | ||
1380 | err = -ENOMEM; | ||
1381 | xenbus_dev_fatal(dev, err, "allocating tx ring page"); | ||
1382 | goto fail; | ||
1383 | } | ||
1384 | SHARED_RING_INIT(txs); | ||
1385 | FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE); | ||
1386 | |||
1387 | err = xenbus_grant_ring(dev, virt_to_mfn(txs)); | ||
1388 | if (err < 0) { | ||
1389 | free_page((unsigned long)txs); | ||
1390 | goto fail; | ||
1391 | } | ||
1392 | |||
1393 | info->tx_ring_ref = err; | ||
1394 | rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_KERNEL); | ||
1395 | if (!rxs) { | ||
1396 | err = -ENOMEM; | ||
1397 | xenbus_dev_fatal(dev, err, "allocating rx ring page"); | ||
1398 | goto fail; | ||
1399 | } | ||
1400 | SHARED_RING_INIT(rxs); | ||
1401 | FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE); | ||
1402 | |||
1403 | err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); | ||
1404 | if (err < 0) { | ||
1405 | free_page((unsigned long)rxs); | ||
1406 | goto fail; | ||
1407 | } | ||
1408 | info->rx_ring_ref = err; | ||
1409 | |||
1410 | err = xenbus_alloc_evtchn(dev, &info->evtchn); | ||
1411 | if (err) | ||
1412 | goto fail; | ||
1413 | |||
1414 | err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt, | ||
1415 | IRQF_SAMPLE_RANDOM, netdev->name, | ||
1416 | netdev); | ||
1417 | if (err < 0) | ||
1418 | goto fail; | ||
1419 | netdev->irq = err; | ||
1420 | return 0; | ||
1421 | |||
1422 | fail: | ||
1423 | return err; | ||
1424 | } | ||
1425 | |||
1426 | /* Common code used when first setting up, and when resuming. */ | ||
1427 | static int talk_to_backend(struct xenbus_device *dev, | ||
1428 | struct netfront_info *info) | ||
1429 | { | ||
1430 | const char *message; | ||
1431 | struct xenbus_transaction xbt; | ||
1432 | int err; | ||
1433 | |||
1434 | /* Create shared ring, alloc event channel. */ | ||
1435 | err = setup_netfront(dev, info); | ||
1436 | if (err) | ||
1437 | goto out; | ||
1438 | |||
1439 | again: | ||
1440 | err = xenbus_transaction_start(&xbt); | ||
1441 | if (err) { | ||
1442 | xenbus_dev_fatal(dev, err, "starting transaction"); | ||
1443 | goto destroy_ring; | ||
1444 | } | ||
1445 | |||
1446 | err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u", | ||
1447 | info->tx_ring_ref); | ||
1448 | if (err) { | ||
1449 | message = "writing tx ring-ref"; | ||
1450 | goto abort_transaction; | ||
1451 | } | ||
1452 | err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u", | ||
1453 | info->rx_ring_ref); | ||
1454 | if (err) { | ||
1455 | message = "writing rx ring-ref"; | ||
1456 | goto abort_transaction; | ||
1457 | } | ||
1458 | err = xenbus_printf(xbt, dev->nodename, | ||
1459 | "event-channel", "%u", info->evtchn); | ||
1460 | if (err) { | ||
1461 | message = "writing event-channel"; | ||
1462 | goto abort_transaction; | ||
1463 | } | ||
1464 | |||
1465 | err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", | ||
1466 | 1); | ||
1467 | if (err) { | ||
1468 | message = "writing request-rx-copy"; | ||
1469 | goto abort_transaction; | ||
1470 | } | ||
1471 | |||
1472 | err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); | ||
1473 | if (err) { | ||
1474 | message = "writing feature-rx-notify"; | ||
1475 | goto abort_transaction; | ||
1476 | } | ||
1477 | |||
1478 | err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); | ||
1479 | if (err) { | ||
1480 | message = "writing feature-sg"; | ||
1481 | goto abort_transaction; | ||
1482 | } | ||
1483 | |||
1484 | err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); | ||
1485 | if (err) { | ||
1486 | message = "writing feature-gso-tcpv4"; | ||
1487 | goto abort_transaction; | ||
1488 | } | ||
1489 | |||
1490 | err = xenbus_transaction_end(xbt, 0); | ||
1491 | if (err) { | ||
1492 | if (err == -EAGAIN) | ||
1493 | goto again; | ||
1494 | xenbus_dev_fatal(dev, err, "completing transaction"); | ||
1495 | goto destroy_ring; | ||
1496 | } | ||
1497 | |||
1498 | return 0; | ||
1499 | |||
1500 | abort_transaction: | ||
1501 | xenbus_transaction_end(xbt, 1); | ||
1502 | xenbus_dev_fatal(dev, err, "%s", message); | ||
1503 | destroy_ring: | ||
1504 | xennet_disconnect_backend(info); | ||
1505 | out: | ||
1506 | return err; | ||
1507 | } | ||
1508 | |||
1509 | static int xennet_set_sg(struct net_device *dev, u32 data) | ||
1510 | { | ||
1511 | if (data) { | ||
1512 | struct netfront_info *np = netdev_priv(dev); | ||
1513 | int val; | ||
1514 | |||
1515 | if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", | ||
1516 | "%d", &val) < 0) | ||
1517 | val = 0; | ||
1518 | if (!val) | ||
1519 | return -ENOSYS; | ||
1520 | } else if (dev->mtu > ETH_DATA_LEN) | ||
1521 | dev->mtu = ETH_DATA_LEN; | ||
1522 | |||
1523 | return ethtool_op_set_sg(dev, data); | ||
1524 | } | ||
1525 | |||
1526 | static int xennet_set_tso(struct net_device *dev, u32 data) | ||
1527 | { | ||
1528 | if (data) { | ||
1529 | struct netfront_info *np = netdev_priv(dev); | ||
1530 | int val; | ||
1531 | |||
1532 | if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, | ||
1533 | "feature-gso-tcpv4", "%d", &val) < 0) | ||
1534 | val = 0; | ||
1535 | if (!val) | ||
1536 | return -ENOSYS; | ||
1537 | } | ||
1538 | |||
1539 | return ethtool_op_set_tso(dev, data); | ||
1540 | } | ||
1541 | |||
1542 | static void xennet_set_features(struct net_device *dev) | ||
1543 | { | ||
1544 | /* Turn off all GSO bits except ROBUST. */ | ||
1545 | dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1; | ||
1546 | dev->features |= NETIF_F_GSO_ROBUST; | ||
1547 | xennet_set_sg(dev, 0); | ||
1548 | |||
1549 | /* We need checksum offload to enable scatter/gather and TSO. */ | ||
1550 | if (!(dev->features & NETIF_F_IP_CSUM)) | ||
1551 | return; | ||
1552 | |||
1553 | if (!xennet_set_sg(dev, 1)) | ||
1554 | xennet_set_tso(dev, 1); | ||
1555 | } | ||
1556 | |||
1557 | static int xennet_connect(struct net_device *dev) | ||
1558 | { | ||
1559 | struct netfront_info *np = netdev_priv(dev); | ||
1560 | int i, requeue_idx, err; | ||
1561 | struct sk_buff *skb; | ||
1562 | grant_ref_t ref; | ||
1563 | struct xen_netif_rx_request *req; | ||
1564 | unsigned int feature_rx_copy; | ||
1565 | |||
1566 | err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, | ||
1567 | "feature-rx-copy", "%u", &feature_rx_copy); | ||
1568 | if (err != 1) | ||
1569 | feature_rx_copy = 0; | ||
1570 | |||
1571 | if (!feature_rx_copy) { | ||
1572 | dev_info(&dev->dev, | ||
1573 | "backend does not support copying recieve path"); | ||
1574 | return -ENODEV; | ||
1575 | } | ||
1576 | |||
1577 | err = talk_to_backend(np->xbdev, np); | ||
1578 | if (err) | ||
1579 | return err; | ||
1580 | |||
1581 | xennet_set_features(dev); | ||
1582 | |||
1583 | spin_lock_bh(&np->rx_lock); | ||
1584 | spin_lock_irq(&np->tx_lock); | ||
1585 | |||
1586 | /* Step 1: Discard all pending TX packet fragments. */ | ||
1587 | xennet_release_tx_bufs(np); | ||
1588 | |||
1589 | /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ | ||
1590 | for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { | ||
1591 | if (!np->rx_skbs[i]) | ||
1592 | continue; | ||
1593 | |||
1594 | skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i); | ||
1595 | ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i); | ||
1596 | req = RING_GET_REQUEST(&np->rx, requeue_idx); | ||
1597 | |||
1598 | gnttab_grant_foreign_access_ref( | ||
1599 | ref, np->xbdev->otherend_id, | ||
1600 | pfn_to_mfn(page_to_pfn(skb_shinfo(skb)-> | ||
1601 | frags->page)), | ||
1602 | 0); | ||
1603 | req->gref = ref; | ||
1604 | req->id = requeue_idx; | ||
1605 | |||
1606 | requeue_idx++; | ||
1607 | } | ||
1608 | |||
1609 | np->rx.req_prod_pvt = requeue_idx; | ||
1610 | |||
1611 | /* | ||
1612 | * Step 3: All public and private state should now be sane. Get | ||
1613 | * ready to start sending and receiving packets and give the driver | ||
1614 | * domain a kick because we've probably just requeued some | ||
1615 | * packets. | ||
1616 | */ | ||
1617 | netif_carrier_on(np->netdev); | ||
1618 | notify_remote_via_irq(np->netdev->irq); | ||
1619 | xennet_tx_buf_gc(dev); | ||
1620 | xennet_alloc_rx_buffers(dev); | ||
1621 | |||
1622 | spin_unlock_irq(&np->tx_lock); | ||
1623 | spin_unlock_bh(&np->rx_lock); | ||
1624 | |||
1625 | return 0; | ||
1626 | } | ||
1627 | |||
1628 | /** | ||
1629 | * Callback received when the backend's state changes. | ||
1630 | */ | ||
1631 | static void backend_changed(struct xenbus_device *dev, | ||
1632 | enum xenbus_state backend_state) | ||
1633 | { | ||
1634 | struct netfront_info *np = dev->dev.driver_data; | ||
1635 | struct net_device *netdev = np->netdev; | ||
1636 | |||
1637 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); | ||
1638 | |||
1639 | switch (backend_state) { | ||
1640 | case XenbusStateInitialising: | ||
1641 | case XenbusStateInitialised: | ||
1642 | case XenbusStateConnected: | ||
1643 | case XenbusStateUnknown: | ||
1644 | case XenbusStateClosed: | ||
1645 | break; | ||
1646 | |||
1647 | case XenbusStateInitWait: | ||
1648 | if (dev->state != XenbusStateInitialising) | ||
1649 | break; | ||
1650 | if (xennet_connect(netdev) != 0) | ||
1651 | break; | ||
1652 | xenbus_switch_state(dev, XenbusStateConnected); | ||
1653 | break; | ||
1654 | |||
1655 | case XenbusStateClosing: | ||
1656 | xenbus_frontend_closed(dev); | ||
1657 | break; | ||
1658 | } | ||
1659 | } | ||
1660 | |||
1661 | static struct ethtool_ops xennet_ethtool_ops = | ||
1662 | { | ||
1663 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
1664 | .set_tx_csum = ethtool_op_set_tx_csum, | ||
1665 | .get_sg = ethtool_op_get_sg, | ||
1666 | .set_sg = xennet_set_sg, | ||
1667 | .get_tso = ethtool_op_get_tso, | ||
1668 | .set_tso = xennet_set_tso, | ||
1669 | .get_link = ethtool_op_get_link, | ||
1670 | }; | ||
1671 | |||
1672 | #ifdef CONFIG_SYSFS | ||
1673 | static ssize_t show_rxbuf_min(struct device *dev, | ||
1674 | struct device_attribute *attr, char *buf) | ||
1675 | { | ||
1676 | struct net_device *netdev = to_net_dev(dev); | ||
1677 | struct netfront_info *info = netdev_priv(netdev); | ||
1678 | |||
1679 | return sprintf(buf, "%u\n", info->rx_min_target); | ||
1680 | } | ||
1681 | |||
1682 | static ssize_t store_rxbuf_min(struct device *dev, | ||
1683 | struct device_attribute *attr, | ||
1684 | const char *buf, size_t len) | ||
1685 | { | ||
1686 | struct net_device *netdev = to_net_dev(dev); | ||
1687 | struct netfront_info *np = netdev_priv(netdev); | ||
1688 | char *endp; | ||
1689 | unsigned long target; | ||
1690 | |||
1691 | if (!capable(CAP_NET_ADMIN)) | ||
1692 | return -EPERM; | ||
1693 | |||
1694 | target = simple_strtoul(buf, &endp, 0); | ||
1695 | if (endp == buf) | ||
1696 | return -EBADMSG; | ||
1697 | |||
1698 | if (target < RX_MIN_TARGET) | ||
1699 | target = RX_MIN_TARGET; | ||
1700 | if (target > RX_MAX_TARGET) | ||
1701 | target = RX_MAX_TARGET; | ||
1702 | |||
1703 | spin_lock_bh(&np->rx_lock); | ||
1704 | if (target > np->rx_max_target) | ||
1705 | np->rx_max_target = target; | ||
1706 | np->rx_min_target = target; | ||
1707 | if (target > np->rx_target) | ||
1708 | np->rx_target = target; | ||
1709 | |||
1710 | xennet_alloc_rx_buffers(netdev); | ||
1711 | |||
1712 | spin_unlock_bh(&np->rx_lock); | ||
1713 | return len; | ||
1714 | } | ||
1715 | |||
1716 | static ssize_t show_rxbuf_max(struct device *dev, | ||
1717 | struct device_attribute *attr, char *buf) | ||
1718 | { | ||
1719 | struct net_device *netdev = to_net_dev(dev); | ||
1720 | struct netfront_info *info = netdev_priv(netdev); | ||
1721 | |||
1722 | return sprintf(buf, "%u\n", info->rx_max_target); | ||
1723 | } | ||
1724 | |||
1725 | static ssize_t store_rxbuf_max(struct device *dev, | ||
1726 | struct device_attribute *attr, | ||
1727 | const char *buf, size_t len) | ||
1728 | { | ||
1729 | struct net_device *netdev = to_net_dev(dev); | ||
1730 | struct netfront_info *np = netdev_priv(netdev); | ||
1731 | char *endp; | ||
1732 | unsigned long target; | ||
1733 | |||
1734 | if (!capable(CAP_NET_ADMIN)) | ||
1735 | return -EPERM; | ||
1736 | |||
1737 | target = simple_strtoul(buf, &endp, 0); | ||
1738 | if (endp == buf) | ||
1739 | return -EBADMSG; | ||
1740 | |||
1741 | if (target < RX_MIN_TARGET) | ||
1742 | target = RX_MIN_TARGET; | ||
1743 | if (target > RX_MAX_TARGET) | ||
1744 | target = RX_MAX_TARGET; | ||
1745 | |||
1746 | spin_lock_bh(&np->rx_lock); | ||
1747 | if (target < np->rx_min_target) | ||
1748 | np->rx_min_target = target; | ||
1749 | np->rx_max_target = target; | ||
1750 | if (target < np->rx_target) | ||
1751 | np->rx_target = target; | ||
1752 | |||
1753 | xennet_alloc_rx_buffers(netdev); | ||
1754 | |||
1755 | spin_unlock_bh(&np->rx_lock); | ||
1756 | return len; | ||
1757 | } | ||
1758 | |||
1759 | static ssize_t show_rxbuf_cur(struct device *dev, | ||
1760 | struct device_attribute *attr, char *buf) | ||
1761 | { | ||
1762 | struct net_device *netdev = to_net_dev(dev); | ||
1763 | struct netfront_info *info = netdev_priv(netdev); | ||
1764 | |||
1765 | return sprintf(buf, "%u\n", info->rx_target); | ||
1766 | } | ||
1767 | |||
1768 | static struct device_attribute xennet_attrs[] = { | ||
1769 | __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), | ||
1770 | __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), | ||
1771 | __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), | ||
1772 | }; | ||
1773 | |||
1774 | static int xennet_sysfs_addif(struct net_device *netdev) | ||
1775 | { | ||
1776 | int i; | ||
1777 | int err; | ||
1778 | |||
1779 | for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { | ||
1780 | err = device_create_file(&netdev->dev, | ||
1781 | &xennet_attrs[i]); | ||
1782 | if (err) | ||
1783 | goto fail; | ||
1784 | } | ||
1785 | return 0; | ||
1786 | |||
1787 | fail: | ||
1788 | while (--i >= 0) | ||
1789 | device_remove_file(&netdev->dev, &xennet_attrs[i]); | ||
1790 | return err; | ||
1791 | } | ||
1792 | |||
1793 | static void xennet_sysfs_delif(struct net_device *netdev) | ||
1794 | { | ||
1795 | int i; | ||
1796 | |||
1797 | for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) | ||
1798 | device_remove_file(&netdev->dev, &xennet_attrs[i]); | ||
1799 | } | ||
1800 | |||
1801 | #endif /* CONFIG_SYSFS */ | ||
1802 | |||
1803 | static struct xenbus_device_id netfront_ids[] = { | ||
1804 | { "vif" }, | ||
1805 | { "" } | ||
1806 | }; | ||
1807 | |||
1808 | |||
1809 | static int __devexit xennet_remove(struct xenbus_device *dev) | ||
1810 | { | ||
1811 | struct netfront_info *info = dev->dev.driver_data; | ||
1812 | |||
1813 | dev_dbg(&dev->dev, "%s\n", dev->nodename); | ||
1814 | |||
1815 | unregister_netdev(info->netdev); | ||
1816 | |||
1817 | xennet_disconnect_backend(info); | ||
1818 | |||
1819 | del_timer_sync(&info->rx_refill_timer); | ||
1820 | |||
1821 | xennet_sysfs_delif(info->netdev); | ||
1822 | |||
1823 | free_netdev(info->netdev); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | static struct xenbus_driver netfront = { | ||
1829 | .name = "vif", | ||
1830 | .owner = THIS_MODULE, | ||
1831 | .ids = netfront_ids, | ||
1832 | .probe = netfront_probe, | ||
1833 | .remove = __devexit_p(xennet_remove), | ||
1834 | .resume = netfront_resume, | ||
1835 | .otherend_changed = backend_changed, | ||
1836 | }; | ||
1837 | |||
1838 | static int __init netif_init(void) | ||
1839 | { | ||
1840 | if (!is_running_on_xen()) | ||
1841 | return -ENODEV; | ||
1842 | |||
1843 | if (is_initial_xendomain()) | ||
1844 | return 0; | ||
1845 | |||
1846 | printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n"); | ||
1847 | |||
1848 | return xenbus_register_frontend(&netfront); | ||
1849 | } | ||
1850 | module_init(netif_init); | ||
1851 | |||
1852 | |||
1853 | static void __exit netif_exit(void) | ||
1854 | { | ||
1855 | if (is_initial_xendomain()) | ||
1856 | return; | ||
1857 | |||
1858 | return xenbus_unregister_driver(&netfront); | ||
1859 | } | ||
1860 | module_exit(netif_exit); | ||
1861 | |||
1862 | MODULE_DESCRIPTION("Xen virtual network device frontend"); | ||
1863 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index a708c329675e..38cdf9fa36a7 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c | |||
@@ -73,6 +73,7 @@ | |||
73 | #include <linux/termios.h> | 73 | #include <linux/termios.h> |
74 | #include <linux/tty.h> | 74 | #include <linux/tty.h> |
75 | #include <linux/serial_core.h> | 75 | #include <linux/serial_core.h> |
76 | #include <linux/serial_8250.h> | ||
76 | #include <linux/delay.h> | 77 | #include <linux/delay.h> |
77 | 78 | ||
78 | #include <asm/io.h> | 79 | #include <asm/io.h> |
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c index 03baf1c64a2e..ed112ee16012 100644 --- a/drivers/pnp/pnpbios/core.c +++ b/drivers/pnp/pnpbios/core.c | |||
@@ -147,7 +147,7 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info) | |||
147 | info->location_id, info->serial, info->capabilities); | 147 | info->location_id, info->serial, info->capabilities); |
148 | envp[i] = NULL; | 148 | envp[i] = NULL; |
149 | 149 | ||
150 | value = call_usermodehelper (argv [0], argv, envp, 0); | 150 | value = call_usermodehelper (argv [0], argv, envp, UMH_WAIT_EXEC); |
151 | kfree (buf); | 151 | kfree (buf); |
152 | kfree (envp); | 152 | kfree (envp); |
153 | return 0; | 153 | return 0; |
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c index a54e4140683a..e821a155b658 100644 --- a/drivers/sbus/char/bbc_envctrl.c +++ b/drivers/sbus/char/bbc_envctrl.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/kthread.h> | 7 | #include <linux/kthread.h> |
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/kmod.h> | 9 | #include <linux/kmod.h> |
10 | #include <linux/reboot.h> | ||
10 | #include <asm/oplib.h> | 11 | #include <asm/oplib.h> |
11 | #include <asm/ebus.h> | 12 | #include <asm/ebus.h> |
12 | 13 | ||
@@ -170,8 +171,6 @@ static void get_current_temps(struct bbc_cpu_temperature *tp) | |||
170 | static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp) | 171 | static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp) |
171 | { | 172 | { |
172 | static int shutting_down = 0; | 173 | static int shutting_down = 0; |
173 | static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; | ||
174 | char *argv[] = { "/sbin/shutdown", "-h", "now", NULL }; | ||
175 | char *type = "???"; | 174 | char *type = "???"; |
176 | s8 val = -1; | 175 | s8 val = -1; |
177 | 176 | ||
@@ -195,7 +194,7 @@ static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp) | |||
195 | printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n"); | 194 | printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n"); |
196 | 195 | ||
197 | shutting_down = 1; | 196 | shutting_down = 1; |
198 | if (call_usermodehelper("/sbin/shutdown", argv, envp, 0) < 0) | 197 | if (orderly_poweroff(true) < 0) |
199 | printk(KERN_CRIT "envctrl: shutdown execution failed\n"); | 198 | printk(KERN_CRIT "envctrl: shutdown execution failed\n"); |
200 | } | 199 | } |
201 | 200 | ||
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 8328acab47fd..dadabef116b6 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/ioport.h> | 26 | #include <linux/ioport.h> |
27 | #include <linux/miscdevice.h> | 27 | #include <linux/miscdevice.h> |
28 | #include <linux/kmod.h> | 28 | #include <linux/kmod.h> |
29 | #include <linux/reboot.h> | ||
29 | 30 | ||
30 | #include <asm/ebus.h> | 31 | #include <asm/ebus.h> |
31 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
@@ -966,10 +967,6 @@ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char mon_type) | |||
966 | static void envctrl_do_shutdown(void) | 967 | static void envctrl_do_shutdown(void) |
967 | { | 968 | { |
968 | static int inprog = 0; | 969 | static int inprog = 0; |
969 | static char *envp[] = { | ||
970 | "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; | ||
971 | char *argv[] = { | ||
972 | "/sbin/shutdown", "-h", "now", NULL }; | ||
973 | int ret; | 970 | int ret; |
974 | 971 | ||
975 | if (inprog != 0) | 972 | if (inprog != 0) |
@@ -977,7 +974,7 @@ static void envctrl_do_shutdown(void) | |||
977 | 974 | ||
978 | inprog = 1; | 975 | inprog = 1; |
979 | printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n"); | 976 | printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n"); |
980 | ret = call_usermodehelper("/sbin/shutdown", argv, envp, 0); | 977 | ret = orderly_poweroff(true); |
981 | if (ret < 0) { | 978 | if (ret < 0) { |
982 | printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n"); | 979 | printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n"); |
983 | inprog = 0; /* unlikely to succeed, but we could try again */ | 980 | inprog = 0; /* unlikely to succeed, but we could try again */ |
diff --git a/drivers/serial/8250_hp300.c b/drivers/serial/8250_hp300.c index 53e81a44c1a3..2cf0953fe0ec 100644 --- a/drivers/serial/8250_hp300.c +++ b/drivers/serial/8250_hp300.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/serial.h> | 12 | #include <linux/serial.h> |
13 | #include <linux/serial_core.h> | 13 | #include <linux/serial_core.h> |
14 | #include <linux/serial_8250.h> | ||
14 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
15 | #include <linux/dio.h> | 16 | #include <linux/dio.h> |
16 | #include <linux/console.h> | 17 | #include <linux/console.h> |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 7fa413ddccf5..18f629706448 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -486,6 +486,36 @@ config SERIAL_DZ_CONSOLE | |||
486 | 486 | ||
487 | If unsure, say Y. | 487 | If unsure, say Y. |
488 | 488 | ||
489 | config SERIAL_ZS | ||
490 | tristate "DECstation Z85C30 serial support" | ||
491 | depends on MACH_DECSTATION | ||
492 | select SERIAL_CORE | ||
493 | default y | ||
494 | ---help--- | ||
495 | Support for the Zilog Z85C30 serial communications controller used | ||
496 | for serial ports in newer DECstation systems. These include the | ||
497 | DECsystem 5900 and all models of the DECstation and DECsystem 5000 | ||
498 | systems except for model 200. | ||
499 | |||
500 | If unsure, say Y. To compile this driver as a module, choose M here: | ||
501 | the module will be called zs. | ||
502 | |||
503 | config SERIAL_ZS_CONSOLE | ||
504 | bool "Support for console on a DECstation Z85C30 serial port" | ||
505 | depends on SERIAL_ZS=y | ||
506 | select SERIAL_CORE_CONSOLE | ||
507 | default y | ||
508 | ---help--- | ||
509 | If you say Y here, it will be possible to use a serial port as the | ||
510 | system console (the system console is the device which receives all | ||
511 | kernel messages and warnings and which allows logins in single user | ||
512 | mode). | ||
513 | |||
514 | Note that the firmware uses ttyS1 as the serial console on the | ||
515 | Maxine and ttyS3 on the others using this driver. | ||
516 | |||
517 | If unsure, say Y. | ||
518 | |||
489 | config SERIAL_21285 | 519 | config SERIAL_21285 |
490 | tristate "DC21285 serial port support" | 520 | tristate "DC21285 serial port support" |
491 | depends on ARM && FOOTBRIDGE | 521 | depends on ARM && FOOTBRIDGE |
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index c48cdd61b736..af6377d480d7 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile | |||
@@ -43,6 +43,7 @@ obj-$(CONFIG_V850E_UART) += v850e_uart.o | |||
43 | obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o | 43 | obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o |
44 | obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o | 44 | obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o |
45 | obj-$(CONFIG_SERIAL_DZ) += dz.o | 45 | obj-$(CONFIG_SERIAL_DZ) += dz.o |
46 | obj-$(CONFIG_SERIAL_ZS) += zs.o | ||
46 | obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o | 47 | obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o |
47 | obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o | 48 | obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o |
48 | obj-$(CONFIG_SERIAL_CPM) += cpm_uart/ | 49 | obj-$(CONFIG_SERIAL_CPM) += cpm_uart/ |
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c new file mode 100644 index 000000000000..65f1294fd27b --- /dev/null +++ b/drivers/serial/zs.c | |||
@@ -0,0 +1,1287 @@ | |||
1 | /* | ||
2 | * zs.c: Serial port driver for IOASIC DECstations. | ||
3 | * | ||
4 | * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras. | ||
5 | * Derived from drivers/macintosh/macserial.c by Harald Koerfgen. | ||
6 | * | ||
7 | * DECstation changes | ||
8 | * Copyright (C) 1998-2000 Harald Koerfgen | ||
9 | * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki | ||
10 | * | ||
11 | * For the rest of the code the original Copyright applies: | ||
12 | * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) | ||
13 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
14 | * | ||
15 | * | ||
16 | * Note: for IOASIC systems the wiring is as follows: | ||
17 | * | ||
18 | * mouse/keyboard: | ||
19 | * DIN-7 MJ-4 signal SCC | ||
20 | * 2 1 TxD <- A.TxD | ||
21 | * 3 4 RxD -> A.RxD | ||
22 | * | ||
23 | * EIA-232/EIA-423: | ||
24 | * DB-25 MMJ-6 signal SCC | ||
25 | * 2 2 TxD <- B.TxD | ||
26 | * 3 5 RxD -> B.RxD | ||
27 | * 4 RTS <- ~A.RTS | ||
28 | * 5 CTS -> ~B.CTS | ||
29 | * 6 6 DSR -> ~A.SYNC | ||
30 | * 8 CD -> ~B.DCD | ||
31 | * 12 DSRS(DCE) -> ~A.CTS (*) | ||
32 | * 15 TxC -> B.TxC | ||
33 | * 17 RxC -> B.RxC | ||
34 | * 20 1 DTR <- ~A.DTR | ||
35 | * 22 RI -> ~A.DCD | ||
36 | * 23 DSRS(DTE) <- ~B.RTS | ||
37 | * | ||
38 | * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE) | ||
39 | * is shared with DSRS(DTE) at pin 23. | ||
40 | * | ||
41 | * As you can immediately notice the wiring of the RTS, DTR and DSR signals | ||
42 | * is a bit odd. This makes the handling of port B unnecessarily | ||
43 | * complicated and prevents the use of some automatic modes of operation. | ||
44 | */ | ||
45 | |||
46 | #if defined(CONFIG_SERIAL_ZS_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | ||
47 | #define SUPPORT_SYSRQ | ||
48 | #endif | ||
49 | |||
50 | #include <linux/bug.h> | ||
51 | #include <linux/console.h> | ||
52 | #include <linux/delay.h> | ||
53 | #include <linux/errno.h> | ||
54 | #include <linux/init.h> | ||
55 | #include <linux/interrupt.h> | ||
56 | #include <linux/io.h> | ||
57 | #include <linux/ioport.h> | ||
58 | #include <linux/irqflags.h> | ||
59 | #include <linux/kernel.h> | ||
60 | #include <linux/major.h> | ||
61 | #include <linux/serial.h> | ||
62 | #include <linux/serial_core.h> | ||
63 | #include <linux/spinlock.h> | ||
64 | #include <linux/sysrq.h> | ||
65 | #include <linux/tty.h> | ||
66 | #include <linux/types.h> | ||
67 | |||
68 | #include <asm/atomic.h> | ||
69 | #include <asm/system.h> | ||
70 | |||
71 | #include <asm/dec/interrupts.h> | ||
72 | #include <asm/dec/ioasic_addrs.h> | ||
73 | #include <asm/dec/system.h> | ||
74 | |||
75 | #include "zs.h" | ||
76 | |||
77 | |||
MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DECstation Z85C30 serial driver");
MODULE_LICENSE("GPL");


/* Boot banner; __initdata as these are only printed during init. */
static char zs_name[] __initdata = "DECstation Z85C30 serial driver version ";
static char zs_version[] __initdata = "0.10";

/*
 * It would be nice to dynamically allocate everything that
 * depends on ZS_NUM_SCCS, so we could support any number of
 * Z85C30s, but for now...
 */
#define ZS_NUM_SCCS	2		/* Max # of ZS chips supported.  */
#define ZS_NUM_CHAN	2		/* 2 channels per chip.  */
#define ZS_CHAN_A	0		/* Index of the channel A.  */
#define ZS_CHAN_B	1		/* Index of the channel B.  */
#define ZS_CHAN_IO_SIZE 8		/* IOMEM space size.  */
#define ZS_CHAN_IO_STRIDE 4		/* Register alignment.  */
#define ZS_CHAN_IO_OFFSET 1		/* The SCC resides on the high byte
					   of the 16-bit IOBUS.  */
#define ZS_CLOCK	7372800		/* Z85C30 PCLK input clock rate.  */

/* Map a generic uart_port back to its containing zs_port. */
#define to_zport(uport) container_of(uport, struct zs_port, port)

/* Per-platform chip base addresses and interrupt lines. */
struct zs_parms {
	resource_size_t scc[ZS_NUM_SCCS];
	int irq[ZS_NUM_SCCS];
};

static struct zs_scc zs_sccs[ZS_NUM_SCCS];

/* Power-on register image loaded into each channel at init time. */
static u8 zs_init_regs[ZS_NUM_REGS] __initdata = {
	0,				/* write 0 */
	PAR_SPEC,			/* write 1 */
	0,				/* write 2 */
	0,				/* write 3 */
	X16CLK | SB1,			/* write 4 */
	0,				/* write 5 */
	0, 0, 0,			/* write 6, 7, 8 */
	MIE | DLC | NV,			/* write 9 */
	NRZ,				/* write 10 */
	TCBR | RCBR,			/* write 11 */
	0, 0,				/* BRG time constant, write 12 + 13 */
	BRSRC | BRENABL,		/* write 14 */
	0,				/* write 15 */
};
125 | |||
126 | /* | ||
127 | * Debugging. | ||
128 | */ | ||
129 | #undef ZS_DEBUG_REGS | ||
130 | |||
131 | |||
/*
 * Reading and writing Z85C30 registers.
 */
static void recovery_delay(void)
{
	/* The Z85C30 needs a short recovery time between bus accesses. */
	udelay(2);
}
139 | |||
/*
 * Read an SCC read register.  For a non-zero register number the
 * register is first selected by writing its number to the control
 * port; RR0 is the default target and needs no selection cycle.
 */
static u8 read_zsreg(struct zs_port *zport, int reg)
{
	void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET;
	u8 retval;

	if (reg != 0) {
		writeb(reg & 0xf, control);
		fast_iob();
		recovery_delay();
	}
	retval = readb(control);
	recovery_delay();
	return retval;
}
154 | |||
155 | static void write_zsreg(struct zs_port *zport, int reg, u8 value) | ||
156 | { | ||
157 | void __iomem *control = zport->port.membase + ZS_CHAN_IO_OFFSET; | ||
158 | |||
159 | if (reg != 0) { | ||
160 | writeb(reg & 0xf, control); | ||
161 | fast_iob(); recovery_delay(); | ||
162 | } | ||
163 | writeb(value, control); | ||
164 | fast_iob(); | ||
165 | recovery_delay(); | ||
166 | return; | ||
167 | } | ||
168 | |||
/* Read a character from the channel's data port (RR8 shortcut). */
static u8 read_zsdata(struct zs_port *zport)
{
	void __iomem *data = zport->port.membase +
			     ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET;
	u8 retval;

	retval = readb(data);
	recovery_delay();
	return retval;
}
179 | |||
180 | static void write_zsdata(struct zs_port *zport, u8 value) | ||
181 | { | ||
182 | void __iomem *data = zport->port.membase + | ||
183 | ZS_CHAN_IO_STRIDE + ZS_CHAN_IO_OFFSET; | ||
184 | |||
185 | writeb(value, data); | ||
186 | fast_iob(); | ||
187 | recovery_delay(); | ||
188 | return; | ||
189 | } | ||
190 | |||
#ifdef ZS_DEBUG_REGS
/*
 * Debug helper: dump the shadow (W) and hardware (R) register sets
 * of every active channel to the console.
 */
void zs_dump(void)
{
	struct zs_port *zport;
	int i, j;

	for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) {
		zport = &zs_sccs[i / ZS_NUM_CHAN].zport[i % ZS_NUM_CHAN];

		/* Skip channels that were never probed. */
		if (!zport->scc)
			continue;

		for (j = 0; j < 16; j++)
			printk("W%-2d = 0x%02x\t", j, zport->regs[j]);
		printk("\n");
		for (j = 0; j < 16; j++)
			printk("R%-2d = 0x%02x\t", j, read_zsreg(zport, j));
		printk("\n\n");
	}
}
#endif
212 | |||
213 | |||
214 | static void zs_spin_lock_cond_irq(spinlock_t *lock, int irq) | ||
215 | { | ||
216 | if (irq) | ||
217 | spin_lock_irq(lock); | ||
218 | else | ||
219 | spin_lock(lock); | ||
220 | } | ||
221 | |||
222 | static void zs_spin_unlock_cond_irq(spinlock_t *lock, int irq) | ||
223 | { | ||
224 | if (irq) | ||
225 | spin_unlock_irq(lock); | ||
226 | else | ||
227 | spin_unlock(lock); | ||
228 | } | ||
229 | |||
/*
 * Discard characters until the receive FIFO is empty.  Returns the
 * remaining loop budget; a negative value means the wait timed out.
 * Called with the SCC lock held.
 */
static int zs_receive_drain(struct zs_port *zport)
{
	int loops = 10000;

	while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
		read_zsdata(zport);
	return loops;
}
238 | |||
/*
 * Wait for the transmit buffer to empty.  The SCC lock is dropped
 * (conditionally re-enabling interrupts, see zs_spin_*_cond_irq)
 * around each delay so interrupts can still be serviced.  Returns
 * the remaining loop budget; negative means the wait timed out.
 */
static int zs_transmit_drain(struct zs_port *zport, int irq)
{
	struct zs_scc *scc = zport->scc;
	int loops = 10000;

	while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
		zs_spin_unlock_cond_irq(&scc->zlock, irq);
		udelay(2);
		zs_spin_lock_cond_irq(&scc->zlock, irq);
	}
	return loops;
}
251 | |||
/*
 * Wait until the transmit shift register is empty too (ALL_SNT),
 * i.e. the last bit has actually left the wire.  Same locking and
 * return convention as zs_transmit_drain().
 */
static int zs_line_drain(struct zs_port *zport, int irq)
{
	struct zs_scc *scc = zport->scc;
	int loops = 10000;

	while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
		zs_spin_unlock_cond_irq(&scc->zlock, irq);
		udelay(2);
		zs_spin_lock_cond_irq(&scc->zlock, irq);
	}
	return loops;
}
264 | |||
265 | |||
266 | static void load_zsregs(struct zs_port *zport, u8 *regs, int irq) | ||
267 | { | ||
268 | /* Let the current transmission finish. */ | ||
269 | zs_line_drain(zport, irq); | ||
270 | /* Load 'em up. */ | ||
271 | write_zsreg(zport, R3, regs[3] & ~RxENABLE); | ||
272 | write_zsreg(zport, R5, regs[5] & ~TxENAB); | ||
273 | write_zsreg(zport, R4, regs[4]); | ||
274 | write_zsreg(zport, R9, regs[9]); | ||
275 | write_zsreg(zport, R1, regs[1]); | ||
276 | write_zsreg(zport, R2, regs[2]); | ||
277 | write_zsreg(zport, R10, regs[10]); | ||
278 | write_zsreg(zport, R14, regs[14] & ~BRENABL); | ||
279 | write_zsreg(zport, R11, regs[11]); | ||
280 | write_zsreg(zport, R12, regs[12]); | ||
281 | write_zsreg(zport, R13, regs[13]); | ||
282 | write_zsreg(zport, R14, regs[14]); | ||
283 | write_zsreg(zport, R15, regs[15]); | ||
284 | if (regs[3] & RxENABLE) | ||
285 | write_zsreg(zport, R3, regs[3]); | ||
286 | if (regs[5] & TxENAB) | ||
287 | write_zsreg(zport, R5, regs[5]); | ||
288 | return; | ||
289 | } | ||
290 | |||
291 | |||
292 | /* | ||
293 | * Status handling routines. | ||
294 | */ | ||
295 | |||
296 | /* | ||
297 | * zs_tx_empty() -- get the transmitter empty status | ||
298 | * | ||
299 | * Purpose: Let user call ioctl() to get info when the UART physically | ||
300 | * is emptied. On bus types like RS485, the transmitter must | ||
301 | * release the bus after transmitting. This must be done when | ||
302 | * the transmit shift register is empty, not be done when the | ||
303 | * transmit holding register is empty. This functionality | ||
304 | * allows an RS485 driver to be written in user space. | ||
305 | */ | ||
306 | static unsigned int zs_tx_empty(struct uart_port *uport) | ||
307 | { | ||
308 | struct zs_port *zport = to_zport(uport); | ||
309 | struct zs_scc *scc = zport->scc; | ||
310 | unsigned long flags; | ||
311 | u8 status; | ||
312 | |||
313 | spin_lock_irqsave(&scc->zlock, flags); | ||
314 | status = read_zsreg(zport, R1); | ||
315 | spin_unlock_irqrestore(&scc->zlock, flags); | ||
316 | |||
317 | return status & ALL_SNT ? TIOCSER_TEMT : 0; | ||
318 | } | ||
319 | |||
/*
 * Assemble TIOCM_* bits for the EIA-232 (B) port from the RR0 status
 * of both channels.  Per the wiring note at the top of this file:
 * B.CTS carries CTS, B.DCD carries CD, A.DCD carries RI and A.SYNC
 * carries DSR.  Called with the SCC lock held.
 */
static unsigned int zs_raw_get_ab_mctrl(struct zs_port *zport_a,
					struct zs_port *zport_b)
{
	u8 status_a, status_b;
	unsigned int mctrl;

	status_a = read_zsreg(zport_a, R0);
	status_b = read_zsreg(zport_b, R0);

	mctrl = ((status_b & CTS) ? TIOCM_CTS : 0) |
		((status_b & DCD) ? TIOCM_CAR : 0) |
		((status_a & DCD) ? TIOCM_RNG : 0) |
		((status_a & SYNC_HUNT) ? TIOCM_DSR : 0);

	return mctrl;
}
336 | |||
337 | static unsigned int zs_raw_get_mctrl(struct zs_port *zport) | ||
338 | { | ||
339 | struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A]; | ||
340 | |||
341 | return zport != zport_a ? zs_raw_get_ab_mctrl(zport_a, zport) : 0; | ||
342 | } | ||
343 | |||
/*
 * Detect modem-line changes since the last snapshot.  Only lines whose
 * status interrupts are enabled (per the WR15 masks of both channels)
 * are refreshed from hardware; zport->mctrl is updated accordingly and
 * the XOR of the old and new state is returned.  Always 0 for channel
 * A, which has no modem lines of its own.  Called with the SCC lock
 * held.
 */
static unsigned int zs_raw_xor_mctrl(struct zs_port *zport)
{
	struct zs_port *zport_a = &zport->scc->zport[ZS_CHAN_A];
	unsigned int mmask, mctrl, delta;
	u8 mask_a, mask_b;

	if (zport == zport_a)
		return 0;

	mask_a = zport_a->regs[15];
	mask_b = zport->regs[15];

	/* Translate enabled WR15 interrupt bits into TIOCM_* bits. */
	mmask = ((mask_b & CTSIE) ? TIOCM_CTS : 0) |
		((mask_b & DCDIE) ? TIOCM_CAR : 0) |
		((mask_a & DCDIE) ? TIOCM_RNG : 0) |
		((mask_a & SYNCIE) ? TIOCM_DSR : 0);

	mctrl = zport->mctrl;
	if (mmask) {
		/* Refresh only the monitored lines from hardware. */
		mctrl &= ~mmask;
		mctrl |= zs_raw_get_ab_mctrl(zport_a, zport) & mmask;
	}

	delta = mctrl ^ zport->mctrl;
	if (delta)
		zport->mctrl = mctrl;

	return delta;
}
373 | |||
374 | static unsigned int zs_get_mctrl(struct uart_port *uport) | ||
375 | { | ||
376 | struct zs_port *zport = to_zport(uport); | ||
377 | struct zs_scc *scc = zport->scc; | ||
378 | unsigned int mctrl; | ||
379 | |||
380 | spin_lock(&scc->zlock); | ||
381 | mctrl = zs_raw_get_mctrl(zport); | ||
382 | spin_unlock(&scc->zlock); | ||
383 | |||
384 | return mctrl; | ||
385 | } | ||
386 | |||
/*
 * uart_ops hook: drive the modem control outputs.  DTR and RTS of the
 * EIA-232 connector are physically wired to channel A outputs (see the
 * wiring note at the top of this file), so they are programmed through
 * zport_a even when the request is for channel B.
 */
static void zs_set_mctrl(struct uart_port *uport, unsigned int mctrl)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;
	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
	u8 oldloop, newloop;

	spin_lock(&scc->zlock);
	if (zport != zport_a) {
		/* ~A.DTR and ~A.RTS drive the B-side connector. */
		if (mctrl & TIOCM_DTR)
			zport_a->regs[5] |= DTR;
		else
			zport_a->regs[5] &= ~DTR;
		if (mctrl & TIOCM_RTS)
			zport_a->regs[5] |= RTS;
		else
			zport_a->regs[5] &= ~RTS;
		write_zsreg(zport_a, R5, zport_a->regs[5]);
	}

	/* Rarely modified, so don't poke at hardware unless necessary. */
	oldloop = zport->regs[14];
	newloop = oldloop;
	if (mctrl & TIOCM_LOOP)
		newloop |= LOOPBAK;
	else
		newloop &= ~LOOPBAK;
	if (newloop != oldloop) {
		zport->regs[14] = newloop;
		write_zsreg(zport, R14, zport->regs[14]);
	}
	spin_unlock(&scc->zlock);
}
420 | |||
/*
 * Acknowledge the pending transmit interrupt and mark the transmitter
 * stopped.  Called with the SCC lock held.
 */
static void zs_raw_stop_tx(struct zs_port *zport)
{
	write_zsreg(zport, R0, RES_Tx_P);
	zport->tx_stopped = 1;
}
426 | |||
427 | static void zs_stop_tx(struct uart_port *uport) | ||
428 | { | ||
429 | struct zs_port *zport = to_zport(uport); | ||
430 | struct zs_scc *scc = zport->scc; | ||
431 | |||
432 | spin_lock(&scc->zlock); | ||
433 | zs_raw_stop_tx(zport); | ||
434 | spin_unlock(&scc->zlock); | ||
435 | } | ||
436 | |||
static void zs_raw_transmit_chars(struct zs_port *);

/*
 * uart_ops hook: (re)start transmission.  If the transmitter was
 * stopped, wait for the transmit buffer to free up and push the
 * first character; subsequent ones go out from the Tx interrupt.
 */
static void zs_start_tx(struct uart_port *uport)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;

	spin_lock(&scc->zlock);
	if (zport->tx_stopped) {
		zs_transmit_drain(zport, 0);
		zport->tx_stopped = 0;
		zs_raw_transmit_chars(zport);
	}
	spin_unlock(&scc->zlock);
}
452 | |||
/*
 * uart_ops hook: disable receive and the associated status interrupts.
 * Because some B-port modem lines are wired to channel A (see the top
 * of this file), the A-channel interrupt enables have to be trimmed
 * too when the B port stops receiving.
 */
static void zs_stop_rx(struct uart_port *uport)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;
	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];

	spin_lock(&scc->zlock);
	zport->regs[15] &= ~BRKIE;
	zport->regs[1] &= ~(RxINT_MASK | TxINT_ENAB);
	zport->regs[1] |= RxINT_DISAB;

	if (zport != zport_a) {
		/* A-side DCD tracks RI and SYNC tracks DSR. */
		zport_a->regs[15] &= ~(DCDIE | SYNCIE);
		write_zsreg(zport_a, R15, zport_a->regs[15]);
		if (!(zport_a->regs[15] & BRKIE)) {
			zport_a->regs[1] &= ~EXT_INT_ENAB;
			write_zsreg(zport_a, R1, zport_a->regs[1]);
		}

		/* This-side DCD tracks DCD and CTS tracks CTS. */
		zport->regs[15] &= ~(DCDIE | CTSIE);
		zport->regs[1] &= ~EXT_INT_ENAB;
	} else {
		/* DCD tracks RI and SYNC tracks DSR for the B side. */
		if (!(zport->regs[15] & (DCDIE | SYNCIE)))
			zport->regs[1] &= ~EXT_INT_ENAB;
	}

	write_zsreg(zport, R15, zport->regs[15]);
	write_zsreg(zport, R1, zport->regs[1]);
	spin_unlock(&scc->zlock);
}
486 | |||
/*
 * uart_ops hook: enable modem-status interrupts.  A no-op for channel
 * A, which has no modem lines of its own; for channel B the relevant
 * enables are split between both channels due to the board wiring.
 */
static void zs_enable_ms(struct uart_port *uport)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;
	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];

	if (zport == zport_a)
		return;

	spin_lock(&scc->zlock);

	/* Clear Ext interrupts if not being handled already. */
	if (!(zport_a->regs[1] & EXT_INT_ENAB))
		write_zsreg(zport_a, R0, RES_EXT_INT);

	/* A-side DCD tracks RI and SYNC tracks DSR. */
	zport_a->regs[1] |= EXT_INT_ENAB;
	zport_a->regs[15] |= DCDIE | SYNCIE;

	/* This-side DCD tracks DCD and CTS tracks CTS. */
	zport->regs[15] |= DCDIE | CTSIE;

	/* Resynchronise the cached line state before enabling. */
	zs_raw_xor_mctrl(zport);

	write_zsreg(zport_a, R1, zport_a->regs[1]);
	write_zsreg(zport_a, R15, zport_a->regs[15]);
	write_zsreg(zport, R15, zport->regs[15]);
	spin_unlock(&scc->zlock);
}
516 | |||
517 | static void zs_break_ctl(struct uart_port *uport, int break_state) | ||
518 | { | ||
519 | struct zs_port *zport = to_zport(uport); | ||
520 | struct zs_scc *scc = zport->scc; | ||
521 | unsigned long flags; | ||
522 | |||
523 | spin_lock_irqsave(&scc->zlock, flags); | ||
524 | if (break_state == -1) | ||
525 | zport->regs[5] |= SND_BRK; | ||
526 | else | ||
527 | zport->regs[5] &= ~SND_BRK; | ||
528 | write_zsreg(zport, R5, zport->regs[5]); | ||
529 | spin_unlock_irqrestore(&scc->zlock, flags); | ||
530 | } | ||
531 | |||
532 | |||
/*
 * Interrupt handling routines.
 */
#define Rx_BRK 0x0100			/* BREAK event software flag.  */
#define Rx_SYS 0x0200			/* SysRq event software flag.  */

/*
 * Pull received characters out of the FIFO, record error statistics
 * and hand them to the tty layer.  Runs from interrupt context; the
 * SCC lock is taken only around the actual hardware accesses.
 */
static void zs_receive_chars(struct zs_port *zport)
{
	struct uart_port *uport = &zport->port;
	struct zs_scc *scc = zport->scc;
	struct uart_icount *icount;
	unsigned int avail, status, ch, flag;
	int count;

	/* Bounded loop so one busy channel cannot monopolise the handler. */
	for (count = 16; count; count--) {
		spin_lock(&scc->zlock);
		avail = read_zsreg(zport, R0) & Rx_CH_AV;
		spin_unlock(&scc->zlock);
		if (!avail)
			break;

		spin_lock(&scc->zlock);
		status = read_zsreg(zport, R1) & (Rx_OVR | FRM_ERR | PAR_ERR);
		ch = read_zsdata(zport);
		spin_unlock(&scc->zlock);

		flag = TTY_NORMAL;

		icount = &uport->icount;
		icount->rx++;

		/* Handle the null char got when BREAK is removed. */
		if (!ch)
			status |= zport->tty_break;
		if (unlikely(status &
			     (Rx_OVR | FRM_ERR | PAR_ERR | Rx_SYS | Rx_BRK))) {
			zport->tty_break = 0;

			/* Reset the error indication. */
			if (status & (Rx_OVR | FRM_ERR | PAR_ERR)) {
				spin_lock(&scc->zlock);
				write_zsreg(zport, R0, ERR_RES);
				spin_unlock(&scc->zlock);
			}

			if (status & (Rx_SYS | Rx_BRK)) {
				icount->brk++;
				/* SysRq discards the null char. */
				if (status & Rx_SYS)
					continue;
			} else if (status & FRM_ERR)
				icount->frame++;
			else if (status & PAR_ERR)
				icount->parity++;
			if (status & Rx_OVR)
				icount->overrun++;

			/* Map the surviving error bits to a tty flag. */
			status &= uport->read_status_mask;
			if (status & Rx_BRK)
				flag = TTY_BREAK;
			else if (status & FRM_ERR)
				flag = TTY_FRAME;
			else if (status & PAR_ERR)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(uport, ch))
			continue;

		uart_insert_char(uport, status, Rx_OVR, ch, flag);
	}

	tty_flip_buffer_push(uport->info->tty);
}
607 | |||
/*
 * Feed one character to the transmitter: an XON/XOFF character takes
 * priority, otherwise the next byte from the circular buffer goes out.
 * Stops the transmitter when there is nothing (more) to send.  Called
 * with the SCC lock held.
 */
static void zs_raw_transmit_chars(struct zs_port *zport)
{
	struct circ_buf *xmit = &zport->port.info->xmit;

	/* XON/XOFF chars. */
	if (zport->port.x_char) {
		write_zsdata(zport, zport->port.x_char);
		zport->port.icount.tx++;
		zport->port.x_char = 0;
		return;
	}

	/* If nothing to do or stopped or hardware stopped. */
	if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) {
		zs_raw_stop_tx(zport);
		return;
	}

	/* Send char. */
	write_zsdata(zport, xmit->buf[xmit->tail]);
	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	zport->port.icount.tx++;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&zport->port);

	/* Are we done? */
	if (uart_circ_empty(xmit))
		zs_raw_stop_tx(zport);
}
638 | |||
639 | static void zs_transmit_chars(struct zs_port *zport) | ||
640 | { | ||
641 | struct zs_scc *scc = zport->scc; | ||
642 | |||
643 | spin_lock(&scc->zlock); | ||
644 | zs_raw_transmit_chars(zport); | ||
645 | spin_unlock(&scc->zlock); | ||
646 | } | ||
647 | |||
648 | static void zs_status_handle(struct zs_port *zport, struct zs_port *zport_a) | ||
649 | { | ||
650 | struct uart_port *uport = &zport->port; | ||
651 | struct zs_scc *scc = zport->scc; | ||
652 | unsigned int delta; | ||
653 | u8 status, brk; | ||
654 | |||
655 | spin_lock(&scc->zlock); | ||
656 | |||
657 | /* Get status from Read Register 0. */ | ||
658 | status = read_zsreg(zport, R0); | ||
659 | |||
660 | if (zport->regs[15] & BRKIE) { | ||
661 | brk = status & BRK_ABRT; | ||
662 | if (brk && !zport->brk) { | ||
663 | spin_unlock(&scc->zlock); | ||
664 | if (uart_handle_break(uport)) | ||
665 | zport->tty_break = Rx_SYS; | ||
666 | else | ||
667 | zport->tty_break = Rx_BRK; | ||
668 | spin_lock(&scc->zlock); | ||
669 | } | ||
670 | zport->brk = brk; | ||
671 | } | ||
672 | |||
673 | if (zport != zport_a) { | ||
674 | delta = zs_raw_xor_mctrl(zport); | ||
675 | spin_unlock(&scc->zlock); | ||
676 | |||
677 | if (delta & TIOCM_CTS) | ||
678 | uart_handle_cts_change(uport, | ||
679 | zport->mctrl & TIOCM_CTS); | ||
680 | if (delta & TIOCM_CAR) | ||
681 | uart_handle_dcd_change(uport, | ||
682 | zport->mctrl & TIOCM_CAR); | ||
683 | if (delta & TIOCM_RNG) | ||
684 | uport->icount.dsr++; | ||
685 | if (delta & TIOCM_DSR) | ||
686 | uport->icount.rng++; | ||
687 | |||
688 | if (delta) | ||
689 | wake_up_interruptible(&uport->info->delta_msr_wait); | ||
690 | |||
691 | spin_lock(&scc->zlock); | ||
692 | } | ||
693 | |||
694 | /* Clear the status condition... */ | ||
695 | write_zsreg(zport, R0, RES_EXT_INT); | ||
696 | |||
697 | spin_unlock(&scc->zlock); | ||
698 | } | ||
699 | |||
/*
 * This is the Z85C30 driver's generic interrupt routine.  One IRQ is
 * shared by both channels of a chip; the pending sources are polled
 * from RR3 and dispatched until none remain (bounded to 16 rounds).
 */
static irqreturn_t zs_interrupt(int irq, void *dev_id)
{
	struct zs_scc *scc = dev_id;
	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
	struct zs_port *zport_b = &scc->zport[ZS_CHAN_B];
	irqreturn_t status = IRQ_NONE;
	u8 zs_intreg;
	int count;

	/*
	 * NOTE: The read register 3, which holds the irq status,
	 *       does so for both channels on each chip.  Although
	 *       the status value itself must be read from the A
	 *       channel and is only valid when read from channel A.
	 *       Yes... broken hardware...
	 */
	for (count = 16; count; count--) {
		spin_lock(&scc->zlock);
		zs_intreg = read_zsreg(zport_a, R3);
		spin_unlock(&scc->zlock);
		if (!zs_intreg)
			break;

		/*
		 * We do not like losing characters, so we prioritise
		 * interrupt sources a little bit differently than
		 * the SCC would, was it allowed to.
		 */
		if (zs_intreg & CHBRxIP)
			zs_receive_chars(zport_b);
		if (zs_intreg & CHARxIP)
			zs_receive_chars(zport_a);
		if (zs_intreg & CHBEXT)
			zs_status_handle(zport_b, zport_a);
		if (zs_intreg & CHAEXT)
			zs_status_handle(zport_a, zport_a);
		if (zs_intreg & CHBTxIP)
			zs_transmit_chars(zport_b);
		if (zs_intreg & CHATxIP)
			zs_transmit_chars(zport_a);

		status = IRQ_HANDLED;
	}

	return status;
}
749 | |||
750 | |||
751 | /* | ||
752 | * Finally, routines used to initialize the serial port. | ||
753 | */ | ||
754 | static int zs_startup(struct uart_port *uport) | ||
755 | { | ||
756 | struct zs_port *zport = to_zport(uport); | ||
757 | struct zs_scc *scc = zport->scc; | ||
758 | unsigned long flags; | ||
759 | int irq_guard; | ||
760 | int ret; | ||
761 | |||
762 | irq_guard = atomic_add_return(1, &scc->irq_guard); | ||
763 | if (irq_guard == 1) { | ||
764 | ret = request_irq(zport->port.irq, zs_interrupt, | ||
765 | IRQF_SHARED, "scc", scc); | ||
766 | if (ret) { | ||
767 | atomic_add(-1, &scc->irq_guard); | ||
768 | printk(KERN_ERR "zs: can't get irq %d\n", | ||
769 | zport->port.irq); | ||
770 | return ret; | ||
771 | } | ||
772 | } | ||
773 | |||
774 | spin_lock_irqsave(&scc->zlock, flags); | ||
775 | |||
776 | /* Clear the receive FIFO. */ | ||
777 | zs_receive_drain(zport); | ||
778 | |||
779 | /* Clear the interrupt registers. */ | ||
780 | write_zsreg(zport, R0, ERR_RES); | ||
781 | write_zsreg(zport, R0, RES_Tx_P); | ||
782 | /* But Ext only if not being handled already. */ | ||
783 | if (!(zport->regs[1] & EXT_INT_ENAB)) | ||
784 | write_zsreg(zport, R0, RES_EXT_INT); | ||
785 | |||
786 | /* Finally, enable sequencing and interrupts. */ | ||
787 | zport->regs[1] &= ~RxINT_MASK; | ||
788 | zport->regs[1] |= RxINT_ALL | TxINT_ENAB | EXT_INT_ENAB; | ||
789 | zport->regs[3] |= RxENABLE; | ||
790 | zport->regs[5] |= TxENAB; | ||
791 | zport->regs[15] |= BRKIE; | ||
792 | write_zsreg(zport, R1, zport->regs[1]); | ||
793 | write_zsreg(zport, R3, zport->regs[3]); | ||
794 | write_zsreg(zport, R5, zport->regs[5]); | ||
795 | write_zsreg(zport, R15, zport->regs[15]); | ||
796 | |||
797 | /* Record the current state of RR0. */ | ||
798 | zport->mctrl = zs_raw_get_mctrl(zport); | ||
799 | zport->brk = read_zsreg(zport, R0) & BRK_ABRT; | ||
800 | |||
801 | zport->tx_stopped = 1; | ||
802 | |||
803 | spin_unlock_irqrestore(&scc->zlock, flags); | ||
804 | |||
805 | return 0; | ||
806 | } | ||
807 | |||
808 | static void zs_shutdown(struct uart_port *uport) | ||
809 | { | ||
810 | struct zs_port *zport = to_zport(uport); | ||
811 | struct zs_scc *scc = zport->scc; | ||
812 | unsigned long flags; | ||
813 | int irq_guard; | ||
814 | |||
815 | spin_lock_irqsave(&scc->zlock, flags); | ||
816 | |||
817 | zport->regs[5] &= ~TxENAB; | ||
818 | zport->regs[3] &= ~RxENABLE; | ||
819 | write_zsreg(zport, R5, zport->regs[5]); | ||
820 | write_zsreg(zport, R3, zport->regs[3]); | ||
821 | |||
822 | spin_unlock_irqrestore(&scc->zlock, flags); | ||
823 | |||
824 | irq_guard = atomic_add_return(-1, &scc->irq_guard); | ||
825 | if (!irq_guard) | ||
826 | free_irq(zport->port.irq, scc); | ||
827 | } | ||
828 | |||
829 | |||
/*
 * Hardware-reset the chip (only once per SCC) and load the channel's
 * shadow register set into the hardware.  Works with interrupts either
 * enabled or disabled on entry; the drain helpers are told whether
 * they may re-enable interrupts while busy-waiting.
 */
static void zs_reset(struct zs_port *zport)
{
	struct zs_scc *scc = zport->scc;
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&scc->zlock, flags);
	irq = !irqs_disabled_flags(flags);
	if (!scc->initialised) {
		/* Reset the pointer first, just in case... */
		read_zsreg(zport, R0);
		/* And let the current transmission finish. */
		zs_line_drain(zport, irq);
		write_zsreg(zport, R9, FHWRES);
		udelay(10);
		write_zsreg(zport, R9, 0);
		scc->initialised = 1;
	}
	load_zsregs(zport, zport->regs, irq);
	spin_unlock_irqrestore(&scc->zlock, flags);
}
851 | |||
/*
 * uart_ops hook: apply new termios settings -- character size, parity,
 * stop bits, baud rate (via the BRG time constant) and the error/break
 * masks used by the receive path.  The new shadow registers are pushed
 * to the hardware in one go by load_zsregs().
 */
static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
			   struct ktermios *old_termios)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;
	struct zs_port *zport_a = &scc->zport[ZS_CHAN_A];
	int irq;
	unsigned int baud, brg;
	unsigned long flags;

	spin_lock_irqsave(&scc->zlock, flags);
	irq = !irqs_disabled_flags(flags);

	/* Byte size. */
	zport->regs[3] &= ~RxNBITS_MASK;
	zport->regs[5] &= ~TxNBITS_MASK;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		zport->regs[3] |= Rx5;
		zport->regs[5] |= Tx5;
		break;
	case CS6:
		zport->regs[3] |= Rx6;
		zport->regs[5] |= Tx6;
		break;
	case CS7:
		zport->regs[3] |= Rx7;
		zport->regs[5] |= Tx7;
		break;
	case CS8:
	default:
		zport->regs[3] |= Rx8;
		zport->regs[5] |= Tx8;
		break;
	}

	/* Parity and stop bits. */
	zport->regs[4] &= ~(XCLK_MASK | SB_MASK | PAR_ENA | PAR_EVEN);
	if (termios->c_cflag & CSTOPB)
		zport->regs[4] |= SB2;
	else
		zport->regs[4] |= SB1;
	if (termios->c_cflag & PARENB)
		zport->regs[4] |= PAR_ENA;
	if (!(termios->c_cflag & PARODD))
		zport->regs[4] |= PAR_EVEN;
	switch (zport->clk_mode) {
	case 64:
		zport->regs[4] |= X64CLK;
		break;
	case 32:
		zport->regs[4] |= X32CLK;
		break;
	case 16:
		zport->regs[4] |= X16CLK;
		break;
	case 1:
		zport->regs[4] |= X1CLK;
		break;
	default:
		BUG();
	}

	baud = uart_get_baud_rate(uport, termios, old_termios, 0,
				  uport->uartclk / zport->clk_mode / 4);

	/* Program the baud-rate generator time constant. */
	brg = ZS_BPS_TO_BRG(baud, uport->uartclk / zport->clk_mode);
	zport->regs[12] = brg & 0xff;
	zport->regs[13] = (brg >> 8) & 0xff;

	uart_update_timeout(uport, termios->c_cflag, baud);

	/* Which error conditions are reported to the tty layer... */
	uport->read_status_mask = Rx_OVR;
	if (termios->c_iflag & INPCK)
		uport->read_status_mask |= FRM_ERR | PAR_ERR;
	if (termios->c_iflag & (BRKINT | PARMRK))
		uport->read_status_mask |= Rx_BRK;

	/* ...and which ones are dropped silently. */
	uport->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		uport->ignore_status_mask |= FRM_ERR | PAR_ERR;
	if (termios->c_iflag & IGNBRK) {
		uport->ignore_status_mask |= Rx_BRK;
		if (termios->c_iflag & IGNPAR)
			uport->ignore_status_mask |= Rx_OVR;
	}

	if (termios->c_cflag & CREAD)
		zport->regs[3] |= RxENABLE;
	else
		zport->regs[3] &= ~RxENABLE;

	if (zport != zport_a) {
		/* Modem-status interrupts exist only for the B port. */
		if (!(termios->c_cflag & CLOCAL)) {
			zport->regs[15] |= DCDIE;
		} else
			zport->regs[15] &= ~DCDIE;
		if (termios->c_cflag & CRTSCTS) {
			zport->regs[15] |= CTSIE;
		} else
			zport->regs[15] &= ~CTSIE;
		zs_raw_xor_mctrl(zport);
	}

	/* Load up the new values. */
	load_zsregs(zport, zport->regs, irq);

	spin_unlock_irqrestore(&scc->zlock, flags);
}
961 | |||
962 | |||
/*
 * Identify the port type for the serial core (shown by setserial and
 * in /proc/tty/driver).  All ports driven here are Z85C30 SCC channels.
 */
static const char *zs_type(struct uart_port *uport)
{
	static const char type_name[] = "Z85C30 SCC";

	return type_name;
}
967 | |||
968 | static void zs_release_port(struct uart_port *uport) | ||
969 | { | ||
970 | iounmap(uport->membase); | ||
971 | uport->membase = 0; | ||
972 | release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); | ||
973 | } | ||
974 | |||
975 | static int zs_map_port(struct uart_port *uport) | ||
976 | { | ||
977 | if (!uport->membase) | ||
978 | uport->membase = ioremap_nocache(uport->mapbase, | ||
979 | ZS_CHAN_IO_SIZE); | ||
980 | if (!uport->membase) { | ||
981 | printk(KERN_ERR "zs: Cannot map MMIO\n"); | ||
982 | return -ENOMEM; | ||
983 | } | ||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int zs_request_port(struct uart_port *uport) | ||
988 | { | ||
989 | int ret; | ||
990 | |||
991 | if (!request_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE, "scc")) { | ||
992 | printk(KERN_ERR "zs: Unable to reserve MMIO resource\n"); | ||
993 | return -EBUSY; | ||
994 | } | ||
995 | ret = zs_map_port(uport); | ||
996 | if (ret) { | ||
997 | release_mem_region(uport->mapbase, ZS_CHAN_IO_SIZE); | ||
998 | return ret; | ||
999 | } | ||
1000 | return 0; | ||
1001 | } | ||
1002 | |||
1003 | static void zs_config_port(struct uart_port *uport, int flags) | ||
1004 | { | ||
1005 | struct zs_port *zport = to_zport(uport); | ||
1006 | |||
1007 | if (flags & UART_CONFIG_TYPE) { | ||
1008 | if (zs_request_port(uport)) | ||
1009 | return; | ||
1010 | |||
1011 | uport->type = PORT_ZS; | ||
1012 | |||
1013 | zs_reset(zport); | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | static int zs_verify_port(struct uart_port *uport, struct serial_struct *ser) | ||
1018 | { | ||
1019 | struct zs_port *zport = to_zport(uport); | ||
1020 | int ret = 0; | ||
1021 | |||
1022 | if (ser->type != PORT_UNKNOWN && ser->type != PORT_ZS) | ||
1023 | ret = -EINVAL; | ||
1024 | if (ser->irq != uport->irq) | ||
1025 | ret = -EINVAL; | ||
1026 | if (ser->baud_base != uport->uartclk / zport->clk_mode / 4) | ||
1027 | ret = -EINVAL; | ||
1028 | return ret; | ||
1029 | } | ||
1030 | |||
1031 | |||
/*
 * Callback table handed to the serial core; one entry per operation
 * the core may invoke on a port (open/close, modem control, termios,
 * resource management).
 */
static struct uart_ops zs_ops = {
	.tx_empty = zs_tx_empty,
	.set_mctrl = zs_set_mctrl,
	.get_mctrl = zs_get_mctrl,
	.stop_tx = zs_stop_tx,
	.start_tx = zs_start_tx,
	.stop_rx = zs_stop_rx,
	.enable_ms = zs_enable_ms,
	.break_ctl = zs_break_ctl,
	.startup = zs_startup,
	.shutdown = zs_shutdown,
	.set_termios = zs_set_termios,
	.type = zs_type,
	.release_port = zs_release_port,
	.request_port = zs_request_port,
	.config_port = zs_config_port,
	.verify_port = zs_verify_port,
};
1050 | |||
/*
 * Initialize Z85C30 port structures.
 *
 * Detect the IOASIC SCC chips present and prefill the static zs_sccs[]
 * state for every channel: clock mode, register shadow and uart_port
 * geometry.  May be called from both the console setup path and module
 * init; only the first call probes, later calls return the cached 0.
 */
static int __init zs_probe_sccs(void)
{
	static int probed;		/* guards against re-probing */
	struct zs_parms zs_parms;
	int chip, side, irq;
	int n_chips = 0;
	int i;

	if (probed)
		return 0;

	/* A negative dec_interrupt[] entry means the SCC is absent. */
	irq = dec_interrupt[DEC_IRQ_SCC0];
	if (irq >= 0) {
		zs_parms.scc[n_chips] = IOASIC_SCC0;
		zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC0];
		n_chips++;
	}
	irq = dec_interrupt[DEC_IRQ_SCC1];
	if (irq >= 0) {
		zs_parms.scc[n_chips] = IOASIC_SCC1;
		zs_parms.irq[n_chips] = dec_interrupt[DEC_IRQ_SCC1];
		n_chips++;
	}
	if (!n_chips)
		return -ENXIO;

	probed = 1;

	for (chip = 0; chip < n_chips; chip++) {
		spin_lock_init(&zs_sccs[chip].zlock);
		for (side = 0; side < ZS_NUM_CHAN; side++) {
			struct zs_port *zport = &zs_sccs[chip].zport[side];
			struct uart_port *uport = &zport->port;

			zport->scc = &zs_sccs[chip];
			zport->clk_mode = 16;	/* x16 clock (see zs_set_termios) */

			uport->irq = zs_parms.irq[chip];
			uport->uartclk = ZS_CLOCK;
			uport->fifosize = 1;
			uport->iotype = UPIO_MEM;
			uport->flags = UPF_BOOT_AUTOCONF;
			uport->ops = &zs_ops;
			uport->line = chip * ZS_NUM_CHAN + side;
			/* side ^ ZS_CHAN_B selects the channel's bank offset. */
			uport->mapbase = dec_kn_slot_base +
					 zs_parms.scc[chip] +
					 (side ^ ZS_CHAN_B) * ZS_CHAN_IO_SIZE;

			/* Seed the write-register shadow with the defaults. */
			for (i = 0; i < ZS_NUM_REGS; i++)
				zport->regs[i] = zs_init_regs[i];
		}
	}

	return 0;
}
1109 | |||
1110 | |||
1111 | #ifdef CONFIG_SERIAL_ZS_CONSOLE | ||
/*
 * Emit one character for the console.  Waits (under the SCC lock) for
 * the transmitter to drain before writing the data register.
 */
static void zs_console_putchar(struct uart_port *uport, int ch)
{
	struct zs_port *zport = to_zport(uport);
	struct zs_scc *scc = zport->scc;
	int irq;
	unsigned long flags;

	spin_lock_irqsave(&scc->zlock, flags);
	/* Tell the drain helper whether interrupts can still fire. */
	irq = !irqs_disabled_flags(flags);
	if (zs_transmit_drain(zport, irq))
		write_zsdata(zport, ch);	/* character dropped if drain fails */
	spin_unlock_irqrestore(&scc->zlock, flags);
}
1125 | |||
/*
 * Print a string to the serial port trying not to disturb
 * any possible real use of the port...
 *
 * WR1/WR5 are snapshot first so the exact previous state (Tx
 * interrupts, transmitter enable) can be restored afterwards.
 */
static void zs_console_write(struct console *co, const char *s,
			     unsigned int count)
{
	int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
	struct zs_port *zport = &zs_sccs[chip].zport[side];
	struct zs_scc *scc = zport->scc;
	unsigned long flags;
	u8 txint, txenb;
	int irq;

	/* Disable transmit interrupts and enable the transmitter. */
	spin_lock_irqsave(&scc->zlock, flags);
	txint = zport->regs[1];		/* snapshot WR1 (interrupt enables) */
	txenb = zport->regs[5];		/* snapshot WR5 (Tx control) */
	if (txint & TxINT_ENAB) {
		zport->regs[1] = txint & ~TxINT_ENAB;
		write_zsreg(zport, R1, zport->regs[1]);
	}
	if (!(txenb & TxENAB)) {
		zport->regs[5] = txenb | TxENAB;
		write_zsreg(zport, R5, zport->regs[5]);
	}
	spin_unlock_irqrestore(&scc->zlock, flags);

	uart_console_write(&zport->port, s, count, zs_console_putchar);

	/* Restore transmit interrupts and the transmitter enable. */
	spin_lock_irqsave(&scc->zlock, flags);
	irq = !irqs_disabled_flags(flags);
	/* Let the line drain fully before undoing our changes. */
	zs_line_drain(zport, irq);
	if (!(txenb & TxENAB)) {
		zport->regs[5] &= ~TxENAB;
		write_zsreg(zport, R5, zport->regs[5]);
	}
	if (txint & TxINT_ENAB) {
		zport->regs[1] |= TxINT_ENAB;
		write_zsreg(zport, R1, zport->regs[1]);
	}
	spin_unlock_irqrestore(&scc->zlock, flags);
}
1170 | |||
/*
 * Setup serial console baud/bits/parity. We do two things here:
 * - construct a cflag setting for the first uart_open()
 * - initialise the serial port
 * Return non-zero if we didn't find a serial port.
 */
static int __init zs_console_setup(struct console *co, char *options)
{
	int chip = co->index / ZS_NUM_CHAN, side = co->index % ZS_NUM_CHAN;
	struct zs_port *zport = &zs_sccs[chip].zport[side];
	struct uart_port *uport = &zport->port;
	int baud = 9600;	/* defaults used when no options string given */
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/* The console writes before the port is opened: map it now. */
	ret = zs_map_port(uport);
	if (ret)
		return ret;

	zs_reset(zport);

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(uport, co, baud, parity, bits, flow);
}
1198 | |||
/* Forward declaration: the console's .data must point at the driver. */
static struct uart_driver zs_reg;

/* Console attached to the "ttyS" ports driven by this file. */
static struct console zs_console = {
	.name = "ttyS",
	.write = zs_console_write,
	.device = uart_console_device,
	.setup = zs_console_setup,
	.flags = CON_PRINTBUFFER,	/* presumably replays early messages -- standard flag */
	.index = -1,
	.data = &zs_reg,
};
1209 | |||
1210 | /* | ||
1211 | * Register console. | ||
1212 | */ | ||
1213 | static int __init zs_serial_console_init(void) | ||
1214 | { | ||
1215 | int ret; | ||
1216 | |||
1217 | ret = zs_probe_sccs(); | ||
1218 | if (ret) | ||
1219 | return ret; | ||
1220 | register_console(&zs_console); | ||
1221 | |||
1222 | return 0; | ||
1223 | } | ||
1224 | |||
1225 | console_initcall(zs_serial_console_init); | ||
1226 | |||
1227 | #define SERIAL_ZS_CONSOLE &zs_console | ||
1228 | #else | ||
1229 | #define SERIAL_ZS_CONSOLE NULL | ||
1230 | #endif /* CONFIG_SERIAL_ZS_CONSOLE */ | ||
1231 | |||
/*
 * Driver object registered with the serial core: claims the standard
 * ttyS name/major/minor range for all SCC channels.
 */
static struct uart_driver zs_reg = {
	.owner = THIS_MODULE,
	.driver_name = "serial",
	.dev_name = "ttyS",
	.major = TTY_MAJOR,
	.minor = 64,
	.nr = ZS_NUM_SCCS * ZS_NUM_CHAN,	/* one line per channel */
	.cons = SERIAL_ZS_CONSOLE,	/* NULL unless console support is built */
};
1241 | |||
1242 | /* zs_init inits the driver. */ | ||
1243 | static int __init zs_init(void) | ||
1244 | { | ||
1245 | int i, ret; | ||
1246 | |||
1247 | pr_info("%s%s\n", zs_name, zs_version); | ||
1248 | |||
1249 | /* Find out how many Z85C30 SCCs we have. */ | ||
1250 | ret = zs_probe_sccs(); | ||
1251 | if (ret) | ||
1252 | return ret; | ||
1253 | |||
1254 | ret = uart_register_driver(&zs_reg); | ||
1255 | if (ret) | ||
1256 | return ret; | ||
1257 | |||
1258 | for (i = 0; i < ZS_NUM_SCCS * ZS_NUM_CHAN; i++) { | ||
1259 | struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; | ||
1260 | struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; | ||
1261 | struct uart_port *uport = &zport->port; | ||
1262 | |||
1263 | if (zport->scc) | ||
1264 | uart_add_one_port(&zs_reg, uport); | ||
1265 | } | ||
1266 | |||
1267 | return 0; | ||
1268 | } | ||
1269 | |||
1270 | static void __exit zs_exit(void) | ||
1271 | { | ||
1272 | int i; | ||
1273 | |||
1274 | for (i = ZS_NUM_SCCS * ZS_NUM_CHAN - 1; i >= 0; i--) { | ||
1275 | struct zs_scc *scc = &zs_sccs[i / ZS_NUM_CHAN]; | ||
1276 | struct zs_port *zport = &scc->zport[i % ZS_NUM_CHAN]; | ||
1277 | struct uart_port *uport = &zport->port; | ||
1278 | |||
1279 | if (zport->scc) | ||
1280 | uart_remove_one_port(&zs_reg, uport); | ||
1281 | } | ||
1282 | |||
1283 | uart_unregister_driver(&zs_reg); | ||
1284 | } | ||
1285 | |||
1286 | module_init(zs_init); | ||
1287 | module_exit(zs_exit); | ||
diff --git a/drivers/serial/zs.h b/drivers/serial/zs.h new file mode 100644 index 000000000000..aa921b57d827 --- /dev/null +++ b/drivers/serial/zs.h | |||
@@ -0,0 +1,284 @@ | |||
1 | /* | ||
2 | * zs.h: Definitions for the DECstation Z85C30 serial driver. | ||
3 | * | ||
4 | * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras. | ||
5 | * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen. | ||
6 | * | ||
7 | * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) | ||
8 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
9 | * Copyright (C) 2004, 2005, 2007 Maciej W. Rozycki | ||
10 | */ | ||
11 | #ifndef _SERIAL_ZS_H | ||
12 | #define _SERIAL_ZS_H | ||
13 | |||
14 | #ifdef __KERNEL__ | ||
15 | |||
16 | #define ZS_NUM_REGS 16 | ||
17 | |||
/*
 * This is our internal structure for each serial port's state.
 */
struct zs_port {
	struct zs_scc *scc;		/* Containing SCC. */
	struct uart_port port;		/* Underlying UART. */

	int clk_mode;			/* May be 1, 16, 32, or 64. */

	unsigned int tty_break;		/* Set on BREAK condition. */
	int tx_stopped;			/* Output is suspended. */

	unsigned int mctrl;		/* State of modem lines. */
	u8 brk;				/* BREAK state from RR0. */

	u8 regs[ZS_NUM_REGS];		/* Channel write registers (software shadow). */
};
35 | |||
/*
 * Per-SCC state for locking and the interrupt handler.
 */
struct zs_scc {
	struct zs_port zport[2];	/* The two channels of this chip. */
	spinlock_t zlock;		/* Serializes access to both channels. */
	atomic_t irq_guard;		/* NOTE(review): appears to count IRQ users -- confirm vs. handler. */
	int initialised;
};
45 | |||
46 | #endif /* __KERNEL__ */ | ||
47 | |||
48 | /* | ||
49 | * Conversion routines to/from brg time constants from/to bits per second. | ||
50 | */ | ||
51 | #define ZS_BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) | ||
52 | #define ZS_BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) | ||
53 | |||
54 | /* | ||
55 | * The Zilog register set. | ||
56 | */ | ||
57 | |||
58 | /* Write Register 0 (Command) */ | ||
59 | #define R0 0 /* Register selects */ | ||
60 | #define R1 1 | ||
61 | #define R2 2 | ||
62 | #define R3 3 | ||
63 | #define R4 4 | ||
64 | #define R5 5 | ||
65 | #define R6 6 | ||
66 | #define R7 7 | ||
67 | #define R8 8 | ||
68 | #define R9 9 | ||
69 | #define R10 10 | ||
70 | #define R11 11 | ||
71 | #define R12 12 | ||
72 | #define R13 13 | ||
73 | #define R14 14 | ||
74 | #define R15 15 | ||
75 | |||
76 | #define NULLCODE 0 /* Null Code */ | ||
77 | #define POINT_HIGH 0x8 /* Select upper half of registers */ | ||
78 | #define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */ | ||
79 | #define SEND_ABORT 0x18 /* HDLC Abort */ | ||
80 | #define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ | ||
81 | #define RES_Tx_P 0x28 /* Reset TxINT Pending */ | ||
82 | #define ERR_RES 0x30 /* Error Reset */ | ||
83 | #define RES_H_IUS 0x38 /* Reset highest IUS */ | ||
84 | |||
85 | #define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ | ||
86 | #define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ | ||
87 | #define RES_EOM_L 0xC0 /* Reset EOM latch */ | ||
88 | |||
89 | /* Write Register 1 (Tx/Rx/Ext Int Enable and WAIT/DMA Commands) */ | ||
90 | #define EXT_INT_ENAB 0x1 /* Ext Int Enable */ | ||
91 | #define TxINT_ENAB 0x2 /* Tx Int Enable */ | ||
92 | #define PAR_SPEC 0x4 /* Parity is special condition */ | ||
93 | |||
94 | #define RxINT_DISAB 0 /* Rx Int Disable */ | ||
95 | #define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ | ||
96 | #define RxINT_ALL 0x10 /* Int on all Rx Characters or error */ | ||
97 | #define RxINT_ERR 0x18 /* Int on error only */ | ||
98 | #define RxINT_MASK 0x18 | ||
99 | |||
100 | #define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ | ||
101 | #define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ | ||
102 | #define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ | ||
103 | |||
104 | /* Write Register 2 (Interrupt Vector) */ | ||
105 | |||
106 | /* Write Register 3 (Receive Parameters and Control) */ | ||
107 | #define RxENABLE 0x1 /* Rx Enable */ | ||
108 | #define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ | ||
109 | #define ADD_SM 0x4 /* Address Search Mode (SDLC) */ | ||
110 | #define RxCRC_ENAB 0x8 /* Rx CRC Enable */ | ||
111 | #define ENT_HM 0x10 /* Enter Hunt Mode */ | ||
112 | #define AUTO_ENAB 0x20 /* Auto Enables */ | ||
113 | #define Rx5 0x0 /* Rx 5 Bits/Character */ | ||
114 | #define Rx7 0x40 /* Rx 7 Bits/Character */ | ||
115 | #define Rx6 0x80 /* Rx 6 Bits/Character */ | ||
116 | #define Rx8 0xc0 /* Rx 8 Bits/Character */ | ||
117 | #define RxNBITS_MASK 0xc0 | ||
118 | |||
119 | /* Write Register 4 (Transmit/Receive Miscellaneous Parameters and Modes) */ | ||
120 | #define PAR_ENA 0x1 /* Parity Enable */ | ||
121 | #define PAR_EVEN 0x2 /* Parity Even/Odd* */ | ||
122 | |||
123 | #define SYNC_ENAB 0 /* Sync Modes Enable */ | ||
124 | #define SB1 0x4 /* 1 stop bit/char */ | ||
125 | #define SB15 0x8 /* 1.5 stop bits/char */ | ||
126 | #define SB2 0xc /* 2 stop bits/char */ | ||
127 | #define SB_MASK 0xc | ||
128 | |||
129 | #define MONSYNC 0 /* 8 Bit Sync character */ | ||
130 | #define BISYNC 0x10 /* 16 bit sync character */ | ||
131 | #define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ | ||
132 | #define EXTSYNC 0x30 /* External Sync Mode */ | ||
133 | |||
134 | #define X1CLK 0x0 /* x1 clock mode */ | ||
135 | #define X16CLK 0x40 /* x16 clock mode */ | ||
136 | #define X32CLK 0x80 /* x32 clock mode */ | ||
137 | #define X64CLK 0xc0 /* x64 clock mode */ | ||
138 | #define XCLK_MASK 0xc0 | ||
139 | |||
140 | /* Write Register 5 (Transmit Parameters and Controls) */ | ||
141 | #define TxCRC_ENAB 0x1 /* Tx CRC Enable */ | ||
142 | #define RTS 0x2 /* RTS */ | ||
143 | #define SDLC_CRC 0x4 /* SDLC/CRC-16 */ | ||
144 | #define TxENAB 0x8 /* Tx Enable */ | ||
145 | #define SND_BRK 0x10 /* Send Break */ | ||
146 | #define Tx5 0x0 /* Tx 5 bits (or less)/character */ | ||
147 | #define Tx7 0x20 /* Tx 7 bits/character */ | ||
148 | #define Tx6 0x40 /* Tx 6 bits/character */ | ||
149 | #define Tx8 0x60 /* Tx 8 bits/character */ | ||
150 | #define TxNBITS_MASK 0x60 | ||
151 | #define DTR 0x80 /* DTR */ | ||
152 | |||
153 | /* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ | ||
154 | |||
155 | /* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ | ||
156 | |||
157 | /* Write Register 8 (Transmit Buffer) */ | ||
158 | |||
159 | /* Write Register 9 (Master Interrupt Control) */ | ||
160 | #define VIS 1 /* Vector Includes Status */ | ||
161 | #define NV 2 /* No Vector */ | ||
162 | #define DLC 4 /* Disable Lower Chain */ | ||
163 | #define MIE 8 /* Master Interrupt Enable */ | ||
164 | #define STATHI 0x10 /* Status high */ | ||
165 | #define SOFTACK 0x20 /* Software Interrupt Acknowledge */ | ||
166 | #define NORESET 0 /* No reset on write to R9 */ | ||
167 | #define CHRB 0x40 /* Reset channel B */ | ||
168 | #define CHRA 0x80 /* Reset channel A */ | ||
169 | #define FHWRES 0xc0 /* Force hardware reset */ | ||
170 | |||
171 | /* Write Register 10 (Miscellaneous Transmitter/Receiver Control Bits) */ | ||
172 | #define BIT6 1 /* 6 bit/8bit sync */ | ||
173 | #define LOOPMODE 2 /* SDLC Loop mode */ | ||
174 | #define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ | ||
175 | #define MARKIDLE 8 /* Mark/flag on idle */ | ||
176 | #define GAOP 0x10 /* Go active on poll */ | ||
177 | #define NRZ 0 /* NRZ mode */ | ||
178 | #define NRZI 0x20 /* NRZI mode */ | ||
179 | #define FM1 0x40 /* FM1 (transition = 1) */ | ||
180 | #define FM0 0x60 /* FM0 (transition = 0) */ | ||
181 | #define CRCPS 0x80 /* CRC Preset I/O */ | ||
182 | |||
183 | /* Write Register 11 (Clock Mode Control) */ | ||
184 | #define TRxCXT 0 /* TRxC = Xtal output */ | ||
185 | #define TRxCTC 1 /* TRxC = Transmit clock */ | ||
186 | #define TRxCBR 2 /* TRxC = BR Generator Output */ | ||
187 | #define TRxCDP 3 /* TRxC = DPLL output */ | ||
188 | #define TRxCOI 4 /* TRxC O/I */ | ||
189 | #define TCRTxCP 0 /* Transmit clock = RTxC pin */ | ||
190 | #define TCTRxCP 8 /* Transmit clock = TRxC pin */ | ||
191 | #define TCBR 0x10 /* Transmit clock = BR Generator output */ | ||
192 | #define TCDPLL 0x18 /* Transmit clock = DPLL output */ | ||
193 | #define RCRTxCP 0 /* Receive clock = RTxC pin */ | ||
194 | #define RCTRxCP 0x20 /* Receive clock = TRxC pin */ | ||
195 | #define RCBR 0x40 /* Receive clock = BR Generator output */ | ||
196 | #define RCDPLL 0x60 /* Receive clock = DPLL output */ | ||
197 | #define RTxCX 0x80 /* RTxC Xtal/No Xtal */ | ||
198 | |||
199 | /* Write Register 12 (Lower Byte of Baud Rate Generator Time Constant) */ | ||
200 | |||
201 | /* Write Register 13 (Upper Byte of Baud Rate Generator Time Constant) */ | ||
202 | |||
203 | /* Write Register 14 (Miscellaneous Control Bits) */ | ||
204 | #define BRENABL 1 /* Baud rate generator enable */ | ||
205 | #define BRSRC 2 /* Baud rate generator source */ | ||
206 | #define DTRREQ 4 /* DTR/Request function */ | ||
207 | #define AUTOECHO 8 /* Auto Echo */ | ||
208 | #define LOOPBAK 0x10 /* Local loopback */ | ||
209 | #define SEARCH 0x20 /* Enter search mode */ | ||
210 | #define RMC 0x40 /* Reset missing clock */ | ||
211 | #define DISDPLL 0x60 /* Disable DPLL */ | ||
212 | #define SSBR 0x80 /* Set DPLL source = BR generator */ | ||
213 | #define SSRTxC 0xa0 /* Set DPLL source = RTxC */ | ||
214 | #define SFMM 0xc0 /* Set FM mode */ | ||
215 | #define SNRZI 0xe0 /* Set NRZI mode */ | ||
216 | |||
217 | /* Write Register 15 (External/Status Interrupt Control) */ | ||
218 | #define WR7P_EN 1 /* WR7 Prime SDLC Feature Enable */ | ||
219 | #define ZCIE 2 /* Zero count IE */ | ||
220 | #define DCDIE 8 /* DCD IE */ | ||
221 | #define SYNCIE 0x10 /* Sync/hunt IE */ | ||
222 | #define CTSIE 0x20 /* CTS IE */ | ||
223 | #define TxUIE 0x40 /* Tx Underrun/EOM IE */ | ||
224 | #define BRKIE 0x80 /* Break/Abort IE */ | ||
225 | |||
226 | |||
227 | /* Read Register 0 (Transmit/Receive Buffer Status and External Status) */ | ||
228 | #define Rx_CH_AV 0x1 /* Rx Character Available */ | ||
229 | #define ZCOUNT 0x2 /* Zero count */ | ||
230 | #define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ | ||
231 | #define DCD 0x8 /* DCD */ | ||
232 | #define SYNC_HUNT 0x10 /* Sync/hunt */ | ||
233 | #define CTS 0x20 /* CTS */ | ||
234 | #define TxEOM 0x40 /* Tx underrun */ | ||
235 | #define BRK_ABRT 0x80 /* Break/Abort */ | ||
236 | |||
237 | /* Read Register 1 (Special Receive Condition Status) */ | ||
238 | #define ALL_SNT 0x1 /* All sent */ | ||
239 | /* Residue Data for 8 Rx bits/char programmed */ | ||
240 | #define RES3 0x8 /* 0/3 */ | ||
241 | #define RES4 0x4 /* 0/4 */ | ||
242 | #define RES5 0xc /* 0/5 */ | ||
243 | #define RES6 0x2 /* 0/6 */ | ||
244 | #define RES7 0xa /* 0/7 */ | ||
245 | #define RES8 0x6 /* 0/8 */ | ||
246 | #define RES18 0xe /* 1/8 */ | ||
247 | #define RES28 0x0 /* 2/8 */ | ||
248 | /* Special Rx Condition Interrupts */ | ||
249 | #define PAR_ERR 0x10 /* Parity Error */ | ||
250 | #define Rx_OVR 0x20 /* Rx Overrun Error */ | ||
251 | #define FRM_ERR 0x40 /* CRC/Framing Error */ | ||
252 | #define END_FR 0x80 /* End of Frame (SDLC) */ | ||
253 | |||
254 | /* Read Register 2 (Interrupt Vector (WR2) -- channel A). */ | ||
255 | |||
256 | /* Read Register 2 (Modified Interrupt Vector -- channel B). */ | ||
257 | |||
258 | /* Read Register 3 (Interrupt Pending Bits -- channel A only). */ | ||
259 | #define CHBEXT 0x1 /* Channel B Ext/Stat IP */ | ||
260 | #define CHBTxIP 0x2 /* Channel B Tx IP */ | ||
261 | #define CHBRxIP 0x4 /* Channel B Rx IP */ | ||
262 | #define CHAEXT 0x8 /* Channel A Ext/Stat IP */ | ||
263 | #define CHATxIP 0x10 /* Channel A Tx IP */ | ||
264 | #define CHARxIP 0x20 /* Channel A Rx IP */ | ||
265 | |||
266 | /* Read Register 6 (SDLC FIFO Status and Byte Count LSB) */ | ||
267 | |||
268 | /* Read Register 7 (SDLC FIFO Status and Byte Count MSB) */ | ||
269 | |||
270 | /* Read Register 8 (Receive Data) */ | ||
271 | |||
272 | /* Read Register 10 (Miscellaneous Status Bits) */ | ||
273 | #define ONLOOP 2 /* On loop */ | ||
274 | #define LOOPSEND 0x10 /* Loop sending */ | ||
275 | #define CLK2MIS 0x40 /* Two clocks missing */ | ||
276 | #define CLK1MIS 0x80 /* One clock missing */ | ||
277 | |||
278 | /* Read Register 12 (Lower Byte of Baud Rate Generator Constant (WR12)) */ | ||
279 | |||
280 | /* Read Register 13 (Upper Byte of Baud Rate Generator Constant (WR13) */ | ||
281 | |||
282 | /* Read Register 15 (External/Status Interrupt Control (WR15)) */ | ||
283 | |||
284 | #endif /* _SERIAL_ZS_H */ | ||
diff --git a/drivers/tc/Makefile b/drivers/tc/Makefile index 967342692211..c899246bd362 100644 --- a/drivers/tc/Makefile +++ b/drivers/tc/Makefile | |||
@@ -5,7 +5,6 @@ | |||
5 | # Object file lists. | 5 | # Object file lists. |
6 | 6 | ||
7 | obj-$(CONFIG_TC) += tc.o tc-driver.o | 7 | obj-$(CONFIG_TC) += tc.o tc-driver.o |
8 | obj-$(CONFIG_ZS) += zs.o | ||
9 | obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o | 8 | obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o |
10 | 9 | ||
11 | $(obj)/lk201-map.o: $(obj)/lk201-map.c | 10 | $(obj)/lk201-map.o: $(obj)/lk201-map.c |
diff --git a/drivers/tc/zs.c b/drivers/tc/zs.c deleted file mode 100644 index ed979f13908a..000000000000 --- a/drivers/tc/zs.c +++ /dev/null | |||
@@ -1,2203 +0,0 @@ | |||
1 | /* | ||
2 | * decserial.c: Serial port driver for IOASIC DECstations. | ||
3 | * | ||
4 | * Derived from drivers/sbus/char/sunserial.c by Paul Mackerras. | ||
5 | * Derived from drivers/macintosh/macserial.c by Harald Koerfgen. | ||
6 | * | ||
7 | * DECstation changes | ||
8 | * Copyright (C) 1998-2000 Harald Koerfgen | ||
9 | * Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005 Maciej W. Rozycki | ||
10 | * | ||
11 | * For the rest of the code the original Copyright applies: | ||
12 | * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) | ||
13 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
14 | * | ||
15 | * | ||
16 | * Note: for IOASIC systems the wiring is as follows: | ||
17 | * | ||
18 | * mouse/keyboard: | ||
19 | * DIN-7 MJ-4 signal SCC | ||
20 | * 2 1 TxD <- A.TxD | ||
21 | * 3 4 RxD -> A.RxD | ||
22 | * | ||
23 | * EIA-232/EIA-423: | ||
24 | * DB-25 MMJ-6 signal SCC | ||
25 | * 2 2 TxD <- B.TxD | ||
26 | * 3 5 RxD -> B.RxD | ||
27 | * 4 RTS <- ~A.RTS | ||
28 | * 5 CTS -> ~B.CTS | ||
29 | * 6 6 DSR -> ~A.SYNC | ||
30 | * 8 CD -> ~B.DCD | ||
31 | * 12 DSRS(DCE) -> ~A.CTS (*) | ||
32 | * 15 TxC -> B.TxC | ||
33 | * 17 RxC -> B.RxC | ||
34 | * 20 1 DTR <- ~A.DTR | ||
35 | * 22 RI -> ~A.DCD | ||
36 | * 23 DSRS(DTE) <- ~B.RTS | ||
37 | * | ||
38 | * (*) EIA-232 defines the signal at this pin to be SCD, while DSRS(DCE) | ||
39 | * is shared with DSRS(DTE) at pin 23. | ||
40 | */ | ||
41 | |||
42 | #include <linux/errno.h> | ||
43 | #include <linux/signal.h> | ||
44 | #include <linux/sched.h> | ||
45 | #include <linux/timer.h> | ||
46 | #include <linux/interrupt.h> | ||
47 | #include <linux/tty.h> | ||
48 | #include <linux/tty_flip.h> | ||
49 | #include <linux/major.h> | ||
50 | #include <linux/string.h> | ||
51 | #include <linux/fcntl.h> | ||
52 | #include <linux/mm.h> | ||
53 | #include <linux/kernel.h> | ||
54 | #include <linux/delay.h> | ||
55 | #include <linux/init.h> | ||
56 | #include <linux/ioport.h> | ||
57 | #include <linux/spinlock.h> | ||
58 | #ifdef CONFIG_SERIAL_DEC_CONSOLE | ||
59 | #include <linux/console.h> | ||
60 | #endif | ||
61 | |||
62 | #include <asm/io.h> | ||
63 | #include <asm/pgtable.h> | ||
64 | #include <asm/irq.h> | ||
65 | #include <asm/system.h> | ||
66 | #include <asm/bootinfo.h> | ||
67 | |||
68 | #include <asm/dec/interrupts.h> | ||
69 | #include <asm/dec/ioasic_addrs.h> | ||
70 | #include <asm/dec/machtype.h> | ||
71 | #include <asm/dec/serial.h> | ||
72 | #include <asm/dec/system.h> | ||
73 | |||
74 | #ifdef CONFIG_KGDB | ||
75 | #include <asm/kgdb.h> | ||
76 | #endif | ||
77 | #ifdef CONFIG_MAGIC_SYSRQ | ||
78 | #include <linux/sysrq.h> | ||
79 | #endif | ||
80 | |||
81 | #include "zs.h" | ||
82 | |||
83 | /* | ||
84 | * It would be nice to dynamically allocate everything that | ||
85 | * depends on NUM_SERIAL, so we could support any number of | ||
86 | * Z8530s, but for now... | ||
87 | */ | ||
88 | #define NUM_SERIAL 2 /* Max number of ZS chips supported */ | ||
89 | #define NUM_CHANNELS (NUM_SERIAL * 2) /* 2 channels per chip */ | ||
90 | #define CHANNEL_A_NR (zs_parms->channel_a_offset > zs_parms->channel_b_offset) | ||
91 | /* Number of channel A in the chip */ | ||
92 | #define ZS_CHAN_IO_SIZE 8 | ||
93 | #define ZS_CLOCK 7372800 /* Z8530 RTxC input clock rate */ | ||
94 | |||
95 | #define RECOVERY_DELAY udelay(2) | ||
96 | |||
97 | struct zs_parms { | ||
98 | unsigned long scc0; | ||
99 | unsigned long scc1; | ||
100 | int channel_a_offset; | ||
101 | int channel_b_offset; | ||
102 | int irq0; | ||
103 | int irq1; | ||
104 | int clock; | ||
105 | }; | ||
106 | |||
107 | static struct zs_parms *zs_parms; | ||
108 | |||
109 | #ifdef CONFIG_MACH_DECSTATION | ||
110 | static struct zs_parms ds_parms = { | ||
111 | scc0 : IOASIC_SCC0, | ||
112 | scc1 : IOASIC_SCC1, | ||
113 | channel_a_offset : 1, | ||
114 | channel_b_offset : 9, | ||
115 | irq0 : -1, | ||
116 | irq1 : -1, | ||
117 | clock : ZS_CLOCK | ||
118 | }; | ||
119 | #endif | ||
120 | |||
121 | #ifdef CONFIG_MACH_DECSTATION | ||
122 | #define DS_BUS_PRESENT (IOASIC) | ||
123 | #else | ||
124 | #define DS_BUS_PRESENT 0 | ||
125 | #endif | ||
126 | |||
127 | #define BUS_PRESENT (DS_BUS_PRESENT) | ||
128 | |||
129 | DEFINE_SPINLOCK(zs_lock); | ||
130 | |||
131 | struct dec_zschannel zs_channels[NUM_CHANNELS]; | ||
132 | struct dec_serial zs_soft[NUM_CHANNELS]; | ||
133 | int zs_channels_found; | ||
134 | struct dec_serial *zs_chain; /* list of all channels */ | ||
135 | |||
136 | struct tty_struct zs_ttys[NUM_CHANNELS]; | ||
137 | |||
138 | #ifdef CONFIG_SERIAL_DEC_CONSOLE | ||
139 | static struct console zs_console; | ||
140 | #endif | ||
141 | #if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \ | ||
142 | !defined(MODULE) | ||
143 | static unsigned long break_pressed; /* break, really ... */ | ||
144 | #endif | ||
145 | |||
146 | static unsigned char zs_init_regs[16] __initdata = { | ||
147 | 0, /* write 0 */ | ||
148 | 0, /* write 1 */ | ||
149 | 0, /* write 2 */ | ||
150 | 0, /* write 3 */ | ||
151 | (X16CLK), /* write 4 */ | ||
152 | 0, /* write 5 */ | ||
153 | 0, 0, 0, /* write 6, 7, 8 */ | ||
154 | (MIE | DLC | NV), /* write 9 */ | ||
155 | (NRZ), /* write 10 */ | ||
156 | (TCBR | RCBR), /* write 11 */ | ||
157 | 0, 0, /* BRG time constant, write 12 + 13 */ | ||
158 | (BRSRC | BRENABL), /* write 14 */ | ||
159 | 0 /* write 15 */ | ||
160 | }; | ||
161 | |||
162 | static struct tty_driver *serial_driver; | ||
163 | |||
164 | /* serial subtype definitions */ | ||
165 | #define SERIAL_TYPE_NORMAL 1 | ||
166 | |||
167 | /* number of characters left in xmit buffer before we ask for more */ | ||
168 | #define WAKEUP_CHARS 256 | ||
169 | |||
170 | /* | ||
171 | * Debugging. | ||
172 | */ | ||
173 | #undef SERIAL_DEBUG_OPEN | ||
174 | #undef SERIAL_DEBUG_FLOW | ||
175 | #undef SERIAL_DEBUG_THROTTLE | ||
176 | #undef SERIAL_PARANOIA_CHECK | ||
177 | |||
178 | #undef ZS_DEBUG_REGS | ||
179 | |||
180 | #ifdef SERIAL_DEBUG_THROTTLE | ||
181 | #define _tty_name(tty,buf) tty_name(tty,buf) | ||
182 | #endif | ||
183 | |||
184 | #define RS_STROBE_TIME 10 | ||
185 | #define RS_ISR_PASS_LIMIT 256 | ||
186 | |||
187 | static void probe_sccs(void); | ||
188 | static void change_speed(struct dec_serial *info); | ||
189 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout); | ||
190 | |||
/*
 * Sanity-check a dec_serial pointer before use.  Compiles down to
 * "return 0" unless SERIAL_PARANOIA_CHECK is defined; returns 1 when
 * the pointer is NULL or its magic number is wrong (caller should bail).
 */
static inline int serial_paranoia_check(struct dec_serial *info,
					char *name, const char *routine)
{
#ifdef SERIAL_PARANOIA_CHECK
	static const char *badmagic =
		"Warning: bad magic number for serial struct %s in %s\n";
	static const char *badinfo =
		"Warning: null mac_serial for %s in %s\n";

	if (!info) {
		printk(badinfo, name, routine);
		return 1;
	}
	if (info->magic != SERIAL_MAGIC) {
		printk(badmagic, name, routine);
		return 1;
	}
#endif
	return 0;
}
211 | |||
212 | /* | ||
213 | * This is used to figure out the divisor speeds and the timeouts | ||
214 | */ | ||
215 | static int baud_table[] = { | ||
216 | 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, | ||
217 | 9600, 19200, 38400, 57600, 115200, 0 }; | ||
218 | |||
219 | /* | ||
220 | * Reading and writing Z8530 registers. | ||
221 | */ | ||
/*
 * Read SCC register "reg" on "channel".  Register 0 is readable
 * directly; any other register needs its index written to the control
 * port first (the Z8530 register-pointer cycle).
 */
static inline unsigned char read_zsreg(struct dec_zschannel *channel,
				       unsigned char reg)
{
	unsigned char retval;

	if (reg != 0) {
		*channel->control = reg & 0xf;
		fast_iob(); RECOVERY_DELAY;	/* flush write, honour chip recovery time */
	}
	retval = *channel->control;
	RECOVERY_DELAY;
	return retval;
}
235 | |||
/*
 * Write @value to SCC register @reg of @channel using the same
 * two-step pointer/value protocol as read_zsreg().  Register 0 is
 * written directly; any other register is selected first.  The
 * fast_iob()/RECOVERY_DELAY pairs satisfy the chip's recovery-time
 * requirement and must stay in this order.
 */
static inline void write_zsreg(struct dec_zschannel *channel,
			       unsigned char reg, unsigned char value)
{
	if (reg != 0) {
		*channel->control = reg & 0xf;	/* select the register */
		fast_iob(); RECOVERY_DELAY;
	}
	*channel->control = value;
	fast_iob(); RECOVERY_DELAY;
	return;
}
247 | |||
/*
 * Read one byte from the channel's data port (receive FIFO), honoring
 * the chip's recovery delay.
 */
static inline unsigned char read_zsdata(struct dec_zschannel *channel)
{
	unsigned char retval;

	retval = *channel->data;
	RECOVERY_DELAY;
	return retval;
}
256 | |||
/*
 * Write one byte to the channel's data port (transmit buffer),
 * honoring the chip's recovery delay.
 */
static inline void write_zsdata(struct dec_zschannel *channel,
				unsigned char value)
{
	*channel->data = value;
	fast_iob(); RECOVERY_DELAY;
	return;
}
264 | |||
/*
 * Program the full cached register set @regs into the chip.  The order
 * is deliberate: R3/R5 are first written with their receiver/transmitter
 * enable bits masked off, the mode, clock, interrupt and BRG registers
 * are loaded, and R3/R5 are rewritten last with the real enable bits so
 * the channel only comes alive once it is fully configured.
 */
static inline void load_zsregs(struct dec_zschannel *channel,
			       unsigned char *regs)
{
/* ZS_CLEARERR(channel);
	ZS_CLEARFIFO(channel); */
	/* Load 'em up */
	write_zsreg(channel, R3, regs[R3] & ~RxENABLE);
	write_zsreg(channel, R5, regs[R5] & ~TxENAB);
	write_zsreg(channel, R4, regs[R4]);
	write_zsreg(channel, R9, regs[R9]);
	write_zsreg(channel, R1, regs[R1]);
	write_zsreg(channel, R2, regs[R2]);
	write_zsreg(channel, R10, regs[R10]);
	write_zsreg(channel, R11, regs[R11]);
	write_zsreg(channel, R12, regs[R12]);
	write_zsreg(channel, R13, regs[R13]);
	write_zsreg(channel, R14, regs[R14]);
	write_zsreg(channel, R15, regs[R15]);
	write_zsreg(channel, R3, regs[R3]);	/* now enable the receiver */
	write_zsreg(channel, R5, regs[R5]);	/* now enable the transmitter */
	return;
}
287 | |||
288 | /* Sets or clears DTR/RTS on the requested line */ | ||
289 | static inline void zs_rtsdtr(struct dec_serial *info, int which, int set) | ||
290 | { | ||
291 | unsigned long flags; | ||
292 | |||
293 | spin_lock_irqsave(&zs_lock, flags); | ||
294 | if (info->zs_channel != info->zs_chan_a) { | ||
295 | if (set) { | ||
296 | info->zs_chan_a->curregs[5] |= (which & (RTS | DTR)); | ||
297 | } else { | ||
298 | info->zs_chan_a->curregs[5] &= ~(which & (RTS | DTR)); | ||
299 | } | ||
300 | write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]); | ||
301 | } | ||
302 | spin_unlock_irqrestore(&zs_lock, flags); | ||
303 | } | ||
304 | |||
305 | /* Utility routines for the Zilog */ | ||
306 | static inline int get_zsbaud(struct dec_serial *ss) | ||
307 | { | ||
308 | struct dec_zschannel *channel = ss->zs_channel; | ||
309 | int brg; | ||
310 | |||
311 | /* The baud rate is split up between two 8-bit registers in | ||
312 | * what is termed 'BRG time constant' format in my docs for | ||
313 | * the chip, it is a function of the clk rate the chip is | ||
314 | * receiving which happens to be constant. | ||
315 | */ | ||
316 | brg = (read_zsreg(channel, 13) << 8); | ||
317 | brg |= read_zsreg(channel, 12); | ||
318 | return BRG_TO_BPS(brg, (zs_parms->clock/(ss->clk_divisor))); | ||
319 | } | ||
320 | |||
/* On receive, this clears errors and the receiver interrupts */
static inline void rs_recv_clear(struct dec_zschannel *zsc)
{
	write_zsreg(zsc, 0, ERR_RES);	/* reset the Rx error latches */
	write_zsreg(zsc, 0, RES_H_IUS); /* XXX this is unnecessary */
}
327 | |||
328 | /* | ||
329 | * ---------------------------------------------------------------------- | ||
330 | * | ||
331 | * Here starts the interrupt handling routines. All of the following | ||
332 | * subroutines are declared as inline and are folded into | ||
333 | * rs_interrupt(). They were separated out for readability's sake. | ||
334 | * | ||
335 | * - Ted Ts'o (tytso@mit.edu), 7-Mar-93 | ||
336 | * ----------------------------------------------------------------------- | ||
337 | */ | ||
338 | |||
/*
 * This routine is used by the interrupt handler to schedule
 * processing in the software interrupt portion of the driver.
 * It records @event in the port's event mask and kicks the tasklet
 * (do_softint) that performs the deferred work.
 */
static void rs_sched_event(struct dec_serial *info, int event)
{
	info->event |= 1 << event;
	tasklet_schedule(&info->tlet);
}
348 | |||
/*
 * Drain the receive FIFO.  Called from rs_interrupt() with zs_lock
 * effectively held.  Each character is read together with its error
 * status (RR1) and either handed to an attached hook consumer or
 * pushed to the tty flip buffer with the appropriate TTY_* flag.
 * Handles BREAK (including SAK and the console sysrq sequence) and
 * overrun/framing/parity errors.
 */
static void receive_chars(struct dec_serial *info)
{
	struct tty_struct *tty = info->tty;
	unsigned char ch, stat, flag;

	while ((read_zsreg(info->zs_channel, R0) & Rx_CH_AV) != 0) {

		/* Status must be read before the data byte pops the FIFO. */
		stat = read_zsreg(info->zs_channel, R1);
		ch = read_zsdata(info->zs_channel);

		/* No tty and no hook consumer: discard, but keep draining. */
		if (!tty && (!info->hook || !info->hook->rx_char))
			continue;

		flag = TTY_NORMAL;
		if (info->tty_break) {
			info->tty_break = 0;
			flag = TTY_BREAK;
			/* NOTE(review): tty may be NULL here when only a
			 * hook consumer is attached; do_SAK() must tolerate
			 * a NULL tty -- verify. */
			if (info->flags & ZILOG_SAK)
				do_SAK(tty);
			/* Ignore the null char got when BREAK is removed. */
			if (ch == 0)
				continue;
		} else {
			if (stat & Rx_OVR) {
				flag = TTY_OVERRUN;
			} else if (stat & FRM_ERR) {
				flag = TTY_FRAME;
			} else if (stat & PAR_ERR) {
				flag = TTY_PARITY;
			}
			if (flag != TTY_NORMAL)
				/* reset the error indication */
				write_zsreg(info->zs_channel, R0, ERR_RES);
		}

#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
   !defined(MODULE)
		/* BREAK on the console arms a 5-second sysrq window; the
		 * next character within that window is the sysrq key. */
		if (break_pressed && info->line == zs_console.index) {
			/* Ignore the null char got when BREAK is removed. */
			if (ch == 0)
				continue;
			if (time_before(jiffies, break_pressed + HZ * 5)) {
				handle_sysrq(ch, NULL);
				break_pressed = 0;
				continue;
			}
			break_pressed = 0;
		}
#endif

		/* A hook consumer takes one character and ends the pass. */
		if (info->hook && info->hook->rx_char) {
			(*info->hook->rx_char)(ch, flag);
			return;
		}

		tty_insert_flip_char(tty, ch, flag);
	}
	if (tty)
		tty_flip_buffer_push(tty);
}
409 | |||
/*
 * Feed the (single-byte) transmit buffer.  Called from the interrupt
 * path and, with zs_lock held, from the tty-layer entry points.
 * Priority order: a pending XON/XOFF (x_char) first, then the software
 * xmit ring; when nothing can be sent the pending-transmit interrupt
 * is reset so the chip stops asserting it.  Schedules a write wakeup
 * once the ring drains below WAKEUP_CHARS.
 */
static void transmit_chars(struct dec_serial *info)
{
	/* Nothing to do until the transmit buffer is actually empty. */
	if ((read_zsreg(info->zs_channel, R0) & Tx_BUF_EMP) == 0)
		return;
	info->tx_active = 0;

	if (info->x_char) {
		/* Send next char */
		write_zsdata(info->zs_channel, info->x_char);
		info->x_char = 0;
		info->tx_active = 1;
		return;
	}

	if ((info->xmit_cnt <= 0) || (info->tty && info->tty->stopped)
	    || info->tx_stopped) {
		write_zsreg(info->zs_channel, R0, RES_Tx_P);
		return;
	}
	/* Send char */
	write_zsdata(info->zs_channel, info->xmit_buf[info->xmit_tail++]);
	info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
	info->xmit_cnt--;
	info->tx_active = 1;

	if (info->xmit_cnt < WAKEUP_CHARS)
		rs_sched_event(info, RS_EVENT_WRITE_WAKEUP);
}
438 | |||
/*
 * External/status interrupt handler: detects BREAK (arming the console
 * sysrq window or flagging tty_break for receive_chars), and on the
 * B channel tracks DCD transitions (carrier wait / hangup) and CTS
 * transitions (hardware flow control).  Edge detection is done by
 * comparing RR0 against the copy saved in read_reg_zero.
 */
static void status_handle(struct dec_serial *info)
{
	unsigned char stat;

	/* Get status from Read Register 0 */
	stat = read_zsreg(info->zs_channel, R0);

	/* Rising edge of BRK_ABRT means a BREAK has just started. */
	if ((stat & BRK_ABRT) && !(info->read_reg_zero & BRK_ABRT)) {
#if defined(CONFIG_SERIAL_DEC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) && \
   !defined(MODULE)
		if (info->line == zs_console.index) {
			if (!break_pressed)
				break_pressed = jiffies;
		} else
#endif
			info->tty_break = 1;
	}

	if (info->zs_channel != info->zs_chan_a) {

		/* Check for DCD transitions */
		if (info->tty && !C_CLOCAL(info->tty) &&
		    ((stat ^ info->read_reg_zero) & DCD) != 0 ) {
			if (stat & DCD) {
				wake_up_interruptible(&info->open_wait);
			} else {
				tty_hangup(info->tty);
			}
		}

		/* Check for CTS transitions */
		if (info->tty && C_CRTSCTS(info->tty)) {
			if ((stat & CTS) != 0) {
				if (info->tx_stopped) {
					/* CTS re-asserted: resume output. */
					info->tx_stopped = 0;
					if (!info->tx_active)
						transmit_chars(info);
				}
			} else {
				info->tx_stopped = 1;
			}
		}

	}

	/* Clear status condition... */
	write_zsreg(info->zs_channel, R0, RES_EXT_INT);
	/* Remember RR0 for the next edge comparison. */
	info->read_reg_zero = stat;
}
488 | |||
/*
 * This is the serial driver's generic interrupt routine.
 *
 * Repeatedly reads RR3 (interrupt pending) from the A channel and
 * dispatches receive, transmit and external/status work for @dev_id's
 * channel until nothing more is pending.  Returns IRQ_HANDLED if any
 * condition was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t rs_interrupt(int irq, void *dev_id)
{
	struct dec_serial *info = (struct dec_serial *) dev_id;
	irqreturn_t status = IRQ_NONE;
	unsigned char zs_intreg;
	int shift;

	/* NOTE: The read register 3, which holds the irq status,
	 *       does so for both channels on each chip.  Although
	 *       the status value itself must be read from the A
	 *       channel and is only valid when read from channel A.
	 *       Yes... broken hardware...
	 */
#define CHAN_IRQMASK (CHBRxIP | CHBTxIP | CHBEXT)

	/* Channel A's bits sit three positions above channel B's. */
	if (info->zs_chan_a == info->zs_channel)
		shift = 3;	/* Channel A */
	else
		shift = 0;	/* Channel B */

	for (;;) {
		zs_intreg = read_zsreg(info->zs_chan_a, R3) >> shift;
		if ((zs_intreg & CHAN_IRQMASK) == 0)
			break;

		status = IRQ_HANDLED;

		if (zs_intreg & CHBRxIP) {
			receive_chars(info);
		}
		if (zs_intreg & CHBTxIP) {
			transmit_chars(info);
		}
		if (zs_intreg & CHBEXT) {
			status_handle(info);
		}
	}

	/* Reset the highest interrupt under service on the chip
	 * (kept from the original code -- "Why do we need this?"). */
	write_zsreg(info->zs_channel, 0, RES_H_IUS);

	return status;
}
535 | |||
#ifdef ZS_DEBUG_REGS
/*
 * Debug helper: dump the cached write registers and the live read
 * registers of the SCC1A control channel.  The Z8530 has sixteen
 * registers (0-15); the previous loop bound of 15 silently skipped
 * register 15.
 */
void zs_dump (void) {
	int i, j;
	for (i = 0; i < zs_channels_found; i++) {
		struct dec_zschannel *ch = &zs_channels[i];
		if ((long)ch->control == UNI_IO_BASE+UNI_SCC1A_CTRL) {
			for (j = 0; j < 16; j++) {
				printk("W%d = 0x%x\t",
				       j, (int)ch->curregs[j]);
			}
			for (j = 0; j < 16; j++) {
				printk("R%d = 0x%x\t",
				       j, (int)read_zsreg(ch,j));
			}
			printk("\n\n");
		}
	}
}
#endif
555 | |||
556 | /* | ||
557 | * ------------------------------------------------------------------- | ||
558 | * Here ends the serial interrupt routines. | ||
559 | * ------------------------------------------------------------------- | ||
560 | */ | ||
561 | |||
562 | /* | ||
563 | * ------------------------------------------------------------ | ||
564 | * rs_stop() and rs_start() | ||
565 | * | ||
566 | * This routines are called before setting or resetting tty->stopped. | ||
567 | * ------------------------------------------------------------ | ||
568 | */ | ||
569 | static void rs_stop(struct tty_struct *tty) | ||
570 | { | ||
571 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
572 | unsigned long flags; | ||
573 | |||
574 | if (serial_paranoia_check(info, tty->name, "rs_stop")) | ||
575 | return; | ||
576 | |||
577 | #if 1 | ||
578 | spin_lock_irqsave(&zs_lock, flags); | ||
579 | if (info->zs_channel->curregs[5] & TxENAB) { | ||
580 | info->zs_channel->curregs[5] &= ~TxENAB; | ||
581 | write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]); | ||
582 | } | ||
583 | spin_unlock_irqrestore(&zs_lock, flags); | ||
584 | #endif | ||
585 | } | ||
586 | |||
587 | static void rs_start(struct tty_struct *tty) | ||
588 | { | ||
589 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
590 | unsigned long flags; | ||
591 | |||
592 | if (serial_paranoia_check(info, tty->name, "rs_start")) | ||
593 | return; | ||
594 | |||
595 | spin_lock_irqsave(&zs_lock, flags); | ||
596 | #if 1 | ||
597 | if (info->xmit_cnt && info->xmit_buf && !(info->zs_channel->curregs[5] & TxENAB)) { | ||
598 | info->zs_channel->curregs[5] |= TxENAB; | ||
599 | write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]); | ||
600 | } | ||
601 | #else | ||
602 | if (info->xmit_cnt && info->xmit_buf && !info->tx_active) { | ||
603 | transmit_chars(info); | ||
604 | } | ||
605 | #endif | ||
606 | spin_unlock_irqrestore(&zs_lock, flags); | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * This routine is used to handle the "bottom half" processing for the | ||
611 | * serial driver, known also the "software interrupt" processing. | ||
612 | * This processing is done at the kernel interrupt level, after the | ||
613 | * rs_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This | ||
614 | * is where time-consuming activities which can not be done in the | ||
615 | * interrupt driver proper are done; the interrupt driver schedules | ||
616 | * them using rs_sched_event(), and they get done here. | ||
617 | */ | ||
618 | |||
619 | static void do_softint(unsigned long private_) | ||
620 | { | ||
621 | struct dec_serial *info = (struct dec_serial *) private_; | ||
622 | struct tty_struct *tty; | ||
623 | |||
624 | tty = info->tty; | ||
625 | if (!tty) | ||
626 | return; | ||
627 | |||
628 | if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &info->event)) | ||
629 | tty_wakeup(tty); | ||
630 | } | ||
631 | |||
/*
 * Bring a port up for use: allocate the transmit buffer, clear the
 * receive FIFO and pending interrupt conditions, program the line
 * speed, raise RTS/DTR, and enable receive/transmit/external-status
 * interrupts -- in that order, as the hardware requires.
 * Returns 0 on success, -ENOMEM if the xmit buffer cannot be
 * allocated.  No-op when the port is already initialized.
 */
static int zs_startup(struct dec_serial * info)
{
	unsigned long flags;

	if (info->flags & ZILOG_INITIALIZED)
		return 0;

	if (!info->xmit_buf) {
		info->xmit_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL);
		if (!info->xmit_buf)
			return -ENOMEM;
	}

	spin_lock_irqsave(&zs_lock, flags);

#ifdef SERIAL_DEBUG_OPEN
	printk("starting up ttyS%d (irq %d)...", info->line, info->irq);
#endif

	/*
	 * Clear the receive FIFO.
	 */
	ZS_CLEARFIFO(info->zs_channel);
	info->xmit_fifo_size = 1;

	/*
	 * Clear the interrupt registers.
	 */
	write_zsreg(info->zs_channel, R0, ERR_RES);
	write_zsreg(info->zs_channel, R0, RES_H_IUS);

	/*
	 * Set the speed of the serial port
	 */
	change_speed(info);

	/*
	 * Turn on RTS and DTR.
	 */
	zs_rtsdtr(info, RTS | DTR, 1);

	/*
	 * Finally, enable sequencing and interrupts
	 */
	info->zs_channel->curregs[R1] &= ~RxINT_MASK;
	info->zs_channel->curregs[R1] |= (RxINT_ALL | TxINT_ENAB |
					  EXT_INT_ENAB);
	info->zs_channel->curregs[R3] |= RxENABLE;
	info->zs_channel->curregs[R5] |= TxENAB;
	info->zs_channel->curregs[R15] |= (DCDIE | CTSIE | TxUIE | BRKIE);
	write_zsreg(info->zs_channel, R1, info->zs_channel->curregs[R1]);
	write_zsreg(info->zs_channel, R3, info->zs_channel->curregs[R3]);
	write_zsreg(info->zs_channel, R5, info->zs_channel->curregs[R5]);
	write_zsreg(info->zs_channel, R15, info->zs_channel->curregs[R15]);

	/*
	 * And clear the interrupt registers again for luck.
	 */
	write_zsreg(info->zs_channel, R0, ERR_RES);
	write_zsreg(info->zs_channel, R0, RES_H_IUS);

	/* Save the current value of RR0 */
	info->read_reg_zero = read_zsreg(info->zs_channel, R0);

	if (info->tty)
		clear_bit(TTY_IO_ERROR, &info->tty->flags);
	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;

	info->flags |= ZILOG_INITIALIZED;
	spin_unlock_irqrestore(&zs_lock, flags);
	return 0;
}
704 | |||
705 | /* | ||
706 | * This routine will shutdown a serial port; interrupts are disabled, and | ||
707 | * DTR is dropped if the hangup on close termio flag is on. | ||
708 | */ | ||
709 | static void shutdown(struct dec_serial * info) | ||
710 | { | ||
711 | unsigned long flags; | ||
712 | |||
713 | if (!(info->flags & ZILOG_INITIALIZED)) | ||
714 | return; | ||
715 | |||
716 | #ifdef SERIAL_DEBUG_OPEN | ||
717 | printk("Shutting down serial port %d (irq %d)....", info->line, | ||
718 | info->irq); | ||
719 | #endif | ||
720 | |||
721 | spin_lock_irqsave(&zs_lock, flags); | ||
722 | |||
723 | if (info->xmit_buf) { | ||
724 | free_page((unsigned long) info->xmit_buf); | ||
725 | info->xmit_buf = 0; | ||
726 | } | ||
727 | |||
728 | info->zs_channel->curregs[1] = 0; | ||
729 | write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]); /* no interrupts */ | ||
730 | |||
731 | info->zs_channel->curregs[3] &= ~RxENABLE; | ||
732 | write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]); | ||
733 | |||
734 | info->zs_channel->curregs[5] &= ~TxENAB; | ||
735 | write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]); | ||
736 | if (!info->tty || C_HUPCL(info->tty)) { | ||
737 | zs_rtsdtr(info, RTS | DTR, 0); | ||
738 | } | ||
739 | |||
740 | if (info->tty) | ||
741 | set_bit(TTY_IO_ERROR, &info->tty->flags); | ||
742 | |||
743 | info->flags &= ~ZILOG_INITIALIZED; | ||
744 | spin_unlock_irqrestore(&zs_lock, flags); | ||
745 | } | ||
746 | |||
747 | /* | ||
748 | * This routine is called to set the UART divisor registers to match | ||
749 | * the specified baud rate for a serial port. | ||
750 | */ | ||
751 | static void change_speed(struct dec_serial *info) | ||
752 | { | ||
753 | unsigned cflag; | ||
754 | int i; | ||
755 | int brg, bits; | ||
756 | unsigned long flags; | ||
757 | |||
758 | if (!info->hook) { | ||
759 | if (!info->tty || !info->tty->termios) | ||
760 | return; | ||
761 | cflag = info->tty->termios->c_cflag; | ||
762 | if (!info->port) | ||
763 | return; | ||
764 | } else { | ||
765 | cflag = info->hook->cflags; | ||
766 | } | ||
767 | |||
768 | i = cflag & CBAUD; | ||
769 | if (i & CBAUDEX) { | ||
770 | i &= ~CBAUDEX; | ||
771 | if (i < 1 || i > 2) { | ||
772 | if (!info->hook) | ||
773 | info->tty->termios->c_cflag &= ~CBAUDEX; | ||
774 | else | ||
775 | info->hook->cflags &= ~CBAUDEX; | ||
776 | } else | ||
777 | i += 15; | ||
778 | } | ||
779 | |||
780 | spin_lock_irqsave(&zs_lock, flags); | ||
781 | info->zs_baud = baud_table[i]; | ||
782 | if (info->zs_baud) { | ||
783 | brg = BPS_TO_BRG(info->zs_baud, zs_parms->clock/info->clk_divisor); | ||
784 | info->zs_channel->curregs[12] = (brg & 255); | ||
785 | info->zs_channel->curregs[13] = ((brg >> 8) & 255); | ||
786 | zs_rtsdtr(info, DTR, 1); | ||
787 | } else { | ||
788 | zs_rtsdtr(info, RTS | DTR, 0); | ||
789 | return; | ||
790 | } | ||
791 | |||
792 | /* byte size and parity */ | ||
793 | info->zs_channel->curregs[3] &= ~RxNBITS_MASK; | ||
794 | info->zs_channel->curregs[5] &= ~TxNBITS_MASK; | ||
795 | switch (cflag & CSIZE) { | ||
796 | case CS5: | ||
797 | bits = 7; | ||
798 | info->zs_channel->curregs[3] |= Rx5; | ||
799 | info->zs_channel->curregs[5] |= Tx5; | ||
800 | break; | ||
801 | case CS6: | ||
802 | bits = 8; | ||
803 | info->zs_channel->curregs[3] |= Rx6; | ||
804 | info->zs_channel->curregs[5] |= Tx6; | ||
805 | break; | ||
806 | case CS7: | ||
807 | bits = 9; | ||
808 | info->zs_channel->curregs[3] |= Rx7; | ||
809 | info->zs_channel->curregs[5] |= Tx7; | ||
810 | break; | ||
811 | case CS8: | ||
812 | default: /* defaults to 8 bits */ | ||
813 | bits = 10; | ||
814 | info->zs_channel->curregs[3] |= Rx8; | ||
815 | info->zs_channel->curregs[5] |= Tx8; | ||
816 | break; | ||
817 | } | ||
818 | |||
819 | info->timeout = ((info->xmit_fifo_size*HZ*bits) / info->zs_baud); | ||
820 | info->timeout += HZ/50; /* Add .02 seconds of slop */ | ||
821 | |||
822 | info->zs_channel->curregs[4] &= ~(SB_MASK | PAR_ENA | PAR_EVEN); | ||
823 | if (cflag & CSTOPB) { | ||
824 | info->zs_channel->curregs[4] |= SB2; | ||
825 | } else { | ||
826 | info->zs_channel->curregs[4] |= SB1; | ||
827 | } | ||
828 | if (cflag & PARENB) { | ||
829 | info->zs_channel->curregs[4] |= PAR_ENA; | ||
830 | } | ||
831 | if (!(cflag & PARODD)) { | ||
832 | info->zs_channel->curregs[4] |= PAR_EVEN; | ||
833 | } | ||
834 | |||
835 | if (!(cflag & CLOCAL)) { | ||
836 | if (!(info->zs_channel->curregs[15] & DCDIE)) | ||
837 | info->read_reg_zero = read_zsreg(info->zs_channel, 0); | ||
838 | info->zs_channel->curregs[15] |= DCDIE; | ||
839 | } else | ||
840 | info->zs_channel->curregs[15] &= ~DCDIE; | ||
841 | if (cflag & CRTSCTS) { | ||
842 | info->zs_channel->curregs[15] |= CTSIE; | ||
843 | if ((read_zsreg(info->zs_channel, 0) & CTS) == 0) | ||
844 | info->tx_stopped = 1; | ||
845 | } else { | ||
846 | info->zs_channel->curregs[15] &= ~CTSIE; | ||
847 | info->tx_stopped = 0; | ||
848 | } | ||
849 | |||
850 | /* Load up the new values */ | ||
851 | load_zsregs(info->zs_channel, info->zs_channel->curregs); | ||
852 | |||
853 | spin_unlock_irqrestore(&zs_lock, flags); | ||
854 | } | ||
855 | |||
856 | static void rs_flush_chars(struct tty_struct *tty) | ||
857 | { | ||
858 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
859 | unsigned long flags; | ||
860 | |||
861 | if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) | ||
862 | return; | ||
863 | |||
864 | if (info->xmit_cnt <= 0 || tty->stopped || info->tx_stopped || | ||
865 | !info->xmit_buf) | ||
866 | return; | ||
867 | |||
868 | /* Enable transmitter */ | ||
869 | spin_lock_irqsave(&zs_lock, flags); | ||
870 | transmit_chars(info); | ||
871 | spin_unlock_irqrestore(&zs_lock, flags); | ||
872 | } | ||
873 | |||
/*
 * tty write entry point: copy up to @count bytes from @buf into the
 * circular xmit buffer and start transmission if idle.  Returns the
 * number of bytes accepted.
 *
 * Locking is deliberately asymmetric: the loop always exits via the
 * "break" with zs_lock held (c <= 0 on the final pass), so the
 * transmit_chars() call below runs under the lock and the single
 * trailing unlock balances the last acquisition.
 */
static int rs_write(struct tty_struct * tty,
		    const unsigned char *buf, int count)
{
	int c, total = 0;
	struct dec_serial *info = (struct dec_serial *)tty->driver_data;
	unsigned long flags;

	if (serial_paranoia_check(info, tty->name, "rs_write"))
		return 0;

	if (!tty || !info->xmit_buf)
		return 0;

	while (1) {
		spin_lock_irqsave(&zs_lock, flags);
		/* Limit the copy to the free space and to the span up to
		 * the buffer wrap point. */
		c = min(count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
				   SERIAL_XMIT_SIZE - info->xmit_head));
		if (c <= 0)
			break;

		memcpy(info->xmit_buf + info->xmit_head, buf, c);
		info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
		info->xmit_cnt += c;
		spin_unlock_irqrestore(&zs_lock, flags);
		buf += c;
		count -= c;
		total += c;
	}

	/* Still holding zs_lock here (see comment above). */
	if (info->xmit_cnt && !tty->stopped && !info->tx_stopped
	    && !info->tx_active)
		transmit_chars(info);
	spin_unlock_irqrestore(&zs_lock, flags);
	return total;
}
909 | |||
910 | static int rs_write_room(struct tty_struct *tty) | ||
911 | { | ||
912 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
913 | int ret; | ||
914 | |||
915 | if (serial_paranoia_check(info, tty->name, "rs_write_room")) | ||
916 | return 0; | ||
917 | ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1; | ||
918 | if (ret < 0) | ||
919 | ret = 0; | ||
920 | return ret; | ||
921 | } | ||
922 | |||
923 | static int rs_chars_in_buffer(struct tty_struct *tty) | ||
924 | { | ||
925 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
926 | |||
927 | if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) | ||
928 | return 0; | ||
929 | return info->xmit_cnt; | ||
930 | } | ||
931 | |||
932 | static void rs_flush_buffer(struct tty_struct *tty) | ||
933 | { | ||
934 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
935 | |||
936 | if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) | ||
937 | return; | ||
938 | spin_lock_irq(&zs_lock); | ||
939 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | ||
940 | spin_unlock_irq(&zs_lock); | ||
941 | tty_wakeup(tty); | ||
942 | } | ||
943 | |||
944 | /* | ||
945 | * ------------------------------------------------------------ | ||
946 | * rs_throttle() | ||
947 | * | ||
948 | * This routine is called by the upper-layer tty layer to signal that | ||
949 | * incoming characters should be throttled. | ||
950 | * ------------------------------------------------------------ | ||
951 | */ | ||
952 | static void rs_throttle(struct tty_struct * tty) | ||
953 | { | ||
954 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
955 | unsigned long flags; | ||
956 | |||
957 | #ifdef SERIAL_DEBUG_THROTTLE | ||
958 | char buf[64]; | ||
959 | |||
960 | printk("throttle %s: %d....\n", _tty_name(tty, buf), | ||
961 | tty->ldisc.chars_in_buffer(tty)); | ||
962 | #endif | ||
963 | |||
964 | if (serial_paranoia_check(info, tty->name, "rs_throttle")) | ||
965 | return; | ||
966 | |||
967 | if (I_IXOFF(tty)) { | ||
968 | spin_lock_irqsave(&zs_lock, flags); | ||
969 | info->x_char = STOP_CHAR(tty); | ||
970 | if (!info->tx_active) | ||
971 | transmit_chars(info); | ||
972 | spin_unlock_irqrestore(&zs_lock, flags); | ||
973 | } | ||
974 | |||
975 | if (C_CRTSCTS(tty)) { | ||
976 | zs_rtsdtr(info, RTS, 0); | ||
977 | } | ||
978 | } | ||
979 | |||
980 | static void rs_unthrottle(struct tty_struct * tty) | ||
981 | { | ||
982 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
983 | unsigned long flags; | ||
984 | |||
985 | #ifdef SERIAL_DEBUG_THROTTLE | ||
986 | char buf[64]; | ||
987 | |||
988 | printk("unthrottle %s: %d....\n", _tty_name(tty, buf), | ||
989 | tty->ldisc.chars_in_buffer(tty)); | ||
990 | #endif | ||
991 | |||
992 | if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) | ||
993 | return; | ||
994 | |||
995 | if (I_IXOFF(tty)) { | ||
996 | spin_lock_irqsave(&zs_lock, flags); | ||
997 | if (info->x_char) | ||
998 | info->x_char = 0; | ||
999 | else { | ||
1000 | info->x_char = START_CHAR(tty); | ||
1001 | if (!info->tx_active) | ||
1002 | transmit_chars(info); | ||
1003 | } | ||
1004 | spin_unlock_irqrestore(&zs_lock, flags); | ||
1005 | } | ||
1006 | |||
1007 | if (C_CRTSCTS(tty)) { | ||
1008 | zs_rtsdtr(info, RTS, 1); | ||
1009 | } | ||
1010 | } | ||
1011 | |||
1012 | /* | ||
1013 | * ------------------------------------------------------------ | ||
1014 | * rs_ioctl() and friends | ||
1015 | * ------------------------------------------------------------ | ||
1016 | */ | ||
1017 | |||
1018 | static int get_serial_info(struct dec_serial * info, | ||
1019 | struct serial_struct * retinfo) | ||
1020 | { | ||
1021 | struct serial_struct tmp; | ||
1022 | |||
1023 | if (!retinfo) | ||
1024 | return -EFAULT; | ||
1025 | memset(&tmp, 0, sizeof(tmp)); | ||
1026 | tmp.type = info->type; | ||
1027 | tmp.line = info->line; | ||
1028 | tmp.port = info->port; | ||
1029 | tmp.irq = info->irq; | ||
1030 | tmp.flags = info->flags; | ||
1031 | tmp.baud_base = info->baud_base; | ||
1032 | tmp.close_delay = info->close_delay; | ||
1033 | tmp.closing_wait = info->closing_wait; | ||
1034 | tmp.custom_divisor = info->custom_divisor; | ||
1035 | return copy_to_user(retinfo,&tmp,sizeof(*retinfo)) ? -EFAULT : 0; | ||
1036 | } | ||
1037 | |||
1038 | static int set_serial_info(struct dec_serial * info, | ||
1039 | struct serial_struct * new_info) | ||
1040 | { | ||
1041 | struct serial_struct new_serial; | ||
1042 | struct dec_serial old_info; | ||
1043 | int retval = 0; | ||
1044 | |||
1045 | if (!new_info) | ||
1046 | return -EFAULT; | ||
1047 | copy_from_user(&new_serial,new_info,sizeof(new_serial)); | ||
1048 | old_info = *info; | ||
1049 | |||
1050 | if (!capable(CAP_SYS_ADMIN)) { | ||
1051 | if ((new_serial.baud_base != info->baud_base) || | ||
1052 | (new_serial.type != info->type) || | ||
1053 | (new_serial.close_delay != info->close_delay) || | ||
1054 | ((new_serial.flags & ~ZILOG_USR_MASK) != | ||
1055 | (info->flags & ~ZILOG_USR_MASK))) | ||
1056 | return -EPERM; | ||
1057 | info->flags = ((info->flags & ~ZILOG_USR_MASK) | | ||
1058 | (new_serial.flags & ZILOG_USR_MASK)); | ||
1059 | info->custom_divisor = new_serial.custom_divisor; | ||
1060 | goto check_and_exit; | ||
1061 | } | ||
1062 | |||
1063 | if (info->count > 1) | ||
1064 | return -EBUSY; | ||
1065 | |||
1066 | /* | ||
1067 | * OK, past this point, all the error checking has been done. | ||
1068 | * At this point, we start making changes..... | ||
1069 | */ | ||
1070 | |||
1071 | info->baud_base = new_serial.baud_base; | ||
1072 | info->flags = ((info->flags & ~ZILOG_FLAGS) | | ||
1073 | (new_serial.flags & ZILOG_FLAGS)); | ||
1074 | info->type = new_serial.type; | ||
1075 | info->close_delay = new_serial.close_delay; | ||
1076 | info->closing_wait = new_serial.closing_wait; | ||
1077 | |||
1078 | check_and_exit: | ||
1079 | retval = zs_startup(info); | ||
1080 | return retval; | ||
1081 | } | ||
1082 | |||
1083 | /* | ||
1084 | * get_lsr_info - get line status register info | ||
1085 | * | ||
1086 | * Purpose: Let user call ioctl() to get info when the UART physically | ||
1087 | * is emptied. On bus types like RS485, the transmitter must | ||
1088 | * release the bus after transmitting. This must be done when | ||
1089 | * the transmit shift register is empty, not be done when the | ||
1090 | * transmit holding register is empty. This functionality | ||
1091 | * allows an RS485 driver to be written in user space. | ||
1092 | */ | ||
1093 | static int get_lsr_info(struct dec_serial * info, unsigned int *value) | ||
1094 | { | ||
1095 | unsigned char status; | ||
1096 | |||
1097 | spin_lock(&zs_lock); | ||
1098 | status = read_zsreg(info->zs_channel, 0); | ||
1099 | spin_unlock_irq(&zs_lock); | ||
1100 | put_user(status,value); | ||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static int rs_tiocmget(struct tty_struct *tty, struct file *file) | ||
1105 | { | ||
1106 | struct dec_serial * info = (struct dec_serial *)tty->driver_data; | ||
1107 | unsigned char control, status_a, status_b; | ||
1108 | unsigned int result; | ||
1109 | |||
1110 | if (info->hook) | ||
1111 | return -ENODEV; | ||
1112 | |||
1113 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | ||
1114 | return -ENODEV; | ||
1115 | |||
1116 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
1117 | return -EIO; | ||
1118 | |||
1119 | if (info->zs_channel == info->zs_chan_a) | ||
1120 | result = 0; | ||
1121 | else { | ||
1122 | spin_lock(&zs_lock); | ||
1123 | control = info->zs_chan_a->curregs[5]; | ||
1124 | status_a = read_zsreg(info->zs_chan_a, 0); | ||
1125 | status_b = read_zsreg(info->zs_channel, 0); | ||
1126 | spin_unlock_irq(&zs_lock); | ||
1127 | result = ((control & RTS) ? TIOCM_RTS: 0) | ||
1128 | | ((control & DTR) ? TIOCM_DTR: 0) | ||
1129 | | ((status_b & DCD) ? TIOCM_CAR: 0) | ||
1130 | | ((status_a & DCD) ? TIOCM_RNG: 0) | ||
1131 | | ((status_a & SYNC_HUNT) ? TIOCM_DSR: 0) | ||
1132 | | ((status_b & CTS) ? TIOCM_CTS: 0); | ||
1133 | } | ||
1134 | return result; | ||
1135 | } | ||
1136 | |||
1137 | static int rs_tiocmset(struct tty_struct *tty, struct file *file, | ||
1138 | unsigned int set, unsigned int clear) | ||
1139 | { | ||
1140 | struct dec_serial * info = (struct dec_serial *)tty->driver_data; | ||
1141 | |||
1142 | if (info->hook) | ||
1143 | return -ENODEV; | ||
1144 | |||
1145 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | ||
1146 | return -ENODEV; | ||
1147 | |||
1148 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
1149 | return -EIO; | ||
1150 | |||
1151 | if (info->zs_channel == info->zs_chan_a) | ||
1152 | return 0; | ||
1153 | |||
1154 | spin_lock(&zs_lock); | ||
1155 | if (set & TIOCM_RTS) | ||
1156 | info->zs_chan_a->curregs[5] |= RTS; | ||
1157 | if (set & TIOCM_DTR) | ||
1158 | info->zs_chan_a->curregs[5] |= DTR; | ||
1159 | if (clear & TIOCM_RTS) | ||
1160 | info->zs_chan_a->curregs[5] &= ~RTS; | ||
1161 | if (clear & TIOCM_DTR) | ||
1162 | info->zs_chan_a->curregs[5] &= ~DTR; | ||
1163 | write_zsreg(info->zs_chan_a, 5, info->zs_chan_a->curregs[5]); | ||
1164 | spin_unlock_irq(&zs_lock); | ||
1165 | return 0; | ||
1166 | } | ||
1167 | |||
1168 | /* | ||
1169 | * rs_break - turn transmit break condition on/off | ||
1170 | */ | ||
1171 | static void rs_break(struct tty_struct *tty, int break_state) | ||
1172 | { | ||
1173 | struct dec_serial *info = (struct dec_serial *) tty->driver_data; | ||
1174 | unsigned long flags; | ||
1175 | |||
1176 | if (serial_paranoia_check(info, tty->name, "rs_break")) | ||
1177 | return; | ||
1178 | if (!info->port) | ||
1179 | return; | ||
1180 | |||
1181 | spin_lock_irqsave(&zs_lock, flags); | ||
1182 | if (break_state == -1) | ||
1183 | info->zs_channel->curregs[5] |= SND_BRK; | ||
1184 | else | ||
1185 | info->zs_channel->curregs[5] &= ~SND_BRK; | ||
1186 | write_zsreg(info->zs_channel, 5, info->zs_channel->curregs[5]); | ||
1187 | spin_unlock_irqrestore(&zs_lock, flags); | ||
1188 | } | ||
1189 | |||
1190 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | ||
1191 | unsigned int cmd, unsigned long arg) | ||
1192 | { | ||
1193 | struct dec_serial * info = (struct dec_serial *)tty->driver_data; | ||
1194 | |||
1195 | if (info->hook) | ||
1196 | return -ENODEV; | ||
1197 | |||
1198 | if (serial_paranoia_check(info, tty->name, "rs_ioctl")) | ||
1199 | return -ENODEV; | ||
1200 | |||
1201 | if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && | ||
1202 | (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) && | ||
1203 | (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) { | ||
1204 | if (tty->flags & (1 << TTY_IO_ERROR)) | ||
1205 | return -EIO; | ||
1206 | } | ||
1207 | |||
1208 | switch (cmd) { | ||
1209 | case TIOCGSERIAL: | ||
1210 | if (!access_ok(VERIFY_WRITE, (void *)arg, | ||
1211 | sizeof(struct serial_struct))) | ||
1212 | return -EFAULT; | ||
1213 | return get_serial_info(info, (struct serial_struct *)arg); | ||
1214 | |||
1215 | case TIOCSSERIAL: | ||
1216 | return set_serial_info(info, (struct serial_struct *)arg); | ||
1217 | |||
1218 | case TIOCSERGETLSR: /* Get line status register */ | ||
1219 | if (!access_ok(VERIFY_WRITE, (void *)arg, | ||
1220 | sizeof(unsigned int))) | ||
1221 | return -EFAULT; | ||
1222 | return get_lsr_info(info, (unsigned int *)arg); | ||
1223 | |||
1224 | case TIOCSERGSTRUCT: | ||
1225 | if (!access_ok(VERIFY_WRITE, (void *)arg, | ||
1226 | sizeof(struct dec_serial))) | ||
1227 | return -EFAULT; | ||
1228 | copy_from_user((struct dec_serial *)arg, info, | ||
1229 | sizeof(struct dec_serial)); | ||
1230 | return 0; | ||
1231 | |||
1232 | default: | ||
1233 | return -ENOIOCTLCMD; | ||
1234 | } | ||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1238 | static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | ||
1239 | { | ||
1240 | struct dec_serial *info = (struct dec_serial *)tty->driver_data; | ||
1241 | int was_stopped; | ||
1242 | |||
1243 | if (tty->termios->c_cflag == old_termios->c_cflag) | ||
1244 | return; | ||
1245 | was_stopped = info->tx_stopped; | ||
1246 | |||
1247 | change_speed(info); | ||
1248 | |||
1249 | if (was_stopped && !info->tx_stopped) | ||
1250 | rs_start(tty); | ||
1251 | } | ||
1252 | |||
1253 | /* | ||
1254 | * ------------------------------------------------------------ | ||
1255 | * rs_close() | ||
1256 | * | ||
1257 | * This routine is called when the serial port gets closed. | ||
1258 | * Wait for the last remaining data to be sent. | ||
1259 | * ------------------------------------------------------------ | ||
1260 | */ | ||
1261 | static void rs_close(struct tty_struct *tty, struct file * filp) | ||
1262 | { | ||
1263 | struct dec_serial * info = (struct dec_serial *)tty->driver_data; | ||
1264 | unsigned long flags; | ||
1265 | |||
1266 | if (!info || serial_paranoia_check(info, tty->name, "rs_close")) | ||
1267 | return; | ||
1268 | |||
1269 | spin_lock_irqsave(&zs_lock, flags); | ||
1270 | |||
1271 | if (tty_hung_up_p(filp)) { | ||
1272 | spin_unlock_irqrestore(&zs_lock, flags); | ||
1273 | return; | ||
1274 | } | ||
1275 | |||
1276 | #ifdef SERIAL_DEBUG_OPEN | ||
1277 | printk("rs_close ttyS%d, count = %d\n", info->line, info->count); | ||
1278 | #endif | ||
1279 | if ((tty->count == 1) && (info->count != 1)) { | ||
1280 | /* | ||
1281 | * Uh, oh. tty->count is 1, which means that the tty | ||
1282 | * structure will be freed. Info->count should always | ||
1283 | * be one in these conditions. If it's greater than | ||
1284 | * one, we've got real problems, since it means the | ||
1285 | * serial port won't be shutdown. | ||
1286 | */ | ||
1287 | printk("rs_close: bad serial port count; tty->count is 1, " | ||
1288 | "info->count is %d\n", info->count); | ||
1289 | info->count = 1; | ||
1290 | } | ||
1291 | if (--info->count < 0) { | ||
1292 | printk("rs_close: bad serial port count for ttyS%d: %d\n", | ||
1293 | info->line, info->count); | ||
1294 | info->count = 0; | ||
1295 | } | ||
1296 | if (info->count) { | ||
1297 | spin_unlock_irqrestore(&zs_lock, flags); | ||
1298 | return; | ||
1299 | } | ||
1300 | info->flags |= ZILOG_CLOSING; | ||
1301 | /* | ||
1302 | * Now we wait for the transmit buffer to clear; and we notify | ||
1303 | * the line discipline to only process XON/XOFF characters. | ||
1304 | */ | ||
1305 | tty->closing = 1; | ||
1306 | if (info->closing_wait != ZILOG_CLOSING_WAIT_NONE) | ||
1307 | tty_wait_until_sent(tty, info->closing_wait); | ||
1308 | /* | ||
1309 | * At this point we stop accepting input. To do this, we | ||
1310 | * disable the receiver and receive interrupts. | ||
1311 | */ | ||
1312 | info->zs_channel->curregs[3] &= ~RxENABLE; | ||
1313 | write_zsreg(info->zs_channel, 3, info->zs_channel->curregs[3]); | ||
1314 | info->zs_channel->curregs[1] = 0; /* disable any rx ints */ | ||
1315 | write_zsreg(info->zs_channel, 1, info->zs_channel->curregs[1]); | ||
1316 | ZS_CLEARFIFO(info->zs_channel); | ||
1317 | if (info->flags & ZILOG_INITIALIZED) { | ||
1318 | /* | ||
1319 | * Before we drop DTR, make sure the SCC transmitter | ||
1320 | * has completely drained. | ||
1321 | */ | ||
1322 | rs_wait_until_sent(tty, info->timeout); | ||
1323 | } | ||
1324 | |||
1325 | shutdown(info); | ||
1326 | if (tty->driver->flush_buffer) | ||
1327 | tty->driver->flush_buffer(tty); | ||
1328 | tty_ldisc_flush(tty); | ||
1329 | tty->closing = 0; | ||
1330 | info->event = 0; | ||
1331 | info->tty = 0; | ||
1332 | if (info->blocked_open) { | ||
1333 | if (info->close_delay) { | ||
1334 | msleep_interruptible(jiffies_to_msecs(info->close_delay)); | ||
1335 | } | ||
1336 | wake_up_interruptible(&info->open_wait); | ||
1337 | } | ||
1338 | info->flags &= ~(ZILOG_NORMAL_ACTIVE|ZILOG_CLOSING); | ||
1339 | wake_up_interruptible(&info->close_wait); | ||
1340 | spin_unlock_irqrestore(&zs_lock, flags); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * rs_wait_until_sent() --- wait until the transmitter is empty | ||
1345 | */ | ||
1346 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | ||
1347 | { | ||
1348 | struct dec_serial *info = (struct dec_serial *) tty->driver_data; | ||
1349 | unsigned long orig_jiffies; | ||
1350 | int char_time; | ||
1351 | |||
1352 | if (serial_paranoia_check(info, tty->name, "rs_wait_until_sent")) | ||
1353 | return; | ||
1354 | |||
1355 | orig_jiffies = jiffies; | ||
1356 | /* | ||
1357 | * Set the check interval to be 1/5 of the estimated time to | ||
1358 | * send a single character, and make it at least 1. The check | ||
1359 | * interval should also be less than the timeout. | ||
1360 | */ | ||
1361 | char_time = (info->timeout - HZ/50) / info->xmit_fifo_size; | ||
1362 | char_time = char_time / 5; | ||
1363 | if (char_time == 0) | ||
1364 | char_time = 1; | ||
1365 | if (timeout) | ||
1366 | char_time = min(char_time, timeout); | ||
1367 | while ((read_zsreg(info->zs_channel, 1) & Tx_BUF_EMP) == 0) { | ||
1368 | msleep_interruptible(jiffies_to_msecs(char_time)); | ||
1369 | if (signal_pending(current)) | ||
1370 | break; | ||
1371 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) | ||
1372 | break; | ||
1373 | } | ||
1374 | current->state = TASK_RUNNING; | ||
1375 | } | ||
1376 | |||
1377 | /* | ||
1378 | * rs_hangup() --- called by tty_hangup() when a hangup is signaled. | ||
1379 | */ | ||
1380 | static void rs_hangup(struct tty_struct *tty) | ||
1381 | { | ||
1382 | struct dec_serial * info = (struct dec_serial *)tty->driver_data; | ||
1383 | |||
1384 | if (serial_paranoia_check(info, tty->name, "rs_hangup")) | ||
1385 | return; | ||
1386 | |||
1387 | rs_flush_buffer(tty); | ||
1388 | shutdown(info); | ||
1389 | info->event = 0; | ||
1390 | info->count = 0; | ||
1391 | info->flags &= ~ZILOG_NORMAL_ACTIVE; | ||
1392 | info->tty = 0; | ||
1393 | wake_up_interruptible(&info->open_wait); | ||
1394 | } | ||
1395 | |||
1396 | /* | ||
1397 | * ------------------------------------------------------------ | ||
1398 | * rs_open() and friends | ||
1399 | * ------------------------------------------------------------ | ||
1400 | */ | ||
1401 | static int block_til_ready(struct tty_struct *tty, struct file * filp, | ||
1402 | struct dec_serial *info) | ||
1403 | { | ||
1404 | DECLARE_WAITQUEUE(wait, current); | ||
1405 | int retval; | ||
1406 | int do_clocal = 0; | ||
1407 | |||
1408 | /* | ||
1409 | * If the device is in the middle of being closed, then block | ||
1410 | * until it's done, and then try again. | ||
1411 | */ | ||
1412 | if (info->flags & ZILOG_CLOSING) { | ||
1413 | interruptible_sleep_on(&info->close_wait); | ||
1414 | #ifdef SERIAL_DO_RESTART | ||
1415 | return ((info->flags & ZILOG_HUP_NOTIFY) ? | ||
1416 | -EAGAIN : -ERESTARTSYS); | ||
1417 | #else | ||
1418 | return -EAGAIN; | ||
1419 | #endif | ||
1420 | } | ||
1421 | |||
1422 | /* | ||
1423 | * If non-blocking mode is set, or the port is not enabled, | ||
1424 | * then make the check up front and then exit. | ||
1425 | */ | ||
1426 | if ((filp->f_flags & O_NONBLOCK) || | ||
1427 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
1428 | info->flags |= ZILOG_NORMAL_ACTIVE; | ||
1429 | return 0; | ||
1430 | } | ||
1431 | |||
1432 | if (tty->termios->c_cflag & CLOCAL) | ||
1433 | do_clocal = 1; | ||
1434 | |||
1435 | /* | ||
1436 | * Block waiting for the carrier detect and the line to become | ||
1437 | * free (i.e., not in use by the callout). While we are in | ||
1438 | * this loop, info->count is dropped by one, so that | ||
1439 | * rs_close() knows when to free things. We restore it upon | ||
1440 | * exit, either normal or abnormal. | ||
1441 | */ | ||
1442 | retval = 0; | ||
1443 | add_wait_queue(&info->open_wait, &wait); | ||
1444 | #ifdef SERIAL_DEBUG_OPEN | ||
1445 | printk("block_til_ready before block: ttyS%d, count = %d\n", | ||
1446 | info->line, info->count); | ||
1447 | #endif | ||
1448 | spin_lock(&zs_lock); | ||
1449 | if (!tty_hung_up_p(filp)) | ||
1450 | info->count--; | ||
1451 | spin_unlock_irq(&zs_lock); | ||
1452 | info->blocked_open++; | ||
1453 | while (1) { | ||
1454 | spin_lock(&zs_lock); | ||
1455 | if (tty->termios->c_cflag & CBAUD) | ||
1456 | zs_rtsdtr(info, RTS | DTR, 1); | ||
1457 | spin_unlock_irq(&zs_lock); | ||
1458 | set_current_state(TASK_INTERRUPTIBLE); | ||
1459 | if (tty_hung_up_p(filp) || | ||
1460 | !(info->flags & ZILOG_INITIALIZED)) { | ||
1461 | #ifdef SERIAL_DO_RESTART | ||
1462 | if (info->flags & ZILOG_HUP_NOTIFY) | ||
1463 | retval = -EAGAIN; | ||
1464 | else | ||
1465 | retval = -ERESTARTSYS; | ||
1466 | #else | ||
1467 | retval = -EAGAIN; | ||
1468 | #endif | ||
1469 | break; | ||
1470 | } | ||
1471 | if (!(info->flags & ZILOG_CLOSING) && | ||
1472 | (do_clocal || (read_zsreg(info->zs_channel, 0) & DCD))) | ||
1473 | break; | ||
1474 | if (signal_pending(current)) { | ||
1475 | retval = -ERESTARTSYS; | ||
1476 | break; | ||
1477 | } | ||
1478 | #ifdef SERIAL_DEBUG_OPEN | ||
1479 | printk("block_til_ready blocking: ttyS%d, count = %d\n", | ||
1480 | info->line, info->count); | ||
1481 | #endif | ||
1482 | schedule(); | ||
1483 | } | ||
1484 | current->state = TASK_RUNNING; | ||
1485 | remove_wait_queue(&info->open_wait, &wait); | ||
1486 | if (!tty_hung_up_p(filp)) | ||
1487 | info->count++; | ||
1488 | info->blocked_open--; | ||
1489 | #ifdef SERIAL_DEBUG_OPEN | ||
1490 | printk("block_til_ready after blocking: ttyS%d, count = %d\n", | ||
1491 | info->line, info->count); | ||
1492 | #endif | ||
1493 | if (retval) | ||
1494 | return retval; | ||
1495 | info->flags |= ZILOG_NORMAL_ACTIVE; | ||
1496 | return 0; | ||
1497 | } | ||
1498 | |||
1499 | /* | ||
1500 | * This routine is called whenever a serial port is opened. It | ||
1501 | * enables interrupts for a serial port, linking in its ZILOG structure into | ||
1502 | * the IRQ chain. It also performs the serial-specific | ||
1503 | * initialization for the tty structure. | ||
1504 | */ | ||
1505 | static int rs_open(struct tty_struct *tty, struct file * filp) | ||
1506 | { | ||
1507 | struct dec_serial *info; | ||
1508 | int retval, line; | ||
1509 | |||
1510 | line = tty->index; | ||
1511 | if ((line < 0) || (line >= zs_channels_found)) | ||
1512 | return -ENODEV; | ||
1513 | info = zs_soft + line; | ||
1514 | |||
1515 | if (info->hook) | ||
1516 | return -ENODEV; | ||
1517 | |||
1518 | if (serial_paranoia_check(info, tty->name, "rs_open")) | ||
1519 | return -ENODEV; | ||
1520 | #ifdef SERIAL_DEBUG_OPEN | ||
1521 | printk("rs_open %s, count = %d\n", tty->name, info->count); | ||
1522 | #endif | ||
1523 | |||
1524 | info->count++; | ||
1525 | tty->driver_data = info; | ||
1526 | info->tty = tty; | ||
1527 | |||
1528 | /* | ||
1529 | * If the port is the middle of closing, bail out now | ||
1530 | */ | ||
1531 | if (tty_hung_up_p(filp) || | ||
1532 | (info->flags & ZILOG_CLOSING)) { | ||
1533 | if (info->flags & ZILOG_CLOSING) | ||
1534 | interruptible_sleep_on(&info->close_wait); | ||
1535 | #ifdef SERIAL_DO_RESTART | ||
1536 | return ((info->flags & ZILOG_HUP_NOTIFY) ? | ||
1537 | -EAGAIN : -ERESTARTSYS); | ||
1538 | #else | ||
1539 | return -EAGAIN; | ||
1540 | #endif | ||
1541 | } | ||
1542 | |||
1543 | /* | ||
1544 | * Start up serial port | ||
1545 | */ | ||
1546 | retval = zs_startup(info); | ||
1547 | if (retval) | ||
1548 | return retval; | ||
1549 | |||
1550 | retval = block_til_ready(tty, filp, info); | ||
1551 | if (retval) { | ||
1552 | #ifdef SERIAL_DEBUG_OPEN | ||
1553 | printk("rs_open returning after block_til_ready with %d\n", | ||
1554 | retval); | ||
1555 | #endif | ||
1556 | return retval; | ||
1557 | } | ||
1558 | |||
1559 | #ifdef CONFIG_SERIAL_DEC_CONSOLE | ||
1560 | if (zs_console.cflag && zs_console.index == line) { | ||
1561 | tty->termios->c_cflag = zs_console.cflag; | ||
1562 | zs_console.cflag = 0; | ||
1563 | change_speed(info); | ||
1564 | } | ||
1565 | #endif | ||
1566 | |||
1567 | #ifdef SERIAL_DEBUG_OPEN | ||
1568 | printk("rs_open %s successful...", tty->name); | ||
1569 | #endif | ||
1570 | /* tty->low_latency = 1; */ | ||
1571 | return 0; | ||
1572 | } | ||
1573 | |||
1574 | /* Finally, routines used to initialize the serial driver. */ | ||
1575 | |||
/* Print the driver banner once during initialization. */
static void __init show_serial_version(void)
{
	printk("DECstation Z8530 serial driver version 0.09\n");
}
1580 | |||
/* Initialize Z8530s zs_channels
 *
 * Discovers the SCC chips for the current machine type, fills in the
 * zs_channels/zs_soft arrays, links the channels into zs_chain, seeds
 * each channel's register shadow from zs_init_regs and hardware-resets
 * each chip.  Sets zs_channels_found as a side effect.  May be called
 * very early (before memory management is up) for the console path.
 */

static void __init probe_sccs(void)
{
	struct dec_serial **pp;
	int i, n, n_chips = 0, n_channels, chip, channel;
	unsigned long flags;

	/*
	 * did we get here by accident?
	 */
	if(!BUS_PRESENT) {
		printk("Not on JUNKIO machine, skipping probe_sccs\n");
		return;
	}

	/* Chip count, register layout and IRQs depend on the machine. */
	switch(mips_machtype) {
#ifdef CONFIG_MACH_DECSTATION
	case MACH_DS5000_2X0:
	case MACH_DS5900:
		n_chips = 2;
		zs_parms = &ds_parms;
		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
		zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
		break;
	case MACH_DS5000_1XX:
		n_chips = 2;
		zs_parms = &ds_parms;
		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
		zs_parms->irq1 = dec_interrupt[DEC_IRQ_SCC1];
		break;
	case MACH_DS5000_XX:
		n_chips = 1;
		zs_parms = &ds_parms;
		zs_parms->irq0 = dec_interrupt[DEC_IRQ_SCC0];
		break;
#endif
	default:
		panic("zs: unsupported bus");
	}
	if (!zs_parms)
		panic("zs: uninitialized parms");

	pp = &zs_chain;

	n_channels = 0;

	for (chip = 0; chip < n_chips; chip++) {
		for (channel = 0; channel <= 1; channel++) {
			/*
			 * The sccs reside on the high byte of the 16 bit IOBUS
			 */
			zs_channels[n_channels].control =
				(volatile void *)CKSEG1ADDR(dec_kn_slot_base +
				(0 == chip ? zs_parms->scc0 : zs_parms->scc1) +
				(0 == channel ? zs_parms->channel_a_offset :
						zs_parms->channel_b_offset));
			zs_channels[n_channels].data =
				zs_channels[n_channels].control + 4;

#ifndef CONFIG_SERIAL_DEC_CONSOLE
			/*
			 * We're called early and memory managment isn't up, yet.
			 * Thus request_region would fail.
			 */
			if (!request_region((unsigned long)
					zs_channels[n_channels].control,
					ZS_CHAN_IO_SIZE, "SCC"))
				panic("SCC I/O region is not free");
#endif
			zs_soft[n_channels].zs_channel = &zs_channels[n_channels];
			/* HACK alert! */
			if (!(chip & 1))
				zs_soft[n_channels].irq = zs_parms->irq0;
			else
				zs_soft[n_channels].irq = zs_parms->irq1;

			/*
			 * Identification of channel A. Location of channel A
			 * inside chip depends on mapping of internal address
			 * the chip decodes channels by.
			 * CHANNEL_A_NR returns either 0 (in case of
			 * DECstations) or 1 (in case of Baget).
			 */
			if (CHANNEL_A_NR == channel)
				zs_soft[n_channels].zs_chan_a =
					&zs_channels[n_channels+1-2*CHANNEL_A_NR];
			else
				zs_soft[n_channels].zs_chan_a =
					&zs_channels[n_channels];

			*pp = &zs_soft[n_channels];
			pp = &zs_soft[n_channels].zs_next;
			n_channels++;
		}
	}

	*pp = 0;
	zs_channels_found = n_channels;

	/* Seed every channel's register shadow with the defaults. */
	for (n = 0; n < zs_channels_found; n++) {
		for (i = 0; i < 16; i++) {
			zs_soft[n].zs_channel->curregs[i] = zs_init_regs[i];
		}
	}

	/* Hardware-reset each chip once (via channel A of each pair),
	 * then program every channel from its shadow registers. */
	spin_lock_irqsave(&zs_lock, flags);
	for (n = 0; n < zs_channels_found; n++) {
		if (n % 2 == 0) {
			write_zsreg(zs_soft[n].zs_chan_a, R9, FHWRES);
			udelay(10);
			write_zsreg(zs_soft[n].zs_chan_a, R9, 0);
		}
		load_zsregs(zs_soft[n].zs_channel,
			    zs_soft[n].zs_channel->curregs);
	}
	spin_unlock_irqrestore(&zs_lock, flags);
}
1700 | |||
/* tty callbacks handed to the tty core via tty_set_operations(). */
static const struct tty_operations serial_ops = {
	.open = rs_open,
	.close = rs_close,
	.write = rs_write,
	.flush_chars = rs_flush_chars,
	.write_room = rs_write_room,
	.chars_in_buffer = rs_chars_in_buffer,
	.flush_buffer = rs_flush_buffer,
	.ioctl = rs_ioctl,
	.throttle = rs_throttle,
	.unthrottle = rs_unthrottle,
	.set_termios = rs_set_termios,
	.stop = rs_stop,
	.start = rs_start,
	.hangup = rs_hangup,
	.break_ctl = rs_break,
	.wait_until_sent = rs_wait_until_sent,
	.tiocmget = rs_tiocmget,
	.tiocmset = rs_tiocmset,
};
1721 | |||
/* zs_init inits the driver
 *
 * Probes the SCCs if that has not happened yet, allocates and
 * registers the tty driver, initializes per-line software state,
 * registers each tty device, requests the (shared) SCC interrupts and
 * starts up any lines owned by an in-kernel hook.  Returns 0 on
 * success, -ENODEV off-platform, -ENOMEM on allocation failure.
 */
int __init zs_init(void)
{
	int channel, i;
	struct dec_serial *info;

	if(!BUS_PRESENT)
		return -ENODEV;

	/* Find out how many Z8530 SCCs we have */
	if (zs_chain == 0)
		probe_sccs();
	serial_driver = alloc_tty_driver(zs_channels_found);
	if (!serial_driver)
		return -ENOMEM;

	show_serial_version();

	/* Initialize the tty_driver structure */
	/* Not all of this is exactly right for us. */

	serial_driver->owner = THIS_MODULE;
	serial_driver->name = "ttyS";
	serial_driver->major = TTY_MAJOR;
	serial_driver->minor_start = 64;
	serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	serial_driver->subtype = SERIAL_TYPE_NORMAL;
	serial_driver->init_termios = tty_std_termios;
	serial_driver->init_termios.c_cflag =
		B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	serial_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
	tty_set_operations(serial_driver, &serial_ops);

	if (tty_register_driver(serial_driver))
		panic("Couldn't register serial driver");

	/* Per-line software state; lines owned by a hook are handed to
	 * the hook's init_info callback instead. */
	for (info = zs_chain, i = 0; info; info = info->zs_next, i++) {

		/* Needed before interrupts are enabled. */
		info->tty = 0;
		info->x_char = 0;

		if (info->hook && info->hook->init_info) {
			(*info->hook->init_info)(info);
			continue;
		}

		info->magic = SERIAL_MAGIC;
		info->port = (int) info->zs_channel->control;
		info->line = i;
		info->custom_divisor = 16;
		info->close_delay = 50;
		info->closing_wait = 3000;
		info->event = 0;
		info->count = 0;
		info->blocked_open = 0;
		tasklet_init(&info->tlet, do_softint, (unsigned long)info);
		init_waitqueue_head(&info->open_wait);
		init_waitqueue_head(&info->close_wait);
		printk("ttyS%02d at 0x%08x (irq = %d) is a Z85C30 SCC\n",
		       info->line, info->port, info->irq);
		tty_register_device(serial_driver, info->line, NULL);

	}

	/* Request the interrupts (shared between channels of a chip)
	 * and bring up any hook-owned channels immediately. */
	for (channel = 0; channel < zs_channels_found; ++channel) {
		zs_soft[channel].clk_divisor = 16;
		zs_soft[channel].zs_baud = get_zsbaud(&zs_soft[channel]);

		if (request_irq(zs_soft[channel].irq, rs_interrupt, IRQF_SHARED,
				"scc", &zs_soft[channel]))
			printk(KERN_ERR "decserial: can't get irq %d\n",
			       zs_soft[channel].irq);

		if (zs_soft[channel].hook) {
			zs_startup(&zs_soft[channel]);
			if (zs_soft[channel].hook->init_channel)
				(*zs_soft[channel].hook->init_channel)
					(&zs_soft[channel]);
		}
	}

	return 0;
}
1806 | |||
1807 | /* | ||
1808 | * polling I/O routines | ||
1809 | */ | ||
1810 | static int zs_poll_tx_char(void *handle, unsigned char ch) | ||
1811 | { | ||
1812 | struct dec_serial *info = handle; | ||
1813 | struct dec_zschannel *chan = info->zs_channel; | ||
1814 | int ret; | ||
1815 | |||
1816 | if(chan) { | ||
1817 | int loops = 10000; | ||
1818 | |||
1819 | while (loops && !(read_zsreg(chan, 0) & Tx_BUF_EMP)) | ||
1820 | loops--; | ||
1821 | |||
1822 | if (loops) { | ||
1823 | write_zsdata(chan, ch); | ||
1824 | ret = 0; | ||
1825 | } else | ||
1826 | ret = -EAGAIN; | ||
1827 | |||
1828 | return ret; | ||
1829 | } else | ||
1830 | return -ENODEV; | ||
1831 | } | ||
1832 | |||
1833 | static int zs_poll_rx_char(void *handle) | ||
1834 | { | ||
1835 | struct dec_serial *info = handle; | ||
1836 | struct dec_zschannel *chan = info->zs_channel; | ||
1837 | int ret; | ||
1838 | |||
1839 | if(chan) { | ||
1840 | int loops = 10000; | ||
1841 | |||
1842 | while (loops && !(read_zsreg(chan, 0) & Rx_CH_AV)) | ||
1843 | loops--; | ||
1844 | |||
1845 | if (loops) | ||
1846 | ret = read_zsdata(chan); | ||
1847 | else | ||
1848 | ret = -EAGAIN; | ||
1849 | |||
1850 | return ret; | ||
1851 | } else | ||
1852 | return -ENODEV; | ||
1853 | } | ||
1854 | |||
1855 | int register_zs_hook(unsigned int channel, struct dec_serial_hook *hook) | ||
1856 | { | ||
1857 | struct dec_serial *info = &zs_soft[channel]; | ||
1858 | |||
1859 | if (info->hook) { | ||
1860 | printk("%s: line %d has already a hook registered\n", | ||
1861 | __FUNCTION__, channel); | ||
1862 | |||
1863 | return 0; | ||
1864 | } else { | ||
1865 | hook->poll_rx_char = zs_poll_rx_char; | ||
1866 | hook->poll_tx_char = zs_poll_tx_char; | ||
1867 | info->hook = hook; | ||
1868 | |||
1869 | return 1; | ||
1870 | } | ||
1871 | } | ||
1872 | |||
1873 | int unregister_zs_hook(unsigned int channel) | ||
1874 | { | ||
1875 | struct dec_serial *info = &zs_soft[channel]; | ||
1876 | |||
1877 | if (info->hook) { | ||
1878 | info->hook = NULL; | ||
1879 | return 1; | ||
1880 | } else { | ||
1881 | printk("%s: trying to unregister hook on line %d," | ||
1882 | " but none is registered\n", __FUNCTION__, channel); | ||
1883 | return 0; | ||
1884 | } | ||
1885 | } | ||
1886 | |||
1887 | /* | ||
1888 | * ------------------------------------------------------------ | ||
1889 | * Serial console driver | ||
1890 | * ------------------------------------------------------------ | ||
1891 | */ | ||
1892 | #ifdef CONFIG_SERIAL_DEC_CONSOLE | ||
1893 | |||
1894 | |||
1895 | /* | ||
1896 | * Print a string to the serial port trying not to disturb | ||
1897 | * any possible real use of the port... | ||
1898 | */ | ||
1899 | static void serial_console_write(struct console *co, const char *s, | ||
1900 | unsigned count) | ||
1901 | { | ||
1902 | struct dec_serial *info; | ||
1903 | int i; | ||
1904 | |||
1905 | info = zs_soft + co->index; | ||
1906 | |||
1907 | for (i = 0; i < count; i++, s++) { | ||
1908 | if(*s == '\n') | ||
1909 | zs_poll_tx_char(info, '\r'); | ||
1910 | zs_poll_tx_char(info, *s); | ||
1911 | } | ||
1912 | } | ||
1913 | |||
/* Map the console to our tty driver; the console index is the line. */
static struct tty_driver *serial_console_device(struct console *c, int *index)
{
	*index = c->index;
	return serial_driver;
}
1919 | |||
1920 | /* | ||
1921 | * Setup initial baud/bits/parity. We do two things here: | ||
1922 | * - construct a cflag setting for the first rs_open() | ||
1923 | * - initialize the serial port | ||
1924 | * Return non-zero if we didn't find a serial port. | ||
1925 | */ | ||
1926 | static int __init serial_console_setup(struct console *co, char *options) | ||
1927 | { | ||
1928 | struct dec_serial *info; | ||
1929 | int baud = 9600; | ||
1930 | int bits = 8; | ||
1931 | int parity = 'n'; | ||
1932 | int cflag = CREAD | HUPCL | CLOCAL; | ||
1933 | int clk_divisor = 16; | ||
1934 | int brg; | ||
1935 | char *s; | ||
1936 | unsigned long flags; | ||
1937 | |||
1938 | if(!BUS_PRESENT) | ||
1939 | return -ENODEV; | ||
1940 | |||
1941 | info = zs_soft + co->index; | ||
1942 | |||
1943 | if (zs_chain == 0) | ||
1944 | probe_sccs(); | ||
1945 | |||
1946 | info->is_cons = 1; | ||
1947 | |||
1948 | if (options) { | ||
1949 | baud = simple_strtoul(options, NULL, 10); | ||
1950 | s = options; | ||
1951 | while(*s >= '0' && *s <= '9') | ||
1952 | s++; | ||
1953 | if (*s) | ||
1954 | parity = *s++; | ||
1955 | if (*s) | ||
1956 | bits = *s - '0'; | ||
1957 | } | ||
1958 | |||
1959 | /* | ||
1960 | * Now construct a cflag setting. | ||
1961 | */ | ||
1962 | switch(baud) { | ||
1963 | case 1200: | ||
1964 | cflag |= B1200; | ||
1965 | break; | ||
1966 | case 2400: | ||
1967 | cflag |= B2400; | ||
1968 | break; | ||
1969 | case 4800: | ||
1970 | cflag |= B4800; | ||
1971 | break; | ||
1972 | case 19200: | ||
1973 | cflag |= B19200; | ||
1974 | break; | ||
1975 | case 38400: | ||
1976 | cflag |= B38400; | ||
1977 | break; | ||
1978 | case 57600: | ||
1979 | cflag |= B57600; | ||
1980 | break; | ||
1981 | case 115200: | ||
1982 | cflag |= B115200; | ||
1983 | break; | ||
1984 | case 9600: | ||
1985 | default: | ||
1986 | cflag |= B9600; | ||
1987 | /* | ||
1988 | * Set this to a sane value to prevent a divide error. | ||
1989 | */ | ||
1990 | baud = 9600; | ||
1991 | break; | ||
1992 | } | ||
1993 | switch(bits) { | ||
1994 | case 7: | ||
1995 | cflag |= CS7; | ||
1996 | break; | ||
1997 | default: | ||
1998 | case 8: | ||
1999 | cflag |= CS8; | ||
2000 | break; | ||
2001 | } | ||
2002 | switch(parity) { | ||
2003 | case 'o': case 'O': | ||
2004 | cflag |= PARODD; | ||
2005 | break; | ||
2006 | case 'e': case 'E': | ||
2007 | cflag |= PARENB; | ||
2008 | break; | ||
2009 | } | ||
2010 | co->cflag = cflag; | ||
2011 | |||
2012 | spin_lock_irqsave(&zs_lock, flags); | ||
2013 | |||
2014 | /* | ||
2015 | * Set up the baud rate generator. | ||
2016 | */ | ||
2017 | brg = BPS_TO_BRG(baud, zs_parms->clock / clk_divisor); | ||
2018 | info->zs_channel->curregs[R12] = (brg & 255); | ||
2019 | info->zs_channel->curregs[R13] = ((brg >> 8) & 255); | ||
2020 | |||
2021 | /* | ||
2022 | * Set byte size and parity. | ||
2023 | */ | ||
2024 | if (bits == 7) { | ||
2025 | info->zs_channel->curregs[R3] |= Rx7; | ||
2026 | info->zs_channel->curregs[R5] |= Tx7; | ||
2027 | } else { | ||
2028 | info->zs_channel->curregs[R3] |= Rx8; | ||
2029 | info->zs_channel->curregs[R5] |= Tx8; | ||
2030 | } | ||
2031 | if (cflag & PARENB) { | ||
2032 | info->zs_channel->curregs[R4] |= PAR_ENA; | ||
2033 | } | ||
2034 | if (!(cflag & PARODD)) { | ||
2035 | info->zs_channel->curregs[R4] |= PAR_EVEN; | ||
2036 | } | ||
2037 | info->zs_channel->curregs[R4] |= SB1; | ||
2038 | |||
2039 | /* | ||
2040 | * Turn on RTS and DTR. | ||
2041 | */ | ||
2042 | zs_rtsdtr(info, RTS | DTR, 1); | ||
2043 | |||
2044 | /* | ||
2045 | * Finally, enable sequencing. | ||
2046 | */ | ||
2047 | info->zs_channel->curregs[R3] |= RxENABLE; | ||
2048 | info->zs_channel->curregs[R5] |= TxENAB; | ||
2049 | |||
2050 | /* | ||
2051 | * Clear the interrupt registers. | ||
2052 | */ | ||
2053 | write_zsreg(info->zs_channel, R0, ERR_RES); | ||
2054 | write_zsreg(info->zs_channel, R0, RES_H_IUS); | ||
2055 | |||
2056 | /* | ||
2057 | * Load up the new values. | ||
2058 | */ | ||
2059 | load_zsregs(info->zs_channel, info->zs_channel->curregs); | ||
2060 | |||
2061 | /* Save the current value of RR0 */ | ||
2062 | info->read_reg_zero = read_zsreg(info->zs_channel, R0); | ||
2063 | |||
2064 | zs_soft[co->index].clk_divisor = clk_divisor; | ||
2065 | zs_soft[co->index].zs_baud = get_zsbaud(&zs_soft[co->index]); | ||
2066 | |||
2067 | spin_unlock_irqrestore(&zs_lock, flags); | ||
2068 | |||
2069 | return 0; | ||
2070 | } | ||
2071 | |||
/* Console descriptor for the Z8530 lines; .setup runs when the
 * console is activated, .write uses the polled transmit path. */
static struct console zs_console = {
	.name		= "ttyS",
	.write		= serial_console_write,
	.device		= serial_console_device,
	.setup		= serial_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
2080 | |||
2081 | /* | ||
2082 | * Register console. | ||
2083 | */ | ||
2084 | void __init zs_serial_console_init(void) | ||
2085 | { | ||
2086 | register_console(&zs_console); | ||
2087 | } | ||
2088 | #endif /* ifdef CONFIG_SERIAL_DEC_CONSOLE */ | ||
2089 | |||
2090 | #ifdef CONFIG_KGDB | ||
2091 | struct dec_zschannel *zs_kgdbchan; | ||
2092 | static unsigned char scc_inittab[] = { | ||
2093 | 9, 0x80, /* reset A side (CHRA) */ | ||
2094 | 13, 0, /* set baud rate divisor */ | ||
2095 | 12, 1, | ||
2096 | 14, 1, /* baud rate gen enable, src=rtxc (BRENABL) */ | ||
2097 | 11, 0x50, /* clocks = br gen (RCBR | TCBR) */ | ||
2098 | 5, 0x6a, /* tx 8 bits, assert RTS (Tx8 | TxENAB | RTS) */ | ||
2099 | 4, 0x44, /* x16 clock, 1 stop (SB1 | X16CLK)*/ | ||
2100 | 3, 0xc1, /* rx enable, 8 bits (RxENABLE | Rx8)*/ | ||
2101 | }; | ||
2102 | |||
2103 | /* These are for receiving and sending characters under the kgdb | ||
2104 | * source level kernel debugger. | ||
2105 | */ | ||
2106 | void putDebugChar(char kgdb_char) | ||
2107 | { | ||
2108 | struct dec_zschannel *chan = zs_kgdbchan; | ||
2109 | while ((read_zsreg(chan, 0) & Tx_BUF_EMP) == 0) | ||
2110 | RECOVERY_DELAY; | ||
2111 | write_zsdata(chan, kgdb_char); | ||
2112 | } | ||
2113 | char getDebugChar(void) | ||
2114 | { | ||
2115 | struct dec_zschannel *chan = zs_kgdbchan; | ||
2116 | while((read_zsreg(chan, 0) & Rx_CH_AV) == 0) | ||
2117 | eieio(); /*barrier();*/ | ||
2118 | return read_zsdata(chan); | ||
2119 | } | ||
2120 | void kgdb_interruptible(int yes) | ||
2121 | { | ||
2122 | struct dec_zschannel *chan = zs_kgdbchan; | ||
2123 | int one, nine; | ||
2124 | nine = read_zsreg(chan, 9); | ||
2125 | if (yes == 1) { | ||
2126 | one = EXT_INT_ENAB|RxINT_ALL; | ||
2127 | nine |= MIE; | ||
2128 | printk("turning serial ints on\n"); | ||
2129 | } else { | ||
2130 | one = RxINT_DISAB; | ||
2131 | nine &= ~MIE; | ||
2132 | printk("turning serial ints off\n"); | ||
2133 | } | ||
2134 | write_zsreg(chan, 1, one); | ||
2135 | write_zsreg(chan, 9, nine); | ||
2136 | } | ||
2137 | |||
2138 | static int kgdbhook_init_channel(void *handle) | ||
2139 | { | ||
2140 | return 0; | ||
2141 | } | ||
2142 | |||
2143 | static void kgdbhook_init_info(void *handle) | ||
2144 | { | ||
2145 | } | ||
2146 | |||
2147 | static void kgdbhook_rx_char(void *handle, unsigned char ch, unsigned char fl) | ||
2148 | { | ||
2149 | struct dec_serial *info = handle; | ||
2150 | |||
2151 | if (fl != TTY_NORMAL) | ||
2152 | return; | ||
2153 | if (ch == 0x03 || ch == '$') | ||
2154 | breakpoint(); | ||
2155 | } | ||
2156 | |||
2157 | /* This sets up the serial port we're using, and turns on | ||
2158 | * interrupts for that channel, so kgdb is usable once we're done. | ||
2159 | */ | ||
2160 | static inline void kgdb_chaninit(struct dec_zschannel *ms, int intson, int bps) | ||
2161 | { | ||
2162 | int brg; | ||
2163 | int i, x; | ||
2164 | volatile char *sccc = ms->control; | ||
2165 | brg = BPS_TO_BRG(bps, zs_parms->clock/16); | ||
2166 | printk("setting bps on kgdb line to %d [brg=%x]\n", bps, brg); | ||
2167 | for (i = 20000; i != 0; --i) { | ||
2168 | x = *sccc; eieio(); | ||
2169 | } | ||
2170 | for (i = 0; i < sizeof(scc_inittab); ++i) { | ||
2171 | write_zsreg(ms, scc_inittab[i], scc_inittab[i+1]); | ||
2172 | i++; | ||
2173 | } | ||
2174 | } | ||
2175 | /* This is called at boot time to prime the kgdb serial debugging | ||
2176 | * serial line. The 'tty_num' argument is 0 for /dev/ttya and 1 | ||
2177 | * for /dev/ttyb which is determined in setup_arch() from the | ||
2178 | * boot command line flags. | ||
2179 | */ | ||
2180 | struct dec_serial_hook zs_kgdbhook = { | ||
2181 | .init_channel = kgdbhook_init_channel, | ||
2182 | .init_info = kgdbhook_init_info, | ||
2183 | .rx_char = kgdbhook_rx_char, | ||
2184 | .cflags = B38400 | CS8 | CLOCAL, | ||
2185 | }; | ||
2186 | |||
2187 | void __init zs_kgdb_hook(int tty_num) | ||
2188 | { | ||
2189 | /* Find out how many Z8530 SCCs we have */ | ||
2190 | if (zs_chain == 0) | ||
2191 | probe_sccs(); | ||
2192 | zs_soft[tty_num].zs_channel = &zs_channels[tty_num]; | ||
2193 | zs_kgdbchan = zs_soft[tty_num].zs_channel; | ||
2194 | zs_soft[tty_num].change_needed = 0; | ||
2195 | zs_soft[tty_num].clk_divisor = 16; | ||
2196 | zs_soft[tty_num].zs_baud = 38400; | ||
2197 | zs_soft[tty_num].hook = &zs_kgdbhook; /* This runs kgdb */ | ||
2198 | /* Turn on transmitter/receiver at 8-bits/char */ | ||
2199 | kgdb_chaninit(zs_soft[tty_num].zs_channel, 1, 38400); | ||
2200 | printk("KGDB: on channel %d initialized\n", tty_num); | ||
2201 | set_debug_traps(); /* init stub */ | ||
2202 | } | ||
2203 | #endif /* ifdef CONFIG_KGDB */ | ||
diff --git a/drivers/tc/zs.h b/drivers/tc/zs.h deleted file mode 100644 index 13512200ceba..000000000000 --- a/drivers/tc/zs.h +++ /dev/null | |||
@@ -1,404 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/tc/zs.h: Definitions for the DECstation Z85C30 serial driver. | ||
3 | * | ||
4 | * Adapted from drivers/sbus/char/sunserial.h by Paul Mackerras. | ||
5 | * Adapted from drivers/macintosh/macserial.h by Harald Koerfgen. | ||
6 | * | ||
7 | * Copyright (C) 1996 Paul Mackerras (Paul.Mackerras@cs.anu.edu.au) | ||
8 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
9 | * Copyright (C) 2004, 2005 Maciej W. Rozycki | ||
10 | */ | ||
11 | #ifndef _DECSERIAL_H | ||
12 | #define _DECSERIAL_H | ||
13 | |||
14 | #include <asm/dec/serial.h> | ||
15 | |||
16 | #define NUM_ZSREGS 16 | ||
17 | |||
18 | struct serial_struct { | ||
19 | int type; | ||
20 | int line; | ||
21 | int port; | ||
22 | int irq; | ||
23 | int flags; | ||
24 | int xmit_fifo_size; | ||
25 | int custom_divisor; | ||
26 | int baud_base; | ||
27 | unsigned short close_delay; | ||
28 | char reserved_char[2]; | ||
29 | int hub6; | ||
30 | unsigned short closing_wait; /* time to wait before closing */ | ||
31 | unsigned short closing_wait2; /* no longer used... */ | ||
32 | int reserved[4]; | ||
33 | }; | ||
34 | |||
35 | /* | ||
36 | * For the close wait times, 0 means wait forever for serial port to | ||
37 | * flush its output. 65535 means don't wait at all. | ||
38 | */ | ||
39 | #define ZILOG_CLOSING_WAIT_INF 0 | ||
40 | #define ZILOG_CLOSING_WAIT_NONE 65535 | ||
41 | |||
42 | /* | ||
43 | * Definitions for ZILOG_struct (and serial_struct) flags field | ||
44 | */ | ||
45 | #define ZILOG_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes | ||
46 | on the callout port */ | ||
47 | #define ZILOG_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */ | ||
48 | #define ZILOG_SAK 0x0004 /* Secure Attention Key (Orange book) */ | ||
49 | #define ZILOG_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ | ||
50 | |||
51 | #define ZILOG_SPD_MASK 0x0030 | ||
52 | #define ZILOG_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */ | ||
53 | |||
54 | #define ZILOG_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */ | ||
55 | #define ZILOG_SPD_CUST 0x0030 /* Use user-specified divisor */ | ||
56 | |||
57 | #define ZILOG_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */ | ||
58 | #define ZILOG_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */ | ||
59 | #define ZILOG_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ | ||
60 | #define ZILOG_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ | ||
61 | #define ZILOG_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */ | ||
62 | |||
63 | #define ZILOG_FLAGS 0x0FFF /* Possible legal ZILOG flags */ | ||
64 | #define ZILOG_USR_MASK 0x0430 /* Legal flags that non-privileged | ||
65 | * users can set or reset */ | ||
66 | |||
67 | /* Internal flags used only by kernel/chr_drv/serial.c */ | ||
68 | #define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */ | ||
69 | #define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */ | ||
70 | #define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ | ||
71 | #define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ | ||
72 | #define ZILOG_CLOSING 0x08000000 /* Serial port is closing */ | ||
73 | #define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */ | ||
74 | #define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */ | ||
75 | |||
76 | /* Software state per channel */ | ||
77 | |||
78 | #ifdef __KERNEL__ | ||
79 | /* | ||
80 | * This is our internal structure for each serial port's state. | ||
81 | * | ||
82 | * Many fields are paralleled by the structure used by the serial_struct | ||
83 | * structure. | ||
84 | * | ||
85 | * For definitions of the flags field, see tty.h | ||
86 | */ | ||
87 | |||
88 | struct dec_zschannel { | ||
89 | volatile unsigned char *control; | ||
90 | volatile unsigned char *data; | ||
91 | |||
92 | /* Current write register values */ | ||
93 | unsigned char curregs[NUM_ZSREGS]; | ||
94 | }; | ||
95 | |||
96 | struct dec_serial { | ||
97 | struct dec_serial *zs_next; /* For IRQ servicing chain. */ | ||
98 | struct dec_zschannel *zs_channel; /* Channel registers. */ | ||
99 | struct dec_zschannel *zs_chan_a; /* A side registers. */ | ||
100 | unsigned char read_reg_zero; | ||
101 | |||
102 | struct dec_serial_hook *hook; /* Hook on this channel. */ | ||
103 | int tty_break; /* Set on BREAK condition. */ | ||
104 | int is_cons; /* Is this our console. */ | ||
105 | int tx_active; /* Char is being xmitted. */ | ||
106 | int tx_stopped; /* Output is suspended. */ | ||
107 | |||
108 | /* | ||
109 | * We need to know the current clock divisor | ||
110 | * to read the bps rate the chip has currently loaded. | ||
111 | */ | ||
112 | int clk_divisor; /* May be 1, 16, 32, or 64. */ | ||
113 | int zs_baud; | ||
114 | |||
115 | char change_needed; | ||
116 | |||
117 | int magic; | ||
118 | int baud_base; | ||
119 | int port; | ||
120 | int irq; | ||
121 | int flags; /* Defined in tty.h. */ | ||
122 | int type; /* UART type. */ | ||
123 | struct tty_struct *tty; | ||
124 | int read_status_mask; | ||
125 | int ignore_status_mask; | ||
126 | int timeout; | ||
127 | int xmit_fifo_size; | ||
128 | int custom_divisor; | ||
129 | int x_char; /* XON/XOFF character. */ | ||
130 | int close_delay; | ||
131 | unsigned short closing_wait; | ||
132 | unsigned short closing_wait2; | ||
133 | unsigned long event; | ||
134 | unsigned long last_active; | ||
135 | int line; | ||
136 | int count; /* # of fds on device. */ | ||
137 | int blocked_open; /* # of blocked opens. */ | ||
138 | unsigned char *xmit_buf; | ||
139 | int xmit_head; | ||
140 | int xmit_tail; | ||
141 | int xmit_cnt; | ||
142 | struct tasklet_struct tlet; | ||
143 | wait_queue_head_t open_wait; | ||
144 | wait_queue_head_t close_wait; | ||
145 | }; | ||
146 | |||
147 | |||
148 | #define SERIAL_MAGIC 0x5301 | ||
149 | |||
150 | /* | ||
151 | * The size of the serial xmit buffer is 1 page, or 4096 bytes | ||
152 | */ | ||
153 | #define SERIAL_XMIT_SIZE 4096 | ||
154 | |||
155 | /* | ||
156 | * Events are used to schedule things to happen at timer-interrupt | ||
157 | * time, instead of at rs interrupt time. | ||
158 | */ | ||
159 | #define RS_EVENT_WRITE_WAKEUP 0 | ||
160 | |||
161 | #endif /* __KERNEL__ */ | ||
162 | |||
163 | /* Conversion routines to/from brg time constants from/to bits | ||
164 | * per second. | ||
165 | */ | ||
166 | #define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2)) | ||
167 | #define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2) | ||
168 | |||
169 | /* The Zilog register set */ | ||
170 | |||
171 | #define FLAG 0x7e | ||
172 | |||
173 | /* Write Register 0 */ | ||
174 | #define R0 0 /* Register selects */ | ||
175 | #define R1 1 | ||
176 | #define R2 2 | ||
177 | #define R3 3 | ||
178 | #define R4 4 | ||
179 | #define R5 5 | ||
180 | #define R6 6 | ||
181 | #define R7 7 | ||
182 | #define R8 8 | ||
183 | #define R9 9 | ||
184 | #define R10 10 | ||
185 | #define R11 11 | ||
186 | #define R12 12 | ||
187 | #define R13 13 | ||
188 | #define R14 14 | ||
189 | #define R15 15 | ||
190 | |||
191 | #define NULLCODE 0 /* Null Code */ | ||
192 | #define POINT_HIGH 0x8 /* Select upper half of registers */ | ||
193 | #define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */ | ||
194 | #define SEND_ABORT 0x18 /* HDLC Abort */ | ||
195 | #define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */ | ||
196 | #define RES_Tx_P 0x28 /* Reset TxINT Pending */ | ||
197 | #define ERR_RES 0x30 /* Error Reset */ | ||
198 | #define RES_H_IUS 0x38 /* Reset highest IUS */ | ||
199 | |||
200 | #define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */ | ||
201 | #define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */ | ||
202 | #define RES_EOM_L 0xC0 /* Reset EOM latch */ | ||
203 | |||
204 | /* Write Register 1 */ | ||
205 | |||
206 | #define EXT_INT_ENAB 0x1 /* Ext Int Enable */ | ||
207 | #define TxINT_ENAB 0x2 /* Tx Int Enable */ | ||
208 | #define PAR_SPEC 0x4 /* Parity is special condition */ | ||
209 | |||
210 | #define RxINT_DISAB 0 /* Rx Int Disable */ | ||
211 | #define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */ | ||
212 | #define RxINT_ALL 0x10 /* Int on all Rx Characters or error */ | ||
213 | #define RxINT_ERR 0x18 /* Int on error only */ | ||
214 | #define RxINT_MASK 0x18 | ||
215 | |||
216 | #define WT_RDY_RT 0x20 /* Wait/Ready on R/T */ | ||
217 | #define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */ | ||
218 | #define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */ | ||
219 | |||
220 | /* Write Register #2 (Interrupt Vector) */ | ||
221 | |||
222 | /* Write Register 3 */ | ||
223 | |||
224 | #define RxENABLE 0x1 /* Rx Enable */ | ||
225 | #define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */ | ||
226 | #define ADD_SM 0x4 /* Address Search Mode (SDLC) */ | ||
227 | #define RxCRC_ENAB 0x8 /* Rx CRC Enable */ | ||
228 | #define ENT_HM 0x10 /* Enter Hunt Mode */ | ||
229 | #define AUTO_ENAB 0x20 /* Auto Enables */ | ||
230 | #define Rx5 0x0 /* Rx 5 Bits/Character */ | ||
231 | #define Rx7 0x40 /* Rx 7 Bits/Character */ | ||
232 | #define Rx6 0x80 /* Rx 6 Bits/Character */ | ||
233 | #define Rx8 0xc0 /* Rx 8 Bits/Character */ | ||
234 | #define RxNBITS_MASK 0xc0 | ||
235 | |||
236 | /* Write Register 4 */ | ||
237 | |||
238 | #define PAR_ENA 0x1 /* Parity Enable */ | ||
239 | #define PAR_EVEN 0x2 /* Parity Even/Odd* */ | ||
240 | |||
241 | #define SYNC_ENAB 0 /* Sync Modes Enable */ | ||
242 | #define SB1 0x4 /* 1 stop bit/char */ | ||
243 | #define SB15 0x8 /* 1.5 stop bits/char */ | ||
244 | #define SB2 0xc /* 2 stop bits/char */ | ||
245 | #define SB_MASK 0xc | ||
246 | |||
247 | #define MONSYNC 0 /* 8 Bit Sync character */ | ||
248 | #define BISYNC 0x10 /* 16 bit sync character */ | ||
249 | #define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */ | ||
250 | #define EXTSYNC 0x30 /* External Sync Mode */ | ||
251 | |||
252 | #define X1CLK 0x0 /* x1 clock mode */ | ||
253 | #define X16CLK 0x40 /* x16 clock mode */ | ||
254 | #define X32CLK 0x80 /* x32 clock mode */ | ||
255 | #define X64CLK 0xC0 /* x64 clock mode */ | ||
256 | #define XCLK_MASK 0xC0 | ||
257 | |||
258 | /* Write Register 5 */ | ||
259 | |||
260 | #define TxCRC_ENAB 0x1 /* Tx CRC Enable */ | ||
261 | #define RTS 0x2 /* RTS */ | ||
262 | #define SDLC_CRC 0x4 /* SDLC/CRC-16 */ | ||
263 | #define TxENAB 0x8 /* Tx Enable */ | ||
264 | #define SND_BRK 0x10 /* Send Break */ | ||
265 | #define Tx5 0x0 /* Tx 5 bits (or less)/character */ | ||
266 | #define Tx7 0x20 /* Tx 7 bits/character */ | ||
267 | #define Tx6 0x40 /* Tx 6 bits/character */ | ||
268 | #define Tx8 0x60 /* Tx 8 bits/character */ | ||
269 | #define TxNBITS_MASK 0x60 | ||
270 | #define DTR 0x80 /* DTR */ | ||
271 | |||
272 | /* Write Register 6 (Sync bits 0-7/SDLC Address Field) */ | ||
273 | |||
274 | /* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ | ||
275 | |||
276 | /* Write Register 8 (transmit buffer) */ | ||
277 | |||
278 | /* Write Register 9 (Master interrupt control) */ | ||
279 | #define VIS 1 /* Vector Includes Status */ | ||
280 | #define NV 2 /* No Vector */ | ||
281 | #define DLC 4 /* Disable Lower Chain */ | ||
282 | #define MIE 8 /* Master Interrupt Enable */ | ||
283 | #define STATHI 0x10 /* Status high */ | ||
284 | #define SOFTACK 0x20 /* Software Interrupt Acknowledge */ | ||
285 | #define NORESET 0 /* No reset on write to R9 */ | ||
286 | #define CHRB 0x40 /* Reset channel B */ | ||
287 | #define CHRA 0x80 /* Reset channel A */ | ||
288 | #define FHWRES 0xc0 /* Force hardware reset */ | ||
289 | |||
290 | /* Write Register 10 (misc control bits) */ | ||
291 | #define BIT6 1 /* 6 bit/8bit sync */ | ||
292 | #define LOOPMODE 2 /* SDLC Loop mode */ | ||
293 | #define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */ | ||
294 | #define MARKIDLE 8 /* Mark/flag on idle */ | ||
295 | #define GAOP 0x10 /* Go active on poll */ | ||
296 | #define NRZ 0 /* NRZ mode */ | ||
297 | #define NRZI 0x20 /* NRZI mode */ | ||
298 | #define FM1 0x40 /* FM1 (transition = 1) */ | ||
299 | #define FM0 0x60 /* FM0 (transition = 0) */ | ||
300 | #define CRCPS 0x80 /* CRC Preset I/O */ | ||
301 | |||
302 | /* Write Register 11 (Clock Mode control) */ | ||
303 | #define TRxCXT 0 /* TRxC = Xtal output */ | ||
304 | #define TRxCTC 1 /* TRxC = Transmit clock */ | ||
305 | #define TRxCBR 2 /* TRxC = BR Generator Output */ | ||
306 | #define TRxCDP 3 /* TRxC = DPLL output */ | ||
307 | #define TRxCOI 4 /* TRxC O/I */ | ||
308 | #define TCRTxCP 0 /* Transmit clock = RTxC pin */ | ||
309 | #define TCTRxCP 8 /* Transmit clock = TRxC pin */ | ||
310 | #define TCBR 0x10 /* Transmit clock = BR Generator output */ | ||
311 | #define TCDPLL 0x18 /* Transmit clock = DPLL output */ | ||
312 | #define RCRTxCP 0 /* Receive clock = RTxC pin */ | ||
313 | #define RCTRxCP 0x20 /* Receive clock = TRxC pin */ | ||
314 | #define RCBR 0x40 /* Receive clock = BR Generator output */ | ||
315 | #define RCDPLL 0x60 /* Receive clock = DPLL output */ | ||
316 | #define RTxCX 0x80 /* RTxC Xtal/No Xtal */ | ||
317 | |||
318 | /* Write Register 12 (lower byte of baud rate generator time constant) */ | ||
319 | |||
320 | /* Write Register 13 (upper byte of baud rate generator time constant) */ | ||
321 | |||
322 | /* Write Register 14 (Misc control bits) */ | ||
323 | #define BRENABL 1 /* Baud rate generator enable */ | ||
324 | #define BRSRC 2 /* Baud rate generator source */ | ||
325 | #define DTRREQ 4 /* DTR/Request function */ | ||
326 | #define AUTOECHO 8 /* Auto Echo */ | ||
327 | #define LOOPBAK 0x10 /* Local loopback */ | ||
328 | #define SEARCH 0x20 /* Enter search mode */ | ||
329 | #define RMC 0x40 /* Reset missing clock */ | ||
330 | #define DISDPLL 0x60 /* Disable DPLL */ | ||
331 | #define SSBR 0x80 /* Set DPLL source = BR generator */ | ||
332 | #define SSRTxC 0xa0 /* Set DPLL source = RTxC */ | ||
333 | #define SFMM 0xc0 /* Set FM mode */ | ||
334 | #define SNRZI 0xe0 /* Set NRZI mode */ | ||
335 | |||
336 | /* Write Register 15 (external/status interrupt control) */ | ||
337 | #define ZCIE 2 /* Zero count IE */ | ||
338 | #define DCDIE 8 /* DCD IE */ | ||
339 | #define SYNCIE 0x10 /* Sync/hunt IE */ | ||
340 | #define CTSIE 0x20 /* CTS IE */ | ||
341 | #define TxUIE 0x40 /* Tx Underrun/EOM IE */ | ||
342 | #define BRKIE 0x80 /* Break/Abort IE */ | ||
343 | |||
344 | |||
345 | /* Read Register 0 */ | ||
346 | #define Rx_CH_AV 0x1 /* Rx Character Available */ | ||
347 | #define ZCOUNT 0x2 /* Zero count */ | ||
348 | #define Tx_BUF_EMP 0x4 /* Tx Buffer empty */ | ||
349 | #define DCD 0x8 /* DCD */ | ||
350 | #define SYNC_HUNT 0x10 /* Sync/hunt */ | ||
351 | #define CTS 0x20 /* CTS */ | ||
352 | #define TxEOM 0x40 /* Tx underrun */ | ||
353 | #define BRK_ABRT 0x80 /* Break/Abort */ | ||
354 | |||
355 | /* Read Register 1 */ | ||
356 | #define ALL_SNT 0x1 /* All sent */ | ||
357 | /* Residue Data for 8 Rx bits/char programmed */ | ||
358 | #define RES3 0x8 /* 0/3 */ | ||
359 | #define RES4 0x4 /* 0/4 */ | ||
360 | #define RES5 0xc /* 0/5 */ | ||
361 | #define RES6 0x2 /* 0/6 */ | ||
362 | #define RES7 0xa /* 0/7 */ | ||
363 | #define RES8 0x6 /* 0/8 */ | ||
364 | #define RES18 0xe /* 1/8 */ | ||
365 | #define RES28 0x0 /* 2/8 */ | ||
366 | /* Special Rx Condition Interrupts */ | ||
367 | #define PAR_ERR 0x10 /* Parity error */ | ||
368 | #define Rx_OVR 0x20 /* Rx Overrun Error */ | ||
369 | #define FRM_ERR 0x40 /* CRC/Framing Error */ | ||
370 | #define END_FR 0x80 /* End of Frame (SDLC) */ | ||
371 | |||
372 | /* Read Register 2 (channel b only) - Interrupt vector */ | ||
373 | |||
374 | /* Read Register 3 (interrupt pending register) ch a only */ | ||
375 | #define CHBEXT 0x1 /* Channel B Ext/Stat IP */ | ||
376 | #define CHBTxIP 0x2 /* Channel B Tx IP */ | ||
377 | #define CHBRxIP 0x4 /* Channel B Rx IP */ | ||
378 | #define CHAEXT 0x8 /* Channel A Ext/Stat IP */ | ||
379 | #define CHATxIP 0x10 /* Channel A Tx IP */ | ||
380 | #define CHARxIP 0x20 /* Channel A Rx IP */ | ||
381 | |||
382 | /* Read Register 8 (receive data register) */ | ||
383 | |||
384 | /* Read Register 10 (misc status bits) */ | ||
385 | #define ONLOOP 2 /* On loop */ | ||
386 | #define LOOPSEND 0x10 /* Loop sending */ | ||
387 | #define CLK2MIS 0x40 /* Two clocks missing */ | ||
388 | #define CLK1MIS 0x80 /* One clock missing */ | ||
389 | |||
390 | /* Read Register 12 (lower byte of baud rate generator constant) */ | ||
391 | |||
392 | /* Read Register 13 (upper byte of baud rate generator constant) */ | ||
393 | |||
394 | /* Read Register 15 (value of WR 15) */ | ||
395 | |||
396 | /* Misc macros */ | ||
397 | #define ZS_CLEARERR(channel) (write_zsreg(channel, 0, ERR_RES)) | ||
398 | #define ZS_CLEARFIFO(channel) do { volatile unsigned char garbage; \ | ||
399 | garbage = read_zsdata(channel); \ | ||
400 | garbage = read_zsdata(channel); \ | ||
401 | garbage = read_zsdata(channel); \ | ||
402 | } while(0) | ||
403 | |||
404 | #endif /* !(_DECSERIAL_H) */ | ||
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig index da219c043c99..9de1c114f809 100644 --- a/drivers/video/logo/Kconfig +++ b/drivers/video/logo/Kconfig | |||
@@ -12,7 +12,7 @@ if LOGO | |||
12 | 12 | ||
13 | config FB_LOGO_EXTRA | 13 | config FB_LOGO_EXTRA |
14 | bool | 14 | bool |
15 | depends on FB | 15 | depends on FB=y |
16 | default y if SPU_BASE | 16 | default y if SPU_BASE |
17 | 17 | ||
18 | config LOGO_LINUX_MONO | 18 | config LOGO_LINUX_MONO |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile new file mode 100644 index 000000000000..56592f0d6cef --- /dev/null +++ b/drivers/xen/Makefile | |||
@@ -0,0 +1,2 @@ | |||
1 | obj-y += grant-table.o | ||
2 | obj-y += xenbus/ | ||
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c new file mode 100644 index 000000000000..ea94dbabf9a9 --- /dev/null +++ b/drivers/xen/grant-table.c | |||
@@ -0,0 +1,582 @@ | |||
1 | /****************************************************************************** | ||
2 | * grant_table.c | ||
3 | * | ||
4 | * Granting foreign access to our memory reservation. | ||
5 | * | ||
6 | * Copyright (c) 2005-2006, Christopher Clark | ||
7 | * Copyright (c) 2004-2005, K A Fraser | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/mm.h> | ||
37 | #include <linux/vmalloc.h> | ||
38 | #include <linux/uaccess.h> | ||
39 | |||
40 | #include <xen/interface/xen.h> | ||
41 | #include <xen/page.h> | ||
42 | #include <xen/grant_table.h> | ||
43 | |||
44 | #include <asm/pgtable.h> | ||
45 | #include <asm/sync_bitops.h> | ||
46 | |||
47 | |||
48 | /* External tools reserve first few grant table entries. */ | ||
49 | #define NR_RESERVED_ENTRIES 8 | ||
50 | #define GNTTAB_LIST_END 0xffffffff | ||
51 | #define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry)) | ||
52 | |||
53 | static grant_ref_t **gnttab_list; | ||
54 | static unsigned int nr_grant_frames; | ||
55 | static unsigned int boot_max_nr_grant_frames; | ||
56 | static int gnttab_free_count; | ||
57 | static grant_ref_t gnttab_free_head; | ||
58 | static DEFINE_SPINLOCK(gnttab_list_lock); | ||
59 | |||
60 | static struct grant_entry *shared; | ||
61 | |||
62 | static struct gnttab_free_callback *gnttab_free_callback_list; | ||
63 | |||
64 | static int gnttab_expand(unsigned int req_entries); | ||
65 | |||
66 | #define RPP (PAGE_SIZE / sizeof(grant_ref_t)) | ||
67 | |||
68 | static inline grant_ref_t *__gnttab_entry(grant_ref_t entry) | ||
69 | { | ||
70 | return &gnttab_list[(entry) / RPP][(entry) % RPP]; | ||
71 | } | ||
72 | /* This can be used as an l-value */ | ||
73 | #define gnttab_entry(entry) (*__gnttab_entry(entry)) | ||
74 | |||
75 | static int get_free_entries(unsigned count) | ||
76 | { | ||
77 | unsigned long flags; | ||
78 | int ref, rc; | ||
79 | grant_ref_t head; | ||
80 | |||
81 | spin_lock_irqsave(&gnttab_list_lock, flags); | ||
82 | |||
83 | if ((gnttab_free_count < count) && | ||
84 | ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) { | ||
85 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
86 | return rc; | ||
87 | } | ||
88 | |||
89 | ref = head = gnttab_free_head; | ||
90 | gnttab_free_count -= count; | ||
91 | while (count-- > 1) | ||
92 | head = gnttab_entry(head); | ||
93 | gnttab_free_head = gnttab_entry(head); | ||
94 | gnttab_entry(head) = GNTTAB_LIST_END; | ||
95 | |||
96 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
97 | |||
98 | return ref; | ||
99 | } | ||
100 | |||
101 | static void do_free_callbacks(void) | ||
102 | { | ||
103 | struct gnttab_free_callback *callback, *next; | ||
104 | |||
105 | callback = gnttab_free_callback_list; | ||
106 | gnttab_free_callback_list = NULL; | ||
107 | |||
108 | while (callback != NULL) { | ||
109 | next = callback->next; | ||
110 | if (gnttab_free_count >= callback->count) { | ||
111 | callback->next = NULL; | ||
112 | callback->fn(callback->arg); | ||
113 | } else { | ||
114 | callback->next = gnttab_free_callback_list; | ||
115 | gnttab_free_callback_list = callback; | ||
116 | } | ||
117 | callback = next; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | static inline void check_free_callbacks(void) | ||
122 | { | ||
123 | if (unlikely(gnttab_free_callback_list)) | ||
124 | do_free_callbacks(); | ||
125 | } | ||
126 | |||
127 | static void put_free_entry(grant_ref_t ref) | ||
128 | { | ||
129 | unsigned long flags; | ||
130 | spin_lock_irqsave(&gnttab_list_lock, flags); | ||
131 | gnttab_entry(ref) = gnttab_free_head; | ||
132 | gnttab_free_head = ref; | ||
133 | gnttab_free_count++; | ||
134 | check_free_callbacks(); | ||
135 | spin_unlock_irqrestore(&gnttab_list_lock, flags); | ||
136 | } | ||
137 | |||
138 | static void update_grant_entry(grant_ref_t ref, domid_t domid, | ||
139 | unsigned long frame, unsigned flags) | ||
140 | { | ||
141 | /* | ||
142 | * Introducing a valid entry into the grant table: | ||
143 | * 1. Write ent->domid. | ||
144 | * 2. Write ent->frame: | ||
145 | * GTF_permit_access: Frame to which access is permitted. | ||
146 | * GTF_accept_transfer: Pseudo-phys frame slot being filled by new | ||
147 | * frame, or zero if none. | ||
148 | * 3. Write memory barrier (WMB). | ||
149 | * 4. Write ent->flags, inc. valid type. | ||
150 | */ | ||
151 | shared[ref].frame = frame; | ||
152 | shared[ref].domid = domid; | ||
153 | wmb(); | ||
154 | shared[ref].flags = flags; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Public grant-issuing interface functions | ||
159 | */ | ||
160 | void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid, | ||
161 | unsigned long frame, int readonly) | ||
162 | { | ||
163 | update_grant_entry(ref, domid, frame, | ||
164 | GTF_permit_access | (readonly ? GTF_readonly : 0)); | ||
165 | } | ||
166 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref); | ||
167 | |||
168 | int gnttab_grant_foreign_access(domid_t domid, unsigned long frame, | ||
169 | int readonly) | ||
170 | { | ||
171 | int ref; | ||
172 | |||
173 | ref = get_free_entries(1); | ||
174 | if (unlikely(ref < 0)) | ||
175 | return -ENOSPC; | ||
176 | |||
177 | gnttab_grant_foreign_access_ref(ref, domid, frame, readonly); | ||
178 | |||
179 | return ref; | ||
180 | } | ||
181 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access); | ||
182 | |||
183 | int gnttab_query_foreign_access(grant_ref_t ref) | ||
184 | { | ||
185 | u16 nflags; | ||
186 | |||
187 | nflags = shared[ref].flags; | ||
188 | |||
189 | return (nflags & (GTF_reading|GTF_writing)); | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(gnttab_query_foreign_access); | ||
192 | |||
193 | int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) | ||
194 | { | ||
195 | u16 flags, nflags; | ||
196 | |||
197 | nflags = shared[ref].flags; | ||
198 | do { | ||
199 | flags = nflags; | ||
200 | if (flags & (GTF_reading|GTF_writing)) { | ||
201 | printk(KERN_ALERT "WARNING: g.e. still in use!\n"); | ||
202 | return 0; | ||
203 | } | ||
204 | } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags); | ||
205 | |||
206 | return 1; | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); | ||
209 | |||
210 | void gnttab_end_foreign_access(grant_ref_t ref, int readonly, | ||
211 | unsigned long page) | ||
212 | { | ||
213 | if (gnttab_end_foreign_access_ref(ref, readonly)) { | ||
214 | put_free_entry(ref); | ||
215 | if (page != 0) | ||
216 | free_page(page); | ||
217 | } else { | ||
218 | /* XXX This needs to be fixed so that the ref and page are | ||
219 | placed on a list to be freed up later. */ | ||
220 | printk(KERN_WARNING | ||
221 | "WARNING: leaking g.e. and page still in use!\n"); | ||
222 | } | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); | ||
225 | |||
226 | int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn) | ||
227 | { | ||
228 | int ref; | ||
229 | |||
230 | ref = get_free_entries(1); | ||
231 | if (unlikely(ref < 0)) | ||
232 | return -ENOSPC; | ||
233 | gnttab_grant_foreign_transfer_ref(ref, domid, pfn); | ||
234 | |||
235 | return ref; | ||
236 | } | ||
237 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer); | ||
238 | |||
239 | void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid, | ||
240 | unsigned long pfn) | ||
241 | { | ||
242 | update_grant_entry(ref, domid, pfn, GTF_accept_transfer); | ||
243 | } | ||
244 | EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref); | ||
245 | |||
/*
 * Complete a page transfer into this domain on @ref.  Spins until the
 * peer has committed and completed the transfer, then returns the frame
 * number of the transferred page.  Returns 0 if the transfer had not yet
 * started and the grant entry could be reclaimed instead.
 */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
		/* Atomically clear the entry; retry if flags changed. */
		if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = shared[ref].flags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = shared[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
274 | |||
/* Complete a page transfer on @ref and return the grant entry to the
 * free list.  Returns the transferred frame, or 0 (see
 * gnttab_end_foreign_transfer_ref()). */
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
282 | |||
/* Return a single grant reference to the global free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
288 | |||
/*
 * Return a caller-private chain of grant references (linked through
 * gnttab_entry()) to the global free list.  The chain is spliced in as a
 * whole under gnttab_list_lock, and pending free callbacks are
 * re-checked afterwards.
 */
void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	/* Walk to the tail of the private chain, counting entries. */
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	/* Splice the whole chain onto the head of the global free list. */
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
309 | |||
310 | int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) | ||
311 | { | ||
312 | int h = get_free_entries(count); | ||
313 | |||
314 | if (h < 0) | ||
315 | return -ENOSPC; | ||
316 | |||
317 | *head = h; | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); | ||
322 | |||
/* Return non-zero iff the caller-private free list at *private_head is
 * exhausted. */
int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
328 | |||
/*
 * Pop one reference off a caller-private free list (filled by
 * gnttab_alloc_grant_references()).  No locking: the list is owned by
 * the caller.  Returns the reference, or -ENOSPC when the list is empty.
 */
int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
338 | |||
/* Push @release back onto the caller-private free list at *private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
346 | |||
/*
 * Register @callback to be invoked once at least @count grant references
 * are free.  A callback that is already queued (callback->next non-NULL)
 * is left untouched.  May fire immediately via check_free_callbacks() if
 * enough entries are already free.
 */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	if (callback->next)
		goto out;	/* already queued */
	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
364 | |||
/* Unlink @callback from the pending free-callback list, if present. */
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Scan via pointer-to-pointer so unlinking needs no prev node. */
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
380 | |||
/*
 * Grow the free-list bookkeeping by @more_frames grant frames' worth of
 * entries and splice the new entries onto the free list.  Uses
 * GFP_ATOMIC; presumably called under gnttab_list_lock — confirm at call
 * sites.  Returns 0 on success or -ENOMEM (new pages already allocated
 * are released; the slot that failed is NULL and free_page(0) is a
 * no-op).
 */
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * GREFS_PER_GRANT_FRAME;

	for (i = nr_grant_frames; i < new_nr_grant_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}


	/* Chain the new entries together ... */
	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	/* ... and splice them onto the head of the global free list. */
	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	for ( ; i >= nr_grant_frames; i--)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
414 | |||
/*
 * Ask the hypervisor how many grant-table frames it supports.  Falls
 * back to 4 when GNTTABOP_query_size is unavailable or fails.
 */
static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}
428 | |||
429 | static inline unsigned int max_nr_grant_frames(void) | ||
430 | { | ||
431 | unsigned int xen_max = __max_nr_grant_frames(); | ||
432 | |||
433 | if (xen_max > boot_max_nr_grant_frames) | ||
434 | return boot_max_nr_grant_frames; | ||
435 | return xen_max; | ||
436 | } | ||
437 | |||
/*
 * apply_to_page_range() callback: point the PTE at the next grant-frame
 * MFN.  @data is a cursor (unsigned long **) into the frame list,
 * advanced one slot per call.
 */
static int map_pte_fn(pte_t *pte, struct page *pmd_page,
		      unsigned long addr, void *data)
{
	unsigned long **frames = (unsigned long **)data;

	set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
	(*frames)++;
	return 0;
}
447 | |||
/* apply_to_page_range() callback: clear the PTE (used on suspend). */
static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
			unsigned long addr, void *data)
{

	set_pte_at(&init_mm, addr, pte, __pte(0));
	return 0;
}
455 | |||
/*
 * Obtain grant frames [start_idx, end_idx] from the hypervisor and map
 * them into the kernel virtual area backing 'shared'.  The vm area is
 * allocated lazily and sized for max_nr_grant_frames(), so later
 * expansion only needs PTE updates.  Returns 0 on success, -ENOMEM or
 * -ENOSYS on failure; any other hypercall failure is fatal (BUG_ON).
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	setup.frame_list = frames;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	if (shared == NULL) {
		struct vm_struct *area;
		area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
		BUG_ON(area == NULL);
		shared = area->addr;
	}
	/* map_pte_fn() advances 'frames' one slot per mapped page. */
	rc = apply_to_page_range(&init_mm, (unsigned long)shared,
				 PAGE_SIZE * nr_gframes,
				 map_pte_fn, &frames);
	BUG_ON(rc);
	frames -= nr_gframes; /* adjust after map_pte_fn() */

	kfree(frames);

	return 0;
}
495 | |||
/*
 * Re-establish all grant-frame mappings (after resume, or at init).
 * Fails with -ENOSYS if the hypervisor now supports fewer frames than we
 * were already using.
 */
static int gnttab_resume(void)
{
	if (max_nr_grant_frames() < nr_grant_frames)
		return -ENOSYS;
	return gnttab_map(0, nr_grant_frames - 1);
}
502 | |||
/* Tear down the grant-frame PTEs prior to suspend; gnttab_resume()
 * re-establishes them.  Always returns 0. */
static int gnttab_suspend(void)
{
	apply_to_page_range(&init_mm, (unsigned long)shared,
			    PAGE_SIZE * nr_grant_frames,
			    unmap_pte_fn, NULL);

	return 0;
}
511 | |||
/*
 * Grow the grant table by enough frames to provide at least
 * @req_entries additional grant entries.  Returns 0 on success, -ENOSPC
 * when the hypervisor/boot limit would be exceeded, or the error from
 * gnttab_map()/grow_gnttab_list().
 */
static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	/* Round the request up to whole grant frames. */
	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
		 GREFS_PER_GRANT_FRAME);
	if (cur + extra > max_nr_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}
529 | |||
530 | static int __devinit gnttab_init(void) | ||
531 | { | ||
532 | int i; | ||
533 | unsigned int max_nr_glist_frames; | ||
534 | unsigned int nr_init_grefs; | ||
535 | |||
536 | if (!is_running_on_xen()) | ||
537 | return -ENODEV; | ||
538 | |||
539 | nr_grant_frames = 1; | ||
540 | boot_max_nr_grant_frames = __max_nr_grant_frames(); | ||
541 | |||
542 | /* Determine the maximum number of frames required for the | ||
543 | * grant reference free list on the current hypervisor. | ||
544 | */ | ||
545 | max_nr_glist_frames = (boot_max_nr_grant_frames * | ||
546 | GREFS_PER_GRANT_FRAME / | ||
547 | (PAGE_SIZE / sizeof(grant_ref_t))); | ||
548 | |||
549 | gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), | ||
550 | GFP_KERNEL); | ||
551 | if (gnttab_list == NULL) | ||
552 | return -ENOMEM; | ||
553 | |||
554 | for (i = 0; i < nr_grant_frames; i++) { | ||
555 | gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); | ||
556 | if (gnttab_list[i] == NULL) | ||
557 | goto ini_nomem; | ||
558 | } | ||
559 | |||
560 | if (gnttab_resume() < 0) | ||
561 | return -ENODEV; | ||
562 | |||
563 | nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; | ||
564 | |||
565 | for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) | ||
566 | gnttab_entry(i) = i + 1; | ||
567 | |||
568 | gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; | ||
569 | gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES; | ||
570 | gnttab_free_head = NR_RESERVED_ENTRIES; | ||
571 | |||
572 | printk("Grant table initialized\n"); | ||
573 | return 0; | ||
574 | |||
575 | ini_nomem: | ||
576 | for (i--; i >= 0; i--) | ||
577 | free_page((unsigned long)gnttab_list[i]); | ||
578 | kfree(gnttab_list); | ||
579 | return -ENOMEM; | ||
580 | } | ||
581 | |||
582 | core_initcall(gnttab_init); | ||
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile new file mode 100644 index 000000000000..5571f5b84223 --- /dev/null +++ b/drivers/xen/xenbus/Makefile | |||
@@ -0,0 +1,7 @@ | |||
# Build the xenbus core as a single object (xenbus.o) composed of the
# client API, ring/event-channel comms, xenstore access and probe code.
obj-y += xenbus.o

xenbus-objs =
xenbus-objs += xenbus_client.o
xenbus-objs += xenbus_comms.o
xenbus-objs += xenbus_xs.o
xenbus-objs += xenbus_probe.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c new file mode 100644 index 000000000000..9fd2f70ab46d --- /dev/null +++ b/drivers/xen/xenbus/xenbus_client.c | |||
@@ -0,0 +1,569 @@ | |||
1 | /****************************************************************************** | ||
2 | * Client-facing interface for the Xenbus driver. In other words, the | ||
3 | * interface between the Xenbus and the device-specific code, be it the | ||
4 | * frontend or the backend of that driver. | ||
5 | * | ||
6 | * Copyright (C) 2005 XenSource Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/types.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <asm/xen/hypervisor.h> | ||
36 | #include <xen/interface/xen.h> | ||
37 | #include <xen/interface/event_channel.h> | ||
38 | #include <xen/events.h> | ||
39 | #include <xen/grant_table.h> | ||
40 | #include <xen/xenbus.h> | ||
41 | |||
42 | const char *xenbus_strstate(enum xenbus_state state) | ||
43 | { | ||
44 | static const char *const name[] = { | ||
45 | [ XenbusStateUnknown ] = "Unknown", | ||
46 | [ XenbusStateInitialising ] = "Initialising", | ||
47 | [ XenbusStateInitWait ] = "InitWait", | ||
48 | [ XenbusStateInitialised ] = "Initialised", | ||
49 | [ XenbusStateConnected ] = "Connected", | ||
50 | [ XenbusStateClosing ] = "Closing", | ||
51 | [ XenbusStateClosed ] = "Closed", | ||
52 | }; | ||
53 | return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; | ||
54 | } | ||
55 | EXPORT_SYMBOL_GPL(xenbus_strstate); | ||
56 | |||
/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		/* Undo the partial setup so the watch struct can be reused. */
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
92 | |||
93 | |||
/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt and its arguments,
 * using the given xenbus_watch structure for storage, and the given
 * @callback function as the callback.  Return 0 on success, or -errno on
 * error.  On success, the formatted path will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					  const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_KERNEL, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	/* On failure we own the path; on success the watch does. */
	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
134 | |||
135 | |||
/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given new_state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this function
	   will not work inside a Xenstore transaction (something it was
	   trying to in the past) because dev->state would not get reset if
	   the transaction was aborted.

	 */

	int current_state;
	int err;

	if (state == dev->state)
		return 0;

	/* If the state node cannot be read, treat it as "nothing to do". */
	err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d",
			   &current_state);
	if (err != 1)
		return 0;

	err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state);
	if (err) {
		if (state != XenbusStateClosing) /* Avoid looping */
			xenbus_dev_fatal(dev, err, "writing new state");
		return err;
	}

	dev->state = state;

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
185 | |||
/* Common frontend teardown tail: advertise XenbusStateClosed and wake
 * anyone waiting on dev->down.  Always returns 0. */
int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);
193 | |||
/**
 * Return the path to the error node ("error/<nodename>") for the given
 * device, or NULL on allocation failure.  If the value returned is
 * non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}
202 | |||
203 | |||
/*
 * Format and report an error: log it via dev_err() and mirror it into
 * the store under error/<nodename>/error as "<-err> <message>".  Best
 * effort — allocation or store failures only log.
 */
static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	int ret;
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	/* NOTE(review): vsnprintf returns the untruncated length, so a
	 * message that would not fit trips this BUG_ON. */
	BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

fail:	/* common cleanup — also reached on success */
	kfree(printf_buffer);
	kfree(path_buffer);
}
242 | |||
243 | |||
/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
262 | |||
/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
285 | |||
/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @ring_mfn: mfn of ring to grant

 * Grant access to the given @ring_mfn to the peer of the given device.
 * Returns gnttab_grant_foreign_access()'s result — presumably the
 * assigned grant reference (>= 0) on success, as with the other gnttab
 * grant calls; confirm against grant-table.c — or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error
 * will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
{
	int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
	if (err < 0)
		xenbus_dev_fatal(dev, err, "granting access to ring page");
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
303 | |||
304 | |||
/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
329 | |||
330 | |||
/**
 * Bind to an existing interdomain event channel in another domain.  Returns 0
 * on success and stores the local port in *port.  On error, returns -errno,
 * switches the device to XenbusStateClosing, and saves the error in XenStore.
 */
int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);
	if (err)
		xenbus_dev_fatal(dev, err,
				 "binding to event channel %d from domain %d",
				 remote_port, dev->otherend_id);
	else
		*port = bind_interdomain.local_port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
356 | |||
357 | |||
/**
 * Free an existing event channel.  Returns 0 on success or -errno on error.
 * Failure is reported via xenbus_dev_error() (non-fatal).
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
375 | |||
376 | |||
/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Based on Rusty Russell's skeleton driver's map_page.
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
 * page to that address, and sets *vaddr to that address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
	struct gnttab_map_grant_ref op = {
		.flags = GNTMAP_host_map,
		.ref   = gnt_ref,
		.dom   = dev->otherend_id,
	};
	struct vm_struct *area;

	*vaddr = NULL;

	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	op.host_addr = (unsigned long)area->addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		free_vm_area(area);
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
		return op.status;
	}

	/* Stuff the handle in an unused field; xenbus_unmap_ring_vfree()
	 * reads it back from there. */
	area->phys_addr = (unsigned long)op.handle;

	*vaddr = area->addr;
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
426 | |||
427 | |||
/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_ref: grant reference
 * @handle: pointer to grant handle to be filled
 * @vaddr: address to be mapped to
 *
 * Map a page of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!).  It only maps in the page to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM on error.  If an error is returned, device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
		    grant_handle_t *handle, void *vaddr)
{
	struct gnttab_map_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.flags     = GNTMAP_host_map,
		.ref       = gnt_ref,
		.dom       = dev->otherend_id,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay) {
		xenbus_dev_fatal(dev, op.status,
				 "mapping in shared page %d from domain %d",
				 gnt_ref, dev->otherend_id);
	} else
		*handle = op.handle;

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);
465 | |||
466 | |||
/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	struct vm_struct *area;
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
	};

	/* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
	 * method so that we don't have to muck with vmalloc internals here.
	 * We could force the user to hang on to their struct vm_struct from
	 * xenbus_map_ring_valloc, but these 6 lines considerably simplify
	 * this API.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area != NULL; area = area->next) {
		if (area->addr == vaddr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!area) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	/* The grant handle was stashed in phys_addr by
	 * xenbus_map_ring_valloc(). */
	op.handle = (grant_handle_t)area->phys_addr;

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status == GNTST_okay)
		free_vm_area(area);
	else
		/* NOTE(review): int16_t cast narrows the stored handle for
		 * display only — confirm it matches grant_handle_t's width. */
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 (int16_t)area->phys_addr, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
520 | |||
521 | |||
/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handle: grant handle
 * @vaddr: addr to unmap
 *
 * Unmap a page of memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t handle, void *vaddr)
{
	struct gnttab_unmap_grant_ref op = {
		.host_addr = (unsigned long)vaddr,
		.handle    = handle,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();

	if (op.status != GNTST_okay)
		xenbus_dev_error(dev, op.status,
				 "unmapping page at handle %d error %d",
				 handle, op.status);

	return op.status;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
551 | |||
552 | |||
/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	/* %d scan into an enum — assumes the enum is int-sized, which
	 * holds on the ABIs this driver targets. */
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c new file mode 100644 index 000000000000..6efbe3f29ca5 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_comms.c | |||
@@ -0,0 +1,233 @@ | |||
1 | /****************************************************************************** | ||
2 | * xenbus_comms.c | ||
3 | * | ||
4 | * Low level code to talks to Xen Store: ringbuffer and event channel. | ||
5 | * | ||
6 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <linux/wait.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/err.h> | ||
37 | #include <xen/xenbus.h> | ||
38 | #include <asm/xen/hypervisor.h> | ||
39 | #include <xen/events.h> | ||
40 | #include <xen/page.h> | ||
41 | #include "xenbus_comms.h" | ||
42 | |||
/* IRQ bound to the xenstore event channel; 0 until xb_init_comms() runs. */
static int xenbus_irq;

/* Deferred work that runs xenbus_probe() the first time xenstored signals. */
static DECLARE_WORK(probe_work, xenbus_probe);

/* Waiters blocked on ring space (writers) or ring data (readers). */
static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
48 | |||
49 | static irqreturn_t wake_waiting(int irq, void *unused) | ||
50 | { | ||
51 | if (unlikely(xenstored_ready == 0)) { | ||
52 | xenstored_ready = 1; | ||
53 | schedule_work(&probe_work); | ||
54 | } | ||
55 | |||
56 | wake_up(&xb_waitq); | ||
57 | return IRQ_HANDLED; | ||
58 | } | ||
59 | |||
60 | static int check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod) | ||
61 | { | ||
62 | return ((prod - cons) <= XENSTORE_RING_SIZE); | ||
63 | } | ||
64 | |||
65 | static void *get_output_chunk(XENSTORE_RING_IDX cons, | ||
66 | XENSTORE_RING_IDX prod, | ||
67 | char *buf, uint32_t *len) | ||
68 | { | ||
69 | *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod); | ||
70 | if ((XENSTORE_RING_SIZE - (prod - cons)) < *len) | ||
71 | *len = XENSTORE_RING_SIZE - (prod - cons); | ||
72 | return buf + MASK_XENSTORE_IDX(prod); | ||
73 | } | ||
74 | |||
75 | static const void *get_input_chunk(XENSTORE_RING_IDX cons, | ||
76 | XENSTORE_RING_IDX prod, | ||
77 | const char *buf, uint32_t *len) | ||
78 | { | ||
79 | *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons); | ||
80 | if ((prod - cons) < *len) | ||
81 | *len = prod - cons; | ||
82 | return buf + MASK_XENSTORE_IDX(cons); | ||
83 | } | ||
84 | |||
/**
 * xb_write - low level write
 * @data: buffer to send
 * @len: length of buffer
 *
 * Copy @len bytes into the xenstore request ring, blocking (interruptibly)
 * while the ring is full, and kicking the other end after each chunk.
 * Returns 0 on success, error otherwise.
 */
int xb_write(const void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	int rc;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		/* Block until at least one byte of ring space is free. */
		rc = wait_event_interruptible(
			xb_waitq,
			(intf->req_prod - intf->req_cons) !=
			XENSTORE_RING_SIZE);
		if (rc < 0)
			return rc;

		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		if (!check_indexes(cons, prod)) {
			/* Indexes are corrupt: reset the ring and bail out. */
			intf->req_cons = intf->req_prod = 0;
			return -EIO;
		}

		/* Contiguous free span up to the ring wrap point. */
		dst = get_output_chunk(cons, prod, intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must write data /after/ reading the consumer index. */
		mb();

		memcpy(dst, data, avail);
		data += avail;
		len -= avail;

		/* Other side must not see new producer until data is there. */
		wmb();
		intf->req_prod += avail;

		/* Implies mb(): other side will see the updated producer. */
		notify_remote_via_evtchn(xen_store_evtchn);
	}

	return 0;
}
140 | |||
141 | int xb_data_to_read(void) | ||
142 | { | ||
143 | struct xenstore_domain_interface *intf = xen_store_interface; | ||
144 | return (intf->rsp_cons != intf->rsp_prod); | ||
145 | } | ||
146 | |||
/*
 * Sleep (interruptibly) until the response ring has data to read.
 * Returns 0 once data is available, or a negative error if interrupted
 * by a signal.
 */
int xb_wait_for_data_to_read(void)
{
	return wait_event_interruptible(xb_waitq, xb_data_to_read());
}
151 | |||
/*
 * xb_read - low level read
 * @data: destination buffer
 * @len: number of bytes to read
 *
 * Copy @len bytes out of the xenstore response ring, blocking
 * (interruptibly) while the ring is empty, and kicking the other end
 * after consuming each chunk.  Returns 0 on success, error otherwise.
 */
int xb_read(void *data, unsigned len)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	XENSTORE_RING_IDX cons, prod;
	int rc;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		rc = xb_wait_for_data_to_read();
		if (rc < 0)
			return rc;

		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		if (!check_indexes(cons, prod)) {
			/* Indexes are corrupt: reset the ring and bail out. */
			intf->rsp_cons = intf->rsp_prod = 0;
			return -EIO;
		}

		/* Contiguous unread span up to the ring wrap point. */
		src = get_input_chunk(cons, prod, intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* Must read data /after/ reading the producer index. */
		rmb();

		memcpy(data, src, avail);
		data += avail;
		len -= avail;

		/* Other side must not see free space until we've copied out */
		mb();
		intf->rsp_cons += avail;

		pr_debug("Finished read of %i bytes (%i to go)\n", avail, len);

		/* Implies mb(): other side will see the updated consumer. */
		notify_remote_via_evtchn(xen_store_evtchn);
	}

	return 0;
}
199 | |||
/**
 * xb_init_comms - Set up interrupt handler off store event channel.
 *
 * Complains if the request ring still holds in-flight data, discards any
 * stale responses, and (re)binds the store event channel to
 * wake_waiting().  Returns 0 on success or a negative error from the
 * event-channel binding.
 */
int xb_init_comms(void)
{
	struct xenstore_domain_interface *intf = xen_store_interface;
	int err;

	if (intf->req_prod != intf->req_cons)
		printk(KERN_ERR "XENBUS request ring is not quiescent "
		       "(%08x:%08x)!\n", intf->req_cons, intf->req_prod);

	if (intf->rsp_prod != intf->rsp_cons) {
		printk(KERN_WARNING "XENBUS response ring is not quiescent "
		       "(%08x:%08x): fixing up\n",
		       intf->rsp_cons, intf->rsp_prod);
		/* Drop unread responses; they belong to a previous session. */
		intf->rsp_cons = intf->rsp_prod;
	}

	/* Re-entry (xenbus_irq already set): drop the previous binding. */
	if (xenbus_irq)
		unbind_from_irqhandler(xenbus_irq, &xb_waitq);

	err = bind_evtchn_to_irqhandler(
		xen_store_evtchn, wake_waiting,
		0, "xenbus", &xb_waitq);
	if (err <= 0) {
		printk(KERN_ERR "XENBUS request irq failed %i\n", err);
		return err;
	}

	xenbus_irq = err;

	return 0;
}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h new file mode 100644 index 000000000000..c21db7513736 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_comms.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Private include for xenbus communications. | ||
3 | * | ||
4 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License version 2 | ||
8 | * as published by the Free Software Foundation; or, when distributed | ||
9 | * separately from the Linux kernel or incorporated into other | ||
10 | * software packages, subject to the following license: | ||
11 | * | ||
12 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
13 | * of this source file (the "Software"), to deal in the Software without | ||
14 | * restriction, including without limitation the rights to use, copy, modify, | ||
15 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
16 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
17 | * the following conditions: | ||
18 | * | ||
19 | * The above copyright notice and this permission notice shall be included in | ||
20 | * all copies or substantial portions of the Software. | ||
21 | * | ||
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
25 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
28 | * IN THE SOFTWARE. | ||
29 | */ | ||
30 | |||
#ifndef _XENBUS_COMMS_H
#define _XENBUS_COMMS_H

/* Initialisation entry points (xs_init is implemented elsewhere). */
int xs_init(void);
int xb_init_comms(void);

/* Low level routines. */
int xb_write(const void *data, unsigned len);
int xb_read(void *data, unsigned len);
int xb_data_to_read(void);
int xb_wait_for_data_to_read(void);
int xs_input_avail(void);
/* Shared ring and event channel used to talk to xenstored. */
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;

#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c new file mode 100644 index 000000000000..0b769f7c4a48 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -0,0 +1,935 @@ | |||
1 | /****************************************************************************** | ||
2 | * Talks to Xen Store to figure out what devices we have. | ||
3 | * | ||
4 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
5 | * Copyright (C) 2005 Mike Wray, Hewlett-Packard | ||
6 | * Copyright (C) 2005, 2006 XenSource Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
/* Debug trace helper: prefixes pr_debug output with function and line. */
#define DPRINTK(fmt, args...)				\
	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)
36 | |||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/err.h> | ||
39 | #include <linux/string.h> | ||
40 | #include <linux/ctype.h> | ||
41 | #include <linux/fcntl.h> | ||
42 | #include <linux/mm.h> | ||
43 | #include <linux/notifier.h> | ||
44 | #include <linux/kthread.h> | ||
45 | #include <linux/mutex.h> | ||
46 | #include <linux/io.h> | ||
47 | |||
48 | #include <asm/page.h> | ||
49 | #include <asm/pgtable.h> | ||
50 | #include <asm/xen/hypervisor.h> | ||
51 | #include <xen/xenbus.h> | ||
52 | #include <xen/events.h> | ||
53 | #include <xen/page.h> | ||
54 | |||
55 | #include "xenbus_comms.h" | ||
56 | #include "xenbus_probe.h" | ||
57 | |||
/* Event channel for signalling the xenstore daemon/domain. */
int xen_store_evtchn;
/* Shared ring used by xb_read()/xb_write() in xenbus_comms.c. */
struct xenstore_domain_interface *xen_store_interface;
/* Machine frame number of the shared ring; NOTE(review): not used in this
   chunk -- presumably needed for (re)mapping, confirm. */
static unsigned long xen_store_mfn;

/* Notifier chain; presumably fired once the store is usable -- confirm. */
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);

/* Forward declarations for routines defined later in this file. */
static void wait_for_devices(struct xenbus_driver *xendrv);

static int xenbus_probe_frontend(const char *type, const char *name);

static void xenbus_dev_shutdown(struct device *_dev);
69 | |||
70 | /* If something in array of ids matches this device, return it. */ | ||
71 | static const struct xenbus_device_id * | ||
72 | match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev) | ||
73 | { | ||
74 | for (; *arr->devicetype != '\0'; arr++) { | ||
75 | if (!strcmp(arr->devicetype, dev->devicetype)) | ||
76 | return arr; | ||
77 | } | ||
78 | return NULL; | ||
79 | } | ||
80 | |||
81 | int xenbus_match(struct device *_dev, struct device_driver *_drv) | ||
82 | { | ||
83 | struct xenbus_driver *drv = to_xenbus_driver(_drv); | ||
84 | |||
85 | if (!drv->ids) | ||
86 | return 0; | ||
87 | |||
88 | return match_device(drv->ids, to_xenbus_device(_dev)) != NULL; | ||
89 | } | ||
90 | |||
91 | /* device/<type>/<id> => <type>-<id> */ | ||
92 | static int frontend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename) | ||
93 | { | ||
94 | nodename = strchr(nodename, '/'); | ||
95 | if (!nodename || strlen(nodename + 1) >= BUS_ID_SIZE) { | ||
96 | printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | strlcpy(bus_id, nodename + 1, BUS_ID_SIZE); | ||
101 | if (!strchr(bus_id, '/')) { | ||
102 | printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); | ||
103 | return -EINVAL; | ||
104 | } | ||
105 | *strchr(bus_id, '/') = '-'; | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | |||
/* Drop the cached other-end path; kfree(NULL) makes re-entry harmless. */
static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}
115 | |||
116 | |||
117 | static void free_otherend_watch(struct xenbus_device *dev) | ||
118 | { | ||
119 | if (dev->otherend_watch.node) { | ||
120 | unregister_xenbus_watch(&dev->otherend_watch); | ||
121 | kfree(dev->otherend_watch.node); | ||
122 | dev->otherend_watch.node = NULL; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | |||
/*
 * Read the other end's domid (@id_node) and store path (@path_node) from
 * this device's xenstore directory into @xendev.  The path string stored
 * in xendev->otherend is released later via free_otherend_details().
 * Returns 0 on success or a negative errno, reporting failures via
 * xenbus_dev_fatal().
 */
int read_otherend_details(struct xenbus_device *xendev,
			  char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}
	/* An empty or vanished path means the other end isn't really there. */
	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s. "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}

	return 0;
}
152 | |||
153 | |||
/* For frontends the "other end" is described by backend-id/backend nodes. */
static int read_backend_details(struct xenbus_device *xendev)
{
	return read_otherend_details(xendev, "backend-id", "backend");
}
158 | |||
159 | |||
/* Bus type for frontend drivers. */
static struct xen_bus_type xenbus_frontend = {
	.root = "device",	/* store subtree holding frontend devices */
	.levels = 2,		/* device/type/<id> */
	.get_bus_id = frontend_bus_id,
	.probe = xenbus_probe_frontend,
	.bus = {
		.name = "xen",	/* appears as /sys/bus/xen */
		.match = xenbus_match,
		.probe = xenbus_dev_probe,
		.remove = xenbus_dev_remove,
		.shutdown = xenbus_dev_shutdown,
	},
};
174 | |||
175 | static void otherend_changed(struct xenbus_watch *watch, | ||
176 | const char **vec, unsigned int len) | ||
177 | { | ||
178 | struct xenbus_device *dev = | ||
179 | container_of(watch, struct xenbus_device, otherend_watch); | ||
180 | struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver); | ||
181 | enum xenbus_state state; | ||
182 | |||
183 | /* Protect us against watches firing on old details when the otherend | ||
184 | details change, say immediately after a resume. */ | ||
185 | if (!dev->otherend || | ||
186 | strncmp(dev->otherend, vec[XS_WATCH_PATH], | ||
187 | strlen(dev->otherend))) { | ||
188 | dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]); | ||
189 | return; | ||
190 | } | ||
191 | |||
192 | state = xenbus_read_driver_state(dev->otherend); | ||
193 | |||
194 | dev_dbg(&dev->dev, "state is %d, (%s), %s, %s", | ||
195 | state, xenbus_strstate(state), dev->otherend_watch.node, | ||
196 | vec[XS_WATCH_PATH]); | ||
197 | |||
198 | /* | ||
199 | * Ignore xenbus transitions during shutdown. This prevents us doing | ||
200 | * work that can fail e.g., when the rootfs is gone. | ||
201 | */ | ||
202 | if (system_state > SYSTEM_RUNNING) { | ||
203 | struct xen_bus_type *bus = bus; | ||
204 | bus = container_of(dev->dev.bus, struct xen_bus_type, bus); | ||
205 | /* If we're frontend, drive the state machine to Closed. */ | ||
206 | /* This should cause the backend to release our resources. */ | ||
207 | if ((bus == &xenbus_frontend) && (state == XenbusStateClosing)) | ||
208 | xenbus_frontend_closed(dev); | ||
209 | return; | ||
210 | } | ||
211 | |||
212 | if (drv->otherend_changed) | ||
213 | drv->otherend_changed(dev, state); | ||
214 | } | ||
215 | |||
216 | |||
/*
 * Re-read the details of the device's other end, dropping any stale
 * watch and cached path first.  NOTE(review): assumes dev->dev.driver is
 * a bound xenbus driver with read_otherend_details set -- confirm at
 * the call sites.
 */
static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	return drv->read_otherend_details(dev);
}
226 | |||
227 | |||
/* Watch "<otherend>/state" so otherend_changed() fires on transitions. */
static int watch_otherend(struct xenbus_device *dev)
{
	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
				    "%s/%s", dev->otherend, "state");
}
233 | |||
234 | |||
/*
 * Bus probe hook: bind a xenbus driver to a device.  Reads the other
 * end's details, calls the driver's probe, then watches the other end's
 * state node.  On the fail path the device is switched to Closed and
 * -ENODEV is returned; NOTE(review): the specific error in "err" is not
 * propagated there -- confirm that is intentional.
 */
int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;

	DPRINTK("%s", dev->nodename);

	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}

	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}

	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	err = drv->probe(dev, id);
	if (err)
		goto fail;

	/* NOTE(review): a failure here does not unwind drv->probe(). */
	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	return 0;
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	xenbus_switch_state(dev, XenbusStateClosed);
	return -ENODEV;
}
279 | |||
/*
 * Bus remove hook: detach the driver from a device.  The other-end watch
 * is torn down first so no callbacks arrive during driver removal, then
 * the device is moved to Closed.
 */
int xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

	DPRINTK("%s", dev->nodename);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	if (drv->remove)
		drv->remove(dev);

	xenbus_switch_state(dev, XenbusStateClosed);
	return 0;
}
296 | |||
/*
 * Bus shutdown hook: ask a connected device to close and wait (up to 5s)
 * for the close to complete so the backend can release resources before
 * the machine goes down.
 */
static void xenbus_dev_shutdown(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	unsigned long timeout = 5*HZ;

	DPRINTK("%s", dev->nodename);

	get_device(&dev->dev);	/* keep the device alive across the wait */
	if (dev->state != XenbusStateConnected) {
		printk(KERN_INFO "%s: %s: %s != Connected, skipping\n", __func__,
		       dev->nodename, xenbus_strstate(dev->state));
		goto out;
	}
	xenbus_switch_state(dev, XenbusStateClosing);
	/* NOTE(review): dev->down is presumably completed elsewhere once the
	   device has closed -- confirm which path completes it. */
	timeout = wait_for_completion_timeout(&dev->down, timeout);
	if (!timeout)
		printk(KERN_INFO "%s: %s timeout closing device\n",
		       __func__, dev->nodename);
out:
	put_device(&dev->dev);
}
318 | |||
/*
 * Fill in the generic driver fields shared by frontend and backend
 * registration and hand the driver to the device core.
 */
int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus,
				  struct module *owner,
				  const char *mod_name)
{
	drv->driver.name = drv->name;
	drv->driver.bus = &bus->bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	return driver_register(&drv->driver);
}
331 | |||
/*
 * Register a frontend driver: wire up the frontend-specific
 * read_otherend_details hook, register on the frontend bus, then wait
 * for the driver's devices to attach.
 */
int __xenbus_register_frontend(struct xenbus_driver *drv,
			       struct module *owner, const char *mod_name)
{
	int ret;

	drv->read_otherend_details = read_backend_details;

	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
					    owner, mod_name);
	if (ret)
		return ret;

	/* If this driver is loaded as a module wait for devices to attach. */
	wait_for_devices(drv);

	return 0;
}
EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
350 | |||
/* Unregister @drv from the device core; inverse of registration. */
void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
356 | |||
/* Search context passed through bus_for_each_dev() callbacks. */
struct xb_find_info
{
	struct xenbus_device *dev;	/* out: match (reference held), or NULL */
	const char *nodename;		/* in: xenstore path to look for */
};
362 | |||
363 | static int cmp_dev(struct device *dev, void *data) | ||
364 | { | ||
365 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
366 | struct xb_find_info *info = data; | ||
367 | |||
368 | if (!strcmp(xendev->nodename, info->nodename)) { | ||
369 | info->dev = xendev; | ||
370 | get_device(dev); | ||
371 | return 1; | ||
372 | } | ||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | struct xenbus_device *xenbus_device_find(const char *nodename, | ||
377 | struct bus_type *bus) | ||
378 | { | ||
379 | struct xb_find_info info = { .dev = NULL, .nodename = nodename }; | ||
380 | |||
381 | bus_for_each_dev(bus, NULL, &info, cmp_dev); | ||
382 | return info.dev; | ||
383 | } | ||
384 | |||
385 | static int cleanup_dev(struct device *dev, void *data) | ||
386 | { | ||
387 | struct xenbus_device *xendev = to_xenbus_device(dev); | ||
388 | struct xb_find_info *info = data; | ||
389 | int len = strlen(info->nodename); | ||
390 | |||
391 | DPRINTK("%s", info->nodename); | ||
392 | |||
393 | /* Match the info->nodename path, or any subdirectory of that path. */ | ||
394 | if (strncmp(xendev->nodename, info->nodename, len)) | ||
395 | return 0; | ||
396 | |||
397 | /* If the node name is longer, ensure it really is a subdirectory. */ | ||
398 | if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/')) | ||
399 | return 0; | ||
400 | |||
401 | info->dev = xendev; | ||
402 | get_device(dev); | ||
403 | return 1; | ||
404 | } | ||
405 | |||
406 | static void xenbus_cleanup_devices(const char *path, struct bus_type *bus) | ||
407 | { | ||
408 | struct xb_find_info info = { .nodename = path }; | ||
409 | |||
410 | do { | ||
411 | info.dev = NULL; | ||
412 | bus_for_each_dev(bus, NULL, &info, cleanup_dev); | ||
413 | if (info.dev) { | ||
414 | device_unregister(&info.dev->dev); | ||
415 | put_device(&info.dev->dev); | ||
416 | } | ||
417 | } while (info.dev); | ||
418 | } | ||
419 | |||
/* Device refcount hit zero: free the containing xenbus_device (the
   nodename/devicetype strings live in the same allocation). */
static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}
425 | |||
/* sysfs "nodename" attribute: the device's xenstore path. */
static ssize_t xendev_show_nodename(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
432 | |||
/* sysfs "devtype" attribute: the device's xenbus type (e.g. vif, vbd). */
static ssize_t xendev_show_devtype(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
439 | |||
440 | |||
/*
 * Create and register a struct xenbus_device for the store node
 * @nodename of device type @type on @bus.  Only nodes still in
 * XenbusStateInitialising are instantiated; anything else is on its way
 * out and is ignored.  Returns 0 or a negative errno.
 */
int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;

	enum xenbus_state state = xenbus_read_driver_state(nodename);

	if (state != XenbusStateInitialising) {
		/* Device is not new, so ignore it. This can happen if a
		   device is going away after switching to Closed. */
		return 0;
	}

	/* Both strings share one allocation directly after the struct. */
	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;

	xendev->state = XenbusStateInitialising;

	/* Copy the strings into the extra space. */

	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;

	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);

	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;

	err = bus->get_bus_id(xendev->dev.bus_id, xendev->nodename);
	if (err)
		goto fail;

	/* Register with generic device framework. */
	/* NOTE(review): once device_register() has run, the release callback
	   normally owns xendev; the direct kfree on the fail path (reached
	   both here and via fail_unregister) assumes that is still safe --
	   confirm against the driver core's refcounting rules. */
	err = device_register(&xendev->dev);
	if (err)
		goto fail;

	err = device_create_file(&xendev->dev, &dev_attr_nodename);
	if (err)
		goto fail_unregister;

	err = device_create_file(&xendev->dev, &dev_attr_devtype);
	if (err)
		goto fail_remove_file;

	return 0;
fail_remove_file:
	device_remove_file(&xendev->dev, &dev_attr_nodename);
fail_unregister:
	device_unregister(&xendev->dev);
fail:
	kfree(xendev);
	return err;
}
505 | |||
506 | /* device/<typename>/<name> */ | ||
507 | static int xenbus_probe_frontend(const char *type, const char *name) | ||
508 | { | ||
509 | char *nodename; | ||
510 | int err; | ||
511 | |||
512 | nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", | ||
513 | xenbus_frontend.root, type, name); | ||
514 | if (!nodename) | ||
515 | return -ENOMEM; | ||
516 | |||
517 | DPRINTK("%s", nodename); | ||
518 | |||
519 | err = xenbus_probe_node(&xenbus_frontend, type, nodename); | ||
520 | kfree(nodename); | ||
521 | return err; | ||
522 | } | ||
523 | |||
524 | static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type) | ||
525 | { | ||
526 | int err = 0; | ||
527 | char **dir; | ||
528 | unsigned int dir_n = 0; | ||
529 | int i; | ||
530 | |||
531 | dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n); | ||
532 | if (IS_ERR(dir)) | ||
533 | return PTR_ERR(dir); | ||
534 | |||
535 | for (i = 0; i < dir_n; i++) { | ||
536 | err = bus->probe(type, dir[i]); | ||
537 | if (err) | ||
538 | break; | ||
539 | } | ||
540 | kfree(dir); | ||
541 | return err; | ||
542 | } | ||
543 | |||
544 | int xenbus_probe_devices(struct xen_bus_type *bus) | ||
545 | { | ||
546 | int err = 0; | ||
547 | char **dir; | ||
548 | unsigned int i, dir_n; | ||
549 | |||
550 | dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n); | ||
551 | if (IS_ERR(dir)) | ||
552 | return PTR_ERR(dir); | ||
553 | |||
554 | for (i = 0; i < dir_n; i++) { | ||
555 | err = xenbus_probe_device_type(bus, dir[i]); | ||
556 | if (err) | ||
557 | break; | ||
558 | } | ||
559 | kfree(dir); | ||
560 | return err; | ||
561 | } | ||
562 | |||
/* Count occurrences of the character @c in the NUL-terminated @str. */
static unsigned int char_count(const char *str, char c)
{
	unsigned int count = 0;
	const char *p;

	for (p = str; *p != '\0'; p++)
		count += (*p == c);
	return count;
}
572 | |||
/*
 * Return the index of the (@len+1)'th occurrence of @c in @str; if the
 * string contains exactly @len occurrences, return its length; with
 * fewer than @len occurrences, return -ERANGE.
 */
static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int i = 0;

	while (str[i] != '\0') {
		if (str[i] == c) {
			if (len == 0)
				return i;
			len--;
		}
		i++;
	}
	return (len == 0) ? i : -ERANGE;
}
585 | |||
/*
 * React to a watch event somewhere under @bus->root: register the device
 * if its subtree appeared, or tear down devices whose subtree vanished.
 * @node is the store path that fired, e.g. "device/vif/0/state".
 */
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[BUS_ID_SIZE];
	const char *p, *root;

	/* Too shallow to name a device ("<root>/<type>/<id>" needed). */
	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[BUS_ID_SIZE-1] = '\0';

	/* Truncate the fired path back to the device's root directory. */
	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	/* Only create a device if one isn't already registered there. */
	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		xenbus_probe_node(bus, type, root);
	else
		put_device(&dev->dev);

	kfree(root);
}
622 | |||
/* Watch callback: a node under "device" changed; re-examine that subtree. */
static void frontend_changed(struct xenbus_watch *watch,
			     const char **vec, unsigned int len)
{
	DPRINTK("");

	/* vec[XS_WATCH_PATH] is the full path of the changed node. */
	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
}

/* We watch for devices appearing and vanishing. */
static struct xenbus_watch fe_watch = {
	.node = "device",
	.callback = frontend_changed,
};
636 | |||
/*
 * bus_for_each_dev() callback: invoke the driver's suspend hook, if any.
 * Always returns 0 so iteration continues even if one device fails;
 * failures are only logged.
 */
static int suspend_dev(struct device *dev, void *data)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev;

	DPRINTK("");

	/* Unbound devices have nothing to suspend. */
	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	xdev = container_of(dev, struct xenbus_device, dev);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		printk(KERN_WARNING
		       "xenbus: suspend %s failed: %i\n", dev->bus_id, err);
	return 0;
}

/*
 * bus_for_each_dev() callback: a suspend was aborted; give the driver a
 * chance to undo its suspend work.  Failures are logged, never fatal.
 */
static int suspend_cancel_dev(struct device *dev, void *data)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev;

	DPRINTK("");

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	xdev = container_of(dev, struct xenbus_device, dev);
	if (drv->suspend_cancel)
		err = drv->suspend_cancel(xdev);
	if (err)
		printk(KERN_WARNING
		       "xenbus: suspend_cancel %s failed: %i\n",
		       dev->bus_id, err);
	return 0;
}

/*
 * bus_for_each_dev() callback on resume: re-establish the connection to
 * the other end, reset the handshake state, run the driver's resume hook,
 * then re-register the otherend watch.  Unlike suspend, errors here are
 * propagated (stopping the iteration).
 */
static int resume_dev(struct device *dev, void *data)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev;

	DPRINTK("");

	if (dev->driver == NULL)
		return 0;

	drv = to_xenbus_driver(dev->driver);
	xdev = container_of(dev, struct xenbus_device, dev);

	err = talk_to_otherend(xdev);
	if (err) {
		printk(KERN_WARNING
		       "xenbus: resume (talk_to_otherend) %s failed: %i\n",
		       dev->bus_id, err);
		return err;
	}

	/* Restart the connection handshake from scratch. */
	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			printk(KERN_WARNING
			       "xenbus: resume %s failed: %i\n",
			       dev->bus_id, err);
			return err;
		}
	}

	err = watch_otherend(xdev);
	if (err) {
		printk(KERN_WARNING
		       "xenbus_probe: resume (watch_otherend) %s failed: "
		       "%d.\n", dev->bus_id, err);
		return err;
	}

	return 0;
}
722 | |||
/*
 * Suspend all xenbus devices (frontend then backend) and finally the
 * xenstore connection itself.  Order matters: devices are quiesced
 * before xenstore traffic is stopped.
 */
void xenbus_suspend(void)
{
	DPRINTK("");

	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
	xenbus_backend_suspend(suspend_dev);
	xs_suspend();
}
EXPORT_SYMBOL_GPL(xenbus_suspend);

/* Reverse of xenbus_suspend(): bring comms back first, then devices. */
void xenbus_resume(void)
{
	xb_init_comms();
	xs_resume();
	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
	xenbus_backend_resume(resume_dev);
}
EXPORT_SYMBOL_GPL(xenbus_resume);

/*
 * A suspend was aborted: unwind it.  Note the backend iterator helper is
 * reused here with the cancel callback (it just walks the backend bus).
 */
void xenbus_suspend_cancel(void)
{
	xs_suspend_cancel();
	bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
	xenbus_backend_resume(suspend_cancel_dev);
}
EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
749 | |||
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;


/*
 * Register for notification that xenstore is up.  If it already is, the
 * callback is invoked immediately and its return value is passed back;
 * otherwise the block is queued on xenstore_chain and 0 is returned.
 */
int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);

/* Remove a previously registered (and not yet fired) notifier. */
void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
772 | |||
/*
 * Enumerate all existing devices and start watching for new ones.
 * Must only run once xenstored is up (hence the BUG_ON).  The
 * work_struct argument is unused; it exists so this can be queued
 * as deferred work.
 */
void xenbus_probe(struct work_struct *unused)
{
	BUG_ON((xenstored_ready <= 0));

	/* Enumerate devices in xenstore and watch for changes. */
	xenbus_probe_devices(&xenbus_frontend);
	register_xenbus_watch(&fe_watch);
	xenbus_backend_probe_and_watch();

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}
785 | |||
/*
 * One-time initialisation: register the frontend (and optionally backend)
 * bus types, locate the shared xenstore ring page, start the xenstore
 * comms threads, and — for a domU, where xenstored already exists —
 * probe the device tree immediately.  Uses goto-based unwind on failure.
 */
static int __init xenbus_probe_init(void)
{
	int err = 0;

	DPRINTK("");

	err = -ENODEV;
	if (!is_running_on_xen())
		goto out_error;

	/* Register ourselves with the kernel bus subsystem */
	err = bus_register(&xenbus_frontend.bus);
	if (err)
		goto out_error;

	err = xenbus_backend_bus_register();
	if (err)
		goto out_unreg_front;

	/*
	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
	 */
	if (is_initial_xendomain()) {
		/* dom0 not yet supported */
	} else {
		/* domU: the hypervisor start info tells us where the store is. */
		xenstored_ready = 1;
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_mfn = xen_start_info->store_mfn;
	}
	xen_store_interface = mfn_to_virt(xen_store_mfn);

	/* Initialize the interface to xenstore. */
	err = xs_init();
	if (err) {
		printk(KERN_WARNING
		       "XENBUS: Error initializing xenstore comms: %i\n", err);
		goto out_unreg_back;
	}

	if (!is_initial_xendomain())
		xenbus_probe(NULL);

	return 0;

out_unreg_back:
	xenbus_backend_bus_unregister();

out_unreg_front:
	bus_unregister(&xenbus_frontend.bus);

out_error:
	return err;
}

/* Early enough that frontend drivers (regular initcalls) find the bus. */
postcore_initcall(xenbus_probe_init);

MODULE_LICENSE("GPL");
843 | |||
/*
 * bus_for_each_dev() predicate: non-zero if this bound device has not yet
 * reached XenbusStateConnected.  'data' optionally restricts the check to
 * one driver.
 */
static int is_disconnected_device(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct device_driver *drv = data;

	/*
	 * A device with no driver will never connect. We care only about
	 * devices which should currently be in the process of connecting.
	 */
	if (!dev->driver)
		return 0;

	/* Is this search limited to a particular driver? */
	if (drv && (dev->driver != drv))
		return 0;

	return (xendev->state != XenbusStateConnected);
}

/*
 * Non-zero if any device (optionally bound to 'drv') is still
 * disconnected; bus_for_each_dev() stops at the first such device.
 */
static int exists_disconnected_device(struct device_driver *drv)
{
	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
				is_disconnected_device);
}
868 | |||
/*
 * bus_for_each_dev() callback: log the status of devices that are
 * driverless or still not connected after the wait timeout.
 */
static int print_device_status(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct device_driver *drv = data;

	/* Is this operation limited to a particular driver? */
	if (drv && (dev->driver != drv))
		return 0;

	if (!dev->driver) {
		/* Information only: is this too noisy? */
		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
		       xendev->nodename);
	} else if (xendev->state != XenbusStateConnected) {
		printk(KERN_WARNING "XENBUS: Timeout connecting "
		       "to device: %s (state %d)\n",
		       xendev->nodename, xendev->state);
	}

	return 0;
}

/* We only wait for device setup after most initcalls have run. */
static int ready_to_wait_for_devices;
893 | |||
/*
 * On a 10 second timeout, wait for all devices currently configured. We need
 * to do this to guarantee that the filesystems and / or network devices
 * needed for boot are available, before we can allow the boot to proceed.
 *
 * This needs to be on a late_initcall, to happen after the frontend device
 * drivers have been initialised, but before the root fs is mounted.
 *
 * A possible improvement here would be to have the tools add a per-device
 * flag to the store entry, indicating whether it is needed at boot time.
 * This would allow people who knew what they were doing to accelerate their
 * boot slightly, but of course needs tools or manual intervention to set up
 * those flags correctly.
 *
 * 'xendrv' may be NULL, in which case all frontend devices are waited for;
 * otherwise only devices bound to that driver.
 */
static void wait_for_devices(struct xenbus_driver *xendrv)
{
	unsigned long timeout = jiffies + 10*HZ;
	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;

	/* No-op before late_initcall time or when not running on Xen. */
	if (!ready_to_wait_for_devices || !is_running_on_xen())
		return;

	/* Poll every 100ms until everything connects or we time out. */
	while (exists_disconnected_device(drv)) {
		if (time_after(jiffies, timeout))
			break;
		schedule_timeout_interruptible(HZ/10);
	}

	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
			 print_device_status);
}
925 | |||
#ifndef MODULE
/*
 * Built-in only: block the late stages of boot until the frontend
 * devices needed for the root filesystem have had a chance to connect.
 */
static int __init boot_wait_for_devices(void)
{
	ready_to_wait_for_devices = 1;
	wait_for_devices(NULL);
	return 0;
}

late_initcall(boot_wait_for_devices);
#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h new file mode 100644 index 000000000000..e09b19415a40 --- /dev/null +++ b/drivers/xen/xenbus/xenbus_probe.h | |||
@@ -0,0 +1,74 @@ | |||
1 | /****************************************************************************** | ||
2 | * xenbus_probe.h | ||
3 | * | ||
4 | * Talks to Xen Store to figure out what devices we have. | ||
5 | * | ||
6 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
7 | * Copyright (C) 2005 XenSource Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
#ifndef _XENBUS_PROBE_H
#define _XENBUS_PROBE_H

/*
 * Backend-support hooks.  When CONFIG_XEN_BACKEND is off they collapse
 * to empty inline stubs so xenbus_probe.c needs no #ifdefs of its own.
 */
#ifdef CONFIG_XEN_BACKEND
extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
extern void xenbus_backend_probe_and_watch(void);
extern int xenbus_backend_bus_register(void);
extern void xenbus_backend_bus_unregister(void);
#else
static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
static inline void xenbus_backend_probe_and_watch(void) {}
static inline int xenbus_backend_bus_register(void) { return 0; }
static inline void xenbus_backend_bus_unregister(void) {}
#endif

/* Per-bus (frontend/backend) description wrapping a Linux bus_type. */
struct xen_bus_type
{
	char *root;		/* xenstore root node, e.g. "device" */
	unsigned int levels;	/* path depth below root identifying a device */
	int (*get_bus_id)(char bus_id[BUS_ID_SIZE], const char *nodename);
	int (*probe)(const char *type, const char *dir);
	struct bus_type bus;
};

/* Shared implementation between the frontend and backend probe code. */
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
					 struct xen_bus_type *bus,
					 struct module *owner,
					 const char *mod_name);
extern int xenbus_probe_node(struct xen_bus_type *bus,
			     const char *type,
			     const char *nodename);
extern int xenbus_probe_devices(struct xen_bus_type *bus);

extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);

#endif
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c new file mode 100644 index 000000000000..9e943fbce81b --- /dev/null +++ b/drivers/xen/xenbus/xenbus_xs.c | |||
@@ -0,0 +1,861 @@ | |||
1 | /****************************************************************************** | ||
2 | * xenbus_xs.c | ||
3 | * | ||
4 | * This is the kernel equivalent of the "xs" library. We don't need everything | ||
5 | * and we use xenbus_comms for communication. | ||
6 | * | ||
7 | * Copyright (C) 2005 Rusty Russell, IBM Corporation | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License version 2 | ||
11 | * as published by the Free Software Foundation; or, when distributed | ||
12 | * separately from the Linux kernel or incorporated into other | ||
13 | * software packages, subject to the following license: | ||
14 | * | ||
15 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
16 | * of this source file (the "Software"), to deal in the Software without | ||
17 | * restriction, including without limitation the rights to use, copy, modify, | ||
18 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
19 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
20 | * the following conditions: | ||
21 | * | ||
22 | * The above copyright notice and this permission notice shall be included in | ||
23 | * all copies or substantial portions of the Software. | ||
24 | * | ||
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
26 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
27 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
28 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
29 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
30 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
31 | * IN THE SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include <linux/unistd.h> | ||
35 | #include <linux/errno.h> | ||
36 | #include <linux/types.h> | ||
37 | #include <linux/uio.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/string.h> | ||
40 | #include <linux/err.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/fcntl.h> | ||
43 | #include <linux/kthread.h> | ||
44 | #include <linux/rwsem.h> | ||
45 | #include <linux/module.h> | ||
46 | #include <linux/mutex.h> | ||
47 | #include <xen/xenbus.h> | ||
48 | #include "xenbus_comms.h" | ||
49 | |||
/* A message (reply or watch event) queued for later processing. */
struct xs_stored_msg {
	struct list_head list;

	/* Wire header as received from xenstored. */
	struct xsd_sockmsg hdr;

	union {
		/* Queued replies. */
		struct {
			char *body;
		} reply;

		/* Queued watch events. */
		struct {
			struct xenbus_watch *handle;
			char **vec;
			unsigned int vec_size;
		} watch;
	} u;
};

/* Global connection state for talking to xenstored. */
struct xs_handle {
	/* A list of replies. Currently only one will ever be outstanding. */
	struct list_head reply_list;
	spinlock_t reply_lock;
	wait_queue_head_t reply_waitq;

	/*
	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
	 * response_mutex is never taken simultaneously with the other three.
	 */

	/* One request at a time. */
	struct mutex request_mutex;

	/* Protect xenbus reader thread against save/restore. */
	struct mutex response_mutex;

	/* Protect transactions against save/restore. */
	struct rw_semaphore transaction_mutex;

	/* Protect watch (de)register against save/restore. */
	struct rw_semaphore watch_mutex;
};

static struct xs_handle xs_state;

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);
113 | |||
/*
 * Map an error string from xenstored to a POSITIVE errno value (callers
 * negate it, e.g. ERR_PTR(-err) in xs_talkv()).  Unknown strings are
 * logged and mapped to EINVAL.
 */
static int get_error(const char *errorstring)
{
	unsigned int i;

	/* Linear scan of the xsd_errors table; bail out on the last slot. */
	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			printk(KERN_WARNING
			       "XENBUS xen store gave: unknown error %s",
			       errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}
128 | |||
/*
 * Dequeue the next reply from xenstored, sleeping until one arrives.
 * Returns the kmalloc'ed body (caller frees) and fills in *type and,
 * if non-NULL, *len from the message header.
 */
static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;

	spin_lock(&xs_state.reply_lock);

	/* Drop the lock while sleeping; re-take it to re-check the list. */
	while (list_empty(&xs_state.reply_list)) {
		spin_unlock(&xs_state.reply_lock);
		/* XXX FIXME: Avoid synchronous wait for response here. */
		wait_event(xs_state.reply_waitq,
			   !list_empty(&xs_state.reply_list));
		spin_lock(&xs_state.reply_lock);
	}

	msg = list_entry(xs_state.reply_list.next,
			 struct xs_stored_msg, list);
	list_del(&msg->list);

	spin_unlock(&xs_state.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	/* The container is ours to free; the body is returned to the caller. */
	kfree(msg);

	return body;
}
159 | |||
/*
 * Forward a raw request (e.g. from the /dev interface) to xenstored and
 * return the reply body, updating msg->type/len in place.  Transaction
 * lifetimes are tracked against the transaction rwsem: a successful
 * TRANSACTION_START holds a read lock that is released when the matching
 * TRANSACTION_END arrives (or when the start itself fails).
 */
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
	void *ret;
	struct xsd_sockmsg req_msg = *msg;
	int err;

	if (req_msg.type == XS_TRANSACTION_START)
		down_read(&xs_state.transaction_mutex);

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(msg, sizeof(*msg) + msg->len);
	if (err) {
		msg->type = XS_ERROR;
		ret = ERR_PTR(err);
	} else
		ret = read_reply(&msg->type, &msg->len);

	mutex_unlock(&xs_state.request_mutex);

	/* Release the transaction lock on END, or on a failed START. */
	if ((msg->type == XS_TRANSACTION_END) ||
	    ((req_msg.type == XS_TRANSACTION_START) &&
	     (msg->type == XS_ERROR)))
		up_read(&xs_state.transaction_mutex);

	return ret;
}
187 | |||
/* Send message to xs, get kmalloc'ed reply.  ERR_PTR() on error.
 *
 * Builds a header whose length is the sum of the iovec lengths, writes
 * header plus payload under request_mutex, then blocks for the reply.
 * XS_ERROR replies are converted to ERR_PTR(-errno); a reply of an
 * unexpected type is treated as -EINVAL.
 */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		/* get_error() returns a positive errno; negate for ERR_PTR. */
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "XENBUS unexpected type [%d], expected [%d]\n",
			       msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}
246 | |||
247 | /* Simplified version of xs_talkv: single message. */ | ||
248 | static void *xs_single(struct xenbus_transaction t, | ||
249 | enum xsd_sockmsg_type type, | ||
250 | const char *string, | ||
251 | unsigned int *len) | ||
252 | { | ||
253 | struct kvec iovec; | ||
254 | |||
255 | iovec.iov_base = (void *)string; | ||
256 | iovec.iov_len = strlen(string) + 1; | ||
257 | return xs_talkv(t, type, &iovec, 1, len); | ||
258 | } | ||
259 | |||
/*
 * Many commands only need an ack, don't care what it says: free the
 * reply body and return 0, or return the error code unchanged.
 */
static int xs_error(char *reply)
{
	int err = 0;

	if (IS_ERR(reply))
		err = PTR_ERR(reply);
	else
		kfree(reply);
	return err;
}
268 | |||
/* Count the NUL-terminated strings packed into a buffer of 'len' bytes. */
static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num = 0;
	const char *p = strings;
	const char *end = strings + len;

	while (p < end) {
		num++;
		p += strlen(p) + 1;
	}

	return num;
}
279 | |||
280 | /* Return the path to dir with /name appended. Buffer must be kfree()'ed. */ | ||
281 | static char *join(const char *dir, const char *name) | ||
282 | { | ||
283 | char *buffer; | ||
284 | |||
285 | if (strlen(name) == 0) | ||
286 | buffer = kasprintf(GFP_KERNEL, "%s", dir); | ||
287 | else | ||
288 | buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name); | ||
289 | return (!buffer) ? ERR_PTR(-ENOMEM) : buffer; | ||
290 | } | ||
291 | |||
/*
 * Convert a packed buffer of NUL-terminated strings into a NULL-pointer
 * array pointing into a single allocation (so one kfree() frees it all).
 * Consumes 'strings' (always kfree'd).  ERR_PTR(-ENOMEM) on failure.
 */
static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	/* String data lives immediately after the pointer array. */
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	/* Re-walk the copied data, recording a pointer to each string. */
	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}
314 | |||
/*
 * List the children of dir/node.  Returns a kmalloc'ed pointer array
 * (free with a single kfree()) with *num entries, or ERR_PTR on error.
 */
char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	/* split() consumes 'strings' whether it succeeds or fails. */
	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);
333 | |||
334 | /* Check if a path exists. Return 1 if it does. */ | ||
335 | int xenbus_exists(struct xenbus_transaction t, | ||
336 | const char *dir, const char *node) | ||
337 | { | ||
338 | char **d; | ||
339 | int dir_n; | ||
340 | |||
341 | d = xenbus_directory(t, dir, node, &dir_n); | ||
342 | if (IS_ERR(d)) | ||
343 | return 0; | ||
344 | kfree(d); | ||
345 | return 1; | ||
346 | } | ||
347 | EXPORT_SYMBOL_GPL(xenbus_exists); | ||
348 | |||
/* Get the value of a single file.
 * Returns a kmalloced value: call free() on it after use.
 * len indicates length in bytes.
 * Returns ERR_PTR on failure (path allocation or xenstore error).
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);
368 | |||
/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Path is sent NUL-terminated; the value deliberately is not
	 * (its length is given by the header instead). */
	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);
393 | |||
394 | /* Create a new directory. */ | ||
395 | int xenbus_mkdir(struct xenbus_transaction t, | ||
396 | const char *dir, const char *node) | ||
397 | { | ||
398 | char *path; | ||
399 | int ret; | ||
400 | |||
401 | path = join(dir, node); | ||
402 | if (IS_ERR(path)) | ||
403 | return PTR_ERR(path); | ||
404 | |||
405 | ret = xs_error(xs_single(t, XS_MKDIR, path, NULL)); | ||
406 | kfree(path); | ||
407 | return ret; | ||
408 | } | ||
409 | EXPORT_SYMBOL_GPL(xenbus_mkdir); | ||
410 | |||
411 | /* Destroy a file or directory (directories must be empty). */ | ||
412 | int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node) | ||
413 | { | ||
414 | char *path; | ||
415 | int ret; | ||
416 | |||
417 | path = join(dir, node); | ||
418 | if (IS_ERR(path)) | ||
419 | return PTR_ERR(path); | ||
420 | |||
421 | ret = xs_error(xs_single(t, XS_RM, path, NULL)); | ||
422 | kfree(path); | ||
423 | return ret; | ||
424 | } | ||
425 | EXPORT_SYMBOL_GPL(xenbus_rm); | ||
426 | |||
/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 *
 * On success, holds a read lock on transaction_mutex (released by
 * xenbus_transaction_end()) and stores the transaction id in *t.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	down_read(&xs_state.transaction_mutex);

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str)) {
		/* Failed to start: drop the lock we optimistically took. */
		up_read(&xs_state.transaction_mutex);
		return PTR_ERR(id_str);
	}

	/* xenstored replies with the transaction id as a decimal string. */
	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);
447 | |||
448 | /* End a transaction. | ||
449 | * If abandon is true, transaction is discarded instead of committed. | ||
450 | */ | ||
451 | int xenbus_transaction_end(struct xenbus_transaction t, int abort) | ||
452 | { | ||
453 | char abortstr[2]; | ||
454 | int err; | ||
455 | |||
456 | if (abort) | ||
457 | strcpy(abortstr, "F"); | ||
458 | else | ||
459 | strcpy(abortstr, "T"); | ||
460 | |||
461 | err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); | ||
462 | |||
463 | up_read(&xs_state.transaction_mutex); | ||
464 | |||
465 | return err; | ||
466 | } | ||
467 | EXPORT_SYMBOL_GPL(xenbus_transaction_end); | ||
468 | |||
/* Single read and scanf: returns -errno or num scanned.
 * A successful read that matches zero conversions yields -ERANGE so
 * callers can distinguish "file missing" from "file malformed".
 */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);
491 | |||
/* Single printf and write: returns -errno or 0.
 * Formats into a temporary 4KiB buffer; output that would not fit
 * triggers BUG_ON rather than silent truncation.
 */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
#define PRINTF_BUFFER_SIZE 4096
	char *printf_buffer;

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		return -ENOMEM;

	va_start(ap, fmt);
	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
	va_end(ap);

	/* vsnprintf returns the would-be length; > SIZE-1 means truncated. */
	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
	ret = xenbus_write(t, dir, node, printf_buffer);

	kfree(printf_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);
517 | |||
518 | /* Takes tuples of names, scanf-style args, and void **, NULL terminated. */ | ||
519 | int xenbus_gather(struct xenbus_transaction t, const char *dir, ...) | ||
520 | { | ||
521 | va_list ap; | ||
522 | const char *name; | ||
523 | int ret = 0; | ||
524 | |||
525 | va_start(ap, dir); | ||
526 | while (ret == 0 && (name = va_arg(ap, char *)) != NULL) { | ||
527 | const char *fmt = va_arg(ap, char *); | ||
528 | void *result = va_arg(ap, void *); | ||
529 | char *p; | ||
530 | |||
531 | p = xenbus_read(t, dir, name, NULL); | ||
532 | if (IS_ERR(p)) { | ||
533 | ret = PTR_ERR(p); | ||
534 | break; | ||
535 | } | ||
536 | if (fmt) { | ||
537 | if (sscanf(p, fmt, result) == 0) | ||
538 | ret = -EINVAL; | ||
539 | kfree(p); | ||
540 | } else | ||
541 | *(char **)result = p; | ||
542 | } | ||
543 | va_end(ap); | ||
544 | return ret; | ||
545 | } | ||
546 | EXPORT_SYMBOL_GPL(xenbus_gather); | ||
547 | |||
548 | static int xs_watch(const char *path, const char *token) | ||
549 | { | ||
550 | struct kvec iov[2]; | ||
551 | |||
552 | iov[0].iov_base = (void *)path; | ||
553 | iov[0].iov_len = strlen(path) + 1; | ||
554 | iov[1].iov_base = (void *)token; | ||
555 | iov[1].iov_len = strlen(token) + 1; | ||
556 | |||
557 | return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov, | ||
558 | ARRAY_SIZE(iov), NULL)); | ||
559 | } | ||
560 | |||
561 | static int xs_unwatch(const char *path, const char *token) | ||
562 | { | ||
563 | struct kvec iov[2]; | ||
564 | |||
565 | iov[0].iov_base = (char *)path; | ||
566 | iov[0].iov_len = strlen(path) + 1; | ||
567 | iov[1].iov_base = (char *)token; | ||
568 | iov[1].iov_len = strlen(token) + 1; | ||
569 | |||
570 | return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov, | ||
571 | ARRAY_SIZE(iov), NULL)); | ||
572 | } | ||
573 | |||
574 | static struct xenbus_watch *find_watch(const char *token) | ||
575 | { | ||
576 | struct xenbus_watch *i, *cmp; | ||
577 | |||
578 | cmp = (void *)simple_strtoul(token, NULL, 16); | ||
579 | |||
580 | list_for_each_entry(i, &watches, list) | ||
581 | if (i == cmp) | ||
582 | return i; | ||
583 | |||
584 | return NULL; | ||
585 | } | ||
586 | |||
587 | /* Register callback to watch this node. */ | ||
588 | int register_xenbus_watch(struct xenbus_watch *watch) | ||
589 | { | ||
590 | /* Pointer in ascii is the token. */ | ||
591 | char token[sizeof(watch) * 2 + 1]; | ||
592 | int err; | ||
593 | |||
594 | sprintf(token, "%lX", (long)watch); | ||
595 | |||
596 | down_read(&xs_state.watch_mutex); | ||
597 | |||
598 | spin_lock(&watches_lock); | ||
599 | BUG_ON(find_watch(token)); | ||
600 | list_add(&watch->list, &watches); | ||
601 | spin_unlock(&watches_lock); | ||
602 | |||
603 | err = xs_watch(watch->node, token); | ||
604 | |||
605 | /* Ignore errors due to multiple registration. */ | ||
606 | if ((err != 0) && (err != -EEXIST)) { | ||
607 | spin_lock(&watches_lock); | ||
608 | list_del(&watch->list); | ||
609 | spin_unlock(&watches_lock); | ||
610 | } | ||
611 | |||
612 | up_read(&xs_state.watch_mutex); | ||
613 | |||
614 | return err; | ||
615 | } | ||
616 | EXPORT_SYMBOL_GPL(register_xenbus_watch); | ||
617 | |||
/*
 * Stop watching a node: remove the watch from the local list, tell
 * xenstored to release it, and flush any of its events still queued,
 * so the callback cannot fire again once this returns.
 */
void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	char token[sizeof(watch) * 2 + 1];	/* watch pointer in ascii hex */
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		printk(KERN_WARNING
		       "XENBUS Failed to release watch %s: %i\n",
		       watch->node, err);

	up_read(&xs_state.watch_mutex);

	/* Make sure there are no callbacks running currently (unless
	   its us) */
	/* Taking xenwatch_mutex from within a watch callback (i.e. on the
	   xenwatch thread itself) would self-deadlock, hence the pid test. */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
		if (msg->u.watch.handle != watch)
			continue;
		list_del(&msg->list);
		kfree(msg->u.watch.vec);
		kfree(msg);
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
661 | |||
/*
 * Quiesce all xenstore traffic for save/restore: the two rwsems taken
 * for writing exclude new transactions and watch (un)registrations,
 * and the two mutexes exclude in-flight requests and responses.
 * The acquisition order here must mirror the release order in
 * xs_suspend_cancel().
 */
void xs_suspend(void)
{
	down_write(&xs_state.transaction_mutex);
	down_write(&xs_state.watch_mutex);
	mutex_lock(&xs_state.request_mutex);
	mutex_lock(&xs_state.response_mutex);
}
669 | |||
/*
 * Resume after save/restore: release the locks taken by xs_suspend()
 * and re-register every watch with xenstored.  watch_mutex is kept
 * held (write) until the watches are re-registered, which also covers
 * the list walk below.
 */
void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];	/* watch pointer in ascii hex */

	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.transaction_mutex);

	/* No need for watches_lock: the watch_mutex is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_state.watch_mutex);
}
687 | |||
/*
 * Abort a suspend: release everything xs_suspend() took, in exact
 * reverse acquisition order, without touching the watch list.
 */
void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.watch_mutex);
	up_write(&xs_state.transaction_mutex);
}
695 | |||
/*
 * Kernel thread that delivers queued watch events to their callbacks,
 * one at a time, serialised by xenwatch_mutex (which is what lets
 * unregister_xenbus_watch() wait out a running callback).
 */
static int xenwatch_thread(void *unused)
{
	struct list_head *ent;
	struct xs_stored_msg *msg;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		/* Pop the oldest event, if any; the queue may have been
		   drained by unregister_xenbus_watch() in the meantime. */
		spin_lock(&watch_events_lock);
		ent = watch_events.next;
		if (ent != &watch_events)
			list_del(ent);
		spin_unlock(&watch_events_lock);

		if (ent != &watch_events) {
			msg = list_entry(ent, struct xs_stored_msg, list);
			/* Callback runs with xenwatch_mutex held. */
			msg->u.watch.handle->callback(
				msg->u.watch.handle,
				(const char **)msg->u.watch.vec,
				msg->u.watch.vec_size);
			kfree(msg->u.watch.vec);
			kfree(msg);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}
731 | |||
/*
 * Read one complete message from xenstored and route it: watch events
 * are queued for xenwatch_thread, everything else is a reply handed to
 * the waiter on xs_state.reply_waitq.  Returns 0 or -errno.
 */
static int process_msg(void)
{
	struct xs_stored_msg *msg;
	char *body;
	int err;

	/*
	 * We must disallow save/restore while reading a xenstore message.
	 * A partial read across s/r leaves us out of sync with xenstored.
	 */
	for (;;) {
		err = xb_wait_for_data_to_read();
		if (err)
			return err;
		mutex_lock(&xs_state.response_mutex);
		if (xb_data_to_read())
			break;
		/* We raced with save/restore: pending data 'disappeared'. */
		mutex_unlock(&xs_state.response_mutex);
	}

	/* response_mutex is now held for the remainder of the read. */

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (msg == NULL) {
		err = -ENOMEM;
		goto out;
	}

	/* Fixed-size header first; it carries the body length. */
	err = xb_read(&msg->hdr, sizeof(msg->hdr));
	if (err) {
		kfree(msg);
		goto out;
	}

	/* +1 for the NUL terminator added below. */
	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
	if (body == NULL) {
		kfree(msg);
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(body, msg->hdr.len);
	if (err) {
		kfree(body);
		kfree(msg);
		goto out;
	}
	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		/* body's ownership passes to split() here. */
		msg->u.watch.vec = split(body, msg->hdr.len,
					 &msg->u.watch.vec_size);
		if (IS_ERR(msg->u.watch.vec)) {
			kfree(msg);
			err = PTR_ERR(msg->u.watch.vec);
			goto out;
		}

		spin_lock(&watches_lock);
		msg->u.watch.handle = find_watch(
			msg->u.watch.vec[XS_WATCH_TOKEN]);
		if (msg->u.watch.handle != NULL) {
			spin_lock(&watch_events_lock);
			list_add_tail(&msg->list, &watch_events);
			wake_up(&watch_events_waitq);
			spin_unlock(&watch_events_lock);
		} else {
			/* Watch was unregistered meanwhile: drop event. */
			kfree(msg->u.watch.vec);
			kfree(msg);
		}
		spin_unlock(&watches_lock);
	} else {
		/* Reply: body ownership passes to the reply list entry. */
		msg->u.reply.body = body;
		spin_lock(&xs_state.reply_lock);
		list_add_tail(&msg->list, &xs_state.reply_list);
		spin_unlock(&xs_state.reply_lock);
		wake_up(&xs_state.reply_waitq);
	}

 out:
	mutex_unlock(&xs_state.response_mutex);
	return err;
}
815 | |||
816 | static int xenbus_thread(void *unused) | ||
817 | { | ||
818 | int err; | ||
819 | |||
820 | for (;;) { | ||
821 | err = process_msg(); | ||
822 | if (err) | ||
823 | printk(KERN_WARNING "XENBUS error %d while reading " | ||
824 | "message\n", err); | ||
825 | if (kthread_should_stop()) | ||
826 | break; | ||
827 | } | ||
828 | |||
829 | return 0; | ||
830 | } | ||
831 | |||
832 | int xs_init(void) | ||
833 | { | ||
834 | int err; | ||
835 | struct task_struct *task; | ||
836 | |||
837 | INIT_LIST_HEAD(&xs_state.reply_list); | ||
838 | spin_lock_init(&xs_state.reply_lock); | ||
839 | init_waitqueue_head(&xs_state.reply_waitq); | ||
840 | |||
841 | mutex_init(&xs_state.request_mutex); | ||
842 | mutex_init(&xs_state.response_mutex); | ||
843 | init_rwsem(&xs_state.transaction_mutex); | ||
844 | init_rwsem(&xs_state.watch_mutex); | ||
845 | |||
846 | /* Initialize the shared memory rings to talk to xenstored */ | ||
847 | err = xb_init_comms(); | ||
848 | if (err) | ||
849 | return err; | ||
850 | |||
851 | task = kthread_run(xenwatch_thread, NULL, "xenwatch"); | ||
852 | if (IS_ERR(task)) | ||
853 | return PTR_ERR(task); | ||
854 | xenwatch_pid = task->pid; | ||
855 | |||
856 | task = kthread_run(xenbus_thread, NULL, "xenbus"); | ||
857 | if (IS_ERR(task)) | ||
858 | return PTR_ERR(task); | ||
859 | |||
860 | return 0; | ||
861 | } | ||