author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-20 09:58:25 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-09-20 09:58:25 -0400
commit	1534c3820c26aca4e2567f97b8add8bea40e7e2b (patch)
tree	da76ec6d0c3abb099ffe7d542b3f0f6fe570957f
parent	7561b974e0cbbdca1bb880b55200afd9a1a20737 (diff)
[S390] zcrypt adjunct processor bus.
Add a bus for the adjunct processor interface. Up to 64 devices can be connected to the ap bus interface, each device with 16 domains. That makes 1024 message queues. The interface is asynchronous: the answer to a message sent to a queue needs to be received at some later point in time. Unfortunately the interface does not provide interrupts when a message reply is pending, so the ap bus needs to implement some fancy polling; each active queue is polled once per 1/HZ second, or continuously if an idle cpu exists and the poll thread is active (see the poll_thread parameter).

The ap bus uses the sysfs path /sys/bus/ap and has two bus attributes, ap_domain and config_time. The ap_domain attribute selects one of the 16 domains to be used for this system; this limits the maximum number of ap devices to 64. The config_time attribute contains the number of seconds between two ap bus scans to find new devices.

The ap bus uses modalias entries of the form "ap:tN" to autoload the ap driver for hardware type N. Currently known types are: 3 - PCICC, 4 - PCICA, 5 - PCIXCC, 6 - CEX2A and 7 - CEX2C.

Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Ralph Wuerthner <rwuerthn@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	drivers/s390/crypto/ap_bus.c	1221
-rw-r--r--	drivers/s390/crypto/ap_bus.h	158
-rw-r--r--	include/linux/mod_devicetable.h	11
-rw-r--r--	scripts/mod/file2alias.c	12
4 files changed, 1402 insertions, 0 deletions
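Editor's note: for orientation, the sketch below shows what a minimal client of the new bus could look like. It is a hypothetical example, not part of the patch; it only uses interfaces this commit introduces (struct ap_driver, struct ap_device_id, the AP_DEVICE() macro, ap_driver_register()/ap_driver_unregister()). The driver name "demo", the matched device type and the empty probe/remove bodies are illustrative assumptions; a real driver would also supply the receive() callback used for reply delivery.

/*
 * Hypothetical skeleton driver for the new ap bus (illustration only).
 */
#include <linux/module.h>
#include "ap_bus.h"

static int demo_probe(struct ap_device *ap_dev)
{
	/* Set up per-device state, e.g. allocate ap_dev->reply. */
	return 0;
}

static void demo_remove(struct ap_device *ap_dev)
{
	/* Release per-device state. */
}

static struct ap_device_id demo_ids[] = {
	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2A) },	/* assumed device type */
	{ /* end of list */ },
};

static struct ap_driver demo_driver = {
	.ids = demo_ids,
	.probe = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	return ap_driver_register(&demo_driver, THIS_MODULE, "demo");
}

static void __exit demo_exit(void)
{
	ap_driver_unregister(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);

With a MODULE_DEVICE_TABLE(ap, demo_ids) entry, the "ap:tN" modalias described above (and wired up in file2alias.c below) would let udev autoload such a driver when a matching card turns up during a bus scan.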
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
new file mode 100644
index 000000000000..6ed0985c0c91
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.c
@@ -0,0 +1,1221 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.c
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/workqueue.h>
32#include <linux/notifier.h>
33#include <linux/kthread.h>
34#include <linux/mutex.h>
35#include <asm/s390_rdev.h>
36
37#include "ap_bus.h"
38
39/* Some prototypes. */
40static void ap_scan_bus(void *);
41static void ap_poll_all(unsigned long);
42static void ap_poll_timeout(unsigned long);
43static int ap_poll_thread_start(void);
44static void ap_poll_thread_stop(void);
45
46/**
47 * Module description.
48 */
49MODULE_AUTHOR("IBM Corporation");
50MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
51 "Copyright 2006 IBM Corporation");
52MODULE_LICENSE("GPL");
53
54/**
55 * Module parameter
56 */
57int ap_domain_index = -1; /* Adjunct Processor Domain Index */
58module_param_named(domain, ap_domain_index, int, 0000);
59MODULE_PARM_DESC(domain, "domain index for ap devices");
60EXPORT_SYMBOL(ap_domain_index);
61
62static int ap_thread_flag = 1;
63module_param_named(poll_thread, ap_thread_flag, int, 0000);
64MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");
65
66static struct device *ap_root_device = NULL;
67
68/**
69 * Workqueue & timer for bus rescan.
70 */
71static struct workqueue_struct *ap_work_queue;
72static struct timer_list ap_config_timer;
73static int ap_config_time = AP_CONFIG_TIME;
74static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL);
75
76/**
77 * Tasklet & timer for AP request polling.
78 */
79static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
80static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
81static atomic_t ap_poll_requests = ATOMIC_INIT(0);
82static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
83static struct task_struct *ap_poll_kthread = NULL;
84static DEFINE_MUTEX(ap_poll_thread_mutex);
85
86/**
87 * Test if ap instructions are available.
88 *
89 * Returns 0 if the ap instructions are installed.
90 */
91static inline int ap_instructions_available(void)
92{
93 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
94 register unsigned long reg1 asm ("1") = -ENODEV;
95 register unsigned long reg2 asm ("2") = 0UL;
96
97 asm volatile(
98 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
99 "0: la %1,0\n"
100 "1:\n"
101 EX_TABLE(0b, 1b)
102 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
103 return reg1;
104}
105
106/**
107 * Test adjunct processor queue.
108 * @qid: the ap queue number
109 * @queue_depth: pointer to queue depth value
110 * @device_type: pointer to device type value
111 *
112 * Returns ap queue status structure.
113 */
114static inline struct ap_queue_status
115ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
116{
117 register unsigned long reg0 asm ("0") = qid;
118 register struct ap_queue_status reg1 asm ("1");
119 register unsigned long reg2 asm ("2") = 0UL;
120
121 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
122 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
123 *device_type = (int) (reg2 >> 24);
124 *queue_depth = (int) (reg2 & 0xff);
125 return reg1;
126}
127
128/**
129 * Reset adjunct processor queue.
130 * @qid: the ap queue number
131 *
132 * Returns ap queue status structure.
133 */
134static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
135{
136 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
137 register struct ap_queue_status reg1 asm ("1");
138 register unsigned long reg2 asm ("2") = 0UL;
139
140 asm volatile(
141 ".long 0xb2af0000" /* PQAP(RAPQ) */
142 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
143 return reg1;
144}
145
146/**
147 * Send message to adjunct processor queue.
148 * @qid: the ap queue number
149 * @psmid: the program supplied message identifier
150 * @msg: the message text
151 * @length: the message length
152 *
153 * Returns ap queue status structure.
154 *
155 * Condition code 1 on NQAP can't happen because the L bit is 1.
156 *
157 * Condition code 2 on NQAP also means the send is incomplete,
158 * because a segment boundary was reached. The NQAP is repeated.
159 */
160static inline struct ap_queue_status
161__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
162{
163 typedef struct { char _[length]; } msgblock;
164 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
165 register struct ap_queue_status reg1 asm ("1");
166 register unsigned long reg2 asm ("2") = (unsigned long) msg;
167 register unsigned long reg3 asm ("3") = (unsigned long) length;
168 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
169 register unsigned long reg5 asm ("5") = (unsigned int) psmid;
170
171 asm volatile (
172 "0: .long 0xb2ad0042\n" /* DQAP */
173 " brc 2,0b"
174 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
175 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
176 : "cc" );
177 return reg1;
178}
179
180int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
181{
182 struct ap_queue_status status;
183
184 status = __ap_send(qid, psmid, msg, length);
185 switch (status.response_code) {
186 case AP_RESPONSE_NORMAL:
187 return 0;
188 case AP_RESPONSE_Q_FULL:
189 return -EBUSY;
190 default: /* Device is gone. */
191 return -ENODEV;
192 }
193}
194EXPORT_SYMBOL(ap_send);
195
196/*
197 * Receive message from adjunct processor queue.
198 * @qid: the ap queue number
199 * @psmid: pointer to program supplied message identifier
200 * @msg: the message text
201 * @length: the message length
202 *
203 * Returns ap queue status structure.
204 *
205 * Condition code 1 on DQAP means the receive has taken place
206 * but only partially. The response is incomplete, hence the
207 * DQAP is repeated.
208 *
209 * Condition code 2 on DQAP also means the receive is incomplete,
210 * this time because a segment boundary was reached. Again, the
211 * DQAP is repeated.
212 *
213 * Note that gpr2 is used by the DQAP instruction to keep track of
214 * any 'residual' length, in case the instruction gets interrupted.
215 * Hence it gets zeroed before the instruction.
216 */
217static inline struct ap_queue_status
218__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
219{
220 typedef struct { char _[length]; } msgblock;
221 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
222 register struct ap_queue_status reg1 asm ("1");
223 register unsigned long reg2 asm("2") = 0UL;
224 register unsigned long reg4 asm("4") = (unsigned long) msg;
225 register unsigned long reg5 asm("5") = (unsigned long) length;
226 register unsigned long reg6 asm("6") = 0UL;
227 register unsigned long reg7 asm("7") = 0UL;
228
229
230 asm volatile(
231 "0: .long 0xb2ae0064\n"
232 " brc 6,0b\n"
233 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
234 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
235 "=m" (*(msgblock *) msg) : : "cc" );
236 *psmid = (((unsigned long long) reg6) << 32) + reg7;
237 return reg1;
238}
239
240int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
241{
242 struct ap_queue_status status;
243
244 status = __ap_recv(qid, psmid, msg, length);
245 switch (status.response_code) {
246 case AP_RESPONSE_NORMAL:
247 return 0;
248 case AP_RESPONSE_NO_PENDING_REPLY:
249 if (status.queue_empty)
250 return -ENOENT;
251 return -EBUSY;
252 default:
253 return -ENODEV;
254 }
255}
256EXPORT_SYMBOL(ap_recv);
257
258/**
259 * Check if an AP queue is available. The test is repeated for
260 * AP_MAX_RESET times.
261 * @qid: the ap queue number
262 * @queue_depth: pointer to queue depth value
263 * @device_type: pointer to device type value
264 */
265static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
266{
267 struct ap_queue_status status;
268 int t_depth, t_device_type, rc, i;
269
270 rc = -EBUSY;
271 for (i = 0; i < AP_MAX_RESET; i++) {
272 status = ap_test_queue(qid, &t_depth, &t_device_type);
273 switch (status.response_code) {
274 case AP_RESPONSE_NORMAL:
275 *queue_depth = t_depth + 1;
276 *device_type = t_device_type;
277 rc = 0;
278 break;
279 case AP_RESPONSE_Q_NOT_AVAIL:
280 rc = -ENODEV;
281 break;
282 case AP_RESPONSE_RESET_IN_PROGRESS:
283 break;
284 case AP_RESPONSE_DECONFIGURED:
285 rc = -ENODEV;
286 break;
287 case AP_RESPONSE_CHECKSTOPPED:
288 rc = -ENODEV;
289 break;
290 case AP_RESPONSE_BUSY:
291 break;
292 default:
293 BUG();
294 }
295 if (rc != -EBUSY)
296 break;
297 if (i < AP_MAX_RESET - 1)
298 udelay(5);
299 }
300 return rc;
301}
302
303/**
304 * Reset an AP queue and wait for it to become available again.
305 * @qid: the ap queue number
306 */
307static int ap_init_queue(ap_qid_t qid)
308{
309 struct ap_queue_status status;
310 int rc, dummy, i;
311
312 rc = -ENODEV;
313 status = ap_reset_queue(qid);
314 for (i = 0; i < AP_MAX_RESET; i++) {
315 switch (status.response_code) {
316 case AP_RESPONSE_NORMAL:
317 if (status.queue_empty)
318 rc = 0;
319 break;
320 case AP_RESPONSE_Q_NOT_AVAIL:
321 case AP_RESPONSE_DECONFIGURED:
322 case AP_RESPONSE_CHECKSTOPPED:
323 i = AP_MAX_RESET; /* return with -ENODEV */
324 break;
325 case AP_RESPONSE_RESET_IN_PROGRESS:
326 case AP_RESPONSE_BUSY:
327 default:
328 break;
329 }
330 if (rc != -ENODEV)
331 break;
332 if (i < AP_MAX_RESET - 1) {
333 udelay(5);
334 status = ap_test_queue(qid, &dummy, &dummy);
335 }
336 }
337 return rc;
338}
339
340/**
341 * AP device related attributes.
342 */
343static ssize_t ap_hwtype_show(struct device *dev,
344 struct device_attribute *attr, char *buf)
345{
346 struct ap_device *ap_dev = to_ap_dev(dev);
347 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
348}
349static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
350
351static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
352 char *buf)
353{
354 struct ap_device *ap_dev = to_ap_dev(dev);
355 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
356}
357static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
358
359static ssize_t ap_request_count_show(struct device *dev,
360 struct device_attribute *attr,
361 char *buf)
362{
363 struct ap_device *ap_dev = to_ap_dev(dev);
364 int rc;
365
366 spin_lock_bh(&ap_dev->lock);
367 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
368 spin_unlock_bh(&ap_dev->lock);
369 return rc;
370}
371
372static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
373
374static ssize_t ap_modalias_show(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
378}
379
380static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
381
382static struct attribute *ap_dev_attrs[] = {
383 &dev_attr_hwtype.attr,
384 &dev_attr_depth.attr,
385 &dev_attr_request_count.attr,
386 &dev_attr_modalias.attr,
387 NULL
388};
389static struct attribute_group ap_dev_attr_group = {
390 .attrs = ap_dev_attrs
391};
392
393/**
394 * AP bus driver registration/unregistration.
395 */
396static int ap_bus_match(struct device *dev, struct device_driver *drv)
397{
398 struct ap_device *ap_dev = to_ap_dev(dev);
399 struct ap_driver *ap_drv = to_ap_drv(drv);
400 struct ap_device_id *id;
401
402 /**
403 * Compare device type of the device with the list of
404 * supported types of the device_driver.
405 */
406 for (id = ap_drv->ids; id->match_flags; id++) {
407 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
408 (id->dev_type != ap_dev->device_type))
409 continue;
410 return 1;
411 }
412 return 0;
413}
414
415/**
416 * uevent function for AP devices. It sets up a single environment
417 * variable DEV_TYPE which contains the hardware device type.
418 */
419static int ap_uevent (struct device *dev, char **envp, int num_envp,
420 char *buffer, int buffer_size)
421{
422 struct ap_device *ap_dev = to_ap_dev(dev);
423 int length;
424
425 if (!ap_dev)
426 return -ENODEV;
427
428 /* Set up DEV_TYPE environment variable. */
429 envp[0] = buffer;
430 length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X",
431 ap_dev->device_type);
432 if (buffer_size - length <= 0)
433 return -ENOMEM;
434 envp[1] = 0;
435 return 0;
436}
437
438static struct bus_type ap_bus_type = {
439 .name = "ap",
440 .match = &ap_bus_match,
441 .uevent = &ap_uevent,
442};
443
444static int ap_device_probe(struct device *dev)
445{
446 struct ap_device *ap_dev = to_ap_dev(dev);
447 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
448 int rc;
449
450 ap_dev->drv = ap_drv;
451 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
452 if (rc)
453 ap_dev->unregistered = 1;
454 return rc;
455}
456
457/**
458 * Flush all requests from the request/pending queue of an AP device.
459 * @ap_dev: pointer to the AP device.
460 */
461static inline void __ap_flush_queue(struct ap_device *ap_dev)
462{
463 struct ap_message *ap_msg, *next;
464
465 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
466 list_del_init(&ap_msg->list);
467 ap_dev->pendingq_count--;
468 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
469 }
470 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
471 list_del_init(&ap_msg->list);
472 ap_dev->requestq_count--;
473 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
474 }
475}
476
477void ap_flush_queue(struct ap_device *ap_dev)
478{
479 spin_lock_bh(&ap_dev->lock);
480 __ap_flush_queue(ap_dev);
481 spin_unlock_bh(&ap_dev->lock);
482}
483EXPORT_SYMBOL(ap_flush_queue);
484
485static int ap_device_remove(struct device *dev)
486{
487 struct ap_device *ap_dev = to_ap_dev(dev);
488 struct ap_driver *ap_drv = ap_dev->drv;
489
490 spin_lock_bh(&ap_dev->lock);
491 __ap_flush_queue(ap_dev);
492 /**
493 * set ->unregistered to 1 while holding the lock. This prevents
494 * new messages to be put on the queue from now on.
495 */
496 ap_dev->unregistered = 1;
497 spin_unlock_bh(&ap_dev->lock);
498 if (ap_drv->remove)
499 ap_drv->remove(ap_dev);
500 return 0;
501}
502
503int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
504 char *name)
505{
506 struct device_driver *drv = &ap_drv->driver;
507
508 drv->bus = &ap_bus_type;
509 drv->probe = ap_device_probe;
510 drv->remove = ap_device_remove;
511 drv->owner = owner;
512 drv->name = name;
513 return driver_register(drv);
514}
515EXPORT_SYMBOL(ap_driver_register);
516
517void ap_driver_unregister(struct ap_driver *ap_drv)
518{
519 driver_unregister(&ap_drv->driver);
520}
521EXPORT_SYMBOL(ap_driver_unregister);
522
523/**
524 * AP bus attributes.
525 */
526static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
527{
528 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
529}
530
531static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
532
533static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
534{
535 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
536}
537
538static ssize_t ap_config_time_store(struct bus_type *bus,
539 const char *buf, size_t count)
540{
541 int time;
542
543 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
544 return -EINVAL;
545 ap_config_time = time;
546 if (!timer_pending(&ap_config_timer) ||
547 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
548 ap_config_timer.expires = jiffies + ap_config_time * HZ;
549 add_timer(&ap_config_timer);
550 }
551 return count;
552}
553
554static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
555
556static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
557{
558 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
559}
560
561static ssize_t ap_poll_thread_store(struct bus_type *bus,
562 const char *buf, size_t count)
563{
564 int flag, rc;
565
566 if (sscanf(buf, "%d\n", &flag) != 1)
567 return -EINVAL;
568 if (flag) {
569 rc = ap_poll_thread_start();
570 if (rc)
571 return rc;
572 }
573 else
574 ap_poll_thread_stop();
575 return count;
576}
577
578static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
579
580static struct bus_attribute *const ap_bus_attrs[] = {
581 &bus_attr_ap_domain,
582 &bus_attr_config_time,
583 &bus_attr_poll_thread,
584 NULL
585};
586
587/**
588 * Pick one of the 16 ap domains.
589 */
590static inline int ap_select_domain(void)
591{
592 int queue_depth, device_type, count, max_count, best_domain;
593 int rc, i, j;
594
595 /**
596 * We want to use a single domain. Either the one specified with
597 * the "domain=" parameter or the domain with the maximum number
598 * of devices.
599 */
600 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
601 /* Domain has already been selected. */
602 return 0;
603 best_domain = -1;
604 max_count = 0;
605 for (i = 0; i < AP_DOMAINS; i++) {
606 count = 0;
607 for (j = 0; j < AP_DEVICES; j++) {
608 ap_qid_t qid = AP_MKQID(j, i);
609 rc = ap_query_queue(qid, &queue_depth, &device_type);
610 if (rc)
611 continue;
612 count++;
613 }
614 if (count > max_count) {
615 max_count = count;
616 best_domain = i;
617 }
618 }
619 if (best_domain >= 0){
620 ap_domain_index = best_domain;
621 return 0;
622 }
623 return -ENODEV;
624}
625
626/**
627 * Find the device type if query queue returned a device type of 0.
628 * @ap_dev: pointer to the AP device.
629 */
630static int ap_probe_device_type(struct ap_device *ap_dev)
631{
632 static unsigned char msg[] = {
633 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
634 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
635 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
636 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
637 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
638 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
639 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
640 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
641 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
642 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
643 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
644 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
645 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
646 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
647 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
648 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
649 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
650 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
651 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
652 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
653 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
654 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
655 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
656 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
657 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
658 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
659 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
660 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
661 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
662 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
663 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
664 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
665 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
666 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
667 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
668 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
669 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
670 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
671 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
672 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
673 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
674 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
675 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
676 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
677 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
678 };
679 struct ap_queue_status status;
680 unsigned long long psmid;
681 char *reply;
682 int rc, i;
683
684 reply = (void *) get_zeroed_page(GFP_KERNEL);
685 if (!reply) {
686 rc = -ENOMEM;
687 goto out;
688 }
689
690 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
691 msg, sizeof(msg));
692 if (status.response_code != AP_RESPONSE_NORMAL) {
693 rc = -ENODEV;
694 goto out_free;
695 }
696
697 /* Wait for the test message to complete. */
698 for (i = 0; i < 6; i++) {
699 mdelay(300);
700 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
701 if (status.response_code == AP_RESPONSE_NORMAL &&
702 psmid == 0x0102030405060708ULL)
703 break;
704 }
705 if (i < 6) {
706 /* Got an answer. */
707 if (reply[0] == 0x00 && reply[1] == 0x86)
708 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
709 else
710 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
711 rc = 0;
712 } else
713 rc = -ENODEV;
714
715out_free:
716 free_page((unsigned long) reply);
717out:
718 return rc;
719}
720
721/**
722 * Scan the ap bus for new devices.
723 */
724static int __ap_scan_bus(struct device *dev, void *data)
725{
726 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
727}
728
729static void ap_device_release(struct device *dev)
730{
731 struct ap_device *ap_dev = to_ap_dev(dev);
732
733 kfree(ap_dev);
734}
735
736static void ap_scan_bus(void *data)
737{
738 struct ap_device *ap_dev;
739 struct device *dev;
740 ap_qid_t qid;
741 int queue_depth, device_type;
742 int rc, i;
743
744 if (ap_select_domain() != 0)
745 return;
746 for (i = 0; i < AP_DEVICES; i++) {
747 qid = AP_MKQID(i, ap_domain_index);
748 dev = bus_find_device(&ap_bus_type, NULL,
749 (void *)(unsigned long)qid,
750 __ap_scan_bus);
751 if (dev) {
752 put_device(dev);
753 continue;
754 }
755 rc = ap_query_queue(qid, &queue_depth, &device_type);
756 if (rc)
757 continue;
758 rc = ap_init_queue(qid);
759 if (rc)
760 continue;
761 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
762 if (!ap_dev)
763 break;
764 ap_dev->qid = qid;
765 ap_dev->queue_depth = queue_depth;
766 spin_lock_init(&ap_dev->lock);
767 INIT_LIST_HEAD(&ap_dev->pendingq);
768 INIT_LIST_HEAD(&ap_dev->requestq);
769 if (device_type == 0)
770 ap_probe_device_type(ap_dev);
771 else
772 ap_dev->device_type = device_type;
773
774 ap_dev->device.bus = &ap_bus_type;
775 ap_dev->device.parent = ap_root_device;
776 snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
777 AP_QID_DEVICE(ap_dev->qid));
778 ap_dev->device.release = ap_device_release;
779 rc = device_register(&ap_dev->device);
780 if (rc) {
781 kfree(ap_dev);
782 continue;
783 }
784 /* Add device attributes. */
785 rc = sysfs_create_group(&ap_dev->device.kobj,
786 &ap_dev_attr_group);
787 if (rc)
788 device_unregister(&ap_dev->device);
789 }
790}
791
792static void
793ap_config_timeout(unsigned long ptr)
794{
795 queue_work(ap_work_queue, &ap_config_work);
796 ap_config_timer.expires = jiffies + ap_config_time * HZ;
797 add_timer(&ap_config_timer);
798}
799
800/**
801 * Set up the timer to run the poll tasklet
802 */
803static inline void ap_schedule_poll_timer(void)
804{
805 if (timer_pending(&ap_poll_timer))
806 return;
807 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
808}
809
810/**
811 * Receive pending reply messages from an AP device.
812 * @ap_dev: pointer to the AP device
813 * @flags: pointer to control flags, bit 2^0 is set if another poll is
814 * required, bit 2^1 is set if the poll timer needs to get armed
815 * Returns 0 if the device is still present, -ENODEV if not.
816 */
817static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
818{
819 struct ap_queue_status status;
820 struct ap_message *ap_msg;
821
822 if (ap_dev->queue_count <= 0)
823 return 0;
824 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
825 ap_dev->reply->message, ap_dev->reply->length);
826 switch (status.response_code) {
827 case AP_RESPONSE_NORMAL:
828 atomic_dec(&ap_poll_requests);
829 ap_dev->queue_count--;
830 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
831 if (ap_msg->psmid != ap_dev->reply->psmid)
832 continue;
833 list_del_init(&ap_msg->list);
834 ap_dev->pendingq_count--;
835 ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
836 break;
837 }
838 if (ap_dev->queue_count > 0)
839 *flags |= 1;
840 break;
841 case AP_RESPONSE_NO_PENDING_REPLY:
842 if (status.queue_empty) {
843 /* The card shouldn't forget requests but who knows. */
844 ap_dev->queue_count = 0;
845 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
846 ap_dev->requestq_count += ap_dev->pendingq_count;
847 ap_dev->pendingq_count = 0;
848 } else
849 *flags |= 2;
850 break;
851 default:
852 return -ENODEV;
853 }
854 return 0;
855}
856
857/**
858 * Send messages from the request queue to an AP device.
859 * @ap_dev: pointer to the AP device
860 * @flags: pointer to control flags, bit 2^0 is set if another poll is
861 * required, bit 2^1 is set if the poll timer needs to get armed
862 * Returns 0 if the device is still present, -ENODEV if not.
863 */
864static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
865{
866 struct ap_queue_status status;
867 struct ap_message *ap_msg;
868
869 if (ap_dev->requestq_count <= 0 ||
870 ap_dev->queue_count >= ap_dev->queue_depth)
871 return 0;
872 /* Start the next request on the queue. */
873 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
874 status = __ap_send(ap_dev->qid, ap_msg->psmid,
875 ap_msg->message, ap_msg->length);
876 switch (status.response_code) {
877 case AP_RESPONSE_NORMAL:
878 atomic_inc(&ap_poll_requests);
879 ap_dev->queue_count++;
880 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
881 ap_dev->requestq_count--;
882 ap_dev->pendingq_count++;
883 if (ap_dev->queue_count < ap_dev->queue_depth &&
884 ap_dev->requestq_count > 0)
885 *flags |= 1;
886 *flags |= 2;
887 break;
888 case AP_RESPONSE_Q_FULL:
889 *flags |= 2;
890 break;
891 case AP_RESPONSE_MESSAGE_TOO_BIG:
892 return -EINVAL;
893 default:
894 return -ENODEV;
895 }
896 return 0;
897}
898
899/**
900 * Poll AP device for pending replies and send new messages. If either
901 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
902 * @ap_dev: pointer to the bus device
903 * @flags: pointer to control flags, bit 2^0 is set if another poll is
904 * required, bit 2^1 is set if the poll timer needs to get armed
905 * Returns 0.
906 */
907static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
908{
909 int rc;
910
911 rc = ap_poll_read(ap_dev, flags);
912 if (rc)
913 return rc;
914 return ap_poll_write(ap_dev, flags);
915}
916
917/**
918 * Queue a message to a device.
919 * @ap_dev: pointer to the AP device
920 * @ap_msg: the message to be queued
921 */
922static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
923{
924 struct ap_queue_status status;
925
926 if (list_empty(&ap_dev->requestq) &&
927 ap_dev->queue_count < ap_dev->queue_depth) {
928 status = __ap_send(ap_dev->qid, ap_msg->psmid,
929 ap_msg->message, ap_msg->length);
930 switch (status.response_code) {
931 case AP_RESPONSE_NORMAL:
932 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
933 atomic_inc(&ap_poll_requests);
934 ap_dev->pendingq_count++;
935 ap_dev->queue_count++;
936 ap_dev->total_request_count++;
937 break;
938 case AP_RESPONSE_Q_FULL:
939 list_add_tail(&ap_msg->list, &ap_dev->requestq);
940 ap_dev->requestq_count++;
941 ap_dev->total_request_count++;
942 return -EBUSY;
943 case AP_RESPONSE_MESSAGE_TOO_BIG:
944 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
945 return -EINVAL;
946 default: /* Device is gone. */
947 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
948 return -ENODEV;
949 }
950 } else {
951 list_add_tail(&ap_msg->list, &ap_dev->requestq);
952 ap_dev->requestq_count++;
953 ap_dev->total_request_count++;
954 return -EBUSY;
955 }
956 ap_schedule_poll_timer();
957 return 0;
958}
959
960void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
961{
962 unsigned long flags;
963 int rc;
964
965 spin_lock_bh(&ap_dev->lock);
966 if (!ap_dev->unregistered) {
967 /* Make room on the queue by polling for finished requests. */
968 rc = ap_poll_queue(ap_dev, &flags);
969 if (!rc)
970 rc = __ap_queue_message(ap_dev, ap_msg);
971 if (!rc)
972 wake_up(&ap_poll_wait);
973 } else {
974 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
975 rc = 0;
976 }
977 spin_unlock_bh(&ap_dev->lock);
978 if (rc == -ENODEV)
979 device_unregister(&ap_dev->device);
980}
981EXPORT_SYMBOL(ap_queue_message);
982
983/**
984 * Cancel a crypto request. This is done by removing the request
985 * from the device pendingq or requestq queue. Note that the
986 * request stays on the AP queue. When it finishes the message
987 * reply will be discarded because the psmid can't be found.
988 * @ap_dev: AP device that has the message queued
989 * @ap_msg: the message that is to be removed
990 */
991void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
992{
993 struct ap_message *tmp;
994
995 spin_lock_bh(&ap_dev->lock);
996 if (!list_empty(&ap_msg->list)) {
997 list_for_each_entry(tmp, &ap_dev->pendingq, list)
998 if (tmp->psmid == ap_msg->psmid) {
999 ap_dev->pendingq_count--;
1000 goto found;
1001 }
1002 ap_dev->requestq_count--;
1003 found:
1004 list_del_init(&ap_msg->list);
1005 }
1006 spin_unlock_bh(&ap_dev->lock);
1007}
1008EXPORT_SYMBOL(ap_cancel_message);
1009
1010/**
1011 * AP receive polling for finished AP requests
1012 */
1013static void ap_poll_timeout(unsigned long unused)
1014{
1015 tasklet_schedule(&ap_tasklet);
1016}
1017
1018/**
1019 * Poll all AP devices on the bus in a round robin fashion. Continue
1020 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1021 * of the control flags has been set arm the poll timer.
1022 */
1023static int __ap_poll_all(struct device *dev, void *data)
1024{
1025 struct ap_device *ap_dev = to_ap_dev(dev);
1026 int rc;
1027
1028 spin_lock(&ap_dev->lock);
1029 if (!ap_dev->unregistered) {
1030 rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data);
1031 } else
1032 rc = 0;
1033 spin_unlock(&ap_dev->lock);
1034 if (rc)
1035 device_unregister(&ap_dev->device);
1036 return 0;
1037}
1038
1039static void ap_poll_all(unsigned long dummy)
1040{
1041 unsigned long flags;
1042
1043 do {
1044 flags = 0;
1045 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1046 } while (flags & 1);
1047 if (flags & 2)
1048 ap_schedule_poll_timer();
1049}
1050
1051/**
1052 * AP bus poll thread. The purpose of this thread is to poll for
1053 * finished requests in a loop if there is a "free" cpu - that is
1054 * a cpu that doesn't have anything better to do. The polling stops
1055 * as soon as there is another task or if all messages have been
1056 * delivered.
1057 */
1058static int ap_poll_thread(void *data)
1059{
1060 DECLARE_WAITQUEUE(wait, current);
1061 unsigned long flags;
1062 int requests;
1063
1064 set_user_nice(current, -20);
1065 while (1) {
1066 if (need_resched()) {
1067 schedule();
1068 continue;
1069 }
1070 add_wait_queue(&ap_poll_wait, &wait);
1071 set_current_state(TASK_INTERRUPTIBLE);
1072 if (kthread_should_stop())
1073 break;
1074 requests = atomic_read(&ap_poll_requests);
1075 if (requests <= 0)
1076 schedule();
1077 set_current_state(TASK_RUNNING);
1078 remove_wait_queue(&ap_poll_wait, &wait);
1079
1080 local_bh_disable();
1081 flags = 0;
1082 bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all);
1083 local_bh_enable();
1084 }
1085 set_current_state(TASK_RUNNING);
1086 remove_wait_queue(&ap_poll_wait, &wait);
1087 return 0;
1088}
1089
1090static int ap_poll_thread_start(void)
1091{
1092 int rc;
1093
1094 mutex_lock(&ap_poll_thread_mutex);
1095 if (!ap_poll_kthread) {
1096 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1097 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1098 if (rc)
1099 ap_poll_kthread = NULL;
1100 }
1101 else
1102 rc = 0;
1103 mutex_unlock(&ap_poll_thread_mutex);
1104 return rc;
1105}
1106
1107static void ap_poll_thread_stop(void)
1108{
1109 mutex_lock(&ap_poll_thread_mutex);
1110 if (ap_poll_kthread) {
1111 kthread_stop(ap_poll_kthread);
1112 ap_poll_kthread = NULL;
1113 }
1114 mutex_unlock(&ap_poll_thread_mutex);
1115}
1116
1117/**
1118 * The module initialization code.
1119 */
1120int __init ap_module_init(void)
1121{
1122 int rc, i;
1123
1124 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1125 printk(KERN_WARNING "Invalid param: domain = %d. "
1126 " Not loading.\n", ap_domain_index);
1127 return -EINVAL;
1128 }
1129 if (ap_instructions_available() != 0) {
1130 printk(KERN_WARNING "AP instructions not installed.\n");
1131 return -ENODEV;
1132 }
1133
1134 /* Create /sys/bus/ap. */
1135 rc = bus_register(&ap_bus_type);
1136 if (rc)
1137 goto out;
1138 for (i = 0; ap_bus_attrs[i]; i++) {
1139 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1140 if (rc)
1141 goto out_bus;
1142 }
1143
1144 /* Create /sys/devices/ap. */
1145 ap_root_device = s390_root_dev_register("ap");
1146 rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
1147 if (rc)
1148 goto out_bus;
1149
1150 ap_work_queue = create_singlethread_workqueue("kapwork");
1151 if (!ap_work_queue) {
1152 rc = -ENOMEM;
1153 goto out_root;
1154 }
1155
1156 if (ap_select_domain() == 0)
1157 ap_scan_bus(NULL);
1158
1159 /* Setup the ap bus rescan timer. */
1160 init_timer(&ap_config_timer);
1161 ap_config_timer.function = ap_config_timeout;
1162 ap_config_timer.data = 0;
1163 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1164 add_timer(&ap_config_timer);
1165
1166 /* Start the low priority AP bus poll thread. */
1167 if (ap_thread_flag) {
1168 rc = ap_poll_thread_start();
1169 if (rc)
1170 goto out_work;
1171 }
1172
1173 return 0;
1174
1175out_work:
1176 del_timer_sync(&ap_config_timer);
1177 del_timer_sync(&ap_poll_timer);
1178 destroy_workqueue(ap_work_queue);
1179out_root:
1180 s390_root_dev_unregister(ap_root_device);
1181out_bus:
1182 while (i--)
1183 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1184 bus_unregister(&ap_bus_type);
1185out:
1186 return rc;
1187}
1188
1189static int __ap_match_all(struct device *dev, void *data)
1190{
1191 return 1;
1192}
1193
1194/**
1195 * The module termination code
1196 */
1197void ap_module_exit(void)
1198{
1199 int i;
1200 struct device *dev;
1201
1202 ap_poll_thread_stop();
1203 del_timer_sync(&ap_config_timer);
1204 del_timer_sync(&ap_poll_timer);
1205 destroy_workqueue(ap_work_queue);
1206 s390_root_dev_unregister(ap_root_device);
1207 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
1208 __ap_match_all)))
1209 {
1210 device_unregister(dev);
1211 put_device(dev);
1212 }
1213 for (i = 0; ap_bus_attrs[i]; i++)
1214 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1215 bus_unregister(&ap_bus_type);
1216}
1217
1218#ifndef CONFIG_ZCRYPT_MONOLITHIC
1219module_init(ap_module_init);
1220module_exit(ap_module_exit);
1221#endif
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
new file mode 100644
index 000000000000..83b69c01cd6e
--- /dev/null
+++ b/drivers/s390/crypto/ap_bus.h
@@ -0,0 +1,158 @@
1/*
2 * linux/drivers/s390/crypto/ap_bus.h
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus header file.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#ifndef _AP_BUS_H_
27#define _AP_BUS_H_
28
29#include <linux/device.h>
30#include <linux/mod_devicetable.h>
31#include <linux/types.h>
32
33#define AP_DEVICES 64 /* Number of AP devices. */
34#define AP_DOMAINS 16 /* Number of AP domains. */
35#define AP_MAX_RESET 90 /* Maximum number of resets. */
36#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
37#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
38
39extern int ap_domain_index;
40
41/**
42 * The ap_qid_t identifier of an ap queue. It contains a
43 * 6 bit device index and a 4 bit queue index (domain).
44 */
45typedef unsigned int ap_qid_t;
46
47#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15))
48#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63)
49#define AP_QID_QUEUE(_qid) ((_qid) & 15)
50
51/**
52 * The ap queue status word is returned by all three AP functions
53 * (PQAP, NQAP and DQAP). There's a set of flags in the first
54 * byte, followed by a 1 byte response code.
55 */
56struct ap_queue_status {
57 unsigned int queue_empty : 1;
58 unsigned int replies_waiting : 1;
59 unsigned int queue_full : 1;
60 unsigned int pad1 : 5;
61 unsigned int response_code : 8;
62 unsigned int pad2 : 16;
63};
64
65#define AP_RESPONSE_NORMAL 0x00
66#define AP_RESPONSE_Q_NOT_AVAIL 0x01
67#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
68#define AP_RESPONSE_DECONFIGURED 0x03
69#define AP_RESPONSE_CHECKSTOPPED 0x04
70#define AP_RESPONSE_BUSY 0x05
71#define AP_RESPONSE_Q_FULL 0x10
72#define AP_RESPONSE_NO_PENDING_REPLY 0x10
73#define AP_RESPONSE_INDEX_TOO_BIG 0x11
74#define AP_RESPONSE_NO_FIRST_PART 0x13
75#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
76
77/**
78 * Known device types
79 */
80#define AP_DEVICE_TYPE_PCICC 3
81#define AP_DEVICE_TYPE_PCICA 4
82#define AP_DEVICE_TYPE_PCIXCC 5
83#define AP_DEVICE_TYPE_CEX2A 6
84#define AP_DEVICE_TYPE_CEX2C 7
85
86struct ap_device;
87struct ap_message;
88
89struct ap_driver {
90 struct device_driver driver;
91 struct ap_device_id *ids;
92
93 int (*probe)(struct ap_device *);
94 void (*remove)(struct ap_device *);
95 /* receive is called from tasklet context */
96 void (*receive)(struct ap_device *, struct ap_message *,
97 struct ap_message *);
98};
99
100#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
101
102int ap_driver_register(struct ap_driver *, struct module *, char *);
103void ap_driver_unregister(struct ap_driver *);
104
105struct ap_device {
106 struct device device;
107 struct ap_driver *drv; /* Pointer to AP device driver. */
108 spinlock_t lock; /* Per device lock. */
109
110 ap_qid_t qid; /* AP queue id. */
111 int queue_depth; /* AP queue depth.*/
112 int device_type; /* AP device type. */
113 int unregistered; /* marks AP device as unregistered */
114
115 int queue_count; /* # messages currently on AP queue. */
116
117	struct list_head pendingq;	/* List of messages sent to AP queue. */
118 int pendingq_count; /* # requests on pendingq list. */
119	struct list_head requestq;	/* List of messages yet to be sent. */
120 int requestq_count; /* # requests on requestq list. */
121 int total_request_count; /* # requests ever for this AP device. */
122
123 struct ap_message *reply; /* Per device reply message. */
124
125 void *private; /* ap driver private pointer. */
126};
127
128#define to_ap_dev(x) container_of((x), struct ap_device, device)
129
130struct ap_message {
131 struct list_head list; /* Request queueing. */
132 unsigned long long psmid; /* Message id. */
133 void *message; /* Pointer to message buffer. */
134 size_t length; /* Message length. */
135
136 void *private; /* ap driver private pointer. */
137};
138
139#define AP_DEVICE(dt) \
140 .dev_type=(dt), \
141 .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
142
143/**
144 * Note: don't use ap_send/ap_recv after using ap_queue_message
145 * for the first time. Otherwise the ap message queue will get
146 * confused.
147 */
148int ap_send(ap_qid_t, unsigned long long, void *, size_t);
149int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
150
151void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
152void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg);
153void ap_flush_queue(struct ap_device *ap_dev);
154
155int ap_module_init(void);
156void ap_module_exit(void);
157
158#endif /* _AP_BUS_H_ */
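As a complement to the note above about ap_queue_message(): the sketch below shows how a driver could submit one asynchronous request, assuming its probe() routine has already set ap_dev->reply and that completion is signalled through the driver's receive() callback. The function name, the psmid value and the buffer handling are illustrative assumptions, not part of the patch.

/*
 * Hypothetical request submission via the asynchronous interface.
 */
static void demo_send_request(struct ap_device *ap_dev,
			      struct ap_message *ap_msg,
			      void *buf, size_t len)
{
	ap_msg->message = buf;
	ap_msg->length = len;
	ap_msg->psmid = 0x0102030405060708ULL;	/* must identify the request uniquely */
	ap_msg->private = NULL;
	ap_queue_message(ap_dev, ap_msg);
	/*
	 * The reply arrives later through ap_driver->receive();
	 * ap_cancel_message(ap_dev, ap_msg) would abandon the request
	 * if the caller gives up waiting.
	 */
}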
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f6977708585c..f7ca0b09075d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -148,6 +148,17 @@ struct ccw_device_id {
148#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04
149#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08
150
151/* s390 AP bus devices */
152struct ap_device_id {
153 __u16 match_flags; /* which fields to match against */
154 __u8 dev_type; /* device type */
155 __u8 pad1;
156 __u32 pad2;
157 kernel_ulong_t driver_info;
158};
159
160#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
161
162
163#define PNP_ID_LEN 8
164#define PNP_MAX_DEVICES 8
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index e2de650d3dbf..de76da80443f 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -265,6 +265,14 @@ static int do_ccw_entry(const char *filename,
265	return 1;
266}
267
268/* looks like: "ap:tN" */
269static int do_ap_entry(const char *filename,
270 struct ap_device_id *id, char *alias)
271{
272 sprintf(alias, "ap:t%02X", id->dev_type);
273 return 1;
274}
275
276/* Looks like: "serio:tyNprNidNexN" */
277static int do_serio_entry(const char *filename,
278			struct serio_device_id *id, char *alias)
@@ -503,6 +511,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
511		do_table(symval, sym->st_size,
512			sizeof(struct ccw_device_id), "ccw",
513			do_ccw_entry, mod);
514 else if (sym_is(symname, "__mod_ap_device_table"))
515 do_table(symval, sym->st_size,
516 sizeof(struct ap_device_id), "ap",
517 do_ap_entry, mod);
518	else if (sym_is(symname, "__mod_serio_device_table"))
519		do_table(symval, sym->st_size,
520			sizeof(struct serio_device_id), "serio",
ass="hl opt">== GDT_INIT) ha->pccb->Service |= 0x80; if (ha->type == GDT_EISA) { if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */ outl(ha->ccb_phys, ha->bmic + MAILBOXREG); outb(ha->pccb->Service, ha->bmic + LDOORREG); } else if (ha->type == GDT_ISA) { writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event); } else if (ha->type == GDT_PCI) { writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event); } else if (ha->type == GDT_PCINEW) { outb(1, PTR2USHORT(&ha->plx->ldoor_reg)); } else if (ha->type == GDT_PCIMPR) { writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg); } } static int gdth_wait(gdth_ha_str *ha, int index, ulong32 time) { int answer_found = FALSE; int wait_index = 0; TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time)); if (index == 0) return 1; /* no wait required */ do { __gdth_interrupt(ha, true, &wait_index); if (wait_index == index) { answer_found = TRUE; break; } gdth_delay(1); } while (--time); while (gdth_test_busy(ha)) gdth_delay(0); return (answer_found); } static int gdth_internal_cmd(gdth_ha_str *ha, unchar service, ushort opcode, ulong32 p1, ulong64 p2, ulong64 p3) { register gdth_cmd_str *cmd_ptr; int retries,index; TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode)); cmd_ptr = ha->pccb; memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str)); /* make command */ for (retries = INIT_RETRIES;;) { cmd_ptr->Service = service; cmd_ptr->RequestBuffer = INTERNAL_CMND; if (!(index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } gdth_set_sema0(ha); cmd_ptr->OpCode = opcode; cmd_ptr->BoardNode = LOCALBOARD; if (service == CACHESERVICE) { if (opcode == GDT_IOCTL) { cmd_ptr->u.ioctl.subfunc = p1; cmd_ptr->u.ioctl.channel = (ulong32)p2; cmd_ptr->u.ioctl.param_size = (ushort)p3; cmd_ptr->u.ioctl.p_param = ha->scratch_phys; } else { if (ha->cache_feat & GDT_64BIT) { cmd_ptr->u.cache64.DeviceNo = (ushort)p1; cmd_ptr->u.cache64.BlockNo = p2; } else { cmd_ptr->u.cache.DeviceNo = (ushort)p1; cmd_ptr->u.cache.BlockNo = (ulong32)p2; } } } else if (service == SCSIRAWSERVICE) { if (ha->raw_feat & GDT_64BIT) { cmd_ptr->u.raw64.direction = p1; cmd_ptr->u.raw64.bus = (unchar)p2; cmd_ptr->u.raw64.target = (unchar)p3; cmd_ptr->u.raw64.lun = (unchar)(p3 >> 8); } else { cmd_ptr->u.raw.direction = p1; cmd_ptr->u.raw.bus = (unchar)p2; cmd_ptr->u.raw.target = (unchar)p3; cmd_ptr->u.raw.lun = (unchar)(p3 >> 8); } } else if (service == SCREENSERVICE) { if (opcode == GDT_REALTIME) { *(ulong32 *)&cmd_ptr->u.screen.su.data[0] = p1; *(ulong32 *)&cmd_ptr->u.screen.su.data[4] = (ulong32)p2; *(ulong32 *)&cmd_ptr->u.screen.su.data[8] = (ulong32)p3; } } ha->cmd_len = sizeof(gdth_cmd_str); ha->cmd_offs_dpmem = 0; ha->cmd_cnt = 0; gdth_copy_command(ha); gdth_release_event(ha); gdth_delay(20); if (!gdth_wait(ha, index, INIT_TIMEOUT)) { printk("GDT: Initialization error (timeout service %d)\n",service); return 0; } if (ha->status != S_BSY || --retries == 0) break; gdth_delay(1); } return (ha->status != S_OK ? 
0:1); } /* search for devices */ static int __devinit gdth_search_drives(gdth_ha_str *ha) { ushort cdev_cnt, i; int ok; ulong32 bus_no, drv_cnt, drv_no, j; gdth_getch_str *chn; gdth_drlist_str *drl; gdth_iochan_str *ioc; gdth_raw_iochan_str *iocr; gdth_arcdl_str *alst; gdth_alist_str *alst2; gdth_oem_str_ioctl *oemstr; #ifdef INT_COAL gdth_perf_modes *pmod; #endif #ifdef GDTH_RTC unchar rtc[12]; ulong flags; #endif TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); ok = 0; /* initialize controller services, at first: screen service */ ha->screen_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0); if (ok) ha->screen_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error screen service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n")); #ifdef GDTH_RTC /* read realtime clock info, send to controller */ /* 1. wait for the falling edge of update flag */ spin_lock_irqsave(&rtc_lock, flags); for (j = 0; j < 1000000; ++j) if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) break; for (j = 0; j < 1000000; ++j) if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) break; /* 2. read info */ do { for (j = 0; j < 12; ++j) rtc[j] = CMOS_READ(j); } while (rtc[0] != CMOS_READ(0)); spin_unlock_irqrestore(&rtc_lock, flags); TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); /* 3. send to controller firmware */ gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(ulong32 *)&rtc[0], *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]); #endif /* unfreeze all IOs */ gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0); /* initialize cache service */ ha->cache_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS, 0, 0); if (ok) ha->cache_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error cache service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n")); cdev_cnt = (ushort)ha->info; ha->fw_vers = ha->service; #ifdef INT_COAL if (ha->type == GDT_PCIMPR) { /* set perf. 
modes */ pmod = (gdth_perf_modes *)ha->pscratch; pmod->version = 1; pmod->st_mode = 1; /* enable one status buffer */ *((ulong64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; pmod->st_buff_indx1 = COALINDEX; pmod->st_buff_addr2 = 0; pmod->st_buff_u_addr2 = 0; pmod->st_buff_indx2 = 0; pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS; pmod->cmd_mode = 0; // disable all cmd buffers pmod->cmd_buff_addr1 = 0; pmod->cmd_buff_u_addr1 = 0; pmod->cmd_buff_indx1 = 0; pmod->cmd_buff_addr2 = 0; pmod->cmd_buff_u_addr2 = 0; pmod->cmd_buff_indx2 = 0; pmod->cmd_buff_size = 0; pmod->reserved1 = 0; pmod->reserved2 = 0; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES, INVALID_CHANNEL,sizeof(gdth_perf_modes))) { printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum); } } #endif /* detect number of buses - try new IOCTL */ iocr = (gdth_raw_iochan_str *)ha->pscratch; iocr->hdr.version = 0xffffffff; iocr->hdr.list_entries = MAXBUS; iocr->hdr.first_chan = 0; iocr->hdr.last_chan = MAXBUS-1; iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC, INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) { TRACE2(("IOCHAN_RAW_DESC supported!\n")); ha->bus_cnt = iocr->hdr.chan_count; for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { if (iocr->list[bus_no].proc_id < MAXID) ha->bus_id[bus_no] = iocr->list[bus_no].proc_id; else ha->bus_id[bus_no] = 0xff; } } else { /* old method */ chn = (gdth_getch_str *)ha->pscratch; for (bus_no = 0; bus_no < MAXBUS; ++bus_no) { chn->channel_no = bus_no; if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_CHAN_CNT | L_CTRL_PATTERN, IO_CHANNEL | INVALID_CHANNEL, sizeof(gdth_getch_str))) { if (bus_no == 0) { printk("GDT-HA %d: Error detecting channel count (0x%x)\n", ha->hanum, ha->status); return 0; } break; } if (chn->siop_id < MAXID) ha->bus_id[bus_no] = chn->siop_id; else ha->bus_id[bus_no] = 0xff; } ha->bus_cnt = (unchar)bus_no; } TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt)); /* read cache configuration */ if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO, INVALID_CHANNEL,sizeof(gdth_cinfo_str))) { printk("GDT-HA %d: Initialization error cache service (code %d)\n", ha->hanum, ha->status); return 0; } ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar; TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n", ha->cpar.version,ha->cpar.state,ha->cpar.strategy, ha->cpar.write_back,ha->cpar.block_size)); /* read board info and features */ ha->more_proc = FALSE; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO, INVALID_CHANNEL,sizeof(gdth_binfo_str))) { memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch, sizeof(gdth_binfo_str)); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES, INVALID_CHANNEL,sizeof(gdth_bfeat_str))) { TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n")); ha->bfeat = *(gdth_bfeat_str *)ha->pscratch; ha->more_proc = TRUE; } } else { TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n")); strcpy(ha->binfo.type_string, gdth_ctr_name(ha)); } TRACE2(("Controller name: %s\n",ha->binfo.type_string)); /* read more informations */ if (ha->more_proc) { /* physical drives, channel addresses */ ioc = (gdth_iochan_str *)ha->pscratch; ioc->hdr.version = 0xffffffff; ioc->hdr.list_entries = MAXBUS; ioc->hdr.first_chan = 0; ioc->hdr.last_chan = MAXBUS-1; ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC, 
INVALID_CHANNEL,sizeof(gdth_iochan_str))) { for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { ha->raw[bus_no].address = ioc->list[bus_no].address; ha->raw[bus_no].local_no = ioc->list[bus_no].local_no; } } else { for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { ha->raw[bus_no].address = IO_CHANNEL; ha->raw[bus_no].local_no = bus_no; } } for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) { chn = (gdth_getch_str *)ha->pscratch; chn->channel_no = ha->raw[bus_no].local_no; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_CHAN_CNT | L_CTRL_PATTERN, ha->raw[bus_no].address | INVALID_CHANNEL, sizeof(gdth_getch_str))) { ha->raw[bus_no].pdev_cnt = chn->drive_cnt; TRACE2(("Channel %d: %d phys. drives\n", bus_no,chn->drive_cnt)); } if (ha->raw[bus_no].pdev_cnt > 0) { drl = (gdth_drlist_str *)ha->pscratch; drl->sc_no = ha->raw[bus_no].local_no; drl->sc_cnt = ha->raw[bus_no].pdev_cnt; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SCSI_DR_LIST | L_CTRL_PATTERN, ha->raw[bus_no].address | INVALID_CHANNEL, sizeof(gdth_drlist_str))) { for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j) ha->raw[bus_no].id_list[j] = drl->sc_list[j]; } else { ha->raw[bus_no].pdev_cnt = 0; } } } /* logical drives */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT, INVALID_CHANNEL,sizeof(ulong32))) { drv_cnt = *(ulong32 *)ha->pscratch; if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST, INVALID_CHANNEL,drv_cnt * sizeof(ulong32))) { for (j = 0; j < drv_cnt; ++j) { drv_no = ((ulong32 *)ha->pscratch)[j]; if (drv_no < MAX_LDRIVES) { ha->hdr[drv_no].is_logdrv = TRUE; TRACE2(("Drive %d is log. drive\n",drv_no)); } } } alst = (gdth_arcdl_str *)ha->pscratch; alst->entries_avail = MAX_LDRIVES; alst->first_entry = 0; alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, ARRAY_DRV_LIST2 | LA_CTRL_PATTERN, INVALID_CHANNEL, sizeof(gdth_arcdl_str) + (alst->entries_avail-1) * sizeof(gdth_alist_str))) { for (j = 0; j < alst->entries_init; ++j) { ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd; ha->hdr[j].is_master = alst->list[j].is_master; ha->hdr[j].is_parity = alst->list[j].is_parity; ha->hdr[j].is_hotfix = alst->list[j].is_hotfix; ha->hdr[j].master_no = alst->list[j].cd_handle; } } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, ARRAY_DRV_LIST | LA_CTRL_PATTERN, 0, 35 * sizeof(gdth_alist_str))) { for (j = 0; j < 35; ++j) { alst2 = &((gdth_alist_str *)ha->pscratch)[j]; ha->hdr[j].is_arraydrv = alst2->is_arrayd; ha->hdr[j].is_master = alst2->is_master; ha->hdr[j].is_parity = alst2->is_parity; ha->hdr[j].is_hotfix = alst2->is_hotfix; ha->hdr[j].master_no = alst2->cd_handle; } } } } /* initialize raw service */ ha->raw_feat = 0; if (!force_dma32) { ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0); if (ok) ha->raw_feat = GDT_64BIT; } if (force_dma32 || (!ok && ha->status == (ushort)S_NOFUNC)) ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0); if (!ok) { printk("GDT-HA %d: Initialization error raw service (code %d)\n", ha->hanum, ha->status); return 0; } TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n")); /* set/get features raw service (scatter/gather) */ if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER, 0, 0)) { TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n")); if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) { TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n", ha->info)); ha->raw_feat |= (ushort)ha->info; } } /* set/get features cache service (equal 
to raw service) */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0, SCATTER_GATHER,0)) { TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n")); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) { TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n", ha->info)); ha->cache_feat |= (ushort)ha->info; } } /* reserve drives for raw service */ if (reserve_mode != 0) { gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL, reserve_mode == 1 ? 1 : 3, 0, 0); TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n", ha->status)); } for (i = 0; i < MAX_RES_ARGS; i += 4) { if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt && reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) { TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n", reserve_list[i], reserve_list[i+1], reserve_list[i+2], reserve_list[i+3])); if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0, reserve_list[i+1], reserve_list[i+2] | (reserve_list[i+3] << 8))) { printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n", ha->hanum, ha->status); } } } /* Determine OEM string using IOCTL */ oemstr = (gdth_oem_str_ioctl *)ha->pscratch; oemstr->params.ctl_version = 0x01; oemstr->params.buffer_size = sizeof(oemstr->text); if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL, sizeof(gdth_oem_str_ioctl))) { TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n")); printk("GDT-HA %d: Vendor: %s Name: %s\n", ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string); /* Save the Host Drive inquiry data */ strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id, sizeof(ha->oem_name)); } else { /* Old method, based on PCI ID */ TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n")); printk("GDT-HA %d: Name: %s\n", ha->hanum, ha->binfo.type_string); if (ha->oem_id == OEM_ID_INTEL) strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name)); else strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name)); } /* scanning for host drives */ for (i = 0; i < cdev_cnt; ++i) gdth_analyse_hdrive(ha, i); TRACE(("gdth_search_drives() OK\n")); return 1; } static int gdth_analyse_hdrive(gdth_ha_str *ha, ushort hdrive) { ulong32 drv_cyls; int drv_hds, drv_secs; TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive)); if (hdrive >= MAX_HDRIVES) return 0; if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0)) return 0; ha->hdr[hdrive].present = TRUE; ha->hdr[hdrive].size = ha->info; /* evaluate mapping (sectors per head, heads per cylinder) */ ha->hdr[hdrive].size &= ~SECS32; if (ha->info2 == 0) { gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs); } else { drv_hds = ha->info2 & 0xff; drv_secs = (ha->info2 >> 8) & 0xff; drv_cyls = (ulong32)ha->hdr[hdrive].size / drv_hds / drv_secs; } ha->hdr[hdrive].heads = (unchar)drv_hds; ha->hdr[hdrive].secs = (unchar)drv_secs; /* round size */ ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs; if (ha->cache_feat & GDT_64BIT) { if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0) && ha->info2 != 0) { ha->hdr[hdrive].size = ((ulong64)ha->info2 << 32) | ha->info; } } TRACE2(("gdth_search_dr() cdr. 
%d size %d hds %d scs %d\n", hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs)); /* get informations about device */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) { TRACE2(("gdth_search_dr() cache drive %d devtype %d\n", hdrive,ha->info)); ha->hdr[hdrive].devtype = (ushort)ha->info; } /* cluster info */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) { TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n", hdrive,ha->info)); if (!shared_access) ha->hdr[hdrive].cluster_type = (unchar)ha->info; } /* R/W attributes */ if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) { TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n", hdrive,ha->info)); ha->hdr[hdrive].rw_attribs = (unchar)ha->info; } return 1; } /* command queueing/sending functions */ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority) { struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); register Scsi_Cmnd *pscp; register Scsi_Cmnd *nscp; ulong flags; TRACE(("gdth_putq() priority %d\n",priority)); spin_lock_irqsave(&ha->smp_lock, flags); if (!cmndinfo->internal_command) cmndinfo->priority = priority; if (ha->req_first==NULL) { ha->req_first = scp; /* queue was empty */ scp->SCp.ptr = NULL; } else { /* queue not empty */ pscp = ha->req_first; nscp = (Scsi_Cmnd *)pscp->SCp.ptr; /* priority: 0-highest,..,0xff-lowest */ while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) { pscp = nscp; nscp = (Scsi_Cmnd *)pscp->SCp.ptr; } pscp->SCp.ptr = (char *)scp; scp->SCp.ptr = (char *)nscp; } spin_unlock_irqrestore(&ha->smp_lock, flags); #ifdef GDTH_STATISTICS flags = 0; for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr) ++flags; if (max_rq < flags) { max_rq = flags; TRACE3(("GDT: max_rq = %d\n",(ushort)max_rq)); } #endif } static void gdth_next(gdth_ha_str *ha) { register Scsi_Cmnd *pscp; register Scsi_Cmnd *nscp; unchar b, t, l, firsttime; unchar this_cmd, next_cmd; ulong flags = 0; int cmd_index; TRACE(("gdth_next() hanum %d\n", ha->hanum)); if (!gdth_polling) spin_lock_irqsave(&ha->smp_lock, flags); ha->cmd_cnt = ha->cmd_offs_dpmem = 0; this_cmd = firsttime = TRUE; next_cmd = gdth_polling ? FALSE:TRUE; cmd_index = 0; for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) { struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp); if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr) pscp = (Scsi_Cmnd *)pscp->SCp.ptr; if (!nscp_cmndinfo->internal_command) { b = nscp->device->channel; t = nscp->device->id; l = nscp->device->lun; if (nscp_cmndinfo->priority >= DEFAULT_PRI) { if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) || (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) continue; } } else b = t = l = 0; if (firsttime) { if (gdth_test_busy(ha)) { /* controller busy ? */ TRACE(("gdth_next() controller %d busy !\n", ha->hanum)); if (!gdth_polling) { spin_unlock_irqrestore(&ha->smp_lock, flags); return; } while (gdth_test_busy(ha)) gdth_delay(1); } firsttime = FALSE; } if (!nscp_cmndinfo->internal_command) { if (nscp_cmndinfo->phase == -1) { nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. 
*/ if (nscp->cmnd[0] == TEST_UNIT_READY) { TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n", b, t, l)); /* TEST_UNIT_READY -> set scan mode */ if ((ha->scan_mode & 0x0f) == 0) { if (b == 0 && t == 0 && l == 0) { ha->scan_mode |= 1; TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); } } else if ((ha->scan_mode & 0x0f) == 1) { if (b == 0 && ((t == 0 && l == 1) || (t == 1 && l == 0))) { nscp_cmndinfo->OpCode = GDT_SCAN_START; nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8) | SCSIRAWSERVICE; ha->scan_mode = 0x12; TRACE2(("Scan mode: 0x%x (SCAN_START)\n", ha->scan_mode)); } else { ha->scan_mode &= 0x10; TRACE2(("Scan mode: 0x%x\n", ha->scan_mode)); } } else if (ha->scan_mode == 0x12) { if (b == ha->bus_cnt && t == ha->tid_cnt-1) { nscp_cmndinfo->phase = SCSIRAWSERVICE; nscp_cmndinfo->OpCode = GDT_SCAN_END; ha->scan_mode &= 0x10; TRACE2(("Scan mode: 0x%x (SCAN_END)\n", ha->scan_mode)); } } } if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY && nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE && (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) { /* always GDT_CLUST_INFO! */ nscp_cmndinfo->OpCode = GDT_CLUST_INFO; } } } if (nscp_cmndinfo->OpCode != -1) { if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) { if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t))) this_cmd = FALSE; next_cmd = FALSE; } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) { if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b)))) this_cmd = FALSE; next_cmd = FALSE; } else { memset((char*)nscp->sense_buffer,0,16); nscp->sense_buffer[0] = 0x70; nscp->sense_buffer[2] = NOT_READY; nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } } else if (gdth_cmnd_priv(nscp)->internal_command) { if (!(cmd_index=gdth_special_cmd(ha, nscp))) this_cmd = FALSE; next_cmd = FALSE; } else if (b != ha->virt_bus) { if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW || !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b)))) this_cmd = FALSE; else ha->raw[BUS_L2P(ha,b)].io_cnt[t]++; } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) { TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n", nscp->cmnd[0], b, t, l)); nscp->result = DID_BAD_TARGET << 16; if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else { switch (nscp->cmnd[0]) { case TEST_UNIT_READY: case INQUIRY: case REQUEST_SENSE: case READ_CAPACITY: case VERIFY: case START_STOP: case MODE_SENSE: case SERVICE_ACTION_IN: TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) { /* return UNIT_ATTENTION */ TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n", nscp->cmnd[0], t)); ha->hdr[t].media_changed = FALSE; memset((char*)nscp->sense_buffer,0,16); nscp->sense_buffer[0] = 0x70; nscp->sense_buffer[2] = UNIT_ATTENTION; nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); if (!nscp_cmndinfo->wait_for_completion) nscp_cmndinfo->wait_for_completion++; else gdth_scsi_done(nscp); } else if (gdth_internal_cache_cmd(ha, nscp)) gdth_scsi_done(nscp); break; case ALLOW_MEDIUM_REMOVAL: TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0], nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3], nscp->cmnd[4],nscp->cmnd[5])); if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) { TRACE(("Prevent r. nonremov. 
drive->do nothing\n"));
                    nscp->result = DID_OK << 16;
                    nscp->sense_buffer[0] = 0;
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else {
                    nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
                    TRACE(("Prevent/allow r. %d rem. drive %d\n",
                           nscp->cmnd[4],nscp->cmnd[3]));
                    if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                        this_cmd = FALSE;
                }
                break;

            case RESERVE:
            case RELEASE:
                TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
                        "RESERVE" : "RELEASE"));
                if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

            case READ_6:
            case WRITE_6:
            case READ_10:
            case WRITE_10:
            case READ_16:
            case WRITE_16:
                if (ha->hdr[t].media_changed) {
                    /* return UNIT_ATTENTION */
                    TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
                            nscp->cmnd[0], t));
                    ha->hdr[t].media_changed = FALSE;
                    memset((char*)nscp->sense_buffer,0,16);
                    nscp->sense_buffer[0] = 0x70;
                    nscp->sense_buffer[2] = UNIT_ATTENTION;
                    nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
                    if (!nscp_cmndinfo->wait_for_completion)
                        nscp_cmndinfo->wait_for_completion++;
                    else
                        gdth_scsi_done(nscp);
                } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
                    this_cmd = FALSE;
                break;

            default:
                TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
                        nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
                        nscp->cmnd[4],nscp->cmnd[5]));
                printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
                       ha->hanum, nscp->cmnd[0]);
                nscp->result = DID_ABORT << 16;
                if (!nscp_cmndinfo->wait_for_completion)
                    nscp_cmndinfo->wait_for_completion++;
                else
                    gdth_scsi_done(nscp);
                break;
            }
        }

        if (!this_cmd)
            break;
        if (nscp == ha->req_first)
            ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
        else
            pscp->SCp.ptr = nscp->SCp.ptr;
        if (!next_cmd)
            break;
    }

    if (ha->cmd_cnt > 0) {
        gdth_release_event(ha);
    }
    if (!gdth_polling)
        spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (gdth_polling && ha->cmd_cnt > 0) {
        if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
            printk("GDT-HA %d: Command %d timed out !\n",
                   ha->hanum, cmd_index);
    }
}

/*
 * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
 * buffers, kmap_atomic() as needed.
 */
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
                                    char *buffer, ushort count)
{
    ushort cpcount, i, max_sg = scsi_sg_count(scp);
    ushort cpsum, cpnow;
    struct scatterlist *sl;
    char *address;

    cpcount = min_t(ushort, count, scsi_bufflen(scp));

    if (cpcount) {
        cpsum = 0;
        scsi_for_each_sg(scp, sl, max_sg, i) {
            unsigned long flags;
            cpnow = (ushort)sl->length;
            TRACE(("copy_internal() now %d sum %d count %d %d\n",
                   cpnow, cpsum, cpcount, scsi_bufflen(scp)));
            if (cpsum+cpnow > cpcount)
                cpnow = cpcount - cpsum;
            cpsum += cpnow;
            if (!sg_page(sl)) {
                printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
                       ha->hanum);
                return;
            }
            local_irq_save(flags);
            address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
            memcpy(address, buffer, cpnow);
            flush_dcache_page(sg_page(sl));
            kunmap_atomic(address, KM_BIO_SRC_IRQ);
            local_irq_restore(flags);
            if (cpsum == cpcount)
                break;
            buffer += cpnow;
        }
    } else if (count) {
        printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
               ha->hanum);
        WARN_ON(1);
    }
}

static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
{
    unchar t;
    gdth_inq_data inq;
    gdth_rdcap_data rdc;
    gdth_sense_data sd;
    gdth_modep_data mpd;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);

    t = scp->device->id;
    TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
           scp->cmnd[0],t));

    scp->result = DID_OK << 16;
    scp->sense_buffer[0] = 0;

    switch (scp->cmnd[0]) {
    case TEST_UNIT_READY:
    case VERIFY:
    case START_STOP:
        TRACE2(("Test/Verify/Start hdrive %d\n",t));
        break;

    case INQUIRY:
        TRACE2(("Inquiry hdrive %d devtype %d\n",
                t,ha->hdr[t].devtype));
        inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
        /* you can set all disks to removable here if you want to do
           a flush using the ALLOW_MEDIUM_REMOVAL command */
        inq.modif_rmb = 0x00;
        if ((ha->hdr[t].devtype & 1) ||
            (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
            inq.modif_rmb = 0x80;
        inq.version   = 2;
        inq.resp_aenc = 2;
        inq.add_length = 32;
        strcpy(inq.vendor,ha->oem_name);
        sprintf(inq.product,"Host Drive #%02d",t);
        strcpy(inq.revision," ");
        gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
        break;

    case REQUEST_SENSE:
        TRACE2(("Request sense hdrive %d\n",t));
        sd.errorcode = 0x70;
        sd.segno     = 0x00;
        sd.key       = NO_SENSE;
        sd.info      = 0;
        sd.add_length = 0;
        gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
        break;

    case MODE_SENSE:
        TRACE2(("Mode sense hdrive %d\n",t));
        memset((char*)&mpd,0,sizeof(gdth_modep_data));
        mpd.hd.data_length = sizeof(gdth_modep_data);
        mpd.hd.dev_par     = (ha->hdr[t].devtype&2) ?
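                             /* WP bit (0x80) of the mode-parameter
                                header's device-specific byte, taken
                                from devtype bit 1 */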
0x80:0; mpd.hd.bd_length = sizeof(mpd.bd); mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data)); break; case READ_CAPACITY: TRACE2(("Read capacity hdrive %d\n",t)); if (ha->hdr[t].size > (ulong64)0xffffffff) rdc.last_block_no = 0xffffffff; else rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); rdc.block_length = cpu_to_be32(SECTOR_SIZE); gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); break; case SERVICE_ACTION_IN: if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 && (ha->cache_feat & GDT_64BIT)) { gdth_rdcap16_data rdc16; TRACE2(("Read capacity (16) hdrive %d\n",t)); rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); rdc16.block_length = cpu_to_be32(SECTOR_SIZE); gdth_copy_internal_data(ha, scp, (char*)&rdc16, sizeof(gdth_rdcap16_data)); } else { scp->result = DID_ABORT << 16; } break; default: TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0])); break; } if (!cmndinfo->wait_for_completion) cmndinfo->wait_for_completion++; else return 1; return 0; } static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive) { register gdth_cmd_str *cmdp; struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); ulong32 cnt, blockcnt; ulong64 no, blockno; int i, cmd_index, read_write, sgcnt, mode64; cmdp = ha->pccb; TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n", scp->cmnd[0],scp->cmd_len,hdrive)); if (ha->type==GDT_EISA && ha->cmd_cnt>0) return 0; mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE; /* test for READ_16, WRITE_16 if !mode64 ? --- not required, should not occur due to error return on READ_CAPACITY_16 */ cmdp->Service = CACHESERVICE; cmdp->RequestBuffer = scp; /* search free command index */ if (!(cmd_index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } /* if it's the first command, set command semaphore */ if (ha->cmd_cnt == 0) gdth_set_sema0(ha); /* fill command */ read_write = 0; if (cmndinfo->OpCode != -1) cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */ else if (scp->cmnd[0] == RESERVE) cmdp->OpCode = GDT_RESERVE_DRV; else if (scp->cmnd[0] == RELEASE) cmdp->OpCode = GDT_RELEASE_DRV; else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) { if (scp->cmnd[4] & 1) /* prevent ? */ cmdp->OpCode = GDT_MOUNT; else if (scp->cmnd[3] & 1) /* removable drive ? 
*/ cmdp->OpCode = GDT_UNMOUNT; else cmdp->OpCode = GDT_FLUSH; } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 || scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16 ) { read_write = 1; if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) && (ha->cache_feat & GDT_WR_THROUGH))) cmdp->OpCode = GDT_WRITE_THR; else cmdp->OpCode = GDT_WRITE; } else { read_write = 2; cmdp->OpCode = GDT_READ; } cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.cache64.DeviceNo = hdrive; cmdp->u.cache64.BlockNo = 1; cmdp->u.cache64.sg_canz = 0; } else { cmdp->u.cache.DeviceNo = hdrive; cmdp->u.cache.BlockNo = 1; cmdp->u.cache.sg_canz = 0; } if (read_write) { if (scp->cmd_len == 16) { memcpy(&no, &scp->cmnd[2], sizeof(ulong64)); blockno = be64_to_cpu(no); memcpy(&cnt, &scp->cmnd[10], sizeof(ulong32)); blockcnt = be32_to_cpu(cnt); } else if (scp->cmd_len == 10) { memcpy(&no, &scp->cmnd[2], sizeof(ulong32)); blockno = be32_to_cpu(no); memcpy(&cnt, &scp->cmnd[7], sizeof(ushort)); blockcnt = be16_to_cpu(cnt); } else { memcpy(&no, &scp->cmnd[0], sizeof(ulong32)); blockno = be32_to_cpu(no) & 0x001fffffUL; blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4]; } if (mode64) { cmdp->u.cache64.BlockNo = blockno; cmdp->u.cache64.BlockCnt = blockcnt; } else { cmdp->u.cache.BlockNo = (ulong32)blockno; cmdp->u.cache.BlockCnt = blockcnt; } if (scsi_bufflen(scp)) { cmndinfo->dma_dir = (read_write == 1 ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), cmndinfo->dma_dir); if (mode64) { struct scatterlist *sl; cmdp->u.cache64.DestAddr= (ulong64)-1; cmdp->u.cache64.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS if (cmdp->u.cache64.sg_lst[i].sg_ptr > (ulong64)0xffffffff) ha->dma64_cnt++; else ha->dma32_cnt++; #endif cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl); } } else { struct scatterlist *sl; cmdp->u.cache.DestAddr= 0xffffffff; cmdp->u.cache.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl); #ifdef GDTH_DMA_STATISTICS ha->dma32_cnt++; #endif cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl); } } #ifdef GDTH_STATISTICS if (max_sg < (ulong32)sgcnt) { max_sg = (ulong32)sgcnt; TRACE3(("GDT: max_sg = %d\n",max_sg)); } #endif } } /* evaluate command size, check space */ if (mode64) { TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz, cmdp->u.cache64.sg_lst[0].sg_ptr, cmdp->u.cache64.sg_lst[0].sg_len)); TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n", cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt)); ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + (ushort)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str); } else { TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n", cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz, cmdp->u.cache.sg_lst[0].sg_ptr, cmdp->u.cache.sg_lst[0].sg_len)); TRACE(("cache cmd: cmd %d blockno. 
%d, blockcnt %d\n", cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt)); ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + (ushort)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str); } if (ha->cmd_len & 3) ha->cmd_len += (4 - (ha->cmd_len & 3)); if (ha->cmd_cnt > 0) { if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) > ha->ic_all_size) { TRACE2(("gdth_fill_cache() DPMEM overflow\n")); ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND; return 0; } } /* copy command */ gdth_copy_command(ha); return cmd_index; } static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) { register gdth_cmd_str *cmdp; ushort i; dma_addr_t sense_paddr; int cmd_index, sgcnt, mode64; unchar t,l; struct page *page; ulong offset; struct gdth_cmndinfo *cmndinfo; t = scp->device->id; l = scp->device->lun; cmdp = ha->pccb; TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n", scp->cmnd[0],b,t,l)); if (ha->type==GDT_EISA && ha->cmd_cnt>0) return 0; mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE; cmdp->Service = SCSIRAWSERVICE; cmdp->RequestBuffer = scp; /* search free command index */ if (!(cmd_index=gdth_get_cmd_index(ha))) { TRACE(("GDT: No free command index found\n")); return 0; } /* if it's the first command, set command semaphore */ if (ha->cmd_cnt == 0) gdth_set_sema0(ha); cmndinfo = gdth_cmnd_priv(scp); /* fill command */ if (cmndinfo->OpCode != -1) { cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */ cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.raw64.direction = (cmndinfo->phase >> 8); TRACE2(("special raw cmd 0x%x param 0x%x\n", cmdp->OpCode, cmdp->u.raw64.direction)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst); } else { cmdp->u.raw.direction = (cmndinfo->phase >> 8); TRACE2(("special raw cmd 0x%x param 0x%x\n", cmdp->OpCode, cmdp->u.raw.direction)); /* evaluate command size */ ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst); } } else { page = virt_to_page(scp->sense_buffer); offset = (ulong)scp->sense_buffer & ~PAGE_MASK; sense_paddr = pci_map_page(ha->pdev,page,offset, 16,PCI_DMA_FROMDEVICE); cmndinfo->sense_paddr = sense_paddr; cmdp->OpCode = GDT_WRITE; /* always */ cmdp->BoardNode = LOCALBOARD; if (mode64) { cmdp->u.raw64.reserved = 0; cmdp->u.raw64.mdisc_time = 0; cmdp->u.raw64.mcon_time = 0; cmdp->u.raw64.clen = scp->cmd_len; cmdp->u.raw64.target = t; cmdp->u.raw64.lun = l; cmdp->u.raw64.bus = b; cmdp->u.raw64.priority = 0; cmdp->u.raw64.sdlen = scsi_bufflen(scp); cmdp->u.raw64.sense_len = 16; cmdp->u.raw64.sense_data = sense_paddr; cmdp->u.raw64.direction = gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; memcpy(cmdp->u.raw64.cmd,scp->cmnd,16); cmdp->u.raw64.sg_ranz = 0; } else { cmdp->u.raw.reserved = 0; cmdp->u.raw.mdisc_time = 0; cmdp->u.raw.mcon_time = 0; cmdp->u.raw.clen = scp->cmd_len; cmdp->u.raw.target = t; cmdp->u.raw.lun = l; cmdp->u.raw.bus = b; cmdp->u.raw.priority = 0; cmdp->u.raw.link_p = 0; cmdp->u.raw.sdlen = scsi_bufflen(scp); cmdp->u.raw.sense_len = 16; cmdp->u.raw.sense_data = sense_paddr; cmdp->u.raw.direction = gdth_direction_tab[scp->cmnd[0]]==DOU ? 
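                /* data direction comes from the per-opcode table
                   gdth_direction_tab, not from the midlayer */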
GDTH_DATA_OUT:GDTH_DATA_IN;
            memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
            cmdp->u.raw.sg_ranz = 0;
        }

        if (scsi_bufflen(scp)) {
            cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL;
            sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp),
                               cmndinfo->dma_dir);
            if (mode64) {
                struct scatterlist *sl;

                cmdp->u.raw64.sdata = (ulong64)-1;
                cmdp->u.raw64.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    if (cmdp->u.raw64.sg_lst[i].sg_ptr > (ulong64)0xffffffff)
                        ha->dma64_cnt++;
                    else
                        ha->dma32_cnt++;
#endif
                    cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            } else {
                struct scatterlist *sl;

                cmdp->u.raw.sdata = 0xffffffff;
                cmdp->u.raw.sg_ranz = sgcnt;
                scsi_for_each_sg(scp, sl, sgcnt, i) {
                    cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
#ifdef GDTH_DMA_STATISTICS
                    ha->dma32_cnt++;
#endif
                    cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
                }
            }

#ifdef GDTH_STATISTICS
            if (max_sg < sgcnt) {
                max_sg = sgcnt;
                TRACE3(("GDT: max_sg = %d\n",sgcnt));
            }
#endif
        }

        if (mode64) {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
                   cmdp->u.raw64.sg_lst[0].sg_ptr,
                   cmdp->u.raw64.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
                (ushort)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
        } else {
            TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
                   cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
                   cmdp->u.raw.sg_lst[0].sg_ptr,
                   cmdp->u.raw.sg_lst[0].sg_len));
            /* evaluate command size */
            ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
                (ushort)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
        }
    }

    /* check space */
    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}

static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
{
    register gdth_cmd_str *cmdp;
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    int cmd_index;

    cmdp = ha->pccb;
    TRACE2(("gdth_special_cmd(): "));

    if (ha->type==GDT_EISA && ha->cmd_cnt>0)
        return 0;

    *cmdp = *cmndinfo->internal_cmd_str;
    cmdp->RequestBuffer = scp;

    /* search free command index */
    if (!(cmd_index=gdth_get_cmd_index(ha))) {
        TRACE(("GDT: No free command index found\n"));
        return 0;
    }

    /* if it's the first command, set command semaphore */
    if (ha->cmd_cnt == 0)
        gdth_set_sema0(ha);

    /* evaluate command size, check space */
    if (cmdp->OpCode == GDT_IOCTL) {
        TRACE2(("IOCTL\n"));
        ha->cmd_len =
            GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(ulong64);
    } else if (cmdp->Service == CACHESERVICE) {
        TRACE2(("cache command %d\n",cmdp->OpCode));
        if (ha->cache_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
    } else if (cmdp->Service == SCSIRAWSERVICE) {
        TRACE2(("raw command %d\n",cmdp->OpCode));
        if (ha->raw_feat & GDT_64BIT)
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
        else
            ha->cmd_len =
                GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
    }

    if (ha->cmd_len & 3)
        ha->cmd_len += (4 - (ha->cmd_len & 3));

    if (ha->cmd_cnt > 0) {
        if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
            ha->ic_all_size) {
            TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
            ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
            return 0;
        }
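        /*
         * Batched commands share the controller's dual ported memory
         * window: if this command no longer fits behind the ones
         * already staged, its index is returned to the table and the
         * caller falls back to sending what has been queued so far.
         */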
    }

    /* copy command */
    gdth_copy_command(ha);
    return cmd_index;
}

/* Controller event handling functions */
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, ushort source,
                                      ushort idx, gdth_evt_data *evt)
{
    gdth_evt_str *e;
    struct timeval tv;

    /* no GDTH_LOCK_HA() ! */
    TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
    if (source == 0)                        /* no source -> no event */
        return NULL;

    if (ebuffer[elastidx].event_source == source &&
        ebuffer[elastidx].event_idx == idx &&
        ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
          !memcmp((char *)&ebuffer[elastidx].event_data.eu,
                  (char *)&evt->eu, evt->size)) ||
         (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
          !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
                  (char *)&evt->event_string)))) {
        e = &ebuffer[elastidx];
        do_gettimeofday(&tv);
        e->last_stamp = tv.tv_sec;
        ++e->same_count;
    } else {
        if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
            ++elastidx;
            if (elastidx == MAX_EVENTS)
                elastidx = 0;
            if (elastidx == eoldidx) {              /* reached mark ? */
                ++eoldidx;
                if (eoldidx == MAX_EVENTS)
                    eoldidx = 0;
            }
        }
        e = &ebuffer[elastidx];
        e->event_source = source;
        e->event_idx = idx;
        do_gettimeofday(&tv);
        e->first_stamp = e->last_stamp = tv.tv_sec;
        e->same_count = 1;
        e->event_data = *evt;
        e->application = 0;
    }
    return e;
}

static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    ulong flags;

    TRACE2(("gdth_read_event() handle %d\n", handle));
    spin_lock_irqsave(&ha->smp_lock, flags);
    if (handle == -1)
        eindex = eoldidx;
    else
        eindex = handle;
    estr->event_source = 0;

    if (eindex < 0 || eindex >= MAX_EVENTS) {
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        return eindex;
    }
    e = &ebuffer[eindex];
    if (e->event_source != 0) {
        if (eindex != elastidx) {
            if (++eindex == MAX_EVENTS)
                eindex = 0;
        } else {
            eindex = -1;
        }
        memcpy(estr, e, sizeof(gdth_evt_str));
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);
    return eindex;
}

static void gdth_readapp_event(gdth_ha_str *ha,
                               unchar application, gdth_evt_str *estr)
{
    gdth_evt_str *e;
    int eindex;
    ulong flags;
    unchar found = FALSE;

    TRACE2(("gdth_readapp_event() app. 
%d\n", application)); spin_lock_irqsave(&ha->smp_lock, flags); eindex = eoldidx; for (;;) { e = &ebuffer[eindex]; if (e->event_source == 0) break; if ((e->application & application) == 0) { e->application |= application; found = TRUE; break; } if (eindex == elastidx) break; if (++eindex == MAX_EVENTS) eindex = 0; } if (found) memcpy(estr, e, sizeof(gdth_evt_str)); else estr->event_source = 0; spin_unlock_irqrestore(&ha->smp_lock, flags); } static void gdth_clear_events(void) { TRACE(("gdth_clear_events()")); eoldidx = elastidx = 0; ebuffer[0].event_source = 0; } /* SCSI interface functions */ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, int gdth_from_wait, int* pIndex) { gdt6m_dpram_str __iomem *dp6m_ptr = NULL; gdt6_dpram_str __iomem *dp6_ptr; gdt2_dpram_str __iomem *dp2_ptr; Scsi_Cmnd *scp; int rval, i; unchar IStatus; ushort Service; ulong flags = 0; #ifdef INT_COAL int coalesced = FALSE; int next = FALSE; gdth_coal_status *pcs = NULL; int act_int_coal = 0; #endif TRACE(("gdth_interrupt() IRQ %d\n", ha->irq)); /* if polling and not from gdth_wait() -> return */ if (gdth_polling) { if (!gdth_from_wait) { return IRQ_HANDLED; } } if (!gdth_polling) spin_lock_irqsave(&ha->smp_lock, flags); /* search controller */ IStatus = gdth_get_status(ha); if (IStatus == 0) { /* spurious interrupt */ if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } #ifdef GDTH_STATISTICS ++act_ints; #endif #ifdef INT_COAL /* See if the fw is returning coalesced status */ if (IStatus == COALINDEX) { /* Coalesced status. Setup the initial status buffer pointer and flags */ pcs = ha->coal_stat; coalesced = TRUE; next = TRUE; } do { if (coalesced) { /* For coalesced requests all status information is found in the status buffer */ IStatus = (unchar)(pcs->status & 0xff); } #endif if (ha->type == GDT_EISA) { if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = inw(ha->bmic + MAILBOXREG+8); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = inl(ha->bmic + MAILBOXREG+12); ha->service = inw(ha->bmic + MAILBOXREG+10); ha->info2 = inl(ha->bmic + MAILBOXREG+4); outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */ outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */ } else if (ha->type == GDT_ISA) { dp2_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = readw(&dp2_ptr->u.ic.Status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = readl(&dp2_ptr->u.ic.Info[0]); ha->service = readw(&dp2_ptr->u.ic.Service); ha->info2 = readl(&dp2_ptr->u.ic.Info[1]); writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */ writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */ writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */ } else if (ha->type == GDT_PCI) { dp6_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = readw(&dp6_ptr->u.ic.Status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; ha->info = readl(&dp6_ptr->u.ic.Info[0]); ha->service = readw(&dp6_ptr->u.ic.Service); ha->info2 = readl(&dp6_ptr->u.ic.Info[1]); writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */ writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */ writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */ } else if (ha->type == GDT_PCINEW) { if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; ha->status = 
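                /* GDT_PCINEW: registers live behind the PLX PCI
                   bridge and are accessed as I/O ports, not DPRAM */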
inw(PTR2USHORT(&ha->plx->status)); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else ha->status = S_OK; ha->info = inl(PTR2USHORT(&ha->plx->info[0])); ha->service = inw(PTR2USHORT(&ha->plx->service)); ha->info2 = inl(PTR2USHORT(&ha->plx->info[1])); outb(0xff, PTR2USHORT(&ha->plx->edoor_reg)); outb(0x00, PTR2USHORT(&ha->plx->sema1_reg)); } else if (ha->type == GDT_PCIMPR) { dp6m_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; #ifdef INT_COAL if (coalesced) ha->status = pcs->ext_status & 0xffff; else #endif ha->status = readw(&dp6m_ptr->i960r.status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; #ifdef INT_COAL /* get information */ if (coalesced) { ha->info = pcs->info0; ha->info2 = pcs->info1; ha->service = (pcs->ext_status >> 16) & 0xffff; } else #endif { ha->info = readl(&dp6m_ptr->i960r.info[0]); ha->service = readw(&dp6m_ptr->i960r.service); ha->info2 = readl(&dp6m_ptr->i960r.info[1]); } /* event string */ if (IStatus == ASYNCINDEX) { if (ha->service != SCREENSERVICE && (ha->fw_vers & 0xff) >= 0x1a) { ha->dvr.severity = readb (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity); for (i = 0; i < 256; ++i) { ha->dvr.event_string[i] = readb (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]); if (ha->dvr.event_string[i] == 0) break; } } } #ifdef INT_COAL /* Make sure that non coalesced interrupts get cleared before being handled by gdth_async_event/gdth_sync_event */ if (!coalesced) #endif { writeb(0xff, &dp6m_ptr->i960r.edoor_reg); writeb(0, &dp6m_ptr->i960r.sema1_reg); } } else { TRACE2(("gdth_interrupt() unknown controller type\n")); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } TRACE(("gdth_interrupt() index %d stat %d info %d\n", IStatus,ha->status,ha->info)); if (gdth_from_wait) { *pIndex = (int)IStatus; } if (IStatus == ASYNCINDEX) { TRACE2(("gdth_interrupt() async. event\n")); gdth_async_event(ha); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); gdth_next(ha); return IRQ_HANDLED; } if (IStatus == SPEZINDEX) { TRACE2(("Service unknown or not initialized !\n")); ha->dvr.size = sizeof(ha->dvr.eu.driver); ha->dvr.eu.driver.ionode = ha->hanum; gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } scp = ha->cmd_tab[IStatus-2].cmnd; Service = ha->cmd_tab[IStatus-2].service; ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND; if (scp == UNUSED_CMND) { TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus)); ha->dvr.size = sizeof(ha->dvr.eu.driver); ha->dvr.eu.driver.ionode = ha->hanum; ha->dvr.eu.driver.index = IStatus; gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } if (scp == INTERNAL_CMND) { TRACE(("gdth_interrupt() answer to internal command\n")); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); return IRQ_HANDLED; } TRACE(("gdth_interrupt() sync. 
status\n")); rval = gdth_sync_event(ha,Service,IStatus,scp); if (!gdth_polling) spin_unlock_irqrestore(&ha->smp_lock, flags); if (rval == 2) { gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority); } else if (rval == 1) { gdth_scsi_done(scp); } #ifdef INT_COAL if (coalesced) { /* go to the next status in the status buffer */ ++pcs; #ifdef GDTH_STATISTICS ++act_int_coal; if (act_int_coal > max_int_coal) { max_int_coal = act_int_coal; printk("GDT: max_int_coal = %d\n",(ushort)max_int_coal); } #endif /* see if there is another status */ if (pcs->status == 0) /* Stop the coalesce loop */ next = FALSE; } } while (next); /* coalescing only for new GDT_PCIMPR controllers available */ if (ha->type == GDT_PCIMPR && coalesced) { writeb(0xff, &dp6m_ptr->i960r.edoor_reg); writeb(0, &dp6m_ptr->i960r.sema1_reg); } #endif gdth_next(ha); return IRQ_HANDLED; } static irqreturn_t gdth_interrupt(int irq, void *dev_id) { gdth_ha_str *ha = dev_id; return __gdth_interrupt(ha, false, NULL); } static int gdth_sync_event(gdth_ha_str *ha, int service, unchar index, Scsi_Cmnd *scp) { gdth_msg_str *msg; gdth_cmd_str *cmdp; unchar b, t; struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); cmdp = ha->pccb; TRACE(("gdth_sync_event() serv %d status %d\n", service,ha->status)); if (service == SCREENSERVICE) { msg = ha->pmsg; TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n", msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen)); if (msg->msg_len > MSGLEN+1) msg->msg_len = MSGLEN+1; if (msg->msg_len) if (!(msg->msg_answer && msg->msg_ext)) { msg->msg_text[msg->msg_len] = '\0'; printk("%s",msg->msg_text); } if (msg->msg_ext && !msg->msg_answer) { while (gdth_test_busy(ha)) gdth_delay(0); cmdp->Service = SCREENSERVICE; cmdp->RequestBuffer = SCREEN_CMND; gdth_get_cmd_index(ha); gdth_set_sema0(ha); cmdp->OpCode = GDT_READ; cmdp->BoardNode = LOCALBOARD; cmdp->u.screen.reserved = 0; cmdp->u.screen.su.msg.msg_handle= msg->msg_handle; cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; ha->cmd_offs_dpmem = 0; ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) + sizeof(ulong64); ha->cmd_cnt = 0; gdth_copy_command(ha); gdth_release_event(ha); return 0; } if (msg->msg_answer && msg->msg_alen) { /* default answers (getchar() not possible) */ if (msg->msg_alen == 1) { msg->msg_alen = 0; msg->msg_len = 1; msg->msg_text[0] = 0; } else { msg->msg_alen -= 2; msg->msg_len = 2; msg->msg_text[0] = 1; msg->msg_text[1] = 0; } msg->msg_ext = 0; msg->msg_answer = 0; while (gdth_test_busy(ha)) gdth_delay(0); cmdp->Service = SCREENSERVICE; cmdp->RequestBuffer = SCREEN_CMND; gdth_get_cmd_index(ha); gdth_set_sema0(ha); cmdp->OpCode = GDT_WRITE; cmdp->BoardNode = LOCALBOARD; cmdp->u.screen.reserved = 0; cmdp->u.screen.su.msg.msg_handle= msg->msg_handle; cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; ha->cmd_offs_dpmem = 0; ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) + sizeof(ulong64); ha->cmd_cnt = 0; gdth_copy_command(ha); gdth_release_event(ha); return 0; } printk("\n"); } else { b = scp->device->channel; t = scp->device->id; if (cmndinfo->OpCode == -1 && b != ha->virt_bus) { ha->raw[BUS_L2P(ha,b)].io_cnt[t]--; } /* cache or raw service */ if (ha->status == S_BSY) { TRACE2(("Controller busy -> retry !\n")); if (cmndinfo->OpCode == GDT_MOUNT) cmndinfo->OpCode = GDT_CLUST_INFO; /* retry */ return 2; } if (scsi_bufflen(scp)) pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), cmndinfo->dma_dir); if (cmndinfo->sense_paddr) pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16, PCI_DMA_FROMDEVICE); if (ha->status == S_OK) 
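        /*
         * S_OK: internal special commands may be chained here - a
         * GDT_CLUST_INFO answer can turn into a GDT_MOUNT, and the
         * command is then re-queued with HIGH_PRI (return value 2).
         */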
{ cmndinfo->status = S_OK; cmndinfo->info = ha->info; if (cmndinfo->OpCode != -1) { TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n", cmndinfo->OpCode)); /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */ if (cmndinfo->OpCode == GDT_CLUST_INFO) { ha->hdr[t].cluster_type = (unchar)ha->info; if (!(ha->hdr[t].cluster_type & CLUSTER_MOUNTED)) { /* NOT MOUNTED -> MOUNT */ cmndinfo->OpCode = GDT_MOUNT; if (ha->hdr[t].cluster_type & CLUSTER_RESERVED) { /* cluster drive RESERVED (on the other node) */ cmndinfo->phase = -2; /* reservation conflict */ } } else { cmndinfo->OpCode = -1; } } else { if (cmndinfo->OpCode == GDT_MOUNT) { ha->hdr[t].cluster_type |= CLUSTER_MOUNTED; ha->hdr[t].media_changed = TRUE; } else if (cmndinfo->OpCode == GDT_UNMOUNT) { ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED; ha->hdr[t].media_changed = TRUE; } cmndinfo->OpCode = -1; } /* retry */ cmndinfo->priority = HIGH_PRI; return 2; } else { /* RESERVE/RELEASE ? */ if (scp->cmnd[0] == RESERVE) { ha->hdr[t].cluster_type |= CLUSTER_RESERVED; } else if (scp->cmnd[0] == RELEASE) { ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; } scp->result = DID_OK << 16; scp->sense_buffer[0] = 0; } } else { cmndinfo->status = ha->status; cmndinfo->info = ha->info; if (cmndinfo->OpCode != -1) { TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n", cmndinfo->OpCode, ha->status)); if (cmndinfo->OpCode == GDT_SCAN_START || cmndinfo->OpCode == GDT_SCAN_END) { cmndinfo->OpCode = -1; /* retry */ cmndinfo->priority = HIGH_PRI; return 2; } memset((char*)scp->sense_buffer,0,16); scp->sense_buffer[0] = 0x70; scp->sense_buffer[2] = NOT_READY; scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); } else if (service == CACHESERVICE) { if (ha->status == S_CACHE_UNKNOWN && (ha->hdr[t].cluster_type & CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) { /* bus reset -> force GDT_CLUST_INFO */ ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED; } memset((char*)scp->sense_buffer,0,16); if (ha->status == (ushort)S_CACHE_RESERV) { scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1); } else { scp->sense_buffer[0] = 0x70; scp->sense_buffer[2] = NOT_READY; scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1); } if (!cmndinfo->internal_command) { ha->dvr.size = sizeof(ha->dvr.eu.sync); ha->dvr.eu.sync.ionode = ha->hanum; ha->dvr.eu.sync.service = service; ha->dvr.eu.sync.status = ha->status; ha->dvr.eu.sync.info = ha->info; ha->dvr.eu.sync.hostdrive = t; if (ha->status >= 0x8000) gdth_store_event(ha, ES_SYNC, 0, &ha->dvr); else gdth_store_event(ha, ES_SYNC, service, &ha->dvr); } } else { /* sense buffer filled from controller firmware (DMA) */ if (ha->status != S_RAW_SCSI || ha->info >= 0x100) { scp->result = DID_BAD_TARGET << 16; } else { scp->result = (DID_OK << 16) | ha->info; } } } if (!cmndinfo->wait_for_completion) cmndinfo->wait_for_completion++; else return 1; } return 0; } static char *async_cache_tab[] = { /* 0*/ "\011\000\002\002\002\004\002\006\004" "GDT HA %u, service %u, async. status %u/%lu unknown", /* 1*/ "\011\000\002\002\002\004\002\006\004" "GDT HA %u, service %u, async. status %u/%lu unknown", /* 2*/ "\005\000\002\006\004" "GDT HA %u, Host Drive %lu not ready", /* 3*/ "\005\000\002\006\004" "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. 
Drive may crash in the future and should be replaced", /* 4*/ "\005\000\002\006\004" "GDT HA %u, mirror update on Host Drive %lu failed", /* 5*/ "\005\000\002\006\004" "GDT HA %u, Mirror Drive %lu failed", /* 6*/ "\005\000\002\006\004" "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced", /* 7*/ "\005\000\002\006\004" "GDT HA %u, Host Drive %lu write protected", /* 8*/ "\005\000\002\006\004" "GDT HA %u, media changed in Host Drive %lu", /* 9*/ "\005\000\002\006\004" "GDT HA %u, Host Drive %lu is offline", /*10*/ "\005\000\002\006\004" "GDT HA %u, media change of Mirror Drive %lu", /*11*/ "\005\000\002\006\004" "GDT HA %u, Mirror Drive %lu is write protected", /*12*/ "\005\000\002\006\004" "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!", /*13*/ "\007\000\002\006\002\010\002" "GDT HA %u, Array Drive %u: Cache Drive %u failed", /*14*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: FAIL state entered", /*15*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: error", /*16*/ "\007\000\002\006\002\010\002" "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u", /*17*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity build failed", /*18*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive rebuild failed", /*19*/ "\005\000\002\010\002" "GDT HA %u, Test of Hot Fix %u failed", /*20*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive build finished successfully", /*21*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive rebuild finished successfully", /*22*/ "\007\000\002\006\002\010\002" "GDT HA %u, Array Drive %u: Hot Fix %u activated", /*23*/ "\005\000\002\006\002" "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error", /*24*/ "\005\000\002\010\002" "GDT HA %u, mirror update on Cache Drive %u completed", /*25*/ "\005\000\002\010\002" "GDT HA %u, mirror update on Cache Drive %lu failed", /*26*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive rebuild started", /*27*/ "\005\000\002\012\001" "GDT HA %u, Fault bus %u: SHELF OK detected", /*28*/ "\005\000\002\012\001" "GDT HA %u, Fault bus %u: SHELF not OK detected", /*29*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started", /*30*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: new disk detected", /*31*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: old disk detected", /*32*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid", /*33*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: invalid device detected", /*34*/ "\011\000\002\012\001\013\001\006\004" "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)", /*35*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: disk write protected", /*36*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: disk not available", /*37*/ "\007\000\002\012\001\006\004" "GDT HA %u, Fault bus %u: swap detected (%lu)", /*38*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully", /*39*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug", /*40*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted", /*41*/ "\007\000\002\012\001\013\001" "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix 
started", /*42*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive build started", /*43*/ "\003\000\002" "GDT HA %u, DRAM parity error detected", /*44*/ "\005\000\002\006\002" "GDT HA %u, Mirror Drive %u: update started", /*45*/ "\007\000\002\006\002\010\002" "GDT HA %u, Mirror Drive %u: Hot Fix %u activated", /*46*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available", /*47*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available", /*48*/ "\005\000\002\006\002" "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available", /*49*/ "\005\000\002\006\002" "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available", /*50*/ "\007\000\002\012\001\013\001" "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received", /*51*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: expand started", /*52*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: expand finished successfully", /*53*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: expand failed", /*54*/ "\003\000\002" "GDT HA %u, CPU temperature critical", /*55*/ "\003\000\002" "GDT HA %u, CPU temperature OK", /*56*/ "\005\000\002\006\004" "GDT HA %u, Host drive %lu created", /*57*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: expand restarted", /*58*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: expand stopped", /*59*/ "\005\000\002\010\002" "GDT HA %u, Mirror Drive %u: drive build quited", /*60*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity build quited", /*61*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: drive rebuild quited", /*62*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity verify started", /*63*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity verify done", /*64*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity verify failed", /*65*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity error detected", /*66*/ "\005\000\002\006\002" "GDT HA %u, Array Drive %u: parity verify quited", /*67*/ "\005\000\002\006\002" "GDT HA %u, Host Drive %u reserved", /*68*/ "\005\000\002\006\002" "GDT HA %u, Host Drive %u mounted and released", /*69*/ "\005\000\002\006\002" "GDT HA %u, Host Drive %u released", /*70*/ "\003\000\002" "GDT HA %u, DRAM error detected and corrected with ECC", /*71*/ "\003\000\002" "GDT HA %u, Uncorrectable DRAM error detected with ECC", /*72*/ "\011\000\002\012\001\013\001\014\001" "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block", /*73*/ "\005\000\002\006\002" "GDT HA %u, Host drive %u resetted locally", /*74*/ "\005\000\002\006\002" "GDT HA %u, Host drive %u resetted remotely", /*75*/ "\003\000\002" "GDT HA %u, async. 
status 75 unknown", }; static int gdth_async_event(gdth_ha_str *ha) { gdth_cmd_str *cmdp; int cmd_index; cmdp= ha->pccb; TRACE2(("gdth_async_event() ha %d serv %d\n", ha->hanum, ha->service)); if (ha->service == SCREENSERVICE) { if (ha->status == MSG_REQUEST) { while (gdth_test_busy(ha)) gdth_delay(0); cmdp->Service = SCREENSERVICE; cmdp->RequestBuffer = SCREEN_CMND; cmd_index = gdth_get_cmd_index(ha); gdth_set_sema0(ha); cmdp->OpCode = GDT_READ; cmdp->BoardNode = LOCALBOARD; cmdp->u.screen.reserved = 0; cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE; cmdp->u.screen.su.msg.msg_addr = ha->msg_phys; ha->cmd_offs_dpmem = 0; ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr) + sizeof(ulong64); ha->cmd_cnt = 0; gdth_copy_command(ha); if (ha->type == GDT_EISA) printk("[EISA slot %d] ",(ushort)ha->brd_phys); else if (ha->type == GDT_ISA) printk("[DPMEM 0x%4X] ",(ushort)ha->brd_phys); else printk("[PCI %d/%d] ",(ushort)(ha->brd_phys>>8), (ushort)((ha->brd_phys>>3)&0x1f)); gdth_release_event(ha); } } else { if (ha->type == GDT_PCIMPR && (ha->fw_vers & 0xff) >= 0x1a) { ha->dvr.size = 0; ha->dvr.eu.async.ionode = ha->hanum; ha->dvr.eu.async.status = ha->status; /* severity and event_string already set! */ } else { ha->dvr.size = sizeof(ha->dvr.eu.async); ha->dvr.eu.async.ionode = ha->hanum; ha->dvr.eu.async.service = ha->service; ha->dvr.eu.async.status = ha->status; ha->dvr.eu.async.info = ha->info; *(ulong32 *)ha->dvr.eu.async.scsi_coord = ha->info2; } gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr ); gdth_log_event( &ha->dvr, NULL ); /* new host drive from expand? */ if (ha->service == CACHESERVICE && ha->status == 56) { TRACE2(("gdth_async_event(): new host drive %d created\n", (ushort)ha->info)); /* gdth_analyse_hdrive(hanum, (ushort)ha->info); */ } } return 1; } static void gdth_log_event(gdth_evt_data *dvr, char *buffer) { gdth_stackframe stack; char *f = NULL; int i,j; TRACE2(("gdth_log_event()\n")); if (dvr->size == 0) { if (buffer == NULL) { printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string); } else { sprintf(buffer,"Adapter %d: %s\n", dvr->eu.async.ionode,dvr->event_string); } } else if (dvr->eu.async.service == CACHESERVICE && INDEX_OK(dvr->eu.async.status, async_cache_tab)) { TRACE2(("GDT: Async. event cache service, event no.: %d\n", dvr->eu.async.status)); f = async_cache_tab[dvr->eu.async.status]; /* i: parameter to push, j: stack element to fill */ for (j=0,i=1; i < f[0]; i+=2) { switch (f[i+1]) { case 4: stack.b[j++] = *(ulong32*)&dvr->eu.stream[(int)f[i]]; break; case 2: stack.b[j++] = *(ushort*)&dvr->eu.stream[(int)f[i]]; break; case 1: stack.b[j++] = *(unchar*)&dvr->eu.stream[(int)f[i]]; break; default: break; } } if (buffer == NULL) { printk(&f[(int)f[0]],stack); printk("\n"); } else { sprintf(buffer,&f[(int)f[0]],stack); } } else { if (buffer == NULL) { printk("GDT HA %u, Unknown async. event service %d event no. %d\n", dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status); } else { sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. 
%d", dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
        }
    }
}

#ifdef GDTH_STATISTICS
static unchar gdth_timer_running;

static void gdth_timeout(ulong data)
{
    ulong32 i;
    Scsi_Cmnd *nscp;
    gdth_ha_str *ha;
    ulong flags;

    if (unlikely(list_empty(&gdth_instances))) {
        gdth_timer_running = 0;
        return;
    }

    ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
    spin_lock_irqsave(&ha->smp_lock, flags);

    for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
        if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
            ++act_stats;

    for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
        ++act_rq;

    TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
            act_ints, act_ios, act_stats, act_rq));
    act_ints = act_ios = 0;

    gdth_timer.expires = jiffies + 30 * HZ;
    add_timer(&gdth_timer);
    spin_unlock_irqrestore(&ha->smp_lock, flags);
}

static void gdth_timer_init(void)
{
    if (gdth_timer_running)
        return;
    gdth_timer_running = 1;
    TRACE2(("gdth_detect(): Initializing timer !\n"));
    gdth_timer.expires = jiffies + HZ;
    gdth_timer.data = 0L;
    gdth_timer.function = gdth_timeout;
    add_timer(&gdth_timer);
}
#else
static inline void gdth_timer_init(void)
{
}
#endif

static void __init internal_setup(char *str,int *ints)
{
    int i, argc;
    char *cur_str, *argv;

    TRACE2(("internal_setup() str %s ints[0] %d\n",
            str ? str:"NULL", ints ? ints[0]:0));

    /* read irq[] from ints[] */
    if (ints) {
        argc = ints[0];
        if (argc > 0) {
            if (argc > MAXHA)
                argc = MAXHA;
            for (i = 0; i < argc; ++i)
                irq[i] = ints[i+1];
        }
    }

    /* analyse string */
    argv = str;
    while (argv && (cur_str = strchr(argv, ':'))) {
        int val = 0, c = *++cur_str;
        if (c == 'n' || c == 'N')
            val = 0;
        else if (c == 'y' || c == 'Y')
            val = 1;
        else
            val = (int)simple_strtoul(cur_str, NULL, 0);

        if (!strncmp(argv, "disable:", 8))
            disable = val;
        else if (!strncmp(argv, "reserve_mode:", 13))
            reserve_mode = val;
        else if (!strncmp(argv, "reverse_scan:", 13))
            reverse_scan = val;
        else if (!strncmp(argv, "hdr_channel:", 12))
            hdr_channel = val;
        else if (!strncmp(argv, "max_ids:", 8))
            max_ids = val;
        else if (!strncmp(argv, "rescan:", 7))
            rescan = val;
        else if (!strncmp(argv, "shared_access:", 14))
            shared_access = val;
        else if (!strncmp(argv, "probe_eisa_isa:", 15))
            probe_eisa_isa = val;
        else if (!strncmp(argv, "reserve_list:", 13)) {
            reserve_list[0] = val;
            for (i = 1; i < MAX_RES_ARGS; i++) {
                cur_str = strchr(cur_str, ',');
                if (!cur_str)
                    break;
                if (!isdigit((int)*++cur_str)) {
                    --cur_str;
                    break;
                }
                reserve_list[i] =
                    (int)simple_strtoul(cur_str, NULL, 0);
            }
            if (!cur_str)
                break;
            argv = ++cur_str;
            continue;
        }

        if ((argv = strchr(argv, ',')))
            ++argv;
    }
}

int __init option_setup(char *str)
{
    int ints[MAXHA + 1];    /* ints[0] holds the count, values follow */
    char *cur = str;
    int i = 1;

    TRACE2(("option_setup() str %s\n", str ? str:"NULL"));

    while (cur && isdigit(*cur) && i <= MAXHA) {
        ints[i++] = simple_strtoul(cur, NULL, 0);
        if ((cur = strchr(cur, ',')) != NULL)
            cur++;
    }

    ints[0] = i - 1;
    internal_setup(cur, ints);
    return 1;
}

static const char *gdth_ctr_name(gdth_ha_str *ha)
{
    TRACE2(("gdth_ctr_name()\n"));

    if (ha->type == GDT_EISA) {
        switch (ha->stype) {
        case GDT3_ID:
            return("GDT3000/3020");
        case GDT3A_ID:
            return("GDT3000A/3020A/3050A");
        case GDT3B_ID:
            return("GDT3000B/3010A");
        }
    } else if (ha->type == GDT_ISA) {
        return("GDT2000/2020");
    } else if (ha->type == GDT_PCI) {
        switch (ha->pdev->device) {
        case PCI_DEVICE_ID_VORTEX_GDT60x0:
            return("GDT6000/6020/6050");
        case PCI_DEVICE_ID_VORTEX_GDT6000B:
            return("GDT6000B/6010");
        }
    }
    /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL!
     */
    return("");
}

static const char *gdth_info(struct Scsi_Host *shp)
{
    gdth_ha_str *ha = shost_priv(shp);

    TRACE2(("gdth_info()\n"));
    return ((const char *)ha->binfo.type_string);
}

static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
{
    gdth_ha_str *ha = shost_priv(scp->device->host);
    struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
    unchar b, t;
    ulong flags;
    enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;

    TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
    b = scp->device->channel;
    t = scp->device->id;

    /*
     * We don't really honor the command timeout, but we allow a
     * command up to six times its actual timeout: reset the timer
     * unless this is already the sixth timeout on this command.
     */
    if (++cmndinfo->timeout_count < 6)
        retval = BLK_EH_RESET_TIMER;

    /* Reset the timeout if it is locked IO */
    spin_lock_irqsave(&ha->smp_lock, flags);
    if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
        (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
        TRACE2(("%s(): locked IO, reset timeout\n", __func__));
        retval = BLK_EH_RESET_TIMER;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

    return retval;
}

static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
    gdth_ha_str *ha = shost_priv(scp->device->host);
    int i;
    ulong flags;
    Scsi_Cmnd *cmnd;
    unchar b;

    TRACE2(("gdth_eh_bus_reset()\n"));

    b = scp->device->channel;

    /* clear command tab */
    spin_lock_irqsave(&ha->smp_lock, flags);
    for (i = 0; i < GDTH_MAXCMDS; ++i) {
        cmnd = ha->cmd_tab[i].cmnd;
        if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
            ha->cmd_tab[i].cmnd = UNUSED_CMND;
    }
    spin_unlock_irqrestore(&ha->smp_lock, flags);

    if (b == ha->virt_bus) {
        /* host drives */
        for (i = 0; i < MAX_HDRIVES; ++i) {
            if (ha->hdr[i].present) {
                spin_lock_irqsave(&ha->smp_lock, flags);
                gdth_polling = TRUE;
                while (gdth_test_busy(ha))
                    gdth_delay(0);
                if (gdth_internal_cmd(ha, CACHESERVICE,
                                      GDT_CLUST_RESET, i, 0, 0))
                    ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
                gdth_polling = FALSE;
                spin_unlock_irqrestore(&ha->smp_lock, flags);
            }
        }
    } else {
        /* raw devices */
        spin_lock_irqsave(&ha->smp_lock, flags);
        for (i = 0; i < MAXID; ++i)
            ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
        gdth_polling = TRUE;
        while (gdth_test_busy(ha))
            gdth_delay(0);
        gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
                          BUS_L2P(ha,b), 0, 0);
        gdth_polling = FALSE;
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }
    return SUCCESS;
}

static int gdth_bios_param(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t cap, int *ip)
{
    unchar b, t;
    gdth_ha_str *ha = shost_priv(sdev->host);
    struct scsi_device *sd;
    unsigned capacity;

    sd = sdev;
    capacity = cap;
    b = sd->channel;
    t = sd->id;
    TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));

    if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
        /* raw device or host drive without mapping information */
        TRACE2(("Evaluate mapping\n"));
        gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
    } else {
        ip[0] = ha->hdr[t].heads;
        ip[1] = ha->hdr[t].secs;
        ip[2] = capacity / ip[0] / ip[1];
    }

    TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
            ip[0],ip[1],ip[2]));
    return 0;
}

static int gdth_queuecommand(struct scsi_cmnd *scp,
                             void (*done)(struct scsi_cmnd *))
{
    gdth_ha_str *ha = shost_priv(scp->device->host);
    struct gdth_cmndinfo *cmndinfo;

    TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));

    cmndinfo = gdth_get_cmndinfo(ha);
    BUG_ON(!cmndinfo);

    scp->scsi_done = done;
    cmndinfo->timeout_count = 0;
    cmndinfo->priority = DEFAULT_PRI;

    return __gdth_queuecommand(ha, scp, cmndinfo);
}
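/*
 * Common tail of the queuecommand path: record the per-command
 * bookkeeping, put the command on the priority queue and kick the
 * dispatcher.
 */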
static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
                               struct gdth_cmndinfo *cmndinfo)
{
    scp->host_scribble = (unsigned char *)cmndinfo;
    cmndinfo->wait_for_completion = 1;
    cmndinfo->phase = -1;
    cmndinfo->OpCode = -1;

#ifdef GDTH_STATISTICS
    ++act_ios;
#endif

    gdth_putq(ha, scp, cmndinfo->priority);
    gdth_next(ha);
    return 0;
}

static int gdth_open(struct inode *inode, struct file *filep)
{
    gdth_ha_str *ha;

    lock_kernel();
    list_for_each_entry(ha, &gdth_instances, list) {
        if (!ha->sdev)
            ha->sdev = scsi_get_host_dev(ha->shost);
    }
    unlock_kernel();

    TRACE(("gdth_open()\n"));
    return 0;
}

static int gdth_close(struct inode *inode, struct file *filep)
{
    TRACE(("gdth_close()\n"));
    return 0;
}

static int ioc_event(void __user *arg)
{
    gdth_ioctl_event evt;
    gdth_ha_str *ha;
    ulong flags;

    if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
        return -EFAULT;
    ha = gdth_find_ha(evt.ionode);
    if (!ha)
        return -EFAULT;

    if (evt.erase == 0xff) {
        if (evt.event.event_source == ES_TEST)
            evt.event.event_data.size = sizeof(evt.event.event_data.eu.test);
        else if (evt.event.event_source == ES_DRIVER)
            evt.event.event_data.size = sizeof(evt.event.event_data.eu.driver);
        else if (evt.event.event_source == ES_SYNC)
            evt.event.event_data.size = sizeof(evt.event.event_data.eu.sync);
        else
            evt.event.event_data.size = sizeof(evt.event.event_data.eu.async);
        spin_lock_irqsave(&ha->smp_lock, flags);
        gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
                         &evt.event.event_data);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    } else if (evt.erase == 0xfe) {
        gdth_clear_events();
    } else if (evt.erase == 0) {
        evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
    } else {
        gdth_readapp_event(ha, evt.erase, &evt.event);
    }
    if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
        return -EFAULT;
    return 0;
}

static int ioc_lockdrv(void __user *arg)
{
    gdth_ioctl_lockdrv ldrv;
    unchar i, j;
    ulong flags;
    gdth_ha_str *ha;

    if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
        return -EFAULT;
    ha = gdth_find_ha(ldrv.ionode);
    if (!ha)
        return -EFAULT;

    for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
        j = ldrv.drives[i];
        if (j >= MAX_HDRIVES || !ha->hdr[j].present)
            continue;
        if (ldrv.lock) {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 1;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            gdth_wait_completion(ha, ha->bus_cnt, j);
        } else {
            spin_lock_irqsave(&ha->smp_lock, flags);
            ha->hdr[j].lock = 0;
            spin_unlock_irqrestore(&ha->smp_lock, flags);
            gdth_next(ha);
        }
    }
    return 0;
}

static int ioc_resetdrv(void __user *arg, char *cmnd)
{
    gdth_ioctl_reset res;
    gdth_cmd_str cmd;
    gdth_ha_str *ha;
    int rval;

    if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
        res.number >= MAX_HDRIVES)
        return -EFAULT;
    ha = gdth_find_ha(res.ionode);
    if (!ha)
        return -EFAULT;

    if (!ha->hdr[res.number].present)
        return 0;

    memset(&cmd, 0, sizeof(gdth_cmd_str));
    cmd.Service = CACHESERVICE;
    cmd.OpCode = GDT_CLUST_RESET;
    if (ha->cache_feat & GDT_64BIT)
        cmd.u.cache64.DeviceNo = res.number;
    else
        cmd.u.cache.DeviceNo = res.number;

    rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
    if (rval < 0)
        return rval;
    res.status = rval;

    if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
        return -EFAULT;
    return 0;
}
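/*
 * Pass a general controller command down from user space.  Data and sense
 * buffers are staged in a single driver-allocated DMA area; for 64-bit
 * capable firmware the 32-bit command fields handed in by the ioctl are
 * widened into their 64-bit counterparts before the command is executed.
 */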
static int ioc_general(void __user *arg, char *cmnd)
{
    gdth_ioctl_general gen;
    char *buf = NULL;
    ulong64 paddr;
    gdth_ha_str *ha;
    int rval;

    if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
        return -EFAULT;
    ha = gdth_find_ha(gen.ionode);
    if (!ha)
        return -EFAULT;

    if (gen.data_len + gen.sense_len != 0) {
        if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
                                     FALSE, &paddr)))
            return -EFAULT;
        if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
                           gen.data_len + gen.sense_len)) {
            gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
            return -EFAULT;
        }

        if (gen.command.OpCode == GDT_IOCTL) {
            gen.command.u.ioctl.p_param = paddr;
        } else if (gen.command.Service == CACHESERVICE) {
            if (ha->cache_feat & GDT_64BIT) {
                /* copy elements from 32-bit IOCTL structure */
                gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt;
                gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo;
                gen.command.u.cache64.DeviceNo = gen.command.u.cache.DeviceNo;
                /* addresses */
                if (ha->cache_feat & SCATTER_GATHER) {
                    gen.command.u.cache64.DestAddr = (ulong64)-1;
                    gen.command.u.cache64.sg_canz = 1;
                    gen.command.u.cache64.sg_lst[0].sg_ptr = paddr;
                    gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len;
                    gen.command.u.cache64.sg_lst[1].sg_len = 0;
                } else {
                    gen.command.u.cache64.DestAddr = paddr;
                    gen.command.u.cache64.sg_canz = 0;
                }
            } else {
                if (ha->cache_feat & SCATTER_GATHER) {
                    gen.command.u.cache.DestAddr = 0xffffffff;
                    gen.command.u.cache.sg_canz = 1;
                    gen.command.u.cache.sg_lst[0].sg_ptr = (ulong32)paddr;
                    gen.command.u.cache.sg_lst[0].sg_len = gen.data_len;
                    gen.command.u.cache.sg_lst[1].sg_len = 0;
                } else {
                    gen.command.u.cache.DestAddr = paddr;
                    gen.command.u.cache.sg_canz = 0;
                }
            }
        } else if (gen.command.Service == SCSIRAWSERVICE) {
            if (ha->raw_feat & GDT_64BIT) {
                /* copy elements from 32-bit IOCTL structure */
                char cmd[16];
                gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len;
                gen.command.u.raw64.bus = gen.command.u.raw.bus;
                gen.command.u.raw64.lun = gen.command.u.raw.lun;
                gen.command.u.raw64.target = gen.command.u.raw.target;
                memcpy(cmd, gen.command.u.raw.cmd, 16);
                memcpy(gen.command.u.raw64.cmd, cmd, 16);
                gen.command.u.raw64.clen = gen.command.u.raw.clen;
                gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen;
                gen.command.u.raw64.direction = gen.command.u.raw.direction;
                /* addresses */
                if (ha->raw_feat & SCATTER_GATHER) {
                    gen.command.u.raw64.sdata = (ulong64)-1;
                    gen.command.u.raw64.sg_ranz = 1;
                    gen.command.u.raw64.sg_lst[0].sg_ptr = paddr;
                    gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len;
                    gen.command.u.raw64.sg_lst[1].sg_len = 0;
                } else {
                    gen.command.u.raw64.sdata = paddr;
                    gen.command.u.raw64.sg_ranz = 0;
                }
                gen.command.u.raw64.sense_data = paddr + gen.data_len;
            } else {
                if (ha->raw_feat & SCATTER_GATHER) {
                    gen.command.u.raw.sdata = 0xffffffff;
                    gen.command.u.raw.sg_ranz = 1;
                    gen.command.u.raw.sg_lst[0].sg_ptr = (ulong32)paddr;
                    gen.command.u.raw.sg_lst[0].sg_len = gen.data_len;
                    gen.command.u.raw.sg_lst[1].sg_len = 0;
                } else {
                    gen.command.u.raw.sdata = paddr;
                    gen.command.u.raw.sg_ranz = 0;
                }
                gen.command.u.raw.sense_data = (ulong32)paddr + gen.data_len;
            }
        } else {
            gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
            return -EFAULT;
        }
    }

    rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info);
    if (rval < 0) {
        /* do not leak the DMA buffer if the command could not be issued */
        if (buf)
            gdth_ioctl_free(ha, gen.data_len + gen.sense_len, buf, paddr);
        return rval;
    }
    gen.status = rval;

    if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
                     gen.data_len + gen.sense_len)) {
        gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
        return -EFAULT;
    }
    if (copy_to_user(arg, &gen,
                     sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) {
        gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
        return -EFAULT;
    }
    gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr);
    return 0;
}
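/*
 * Report the list of configured host drives to user space.  For drives
 * that are part of a cluster the cluster type is re-read from the
 * controller rather than taken from the cached value.
 */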
static int ioc_hdrlist(void __user *arg, char *cmnd)
{
    gdth_ioctl_rescan *rsc;
    gdth_cmd_str *cmd;
    gdth_ha_str *ha;
    unchar i;
    int rc = -ENOMEM;
    u32 cluster_type = 0;

    rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
    cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
    if (!rsc || !cmd)
        goto free_fail;

    if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
        (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
        rc = -EFAULT;
        goto free_fail;
    }
    memset(cmd, 0, sizeof(gdth_cmd_str));

    for (i = 0; i < MAX_HDRIVES; ++i) {
        if (!ha->hdr[i].present) {
            rsc->hdr_list[i].bus = 0xff;
            continue;
        }
        rsc->hdr_list[i].bus = ha->virt_bus;
        rsc->hdr_list[i].target = i;
        rsc->hdr_list[i].lun = 0;
        rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
        if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
            cmd->Service = CACHESERVICE;
            cmd->OpCode = GDT_CLUST_INFO;
            if (ha->cache_feat & GDT_64BIT)
                cmd->u.cache64.DeviceNo = i;
            else
                cmd->u.cache.DeviceNo = i;
            if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
                rsc->hdr_list[i].cluster_type = cluster_type;
        }
    }

    if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
        rc = -EFAULT;
    else
        rc = 0;

free_fail:
    kfree(rsc);
    kfree(cmd);
    return rc;
}
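/*
 * Rescan the host drives.  With flag == 0 the cache service is
 * re-initialized and all drives are re-probed; otherwise only the drive
 * given in hdr_no is refreshed.  Size, disk mapping, device type, cluster
 * info and R/W attributes are re-read for every drive found.
 */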
static int ioc_rescan(void __user *arg, char *cmnd)
{
    gdth_ioctl_rescan *rsc;
    gdth_cmd_str *cmd;
    ushort i, status, hdr_cnt;
    ulong32 info;
    int cyls, hds, secs;
    int rc = -ENOMEM;
    ulong flags;
    gdth_ha_str *ha;

    rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
    cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
    if (!cmd || !rsc)
        goto free_fail;

    if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
        (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
        rc = -EFAULT;
        goto free_fail;
    }
    memset(cmd, 0, sizeof(gdth_cmd_str));

    if (rsc->flag == 0) {
        /* old method: re-init. cache service */
        cmd->Service = CACHESERVICE;
        if (ha->cache_feat & GDT_64BIT) {
            cmd->OpCode = GDT_X_INIT_HOST;
            cmd->u.cache64.DeviceNo = LINUX_OS;
        } else {
            cmd->OpCode = GDT_INIT;
            cmd->u.cache.DeviceNo = LINUX_OS;
        }
        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
        i = 0;
        hdr_cnt = (status == S_OK ? (ushort)info : 0);
    } else {
        i = rsc->hdr_no;
        hdr_cnt = i + 1;
    }

    for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_INFO;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;
        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        rsc->hdr_list[i].bus = ha->virt_bus;
        rsc->hdr_list[i].target = i;
        rsc->hdr_list[i].lun = 0;
        if (status != S_OK) {
            ha->hdr[i].present = FALSE;
        } else {
            ha->hdr[i].present = TRUE;
            ha->hdr[i].size = info;
            /* evaluate mapping */
            ha->hdr[i].size &= ~SECS32;
            gdth_eval_mapping(ha->hdr[i].size, &cyls, &hds, &secs);
            ha->hdr[i].heads = hds;
            ha->hdr[i].secs = secs;
            /* round size */
            ha->hdr[i].size = cyls * hds * secs;
        }
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        if (status != S_OK)
            continue;

        /* extended info, if GDT_64BIT, for drives > 2 TB */
        /* but we need ha->info2, not yet stored in scp->SCp */

        /* devtype, cluster info, R/W attribs */
        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_DEVTYPE;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;
        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->hdr[i].devtype = (status == S_OK ? (ushort)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);

        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_CLUST_INFO;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;
        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->hdr[i].cluster_type =
            ((status == S_OK && !shared_access) ? (ushort)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
        rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;

        cmd->Service = CACHESERVICE;
        cmd->OpCode = GDT_RW_ATTRIBS;
        if (ha->cache_feat & GDT_64BIT)
            cmd->u.cache64.DeviceNo = i;
        else
            cmd->u.cache.DeviceNo = i;
        status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);

        spin_lock_irqsave(&ha->smp_lock, flags);
        ha->hdr[i].rw_attribs = (status == S_OK ? (ushort)info : 0);
        spin_unlock_irqrestore(&ha->smp_lock, flags);
    }

    if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
        rc = -EFAULT;
    else
        rc = 0;

free_fail:
    kfree(rsc);
    kfree(cmd);
    return rc;
}

static int gdth_ioctl(struct inode *inode, struct file *filep,
                      unsigned int cmd, unsigned long arg)
{
    gdth_ha_str *ha;
    Scsi_Cmnd *scp;
    ulong flags;
    char cmnd[MAX_COMMAND_SIZE];
    void __user *argp = (void __user *)arg;

    memset(cmnd, 0xff, 12);

    TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));

    switch (cmd) {
    case GDTIOCTL_CTRCNT:
    {
        int cnt = gdth_ctr_count;
        if (put_user(cnt, (int __user *)argp))
            return -EFAULT;
        break;
    }

    case GDTIOCTL_DRVERS:
    {
        int ver = (GDTH_VERSION << 8) | GDTH_SUBVERSION;
        if (put_user(ver, (int __user *)argp))
            return -EFAULT;
        break;
    }

    case GDTIOCTL_OSVERS:
    {
        gdth_ioctl_osvers osv;

        osv.version = (unchar)(LINUX_VERSION_CODE >> 16);
        osv.subversion = (unchar)(LINUX_VERSION_CODE >> 8);