aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/s390/crypto/z90main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/crypto/z90main.c')
-rw-r--r--drivers/s390/crypto/z90main.c3379
1 files changed, 0 insertions, 3379 deletions
diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c
deleted file mode 100644
index b2f20ab8431a..000000000000
--- a/drivers/s390/crypto/z90main.c
+++ /dev/null
@@ -1,3379 +0,0 @@
1/*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27#include <asm/uaccess.h> // copy_(from|to)_user
28#include <linux/compat.h>
29#include <linux/compiler.h>
30#include <linux/delay.h> // mdelay
31#include <linux/init.h>
32#include <linux/interrupt.h> // for tasklets
33#include <linux/miscdevice.h>
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/proc_fs.h>
37#include <linux/syscalls.h>
38#include "z90crypt.h"
39#include "z90common.h"
40
41/**
42 * Defaults that may be modified.
43 */
44
45/**
46 * You can specify a different minor at compile time.
47 */
48#ifndef Z90CRYPT_MINOR
49#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
50#endif
51
52/**
53 * You can specify a different domain at compile time or on the insmod
54 * command line.
55 */
56#ifndef DOMAIN_INDEX
57#define DOMAIN_INDEX -1
58#endif
59
60/**
61 * This is the name under which the device is registered in /proc/modules.
62 */
63#define REG_NAME "z90crypt"
64
65/**
66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
67 * older than CLEANUPTIME seconds in the past.
68 */
69#ifndef CLEANUPTIME
70#define CLEANUPTIME 15
71#endif
72
73/**
74 * Config should run every CONFIGTIME seconds
75 */
76#ifndef CONFIGTIME
77#define CONFIGTIME 30
78#endif
79
80/**
81 * The first execution of the config task should take place
82 * immediately after initialization
83 */
84#ifndef INITIAL_CONFIGTIME
85#define INITIAL_CONFIGTIME 1
86#endif
87
88/**
89 * Reader should run every READERTIME milliseconds
90 * With the 100Hz patch for s390, z90crypt can lock the system solid while
91 * under heavy load. We'll try to avoid that.
92 */
93#ifndef READERTIME
94#if HZ > 1000
95#define READERTIME 2
96#else
97#define READERTIME 10
98#endif
99#endif
100
101/**
102 * turn long device array index into device pointer
103 */
104#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
105
106/**
107 * turn short device array index into long device array index
108 */
109#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
110
111/**
112 * turn short device array index into device pointer
113 */
114#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
115
/**
 * Status byte for a work-element.
 *
 * Bit layout:
 *   bit 7 (STAT_ROUTED)  - request routed to a specific device
 *   bit 6 (STAT_FAILED)  - request failed before reaching hardware
 *   bits 5-4             - read/write progress, see STAT_RDWRMASK
 */
#define STAT_DEFAULT 0x00 // request has not been processed

#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
			 // else, device is determined each write
#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
			 // before being sent to the hardware.
#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
// 0x20 // UNUSED state
#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
#define STAT_NOWORK 0x00 // bits off: no work on any queue
#define STAT_RDWRMASK 0x30 // mask for bits 5-4

/**
 * Macros to check/update the read-write state in a status byte.
 *
 * SET_RDWRMASK is wrapped in do { } while (0) so it expands as a single
 * statement (the old bare { } block broke inside an unbraced if/else),
 * and `newval` is parenthesized to guard against operator-precedence
 * surprises when an expression is passed in.
 */
#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
#define SET_RDWRMASK(statbyte, newval) \
	do { (statbyte) &= ~STAT_RDWRMASK; (statbyte) |= (newval); } while (0)
137
138/**
139 * Audit Trail. Progress of a Work element
140 * audit[0]: Unless noted otherwise, these bits are all set by the process
141 */
142#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
143#define FP_BUFFREQ 0x40 // Low Level buffer requested
144#define FP_BUFFGOT 0x20 // Low Level buffer obtained
145#define FP_SENT 0x10 // Work element sent to a crypto device
146 // (may be set by process or by reader task)
147#define FP_PENDING 0x08 // Work element placed on pending queue
148 // (may be set by process or by reader task)
149#define FP_REQUEST 0x04 // Work element placed on request queue
150#define FP_ASLEEP 0x02 // Work element about to sleep
151#define FP_AWAKE 0x01 // Work element has been awakened
152
153/**
154 * audit[1]: These bits are set by the reader task and/or the cleanup task
155 */
156#define FP_NOTPENDING 0x80 // Work element removed from pending queue
157#define FP_AWAKENING 0x40 // Caller about to be awakened
158#define FP_TIMEDOUT 0x20 // Caller timed out
159#define FP_RESPSIZESET 0x10 // Response size copied to work element
160#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
161#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
162#define FP_REMREQUEST 0x02 // Work element removed from request queue
163#define FP_SIGNALED 0x01 // Work element was awakened by a signal
164
165/**
166 * audit[2]: unused
167 */
168
169/**
170 * state of the file handle in private_data.status
171 */
172#define STAT_OPEN 0
173#define STAT_CLOSED 1
174
175/**
176 * PID() expands to the process ID of the current process
177 */
178#define PID() (current->pid)
179
180/**
181 * Selected Constants. The number of APs and the number of devices
182 */
183#ifndef Z90CRYPT_NUM_APS
184#define Z90CRYPT_NUM_APS 64
185#endif
186#ifndef Z90CRYPT_NUM_DEVS
187#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
188#endif
189
190/**
191 * Buffer size for receiving responses. The maximum Response Size
192 * is actually the maximum request size, since in an error condition
193 * the request itself may be returned unchanged.
194 */
195#define MAX_RESPONSE_SIZE 0x0000077C
196
197/**
198 * A count and status-byte mask
199 */
200struct status {
201 int st_count; // # of enabled devices
202 int disabled_count; // # of disabled devices
203 int user_disabled_count; // # of devices disabled via proc fs
204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
205};
206
207/**
208 * The array of device indexes is a mechanism for fast indexing into
209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
210 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
211 * z90CDeviceIndex[2] is 47.
212 */
213struct device_x {
214 int device_index[Z90CRYPT_NUM_DEVS];
215};
216
217/**
218 * All devices are arranged in a single array: 64 APs
219 */
220struct device {
221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
222 // PCIXCC_MCL3, CEX2C, CEX2A
223 enum devstat dev_stat; // current device status
224 int dev_self_x; // Index in array
225 int disabled; // Set when device is in error
226 int user_disabled; // Set when device is disabled by user
227 int dev_q_depth; // q depth
228 unsigned char * dev_resp_p; // Response buffer address
229 int dev_resp_l; // Response Buffer length
230 int dev_caller_count; // Number of callers
231 int dev_total_req_cnt; // # requests for device since load
232 struct list_head dev_caller_list; // List of callers
233};
234
235/**
236 * There's a struct status and a struct device_x for each device type.
237 */
238struct hdware_block {
239 struct status hdware_mask;
240 struct status type_mask[Z90CRYPT_NUM_TYPES];
241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
242 unsigned char device_type_array[Z90CRYPT_NUM_APS];
243};
244
245/**
246 * z90crypt is the topmost data structure in the hierarchy.
247 */
248struct z90crypt {
249 int max_count; // Nr of possible crypto devices
250 struct status mask;
251 int q_depth_array[Z90CRYPT_NUM_DEVS];
252 int dev_type_array[Z90CRYPT_NUM_DEVS];
253 struct device_x overall_device_x; // array device indexes
254 struct device * device_p[Z90CRYPT_NUM_DEVS];
255 int terminating;
256 int domain_established;// TRUE: domain has been found
257 int cdx; // Crypto Domain Index
258 int len; // Length of this data structure
259 struct hdware_block *hdware_info;
260};
261
262/**
263 * An array of these structures is pointed to from dev_caller
264 * The length of the array depends on the device type. For APs,
265 * there are 8.
266 *
267 * The caller buffer is allocated to the user at OPEN. At WRITE,
268 * it contains the request; at READ, the response. The function
269 * send_to_crypto_device converts the request to device-dependent
270 * form and use the caller's OPEN-allocated buffer for the response.
271 *
272 * For the contents of caller_dev_dep_req and caller_dev_dep_req_p
273 * because that points to it, see the discussion in z90hardware.c.
274 * Search for "extended request message block".
275 */
276struct caller {
277 int caller_buf_l; // length of original request
278 unsigned char * caller_buf_p; // Original request on WRITE
279 int caller_dev_dep_req_l; // len device dependent request
280 unsigned char * caller_dev_dep_req_p; // Device dependent form
281 unsigned char caller_id[8]; // caller-supplied message id
282 struct list_head caller_liste;
283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
284};
285
286/**
287 * Function prototypes from z90hardware.c
288 */
289enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
290 int *dev_type);
291enum devstat reset_device(int deviceNr, int cdx, int resetNr);
292enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
293enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
294 unsigned char *resp, unsigned char *psmid);
295int convert_request(unsigned char *buffer, int func, unsigned short function,
296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
297int convert_response(unsigned char *response, unsigned char *buffer,
298 int *respbufflen_p, unsigned char *resp_buff);
299
300/**
301 * Low level function prototypes
302 */
303static int create_z90crypt(int *cdx_p);
304static int refresh_z90crypt(int *cdx_p);
305static int find_crypto_devices(struct status *deviceMask);
306static int create_crypto_device(int index);
307static int destroy_crypto_device(int index);
308static void destroy_z90crypt(void);
309static int refresh_index_array(struct status *status_str,
310 struct device_x *index_array);
311static int probe_device_type(struct device *devPtr);
312static int probe_PCIXCC_type(struct device *devPtr);
313
314/**
315 * proc fs definitions
316 */
317static struct proc_dir_entry *z90crypt_entry;
318
319/**
320 * data structures
321 */
322
323/**
324 * work_element.opener points back to this structure
325 */
326struct priv_data {
327 pid_t opener_pid;
328 unsigned char status; // 0: open 1: closed
329};
330
331/**
332 * A work element is allocated for each request
333 */
334struct work_element {
335 struct priv_data *priv_data;
336 pid_t pid;
337 int devindex; // index of device processing this w_e
338 // (If request did not specify device,
339 // -1 until placed onto a queue)
340 int devtype;
341 struct list_head liste; // used for requestq and pendingq
342 char buffer[128]; // local copy of user request
343 int buff_size; // size of the buffer for the request
344 char resp_buff[RESPBUFFSIZE];
345 int resp_buff_size;
346 char __user * resp_addr; // address of response in user space
347 unsigned int funccode; // function code of request
348 wait_queue_head_t waitq;
349 unsigned long requestsent; // time at which the request was sent
350 atomic_t alarmrung; // wake-up signal
351 unsigned char caller_id[8]; // pid + counter, for this w_e
352 unsigned char status[1]; // bits to mark status of the request
353 unsigned char audit[3]; // record of work element's progress
354 unsigned char * requestptr; // address of request buffer
355 int retcode; // return code of request
356};
357
358/**
359 * High level function prototypes
360 */
361static int z90crypt_open(struct inode *, struct file *);
362static int z90crypt_release(struct inode *, struct file *);
363static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
364static ssize_t z90crypt_write(struct file *, const char __user *,
365 size_t, loff_t *);
366static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
367static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
368
369static void z90crypt_reader_task(unsigned long);
370static void z90crypt_schedule_reader_task(unsigned long);
371static void z90crypt_config_task(unsigned long);
372static void z90crypt_cleanup_task(unsigned long);
373
374static int z90crypt_status(char *, char **, off_t, int, int *, void *);
375static int z90crypt_status_write(struct file *, const char __user *,
376 unsigned long, void *);
377
378/**
379 * Storage allocated at initialization and used throughout the life of
380 * this insmod
381 */
382static int domain = DOMAIN_INDEX;
383static struct z90crypt z90crypt;
384static int quiesce_z90crypt;
385static spinlock_t queuespinlock;
386static struct list_head request_list;
387static int requestq_count;
388static struct list_head pending_list;
389static int pendingq_count;
390
391static struct tasklet_struct reader_tasklet;
392static struct timer_list reader_timer;
393static struct timer_list config_timer;
394static struct timer_list cleanup_timer;
395static atomic_t total_open;
396static atomic_t z90crypt_step;
397
398static struct file_operations z90crypt_fops = {
399 .owner = THIS_MODULE,
400 .read = z90crypt_read,
401 .write = z90crypt_write,
402 .unlocked_ioctl = z90crypt_unlocked_ioctl,
403#ifdef CONFIG_COMPAT
404 .compat_ioctl = z90crypt_compat_ioctl,
405#endif
406 .open = z90crypt_open,
407 .release = z90crypt_release
408};
409
410static struct miscdevice z90crypt_misc_device = {
411 .minor = Z90CRYPT_MINOR,
412 .name = DEV_NAME,
413 .fops = &z90crypt_fops,
414};
415
416/**
417 * Documentation values.
418 */
419MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
420 "and Jochen Roehrig");
421MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
422 "Copyright 2001, 2005 IBM Corporation");
423MODULE_LICENSE("GPL");
424module_param(domain, int, 0);
425MODULE_PARM_DESC(domain, "domain index for device");
426
427#ifdef CONFIG_COMPAT
428/**
429 * ioctl32 conversion routines
430 */
431struct ica_rsa_modexpo_32 { // For 32-bit callers
432 compat_uptr_t inputdata;
433 unsigned int inputdatalength;
434 compat_uptr_t outputdata;
435 unsigned int outputdatalength;
436 compat_uptr_t b_key;
437 compat_uptr_t n_modulus;
438};
439
440static long
441trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
442{
443 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
444 struct ica_rsa_modexpo_32 mex32k;
445 struct ica_rsa_modexpo __user *mex64;
446 long ret = 0;
447 unsigned int i;
448
449 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
450 return -EFAULT;
451 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
452 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
453 return -EFAULT;
454 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
455 return -EFAULT;
456 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
457 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
458 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
459 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
460 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
461 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
462 return -EFAULT;
463 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
464 if (!ret)
465 if (__get_user(i, &mex64->outputdatalength) ||
466 __put_user(i, &mex32u->outputdatalength))
467 ret = -EFAULT;
468 return ret;
469}
470
471struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
472 compat_uptr_t inputdata;
473 unsigned int inputdatalength;
474 compat_uptr_t outputdata;
475 unsigned int outputdatalength;
476 compat_uptr_t bp_key;
477 compat_uptr_t bq_key;
478 compat_uptr_t np_prime;
479 compat_uptr_t nq_prime;
480 compat_uptr_t u_mult_inv;
481};
482
483static long
484trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
485{
486 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
487 struct ica_rsa_modexpo_crt_32 crt32k;
488 struct ica_rsa_modexpo_crt __user *crt64;
489 long ret = 0;
490 unsigned int i;
491
492 if (!access_ok(VERIFY_WRITE, crt32u,
493 sizeof(struct ica_rsa_modexpo_crt_32)))
494 return -EFAULT;
495 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
496 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
497 return -EFAULT;
498 if (copy_from_user(&crt32k, crt32u,
499 sizeof(struct ica_rsa_modexpo_crt_32)))
500 return -EFAULT;
501 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
502 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
503 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
504 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
505 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
506 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
507 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
508 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
509 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
510 return -EFAULT;
511 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
512 if (!ret)
513 if (__get_user(i, &crt64->outputdatalength) ||
514 __put_user(i, &crt32u->outputdatalength))
515 ret = -EFAULT;
516 return ret;
517}
518
519static long
520z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
521{
522 switch (cmd) {
523 case ICAZ90STATUS:
524 case Z90QUIESCE:
525 case Z90STAT_TOTALCOUNT:
526 case Z90STAT_PCICACOUNT:
527 case Z90STAT_PCICCCOUNT:
528 case Z90STAT_PCIXCCCOUNT:
529 case Z90STAT_PCIXCCMCL2COUNT:
530 case Z90STAT_PCIXCCMCL3COUNT:
531 case Z90STAT_CEX2CCOUNT:
532 case Z90STAT_REQUESTQ_COUNT:
533 case Z90STAT_PENDINGQ_COUNT:
534 case Z90STAT_TOTALOPEN_COUNT:
535 case Z90STAT_DOMAIN_INDEX:
536 case Z90STAT_STATUS_MASK:
537 case Z90STAT_QDEPTH_MASK:
538 case Z90STAT_PERDEV_REQCNT:
539 return z90crypt_unlocked_ioctl(filp, cmd, arg);
540 case ICARSAMODEXPO:
541 return trans_modexpo32(filp, cmd, arg);
542 case ICARSACRT:
543 return trans_modexpo_crt32(filp, cmd, arg);
544 default:
545 return -ENOIOCTLCMD;
546 }
547}
548#endif
549
550/**
551 * The module initialization code.
552 */
553static int __init
554z90crypt_init_module(void)
555{
556 int result, nresult;
557 struct proc_dir_entry *entry;
558
559 PDEBUG("PID %d\n", PID());
560
561 if ((domain < -1) || (domain > 15)) {
562 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
563 return -EINVAL;
564 }
565
566 /* Register as misc device with given minor (or get a dynamic one). */
567 result = misc_register(&z90crypt_misc_device);
568 if (result < 0) {
569 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
570 z90crypt_misc_device.minor, result);
571 return result;
572 }
573
574 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
575
576 result = create_z90crypt(&domain);
577 if (result != 0) {
578 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
579 domain, result);
580 result = -ENOMEM;
581 goto init_module_cleanup;
582 }
583
584 if (result == 0) {
585 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
586 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
587 __DATE__, __TIME__);
588 PDEBUG("create_z90crypt (domain index %d) successful.\n",
589 domain);
590 } else
591 PRINTK("No devices at startup\n");
592
593 /* Initialize globals. */
594 spin_lock_init(&queuespinlock);
595
596 INIT_LIST_HEAD(&pending_list);
597 pendingq_count = 0;
598
599 INIT_LIST_HEAD(&request_list);
600 requestq_count = 0;
601
602 quiesce_z90crypt = 0;
603
604 atomic_set(&total_open, 0);
605 atomic_set(&z90crypt_step, 0);
606
607 /* Set up the cleanup task. */
608 init_timer(&cleanup_timer);
609 cleanup_timer.function = z90crypt_cleanup_task;
610 cleanup_timer.data = 0;
611 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
612 add_timer(&cleanup_timer);
613
614 /* Set up the proc file system */
615 entry = create_proc_entry("driver/z90crypt", 0644, 0);
616 if (entry) {
617 entry->nlink = 1;
618 entry->data = 0;
619 entry->read_proc = z90crypt_status;
620 entry->write_proc = z90crypt_status_write;
621 }
622 else
623 PRINTK("Couldn't create z90crypt proc entry\n");
624 z90crypt_entry = entry;
625
626 /* Set up the configuration task. */
627 init_timer(&config_timer);
628 config_timer.function = z90crypt_config_task;
629 config_timer.data = 0;
630 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
631 add_timer(&config_timer);
632
633 /* Set up the reader task */
634 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
635 init_timer(&reader_timer);
636 reader_timer.function = z90crypt_schedule_reader_task;
637 reader_timer.data = 0;
638 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
639 add_timer(&reader_timer);
640
641 return 0; // success
642
643init_module_cleanup:
644 if ((nresult = misc_deregister(&z90crypt_misc_device)))
645 PRINTK("misc_deregister failed with %d.\n", nresult);
646 else
647 PDEBUG("misc_deregister successful.\n");
648
649 return result; // failure
650}
651
652/**
653 * The module termination code
654 */
655static void __exit
656z90crypt_cleanup_module(void)
657{
658 int nresult;
659
660 PDEBUG("PID %d\n", PID());
661
662 remove_proc_entry("driver/z90crypt", 0);
663
664 if ((nresult = misc_deregister(&z90crypt_misc_device)))
665 PRINTK("misc_deregister failed with %d.\n", nresult);
666 else
667 PDEBUG("misc_deregister successful.\n");
668
669 /* Remove the tasks */
670 tasklet_kill(&reader_tasklet);
671 del_timer(&reader_timer);
672 del_timer(&config_timer);
673 del_timer(&cleanup_timer);
674
675 destroy_z90crypt();
676
677 PRINTKN("Unloaded.\n");
678}
679
680/**
681 * Functions running under a process id
682 *
683 * The I/O functions:
684 * z90crypt_open
685 * z90crypt_release
686 * z90crypt_read
687 * z90crypt_write
688 * z90crypt_unlocked_ioctl
689 * z90crypt_status
690 * z90crypt_status_write
691 * disable_card
692 * enable_card
693 *
694 * Helper functions:
695 * z90crypt_rsa
696 * z90crypt_prepare
697 * z90crypt_send
698 * z90crypt_process_results
699 *
700 */
701static int
702z90crypt_open(struct inode *inode, struct file *filp)
703{
704 struct priv_data *private_data_p;
705
706 if (quiesce_z90crypt)
707 return -EQUIESCE;
708
709 private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL);
710 if (!private_data_p) {
711 PRINTK("Memory allocate failed\n");
712 return -ENOMEM;
713 }
714
715 private_data_p->status = STAT_OPEN;
716 private_data_p->opener_pid = PID();
717 filp->private_data = private_data_p;
718 atomic_inc(&total_open);
719
720 return 0;
721}
722
723static int
724z90crypt_release(struct inode *inode, struct file *filp)
725{
726 struct priv_data *private_data_p = filp->private_data;
727
728 PDEBUG("PID %d (filp %p)\n", PID(), filp);
729
730 private_data_p->status = STAT_CLOSED;
731 memset(private_data_p, 0, sizeof(struct priv_data));
732 kfree(private_data_p);
733 atomic_dec(&total_open);
734
735 return 0;
736}
737
738/*
739 * there are two read functions, of which compile options will choose one
740 * without USE_GET_RANDOM_BYTES
741 * => read() always returns -EPERM;
742 * otherwise
743 * => read() uses get_random_bytes() kernel function
744 */
745#ifndef USE_GET_RANDOM_BYTES
746/**
747 * z90crypt_read will not be supported beyond z90crypt 1.3.1
748 */
749static ssize_t
750z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
751{
752 PDEBUG("filp %p (PID %d)\n", filp, PID());
753 return -EPERM;
754}
755#else // we want to use get_random_bytes
756/**
757 * read() just returns a string of random bytes. Since we have no way
758 * to generate these cryptographically, we just execute get_random_bytes
759 * for the length specified.
760 */
761#include <linux/random.h>
762static ssize_t
763z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
764{
765 unsigned char *temp_buff;
766
767 PDEBUG("filp %p (PID %d)\n", filp, PID());
768
769 if (quiesce_z90crypt)
770 return -EQUIESCE;
771 if (count < 0) {
772 PRINTK("Requested random byte count negative: %ld\n", count);
773 return -EINVAL;
774 }
775 if (count > RESPBUFFSIZE) {
776 PDEBUG("count[%d] > RESPBUFFSIZE", count);
777 return -EINVAL;
778 }
779 if (count == 0)
780 return 0;
781 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
782 if (!temp_buff) {
783 PRINTK("Memory allocate failed\n");
784 return -ENOMEM;
785 }
786 get_random_bytes(temp_buff, count);
787
788 if (copy_to_user(buf, temp_buff, count) != 0) {
789 kfree(temp_buff);
790 return -EFAULT;
791 }
792 kfree(temp_buff);
793 return count;
794}
795#endif
796
797/**
798 * Write is is not allowed
799 */
800static ssize_t
801z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
802{
803 PDEBUG("filp %p (PID %d)\n", filp, PID());
804 return -EPERM;
805}
806
807/**
808 * New status functions
809 */
810static inline int
811get_status_totalcount(void)
812{
813 return z90crypt.hdware_info->hdware_mask.st_count;
814}
815
816static inline int
817get_status_PCICAcount(void)
818{
819 return z90crypt.hdware_info->type_mask[PCICA].st_count;
820}
821
822static inline int
823get_status_PCICCcount(void)
824{
825 return z90crypt.hdware_info->type_mask[PCICC].st_count;
826}
827
828static inline int
829get_status_PCIXCCcount(void)
830{
831 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
832 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
833}
834
835static inline int
836get_status_PCIXCCMCL2count(void)
837{
838 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
839}
840
841static inline int
842get_status_PCIXCCMCL3count(void)
843{
844 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
845}
846
847static inline int
848get_status_CEX2Ccount(void)
849{
850 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
851}
852
853static inline int
854get_status_CEX2Acount(void)
855{
856 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
857}
858
859static inline int
860get_status_requestq_count(void)
861{
862 return requestq_count;
863}
864
865static inline int
866get_status_pendingq_count(void)
867{
868 return pendingq_count;
869}
870
871static inline int
872get_status_totalopen_count(void)
873{
874 return atomic_read(&total_open);
875}
876
877static inline int
878get_status_domain_index(void)
879{
880 return z90crypt.cdx;
881}
882
883static inline unsigned char *
884get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
885{
886 int i, ix;
887
888 memcpy(status, z90crypt.hdware_info->device_type_array,
889 Z90CRYPT_NUM_APS);
890
891 for (i = 0; i < get_status_totalcount(); i++) {
892 ix = SHRT2LONG(i);
893 if (LONG2DEVPTR(ix)->user_disabled)
894 status[ix] = 0x0d;
895 }
896
897 return status;
898}
899
900static inline unsigned char *
901get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
902{
903 int i, ix;
904
905 memset(qdepth, 0, Z90CRYPT_NUM_APS);
906
907 for (i = 0; i < get_status_totalcount(); i++) {
908 ix = SHRT2LONG(i);
909 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
910 }
911
912 return qdepth;
913}
914
915static inline unsigned int *
916get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
917{
918 int i, ix;
919
920 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
921
922 for (i = 0; i < get_status_totalcount(); i++) {
923 ix = SHRT2LONG(i);
924 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
925 }
926
927 return reqcnt;
928}
929
930static inline void
931init_work_element(struct work_element *we_p,
932 struct priv_data *priv_data, pid_t pid)
933{
934 int step;
935
936 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
937 /* Come up with a unique id for this caller. */
938 step = atomic_inc_return(&z90crypt_step);
939 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
940 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
941 we_p->pid = pid;
942 we_p->priv_data = priv_data;
943 we_p->status[0] = STAT_DEFAULT;
944 we_p->audit[0] = 0x00;
945 we_p->audit[1] = 0x00;
946 we_p->audit[2] = 0x00;
947 we_p->resp_buff_size = 0;
948 we_p->retcode = 0;
949 we_p->devindex = -1;
950 we_p->devtype = -1;
951 atomic_set(&we_p->alarmrung, 0);
952 init_waitqueue_head(&we_p->waitq);
953 INIT_LIST_HEAD(&(we_p->liste));
954}
955
956static inline int
957allocate_work_element(struct work_element **we_pp,
958 struct priv_data *priv_data_p, pid_t pid)
959{
960 struct work_element *we_p;
961
962 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
963 if (!we_p)
964 return -ENOMEM;
965 init_work_element(we_p, priv_data_p, pid);
966 *we_pp = we_p;
967 return 0;
968}
969
970static inline void
971remove_device(struct device *device_p)
972{
973 if (!device_p || (device_p->disabled != 0))
974 return;
975 device_p->disabled = 1;
976 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
977 z90crypt.hdware_info->hdware_mask.disabled_count++;
978}
979
980/**
981 * Bitlength limits for each card
982 *
983 * There are new MCLs which allow more bitlengths. See the table for details.
984 * The MCL must be applied and the newer bitlengths enabled for these to work.
985 *
986 * Card Type Old limit New limit
987 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
988 * PCICC 512-1024 512-2048
989 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
990 * PCIXCC_MCL3 ----- 128-2048
991 * CEX2C 512-2048 128-2048
992 * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
993 *
994 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
995 * MCL to just one card in a machine. We assume, at first, that all cards have
996 * these capabilities.
997 */
998int ext_bitlens = 1; // This is global
999#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1000#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1001#define PCICC_MIN_MOD_SIZE 64 // 512 bits
1002#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1003#define MAX_MOD_SIZE 256 // 2048 bits
1004
/**
 * Pick a device type capable of processing a request whose modulus is
 * `bytelength` bytes.  On entry *dev_type_p is either a specific type
 * or ANYDEV.  A specific type is only checked for availability; with
 * ANYDEV the "fast" types (PCICA/PCIXCC_MCL3/CEX2C/CEX2A) are tried
 * round-robin first, then PCIXCC_MCL2, then PCICC, each subject to the
 * bitlength limits described in the table above.
 *
 * Returns 0 and sets *dev_type_p on success, -1 when no suitable
 * device type is available.
 */
static inline int
select_device_type(int *dev_type_p, int bytelength)
{
	/* Round-robin counter across the fast device types. */
	static int count = 0;
	int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
	    index_to_use;
	struct status *stat;
	if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
	    (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
	    (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
	    (*dev_type_p != ANYDEV))
		return -1;
	if (*dev_type_p != ANYDEV) {
		/* Specific type requested: succeed iff one is enabled. */
		stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
		if (stat->st_count >
		    (stat->disabled_count + stat->user_disabled_count))
			return 0;
		return -1;
	}

	/**
	 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
	 * speed.
	 *
	 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
	 * other present.
	 */
	stat = &z90crypt.hdware_info->type_mask[PCICA];
	PCICA_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
	PCIXCC_MCL3_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[CEX2C];
	CEX2C_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[CEX2A];
	CEX2A_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
		/**
		 * bitlength is a factor, PCICA or CEX2A are the most capable,
		 * even with the new MCL for PCIXCC.
		 */
		if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
		    (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
			if (PCICA_avail) {
				*dev_type_p = PCICA;
				return 0;
			}
			if (CEX2A_avail) {
				*dev_type_p = CEX2A;
				return 0;
			}
			return -1;
		}

		/* Round-robin distribution over the available fast types. */
		index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
					CEX2C_avail + CEX2A_avail);
		if (index_to_use < PCICA_avail)
			*dev_type_p = PCICA;
		else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
			*dev_type_p = PCIXCC_MCL3;
		else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
					 CEX2C_avail))
			*dev_type_p = CEX2C;
		else
			*dev_type_p = CEX2A;
		count++;
		return 0;
	}

	/* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
	if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
		return -1;
	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
	if (stat->st_count >
	    (stat->disabled_count + stat->user_disabled_count)) {
		*dev_type_p = PCIXCC_MCL2;
		return 0;
	}

	/**
	 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
	 * (if we don't have the MCL applied and the newer bitlengths enabled)
	 * cannot go to a PCICC
	 */
	if ((bytelength < PCICC_MIN_MOD_SIZE) ||
	    (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
		return -1;
	}
	stat = &z90crypt.hdware_info->type_mask[PCICC];
	if (stat->st_count >
	    (stat->disabled_count + stat->user_disabled_count)) {
		*dev_type_p = PCICC;
		return 0;
	}

	return -1;
}
1105
1106/**
1107 * Try the selected number, then the selected type (can be ANYDEV)
1108 */
/**
 * Try the caller-selected device number first; if that device is not
 * usable, fall back to selecting by type via select_device_type()
 * (the type may be ANYDEV) and pick the least-loaded device of the
 * chosen type.
 *
 * Returns the selected device index (also stored in *device_nr_p,
 * with *dev_type_p updated) or -1 when nothing usable is found.
 */
static inline int
select_device(int *dev_type_p, int *device_nr_p, int bytelength)
{
	int i, indx, devTp, low_count, low_indx;
	struct device_x *index_p;
	struct device *dev_ptr;

	PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
	if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
		PDEBUG("trying index = %d\n", *device_nr_p);
		dev_ptr = z90crypt.device_p[*device_nr_p];

		/* Honor the requested index only if that device is usable. */
		if (dev_ptr &&
		    (dev_ptr->dev_stat != DEV_GONE) &&
		    (dev_ptr->disabled == 0) &&
		    (dev_ptr->user_disabled == 0)) {
			PDEBUG("selected by number, index = %d\n",
			       *device_nr_p);
			*dev_type_p = dev_ptr->dev_type;
			return *device_nr_p;
		}
	}
	*device_nr_p = -1;
	PDEBUG("trying type = %d\n", *dev_type_p);
	devTp = *dev_type_p;
	if (select_device_type(&devTp, bytelength) == -1) {
		PDEBUG("failed to select by type\n");
		return -1;
	}
	PDEBUG("selected type = %d\n", devTp);
	index_p = &z90crypt.hdware_info->type_x_addr[devTp];
	low_count = 0x0000FFFF;
	low_indx = -1;
	/* Scan all devices of the chosen type for the shortest queue. */
	for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
		indx = index_p->device_index[i];
		dev_ptr = z90crypt.device_p[indx];
		if (dev_ptr &&
		    (dev_ptr->dev_stat != DEV_GONE) &&
		    (dev_ptr->disabled == 0) &&
		    (dev_ptr->user_disabled == 0) &&
		    (devTp == dev_ptr->dev_type) &&
		    (low_count > dev_ptr->dev_caller_count)) {
			low_count = dev_ptr->dev_caller_count;
			low_indx = indx;
		}
	}
	*device_nr_p = low_indx;
	return low_indx;
}
1158
/**
 * Select a device for the work element and hand its device-dependent
 * request to the adapter via send_to_AP().  On success the caller
 * struct is appended to the device's caller list and the device's
 * outstanding-request count is bumped.
 *
 * Returns 0 on success or a SEN_* code describing why the send could
 * not be performed (retry, no device, queue full, fatal).
 * Must be called with queuespinlock held (see z90crypt_send).
 */
static inline int
send_to_crypto_device(struct work_element *we_p)
{
	struct caller *caller_p;
	struct device *device_p;
	int dev_nr;
	int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;

	if (!we_p->requestptr)
		return SEN_FATAL_ERROR;
	caller_p = (struct caller *)we_p->requestptr;
	dev_nr = we_p->devindex;
	if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
		/* Retry later if hardware exists at all, else give up. */
		if (z90crypt.hdware_info->hdware_mask.st_count != 0)
			return SEN_RETRY;
		else
			return SEN_NOT_AVAIL;
	}
	we_p->devindex = dev_nr;
	device_p = z90crypt.device_p[dev_nr];
	if (!device_p)
		return SEN_NOT_AVAIL;
	if (device_p->dev_type != we_p->devtype)
		return SEN_RETRY;
	if (device_p->dev_caller_count >= device_p->dev_q_depth)
		return SEN_QUEUE_FULL;
	PDEBUG("device number prior to send: %d\n", dev_nr);
	/* Map hardware-level results onto SEN_* return codes. */
	switch (send_to_AP(dev_nr, z90crypt.cdx,
			   caller_p->caller_dev_dep_req_l,
			   caller_p->caller_dev_dep_req_p)) {
	case DEV_SEN_EXCEPTION:
		PRINTKC("Exception during send to device %d\n", dev_nr);
		z90crypt.terminating = 1;
		return SEN_FATAL_ERROR;
	case DEV_GONE:
		PRINTK("Device %d not available\n", dev_nr);
		remove_device(device_p);
		return SEN_NOT_AVAIL;
	case DEV_EMPTY:
		return SEN_NOT_AVAIL;
	case DEV_NO_WORK:
		return SEN_FATAL_ERROR;
	case DEV_BAD_MESSAGE:
		return SEN_USER_ERROR;
	case DEV_QUEUE_FULL:
		return SEN_QUEUE_FULL;
	default:
	case DEV_ONLINE:
		break;
	}
	list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
	device_p->dev_caller_count++;
	return 0;
}
1213
1214/**
1215 * Send puts the user's work on one of two queues:
1216 * the pending queue if the send was successful
1217 * the request queue if the send failed because device full or busy
1218 */
/**
 * Queue the user's work element under queuespinlock: on a successful
 * send it goes on pending_list; on a busy/full device it goes on
 * request_list for a later retry by the tasklet.  Terminal errors are
 * recorded in we_p->retcode and STAT_FAILED is set.
 *
 * Returns 0 when the work was queued (pending or request list),
 * -EWORKPEND if this element already has work in flight, or a
 * negative errno for terminal failures.
 */
static inline int
z90crypt_send(struct work_element *we_p, const char *buf)
{
	int rv;

	PDEBUG("PID %d\n", PID());

	if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
		PDEBUG("PID %d tried to send more work but has outstanding "
		       "work.\n", PID());
		return -EWORKPEND;
	}
	we_p->devindex = -1; // Reset device number
	spin_lock_irq(&queuespinlock);
	rv = send_to_crypto_device(we_p);
	switch (rv) {
	case 0:
		/* Sent to hardware: track on the pending queue. */
		we_p->requestsent = jiffies;
		we_p->audit[0] |= FP_SENT;
		list_add_tail(&we_p->liste, &pending_list);
		++pendingq_count;
		we_p->audit[0] |= FP_PENDING;
		break;
	case SEN_BUSY:
	case SEN_QUEUE_FULL:
		/* Not an error: park on the request queue for retry. */
		rv = 0;
		we_p->devindex = -1; // any device will do
		we_p->requestsent = jiffies;
		list_add_tail(&we_p->liste, &request_list);
		++requestq_count;
		we_p->audit[0] |= FP_REQUEST;
		break;
	case SEN_RETRY:
		rv = -ERESTARTSYS;
		break;
	case SEN_NOT_AVAIL:
		PRINTK("*** No devices available.\n");
		rv = we_p->retcode = -ENODEV;
		we_p->status[0] |= STAT_FAILED;
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		rv = we_p->retcode = -EINVAL;
		we_p->status[0] |= STAT_FAILED;
		break;
	default:
		we_p->retcode = rv;
		we_p->status[0] |= STAT_FAILED;
		break;
	}
	if (rv != -ERESTARTSYS)
		SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
	spin_unlock_irq(&queuespinlock);
	if (rv == 0)
		tasklet_schedule(&reader_tasklet);
	return rv;
}
1278
1279/**
1280 * process_results copies the user's work from kernel space.
1281 */
/**
 * Copy the completed result from kernel space back to the user:
 * first the (possibly updated) request structure into `buf`, then the
 * response data into the user's response buffer.  Also bumps the
 * per-device completed-request counter and resets the element's
 * read/write state to STAT_NOWORK.
 *
 * Returns 0 on success, -ENOBUFF if the kernel buffer is missing,
 * -EFAULT on a failed copy_to_user, or we_p->retcode if the request
 * itself failed.
 */
static inline int
z90crypt_process_results(struct work_element *we_p, char __user *buf)
{
	int rv;

	PDEBUG("we_p %p (PID %d)\n", we_p, PID());

	LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
	SET_RDWRMASK(we_p->status[0], STAT_READPEND);

	rv = 0;
	if (!we_p->buffer) {
		PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
			we_p, PID());
		rv = -ENOBUFF;
	}

	if (!rv)
		if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
			PDEBUG("copy_to_user failed: rv = %d\n", rv);
			rv = -EFAULT;
		}

	/* Only copy the response data if the request itself succeeded. */
	if (!rv)
		rv = we_p->retcode;
	if (!rv)
		if (we_p->resp_buff_size
		    &&	copy_to_user(we_p->resp_addr, we_p->resp_buff,
				     we_p->resp_buff_size))
			rv = -EFAULT;

	SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
	return rv;
}
1316
/* An all-zero PSMID; a caller id equal to this is treated as invalid. */
static unsigned char NULL_psmid[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

/**
 * Used in device configuration functions
 */
#define MAX_RESET 90
1324
1325/**
1326 * This is used only for PCICC support
1327 */
/**
 * Check whether a cleartext block carries PKCS#1 block-type-01
 * padding: 0x00 0x01, a run of at least eight 0xFF filler bytes,
 * then a 0x00 separator followed by payload.  Used only for PCICC
 * routing decisions.
 *
 * Returns 1 when the layout matches, 0 otherwise.
 */
static inline int
is_PKCS11_padded(unsigned char *buffer, int length)
{
	int pos = 2;

	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
		return 0;
	/* Skip over the 0xFF filler run. */
	while ((pos < length) && (buffer[pos] == 0xFF))
		pos++;
	/* Require >= 8 filler bytes and a separator before the end. */
	if ((pos < 10) || (pos == length))
		return 0;
	return buffer[pos] == 0x00;
}
1343
1344/**
1345 * This is used only for PCICC support
1346 */
/**
 * Check whether a cleartext block carries PKCS#1 block-type-02
 * padding: 0x00 0x02, a run of at least eight non-zero filler bytes,
 * then a 0x00 separator followed by payload.  Used only for PCICC
 * routing decisions.
 *
 * Returns 1 when the layout matches, 0 otherwise.
 */
static inline int
is_PKCS12_padded(unsigned char *buffer, int length)
{
	int pos = 2;

	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
		return 0;
	/* Skip over the non-zero filler run. */
	while ((pos < length) && (buffer[pos] != 0x00))
		pos++;
	/* Require >= 8 filler bytes and a separator before the end. */
	if ((pos < 10) || (pos == length))
		return 0;
	return buffer[pos] == 0x00;
}
1362
1363/**
1364 * builds struct caller and converts message from generic format to
1365 * device-dependent format
1366 * func is ICARSAMODEXPO or ICARSACRT
1367 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1368 */
/**
 * Populate the caller struct attached to the work element and convert
 * the generic request into the selected device's wire format via
 * convert_request().  On success the caller id is also copied into
 * bytes 4..11 of the device-dependent request (the PSMID slot).
 *
 * Returns 0 on success, SEN_NOT_AVAIL if the device type is not one
 * this driver handles, or the error from convert_request().
 */
static inline int
build_caller(struct work_element *we_p, short function)
{
	int rv;
	struct caller *caller_p = (struct caller *)we_p->requestptr;

	if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
	    (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
		return SEN_NOT_AVAIL;

	memcpy(caller_p->caller_id, we_p->caller_id,
	       sizeof(caller_p->caller_id));
	caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
	caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
	caller_p->caller_buf_p = we_p->buffer;
	INIT_LIST_HEAD(&(caller_p->caller_liste));

	rv = convert_request(we_p->buffer, we_p->funccode, function,
			     z90crypt.cdx, we_p->devtype,
			     &caller_p->caller_dev_dep_req_l,
			     caller_p->caller_dev_dep_req_p);
	if (rv) {
		if (rv == SEN_NOT_AVAIL)
			PDEBUG("request can't be processed on hdwr avail\n");
		else
			PRINTK("Error from convert_request: %d\n", rv);
	}
	else
		/* Stamp the caller id into the request's PSMID field. */
		memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
	return rv;
}
1401
/**
 * Detach a caller from its device's caller list (if it is queued) and
 * clear its id.  Safe to call with a NULL caller or one whose list
 * head was never initialized (both next and prev must be non-NULL
 * before the list is touched).
 */
static inline void
unbuild_caller(struct device *device_p, struct caller *caller_p)
{
	if (!caller_p)
		return;
	if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
		if (!list_empty(&caller_p->caller_liste)) {
			list_del_init(&caller_p->caller_liste);
			device_p->dev_caller_count--;
		}
	memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
}
1414
/**
 * Validate the user's mod-expo/CRT request, select a device type for
 * it, copy the input data into the work element's scratch area, pick
 * the device-level function (encrypt vs. decrypt form), and build the
 * device-dependent request via build_caller().
 *
 * Returns 0 on success or a SEN_*/REC_* code describing the failure
 * (translated to errnos by z90crypt_prepare).
 */
static inline int
get_crypto_request_buffer(struct work_element *we_p)
{
	struct ica_rsa_modexpo *mex_p;
	struct ica_rsa_modexpo_crt *crt_p;
	unsigned char *temp_buffer;
	short function;
	int rv;

	/* Both views alias the same buffer; funccode picks the real one. */
	mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
	crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;

	PDEBUG("device type input = %d\n", we_p->devtype);

	if (z90crypt.terminating)
		return REC_NO_RESPONSE;
	if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
		PRINTK("psmid zeroes\n");
		return SEN_FATAL_ERROR;
	}
	if (!we_p->buffer) {
		PRINTK("buffer pointer NULL\n");
		return SEN_USER_ERROR;
	}
	if (!we_p->requestptr) {
		PRINTK("caller pointer NULL\n");
		return SEN_USER_ERROR;
	}

	if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
	    (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
	    (we_p->devtype != ANYDEV)) {
		PRINTK("invalid device type\n");
		return SEN_USER_ERROR;
	}

	if ((mex_p->inputdatalength < 1) ||
	    (mex_p->inputdatalength > MAX_MOD_SIZE)) {
		PRINTK("inputdatalength[%d] is not valid\n",
		       mex_p->inputdatalength);
		return SEN_USER_ERROR;
	}

	if (mex_p->outputdatalength < mex_p->inputdatalength) {
		PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
		       mex_p->outputdatalength, mex_p->inputdatalength);
		return SEN_USER_ERROR;
	}

	if (!mex_p->inputdata || !mex_p->outputdata) {
		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
		       mex_p->outputdata, mex_p->inputdata);
		return SEN_USER_ERROR;
	}

	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex_p->outputdatalength = mex_p->inputdatalength;

	rv = 0;
	switch (we_p->funccode) {
	case ICARSAMODEXPO:
		if (!mex_p->b_key || !mex_p->n_modulus)
			rv = SEN_USER_ERROR;
		break;
	case ICARSACRT:
		/* CRT operands are split in half, so the length must be even. */
		if (!IS_EVEN(crt_p->inputdatalength)) {
			PRINTK("inputdatalength[%d] is odd, CRT form\n",
			       crt_p->inputdatalength);
			rv = SEN_USER_ERROR;
			break;
		}
		if (!crt_p->bp_key ||
		    !crt_p->bq_key ||
		    !crt_p->np_prime ||
		    !crt_p->nq_prime ||
		    !crt_p->u_mult_inv) {
			PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
			       crt_p->bp_key, crt_p->bq_key,
			       crt_p->np_prime, crt_p->nq_prime,
			       crt_p->u_mult_inv);
			rv = SEN_USER_ERROR;
		}
		break;
	default:
		PRINTK("bad func = %d\n", we_p->funccode);
		rv = SEN_USER_ERROR;
		break;
	}
	if (rv != 0)
		return rv;

	if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
		return SEN_NOT_AVAIL;

	/* Scratch area lives directly behind the work element + caller. */
	temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
		      sizeof(struct caller);
	if (copy_from_user(temp_buffer, mex_p->inputdata,
			   mex_p->inputdatalength) != 0)
		return SEN_RELEASED;

	function = PCI_FUNC_KEY_ENCRYPT;
	switch (we_p->devtype) {
	/* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
	case PCICA:
	case CEX2A:
		function = PCI_FUNC_KEY_ENCRYPT;
		break;
	/**
	 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
	 * mod-expo operation
	 */
	case PCIXCC_MCL2:
		if (we_p->funccode == ICARSAMODEXPO)
			function = PCI_FUNC_KEY_ENCRYPT;
		else
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	case PCIXCC_MCL3:
	case CEX2C:
		if (we_p->funccode == ICARSAMODEXPO)
			function = PCI_FUNC_KEY_ENCRYPT;
		else
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	/**
	 * PCICC does everything as a PKCS-1.2 format request
	 */
	case PCICC:
		/* PCICC cannot handle input that is PKCS#1.1 padded */
		if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
			return SEN_NOT_AVAIL;
		}
		if (we_p->funccode == ICARSAMODEXPO) {
			if (is_PKCS12_padded(temp_buffer,
					     mex_p->inputdatalength))
				function = PCI_FUNC_KEY_ENCRYPT;
			else
				function = PCI_FUNC_KEY_DECRYPT;
		} else
			/* all CRT forms are decrypts */
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	}
	PDEBUG("function: %04x\n", function);
	rv = build_caller(we_p, function);
	PDEBUG("rv from build_caller = %d\n", rv);
	return rv;
}
1570
/**
 * Copy the ioctl argument structure from user space into the work
 * element, then build the device-dependent request via
 * get_crypto_request_buffer(), translating its SEN_*/REC_* codes
 * into errnos for the caller.
 *
 * Returns 0 on success (including SEN_QUEUE_FULL, which will be
 * retried later) or a negative errno.
 */
static inline int
z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
		 const char __user *buffer)
{
	int rv;

	we_p->devindex = -1;
	if (funccode == ICARSAMODEXPO)
		we_p->buff_size = sizeof(struct ica_rsa_modexpo);
	else
		we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);

	if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
		return -EFAULT;

	we_p->audit[0] |= FP_COPYFROM;
	SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
	we_p->funccode = funccode;
	we_p->devtype = -1;
	we_p->audit[0] |= FP_BUFFREQ;
	rv = get_crypto_request_buffer(we_p);
	switch (rv) {
	case 0:
		we_p->audit[0] |= FP_BUFFGOT;
		break;
	case SEN_USER_ERROR:
		rv = -EINVAL;
		break;
	case SEN_QUEUE_FULL:
		rv = 0;
		break;
	case SEN_RELEASED:
		rv = -EFAULT;
		break;
	case REC_NO_RESPONSE:
		rv = -ENODEV;
		break;
	case SEN_NOT_AVAIL:
	case EGETBUFF:
		rv = -EGETBUFF;
		break;
	default:
		PRINTK("rv = %d\n", rv);
		rv = -EGETBUFF;
		break;
	}
	if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
		SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
	return rv;
}
1621
/**
 * Remove a work element from whichever queue it sits on (request or
 * pending), adjusting the matching queue counter.  Takes and releases
 * queuespinlock; a no-op if the element is on neither list.
 */
static inline void
purge_work_element(struct work_element *we_p)
{
	struct list_head *lptr;

	spin_lock_irq(&queuespinlock);
	list_for_each(lptr, &request_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			requestq_count--;
			break;
		}
	}
	list_for_each(lptr, &pending_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			pendingq_count--;
			break;
		}
	}
	spin_unlock_irq(&queuespinlock);
}
1644
1645/**
1646 * Build the request and send it.
1647 */
/**
 * End-to-end handling of one RSA ioctl: allocate the work element,
 * build and send the request, sleep until the reader tasklet rings
 * the alarm, then copy results back to user space.  Failures are
 * post-processed: coprocessor -EINVAL becomes -EGETBUFF (fall back to
 * software), -ETIMEOUT becomes retry or -ENODEV, and retry/release
 * paths purge the element from the queues.
 *
 * Returns 0 on success or a negative errno.
 */
static inline int
z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
	     unsigned int cmd, unsigned long arg)
{
	struct work_element *we_p;
	int rv;

	if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
		PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
		return rv;
	}
	if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
		PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
	if (!rv)
		if ((rv = z90crypt_send(we_p, (const char *)arg)))
			PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
	if (!rv) {
		we_p->audit[0] |= FP_ASLEEP;
		/* Block until the reader tasklet completes this element. */
		wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
		we_p->audit[0] |= FP_AWAKE;
		rv = we_p->retcode;
	}
	if (!rv)
		rv = z90crypt_process_results(we_p, (char __user *)arg);

	if ((we_p->status[0] & STAT_FAILED)) {
		switch (rv) {
		/**
		 * EINVAL *after* receive is almost always a padding error or
		 * length error issued by a coprocessor (not an accelerator).
		 * We convert this return value to -EGETBUFF which should
		 * trigger a fallback to software.
		 */
		case -EINVAL:
			if ((we_p->devtype != PCICA) &&
			    (we_p->devtype != CEX2A))
				rv = -EGETBUFF;
			break;
		case -ETIMEOUT:
			if (z90crypt.mask.st_count > 0)
				rv = -ERESTARTSYS; // retry with another
			else
				rv = -ENODEV; // no cards left
		/* fall through to clean up request queue */
		case -ERESTARTSYS:
		case -ERELEASED:
			switch (CHK_RDWRMASK(we_p->status[0])) {
			case STAT_WRITTEN:
				purge_work_element(we_p);
				break;
			case STAT_READPEND:
			case STAT_NOWORK:
			default:
				break;
			}
			break;
		default:
			we_p->status[0] ^= STAT_FAILED;
			break;
		}
	}
	free_page((long)we_p);
	return rv;
}
1712
1713/**
1714 * This function is a little long, but it's really just one large switch
1715 * statement.
1716 */
1717static long
1718z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1719{
1720 struct priv_data *private_data_p = filp->private_data;
1721 unsigned char *status;
1722 unsigned char *qdepth;
1723 unsigned int *reqcnt;
1724 struct ica_z90_status *pstat;
1725 int ret, i, loopLim, tempstat;
1726 static int deprecated_msg_count1 = 0;
1727 static int deprecated_msg_count2 = 0;
1728
1729 PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
1730 PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
1731 cmd,
1732 !_IOC_DIR(cmd) ? "NO"
1733 : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
1734 : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
1735 : "WR")),
1736 _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));
1737
1738 if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
1739 PRINTK("cmd 0x%08X contains bad magic\n", cmd);
1740 return -ENOTTY;
1741 }
1742
1743 ret = 0;
1744 switch (cmd) {
1745 case ICARSAMODEXPO:
1746 case ICARSACRT:
1747 if (quiesce_z90crypt) {
1748 ret = -EQUIESCE;
1749 break;
1750 }
1751 ret = -ENODEV; // Default if no devices
1752 loopLim = z90crypt.hdware_info->hdware_mask.st_count -
1753 (z90crypt.hdware_info->hdware_mask.disabled_count +
1754 z90crypt.hdware_info->hdware_mask.user_disabled_count);
1755 for (i = 0; i < loopLim; i++) {
1756 ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
1757 if (ret != -ERESTARTSYS)
1758 break;
1759 }
1760 if (ret == -ERESTARTSYS)
1761 ret = -ENODEV;
1762 break;
1763
1764 case Z90STAT_TOTALCOUNT:
1765 tempstat = get_status_totalcount();
1766 if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
1767 ret = -EFAULT;
1768 break;
1769
1770 case Z90STAT_PCICACOUNT:
1771 tempstat = get_status_PCICAcount();
1772 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1773 ret = -EFAULT;
1774 break;
1775
1776 case Z90STAT_PCICCCOUNT:
1777 tempstat = get_status_PCICCcount();
1778 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1779 ret = -EFAULT;
1780 break;
1781
1782 case Z90STAT_PCIXCCMCL2COUNT:
1783 tempstat = get_status_PCIXCCMCL2count();
1784 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1785 ret = -EFAULT;
1786 break;
1787
1788 case Z90STAT_PCIXCCMCL3COUNT:
1789 tempstat = get_status_PCIXCCMCL3count();
1790 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1791 ret = -EFAULT;
1792 break;
1793
1794 case Z90STAT_CEX2CCOUNT:
1795 tempstat = get_status_CEX2Ccount();
1796 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1797 ret = -EFAULT;
1798 break;
1799
1800 case Z90STAT_CEX2ACOUNT:
1801 tempstat = get_status_CEX2Acount();
1802 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1803 ret = -EFAULT;
1804 break;
1805
1806 case Z90STAT_REQUESTQ_COUNT:
1807 tempstat = get_status_requestq_count();
1808 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1809 ret = -EFAULT;
1810 break;
1811
1812 case Z90STAT_PENDINGQ_COUNT:
1813 tempstat = get_status_pendingq_count();
1814 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1815 ret = -EFAULT;
1816 break;
1817
1818 case Z90STAT_TOTALOPEN_COUNT:
1819 tempstat = get_status_totalopen_count();
1820 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1821 ret = -EFAULT;
1822 break;
1823
1824 case Z90STAT_DOMAIN_INDEX:
1825 tempstat = get_status_domain_index();
1826 if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
1827 ret = -EFAULT;
1828 break;
1829
1830 case Z90STAT_STATUS_MASK:
1831 status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1832 if (!status) {
1833 PRINTK("kmalloc for status failed!\n");
1834 ret = -ENOMEM;
1835 break;
1836 }
1837 get_status_status_mask(status);
1838 if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
1839 != 0)
1840 ret = -EFAULT;
1841 kfree(status);
1842 break;
1843
1844 case Z90STAT_QDEPTH_MASK:
1845 qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
1846 if (!qdepth) {
1847 PRINTK("kmalloc for qdepth failed!\n");
1848 ret = -ENOMEM;
1849 break;
1850 }
1851 get_status_qdepth_mask(qdepth);
1852 if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
1853 ret = -EFAULT;
1854 kfree(qdepth);
1855 break;
1856
1857 case Z90STAT_PERDEV_REQCNT:
1858 reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
1859 if (!reqcnt) {
1860 PRINTK("kmalloc for reqcnt failed!\n");
1861 ret = -ENOMEM;
1862 break;
1863 }
1864 get_status_perdevice_reqcnt(reqcnt);
1865 if (copy_to_user((char __user *) arg, reqcnt,
1866 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
1867 ret = -EFAULT;
1868 kfree(reqcnt);
1869 break;
1870
1871 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1872 case ICAZ90STATUS:
1873 if (deprecated_msg_count1 < 20) {
1874 PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
1875 deprecated_msg_count1++;
1876 if (deprecated_msg_count1 == 20)
1877 PRINTK("No longer issuing messages related to "
1878 "deprecated call to ICAZ90STATUS.\n");
1879 }
1880
1881 pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
1882 if (!pstat) {
1883 PRINTK("kmalloc for pstat failed!\n");
1884 ret = -ENOMEM;
1885 break;
1886 }
1887
1888 pstat->totalcount = get_status_totalcount();
1889 pstat->leedslitecount = get_status_PCICAcount();
1890 pstat->leeds2count = get_status_PCICCcount();
1891 pstat->requestqWaitCount = get_status_requestq_count();
1892 pstat->pendingqWaitCount = get_status_pendingq_count();
1893 pstat->totalOpenCount = get_status_totalopen_count();
1894 pstat->cryptoDomain = get_status_domain_index();
1895 get_status_status_mask(pstat->status);
1896 get_status_qdepth_mask(pstat->qdepth);
1897
1898 if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
1899 sizeof(struct ica_z90_status)) != 0)
1900 ret = -EFAULT;
1901 kfree(pstat);
1902 break;
1903
1904 /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */
1905 case Z90STAT_PCIXCCCOUNT:
1906 if (deprecated_msg_count2 < 20) {
1907 PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
1908 deprecated_msg_count2++;
1909 if (deprecated_msg_count2 == 20)
1910 PRINTK("No longer issuing messages about depre"
1911 "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
1912 }
1913
1914 tempstat = get_status_PCIXCCcount();
1915 if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0)
1916 ret = -EFAULT;
1917 break;
1918
1919 case Z90QUIESCE:
1920 if (current->euid != 0) {
1921 PRINTK("QUIESCE fails: euid %d\n",
1922 current->euid);
1923 ret = -EACCES;
1924 } else {
1925 PRINTK("QUIESCE device from PID %d\n", PID());
1926 quiesce_z90crypt = 1;
1927 }
1928 break;
1929
1930 default:
1931 /* user passed an invalid IOCTL number */
1932 PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
1933 ret = -ENOTTY;
1934 break;
1935 }
1936
1937 return ret;
1938}
1939
/**
 * Render `len` bytes as lowercase hex digits (no separators) followed
 * by one trailing blank.  Returns the number of characters written to
 * outaddr.
 */
static inline int
sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int pos;
	int written = 0;

	for (pos = 0; pos < len; pos++)
		written += sprintf((char *)outaddr + written, "%01x",
				   (unsigned int) addr[pos]);
	written += sprintf((char *)outaddr + written, " ");

	return written;
}
1952
/**
 * Format one row of a hex dump: a leading blank, then `len` bytes
 * rendered in groups of up to 16 via sprintcl(), terminated by a
 * newline.  Returns the number of characters written to outaddr.
 */
static inline int
sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int done = 0;
	int written;

	written = sprintf((char *)outaddr, " ");
	/* Full 16-byte groups first ... */
	while (done + 16 <= len) {
		written += sprintcl(outaddr + written, addr + done, 16);
		done += 16;
	}
	/* ... then whatever partial group remains. */
	if (done < len)
		written += sprintcl(outaddr + written, addr + done,
				    len - done);
	written += sprintf((char *)outaddr + written, "\n");

	return written;
}
1975
/**
 * Format a titled hex dump: a blank line, the title, then `len` bytes
 * rendered in rows of up to 64 via sprintrw(), followed by a blank
 * line.  Returns the number of characters written to outaddr.
 */
static inline int
sprinthx(unsigned char *title, unsigned char *outaddr,
	 unsigned char *addr, unsigned int len)
{
	unsigned int consumed = 0;
	int written;

	written = sprintf((char *)outaddr, "\n%s\n", title);
	/* Full 64-byte rows first ... */
	while (consumed + 64 <= len) {
		written += sprintrw(outaddr + written, addr + consumed, 64);
		consumed += 64;
	}
	/* ... then a short final row if bytes remain. */
	if (consumed < len) {
		written += sprintrw(outaddr + written, addr + consumed,
				    len - consumed);
		consumed = len;
	}
	written += sprintf((char *)outaddr + written, "\n");

	return written;
}
1998
/**
 * Format a titled dump of 32-bit words: a blank line, the title, then
 * `len` words as zero-padded uppercase hex, eight per row (each row
 * indented by one blank), followed by a blank line.  Returns the
 * number of characters written to outaddr.
 */
static inline int
sprinthx4(unsigned char *title, unsigned char *outaddr,
	  unsigned int *array, unsigned int len)
{
	unsigned int idx;
	int written;

	written = sprintf((char *)outaddr, "\n%s\n", title);
	for (idx = 0; idx < len; idx++) {
		if ((idx % 8) == 0)
			written += sprintf((char *)outaddr + written, " ");
		written += sprintf((char *)outaddr + written, "%08X ",
				   array[idx]);
		if ((idx % 8) == 7)
			written += sprintf((char *)outaddr + written, "\n");
	}
	written += sprintf((char *)outaddr + written, "\n");

	return written;
}
2019
/**
 * /proc read handler: render the driver version, domain, per-type
 * device counts, queue depths and open-handle count, followed by hex
 * dumps of the device status mask, queue-depth mask and per-device
 * request counts.  Returns the number of bytes written to resp_buff.
 */
static int
z90crypt_status(char *resp_buff, char **start, off_t offset,
		int count, int *eof, void *data)
{
	unsigned char *workarea;
	int len;

	/* resp_buff is a page. Use the right half for a work area */
	workarea = resp_buff+2000;
	len = 0;
	len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
	len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
		get_status_domain_index());
	len += sprintf(resp_buff+len, "Total device count: %d\n",
		get_status_totalcount());
	len += sprintf(resp_buff+len, "PCICA count: %d\n",
		get_status_PCICAcount());
	len += sprintf(resp_buff+len, "PCICC count: %d\n",
		get_status_PCICCcount());
	len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
		get_status_PCIXCCMCL2count());
	len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
		get_status_PCIXCCMCL3count());
	len += sprintf(resp_buff+len, "CEX2C count: %d\n",
		get_status_CEX2Ccount());
	len += sprintf(resp_buff+len, "CEX2A count: %d\n",
		get_status_CEX2Acount());
	len += sprintf(resp_buff+len, "requestq count: %d\n",
		get_status_requestq_count());
	len += sprintf(resp_buff+len, "pendingq count: %d\n",
		get_status_pendingq_count());
	len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
		get_status_totalopen_count());
	len += sprinthx(
		"Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
		"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
		resp_buff+len,
		get_status_status_mask(workarea),
		Z90CRYPT_NUM_APS);
	len += sprinthx("Waiting work element counts",
		resp_buff+len,
		get_status_qdepth_mask(workarea),
		Z90CRYPT_NUM_APS);
	len += sprinthx4(
		"Per-device successfully completed request counts",
		resp_buff+len,
		get_status_perdevice_reqcnt((unsigned int *)workarea),
		Z90CRYPT_NUM_APS);
	*eof = 1;
	memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
	return len;
}
2073
2074static inline void
2075disable_card(int card_index)
2076{
2077 struct device *devp;
2078
2079 devp = LONG2DEVPTR(card_index);
2080 if (!devp || devp->user_disabled)
2081 return;
2082 devp->user_disabled = 1;
2083 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2084 if (devp->dev_type == -1)
2085 return;
2086 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2087}
2088
2089static inline void
2090enable_card(int card_index)
2091{
2092 struct device *devp;
2093
2094 devp = LONG2DEVPTR(card_index);
2095 if (!devp || !devp->user_disabled)
2096 return;
2097 devp->user_disabled = 0;
2098 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2099 if (devp->dev_type == -1)
2100 return;
2101 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2102}
2103
2104static int
2105z90crypt_status_write(struct file *file, const char __user *buffer,
2106 unsigned long count, void *data)
2107{
2108 int j, eol;
2109 unsigned char *lbuf, *ptr;
2110 unsigned int local_count;
2111
2112#define LBUFSIZE 1200
2113 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2114 if (!lbuf) {
2115 PRINTK("kmalloc failed!\n");
2116 return 0;
2117 }
2118
2119 if (count <= 0)
2120 return 0;
2121
2122 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2123
2124 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2125 kfree(lbuf);
2126 return -EFAULT;
2127 }
2128
2129 lbuf[local_count] = '\0';
2130
2131 ptr = strstr(lbuf, "Online devices");
2132 if (ptr == 0) {
2133 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2134 kfree(lbuf);
2135 return count;
2136 }
2137
2138 ptr = strstr(ptr, "\n");
2139 if (ptr == 0) {
2140 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2141 kfree(lbuf);
2142 return count;
2143 }
2144 ptr++;
2145
2146 if (strstr(ptr, "Waiting work element counts") == NULL) {
2147 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2148 kfree(lbuf);
2149 return count;
2150 }
2151
2152 j = 0;
2153 eol = 0;
2154 while ((j < 64) && (*ptr != '\0')) {
2155 switch (*ptr) {
2156 case '\t':
2157 case ' ':
2158 break;
2159 case '\n':
2160 default:
2161 eol = 1;
2162 break;
2163 case '0': // no device
2164 case '1': // PCICA
2165 case '2': // PCICC
2166 case '3': // PCIXCC_MCL2
2167 case '4': // PCIXCC_MCL3
2168 case '5': // CEX2C
2169 case '6': // CEX2A
2170 j++;
2171 break;
2172 case 'd':
2173 case 'D':
2174 disable_card(j);
2175 j++;
2176 break;
2177 case 'e':
2178 case 'E':
2179 enable_card(j);
2180 j++;
2181 break;
2182 }
2183 if (eol)
2184 break;
2185 ptr++;
2186 }
2187
2188 kfree(lbuf);
2189 return count;
2190}
2191
2192/**
2193 * Functions that run under a timer, with no process id
2194 *
2195 * The task functions:
2196 * z90crypt_reader_task
2197 * helper_send_work
2198 * helper_handle_work_element
2199 * helper_receive_rc
2200 * z90crypt_config_task
2201 * z90crypt_cleanup_task
2202 *
2203 * Helper functions:
2204 * z90crypt_schedule_reader_timer
2205 * z90crypt_schedule_reader_task
2206 * z90crypt_schedule_config_task
2207 * z90crypt_schedule_cleanup_task
2208 */
/**
 * Dequeue one finished reply from the crypto device at @index.
 *
 * On success (return 0): the reply's PSMID is left in @psmid, the
 * converted response is in @buff with its length in *@buff_len_p, and
 * *@dest_p_p points at the user-space output buffer recorded in the
 * matching request.  Otherwise a REC_* status is returned; on
 * REC_NO_RESPONSE the device itself is removed from availability.
 * NOTE(review): appears to rely on the caller holding queuespinlock
 * (it is invoked from z90crypt_reader_task under that lock) — confirm.
 */
static inline int
receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
			   unsigned char *buff, unsigned char __user **dest_p_p)
{
	int dv, rv;
	struct device *dev_ptr;
	struct caller *caller_p;
	struct ica_rsa_modexpo *icaMsg_p;
	struct list_head *ptr, *tptr;

	/* Start with the null PSMID so failures return a known value. */
	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));

	if (z90crypt.terminating)
		return REC_FATAL_ERROR;

	caller_p = 0;
	dev_ptr = z90crypt.device_p[index];
	rv = 0;
	/* do { } while (0): single-exit error-handling block. */
	do {
		if (!dev_ptr || dev_ptr->disabled) {
			rv = REC_NO_WORK; // a disabled device can't return work
			break;
		}
		if (dev_ptr->dev_self_x != index) {
			/* Self-index mismatch: structures are corrupt. */
			PRINTKC("Corrupt dev ptr\n");
			z90crypt.terminating = 1;
			rv = REC_FATAL_ERROR;
			break;
		}
		if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
			/* No response buffer: treat as a receive exception. */
			dv = DEV_REC_EXCEPTION;
			PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
			       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
		} else {
			PDEBUG("Dequeue called for device %d\n", index);
			dv = receive_from_AP(index, z90crypt.cdx,
					     dev_ptr->dev_resp_l,
					     dev_ptr->dev_resp_p, psmid);
		}
		/* Map the hardware (DEV_*) status to a driver REC_* code. */
		switch (dv) {
		case DEV_REC_EXCEPTION:
			rv = REC_FATAL_ERROR;
			z90crypt.terminating = 1;
			PRINTKC("Exception in receive from device %d\n",
				index);
			break;
		case DEV_ONLINE:
			rv = 0;
			break;
		case DEV_EMPTY:
			rv = REC_EMPTY;
			break;
		case DEV_NO_WORK:
			rv = REC_NO_WORK;
			break;
		case DEV_BAD_MESSAGE:
		case DEV_GONE:
		case REC_HARDWAR_ERR:
		default:
			rv = REC_NO_RESPONSE;
			break;
		}
		if (rv)
			break;
		if (dev_ptr->dev_caller_count <= 0) {
			/* Reply arrived but nobody is waiting for it. */
			rv = REC_USER_GONE;
			break;
		}

		/* Find and unlink the caller whose PSMID matches the reply. */
		list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
			caller_p = list_entry(ptr, struct caller, caller_liste);
			if (!memcmp(caller_p->caller_id, psmid,
				    sizeof(caller_p->caller_id))) {
				if (!list_empty(&caller_p->caller_liste)) {
					list_del_init(ptr);
					dev_ptr->dev_caller_count--;
					break;
				}
			}
			caller_p = 0;
		}
		if (!caller_p) {
			PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
				"%02X%02X%02X in device list\n",
				psmid[0], psmid[1], psmid[2], psmid[3],
				psmid[4], psmid[5], psmid[6], psmid[7]);
			rv = REC_USER_GONE;
			break;
		}

		PDEBUG("caller_p after successful receive: %p\n", caller_p);
		/* Translate the raw device response into caller format. */
		rv = convert_response(dev_ptr->dev_resp_p,
				      caller_p->caller_buf_p, buff_len_p, buff);
		switch (rv) {
		case REC_USE_PCICA:
			break;
		case REC_OPERAND_INV:
		case REC_OPERAND_SIZE:
		case REC_EVEN_MOD:
		case REC_INVALID_PAD:
			/* User-supplied data was bad; not a device fault. */
			PDEBUG("device %d: 'user error' %d\n", index, rv);
			break;
		case WRONG_DEVICE_TYPE:
		case REC_HARDWAR_ERR:
		case REC_BAD_MESSAGE:
			PRINTKW("device %d: hardware error %d\n", index, rv);
			rv = REC_NO_RESPONSE;
			break;
		default:
			PDEBUG("device %d: rv = %d\n", index, rv);
			break;
		}
	} while (0);

	switch (rv) {
	case 0:
		PDEBUG("Successful receive from device %d\n", index);
		/* Remember where the user wants the output copied. */
		icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
		*dest_p_p = icaMsg_p->outputdata;
		if (*buff_len_p == 0)
			PRINTK("Zero *buff_len_p\n");
		break;
	case REC_NO_RESPONSE:
		/* Hardware error: take the device out of service. */
		PRINTKW("Removing device %d from availability\n", index);
		remove_device(dev_ptr);
		break;
	}

	/* Release the caller's tracking structure in every outcome. */
	if (caller_p)
		unbuild_caller(dev_ptr, caller_p);

	return rv;
}
2342
/**
 * Take the oldest request off request_list and try to send it to the
 * device at (short) @index.  On success the request moves to
 * pending_list; on any failure the request is failed and its waiter is
 * woken.  Called from z90crypt_reader_task with queuespinlock held.
 */
static inline void
helper_send_work(int index)
{
	struct work_element *rq_p;
	int rv;

	if (list_empty(&request_list))
		return;
	requestq_count--;
	rq_p = list_entry(request_list.next, struct work_element, liste);
	list_del_init(&rq_p->liste);
	rq_p->audit[1] |= FP_REMREQUEST;
	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
		/* Device type matches this request; attempt the send. */
		rq_p->devindex = SHRT2LONG(index);
		rv = send_to_crypto_device(rq_p);
		if (rv == 0) {
			rq_p->requestsent = jiffies;
			rq_p->audit[0] |= FP_SENT;
			list_add_tail(&rq_p->liste, &pending_list);
			++pendingq_count;
			rq_p->audit[0] |= FP_PENDING;
		} else {
			switch (rv) {
			case REC_OPERAND_INV:
			case REC_OPERAND_SIZE:
			case REC_EVEN_MOD:
			case REC_INVALID_PAD:
				/* Bad user data: hard failure. */
				rq_p->retcode = -EINVAL;
				break;
			case SEN_NOT_AVAIL:
			case SEN_RETRY:
			case REC_NO_RESPONSE:
			default:
				/**
				 * With more than one device available the
				 * caller may retry elsewhere; otherwise
				 * there is nowhere left to go.
				 */
				if (z90crypt.mask.st_count > 1)
					rq_p->retcode =
						-ERESTARTSYS;
				else
					rq_p->retcode = -ENODEV;
				break;
			}
			rq_p->status[0] |= STAT_FAILED;
			rq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&rq_p->alarmrung, 1);
			wake_up(&rq_p->waitq);
		}
	} else {
		/* Wrong device type for this request: fail it now. */
		if (z90crypt.mask.st_count > 1)
			rq_p->retcode = -ERESTARTSYS;
		else
			rq_p->retcode = -ENODEV;
		rq_p->status[0] |= STAT_FAILED;
		rq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&rq_p->alarmrung, 1);
		wake_up(&rq_p->waitq);
	}
}
2399
/**
 * Match a received reply (identified by @psmid) against pending_list,
 * copy the result or an error code into the work element, and wake the
 * waiting caller.  @rc is the receive status; @buff/@buff_len hold the
 * converted response and @resp_addr the user-space destination.
 * Called with queuespinlock held.
 */
static inline void
helper_handle_work_element(int index, unsigned char psmid[8], int rc,
			   int buff_len, unsigned char *buff,
			   unsigned char __user *resp_addr)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	/* Find and unlink the pending element with this PSMID. */
	pq_p = 0;
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
			list_del_init(lptr);
			pendingq_count--;
			pq_p->audit[1] |= FP_NOTPENDING;
			break;
		}
		pq_p = 0;
	}

	if (!pq_p) {
		/* Reply with no waiter; nothing to deliver. */
		PRINTK("device %d has work but no caller exists on pending Q\n",
		       SHRT2LONG(index));
		return;
	}

	switch (rc) {
	case 0:
		/* Success: stash the response for the waiting process. */
		pq_p->resp_buff_size = buff_len;
		pq_p->audit[1] |= FP_RESPSIZESET;
		if (buff_len) {
			pq_p->resp_addr = resp_addr;
			pq_p->audit[1] |= FP_RESPADDRCOPIED;
			memcpy(pq_p->resp_buff, buff, buff_len);
			pq_p->audit[1] |= FP_RESPBUFFCOPIED;
		}
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		PDEBUG("-EINVAL after application error %d\n", rc);
		pq_p->retcode = -EINVAL;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_USE_PCICA:
		/* Caller should resubmit so a PCICA can be chosen. */
		pq_p->retcode = -ERESTARTSYS;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_NO_RESPONSE:
	default:
		if (z90crypt.mask.st_count > 1)
			pq_p->retcode = -ERESTARTSYS;
		else
			pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		break;
	}
	/* Wake unless the element was already failed as released. */
	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
2464
2465/**
2466 * return TRUE if the work element should be removed from the queue
2467 */
2468static inline int
2469helper_receive_rc(int index, int *rc_p)
2470{
2471 switch (*rc_p) {
2472 case 0:
2473 case REC_OPERAND_INV:
2474 case REC_OPERAND_SIZE:
2475 case REC_EVEN_MOD:
2476 case REC_INVALID_PAD:
2477 case REC_USE_PCICA:
2478 break;
2479
2480 case REC_BUSY:
2481 case REC_NO_WORK:
2482 case REC_EMPTY:
2483 case REC_RETRY_DEV:
2484 case REC_FATAL_ERROR:
2485 return 0;
2486
2487 case REC_NO_RESPONSE:
2488 break;
2489
2490 default:
2491 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2492 *rc_p, SHRT2LONG(index));
2493 *rc_p = REC_NO_RESPONSE;
2494 break;
2495 }
2496 return 1;
2497}
2498
2499static inline void
2500z90crypt_schedule_reader_timer(void)
2501{
2502 if (timer_pending(&reader_timer))
2503 return;
2504 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2505 PRINTK("Timer pending while modifying reader timer\n");
2506}
2507
/**
 * Tasklet body: repeatedly poll every online device for finished
 * replies, deliver them to their waiters, and feed new requests to
 * devices that just produced a reply.  Reschedules the reader timer if
 * work remains queued.
 */
static void
z90crypt_reader_task(unsigned long ptr)
{
	int workavail, index, rc, buff_len;
	unsigned char psmid[8];
	unsigned char __user *resp_addr;
	/* static: tasklet runs single-threaded, so one buffer suffices. */
	static unsigned char buff[1024];

	/**
	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
	 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
	 * loop, there is no work remaining on the queues.
	 */
	resp_addr = 0;
	workavail = 2;
	buff_len = 0;
	while (workavail) {
		workavail--;
		rc = 0;
		spin_lock_irq(&queuespinlock);
		memset(buff, 0x00, sizeof(buff));

		/* Dequeue once from each device in round robin. */
		for (index = 0; index < z90crypt.mask.st_count; index++) {
			PDEBUG("About to receive.\n");
			rc = receive_from_crypto_device(SHRT2LONG(index),
							psmid,
							&buff_len,
							buff,
							&resp_addr);
			PDEBUG("Dequeued: rc = %d.\n", rc);

			if (helper_receive_rc(index, &rc)) {
				if (rc != REC_NO_RESPONSE) {
					/* Device freed a slot: refill it. */
					helper_send_work(index);
					workavail = 2;
				}

				helper_handle_work_element(index, psmid, rc,
							   buff_len, buff,
							   resp_addr);
			}

			if (rc == REC_FATAL_ERROR)
				PRINTKW("REC_FATAL_ERROR from device %d!\n",
					SHRT2LONG(index));
		}
		spin_unlock_irq(&queuespinlock);
	}

	/* Still work queued: poll again after READERTIME. */
	if (pendingq_count + requestq_count)
		z90crypt_schedule_reader_timer();
}
2561
2562static inline void
2563z90crypt_schedule_config_task(unsigned int expiration)
2564{
2565 if (timer_pending(&config_timer))
2566 return;
2567 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2568 PRINTK("Timer pending while modifying config timer\n");
2569}
2570
2571static void
2572z90crypt_config_task(unsigned long ptr)
2573{
2574 int rc;
2575
2576 PDEBUG("jiffies %ld\n", jiffies);
2577
2578 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2579 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2580 /* If return was fatal, don't bother reconfiguring */
2581 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2582 z90crypt_schedule_config_task(CONFIGTIME);
2583}
2584
2585static inline void
2586z90crypt_schedule_cleanup_task(void)
2587{
2588 if (timer_pending(&cleanup_timer))
2589 return;
2590 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2591 PRINTK("Timer pending while modifying cleanup timer\n");
2592}
2593
/**
 * Fail every queued request with -ENODEV and wake all waiters.  Used
 * when no devices remain.  Called with queuespinlock held.
 */
static inline void
helper_drain_queues(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	/* Pending requests: also detach each from its device's caller list. */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *)pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/* Requests never sent to a device just come off the request list. */
	list_for_each_safe(lptr, tptr, &request_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		list_del_init(lptr);
		requestq_count--;
		pq_p->audit[1] |= FP_REMREQUEST;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
2626
/**
 * Fail every request older than CLEANUPTIME seconds with -ETIMEOUT
 * (driver-local code) and wake its waiter.  Requests still on the
 * request queue are only purged when nothing is pending, since then
 * they might never be processed.  Called with queuespinlock held.
 */
static inline void
helper_timeout_requests(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	long timelimit;

	timelimit = jiffies - (CLEANUPTIME * HZ);
	/* The list is in strict chronological order */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (pq_p->requestsent >= timelimit)
			break;
		PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
			((struct caller *)pq_p->requestptr)->caller_id[0],
			((struct caller *)pq_p->requestptr)->caller_id[1],
			((struct caller *)pq_p->requestptr)->caller_id[2],
			((struct caller *)pq_p->requestptr)->caller_id[3],
			((struct caller *)pq_p->requestptr)->caller_id[4],
			((struct caller *)pq_p->requestptr)->caller_id[5],
			((struct caller *)pq_p->requestptr)->caller_id[6],
			((struct caller *)pq_p->requestptr)->caller_id[7]);
		pq_p->retcode = -ETIMEOUT;
		pq_p->status[0] |= STAT_FAILED;
		/* get this off any caller queue it may be on */
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *) pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_TIMEDOUT;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/**
	 * If pending count is zero, items left on the request queue may
	 * never be processed.
	 */
	if (pendingq_count <= 0) {
		list_for_each_safe(lptr, tptr, &request_list) {
			pq_p = list_entry(lptr, struct work_element, liste);
			if (pq_p->requestsent >= timelimit)
				break;
			PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
				((struct caller *)pq_p->requestptr)->caller_id[0],
				((struct caller *)pq_p->requestptr)->caller_id[1],
				((struct caller *)pq_p->requestptr)->caller_id[2],
				((struct caller *)pq_p->requestptr)->caller_id[3],
				((struct caller *)pq_p->requestptr)->caller_id[4],
				((struct caller *)pq_p->requestptr)->caller_id[5],
				((struct caller *)pq_p->requestptr)->caller_id[6],
				((struct caller *)pq_p->requestptr)->caller_id[7]);
			pq_p->retcode = -ETIMEOUT;
			pq_p->status[0] |= STAT_FAILED;
			list_del_init(lptr);
			requestq_count--;
			pq_p->audit[1] |= FP_TIMEDOUT;
			pq_p->audit[1] |= FP_REMREQUEST;
			pq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&pq_p->alarmrung, 1);
			wake_up(&pq_p->waitq);
		}
	}
}
2693
2694static void
2695z90crypt_cleanup_task(unsigned long ptr)
2696{
2697 PDEBUG("jiffies %ld\n", jiffies);
2698 spin_lock_irq(&queuespinlock);
2699 if (z90crypt.mask.st_count <= 0) // no devices!
2700 helper_drain_queues();
2701 else
2702 helper_timeout_requests();
2703 spin_unlock_irq(&queuespinlock);
2704 z90crypt_schedule_cleanup_task();
2705}
2706
/**
 * Reader timer callback: defer the actual dequeue work to tasklet
 * (softirq) context, where z90crypt_reader_task runs.
 */
static void
z90crypt_schedule_reader_task(unsigned long ptr)
{
	tasklet_schedule(&reader_tasklet);
}
2712
2713/**
2714 * Lowlevel Functions:
2715 *
2716 * create_z90crypt: creates and initializes basic data structures
2717 * refresh_z90crypt: re-initializes basic data structures
2718 * find_crypto_devices: returns a count and mask of hardware status
2719 * create_crypto_device: builds the descriptor for a device
2720 * destroy_crypto_device: unallocates the descriptor for a device
2721 * destroy_z90crypt: drains all work, unallocates structs
2722 */
2723
2724/**
2725 * build the z90crypt root structure using the given domain index
2726 */
2727static int
2728create_z90crypt(int *cdx_p)
2729{
2730 struct hdware_block *hdware_blk_p;
2731
2732 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2733 z90crypt.domain_established = 0;
2734 z90crypt.len = sizeof(struct z90crypt);
2735 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2736 z90crypt.cdx = *cdx_p;
2737
2738 hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2739 if (!hdware_blk_p) {
2740 PDEBUG("kmalloc for hardware block failed\n");
2741 return ENOMEM;
2742 }
2743 z90crypt.hdware_info = hdware_blk_p;
2744
2745 return 0;
2746}
2747
/**
 * Probe every device slot across all 16 candidate domains, collecting
 * the online domain numbers into @cdx_array.  Stops early if the
 * requested domain *@cdx_p is found (sets *@correct_cdx_found), or as
 * soon as any card reports at least one online domain.  Returns the
 * number of domains found on the card that was examined last.
 */
static inline int
helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
{
	enum hdstat hd_stat;
	int q_depth, dev_type;
	int indx, chkdom, numdomains;

	q_depth = dev_type = numdomains = 0;
	/* One-liner: fill all 16 slots of cdx_array with -1 (no domain). */
	for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
	for (indx = 0; indx < z90crypt.max_count; indx++) {
		hd_stat = HD_NOT_THERE;
		numdomains = 0;
		for (chkdom = 0; chkdom <= 15; chkdom++) {
			hd_stat = query_online(indx, chkdom, MAX_RESET,
					       &q_depth, &dev_type);
			if (hd_stat == HD_TSQ_EXCEPTION) {
				z90crypt.terminating = 1;
				PRINTKC("exception taken!\n");
				break;
			}
			if (hd_stat == HD_ONLINE) {
				cdx_array[numdomains++] = chkdom;
				if (*cdx_p == chkdom) {
					*correct_cdx_found = 1;
					break;
				}
			}
		}
		/* First card with any online domain decides the answer. */
		if ((*correct_cdx_found == 1) || (numdomains != 0))
			break;
		if (z90crypt.terminating)
			break;
	}
	return numdomains;
}
2783
2784static inline int
2785probe_crypto_domain(int *cdx_p)
2786{
2787 int cdx_array[16];
2788 char cdx_array_text[53], temp[5];
2789 int correct_cdx_found, numdomains;
2790
2791 correct_cdx_found = 0;
2792 numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);
2793
2794 if (z90crypt.terminating)
2795 return TSQ_FATAL_ERROR;
2796
2797 if (correct_cdx_found)
2798 return 0;
2799
2800 if (numdomains == 0) {
2801 PRINTKW("Unable to find crypto domain: No devices found\n");
2802 return Z90C_NO_DEVICES;
2803 }
2804
2805 if (numdomains == 1) {
2806 if (*cdx_p == -1) {
2807 *cdx_p = cdx_array[0];
2808 return 0;
2809 }
2810 PRINTKW("incorrect domain: specified = %d, found = %d\n",
2811 *cdx_p, cdx_array[0]);
2812 return Z90C_INCORRECT_DOMAIN;
2813 }
2814
2815 numdomains--;
2816 sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
2817 while (numdomains) {
2818 numdomains--;
2819 sprintf(temp, ", %d", cdx_array[numdomains]);
2820 strcat(cdx_array_text, temp);
2821 }
2822
2823 PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
2824 *cdx_p, cdx_array_text);
2825 return Z90C_AMBIGUOUS_DOMAIN;
2826}
2827
2828static int
2829refresh_z90crypt(int *cdx_p)
2830{
2831 int i, j, indx, rv;
2832 static struct status local_mask;
2833 struct device *devPtr;
2834 unsigned char oldStat, newStat;
2835 int return_unchanged;
2836
2837 if (z90crypt.len != sizeof(z90crypt))
2838 return ENOTINIT;
2839 if (z90crypt.terminating)
2840 return TSQ_FATAL_ERROR;
2841 rv = 0;
2842 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2843 !z90crypt.domain_established) {
2844 rv = probe_crypto_domain(cdx_p);
2845 if (z90crypt.terminating)
2846 return TSQ_FATAL_ERROR;
2847 if (rv == Z90C_NO_DEVICES)
2848 return 0; // try later
2849 if (rv)
2850 return rv;
2851 z90crypt.cdx = *cdx_p;
2852 z90crypt.domain_established = 1;
2853 }
2854 rv = find_crypto_devices(&local_mask);
2855 if (rv) {
2856 PRINTK("find crypto devices returned %d\n", rv);
2857 return rv;
2858 }
2859 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2860 sizeof(struct status))) {
2861 return_unchanged = 1;
2862 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2863 /**
2864 * Check for disabled cards. If any device is marked
2865 * disabled, destroy it.
2866 */
2867 for (j = 0;
2868 j < z90crypt.hdware_info->type_mask[i].st_count;
2869 j++) {
2870 indx = z90crypt.hdware_info->type_x_addr[i].
2871 device_index[j];
2872 devPtr = z90crypt.device_p[indx];
2873 if (devPtr && devPtr->disabled) {
2874 local_mask.st_mask[indx] = HD_NOT_THERE;
2875 return_unchanged = 0;
2876 }
2877 }
2878 }
2879 if (return_unchanged == 1)
2880 return 0;
2881 }
2882
2883 spin_lock_irq(&queuespinlock);
2884 for (i = 0; i < z90crypt.max_count; i++) {
2885 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2886 newStat = local_mask.st_mask[i];
2887 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2888 destroy_crypto_device(i);
2889 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2890 rv = create_crypto_device(i);
2891 if (rv >= REC_FATAL_ERROR)
2892 return rv;
2893 if (rv != 0) {
2894 local_mask.st_mask[i] = HD_NOT_THERE;
2895 local_mask.st_count--;
2896 }
2897 }
2898 }
2899 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2900 sizeof(local_mask.st_mask));
2901 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2902 z90crypt.hdware_info->hdware_mask.disabled_count =
2903 local_mask.disabled_count;
2904 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2905 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2906 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2907 &(z90crypt.hdware_info->type_x_addr[i]));
2908 spin_unlock_irq(&queuespinlock);
2909
2910 return rv;
2911}
2912
2913static int
2914find_crypto_devices(struct status *deviceMask)
2915{
2916 int i, q_depth, dev_type;
2917 enum hdstat hd_stat;
2918
2919 deviceMask->st_count = 0;
2920 deviceMask->disabled_count = 0;
2921 deviceMask->user_disabled_count = 0;
2922
2923 for (i = 0; i < z90crypt.max_count; i++) {
2924 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2925 &dev_type);
2926 if (hd_stat == HD_TSQ_EXCEPTION) {
2927 z90crypt.terminating = 1;
2928 PRINTKC("Exception during probe for crypto devices\n");
2929 return TSQ_FATAL_ERROR;
2930 }
2931 deviceMask->st_mask[i] = hd_stat;
2932 if (hd_stat == HD_ONLINE) {
2933 PDEBUG("Got an online crypto!: %d\n", i);
2934 PDEBUG("Got a queue depth of %d\n", q_depth);
2935 PDEBUG("Got a device type of %d\n", dev_type);
2936 if (q_depth <= 0)
2937 return TSQ_FATAL_ERROR;
2938 deviceMask->st_count++;
2939 z90crypt.q_depth_array[i] = q_depth;
2940 z90crypt.dev_type_array[i] = dev_type;
2941 }
2942 }
2943
2944 return 0;
2945}
2946
2947static int
2948refresh_index_array(struct status *status_str, struct device_x *index_array)
2949{
2950 int i, count;
2951 enum devstat stat;
2952
2953 i = -1;
2954 count = 0;
2955 do {
2956 stat = status_str->st_mask[++i];
2957 if (stat == DEV_ONLINE)
2958 index_array->device_index[count++] = i;
2959 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2960
2961 return count;
2962}
2963
2964static int
2965create_crypto_device(int index)
2966{
2967 int rv, devstat, total_size;
2968 struct device *dev_ptr;
2969 struct status *type_str_p;
2970 int deviceType;
2971
2972 dev_ptr = z90crypt.device_p[index];
2973 if (!dev_ptr) {
2974 total_size = sizeof(struct device) +
2975 z90crypt.q_depth_array[index] * sizeof(int);
2976
2977 dev_ptr = kzalloc(total_size, GFP_ATOMIC);
2978 if (!dev_ptr) {
2979 PRINTK("kmalloc device %d failed\n", index);
2980 return ENOMEM;
2981 }
2982 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2983 if (!dev_ptr->dev_resp_p) {
2984 kfree(dev_ptr);
2985 PRINTK("kmalloc device %d rec buffer failed\n", index);
2986 return ENOMEM;
2987 }
2988 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2989 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2990 }
2991
2992 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2993 if (devstat == DEV_RSQ_EXCEPTION) {
2994 PRINTK("exception during reset device %d\n", index);
2995 kfree(dev_ptr->dev_resp_p);
2996 kfree(dev_ptr);
2997 return RSQ_FATAL_ERROR;
2998 }
2999 if (devstat == DEV_ONLINE) {
3000 dev_ptr->dev_self_x = index;
3001 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3002 if (dev_ptr->dev_type == NILDEV) {
3003 rv = probe_device_type(dev_ptr);
3004 if (rv) {
3005 PRINTK("rv = %d from probe_device_type %d\n",
3006 rv, index);
3007 kfree(dev_ptr->dev_resp_p);
3008 kfree(dev_ptr);
3009 return rv;
3010 }
3011 }
3012 if (dev_ptr->dev_type == PCIXCC_UNK) {
3013 rv = probe_PCIXCC_type(dev_ptr);
3014 if (rv) {
3015 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3016 rv, index);
3017 kfree(dev_ptr->dev_resp_p);
3018 kfree(dev_ptr);
3019 return rv;
3020 }
3021 }
3022 deviceType = dev_ptr->dev_type;
3023 z90crypt.dev_type_array[index] = deviceType;
3024 if (deviceType == PCICA)
3025 z90crypt.hdware_info->device_type_array[index] = 1;
3026 else if (deviceType == PCICC)
3027 z90crypt.hdware_info->device_type_array[index] = 2;
3028 else if (deviceType == PCIXCC_MCL2)
3029 z90crypt.hdware_info->device_type_array[index] = 3;
3030 else if (deviceType == PCIXCC_MCL3)
3031 z90crypt.hdware_info->device_type_array[index] = 4;
3032 else if (deviceType == CEX2C)
3033 z90crypt.hdware_info->device_type_array[index] = 5;
3034 else if (deviceType == CEX2A)
3035 z90crypt.hdware_info->device_type_array[index] = 6;
3036 else // No idea how this would happen.
3037 z90crypt.hdware_info->device_type_array[index] = -1;
3038 }
3039
3040 /**
3041 * 'q_depth' returned by the hardware is one less than
3042 * the actual depth
3043 */
3044 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3045 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3046 dev_ptr->dev_stat = devstat;
3047 dev_ptr->disabled = 0;
3048 z90crypt.device_p[index] = dev_ptr;
3049
3050 if (devstat == DEV_ONLINE) {
3051 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3052 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3053 z90crypt.mask.st_count++;
3054 }
3055 deviceType = dev_ptr->dev_type;
3056 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3057 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3058 type_str_p->st_mask[index] = DEV_ONLINE;
3059 type_str_p->st_count++;
3060 }
3061 }
3062
3063 return 0;
3064}
3065
3066static int
3067destroy_crypto_device(int index)
3068{
3069 struct device *dev_ptr;
3070 int t, disabledFlag;
3071
3072 dev_ptr = z90crypt.device_p[index];
3073
3074 /* remember device type; get rid of device struct */
3075 if (dev_ptr) {
3076 disabledFlag = dev_ptr->disabled;
3077 t = dev_ptr->dev_type;
3078 kfree(dev_ptr->dev_resp_p);
3079 kfree(dev_ptr);
3080 } else {
3081 disabledFlag = 0;
3082 t = -1;
3083 }
3084 z90crypt.device_p[index] = 0;
3085
3086 /* if the type is valid, remove the device from the type_mask */
3087 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3088 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3089 z90crypt.hdware_info->type_mask[t].st_count--;
3090 if (disabledFlag == 1)
3091 z90crypt.hdware_info->type_mask[t].disabled_count--;
3092 }
3093 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3094 z90crypt.mask.st_mask[index] = DEV_GONE;
3095 z90crypt.mask.st_count--;
3096 }
3097 z90crypt.hdware_info->device_type_array[index] = 0;
3098
3099 return 0;
3100}
3101
3102static void
3103destroy_z90crypt(void)
3104{
3105 int i;
3106
3107 for (i = 0; i < z90crypt.max_count; i++)
3108 if (z90crypt.device_p[i])
3109 destroy_crypto_device(i);
3110 kfree(z90crypt.hdware_info);
3111 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3112}
3113
/**
 * Canned 384-byte test request used by probe_device_type to decide
 * whether an unknown card is a PCICC or a PCICA: a reply beginning
 * 0x00 0x86 identifies a PCICC (see the check in probe_device_type).
 */
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};
3140
/**
 * Distinguish a PCICC from a PCICA by sending a canned test request
 * (static_testmsg) to the device and inspecting the reply: a response
 * starting 0x00 0x86 marks a PCICC, anything else a PCICA.  Sets
 * devPtr->dev_type on success and returns 0; otherwise returns a
 * SEN_*/REC_* status and leaves the card unprobed.
 */
static int
probe_device_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	static unsigned char loc_testmsg[sizeof(static_testmsg)];

	index = devPtr->dev_self_x;
	rv = 0;
	/* do { } while (0): single-exit error-handling block. */
	do {
		/* Work on a scratch copy; the template stays pristine. */
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			/* Map the hardware send status to a SEN_* code. */
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* Poll for the reply: up to 6 tries, 300ms apart. */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* Only REC_NO_WORK is worth retrying. */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}
		if (rv)
			break;
		/* PCICC replies begin 0x00 0x86; anything else is PCICA. */
		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}
3239
/**
 * Canned test request used by probe_PCIXCC_type() to tell PCIXCC MCL2
 * microcode apart from MCL3.  The caller copies the whole array into a
 * local buffer and enqueues sizeof(MCL3_testmsg) - 0x0C bytes of it,
 * so the trailing 0x0C bytes are not part of the transmitted request.
 * NOTE(review): the byte layout presumably follows the AP message
 * format expected by send_to_AP() -- confirm against the coprocessor
 * documentation before modifying.
 */
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
3277
3278static int
3279probe_PCIXCC_type(struct device *devPtr)
3280{
3281 int rv, dv, i, index, length;
3282 unsigned char psmid[8];
3283 static unsigned char loc_testmsg[548];
3284 struct CPRBX *cprbx_p;
3285
3286 index = devPtr->dev_self_x;
3287 rv = 0;
3288 do {
3289 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3290 length = sizeof(MCL3_testmsg) - 0x0C;
3291 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3292 if (dv) {
3293 PDEBUG("dv returned = %d\n", dv);
3294 if (dv == DEV_SEN_EXCEPTION) {
3295 rv = SEN_FATAL_ERROR;
3296 PRINTKC("exception in send to AP %d\n", index);
3297 break;
3298 }
3299 PDEBUG("return value from send_to_AP: %d\n", rv);
3300 switch (dv) {
3301 case DEV_GONE:
3302 PDEBUG("dev %d not available\n", index);
3303 rv = SEN_NOT_AVAIL;
3304 break;
3305 case DEV_ONLINE:
3306 rv = 0;
3307 break;
3308 case DEV_EMPTY:
3309 rv = SEN_NOT_AVAIL;
3310 break;
3311 case DEV_NO_WORK:
3312 rv = SEN_FATAL_ERROR;
3313 break;
3314 case DEV_BAD_MESSAGE:
3315 rv = SEN_USER_ERROR;
3316 break;
3317 case DEV_QUEUE_FULL:
3318 rv = SEN_QUEUE_FULL;
3319 break;
3320 default:
3321 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3322 rv = SEN_NOT_AVAIL;
3323 break;
3324 }
3325 }
3326
3327 if (rv)
3328 break;
3329
3330 for (i = 0; i < 6; i++) {
3331 mdelay(300);
3332 dv = receive_from_AP(index, z90crypt.cdx,
3333 devPtr->dev_resp_l,
3334 devPtr->dev_resp_p, psmid);
3335 PDEBUG("dv returned by DQ = %d\n", dv);
3336 if (dv == DEV_REC_EXCEPTION) {
3337 rv = REC_FATAL_ERROR;
3338 PRINTKC("exception in dequeue %d\n",
3339 index);
3340 break;
3341 }
3342 switch (dv) {
3343 case DEV_ONLINE:
3344 rv = 0;
3345 break;
3346 case DEV_EMPTY:
3347 rv = REC_EMPTY;
3348 break;
3349 case DEV_NO_WORK:
3350 rv = REC_NO_WORK;
3351 break;
3352 case DEV_BAD_MESSAGE:
3353 case DEV_GONE:
3354 default:
3355 rv = REC_NO_RESPONSE;
3356 break;
3357 }
3358 if ((rv != 0) && (rv != REC_NO_WORK))
3359 break;
3360 if (rv == 0)
3361 break;
3362 }
3363 if (rv)
3364 break;
3365 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3366 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3367 devPtr->dev_type = PCIXCC_MCL2;
3368 PDEBUG("device %d is MCL2\n", index);
3369 } else {
3370 devPtr->dev_type = PCIXCC_MCL3;
3371 PDEBUG("device %d is MCL3\n", index);
3372 }
3373 } while (0);
3374 /* In a general error case, the card is not marked online */
3375 return rv;
3376}
3377
/* Register module entry/exit points (both defined earlier in this file). */
module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);