path: root/scripts/mod/file2alias.c
blob: d4dc222a74f33c69fd17dbb7170c41731933aa2b
/* Simple code to turn various tables in an ELF file into alias definitions.
 * This deals with kernel data structures where they should be
 * dealt with: in the kernel source.
 *
 * Copyright 2002-2003  Rusty Russell, IBM Corporation
 *           2003       Kai Germaschewski
 *
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include "modpost.h"

/* We use the ELF typedefs for kernel_ulong_t but bite the bullet and
 * use either stdint.h or inttypes.h for the rest. */
#if KERNEL_ELFCLASS == ELFCLASS32
typedef Elf32_Addr	kernel_ulong_t;
#define BITS_PER_LONG 32
#else
typedef Elf64_Addr	kernel_ulong_t;
#define BITS_PER_LONG 64
#endif
#ifdef __sun__
#include <inttypes.h>
#else
#include <stdint.h>
#endif

#include <ctype.h>

typedef uint32_t	__u32;
typedef uint16_t	__u16;
typedef unsigned char	__u8;

/* Big exception to the "don't include kernel headers into userspace" rule,
 * since userspace potentially has different endianness and word sizes; we
 * handle those differences explicitly below. */
#include "../../include/linux/mod_devicetable.h"

#define ADD(str, sep, cond, field)                              \
do {                                                            \
        strcat(str, sep);                                       \
        if (cond)                                               \
                sprintf(str + strlen(str),                      \
                        sizeof(field) == 1 ? "%02X" :           \
                        sizeof(field) == 2 ? "%04X" :           \
                        sizeof(field) == 4 ? "%08X" : "",       \
                        field);                                 \
        else                                                    \
                sprintf(str + strlen(str), "*");                \
} while(0)
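
/*
 * Worked example (hypothetical values): for a __u16 field the macro picks
 * the "%04X" format, so
 *
 *     ADD(alias, "v", id->match_flags & USB_DEVICE_ID_MATCH_VENDOR,
 *         id->idVendor)
 *
 * appends "v1234" when the match flag is set and idVendor is 0x1234, and
 * just "v*" when the flag is clear.
 */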

/* Always end in a wildcard, for future extension */
static inline void add_wildcard(char *str)
{
	int len = strlen(str);

	if (str[len - 1] != '*')
		strcat(str + len, "*");
}

unsigned int cross_build = 0;
/**
 * Check that the size of the device_id type is consistent with the size of
 * the section in the .o file. If they are inconsistent, userspace and the
 * kernel disagree on the actual size, which is a bug.
 * Also verify that the final entry in the table is all zeros.
 * Skip both checks if the build host differs from the target host and the
 * sizes differ.
 **/
static void device_id_check(const char *modname, const char *device_id,
			    unsigned long size, unsigned long id_size,
			    void *symval)
{
	int i;

	if (size % id_size || size < id_size) {
		if (cross_build != 0)
			return;
		fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
		      "of the size of section __mod_%s_device_table=%lu.\n"
		      "Fix definition of struct %s_device_id "
		      "in mod_devicetable.h\n",
		      modname, device_id, id_size, device_id, size, device_id);
	}
	/* Verify last one is a terminator */
	for (i = 0; i < id_size; i++ ) {
		if (*(uint8_t*)(symval+size-id_size+i)) {
			fprintf(stderr,"%s: struct %s_device_id is %lu bytes.  "
				"The last of %lu is:\n",
				modname, device_id, id_size, size / id_size);
			for (i = 0; i < id_size; i++ )
				fprintf(stderr,"0x%02x ",
					*(uint8_t*)(symval+size-id_size+i) );
			fprintf(stderr,"\n");
			fatal("%s: struct %s_device_id is not terminated "
				"with a NULL entry!\n", modname, device_id);
		}
	}
}

/* USB is special because the bcdDevice can be matched against a numeric range */
/* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
static void do_usb_entry(struct usb_device_id *id,
			 unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
			 unsigned char range_lo, unsigned char range_hi,
			 struct module *mod)
{
	char alias[500];
	strcpy(alias, "usb:");
	ADD(alias, "v", id->match_flags&USB_DEVICE_ID_MATCH_VENDOR,
	    id->idVendor);
	ADD(alias, "p", id->match_flags&USB_DEVICE_ID_MATCH_PRODUCT,
	    id->idProduct);

	strcat(alias, "d");
	if (bcdDevice_initial_digits)
		sprintf(alias + strlen(alias), "%0*X",
			bcdDevice_initial_digits, bcdDevice_initial);
	if (range_lo == range_hi)
		sprintf(alias + strlen(alias), "%u", range_lo);
	else if (range_lo > 0 || range_hi < 9)
		sprintf(alias + strlen(alias), "[%u-%u]", range_lo, range_hi);
	if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
		strcat(alias, "*");

	ADD(alias, "dc", id->match_flags&USB_DEVICE_ID_MATCH_DEV_CLASS,
	    id->bDeviceClass);
	ADD(alias, "dsc",
	    id->match_flags&USB_DEVICE_ID_MATCH_DEV_SUBCLASS,
	    id->bDeviceSubClass);
	ADD(alias, "dp",
	    id->match_flags&USB_DEVICE_ID_MATCH_DEV_PROTOCOL,
	    id->bDeviceProtocol);
	ADD(alias, "ic",
	    id->match_flags&USB_DEVICE_ID_MATCH_INT_CLASS,
	    id->bInterfaceClass);
	ADD(alias, "isc",
	    id->match_flags&USB_DEVICE_ID_MATCH_INT_SUBCLASS,
	    id->bInterfaceSubClass);
	ADD(alias, "ip",
	    id->match_flags&USB_DEVICE_ID_MATCH_INT_PROTOCOL,
	    id->bInterfaceProtocol);

	add_wildcard(alias);
	buf_printf(&mod->dev_table_buf,
		   "MODULE_ALIAS(\"%s\");\n", alias);
}

static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
{
	unsigned int devlo, devhi;
	unsigned char chi, clo;
	int ndigits;

	id->match_flags = TO_NATIVE(id->match_flags);
	id->idVendor = TO_NATIVE(id->idVendor);
	id->idProduct = TO_NATIVE(id->idProduct);

	devlo = id->match_flags & USB_DEVICE_ID_MATCH_DEV_LO ?
		TO_NATIVE(id->bcdDevice_lo) : 0x0U;
	devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
		TO_NATIVE(id->bcdDevice_hi) : ~0x0U;

	/*
	 * Some modules (e.g. visor) have empty slots as placeholders for
	 * run-time specification; these would result in a catch-all alias.
	 */
	if (!(id->idVendor | id->idProduct | id->bDeviceClass | id->bInterfaceClass))
		return;

	/* Convert numeric bcdDevice range into fnmatch-able pattern(s) */
	for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
		clo = devlo & 0xf;
		chi = devhi & 0xf;
		if (chi > 9)	/* it's bcd not hex */
			chi = 9;
		devlo >>= 4;
		devhi >>= 4;

		if (devlo == devhi || !ndigits) {
			do_usb_entry(id, devlo, ndigits, clo, chi, mod);
			break;
		}

		if (clo > 0)
			do_usb_entry(id, devlo++, ndigits, clo, 9, mod);

		if (chi < 9)
			do_usb_entry(id, devhi--, ndigits, 0, chi, mod);
	}
}
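
/*
 * Worked example (hypothetical range): for bcdDevice_lo = 0x0120 and
 * bcdDevice_hi = 0x0299 the loop above emits two aliases whose device part
 * is "d01[2-9]*" (covering 0x0120-0x0199) and "d02*" (covering
 * 0x0200-0x0299); bcdDevice is BCD, so each digit only runs 0-9.
 */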

static void do_usb_table(void *symval, unsigned long size,
			 struct module *mod)
{
	unsigned int i;
	const unsigned long id_size = sizeof(struct usb_device_id);

	device_id_check(mod->name, "usb", size, id_size, symval);

	/* Leave last one: it's the terminator. */
	size -= id_size;

	for (i = 0; i < size; i += id_size)
		do_usb_entry_multi(symval + i, mod);
}

/* Looks like: hid:bNvNpN */
static int do_hid_entry(const char *filename,
			     struct hid_device_id *id, char *alias)
{
	id->vendor = TO_NATIVE(id->vendor);
	id->product = TO_NATIVE(id->product);

	sprintf(alias, "hid:b%04X", id->bus);
	ADD(alias, "v", id->vendor != HID_ANY_ID, id->vendor);
	ADD(alias, "p", id->product != HID_ANY_ID, id->product);

	return 1;
}

/* Looks like: ieee1394:venNmoNspNverN */
static int do_ieee1394_entry(const char *filename,
			     struct ieee1394_device_id *id, char *alias)
{
	id->match_flags = TO_NATIVE(id->match_flags);
	id->vendor_id = TO_NATIVE(id->vendor_id);
	id->model_id = TO_NATIVE(id->model_id);
	id->specifier_id = TO_NATIVE(id->specifier_id);
	id->version = TO_NATIVE(id->version);

	strcpy(alias, "ieee1394:");
	ADD(alias, "ven", id->match_flags & IEEE1394_MATCH_VENDOR_ID,
	    id->vendor_id);
	ADD(alias, "mo", id->match_flags & IEEE1394_MATCH_MODEL_ID,
	    id->model_id);
	ADD(alias, "sp", id->match_flags & IEEE1394_MATCH_SPECIFIER_ID,
	    id->specifier_id);
	ADD(alias, "ver", id->match_flags & IEEE1394_MATCH_VERSION,
	    id->version);

	add_wildcard(alias);
	return 1;
}

/* Looks like: pci:vNdNsvNsdNbcNscNiN. */
static int do_pci_entry(const char *filename,
			struct pci_device_id *id, char *alias)
{
	/* Class field can be divided into these three. */
	unsigned char baseclass, subclass, interface,
		baseclass_mask, subclass_mask, interface_mask;

	id->vendor = TO_NATIVE(id->vendor);
	id->device = TO_NATIVE(id->device);
	id->subvendor = TO_NATIVE(id->subvendor);
	id->subdevice = TO_NATIVE(id->subdevice);
	id->class = TO_NATIVE(id->class);
	id->class_mask = TO_NATIVE(id->class_mask);

	strcpy(alias, "pci:");
	ADD(alias, "v", id->vendor != PCI_ANY_ID, id->vendor);
	ADD(alias, "d", id->device != PCI_ANY_ID, id->device);
	ADD(alias, "sv", id->subvendor != PCI_ANY_ID, id->subvendor);
	ADD(alias, "sd", id->subdevice != PCI_ANY_ID, id->subdevice);

	baseclass = (id->class) >> 16;
	baseclass_mask = (id->class_mask) >> 16;
	subclass = (id->class) >> 8;
	subclass_mask = (id->class_mask) >> 8;
	interface = id->class;
	interface_mask = id->class_mask;

	if ((baseclass_mask != 0 && baseclass_mask != 0xFF)
	    || (subclass_mask != 0 && subclass_mask != 0xFF)
	    || (interface_mask != 0 && interface_mask != 0xFF)) {
		warn("Can't handle masks in %s:%04X\n",
		     filename, id->class_mask);
		return 0;
	}

	ADD(alias, "bc", baseclass_mask == 0xFF, baseclass);
	ADD(alias, "sc", subclass_mask == 0xFF, subclass);
	ADD(alias, "i", interface_mask == 0xFF, interface);
	add_wildcard(alias);
	return 1;
}
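
/*
 * Worked example (hypothetical entry): vendor = 0x8086, device = 0x100E,
 * subvendor = subdevice = PCI_ANY_ID and class = class_mask = 0 produce
 *
 *     pci:v00008086d0000100Esv*sd*bc*sc*i*
 *
 * All four ID fields are __u32, hence the eight-digit hex groups.
 */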

/* looks like: "ccw:tNmNdtNdmN" */
static int do_ccw_entry(const char *filename,
			struct ccw_device_id *id, char *alias)
{
	id->match_flags = TO_NATIVE(id->match_flags);
	id->cu_type = TO_NATIVE(id->cu_type);
	id->cu_model = TO_NATIVE(id->cu_model);
	id->dev_type = TO_NATIVE(id->dev_type);
	id->dev_model = TO_NATIVE(id->dev_model);

	strcpy(alias, "ccw:");
	ADD(alias, "t", id->match_flags&CCW_DEVICE_ID_MATCH_CU_TYPE,
	    id->cu_type);
	ADD(alias, "m", id->match_flags&CCW_DEVICE_ID_MATCH_CU_MODEL,
	    id->cu_model);
	ADD(alias, "dt", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_TYPE,
	    id->dev_type);
	ADD(alias, "dm", id->match_flags&CCW_DEVICE_ID_MATCH_DEVICE_MODEL,
	    id->dev_model);
	add_wildcard(alias);
	return 1;
}

/* looks like: "ap:tN" */
static int do_ap_entry(const char *filename,
		       struct ap_device_id *id, char *alias)
{
	sprintf(alias, "ap:t%02X*", id->dev_type);
	return 1;
}

/* looks like: "css:tN" */
static int do_css_entry(const char *filename,
			struct css_device_id *id, char *alias)
{
	sprintf(alias, "css:t%01X", id->type);
	return 1;
}

/* Looks like: "serio:tyNprNidNexN" */
static int do_serio_entry(const char *filename,
			  struct serio_device_id *id, char *alias)
{
	id->type = TO_NATIVE(id->type);
	id->proto = TO_NATIVE(id->proto);
	id->id = TO_NATIVE(id->id);
	id->extra = TO_NATIVE(id->extra);

	strcpy(alias, "serio:");
	ADD(alias, "ty", id->type != SERIO_ANY, id->type);
	ADD(alias, "pr", id->proto != SERIO_ANY, id->proto);
	ADD(alias, "id", id->id != SERIO_ANY, id->id);
	ADD(alias, "ex", id->extra != SERIO_ANY, id->extra);

	add_wildcard(alias);
	return 1;
}

/* looks like: "acpi:ACPI0003 or acpi:PNP0C0B" or "acpi:LNXVIDEO" */
static int do_acpi_entry(const char *filename,
			struct acpi_device_id *id, char *alias)
{
	sprintf(alias, "acpi*:%s:*", id->id);
	return 1;
}

/* looks like: "pnp:dD" */
static void do_pnp_device_entry(void *symval, unsigned long size,
				struct module *mod)
{
	const unsigned long id_size = sizeof(struct pnp_device_id);
	const unsigned int count = (size / id_size)-1;
	const struct pnp_device_id *devs = symval;
	unsigned int i;

	device_id_check(mod->name, "pnp", size, id_size, symval);

	for (i = 0; i < count; i++) {
		const char *id = (char *)devs[i].id;

		buf_printf(&mod->dev_table_buf,
			   "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
		buf_printf(&mod->dev_table_buf,
			   "MODULE_ALIAS(\"acpi*:%s:*\");\n", id);
	}
}

/* looks like: "pnp:dD" for every device of the card */
static void do_pnp_card_entries(void *symval, unsigned long size,
				struct module *mod)
{
	const unsigned long id_size = sizeof(struct pnp_card_device_id);
	const unsigned int count = (size / id_size)-1;
	const struct pnp_card_device_id *cards = symval;
	unsigned int i;

	device_id_check(mod->name, "pnp", size, id_size, symval);

	for (i = 0; i < count; i++) {
		unsigned int j;
		const struct pnp_card_device_id *card = &cards[i];

		for (j = 0; j < PNP_MAX_DEVICES; j++) {
			const char *id = (char *)card->devs[j].id;
			int i2, j2;
			int dup = 0;

			if (!id[0])
				break;

			/* find duplicate, already added value */
			for (i2 = 0; i2 < i && !dup; i2++) {
				const struct pnp_card_device_id *card2 = &cards[i2];

				for (j2 = 0; j2 < PNP_MAX_DEVICES; j2++) {
					const char *id2 = (char *)card2->devs[j2].id;

					if (!id2[0])
						break;

					if (!strcmp(id, id2)) {
						dup = 1;
						break;
					}
				}
			}

			/* add an individual alias for every device entry */
			if (!dup) {
				buf_printf(&mod->dev_table_buf,
					   "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
				buf_printf(&mod->dev_table_buf,
					   "MODULE_ALIAS(\"acpi*:%s:*\");\n", id);
			}
		}
	}
}

/* Looks like: pcmcia:mNcNfNfnNpfnNvaNvbNvcNvdN. */
static int do_pcmcia_entry(const char *filename,
			   struct pcmcia_device_id *id, char *alias)
{
	unsigned int i;

	id->match_flags = TO_NATIVE(id->match_flags);
	id->manf_id = TO_NATIVE(id->manf_id);
	id->card_id = TO_NATIVE(id->card_id);
	id->func_id = TO_NATIVE(id->func_id);
	id->function = TO_NATIVE(id->function);
	id->device_no = TO_NATIVE(id->device_no);

	for (i=0; i<4; i++) {
		id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]);
       }

       strcpy(alias, "pcmcia:");
       ADD(alias, "m", id->match_flags & PCMCIA_DEV_ID_MATCH_MANF_ID,
	   id->manf_id);
       ADD(alias, "c", id->match_flags & PCMCIA_DEV_ID_MATCH_CARD_ID,
	   id->card_id);
       ADD(alias, "f", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNC_ID,
	   id->func_id);
       ADD(alias, "fn", id->match_flags & PCMCIA_DEV_ID_MATCH_FUNCTION,
	   id->function);
       ADD(alias, "pfn", id->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO,
	   id->device_no);
       ADD(alias, "pa", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID1, id->prod_id_hash[0]);
       ADD(alias, "pb", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID2, id->prod_id_hash[1]);
       ADD(alias, "pc", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID3, id->prod_id_hash[2]);
       ADD(alias, "pd", id->match_flags & PCMCIA_DEV_ID_MATCH_PROD_ID4, id->prod_id_hash[3]);

	add_wildcard(alias);
       return 1;
}



static int do_of_entry(const char *filename, struct of_device_id *of,
		       char *alias)
{
	int len;
	char *tmp;

	len = sprintf(alias, "of:N%sT%s",
		      of->name[0] ? of->name : "*",
		      of->type[0] ? of->type : "*");

	if (of->compatible[0])
		sprintf(&alias[len], "%sC%s",
			of->type[0] ? "*" : "",
			of->compatible);

	/* Replace all whitespace with underscores */
	for (tmp = alias; tmp && *tmp; tmp++)
		if (isspace(*tmp))
			*tmp = '_';

	add_wildcard(alias);
	return 1;
}
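
/*
 * Worked example (hypothetical entry): name = "", type = "serial" and
 * compatible = "ns16550" yield "of:N*Tserial*Cns16550*", the final "*"
 * coming from add_wildcard().
 */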

static int do_vio_entry(const char *filename, struct vio_device_id *vio,
		char *alias)
{
	char *tmp;

	sprintf(alias, "vio:T%sS%s", vio->type[0] ? vio->type : "*",
			vio->compat[0] ? vio->compat : "*");

	/* Replace all whitespace with underscores */
	for (tmp = alias; tmp && *tmp; tmp++)
		if (isspace (*tmp))
			*tmp = '_';

	add_wildcard(alias);
	return 1;
}

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static void do_input(char *alias,
		     kernel_ulong_t *arr, unsigned int min, unsigned int max)
{
	unsigned int i;

	for (i = min; i < max; i++)
		if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG)))
			sprintf(alias + strlen(alias), "%X,*", i);
}
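
/*
 * Example: if bits 1 and 2 are set within [min, max), the helper above
 * appends "1,*2,*" to the alias.
 */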

/* input:b0v0p0e0-eXkXrXaXmXlXsXfXwX where X is comma-separated %02X. */
static int do_input_entry(const char *filename, struct input_device_id *id,
			  char *alias)
{
	sprintf(alias, "input:");

	ADD(alias, "b", id->flags & INPUT_DEVICE_ID_MATCH_BUS, id->bustype);
	ADD(alias, "v", id->flags & INPUT_DEVICE_ID_MATCH_VENDOR, id->vendor);
	ADD(alias, "p", id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT, id->product);
	ADD(alias, "e", id->flags & INPUT_DEVICE_ID_MATCH_VERSION, id->version);

	sprintf(alias + strlen(alias), "-e*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_EVBIT)
		do_input(alias, id->evbit, 0, INPUT_DEVICE_ID_EV_MAX);
	sprintf(alias + strlen(alias), "k*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_KEYBIT)
		do_input(alias, id->keybit,
			 INPUT_DEVICE_ID_KEY_MIN_INTERESTING,
			 INPUT_DEVICE_ID_KEY_MAX);
	sprintf(alias + strlen(alias), "r*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_RELBIT)
		do_input(alias, id->relbit, 0, INPUT_DEVICE_ID_REL_MAX);
	sprintf(alias + strlen(alias), "a*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_ABSBIT)
		do_input(alias, id->absbit, 0, INPUT_DEVICE_ID_ABS_MAX);
	sprintf(alias + strlen(alias), "m*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_MSCIT)
		do_input(alias, id->mscbit, 0, INPUT_DEVICE_ID_MSC_MAX);
	sprintf(alias + strlen(alias), "l*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_LEDBIT)
		do_input(alias, id->ledbit, 0, INPUT_DEVICE_ID_LED_MAX);
	sprintf(alias + strlen(alias), "s*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_SNDBIT)
		do_input(alias, id->sndbit, 0, INPUT_DEVICE_ID_SND_MAX);
	sprintf(alias + strlen(alias), "f*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_FFBIT)
		do_input(alias, id->ffbit, 0, INPUT_DEVICE_ID_FF_MAX);
	sprintf(alias + strlen(alias), "w*");
	if (id->flags & INPUT_DEVICE_ID_MATCH_SWBIT)
		do_input(alias, id->swbit, 0, INPUT_DEVICE_ID_SW_MAX);
	return 1;
}

static int do_eisa_entry(const char *filename, struct eisa_device_id *eisa,
		char *alias)
{
	if (eisa->sig[0])
		sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", eisa->sig);
	else
		strcat(alias, "*");
	return 1;
}

/* Looks like: parisc:tNhvNrevNsvN */
static int do_parisc_entry(const char *filename, struct parisc_device_id *id,
		char *alias)
{
	id->hw_type = TO_NATIVE(id->hw_type);
	id->hversion = TO_NATIVE(id->hversion);
	id->hversion_rev = TO_NATIVE(id->hversion_rev);
	id->sversion = TO_NATIVE(id->sversion);

	strcpy(alias, "parisc:");
	ADD(alias, "t", id->hw_type != PA_HWTYPE_ANY_ID, id->hw_type);
	ADD(alias, "hv", id->hversion != PA_HVERSION_ANY_ID, id->hversion);
	ADD(alias, "rev", id->hversion_rev != PA_HVERSION_REV_ANY_ID, id->hversion_rev);
	ADD(alias, "sv", id->sversion != PA_SVERSION_ANY_ID, id->sversion);

	add_wildcard(alias);
	return 1;
}

/* Looks like: sdio:cNvNdN. */
static int do_sdio_entry(const char *filename,
			struct sdio_device_id *id, char *alias)
{
	id->class = TO_NATIVE(id->class);
	id->vendor = TO_NATIVE(id->vendor);
	id->device = TO_NATIVE(id->device);

	strcpy(alias, "sdio:");
	ADD(alias, "c", id->class != (__u8)SDIO_ANY_ID, id->class);
	ADD(alias, "v", id->vendor != (__u16)SDIO_ANY_ID, id->vendor);
	ADD(alias, "d", id->device != (__u16)SDIO_ANY_ID, id->device);
	add_wildcard(alias);
	return 1;
}

/* Looks like: ssb:vNidNrevN. */
static int do_ssb_entry(const char *filename,
			struct ssb_device_id *id, char *alias)
{
	id->vendor = TO_NATIVE(id->vendor);
	id->coreid = TO_NATIVE(id->coreid);
	id->revision = TO_NATIVE(id->revision);

	strcpy(alias, "ssb:");
	ADD(alias, "v", id->vendor != SSB_ANY_VENDOR, id->vendor);
	ADD(alias, "id", id->coreid != SSB_ANY_ID, id->coreid);
	ADD(alias, "rev", id->revision != SSB_ANY_REV, id->revision);
	add_wildcard(alias);
	return 1;
}

/* Looks like: virtio:dNvN */
static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
			   char *alias)
{
	id->device = TO_NATIVE(id->device);
	id->vendor = TO_NATIVE(id->vendor);

	strcpy(alias, "virtio:");
	ADD(alias, "d", 1, id->device);
	ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);

	add_wildcard(alias);
	return 1;
}

/* Looks like: i2c:S */
static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
			char *alias)
{
	sprintf(alias, I2C_MODULE_PREFIX "%s", id->name);

	return 1;
}

static const struct dmifield {
	const char *prefix;
	int field;
} dmi_fields[] = {
	{ "bvn", DMI_BIOS_VENDOR },
	{ "bvr", DMI_BIOS_VERSION },
	{ "bd",  DMI_BIOS_DATE },
	{ "svn", DMI_SYS_VENDOR },
	{ "pn",  DMI_PRODUCT_NAME },
	{ "pvr", DMI_PRODUCT_VERSION },
	{ "rvn", DMI_BOARD_VENDOR },
	{ "rn",  DMI_BOARD_NAME },
	{ "rvr", DMI_BOARD_VERSION },
	{ "cvn", DMI_CHASSIS_VENDOR },
	{ "ct",  DMI_CHASSIS_TYPE },
	{ "cvr", DMI_CHASSIS_VERSION },
	{ NULL,  DMI_NONE }
};

static void dmi_ascii_filter(char *d, const char *s)
{
	/* Filter out characters we don't want to see in the modalias string */
	for (; *s; s++)
		if (*s > ' ' && *s < 127 && *s != ':')
			*(d++) = *s;

	*d = 0;
}


static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
			char *alias)
{
	int i, j;

	sprintf(alias, "dmi*");

	for (i = 0; i < ARRAY_SIZE(dmi_fields); i++) {
		for (j = 0; j < 4; j++) {
			if (id->matches[j].slot &&
			    id->matches[j].slot == dmi_fields[i].field) {
				sprintf(alias + strlen(alias), ":%s*",
					dmi_fields[i].prefix);
				dmi_ascii_filter(alias + strlen(alias),
						 id->matches[j].substr);
				strcat(alias, "*");
			}
		}
	}

	strcat(alias, ":");
	return 1;
}
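
/*
 * Worked example (hypothetical entry): matching DMI_SYS_VENDOR "Dell Inc."
 * and DMI_PRODUCT_NAME "XPS 13" yields
 *
 *     dmi*:svn*DellInc.*:pn*XPS13*:
 *
 * since dmi_ascii_filter() strips spaces and ':' from the match strings.
 */
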
/* Ignore any prefix, e.g. some architectures prepend _ */
static inline int sym_is(const char *symbol, const char *name)
{
	const char *match;

	match = strstr(symbol, name);
	if (!match)
		return 0;
	return match[strlen(name)] == '\0';
}

static void do_table(void *symval, unsigned long size,
		     unsigned long id_size,
		     const char *device_id,
		     void *function,
		     struct module *mod)
{
	unsigned int i;
	char alias[500];
	int (*do_entry)(const char *, void *entry, char *alias) = function;

	device_id_check(mod->name, device_id, size, id_size, symval);
	/* Leave last one: it's the terminator. */
	size -= id_size;

	for (i = 0; i < size; i += id_size) {
		if (do_entry(mod->name, symval+i, alias)) {
			buf_printf(&mod->dev_table_buf,
				   "MODULE_ALIAS(\"%s\");\n", alias);
		}
	}
}

/* Create MODULE_ALIAS() statements.
 * We cannot write the actual output C source yet,
 * so we buffer it in mod->dev_table_buf. */
void handle_moddevtable(struct module *mod, struct elf_info *info,
			Elf_Sym *sym, const char *symname)
{
	void *symval;
	char *zeros = NULL;

	/* We're looking for a section relative symbol */
	if (!sym->st_shndx || sym->st_shndx >= info->hdr->e_shnum)
		return;

	/* Handle all-NULL symbols allocated into .bss */
	if (info->sechdrs[sym->st_shndx].sh_type & SHT_NOBITS) {
		zeros = calloc(1, sym->st_size);
		symval = zeros;
	} else {
		symval = (void *)info->hdr
			+ info->sechdrs[sym->st_shndx].sh_offset
			+ sym->st_value;
	}

	if (sym_is(symname, "__mod_pci_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct pci_device_id), "pci",
			 do_pci_entry, mod);
	else if (sym_is(symname, "__mod_usb_device_table"))
		/* special case to handle bcdDevice ranges */
		do_usb_table(symval, sym->st_size, mod);
	else if (sym_is(symname, "__mod_hid_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct hid_device_id), "hid",
			 do_hid_entry, mod);
	else if (sym_is(symname, "__mod_ieee1394_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct ieee1394_device_id), "ieee1394",
			 do_ieee1394_entry, mod);
	else if (sym_is(symname, "__mod_ccw_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct ccw_device_id), "ccw",
			 do_ccw_entry, mod);
	else if (sym_is(symname, "__mod_ap_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct ap_device_id), "ap",
			 do_ap_entry, mod);
	else if (sym_is(symname, "__mod_css_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct css_device_id), "css",
			 do_css_entry, mod);
	else if (sym_is(symname, "__mod_serio_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct serio_device_id), "serio",
			 do_serio_entry, mod);
	else if (sym_is(symname, "__mod_acpi_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct acpi_device_id), "acpi",
			 do_acpi_entry, mod);
	else if (sym_is(symname, "__mod_pnp_device_table"))
		do_pnp_device_entry(symval, sym->st_size, mod);
	else if (sym_is(symname, "__mod_pnp_card_device_table"))
		do_pnp_card_entries(symval, sym->st_size, mod);
	else if (sym_is(symname, "__mod_pcmcia_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct pcmcia_device_id), "pcmcia",
			 do_pcmcia_entry, mod);
        else if (sym_is(symname, "__mod_of_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct of_device_id), "of",
			 do_of_entry, mod);
        else if (sym_is(symname, "__mod_vio_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct vio_device_id), "vio",
			 do_vio_entry, mod);
	else if (sym_is(symname, "__mod_input_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct input_device_id), "input",
			 do_input_entry, mod);
	else if (sym_is(symname, "__mod_eisa_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct eisa_device_id), "eisa",
			 do_eisa_entry, mod);
	else if (sym_is(symname, "__mod_parisc_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct parisc_device_id), "parisc",
			 do_parisc_entry, mod);
	else if (sym_is(symname, "__mod_sdio_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct sdio_device_id), "sdio",
			 do_sdio_entry, mod);
	else if (sym_is(symname, "__mod_ssb_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct ssb_device_id), "ssb",
			 do_ssb_entry, mod);
	else if (sym_is(symname, "__mod_virtio_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct virtio_device_id), "virtio",
			 do_virtio_entry, mod);
	else if (sym_is(symname, "__mod_i2c_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct i2c_device_id), "i2c",
			 do_i2c_entry, mod);
	else if (sym_is(symname, "__mod_dmi_device_table"))
		do_table(symval, sym->st_size,
			 sizeof(struct dmi_system_id), "dmi",
			 do_dmi_entry, mod);
	free(zeros);
}

/* Now add our buffered information to the generated C source */
void add_moddevtable(struct buffer *buf, struct module *mod)
{
	buf_printf(buf, "\n");
	buf_write(buf, mod->dev_table_buf.p, mod->dev_table_buf.pos);
	free(mod->dev_table_buf.p);
}
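
/*
 * Net effect (illustrative): the buffered lines written above end up in the
 * module's generated C source as plain statements such as
 *
 *     MODULE_ALIAS("pci:v00008086d0000100Esv*sd*bc*sc*i*");
 *
 * which later tooling (e.g. depmod building modules.alias) can use to map a
 * device's modalias string to this module.
 */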
sleft; else mapbytes = PAGE_SIZE; sg_set_page(sg, virt_to_page(pl022->dummypage), mapbytes, 0); bytesleft -= mapbytes; dev_dbg(&pl022->adev->dev, "set RX/TX to dummy page %d bytes, %d left\n", mapbytes, bytesleft); } } BUG_ON(bytesleft); } /** * configure_dma - configures the channels for the next transfer * @pl022: SSP driver's private data structure */ static int configure_dma(struct pl022 *pl022) { struct dma_slave_config rx_conf = { .src_addr = SSP_DR(pl022->phybase), .direction = DMA_FROM_DEVICE, .src_maxburst = pl022->vendor->fifodepth >> 1, }; struct dma_slave_config tx_conf = { .dst_addr = SSP_DR(pl022->phybase), .direction = DMA_TO_DEVICE, .dst_maxburst = pl022->vendor->fifodepth >> 1, }; unsigned int pages; int ret; int rx_sglen, tx_sglen; struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; struct dma_async_tx_descriptor *rxdesc; struct dma_async_tx_descriptor *txdesc; /* Check that the channels are available */ if (!rxchan || !txchan) return -ENODEV; switch (pl022->read) { case READING_NULL: /* Use the same as for writing */ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case READING_U8: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case READING_U16: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case READING_U32: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; } switch (pl022->write) { case WRITING_NULL: /* Use the same as for reading */ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case WRITING_U8: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case WRITING_U16: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case WRITING_U32: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; } /* SPI pecularity: we need to read and write the same width */ if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) rx_conf.src_addr_width = tx_conf.dst_addr_width; if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) tx_conf.dst_addr_width = rx_conf.src_addr_width; BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); dmaengine_slave_config(rxchan, &rx_conf); dmaengine_slave_config(txchan, &tx_conf); /* Create sglists for the transfers */ pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); if (ret) goto err_alloc_rx_sg; ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); if (ret) goto err_alloc_tx_sg; /* Fill in the scatterlists for the RX+TX buffers */ setup_dma_scatter(pl022, pl022->rx, pl022->cur_transfer->len, &pl022->sgt_rx); setup_dma_scatter(pl022, pl022->tx, pl022->cur_transfer->len, &pl022->sgt_tx); /* Map DMA buffers */ rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); if (!rx_sglen) goto err_rx_sgmap; tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); if (!tx_sglen) goto err_tx_sgmap; /* Send both scatterlists */ rxdesc = rxchan->device->device_prep_slave_sg(rxchan, pl022->sgt_rx.sgl, rx_sglen, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto err_rxdesc; txdesc = txchan->device->device_prep_slave_sg(txchan, pl022->sgt_tx.sgl, tx_sglen, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) goto err_txdesc; /* Put the callback on the RX transfer only, that should finish last */ rxdesc->callback = dma_callback; rxdesc->callback_param = pl022; /* Submit and fire RX and TX 
with TX last so we're ready to read! */ dmaengine_submit(rxdesc); dmaengine_submit(txdesc); dma_async_issue_pending(rxchan); dma_async_issue_pending(txchan); return 0; err_txdesc: dmaengine_terminate_all(txchan); err_rxdesc: dmaengine_terminate_all(rxchan); dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); err_tx_sgmap: dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, pl022->sgt_tx.nents, DMA_FROM_DEVICE); err_rx_sgmap: sg_free_table(&pl022->sgt_tx); err_alloc_tx_sg: sg_free_table(&pl022->sgt_rx); err_alloc_rx_sg: return -ENOMEM; } static int __init pl022_dma_probe(struct pl022 *pl022) { dma_cap_mask_t mask; /* Try to acquire a generic DMA engine slave channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* * We need both RX and TX channels to do DMA, else do none * of them. */ pl022->dma_rx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_rx_param); if (!pl022->dma_rx_channel) { dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); goto err_no_rxchan; } pl022->dma_tx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_tx_param); if (!pl022->dma_tx_channel) { dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); goto err_no_txchan; } pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!pl022->dummypage) { dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); goto err_no_dummypage; } dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", dma_chan_name(pl022->dma_rx_channel), dma_chan_name(pl022->dma_tx_channel)); return 0; err_no_dummypage: dma_release_channel(pl022->dma_tx_channel); err_no_txchan: dma_release_channel(pl022->dma_rx_channel); pl022->dma_rx_channel = NULL; err_no_rxchan: dev_err(&pl022->adev->dev, "Failed to work in dma mode, work without dma!\n"); return -ENODEV; } static void terminate_dma(struct pl022 *pl022) { struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; dmaengine_terminate_all(rxchan); dmaengine_terminate_all(txchan); unmap_free_dma_scatter(pl022); } static void pl022_dma_remove(struct pl022 *pl022) { if (pl022->busy) terminate_dma(pl022); if (pl022->dma_tx_channel) dma_release_channel(pl022->dma_tx_channel); if (pl022->dma_rx_channel) dma_release_channel(pl022->dma_rx_channel); kfree(pl022->dummypage); } #else static inline int configure_dma(struct pl022 *pl022) { return -ENODEV; } static inline int pl022_dma_probe(struct pl022 *pl022) { return 0; } static inline void pl022_dma_remove(struct pl022 *pl022) { } #endif /** * pl022_interrupt_handler - Interrupt handler for SSP controller * * This function handles interrupts generated for an interrupt based transfer. * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the * current message's state as STATE_ERROR and schedule the tasklet * pump_transfers which will do the postprocessing of the current message by * calling giveback(). Otherwise it reads data from RX FIFO till there is no * more data, and writes data in TX FIFO till it is not full. If we complete * the transfer we move to the next transfer and schedule the tasklet. 
*/ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) { struct pl022 *pl022 = dev_id; struct spi_message *msg = pl022->cur_msg; u16 irq_status = 0; u16 flag = 0; if (unlikely(!msg)) { dev_err(&pl022->adev->dev, "bad message state in interrupt handler"); /* Never fail */ return IRQ_HANDLED; } /* Read the Interrupt Status Register */ irq_status = readw(SSP_MIS(pl022->virtbase)); if (unlikely(!irq_status)) return IRQ_NONE; /* * This handles the FIFO interrupts, the timeout * interrupts are flatly ignored, they cannot be * trusted. */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) dev_err(&pl022->adev->dev, "TXFIFO is full\n"); /* * Disable and clear interrupts, disable SSP, * mark message with bad status so it can be * retried. */ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); msg->state = STATE_ERROR; /* Schedule message queue handler */ tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } readwriter(pl022); if ((pl022->tx == pl022->tx_end) && (flag == 0)) { flag = 1; /* Disable Transmit interrupt */ writew(readw(SSP_IMSC(pl022->virtbase)) & (~SSP_IMSC_MASK_TXIM), SSP_IMSC(pl022->virtbase)); } /* * Since all transactions must write as much as shall be read, * we can conclude the entire transaction once RX is complete. * At this point, all TX will always be finished. */ if (pl022->rx >= pl022->rx_end) { writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); if (unlikely(pl022->rx > pl022->rx_end)) { dev_warn(&pl022->adev->dev, "read %u surplus " "bytes (did you request an odd " "number of bytes on a 16bit bus?)\n", (u32) (pl022->rx - pl022->rx_end)); } /* Update total bytes transferred */ msg->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip-> cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ msg->state = next_transfer(pl022); tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } return IRQ_HANDLED; } /** * This sets up the pointers to memory for the next message to * send out on the SPI bus. */ static int set_up_next_transfer(struct pl022 *pl022, struct spi_transfer *transfer) { int residue; /* Sanity check the message for this bus width */ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; if (unlikely(residue != 0)) { dev_err(&pl022->adev->dev, "message of %u bytes to transmit but the current " "chip bus has a data width of %u bytes!\n", pl022->cur_transfer->len, pl022->cur_chip->n_bytes); dev_err(&pl022->adev->dev, "skipping this message\n"); return -EIO; } pl022->tx = (void *)transfer->tx_buf; pl022->tx_end = pl022->tx + pl022->cur_transfer->len; pl022->rx = (void *)transfer->rx_buf; pl022->rx_end = pl022->rx + pl022->cur_transfer->len; pl022->write = pl022->tx ? pl022->cur_chip->write : WRITING_NULL; pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; return 0; } /** * pump_transfers - Tasklet function which schedules next transfer * when running in interrupt or DMA transfer mode. 
* @data: SSP driver private data structure * */ static void pump_transfers(unsigned long data) { struct pl022 *pl022 = (struct pl022 *) data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; /* Get current state information */ message = pl022->cur_msg; transfer = pl022->cur_transfer; /* Handle for abort */ if (message->state == STATE_ERROR) { message->status = -EIO; giveback(pl022); return; } /* Handle end of message */ if (message->state == STATE_DONE) { message->status = 0; giveback(pl022); return; } /* Delay if requested at end of transfer before CS change */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) /* * FIXME: This runs in interrupt context. * Is this really smart? */ udelay(previous->delay_usecs); /* Drop chip select only if cs_change is requested */ if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; } if (set_up_next_transfer(pl022, transfer)) { message->state = STATE_ERROR; message->status = -EIO; giveback(pl022); return; } /* Flush the FIFOs and let's go! */ flush(pl022); if (pl022->cur_chip->enable_dma) { if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } return; } err_config_dma: writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); } static void do_interrupt_dma_transfer(struct pl022 *pl022) { u32 irqflags = ENABLE_ALL_INTERRUPTS; /* Enable target chip */ pl022->cur_chip->cs_control(SSP_CHIP_SELECT); if (set_up_next_transfer(pl022, pl022->cur_transfer)) { /* Error path */ pl022->cur_msg->state = STATE_ERROR; pl022->cur_msg->status = -EIO; giveback(pl022); return; } /* If we're using DMA, set up DMA here */ if (pl022->cur_chip->enable_dma) { /* Configure DMA transfer */ if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } /* Disable interrupts in DMA mode, IRQ from DMA controller */ irqflags = DISABLE_ALL_INTERRUPTS; } err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); writew(irqflags, SSP_IMSC(pl022->virtbase)); } static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct chip_data *chip; unsigned long time, timeout; chip = pl022->cur_chip; message = pl022->cur_msg; while (message->state != STATE_DONE) { /* Handle for abort */ if (message->state == STATE_ERROR) break; transfer = pl022->cur_transfer; /* Delay if requested at end of transfer */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } /* Configuration Changing Per Transfer */ if (set_up_next_transfer(pl022, transfer)) { /* Error path */ message->state = STATE_ERROR; break; } /* Flush FIFOs and enable SSP */ flush(pl022); writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); timeout = 
jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { time = jiffies; readwriter(pl022); if (time_after(time, timeout)) { dev_warn(&pl022->adev->dev, "%s: timeout!\n", __func__); message->state = STATE_ERROR; goto out; } cpu_relax(); } /* Update total byte transferred */ message->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ message->state = next_transfer(pl022); } out: /* Handle end of message */ if (message->state == STATE_DONE) message->status = 0; else message->status = -EIO; giveback(pl022); return; } /** * pump_messages - Workqueue function which processes spi message queue * @data: pointer to private data of SSP driver * * This function checks if there is any spi message in the queue that * needs processing and delegate control to appropriate function * do_polling_transfer()/do_interrupt_dma_transfer() * based on the kind of the transfer * */ static void pump_messages(struct work_struct *work) { struct pl022 *pl022 = container_of(work, struct pl022, pump_messages); unsigned long flags; /* Lock queue and check for queue work */ spin_lock_irqsave(&pl022->queue_lock, flags); if (list_empty(&pl022->queue) || !pl022->running) { pl022->busy = false; spin_unlock_irqrestore(&pl022->queue_lock, flags); return; } /* Make sure we are not already running a message */ if (pl022->cur_msg) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return; } /* Extract head of queue */ pl022->cur_msg = list_entry(pl022->queue.next, struct spi_message, queue); list_del_init(&pl022->cur_msg->queue); pl022->busy = true; spin_unlock_irqrestore(&pl022->queue_lock, flags); /* Initial message state */ pl022->cur_msg->state = STATE_START; pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, struct spi_transfer, transfer_list); /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); /* * We enable the core voltage and clocks here, then the clocks * and core will be disabled when giveback() is called in each method * (poll/interrupt/DMA) */ amba_vcore_enable(pl022->adev); amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); else do_interrupt_dma_transfer(pl022); } static int __init init_queue(struct pl022 *pl022) { INIT_LIST_HEAD(&pl022->queue); spin_lock_init(&pl022->queue_lock); pl022->running = false; pl022->busy = false; tasklet_init(&pl022->pump_transfers, pump_transfers, (unsigned long)pl022); INIT_WORK(&pl022->pump_messages, pump_messages); pl022->workqueue = create_singlethread_workqueue( dev_name(pl022->master->dev.parent)); if (pl022->workqueue == NULL) return -EBUSY; return 0; } static int start_queue(struct pl022 *pl022) { unsigned long flags; spin_lock_irqsave(&pl022->queue_lock, flags); if (pl022->running || pl022->busy) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -EBUSY; } pl022->running = true; pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; spin_unlock_irqrestore(&pl022->queue_lock, flags); queue_work(pl022->workqueue, &pl022->pump_messages); return 0; } static int stop_queue(struct pl022 *pl022) { unsigned long flags; unsigned limit = 500; int status = 0; spin_lock_irqsave(&pl022->queue_lock, flags); /* This is a bit lame, but is optimized for the common execution path. 
* A wait_queue on the pl022->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { spin_unlock_irqrestore(&pl022->queue_lock, flags); msleep(10); spin_lock_irqsave(&pl022->queue_lock, flags); } if (!list_empty(&pl022->queue) || pl022->busy) status = -EBUSY; else pl022->running = false; spin_unlock_irqrestore(&pl022->queue_lock, flags); return status; } static int destroy_queue(struct pl022 *pl022) { int status; status = stop_queue(pl022); /* we are unloading the module or failing to load (only two calls * to this routine), and neither call can handle a return value. * However, destroy_workqueue calls flush_workqueue, and that will * block until all work is done. If the reason that stop_queue * timed out is that the work will never finish, then it does no * good to call destroy_workqueue, so return anyway. */ if (status != 0) return status; destroy_workqueue(pl022->workqueue); return 0; } static int verify_controller_parameters(struct pl022 *pl022, struct pl022_config_chip const *chip_info) { if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } /* Half duplex is only available in the ST Micro version */ if (pl022->vendor->extended_cr) { if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) && (chip_info->duplex != SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { dev_err(&pl022->adev->dev, "Microwire duplex mode is configured incorrectly\n"); return -EINVAL; } } else { if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) dev_err(&pl022->adev->dev, "Microwire half duplex mode requested," " but this is only available in the" " ST version of PL022\n"); return -EINVAL; } } return 0; } /** * pl022_transfer - transfer function registered to SPI master framework * @spi: spi device which is requesting transfer * @msg: spi message which is to handled is queued to driver 
queue * * This function is registered to the SPI framework for this SPI master * controller. It will queue the spi_message in the queue of driver if * the queue is not stopped and return. */ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) { struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&pl022->queue_lock, flags); if (!pl022->running) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; msg->state = STATE_START; list_add_tail(&msg->queue, &pl022->queue); if (pl022->running && !pl022->busy) queue_work(pl022->workqueue, &pl022->pump_messages); spin_unlock_irqrestore(&pl022->queue_lock, flags); return 0; } static int calculate_effective_freq(struct pl022 *pl022, int freq, struct ssp_clock_params *clk_freq) { /* Lets calculate the frequency parameters */ u16 cpsdvsr = 2; u16 scr = 0; bool freq_found = false; u32 rate; u32 max_tclk; u32 min_tclk; rate = clk_get_rate(pl022->clk); /* cpsdvscr = 2 & scr 0 */ max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); /* cpsdvsr = 254 & scr = 255 */ min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); if ((freq <= max_tclk) && (freq >= min_tclk)) { while (cpsdvsr <= CPSDVR_MAX && !freq_found) { while (scr <= SCR_MAX && !freq_found) { if ((rate / (cpsdvsr * (1 + scr))) > freq) scr += 1; else { /* * This bool is made true when * effective frequency >= * target frequency is found */ freq_found = true; if ((rate / (cpsdvsr * (1 + scr))) != freq) { if (scr == SCR_MIN) { cpsdvsr -= 2; scr = SCR_MAX; } else scr -= 1; } } } if (!freq_found) { cpsdvsr += 2; scr = SCR_MIN; } } if (cpsdvsr != 0) { dev_dbg(&pl022->adev->dev, "SSP Effective Frequency is %u\n", (rate / (cpsdvsr * (1 + scr)))); clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); clk_freq->scr = (u8) (scr & 0xFF); dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n", clk_freq->cpsdvsr, clk_freq->scr); } } else { dev_err(&pl022->adev->dev, "controller data is incorrect: out of range frequency"); return -EINVAL; } return 0; } /* * A piece of default chip info unless the platform * supplies it. */ static const struct pl022_config_chip pl022_default_chip_info = { .com_mode = POLLING_TRANSFER, .iface = SSP_INTERFACE_MOTOROLA_SPI, .hierarchy = SSP_SLAVE, .slave_tx_disable = DO_NOT_DRIVE_TX, .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, .ctrl_len = SSP_BITS_8, .wait_state = SSP_MWIRE_WAIT_ZERO, .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, .cs_control = null_cs_control, }; /** * pl022_setup - setup function registered to SPI master framework * @spi: spi device which is requesting setup * * This function is registered to the SPI framework for this SPI master * controller. If it is the first time when setup is called by this device, * this function will initialize the runtime state for this chip and save * the same in the device structure. Else it will update the runtime info * with the updated chip info. Nothing is really being written to the * controller hardware here, that is not done until the actual transfer * commence. 
*/ static int pl022_setup(struct spi_device *spi) { struct pl022_config_chip const *chip_info; struct chip_data *chip; struct ssp_clock_params clk_freq = {0, }; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned int bits = spi->bits_per_word; u32 tmp; if (!spi->max_speed_hz) return -EINVAL; /* Get controller_state if one is supplied */ chip = spi_get_ctldata(spi); if (chip == NULL) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) { dev_err(&spi->dev, "cannot allocate controller state\n"); return -ENOMEM; } dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n"); } /* Get controller data if one is supplied */ chip_info = spi->controller_data; if (chip_info == NULL) { chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); /* * We can override with custom divisors, else we use the board * frequency setting */ if ((0 == chip_info->clk_freq.cpsdvsr) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, &clk_freq); if (status < 0) goto err_config_params; } else { memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); if ((clk_freq.cpsdvsr % 2) != 0) clk_freq.cpsdvsr = clk_freq.cpsdvsr - 1; } if ((clk_freq.cpsdvsr < CPSDVR_MIN) || (clk_freq.cpsdvsr > CPSDVR_MAX)) { status = -EINVAL; dev_err(&spi->dev, "cpsdvsr is configured incorrectly\n"); goto err_config_params; } status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; if (!chip_info->cs_control) { chip->cs_control = null_cs_control; dev_warn(&spi->dev, "chip select function is NULL for this chip\n"); } else chip->cs_control = chip_info->cs_control; if (bits <= 3) { /* PL022 doesn't support less than 4-bits */ status = -ENOTSUPP; goto err_config_params; } else if (bits <= 8) { dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; } else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; chip->write = WRITING_U16; } else { if (pl022->vendor->max_bpw >= 32) { dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); chip->n_bytes = 4; chip->read = READING_U32; chip->write = WRITING_U32; } else { dev_err(&spi->dev, "illegal data size for this controller!\n"); dev_err(&spi->dev, "a standard pl022 can only handle " "1 <= n <= 16 bit words\n"); status = -ENOTSUPP; goto err_config_params; } } /* Now Initialize all register settings required for this chip */ chip->cr0 = 0; chip->cr1 = 0; chip->dmacr = 0; chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1); } chip->cpsr = clk_freq.cpsdvsr; /* Special setup for the ST micro extended control registers */ if 
        /* Special setup for the ST micro extended control registers */
        if (pl022->vendor->extended_cr) {
                u32 etx;

                if (pl022->vendor->pl023) {
                        /* These bits are only in the PL023 */
                        SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
                                       SSP_CR1_MASK_FBCLKDEL_ST, 13);
                } else {
                        /* These bits are in the PL022 but not PL023 */
                        SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
                                       SSP_CR0_MASK_HALFDUP_ST, 5);
                        SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
                                       SSP_CR0_MASK_CSS_ST, 16);
                        SSP_WRITE_BITS(chip->cr0, chip_info->iface,
                                       SSP_CR0_MASK_FRF_ST, 21);
                        SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
                                       SSP_CR1_MASK_MWAIT_ST, 6);
                }
                SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS_ST, 0);

                if (spi->mode & SPI_LSB_FIRST) {
                        tmp = SSP_RX_LSB;
                        etx = SSP_TX_LSB;
                } else {
                        tmp = SSP_RX_MSB;
                        etx = SSP_TX_MSB;
                }
                SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
                SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
                SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
                               SSP_CR1_MASK_RXIFLSEL_ST, 7);
                SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
                               SSP_CR1_MASK_TXIFLSEL_ST, 10);
        } else {
                SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS, 0);
                SSP_WRITE_BITS(chip->cr0, chip_info->iface,
                               SSP_CR0_MASK_FRF, 4);
        }

        /* Stuff that is common for all versions */
        if (spi->mode & SPI_CPOL)
                tmp = SSP_CLK_POL_IDLE_HIGH;
        else
                tmp = SSP_CLK_POL_IDLE_LOW;
        SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

        if (spi->mode & SPI_CPHA)
                tmp = SSP_CLK_SECOND_EDGE;
        else
                tmp = SSP_CLK_FIRST_EDGE;
        SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

        SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
        /* Loopback is available on all versions except PL023 */
        if (pl022->vendor->loopback) {
                if (spi->mode & SPI_LOOP)
                        tmp = LOOPBACK_ENABLED;
                else
                        tmp = LOOPBACK_DISABLED;
                SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
        }
        SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
        SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
        SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable,
                       SSP_CR1_MASK_SOD, 3);

        /* Save controller_state */
        spi_set_ctldata(spi, chip);
        return status;
 err_config_params:
        spi_set_ctldata(spi, NULL);
        kfree(chip);
        return status;
}

/**
 * pl022_cleanup - cleanup function registered to SPI master framework
 * @spi: spi device which is requesting cleanup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. It will free the runtime state of the chip.
 */
static void pl022_cleanup(struct spi_device *spi)
{
        struct chip_data *chip = spi_get_ctldata(spi);

        spi_set_ctldata(spi, NULL);
        kfree(chip);
}

static int __devinit
pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct device *dev = &adev->dev;
        struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
        struct spi_master *master;
        struct pl022 *pl022 = NULL;     /* Data for this driver */
        int status = 0;

        dev_info(&adev->dev,
                 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
        if (platform_info == NULL) {
                dev_err(&adev->dev, "probe - no platform data supplied\n");
                status = -ENODEV;
                goto err_no_pdata;
        }

        /* Allocate master with space for data */
        master = spi_alloc_master(dev, sizeof(struct pl022));
        if (master == NULL) {
                dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
                status = -ENOMEM;
                goto err_no_master;
        }

        pl022 = spi_master_get_devdata(master);
        pl022->master = master;
        pl022->master_info = platform_info;
        pl022->adev = adev;
        pl022->vendor = id->data;

        /*
         * Bus number which has been assigned to this SSP controller
         * on this board
         */
        master->bus_num = platform_info->bus_id;
        master->num_chipselect = platform_info->num_chipselect;
        master->cleanup = pl022_cleanup;
        master->setup = pl022_setup;
        master->transfer = pl022_transfer;

        /*
         * Supports mode 0-3, loopback, and active low CS. Transfers are
         * always MS bit first on the original pl022.
         */
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
        if (pl022->vendor->extended_cr)
                master->mode_bits |= SPI_LSB_FIRST;

        dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

        status = amba_request_regions(adev, NULL);
        if (status)
                goto err_no_ioregion;

        pl022->phybase = adev->res.start;
        pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res));
        if (pl022->virtbase == NULL) {
                status = -ENOMEM;
                goto err_no_ioremap;
        }
        printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
               adev->res.start, pl022->virtbase);

        pl022->clk = clk_get(&adev->dev, NULL);
        if (IS_ERR(pl022->clk)) {
                status = PTR_ERR(pl022->clk);
                dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
                goto err_no_clk;
        }

        /* Disable SSP */
        writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
               SSP_CR1(pl022->virtbase));
        load_ssp_default_config(pl022);

        status = request_irq(adev->irq[0], pl022_interrupt_handler, 0,
                             "pl022", pl022);
        if (status < 0) {
                dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
                goto err_no_irq;
        }

        /* Get DMA channels */
        if (platform_info->enable_dma) {
                status = pl022_dma_probe(pl022);
                if (status != 0)
                        platform_info->enable_dma = 0;
        }

        /* Initialize and start queue */
        status = init_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev, "probe - problem initializing queue\n");
                goto err_init_queue;
        }
        status = start_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev, "probe - problem starting queue\n");
                goto err_start_queue;
        }

        /* Register with the SPI framework */
        amba_set_drvdata(adev, pl022);
        status = spi_register_master(master);
        if (status != 0) {
                dev_err(&adev->dev,
                        "probe - problem registering spi master\n");
                goto err_spi_register;
        }
        dev_dbg(dev, "probe succeeded\n");
        /*
         * Disable the silicon block pclk and any voltage domain and just
         * power it up and clock it when it's needed
         */
        amba_pclk_disable(adev);
        amba_vcore_disable(adev);
        return 0;

 err_spi_register:
 err_start_queue:
 err_init_queue:
        destroy_queue(pl022);
        pl022_dma_remove(pl022);
        free_irq(adev->irq[0], pl022);
 err_no_irq:
        clk_put(pl022->clk);
 err_no_clk:
        iounmap(pl022->virtbase);
 err_no_ioremap:
        amba_release_regions(adev);
 err_no_ioregion:
        spi_master_put(master);
 err_no_master:
 err_no_pdata:
        return status;
}

static int __devexit
pl022_remove(struct amba_device *adev)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        if (!pl022)
                return 0;

        /* Remove the queue */
        status = destroy_queue(pl022);
        if (status != 0) {
                dev_err(&adev->dev, "queue remove failed (%d)\n", status);
                return status;
        }
        load_ssp_default_config(pl022);
        pl022_dma_remove(pl022);
        free_irq(adev->irq[0], pl022);
        clk_disable(pl022->clk);
        clk_put(pl022->clk);
        iounmap(pl022->virtbase);
        amba_release_regions(adev);
        tasklet_disable(&pl022->pump_transfers);
        spi_unregister_master(pl022->master);
        spi_master_put(pl022->master);
        amba_set_drvdata(adev, NULL);
        dev_dbg(&adev->dev, "remove succeeded\n");
        return 0;
}

#ifdef CONFIG_PM
static int pl022_suspend(struct amba_device *adev, pm_message_t state)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        status = stop_queue(pl022);
        if (status) {
                dev_warn(&adev->dev, "suspend cannot stop queue\n");
                return status;
        }

        amba_vcore_enable(adev);
        amba_pclk_enable(adev);
        load_ssp_default_config(pl022);
        amba_pclk_disable(adev);
        amba_vcore_disable(adev);

        dev_dbg(&adev->dev, "suspended\n");
        return 0;
}

static int pl022_resume(struct amba_device *adev)
{
        struct pl022 *pl022 = amba_get_drvdata(adev);
        int status = 0;

        /* Start the queue running */
        status = start_queue(pl022);
        if (status)
                dev_err(&adev->dev, "problem starting queue (%d)\n", status);
        else
                dev_dbg(&adev->dev, "resumed\n");

        return status;
}
#else
#define pl022_suspend   NULL
#define pl022_resume    NULL
#endif  /* CONFIG_PM */

static struct vendor_data vendor_arm = {
        .fifodepth = 8,
        .max_bpw = 16,
        .unidir = false,
        .extended_cr = false,
        .pl023 = false,
        .loopback = true,
};

static struct vendor_data vendor_st = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = false,
        .loopback = true,
};

static struct vendor_data vendor_st_pl023 = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = true,
        .loopback = false,
};

static struct vendor_data vendor_db5500_pl023 = {
        .fifodepth = 32,
        .max_bpw = 32,
        .unidir = false,
        .extended_cr = true,
        .pl023 = true,
        .loopback = true,
};

static struct amba_id pl022_ids[] = {
        {
                /*
                 * ARM PL022 variant, this has a 16bit wide
                 * and 8 locations deep TX/RX FIFO
                 */
                .id     = 0x00041022,
                .mask   = 0x000fffff,
                .data   = &vendor_arm,
        },
        {
                /*
                 * ST Micro derivative, this has 32bit wide
                 * and 32 locations deep TX/RX FIFO
                 */
                .id     = 0x01080022,
                .mask   = 0xffffffff,
                .data   = &vendor_st,
        },
        {
                /*
                 * ST-Ericsson derivative "PL023" (this is not
                 * an official ARM number), this is a PL022 SSP block
                 * stripped to SPI mode only, it has 32bit wide
                 * and 32 locations deep TX/RX FIFO but no extended
                 * CR0/CR1 register
                 */
                .id     = 0x00080023,
                .mask   = 0xffffffff,
                .data   = &vendor_st_pl023,
        },
        {
                .id     = 0x10080023,
                .mask   = 0xffffffff,
                .data   = &vendor_db5500_pl023,
        },
        { 0, 0 },
};

static struct amba_driver pl022_driver = {
        .drv = {
                .name   = "ssp-pl022",
        },
        .id_table       = pl022_ids,
        .probe          = pl022_probe,
        .remove         = __devexit_p(pl022_remove),
        .suspend        = pl022_suspend,
        .resume         = pl022_resume,
};

static int __init pl022_init(void)
{
        return amba_driver_register(&pl022_driver);
}
subsys_initcall(pl022_init);

static void __exit pl022_exit(void)
{
        amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("PL022 SSP Controller Driver");
MODULE_LICENSE("GPL");
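/*
 * Usage sketch (illustrative only, not part of the driver): a board file
 * would typically hand this driver a struct pl022_ssp_controller as AMBA
 * platform data and, optionally, a per-device struct pl022_config_chip via
 * spi_board_info.controller_data, which pl022_setup() above consumes. The
 * bus/chip-select numbers, speed and the "dummy-device" modalias below are
 * made-up placeholders, and other pl022_config_chip fields are omitted for
 * brevity; leaving clk_freq zeroed lets the driver derive the divisors from
 * max_speed_hz, as shown in pl022_setup().
 *
 *	static struct pl022_ssp_controller ssp0_platform_data = {
 *		.bus_id = 0,
 *		.num_chipselect = 1,
 *		.enable_dma = 0,
 *	};
 *
 *	static struct pl022_config_chip dummy_chip_info = {
 *		.com_mode = DMA_TRANSFER,	// matches the DMA path above
 *		.clk_freq = { .cpsdvsr = 0, .scr = 0 },
 *		.cs_control = NULL,		// falls back to null_cs_control
 *	};
 *
 *	static struct spi_board_info ssp0_board_info[] = {
 *		{
 *			.modalias	 = "dummy-device",
 *			.controller_data = &dummy_chip_info,
 *			.max_speed_hz	 = 1000000,
 *			.bus_num	 = 0,
 *			.chip_select	 = 0,
 *			.mode		 = SPI_MODE_0,
 *		},
 *	};
 *
 * ...registered from board init code with
 *	spi_register_board_info(ssp0_board_info, ARRAY_SIZE(ssp0_board_info));
 */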