path: root/include/linux/cpumask.h
blob: 60e56c6e03dd3ee6c4836031b88626eaba249b1e
#ifndef __LINUX_CPUMASK_H
#define __LINUX_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * For details of cpumask_scnprintf() and cpumask_parse(),
 * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c.
 * For details of cpulist_scnprintf() and cpulist_parse(), see
 * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
 * For details of cpu_remap(), see bitmap_bitremap() in lib/bitmap.c.
 * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
 *
 * The available cpumask operations are:
 *
 * void cpu_set(cpu, mask)		turn on bit 'cpu' in mask
 * void cpu_clear(cpu, mask)		turn off bit 'cpu' in mask
 * void cpus_setall(mask)		set all bits
 * void cpus_clear(mask)		clear all bits
 * int cpu_isset(cpu, mask)		true iff bit 'cpu' set in mask
 * int cpu_test_and_set(cpu, mask)	test and set bit 'cpu' in mask
 *
 * void cpus_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void cpus_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void cpus_xor(dst, src1, src2)	dst = src1 ^ src2
 * void cpus_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void cpus_complement(dst, src)	dst = ~src
 *
 * int cpus_equal(mask1, mask2)		Does mask1 == mask2?
 * int cpus_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int cpus_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int cpus_empty(mask)			Is mask empty (no bits set)?
 * int cpus_full(mask)			Is mask full (all bits set)?
 * int cpus_weight(mask)		Hamming weight - number of set bits
 *
 * void cpus_shift_right(dst, src, n)	Shift right
 * void cpus_shift_left(dst, src, n)	Shift left
 *
 * int first_cpu(mask)			Number of lowest set bit, or NR_CPUS
 * int next_cpu(cpu, mask)		Next cpu past 'cpu', or NR_CPUS
 *
 * cpumask_t cpumask_of_cpu(cpu)	Return cpumask with bit 'cpu' set
 * CPU_MASK_ALL				Initializer - all bits set
 * CPU_MASK_NONE			Initializer - no bits set
 * unsigned long *cpus_addr(mask)	Array of unsigned longs in mask
 *
 * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing
 * int cpumask_parse(ubuf, ulen, mask)	Parse ascii string as cpumask
 * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
 * int cpulist_parse(buf, map)		Parse ascii string as cpulist
 * int cpu_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * int cpus_remap(dst, src, old, new)	*dst = map(old, new)(src)
 *
 * for_each_cpu_mask(cpu, mask)		for-loop cpu over mask
 *
 * int num_online_cpus()		Number of online CPUs
 * int num_possible_cpus()		Number of all possible CPUs
 * int num_present_cpus()		Number of present CPUs
 *
 * int cpu_online(cpu)			Is cpu 'cpu' online?
 * int cpu_possible(cpu)		Is cpu 'cpu' possible?
 * int cpu_present(cpu)			Is cpu 'cpu' present (can schedule)?
 *
 * int any_online_cpu(mask)		First online cpu in mask
 *
 * for_each_cpu(cpu)			for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)		for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)		for-loop cpu over cpu_present_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpu_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macro calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpu_isset(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 */
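
/*
 * A quick illustrative sketch (editor's example, not part of this
 * header's API documentation; the variable names are hypothetical):
 * compute the set of CPUs that are possible but not currently online,
 * then walk it.
 *
 *	cpumask_t offline;
 *	int cpu;
 *
 *	cpus_andnot(offline, cpu_possible_map, cpu_online_map);
 *	for_each_cpu_mask(cpu, offline)
 *		printk(KERN_DEBUG "cpu %d is possible but offline\n", cpu);
 */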

#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/bitmap.h>

typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
extern cpumask_t _unused_cpumask_arg_;

#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
	set_bit(cpu, dstp->bits);
}

#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
	clear_bit(cpu, dstp->bits);
}

#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)

#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
	return test_and_set_bit(cpu, addr->bits);
}
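
/*
 * Editor's sketch: cpu_test_and_set() is atomic, so concurrent callers
 * can use it to arbitrate - only the first caller to set a given bit
 * sees a zero return.  Both 'claimed' and setup_per_cpu_resource()
 * below are hypothetical:
 *
 *	if (!cpu_test_and_set(cpu, claimed))
 *		setup_per_cpu_resource(cpu);	(runs exactly once per cpu)
 */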

#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_andnot(dst, src1, src2) \
				__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define cpus_complement(dst, src) __cpus_complement(&(dst), &(src), NR_CPUS)
static inline void __cpus_complement(cpumask_t *dstp,
					const cpumask_t *srcp, int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}

#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
					const cpumask_t *src2p, int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}

#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define cpus_full(cpumask) __cpus_full(&(cpumask), NR_CPUS)
static inline int __cpus_full(const cpumask_t *srcp, int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}
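
/*
 * Editor's example of the predicates above ('mask' is hypothetical;
 * CPU_MASK_NONE is defined later in this file):
 *
 *	cpumask_t mask = CPU_MASK_NONE;
 *	cpu_set(0, mask);
 *	cpu_set(2, mask);
 *
 * cpus_weight(mask) is now 2, and cpus_empty(mask) is false.
 */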

#define cpus_shift_right(dst, src, n) \
			__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
					const cpumask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define cpus_shift_left(dst, src, n) \
			__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
					const cpumask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}

#define first_cpu(src) __first_cpu(&(src), NR_CPUS)
static inline int __first_cpu(const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_first_bit(srcp->bits, nbits));
}

#define next_cpu(n, src) __next_cpu((n), &(src), NR_CPUS)
static inline int __next_cpu(int n, const cpumask_t *srcp, int nbits)
{
	return min_t(int, nbits, find_next_bit(srcp->bits, nbits, n+1));
}
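
/*
 * Both helpers return nbits (i.e. NR_CPUS) when no suitable bit is
 * found, so a scan terminates naturally; for_each_cpu_mask() below is
 * exactly this idiom:
 *
 *	for (cpu = first_cpu(mask); cpu < NR_CPUS; cpu = next_cpu(cpu, mask))
 *		...
 */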

#define cpumask_of_cpu(cpu)						\
({									\
	typeof(_unused_cpumask_arg_) m;					\
	if (sizeof(m) == sizeof(unsigned long)) {			\
		m.bits[0] = 1UL<<(cpu);					\
	} else {							\
		cpus_clear(m);						\
		cpu_set((cpu), m);					\
	}								\
	m;								\
})
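
/*
 * Editor's sketch: cpumask_of_cpu() suits interfaces that want a full
 * mask for a single CPU, e.g. pinning the current task to the CPU it
 * is running on (set_cpus_allowed() is declared in <linux/sched.h>):
 *
 *	set_cpus_allowed(current, cpumask_of_cpu(smp_processor_id()));
 */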

#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)

#if NR_CPUS <= BITS_PER_LONG

#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
} }

#else

#define CPU_MASK_ALL							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL,			\
	[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD			\
} }

#endif

#define CPU_MASK_NONE							\
(cpumask_t) { {								\
	[0 ... BITS_TO_LONGS(NR_CPUS)-1] =  0UL				\
} }

#define CPU_MASK_CPU0							\
(cpumask_t) { {								\
	[0] =  1UL							\
} }
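
/*
 * These are initializers, so they can appear at definition time
 * (editor's illustration; the variable names are arbitrary):
 *
 *	static cpumask_t frozen_cpus = CPU_MASK_NONE;
 *	cpumask_t boot_mask = CPU_MASK_CPU0;
 */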

#define cpus_addr(src) ((src).bits)

#define cpumask_scnprintf(buf, len, src) \
			__cpumask_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpumask_scnprintf(char *buf, int len,
					const cpumask_t *srcp, int nbits)
{
	return bitmap_scnprintf(buf, len, srcp->bits, nbits);
}

#define cpumask_parse(ubuf, ulen, dst) \
			__cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS)
static inline int __cpumask_parse(const char __user *buf, int len,
					cpumask_t *dstp, int nbits)
{
	return bitmap_parse(buf, len, dstp->bits, nbits);
}

#define cpulist_scnprintf(buf, len, src) \
			__cpulist_scnprintf((buf), (len), &(src), NR_CPUS)
static inline int __cpulist_scnprintf(char *buf, int len,
					const cpumask_t *srcp, int nbits)
{
	return bitmap_scnlistprintf(buf, len, srcp->bits, nbits);
}

#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS)
static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}
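
/*
 * Illustration of the two ASCII formats (see lib/bitmap.c for the
 * details): for a mask with CPUs 0-3 and 7 set, cpumask_scnprintf()
 * produces hex words ("8f"), while cpulist_scnprintf() produces a
 * range list; the parse routines accept the corresponding formats.
 *
 *	char buf[32];
 *	cpulist_scnprintf(buf, sizeof(buf), mask);	buf is now "0-3,7"
 */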

#define cpu_remap(oldbit, old, new) \
		__cpu_remap((oldbit), &(old), &(new), NR_CPUS)
static inline int __cpu_remap(int oldbit,
		const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define cpus_remap(dst, src, old, new) \
		__cpus_remap(&(dst), &(src), &(old), &(new), NR_CPUS)
static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
		const cpumask_t *oldp, const cpumask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#if NR_CPUS > 1
#define for_each_cpu_mask(cpu, mask)		\
	for ((cpu) = first_cpu(mask);		\
		(cpu) < NR_CPUS;		\
		(cpu) = next_cpu((cpu), (mask)))
#else /* NR_CPUS == 1 */
#define for_each_cpu_mask(cpu, mask) for ((cpu) = 0; (cpu) < 1; (cpu)++)
#endif /* NR_CPUS */

/*
 * The following particular system cpumasks and operations manage
 * possible, present and online cpus.  Each of them is a fixed-size
 * bitmap of NR_CPUS bits.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 *  In either case, NR_CPUS is fixed at compile time, as the static
 *  size of these bitmaps.  The cpu_possible_map is fixed at boot
 *  time, as the set of CPU ids that might ever be plugged in
 *  at any time during the life of that system boot.
 *  The cpu_present_map is dynamic(*), representing which CPUs
 *  are currently plugged in.  And cpu_online_map is the dynamic
 *  subset of cpu_present_map, indicating those CPUs available
 *  for scheduling.
 *
 *  If HOTPLUG is enabled, then cpu_possible_map is forced to have
 *  all NR_CPUS bits set, otherwise it is just the set of CPUs that
 *  ACPI reports present at boot.
 *
 *  If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 *  depending on what ACPI reports as currently plugged in, otherwise
 *  cpu_present_map is just a copy of cpu_possible_map.
 *
 *  (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *      hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP architectures (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
 *    the assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP architectures #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *        int x_map, phys_x_map;
 *        #define set1(a) x_map = a
 *        inline void set2(int a) { x_map = a; }
 *        #define x_map phys_x_map
 *        main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus()	cpus_weight(cpu_online_map)
#define num_possible_cpus()	cpus_weight(cpu_possible_map)
#define num_present_cpus()	cpus_weight(cpu_present_map)
#define cpu_online(cpu)		cpu_isset((cpu), cpu_online_map)
#define cpu_possible(cpu)	cpu_isset((cpu), cpu_possible_map)
#define cpu_present(cpu)	cpu_isset((cpu), cpu_present_map)
#else
#define num_online_cpus()	1
#define num_possible_cpus()	1
#define num_present_cpus()	1
#define cpu_online(cpu)		((cpu) == 0)
#define cpu_possible(cpu)	((cpu) == 0)
#define cpu_present(cpu)	((cpu) == 0)
#endif

#define any_online_cpu(mask)			\
({						\
	int cpu;				\
	for_each_cpu_mask(cpu, (mask))		\
		if (cpu_online(cpu))		\
			break;			\
	cpu;					\
})
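
/*
 * Editor's sketch: a typical use is picking a fallback CPU from an
 * affinity mask, e.g. when migrating work off a dying CPU.  If the
 * mask holds no online CPU, the loop falls through and NR_CPUS is
 * the result ('desired' is a hypothetical mask):
 *
 *	int target = any_online_cpu(desired);
 *	if (target >= NR_CPUS)
 *		target = first_cpu(cpu_online_map);
 */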

#define for_each_cpu(cpu)	  for_each_cpu_mask((cpu), cpu_possible_map)
#define for_each_online_cpu(cpu)  for_each_cpu_mask((cpu), cpu_online_map)
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)

/* Find the highest possible smp_processor_id() */
#define highest_possible_processor_id() \
({ \
	unsigned int cpu, highest = 0; \
	for_each_cpu_mask(cpu, cpu_possible_map) \
		highest = cpu; \
	highest; \
})
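
/*
 * Editor's sketch: with a sparse cpu_possible_map this can exceed
 * num_possible_cpus() - 1, so use it to size arrays indexed by raw
 * CPU id ('counters' is hypothetical):
 *
 *	counters = kcalloc(highest_possible_processor_id() + 1,
 *			   sizeof(*counters), GFP_KERNEL);
 */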


#endif /* __LINUX_CPUMASK_H */
ss="hl opt">); void (*dataout) (struct ace_device * ace); }; /* 8 Bit bus width */ static u16 ace_in_8(struct ace_device *ace, int reg) { void __iomem *r = ace->baseaddr + reg; return in_8(r) | (in_8(r + 1) << 8); } static void ace_out_8(struct ace_device *ace, int reg, u16 val) { void __iomem *r = ace->baseaddr + reg; out_8(r, val); out_8(r + 1, val >> 8); } static void ace_datain_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *dst = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) *dst++ = in_8(r++); ace->data_ptr = dst; } static void ace_dataout_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *src = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) out_8(r++, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_8_ops = { .in = ace_in_8, .out = ace_out_8, .datain = ace_datain_8, .dataout = ace_dataout_8, }; /* 16 bit big endian bus attachment */ static u16 ace_in_be16(struct ace_device *ace, int reg) { return in_be16(ace->baseaddr + reg); } static void ace_out_be16(struct ace_device *ace, int reg, u16 val) { out_be16(ace->baseaddr + reg, val); } static void ace_datain_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_le16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_le16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } /* 16 bit little endian bus attachment */ static u16 ace_in_le16(struct ace_device *ace, int reg) { return in_le16(ace->baseaddr + reg); } static void ace_out_le16(struct ace_device *ace, int reg, u16 val) { out_le16(ace->baseaddr + reg, val); } static void ace_datain_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_be16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_be16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_be16_ops = { .in = ace_in_be16, .out = ace_out_be16, .datain = ace_datain_be16, .dataout = ace_dataout_be16, }; static struct ace_reg_ops ace_reg_le16_ops = { .in = ace_in_le16, .out = ace_out_le16, .datain = ace_datain_le16, .dataout = ace_dataout_le16, }; static inline u16 ace_in(struct ace_device *ace, int reg) { return ace->reg_ops->in(ace, reg); } static inline u32 ace_in32(struct ace_device *ace, int reg) { return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16); } static inline void ace_out(struct ace_device *ace, int reg, u16 val) { ace->reg_ops->out(ace, reg, val); } static inline void ace_out32(struct ace_device *ace, int reg, u32 val) { ace_out(ace, reg, val); ace_out(ace, reg + 2, val >> 16); } /* --------------------------------------------------------------------- * Debug support functions */ #if defined(DEBUG) static void ace_dump_mem(void *base, int len) { const char *ptr = base; int i, j; for (i = 0; i < len; i += 16) { printk(KERN_INFO "%.8x:", i); for (j = 0; j < 16; j++) { if (!(j % 4)) printk(" "); printk("%.2x", ptr[i + j]); } printk(" "); for (j = 0; j < 16; j++) printk("%c", isprint(ptr[i + j]) ? 
ptr[i + j] : '.'); printk("\n"); } } #else static inline void ace_dump_mem(void *base, int len) { } #endif static void ace_dump_regs(struct ace_device *ace) { dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" " status:%.8x mpu_lba:%.8x busmode:%4x\n" " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD), ace_in(ace, ACE_VERSION), ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_MPULBA), ace_in(ace, ACE_BUSMODE), ace_in32(ace, ACE_ERROR), ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT)); } static void ace_fix_driveid(u16 *id) { #if defined(__BIG_ENDIAN) int i; /* All half words have wrong byte order; swap the bytes */ for (i = 0; i < ATA_ID_WORDS; i++, id++) *id = le16_to_cpu(*id); #endif } /* --------------------------------------------------------------------- * Finite State Machine (FSM) implementation */ /* FSM tasks; used to direct state transitions */ #define ACE_TASK_IDLE 0 #define ACE_TASK_IDENTIFY 1 #define ACE_TASK_READ 2 #define ACE_TASK_WRITE 3 #define ACE_FSM_NUM_TASKS 4 /* FSM state definitions */ #define ACE_FSM_STATE_IDLE 0 #define ACE_FSM_STATE_REQ_LOCK 1 #define ACE_FSM_STATE_WAIT_LOCK 2 #define ACE_FSM_STATE_WAIT_CFREADY 3 #define ACE_FSM_STATE_IDENTIFY_PREPARE 4 #define ACE_FSM_STATE_IDENTIFY_TRANSFER 5 #define ACE_FSM_STATE_IDENTIFY_COMPLETE 6 #define ACE_FSM_STATE_REQ_PREPARE 7 #define ACE_FSM_STATE_REQ_TRANSFER 8 #define ACE_FSM_STATE_REQ_COMPLETE 9 #define ACE_FSM_STATE_ERROR 10 #define ACE_FSM_NUM_STATES 11 /* Set flag to exit FSM loop and reschedule tasklet */ static inline void ace_fsm_yield(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yield()\n"); tasklet_schedule(&ace->fsm_tasklet); ace->fsm_continue_flag = 0; } /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */ static inline void ace_fsm_yieldirq(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yieldirq()\n"); if (!ace->irq) /* No IRQ assigned, so need to poll */ tasklet_schedule(&ace->fsm_tasklet); ace->fsm_continue_flag = 0; } /* Get the next read/write request; ending requests that we don't handle */ static struct request *ace_get_next_request(struct request_queue *q) { struct request *req; while ((req = blk_peek_request(q)) != NULL) { if (req->cmd_type == REQ_TYPE_FS) break; blk_start_request(req); __blk_end_request_all(req, -EIO); } return req; } static void ace_fsm_dostate(struct ace_device *ace) { struct request *req; u32 status; u16 val; int count; #if defined(DEBUG) dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n", ace->fsm_state, ace->id_req_count); #endif /* Verify that there is actually a CF in the slot. 
If not, then * bail out back to the idle state and wake up all the waiters */ status = ace_in32(ace, ACE_STATUS); if ((status & ACE_STATUS_CFDETECT) == 0) { ace->fsm_state = ACE_FSM_STATE_IDLE; ace->media_change = 1; set_capacity(ace->gd, 0); dev_info(ace->dev, "No CF in slot\n"); /* Drop all in-flight and pending requests */ if (ace->req) { __blk_end_request_all(ace->req, -EIO); ace->req = NULL; } while ((req = blk_fetch_request(ace->queue)) != NULL) __blk_end_request_all(req, -EIO); /* Drop back to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = -EIO; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } } switch (ace->fsm_state) { case ACE_FSM_STATE_IDLE: /* See if there is anything to do */ if (ace->id_req_count || ace_get_next_request(ace->queue)) { ace->fsm_iter_num++; ace->fsm_state = ACE_FSM_STATE_REQ_LOCK; mod_timer(&ace->stall_timer, jiffies + HZ); if (!timer_pending(&ace->stall_timer)) add_timer(&ace->stall_timer); break; } del_timer(&ace->stall_timer); ace->fsm_continue_flag = 0; break; case ACE_FSM_STATE_REQ_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* Already have the lock, jump to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* Request the lock */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ); ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK; break; case ACE_FSM_STATE_WAIT_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* got the lock; move to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* wait a bit for the lock */ ace_fsm_yield(ace); break; case ACE_FSM_STATE_WAIT_CFREADY: status = ace_in32(ace, ACE_STATUS); if (!(status & ACE_STATUS_RDYFORCFCMD) || (status & ACE_STATUS_CFBSY)) { /* CF card isn't ready; it needs to be polled */ ace_fsm_yield(ace); break; } /* Device is ready for command; determine what to do next */ if (ace->id_req_count) ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE; else ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE; break; case ACE_FSM_STATE_IDENTIFY_PREPARE: /* Send identify command */ ace->fsm_task = ACE_TASK_IDENTIFY; ace->data_ptr = ace->cf_id; ace->data_count = ACE_BUF_PER_SECTOR; ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* irq handler takes over from this point; wait for the * transfer to complete */ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER; ace_fsm_yieldirq(ace); break; case ACE_FSM_STATE_IDENTIFY_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n", ace->fsm_task, ace->fsm_iter_num, ace->data_count); ace_fsm_yield(ace); break; } if (!(status & ACE_STATUS_DATABUFRDY)) { ace_fsm_yield(ace); break; } /* Transfer the next buffer */ ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* transfer finished; kick state machine */ dev_dbg(ace->dev, "identify finished\n"); ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE; break; case ACE_FSM_STATE_IDENTIFY_COMPLETE: ace_fix_driveid(ace->cf_id); ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ if (ace->data_result) { /* Error occurred, disable the disk */ ace->media_change = 1; set_capacity(ace->gd, 0); dev_err(ace->dev, "error fetching CF 
id (%i)\n", ace->data_result); } else { ace->media_change = 0; /* Record disk parameters */ set_capacity(ace->gd, ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); dev_info(ace->dev, "capacity: %i sectors\n", ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); } /* We're done, drop to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = ace->data_result; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } break; case ACE_FSM_STATE_REQ_PREPARE: req = ace_get_next_request(ace->queue); if (!req) { ace->fsm_state = ACE_FSM_STATE_IDLE; break; } blk_start_request(req); /* Okay, it's a data request, set it up for transfer */ dev_dbg(ace->dev, "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", (unsigned long long)blk_rq_pos(req), blk_rq_sectors(req), blk_rq_cur_sectors(req), rq_data_dir(req)); ace->req = req; ace->data_ptr = bio_data(req->bio); ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); count = blk_rq_sectors(req); if (rq_data_dir(req)) { /* Kick off write request */ dev_dbg(ace->dev, "write data\n"); ace->fsm_task = ACE_TASK_WRITE; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_WRITE_DATA); } else { /* Kick off read request */ dev_dbg(ace->dev, "read data\n"); ace->fsm_task = ACE_TASK_READ; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_READ_DATA); } /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* Move to the transfer state. The systemace will raise * an interrupt once there is something to do */ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER; if (ace->fsm_task == ACE_TASK_READ) ace_fsm_yieldirq(ace); /* wait for data ready */ break; case ACE_FSM_STATE_REQ_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yield(ace); /* need to poll CFBSY bit */ break; } if (!(status & ACE_STATUS_DATABUFRDY)) { dev_dbg(ace->dev, "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yieldirq(ace); break; } /* Transfer the next buffer */ if (ace->fsm_task == ACE_TASK_WRITE) ace->reg_ops->dataout(ace); else ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* bio finished; is there another one? 
*/ if (__blk_end_request_cur(ace->req, 0)) { /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ ace->data_ptr = bio_data(ace->req->bio); ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; } ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE; break; case ACE_FSM_STATE_REQ_COMPLETE: ace->req = NULL; /* Finished request; go to idle state */ ace->fsm_state = ACE_FSM_STATE_IDLE; break; default: ace->fsm_state = ACE_FSM_STATE_IDLE; break; } } static void ace_fsm_tasklet(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; spin_lock_irqsave(&ace->lock, flags); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } static void ace_stall_timer(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; dev_warn(ace->dev, "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n", ace->fsm_state, ace->fsm_task, ace->fsm_iter_num, ace->data_count); spin_lock_irqsave(&ace->lock, flags); /* Rearm the stall timer *before* entering FSM (which may then * delete the timer) */ mod_timer(&ace->stall_timer, jiffies + HZ); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } /* --------------------------------------------------------------------- * Interrupt handling routines */ static int ace_interrupt_checkstate(struct ace_device *ace) { u32 sreg = ace_in32(ace, ACE_STATUS); u16 creg = ace_in(ace, ACE_CTRL); /* Check for error occurrence */ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) && (creg & ACE_CTRL_ERRORIRQ)) { dev_err(ace->dev, "transfer failure\n"); ace_dump_regs(ace); return -EIO; } return 0; } static irqreturn_t ace_interrupt(int irq, void *dev_id) { u16 creg; struct ace_device *ace = dev_id; /* be safe and get the lock */ spin_lock(&ace->lock); ace->in_irq = 1; /* clear the interrupt */ creg = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ); ace_out(ace, ACE_CTRL, creg); /* check for IO failures */ if (ace_interrupt_checkstate(ace)) ace->data_result = -EIO; if (ace->fsm_task == 0) { dev_err(ace->dev, "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n", ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD)); dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n", ace->fsm_task, ace->fsm_state, ace->data_count); } /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); /* done with interrupt; drop the lock */ ace->in_irq = 0; spin_unlock(&ace->lock); return IRQ_HANDLED; } /* --------------------------------------------------------------------- * Block ops */ static void ace_request(struct request_queue * q) { struct request *req; struct ace_device *ace; req = ace_get_next_request(q); if (req) { ace = req->rq_disk->private_data; tasklet_schedule(&ace->fsm_tasklet); } } static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing) { struct ace_device *ace = gd->private_data; dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change); return ace->media_change ? 
DISK_EVENT_MEDIA_CHANGE : 0; } static int ace_revalidate_disk(struct gendisk *gd) { struct ace_device *ace = gd->private_data; unsigned long flags; dev_dbg(ace->dev, "ace_revalidate_disk()\n"); if (ace->media_change) { dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n"); spin_lock_irqsave(&ace->lock, flags); ace->id_req_count++; spin_unlock_irqrestore(&ace->lock, flags); tasklet_schedule(&ace->fsm_tasklet); wait_for_completion(&ace->id_completion); } dev_dbg(ace->dev, "revalidate complete\n"); return ace->id_result; } static int ace_open(struct block_device *bdev, fmode_t mode) { struct ace_device *ace = bdev->bd_disk->private_data; unsigned long flags; dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); ace->users++; spin_unlock_irqrestore(&ace->lock, flags); check_disk_change(bdev); mutex_unlock(&xsysace_mutex); return 0; } static void ace_release(struct gendisk *disk, fmode_t mode) { struct ace_device *ace = disk->private_data; unsigned long flags; u16 val; dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); ace->users--; if (ace->users == 0) { val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); } spin_unlock_irqrestore(&ace->lock, flags); mutex_unlock(&xsysace_mutex); } static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ace_device *ace = bdev->bd_disk->private_data; u16 *cf_id = ace->cf_id; dev_dbg(ace->dev, "ace_getgeo()\n"); geo->heads = cf_id[ATA_ID_HEADS]; geo->sectors = cf_id[ATA_ID_SECTORS]; geo->cylinders = cf_id[ATA_ID_CYLS]; return 0; } static const struct block_device_operations ace_fops = { .owner = THIS_MODULE, .open = ace_open, .release = ace_release, .check_events = ace_check_events, .revalidate_disk = ace_revalidate_disk, .getgeo = ace_getgeo, }; /* -------------------------------------------------------------------- * SystemACE device setup/teardown code */ static int ace_setup(struct ace_device *ace) { u16 version; u16 val; int rc; dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace); dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n", (unsigned long long)ace->physaddr, ace->irq); spin_lock_init(&ace->lock); init_completion(&ace->id_completion); /* * Map the device */ ace->baseaddr = ioremap(ace->physaddr, 0x80); if (!ace->baseaddr) goto err_ioremap; /* * Initialize the state machine tasklet and stall timer */ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace); setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace); /* * Initialize the request queue */ ace->queue = blk_init_queue(ace_request, &ace->lock); if (ace->queue == NULL) goto err_blk_initq; blk_queue_logical_block_size(ace->queue, 512); /* * Allocate and initialize GD structure */ ace->gd = alloc_disk(ACE_NUM_MINORS); if (!ace->gd) goto err_alloc_disk; ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); /* set bus width */ if (ace->bus_width == ACE_BUS_WIDTH_16) { /* 0x0101 should work regardless of endianess */ ace_out_le16(ace, ACE_BUSMODE, 0x0101); /* read it back to determine endianess */ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001) ace->reg_ops = &ace_reg_le16_ops; else ace->reg_ops = &ace_reg_be16_ops; } else { ace_out_8(ace, ACE_BUSMODE, 0x00); ace->reg_ops = &ace_reg_8_ops; } /* Make sure version 
register is sane */ version = ace_in(ace, ACE_VERSION); if ((version == 0) || (version == 0xFFFF)) goto err_read; /* Put sysace in a sane state by clearing most control reg bits */ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); /* Now we can hook up the irq handler */ if (ace->irq) { rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace); if (rc) { /* Failure - fall back to polled mode */ dev_err(ace->dev, "request_irq failed\n"); ace->irq = 0; } } /* Enable interrupts */ val = ace_in(ace, ACE_CTRL); val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; ace_out(ace, ACE_CTRL, val); /* Print the identification */ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n", (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff); dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n", (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq); ace->media_change = 1; ace_revalidate_disk(ace->gd); /* Make the sysace device 'live' */ add_disk(ace->gd); return 0; err_read: put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); err_blk_initq: iounmap(ace->baseaddr); err_ioremap: dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n", (unsigned long long) ace->physaddr); return -ENOMEM; } static void ace_teardown(struct ace_device *ace) { if (ace->gd) { del_gendisk(ace->gd); put_disk(ace->gd); } if (ace->queue) blk_cleanup_queue(ace->queue); tasklet_kill(&ace->fsm_tasklet); if (ace->irq) free_irq(ace->irq, ace); iounmap(ace->baseaddr); } static int ace_alloc(struct device *dev, int id, resource_size_t physaddr, int irq, int bus_width) { struct ace_device *ace; int rc; dev_dbg(dev, "ace_alloc(%p)\n", dev); if (!physaddr) { rc = -ENODEV; goto err_noreg; } /* Allocate and initialize the ace device structure */ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); if (!ace) { rc = -ENOMEM; goto err_alloc; } ace->dev = dev; ace->id = id; ace->physaddr = physaddr; ace->irq = irq; ace->bus_width = bus_width; /* Call the setup code */ rc = ace_setup(ace); if (rc) goto err_setup; dev_set_drvdata(dev, ace); return 0; err_setup: dev_set_drvdata(dev, NULL); kfree(ace); err_alloc: err_noreg: dev_err(dev, "could not initialize device, err=%i\n", rc); return rc; } static void ace_free(struct device *dev) { struct ace_device *ace = dev_get_drvdata(dev); dev_dbg(dev, "ace_free(%p)\n", dev); if (ace) { ace_teardown(ace); dev_set_drvdata(dev, NULL); kfree(ace); } } /* --------------------------------------------------------------------- * Platform Bus Support */ static int ace_probe(struct platform_device *dev) { resource_size_t physaddr = 0; int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */ u32 id = dev->id; int irq = 0; int i; dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); /* device id and bus width */ if (of_property_read_u32(dev->dev.of_node, "port-number", &id)) id = 0; if (of_find_property(dev->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; for (i = 0; i < dev->num_resources; i++) { if (dev->resource[i].flags & IORESOURCE_MEM) physaddr = dev->resource[i].start; if (dev->resource[i].flags & IORESOURCE_IRQ) irq = dev->resource[i].start; } /* Call the bus-independent setup code */ return ace_alloc(&dev->dev, id, physaddr, irq, bus_width); } /* * Platform bus remove() method */ static int ace_remove(struct platform_device *dev) { ace_free(&dev->dev); return 0; } #if defined(CONFIG_OF) /* Match table for of_platform binding */ static const struct of_device_id ace_of_match[] = { { .compatible = 
"xlnx,opb-sysace-1.00.b", }, { .compatible = "xlnx,opb-sysace-1.00.c", }, { .compatible = "xlnx,xps-sysace-1.00.a", }, { .compatible = "xlnx,sysace", }, {}, }; MODULE_DEVICE_TABLE(of, ace_of_match); #else /* CONFIG_OF */ #define ace_of_match NULL #endif /* CONFIG_OF */ static struct platform_driver ace_platform_driver = { .probe = ace_probe, .remove = ace_remove, .driver = { .name = "xsysace", .of_match_table = ace_of_match, }, }; /* --------------------------------------------------------------------- * Module init/exit routines */ static int __init ace_init(void) { int rc; ace_major = register_blkdev(ace_major, "xsysace"); if (ace_major <= 0) { rc = -ENOMEM; goto err_blk; } rc = platform_driver_register(&ace_platform_driver); if (rc) goto err_plat; pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major); return 0; err_plat: unregister_blkdev(ace_major, "xsysace"); err_blk: printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc); return rc; } module_init(ace_init); static void __exit ace_exit(void) { pr_debug("Unregistering Xilinx SystemACE driver\n"); platform_driver_unregister(&ace_platform_driver); unregister_blkdev(ace_major, "xsysace"); } module_exit(ace_exit);