aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block/rsxx/cregs.c
diff options
context:
space:
mode:
authorjosh.h.morris@us.ibm.com <josh.h.morris@us.ibm.com>2013-02-05 08:15:02 -0500
committerJens Axboe <axboe@kernel.dk>2013-02-05 08:16:05 -0500
commit8722ff8cdbfac9c1b20e67bb067b455c48cb8e93 (patch)
tree5b85a6366f1337d3d56d67a76755e82e11a8b324 /drivers/block/rsxx/cregs.c
parent478c030eecbec927d62561c5f48a4515ea0fa21a (diff)
block: IBM RamSan 70/80 device driver
This patch includes the device driver for the IBM RamSan family of PCI SSD flash storage cards. This driver will include support for the RamSan 70 and 80. The driver presents a block device for device I/O. Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/block/rsxx/cregs.c')
-rw-r--r--drivers/block/rsxx/cregs.c743
1 files changed, 743 insertions, 0 deletions
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
new file mode 100644
index 000000000000..a31fd727e804
--- /dev/null
+++ b/drivers/block/rsxx/cregs.c
@@ -0,0 +1,743 @@
1/*
2* Filename: cregs.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/completion.h>
26#include <linux/slab.h>
27
28#include "rsxx_priv.h"
29
/* Max time (ms) a single creg command may run before the timeout timer fires. */
#define CREG_TIMEOUT_MSEC	10000

/*
 * Completion callback invoked when a creg command finishes, times out,
 * or is cancelled; st is 0 on success or a negative errno
 * (-ETIMEDOUT, -ECANCELED, -EIO).
 */
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
				struct creg_cmd *cmd,
				int st);

/* One queued card-register (creg) command. */
struct creg_cmd {
	struct list_head list;		/* link in creg_ctrl.queue */
	creg_cmd_cb cb;			/* optional completion callback */
	void *cb_private;		/* opaque data handed back to cb */
	unsigned int op;		/* CREG_OP_READ or CREG_OP_WRITE */
	unsigned int addr;		/* target creg address */
	int cnt8;			/* transfer size in bytes */
	void *buf;			/* data source (write) or sink (read) */
	unsigned int stream;		/* nonzero: byte-stream (swapped) access */
	unsigned int status;		/* raw CREG_STAT value at completion */
};

/* Slab cache backing all struct creg_cmd allocations. */
static struct kmem_cache *creg_cmd_pool;
49
50
51/*------------ Private Functions --------------*/
52
/*
 * Resolve host endianness to a compile-time flag for the creg data copy
 * helpers below.
 *
 * NOTE(review): this (re)defines the widely-used name LITTLE_ENDIAN, which
 * may collide with definitions from system/byteorder headers on some
 * configurations — a driver-prefixed name would be safer; confirm against
 * the kernel's byteorder macros.
 */
#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianess!!! Aborting...
#endif
60
61static void copy_to_creg_data(struct rsxx_cardinfo *card,
62 int cnt8,
63 void *buf,
64 unsigned int stream)
65{
66 int i = 0;
67 u32 *data = buf;
68
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
70 /*
71 * Firmware implementation makes it necessary to byte swap on
72 * little endian processors.
73 */
74 if (LITTLE_ENDIAN && stream)
75 iowrite32be(data[i], card->regmap + CREG_DATA(i));
76 else
77 iowrite32(data[i], card->regmap + CREG_DATA(i));
78 }
79}
80
81
82static void copy_from_creg_data(struct rsxx_cardinfo *card,
83 int cnt8,
84 void *buf,
85 unsigned int stream)
86{
87 int i = 0;
88 u32 *data = buf;
89
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
91 /*
92 * Firmware implementation makes it necessary to byte swap on
93 * little endian processors.
94 */
95 if (LITTLE_ENDIAN && stream)
96 data[i] = ioread32be(card->regmap + CREG_DATA(i));
97 else
98 data[i] = ioread32(card->regmap + CREG_DATA(i));
99 }
100}
101
102static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
103{
104 struct creg_cmd *cmd;
105
106 /*
107 * Spin lock is needed because this can be called in atomic/interrupt
108 * context.
109 */
110 spin_lock_bh(&card->creg_ctrl.pop_lock);
111 cmd = card->creg_ctrl.active_cmd;
112 card->creg_ctrl.active_cmd = NULL;
113 spin_unlock_bh(&card->creg_ctrl.pop_lock);
114
115 return cmd;
116}
117
118static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
119{
120 iowrite32(cmd->addr, card->regmap + CREG_ADD);
121 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
122
123 if (cmd->op == CREG_OP_WRITE) {
124 if (cmd->buf)
125 copy_to_creg_data(card, cmd->cnt8,
126 cmd->buf, cmd->stream);
127 }
128
129 /* Data copy must complete before initiating the command. */
130 wmb();
131
132 /* Setting the valid bit will kick off the command. */
133 iowrite32(cmd->op, card->regmap + CREG_CMD);
134}
135
/*
 * If no command is in flight, dequeue the head of creg_ctrl.queue, mark it
 * active, arm the timeout timer, and issue it to hardware.
 *
 * NOTE(review): callers invoke this under creg_ctrl.lock (mutex) in the
 * queue/done paths but under creg_ctrl.pop_lock (spinlock) in the timeout
 * path — verify that queue/active/q_depth are consistently protected.
 */
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	/* Dequeue the head command and make it the active one. */
	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
158
159static int creg_queue_cmd(struct rsxx_cardinfo *card,
160 unsigned int op,
161 unsigned int addr,
162 unsigned int cnt8,
163 void *buf,
164 int stream,
165 creg_cmd_cb callback,
166 void *cb_private)
167{
168 struct creg_cmd *cmd;
169
170 /* Don't queue stuff up if we're halted. */
171 if (unlikely(card->halt))
172 return -EINVAL;
173
174 if (card->creg_ctrl.reset)
175 return -EAGAIN;
176
177 if (cnt8 > MAX_CREG_DATA8)
178 return -EINVAL;
179
180 cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
181 if (!cmd)
182 return -ENOMEM;
183
184 INIT_LIST_HEAD(&cmd->list);
185
186 cmd->op = op;
187 cmd->addr = addr;
188 cmd->cnt8 = cnt8;
189 cmd->buf = buf;
190 cmd->stream = stream;
191 cmd->cb = callback;
192 cmd->cb_private = cb_private;
193 cmd->status = 0;
194
195 mutex_lock(&card->creg_ctrl.lock);
196 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
197 card->creg_ctrl.q_depth++;
198 creg_kick_queue(card);
199 mutex_unlock(&card->creg_ctrl.lock);
200
201 return 0;
202}
203
/*
 * Timer callback: fires when the active creg command has not completed
 * within CREG_TIMEOUT_MSEC. Races with creg_cmd_done() via
 * pop_active_cmd(); whichever side wins owns (and frees) the command.
 */
static void creg_cmd_timed_out(unsigned long data)
{
	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
	struct creg_cmd *cmd;

	cmd = pop_active_cmd(card);
	if (cmd == NULL) {
		/* The completion path already claimed it; just count it. */
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	/* Report the timeout to the submitter, then release the command. */
	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	/*
	 * NOTE(review): the active flag and queue are manipulated here under
	 * pop_lock, while creg_cmd_done()/creg_queue_cmd() protect the same
	 * fields with the creg_ctrl.lock mutex — verify this mixed locking
	 * scheme is intentional and race-free.
	 */
	spin_lock(&card->creg_ctrl.pop_lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.pop_lock);
}
227
228
229static void creg_cmd_done(struct work_struct *work)
230{
231 struct rsxx_cardinfo *card;
232 struct creg_cmd *cmd;
233 int st = 0;
234
235 card = container_of(work, struct rsxx_cardinfo,
236 creg_ctrl.done_work);
237
238 /*
239 * The timer could not be cancelled for some reason,
240 * race to pop the active command.
241 */
242 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
243 card->creg_ctrl.creg_stats.failed_cancel_timer++;
244
245 cmd = pop_active_cmd(card);
246 if (cmd == NULL) {
247 dev_err(CARD_TO_DEV(card),
248 "Spurious creg interrupt!\n");
249 return;
250 }
251
252 card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
253 cmd->status = card->creg_ctrl.creg_stats.stat;
254 if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
255 dev_err(CARD_TO_DEV(card),
256 "Invalid status on creg command\n");
257 /*
258 * At this point we're probably reading garbage from HW. Don't
259 * do anything else that could mess up the system and let
260 * the sync function return an error.
261 */
262 st = -EIO;
263 goto creg_done;
264 } else if (cmd->status & CREG_STAT_ERROR) {
265 st = -EIO;
266 }
267
268 if ((cmd->op == CREG_OP_READ)) {
269 unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
270
271 /* Paranoid Sanity Checks */
272 if (!cmd->buf) {
273 dev_err(CARD_TO_DEV(card),
274 "Buffer not given for read.\n");
275 st = -EIO;
276 goto creg_done;
277 }
278 if (cnt8 != cmd->cnt8) {
279 dev_err(CARD_TO_DEV(card),
280 "count mismatch\n");
281 st = -EIO;
282 goto creg_done;
283 }
284
285 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
286 }
287
288creg_done:
289 if (cmd->cb)
290 cmd->cb(card, cmd, st);
291
292 kmem_cache_free(creg_cmd_pool, cmd);
293
294 mutex_lock(&card->creg_ctrl.lock);
295 card->creg_ctrl.active = 0;
296 creg_kick_queue(card);
297 mutex_unlock(&card->creg_ctrl.lock);
298}
299
/*
 * Recover the creg interface after a hang: mask creg/event interrupts,
 * cancel every queued and in-flight command with -ECANCELED, then restore
 * interrupt delivery. Serialized by reset_lock; a concurrent caller simply
 * returns, trusting the holder to finish the recovery.
 */
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	/* Block new submissions: creg_queue_cmd() now returns -EAGAIN. */
	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		"Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	mutex_lock(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	/* Cancel the in-flight command, stopping its timeout timer first. */
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	mutex_unlock(&card->creg_ctrl.lock);

	/* Re-open the interface and restore interrupt delivery. */
	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}
348
349/* Used for synchronous accesses */
/* Used for synchronous accesses: filled in by creg_cmd_done_cb(). */
struct creg_completion {
	struct completion *cmd_done;	/* signalled when the command finishes */
	int st;				/* completion status: 0 or -errno */
	u32 creg_status;		/* raw CREG_STAT hardware status word */
};
355
356static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
357 struct creg_cmd *cmd,
358 int st)
359{
360 struct creg_completion *cmd_completion;
361
362 cmd_completion = (struct creg_completion *)cmd->cb_private;
363 BUG_ON(!cmd_completion);
364
365 cmd_completion->st = st;
366 cmd_completion->creg_status = cmd->status;
367 complete(cmd_completion->cmd_done);
368}
369
370static int __issue_creg_rw(struct rsxx_cardinfo *card,
371 unsigned int op,
372 unsigned int addr,
373 unsigned int cnt8,
374 void *buf,
375 int stream,
376 unsigned int *hw_stat)
377{
378 DECLARE_COMPLETION_ONSTACK(cmd_done);
379 struct creg_completion completion;
380 unsigned long timeout;
381 int st;
382
383 INIT_COMPLETION(cmd_done);
384 completion.cmd_done = &cmd_done;
385 completion.st = 0;
386 completion.creg_status = 0;
387
388 st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
389 &completion);
390 if (st)
391 return st;
392
393 timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
394 card->creg_ctrl.q_depth) + 20000);
395
396 /*
397 * The creg interface is guaranteed to complete. It has a timeout
398 * mechanism that will kick in if hardware does not respond.
399 */
400 st = wait_for_completion_timeout(completion.cmd_done, timeout);
401 if (st == 0) {
402 /*
403 * This is really bad, because the kernel timer did not
404 * expire and notify us of a timeout!
405 */
406 dev_crit(CARD_TO_DEV(card),
407 "cregs timer failed\n");
408 creg_reset(card);
409 return -EIO;
410 }
411
412 *hw_stat = completion.creg_status;
413
414 if (completion.st) {
415 dev_warn(CARD_TO_DEV(card),
416 "creg command failed(%d x%08x)\n",
417 completion.st, addr);
418 return completion.st;
419 }
420
421 return 0;
422}
423
424static int issue_creg_rw(struct rsxx_cardinfo *card,
425 u32 addr,
426 unsigned int size8,
427 void *data,
428 int stream,
429 int read)
430{
431 unsigned int hw_stat;
432 unsigned int xfer;
433 unsigned int op;
434 int st;
435
436 op = read ? CREG_OP_READ : CREG_OP_WRITE;
437
438 do {
439 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
440
441 st = __issue_creg_rw(card, op, addr, xfer,
442 data, stream, &hw_stat);
443 if (st)
444 return st;
445
446 data = (void *)((char *)data + xfer);
447 addr += xfer;
448 size8 -= xfer;
449 } while (size8);
450
451 return 0;
452}
453
454/* ---------------------------- Public API ---------------------------------- */
455int rsxx_creg_write(struct rsxx_cardinfo *card,
456 u32 addr,
457 unsigned int size8,
458 void *data,
459 int byte_stream)
460{
461 return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
462}
463
464int rsxx_creg_read(struct rsxx_cardinfo *card,
465 u32 addr,
466 unsigned int size8,
467 void *data,
468 int byte_stream)
469{
470 return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
471}
472
473int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
474{
475 return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
476 sizeof(*state), state, 0);
477}
478
479int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
480{
481 unsigned int size;
482 int st;
483
484 st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
485 sizeof(size), &size, 0);
486 if (st)
487 return st;
488
489 *size8 = (u64)size * RSXX_HW_BLK_SIZE;
490 return 0;
491}
492
493int rsxx_get_num_targets(struct rsxx_cardinfo *card,
494 unsigned int *n_targets)
495{
496 return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
497 sizeof(*n_targets), n_targets, 0);
498}
499
500int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
501 u32 *capabilities)
502{
503 return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
504 sizeof(*capabilities), capabilities, 0);
505}
506
507int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
508{
509 return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
510 sizeof(cmd), &cmd, 0);
511}
512
513
514/*----------------- HW Log Functions -------------------*/
/*
 * Emit one hardware log message at the severity encoded in its "<#>"
 * prefix. The level persists (static) across calls so continuation
 * chunks of a long message inherit the previous message's level.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if (len > 3 && str[0] == '<' && str[2] == '>') {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
	default:
		/* Unknown levels fall back to informational. */
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
559
/*
 * Copy at most count bytes from src to dest, stopping after a terminating
 * '\0' has been copied. Returns the number of bytes copied (including the
 * '\0' when it fits). Note: dest is NOT NUL-terminated if the terminator
 * does not fit within count bytes.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int copied = 0;

	while (copied < count) {
		char c = src[copied];

		dest[copied] = c;
		copied++;
		if (c == '\0')
			break;
	}
	return copied;
}
579
580
/*
 * Completion callback for a hardware-log read: splice the received text
 * into the persistent card->log.buf, emitting a log line whenever a full
 * NUL-terminated message (or a full buffer) has accumulated, then chain
 * another read if the hardware reports more log data pending.
 */
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		/* Append where the previous chunk left off. */
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}

	}

	/* Hardware has more log text queued; fetch the next chunk. */
	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
623
624int rsxx_read_hw_log(struct rsxx_cardinfo *card)
625{
626 int st;
627
628 st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
629 sizeof(card->log.tmp), card->log.tmp,
630 1, read_hw_log_done, NULL);
631 if (st)
632 dev_err(CARD_TO_DEV(card),
633 "Failed getting log text\n");
634
635 return st;
636}
637
638/*-------------- IOCTL REG Access ------------------*/
639static int issue_reg_cmd(struct rsxx_cardinfo *card,
640 struct rsxx_reg_access *cmd,
641 int read)
642{
643 unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
644
645 return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
646 cmd->stream, &cmd->stat);
647}
648
649int rsxx_reg_access(struct rsxx_cardinfo *card,
650 struct rsxx_reg_access __user *ucmd,
651 int read)
652{
653 struct rsxx_reg_access cmd;
654 int st;
655
656 st = copy_from_user(&cmd, ucmd, sizeof(cmd));
657 if (st)
658 return -EFAULT;
659
660 st = issue_reg_cmd(card, &cmd, read);
661 if (st)
662 return st;
663
664 st = put_user(cmd.stat, &ucmd->stat);
665 if (st)
666 return -EFAULT;
667
668 if (read) {
669 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
670 if (st)
671 return -EFAULT;
672 }
673
674 return 0;
675}
676
677/*------------ Initialization & Setup --------------*/
678int rsxx_creg_setup(struct rsxx_cardinfo *card)
679{
680 card->creg_ctrl.active_cmd = NULL;
681
682 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
683 mutex_init(&card->creg_ctrl.reset_lock);
684 INIT_LIST_HEAD(&card->creg_ctrl.queue);
685 mutex_init(&card->creg_ctrl.lock);
686 spin_lock_init(&card->creg_ctrl.pop_lock);
687 setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
688 (unsigned long) card);
689
690 return 0;
691}
692
693void rsxx_creg_destroy(struct rsxx_cardinfo *card)
694{
695 struct creg_cmd *cmd;
696 struct creg_cmd *tmp;
697 int cnt = 0;
698
699 /* Cancel outstanding commands */
700 mutex_lock(&card->creg_ctrl.lock);
701 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
702 list_del(&cmd->list);
703 if (cmd->cb)
704 cmd->cb(card, cmd, -ECANCELED);
705 kmem_cache_free(creg_cmd_pool, cmd);
706 cnt++;
707 }
708
709 if (cnt)
710 dev_info(CARD_TO_DEV(card),
711 "Canceled %d queue creg commands\n", cnt);
712
713 cmd = card->creg_ctrl.active_cmd;
714 card->creg_ctrl.active_cmd = NULL;
715 if (cmd) {
716 if (timer_pending(&card->creg_ctrl.cmd_timer))
717 del_timer_sync(&card->creg_ctrl.cmd_timer);
718
719 if (cmd->cb)
720 cmd->cb(card, cmd, -ECANCELED);
721 dev_info(CARD_TO_DEV(card),
722 "Canceled active creg command\n");
723 kmem_cache_free(creg_cmd_pool, cmd);
724 }
725 mutex_unlock(&card->creg_ctrl.lock);
726
727 cancel_work_sync(&card->creg_ctrl.done_work);
728}
729
730
731int rsxx_creg_init(void)
732{
733 creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
734 if (!creg_cmd_pool)
735 return -ENOMEM;
736
737 return 0;
738}
739
/* Module teardown: release the creg command slab cache. */
void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}