aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/drivers/dma/dma-isa.c
blob: 5fb044b791c37ecf9342cdec533cf1d19462f1de (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
/*
 * arch/sh/drivers/dma/dma-isa.c
 *
 * Generic ISA DMA wrapper for SH DMA API
 *
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/dma.h>

/*
 * This implements a small wrapper set to make code using the old ISA DMA API
 * work with the SH DMA API. Since most of the work in the new API happens
 * at ops->xfer() time, we simply use the various set_dma_xxx() routines to
 * fill in per-channel info, and then hand this off to ops->xfer() at
 * enable_dma() time.
 *
 * For channels that are doing on-demand data transfer via cascading, the
 * channel itself will still need to be configured through the new API. As
 * such, this code is meant for only the simplest of tasks (and shouldn't be
 * used in any new drivers at all).
 *
 * NOTE: ops->xfer() is the preferred way of doing things. However, there
 * are some users of the ISA DMA API that exist in common code that we
 * don't necessarily want to go out of our way to break, so we still
 * allow for some compatibility at that level. Any new code is strongly
 * advised to run far away from the ISA DMA API and use the SH DMA API
 * directly.
 */
/*
 * claim_dma_lock - take the global ISA DMA spinlock.
 *
 * Disables local interrupts and acquires dma_spin_lock.  Returns the
 * saved IRQ flags; the caller must hand them back to release_dma_lock()
 * so the previous interrupt state can be restored.
 */
unsigned long claim_dma_lock(void)
{
	unsigned long flags;

	spin_lock_irqsave(&dma_spin_lock, flags);

	return flags;
}
EXPORT_SYMBOL(claim_dma_lock);

/*
 * release_dma_lock - drop the global ISA DMA spinlock.
 * @flags: the IRQ flags previously returned by claim_dma_lock().
 *
 * Releases dma_spin_lock and restores the saved interrupt state.
 */
void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
EXPORT_SYMBOL(release_dma_lock);

/*
 * disable_dma - ISA DMA API compatibility stub.
 *
 * Intentionally a no-op: in this wrapper the transfer is only kicked
 * off explicitly from enable_dma(), so there is nothing to tear down.
 */
void disable_dma(unsigned int chan)
{
	/* Nothing */
}
EXPORT_SYMBOL(disable_dma);

/*
 * enable_dma - commit the deferred channel setup and start the transfer.
 * @chan: channel number, used both to look up the controller and to
 *	  index into its channel array.
 *
 * All of the state stashed away by the set_dma_xxx() wrappers is handed
 * to the SH DMA API's ops->xfer() hook here.
 */
void enable_dma(unsigned int chan)
{
	struct dma_info *info = get_dma_info(chan);

	info->ops->xfer(&info->channels[chan]);
}
EXPORT_SYMBOL(enable_dma);

/*
 * clear_dma_ff - ISA DMA API compatibility stub.
 *
 * Intentionally a no-op: there is no i8237-style byte-pointer flip-flop
 * to clear here; the call exists only so legacy ISA DMA users link.
 */
void clear_dma_ff(unsigned int chan)
{
	/* Nothing */
}
EXPORT_SYMBOL(clear_dma_ff);

void set_dma_mode(unsigned int chan, char mode)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = &info->channels[chan];

	channel->mode = mode;
}
EXPORT_SYMBOL(set_dma_mode);

/*
 * set_dma_addr - record the transfer address for a channel.
 * @chan: channel number.
 * @addr: bus address for the transfer.
 *
 * Only single address mode is supported through this interface, so the
 * address lands in either the source (sar) or destination (dar) slot
 * depending on the direction encoded in the previously-set mode.
 */
void set_dma_addr(unsigned int chan, unsigned int addr)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = info->channels + chan;

	if ((channel->mode & DMA_MODE_MASK) == DMA_MODE_READ)
		channel->sar = addr;
	else
		channel->dar = addr;
}
EXPORT_SYMBOL(set_dma_addr);

void set_dma_count(unsigned int chan, unsigned int count)
{
	struct dma_info *info = get_dma_info(chan);
	struct dma_channel *channel = &info->channels[chan];

	channel->count = count;
}
EXPORT_SYMBOL(set_dma_count);