path: root/drivers/macintosh/macio-adb.c
/*
 * Driver for the ADB controller in the Mac I/O (Hydra) chip.
 */
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <linux/adb.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/hydra.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <linux/init.h>
#include <linux/ioport.h>

struct preg {
	unsigned char r;
	char pad[15];
};
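
/*
 * Each ADB register is one byte wide, padded to a 16-byte stride in
 * the chip's address space; struct adb_regs below simply mirrors that
 * layout over the mapped register block.
 */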

struct adb_regs {
	struct preg intr;
	struct preg data[9];
	struct preg intr_enb;
	struct preg dcount;
	struct preg error;
	struct preg ctrl;
	struct preg autopoll;
	struct preg active_hi;
	struct preg active_lo;
	struct preg test;
};

/* Bits in intr and intr_enb registers */
#define DFB	1		/* data from bus */
#define TAG	2		/* transfer access grant */

/* Bits in dcount register */
#define HMB	0x0f		/* how many bytes */
#define APD	0x10		/* auto-poll data */

/* Bits in error register */
#define NRE	1		/* no response error */
#define DLE	2		/* data lost error */

/* Bits in ctrl register */
#define TAR	1		/* transfer access request */
#define DTB	2		/* data to bus */
#define CRE	4		/* command response expected */
#define ADB_RST	8		/* ADB reset */

/* Bits in autopoll register */
#define APE	1		/* autopoll enable */

static volatile struct adb_regs __iomem *adb;
static struct adb_request *current_req, *last_req;
static DEFINE_SPINLOCK(macio_lock);

static int macio_probe(void);
static int macio_init(void);
static irqreturn_t macio_adb_interrupt(int irq, void *arg);
static int macio_send_request(struct adb_request *req, int sync);
static int macio_adb_autopoll(int devs);
static void macio_adb_poll(void);
static int macio_adb_reset_bus(void);

struct adb_driver macio_adb_driver = {
	"MACIO",
	macio_probe,
	macio_init,
	macio_send_request,
	/*macio_write,*/
	macio_adb_autopoll,
	macio_adb_poll,
	macio_adb_reset_bus
};
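
/* Probe: report success if the device tree contains an ADB node
 * compatible with "chrp,adb0", i.e. the ADB cell of the Mac I/O
 * (Hydra) chip. */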

int macio_probe(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, "adb", "chrp,adb0");
	if (np) {
		of_node_put(np);
		return 0;
	}
	return -ENODEV;
}
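
/* Map the ADB register block, mark all 16 device addresses active,
 * enable autopolling, and install the controller interrupt handler. */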

int macio_init(void)
{
	struct device_node *adbs;
	struct resource r;
	unsigned int irq;

	adbs = of_find_compatible_node(NULL, "adb", "chrp,adb0");
	if (adbs == 0)
		return -ENXIO;

	if (of_address_to_resource(adbs, 0, &r)) {
		of_node_put(adbs);
		return -ENXIO;
	}
	adb = ioremap(r.start, sizeof(struct adb_regs));
	if (adb == NULL) {
		of_node_put(adbs);
		return -ENOMEM;
	}

	out_8(&adb->ctrl.r, 0);
	out_8(&adb->intr.r, 0);
	out_8(&adb->error.r, 0);
	out_8(&adb->active_hi.r, 0xff); /* for now, set all devices active */
	out_8(&adb->active_lo.r, 0xff);
	out_8(&adb->autopoll.r, APE);

	irq = irq_of_parse_and_map(adbs, 0);
	of_node_put(adbs);
	if (request_irq(irq, macio_adb_interrupt, 0, "ADB", (void *)0)) {
		printk(KERN_ERR "ADB: can't get irq %d\n", irq);
		return -EAGAIN;
	}
	out_8(&adb->intr_enb.r, DFB | TAG);

	printk(KERN_INFO "adb: mac-io driver 1.0 for unified ADB\n");

	return 0;
}
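
/* Set which ADB device addresses the controller autopolls: the upper
 * byte of the mask goes to active_hi, the lower byte to active_lo,
 * and autopolling stays enabled only while the mask is non-zero. */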

static int macio_adb_autopoll(int devs)
{
	unsigned long flags;
	
	spin_lock_irqsave(&macio_lock, flags);
	out_8(&adb->active_hi.r, devs >> 8);
	out_8(&adb->active_lo.r, devs);
	out_8(&adb->autopoll.r, devs ? APE : 0);
	spin_unlock_irqrestore(&macio_lock, flags);
	return 0;
}
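
/* Assert ADB_RST and busy-wait for the controller to clear it again,
 * giving up (and clearing the bit ourselves) after a fixed number of
 * iterations. */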

static int macio_adb_reset_bus(void)
{
	unsigned long flags;
	int timeout = 1000000;

	/* Hrm... we may not want to keep interrupts disabled for
	 * quite so long ... oh well, who uses this chip anyway? :)
	 * This function is seldom used, and only during boot on
	 * rare machines, so...
	 */
	spin_lock_irqsave(&macio_lock, flags);
	out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | ADB_RST);
	while ((in_8(&adb->ctrl.r) & ADB_RST) != 0) {
		if (--timeout == 0) {
			out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) & ~ADB_RST);
			spin_unlock_irqrestore(&macio_lock, flags);
			return -1;
		}
	}
	spin_unlock_irqrestore(&macio_lock, flags);
	return 0;
}

/* Send an ADB command */
static int macio_send_request(struct adb_request *req, int sync)
{
	unsigned long flags;
	int i;
	
	if (req->data[0] != ADB_PACKET)
		return -EINVAL;
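
	/* Strip the leading ADB_PACKET type byte; only the raw ADB
	 * command and data bytes are handed to the controller. */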
	
	for (i = 0; i < req->nbytes - 1; ++i)
		req->data[i] = req->data[i+1];
	--req->nbytes;
	
	req->next = NULL;
	req->sent = 0;
	req->complete = 0;
	req->reply_len = 0;

	spin_lock_irqsave(&macio_lock, flags);
	if (current_req != 0) {
		last_req->next = req;
		last_req = req;
	} else {
		current_req = last_req = req;
		out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
	}
	spin_unlock_irqrestore(&macio_lock, flags);
	
	if (sync) {
		while (!req->complete)
			macio_adb_poll();
	}

	return 0;
}
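
/* Interrupt handler.  TAG means the controller has granted bus access,
 * so the request at the head of the queue can be written to the data
 * registers and sent; DFB means data has arrived from the bus, either
 * the reply to the current command or autopolled input data. */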

static irqreturn_t macio_adb_interrupt(int irq, void *arg)
{
	int i, n, err;
	struct adb_request *req = NULL;
	unsigned char ibuf[16];
	int ibuf_len = 0;
	int complete = 0;
	int autopoll = 0;
	int handled = 0;

	spin_lock(&macio_lock);
	if (in_8(&adb->intr.r) & TAG) {
		handled = 1;
		if ((req = current_req) != 0) {
			/* put the current request in */
			for (i = 0; i < req->nbytes; ++i)
				out_8(&adb->data[i].r, req->data[i]);
			out_8(&adb->dcount.r, req->nbytes & HMB);
			req->sent = 1;
			if (req->reply_expected) {
				out_8(&adb->ctrl.r, DTB + CRE);
			} else {
				out_8(&adb->ctrl.r, DTB);
				current_req = req->next;
				complete = 1;
				if (current_req)
					out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
			}
		}
		out_8(&adb->intr.r, 0);
	}

	if (in_8(&adb->intr.r) & DFB) {
		handled = 1;
		err = in_8(&adb->error.r);
		if (current_req && current_req->sent) {
			/* this is the response to a command */
			req = current_req;
			if (err == 0) {
				req->reply_len = in_8(&adb->dcount.r) & HMB;
				for (i = 0; i < req->reply_len; ++i)
					req->reply[i] = in_8(&adb->data[i].r);
			}
			current_req = req->next;
			complete = 1;
			if (current_req)
				out_8(&adb->ctrl.r, in_8(&adb->ctrl.r) | TAR);
		} else if (err == 0) {
			/* autopoll data */
			n = in_8(&adb->dcount.r) & HMB;
			for (i = 0; i < n; ++i)
				ibuf[i] = in_8(&adb->data[i].r);
			ibuf_len = n;
			autopoll = (in_8(&adb->dcount.r) & APD) != 0;
		}
		out_8(&adb->error.r, 0);
		out_8(&adb->intr.r, 0);
	}
	spin_unlock(&macio_lock);
	if (complete && req) {
	    void (*done)(struct adb_request *) = req->done;
	    mb();
	    req->complete = 1;
	    /* Here we assume that if the request has a done callback,
	     * the struct adb_request remains valid beyond the point
	     * where req->complete is set to 1, so the callback below
	     * can still safely dereference it.
	     */
	    if (done)
		(*done)(req);
	}
	if (ibuf_len)
		adb_input(ibuf, ibuf_len, autopoll);

	return IRQ_RETVAL(handled);
}
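
/* Run the interrupt handler by hand, with interrupts disabled, if the
 * controller has something pending; used by macio_send_request() to
 * spin for completion of synchronous requests. */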

static void macio_adb_poll(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (in_8(&adb->intr.r) != 0)
		macio_adb_interrupt(0, NULL);
	local_irq_restore(flags);
}