path: root/drivers/block
author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/block
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/DAC960.c  7099
-rw-r--r--  drivers/block/DAC960.h  4114
-rw-r--r--  drivers/block/Kconfig  509
-rw-r--r--  drivers/block/Kconfig.iosched  41
-rw-r--r--  drivers/block/Makefile  47
-rw-r--r--  drivers/block/acsi.c  1829
-rw-r--r--  drivers/block/acsi_slm.c  1045
-rw-r--r--  drivers/block/amiflop.c  1850
-rw-r--r--  drivers/block/aoe/Makefile  6
-rw-r--r--  drivers/block/aoe/aoe.h  165
-rw-r--r--  drivers/block/aoe/aoeblk.c  267
-rw-r--r--  drivers/block/aoe/aoechr.c  244
-rw-r--r--  drivers/block/aoe/aoecmd.c  629
-rw-r--r--  drivers/block/aoe/aoedev.c  180
-rw-r--r--  drivers/block/aoe/aoemain.c  112
-rw-r--r--  drivers/block/aoe/aoenet.c  172
-rw-r--r--  drivers/block/as-iosched.c  2136
-rw-r--r--  drivers/block/ataflop.c  2006
-rw-r--r--  drivers/block/cciss.c  2976
-rw-r--r--  drivers/block/cciss.h  266
-rw-r--r--  drivers/block/cciss_cmd.h  271
-rw-r--r--  drivers/block/cciss_scsi.c  1417
-rw-r--r--  drivers/block/cciss_scsi.h  79
-rw-r--r--  drivers/block/cfq-iosched.c  1856
-rw-r--r--  drivers/block/cpqarray.c  1850
-rw-r--r--  drivers/block/cpqarray.h  126
-rw-r--r--  drivers/block/cryptoloop.c  268
-rw-r--r--  drivers/block/deadline-iosched.c  967
-rw-r--r--  drivers/block/elevator.c  705
-rw-r--r--  drivers/block/floppy.c  4638
-rw-r--r--  drivers/block/genhd.c  685
-rw-r--r--  drivers/block/ida_cmd.h  349
-rw-r--r--  drivers/block/ida_ioctl.h  87
-rw-r--r--  drivers/block/ioctl.c  239
-rw-r--r--  drivers/block/ll_rw_blk.c  3642
-rw-r--r--  drivers/block/loop.c  1348
-rw-r--r--  drivers/block/nbd.c  731
-rw-r--r--  drivers/block/noop-iosched.c  104
-rw-r--r--  drivers/block/paride/Kconfig  305
-rw-r--r--  drivers/block/paride/Makefile  28
-rw-r--r--  drivers/block/paride/Transition-notes  128
-rw-r--r--  drivers/block/paride/aten.c  162
-rw-r--r--  drivers/block/paride/bpck.c  477
-rw-r--r--  drivers/block/paride/bpck6.c  282
-rw-r--r--  drivers/block/paride/comm.c  218
-rw-r--r--  drivers/block/paride/dstr.c  233
-rw-r--r--  drivers/block/paride/epat.c  340
-rw-r--r--  drivers/block/paride/epia.c  316
-rw-r--r--  drivers/block/paride/fit2.c  151
-rw-r--r--  drivers/block/paride/fit3.c  211
-rw-r--r--  drivers/block/paride/friq.c  276
-rw-r--r--  drivers/block/paride/frpw.c  313
-rw-r--r--  drivers/block/paride/jumbo  70
-rw-r--r--  drivers/block/paride/kbic.c  297
-rw-r--r--  drivers/block/paride/ktti.c  128
-rw-r--r--  drivers/block/paride/mkd  30
-rw-r--r--  drivers/block/paride/on20.c  153
-rw-r--r--  drivers/block/paride/on26.c  319
-rw-r--r--  drivers/block/paride/paride.c  467
-rw-r--r--  drivers/block/paride/paride.h  170
-rw-r--r--  drivers/block/paride/pcd.c  971
-rw-r--r--  drivers/block/paride/pd.c  950
-rw-r--r--  drivers/block/paride/pf.c  982
-rw-r--r--  drivers/block/paride/pg.c  723
-rw-r--r--  drivers/block/paride/ppc6lnx.c  726
-rw-r--r--  drivers/block/paride/pseudo.h  102
-rw-r--r--  drivers/block/paride/pt.c  1024
-rw-r--r--  drivers/block/pktcdvd.c  2681
-rw-r--r--  drivers/block/ps2esdi.c  1092
-rw-r--r--  drivers/block/rd.c  515
-rw-r--r--  drivers/block/scsi_ioctl.c  580
-rw-r--r--  drivers/block/smart1,2.h  278
-rw-r--r--  drivers/block/swim3.c  1154
-rw-r--r--  drivers/block/swim_iop.c  579
-rw-r--r--  drivers/block/sx8.c  1764
-rw-r--r--  drivers/block/ub.c  2215
-rw-r--r--  drivers/block/umem.c  1256
-rw-r--r--  drivers/block/viodasd.c  846
-rw-r--r--  drivers/block/xd.c  1112
-rw-r--r--  drivers/block/xd.h  135
-rw-r--r--  drivers/block/z2ram.c  429
81 files changed, 70243 insertions, 0 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
new file mode 100644
index 000000000000..423bbf2000d2
--- /dev/null
+++ b/drivers/block/DAC960.c
@@ -0,0 +1,7099 @@
1/*
2
3 Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4
5 Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
6
7 This program is free software; you may redistribute and/or modify it under
8 the terms of the GNU General Public License Version 2 as published by the
9 Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for complete details.
15
16*/
17
18
19#define DAC960_DriverVersion "2.5.47"
20#define DAC960_DriverDate "14 November 2002"
21
22
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/miscdevice.h>
26#include <linux/blkdev.h>
27#include <linux/bio.h>
28#include <linux/completion.h>
29#include <linux/delay.h>
30#include <linux/genhd.h>
31#include <linux/hdreg.h>
32#include <linux/blkpg.h>
33#include <linux/interrupt.h>
34#include <linux/ioport.h>
35#include <linux/mm.h>
36#include <linux/slab.h>
37#include <linux/proc_fs.h>
38#include <linux/reboot.h>
39#include <linux/spinlock.h>
40#include <linux/timer.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <asm/io.h>
44#include <asm/uaccess.h>
45#include "DAC960.h"
46
47#define DAC960_GAM_MINOR 252
48
49
50static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers];
51static int DAC960_ControllerCount;
52static struct proc_dir_entry *DAC960_ProcDirectoryEntry;
53
54static long disk_size(DAC960_Controller_T *p, int drive_nr)
55{
56 if (p->FirmwareType == DAC960_V1_Controller) {
57 if (drive_nr >= p->LogicalDriveCount)
58 return 0;
59 return p->V1.LogicalDriveInformation[drive_nr].
60 LogicalDriveSize;
61 } else {
62 DAC960_V2_LogicalDeviceInfo_T *i =
63 p->V2.LogicalDeviceInformation[drive_nr];
64 if (i == NULL)
65 return 0;
66 return i->ConfigurableDeviceSize;
67 }
68}
69
70static int DAC960_open(struct inode *inode, struct file *file)
71{
72 struct gendisk *disk = inode->i_bdev->bd_disk;
73 DAC960_Controller_T *p = disk->queue->queuedata;
74 int drive_nr = (long)disk->private_data;
75
76 if (p->FirmwareType == DAC960_V1_Controller) {
77 if (p->V1.LogicalDriveInformation[drive_nr].
78 LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
79 return -ENXIO;
80 } else {
81 DAC960_V2_LogicalDeviceInfo_T *i =
82 p->V2.LogicalDeviceInformation[drive_nr];
83 if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
84 return -ENXIO;
85 }
86
87 check_disk_change(inode->i_bdev);
88
89 if (!get_capacity(p->disks[drive_nr]))
90 return -ENXIO;
91 return 0;
92}
93
94static int DAC960_ioctl(struct inode *inode, struct file *file,
95 unsigned int cmd, unsigned long arg)
96{
97 struct gendisk *disk = inode->i_bdev->bd_disk;
98 DAC960_Controller_T *p = disk->queue->queuedata;
99 int drive_nr = (long)disk->private_data;
100 struct hd_geometry g;
101 struct hd_geometry __user *loc = (struct hd_geometry __user *)arg;
102
103 if (cmd != HDIO_GETGEO || !loc)
104 return -EINVAL;
105
106 if (p->FirmwareType == DAC960_V1_Controller) {
107 g.heads = p->V1.GeometryTranslationHeads;
108 g.sectors = p->V1.GeometryTranslationSectors;
109 g.cylinders = p->V1.LogicalDriveInformation[drive_nr].
110 LogicalDriveSize / (g.heads * g.sectors);
111 } else {
112 DAC960_V2_LogicalDeviceInfo_T *i =
113 p->V2.LogicalDeviceInformation[drive_nr];
114 switch (i->DriveGeometry) {
115 case DAC960_V2_Geometry_128_32:
116 g.heads = 128;
117 g.sectors = 32;
118 break;
119 case DAC960_V2_Geometry_255_63:
120 g.heads = 255;
121 g.sectors = 63;
122 break;
123 default:
124 DAC960_Error("Illegal Logical Device Geometry %d\n",
125 p, i->DriveGeometry);
126 return -EINVAL;
127 }
128
129 g.cylinders = i->ConfigurableDeviceSize / (g.heads * g.sectors);
130 }
131
132 g.start = get_start_sect(inode->i_bdev);
133
134 return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
135}
136
137static int DAC960_media_changed(struct gendisk *disk)
138{
139 DAC960_Controller_T *p = disk->queue->queuedata;
140 int drive_nr = (long)disk->private_data;
141
142 if (!p->LogicalDriveInitiallyAccessible[drive_nr])
143 return 1;
144 return 0;
145}
146
147static int DAC960_revalidate_disk(struct gendisk *disk)
148{
149 DAC960_Controller_T *p = disk->queue->queuedata;
150 int unit = (long)disk->private_data;
151
152 set_capacity(disk, disk_size(p, unit));
153 return 0;
154}
155
156static struct block_device_operations DAC960_BlockDeviceOperations = {
157 .owner = THIS_MODULE,
158 .open = DAC960_open,
159 .ioctl = DAC960_ioctl,
160 .media_changed = DAC960_media_changed,
161 .revalidate_disk = DAC960_revalidate_disk,
162};
163
164
165/*
166 DAC960_AnnounceDriver announces the Driver Version and Date, Author's Name,
167 Copyright Notice, and Electronic Mail Address.
168*/
169
170static void DAC960_AnnounceDriver(DAC960_Controller_T *Controller)
171{
172 DAC960_Announce("***** DAC960 RAID Driver Version "
173 DAC960_DriverVersion " of "
174 DAC960_DriverDate " *****\n", Controller);
175 DAC960_Announce("Copyright 1998-2001 by Leonard N. Zubkoff "
176 "<lnz@dandelion.com>\n", Controller);
177}
178
179
180/*
181 DAC960_Failure prints a standardized error message, and then returns false.
182*/
183
184static boolean DAC960_Failure(DAC960_Controller_T *Controller,
185 unsigned char *ErrorMessage)
186{
187 DAC960_Error("While configuring DAC960 PCI RAID Controller at\n",
188 Controller);
189 if (Controller->IO_Address == 0)
190 DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
191 "PCI Address 0x%X\n", Controller,
192 Controller->Bus, Controller->Device,
193 Controller->Function, Controller->PCI_Address);
194 else DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
195 "0x%X PCI Address 0x%X\n", Controller,
196 Controller->Bus, Controller->Device,
197 Controller->Function, Controller->IO_Address,
198 Controller->PCI_Address);
199 DAC960_Error("%s FAILED - DETACHING\n", Controller, ErrorMessage);
200 return false;
201}
202
203/*
204 init_dma_loaf() and slice_dma_loaf() are helper functions for
205 aggregating the dma-mapped memory for a well-known collection of
206 data structures that are of different lengths.
207
208 These routines don't guarantee any alignment. The caller must
209 include any space needed for alignment in the sizes of the structures
210 that are passed in.
211 */
212
213static boolean init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf,
214 size_t len)
215{
216 void *cpu_addr;
217 dma_addr_t dma_handle;
218
219 cpu_addr = pci_alloc_consistent(dev, len, &dma_handle);
220 if (cpu_addr == NULL)
221 return false;
222
223 loaf->cpu_free = loaf->cpu_base = cpu_addr;
 224 loaf->dma_free = loaf->dma_base = dma_handle;
225 loaf->length = len;
226 memset(cpu_addr, 0, len);
227 return true;
228}
229
230static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
231 dma_addr_t *dma_handle)
232{
233 void *cpu_end = loaf->cpu_free + len;
234 void *cpu_addr = loaf->cpu_free;
235
236 if (cpu_end > loaf->cpu_base + loaf->length)
237 BUG();
238 *dma_handle = loaf->dma_free;
239 loaf->cpu_free = cpu_end;
240 loaf->dma_free += len;
241 return cpu_addr;
242}
243
244static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle)
245{
246 if (loaf_handle->cpu_base != NULL)
247 pci_free_consistent(dev, loaf_handle->length,
248 loaf_handle->cpu_base, loaf_handle->dma_base);
249}
250
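/*
  Illustrative usage sketch of the dma_loaf helpers above (not part of the
  original driver source): size one loaf for several differently-sized
  structures, then carve them out with slice_dma_loaf().  The structure
  names below are only examples of the pattern used later in this file.

      struct dma_loaf loaf;
      dma_addr_t enquiry_dma;
      DAC960_V1_Enquiry_T *enquiry;

      if (!init_dma_loaf(pdev, &loaf, sizeof(DAC960_V1_Enquiry_T) +
                                      sizeof(DAC960_V1_ErrorTable_T)))
              return false;
      enquiry = slice_dma_loaf(&loaf, sizeof(DAC960_V1_Enquiry_T),
                               &enquiry_dma);
      ...
      free_dma_loaf(pdev, &loaf);
*/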
251
252/*
253 DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary
254 data structures for Controller. It returns true on success and false on
255 failure.
256*/
257
258static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
259{
260 int CommandAllocationLength, CommandAllocationGroupSize;
261 int CommandsRemaining = 0, CommandIdentifier, CommandGroupByteCount;
262 void *AllocationPointer = NULL;
263 void *ScatterGatherCPU = NULL;
264 dma_addr_t ScatterGatherDMA;
265 struct pci_pool *ScatterGatherPool;
266 void *RequestSenseCPU = NULL;
267 dma_addr_t RequestSenseDMA;
268 struct pci_pool *RequestSensePool = NULL;
269
270 if (Controller->FirmwareType == DAC960_V1_Controller)
271 {
272 CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
273 CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
274 ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather",
275 Controller->PCIDevice,
276 DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
277 sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
278 if (ScatterGatherPool == NULL)
279 return DAC960_Failure(Controller,
280 "AUXILIARY STRUCTURE CREATION (SG)");
281 Controller->ScatterGatherPool = ScatterGatherPool;
282 }
283 else
284 {
285 CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
286 CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
287 ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather",
288 Controller->PCIDevice,
289 DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
290 sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
291 if (ScatterGatherPool == NULL)
292 return DAC960_Failure(Controller,
293 "AUXILIARY STRUCTURE CREATION (SG)");
294 RequestSensePool = pci_pool_create("DAC960_V2_RequestSense",
295 Controller->PCIDevice, sizeof(DAC960_SCSI_RequestSense_T),
296 sizeof(int), 0);
297 if (RequestSensePool == NULL) {
298 pci_pool_destroy(ScatterGatherPool);
299 return DAC960_Failure(Controller,
300 "AUXILIARY STRUCTURE CREATION (SG)");
301 }
302 Controller->ScatterGatherPool = ScatterGatherPool;
303 Controller->V2.RequestSensePool = RequestSensePool;
304 }
305 Controller->CommandAllocationGroupSize = CommandAllocationGroupSize;
306 Controller->FreeCommands = NULL;
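  /*
   * Commands are not kmalloc'd individually: they are carved out of
   * allocation groups of up to CommandAllocationGroupSize commands.
   * DAC960_DestroyAuxiliaryStructures relies on this grouping when it
   * frees the memory (CommandIdentifier % CommandAllocationGroupSize == 1
   * marks the start of a group).
   */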
307 for (CommandIdentifier = 1;
308 CommandIdentifier <= Controller->DriverQueueDepth;
309 CommandIdentifier++)
310 {
311 DAC960_Command_T *Command;
312 if (--CommandsRemaining <= 0)
313 {
314 CommandsRemaining =
315 Controller->DriverQueueDepth - CommandIdentifier + 1;
316 if (CommandsRemaining > CommandAllocationGroupSize)
317 CommandsRemaining = CommandAllocationGroupSize;
318 CommandGroupByteCount =
319 CommandsRemaining * CommandAllocationLength;
320 AllocationPointer = kmalloc(CommandGroupByteCount, GFP_ATOMIC);
321 if (AllocationPointer == NULL)
322 return DAC960_Failure(Controller,
323 "AUXILIARY STRUCTURE CREATION");
324 memset(AllocationPointer, 0, CommandGroupByteCount);
325 }
326 Command = (DAC960_Command_T *) AllocationPointer;
327 AllocationPointer += CommandAllocationLength;
328 Command->CommandIdentifier = CommandIdentifier;
329 Command->Controller = Controller;
330 Command->Next = Controller->FreeCommands;
331 Controller->FreeCommands = Command;
332 Controller->Commands[CommandIdentifier-1] = Command;
333 ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC,
334 &ScatterGatherDMA);
335 if (ScatterGatherCPU == NULL)
336 return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
337
338 if (RequestSensePool != NULL) {
339 RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC,
340 &RequestSenseDMA);
341 if (RequestSenseCPU == NULL) {
342 pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
343 ScatterGatherDMA);
344 return DAC960_Failure(Controller,
345 "AUXILIARY STRUCTURE CREATION");
346 }
347 }
348 if (Controller->FirmwareType == DAC960_V1_Controller) {
349 Command->cmd_sglist = Command->V1.ScatterList;
350 Command->V1.ScatterGatherList =
351 (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
352 Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
353 } else {
354 Command->cmd_sglist = Command->V2.ScatterList;
355 Command->V2.ScatterGatherList =
356 (DAC960_V2_ScatterGatherSegment_T *)ScatterGatherCPU;
357 Command->V2.ScatterGatherListDMA = ScatterGatherDMA;
358 Command->V2.RequestSense =
359 (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
360 Command->V2.RequestSenseDMA = RequestSenseDMA;
361 }
362 }
363 return true;
364}
365
366
367/*
368 DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data
369 structures for Controller.
370*/
371
372static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
373{
374 int i;
375 struct pci_pool *ScatterGatherPool = Controller->ScatterGatherPool;
376 struct pci_pool *RequestSensePool = NULL;
377 void *ScatterGatherCPU;
378 dma_addr_t ScatterGatherDMA;
379 void *RequestSenseCPU;
380 dma_addr_t RequestSenseDMA;
381 DAC960_Command_T *CommandGroup = NULL;
382
383
384 if (Controller->FirmwareType == DAC960_V2_Controller)
385 RequestSensePool = Controller->V2.RequestSensePool;
386
387 Controller->FreeCommands = NULL;
388 for (i = 0; i < Controller->DriverQueueDepth; i++)
389 {
390 DAC960_Command_T *Command = Controller->Commands[i];
391
392 if (Command == NULL)
393 continue;
394
395 if (Controller->FirmwareType == DAC960_V1_Controller) {
396 ScatterGatherCPU = (void *)Command->V1.ScatterGatherList;
397 ScatterGatherDMA = Command->V1.ScatterGatherListDMA;
398 RequestSenseCPU = NULL;
399 RequestSenseDMA = (dma_addr_t)0;
400 } else {
401 ScatterGatherCPU = (void *)Command->V2.ScatterGatherList;
402 ScatterGatherDMA = Command->V2.ScatterGatherListDMA;
403 RequestSenseCPU = (void *)Command->V2.RequestSense;
404 RequestSenseDMA = Command->V2.RequestSenseDMA;
405 }
406 if (ScatterGatherCPU != NULL)
407 pci_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
408 if (RequestSenseCPU != NULL)
409 pci_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
410
411 if ((Command->CommandIdentifier
412 % Controller->CommandAllocationGroupSize) == 1) {
413 /*
414 * We can't free the group of commands until all of the
415 * request sense and scatter gather dma structures are free.
416 * Remember the beginning of the group, but don't free it
417 * until we've reached the beginning of the next group.
418 */
419 if (CommandGroup != NULL)
420 kfree(CommandGroup);
421 CommandGroup = Command;
422 }
423 Controller->Commands[i] = NULL;
424 }
425 if (CommandGroup != NULL)
426 kfree(CommandGroup);
427
428 if (Controller->CombinedStatusBuffer != NULL)
429 {
430 kfree(Controller->CombinedStatusBuffer);
431 Controller->CombinedStatusBuffer = NULL;
432 Controller->CurrentStatusBuffer = NULL;
433 }
434
435 if (ScatterGatherPool != NULL)
436 pci_pool_destroy(ScatterGatherPool);
437 if (Controller->FirmwareType == DAC960_V1_Controller) return;
438
439 if (RequestSensePool != NULL)
440 pci_pool_destroy(RequestSensePool);
441
442 for (i = 0; i < DAC960_MaxLogicalDrives; i++)
443 if (Controller->V2.LogicalDeviceInformation[i] != NULL)
444 {
445 kfree(Controller->V2.LogicalDeviceInformation[i]);
446 Controller->V2.LogicalDeviceInformation[i] = NULL;
447 }
448
449 for (i = 0; i < DAC960_V2_MaxPhysicalDevices; i++)
450 {
451 if (Controller->V2.PhysicalDeviceInformation[i] != NULL)
452 {
453 kfree(Controller->V2.PhysicalDeviceInformation[i]);
454 Controller->V2.PhysicalDeviceInformation[i] = NULL;
455 }
456 if (Controller->V2.InquiryUnitSerialNumber[i] != NULL)
457 {
458 kfree(Controller->V2.InquiryUnitSerialNumber[i]);
459 Controller->V2.InquiryUnitSerialNumber[i] = NULL;
460 }
461 }
462}
463
464
465/*
466 DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1
467 Firmware Controllers.
468*/
469
470static inline void DAC960_V1_ClearCommand(DAC960_Command_T *Command)
471{
472 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
473 memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
474 Command->V1.CommandStatus = 0;
475}
476
477
478/*
479 DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2
480 Firmware Controllers.
481*/
482
483static inline void DAC960_V2_ClearCommand(DAC960_Command_T *Command)
484{
485 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
486 memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
487 Command->V2.CommandStatus = 0;
488}
489
490
491/*
492 DAC960_AllocateCommand allocates a Command structure from Controller's
493 free list. During driver initialization, a special initialization command
494 has been placed on the free list to guarantee that command allocation can
495 never fail.
496*/
497
498static inline DAC960_Command_T *DAC960_AllocateCommand(DAC960_Controller_T
499 *Controller)
500{
501 DAC960_Command_T *Command = Controller->FreeCommands;
502 if (Command == NULL) return NULL;
503 Controller->FreeCommands = Command->Next;
504 Command->Next = NULL;
505 return Command;
506}
507
508
509/*
510 DAC960_DeallocateCommand deallocates Command, returning it to Controller's
511 free list.
512*/
513
514static inline void DAC960_DeallocateCommand(DAC960_Command_T *Command)
515{
516 DAC960_Controller_T *Controller = Command->Controller;
517
518 Command->Request = NULL;
519 Command->Next = Controller->FreeCommands;
520 Controller->FreeCommands = Command;
521}
522
523
524/*
525 DAC960_WaitForCommand waits for a wake_up on Controller's Command Wait Queue.
526*/
527
528static void DAC960_WaitForCommand(DAC960_Controller_T *Controller)
529{
530 spin_unlock_irq(&Controller->queue_lock);
531 __wait_event(Controller->CommandWaitQueue, Controller->FreeCommands);
532 spin_lock_irq(&Controller->queue_lock);
533}
534
535
536/*
537 DAC960_BA_QueueCommand queues Command for DAC960 BA Series Controllers.
538*/
539
540static void DAC960_BA_QueueCommand(DAC960_Command_T *Command)
541{
542 DAC960_Controller_T *Controller = Command->Controller;
543 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
544 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
545 DAC960_V2_CommandMailbox_T *NextCommandMailbox =
546 Controller->V2.NextCommandMailbox;
547 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
548 DAC960_BA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
549 if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
550 Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
551 DAC960_BA_MemoryMailboxNewCommand(ControllerBaseAddress);
552 Controller->V2.PreviousCommandMailbox2 =
553 Controller->V2.PreviousCommandMailbox1;
554 Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
555 if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
556 NextCommandMailbox = Controller->V2.FirstCommandMailbox;
557 Controller->V2.NextCommandMailbox = NextCommandMailbox;
558}
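/*
  This routine and the LP/LA/PG variants below all follow the same memory
  mailbox ring protocol: write the command into NextCommandMailbox, ring the
  doorbell only when one of the two previously written mailboxes has been
  consumed (Words[0] == 0), remember the last two mailboxes used, and wrap
  NextCommandMailbox back to FirstCommandMailbox once it passes
  LastCommandMailbox.
*/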
559
560
561/*
562 DAC960_LP_QueueCommand queues Command for DAC960 LP Series Controllers.
563*/
564
565static void DAC960_LP_QueueCommand(DAC960_Command_T *Command)
566{
567 DAC960_Controller_T *Controller = Command->Controller;
568 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
569 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
570 DAC960_V2_CommandMailbox_T *NextCommandMailbox =
571 Controller->V2.NextCommandMailbox;
572 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
573 DAC960_LP_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
574 if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
575 Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
576 DAC960_LP_MemoryMailboxNewCommand(ControllerBaseAddress);
577 Controller->V2.PreviousCommandMailbox2 =
578 Controller->V2.PreviousCommandMailbox1;
579 Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
580 if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
581 NextCommandMailbox = Controller->V2.FirstCommandMailbox;
582 Controller->V2.NextCommandMailbox = NextCommandMailbox;
583}
584
585
586/*
587 DAC960_LA_QueueCommandDualMode queues Command for DAC960 LA Series
588 Controllers with Dual Mode Firmware.
589*/
590
591static void DAC960_LA_QueueCommandDualMode(DAC960_Command_T *Command)
592{
593 DAC960_Controller_T *Controller = Command->Controller;
594 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
595 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
596 DAC960_V1_CommandMailbox_T *NextCommandMailbox =
597 Controller->V1.NextCommandMailbox;
598 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
599 DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
600 if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
601 Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
602 DAC960_LA_MemoryMailboxNewCommand(ControllerBaseAddress);
603 Controller->V1.PreviousCommandMailbox2 =
604 Controller->V1.PreviousCommandMailbox1;
605 Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
606 if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
607 NextCommandMailbox = Controller->V1.FirstCommandMailbox;
608 Controller->V1.NextCommandMailbox = NextCommandMailbox;
609}
610
611
612/*
613 DAC960_LA_QueueCommandSingleMode queues Command for DAC960 LA Series
614 Controllers with Single Mode Firmware.
615*/
616
617static void DAC960_LA_QueueCommandSingleMode(DAC960_Command_T *Command)
618{
619 DAC960_Controller_T *Controller = Command->Controller;
620 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
621 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
622 DAC960_V1_CommandMailbox_T *NextCommandMailbox =
623 Controller->V1.NextCommandMailbox;
624 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
625 DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
626 if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
627 Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
628 DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
629 Controller->V1.PreviousCommandMailbox2 =
630 Controller->V1.PreviousCommandMailbox1;
631 Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
632 if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
633 NextCommandMailbox = Controller->V1.FirstCommandMailbox;
634 Controller->V1.NextCommandMailbox = NextCommandMailbox;
635}
636
637
638/*
639 DAC960_PG_QueueCommandDualMode queues Command for DAC960 PG Series
640 Controllers with Dual Mode Firmware.
641*/
642
643static void DAC960_PG_QueueCommandDualMode(DAC960_Command_T *Command)
644{
645 DAC960_Controller_T *Controller = Command->Controller;
646 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
647 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
648 DAC960_V1_CommandMailbox_T *NextCommandMailbox =
649 Controller->V1.NextCommandMailbox;
650 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
651 DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
652 if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
653 Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
654 DAC960_PG_MemoryMailboxNewCommand(ControllerBaseAddress);
655 Controller->V1.PreviousCommandMailbox2 =
656 Controller->V1.PreviousCommandMailbox1;
657 Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
658 if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
659 NextCommandMailbox = Controller->V1.FirstCommandMailbox;
660 Controller->V1.NextCommandMailbox = NextCommandMailbox;
661}
662
663
664/*
665 DAC960_PG_QueueCommandSingleMode queues Command for DAC960 PG Series
666 Controllers with Single Mode Firmware.
667*/
668
669static void DAC960_PG_QueueCommandSingleMode(DAC960_Command_T *Command)
670{
671 DAC960_Controller_T *Controller = Command->Controller;
672 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
673 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
674 DAC960_V1_CommandMailbox_T *NextCommandMailbox =
675 Controller->V1.NextCommandMailbox;
676 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
677 DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
678 if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
679 Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
680 DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
681 Controller->V1.PreviousCommandMailbox2 =
682 Controller->V1.PreviousCommandMailbox1;
683 Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
684 if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
685 NextCommandMailbox = Controller->V1.FirstCommandMailbox;
686 Controller->V1.NextCommandMailbox = NextCommandMailbox;
687}
688
689
690/*
691 DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers.
692*/
693
694static void DAC960_PD_QueueCommand(DAC960_Command_T *Command)
695{
696 DAC960_Controller_T *Controller = Command->Controller;
697 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
698 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
699 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
700 while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
701 udelay(1);
702 DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
703 DAC960_PD_NewCommand(ControllerBaseAddress);
704}
705
706
707/*
708 DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers.
709*/
710
711static void DAC960_P_QueueCommand(DAC960_Command_T *Command)
712{
713 DAC960_Controller_T *Controller = Command->Controller;
714 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
715 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
716 CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
717 switch (CommandMailbox->Common.CommandOpcode)
718 {
719 case DAC960_V1_Enquiry:
720 CommandMailbox->Common.CommandOpcode = DAC960_V1_Enquiry_Old;
721 break;
722 case DAC960_V1_GetDeviceState:
723 CommandMailbox->Common.CommandOpcode = DAC960_V1_GetDeviceState_Old;
724 break;
725 case DAC960_V1_Read:
726 CommandMailbox->Common.CommandOpcode = DAC960_V1_Read_Old;
727 DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
728 break;
729 case DAC960_V1_Write:
730 CommandMailbox->Common.CommandOpcode = DAC960_V1_Write_Old;
731 DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
732 break;
733 case DAC960_V1_ReadWithScatterGather:
734 CommandMailbox->Common.CommandOpcode =
735 DAC960_V1_ReadWithScatterGather_Old;
736 DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
737 break;
738 case DAC960_V1_WriteWithScatterGather:
739 CommandMailbox->Common.CommandOpcode =
740 DAC960_V1_WriteWithScatterGather_Old;
741 DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
742 break;
743 default:
744 break;
745 }
746 while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
747 udelay(1);
748 DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
749 DAC960_PD_NewCommand(ControllerBaseAddress);
750}
751
752
753/*
754 DAC960_ExecuteCommand executes Command and waits for completion.
755*/
756
757static void DAC960_ExecuteCommand(DAC960_Command_T *Command)
758{
759 DAC960_Controller_T *Controller = Command->Controller;
760 DECLARE_COMPLETION(Completion);
761 unsigned long flags;
762 Command->Completion = &Completion;
763
764 spin_lock_irqsave(&Controller->queue_lock, flags);
765 DAC960_QueueCommand(Command);
766 spin_unlock_irqrestore(&Controller->queue_lock, flags);
767
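  /*
   * In interrupt context this routine cannot sleep, so it only queues the
   * command and returns; the caller must not assume the command has
   * completed.  In process context it blocks on the completion.
   */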
768 if (in_interrupt())
769 return;
770 wait_for_completion(&Completion);
771}
772
773
774/*
775 DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3
776 Command and waits for completion. It returns true on success and false
777 on failure.
778*/
779
780static boolean DAC960_V1_ExecuteType3(DAC960_Controller_T *Controller,
781 DAC960_V1_CommandOpcode_T CommandOpcode,
782 dma_addr_t DataDMA)
783{
784 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
785 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
786 DAC960_V1_CommandStatus_T CommandStatus;
787 DAC960_V1_ClearCommand(Command);
788 Command->CommandType = DAC960_ImmediateCommand;
789 CommandMailbox->Type3.CommandOpcode = CommandOpcode;
790 CommandMailbox->Type3.BusAddress = DataDMA;
791 DAC960_ExecuteCommand(Command);
792 CommandStatus = Command->V1.CommandStatus;
793 DAC960_DeallocateCommand(Command);
794 return (CommandStatus == DAC960_V1_NormalCompletion);
795}
796
797
798/*
 799 DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller Type 3B
800 Command and waits for completion. It returns true on success and false
801 on failure.
802*/
803
804static boolean DAC960_V1_ExecuteType3B(DAC960_Controller_T *Controller,
805 DAC960_V1_CommandOpcode_T CommandOpcode,
806 unsigned char CommandOpcode2,
807 dma_addr_t DataDMA)
808{
809 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
810 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
811 DAC960_V1_CommandStatus_T CommandStatus;
812 DAC960_V1_ClearCommand(Command);
813 Command->CommandType = DAC960_ImmediateCommand;
814 CommandMailbox->Type3B.CommandOpcode = CommandOpcode;
815 CommandMailbox->Type3B.CommandOpcode2 = CommandOpcode2;
816 CommandMailbox->Type3B.BusAddress = DataDMA;
817 DAC960_ExecuteCommand(Command);
818 CommandStatus = Command->V1.CommandStatus;
819 DAC960_DeallocateCommand(Command);
820 return (CommandStatus == DAC960_V1_NormalCompletion);
821}
822
823
824/*
825 DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D
826 Command and waits for completion. It returns true on success and false
827 on failure.
828*/
829
830static boolean DAC960_V1_ExecuteType3D(DAC960_Controller_T *Controller,
831 DAC960_V1_CommandOpcode_T CommandOpcode,
832 unsigned char Channel,
833 unsigned char TargetID,
834 dma_addr_t DataDMA)
835{
836 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
837 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
838 DAC960_V1_CommandStatus_T CommandStatus;
839 DAC960_V1_ClearCommand(Command);
840 Command->CommandType = DAC960_ImmediateCommand;
841 CommandMailbox->Type3D.CommandOpcode = CommandOpcode;
842 CommandMailbox->Type3D.Channel = Channel;
843 CommandMailbox->Type3D.TargetID = TargetID;
844 CommandMailbox->Type3D.BusAddress = DataDMA;
845 DAC960_ExecuteCommand(Command);
846 CommandStatus = Command->V1.CommandStatus;
847 DAC960_DeallocateCommand(Command);
848 return (CommandStatus == DAC960_V1_NormalCompletion);
849}
850
851
852/*
853 DAC960_V2_GeneralInfo executes a DAC960 V2 Firmware General Information
854 Reading IOCTL Command and waits for completion. It returns true on success
855 and false on failure.
856
 857 Return data is placed in the controller's HealthStatusBuffer, which is dma-able memory.
858*/
859
860static boolean DAC960_V2_GeneralInfo(DAC960_Controller_T *Controller)
861{
862 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
863 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
864 DAC960_V2_CommandStatus_T CommandStatus;
865 DAC960_V2_ClearCommand(Command);
866 Command->CommandType = DAC960_ImmediateCommand;
867 CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
868 CommandMailbox->Common.CommandControlBits
869 .DataTransferControllerToHost = true;
870 CommandMailbox->Common.CommandControlBits
871 .NoAutoRequestSense = true;
872 CommandMailbox->Common.DataTransferSize = sizeof(DAC960_V2_HealthStatusBuffer_T);
873 CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus;
874 CommandMailbox->Common.DataTransferMemoryAddress
875 .ScatterGatherSegments[0]
876 .SegmentDataPointer =
877 Controller->V2.HealthStatusBufferDMA;
878 CommandMailbox->Common.DataTransferMemoryAddress
879 .ScatterGatherSegments[0]
880 .SegmentByteCount =
881 CommandMailbox->Common.DataTransferSize;
882 DAC960_ExecuteCommand(Command);
883 CommandStatus = Command->V2.CommandStatus;
884 DAC960_DeallocateCommand(Command);
885 return (CommandStatus == DAC960_V2_NormalCompletion);
886}
887
888
889/*
 890 DAC960_V2_NewControllerInfo executes a DAC960 V2 Firmware Controller
891 Information Reading IOCTL Command and waits for completion. It returns
892 true on success and false on failure.
893
894 Data is returned in the controller's V2.NewControllerInformation dma-able
895 memory buffer.
896*/
897
898static boolean DAC960_V2_NewControllerInfo(DAC960_Controller_T *Controller)
899{
900 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
901 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
902 DAC960_V2_CommandStatus_T CommandStatus;
903 DAC960_V2_ClearCommand(Command);
904 Command->CommandType = DAC960_ImmediateCommand;
905 CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
906 CommandMailbox->ControllerInfo.CommandControlBits
907 .DataTransferControllerToHost = true;
908 CommandMailbox->ControllerInfo.CommandControlBits
909 .NoAutoRequestSense = true;
910 CommandMailbox->ControllerInfo.DataTransferSize = sizeof(DAC960_V2_ControllerInfo_T);
911 CommandMailbox->ControllerInfo.ControllerNumber = 0;
912 CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
913 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
914 .ScatterGatherSegments[0]
915 .SegmentDataPointer =
916 Controller->V2.NewControllerInformationDMA;
917 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
918 .ScatterGatherSegments[0]
919 .SegmentByteCount =
920 CommandMailbox->ControllerInfo.DataTransferSize;
921 DAC960_ExecuteCommand(Command);
922 CommandStatus = Command->V2.CommandStatus;
923 DAC960_DeallocateCommand(Command);
924 return (CommandStatus == DAC960_V2_NormalCompletion);
925}
926
927
928/*
 929 DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller Logical
930 Device Information Reading IOCTL Command and waits for completion. It
931 returns true on success and false on failure.
932
 933 Data is returned in the controller's V2.NewLogicalDeviceInformation dma-able memory buffer.
934*/
935
936static boolean DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *Controller,
937 unsigned short LogicalDeviceNumber)
938{
939 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
940 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
941 DAC960_V2_CommandStatus_T CommandStatus;
942
943 DAC960_V2_ClearCommand(Command);
944 Command->CommandType = DAC960_ImmediateCommand;
945 CommandMailbox->LogicalDeviceInfo.CommandOpcode =
946 DAC960_V2_IOCTL;
947 CommandMailbox->LogicalDeviceInfo.CommandControlBits
948 .DataTransferControllerToHost = true;
949 CommandMailbox->LogicalDeviceInfo.CommandControlBits
950 .NoAutoRequestSense = true;
951 CommandMailbox->LogicalDeviceInfo.DataTransferSize =
952 sizeof(DAC960_V2_LogicalDeviceInfo_T);
953 CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
954 LogicalDeviceNumber;
955 CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = DAC960_V2_GetLogicalDeviceInfoValid;
956 CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
957 .ScatterGatherSegments[0]
958 .SegmentDataPointer =
959 Controller->V2.NewLogicalDeviceInformationDMA;
960 CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
961 .ScatterGatherSegments[0]
962 .SegmentByteCount =
963 CommandMailbox->LogicalDeviceInfo.DataTransferSize;
964 DAC960_ExecuteCommand(Command);
965 CommandStatus = Command->V2.CommandStatus;
966 DAC960_DeallocateCommand(Command);
967 return (CommandStatus == DAC960_V2_NormalCompletion);
968}
969
970
971/*
 972 DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller "Read
973 Physical Device Information" IOCTL Command and waits for completion. It
974 returns true on success and false on failure.
975
976 The Channel, TargetID, LogicalUnit arguments should be 0 the first time
977 this function is called for a given controller. This will return data
978 for the "first" device on that controller. The returned data includes a
979 Channel, TargetID, LogicalUnit that can be passed in to this routine to
980 get data for the NEXT device on that controller.
981
982 Data is stored in the controller's V2.NewPhysicalDeviceInfo dma-able
983 memory buffer.
984
985*/
986
987static boolean DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *Controller,
988 unsigned char Channel,
989 unsigned char TargetID,
990 unsigned char LogicalUnit)
991{
992 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
993 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
994 DAC960_V2_CommandStatus_T CommandStatus;
995
996 DAC960_V2_ClearCommand(Command);
997 Command->CommandType = DAC960_ImmediateCommand;
998 CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
999 CommandMailbox->PhysicalDeviceInfo.CommandControlBits
1000 .DataTransferControllerToHost = true;
1001 CommandMailbox->PhysicalDeviceInfo.CommandControlBits
1002 .NoAutoRequestSense = true;
1003 CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
1004 sizeof(DAC960_V2_PhysicalDeviceInfo_T);
1005 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
1006 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
1007 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
1008 CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
1009 DAC960_V2_GetPhysicalDeviceInfoValid;
1010 CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
1011 .ScatterGatherSegments[0]
1012 .SegmentDataPointer =
1013 Controller->V2.NewPhysicalDeviceInformationDMA;
1014 CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
1015 .ScatterGatherSegments[0]
1016 .SegmentByteCount =
1017 CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
1018 DAC960_ExecuteCommand(Command);
1019 CommandStatus = Command->V2.CommandStatus;
1020 DAC960_DeallocateCommand(Command);
1021 return (CommandStatus == DAC960_V2_NormalCompletion);
1022}
1023
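/*
  Illustrative scan loop (a sketch, not code from the original driver),
  following the comment above DAC960_V2_NewPhysicalDeviceInfo: start at
  Channel/TargetID/LogicalUnit 0 and reuse the coordinates reported in
  V2.NewPhysicalDeviceInformation to ask for the next device.

      unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
      while (DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel,
                                             TargetID, LogicalUnit)) {
              DAC960_V2_PhysicalDeviceInfo_T *Info =
                      Controller->V2.NewPhysicalDeviceInformation;
              /* ... consume Info ... */
              Channel = Info->Channel;
              TargetID = Info->TargetID;
              LogicalUnit = Info->LogicalUnit + 1;
      }

  Whether the LogicalUnit increment is needed depends on how the firmware
  treats the starting coordinates; the driver's actual probe loop is the
  authoritative reference.
*/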
1024
1025static void DAC960_V2_ConstructNewUnitSerialNumber(
1026 DAC960_Controller_T *Controller,
1027 DAC960_V2_CommandMailbox_T *CommandMailbox, int Channel, int TargetID,
1028 int LogicalUnit)
1029{
1030 CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10_Passthru;
1031 CommandMailbox->SCSI_10.CommandControlBits
1032 .DataTransferControllerToHost = true;
1033 CommandMailbox->SCSI_10.CommandControlBits
1034 .NoAutoRequestSense = true;
1035 CommandMailbox->SCSI_10.DataTransferSize =
1036 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
1037 CommandMailbox->SCSI_10.PhysicalDevice.LogicalUnit = LogicalUnit;
1038 CommandMailbox->SCSI_10.PhysicalDevice.TargetID = TargetID;
1039 CommandMailbox->SCSI_10.PhysicalDevice.Channel = Channel;
1040 CommandMailbox->SCSI_10.CDBLength = 6;
1041 CommandMailbox->SCSI_10.SCSI_CDB[0] = 0x12; /* INQUIRY */
1042 CommandMailbox->SCSI_10.SCSI_CDB[1] = 1; /* EVPD = 1 */
1043 CommandMailbox->SCSI_10.SCSI_CDB[2] = 0x80; /* Page Code */
1044 CommandMailbox->SCSI_10.SCSI_CDB[3] = 0; /* Reserved */
1045 CommandMailbox->SCSI_10.SCSI_CDB[4] =
1046 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
1047 CommandMailbox->SCSI_10.SCSI_CDB[5] = 0; /* Control */
1048 CommandMailbox->SCSI_10.DataTransferMemoryAddress
1049 .ScatterGatherSegments[0]
1050 .SegmentDataPointer =
1051 Controller->V2.NewInquiryUnitSerialNumberDMA;
1052 CommandMailbox->SCSI_10.DataTransferMemoryAddress
1053 .ScatterGatherSegments[0]
1054 .SegmentByteCount =
1055 CommandMailbox->SCSI_10.DataTransferSize;
1056}
1057
1058
1059/*
 1060 DAC960_V2_NewInquiryUnitSerialNumber executes a SCSI pass-through
 1061 Inquiry command to a SCSI device identified by Channel number,
 1062 Target ID, and Logical Unit Number. This function waits for completion
 1063 of the command.
1064
1065 The return data includes Unit Serial Number information for the
1066 specified device.
1067
 1068 Data is stored in the controller's V2.NewInquiryUnitSerialNumber dma-able
 1069 memory buffer.
1070*/
1071
1072static boolean DAC960_V2_NewInquiryUnitSerialNumber(DAC960_Controller_T *Controller,
1073 int Channel, int TargetID, int LogicalUnit)
1074{
1075 DAC960_Command_T *Command;
1076 DAC960_V2_CommandMailbox_T *CommandMailbox;
1077 DAC960_V2_CommandStatus_T CommandStatus;
1078
1079 Command = DAC960_AllocateCommand(Controller);
1080 CommandMailbox = &Command->V2.CommandMailbox;
1081 DAC960_V2_ClearCommand(Command);
1082 Command->CommandType = DAC960_ImmediateCommand;
1083
1084 DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
1085 Channel, TargetID, LogicalUnit);
1086
1087 DAC960_ExecuteCommand(Command);
1088 CommandStatus = Command->V2.CommandStatus;
1089 DAC960_DeallocateCommand(Command);
1090 return (CommandStatus == DAC960_V2_NormalCompletion);
1091}
1092
1093
1094/*
1095 DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device
1096 Operation IOCTL Command and waits for completion. It returns true on
1097 success and false on failure.
1098*/
1099
1100static boolean DAC960_V2_DeviceOperation(DAC960_Controller_T *Controller,
1101 DAC960_V2_IOCTL_Opcode_T IOCTL_Opcode,
1102 DAC960_V2_OperationDevice_T
1103 OperationDevice)
1104{
1105 DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
1106 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
1107 DAC960_V2_CommandStatus_T CommandStatus;
1108 DAC960_V2_ClearCommand(Command);
1109 Command->CommandType = DAC960_ImmediateCommand;
1110 CommandMailbox->DeviceOperation.CommandOpcode = DAC960_V2_IOCTL;
1111 CommandMailbox->DeviceOperation.CommandControlBits
1112 .DataTransferControllerToHost = true;
1113 CommandMailbox->DeviceOperation.CommandControlBits
1114 .NoAutoRequestSense = true;
1115 CommandMailbox->DeviceOperation.IOCTL_Opcode = IOCTL_Opcode;
1116 CommandMailbox->DeviceOperation.OperationDevice = OperationDevice;
1117 DAC960_ExecuteCommand(Command);
1118 CommandStatus = Command->V2.CommandStatus;
1119 DAC960_DeallocateCommand(Command);
1120 return (CommandStatus == DAC960_V2_NormalCompletion);
1121}
1122
1123
1124/*
1125 DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
1126 for DAC960 V1 Firmware Controllers.
1127
1128 PD and P controller types have no memory mailbox, but still need the
1129 other dma mapped memory.
1130*/
1131
1132static boolean DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
1133 *Controller)
1134{
1135 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
1136 DAC960_HardwareType_T hw_type = Controller->HardwareType;
1137 struct pci_dev *PCI_Device = Controller->PCIDevice;
1138 struct dma_loaf *DmaPages = &Controller->DmaPages;
1139 size_t DmaPagesSize;
1140 size_t CommandMailboxesSize;
1141 size_t StatusMailboxesSize;
1142
1143 DAC960_V1_CommandMailbox_T *CommandMailboxesMemory;
1144 dma_addr_t CommandMailboxesMemoryDMA;
1145
1146 DAC960_V1_StatusMailbox_T *StatusMailboxesMemory;
1147 dma_addr_t StatusMailboxesMemoryDMA;
1148
1149 DAC960_V1_CommandMailbox_T CommandMailbox;
1150 DAC960_V1_CommandStatus_T CommandStatus;
1151 int TimeoutCounter;
1152 int i;
1153
1154
 1155 if (pci_set_dma_mask(Controller->PCIDevice, DAC960_V1_PciDmaMask))
1156 return DAC960_Failure(Controller, "DMA mask out of range");
 1157 Controller->BounceBufferLimit = DAC960_V1_PciDmaMask;
1158
1159 if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
1160 CommandMailboxesSize = 0;
1161 StatusMailboxesSize = 0;
1162 } else {
1163 CommandMailboxesSize = DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T);
1164 StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T);
1165 }
1166 DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize +
1167 sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) +
1168 sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) +
1169 sizeof(DAC960_V1_RebuildProgress_T) +
1170 sizeof(DAC960_V1_LogicalDriveInformationArray_T) +
1171 sizeof(DAC960_V1_BackgroundInitializationStatus_T) +
1172 sizeof(DAC960_V1_DeviceState_T) + sizeof(DAC960_SCSI_Inquiry_T) +
1173 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
1174
1175 if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize))
1176 return false;
1177
1178
1179 if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
1180 goto skip_mailboxes;
1181
1182 CommandMailboxesMemory = slice_dma_loaf(DmaPages,
1183 CommandMailboxesSize, &CommandMailboxesMemoryDMA);
1184
1185 /* These are the base addresses for the command memory mailbox array */
1186 Controller->V1.FirstCommandMailbox = CommandMailboxesMemory;
1187 Controller->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
1188
1189 CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1;
1190 Controller->V1.LastCommandMailbox = CommandMailboxesMemory;
1191 Controller->V1.NextCommandMailbox = Controller->V1.FirstCommandMailbox;
1192 Controller->V1.PreviousCommandMailbox1 = Controller->V1.LastCommandMailbox;
1193 Controller->V1.PreviousCommandMailbox2 =
1194 Controller->V1.LastCommandMailbox - 1;
1195
1196 /* These are the base addresses for the status memory mailbox array */
1197 StatusMailboxesMemory = slice_dma_loaf(DmaPages,
1198 StatusMailboxesSize, &StatusMailboxesMemoryDMA);
1199
1200 Controller->V1.FirstStatusMailbox = StatusMailboxesMemory;
1201 Controller->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
1202 StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1;
1203 Controller->V1.LastStatusMailbox = StatusMailboxesMemory;
1204 Controller->V1.NextStatusMailbox = Controller->V1.FirstStatusMailbox;
1205
1206skip_mailboxes:
1207 Controller->V1.MonitoringDCDB = slice_dma_loaf(DmaPages,
1208 sizeof(DAC960_V1_DCDB_T),
1209 &Controller->V1.MonitoringDCDB_DMA);
1210
1211 Controller->V1.NewEnquiry = slice_dma_loaf(DmaPages,
1212 sizeof(DAC960_V1_Enquiry_T),
1213 &Controller->V1.NewEnquiryDMA);
1214
1215 Controller->V1.NewErrorTable = slice_dma_loaf(DmaPages,
1216 sizeof(DAC960_V1_ErrorTable_T),
1217 &Controller->V1.NewErrorTableDMA);
1218
1219 Controller->V1.EventLogEntry = slice_dma_loaf(DmaPages,
1220 sizeof(DAC960_V1_EventLogEntry_T),
1221 &Controller->V1.EventLogEntryDMA);
1222
1223 Controller->V1.RebuildProgress = slice_dma_loaf(DmaPages,
1224 sizeof(DAC960_V1_RebuildProgress_T),
1225 &Controller->V1.RebuildProgressDMA);
1226
1227 Controller->V1.NewLogicalDriveInformation = slice_dma_loaf(DmaPages,
1228 sizeof(DAC960_V1_LogicalDriveInformationArray_T),
1229 &Controller->V1.NewLogicalDriveInformationDMA);
1230
1231 Controller->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages,
1232 sizeof(DAC960_V1_BackgroundInitializationStatus_T),
1233 &Controller->V1.BackgroundInitializationStatusDMA);
1234
1235 Controller->V1.NewDeviceState = slice_dma_loaf(DmaPages,
1236 sizeof(DAC960_V1_DeviceState_T),
1237 &Controller->V1.NewDeviceStateDMA);
1238
1239 Controller->V1.NewInquiryStandardData = slice_dma_loaf(DmaPages,
1240 sizeof(DAC960_SCSI_Inquiry_T),
1241 &Controller->V1.NewInquiryStandardDataDMA);
1242
1243 Controller->V1.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
1244 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
1245 &Controller->V1.NewInquiryUnitSerialNumberDMA);
1246
1247 if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
1248 return true;
1249
1250 /* Enable the Memory Mailbox Interface. */
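  /*
   * The loop below makes up to two attempts: the first pass requests the
   * dual-mode memory mailbox interface (CommandOpcode2 0x14); if the
   * controller rejects it, DualModeMemoryMailboxInterface is cleared and
   * the second pass retries with the single-mode opcode 0x10.
   */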
1251 Controller->V1.DualModeMemoryMailboxInterface = true;
1252 CommandMailbox.TypeX.CommandOpcode = 0x2B;
1253 CommandMailbox.TypeX.CommandIdentifier = 0;
1254 CommandMailbox.TypeX.CommandOpcode2 = 0x14;
1255 CommandMailbox.TypeX.CommandMailboxesBusAddress =
1256 Controller->V1.FirstCommandMailboxDMA;
1257 CommandMailbox.TypeX.StatusMailboxesBusAddress =
1258 Controller->V1.FirstStatusMailboxDMA;
1259#define TIMEOUT_COUNT 1000000
1260
1261 for (i = 0; i < 2; i++)
1262 switch (Controller->HardwareType)
1263 {
1264 case DAC960_LA_Controller:
1265 TimeoutCounter = TIMEOUT_COUNT;
1266 while (--TimeoutCounter >= 0)
1267 {
1268 if (!DAC960_LA_HardwareMailboxFullP(ControllerBaseAddress))
1269 break;
1270 udelay(10);
1271 }
1272 if (TimeoutCounter < 0) return false;
1273 DAC960_LA_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
1274 DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
1275 TimeoutCounter = TIMEOUT_COUNT;
1276 while (--TimeoutCounter >= 0)
1277 {
1278 if (DAC960_LA_HardwareMailboxStatusAvailableP(
1279 ControllerBaseAddress))
1280 break;
1281 udelay(10);
1282 }
1283 if (TimeoutCounter < 0) return false;
1284 CommandStatus = DAC960_LA_ReadStatusRegister(ControllerBaseAddress);
1285 DAC960_LA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
1286 DAC960_LA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
1287 if (CommandStatus == DAC960_V1_NormalCompletion) return true;
1288 Controller->V1.DualModeMemoryMailboxInterface = false;
1289 CommandMailbox.TypeX.CommandOpcode2 = 0x10;
1290 break;
1291 case DAC960_PG_Controller:
1292 TimeoutCounter = TIMEOUT_COUNT;
1293 while (--TimeoutCounter >= 0)
1294 {
1295 if (!DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress))
1296 break;
1297 udelay(10);
1298 }
1299 if (TimeoutCounter < 0) return false;
1300 DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
1301 DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
1302
1303 TimeoutCounter = TIMEOUT_COUNT;
1304 while (--TimeoutCounter >= 0)
1305 {
1306 if (DAC960_PG_HardwareMailboxStatusAvailableP(
1307 ControllerBaseAddress))
1308 break;
1309 udelay(10);
1310 }
1311 if (TimeoutCounter < 0) return false;
1312 CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress);
1313 DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
1314 DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
1315 if (CommandStatus == DAC960_V1_NormalCompletion) return true;
1316 Controller->V1.DualModeMemoryMailboxInterface = false;
1317 CommandMailbox.TypeX.CommandOpcode2 = 0x10;
1318 break;
1319 default:
1320 DAC960_Failure(Controller, "Unknown Controller Type\n");
1321 break;
1322 }
1323 return false;
1324}
1325
1326
1327/*
1328 DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
1329 for DAC960 V2 Firmware Controllers.
1330
1331 Aggregate the space needed for the controller's memory mailbox and
1332 the other data structures that will be targets of dma transfers with
1333 the controller. Allocate a dma-mapped region of memory to hold these
1334 structures. Then, save CPU pointers and dma_addr_t values to reference
1335 the structures that are contained in that region.
1336*/
1337
1338static boolean DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
1339 *Controller)
1340{
1341 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
1342 struct pci_dev *PCI_Device = Controller->PCIDevice;
1343 struct dma_loaf *DmaPages = &Controller->DmaPages;
1344 size_t DmaPagesSize;
1345 size_t CommandMailboxesSize;
1346 size_t StatusMailboxesSize;
1347
1348 DAC960_V2_CommandMailbox_T *CommandMailboxesMemory;
1349 dma_addr_t CommandMailboxesMemoryDMA;
1350
1351 DAC960_V2_StatusMailbox_T *StatusMailboxesMemory;
1352 dma_addr_t StatusMailboxesMemoryDMA;
1353
1354 DAC960_V2_CommandMailbox_T *CommandMailbox;
1355 dma_addr_t CommandMailboxDMA;
1356 DAC960_V2_CommandStatus_T CommandStatus;
1357
 1358 if (pci_set_dma_mask(Controller->PCIDevice, DAC960_V2_PciDmaMask))
1359 return DAC960_Failure(Controller, "DMA mask out of range");
 1360 Controller->BounceBufferLimit = DAC960_V2_PciDmaMask;
1361
1362 /* This is a temporary dma mapping, used only in the scope of this function */
1363 CommandMailbox =
1364 (DAC960_V2_CommandMailbox_T *)pci_alloc_consistent( PCI_Device,
1365 sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA);
1366 if (CommandMailbox == NULL)
1367 return false;
1368
1369 CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T);
1370 StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T);
1371 DmaPagesSize =
1372 CommandMailboxesSize + StatusMailboxesSize +
1373 sizeof(DAC960_V2_HealthStatusBuffer_T) +
1374 sizeof(DAC960_V2_ControllerInfo_T) +
1375 sizeof(DAC960_V2_LogicalDeviceInfo_T) +
1376 sizeof(DAC960_V2_PhysicalDeviceInfo_T) +
1377 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T) +
1378 sizeof(DAC960_V2_Event_T) +
1379 sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
1380
1381 if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) {
1382 pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
1383 CommandMailbox, CommandMailboxDMA);
1384 return false;
1385 }
1386
1387 CommandMailboxesMemory = slice_dma_loaf(DmaPages,
1388 CommandMailboxesSize, &CommandMailboxesMemoryDMA);
1389
1390 /* These are the base addresses for the command memory mailbox array */
1391 Controller->V2.FirstCommandMailbox = CommandMailboxesMemory;
1392 Controller->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
1393
1394 CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1;
1395 Controller->V2.LastCommandMailbox = CommandMailboxesMemory;
1396 Controller->V2.NextCommandMailbox = Controller->V2.FirstCommandMailbox;
1397 Controller->V2.PreviousCommandMailbox1 = Controller->V2.LastCommandMailbox;
1398 Controller->V2.PreviousCommandMailbox2 =
1399 Controller->V2.LastCommandMailbox - 1;
1400
1401 /* These are the base addresses for the status memory mailbox array */
1402 StatusMailboxesMemory = slice_dma_loaf(DmaPages,
1403 StatusMailboxesSize, &StatusMailboxesMemoryDMA);
1404
1405 Controller->V2.FirstStatusMailbox = StatusMailboxesMemory;
1406 Controller->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
1407 StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1;
1408 Controller->V2.LastStatusMailbox = StatusMailboxesMemory;
1409 Controller->V2.NextStatusMailbox = Controller->V2.FirstStatusMailbox;
1410
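/*
  The Health Status Buffer is a small region the controller updates on its
  own; the driver's monitoring code polls it for status and event counters.
*/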
1411 Controller->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages,
1412 sizeof(DAC960_V2_HealthStatusBuffer_T),
1413 &Controller->V2.HealthStatusBufferDMA);
1414
1415 Controller->V2.NewControllerInformation = slice_dma_loaf(DmaPages,
1416 sizeof(DAC960_V2_ControllerInfo_T),
1417 &Controller->V2.NewControllerInformationDMA);
1418
1419 Controller->V2.NewLogicalDeviceInformation = slice_dma_loaf(DmaPages,
1420 sizeof(DAC960_V2_LogicalDeviceInfo_T),
1421 &Controller->V2.NewLogicalDeviceInformationDMA);
1422
1423 Controller->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages,
1424 sizeof(DAC960_V2_PhysicalDeviceInfo_T),
1425 &Controller->V2.NewPhysicalDeviceInformationDMA);
1426
1427 Controller->V2.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
1428 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
1429 &Controller->V2.NewInquiryUnitSerialNumberDMA);
1430
1431 Controller->V2.Event = slice_dma_loaf(DmaPages,
1432 sizeof(DAC960_V2_Event_T),
1433 &Controller->V2.EventDMA);
1434
1435 Controller->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages,
1436 sizeof(DAC960_V2_PhysicalToLogicalDevice_T),
1437 &Controller->V2.PhysicalToLogicalDeviceDMA);
1438
1439 /*
1440 Enable the Memory Mailbox Interface.
1441
1442 I don't know why we can't just use one of the memory mailboxes
1443 we just allocated to do this, instead of using this temporary one.
1444 Try this change later.
1445 */
1446 memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
1447 CommandMailbox->SetMemoryMailbox.CommandIdentifier = 1;
1448 CommandMailbox->SetMemoryMailbox.CommandOpcode = DAC960_V2_IOCTL;
1449 CommandMailbox->SetMemoryMailbox.CommandControlBits.NoAutoRequestSense = true;
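/* The mailbox region sizes are passed to the firmware in KB, hence the >> 10 conversions below. */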
1450 CommandMailbox->SetMemoryMailbox.FirstCommandMailboxSizeKB =
1451 (DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10;
1452 CommandMailbox->SetMemoryMailbox.FirstStatusMailboxSizeKB =
1453 (DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10;
1454 CommandMailbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0;
1455 CommandMailbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0;
1456 CommandMailbox->SetMemoryMailbox.RequestSenseSize = 0;
1457 CommandMailbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox;
1458 CommandMailbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1;
1459 CommandMailbox->SetMemoryMailbox.HealthStatusBufferBusAddress =
1460 Controller->V2.HealthStatusBufferDMA;
1461 CommandMailbox->SetMemoryMailbox.FirstCommandMailboxBusAddress =
1462 Controller->V2.FirstCommandMailboxDMA;
1463 CommandMailbox->SetMemoryMailbox.FirstStatusMailboxBusAddress =
1464 Controller->V2.FirstStatusMailboxDMA;
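/*
  Hand the temporary mailbox to the controller through the hardware mailbox
  registers: wait for the inbound mailbox to drain, post the command, poll
  for completion status, then acknowledge the interrupt and the status.
*/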
1465 switch (Controller->HardwareType)
1466 {
1467 case DAC960_BA_Controller:
1468 while (DAC960_BA_HardwareMailboxFullP(ControllerBaseAddress))
1469 udelay(1);
1470 DAC960_BA_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
1471 DAC960_BA_HardwareMailboxNewCommand(ControllerBaseAddress);
1472 while (!DAC960_BA_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
1473 udelay(1);
1474 CommandStatus = DAC960_BA_ReadCommandStatus(ControllerBaseAddress);
1475 DAC960_BA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
1476 DAC960_BA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
1477 break;
1478 case DAC960_LP_Controller:
1479 while (DAC960_LP_HardwareMailboxFullP(ControllerBaseAddress))
1480 udelay(1);
1481 DAC960_LP_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
1482 DAC960_LP_HardwareMailboxNewCommand(ControllerBaseAddress);
1483 while (!DAC960_LP_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
1484 udelay(1);
1485 CommandStatus = DAC960_LP_ReadCommandStatus(ControllerBaseAddress);
1486 DAC960_LP_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
1487 DAC960_LP_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
1488 break;
1489 default:
1490 DAC960_Failure(Controller, "Unknown Controller Type\n");
1491 CommandStatus = DAC960_V2_AbormalCompletion;
1492 break;
1493 }
1494 pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
1495 CommandMailbox, CommandMailboxDMA);
1496 return (CommandStatus == DAC960_V2_NormalCompletion);
1497}
1498
1499
1500/*
1501 DAC960_V1_ReadControllerConfiguration reads the Configuration Information
1502 from DAC960 V1 Firmware Controllers and initializes the Controller structure.
1503*/
1504
1505static boolean DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
1506 *Controller)
1507{
1508 DAC960_V1_Enquiry2_T *Enquiry2;
1509 dma_addr_t Enquiry2DMA;
1510 DAC960_V1_Config2_T *Config2;
1511 dma_addr_t Config2DMA;
1512 int LogicalDriveNumber, Channel, TargetID;
1513 struct dma_loaf local_dma;
1514
1515 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
1516 sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T)))
1517 return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
1518
1519 Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T), &Enquiry2DMA);
1520 Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T), &Config2DMA);
1521
1522 if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry,
1523 Controller->V1.NewEnquiryDMA)) {
1524 free_dma_loaf(Controller->PCIDevice, &local_dma);
1525 return DAC960_Failure(Controller, "ENQUIRY");
1526 }
1527 memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry,
1528 sizeof(DAC960_V1_Enquiry_T));
1529
1530 if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry2, Enquiry2DMA)) {
1531 free_dma_loaf(Controller->PCIDevice, &local_dma);
1532 return DAC960_Failure(Controller, "ENQUIRY2");
1533 }
1534
1535 if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_ReadConfig2, Config2DMA)) {
1536 free_dma_loaf(Controller->PCIDevice, &local_dma);
1537 return DAC960_Failure(Controller, "READ CONFIG2");
1538 }
1539
1540 if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_GetLogicalDriveInformation,
1541 Controller->V1.NewLogicalDriveInformationDMA)) {
1542 free_dma_loaf(Controller->PCIDevice, &local_dma);
1543 return DAC960_Failure(Controller, "GET LOGICAL DRIVE INFORMATION");
1544 }
1545 memcpy(&Controller->V1.LogicalDriveInformation,
1546 Controller->V1.NewLogicalDriveInformation,
1547 sizeof(DAC960_V1_LogicalDriveInformationArray_T));
1548
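/* Record the initial state of every physical device on every channel. */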
1549 for (Channel = 0; Channel < Enquiry2->ActualChannels; Channel++)
1550 for (TargetID = 0; TargetID < Enquiry2->MaxTargets; TargetID++) {
1551 if (!DAC960_V1_ExecuteType3D(Controller, DAC960_V1_GetDeviceState,
1552 Channel, TargetID,
1553 Controller->V1.NewDeviceStateDMA)) {
1554 free_dma_loaf(Controller->PCIDevice, &local_dma);
1555 return DAC960_Failure(Controller, "GET DEVICE STATE");
1556 }
1557 memcpy(&Controller->V1.DeviceState[Channel][TargetID],
1558 Controller->V1.NewDeviceState, sizeof(DAC960_V1_DeviceState_T));
1559 }
1560 /*
1561 Initialize the Controller Model Name and Full Model Name fields.
1562 */
1563 switch (Enquiry2->HardwareID.SubModel)
1564 {
1565 case DAC960_V1_P_PD_PU:
1566 if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra)
1567 strcpy(Controller->ModelName, "DAC960PU");
1568 else strcpy(Controller->ModelName, "DAC960PD");
1569 break;
1570 case DAC960_V1_PL:
1571 strcpy(Controller->ModelName, "DAC960PL");
1572 break;
1573 case DAC960_V1_PG:
1574 strcpy(Controller->ModelName, "DAC960PG");
1575 break;
1576 case DAC960_V1_PJ:
1577 strcpy(Controller->ModelName, "DAC960PJ");
1578 break;
1579 case DAC960_V1_PR:
1580 strcpy(Controller->ModelName, "DAC960PR");
1581 break;
1582 case DAC960_V1_PT:
1583 strcpy(Controller->ModelName, "DAC960PT");
1584 break;
1585 case DAC960_V1_PTL0:
1586 strcpy(Controller->ModelName, "DAC960PTL0");
1587 break;
1588 case DAC960_V1_PRL:
1589 strcpy(Controller->ModelName, "DAC960PRL");
1590 break;
1591 case DAC960_V1_PTL1:
1592 strcpy(Controller->ModelName, "DAC960PTL1");
1593 break;
1594 case DAC960_V1_1164P:
1595 strcpy(Controller->ModelName, "DAC1164P");
1596 break;
1597 default:
1598 free_dma_loaf(Controller->PCIDevice, &local_dma);
1599 return DAC960_Failure(Controller, "MODEL VERIFICATION");
1600 }
1601 strcpy(Controller->FullModelName, "Mylex ");
1602 strcat(Controller->FullModelName, Controller->ModelName);
1603 /*
1604 Initialize the Controller Firmware Version field and verify that it
1605 is a supported firmware version. The supported firmware versions are:
1606
1607 DAC1164P 5.06 and above
1608 DAC960PTL/PRL/PJ/PG 4.06 and above
1609 DAC960PU/PD/PL 3.51 and above
1610 DAC960PU/PD/PL/P 2.73 and above
1611 */
1612#if defined(CONFIG_ALPHA)
1613 /*
1614 DEC Alpha machines were often equipped with DAC960 cards that were
1615 OEMed from Mylex, and had their own custom firmware. Version 2.70,
1616 the last custom FW revision to be released by DEC for these older
1617 controllers, appears to work quite well with this driver.
1618
1619 Cards tested successfully were several versions each of the PD and
1620 PU, called by DEC the KZPSC and KZPAC, respectively, and having
1621 the Manufacturer Numbers (from Mylex), usually on a sticker on the
1622 back of the board, of:
1623
1624 KZPSC: D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel)
1625 KZPAC: D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel)
1626 */
1627# define FIRMWARE_27X "2.70"
1628#else
1629# define FIRMWARE_27X "2.73"
1630#endif
1631
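/*
  If the Enquiry2 FirmwareID was left blank, fall back to the version
  numbers reported by the standard Enquiry.
*/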
1632 if (Enquiry2->FirmwareID.MajorVersion == 0)
1633 {
1634 Enquiry2->FirmwareID.MajorVersion =
1635 Controller->V1.Enquiry.MajorFirmwareVersion;
1636 Enquiry2->FirmwareID.MinorVersion =
1637 Controller->V1.Enquiry.MinorFirmwareVersion;
1638 Enquiry2->FirmwareID.FirmwareType = '0';
1639 Enquiry2->FirmwareID.TurnID = 0;
1640 }
1641 sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d",
1642 Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion,
1643 Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID);
1644 if (!((Controller->FirmwareVersion[0] == '5' &&
1645 strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
1646 (Controller->FirmwareVersion[0] == '4' &&
1647 strcmp(Controller->FirmwareVersion, "4.06") >= 0) ||
1648 (Controller->FirmwareVersion[0] == '3' &&
1649 strcmp(Controller->FirmwareVersion, "3.51") >= 0) ||
1650 (Controller->FirmwareVersion[0] == '2' &&
1651 strcmp(Controller->FirmwareVersion, FIRMWARE_27X) >= 0)))
1652 {
1653 DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION");
1654 DAC960_Error("Firmware Version = '%s'\n", Controller,
1655 Controller->FirmwareVersion);
1656 free_dma_loaf(Controller->PCIDevice, &local_dma);
1657 return false;
1658 }
1659 /*
1660 Initialize the Controller Channels, Targets, Memory Size, and SAF-TE
1661 Enclosure Management Enabled fields.
1662 */
1663 Controller->Channels = Enquiry2->ActualChannels;
1664 Controller->Targets = Enquiry2->MaxTargets;
1665 Controller->MemorySize = Enquiry2->MemorySize >> 20;
1666 Controller->V1.SAFTE_EnclosureManagementEnabled =
1667 (Enquiry2->FaultManagementType == DAC960_V1_SAFTE);
1668 /*
1669 Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
1670 Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
1671 Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
1672 less than the Controller Queue Depth to allow for an automatic drive
1673 rebuild operation.
1674 */
1675 Controller->ControllerQueueDepth = Controller->V1.Enquiry.MaxCommands;
1676 Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
1677 if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
1678 Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
1679 Controller->LogicalDriveCount =
1680 Controller->V1.Enquiry.NumberOfLogicalDrives;
1681 Controller->MaxBlocksPerCommand = Enquiry2->MaxBlocksPerCommand;
1682 Controller->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries;
1683 Controller->DriverScatterGatherLimit =
1684 Controller->ControllerScatterGatherLimit;
1685 if (Controller->DriverScatterGatherLimit > DAC960_V1_ScatterGatherLimit)
1686 Controller->DriverScatterGatherLimit = DAC960_V1_ScatterGatherLimit;
1687 /*
1688 Initialize the Stripe Size, Segment Size, and Geometry Translation.
1689 */
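/*
  BlocksPerStripe * BlockFactor presumably yields a count of
  (1 << DAC960_BlockSizeBits)-byte blocks; shifting right by
  (10 - DAC960_BlockSizeBits) then expresses that count in KB.
*/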
1690 Controller->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor
1691 >> (10 - DAC960_BlockSizeBits);
1692 Controller->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor
1693 >> (10 - DAC960_BlockSizeBits);
1694 switch (Config2->DriveGeometry)
1695 {
1696 case DAC960_V1_Geometry_128_32:
1697 Controller->V1.GeometryTranslationHeads = 128;
1698 Controller->V1.GeometryTranslationSectors = 32;
1699 break;
1700 case DAC960_V1_Geometry_255_63:
1701 Controller->V1.GeometryTranslationHeads = 255;
1702 Controller->V1.GeometryTranslationSectors = 63;
1703 break;
1704 default:
1705 free_dma_loaf(Controller->PCIDevice, &local_dma);
1706 return DAC960_Failure(Controller, "CONFIG2 DRIVE GEOMETRY");
1707 }
1708 /*
1709 Initialize the Background Initialization Status.
1710 */
1711 if ((Controller->FirmwareVersion[0] == '4' &&
1712 strcmp(Controller->FirmwareVersion, "4.08") >= 0) ||
1713 (Controller->FirmwareVersion[0] == '5' &&
1714 strcmp(Controller->FirmwareVersion, "5.08") >= 0))
1715 {
1716 Controller->V1.BackgroundInitializationStatusSupported = true;
1717 DAC960_V1_ExecuteType3B(Controller,
1718 DAC960_V1_BackgroundInitializationControl, 0x20,
1719 Controller->
1720 V1.BackgroundInitializationStatusDMA);
1721 memcpy(&Controller->V1.LastBackgroundInitializationStatus,
1722 Controller->V1.BackgroundInitializationStatus,
1723 sizeof(DAC960_V1_BackgroundInitializationStatus_T));
1724 }
1725 /*
1726 Initialize the Logical Drive Initially Accessible flag.
1727 */
1728 for (LogicalDriveNumber = 0;
1729 LogicalDriveNumber < Controller->LogicalDriveCount;
1730 LogicalDriveNumber++)
1731 if (Controller->V1.LogicalDriveInformation
1732 [LogicalDriveNumber].LogicalDriveState !=
1733 DAC960_V1_LogicalDrive_Offline)
1734 Controller->LogicalDriveInitiallyAccessible[LogicalDriveNumber] = true;
1735 Controller->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress;
1736 free_dma_loaf(Controller->PCIDevice, &local_dma);
1737 return true;
1738}
1739
1740
1741/*
1742 DAC960_V2_ReadControllerConfiguration reads the Configuration Information
1743 from DAC960 V2 Firmware Controllers and initializes the Controller structure.
1744*/
1745
1746static boolean DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
1747 *Controller)
1748{
1749 DAC960_V2_ControllerInfo_T *ControllerInfo =
1750 &Controller->V2.ControllerInformation;
1751 unsigned short LogicalDeviceNumber = 0;
1752 int ModelNameLength;
1753
1754 /* Get data into a DMA-able area, then copy it into its permanent location */
1755 if (!DAC960_V2_NewControllerInfo(Controller))
1756 return DAC960_Failure(Controller, "GET CONTROLLER INFO");
1757 memcpy(ControllerInfo, Controller->V2.NewControllerInformation,
1758 sizeof(DAC960_V2_ControllerInfo_T));
1759
1760
1761 if (!DAC960_V2_GeneralInfo(Controller))
1762 return DAC960_Failure(Controller, "GET HEALTH STATUS");
1763
1764 /*
1765 Initialize the Controller Model Name and Full Model Name fields.
1766 */
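/*
  Copy the controller name, then trim trailing spaces and NUL padding
  before the "Mylex " prefix is prepended below.
*/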
1767 ModelNameLength = sizeof(ControllerInfo->ControllerName);
1768 if (ModelNameLength > sizeof(Controller->ModelName)-1)
1769 ModelNameLength = sizeof(Controller->ModelName)-1;
1770 memcpy(Controller->ModelName, ControllerInfo->ControllerName,
1771 ModelNameLength);
1772 ModelNameLength--;
1773 while (Controller->ModelName[ModelNameLength] == ' ' ||
1774 Controller->ModelName[ModelNameLength] == '\0')
1775 ModelNameLength--;
1776 Controller->ModelName[++ModelNameLength] = '\0';
1777 strcpy(Controller->FullModelName, "Mylex ");
1778 strcat(Controller->FullModelName, Controller->ModelName);
1779 /*
1780 Initialize the Controller Firmware Version field.
1781 */
1782 sprintf(Controller->FirmwareVersion, "%d.%02d-%02d",
1783 ControllerInfo->FirmwareMajorVersion,
1784 ControllerInfo->FirmwareMinorVersion,
1785 ControllerInfo->FirmwareTurnNumber);
1786 if (ControllerInfo->FirmwareMajorVersion == 6 &&
1787 ControllerInfo->FirmwareMinorVersion == 0 &&
1788 ControllerInfo->FirmwareTurnNumber < 1)
1789 {
1790 DAC960_Info("FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n",
1791 Controller, Controller->FirmwareVersion);
1792 DAC960_Info("STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n",
1793 Controller);
1794 DAC960_Info("PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
1795 Controller);
1796 }
1797 /*
1798 Initialize the Controller Channels, Targets, and Memory Size.
1799 */
1800 Controller->Channels = ControllerInfo->NumberOfPhysicalChannelsPresent;
1801 Controller->Targets =
1802 ControllerInfo->MaximumTargetsPerChannel
1803 [ControllerInfo->NumberOfPhysicalChannelsPresent-1];
1804 Controller->MemorySize = ControllerInfo->MemorySizeMB;
1805 /*
1806 Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
1807 Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
1808 Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
1809 less than the Controller Queue Depth to allow for an automatic drive
1810 rebuild operation.
1811 */
1812 Controller->ControllerQueueDepth = ControllerInfo->MaximumParallelCommands;
1813 Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
1814 if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
1815 Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
1816 Controller->LogicalDriveCount = ControllerInfo->LogicalDevicesPresent;
1817 Controller->MaxBlocksPerCommand =
1818 ControllerInfo->MaximumDataTransferSizeInBlocks;
1819 Controller->ControllerScatterGatherLimit =
1820 ControllerInfo->MaximumScatterGatherEntries;
1821 Controller->DriverScatterGatherLimit =
1822 Controller->ControllerScatterGatherLimit;
1823 if (Controller->DriverScatterGatherLimit > DAC960_V2_ScatterGatherLimit)
1824 Controller->DriverScatterGatherLimit = DAC960_V2_ScatterGatherLimit;
1825 /*
1826 Initialize the Logical Device Information.
1827 */
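/*
  Walk the configured logical devices.  Each query names a starting
  LogicalDeviceNumber, and the controller appears to reply with the next
  configured device at or above it, so the number from the reply is read
  back before use and then incremented to advance the scan.
*/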
1828 while (true)
1829 {
1830 DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
1831 Controller->V2.NewLogicalDeviceInformation;
1832 DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo;
1833 DAC960_V2_PhysicalDevice_T PhysicalDevice;
1834
1835 if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
1836 break;
1837 LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
1838 if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
1839 DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
1840 Controller, LogicalDeviceNumber);
1841 break;
1842 }
1843 if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
1844 DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
1845 Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
1846 LogicalDeviceNumber++;
1847 continue;
1848 }
1849 PhysicalDevice.Controller = 0;
1850 PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
1851 PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
1852 PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
1853 Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
1854 PhysicalDevice;
1855 if (NewLogicalDeviceInfo->LogicalDeviceState !=
1856 DAC960_V2_LogicalDevice_Offline)
1857 Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true;
1858 LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
1859 kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
1860 if (LogicalDeviceInfo == NULL)
1861 return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
1862 Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
1863 LogicalDeviceInfo;
1864 memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
1865 sizeof(DAC960_V2_LogicalDeviceInfo_T));
1866 LogicalDeviceNumber++;
1867 }
1868 return true;
1869}
1870
1871
1872/*
1873 DAC960_ReportControllerConfiguration reports the Configuration Information
1874 for Controller.
1875*/
1876
1877static boolean DAC960_ReportControllerConfiguration(DAC960_Controller_T
1878 *Controller)
1879{
1880 DAC960_Info("Configuring Mylex %s PCI RAID Controller\n",
1881 Controller, Controller->ModelName);
1882 DAC960_Info(" Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
1883 Controller, Controller->FirmwareVersion,
1884 Controller->Channels, Controller->MemorySize);
1885 DAC960_Info(" PCI Bus: %d, Device: %d, Function: %d, I/O Address: ",
1886 Controller, Controller->Bus,
1887 Controller->Device, Controller->Function);
1888 if (Controller->IO_Address == 0)
1889 DAC960_Info("Unassigned\n", Controller);
1890 else DAC960_Info("0x%X\n", Controller, Controller->IO_Address);
1891 DAC960_Info(" PCI Address: 0x%X mapped at 0x%lX, IRQ Channel: %d\n",
1892 Controller, Controller->PCI_Address,
1893 (unsigned long) Controller->BaseAddress,
1894 Controller->IRQ_Channel);
1895 DAC960_Info(" Controller Queue Depth: %d, "
1896 "Maximum Blocks per Command: %d\n",
1897 Controller, Controller->ControllerQueueDepth,
1898 Controller->MaxBlocksPerCommand);
1899 DAC960_Info(" Driver Queue Depth: %d, "
1900 "Scatter/Gather Limit: %d of %d Segments\n",
1901 Controller, Controller->DriverQueueDepth,
1902 Controller->DriverScatterGatherLimit,
1903 Controller->ControllerScatterGatherLimit);
1904 if (Controller->FirmwareType == DAC960_V1_Controller)
1905 {
1906 DAC960_Info(" Stripe Size: %dKB, Segment Size: %dKB, "
1907 "BIOS Geometry: %d/%d\n", Controller,
1908 Controller->V1.StripeSize,
1909 Controller->V1.SegmentSize,
1910 Controller->V1.GeometryTranslationHeads,
1911 Controller->V1.GeometryTranslationSectors);
1912 if (Controller->V1.SAFTE_EnclosureManagementEnabled)
1913 DAC960_Info(" SAF-TE Enclosure Management Enabled\n", Controller);
1914 }
1915 return true;
1916}
1917
1918
1919/*
1920 DAC960_V1_ReadDeviceConfiguration reads the Device Configuration Information
1921 for DAC960 V1 Firmware Controllers by requesting the SCSI Inquiry and SCSI
1922 Inquiry Unit Serial Number information for each device connected to
1923 Controller.
1924*/
1925
1926static boolean DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
1927 *Controller)
1928{
1929 struct dma_loaf local_dma;
1930
1931 dma_addr_t DCDBs_dma[DAC960_V1_MaxChannels];
1932 DAC960_V1_DCDB_T *DCDBs_cpu[DAC960_V1_MaxChannels];
1933
1934 dma_addr_t SCSI_Inquiry_dma[DAC960_V1_MaxChannels];
1935 DAC960_SCSI_Inquiry_T *SCSI_Inquiry_cpu[DAC960_V1_MaxChannels];
1936
1937 dma_addr_t SCSI_NewInquiryUnitSerialNumberDMA[DAC960_V1_MaxChannels];
1938 DAC960_SCSI_Inquiry_UnitSerialNumber_T *SCSI_NewInquiryUnitSerialNumberCPU[DAC960_V1_MaxChannels];
1939
1940 struct completion Completions[DAC960_V1_MaxChannels];
1941 unsigned long flags;
1942 int Channel, TargetID;
1943
1944 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
1945 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
1946 sizeof(DAC960_SCSI_Inquiry_T) +
1947 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T))))
1948 return DAC960_Failure(Controller,
1949 "DMA ALLOCATION FAILED IN ReadDeviceConfiguration");
1950
1951 for (Channel = 0; Channel < Controller->Channels; Channel++) {
1952 DCDBs_cpu[Channel] = slice_dma_loaf(&local_dma,
1953 sizeof(DAC960_V1_DCDB_T), DCDBs_dma + Channel);
1954 SCSI_Inquiry_cpu[Channel] = slice_dma_loaf(&local_dma,
1955 sizeof(DAC960_SCSI_Inquiry_T),
1956 SCSI_Inquiry_dma + Channel);
1957 SCSI_NewInquiryUnitSerialNumberCPU[Channel] = slice_dma_loaf(&local_dma,
1958 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
1959 SCSI_NewInquiryUnitSerialNumberDMA + Channel);
1960 }
1961
1962 for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
1963 {
1964 /*
1965 * For each channel, submit a probe for a device on that channel.
1966 * The timeout interval for a device that is present is 10 seconds.
1967 * With this approach, the timeout periods can elapse in parallel
1968 * on each channel.
1969 */
1970 for (Channel = 0; Channel < Controller->Channels; Channel++)
1971 {
1972 dma_addr_t NewInquiryStandardDataDMA = SCSI_Inquiry_dma[Channel];
1973 DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
1974 dma_addr_t DCDB_dma = DCDBs_dma[Channel];
1975 DAC960_Command_T *Command = Controller->Commands[Channel];
1976 struct completion *Completion = &Completions[Channel];
1977
1978 init_completion(Completion);
1979 DAC960_V1_ClearCommand(Command);
1980 Command->CommandType = DAC960_ImmediateCommand;
1981 Command->Completion = Completion;
1982 Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
1983 Command->V1.CommandMailbox.Type3.BusAddress = DCDB_dma;
1984 DCDB->Channel = Channel;
1985 DCDB->TargetID = TargetID;
1986 DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
1987 DCDB->EarlyStatus = false;
1988 DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
1989 DCDB->NoAutomaticRequestSense = false;
1990 DCDB->DisconnectPermitted = true;
1991 DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
1992 DCDB->BusAddress = NewInquiryStandardDataDMA;
1993 DCDB->CDBLength = 6;
1994 DCDB->TransferLengthHigh4 = 0;
1995 DCDB->SenseLength = sizeof(DCDB->SenseData);
1996 DCDB->CDB[0] = 0x12; /* INQUIRY */
1997 DCDB->CDB[1] = 0; /* EVPD = 0 */
1998 DCDB->CDB[2] = 0; /* Page Code */
1999 DCDB->CDB[3] = 0; /* Reserved */
2000 DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
2001 DCDB->CDB[5] = 0; /* Control */
2002
2003 spin_lock_irqsave(&Controller->queue_lock, flags);
2004 DAC960_QueueCommand(Command);
2005 spin_unlock_irqrestore(&Controller->queue_lock, flags);
2006 }
2007 /*
2008 * Wait for the probes submitted in the previous loop
2009 * to complete. On the probes that are successful,
2010 * get the serial number of the device that was found.
2011 */
2012 for (Channel = 0; Channel < Controller->Channels; Channel++)
2013 {
2014 DAC960_SCSI_Inquiry_T *InquiryStandardData =
2015 &Controller->V1.InquiryStandardData[Channel][TargetID];
2016 DAC960_SCSI_Inquiry_T *NewInquiryStandardData = SCSI_Inquiry_cpu[Channel];
2017 dma_addr_t NewInquiryUnitSerialNumberDMA =
2018 SCSI_NewInquiryUnitSerialNumberDMA[Channel];
2019 DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
2020 SCSI_NewInquiryUnitSerialNumberCPU[Channel];
2021 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
2022 &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
2023 DAC960_Command_T *Command = Controller->Commands[Channel];
2024 DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
2025 struct completion *Completion = &Completions[Channel];
2026
2027 wait_for_completion(Completion);
2028
2029 if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
2030 memset(InquiryStandardData, 0, sizeof(DAC960_SCSI_Inquiry_T));
2031 InquiryStandardData->PeripheralDeviceType = 0x1F;
2032 continue;
2033 } else
2034 memcpy(InquiryStandardData, NewInquiryStandardData, sizeof(DAC960_SCSI_Inquiry_T));
2035
2036 /* Preserve Channel and TargetID values from the previous loop */
2037 Command->Completion = Completion;
2038 DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
2039 DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
2040 DCDB->SenseLength = sizeof(DCDB->SenseData);
2041 DCDB->CDB[0] = 0x12; /* INQUIRY */
2042 DCDB->CDB[1] = 1; /* EVPD = 1 */
2043 DCDB->CDB[2] = 0x80; /* Page Code */
2044 DCDB->CDB[3] = 0; /* Reserved */
2045 DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
2046 DCDB->CDB[5] = 0; /* Control */
2047
2048 spin_lock_irqsave(&Controller->queue_lock, flags);
2049 DAC960_QueueCommand(Command);
2050 spin_unlock_irqrestore(&Controller->queue_lock, flags);
2051 wait_for_completion(Completion);
2052
2053 if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
2054 memset(InquiryUnitSerialNumber, 0,
2055 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
2056 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
2057 } else
2058 memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
2059 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
2060 }
2061 }
2062 free_dma_loaf(Controller->PCIDevice, &local_dma);
2063 return true;
2064}
2065
2066
2067/*
2068 DAC960_V2_ReadDeviceConfiguration reads the Device Configuration Information
2069 for DAC960 V2 Firmware Controllers by requesting the Physical Device
2070 Information and SCSI Inquiry Unit Serial Number information for each
2071 device connected to Controller.
2072*/
2073
2074static boolean DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
2075 *Controller)
2076{
2077 unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
2078 unsigned short PhysicalDeviceIndex = 0;
2079
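/*
  Walk the physical devices.  The Channel/TargetID/LogicalUnit triple acts
  as a cursor: each query appears to return the next device at or beyond
  it, the cursor is reloaded from the reply, and LogicalUnit is bumped at
  the bottom of the loop so the next query moves past the device just found.
*/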
2080 while (true)
2081 {
2082 DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
2083 Controller->V2.NewPhysicalDeviceInformation;
2084 DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo;
2085 DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
2086 Controller->V2.NewInquiryUnitSerialNumber;
2087 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber;
2088
2089 if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit))
2090 break;
2091
2092 PhysicalDeviceInfo = (DAC960_V2_PhysicalDeviceInfo_T *)
2093 kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
2094 if (PhysicalDeviceInfo == NULL)
2095 return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION");
2096 Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] =
2097 PhysicalDeviceInfo;
2098 memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
2099 sizeof(DAC960_V2_PhysicalDeviceInfo_T));
2100
2101 InquiryUnitSerialNumber = (DAC960_SCSI_Inquiry_UnitSerialNumber_T *)
2102 kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
2103 if (InquiryUnitSerialNumber == NULL) {
2104 kfree(PhysicalDeviceInfo);
2105 return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION");
2106 }
2107 Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex] =
2108 InquiryUnitSerialNumber;
2109
2110 Channel = NewPhysicalDeviceInfo->Channel;
2111 TargetID = NewPhysicalDeviceInfo->TargetID;
2112 LogicalUnit = NewPhysicalDeviceInfo->LogicalUnit;
2113
2114 /*
2115 Some devices do NOT have Unit Serial Numbers, so this command
2116 fails for them.  We still want to remember that those devices
2117 are present, so construct a placeholder UnitSerialNumber
2118 structure for the failure case.
2119 */
2120 if (!DAC960_V2_NewInquiryUnitSerialNumber(Controller, Channel, TargetID, LogicalUnit)) {
2121 memset(InquiryUnitSerialNumber, 0,
2122 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
2123 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
2124 } else
2125 memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
2126 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
2127
2128 PhysicalDeviceIndex++;
2129 LogicalUnit++;
2130 }
2131 return true;
2132}
2133
2134
2135/*
2136 DAC960_SanitizeInquiryData sanitizes the Vendor, Model, Revision, and
2137 Product Serial Number fields of the Inquiry Standard Data and Inquiry
2138 Unit Serial Number structures.
2139*/
2140
2141static void DAC960_SanitizeInquiryData(DAC960_SCSI_Inquiry_T
2142 *InquiryStandardData,
2143 DAC960_SCSI_Inquiry_UnitSerialNumber_T
2144 *InquiryUnitSerialNumber,
2145 unsigned char *Vendor,
2146 unsigned char *Model,
2147 unsigned char *Revision,
2148 unsigned char *SerialNumber)
2149{
2150 int SerialNumberLength, i;
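/*
  A Peripheral Device Type of 0x1F is used throughout the driver as a
  "no device present" marker, so there is nothing to sanitize.
*/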
2151 if (InquiryStandardData->PeripheralDeviceType == 0x1F) return;
2152 for (i = 0; i < sizeof(InquiryStandardData->VendorIdentification); i++)
2153 {
2154 unsigned char VendorCharacter =
2155 InquiryStandardData->VendorIdentification[i];
2156 Vendor[i] = (VendorCharacter >= ' ' && VendorCharacter <= '~'
2157 ? VendorCharacter : ' ');
2158 }
2159 Vendor[sizeof(InquiryStandardData->VendorIdentification)] = '\0';
2160 for (i = 0; i < sizeof(InquiryStandardData->ProductIdentification); i++)
2161 {
2162 unsigned char ModelCharacter =
2163 InquiryStandardData->ProductIdentification[i];
2164 Model[i] = (ModelCharacter >= ' ' && ModelCharacter <= '~'
2165 ? ModelCharacter : ' ');
2166 }
2167 Model[sizeof(InquiryStandardData->ProductIdentification)] = '\0';
2168 for (i = 0; i < sizeof(InquiryStandardData->ProductRevisionLevel); i++)
2169 {
2170 unsigned char RevisionCharacter =
2171 InquiryStandardData->ProductRevisionLevel[i];
2172 Revision[i] = (RevisionCharacter >= ' ' && RevisionCharacter <= '~'
2173 ? RevisionCharacter : ' ');
2174 }
2175 Revision[sizeof(InquiryStandardData->ProductRevisionLevel)] = '\0';
2176 if (InquiryUnitSerialNumber->PeripheralDeviceType == 0x1F) return;
2177 SerialNumberLength = InquiryUnitSerialNumber->PageLength;
2178 if (SerialNumberLength >
2179 sizeof(InquiryUnitSerialNumber->ProductSerialNumber))
2180 SerialNumberLength = sizeof(InquiryUnitSerialNumber->ProductSerialNumber);
2181 for (i = 0; i < SerialNumberLength; i++)
2182 {
2183 unsigned char SerialNumberCharacter =
2184 InquiryUnitSerialNumber->ProductSerialNumber[i];
2185 SerialNumber[i] =
2186 (SerialNumberCharacter >= ' ' && SerialNumberCharacter <= '~'
2187 ? SerialNumberCharacter : ' ');
2188 }
2189 SerialNumber[SerialNumberLength] = '\0';
2190}
2191
2192
2193/*
2194 DAC960_V1_ReportDeviceConfiguration reports the Device Configuration
2195 Information for DAC960 V1 Firmware Controllers.
2196*/
2197
2198static boolean DAC960_V1_ReportDeviceConfiguration(DAC960_Controller_T
2199 *Controller)
2200{
2201 int LogicalDriveNumber, Channel, TargetID;
2202 DAC960_Info(" Physical Devices:\n", Controller);
2203 for (Channel = 0; Channel < Controller->Channels; Channel++)
2204 for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
2205 {
2206 DAC960_SCSI_Inquiry_T *InquiryStandardData =
2207 &Controller->V1.InquiryStandardData[Channel][TargetID];
2208 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
2209 &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
2210 DAC960_V1_DeviceState_T *DeviceState =
2211 &Controller->V1.DeviceState[Channel][TargetID];
2212 DAC960_V1_ErrorTableEntry_T *ErrorEntry =
2213 &Controller->V1.ErrorTable.ErrorTableEntries[Channel][TargetID];
2214 char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
2215 char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
2216 char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
2217 char SerialNumber[1+sizeof(InquiryUnitSerialNumber
2218 ->ProductSerialNumber)];
2219 if (InquiryStandardData->PeripheralDeviceType == 0x1F) continue;
2220 DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
2221 Vendor, Model, Revision, SerialNumber);
2222 DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
2223 Controller, Channel, TargetID, (TargetID < 10 ? " " : ""),
2224 Vendor, Model, Revision);
2225 if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
2226 DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
2227 if (DeviceState->Present &&
2228 DeviceState->DeviceType == DAC960_V1_DiskType)
2229 {
2230 if (Controller->V1.DeviceResetCount[Channel][TargetID] > 0)
2231 DAC960_Info(" Disk Status: %s, %u blocks, %d resets\n",
2232 Controller,
2233 (DeviceState->DeviceState == DAC960_V1_Device_Dead
2234 ? "Dead"
2235 : DeviceState->DeviceState
2236 == DAC960_V1_Device_WriteOnly
2237 ? "Write-Only"
2238 : DeviceState->DeviceState
2239 == DAC960_V1_Device_Online
2240 ? "Online" : "Standby"),
2241 DeviceState->DiskSize,
2242 Controller->V1.DeviceResetCount[Channel][TargetID]);
2243 else
2244 DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
2245 (DeviceState->DeviceState == DAC960_V1_Device_Dead
2246 ? "Dead"
2247 : DeviceState->DeviceState
2248 == DAC960_V1_Device_WriteOnly
2249 ? "Write-Only"
2250 : DeviceState->DeviceState
2251 == DAC960_V1_Device_Online
2252 ? "Online" : "Standby"),
2253 DeviceState->DiskSize);
2254 }
2255 if (ErrorEntry->ParityErrorCount > 0 ||
2256 ErrorEntry->SoftErrorCount > 0 ||
2257 ErrorEntry->HardErrorCount > 0 ||
2258 ErrorEntry->MiscErrorCount > 0)
2259 DAC960_Info(" Errors - Parity: %d, Soft: %d, "
2260 "Hard: %d, Misc: %d\n", Controller,
2261 ErrorEntry->ParityErrorCount,
2262 ErrorEntry->SoftErrorCount,
2263 ErrorEntry->HardErrorCount,
2264 ErrorEntry->MiscErrorCount);
2265 }
2266 DAC960_Info(" Logical Drives:\n", Controller);
2267 for (LogicalDriveNumber = 0;
2268 LogicalDriveNumber < Controller->LogicalDriveCount;
2269 LogicalDriveNumber++)
2270 {
2271 DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation =
2272 &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
2273 DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks, %s\n",
2274 Controller, Controller->ControllerNumber, LogicalDriveNumber,
2275 LogicalDriveInformation->RAIDLevel,
2276 (LogicalDriveInformation->LogicalDriveState
2277 == DAC960_V1_LogicalDrive_Online
2278 ? "Online"
2279 : LogicalDriveInformation->LogicalDriveState
2280 == DAC960_V1_LogicalDrive_Critical
2281 ? "Critical" : "Offline"),
2282 LogicalDriveInformation->LogicalDriveSize,
2283 (LogicalDriveInformation->WriteBack
2284 ? "Write Back" : "Write Thru"));
2285 }
2286 return true;
2287}
2288
2289
2290/*
2291 DAC960_V2_ReportDeviceConfiguration reports the Device Configuration
2292 Information for DAC960 V2 Firmware Controllers.
2293*/
2294
2295static boolean DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
2296 *Controller)
2297{
2298 int PhysicalDeviceIndex, LogicalDriveNumber;
2299 DAC960_Info(" Physical Devices:\n", Controller);
2300 for (PhysicalDeviceIndex = 0;
2301 PhysicalDeviceIndex < DAC960_V2_MaxPhysicalDevices;
2302 PhysicalDeviceIndex++)
2303 {
2304 DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
2305 Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
2306 DAC960_SCSI_Inquiry_T *InquiryStandardData =
2307 (DAC960_SCSI_Inquiry_T *) &PhysicalDeviceInfo->SCSI_InquiryData;
2308 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
2309 Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
2310 char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
2311 char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
2312 char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
2313 char SerialNumber[1+sizeof(InquiryUnitSerialNumber->ProductSerialNumber)];
2314 if (PhysicalDeviceInfo == NULL) break;
2315 DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
2316 Vendor, Model, Revision, SerialNumber);
2317 DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
2318 Controller,
2319 PhysicalDeviceInfo->Channel,
2320 PhysicalDeviceInfo->TargetID,
2321 (PhysicalDeviceInfo->TargetID < 10 ? " " : ""),
2322 Vendor, Model, Revision);
2323 if (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers == 0)
2324 DAC960_Info(" %sAsynchronous\n", Controller,
2325 (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
2326 ? "Wide " :""));
2327 else
2328 DAC960_Info(" %sSynchronous at %d MB/sec\n", Controller,
2329 (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
2330 ? "Wide " :""),
2331 (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers
2332 * PhysicalDeviceInfo->NegotiatedDataWidthBits/8));
2333 if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
2334 DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
2335 if (PhysicalDeviceInfo->PhysicalDeviceState ==
2336 DAC960_V2_Device_Unconfigured)
2337 continue;
2338 DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
2339 (PhysicalDeviceInfo->PhysicalDeviceState
2340 == DAC960_V2_Device_Online
2341 ? "Online"
2342 : PhysicalDeviceInfo->PhysicalDeviceState
2343 == DAC960_V2_Device_Rebuild
2344 ? "Rebuild"
2345 : PhysicalDeviceInfo->PhysicalDeviceState
2346 == DAC960_V2_Device_Missing
2347 ? "Missing"
2348 : PhysicalDeviceInfo->PhysicalDeviceState
2349 == DAC960_V2_Device_Critical
2350 ? "Critical"
2351 : PhysicalDeviceInfo->PhysicalDeviceState
2352 == DAC960_V2_Device_Dead
2353 ? "Dead"
2354 : PhysicalDeviceInfo->PhysicalDeviceState
2355 == DAC960_V2_Device_SuspectedDead
2356 ? "Suspected-Dead"
2357 : PhysicalDeviceInfo->PhysicalDeviceState
2358 == DAC960_V2_Device_CommandedOffline
2359 ? "Commanded-Offline"
2360 : PhysicalDeviceInfo->PhysicalDeviceState
2361 == DAC960_V2_Device_Standby
2362 ? "Standby" : "Unknown"),
2363 PhysicalDeviceInfo->ConfigurableDeviceSize);
2364 if (PhysicalDeviceInfo->ParityErrors == 0 &&
2365 PhysicalDeviceInfo->SoftErrors == 0 &&
2366 PhysicalDeviceInfo->HardErrors == 0 &&
2367 PhysicalDeviceInfo->MiscellaneousErrors == 0 &&
2368 PhysicalDeviceInfo->CommandTimeouts == 0 &&
2369 PhysicalDeviceInfo->Retries == 0 &&
2370 PhysicalDeviceInfo->Aborts == 0 &&
2371 PhysicalDeviceInfo->PredictedFailuresDetected == 0)
2372 continue;
2373 DAC960_Info(" Errors - Parity: %d, Soft: %d, "
2374 "Hard: %d, Misc: %d\n", Controller,
2375 PhysicalDeviceInfo->ParityErrors,
2376 PhysicalDeviceInfo->SoftErrors,
2377 PhysicalDeviceInfo->HardErrors,
2378 PhysicalDeviceInfo->MiscellaneousErrors);
2379 DAC960_Info(" Timeouts: %d, Retries: %d, "
2380 "Aborts: %d, Predicted: %d\n", Controller,
2381 PhysicalDeviceInfo->CommandTimeouts,
2382 PhysicalDeviceInfo->Retries,
2383 PhysicalDeviceInfo->Aborts,
2384 PhysicalDeviceInfo->PredictedFailuresDetected);
2385 }
2386 DAC960_Info(" Logical Drives:\n", Controller);
2387 for (LogicalDriveNumber = 0;
2388 LogicalDriveNumber < DAC960_MaxLogicalDrives;
2389 LogicalDriveNumber++)
2390 {
2391 DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
2392 Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
2393 unsigned char *ReadCacheStatus[] = { "Read Cache Disabled",
2394 "Read Cache Enabled",
2395 "Read Ahead Enabled",
2396 "Intelligent Read Ahead Enabled",
2397 "-", "-", "-", "-" };
2398 unsigned char *WriteCacheStatus[] = { "Write Cache Disabled",
2399 "Logical Device Read Only",
2400 "Write Cache Enabled",
2401 "Intelligent Write Cache Enabled",
2402 "-", "-", "-", "-" };
2403 unsigned char *GeometryTranslation;
2404 if (LogicalDeviceInfo == NULL) continue;
2405 switch (LogicalDeviceInfo->DriveGeometry)
2406 {
2407 case DAC960_V2_Geometry_128_32:
2408 GeometryTranslation = "128/32";
2409 break;
2410 case DAC960_V2_Geometry_255_63:
2411 GeometryTranslation = "255/63";
2412 break;
2413 default:
2414 GeometryTranslation = "Invalid";
2415 DAC960_Error("Illegal Logical Device Geometry %d\n",
2416 Controller, LogicalDeviceInfo->DriveGeometry);
2417 break;
2418 }
2419 DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks\n",
2420 Controller, Controller->ControllerNumber, LogicalDriveNumber,
2421 LogicalDeviceInfo->RAIDLevel,
2422 (LogicalDeviceInfo->LogicalDeviceState
2423 == DAC960_V2_LogicalDevice_Online
2424 ? "Online"
2425 : LogicalDeviceInfo->LogicalDeviceState
2426 == DAC960_V2_LogicalDevice_Critical
2427 ? "Critical" : "Offline"),
2428 LogicalDeviceInfo->ConfigurableDeviceSize);
2429 DAC960_Info(" Logical Device %s, BIOS Geometry: %s\n",
2430 Controller,
2431 (LogicalDeviceInfo->LogicalDeviceControl
2432 .LogicalDeviceInitialized
2433 ? "Initialized" : "Uninitialized"),
2434 GeometryTranslation);
2435 if (LogicalDeviceInfo->StripeSize == 0)
2436 {
2437 if (LogicalDeviceInfo->CacheLineSize == 0)
2438 DAC960_Info(" Stripe Size: N/A, "
2439 "Segment Size: N/A\n", Controller);
2440 else
2441 DAC960_Info(" Stripe Size: N/A, "
2442 "Segment Size: %dKB\n", Controller,
2443 1 << (LogicalDeviceInfo->CacheLineSize - 2));
2444 }
2445 else
2446 {
2447 if (LogicalDeviceInfo->CacheLineSize == 0)
2448 DAC960_Info(" Stripe Size: %dKB, "
2449 "Segment Size: N/A\n", Controller,
2450 1 << (LogicalDeviceInfo->StripeSize - 2));
2451 else
2452 DAC960_Info(" Stripe Size: %dKB, "
2453 "Segment Size: %dKB\n", Controller,
2454 1 << (LogicalDeviceInfo->StripeSize - 2),
2455 1 << (LogicalDeviceInfo->CacheLineSize - 2));
2456 }
2457 DAC960_Info(" %s, %s\n", Controller,
2458 ReadCacheStatus[
2459 LogicalDeviceInfo->LogicalDeviceControl.ReadCache],
2460 WriteCacheStatus[
2461 LogicalDeviceInfo->LogicalDeviceControl.WriteCache]);
2462 if (LogicalDeviceInfo->SoftErrors > 0 ||
2463 LogicalDeviceInfo->CommandsFailed > 0 ||
2464 LogicalDeviceInfo->DeferredWriteErrors)
2465 DAC960_Info(" Errors - Soft: %d, Failed: %d, "
2466 "Deferred Write: %d\n", Controller,
2467 LogicalDeviceInfo->SoftErrors,
2468 LogicalDeviceInfo->CommandsFailed,
2469 LogicalDeviceInfo->DeferredWriteErrors);
2470
2471 }
2472 return true;
2473}
2474
2475/*
2476 DAC960_RegisterBlockDevice registers the Block Device structures
2477 associated with Controller.
2478*/
2479
2480static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
2481{
2482 int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
2483 int n;
2484
2485 /*
2486 Register the Block Device Major Number for this DAC960 Controller.
2487 */
2488 if (register_blkdev(MajorNumber, "dac960") < 0)
2489 return false;
2490
2491 for (n = 0; n < DAC960_MaxLogicalDrives; n++) {
2492 struct gendisk *disk = Controller->disks[n];
2493 struct request_queue *RequestQueue;
2494
2495 /* for now, let all request queues share the controller's lock */
2496 RequestQueue = blk_init_queue(DAC960_RequestFunction,&Controller->queue_lock);
2497 if (!RequestQueue) {
2498 printk("DAC960: failure to allocate request queue\n");
2499 continue;
2500 }
2501 Controller->RequestQueue[n] = RequestQueue;
2502 blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
2503 RequestQueue->queuedata = Controller;
2504 blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
2505 blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
2506 blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
2507 disk->queue = RequestQueue;
2508 sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
2509 sprintf(disk->devfs_name, "rd/host%d/target%d", Controller->ControllerNumber, n);
2510 disk->major = MajorNumber;
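/* Each logical drive owns a contiguous range of minor numbers, one per possible partition. */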
2511 disk->first_minor = n << DAC960_MaxPartitionsBits;
2512 disk->fops = &DAC960_BlockDeviceOperations;
2513 }
2514 /*
2515 Indicate that the Block Device Registration completed successfully.
2516 */
2517 return true;
2518}
2519
2520
2521/*
2522 DAC960_UnregisterBlockDevice unregisters the Block Device structures
2523 associated with Controller.
2524*/
2525
2526static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
2527{
2528 int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
2529 int disk;
2530
2531 /* Does the order matter between deleting the gendisk and cleaning up its request queue? */
2532 for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
2533 del_gendisk(Controller->disks[disk]);
2534 blk_cleanup_queue(Controller->RequestQueue[disk]);
2535 Controller->RequestQueue[disk] = NULL;
2536 }
2537
2538 /*
2539 Unregister the Block Device Major Number for this DAC960 Controller.
2540 */
2541 unregister_blkdev(MajorNumber, "dac960");
2542}
2543
2544/*
2545 DAC960_ComputeGenericDiskInfo computes the values for the Generic Disk
2546 Information Partition Sector Counts and Block Sizes.
2547*/
2548
2549static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller)
2550{
2551 int disk;
2552 for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++)
2553 set_capacity(Controller->disks[disk], disk_size(Controller, disk));
2554}
2555
2556/*
2557 DAC960_ReportErrorStatus reports Controller BIOS Messages passed through
2558 the Error Status Register when the driver performs the BIOS handshaking.
2559 It returns true for fatal errors and false otherwise.
2560*/
2561
2562static boolean DAC960_ReportErrorStatus(DAC960_Controller_T *Controller,
2563 unsigned char ErrorStatus,
2564 unsigned char Parameter0,
2565 unsigned char Parameter1)
2566{
2567 switch (ErrorStatus)
2568 {
2569 case 0x00:
2570 DAC960_Notice("Physical Device %d:%d Not Responding\n",
2571 Controller, Parameter1, Parameter0);
2572 break;
2573 case 0x08:
2574 if (Controller->DriveSpinUpMessageDisplayed) break;
2575 DAC960_Notice("Spinning Up Drives\n", Controller);
2576 Controller->DriveSpinUpMessageDisplayed = true;
2577 break;
2578 case 0x30:
2579 DAC960_Notice("Configuration Checksum Error\n", Controller);
2580 break;
2581 case 0x60:
2582 DAC960_Notice("Mirror Race Recovery Failed\n", Controller);
2583 break;
2584 case 0x70:
2585 DAC960_Notice("Mirror Race Recovery In Progress\n", Controller);
2586 break;
2587 case 0x90:
2588 DAC960_Notice("Physical Device %d:%d COD Mismatch\n",
2589 Controller, Parameter1, Parameter0);
2590 break;
2591 case 0xA0:
2592 DAC960_Notice("Logical Drive Installation Aborted\n", Controller);
2593 break;
2594 case 0xB0:
2595 DAC960_Notice("Mirror Race On A Critical Logical Drive\n", Controller);
2596 break;
2597 case 0xD0:
2598 DAC960_Notice("New Controller Configuration Found\n", Controller);
2599 break;
2600 case 0xF0:
2601 DAC960_Error("Fatal Memory Parity Error for Controller at\n", Controller);
2602 return true;
2603 default:
2604 DAC960_Error("Unknown Initialization Error %02X for Controller at\n",
2605 Controller, ErrorStatus);
2606 return true;
2607 }
2608 return false;
2609}
2610
2611
2612/*
2613 * DAC960_DetectCleanup releases the resources that were allocated
2614 * during DAC960_DetectController().  DAC960_DetectController has
2615 * several internal failure points, so not ALL resources may
2616 * have been allocated. It's important to free only
2617 * resources that HAVE been allocated. The code below always
2618 * tests that the resource has been allocated before attempting to
2619 * free it.
2620 */
2621static void DAC960_DetectCleanup(DAC960_Controller_T *Controller)
2622{
2623 int i;
2624
2625 /* Free the memory mailbox, status, and related structures */
2626 free_dma_loaf(Controller->PCIDevice, &Controller->DmaPages);
2627 if (Controller->MemoryMappedAddress) {
2628 switch(Controller->HardwareType)
2629 {
2630 case DAC960_BA_Controller:
2631 DAC960_BA_DisableInterrupts(Controller->BaseAddress);
2632 break;
2633 case DAC960_LP_Controller:
2634 DAC960_LP_DisableInterrupts(Controller->BaseAddress);
2635 break;
2636 case DAC960_LA_Controller:
2637 DAC960_LA_DisableInterrupts(Controller->BaseAddress);
2638 break;
2639 case DAC960_PG_Controller:
2640 DAC960_PG_DisableInterrupts(Controller->BaseAddress);
2641 break;
2642 case DAC960_PD_Controller:
2643 DAC960_PD_DisableInterrupts(Controller->BaseAddress);
2644 break;
2645 case DAC960_P_Controller:
2646 DAC960_PD_DisableInterrupts(Controller->BaseAddress);
2647 break;
2648 }
2649 iounmap(Controller->MemoryMappedAddress);
2650 }
2651 if (Controller->IRQ_Channel)
2652 free_irq(Controller->IRQ_Channel, Controller);
2653 if (Controller->IO_Address)
2654 release_region(Controller->IO_Address, 0x80);
2655 pci_disable_device(Controller->PCIDevice);
2656 for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++)
2657 put_disk(Controller->disks[i]);
2658 DAC960_Controllers[Controller->ControllerNumber] = NULL;
2659 kfree(Controller);
2660}
2661
2662
2663/*
2664 DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID
2665 PCI RAID Controllers by interrogating the PCI Configuration Space for
2666 Controller Type.
2667*/
2668
2669static DAC960_Controller_T *
2670DAC960_DetectController(struct pci_dev *PCI_Device,
2671 const struct pci_device_id *entry)
2672{
2673 struct DAC960_privdata *privdata =
2674 (struct DAC960_privdata *)entry->driver_data;
2675 irqreturn_t (*InterruptHandler)(int, void *, struct pt_regs *) =
2676 privdata->InterruptHandler;
2677 unsigned int MemoryWindowSize = privdata->MemoryWindowSize;
2678 DAC960_Controller_T *Controller = NULL;
2679 unsigned char DeviceFunction = PCI_Device->devfn;
2680 unsigned char ErrorStatus, Parameter0, Parameter1;
2681 unsigned int IRQ_Channel;
2682 void __iomem *BaseAddress;
2683 int i;
2684
2685 Controller = (DAC960_Controller_T *)
2686 kmalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
2687 if (Controller == NULL) {
2688 DAC960_Error("Unable to allocate Controller structure for "
2689 "Controller at\n", NULL);
2690 return NULL;
2691 }
2692 memset(Controller, 0, sizeof(DAC960_Controller_T));
2693 Controller->ControllerNumber = DAC960_ControllerCount;
2694 DAC960_Controllers[DAC960_ControllerCount++] = Controller;
2695 Controller->Bus = PCI_Device->bus->number;
2696 Controller->FirmwareType = privdata->FirmwareType;
2697 Controller->HardwareType = privdata->HardwareType;
2698 Controller->Device = DeviceFunction >> 3;
2699 Controller->Function = DeviceFunction & 0x7;
2700 Controller->PCIDevice = PCI_Device;
2701 strcpy(Controller->FullModelName, "DAC960");
2702
2703 if (pci_enable_device(PCI_Device))
2704 goto Failure;
2705
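/*
  BA, LP, LA, and PG controllers are fully memory mapped through BAR 0;
  the older PD and P controllers also decode an I/O port range in BAR 0,
  with their register window in BAR 1.
*/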
2706 switch (Controller->HardwareType)
2707 {
2708 case DAC960_BA_Controller:
2709 Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
2710 break;
2711 case DAC960_LP_Controller:
2712 Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
2713 break;
2714 case DAC960_LA_Controller:
2715 Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
2716 break;
2717 case DAC960_PG_Controller:
2718 Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
2719 break;
2720 case DAC960_PD_Controller:
2721 Controller->IO_Address = pci_resource_start(PCI_Device, 0);
2722 Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
2723 break;
2724 case DAC960_P_Controller:
2725 Controller->IO_Address = pci_resource_start(PCI_Device, 0);
2726 Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
2727 break;
2728 }
2729
2730 pci_set_drvdata(PCI_Device, (void *)((long)Controller->ControllerNumber));
2731 for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
2732 Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);
2733 if (!Controller->disks[i])
2734 goto Failure;
2735 Controller->disks[i]->private_data = (void *)((long)i);
2736 }
2737 init_waitqueue_head(&Controller->CommandWaitQueue);
2738 init_waitqueue_head(&Controller->HealthStatusWaitQueue);
2739 spin_lock_init(&Controller->queue_lock);
2740 DAC960_AnnounceDriver(Controller);
2741 /*
2742 Map the Controller Register Window.
2743 */
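/*
  The PCI address is rounded down to a page boundary for ioremap_nocache,
  and the offset within that page is added back to form the usable
  BaseAddress.
*/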
2744 if (MemoryWindowSize < PAGE_SIZE)
2745 MemoryWindowSize = PAGE_SIZE;
2746 Controller->MemoryMappedAddress =
2747 ioremap_nocache(Controller->PCI_Address & PAGE_MASK, MemoryWindowSize);
2748 Controller->BaseAddress =
2749 Controller->MemoryMappedAddress + (Controller->PCI_Address & ~PAGE_MASK);
2750 if (Controller->MemoryMappedAddress == NULL)
2751 {
2752 DAC960_Error("Unable to map Controller Register Window for "
2753 "Controller at\n", Controller);
2754 goto Failure;
2755 }
2756 BaseAddress = Controller->BaseAddress;
2757 switch (Controller->HardwareType)
2758 {
2759 case DAC960_BA_Controller:
2760 DAC960_BA_DisableInterrupts(BaseAddress);
2761 DAC960_BA_AcknowledgeHardwareMailboxStatus(BaseAddress);
2762 udelay(1000);
2763 while (DAC960_BA_InitializationInProgressP(BaseAddress))
2764 {
2765 if (DAC960_BA_ReadErrorStatus(BaseAddress, &ErrorStatus,
2766 &Parameter0, &Parameter1) &&
2767 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2768 Parameter0, Parameter1))
2769 goto Failure;
2770 udelay(10);
2771 }
2772 if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
2773 {
2774 DAC960_Error("Unable to Enable Memory Mailbox Interface "
2775 "for Controller at\n", Controller);
2776 goto Failure;
2777 }
2778 DAC960_BA_EnableInterrupts(BaseAddress);
2779 Controller->QueueCommand = DAC960_BA_QueueCommand;
2780 Controller->ReadControllerConfiguration =
2781 DAC960_V2_ReadControllerConfiguration;
2782 Controller->ReadDeviceConfiguration =
2783 DAC960_V2_ReadDeviceConfiguration;
2784 Controller->ReportDeviceConfiguration =
2785 DAC960_V2_ReportDeviceConfiguration;
2786 Controller->QueueReadWriteCommand =
2787 DAC960_V2_QueueReadWriteCommand;
2788 break;
2789 case DAC960_LP_Controller:
2790 DAC960_LP_DisableInterrupts(BaseAddress);
2791 DAC960_LP_AcknowledgeHardwareMailboxStatus(BaseAddress);
2792 udelay(1000);
2793 while (DAC960_LP_InitializationInProgressP(BaseAddress))
2794 {
2795 if (DAC960_LP_ReadErrorStatus(BaseAddress, &ErrorStatus,
2796 &Parameter0, &Parameter1) &&
2797 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2798 Parameter0, Parameter1))
2799 goto Failure;
2800 udelay(10);
2801 }
2802 if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
2803 {
2804 DAC960_Error("Unable to Enable Memory Mailbox Interface "
2805 "for Controller at\n", Controller);
2806 goto Failure;
2807 }
2808 DAC960_LP_EnableInterrupts(BaseAddress);
2809 Controller->QueueCommand = DAC960_LP_QueueCommand;
2810 Controller->ReadControllerConfiguration =
2811 DAC960_V2_ReadControllerConfiguration;
2812 Controller->ReadDeviceConfiguration =
2813 DAC960_V2_ReadDeviceConfiguration;
2814 Controller->ReportDeviceConfiguration =
2815 DAC960_V2_ReportDeviceConfiguration;
2816 Controller->QueueReadWriteCommand =
2817 DAC960_V2_QueueReadWriteCommand;
2818 break;
2819 case DAC960_LA_Controller:
2820 DAC960_LA_DisableInterrupts(BaseAddress);
2821 DAC960_LA_AcknowledgeHardwareMailboxStatus(BaseAddress);
2822 udelay(1000);
2823 while (DAC960_LA_InitializationInProgressP(BaseAddress))
2824 {
2825 if (DAC960_LA_ReadErrorStatus(BaseAddress, &ErrorStatus,
2826 &Parameter0, &Parameter1) &&
2827 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2828 Parameter0, Parameter1))
2829 goto Failure;
2830 udelay(10);
2831 }
2832 if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
2833 {
2834 DAC960_Error("Unable to Enable Memory Mailbox Interface "
2835 "for Controller at\n", Controller);
2836 goto Failure;
2837 }
2838 DAC960_LA_EnableInterrupts(BaseAddress);
2839 if (Controller->V1.DualModeMemoryMailboxInterface)
2840 Controller->QueueCommand = DAC960_LA_QueueCommandDualMode;
2841 else Controller->QueueCommand = DAC960_LA_QueueCommandSingleMode;
2842 Controller->ReadControllerConfiguration =
2843 DAC960_V1_ReadControllerConfiguration;
2844 Controller->ReadDeviceConfiguration =
2845 DAC960_V1_ReadDeviceConfiguration;
2846 Controller->ReportDeviceConfiguration =
2847 DAC960_V1_ReportDeviceConfiguration;
2848 Controller->QueueReadWriteCommand =
2849 DAC960_V1_QueueReadWriteCommand;
2850 break;
2851 case DAC960_PG_Controller:
2852 DAC960_PG_DisableInterrupts(BaseAddress);
2853 DAC960_PG_AcknowledgeHardwareMailboxStatus(BaseAddress);
2854 udelay(1000);
2855 while (DAC960_PG_InitializationInProgressP(BaseAddress))
2856 {
2857 if (DAC960_PG_ReadErrorStatus(BaseAddress, &ErrorStatus,
2858 &Parameter0, &Parameter1) &&
2859 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2860 Parameter0, Parameter1))
2861 goto Failure;
2862 udelay(10);
2863 }
2864 if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
2865 {
2866 DAC960_Error("Unable to Enable Memory Mailbox Interface "
2867 "for Controller at\n", Controller);
2868 goto Failure;
2869 }
2870 DAC960_PG_EnableInterrupts(BaseAddress);
2871 if (Controller->V1.DualModeMemoryMailboxInterface)
2872 Controller->QueueCommand = DAC960_PG_QueueCommandDualMode;
2873 else Controller->QueueCommand = DAC960_PG_QueueCommandSingleMode;
2874 Controller->ReadControllerConfiguration =
2875 DAC960_V1_ReadControllerConfiguration;
2876 Controller->ReadDeviceConfiguration =
2877 DAC960_V1_ReadDeviceConfiguration;
2878 Controller->ReportDeviceConfiguration =
2879 DAC960_V1_ReportDeviceConfiguration;
2880 Controller->QueueReadWriteCommand =
2881 DAC960_V1_QueueReadWriteCommand;
2882 break;
2883 case DAC960_PD_Controller:
2884 if (!request_region(Controller->IO_Address, 0x80,
2885 Controller->FullModelName)) {
2886 	DAC960_Error("IO port 0x%X busy for Controller at\n",
2887 Controller, Controller->IO_Address);
2888 goto Failure;
2889 }
2890 DAC960_PD_DisableInterrupts(BaseAddress);
2891 DAC960_PD_AcknowledgeStatus(BaseAddress);
2892 udelay(1000);
2893 while (DAC960_PD_InitializationInProgressP(BaseAddress))
2894 {
2895 if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
2896 &Parameter0, &Parameter1) &&
2897 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2898 Parameter0, Parameter1))
2899 goto Failure;
2900 udelay(10);
2901 }
2902 if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
2903 {
2904 DAC960_Error("Unable to allocate DMA mapped memory "
2905 "for Controller at\n", Controller);
2906 goto Failure;
2907 }
2908 DAC960_PD_EnableInterrupts(BaseAddress);
2909 Controller->QueueCommand = DAC960_PD_QueueCommand;
2910 Controller->ReadControllerConfiguration =
2911 DAC960_V1_ReadControllerConfiguration;
2912 Controller->ReadDeviceConfiguration =
2913 DAC960_V1_ReadDeviceConfiguration;
2914 Controller->ReportDeviceConfiguration =
2915 DAC960_V1_ReportDeviceConfiguration;
2916 Controller->QueueReadWriteCommand =
2917 DAC960_V1_QueueReadWriteCommand;
2918 break;
2919 case DAC960_P_Controller:
2920 if (!request_region(Controller->IO_Address, 0x80,
2921 Controller->FullModelName)){
2922 	DAC960_Error("IO port 0x%X busy for Controller at\n",
2923 Controller, Controller->IO_Address);
2924 goto Failure;
2925 }
2926 DAC960_PD_DisableInterrupts(BaseAddress);
2927 DAC960_PD_AcknowledgeStatus(BaseAddress);
2928 udelay(1000);
2929 while (DAC960_PD_InitializationInProgressP(BaseAddress))
2930 {
2931 if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
2932 &Parameter0, &Parameter1) &&
2933 DAC960_ReportErrorStatus(Controller, ErrorStatus,
2934 Parameter0, Parameter1))
2935 goto Failure;
2936 udelay(10);
2937 }
2938 if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
2939 {
2940 	  DAC960_Error("Unable to allocate DMA mapped memory "
2941 "for Controller at\n", Controller);
2942 goto Failure;
2943 }
2944 DAC960_PD_EnableInterrupts(BaseAddress);
2945 Controller->QueueCommand = DAC960_P_QueueCommand;
2946 Controller->ReadControllerConfiguration =
2947 DAC960_V1_ReadControllerConfiguration;
2948 Controller->ReadDeviceConfiguration =
2949 DAC960_V1_ReadDeviceConfiguration;
2950 Controller->ReportDeviceConfiguration =
2951 DAC960_V1_ReportDeviceConfiguration;
2952 Controller->QueueReadWriteCommand =
2953 DAC960_V1_QueueReadWriteCommand;
2954 break;
2955 }
2956 /*
2957 Acquire shared access to the IRQ Channel.
2958 */
2959 IRQ_Channel = PCI_Device->irq;
2960 if (request_irq(IRQ_Channel, InterruptHandler, SA_SHIRQ,
2961 Controller->FullModelName, Controller) < 0)
2962 {
2963 DAC960_Error("Unable to acquire IRQ Channel %d for Controller at\n",
2964 		   Controller, IRQ_Channel);
2965 goto Failure;
2966 }
2967 Controller->IRQ_Channel = IRQ_Channel;
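  /*
    Seed the free command list with the single embedded InitialCommand so
    that commands can be issued before the full command pool is allocated
    later during controller initialization.
  */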
2968 Controller->InitialCommand.CommandIdentifier = 1;
2969 Controller->InitialCommand.Controller = Controller;
2970 Controller->Commands[0] = &Controller->InitialCommand;
2971 Controller->FreeCommands = &Controller->InitialCommand;
2972 return Controller;
2973
2974Failure:
2975 if (Controller->IO_Address == 0)
2976 DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
2977 "PCI Address 0x%X\n", Controller,
2978 Controller->Bus, Controller->Device,
2979 Controller->Function, Controller->PCI_Address);
2980 else
2981 DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
2982 "0x%X PCI Address 0x%X\n", Controller,
2983 Controller->Bus, Controller->Device,
2984 Controller->Function, Controller->IO_Address,
2985 Controller->PCI_Address);
2986 DAC960_DetectCleanup(Controller);
2987 DAC960_ControllerCount--;
2988 return NULL;
2989}
2990
2991/*
2992 DAC960_InitializeController initializes Controller.
2993*/
2994
2995static boolean
2996DAC960_InitializeController(DAC960_Controller_T *Controller)
2997{
2998 if (DAC960_ReadControllerConfiguration(Controller) &&
2999 DAC960_ReportControllerConfiguration(Controller) &&
3000 DAC960_CreateAuxiliaryStructures(Controller) &&
3001 DAC960_ReadDeviceConfiguration(Controller) &&
3002 DAC960_ReportDeviceConfiguration(Controller) &&
3003 DAC960_RegisterBlockDevice(Controller))
3004 {
3005 /*
3006 Initialize the Monitoring Timer.
3007 */
3008 init_timer(&Controller->MonitoringTimer);
3009 Controller->MonitoringTimer.expires =
3010 jiffies + DAC960_MonitoringTimerInterval;
3011 Controller->MonitoringTimer.data = (unsigned long) Controller;
3012 Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
3013 add_timer(&Controller->MonitoringTimer);
3014 Controller->ControllerInitialized = true;
3015 return true;
3016 }
3017 return false;
3018}
3019
3020
3021/*
3022 DAC960_FinalizeController finalizes Controller.
3023*/
3024
3025static void DAC960_FinalizeController(DAC960_Controller_T *Controller)
3026{
3027 if (Controller->ControllerInitialized)
3028 {
3029 unsigned long flags;
3030
3031 /*
3032 	 * Acquiring and releasing the lock here eliminates
3033 * a very low probability race.
3034 *
3035 * The code below allocates controller command structures
3036 * from the free list without holding the controller lock.
3037 * This is safe assuming there is no other activity on
3038 * the controller at the time.
3039 *
3040 * But, there might be a monitoring command still
3041 * in progress. Setting the Shutdown flag while holding
3042 * the lock ensures that there is no monitoring command
3043 * in the interrupt handler currently, and any monitoring
3044 * commands that complete from this time on will NOT return
3045 * their command structure to the free list.
3046 */
3047
3048 spin_lock_irqsave(&Controller->queue_lock, flags);
3049 Controller->ShutdownMonitoringTimer = 1;
3050 spin_unlock_irqrestore(&Controller->queue_lock, flags);
3051
3052 del_timer_sync(&Controller->MonitoringTimer);
3053 if (Controller->FirmwareType == DAC960_V1_Controller)
3054 {
3055 DAC960_Notice("Flushing Cache...", Controller);
3056 DAC960_V1_ExecuteType3(Controller, DAC960_V1_Flush, 0);
3057 DAC960_Notice("done\n", Controller);
3058
3059 if (Controller->HardwareType == DAC960_PD_Controller)
3060 release_region(Controller->IO_Address, 0x80);
3061 }
3062 else
3063 {
3064 DAC960_Notice("Flushing Cache...", Controller);
3065 DAC960_V2_DeviceOperation(Controller, DAC960_V2_PauseDevice,
3066 DAC960_V2_RAID_Controller);
3067 DAC960_Notice("done\n", Controller);
3068 }
3069 }
3070 DAC960_UnregisterBlockDevice(Controller);
3071 DAC960_DestroyAuxiliaryStructures(Controller);
3072 DAC960_DestroyProcEntries(Controller);
3073 DAC960_DetectCleanup(Controller);
3074}
3075
3076
3077/*
3078   DAC960_Probe verifies the controller's existence and
3079 initializes the DAC960 Driver for that controller.
3080*/
3081
3082static int
3083DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry)
3084{
3085 int disk;
3086 DAC960_Controller_T *Controller;
3087
3088 if (DAC960_ControllerCount == DAC960_MaxControllers)
3089 {
3090 DAC960_Error("More than %d DAC960 Controllers detected - "
3091 "ignoring from Controller at\n",
3092 NULL, DAC960_MaxControllers);
3093 return -ENODEV;
3094 }
3095
3096 Controller = DAC960_DetectController(dev, entry);
3097 if (!Controller)
3098 return -ENODEV;
3099
3100 if (!DAC960_InitializeController(Controller)) {
3101 DAC960_FinalizeController(Controller);
3102 return -ENODEV;
3103 }
3104
3105 for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
3106 set_capacity(Controller->disks[disk], disk_size(Controller, disk));
3107 add_disk(Controller->disks[disk]);
3108 }
3109 DAC960_CreateProcEntries(Controller);
3110 return 0;
3111}
3112
3113
3114/*
3115   DAC960_Remove finalizes the DAC960 Driver for the Controller associated with PCI_Device.
3116*/
3117
3118static void DAC960_Remove(struct pci_dev *PCI_Device)
3119{
3120 int Controller_Number = (long)pci_get_drvdata(PCI_Device);
3121 DAC960_Controller_T *Controller = DAC960_Controllers[Controller_Number];
3122 if (Controller != NULL)
3123 DAC960_FinalizeController(Controller);
3124}
3125
3126
3127/*
3128 DAC960_V1_QueueReadWriteCommand prepares and queues a Read/Write Command for
3129 DAC960 V1 Firmware Controllers.
3130*/
3131
3132static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *Command)
3133{
3134 DAC960_Controller_T *Controller = Command->Controller;
3135 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
3136 DAC960_V1_ScatterGatherSegment_T *ScatterGatherList =
3137 Command->V1.ScatterGatherList;
3138 struct scatterlist *ScatterList = Command->V1.ScatterList;
3139
3140 DAC960_V1_ClearCommand(Command);
3141
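  /*
    A single-segment transfer uses the plain Read/Write opcode with the
    buffer's bus address in the mailbox; multi-segment transfers use the
    Scatter/Gather opcodes and point the mailbox at the DMA-mapped
    scatter/gather list instead.
  */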
3142 if (Command->SegmentCount == 1)
3143 {
3144 if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
3145 CommandMailbox->Type5.CommandOpcode = DAC960_V1_Read;
3146 else
3147 CommandMailbox->Type5.CommandOpcode = DAC960_V1_Write;
3148
3149 CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
3150 CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
3151 CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
3152 CommandMailbox->Type5.BusAddress =
3153 (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
3154 }
3155 else
3156 {
3157 int i;
3158
3159 if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
3160 CommandMailbox->Type5.CommandOpcode = DAC960_V1_ReadWithScatterGather;
3161 else
3162 CommandMailbox->Type5.CommandOpcode = DAC960_V1_WriteWithScatterGather;
3163
3164 CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
3165 CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
3166 CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
3167 CommandMailbox->Type5.BusAddress = Command->V1.ScatterGatherListDMA;
3168
3169 CommandMailbox->Type5.ScatterGatherCount = Command->SegmentCount;
3170
3171 for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
3172 ScatterGatherList->SegmentDataPointer =
3173 (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
3174 ScatterGatherList->SegmentByteCount =
3175 (DAC960_ByteCount32_T)sg_dma_len(ScatterList);
3176 }
3177 }
3178 DAC960_QueueCommand(Command);
3179}
3180
3181
3182/*
3183 DAC960_V2_QueueReadWriteCommand prepares and queues a Read/Write Command for
3184 DAC960 V2 Firmware Controllers.
3185*/
3186
3187static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *Command)
3188{
3189 DAC960_Controller_T *Controller = Command->Controller;
3190 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
3191 struct scatterlist *ScatterList = Command->V2.ScatterList;
3192
3193 DAC960_V2_ClearCommand(Command);
3194
3195 CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10;
3196 CommandMailbox->SCSI_10.CommandControlBits.DataTransferControllerToHost =
3197 (Command->DmaDirection == PCI_DMA_FROMDEVICE);
3198 CommandMailbox->SCSI_10.DataTransferSize =
3199 Command->BlockCount << DAC960_BlockSizeBits;
3200 CommandMailbox->SCSI_10.RequestSenseBusAddress = Command->V2.RequestSenseDMA;
3201 CommandMailbox->SCSI_10.PhysicalDevice =
3202 Controller->V2.LogicalDriveToVirtualDevice[Command->LogicalDriveNumber];
3203 CommandMailbox->SCSI_10.RequestSenseSize = sizeof(DAC960_SCSI_RequestSense_T);
3204 CommandMailbox->SCSI_10.CDBLength = 10;
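  /* Build a READ(10) or WRITE(10) CDB: opcode in byte 0, big-endian Logical
     Block Address in bytes 2-5, big-endian transfer length (in blocks) in
     bytes 7-8. */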
3205 CommandMailbox->SCSI_10.SCSI_CDB[0] =
3206 (Command->DmaDirection == PCI_DMA_FROMDEVICE ? 0x28 : 0x2A);
3207 CommandMailbox->SCSI_10.SCSI_CDB[2] = Command->BlockNumber >> 24;
3208 CommandMailbox->SCSI_10.SCSI_CDB[3] = Command->BlockNumber >> 16;
3209 CommandMailbox->SCSI_10.SCSI_CDB[4] = Command->BlockNumber >> 8;
3210 CommandMailbox->SCSI_10.SCSI_CDB[5] = Command->BlockNumber;
3211 CommandMailbox->SCSI_10.SCSI_CDB[7] = Command->BlockCount >> 8;
3212 CommandMailbox->SCSI_10.SCSI_CDB[8] = Command->BlockCount;
3213
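  /*
    Up to two scatter/gather segments fit directly in the command mailbox;
    longer lists are passed via the ExtendedScatterGather form, which points
    at the separately DMA-mapped scatter/gather list.
  */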
3214 if (Command->SegmentCount == 1)
3215 {
3216 CommandMailbox->SCSI_10.DataTransferMemoryAddress
3217 .ScatterGatherSegments[0]
3218 .SegmentDataPointer =
3219 (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
3220 CommandMailbox->SCSI_10.DataTransferMemoryAddress
3221 .ScatterGatherSegments[0]
3222 .SegmentByteCount =
3223 CommandMailbox->SCSI_10.DataTransferSize;
3224 }
3225 else
3226 {
3227 DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
3228 int i;
3229
3230 if (Command->SegmentCount > 2)
3231 {
3232 ScatterGatherList = Command->V2.ScatterGatherList;
3233 CommandMailbox->SCSI_10.CommandControlBits
3234 .AdditionalScatterGatherListMemory = true;
3235 CommandMailbox->SCSI_10.DataTransferMemoryAddress
3236 .ExtendedScatterGather.ScatterGatherList0Length = Command->SegmentCount;
3237 CommandMailbox->SCSI_10.DataTransferMemoryAddress
3238 .ExtendedScatterGather.ScatterGatherList0Address =
3239 Command->V2.ScatterGatherListDMA;
3240 }
3241 else
3242 ScatterGatherList = CommandMailbox->SCSI_10.DataTransferMemoryAddress
3243 .ScatterGatherSegments;
3244
3245 for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
3246 ScatterGatherList->SegmentDataPointer =
3247 (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
3248 ScatterGatherList->SegmentByteCount =
3249 (DAC960_ByteCount64_T)sg_dma_len(ScatterList);
3250 }
3251 }
3252 DAC960_QueueCommand(Command);
3253}
3254
3255
3256static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_queue *req_q)
3257{
3258 struct request *Request;
3259 DAC960_Command_T *Command;
3260
3261 while(1) {
3262 Request = elv_next_request(req_q);
3263 if (!Request)
3264 return 1;
3265
3266 Command = DAC960_AllocateCommand(Controller);
3267 if (Command == NULL)
3268 return 0;
3269
3270 if (rq_data_dir(Request) == READ) {
3271 Command->DmaDirection = PCI_DMA_FROMDEVICE;
3272 Command->CommandType = DAC960_ReadCommand;
3273 } else {
3274 Command->DmaDirection = PCI_DMA_TODEVICE;
3275 Command->CommandType = DAC960_WriteCommand;
3276 }
3277 Command->Completion = Request->waiting;
3278 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
3279 Command->BlockNumber = Request->sector;
3280 Command->BlockCount = Request->nr_sectors;
3281 Command->Request = Request;
3282 blkdev_dequeue_request(Request);
3283 Command->SegmentCount = blk_rq_map_sg(req_q,
3284 Command->Request, Command->cmd_sglist);
3285 	/* pci_map_sg MAY change the value of SegmentCount */
3286 Command->SegmentCount = pci_map_sg(Controller->PCIDevice, Command->cmd_sglist,
3287 Command->SegmentCount, Command->DmaDirection);
3288
3289 DAC960_QueueReadWriteCommand(Command);
3290 }
3291}
3292
3293/*
3294   DAC960_ProcessRequest submits waiting I/O Requests from the Controller's
3295   per-Logical Drive Request Queues to the Controller.  It cycles through the
3296   Request Queues round-robin, stops when no free Command is available, and
3297   remembers where it stopped so that the next call resumes there.
3298*/
3299static void DAC960_ProcessRequest(DAC960_Controller_T *controller)
3300{
3301 int i;
3302
3303 if (!controller->ControllerInitialized)
3304 return;
3305
3306 /* Do this better later! */
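  /*
    Cycle through the per-logical-drive request queues round-robin: continue
    from the queue where the previous pass ran out of free Commands, wrap
    around to the beginning, and record where this pass stalls so the next
    call resumes there.
  */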
3307 for (i = controller->req_q_index; i < DAC960_MaxLogicalDrives; i++) {
3308 struct request_queue *req_q = controller->RequestQueue[i];
3309
3310 if (req_q == NULL)
3311 continue;
3312
3313 if (!DAC960_process_queue(controller, req_q)) {
3314 controller->req_q_index = i;
3315 return;
3316 }
3317 }
3318
3319 if (controller->req_q_index == 0)
3320 return;
3321
3322 for (i = 0; i < controller->req_q_index; i++) {
3323 struct request_queue *req_q = controller->RequestQueue[i];
3324
3325 if (req_q == NULL)
3326 continue;
3327
3328 if (!DAC960_process_queue(controller, req_q)) {
3329 controller->req_q_index = i;
3330 return;
3331 }
3332 }
3333}
3334
3335
3336/*
3337  DAC960_queue_partial_rw extracts one bio from the request already
3338  associated with the argument Command, constructs a new command block to
3339  retry I/O only on that bio, and queues that command to the controller.
3340 
3341  This function re-uses a previously-allocated Command, so there is no
3342  failure mode from trying to allocate a command.
3343*/
3344
3345static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
3346{
3347 DAC960_Controller_T *Controller = Command->Controller;
3348 struct request *Request = Command->Request;
3349 struct request_queue *req_q = Controller->RequestQueue[Command->LogicalDriveNumber];
3350
3351 if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
3352 Command->CommandType = DAC960_ReadRetryCommand;
3353 else
3354 Command->CommandType = DAC960_WriteRetryCommand;
3355
3356 /*
3357 * We could be more efficient with these mapping requests
3358 * and map only the portions that we need. But since this
3359 * code should almost never be called, just go with a
3360 	 * simple approach.
3361 */
3362 (void)blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist);
3363
3364 (void)pci_map_sg(Controller->PCIDevice, Command->cmd_sglist, 1, Command->DmaDirection);
3365 /*
3366 	 * Resubmitting the request one sector at a time is really tedious,
3367 	 * but this should almost never happen.  We're willing to pay this
3368 	 * price so that, in the end, as much of the transfer as possible
3369 	 * completes successfully.
3370 */
3371 Command->SegmentCount = 1;
3372 Command->BlockNumber = Request->sector;
3373 Command->BlockCount = 1;
3374 DAC960_QueueReadWriteCommand(Command);
3375 return;
3376}
3377
3378/*
3379 DAC960_RequestFunction is the I/O Request Function for DAC960 Controllers.
3380*/
3381
3382static void DAC960_RequestFunction(struct request_queue *RequestQueue)
3383{
3384 DAC960_ProcessRequest(RequestQueue->queuedata);
3385}
3386
3387/*
3388   DAC960_ProcessCompletedRequest performs completion processing for an
3389   individual Request.
3390*/
3391
3392static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
3393 boolean SuccessfulIO)
3394{
3395 struct request *Request = Command->Request;
3396 int UpToDate;
3397
3398 UpToDate = 0;
3399 if (SuccessfulIO)
3400 UpToDate = 1;
3401
3402 pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
3403 Command->SegmentCount, Command->DmaDirection);
3404
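 	/*
 	  end_that_request_first returns nonzero while portions of the request
 	  remain outstanding; once it returns zero the whole request has
 	  completed and can be ended.
 	*/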
3405 if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
3406
3407 end_that_request_last(Request);
3408
3409 if (Command->Completion) {
3410 complete(Command->Completion);
3411 Command->Completion = NULL;
3412 }
3413 return true;
3414 }
3415 return false;
3416}
3417
3418/*
3419 DAC960_V1_ReadWriteError prints an appropriate error message for Command
3420 when an error occurs on a Read or Write operation.
3421*/
3422
3423static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command)
3424{
3425 DAC960_Controller_T *Controller = Command->Controller;
3426 unsigned char *CommandName = "UNKNOWN";
3427 switch (Command->CommandType)
3428 {
3429 case DAC960_ReadCommand:
3430 case DAC960_ReadRetryCommand:
3431 CommandName = "READ";
3432 break;
3433 case DAC960_WriteCommand:
3434 case DAC960_WriteRetryCommand:
3435 CommandName = "WRITE";
3436 break;
3437 case DAC960_MonitoringCommand:
3438 case DAC960_ImmediateCommand:
3439 case DAC960_QueuedCommand:
3440 break;
3441 }
3442 switch (Command->V1.CommandStatus)
3443 {
3444 case DAC960_V1_IrrecoverableDataError:
3445 DAC960_Error("Irrecoverable Data Error on %s:\n",
3446 Controller, CommandName);
3447 break;
3448 case DAC960_V1_LogicalDriveNonexistentOrOffline:
3449 DAC960_Error("Logical Drive Nonexistent or Offline on %s:\n",
3450 Controller, CommandName);
3451 break;
3452 case DAC960_V1_AccessBeyondEndOfLogicalDrive:
3453 DAC960_Error("Attempt to Access Beyond End of Logical Drive "
3454 "on %s:\n", Controller, CommandName);
3455 break;
3456 case DAC960_V1_BadDataEncountered:
3457 DAC960_Error("Bad Data Encountered on %s:\n", Controller, CommandName);
3458 break;
3459 default:
3460 DAC960_Error("Unexpected Error Status %04X on %s:\n",
3461 Controller, Command->V1.CommandStatus, CommandName);
3462 break;
3463 }
3464 DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
3465 Controller, Controller->ControllerNumber,
3466 Command->LogicalDriveNumber, Command->BlockNumber,
3467 Command->BlockNumber + Command->BlockCount - 1);
3468}
3469
3470
3471/*
3472 DAC960_V1_ProcessCompletedCommand performs completion processing for Command
3473 for DAC960 V1 Firmware Controllers.
3474*/
3475
3476static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
3477{
3478 DAC960_Controller_T *Controller = Command->Controller;
3479 DAC960_CommandType_T CommandType = Command->CommandType;
3480 DAC960_V1_CommandOpcode_T CommandOpcode =
3481 Command->V1.CommandMailbox.Common.CommandOpcode;
3482 DAC960_V1_CommandStatus_T CommandStatus = Command->V1.CommandStatus;
3483
3484 if (CommandType == DAC960_ReadCommand ||
3485 CommandType == DAC960_WriteCommand)
3486 {
3487
3488#ifdef FORCE_RETRY_DEBUG
3489 CommandStatus = DAC960_V1_IrrecoverableDataError;
3490#endif
3491
3492 if (CommandStatus == DAC960_V1_NormalCompletion) {
3493
3494 if (!DAC960_ProcessCompletedRequest(Command, true))
3495 BUG();
3496
3497 } else if (CommandStatus == DAC960_V1_IrrecoverableDataError ||
3498 CommandStatus == DAC960_V1_BadDataEncountered)
3499 {
3500 /*
3501 * break the command down into pieces and resubmit each
3502 * piece, hoping that some of them will succeed.
3503 */
3504 DAC960_queue_partial_rw(Command);
3505 return;
3506 }
3507 else
3508 {
3509 if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
3510 DAC960_V1_ReadWriteError(Command);
3511
3512 if (!DAC960_ProcessCompletedRequest(Command, false))
3513 BUG();
3514 }
3515 }
3516 else if (CommandType == DAC960_ReadRetryCommand ||
3517 CommandType == DAC960_WriteRetryCommand)
3518 {
3519 boolean normal_completion;
3520#ifdef FORCE_RETRY_FAILURE_DEBUG
3521 static int retry_count = 1;
3522#endif
3523 /*
3524 Perform completion processing for the portion that was
3525 retried, and submit the next portion, if any.
3526 */
3527 normal_completion = true;
3528 if (CommandStatus != DAC960_V1_NormalCompletion) {
3529 normal_completion = false;
3530 if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
3531 DAC960_V1_ReadWriteError(Command);
3532 }
3533
3534#ifdef FORCE_RETRY_FAILURE_DEBUG
3535 if (!(++retry_count % 10000)) {
3536 printk("V1 error retry failure test\n");
3537 normal_completion = false;
3538 DAC960_V1_ReadWriteError(Command);
3539 }
3540#endif
3541
3542 if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
3543 DAC960_queue_partial_rw(Command);
3544 return;
3545 }
3546 }
3547
3548 else if (CommandType == DAC960_MonitoringCommand)
3549 {
3550 if (Controller->ShutdownMonitoringTimer)
3551 return;
3552 if (CommandOpcode == DAC960_V1_Enquiry)
3553 {
3554 DAC960_V1_Enquiry_T *OldEnquiry = &Controller->V1.Enquiry;
3555 DAC960_V1_Enquiry_T *NewEnquiry = Controller->V1.NewEnquiry;
3556 unsigned int OldCriticalLogicalDriveCount =
3557 OldEnquiry->CriticalLogicalDriveCount;
3558 unsigned int NewCriticalLogicalDriveCount =
3559 NewEnquiry->CriticalLogicalDriveCount;
3560 if (NewEnquiry->NumberOfLogicalDrives > Controller->LogicalDriveCount)
3561 {
3562 int LogicalDriveNumber = Controller->LogicalDriveCount - 1;
3563 while (++LogicalDriveNumber < NewEnquiry->NumberOfLogicalDrives)
3564 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
3565 "Now Exists\n", Controller,
3566 LogicalDriveNumber,
3567 Controller->ControllerNumber,
3568 LogicalDriveNumber);
3569 Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
3570 DAC960_ComputeGenericDiskInfo(Controller);
3571 }
3572 if (NewEnquiry->NumberOfLogicalDrives < Controller->LogicalDriveCount)
3573 {
3574 int LogicalDriveNumber = NewEnquiry->NumberOfLogicalDrives - 1;
3575 while (++LogicalDriveNumber < Controller->LogicalDriveCount)
3576 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
3577 "No Longer Exists\n", Controller,
3578 LogicalDriveNumber,
3579 Controller->ControllerNumber,
3580 LogicalDriveNumber);
3581 Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
3582 DAC960_ComputeGenericDiskInfo(Controller);
3583 }
3584 if (NewEnquiry->StatusFlags.DeferredWriteError !=
3585 OldEnquiry->StatusFlags.DeferredWriteError)
3586 DAC960_Critical("Deferred Write Error Flag is now %s\n", Controller,
3587 (NewEnquiry->StatusFlags.DeferredWriteError
3588 ? "TRUE" : "FALSE"));
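	  /*
	    Schedule a full secondary status refresh whenever any critical,
	    offline, or dead counts are non-zero or have changed, the event
	    log has advanced, this is the first monitoring pass, or the
	    secondary monitoring interval has elapsed.
	  */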
3589 if ((NewCriticalLogicalDriveCount > 0 ||
3590 NewCriticalLogicalDriveCount != OldCriticalLogicalDriveCount) ||
3591 (NewEnquiry->OfflineLogicalDriveCount > 0 ||
3592 NewEnquiry->OfflineLogicalDriveCount !=
3593 OldEnquiry->OfflineLogicalDriveCount) ||
3594 (NewEnquiry->DeadDriveCount > 0 ||
3595 NewEnquiry->DeadDriveCount !=
3596 OldEnquiry->DeadDriveCount) ||
3597 (NewEnquiry->EventLogSequenceNumber !=
3598 OldEnquiry->EventLogSequenceNumber) ||
3599 Controller->MonitoringTimerCount == 0 ||
3600 (jiffies - Controller->SecondaryMonitoringTime
3601 >= DAC960_SecondaryMonitoringInterval))
3602 {
3603 Controller->V1.NeedLogicalDriveInformation = true;
3604 Controller->V1.NewEventLogSequenceNumber =
3605 NewEnquiry->EventLogSequenceNumber;
3606 Controller->V1.NeedErrorTableInformation = true;
3607 Controller->V1.NeedDeviceStateInformation = true;
3608 Controller->V1.StartDeviceStateScan = true;
3609 Controller->V1.NeedBackgroundInitializationStatus =
3610 Controller->V1.BackgroundInitializationStatusSupported;
3611 Controller->SecondaryMonitoringTime = jiffies;
3612 }
3613 if (NewEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
3614 NewEnquiry->RebuildFlag
3615 == DAC960_V1_BackgroundRebuildInProgress ||
3616 OldEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
3617 OldEnquiry->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress)
3618 {
3619 Controller->V1.NeedRebuildProgress = true;
3620 Controller->V1.RebuildProgressFirst =
3621 (NewEnquiry->CriticalLogicalDriveCount <
3622 OldEnquiry->CriticalLogicalDriveCount);
3623 }
3624 if (OldEnquiry->RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
3625 switch (NewEnquiry->RebuildFlag)
3626 {
3627 case DAC960_V1_NoStandbyRebuildOrCheckInProgress:
3628 DAC960_Progress("Consistency Check Completed Successfully\n",
3629 Controller);
3630 break;
3631 case DAC960_V1_StandbyRebuildInProgress:
3632 case DAC960_V1_BackgroundRebuildInProgress:
3633 break;
3634 case DAC960_V1_BackgroundCheckInProgress:
3635 Controller->V1.NeedConsistencyCheckProgress = true;
3636 break;
3637 case DAC960_V1_StandbyRebuildCompletedWithError:
3638 DAC960_Progress("Consistency Check Completed with Error\n",
3639 Controller);
3640 break;
3641 case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed:
3642 DAC960_Progress("Consistency Check Failed - "
3643 "Physical Device Failed\n", Controller);
3644 break;
3645 case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed:
3646 DAC960_Progress("Consistency Check Failed - "
3647 "Logical Drive Failed\n", Controller);
3648 break;
3649 case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses:
3650 DAC960_Progress("Consistency Check Failed - Other Causes\n",
3651 Controller);
3652 break;
3653 case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated:
3654 DAC960_Progress("Consistency Check Successfully Terminated\n",
3655 Controller);
3656 break;
3657 }
3658 else if (NewEnquiry->RebuildFlag
3659 == DAC960_V1_BackgroundCheckInProgress)
3660 Controller->V1.NeedConsistencyCheckProgress = true;
3661 Controller->MonitoringAlertMode =
3662 (NewEnquiry->CriticalLogicalDriveCount > 0 ||
3663 NewEnquiry->OfflineLogicalDriveCount > 0 ||
3664 NewEnquiry->DeadDriveCount > 0);
3665 if (NewEnquiry->RebuildFlag > DAC960_V1_BackgroundCheckInProgress)
3666 {
3667 Controller->V1.PendingRebuildFlag = NewEnquiry->RebuildFlag;
3668 Controller->V1.RebuildFlagPending = true;
3669 }
3670 memcpy(&Controller->V1.Enquiry, &Controller->V1.NewEnquiry,
3671 sizeof(DAC960_V1_Enquiry_T));
3672 }
3673 else if (CommandOpcode == DAC960_V1_PerformEventLogOperation)
3674 {
3675 static char
3676 *DAC960_EventMessages[] =
3677 { "killed because write recovery failed",
3678 "killed because of SCSI bus reset failure",
3679 "killed because of double check condition",
3680 "killed because it was removed",
3681 "killed because of gross error on SCSI chip",
3682 "killed because of bad tag returned from drive",
3683 "killed because of timeout on SCSI command",
3684 "killed because of reset SCSI command issued from system",
3685 "killed because busy or parity error count exceeded limit",
3686 "killed because of 'kill drive' command from system",
3687 "killed because of selection timeout",
3688 "killed due to SCSI phase sequence error",
3689 "killed due to unknown status" };
3690 DAC960_V1_EventLogEntry_T *EventLogEntry =
3691 Controller->V1.EventLogEntry;
3692 if (EventLogEntry->SequenceNumber ==
3693 Controller->V1.OldEventLogSequenceNumber)
3694 {
3695 unsigned char SenseKey = EventLogEntry->SenseKey;
3696 unsigned char AdditionalSenseCode =
3697 EventLogEntry->AdditionalSenseCode;
3698 unsigned char AdditionalSenseCodeQualifier =
3699 EventLogEntry->AdditionalSenseCodeQualifier;
3700 if (SenseKey == DAC960_SenseKey_VendorSpecific &&
3701 AdditionalSenseCode == 0x80 &&
3702 AdditionalSenseCodeQualifier <
3703 sizeof(DAC960_EventMessages) / sizeof(char *))
3704 DAC960_Critical("Physical Device %d:%d %s\n", Controller,
3705 EventLogEntry->Channel,
3706 EventLogEntry->TargetID,
3707 DAC960_EventMessages[
3708 AdditionalSenseCodeQualifier]);
3709 else if (SenseKey == DAC960_SenseKey_UnitAttention &&
3710 AdditionalSenseCode == 0x29)
3711 {
3712 if (Controller->MonitoringTimerCount > 0)
3713 Controller->V1.DeviceResetCount[EventLogEntry->Channel]
3714 [EventLogEntry->TargetID]++;
3715 }
3716 else if (!(SenseKey == DAC960_SenseKey_NoSense ||
3717 (SenseKey == DAC960_SenseKey_NotReady &&
3718 AdditionalSenseCode == 0x04 &&
3719 (AdditionalSenseCodeQualifier == 0x01 ||
3720 AdditionalSenseCodeQualifier == 0x02))))
3721 {
3722 DAC960_Critical("Physical Device %d:%d Error Log: "
3723 "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
3724 Controller,
3725 EventLogEntry->Channel,
3726 EventLogEntry->TargetID,
3727 SenseKey,
3728 AdditionalSenseCode,
3729 AdditionalSenseCodeQualifier);
3730 DAC960_Critical("Physical Device %d:%d Error Log: "
3731 "Information = %02X%02X%02X%02X "
3732 "%02X%02X%02X%02X\n",
3733 Controller,
3734 EventLogEntry->Channel,
3735 EventLogEntry->TargetID,
3736 EventLogEntry->Information[0],
3737 EventLogEntry->Information[1],
3738 EventLogEntry->Information[2],
3739 EventLogEntry->Information[3],
3740 EventLogEntry->CommandSpecificInformation[0],
3741 EventLogEntry->CommandSpecificInformation[1],
3742 EventLogEntry->CommandSpecificInformation[2],
3743 EventLogEntry->CommandSpecificInformation[3]);
3744 }
3745 }
3746 Controller->V1.OldEventLogSequenceNumber++;
3747 }
3748 else if (CommandOpcode == DAC960_V1_GetErrorTable)
3749 {
3750 DAC960_V1_ErrorTable_T *OldErrorTable = &Controller->V1.ErrorTable;
3751 DAC960_V1_ErrorTable_T *NewErrorTable = Controller->V1.NewErrorTable;
3752 int Channel, TargetID;
3753 for (Channel = 0; Channel < Controller->Channels; Channel++)
3754 for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
3755 {
3756 DAC960_V1_ErrorTableEntry_T *NewErrorEntry =
3757 &NewErrorTable->ErrorTableEntries[Channel][TargetID];
3758 DAC960_V1_ErrorTableEntry_T *OldErrorEntry =
3759 &OldErrorTable->ErrorTableEntries[Channel][TargetID];
3760 if ((NewErrorEntry->ParityErrorCount !=
3761 OldErrorEntry->ParityErrorCount) ||
3762 (NewErrorEntry->SoftErrorCount !=
3763 OldErrorEntry->SoftErrorCount) ||
3764 (NewErrorEntry->HardErrorCount !=
3765 OldErrorEntry->HardErrorCount) ||
3766 (NewErrorEntry->MiscErrorCount !=
3767 OldErrorEntry->MiscErrorCount))
3768 DAC960_Critical("Physical Device %d:%d Errors: "
3769 "Parity = %d, Soft = %d, "
3770 "Hard = %d, Misc = %d\n",
3771 Controller, Channel, TargetID,
3772 NewErrorEntry->ParityErrorCount,
3773 NewErrorEntry->SoftErrorCount,
3774 NewErrorEntry->HardErrorCount,
3775 NewErrorEntry->MiscErrorCount);
3776 }
3777 memcpy(&Controller->V1.ErrorTable, Controller->V1.NewErrorTable,
3778 sizeof(DAC960_V1_ErrorTable_T));
3779 }
3780 else if (CommandOpcode == DAC960_V1_GetDeviceState)
3781 {
3782 DAC960_V1_DeviceState_T *OldDeviceState =
3783 &Controller->V1.DeviceState[Controller->V1.DeviceStateChannel]
3784 [Controller->V1.DeviceStateTargetID];
3785 DAC960_V1_DeviceState_T *NewDeviceState =
3786 Controller->V1.NewDeviceState;
3787 if (NewDeviceState->DeviceState != OldDeviceState->DeviceState)
3788 DAC960_Critical("Physical Device %d:%d is now %s\n", Controller,
3789 Controller->V1.DeviceStateChannel,
3790 Controller->V1.DeviceStateTargetID,
3791 (NewDeviceState->DeviceState
3792 == DAC960_V1_Device_Dead
3793 ? "DEAD"
3794 : NewDeviceState->DeviceState
3795 == DAC960_V1_Device_WriteOnly
3796 ? "WRITE-ONLY"
3797 : NewDeviceState->DeviceState
3798 == DAC960_V1_Device_Online
3799 ? "ONLINE" : "STANDBY"));
3800 if (OldDeviceState->DeviceState == DAC960_V1_Device_Dead &&
3801 NewDeviceState->DeviceState != DAC960_V1_Device_Dead)
3802 {
3803 Controller->V1.NeedDeviceInquiryInformation = true;
3804 Controller->V1.NeedDeviceSerialNumberInformation = true;
3805 Controller->V1.DeviceResetCount
3806 [Controller->V1.DeviceStateChannel]
3807 [Controller->V1.DeviceStateTargetID] = 0;
3808 }
3809 memcpy(OldDeviceState, NewDeviceState,
3810 sizeof(DAC960_V1_DeviceState_T));
3811 }
3812 else if (CommandOpcode == DAC960_V1_GetLogicalDriveInformation)
3813 {
3814 int LogicalDriveNumber;
3815 for (LogicalDriveNumber = 0;
3816 LogicalDriveNumber < Controller->LogicalDriveCount;
3817 LogicalDriveNumber++)
3818 {
3819 DAC960_V1_LogicalDriveInformation_T *OldLogicalDriveInformation =
3820 &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
3821 DAC960_V1_LogicalDriveInformation_T *NewLogicalDriveInformation =
3822 &(*Controller->V1.NewLogicalDriveInformation)[LogicalDriveNumber];
3823 if (NewLogicalDriveInformation->LogicalDriveState !=
3824 OldLogicalDriveInformation->LogicalDriveState)
3825 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
3826 "is now %s\n", Controller,
3827 LogicalDriveNumber,
3828 Controller->ControllerNumber,
3829 LogicalDriveNumber,
3830 (NewLogicalDriveInformation->LogicalDriveState
3831 == DAC960_V1_LogicalDrive_Online
3832 ? "ONLINE"
3833 : NewLogicalDriveInformation->LogicalDriveState
3834 == DAC960_V1_LogicalDrive_Critical
3835 ? "CRITICAL" : "OFFLINE"));
3836 if (NewLogicalDriveInformation->WriteBack !=
3837 OldLogicalDriveInformation->WriteBack)
3838 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
3839 "is now %s\n", Controller,
3840 LogicalDriveNumber,
3841 Controller->ControllerNumber,
3842 LogicalDriveNumber,
3843 (NewLogicalDriveInformation->WriteBack
3844 ? "WRITE BACK" : "WRITE THRU"));
3845 }
3846 memcpy(&Controller->V1.LogicalDriveInformation,
3847 Controller->V1.NewLogicalDriveInformation,
3848 sizeof(DAC960_V1_LogicalDriveInformationArray_T));
3849 }
3850 else if (CommandOpcode == DAC960_V1_GetRebuildProgress)
3851 {
3852 unsigned int LogicalDriveNumber =
3853 Controller->V1.RebuildProgress->LogicalDriveNumber;
3854 unsigned int LogicalDriveSize =
3855 Controller->V1.RebuildProgress->LogicalDriveSize;
3856 unsigned int BlocksCompleted =
3857 LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
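	  /*
	    The progress percentage below scales both operands down by 128
	    (>> 7) so the multiplication by 100 does not overflow 32-bit
	    arithmetic on large logical drives.
	  */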
3858 if (CommandStatus == DAC960_V1_NoRebuildOrCheckInProgress &&
3859 Controller->V1.LastRebuildStatus == DAC960_V1_NormalCompletion)
3860 CommandStatus = DAC960_V1_RebuildSuccessful;
3861 switch (CommandStatus)
3862 {
3863 case DAC960_V1_NormalCompletion:
3864 Controller->EphemeralProgressMessage = true;
3865 DAC960_Progress("Rebuild in Progress: "
3866 "Logical Drive %d (/dev/rd/c%dd%d) "
3867 "%d%% completed\n",
3868 Controller, LogicalDriveNumber,
3869 Controller->ControllerNumber,
3870 LogicalDriveNumber,
3871 (100 * (BlocksCompleted >> 7))
3872 / (LogicalDriveSize >> 7));
3873 Controller->EphemeralProgressMessage = false;
3874 break;
3875 case DAC960_V1_RebuildFailed_LogicalDriveFailure:
3876 DAC960_Progress("Rebuild Failed due to "
3877 "Logical Drive Failure\n", Controller);
3878 break;
3879 case DAC960_V1_RebuildFailed_BadBlocksOnOther:
3880 DAC960_Progress("Rebuild Failed due to "
3881 "Bad Blocks on Other Drives\n", Controller);
3882 break;
3883 case DAC960_V1_RebuildFailed_NewDriveFailed:
3884 DAC960_Progress("Rebuild Failed due to "
3885 "Failure of Drive Being Rebuilt\n", Controller);
3886 break;
3887 case DAC960_V1_NoRebuildOrCheckInProgress:
3888 break;
3889 case DAC960_V1_RebuildSuccessful:
3890 DAC960_Progress("Rebuild Completed Successfully\n", Controller);
3891 break;
3892 case DAC960_V1_RebuildSuccessfullyTerminated:
3893 DAC960_Progress("Rebuild Successfully Terminated\n", Controller);
3894 break;
3895 }
3896 Controller->V1.LastRebuildStatus = CommandStatus;
3897 if (CommandType != DAC960_MonitoringCommand &&
3898 Controller->V1.RebuildStatusPending)
3899 {
3900 Command->V1.CommandStatus = Controller->V1.PendingRebuildStatus;
3901 Controller->V1.RebuildStatusPending = false;
3902 }
3903 else if (CommandType == DAC960_MonitoringCommand &&
3904 CommandStatus != DAC960_V1_NormalCompletion &&
3905 CommandStatus != DAC960_V1_NoRebuildOrCheckInProgress)
3906 {
3907 Controller->V1.PendingRebuildStatus = CommandStatus;
3908 Controller->V1.RebuildStatusPending = true;
3909 }
3910 }
3911 else if (CommandOpcode == DAC960_V1_RebuildStat)
3912 {
3913 unsigned int LogicalDriveNumber =
3914 Controller->V1.RebuildProgress->LogicalDriveNumber;
3915 unsigned int LogicalDriveSize =
3916 Controller->V1.RebuildProgress->LogicalDriveSize;
3917 unsigned int BlocksCompleted =
3918 LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
3919 if (CommandStatus == DAC960_V1_NormalCompletion)
3920 {
3921 Controller->EphemeralProgressMessage = true;
3922 DAC960_Progress("Consistency Check in Progress: "
3923 "Logical Drive %d (/dev/rd/c%dd%d) "
3924 "%d%% completed\n",
3925 Controller, LogicalDriveNumber,
3926 Controller->ControllerNumber,
3927 LogicalDriveNumber,
3928 (100 * (BlocksCompleted >> 7))
3929 / (LogicalDriveSize >> 7));
3930 Controller->EphemeralProgressMessage = false;
3931 }
3932 }
3933 else if (CommandOpcode == DAC960_V1_BackgroundInitializationControl)
3934 {
3935 unsigned int LogicalDriveNumber =
3936 Controller->V1.BackgroundInitializationStatus->LogicalDriveNumber;
3937 unsigned int LogicalDriveSize =
3938 Controller->V1.BackgroundInitializationStatus->LogicalDriveSize;
3939 unsigned int BlocksCompleted =
3940 Controller->V1.BackgroundInitializationStatus->BlocksCompleted;
3941 switch (CommandStatus)
3942 {
3943 case DAC960_V1_NormalCompletion:
3944 switch (Controller->V1.BackgroundInitializationStatus->Status)
3945 {
3946 case DAC960_V1_BackgroundInitializationInvalid:
3947 break;
3948 case DAC960_V1_BackgroundInitializationStarted:
3949 DAC960_Progress("Background Initialization Started\n",
3950 Controller);
3951 break;
3952 case DAC960_V1_BackgroundInitializationInProgress:
3953 if (BlocksCompleted ==
3954 Controller->V1.LastBackgroundInitializationStatus.
3955 BlocksCompleted &&
3956 LogicalDriveNumber ==
3957 Controller->V1.LastBackgroundInitializationStatus.
3958 LogicalDriveNumber)
3959 break;
3960 Controller->EphemeralProgressMessage = true;
3961 DAC960_Progress("Background Initialization in Progress: "
3962 "Logical Drive %d (/dev/rd/c%dd%d) "
3963 "%d%% completed\n",
3964 Controller, LogicalDriveNumber,
3965 Controller->ControllerNumber,
3966 LogicalDriveNumber,
3967 (100 * (BlocksCompleted >> 7))
3968 / (LogicalDriveSize >> 7));
3969 Controller->EphemeralProgressMessage = false;
3970 break;
3971 case DAC960_V1_BackgroundInitializationSuspended:
3972 DAC960_Progress("Background Initialization Suspended\n",
3973 Controller);
3974 break;
3975 case DAC960_V1_BackgroundInitializationCancelled:
3976 DAC960_Progress("Background Initialization Cancelled\n",
3977 Controller);
3978 break;
3979 }
3980 memcpy(&Controller->V1.LastBackgroundInitializationStatus,
3981 Controller->V1.BackgroundInitializationStatus,
3982 sizeof(DAC960_V1_BackgroundInitializationStatus_T));
3983 break;
3984 case DAC960_V1_BackgroundInitSuccessful:
3985 if (Controller->V1.BackgroundInitializationStatus->Status ==
3986 DAC960_V1_BackgroundInitializationInProgress)
3987 DAC960_Progress("Background Initialization "
3988 "Completed Successfully\n", Controller);
3989 Controller->V1.BackgroundInitializationStatus->Status =
3990 DAC960_V1_BackgroundInitializationInvalid;
3991 break;
3992 case DAC960_V1_BackgroundInitAborted:
3993 if (Controller->V1.BackgroundInitializationStatus->Status ==
3994 DAC960_V1_BackgroundInitializationInProgress)
3995 DAC960_Progress("Background Initialization Aborted\n",
3996 Controller);
3997 Controller->V1.BackgroundInitializationStatus->Status =
3998 DAC960_V1_BackgroundInitializationInvalid;
3999 break;
4000 case DAC960_V1_NoBackgroundInitInProgress:
4001 break;
4002 }
4003 }
4004 else if (CommandOpcode == DAC960_V1_DCDB)
4005 {
4006 /*
4007 This is a bit ugly.
4008
4009 	    The InquiryStandardData and
4010 	    the InquiryUnitSerialNumber information
4011 	    retrieval operations BOTH use DAC960_V1_DCDB
4012 	    commands.  The test above can't distinguish between
4013 	    these two cases.
4014
4015 Instead, we rely on the order of code later in this
4016 function to ensure that DeviceInquiryInformation commands
4017 are submitted before DeviceSerialNumber commands.
4018 */
4019 if (Controller->V1.NeedDeviceInquiryInformation)
4020 {
4021 DAC960_SCSI_Inquiry_T *InquiryStandardData =
4022 &Controller->V1.InquiryStandardData
4023 [Controller->V1.DeviceStateChannel]
4024 [Controller->V1.DeviceStateTargetID];
4025 if (CommandStatus != DAC960_V1_NormalCompletion)
4026 {
4027 memset(InquiryStandardData, 0,
4028 sizeof(DAC960_SCSI_Inquiry_T));
4029 InquiryStandardData->PeripheralDeviceType = 0x1F;
4030 }
4031 else
4032 memcpy(InquiryStandardData,
4033 Controller->V1.NewInquiryStandardData,
4034 sizeof(DAC960_SCSI_Inquiry_T));
4035 Controller->V1.NeedDeviceInquiryInformation = false;
4036 }
4037 else if (Controller->V1.NeedDeviceSerialNumberInformation)
4038 {
4039 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
4040 &Controller->V1.InquiryUnitSerialNumber
4041 [Controller->V1.DeviceStateChannel]
4042 [Controller->V1.DeviceStateTargetID];
4043 if (CommandStatus != DAC960_V1_NormalCompletion)
4044 {
4045 memset(InquiryUnitSerialNumber, 0,
4046 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
4047 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
4048 }
4049 else
4050 memcpy(InquiryUnitSerialNumber,
4051 Controller->V1.NewInquiryUnitSerialNumber,
4052 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
4053 Controller->V1.NeedDeviceSerialNumberInformation = false;
4054 }
4055 }
4056 /*
4057 Begin submitting new monitoring commands.
4058 */
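      /*
        Each branch below queues at most one follow-up monitoring command on
        the just-completed Command and returns; remaining categories are
        picked up on subsequent completions.  Only when nothing more is
        needed is the Monitoring Timer re-armed at the bottom.
      */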
4059 if (Controller->V1.NewEventLogSequenceNumber
4060 - Controller->V1.OldEventLogSequenceNumber > 0)
4061 {
4062 Command->V1.CommandMailbox.Type3E.CommandOpcode =
4063 DAC960_V1_PerformEventLogOperation;
4064 Command->V1.CommandMailbox.Type3E.OperationType =
4065 DAC960_V1_GetEventLogEntry;
4066 Command->V1.CommandMailbox.Type3E.OperationQualifier = 1;
4067 Command->V1.CommandMailbox.Type3E.SequenceNumber =
4068 Controller->V1.OldEventLogSequenceNumber;
4069 Command->V1.CommandMailbox.Type3E.BusAddress =
4070 Controller->V1.EventLogEntryDMA;
4071 DAC960_QueueCommand(Command);
4072 return;
4073 }
4074 if (Controller->V1.NeedErrorTableInformation)
4075 {
4076 Controller->V1.NeedErrorTableInformation = false;
4077 Command->V1.CommandMailbox.Type3.CommandOpcode =
4078 DAC960_V1_GetErrorTable;
4079 Command->V1.CommandMailbox.Type3.BusAddress =
4080 Controller->V1.NewErrorTableDMA;
4081 DAC960_QueueCommand(Command);
4082 return;
4083 }
4084 if (Controller->V1.NeedRebuildProgress &&
4085 Controller->V1.RebuildProgressFirst)
4086 {
4087 Controller->V1.NeedRebuildProgress = false;
4088 Command->V1.CommandMailbox.Type3.CommandOpcode =
4089 DAC960_V1_GetRebuildProgress;
4090 Command->V1.CommandMailbox.Type3.BusAddress =
4091 Controller->V1.RebuildProgressDMA;
4092 DAC960_QueueCommand(Command);
4093 return;
4094 }
4095 if (Controller->V1.NeedDeviceStateInformation)
4096 {
4097 if (Controller->V1.NeedDeviceInquiryInformation)
4098 {
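	      /*
	        Issue a standard INQUIRY (EVPD = 0) to the device currently
	        being scanned, using the monitoring DCDB pass-through command.
	      */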
4099 DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
4100 dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
4101
4102 dma_addr_t NewInquiryStandardDataDMA =
4103 Controller->V1.NewInquiryStandardDataDMA;
4104
4105 Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
4106 Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
4107 DCDB->Channel = Controller->V1.DeviceStateChannel;
4108 DCDB->TargetID = Controller->V1.DeviceStateTargetID;
4109 DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
4110 DCDB->EarlyStatus = false;
4111 DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
4112 DCDB->NoAutomaticRequestSense = false;
4113 DCDB->DisconnectPermitted = true;
4114 DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
4115 DCDB->BusAddress = NewInquiryStandardDataDMA;
4116 DCDB->CDBLength = 6;
4117 DCDB->TransferLengthHigh4 = 0;
4118 DCDB->SenseLength = sizeof(DCDB->SenseData);
4119 DCDB->CDB[0] = 0x12; /* INQUIRY */
4120 DCDB->CDB[1] = 0; /* EVPD = 0 */
4121 DCDB->CDB[2] = 0; /* Page Code */
4122 DCDB->CDB[3] = 0; /* Reserved */
4123 DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
4124 DCDB->CDB[5] = 0; /* Control */
4125 DAC960_QueueCommand(Command);
4126 return;
4127 }
4128 if (Controller->V1.NeedDeviceSerialNumberInformation)
4129 {
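	      /*
	        Request SCSI Vital Product Data page 0x80 (Unit Serial Number)
	        from the device via an EVPD INQUIRY.
	      */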
4130 DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
4131 dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
4132 dma_addr_t NewInquiryUnitSerialNumberDMA =
4133 Controller->V1.NewInquiryUnitSerialNumberDMA;
4134
4135 Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
4136 Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
4137 DCDB->Channel = Controller->V1.DeviceStateChannel;
4138 DCDB->TargetID = Controller->V1.DeviceStateTargetID;
4139 DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
4140 DCDB->EarlyStatus = false;
4141 DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
4142 DCDB->NoAutomaticRequestSense = false;
4143 DCDB->DisconnectPermitted = true;
4144 DCDB->TransferLength =
4145 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
4146 DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
4147 DCDB->CDBLength = 6;
4148 DCDB->TransferLengthHigh4 = 0;
4149 DCDB->SenseLength = sizeof(DCDB->SenseData);
4150 DCDB->CDB[0] = 0x12; /* INQUIRY */
4151 DCDB->CDB[1] = 1; /* EVPD = 1 */
4152 DCDB->CDB[2] = 0x80; /* Page Code */
4153 DCDB->CDB[3] = 0; /* Reserved */
4154 DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
4155 DCDB->CDB[5] = 0; /* Control */
4156 DAC960_QueueCommand(Command);
4157 return;
4158 }
4159 if (Controller->V1.StartDeviceStateScan)
4160 {
4161 Controller->V1.DeviceStateChannel = 0;
4162 Controller->V1.DeviceStateTargetID = 0;
4163 Controller->V1.StartDeviceStateScan = false;
4164 }
4165 else if (++Controller->V1.DeviceStateTargetID == Controller->Targets)
4166 {
4167 Controller->V1.DeviceStateChannel++;
4168 Controller->V1.DeviceStateTargetID = 0;
4169 }
4170 if (Controller->V1.DeviceStateChannel < Controller->Channels)
4171 {
4172 Controller->V1.NewDeviceState->DeviceState =
4173 DAC960_V1_Device_Dead;
4174 Command->V1.CommandMailbox.Type3D.CommandOpcode =
4175 DAC960_V1_GetDeviceState;
4176 Command->V1.CommandMailbox.Type3D.Channel =
4177 Controller->V1.DeviceStateChannel;
4178 Command->V1.CommandMailbox.Type3D.TargetID =
4179 Controller->V1.DeviceStateTargetID;
4180 Command->V1.CommandMailbox.Type3D.BusAddress =
4181 Controller->V1.NewDeviceStateDMA;
4182 DAC960_QueueCommand(Command);
4183 return;
4184 }
4185 Controller->V1.NeedDeviceStateInformation = false;
4186 }
4187 if (Controller->V1.NeedLogicalDriveInformation)
4188 {
4189 Controller->V1.NeedLogicalDriveInformation = false;
4190 Command->V1.CommandMailbox.Type3.CommandOpcode =
4191 DAC960_V1_GetLogicalDriveInformation;
4192 Command->V1.CommandMailbox.Type3.BusAddress =
4193 Controller->V1.NewLogicalDriveInformationDMA;
4194 DAC960_QueueCommand(Command);
4195 return;
4196 }
4197 if (Controller->V1.NeedRebuildProgress)
4198 {
4199 Controller->V1.NeedRebuildProgress = false;
4200 Command->V1.CommandMailbox.Type3.CommandOpcode =
4201 DAC960_V1_GetRebuildProgress;
4202 Command->V1.CommandMailbox.Type3.BusAddress =
4203 Controller->V1.RebuildProgressDMA;
4204 DAC960_QueueCommand(Command);
4205 return;
4206 }
4207 if (Controller->V1.NeedConsistencyCheckProgress)
4208 {
4209 Controller->V1.NeedConsistencyCheckProgress = false;
4210 Command->V1.CommandMailbox.Type3.CommandOpcode =
4211 DAC960_V1_RebuildStat;
4212 Command->V1.CommandMailbox.Type3.BusAddress =
4213 Controller->V1.RebuildProgressDMA;
4214 DAC960_QueueCommand(Command);
4215 return;
4216 }
4217 if (Controller->V1.NeedBackgroundInitializationStatus)
4218 {
4219 Controller->V1.NeedBackgroundInitializationStatus = false;
4220 Command->V1.CommandMailbox.Type3B.CommandOpcode =
4221 DAC960_V1_BackgroundInitializationControl;
4222 Command->V1.CommandMailbox.Type3B.CommandOpcode2 = 0x20;
4223 Command->V1.CommandMailbox.Type3B.BusAddress =
4224 Controller->V1.BackgroundInitializationStatusDMA;
4225 DAC960_QueueCommand(Command);
4226 return;
4227 }
4228 Controller->MonitoringTimerCount++;
4229 Controller->MonitoringTimer.expires =
4230 jiffies + DAC960_MonitoringTimerInterval;
4231 add_timer(&Controller->MonitoringTimer);
4232 }
4233 if (CommandType == DAC960_ImmediateCommand)
4234 {
4235 complete(Command->Completion);
4236 Command->Completion = NULL;
4237 return;
4238 }
4239 if (CommandType == DAC960_QueuedCommand)
4240 {
4241 DAC960_V1_KernelCommand_T *KernelCommand = Command->V1.KernelCommand;
4242 KernelCommand->CommandStatus = Command->V1.CommandStatus;
4243 Command->V1.KernelCommand = NULL;
4244 if (CommandOpcode == DAC960_V1_DCDB)
4245 Controller->V1.DirectCommandActive[KernelCommand->DCDB->Channel]
4246 [KernelCommand->DCDB->TargetID] =
4247 false;
4248 DAC960_DeallocateCommand(Command);
4249 KernelCommand->CompletionFunction(KernelCommand);
4250 return;
4251 }
4252 /*
4253 Queue a Status Monitoring Command to the Controller using the just
4254 completed Command if one was deferred previously due to lack of a
4255 free Command when the Monitoring Timer Function was called.
4256 */
4257 if (Controller->MonitoringCommandDeferred)
4258 {
4259 Controller->MonitoringCommandDeferred = false;
4260 DAC960_V1_QueueMonitoringCommand(Command);
4261 return;
4262 }
4263 /*
4264 Deallocate the Command.
4265 */
4266 DAC960_DeallocateCommand(Command);
4267 /*
4268 Wake up any processes waiting on a free Command.
4269 */
4270 wake_up(&Controller->CommandWaitQueue);
4271}
4272
4273
4274/*
4275 DAC960_V2_ReadWriteError prints an appropriate error message for Command
4276 when an error occurs on a Read or Write operation.
4277*/
4278
4279static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
4280{
4281 DAC960_Controller_T *Controller = Command->Controller;
4282 unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
4283 "NOT READY", "MEDIUM ERROR",
4284 "HARDWARE ERROR", "ILLEGAL REQUEST",
4285 "UNIT ATTENTION", "DATA PROTECT",
4286 "BLANK CHECK", "VENDOR-SPECIFIC",
4287 "COPY ABORTED", "ABORTED COMMAND",
4288 "EQUAL", "VOLUME OVERFLOW",
4289 "MISCOMPARE", "RESERVED" };
4290 unsigned char *CommandName = "UNKNOWN";
4291 switch (Command->CommandType)
4292 {
4293 case DAC960_ReadCommand:
4294 case DAC960_ReadRetryCommand:
4295 CommandName = "READ";
4296 break;
4297 case DAC960_WriteCommand:
4298 case DAC960_WriteRetryCommand:
4299 CommandName = "WRITE";
4300 break;
4301 case DAC960_MonitoringCommand:
4302 case DAC960_ImmediateCommand:
4303 case DAC960_QueuedCommand:
4304 break;
4305 }
4306 DAC960_Error("Error Condition %s on %s:\n", Controller,
4307 SenseErrors[Command->V2.RequestSense->SenseKey], CommandName);
4308 DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
4309 Controller, Controller->ControllerNumber,
4310 Command->LogicalDriveNumber, Command->BlockNumber,
4311 Command->BlockNumber + Command->BlockCount - 1);
4312}
4313
4314
4315/*
4316 DAC960_V2_ReportEvent prints an appropriate message when a Controller Event
4317 occurs.
4318*/
4319
4320static void DAC960_V2_ReportEvent(DAC960_Controller_T *Controller,
4321 DAC960_V2_Event_T *Event)
4322{
4323 DAC960_SCSI_RequestSense_T *RequestSense =
4324 (DAC960_SCSI_RequestSense_T *) &Event->RequestSenseData;
4325 unsigned char MessageBuffer[DAC960_LineBufferSize];
4326 static struct { int EventCode; unsigned char *EventMessage; } EventList[] =
4327 { /* Physical Device Events (0x0000 - 0x007F) */
4328 { 0x0001, "P Online" },
4329 { 0x0002, "P Standby" },
4330 { 0x0005, "P Automatic Rebuild Started" },
4331 { 0x0006, "P Manual Rebuild Started" },
4332 { 0x0007, "P Rebuild Completed" },
4333 { 0x0008, "P Rebuild Cancelled" },
4334 { 0x0009, "P Rebuild Failed for Unknown Reasons" },
4335 { 0x000A, "P Rebuild Failed due to New Physical Device" },
4336 { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
4337 { 0x000C, "S Offline" },
4338 { 0x000D, "P Found" },
4339 { 0x000E, "P Removed" },
4340 { 0x000F, "P Unconfigured" },
4341 { 0x0010, "P Expand Capacity Started" },
4342 { 0x0011, "P Expand Capacity Completed" },
4343 { 0x0012, "P Expand Capacity Failed" },
4344 { 0x0013, "P Command Timed Out" },
4345 { 0x0014, "P Command Aborted" },
4346 { 0x0015, "P Command Retried" },
4347 { 0x0016, "P Parity Error" },
4348 { 0x0017, "P Soft Error" },
4349 { 0x0018, "P Miscellaneous Error" },
4350 { 0x0019, "P Reset" },
4351 { 0x001A, "P Active Spare Found" },
4352 { 0x001B, "P Warm Spare Found" },
4353 { 0x001C, "S Sense Data Received" },
4354 { 0x001D, "P Initialization Started" },
4355 { 0x001E, "P Initialization Completed" },
4356 { 0x001F, "P Initialization Failed" },
4357 { 0x0020, "P Initialization Cancelled" },
4358 { 0x0021, "P Failed because Write Recovery Failed" },
4359 { 0x0022, "P Failed because SCSI Bus Reset Failed" },
4360 { 0x0023, "P Failed because of Double Check Condition" },
4361 { 0x0024, "P Failed because Device Cannot Be Accessed" },
4362 { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
4363 { 0x0026, "P Failed because of Bad Tag from Device" },
4364 { 0x0027, "P Failed because of Command Timeout" },
4365 { 0x0028, "P Failed because of System Reset" },
4366 { 0x0029, "P Failed because of Busy Status or Parity Error" },
4367 { 0x002A, "P Failed because Host Set Device to Failed State" },
4368 { 0x002B, "P Failed because of Selection Timeout" },
4369 { 0x002C, "P Failed because of SCSI Bus Phase Error" },
4370 { 0x002D, "P Failed because Device Returned Unknown Status" },
4371 { 0x002E, "P Failed because Device Not Ready" },
4372 { 0x002F, "P Failed because Device Not Found at Startup" },
4373 { 0x0030, "P Failed because COD Write Operation Failed" },
4374 { 0x0031, "P Failed because BDT Write Operation Failed" },
4375 { 0x0039, "P Missing at Startup" },
4376 { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
4377 { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
4378 { 0x003D, "P Standby Rebuild Started" },
4379 /* Logical Device Events (0x0080 - 0x00FF) */
4380 { 0x0080, "M Consistency Check Started" },
4381 { 0x0081, "M Consistency Check Completed" },
4382 { 0x0082, "M Consistency Check Cancelled" },
4383 { 0x0083, "M Consistency Check Completed With Errors" },
4384 { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
4385 { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
4386 { 0x0086, "L Offline" },
4387 { 0x0087, "L Critical" },
4388 { 0x0088, "L Online" },
4389 { 0x0089, "M Automatic Rebuild Started" },
4390 { 0x008A, "M Manual Rebuild Started" },
4391 { 0x008B, "M Rebuild Completed" },
4392 { 0x008C, "M Rebuild Cancelled" },
4393 { 0x008D, "M Rebuild Failed for Unknown Reasons" },
4394 { 0x008E, "M Rebuild Failed due to New Physical Device" },
4395 { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
4396 { 0x0090, "M Initialization Started" },
4397 { 0x0091, "M Initialization Completed" },
4398 { 0x0092, "M Initialization Cancelled" },
4399 { 0x0093, "M Initialization Failed" },
4400 { 0x0094, "L Found" },
4401 { 0x0095, "L Deleted" },
4402 { 0x0096, "M Expand Capacity Started" },
4403 { 0x0097, "M Expand Capacity Completed" },
4404 { 0x0098, "M Expand Capacity Failed" },
4405 { 0x0099, "L Bad Block Found" },
4406 { 0x009A, "L Size Changed" },
4407 { 0x009B, "L Type Changed" },
4408 { 0x009C, "L Bad Data Block Found" },
4409 { 0x009E, "L Read of Data Block in BDT" },
4410 { 0x009F, "L Write Back Data for Disk Block Lost" },
4411 { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
4412 { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
4413 { 0x00A2, "L Standby Rebuild Started" },
4414 /* Fault Management Events (0x0100 - 0x017F) */
4415 { 0x0140, "E Fan %d Failed" },
4416 { 0x0141, "E Fan %d OK" },
4417 { 0x0142, "E Fan %d Not Present" },
4418 { 0x0143, "E Power Supply %d Failed" },
4419 { 0x0144, "E Power Supply %d OK" },
4420 { 0x0145, "E Power Supply %d Not Present" },
4421 { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
4422 { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
4423 { 0x0148, "E Temperature Sensor %d Temperature Normal" },
4424 { 0x0149, "E Temperature Sensor %d Not Present" },
4425 { 0x014A, "E Enclosure Management Unit %d Access Critical" },
4426 { 0x014B, "E Enclosure Management Unit %d Access OK" },
4427 { 0x014C, "E Enclosure Management Unit %d Access Offline" },
4428 /* Controller Events (0x0180 - 0x01FF) */
4429 { 0x0181, "C Cache Write Back Error" },
4430 { 0x0188, "C Battery Backup Unit Found" },
4431 { 0x0189, "C Battery Backup Unit Charge Level Low" },
4432 { 0x018A, "C Battery Backup Unit Charge Level OK" },
4433 { 0x0193, "C Installation Aborted" },
4434 { 0x0195, "C Battery Backup Unit Physically Removed" },
4435 { 0x0196, "C Memory Error During Warm Boot" },
4436 { 0x019E, "C Memory Soft ECC Error Corrected" },
4437 { 0x019F, "C Memory Hard ECC Error Corrected" },
4438 { 0x01A2, "C Battery Backup Unit Failed" },
4439 { 0x01AB, "C Mirror Race Recovery Failed" },
4440 { 0x01AC, "C Mirror Race on Critical Drive" },
4441 /* Controller Internal Processor Events */
4442 { 0x0380, "C Internal Controller Hung" },
4443 { 0x0381, "C Internal Controller Firmware Breakpoint" },
4444 { 0x0390, "C Internal Controller i960 Processor Specific Error" },
4445 { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
4446 { 0, "" } };
4447 int EventListIndex = 0, EventCode;
4448 unsigned char EventType, *EventMessage;
4449 if (Event->EventCode == 0x1C &&
4450 RequestSense->SenseKey == DAC960_SenseKey_VendorSpecific &&
4451 (RequestSense->AdditionalSenseCode == 0x80 ||
4452 RequestSense->AdditionalSenseCode == 0x81))
4453 Event->EventCode = ((RequestSense->AdditionalSenseCode - 0x80) << 8) |
4454 RequestSense->AdditionalSenseCodeQualifier;
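  /*
    Vendor-specific sense data (Sense Key 9 with ASC 0x80 or 0x81) carries a
    controller event in its ASC/ASCQ bytes; the statement above re-encodes it
    as ((ASC - 0x80) << 8) | ASCQ so the EventList lookup below can resolve
    it like any other Event Code.
  */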
4455 while (true)
4456 {
4457 EventCode = EventList[EventListIndex].EventCode;
4458 if (EventCode == Event->EventCode || EventCode == 0) break;
4459 EventListIndex++;
4460 }
4461 EventType = EventList[EventListIndex].EventMessage[0];
4462 EventMessage = &EventList[EventListIndex].EventMessage[2];
4463 if (EventCode == 0)
4464 {
4465 DAC960_Critical("Unknown Controller Event Code %04X\n",
4466 Controller, Event->EventCode);
4467 return;
4468 }
4469 switch (EventType)
4470 {
4471 case 'P':
4472 DAC960_Critical("Physical Device %d:%d %s\n", Controller,
4473 Event->Channel, Event->TargetID, EventMessage);
4474 break;
4475 case 'L':
4476 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
4477 Event->LogicalUnit, Controller->ControllerNumber,
4478 Event->LogicalUnit, EventMessage);
4479 break;
4480 case 'M':
4481 DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
4482 Event->LogicalUnit, Controller->ControllerNumber,
4483 Event->LogicalUnit, EventMessage);
4484 break;
4485 case 'S':
4486 if (RequestSense->SenseKey == DAC960_SenseKey_NoSense ||
4487 (RequestSense->SenseKey == DAC960_SenseKey_NotReady &&
4488 RequestSense->AdditionalSenseCode == 0x04 &&
4489 (RequestSense->AdditionalSenseCodeQualifier == 0x01 ||
4490 RequestSense->AdditionalSenseCodeQualifier == 0x02)))
4491 break;
4492 DAC960_Critical("Physical Device %d:%d %s\n", Controller,
4493 Event->Channel, Event->TargetID, EventMessage);
4494 DAC960_Critical("Physical Device %d:%d Request Sense: "
4495 "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
4496 Controller,
4497 Event->Channel,
4498 Event->TargetID,
4499 RequestSense->SenseKey,
4500 RequestSense->AdditionalSenseCode,
4501 RequestSense->AdditionalSenseCodeQualifier);
4502 DAC960_Critical("Physical Device %d:%d Request Sense: "
4503 "Information = %02X%02X%02X%02X "
4504 "%02X%02X%02X%02X\n",
4505 Controller,
4506 Event->Channel,
4507 Event->TargetID,
4508 RequestSense->Information[0],
4509 RequestSense->Information[1],
4510 RequestSense->Information[2],
4511 RequestSense->Information[3],
4512 RequestSense->CommandSpecificInformation[0],
4513 RequestSense->CommandSpecificInformation[1],
4514 RequestSense->CommandSpecificInformation[2],
4515 RequestSense->CommandSpecificInformation[3]);
4516 break;
4517 case 'E':
4518 if (Controller->SuppressEnclosureMessages) break;
4519 sprintf(MessageBuffer, EventMessage, Event->LogicalUnit);
4520 DAC960_Critical("Enclosure %d %s\n", Controller,
4521 Event->TargetID, MessageBuffer);
4522 break;
4523 case 'C':
4524 DAC960_Critical("Controller %s\n", Controller, EventMessage);
4525 break;
4526 default:
4527 DAC960_Critical("Unknown Controller Event Code %04X\n",
4528 Controller, Event->EventCode);
4529 break;
4530 }
4531}
4532
4533
4534/*
4535 DAC960_V2_ReportProgress prints an appropriate progress message for
4536 Logical Device Long Operations.
4537*/
4538
4539static void DAC960_V2_ReportProgress(DAC960_Controller_T *Controller,
4540 unsigned char *MessageString,
4541 unsigned int LogicalDeviceNumber,
4542 unsigned long BlocksCompleted,
4543 unsigned long LogicalDeviceSize)
4544{
4545 Controller->EphemeralProgressMessage = true;
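  /*
    Both operands of the percentage calculation below are pre-shifted right
    by 7 (scaled down by 128), apparently so that 100 * BlocksCompleted
    cannot overflow a 32-bit unsigned long on large Logical Drives; without
    the shift, 100 * BlocksCompleted already exceeds 2^32 once roughly 43
    million blocks (about 22 GB of 512-byte blocks) have completed.  The
    quotient is unaffected because numerator and denominator are scaled
    identically, apart from rounding of the low 7 bits.
  */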
4546 DAC960_Progress("%s in Progress: Logical Drive %d (/dev/rd/c%dd%d) "
4547 "%d%% completed\n", Controller,
4548 MessageString,
4549 LogicalDeviceNumber,
4550 Controller->ControllerNumber,
4551 LogicalDeviceNumber,
4552 (100 * (BlocksCompleted >> 7)) / (LogicalDeviceSize >> 7));
4553 Controller->EphemeralProgressMessage = false;
4554}
4555
4556
4557/*
4558 DAC960_V2_ProcessCompletedCommand performs completion processing for Command
4559 for DAC960 V2 Firmware Controllers.
4560*/
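/*
  Overview of the completion paths below: Read/Write commands either complete
  normally, are broken into partial retries on a Medium Error, or fail
  outright; Read/Write Retry commands complete one partial piece and resubmit
  the next, if any; Monitoring commands chain through Controller Information,
  Event, Physical Device Information, and Logical Device Information queries
  before re-arming the Monitoring Timer; Immediate and Queued commands simply
  hand their status back to the waiting caller.
*/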
4561
4562static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
4563{
4564 DAC960_Controller_T *Controller = Command->Controller;
4565 DAC960_CommandType_T CommandType = Command->CommandType;
4566 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
4567 DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
4568 DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
4569
4570 if (CommandType == DAC960_ReadCommand ||
4571 CommandType == DAC960_WriteCommand)
4572 {
4573
4574#ifdef FORCE_RETRY_DEBUG
4575 CommandStatus = DAC960_V2_AbormalCompletion;
 4576	  Command->V2.RequestSense->SenseKey = DAC960_SenseKey_MediumError;
 4577#endif
4578
4579 if (CommandStatus == DAC960_V2_NormalCompletion) {
4580
4581 if (!DAC960_ProcessCompletedRequest(Command, true))
4582 BUG();
4583
4584 } else if (Command->V2.RequestSense->SenseKey == DAC960_SenseKey_MediumError)
4585 {
4586 /*
4587 * break the command down into pieces and resubmit each
4588 * piece, hoping that some of them will succeed.
4589 */
4590 DAC960_queue_partial_rw(Command);
4591 return;
4592 }
4593 else
4594 {
4595 if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
4596 DAC960_V2_ReadWriteError(Command);
4597 /*
4598 Perform completion processing for all buffers in this I/O Request.
4599 */
4600 (void)DAC960_ProcessCompletedRequest(Command, false);
4601 }
4602 }
4603 else if (CommandType == DAC960_ReadRetryCommand ||
4604 CommandType == DAC960_WriteRetryCommand)
4605 {
4606 boolean normal_completion;
4607
4608#ifdef FORCE_RETRY_FAILURE_DEBUG
4609 static int retry_count = 1;
4610#endif
4611 /*
4612 Perform completion processing for the portion that was
4613 retried, and submit the next portion, if any.
4614 */
4615 normal_completion = true;
4616 if (CommandStatus != DAC960_V2_NormalCompletion) {
4617 normal_completion = false;
4618 if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
4619 DAC960_V2_ReadWriteError(Command);
4620 }
4621
4622#ifdef FORCE_RETRY_FAILURE_DEBUG
4623 if (!(++retry_count % 10000)) {
4624 printk("V2 error retry failure test\n");
4625 normal_completion = false;
4626 DAC960_V2_ReadWriteError(Command);
4627 }
4628#endif
4629
4630 if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
4631 DAC960_queue_partial_rw(Command);
4632 return;
4633 }
4634 }
4635 else if (CommandType == DAC960_MonitoringCommand)
4636 {
4637 if (Controller->ShutdownMonitoringTimer)
4638 return;
4639 if (CommandOpcode == DAC960_V2_GetControllerInfo)
4640 {
4641 DAC960_V2_ControllerInfo_T *NewControllerInfo =
4642 Controller->V2.NewControllerInformation;
4643 DAC960_V2_ControllerInfo_T *ControllerInfo =
4644 &Controller->V2.ControllerInformation;
4645 Controller->LogicalDriveCount =
4646 NewControllerInfo->LogicalDevicesPresent;
4647 Controller->V2.NeedLogicalDeviceInformation = true;
4648 Controller->V2.NeedPhysicalDeviceInformation = true;
4649 Controller->V2.StartLogicalDeviceInformationScan = true;
4650 Controller->V2.StartPhysicalDeviceInformationScan = true;
4651 Controller->MonitoringAlertMode =
4652 (NewControllerInfo->LogicalDevicesCritical > 0 ||
4653 NewControllerInfo->LogicalDevicesOffline > 0 ||
4654 NewControllerInfo->PhysicalDisksCritical > 0 ||
4655 NewControllerInfo->PhysicalDisksOffline > 0);
4656 memcpy(ControllerInfo, NewControllerInfo,
4657 sizeof(DAC960_V2_ControllerInfo_T));
4658 }
4659 else if (CommandOpcode == DAC960_V2_GetEvent)
4660 {
4661 if (CommandStatus == DAC960_V2_NormalCompletion) {
4662 DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
4663 }
4664 Controller->V2.NextEventSequenceNumber++;
4665 }
4666 else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
4667 CommandStatus == DAC960_V2_NormalCompletion)
4668 {
4669 DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
4670 Controller->V2.NewPhysicalDeviceInformation;
4671 unsigned int PhysicalDeviceIndex = Controller->V2.PhysicalDeviceIndex;
4672 DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
4673 Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
4674 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
4675 Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
4676 unsigned int DeviceIndex;
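	  /*
	    PhysicalDeviceInformation[] is kept sorted by Channel, TargetID,
	    and LogicalUnit.  Any existing entries that sort before the device
	    just reported no longer exist and are removed (with the array
	    compacted), and a new entry is inserted in order if the reported
	    device is not already present.
	  */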
4677 while (PhysicalDeviceInfo != NULL &&
4678 (NewPhysicalDeviceInfo->Channel >
4679 PhysicalDeviceInfo->Channel ||
4680 (NewPhysicalDeviceInfo->Channel ==
4681 PhysicalDeviceInfo->Channel &&
4682 (NewPhysicalDeviceInfo->TargetID >
4683 PhysicalDeviceInfo->TargetID ||
4684 (NewPhysicalDeviceInfo->TargetID ==
4685 PhysicalDeviceInfo->TargetID &&
4686 NewPhysicalDeviceInfo->LogicalUnit >
4687 PhysicalDeviceInfo->LogicalUnit)))))
4688 {
4689 DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
4690 Controller,
4691 PhysicalDeviceInfo->Channel,
4692 PhysicalDeviceInfo->TargetID);
4693 Controller->V2.PhysicalDeviceInformation
4694 [PhysicalDeviceIndex] = NULL;
4695 Controller->V2.InquiryUnitSerialNumber
4696 [PhysicalDeviceIndex] = NULL;
4697 kfree(PhysicalDeviceInfo);
4698 kfree(InquiryUnitSerialNumber);
4699 for (DeviceIndex = PhysicalDeviceIndex;
4700 DeviceIndex < DAC960_V2_MaxPhysicalDevices - 1;
4701 DeviceIndex++)
4702 {
4703 Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
4704 Controller->V2.PhysicalDeviceInformation[DeviceIndex+1];
4705 Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
4706 Controller->V2.InquiryUnitSerialNumber[DeviceIndex+1];
4707 }
4708 Controller->V2.PhysicalDeviceInformation
4709 [DAC960_V2_MaxPhysicalDevices-1] = NULL;
4710 Controller->V2.InquiryUnitSerialNumber
4711 [DAC960_V2_MaxPhysicalDevices-1] = NULL;
4712 PhysicalDeviceInfo =
4713 Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
4714 InquiryUnitSerialNumber =
4715 Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
4716 }
4717 if (PhysicalDeviceInfo == NULL ||
4718 (NewPhysicalDeviceInfo->Channel !=
4719 PhysicalDeviceInfo->Channel) ||
4720 (NewPhysicalDeviceInfo->TargetID !=
4721 PhysicalDeviceInfo->TargetID) ||
4722 (NewPhysicalDeviceInfo->LogicalUnit !=
4723 PhysicalDeviceInfo->LogicalUnit))
4724 {
4725 PhysicalDeviceInfo = (DAC960_V2_PhysicalDeviceInfo_T *)
4726 kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
4727 InquiryUnitSerialNumber =
4728 (DAC960_SCSI_Inquiry_UnitSerialNumber_T *)
4729 kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
4730 GFP_ATOMIC);
4731 if (InquiryUnitSerialNumber == NULL &&
4732 PhysicalDeviceInfo != NULL)
4733 {
4734 kfree(PhysicalDeviceInfo);
4735 PhysicalDeviceInfo = NULL;
4736 }
4737 DAC960_Critical("Physical Device %d:%d Now Exists%s\n",
4738 Controller,
4739 NewPhysicalDeviceInfo->Channel,
4740 NewPhysicalDeviceInfo->TargetID,
4741 (PhysicalDeviceInfo != NULL
4742 ? "" : " - Allocation Failed"));
4743 if (PhysicalDeviceInfo != NULL)
4744 {
4745 memset(PhysicalDeviceInfo, 0,
4746 sizeof(DAC960_V2_PhysicalDeviceInfo_T));
4747 PhysicalDeviceInfo->PhysicalDeviceState =
4748 DAC960_V2_Device_InvalidState;
4749 memset(InquiryUnitSerialNumber, 0,
4750 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
4751 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
4752 for (DeviceIndex = DAC960_V2_MaxPhysicalDevices - 1;
4753 DeviceIndex > PhysicalDeviceIndex;
4754 DeviceIndex--)
4755 {
4756 Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
4757 Controller->V2.PhysicalDeviceInformation[DeviceIndex-1];
4758 Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
4759 Controller->V2.InquiryUnitSerialNumber[DeviceIndex-1];
4760 }
4761 Controller->V2.PhysicalDeviceInformation
4762 [PhysicalDeviceIndex] =
4763 PhysicalDeviceInfo;
4764 Controller->V2.InquiryUnitSerialNumber
4765 [PhysicalDeviceIndex] =
4766 InquiryUnitSerialNumber;
4767 Controller->V2.NeedDeviceSerialNumberInformation = true;
4768 }
4769 }
4770 if (PhysicalDeviceInfo != NULL)
4771 {
4772 if (NewPhysicalDeviceInfo->PhysicalDeviceState !=
4773 PhysicalDeviceInfo->PhysicalDeviceState)
4774 DAC960_Critical(
4775 "Physical Device %d:%d is now %s\n", Controller,
4776 NewPhysicalDeviceInfo->Channel,
4777 NewPhysicalDeviceInfo->TargetID,
4778 (NewPhysicalDeviceInfo->PhysicalDeviceState
4779 == DAC960_V2_Device_Online
4780 ? "ONLINE"
4781 : NewPhysicalDeviceInfo->PhysicalDeviceState
4782 == DAC960_V2_Device_Rebuild
4783 ? "REBUILD"
4784 : NewPhysicalDeviceInfo->PhysicalDeviceState
4785 == DAC960_V2_Device_Missing
4786 ? "MISSING"
4787 : NewPhysicalDeviceInfo->PhysicalDeviceState
4788 == DAC960_V2_Device_Critical
4789 ? "CRITICAL"
4790 : NewPhysicalDeviceInfo->PhysicalDeviceState
4791 == DAC960_V2_Device_Dead
4792 ? "DEAD"
4793 : NewPhysicalDeviceInfo->PhysicalDeviceState
4794 == DAC960_V2_Device_SuspectedDead
4795 ? "SUSPECTED-DEAD"
4796 : NewPhysicalDeviceInfo->PhysicalDeviceState
4797 == DAC960_V2_Device_CommandedOffline
4798 ? "COMMANDED-OFFLINE"
4799 : NewPhysicalDeviceInfo->PhysicalDeviceState
4800 == DAC960_V2_Device_Standby
4801 ? "STANDBY" : "UNKNOWN"));
4802 if ((NewPhysicalDeviceInfo->ParityErrors !=
4803 PhysicalDeviceInfo->ParityErrors) ||
4804 (NewPhysicalDeviceInfo->SoftErrors !=
4805 PhysicalDeviceInfo->SoftErrors) ||
4806 (NewPhysicalDeviceInfo->HardErrors !=
4807 PhysicalDeviceInfo->HardErrors) ||
4808 (NewPhysicalDeviceInfo->MiscellaneousErrors !=
4809 PhysicalDeviceInfo->MiscellaneousErrors) ||
4810 (NewPhysicalDeviceInfo->CommandTimeouts !=
4811 PhysicalDeviceInfo->CommandTimeouts) ||
4812 (NewPhysicalDeviceInfo->Retries !=
4813 PhysicalDeviceInfo->Retries) ||
4814 (NewPhysicalDeviceInfo->Aborts !=
4815 PhysicalDeviceInfo->Aborts) ||
4816 (NewPhysicalDeviceInfo->PredictedFailuresDetected !=
4817 PhysicalDeviceInfo->PredictedFailuresDetected))
4818 {
4819 DAC960_Critical("Physical Device %d:%d Errors: "
4820 "Parity = %d, Soft = %d, "
4821 "Hard = %d, Misc = %d\n",
4822 Controller,
4823 NewPhysicalDeviceInfo->Channel,
4824 NewPhysicalDeviceInfo->TargetID,
4825 NewPhysicalDeviceInfo->ParityErrors,
4826 NewPhysicalDeviceInfo->SoftErrors,
4827 NewPhysicalDeviceInfo->HardErrors,
4828 NewPhysicalDeviceInfo->MiscellaneousErrors);
4829 DAC960_Critical("Physical Device %d:%d Errors: "
4830 "Timeouts = %d, Retries = %d, "
4831 "Aborts = %d, Predicted = %d\n",
4832 Controller,
4833 NewPhysicalDeviceInfo->Channel,
4834 NewPhysicalDeviceInfo->TargetID,
4835 NewPhysicalDeviceInfo->CommandTimeouts,
4836 NewPhysicalDeviceInfo->Retries,
4837 NewPhysicalDeviceInfo->Aborts,
4838 NewPhysicalDeviceInfo
4839 ->PredictedFailuresDetected);
4840 }
4841 if ((PhysicalDeviceInfo->PhysicalDeviceState
4842 == DAC960_V2_Device_Dead ||
4843 PhysicalDeviceInfo->PhysicalDeviceState
4844 == DAC960_V2_Device_InvalidState) &&
4845 NewPhysicalDeviceInfo->PhysicalDeviceState
4846 != DAC960_V2_Device_Dead)
4847 Controller->V2.NeedDeviceSerialNumberInformation = true;
4848 memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
4849 sizeof(DAC960_V2_PhysicalDeviceInfo_T));
4850 }
4851 NewPhysicalDeviceInfo->LogicalUnit++;
4852 Controller->V2.PhysicalDeviceIndex++;
4853 }
4854 else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
4855 {
4856 unsigned int DeviceIndex;
4857 for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
4858 DeviceIndex < DAC960_V2_MaxPhysicalDevices;
4859 DeviceIndex++)
4860 {
4861 DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
4862 Controller->V2.PhysicalDeviceInformation[DeviceIndex];
4863 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
4864 Controller->V2.InquiryUnitSerialNumber[DeviceIndex];
4865 if (PhysicalDeviceInfo == NULL) break;
4866 DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
4867 Controller,
4868 PhysicalDeviceInfo->Channel,
4869 PhysicalDeviceInfo->TargetID);
4870 Controller->V2.PhysicalDeviceInformation[DeviceIndex] = NULL;
4871 Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = NULL;
4872 kfree(PhysicalDeviceInfo);
4873 kfree(InquiryUnitSerialNumber);
4874 }
4875 Controller->V2.NeedPhysicalDeviceInformation = false;
4876 }
4877 else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
4878 CommandStatus == DAC960_V2_NormalCompletion)
4879 {
4880 DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
4881 Controller->V2.NewLogicalDeviceInformation;
4882 unsigned short LogicalDeviceNumber =
4883 NewLogicalDeviceInfo->LogicalDeviceNumber;
4884 DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
4885 Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber];
4886 if (LogicalDeviceInfo == NULL)
4887 {
4888 DAC960_V2_PhysicalDevice_T PhysicalDevice;
4889 PhysicalDevice.Controller = 0;
4890 PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
4891 PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
4892 PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
4893 Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
4894 PhysicalDevice;
4895 LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
4896 kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
4897 Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
4898 LogicalDeviceInfo;
4899 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
4900 "Now Exists%s\n", Controller,
4901 LogicalDeviceNumber,
4902 Controller->ControllerNumber,
4903 LogicalDeviceNumber,
4904 (LogicalDeviceInfo != NULL
4905 ? "" : " - Allocation Failed"));
4906 if (LogicalDeviceInfo != NULL)
4907 {
4908 memset(LogicalDeviceInfo, 0,
4909 sizeof(DAC960_V2_LogicalDeviceInfo_T));
4910 DAC960_ComputeGenericDiskInfo(Controller);
4911 }
4912 }
4913 if (LogicalDeviceInfo != NULL)
4914 {
4915 unsigned long LogicalDeviceSize =
4916 NewLogicalDeviceInfo->ConfigurableDeviceSize;
4917 if (NewLogicalDeviceInfo->LogicalDeviceState !=
4918 LogicalDeviceInfo->LogicalDeviceState)
4919 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
4920 "is now %s\n", Controller,
4921 LogicalDeviceNumber,
4922 Controller->ControllerNumber,
4923 LogicalDeviceNumber,
4924 (NewLogicalDeviceInfo->LogicalDeviceState
4925 == DAC960_V2_LogicalDevice_Online
4926 ? "ONLINE"
4927 : NewLogicalDeviceInfo->LogicalDeviceState
4928 == DAC960_V2_LogicalDevice_Critical
4929 ? "CRITICAL" : "OFFLINE"));
4930 if ((NewLogicalDeviceInfo->SoftErrors !=
4931 LogicalDeviceInfo->SoftErrors) ||
4932 (NewLogicalDeviceInfo->CommandsFailed !=
4933 LogicalDeviceInfo->CommandsFailed) ||
4934 (NewLogicalDeviceInfo->DeferredWriteErrors !=
4935 LogicalDeviceInfo->DeferredWriteErrors))
4936 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) Errors: "
4937 "Soft = %d, Failed = %d, Deferred Write = %d\n",
4938 Controller, LogicalDeviceNumber,
4939 Controller->ControllerNumber,
4940 LogicalDeviceNumber,
4941 NewLogicalDeviceInfo->SoftErrors,
4942 NewLogicalDeviceInfo->CommandsFailed,
4943 NewLogicalDeviceInfo->DeferredWriteErrors);
4944 if (NewLogicalDeviceInfo->ConsistencyCheckInProgress)
4945 DAC960_V2_ReportProgress(Controller,
4946 "Consistency Check",
4947 LogicalDeviceNumber,
4948 NewLogicalDeviceInfo
4949 ->ConsistencyCheckBlockNumber,
4950 LogicalDeviceSize);
4951 else if (NewLogicalDeviceInfo->RebuildInProgress)
4952 DAC960_V2_ReportProgress(Controller,
4953 "Rebuild",
4954 LogicalDeviceNumber,
4955 NewLogicalDeviceInfo
4956 ->RebuildBlockNumber,
4957 LogicalDeviceSize);
4958 else if (NewLogicalDeviceInfo->BackgroundInitializationInProgress)
4959 DAC960_V2_ReportProgress(Controller,
4960 "Background Initialization",
4961 LogicalDeviceNumber,
4962 NewLogicalDeviceInfo
4963 ->BackgroundInitializationBlockNumber,
4964 LogicalDeviceSize);
4965 else if (NewLogicalDeviceInfo->ForegroundInitializationInProgress)
4966 DAC960_V2_ReportProgress(Controller,
4967 "Foreground Initialization",
4968 LogicalDeviceNumber,
4969 NewLogicalDeviceInfo
4970 ->ForegroundInitializationBlockNumber,
4971 LogicalDeviceSize);
4972 else if (NewLogicalDeviceInfo->DataMigrationInProgress)
4973 DAC960_V2_ReportProgress(Controller,
4974 "Data Migration",
4975 LogicalDeviceNumber,
4976 NewLogicalDeviceInfo
4977 ->DataMigrationBlockNumber,
4978 LogicalDeviceSize);
4979 else if (NewLogicalDeviceInfo->PatrolOperationInProgress)
4980 DAC960_V2_ReportProgress(Controller,
4981 "Patrol Operation",
4982 LogicalDeviceNumber,
4983 NewLogicalDeviceInfo
4984 ->PatrolOperationBlockNumber,
4985 LogicalDeviceSize);
4986 if (LogicalDeviceInfo->BackgroundInitializationInProgress &&
4987 !NewLogicalDeviceInfo->BackgroundInitializationInProgress)
4988 DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) "
4989 "Background Initialization %s\n",
4990 Controller,
4991 LogicalDeviceNumber,
4992 Controller->ControllerNumber,
4993 LogicalDeviceNumber,
4994 (NewLogicalDeviceInfo->LogicalDeviceControl
4995 .LogicalDeviceInitialized
4996 ? "Completed" : "Failed"));
4997 memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
4998 sizeof(DAC960_V2_LogicalDeviceInfo_T));
4999 }
5000 Controller->V2.LogicalDriveFoundDuringScan
5001 [LogicalDeviceNumber] = true;
5002 NewLogicalDeviceInfo->LogicalDeviceNumber++;
5003 }
5004 else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
5005 {
5006 int LogicalDriveNumber;
5007 for (LogicalDriveNumber = 0;
5008 LogicalDriveNumber < DAC960_MaxLogicalDrives;
5009 LogicalDriveNumber++)
5010 {
5011 DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
5012 Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
5013 if (LogicalDeviceInfo == NULL ||
5014 Controller->V2.LogicalDriveFoundDuringScan
5015 [LogicalDriveNumber])
5016 continue;
5017 DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
5018 "No Longer Exists\n", Controller,
5019 LogicalDriveNumber,
5020 Controller->ControllerNumber,
5021 LogicalDriveNumber);
5022 Controller->V2.LogicalDeviceInformation
5023 [LogicalDriveNumber] = NULL;
5024 kfree(LogicalDeviceInfo);
5025 Controller->LogicalDriveInitiallyAccessible
5026 [LogicalDriveNumber] = false;
5027 DAC960_ComputeGenericDiskInfo(Controller);
5028 }
5029 Controller->V2.NeedLogicalDeviceInformation = false;
5030 }
5031 else if (CommandOpcode == DAC960_V2_SCSI_10_Passthru)
5032 {
5033 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
5034 Controller->V2.InquiryUnitSerialNumber[Controller->V2.PhysicalDeviceIndex - 1];
5035
5036 if (CommandStatus != DAC960_V2_NormalCompletion) {
5037 memset(InquiryUnitSerialNumber,
5038 0, sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
5039 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
5040 } else
5041 memcpy(InquiryUnitSerialNumber,
5042 Controller->V2.NewInquiryUnitSerialNumber,
5043 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
5044
5045 Controller->V2.NeedDeviceSerialNumberInformation = false;
5046 }
5047
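      /*
	The Monitoring Command is now reused to continue the scan: pending
	Events are fetched first (note that the 32-bit NextEventSequenceNumber
	is split across the EventSequenceNumberHigh16/Low16 mailbox fields),
	then Physical Device Information, then Logical Device Information.
	Each step re-queues this Command and returns; only when nothing
	remains is the Monitoring Timer re-armed.
      */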
5048 if (Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
5049 - Controller->V2.NextEventSequenceNumber > 0)
5050 {
5051 CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL;
5052 CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T);
5053 CommandMailbox->GetEvent.EventSequenceNumberHigh16 =
5054 Controller->V2.NextEventSequenceNumber >> 16;
5055 CommandMailbox->GetEvent.ControllerNumber = 0;
5056 CommandMailbox->GetEvent.IOCTL_Opcode =
5057 DAC960_V2_GetEvent;
5058 CommandMailbox->GetEvent.EventSequenceNumberLow16 =
5059 Controller->V2.NextEventSequenceNumber & 0xFFFF;
5060 CommandMailbox->GetEvent.DataTransferMemoryAddress
5061 .ScatterGatherSegments[0]
5062 .SegmentDataPointer =
5063 Controller->V2.EventDMA;
5064 CommandMailbox->GetEvent.DataTransferMemoryAddress
5065 .ScatterGatherSegments[0]
5066 .SegmentByteCount =
5067 CommandMailbox->GetEvent.DataTransferSize;
5068 DAC960_QueueCommand(Command);
5069 return;
5070 }
5071 if (Controller->V2.NeedPhysicalDeviceInformation)
5072 {
5073 if (Controller->V2.NeedDeviceSerialNumberInformation)
5074 {
5075 DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
5076 Controller->V2.NewInquiryUnitSerialNumber;
5077 InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
5078
5079 DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
5080 Controller->V2.NewPhysicalDeviceInformation->Channel,
5081 Controller->V2.NewPhysicalDeviceInformation->TargetID,
5082 Controller->V2.NewPhysicalDeviceInformation->LogicalUnit - 1);
5083
5084
5085 DAC960_QueueCommand(Command);
5086 return;
5087 }
5088 if (Controller->V2.StartPhysicalDeviceInformationScan)
5089 {
5090 Controller->V2.PhysicalDeviceIndex = 0;
5091 Controller->V2.NewPhysicalDeviceInformation->Channel = 0;
5092 Controller->V2.NewPhysicalDeviceInformation->TargetID = 0;
5093 Controller->V2.NewPhysicalDeviceInformation->LogicalUnit = 0;
5094 Controller->V2.StartPhysicalDeviceInformationScan = false;
5095 }
5096 CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
5097 CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
5098 sizeof(DAC960_V2_PhysicalDeviceInfo_T);
5099 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit =
5100 Controller->V2.NewPhysicalDeviceInformation->LogicalUnit;
5101 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID =
5102 Controller->V2.NewPhysicalDeviceInformation->TargetID;
5103 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel =
5104 Controller->V2.NewPhysicalDeviceInformation->Channel;
5105 CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
5106 DAC960_V2_GetPhysicalDeviceInfoValid;
5107 CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
5108 .ScatterGatherSegments[0]
5109 .SegmentDataPointer =
5110 Controller->V2.NewPhysicalDeviceInformationDMA;
5111 CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
5112 .ScatterGatherSegments[0]
5113 .SegmentByteCount =
5114 CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
5115 DAC960_QueueCommand(Command);
5116 return;
5117 }
5118 if (Controller->V2.NeedLogicalDeviceInformation)
5119 {
5120 if (Controller->V2.StartLogicalDeviceInformationScan)
5121 {
5122 int LogicalDriveNumber;
5123 for (LogicalDriveNumber = 0;
5124 LogicalDriveNumber < DAC960_MaxLogicalDrives;
5125 LogicalDriveNumber++)
5126 Controller->V2.LogicalDriveFoundDuringScan
5127 [LogicalDriveNumber] = false;
5128 Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber = 0;
5129 Controller->V2.StartLogicalDeviceInformationScan = false;
5130 }
5131 CommandMailbox->LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
5132 CommandMailbox->LogicalDeviceInfo.DataTransferSize =
5133 sizeof(DAC960_V2_LogicalDeviceInfo_T);
5134 CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
5135 Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber;
5136 CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
5137 DAC960_V2_GetLogicalDeviceInfoValid;
5138 CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
5139 .ScatterGatherSegments[0]
5140 .SegmentDataPointer =
5141 Controller->V2.NewLogicalDeviceInformationDMA;
5142 CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
5143 .ScatterGatherSegments[0]
5144 .SegmentByteCount =
5145 CommandMailbox->LogicalDeviceInfo.DataTransferSize;
5146 DAC960_QueueCommand(Command);
5147 return;
5148 }
5149 Controller->MonitoringTimerCount++;
5150 Controller->MonitoringTimer.expires =
5151 jiffies + DAC960_HealthStatusMonitoringInterval;
5152 add_timer(&Controller->MonitoringTimer);
5153 }
5154 if (CommandType == DAC960_ImmediateCommand)
5155 {
5156 complete(Command->Completion);
5157 Command->Completion = NULL;
5158 return;
5159 }
5160 if (CommandType == DAC960_QueuedCommand)
5161 {
5162 DAC960_V2_KernelCommand_T *KernelCommand = Command->V2.KernelCommand;
5163 KernelCommand->CommandStatus = CommandStatus;
5164 KernelCommand->RequestSenseLength = Command->V2.RequestSenseLength;
5165 KernelCommand->DataTransferLength = Command->V2.DataTransferResidue;
5166 Command->V2.KernelCommand = NULL;
5167 DAC960_DeallocateCommand(Command);
5168 KernelCommand->CompletionFunction(KernelCommand);
5169 return;
5170 }
5171 /*
5172 Queue a Status Monitoring Command to the Controller using the just
5173 completed Command if one was deferred previously due to lack of a
5174 free Command when the Monitoring Timer Function was called.
5175 */
5176 if (Controller->MonitoringCommandDeferred)
5177 {
5178 Controller->MonitoringCommandDeferred = false;
5179 DAC960_V2_QueueMonitoringCommand(Command);
5180 return;
5181 }
5182 /*
5183 Deallocate the Command.
5184 */
5185 DAC960_DeallocateCommand(Command);
5186 /*
5187 Wake up any processes waiting on a free Command.
5188 */
5189 wake_up(&Controller->CommandWaitQueue);
5190}
5191
5192
5193/*
5194 DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series
5195 Controllers.
5196*/
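/*
  The V2 interrupt handlers below (BA and LP Series) share the same status
  mailbox protocol: the controller posts completions into a circular array of
  Status Mailboxes, marking each entry valid with a non-zero Command
  Identifier.  The handler copies out the status, clears Words[0] to return
  the slot to the controller, and advances, wrapping from LastStatusMailbox
  back to FirstStatusMailbox.
*/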
5197
5198static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel,
5199 void *DeviceIdentifier,
5200 struct pt_regs *InterruptRegisters)
5201{
5202 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5203 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5204 DAC960_V2_StatusMailbox_T *NextStatusMailbox;
5205 unsigned long flags;
5206
5207 spin_lock_irqsave(&Controller->queue_lock, flags);
5208 DAC960_BA_AcknowledgeInterrupt(ControllerBaseAddress);
5209 NextStatusMailbox = Controller->V2.NextStatusMailbox;
5210 while (NextStatusMailbox->Fields.CommandIdentifier > 0)
5211 {
5212 DAC960_V2_CommandIdentifier_T CommandIdentifier =
5213 NextStatusMailbox->Fields.CommandIdentifier;
5214 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5215 Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
5216 Command->V2.RequestSenseLength =
5217 NextStatusMailbox->Fields.RequestSenseLength;
5218 Command->V2.DataTransferResidue =
5219 NextStatusMailbox->Fields.DataTransferResidue;
5220 NextStatusMailbox->Words[0] = 0;
5221 if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
5222 NextStatusMailbox = Controller->V2.FirstStatusMailbox;
5223 DAC960_V2_ProcessCompletedCommand(Command);
5224 }
5225 Controller->V2.NextStatusMailbox = NextStatusMailbox;
5226 /*
5227 Attempt to remove additional I/O Requests from the Controller's
5228 I/O Request Queue and queue them to the Controller.
5229 */
5230 DAC960_ProcessRequest(Controller);
5231 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5232 return IRQ_HANDLED;
5233}
5234
5235
5236/*
5237 DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series
5238 Controllers.
5239*/
5240
5241static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel,
5242 void *DeviceIdentifier,
5243 struct pt_regs *InterruptRegisters)
5244{
5245 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5246 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5247 DAC960_V2_StatusMailbox_T *NextStatusMailbox;
5248 unsigned long flags;
5249
5250 spin_lock_irqsave(&Controller->queue_lock, flags);
5251 DAC960_LP_AcknowledgeInterrupt(ControllerBaseAddress);
5252 NextStatusMailbox = Controller->V2.NextStatusMailbox;
5253 while (NextStatusMailbox->Fields.CommandIdentifier > 0)
5254 {
5255 DAC960_V2_CommandIdentifier_T CommandIdentifier =
5256 NextStatusMailbox->Fields.CommandIdentifier;
5257 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5258 Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
5259 Command->V2.RequestSenseLength =
5260 NextStatusMailbox->Fields.RequestSenseLength;
5261 Command->V2.DataTransferResidue =
5262 NextStatusMailbox->Fields.DataTransferResidue;
5263 NextStatusMailbox->Words[0] = 0;
5264 if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
5265 NextStatusMailbox = Controller->V2.FirstStatusMailbox;
5266 DAC960_V2_ProcessCompletedCommand(Command);
5267 }
5268 Controller->V2.NextStatusMailbox = NextStatusMailbox;
5269 /*
5270 Attempt to remove additional I/O Requests from the Controller's
5271 I/O Request Queue and queue them to the Controller.
5272 */
5273 DAC960_ProcessRequest(Controller);
5274 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5275 return IRQ_HANDLED;
5276}
5277
5278
5279/*
5280 DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series
5281 Controllers.
5282*/
5283
5284static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel,
5285 void *DeviceIdentifier,
5286 struct pt_regs *InterruptRegisters)
5287{
5288 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5289 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5290 DAC960_V1_StatusMailbox_T *NextStatusMailbox;
5291 unsigned long flags;
5292
5293 spin_lock_irqsave(&Controller->queue_lock, flags);
5294 DAC960_LA_AcknowledgeInterrupt(ControllerBaseAddress);
5295 NextStatusMailbox = Controller->V1.NextStatusMailbox;
5296 while (NextStatusMailbox->Fields.Valid)
5297 {
5298 DAC960_V1_CommandIdentifier_T CommandIdentifier =
5299 NextStatusMailbox->Fields.CommandIdentifier;
5300 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5301 Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
5302 NextStatusMailbox->Word = 0;
5303 if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
5304 NextStatusMailbox = Controller->V1.FirstStatusMailbox;
5305 DAC960_V1_ProcessCompletedCommand(Command);
5306 }
5307 Controller->V1.NextStatusMailbox = NextStatusMailbox;
5308 /*
5309 Attempt to remove additional I/O Requests from the Controller's
5310 I/O Request Queue and queue them to the Controller.
5311 */
5312 DAC960_ProcessRequest(Controller);
5313 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5314 return IRQ_HANDLED;
5315}
5316
5317
5318/*
5319 DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series
5320 Controllers.
5321*/
5322
5323static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel,
5324 void *DeviceIdentifier,
5325 struct pt_regs *InterruptRegisters)
5326{
5327 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5328 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5329 DAC960_V1_StatusMailbox_T *NextStatusMailbox;
5330 unsigned long flags;
5331
5332 spin_lock_irqsave(&Controller->queue_lock, flags);
5333 DAC960_PG_AcknowledgeInterrupt(ControllerBaseAddress);
5334 NextStatusMailbox = Controller->V1.NextStatusMailbox;
5335 while (NextStatusMailbox->Fields.Valid)
5336 {
5337 DAC960_V1_CommandIdentifier_T CommandIdentifier =
5338 NextStatusMailbox->Fields.CommandIdentifier;
5339 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5340 Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
5341 NextStatusMailbox->Word = 0;
5342 if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
5343 NextStatusMailbox = Controller->V1.FirstStatusMailbox;
5344 DAC960_V1_ProcessCompletedCommand(Command);
5345 }
5346 Controller->V1.NextStatusMailbox = NextStatusMailbox;
5347 /*
5348 Attempt to remove additional I/O Requests from the Controller's
5349 I/O Request Queue and queue them to the Controller.
5350 */
5351 DAC960_ProcessRequest(Controller);
5352 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5353 return IRQ_HANDLED;
5354}
5355
5356
5357/*
5358 DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series
5359 Controllers.
5360*/
5361
5362static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel,
5363 void *DeviceIdentifier,
5364 struct pt_regs *InterruptRegisters)
5365{
5366 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5367 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5368 unsigned long flags;
5369
5370 spin_lock_irqsave(&Controller->queue_lock, flags);
5371 while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
5372 {
5373 DAC960_V1_CommandIdentifier_T CommandIdentifier =
5374 DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
5375 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5376 Command->V1.CommandStatus =
5377 DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
5378 DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
5379 DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
5380 DAC960_V1_ProcessCompletedCommand(Command);
5381 }
5382 /*
5383 Attempt to remove additional I/O Requests from the Controller's
5384 I/O Request Queue and queue them to the Controller.
5385 */
5386 DAC960_ProcessRequest(Controller);
5387 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5388 return IRQ_HANDLED;
5389}
5390
5391
5392/*
5393 DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series
5394 Controllers.
5395
5396 Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely
5397 on the data having been placed into DAC960_Controller_T, rather than
5398 an arbitrary buffer.
5399*/
5400
5401static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel,
5402 void *DeviceIdentifier,
5403 struct pt_regs *InterruptRegisters)
5404{
5405 DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
5406 void __iomem *ControllerBaseAddress = Controller->BaseAddress;
5407 unsigned long flags;
5408
5409 spin_lock_irqsave(&Controller->queue_lock, flags);
5410 while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
5411 {
5412 DAC960_V1_CommandIdentifier_T CommandIdentifier =
5413 DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
5414 DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
5415 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
5416 DAC960_V1_CommandOpcode_T CommandOpcode =
5417 CommandMailbox->Common.CommandOpcode;
5418 Command->V1.CommandStatus =
5419 DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
5420 DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
5421 DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
5422 switch (CommandOpcode)
5423 {
5424 case DAC960_V1_Enquiry_Old:
5425 Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Enquiry;
5426 DAC960_P_To_PD_TranslateEnquiry(Controller->V1.NewEnquiry);
5427 break;
5428 case DAC960_V1_GetDeviceState_Old:
5429 Command->V1.CommandMailbox.Common.CommandOpcode =
5430 DAC960_V1_GetDeviceState;
5431 DAC960_P_To_PD_TranslateDeviceState(Controller->V1.NewDeviceState);
5432 break;
5433 case DAC960_V1_Read_Old:
5434 Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Read;
5435 DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
5436 break;
5437 case DAC960_V1_Write_Old:
5438 Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Write;
5439 DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
5440 break;
5441 case DAC960_V1_ReadWithScatterGather_Old:
5442 Command->V1.CommandMailbox.Common.CommandOpcode =
5443 DAC960_V1_ReadWithScatterGather;
5444 DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
5445 break;
5446 case DAC960_V1_WriteWithScatterGather_Old:
5447 Command->V1.CommandMailbox.Common.CommandOpcode =
5448 DAC960_V1_WriteWithScatterGather;
5449 DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
5450 break;
5451 default:
5452 break;
5453 }
5454 DAC960_V1_ProcessCompletedCommand(Command);
5455 }
5456 /*
5457 Attempt to remove additional I/O Requests from the Controller's
5458 I/O Request Queue and queue them to the Controller.
5459 */
5460 DAC960_ProcessRequest(Controller);
5461 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5462 return IRQ_HANDLED;
5463}
5464
5465
5466/*
5467 DAC960_V1_QueueMonitoringCommand queues a Monitoring Command to DAC960 V1
5468 Firmware Controllers.
5469*/
5470
5471static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *Command)
5472{
5473 DAC960_Controller_T *Controller = Command->Controller;
5474 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
5475 DAC960_V1_ClearCommand(Command);
5476 Command->CommandType = DAC960_MonitoringCommand;
5477 CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
5478 CommandMailbox->Type3.BusAddress = Controller->V1.NewEnquiryDMA;
5479 DAC960_QueueCommand(Command);
5480}
5481
5482
5483/*
5484 DAC960_V2_QueueMonitoringCommand queues a Monitoring Command to DAC960 V2
5485 Firmware Controllers.
5486*/
5487
5488static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
5489{
5490 DAC960_Controller_T *Controller = Command->Controller;
5491 DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
5492 DAC960_V2_ClearCommand(Command);
5493 Command->CommandType = DAC960_MonitoringCommand;
5494 CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
5495 CommandMailbox->ControllerInfo.CommandControlBits
5496 .DataTransferControllerToHost = true;
5497 CommandMailbox->ControllerInfo.CommandControlBits
5498 .NoAutoRequestSense = true;
5499 CommandMailbox->ControllerInfo.DataTransferSize =
5500 sizeof(DAC960_V2_ControllerInfo_T);
5501 CommandMailbox->ControllerInfo.ControllerNumber = 0;
5502 CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
5503 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
5504 .ScatterGatherSegments[0]
5505 .SegmentDataPointer =
5506 Controller->V2.NewControllerInformationDMA;
5507 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
5508 .ScatterGatherSegments[0]
5509 .SegmentByteCount =
5510 CommandMailbox->ControllerInfo.DataTransferSize;
5511 DAC960_QueueCommand(Command);
5512}
5513
5514
5515/*
5516 DAC960_MonitoringTimerFunction is the timer function for monitoring
5517 the status of DAC960 Controllers.
5518*/
5519
5520static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
5521{
5522 DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
5523 DAC960_Command_T *Command;
5524 unsigned long flags;
5525
5526 if (Controller->FirmwareType == DAC960_V1_Controller)
5527 {
5528 spin_lock_irqsave(&Controller->queue_lock, flags);
5529 /*
5530 Queue a Status Monitoring Command to Controller.
5531 */
5532 Command = DAC960_AllocateCommand(Controller);
5533 if (Command != NULL)
5534 DAC960_V1_QueueMonitoringCommand(Command);
5535 else Controller->MonitoringCommandDeferred = true;
5536 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5537 }
5538 else
5539 {
5540 DAC960_V2_ControllerInfo_T *ControllerInfo =
5541 &Controller->V2.ControllerInformation;
5542 unsigned int StatusChangeCounter =
5543 Controller->V2.HealthStatusBuffer->StatusChangeCounter;
5544 boolean ForceMonitoringCommand = false;
5545 if (jiffies - Controller->SecondaryMonitoringTime
5546 > DAC960_SecondaryMonitoringInterval)
5547 {
5548 int LogicalDriveNumber;
5549 for (LogicalDriveNumber = 0;
5550 LogicalDriveNumber < DAC960_MaxLogicalDrives;
5551 LogicalDriveNumber++)
5552 {
5553 DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
5554 Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
5555 if (LogicalDeviceInfo == NULL) continue;
5556 if (!LogicalDeviceInfo->LogicalDeviceControl
5557 .LogicalDeviceInitialized)
5558 {
5559 ForceMonitoringCommand = true;
5560 break;
5561 }
5562 }
5563 Controller->SecondaryMonitoringTime = jiffies;
5564 }
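      /*
	A new Monitoring Command is queued only if something may have changed:
	the Health Status Buffer's Status Change Counter or Next Event
	Sequence Number differs from our copy, a long operation
	(initialization, consistency check, rebuild, or expansion) is active
	and the primary interval has elapsed, or a Logical Drive is still
	awaiting background initialization (ForceMonitoringCommand).
	Otherwise the timer is simply re-armed below.
      */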
5565 if (StatusChangeCounter == Controller->V2.StatusChangeCounter &&
5566 Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
5567 == Controller->V2.NextEventSequenceNumber &&
5568 (ControllerInfo->BackgroundInitializationsActive +
5569 ControllerInfo->LogicalDeviceInitializationsActive +
5570 ControllerInfo->PhysicalDeviceInitializationsActive +
5571 ControllerInfo->ConsistencyChecksActive +
5572 ControllerInfo->RebuildsActive +
5573 ControllerInfo->OnlineExpansionsActive == 0 ||
5574 jiffies - Controller->PrimaryMonitoringTime
5575 < DAC960_MonitoringTimerInterval) &&
5576 !ForceMonitoringCommand)
5577 {
5578 Controller->MonitoringTimer.expires =
5579 jiffies + DAC960_HealthStatusMonitoringInterval;
5580 add_timer(&Controller->MonitoringTimer);
5581 return;
5582 }
5583 Controller->V2.StatusChangeCounter = StatusChangeCounter;
5584 Controller->PrimaryMonitoringTime = jiffies;
5585
5586 spin_lock_irqsave(&Controller->queue_lock, flags);
5587 /*
5588 Queue a Status Monitoring Command to Controller.
5589 */
5590 Command = DAC960_AllocateCommand(Controller);
5591 if (Command != NULL)
5592 DAC960_V2_QueueMonitoringCommand(Command);
5593 else Controller->MonitoringCommandDeferred = true;
5594 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5595 /*
5596 Wake up any processes waiting on a Health Status Buffer change.
5597 */
5598 wake_up(&Controller->HealthStatusWaitQueue);
5599 }
5600}
5601
5602/*
5603 DAC960_CheckStatusBuffer verifies that there is room to hold ByteCount
5604 additional bytes in the Combined Status Buffer and grows the buffer if
5605 necessary. It returns true if there is enough room and false otherwise.
5606*/
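/*
  The Combined Status Buffer starts at DAC960_InitialStatusBufferSize bytes
  and doubles whenever it would overflow, copying the existing contents into
  the new allocation.  GFP_ATOMIC is used since messages may be generated
  from interrupt context or while holding the queue lock; if an expansion
  fails, the caller simply skips appending the new message ("Truncating").
*/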
5607
5608static boolean DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller,
5609 unsigned int ByteCount)
5610{
5611 unsigned char *NewStatusBuffer;
5612 if (Controller->InitialStatusLength + 1 +
5613 Controller->CurrentStatusLength + ByteCount + 1 <=
5614 Controller->CombinedStatusBufferLength)
5615 return true;
5616 if (Controller->CombinedStatusBufferLength == 0)
5617 {
5618 unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize;
5619 while (NewStatusBufferLength < ByteCount)
5620 NewStatusBufferLength *= 2;
5621 Controller->CombinedStatusBuffer =
5622 (unsigned char *) kmalloc(NewStatusBufferLength, GFP_ATOMIC);
5623 if (Controller->CombinedStatusBuffer == NULL) return false;
5624 Controller->CombinedStatusBufferLength = NewStatusBufferLength;
5625 return true;
5626 }
5627 NewStatusBuffer = (unsigned char *)
5628 kmalloc(2 * Controller->CombinedStatusBufferLength, GFP_ATOMIC);
5629 if (NewStatusBuffer == NULL)
5630 {
5631 DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
5632 Controller);
5633 return false;
5634 }
5635 memcpy(NewStatusBuffer, Controller->CombinedStatusBuffer,
5636 Controller->CombinedStatusBufferLength);
5637 kfree(Controller->CombinedStatusBuffer);
5638 Controller->CombinedStatusBuffer = NewStatusBuffer;
5639 Controller->CombinedStatusBufferLength *= 2;
5640 Controller->CurrentStatusBuffer =
5641 &NewStatusBuffer[Controller->InitialStatusLength + 1];
5642 return true;
5643}
5644
5645
5646/*
5647 DAC960_Message prints Driver Messages.
5648*/
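/*
  DAC960_Message routes a formatted message according to its level: Announce
  and Info messages are accumulated in the Combined Status Buffer (and echoed
  to the console before initialization completes), Progress messages go to
  the Progress Buffer and are rate-limited via LastProgressReportTime when
  marked ephemeral, UserCritical messages are appended to the User Status
  Buffer, and everything else is printed directly.  BeginningOfLine tracks
  whether the previous message ended in a newline so that continuation
  fragments are not re-prefixed with the "DAC960#<n>:" banner.
*/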
5649
5650static void DAC960_Message(DAC960_MessageLevel_T MessageLevel,
5651 unsigned char *Format,
5652 DAC960_Controller_T *Controller,
5653 ...)
5654{
5655 static unsigned char Buffer[DAC960_LineBufferSize];
5656 static boolean BeginningOfLine = true;
5657 va_list Arguments;
5658 int Length = 0;
5659 va_start(Arguments, Controller);
5660 Length = vsprintf(Buffer, Format, Arguments);
5661 va_end(Arguments);
5662 if (Controller == NULL)
5663 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5664 DAC960_ControllerCount, Buffer);
5665 else if (MessageLevel == DAC960_AnnounceLevel ||
5666 MessageLevel == DAC960_InfoLevel)
5667 {
5668 if (!Controller->ControllerInitialized)
5669 {
5670 if (DAC960_CheckStatusBuffer(Controller, Length))
5671 {
5672 strcpy(&Controller->CombinedStatusBuffer
5673 [Controller->InitialStatusLength],
5674 Buffer);
5675 Controller->InitialStatusLength += Length;
5676 Controller->CurrentStatusBuffer =
5677 &Controller->CombinedStatusBuffer
5678 [Controller->InitialStatusLength + 1];
5679 }
5680 if (MessageLevel == DAC960_AnnounceLevel)
5681 {
5682 static int AnnouncementLines = 0;
5683 if (++AnnouncementLines <= 2)
5684 printk("%sDAC960: %s", DAC960_MessageLevelMap[MessageLevel],
5685 Buffer);
5686 }
5687 else
5688 {
5689 if (BeginningOfLine)
5690 {
5691 if (Buffer[0] != '\n' || Length > 1)
5692 printk("%sDAC960#%d: %s",
5693 DAC960_MessageLevelMap[MessageLevel],
5694 Controller->ControllerNumber, Buffer);
5695 }
5696 else printk("%s", Buffer);
5697 }
5698 }
5699 else if (DAC960_CheckStatusBuffer(Controller, Length))
5700 {
5701 strcpy(&Controller->CurrentStatusBuffer[
5702 Controller->CurrentStatusLength], Buffer);
5703 Controller->CurrentStatusLength += Length;
5704 }
5705 }
5706 else if (MessageLevel == DAC960_ProgressLevel)
5707 {
5708 strcpy(Controller->ProgressBuffer, Buffer);
5709 Controller->ProgressBufferLength = Length;
5710 if (Controller->EphemeralProgressMessage)
5711 {
5712 if (jiffies - Controller->LastProgressReportTime
5713 >= DAC960_ProgressReportingInterval)
5714 {
5715 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5716 Controller->ControllerNumber, Buffer);
5717 Controller->LastProgressReportTime = jiffies;
5718 }
5719 }
5720 else printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5721 Controller->ControllerNumber, Buffer);
5722 }
5723 else if (MessageLevel == DAC960_UserCriticalLevel)
5724 {
5725 strcpy(&Controller->UserStatusBuffer[Controller->UserStatusLength],
5726 Buffer);
5727 Controller->UserStatusLength += Length;
5728 if (Buffer[0] != '\n' || Length > 1)
5729 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5730 Controller->ControllerNumber, Buffer);
5731 }
5732 else
5733 {
5734 if (BeginningOfLine)
5735 printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
5736 Controller->ControllerNumber, Buffer);
5737 else printk("%s", Buffer);
5738 }
5739 BeginningOfLine = (Buffer[Length-1] == '\n');
5740}
5741
5742
5743/*
5744 DAC960_ParsePhysicalDevice parses spaces followed by a Physical Device
5745 Channel:TargetID specification from a User Command string. It updates
5746 Channel and TargetID and returns true on success and false on failure.
5747*/
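/*
  For example, given the remainder " 1:4" of a User Command string, this sets
  *Channel = 1 and *TargetID = 4; it fails if the leading spaces, the ':'
  separator, or the terminating '\0' are missing, or if either value is out
  of range for the Controller.
*/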
5748
5749static boolean DAC960_ParsePhysicalDevice(DAC960_Controller_T *Controller,
5750 char *UserCommandString,
5751 unsigned char *Channel,
5752 unsigned char *TargetID)
5753{
5754 char *NewUserCommandString = UserCommandString;
5755 unsigned long XChannel, XTargetID;
5756 while (*UserCommandString == ' ') UserCommandString++;
5757 if (UserCommandString == NewUserCommandString)
5758 return false;
5759 XChannel = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
5760 if (NewUserCommandString == UserCommandString ||
5761 *NewUserCommandString != ':' ||
5762 XChannel >= Controller->Channels)
5763 return false;
5764 UserCommandString = ++NewUserCommandString;
5765 XTargetID = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
5766 if (NewUserCommandString == UserCommandString ||
5767 *NewUserCommandString != '\0' ||
5768 XTargetID >= Controller->Targets)
5769 return false;
5770 *Channel = XChannel;
5771 *TargetID = XTargetID;
5772 return true;
5773}
5774
5775
5776/*
5777 DAC960_ParseLogicalDrive parses spaces followed by a Logical Drive Number
5778 specification from a User Command string. It updates LogicalDriveNumber and
5779 returns true on success and false on failure.
5780*/
5781
5782static boolean DAC960_ParseLogicalDrive(DAC960_Controller_T *Controller,
5783 char *UserCommandString,
5784 unsigned char *LogicalDriveNumber)
5785{
5786 char *NewUserCommandString = UserCommandString;
5787 unsigned long XLogicalDriveNumber;
5788 while (*UserCommandString == ' ') UserCommandString++;
5789 if (UserCommandString == NewUserCommandString)
5790 return false;
5791 XLogicalDriveNumber =
5792 simple_strtoul(UserCommandString, &NewUserCommandString, 10);
5793 if (NewUserCommandString == UserCommandString ||
5794 *NewUserCommandString != '\0' ||
5795 XLogicalDriveNumber > DAC960_MaxLogicalDrives - 1)
5796 return false;
5797 *LogicalDriveNumber = XLogicalDriveNumber;
5798 return true;
5799}
5800
5801
5802/*
5803 DAC960_V1_SetDeviceState sets the Device State for a Physical Device for
5804 DAC960 V1 Firmware Controllers.
5805*/
5806
5807static void DAC960_V1_SetDeviceState(DAC960_Controller_T *Controller,
5808 DAC960_Command_T *Command,
5809 unsigned char Channel,
5810 unsigned char TargetID,
5811 DAC960_V1_PhysicalDeviceState_T
5812 DeviceState,
5813 const unsigned char *DeviceStateString)
5814{
5815 DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
5816 CommandMailbox->Type3D.CommandOpcode = DAC960_V1_StartDevice;
5817 CommandMailbox->Type3D.Channel = Channel;
5818 CommandMailbox->Type3D.TargetID = TargetID;
5819 CommandMailbox->Type3D.DeviceState = DeviceState;
5820 CommandMailbox->Type3D.Modifier = 0;
5821 DAC960_ExecuteCommand(Command);
5822 switch (Command->V1.CommandStatus)
5823 {
5824 case DAC960_V1_NormalCompletion:
5825 DAC960_UserCritical("%s of Physical Device %d:%d Succeeded\n", Controller,
5826 DeviceStateString, Channel, TargetID);
5827 break;
5828 case DAC960_V1_UnableToStartDevice:
5829 DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
5830 "Unable to Start Device\n", Controller,
5831 DeviceStateString, Channel, TargetID);
5832 break;
5833 case DAC960_V1_NoDeviceAtAddress:
5834 DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
5835 "No Device at Address\n", Controller,
5836 DeviceStateString, Channel, TargetID);
5837 break;
5838 case DAC960_V1_InvalidChannelOrTargetOrModifier:
5839 DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
5840 "Invalid Channel or Target or Modifier\n",
5841 Controller, DeviceStateString, Channel, TargetID);
5842 break;
5843 case DAC960_V1_ChannelBusy:
5844 DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
5845 "Channel Busy\n", Controller,
5846 DeviceStateString, Channel, TargetID);
5847 break;
5848 default:
5849 DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
5850 "Unexpected Status %04X\n", Controller,
5851 DeviceStateString, Channel, TargetID,
5852 Command->V1.CommandStatus);
5853 break;
5854 }
5855}
5856
5857
5858/*
5859 DAC960_V1_ExecuteUserCommand executes a User Command for DAC960 V1 Firmware
5860 Controllers.
5861*/
5862
5863static boolean DAC960_V1_ExecuteUserCommand(DAC960_Controller_T *Controller,
5864 unsigned char *UserCommand)
5865{
5866 DAC960_Command_T *Command;
5867 DAC960_V1_CommandMailbox_T *CommandMailbox;
5868 unsigned long flags;
5869 unsigned char Channel, TargetID, LogicalDriveNumber;
5870
5871 spin_lock_irqsave(&Controller->queue_lock, flags);
5872 while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
5873 DAC960_WaitForCommand(Controller);
5874 spin_unlock_irqrestore(&Controller->queue_lock, flags);
5875 Controller->UserStatusLength = 0;
5876 DAC960_V1_ClearCommand(Command);
5877 Command->CommandType = DAC960_ImmediateCommand;
5878 CommandMailbox = &Command->V1.CommandMailbox;
5879 if (strcmp(UserCommand, "flush-cache") == 0)
5880 {
5881 CommandMailbox->Type3.CommandOpcode = DAC960_V1_Flush;
5882 DAC960_ExecuteCommand(Command);
5883 DAC960_UserCritical("Cache Flush Completed\n", Controller);
5884 }
5885 else if (strncmp(UserCommand, "kill", 4) == 0 &&
5886 DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
5887 &Channel, &TargetID))
5888 {
5889 DAC960_V1_DeviceState_T *DeviceState =
5890 &Controller->V1.DeviceState[Channel][TargetID];
5891 if (DeviceState->Present &&
5892 DeviceState->DeviceType == DAC960_V1_DiskType &&
5893 DeviceState->DeviceState != DAC960_V1_Device_Dead)
5894 DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
5895 DAC960_V1_Device_Dead, "Kill");
5896 else DAC960_UserCritical("Kill of Physical Device %d:%d Illegal\n",
5897 Controller, Channel, TargetID);
5898 }
5899 else if (strncmp(UserCommand, "make-online", 11) == 0 &&
5900 DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
5901 &Channel, &TargetID))
5902 {
5903 DAC960_V1_DeviceState_T *DeviceState =
5904 &Controller->V1.DeviceState[Channel][TargetID];
5905 if (DeviceState->Present &&
5906 DeviceState->DeviceType == DAC960_V1_DiskType &&
5907 DeviceState->DeviceState == DAC960_V1_Device_Dead)
5908 DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
5909 DAC960_V1_Device_Online, "Make Online");
5910 else DAC960_UserCritical("Make Online of Physical Device %d:%d Illegal\n",
5911 Controller, Channel, TargetID);
5912
5913 }
5914 else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
5915 DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
5916 &Channel, &TargetID))
5917 {
5918 DAC960_V1_DeviceState_T *DeviceState =
5919 &Controller->V1.DeviceState[Channel][TargetID];
5920 if (DeviceState->Present &&
5921 DeviceState->DeviceType == DAC960_V1_DiskType &&
5922 DeviceState->DeviceState == DAC960_V1_Device_Dead)
5923 DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
5924 DAC960_V1_Device_Standby, "Make Standby");
5925 else DAC960_UserCritical("Make Standby of Physical "
5926 "Device %d:%d Illegal\n",
5927 Controller, Channel, TargetID);
5928 }
5929 else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
5930 DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
5931 &Channel, &TargetID))
5932 {
5933 CommandMailbox->Type3D.CommandOpcode = DAC960_V1_RebuildAsync;
5934 CommandMailbox->Type3D.Channel = Channel;
5935 CommandMailbox->Type3D.TargetID = TargetID;
5936 DAC960_ExecuteCommand(Command);
5937 switch (Command->V1.CommandStatus)
5938 {
5939 case DAC960_V1_NormalCompletion:
5940 DAC960_UserCritical("Rebuild of Physical Device %d:%d Initiated\n",
5941 Controller, Channel, TargetID);
5942 break;
5943 case DAC960_V1_AttemptToRebuildOnlineDrive:
5944 DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
5945 "Attempt to Rebuild Online or "
5946 "Unresponsive Drive\n",
5947 Controller, Channel, TargetID);
5948 break;
5949 case DAC960_V1_NewDiskFailedDuringRebuild:
5950 DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
5951 "New Disk Failed During Rebuild\n",
5952 Controller, Channel, TargetID);
5953 break;
5954 case DAC960_V1_InvalidDeviceAddress:
5955 DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
5956 "Invalid Device Address\n",
5957 Controller, Channel, TargetID);
5958 break;
5959 case DAC960_V1_RebuildOrCheckAlreadyInProgress:
5960 DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
5961 "Rebuild or Consistency Check Already "
5962 "in Progress\n", Controller, Channel, TargetID);
5963 break;
5964 default:
5965 DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
5966 "Unexpected Status %04X\n", Controller,
5967 Channel, TargetID, Command->V1.CommandStatus);
5968 break;
5969 }
5970 }
5971 else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
5972 DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
5973 &LogicalDriveNumber))
5974 {
5975 CommandMailbox->Type3C.CommandOpcode = DAC960_V1_CheckConsistencyAsync;
5976 CommandMailbox->Type3C.LogicalDriveNumber = LogicalDriveNumber;
5977 CommandMailbox->Type3C.AutoRestore = true;
5978 DAC960_ExecuteCommand(Command);
5979 switch (Command->V1.CommandStatus)
5980 {
5981 case DAC960_V1_NormalCompletion:
5982 DAC960_UserCritical("Consistency Check of Logical Drive %d "
5983 "(/dev/rd/c%dd%d) Initiated\n",
5984 Controller, LogicalDriveNumber,
5985 Controller->ControllerNumber,
5986 LogicalDriveNumber);
5987 break;
5988 case DAC960_V1_DependentDiskIsDead:
5989 DAC960_UserCritical("Consistency Check of Logical Drive %d "
5990 "(/dev/rd/c%dd%d) Failed - "
5991 "Dependent Physical Device is DEAD\n",
5992 Controller, LogicalDriveNumber,
5993 Controller->ControllerNumber,
5994 LogicalDriveNumber);
5995 break;
5996 case DAC960_V1_InvalidOrNonredundantLogicalDrive:
5997 DAC960_UserCritical("Consistency Check of Logical Drive %d "
5998 "(/dev/rd/c%dd%d) Failed - "
5999 "Invalid or Nonredundant Logical Drive\n",
6000 Controller, LogicalDriveNumber,
6001 Controller->ControllerNumber,
6002 LogicalDriveNumber);
6003 break;
6004 case DAC960_V1_RebuildOrCheckAlreadyInProgress:
6005 DAC960_UserCritical("Consistency Check of Logical Drive %d "
6006 "(/dev/rd/c%dd%d) Failed - Rebuild or "
6007 "Consistency Check Already in Progress\n",
6008 Controller, LogicalDriveNumber,
6009 Controller->ControllerNumber,
6010 LogicalDriveNumber);
6011 break;
6012 default:
6013 DAC960_UserCritical("Consistency Check of Logical Drive %d "
6014 "(/dev/rd/c%dd%d) Failed - "
6015 "Unexpected Status %04X\n",
6016 Controller, LogicalDriveNumber,
6017 Controller->ControllerNumber,
6018 LogicalDriveNumber, Command->V1.CommandStatus);
6019 break;
6020 }
6021 }
6022 else if (strcmp(UserCommand, "cancel-rebuild") == 0 ||
6023 strcmp(UserCommand, "cancel-consistency-check") == 0)
6024 {
6025 /*
6026 the OldRebuildRateConstant is never actually used
6027 once its value is retrieved from the controller.
6028 */
6029 unsigned char *OldRebuildRateConstant;
6030 dma_addr_t OldRebuildRateConstantDMA;
6031
6032 OldRebuildRateConstant = pci_alloc_consistent( Controller->PCIDevice,
6033 sizeof(char), &OldRebuildRateConstantDMA);
6034 if (OldRebuildRateConstant == NULL) {
6035 DAC960_UserCritical("Cancellation of Rebuild or "
6036 "Consistency Check Failed - "
6037 "Out of Memory",
6038 Controller);
6039 goto failure;
6040 }
6041 CommandMailbox->Type3R.CommandOpcode = DAC960_V1_RebuildControl;
6042 CommandMailbox->Type3R.RebuildRateConstant = 0xFF;
6043 CommandMailbox->Type3R.BusAddress = OldRebuildRateConstantDMA;
6044 DAC960_ExecuteCommand(Command);
6045 switch (Command->V1.CommandStatus)
6046 {
6047 case DAC960_V1_NormalCompletion:
6048 DAC960_UserCritical("Rebuild or Consistency Check Cancelled\n",
6049 Controller);
6050 break;
6051 default:
6052 DAC960_UserCritical("Cancellation of Rebuild or "
6053 "Consistency Check Failed - "
6054 "Unexpected Status %04X\n",
6055 Controller, Command->V1.CommandStatus);
6056 break;
6057 }
6058failure:
6059 pci_free_consistent(Controller->PCIDevice, sizeof(char),
6060 OldRebuildRateConstant, OldRebuildRateConstantDMA);
6061 }
6062 else DAC960_UserCritical("Illegal User Command: '%s'\n",
6063 Controller, UserCommand);
6064
6065 spin_lock_irqsave(&Controller->queue_lock, flags);
6066 DAC960_DeallocateCommand(Command);
6067 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6068 return true;
6069}
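
/*
  The V1 user commands accepted above are "flush-cache", "kill <C>:<T>",
  "make-online <C>:<T>", "make-standby <C>:<T>", "rebuild <C>:<T>",
  "check-consistency <N>", "cancel-rebuild" and "cancel-consistency-check",
  where <C>:<T> is a Channel:TargetID pair and <N> is a Logical Drive Number.
*/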
6070
6071
6072/*
6073 DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
6074 TargetID into a Logical Device. It returns true on success and false
6075 on failure.
6076*/
6077
6078static boolean DAC960_V2_TranslatePhysicalDevice(DAC960_Command_T *Command,
6079 unsigned char Channel,
6080 unsigned char TargetID,
6081 unsigned short
6082 *LogicalDeviceNumber)
6083{
6084 DAC960_V2_CommandMailbox_T SavedCommandMailbox, *CommandMailbox;
6085 DAC960_Controller_T *Controller = Command->Controller;
6086
6087 CommandMailbox = &Command->V2.CommandMailbox;
6088 memcpy(&SavedCommandMailbox, CommandMailbox,
6089 sizeof(DAC960_V2_CommandMailbox_T));
6090
6091 CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
6092 CommandMailbox->PhysicalDeviceInfo.CommandControlBits
6093 .DataTransferControllerToHost = true;
6094 CommandMailbox->PhysicalDeviceInfo.CommandControlBits
6095 .NoAutoRequestSense = true;
6096 CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
6097 sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
6098 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
6099 CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
6100 CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
6101 DAC960_V2_TranslatePhysicalToLogicalDevice;
6102 CommandMailbox->Common.DataTransferMemoryAddress
6103 .ScatterGatherSegments[0]
6104 .SegmentDataPointer =
6105 Controller->V2.PhysicalToLogicalDeviceDMA;
6106 CommandMailbox->Common.DataTransferMemoryAddress
6107 .ScatterGatherSegments[0]
6108 .SegmentByteCount =
6109 CommandMailbox->Common.DataTransferSize;
6110
6111 DAC960_ExecuteCommand(Command);
6112 *LogicalDeviceNumber = Controller->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
6113
6114 memcpy(CommandMailbox, &SavedCommandMailbox,
6115 sizeof(DAC960_V2_CommandMailbox_T));
6116 return (Command->V2.CommandStatus == DAC960_V2_NormalCompletion);
6117}
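
/*
  The Command Mailbox is saved and restored around the translation above
  because the caller has typically already filled in parts of the mailbox
  for the user command it is about to issue; the translation reuses the
  same Command structure for the intermediate Physical to Logical Device
  IOCTL.
*/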
6118
6119
6120/*
6121 DAC960_V2_ExecuteUserCommand executes a User Command for DAC960 V2 Firmware
6122 Controllers.
6123*/
6124
6125static boolean DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
6126 unsigned char *UserCommand)
6127{
6128 DAC960_Command_T *Command;
6129 DAC960_V2_CommandMailbox_T *CommandMailbox;
6130 unsigned long flags;
6131 unsigned char Channel, TargetID, LogicalDriveNumber;
6132 unsigned short LogicalDeviceNumber;
6133
6134 spin_lock_irqsave(&Controller->queue_lock, flags);
6135 while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
6136 DAC960_WaitForCommand(Controller);
6137 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6138 Controller->UserStatusLength = 0;
6139 DAC960_V2_ClearCommand(Command);
6140 Command->CommandType = DAC960_ImmediateCommand;
6141 CommandMailbox = &Command->V2.CommandMailbox;
6142 CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
6143 CommandMailbox->Common.CommandControlBits.DataTransferControllerToHost = true;
6144 CommandMailbox->Common.CommandControlBits.NoAutoRequestSense = true;
6145 if (strcmp(UserCommand, "flush-cache") == 0)
6146 {
6147 CommandMailbox->DeviceOperation.IOCTL_Opcode = DAC960_V2_PauseDevice;
6148 CommandMailbox->DeviceOperation.OperationDevice =
6149 DAC960_V2_RAID_Controller;
6150 DAC960_ExecuteCommand(Command);
6151 DAC960_UserCritical("Cache Flush Completed\n", Controller);
6152 }
6153 else if (strncmp(UserCommand, "kill", 4) == 0 &&
6154 DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
6155 &Channel, &TargetID) &&
6156 DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
6157 &LogicalDeviceNumber))
6158 {
6159 CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
6160 LogicalDeviceNumber;
6161 CommandMailbox->SetDeviceState.IOCTL_Opcode =
6162 DAC960_V2_SetDeviceState;
6163 CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
6164 DAC960_V2_Device_Dead;
6165 DAC960_ExecuteCommand(Command);
6166 DAC960_UserCritical("Kill of Physical Device %d:%d %s\n",
6167 Controller, Channel, TargetID,
6168 (Command->V2.CommandStatus
6169 == DAC960_V2_NormalCompletion
6170 ? "Succeeded" : "Failed"));
6171 }
6172 else if (strncmp(UserCommand, "make-online", 11) == 0 &&
6173 DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
6174 &Channel, &TargetID) &&
6175 DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
6176 &LogicalDeviceNumber))
6177 {
6178 CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
6179 LogicalDeviceNumber;
6180 CommandMailbox->SetDeviceState.IOCTL_Opcode =
6181 DAC960_V2_SetDeviceState;
6182 CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
6183 DAC960_V2_Device_Online;
6184 DAC960_ExecuteCommand(Command);
6185 DAC960_UserCritical("Make Online of Physical Device %d:%d %s\n",
6186 Controller, Channel, TargetID,
6187 (Command->V2.CommandStatus
6188 == DAC960_V2_NormalCompletion
6189 ? "Succeeded" : "Failed"));
6190 }
6191 else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
6192 DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
6193 &Channel, &TargetID) &&
6194 DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
6195 &LogicalDeviceNumber))
6196 {
6197 CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
6198 LogicalDeviceNumber;
6199 CommandMailbox->SetDeviceState.IOCTL_Opcode =
6200 DAC960_V2_SetDeviceState;
6201 CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
6202 DAC960_V2_Device_Standby;
6203 DAC960_ExecuteCommand(Command);
6204 DAC960_UserCritical("Make Standby of Physical Device %d:%d %s\n",
6205 Controller, Channel, TargetID,
6206 (Command->V2.CommandStatus
6207 == DAC960_V2_NormalCompletion
6208 ? "Succeeded" : "Failed"));
6209 }
6210 else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
6211 DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
6212 &Channel, &TargetID) &&
6213 DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
6214 &LogicalDeviceNumber))
6215 {
6216 CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
6217 LogicalDeviceNumber;
6218 CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
6219 DAC960_V2_RebuildDeviceStart;
6220 DAC960_ExecuteCommand(Command);
6221 DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
6222 Controller, Channel, TargetID,
6223 (Command->V2.CommandStatus
6224 == DAC960_V2_NormalCompletion
6225 ? "Initiated" : "Not Initiated"));
6226 }
6227 else if (strncmp(UserCommand, "cancel-rebuild", 14) == 0 &&
6228 DAC960_ParsePhysicalDevice(Controller, &UserCommand[14],
6229 &Channel, &TargetID) &&
6230 DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
6231 &LogicalDeviceNumber))
6232 {
6233 CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
6234 LogicalDeviceNumber;
6235 CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
6236 DAC960_V2_RebuildDeviceStop;
6237 DAC960_ExecuteCommand(Command);
6238 DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
6239 Controller, Channel, TargetID,
6240 (Command->V2.CommandStatus
6241 == DAC960_V2_NormalCompletion
6242 ? "Cancelled" : "Not Cancelled"));
6243 }
6244 else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
6245 DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
6246 &LogicalDriveNumber))
6247 {
6248 CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
6249 LogicalDriveNumber;
6250 CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
6251 DAC960_V2_ConsistencyCheckStart;
6252 CommandMailbox->ConsistencyCheck.RestoreConsistency = true;
6253 CommandMailbox->ConsistencyCheck.InitializedAreaOnly = false;
6254 DAC960_ExecuteCommand(Command);
6255 DAC960_UserCritical("Consistency Check of Logical Drive %d "
6256 "(/dev/rd/c%dd%d) %s\n",
6257 Controller, LogicalDriveNumber,
6258 Controller->ControllerNumber,
6259 LogicalDriveNumber,
6260 (Command->V2.CommandStatus
6261 == DAC960_V2_NormalCompletion
6262 ? "Initiated" : "Not Initiated"));
6263 }
6264 else if (strncmp(UserCommand, "cancel-consistency-check", 24) == 0 &&
6265 DAC960_ParseLogicalDrive(Controller, &UserCommand[24],
6266 &LogicalDriveNumber))
6267 {
6268 CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
6269 LogicalDriveNumber;
6270 CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
6271 DAC960_V2_ConsistencyCheckStop;
6272 DAC960_ExecuteCommand(Command);
6273 DAC960_UserCritical("Consistency Check of Logical Drive %d "
6274 "(/dev/rd/c%dd%d) %s\n",
6275 Controller, LogicalDriveNumber,
6276 Controller->ControllerNumber,
6277 LogicalDriveNumber,
6278 (Command->V2.CommandStatus
6279 == DAC960_V2_NormalCompletion
6280 ? "Cancelled" : "Not Cancelled"));
6281 }
6282 else if (strcmp(UserCommand, "perform-discovery") == 0)
6283 {
6284 CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery;
6285 DAC960_ExecuteCommand(Command);
6286 DAC960_UserCritical("Discovery %s\n", Controller,
6287 (Command->V2.CommandStatus
6288 == DAC960_V2_NormalCompletion
6289 ? "Initiated" : "Not Initiated"));
6290 if (Command->V2.CommandStatus == DAC960_V2_NormalCompletion)
6291 {
6292 CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
6293 CommandMailbox->ControllerInfo.CommandControlBits
6294 .DataTransferControllerToHost = true;
6295 CommandMailbox->ControllerInfo.CommandControlBits
6296 .NoAutoRequestSense = true;
6297 CommandMailbox->ControllerInfo.DataTransferSize =
6298 sizeof(DAC960_V2_ControllerInfo_T);
6299 CommandMailbox->ControllerInfo.ControllerNumber = 0;
6300 CommandMailbox->ControllerInfo.IOCTL_Opcode =
6301 DAC960_V2_GetControllerInfo;
6302 /*
6303 * How does this NOT race with the queued Monitoring
6304 * usage of this structure?
6305 */
6306 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
6307 .ScatterGatherSegments[0]
6308 .SegmentDataPointer =
6309 Controller->V2.NewControllerInformationDMA;
6310 CommandMailbox->ControllerInfo.DataTransferMemoryAddress
6311 .ScatterGatherSegments[0]
6312 .SegmentByteCount =
6313 CommandMailbox->ControllerInfo.DataTransferSize;
6314 DAC960_ExecuteCommand(Command);
6315 while (Controller->V2.NewControllerInformation->PhysicalScanActive)
6316 {
6317 DAC960_ExecuteCommand(Command);
6318 sleep_on_timeout(&Controller->CommandWaitQueue, HZ);
6319 }
6320 DAC960_UserCritical("Discovery Completed\n", Controller);
6321 }
6322 }
6323 else if (strcmp(UserCommand, "suppress-enclosure-messages") == 0)
6324 Controller->SuppressEnclosureMessages = true;
6325 else DAC960_UserCritical("Illegal User Command: '%s'\n",
6326 Controller, UserCommand);
6327
6328 spin_lock_irqsave(&Controller->queue_lock, flags);
6329 DAC960_DeallocateCommand(Command);
6330 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6331 return true;
6332}
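
/*
  In addition to the commands shared with V1 firmware, V2 controllers accept
  "perform-discovery" and "suppress-enclosure-messages".  Note also that the
  V2 "cancel-rebuild" and "cancel-consistency-check" forms take a
  Channel:TargetID or Logical Drive Number argument, rather than cancelling
  whichever rebuild or consistency check is currently in progress as the V1
  forms do.
*/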
6333
6334
6335/*
6336 DAC960_ProcReadStatus implements reading /proc/rd/status.
6337*/
6338
6339static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6340 int Count, int *EOF, void *Data)
6341{
6342 unsigned char *StatusMessage = "OK\n";
6343 int ControllerNumber, BytesAvailable;
6344 for (ControllerNumber = 0;
6345 ControllerNumber < DAC960_ControllerCount;
6346 ControllerNumber++)
6347 {
6348 DAC960_Controller_T *Controller = DAC960_Controllers[ControllerNumber];
6349 if (Controller == NULL) continue;
6350 if (Controller->MonitoringAlertMode)
6351 {
6352 StatusMessage = "ALERT\n";
6353 break;
6354 }
6355 }
6356 BytesAvailable = strlen(StatusMessage) - Offset;
6357 if (Count >= BytesAvailable)
6358 {
6359 Count = BytesAvailable;
6360 *EOF = true;
6361 }
6362 if (Count <= 0) return 0;
6363 *Start = Page;
6364 memcpy(Page, &StatusMessage[Offset], Count);
6365 return Count;
6366}
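
/*
  This and the other read_proc handlers below follow the legacy procfs
  convention: at most Count bytes starting at Offset are copied into Page,
  *Start is pointed at the copied data so the caller can account for the
  offset, and *EOF is set once the complete message has been returned.
*/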
6367
6368
6369/*
6370 DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status.
6371*/
6372
6373static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset,
6374 int Count, int *EOF, void *Data)
6375{
6376 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
6377 int BytesAvailable = Controller->InitialStatusLength - Offset;
6378 if (Count >= BytesAvailable)
6379 {
6380 Count = BytesAvailable;
6381 *EOF = true;
6382 }
6383 if (Count <= 0) return 0;
6384 *Start = Page;
6385 memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
6386 return Count;
6387}
6388
6389
6390/*
6391 DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status.
6392*/
6393
6394static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
6395 int Count, int *EOF, void *Data)
6396{
6397 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
6398 unsigned char *StatusMessage =
6399 "No Rebuild or Consistency Check in Progress\n";
6400 int ProgressMessageLength = strlen(StatusMessage);
6401 int BytesAvailable;
6402 if (jiffies != Controller->LastCurrentStatusTime)
6403 {
6404 Controller->CurrentStatusLength = 0;
6405 DAC960_AnnounceDriver(Controller);
6406 DAC960_ReportControllerConfiguration(Controller);
6407 DAC960_ReportDeviceConfiguration(Controller);
6408 if (Controller->ProgressBufferLength > 0)
6409 ProgressMessageLength = Controller->ProgressBufferLength;
6410 if (DAC960_CheckStatusBuffer(Controller, 2 + ProgressMessageLength))
6411 {
6412 unsigned char *CurrentStatusBuffer = Controller->CurrentStatusBuffer;
6413 CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
6414 CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
6415 if (Controller->ProgressBufferLength > 0)
6416 strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
6417 Controller->ProgressBuffer);
6418 else
6419 strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
6420 StatusMessage);
6421 Controller->CurrentStatusLength += ProgressMessageLength;
6422 }
6423 Controller->LastCurrentStatusTime = jiffies;
6424 }
6425 BytesAvailable = Controller->CurrentStatusLength - Offset;
6426 if (Count >= BytesAvailable)
6427 {
6428 Count = BytesAvailable;
6429 *EOF = true;
6430 }
6431 if (Count <= 0) return 0;
6432 *Start = Page;
6433 memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
6434 return Count;
6435}
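
/*
  The current status text is regenerated at most once per jiffy; repeated
  reads of /proc/rd/cN/current_status within the same tick are served from
  the previously generated CurrentStatusBuffer.
*/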
6436
6437
6438/*
6439 DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command.
6440*/
6441
6442static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset,
6443 int Count, int *EOF, void *Data)
6444{
6445 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
6446 int BytesAvailable = Controller->UserStatusLength - Offset;
6447 if (Count >= BytesAvailable)
6448 {
6449 Count = BytesAvailable;
6450 *EOF = true;
6451 }
6452 if (Count <= 0) return 0;
6453 *Start = Page;
6454 memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
6455 return Count;
6456}
6457
6458
6459/*
6460 DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command.
6461*/
6462
6463static int DAC960_ProcWriteUserCommand(struct file *file,
6464 const char __user *Buffer,
6465 unsigned long Count, void *Data)
6466{
6467 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
6468 unsigned char CommandBuffer[80];
6469 int Length;
6470 if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
6471 if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
6472 CommandBuffer[Count] = '\0';
6473 Length = strlen(CommandBuffer);
6474 if (Length > 0 && CommandBuffer[Length-1] == '\n')
6475 CommandBuffer[--Length] = '\0';
6476 if (Controller->FirmwareType == DAC960_V1_Controller)
6477 return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
6478 ? Count : -EBUSY);
6479 else
6480 return (DAC960_V2_ExecuteUserCommand(Controller, CommandBuffer)
6481 ? Count : -EBUSY);
6482}
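
/*
  For example, assuming controller 0 is present, a rebuild of the physical
  device at Channel 1, TargetID 2 can be requested from user space with

      echo "rebuild 1:2" > /proc/rd/c0/user_command

  and the resulting status text read back from the same file.
*/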
6483
6484
6485/*
6486 DAC960_CreateProcEntries creates the /proc/rd/... entries for the
6487 DAC960 Driver.
6488*/
6489
6490static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
6491{
6492 struct proc_dir_entry *StatusProcEntry;
6493 struct proc_dir_entry *ControllerProcEntry;
6494 struct proc_dir_entry *UserCommandProcEntry;
6495
6496 if (DAC960_ProcDirectoryEntry == NULL) {
6497 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
6498 StatusProcEntry = create_proc_read_entry("status", 0,
6499 DAC960_ProcDirectoryEntry,
6500 DAC960_ProcReadStatus, NULL);
6501 }
6502
6503 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
6504 ControllerProcEntry = proc_mkdir(Controller->ControllerName,
6505 DAC960_ProcDirectoryEntry);
6506 create_proc_read_entry("initial_status", 0, ControllerProcEntry,
6507 DAC960_ProcReadInitialStatus, Controller);
6508 create_proc_read_entry("current_status", 0, ControllerProcEntry,
6509 DAC960_ProcReadCurrentStatus, Controller);
6510 UserCommandProcEntry =
6511 create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
6512 ControllerProcEntry, DAC960_ProcReadUserCommand,
6513 Controller);
6514 UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
6515 Controller->ControllerProcEntry = ControllerProcEntry;
6516}
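
/*
  The resulting layout is /proc/rd/status plus, for each controller N,
  /proc/rd/cN/initial_status, /proc/rd/cN/current_status and
  /proc/rd/cN/user_command, with only user_command being writable.
*/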
6517
6518
6519/*
6520 DAC960_DestroyProcEntries destroys the /proc/rd/... entries for the
6521 DAC960 Driver.
6522*/
6523
6524static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
6525{
6526 if (Controller->ControllerProcEntry == NULL)
6527 return;
6528 remove_proc_entry("initial_status", Controller->ControllerProcEntry);
6529 remove_proc_entry("current_status", Controller->ControllerProcEntry);
6530 remove_proc_entry("user_command", Controller->ControllerProcEntry);
6531 remove_proc_entry(Controller->ControllerName, DAC960_ProcDirectoryEntry);
6532 Controller->ControllerProcEntry = NULL;
6533}
6534
6535#ifdef DAC960_GAM_MINOR
6536
6537/*
6538 DAC960_gam_ioctl is the ioctl function for performing RAID operations.
6539*/
6540
6541static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
6542 unsigned int Request, unsigned long Argument)
6543{
6544 int ErrorCode = 0;
6545 if (!capable(CAP_SYS_ADMIN)) return -EACCES;
6546 switch (Request)
6547 {
6548 case DAC960_IOCTL_GET_CONTROLLER_COUNT:
6549 return DAC960_ControllerCount;
6550 case DAC960_IOCTL_GET_CONTROLLER_INFO:
6551 {
6552 DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
6553 (DAC960_ControllerInfo_T __user *) Argument;
6554 DAC960_ControllerInfo_T ControllerInfo;
6555 DAC960_Controller_T *Controller;
6556 int ControllerNumber;
6557 if (UserSpaceControllerInfo == NULL) return -EINVAL;
6558 ErrorCode = get_user(ControllerNumber,
6559 &UserSpaceControllerInfo->ControllerNumber);
6560 if (ErrorCode != 0) return ErrorCode;
6561 if (ControllerNumber < 0 ||
6562 ControllerNumber > DAC960_ControllerCount - 1)
6563 return -ENXIO;
6564 Controller = DAC960_Controllers[ControllerNumber];
6565 if (Controller == NULL) return -ENXIO;
6566 memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
6567 ControllerInfo.ControllerNumber = ControllerNumber;
6568 ControllerInfo.FirmwareType = Controller->FirmwareType;
6569 ControllerInfo.Channels = Controller->Channels;
6570 ControllerInfo.Targets = Controller->Targets;
6571 ControllerInfo.PCI_Bus = Controller->Bus;
6572 ControllerInfo.PCI_Device = Controller->Device;
6573 ControllerInfo.PCI_Function = Controller->Function;
6574 ControllerInfo.IRQ_Channel = Controller->IRQ_Channel;
6575 ControllerInfo.PCI_Address = Controller->PCI_Address;
6576 strcpy(ControllerInfo.ModelName, Controller->ModelName);
6577 strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
6578 return (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
6579 sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
6580 }
6581 case DAC960_IOCTL_V1_EXECUTE_COMMAND:
6582 {
6583 DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
6584 (DAC960_V1_UserCommand_T __user *) Argument;
6585 DAC960_V1_UserCommand_T UserCommand;
6586 DAC960_Controller_T *Controller;
6587 DAC960_Command_T *Command = NULL;
6588 DAC960_V1_CommandOpcode_T CommandOpcode;
6589 DAC960_V1_CommandStatus_T CommandStatus;
6590 DAC960_V1_DCDB_T DCDB;
6591 DAC960_V1_DCDB_T *DCDB_IOBUF = NULL;
6592 dma_addr_t DCDB_IOBUFDMA;
6593 unsigned long flags;
6594 int ControllerNumber, DataTransferLength;
6595 unsigned char *DataTransferBuffer = NULL;
6596 dma_addr_t DataTransferBufferDMA;
6597 if (UserSpaceUserCommand == NULL) return -EINVAL;
6598 if (copy_from_user(&UserCommand, UserSpaceUserCommand,
6599 sizeof(DAC960_V1_UserCommand_T))) {
6600 ErrorCode = -EFAULT;
6601 goto Failure1a;
6602 }
6603 ControllerNumber = UserCommand.ControllerNumber;
6604 if (ControllerNumber < 0 ||
6605 ControllerNumber > DAC960_ControllerCount - 1)
6606 return -ENXIO;
6607 Controller = DAC960_Controllers[ControllerNumber];
6608 if (Controller == NULL) return -ENXIO;
6609 if (Controller->FirmwareType != DAC960_V1_Controller) return -EINVAL;
6610 CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
6611 DataTransferLength = UserCommand.DataTransferLength;
6612 if (CommandOpcode & 0x80) return -EINVAL;
6613 if (CommandOpcode == DAC960_V1_DCDB)
6614 {
6615 if (copy_from_user(&DCDB, UserCommand.DCDB,
6616 sizeof(DAC960_V1_DCDB_T))) {
6617 ErrorCode = -EFAULT;
6618 goto Failure1a;
6619 }
6620 if (DCDB.Channel >= DAC960_V1_MaxChannels) return -EINVAL;
6621 if (!((DataTransferLength == 0 &&
6622 DCDB.Direction
6623 == DAC960_V1_DCDB_NoDataTransfer) ||
6624 (DataTransferLength > 0 &&
6625 DCDB.Direction
6626 == DAC960_V1_DCDB_DataTransferDeviceToSystem) ||
6627 (DataTransferLength < 0 &&
6628 DCDB.Direction
6629 == DAC960_V1_DCDB_DataTransferSystemToDevice)))
6630 return -EINVAL;
6631 if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
6632 != abs(DataTransferLength))
6633 return -EINVAL;
6634 DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
6635 sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
6636 if (DCDB_IOBUF == NULL)
6637 return -ENOMEM;
6638 }
6639 if (DataTransferLength > 0)
6640 {
6641 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6642 DataTransferLength, &DataTransferBufferDMA);
6643 if (DataTransferBuffer == NULL) {
6644 ErrorCode = -ENOMEM;
6645 goto Failure1;
6646 }
6647 memset(DataTransferBuffer, 0, DataTransferLength);
6648 }
6649 else if (DataTransferLength < 0)
6650 {
6651 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6652 -DataTransferLength, &DataTransferBufferDMA);
6653 if (DataTransferBuffer == NULL) {
6654 ErrorCode = -ENOMEM;
6655 goto Failure1;
6656 }
6657 if (copy_from_user(DataTransferBuffer,
6658 UserCommand.DataTransferBuffer,
6659 -DataTransferLength)) {
6660 ErrorCode = -EFAULT;
6661 goto Failure1;
6662 }
6663 }
6664 if (CommandOpcode == DAC960_V1_DCDB)
6665 {
6666 spin_lock_irqsave(&Controller->queue_lock, flags);
6667 while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
6668 DAC960_WaitForCommand(Controller);
6669 while (Controller->V1.DirectCommandActive[DCDB.Channel]
6670 [DCDB.TargetID])
6671 {
6672 spin_unlock_irq(&Controller->queue_lock);
6673 __wait_event(Controller->CommandWaitQueue,
6674 !Controller->V1.DirectCommandActive
6675 [DCDB.Channel][DCDB.TargetID]);
6676 spin_lock_irq(&Controller->queue_lock);
6677 }
6678 Controller->V1.DirectCommandActive[DCDB.Channel]
6679 [DCDB.TargetID] = true;
6680 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6681 DAC960_V1_ClearCommand(Command);
6682 Command->CommandType = DAC960_ImmediateCommand;
6683 memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
6684 sizeof(DAC960_V1_CommandMailbox_T));
6685 Command->V1.CommandMailbox.Type3.BusAddress = DCDB_IOBUFDMA;
6686 DCDB.BusAddress = DataTransferBufferDMA;
6687 memcpy(DCDB_IOBUF, &DCDB, sizeof(DAC960_V1_DCDB_T));
6688 }
6689 else
6690 {
6691 spin_lock_irqsave(&Controller->queue_lock, flags);
6692 while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
6693 DAC960_WaitForCommand(Controller);
6694 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6695 DAC960_V1_ClearCommand(Command);
6696 Command->CommandType = DAC960_ImmediateCommand;
6697 memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
6698 sizeof(DAC960_V1_CommandMailbox_T));
6699 if (DataTransferBuffer != NULL)
6700 Command->V1.CommandMailbox.Type3.BusAddress =
6701 DataTransferBufferDMA;
6702 }
6703 DAC960_ExecuteCommand(Command);
6704 CommandStatus = Command->V1.CommandStatus;
6705 spin_lock_irqsave(&Controller->queue_lock, flags);
6706 DAC960_DeallocateCommand(Command);
6707 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6708 if (DataTransferLength > 0)
6709 {
6710 if (copy_to_user(UserCommand.DataTransferBuffer,
6711 DataTransferBuffer, DataTransferLength)) {
6712 ErrorCode = -EFAULT;
6713 goto Failure1;
6714 }
6715 }
6716 if (CommandOpcode == DAC960_V1_DCDB)
6717 {
6718 /*
6719 I don't believe Target or Channel in the DCDB_IOBUF
6720 should be any different from the contents of DCDB.
6721 */
6722 Controller->V1.DirectCommandActive[DCDB.Channel]
6723 [DCDB.TargetID] = false;
6724 if (copy_to_user(UserCommand.DCDB, DCDB_IOBUF,
6725 sizeof(DAC960_V1_DCDB_T))) {
6726 ErrorCode = -EFAULT;
6727 goto Failure1;
6728 }
6729 }
6730 ErrorCode = CommandStatus;
6731 Failure1:
6732 if (DataTransferBuffer != NULL)
6733 pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
6734 DataTransferBuffer, DataTransferBufferDMA);
6735 if (DCDB_IOBUF != NULL)
6736 pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
6737 DCDB_IOBUF, DCDB_IOBUFDMA);
6738 Failure1a:
6739 return ErrorCode;
6740 }
6741 case DAC960_IOCTL_V2_EXECUTE_COMMAND:
6742 {
6743 DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
6744 (DAC960_V2_UserCommand_T __user *) Argument;
6745 DAC960_V2_UserCommand_T UserCommand;
6746 DAC960_Controller_T *Controller;
6747 DAC960_Command_T *Command = NULL;
6748 DAC960_V2_CommandMailbox_T *CommandMailbox;
6749 DAC960_V2_CommandStatus_T CommandStatus;
6750 unsigned long flags;
6751 int ControllerNumber, DataTransferLength;
6752 int DataTransferResidue, RequestSenseLength;
6753 unsigned char *DataTransferBuffer = NULL;
6754 dma_addr_t DataTransferBufferDMA;
6755 unsigned char *RequestSenseBuffer = NULL;
6756 dma_addr_t RequestSenseBufferDMA;
6757 if (UserSpaceUserCommand == NULL) return -EINVAL;
6758 if (copy_from_user(&UserCommand, UserSpaceUserCommand,
6759 sizeof(DAC960_V2_UserCommand_T))) {
6760 ErrorCode = -EFAULT;
6761 goto Failure2a;
6762 }
6763 ControllerNumber = UserCommand.ControllerNumber;
6764 if (ControllerNumber < 0 ||
6765 ControllerNumber > DAC960_ControllerCount - 1)
6766 return -ENXIO;
6767 Controller = DAC960_Controllers[ControllerNumber];
6768 if (Controller == NULL) return -ENXIO;
6769 if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
6770 DataTransferLength = UserCommand.DataTransferLength;
6771 if (DataTransferLength > 0)
6772 {
6773 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6774 DataTransferLength, &DataTransferBufferDMA);
6775 if (DataTransferBuffer == NULL) return -ENOMEM;
6776 memset(DataTransferBuffer, 0, DataTransferLength);
6777 }
6778 else if (DataTransferLength < 0)
6779 {
6780 DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
6781 -DataTransferLength, &DataTransferBufferDMA);
6782 if (DataTransferBuffer == NULL) return -ENOMEM;
6783 if (copy_from_user(DataTransferBuffer,
6784 UserCommand.DataTransferBuffer,
6785 -DataTransferLength)) {
6786 ErrorCode = -EFAULT;
6787 goto Failure2;
6788 }
6789 }
6790 RequestSenseLength = UserCommand.RequestSenseLength;
6791 if (RequestSenseLength > 0)
6792 {
6793 RequestSenseBuffer = pci_alloc_consistent(Controller->PCIDevice,
6794 RequestSenseLength, &RequestSenseBufferDMA);
6795 if (RequestSenseBuffer == NULL)
6796 {
6797 ErrorCode = -ENOMEM;
6798 goto Failure2;
6799 }
6800 memset(RequestSenseBuffer, 0, RequestSenseLength);
6801 }
6802 spin_lock_irqsave(&Controller->queue_lock, flags);
6803 while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
6804 DAC960_WaitForCommand(Controller);
6805 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6806 DAC960_V2_ClearCommand(Command);
6807 Command->CommandType = DAC960_ImmediateCommand;
6808 CommandMailbox = &Command->V2.CommandMailbox;
6809 memcpy(CommandMailbox, &UserCommand.CommandMailbox,
6810 sizeof(DAC960_V2_CommandMailbox_T));
6811 CommandMailbox->Common.CommandControlBits
6812 .AdditionalScatterGatherListMemory = false;
6813 CommandMailbox->Common.CommandControlBits
6814 .NoAutoRequestSense = true;
6815 CommandMailbox->Common.DataTransferSize = 0;
6816 CommandMailbox->Common.DataTransferPageNumber = 0;
6817 memset(&CommandMailbox->Common.DataTransferMemoryAddress, 0,
6818 sizeof(DAC960_V2_DataTransferMemoryAddress_T));
6819 if (DataTransferLength != 0)
6820 {
6821 if (DataTransferLength > 0)
6822 {
6823 CommandMailbox->Common.CommandControlBits
6824 .DataTransferControllerToHost = true;
6825 CommandMailbox->Common.DataTransferSize = DataTransferLength;
6826 }
6827 else
6828 {
6829 CommandMailbox->Common.CommandControlBits
6830 .DataTransferControllerToHost = false;
6831 CommandMailbox->Common.DataTransferSize = -DataTransferLength;
6832 }
6833 CommandMailbox->Common.DataTransferMemoryAddress
6834 .ScatterGatherSegments[0]
6835 .SegmentDataPointer = DataTransferBufferDMA;
6836 CommandMailbox->Common.DataTransferMemoryAddress
6837 .ScatterGatherSegments[0]
6838 .SegmentByteCount =
6839 CommandMailbox->Common.DataTransferSize;
6840 }
6841 if (RequestSenseLength > 0)
6842 {
6843 CommandMailbox->Common.CommandControlBits
6844 .NoAutoRequestSense = false;
6845 CommandMailbox->Common.RequestSenseSize = RequestSenseLength;
6846 CommandMailbox->Common.RequestSenseBusAddress =
6847 RequestSenseBufferDMA;
6848 }
6849 DAC960_ExecuteCommand(Command);
6850 CommandStatus = Command->V2.CommandStatus;
6851 RequestSenseLength = Command->V2.RequestSenseLength;
6852 DataTransferResidue = Command->V2.DataTransferResidue;
6853 spin_lock_irqsave(&Controller->queue_lock, flags);
6854 DAC960_DeallocateCommand(Command);
6855 spin_unlock_irqrestore(&Controller->queue_lock, flags);
6856 if (RequestSenseLength > UserCommand.RequestSenseLength)
6857 RequestSenseLength = UserCommand.RequestSenseLength;
6858 if (copy_to_user(&UserSpaceUserCommand->DataTransferLength,
6859 &DataTransferResidue,
6860 sizeof(DataTransferResidue))) {
6861 ErrorCode = -EFAULT;
6862 goto Failure2;
6863 }
6864 if (copy_to_user(&UserSpaceUserCommand->RequestSenseLength,
6865 &RequestSenseLength, sizeof(RequestSenseLength))) {
6866 ErrorCode = -EFAULT;
6867 goto Failure2;
6868 }
6869 if (DataTransferLength > 0)
6870 {
6871 if (copy_to_user(UserCommand.DataTransferBuffer,
6872 DataTransferBuffer, DataTransferLength)) {
6873 ErrorCode = -EFAULT;
6874 goto Failure2;
6875 }
6876 }
6877 if (RequestSenseLength > 0)
6878 {
6879 if (copy_to_user(UserCommand.RequestSenseBuffer,
6880 RequestSenseBuffer, RequestSenseLength)) {
6881 ErrorCode = -EFAULT;
6882 goto Failure2;
6883 }
6884 }
6885 ErrorCode = CommandStatus;
6886 Failure2:
 if (DataTransferBuffer != NULL)
6887 pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
6888 DataTransferBuffer, DataTransferBufferDMA);
6889 if (RequestSenseBuffer != NULL)
6890 pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
6891 RequestSenseBuffer, RequestSenseBufferDMA);
6892 Failure2a:
6893 return ErrorCode;
6894 }
6895 case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
6896 {
6897 DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
6898 (DAC960_V2_GetHealthStatus_T __user *) Argument;
6899 DAC960_V2_GetHealthStatus_T GetHealthStatus;
6900 DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
6901 DAC960_Controller_T *Controller;
6902 int ControllerNumber;
6903 if (UserSpaceGetHealthStatus == NULL) return -EINVAL;
6904 if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
6905 sizeof(DAC960_V2_GetHealthStatus_T)))
6906 return -EFAULT;
6907 ControllerNumber = GetHealthStatus.ControllerNumber;
6908 if (ControllerNumber < 0 ||
6909 ControllerNumber > DAC960_ControllerCount - 1)
6910 return -ENXIO;
6911 Controller = DAC960_Controllers[ControllerNumber];
6912 if (Controller == NULL) return -ENXIO;
6913 if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
6914 if (copy_from_user(&HealthStatusBuffer,
6915 GetHealthStatus.HealthStatusBuffer,
6916 sizeof(DAC960_V2_HealthStatusBuffer_T)))
6917 return -EFAULT;
6918 while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
6919 == HealthStatusBuffer.StatusChangeCounter &&
6920 Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
6921 == HealthStatusBuffer.NextEventSequenceNumber)
6922 {
6923 interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
6924 DAC960_MonitoringTimerInterval);
6925 if (signal_pending(current)) return -EINTR;
6926 }
6927 if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
6928 Controller->V2.HealthStatusBuffer,
6929 sizeof(DAC960_V2_HealthStatusBuffer_T)))
6930 return -EFAULT;
6931 return 0;
6932 }
6933 }
6934 return -EINVAL;
6935}
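
/*
  A minimal user space sketch of this interface, assuming the driver was
  built with DAC960_GAM_MINOR defined and that the misc device node is
  available as /dev/dac960_gam (CAP_SYS_ADMIN is required):

      int fd = open("/dev/dac960_gam", O_RDONLY);
      int Count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);

  The remaining requests (DAC960_IOCTL_GET_CONTROLLER_INFO,
  DAC960_IOCTL_V1_EXECUTE_COMMAND, DAC960_IOCTL_V2_EXECUTE_COMMAND and
  DAC960_IOCTL_V2_GET_HEALTH_STATUS) take a pointer to the corresponding
  user command or health status structure as their argument.
*/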
6936
6937static struct file_operations DAC960_gam_fops = {
6938 .owner = THIS_MODULE,
6939 .ioctl = DAC960_gam_ioctl
6940};
6941
6942static struct miscdevice DAC960_gam_dev = {
6943 DAC960_GAM_MINOR,
6944 "dac960_gam",
6945 &DAC960_gam_fops
6946};
6947
6948static int DAC960_gam_init(void)
6949{
6950 int ret;
6951
6952 ret = misc_register(&DAC960_gam_dev);
6953 if (ret)
6954 printk(KERN_ERR "DAC960_gam: can't misc_register on minor %d\n", DAC960_GAM_MINOR);
6955 return ret;
6956}
6957
6958static void DAC960_gam_cleanup(void)
6959{
6960 misc_deregister(&DAC960_gam_dev);
6961}
6962
6963#endif /* DAC960_GAM_MINOR */
6964
6965static struct DAC960_privdata DAC960_BA_privdata = {
6966 .HardwareType = DAC960_BA_Controller,
6967 .FirmwareType = DAC960_V2_Controller,
6968 .InterruptHandler = DAC960_BA_InterruptHandler,
6969 .MemoryWindowSize = DAC960_BA_RegisterWindowSize,
6970};
6971
6972static struct DAC960_privdata DAC960_LP_privdata = {
6973 .HardwareType = DAC960_LP_Controller,
6974 .FirmwareType = DAC960_V2_Controller,
6975 .InterruptHandler = DAC960_LP_InterruptHandler,
6976 .MemoryWindowSize = DAC960_LP_RegisterWindowSize,
6977};
6978
6979static struct DAC960_privdata DAC960_LA_privdata = {
6980 .HardwareType = DAC960_LA_Controller,
6981 .FirmwareType = DAC960_V1_Controller,
6982 .InterruptHandler = DAC960_LA_InterruptHandler,
6983 .MemoryWindowSize = DAC960_LA_RegisterWindowSize,
6984};
6985
6986static struct DAC960_privdata DAC960_PG_privdata = {
6987 .HardwareType = DAC960_PG_Controller,
6988 .FirmwareType = DAC960_V1_Controller,
6989 .InterruptHandler = DAC960_PG_InterruptHandler,
6990 .MemoryWindowSize = DAC960_PG_RegisterWindowSize,
6991};
6992
6993static struct DAC960_privdata DAC960_PD_privdata = {
6994 .HardwareType = DAC960_PD_Controller,
6995 .FirmwareType = DAC960_V1_Controller,
6996 .InterruptHandler = DAC960_PD_InterruptHandler,
6997 .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
6998};
6999
7000static struct DAC960_privdata DAC960_P_privdata = {
7001 .HardwareType = DAC960_P_Controller,
7002 .FirmwareType = DAC960_V1_Controller,
7003 .InterruptHandler = DAC960_P_InterruptHandler,
7004 .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
7005};
7006
7007static struct pci_device_id DAC960_id_table[] = {
7008 {
7009 .vendor = PCI_VENDOR_ID_MYLEX,
7010 .device = PCI_DEVICE_ID_MYLEX_DAC960_BA,
7011 .subvendor = PCI_ANY_ID,
7012 .subdevice = PCI_ANY_ID,
7013 .driver_data = (unsigned long) &DAC960_BA_privdata,
7014 },
7015 {
7016 .vendor = PCI_VENDOR_ID_MYLEX,
7017 .device = PCI_DEVICE_ID_MYLEX_DAC960_LP,
7018 .subvendor = PCI_ANY_ID,
7019 .subdevice = PCI_ANY_ID,
7020 .driver_data = (unsigned long) &DAC960_LP_privdata,
7021 },
7022 {
7023 .vendor = PCI_VENDOR_ID_DEC,
7024 .device = PCI_DEVICE_ID_DEC_21285,
7025 .subvendor = PCI_VENDOR_ID_MYLEX,
7026 .subdevice = PCI_DEVICE_ID_MYLEX_DAC960_LA,
7027 .driver_data = (unsigned long) &DAC960_LA_privdata,
7028 },
7029 {
7030 .vendor = PCI_VENDOR_ID_MYLEX,
7031 .device = PCI_DEVICE_ID_MYLEX_DAC960_PG,
7032 .subvendor = PCI_ANY_ID,
7033 .subdevice = PCI_ANY_ID,
7034 .driver_data = (unsigned long) &DAC960_PG_privdata,
7035 },
7036 {
7037 .vendor = PCI_VENDOR_ID_MYLEX,
7038 .device = PCI_DEVICE_ID_MYLEX_DAC960_PD,
7039 .subvendor = PCI_ANY_ID,
7040 .subdevice = PCI_ANY_ID,
7041 .driver_data = (unsigned long) &DAC960_PD_privdata,
7042 },
7043 {
7044 .vendor = PCI_VENDOR_ID_MYLEX,
7045 .device = PCI_DEVICE_ID_MYLEX_DAC960_P,
7046 .subvendor = PCI_ANY_ID,
7047 .subdevice = PCI_ANY_ID,
7048 .driver_data = (unsigned long) &DAC960_P_privdata,
7049 },
7050 {0, },
7051};
7052
7053MODULE_DEVICE_TABLE(pci, DAC960_id_table);
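
/*
  Each entry's driver_data points at the DAC960_privdata describing the
  matched board, selecting its Hardware Type, Firmware Type, Interrupt
  Handler and Register Window Size for use when the device is probed.
*/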
7054
7055static struct pci_driver DAC960_pci_driver = {
7056 .name = "DAC960",
7057 .id_table = DAC960_id_table,
7058 .probe = DAC960_Probe,
7059 .remove = DAC960_Remove,
7060};
7061
7062static int DAC960_init_module(void)
7063{
7064 int ret;
7065
7066 ret = pci_module_init(&DAC960_pci_driver);
7067#ifdef DAC960_GAM_MINOR
7068 if (!ret)
7069 DAC960_gam_init();
7070#endif
7071 return ret;
7072}
7073
7074static void DAC960_cleanup_module(void)
7075{
7076 int i;
7077
7078#ifdef DAC960_GAM_MINOR
7079 DAC960_gam_cleanup();
7080#endif
7081
7082 for (i = 0; i < DAC960_ControllerCount; i++) {
7083 DAC960_Controller_T *Controller = DAC960_Controllers[i];
7084 if (Controller == NULL)
7085 continue;
7086 DAC960_FinalizeController(Controller);
7087 }
7088 if (DAC960_ProcDirectoryEntry != NULL) {
7089 remove_proc_entry("rd/status", NULL);
7090 remove_proc_entry("rd", NULL);
7091 }
7092 DAC960_ControllerCount = 0;
7093 pci_unregister_driver(&DAC960_pci_driver);
7094}
7095
7096module_init(DAC960_init_module);
7097module_exit(DAC960_cleanup_module);
7098
7099MODULE_LICENSE("GPL");
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
new file mode 100644
index 000000000000..d5e8e7190c90
--- /dev/null
+++ b/drivers/block/DAC960.h
@@ -0,0 +1,4114 @@
1/*
2
3 Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
4
5 Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
6
7 This program is free software; you may redistribute and/or modify it under
8 the terms of the GNU General Public License Version 2 as published by the
9 Free Software Foundation.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
13 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for complete details.
15
16 The author respectfully requests that any modifications to this software be
17 sent directly to him for evaluation and testing.
18
19*/
20
21
22/*
23 Define the maximum number of DAC960 Controllers supported by this driver.
24*/
25
26#define DAC960_MaxControllers 8
27
28
29/*
30 Define the maximum number of Controller Channels supported by DAC960
31 V1 and V2 Firmware Controllers.
32*/
33
34#define DAC960_V1_MaxChannels 3
35#define DAC960_V2_MaxChannels 4
36
37
38/*
39 Define the maximum number of Targets per Channel supported by DAC960
40 V1 and V2 Firmware Controllers.
41*/
42
43#define DAC960_V1_MaxTargets 16
44#define DAC960_V2_MaxTargets 128
45
46
47/*
48 Define the maximum number of Logical Drives supported by DAC960
49 V1 and V2 Firmware Controllers.
50*/
51
52#define DAC960_MaxLogicalDrives 32
53
54
55/*
56 Define the maximum number of Physical Devices supported by DAC960
57 V1 and V2 Firmware Controllers.
58*/
59
60#define DAC960_V1_MaxPhysicalDevices 45
61#define DAC960_V2_MaxPhysicalDevices 272
62
63/*
64 Define the PCI DMA mask supported by DAC960 V1 and V2 Firmware Controllers
65 */
66
67#define DAC690_V1_PciDmaMask 0xffffffff
68#define DAC690_V2_PciDmaMask 0xffffffffffffffffULL
69
70/*
71 Define a Boolean data type.
72*/
73
74typedef enum { false, true } __attribute__ ((packed)) boolean;
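
/*
  The packed attribute makes boolean a single byte wide, so it can also be
  used for the single-bit flags in the structure definitions below.
*/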
75
76
77/*
78 Define a 32/64 bit I/O Address data type.
79*/
80
81typedef unsigned long DAC960_IO_Address_T;
82
83
84/*
85 Define a 32/64 bit PCI Bus Address data type.
86*/
87
88typedef unsigned long DAC960_PCI_Address_T;
89
90
91/*
92 Define a 32 bit Bus Address data type.
93*/
94
95typedef unsigned int DAC960_BusAddress32_T;
96
97
98/*
99 Define a 64 bit Bus Address data type.
100*/
101
102typedef unsigned long long DAC960_BusAddress64_T;
103
104
105/*
106 Define a 32 bit Byte Count data type.
107*/
108
109typedef unsigned int DAC960_ByteCount32_T;
110
111
112/*
113 Define a 64 bit Byte Count data type.
114*/
115
116typedef unsigned long long DAC960_ByteCount64_T;
117
118
119/*
120 dma_loaf is used by helper routines to divide a region of
121 dma mapped memory into smaller pieces, where those pieces
122 are not of uniform size.
123 */
124
125struct dma_loaf {
126 void *cpu_base;
127 dma_addr_t dma_base;
128 size_t length;
129 void *cpu_free;
130 dma_addr_t dma_free;
131};
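
/*
  cpu_free and dma_free are intended to track the next unallocated byte of
  the loaf; each piece handed out advances both by the size of that piece,
  keeping the CPU and DMA views of the region in step.
*/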
132
133/*
134 Define the SCSI INQUIRY Standard Data structure.
135*/
136
137typedef struct DAC960_SCSI_Inquiry
138{
139 unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
140 unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
141 unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
142 boolean RMB:1; /* Byte 1 Bit 7 */
143 unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
144 unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
145 unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
146 unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
147 unsigned char :2; /* Byte 3 Bits 4-5 */
148 boolean TrmIOP:1; /* Byte 3 Bit 6 */
149 boolean AENC:1; /* Byte 3 Bit 7 */
150 unsigned char AdditionalLength; /* Byte 4 */
151 unsigned char :8; /* Byte 5 */
152 unsigned char :8; /* Byte 6 */
153 boolean SftRe:1; /* Byte 7 Bit 0 */
154 boolean CmdQue:1; /* Byte 7 Bit 1 */
155 boolean :1; /* Byte 7 Bit 2 */
156 boolean Linked:1; /* Byte 7 Bit 3 */
157 boolean Sync:1; /* Byte 7 Bit 4 */
158 boolean WBus16:1; /* Byte 7 Bit 5 */
159 boolean WBus32:1; /* Byte 7 Bit 6 */
160 boolean RelAdr:1; /* Byte 7 Bit 7 */
161 unsigned char VendorIdentification[8]; /* Bytes 8-15 */
162 unsigned char ProductIdentification[16]; /* Bytes 16-31 */
163 unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
164}
165DAC960_SCSI_Inquiry_T;
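
/*
  The Byte/Bit comments above give the intended SCSI wire layout; on little
  endian machines, where gcc allocates bit fields starting from the least
  significant bit, the declared order matches that layout.
*/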
166
167
168/*
169 Define the SCSI INQUIRY Unit Serial Number structure.
170*/
171
172typedef struct DAC960_SCSI_Inquiry_UnitSerialNumber
173{
174 unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
175 unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
176 unsigned char PageCode; /* Byte 1 */
177 unsigned char :8; /* Byte 2 */
178 unsigned char PageLength; /* Byte 3 */
179 unsigned char ProductSerialNumber[28]; /* Bytes 4-31 */
180}
181DAC960_SCSI_Inquiry_UnitSerialNumber_T;
182
183
184/*
185 Define the SCSI REQUEST SENSE Sense Key type.
186*/
187
188typedef enum
189{
190 DAC960_SenseKey_NoSense = 0x0,
191 DAC960_SenseKey_RecoveredError = 0x1,
192 DAC960_SenseKey_NotReady = 0x2,
193 DAC960_SenseKey_MediumError = 0x3,
194 DAC960_SenseKey_HardwareError = 0x4,
195 DAC960_SenseKey_IllegalRequest = 0x5,
196 DAC960_SenseKey_UnitAttention = 0x6,
197 DAC960_SenseKey_DataProtect = 0x7,
198 DAC960_SenseKey_BlankCheck = 0x8,
199 DAC960_SenseKey_VendorSpecific = 0x9,
200 DAC960_SenseKey_CopyAborted = 0xA,
201 DAC960_SenseKey_AbortedCommand = 0xB,
202 DAC960_SenseKey_Equal = 0xC,
203 DAC960_SenseKey_VolumeOverflow = 0xD,
204 DAC960_SenseKey_Miscompare = 0xE,
205 DAC960_SenseKey_Reserved = 0xF
206}
207__attribute__ ((packed))
208DAC960_SCSI_RequestSenseKey_T;
209
210
211/*
212 Define the SCSI REQUEST SENSE structure.
213*/
214
215typedef struct DAC960_SCSI_RequestSense
216{
217 unsigned char ErrorCode:7; /* Byte 0 Bits 0-6 */
218 boolean Valid:1; /* Byte 0 Bit 7 */
219 unsigned char SegmentNumber; /* Byte 1 */
220 DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 2 Bits 0-3 */
221 unsigned char :1; /* Byte 2 Bit 4 */
222 boolean ILI:1; /* Byte 2 Bit 5 */
223 boolean EOM:1; /* Byte 2 Bit 6 */
224 boolean Filemark:1; /* Byte 2 Bit 7 */
225 unsigned char Information[4]; /* Bytes 3-6 */
226 unsigned char AdditionalSenseLength; /* Byte 7 */
227 unsigned char CommandSpecificInformation[4]; /* Bytes 8-11 */
228 unsigned char AdditionalSenseCode; /* Byte 12 */
229 unsigned char AdditionalSenseCodeQualifier; /* Byte 13 */
230}
231DAC960_SCSI_RequestSense_T;
232
233
234/*
235 Define the DAC960 V1 Firmware Command Opcodes.
236*/
237
238typedef enum
239{
240 /* I/O Commands */
241 DAC960_V1_ReadExtended = 0x33,
242 DAC960_V1_WriteExtended = 0x34,
243 DAC960_V1_ReadAheadExtended = 0x35,
244 DAC960_V1_ReadExtendedWithScatterGather = 0xB3,
245 DAC960_V1_WriteExtendedWithScatterGather = 0xB4,
246 DAC960_V1_Read = 0x36,
247 DAC960_V1_ReadWithScatterGather = 0xB6,
248 DAC960_V1_Write = 0x37,
249 DAC960_V1_WriteWithScatterGather = 0xB7,
250 DAC960_V1_DCDB = 0x04,
251 DAC960_V1_DCDBWithScatterGather = 0x84,
252 DAC960_V1_Flush = 0x0A,
253 /* Controller Status Related Commands */
254 DAC960_V1_Enquiry = 0x53,
255 DAC960_V1_Enquiry2 = 0x1C,
256 DAC960_V1_GetLogicalDriveElement = 0x55,
257 DAC960_V1_GetLogicalDriveInformation = 0x19,
258 DAC960_V1_IOPortRead = 0x39,
259 DAC960_V1_IOPortWrite = 0x3A,
260 DAC960_V1_GetSDStats = 0x3E,
261 DAC960_V1_GetPDStats = 0x3F,
262 DAC960_V1_PerformEventLogOperation = 0x72,
263 /* Device Related Commands */
264 DAC960_V1_StartDevice = 0x10,
265 DAC960_V1_GetDeviceState = 0x50,
266 DAC960_V1_StopChannel = 0x13,
267 DAC960_V1_StartChannel = 0x12,
268 DAC960_V1_ResetChannel = 0x1A,
269 /* Commands Associated with Data Consistency and Errors */
270 DAC960_V1_Rebuild = 0x09,
271 DAC960_V1_RebuildAsync = 0x16,
272 DAC960_V1_CheckConsistency = 0x0F,
273 DAC960_V1_CheckConsistencyAsync = 0x1E,
274 DAC960_V1_RebuildStat = 0x0C,
275 DAC960_V1_GetRebuildProgress = 0x27,
276 DAC960_V1_RebuildControl = 0x1F,
277 DAC960_V1_ReadBadBlockTable = 0x0B,
278 DAC960_V1_ReadBadDataTable = 0x25,
279 DAC960_V1_ClearBadDataTable = 0x26,
280 DAC960_V1_GetErrorTable = 0x17,
281 DAC960_V1_AddCapacityAsync = 0x2A,
282 DAC960_V1_BackgroundInitializationControl = 0x2B,
283 /* Configuration Related Commands */
284 DAC960_V1_ReadConfig2 = 0x3D,
285 DAC960_V1_WriteConfig2 = 0x3C,
286 DAC960_V1_ReadConfigurationOnDisk = 0x4A,
287 DAC960_V1_WriteConfigurationOnDisk = 0x4B,
288 DAC960_V1_ReadConfiguration = 0x4E,
289 DAC960_V1_ReadBackupConfiguration = 0x4D,
290 DAC960_V1_WriteConfiguration = 0x4F,
291 DAC960_V1_AddConfiguration = 0x4C,
292 DAC960_V1_ReadConfigurationLabel = 0x48,
293 DAC960_V1_WriteConfigurationLabel = 0x49,
294 /* Firmware Upgrade Related Commands */
295 DAC960_V1_LoadImage = 0x20,
296 DAC960_V1_StoreImage = 0x21,
297 DAC960_V1_ProgramImage = 0x22,
298 /* Diagnostic Commands */
299 DAC960_V1_SetDiagnosticMode = 0x31,
300 DAC960_V1_RunDiagnostic = 0x32,
301 /* Subsystem Service Commands */
302 DAC960_V1_GetSubsystemData = 0x70,
303 DAC960_V1_SetSubsystemParameters = 0x71,
304 /* Version 2.xx Firmware Commands */
305 DAC960_V1_Enquiry_Old = 0x05,
306 DAC960_V1_GetDeviceState_Old = 0x14,
307 DAC960_V1_Read_Old = 0x02,
308 DAC960_V1_Write_Old = 0x03,
309 DAC960_V1_ReadWithScatterGather_Old = 0x82,
310 DAC960_V1_WriteWithScatterGather_Old = 0x83
311}
312__attribute__ ((packed))
313DAC960_V1_CommandOpcode_T;
314
315
316/*
317 Define the DAC960 V1 Firmware Command Identifier type.
318*/
319
320typedef unsigned char DAC960_V1_CommandIdentifier_T;
321
322
323/*
324 Define the DAC960 V1 Firmware Command Status Codes.
325*/
326
327#define DAC960_V1_NormalCompletion 0x0000 /* Common */
328#define DAC960_V1_CheckConditionReceived 0x0002 /* Common */
329#define DAC960_V1_NoDeviceAtAddress 0x0102 /* Common */
330#define DAC960_V1_InvalidDeviceAddress 0x0105 /* Common */
331#define DAC960_V1_InvalidParameter 0x0105 /* Common */
332#define DAC960_V1_IrrecoverableDataError 0x0001 /* I/O */
333#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */
334#define DAC960_V1_AccessBeyondEndOfLogicalDrive 0x0105 /* I/O */
335#define DAC960_V1_BadDataEncountered 0x010C /* I/O */
336#define DAC960_V1_DeviceBusy 0x0008 /* DCDB */
337#define DAC960_V1_DeviceNonresponsive 0x000E /* DCDB */
338#define DAC960_V1_CommandTerminatedAbnormally 0x000F /* DCDB */
339#define DAC960_V1_UnableToStartDevice 0x0002 /* Device */
340#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */
341#define DAC960_V1_ChannelBusy 0x0106 /* Device */
342#define DAC960_V1_ChannelNotStopped 0x0002 /* Device */
343#define DAC960_V1_AttemptToRebuildOnlineDrive 0x0002 /* Consistency */
344#define DAC960_V1_RebuildBadBlocksEncountered 0x0003 /* Consistency */
345#define DAC960_V1_NewDiskFailedDuringRebuild 0x0004 /* Consistency */
346#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */
347#define DAC960_V1_DependentDiskIsDead 0x0002 /* Consistency */
348#define DAC960_V1_InconsistentBlocksFound 0x0003 /* Consistency */
349#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */
350#define DAC960_V1_NoRebuildOrCheckInProgress 0x0105 /* Consistency */
351#define DAC960_V1_RebuildInProgress_DataValid 0x0000 /* Consistency */
352#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */
353#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003 /* Consistency */
354#define DAC960_V1_RebuildFailed_NewDriveFailed 0x0004 /* Consistency */
355#define DAC960_V1_RebuildSuccessful 0x0100 /* Consistency */
356#define DAC960_V1_RebuildSuccessfullyTerminated 0x0107 /* Consistency */
357#define DAC960_V1_BackgroundInitSuccessful 0x0100 /* Consistency */
358#define DAC960_V1_BackgroundInitAborted 0x0005 /* Consistency */
359#define DAC960_V1_NoBackgroundInitInProgress 0x0105 /* Consistency */
360#define DAC960_V1_AddCapacityInProgress 0x0004 /* Consistency */
361#define DAC960_V1_AddCapacityFailedOrSuspended 0x00F4 /* Consistency */
362#define DAC960_V1_Config2ChecksumError 0x0002 /* Configuration */
363#define DAC960_V1_ConfigurationSuspended 0x0106 /* Configuration */
364#define DAC960_V1_FailedToConfigureNVRAM 0x0105 /* Configuration */
365#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */
366#define DAC960_V1_SubsystemNotInstalled 0x0001 /* Subsystem */
367#define DAC960_V1_SubsystemFailed 0x0002 /* Subsystem */
368#define DAC960_V1_SubsystemBusy 0x0106 /* Subsystem */
369
370typedef unsigned short DAC960_V1_CommandStatus_T;
371
372
373/*
374 Define the DAC960 V1 Firmware Enquiry Command reply structure.
375*/
376
377typedef struct DAC960_V1_Enquiry
378{
379 unsigned char NumberOfLogicalDrives; /* Byte 0 */
380 unsigned int :24; /* Bytes 1-3 */
381 unsigned int LogicalDriveSizes[32]; /* Bytes 4-131 */
382 unsigned short FlashAge; /* Bytes 132-133 */
383 struct {
384 boolean DeferredWriteError:1; /* Byte 134 Bit 0 */
385 boolean BatteryLow:1; /* Byte 134 Bit 1 */
386 unsigned char :6; /* Byte 134 Bits 2-7 */
387 } StatusFlags;
388 unsigned char :8; /* Byte 135 */
389 unsigned char MinorFirmwareVersion; /* Byte 136 */
390 unsigned char MajorFirmwareVersion; /* Byte 137 */
391 enum {
392 DAC960_V1_NoStandbyRebuildOrCheckInProgress = 0x00,
393 DAC960_V1_StandbyRebuildInProgress = 0x01,
394 DAC960_V1_BackgroundRebuildInProgress = 0x02,
395 DAC960_V1_BackgroundCheckInProgress = 0x03,
396 DAC960_V1_StandbyRebuildCompletedWithError = 0xFF,
397 DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed = 0xF0,
398 DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed = 0xF1,
399 DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses = 0xF2,
400 DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated = 0xF3
401 } __attribute__ ((packed)) RebuildFlag; /* Byte 138 */
402 unsigned char MaxCommands; /* Byte 139 */
403 unsigned char OfflineLogicalDriveCount; /* Byte 140 */
404 unsigned char :8; /* Byte 141 */
405 unsigned short EventLogSequenceNumber; /* Bytes 142-143 */
406 unsigned char CriticalLogicalDriveCount; /* Byte 144 */
407 unsigned int :24; /* Bytes 145-147 */
408 unsigned char DeadDriveCount; /* Byte 148 */
409 unsigned char :8; /* Byte 149 */
410 unsigned char RebuildCount; /* Byte 150 */
411 struct {
412 unsigned char :3; /* Byte 151 Bits 0-2 */
413 boolean BatteryBackupUnitPresent:1; /* Byte 151 Bit 3 */
414 unsigned char :3; /* Byte 151 Bits 4-6 */
415 unsigned char :1; /* Byte 151 Bit 7 */
416 } MiscFlags;
417 struct {
418 unsigned char TargetID;
419 unsigned char Channel;
420  } DeadDrives[21];					/* Bytes 152-193 */
421  unsigned char Reserved[62];			/* Bytes 194-255 */
422}
423__attribute__ ((packed))
424DAC960_V1_Enquiry_T;
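
/*
  Illustrative sketch (not part of the original header): a compile-time check
  that the packed Enquiry reply is exactly 256 bytes, as the byte-offset
  comments above imply.  This assumes GCC-style bitfield packing and uses the
  negative-array-size trick; the typedef name is hypothetical.
*/

typedef char DAC960_V1_Enquiry_SizeCheck_T
	[(sizeof(DAC960_V1_Enquiry_T) == 256) ? 1 : -1];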
425
426
427/*
428 Define the DAC960 V1 Firmware Enquiry2 Command reply structure.
429*/
430
431typedef struct DAC960_V1_Enquiry2
432{
433 struct {
434 enum {
435 DAC960_V1_P_PD_PU = 0x01,
436 DAC960_V1_PL = 0x02,
437 DAC960_V1_PG = 0x10,
438 DAC960_V1_PJ = 0x11,
439 DAC960_V1_PR = 0x12,
440 DAC960_V1_PT = 0x13,
441 DAC960_V1_PTL0 = 0x14,
442 DAC960_V1_PRL = 0x15,
443 DAC960_V1_PTL1 = 0x16,
444 DAC960_V1_1164P = 0x20
445 } __attribute__ ((packed)) SubModel; /* Byte 0 */
446 unsigned char ActualChannels; /* Byte 1 */
447 enum {
448 DAC960_V1_FiveChannelBoard = 0x01,
449 DAC960_V1_ThreeChannelBoard = 0x02,
450 DAC960_V1_TwoChannelBoard = 0x03,
451 DAC960_V1_ThreeChannelASIC_DAC = 0x04
452 } __attribute__ ((packed)) Model; /* Byte 2 */
453 enum {
454 DAC960_V1_EISA_Controller = 0x01,
455 DAC960_V1_MicroChannel_Controller = 0x02,
456 DAC960_V1_PCI_Controller = 0x03,
457 DAC960_V1_SCSItoSCSI_Controller = 0x08
458 } __attribute__ ((packed)) ProductFamily; /* Byte 3 */
459 } HardwareID; /* Bytes 0-3 */
460 /* MajorVersion.MinorVersion-FirmwareType-TurnID */
461 struct {
462 unsigned char MajorVersion; /* Byte 4 */
463 unsigned char MinorVersion; /* Byte 5 */
464 unsigned char TurnID; /* Byte 6 */
465 char FirmwareType; /* Byte 7 */
466 } FirmwareID; /* Bytes 4-7 */
467 unsigned char :8; /* Byte 8 */
468 unsigned int :24; /* Bytes 9-11 */
469 unsigned char ConfiguredChannels; /* Byte 12 */
470 unsigned char ActualChannels; /* Byte 13 */
471 unsigned char MaxTargets; /* Byte 14 */
472 unsigned char MaxTags; /* Byte 15 */
473 unsigned char MaxLogicalDrives; /* Byte 16 */
474 unsigned char MaxArms; /* Byte 17 */
475 unsigned char MaxSpans; /* Byte 18 */
476 unsigned char :8; /* Byte 19 */
477 unsigned int :32; /* Bytes 20-23 */
478 unsigned int MemorySize; /* Bytes 24-27 */
479 unsigned int CacheSize; /* Bytes 28-31 */
480 unsigned int FlashMemorySize; /* Bytes 32-35 */
481 unsigned int NonVolatileMemorySize; /* Bytes 36-39 */
482 struct {
483 enum {
484 DAC960_V1_RamType_DRAM = 0x0,
485 DAC960_V1_RamType_EDO = 0x1,
486 DAC960_V1_RamType_SDRAM = 0x2,
487 DAC960_V1_RamType_Last = 0x7
488 } __attribute__ ((packed)) RamType:3; /* Byte 40 Bits 0-2 */
489 enum {
490 DAC960_V1_ErrorCorrection_None = 0x0,
491 DAC960_V1_ErrorCorrection_Parity = 0x1,
492 DAC960_V1_ErrorCorrection_ECC = 0x2,
493 DAC960_V1_ErrorCorrection_Last = 0x7
494 } __attribute__ ((packed)) ErrorCorrection:3; /* Byte 40 Bits 3-5 */
495 boolean FastPageMode:1; /* Byte 40 Bit 6 */
496 boolean LowPowerMemory:1; /* Byte 40 Bit 7 */
497    unsigned char :8;				/* Byte 41 */
498 } MemoryType;
499 unsigned short ClockSpeed; /* Bytes 42-43 */
500 unsigned short MemorySpeed; /* Bytes 44-45 */
501 unsigned short HardwareSpeed; /* Bytes 46-47 */
502 unsigned int :32; /* Bytes 48-51 */
503 unsigned int :32; /* Bytes 52-55 */
504 unsigned char :8; /* Byte 56 */
505 unsigned char :8; /* Byte 57 */
506 unsigned short :16; /* Bytes 58-59 */
507 unsigned short MaxCommands; /* Bytes 60-61 */
508 unsigned short MaxScatterGatherEntries; /* Bytes 62-63 */
509 unsigned short MaxDriveCommands; /* Bytes 64-65 */
510 unsigned short MaxIODescriptors; /* Bytes 66-67 */
511 unsigned short MaxCombinedSectors; /* Bytes 68-69 */
512 unsigned char Latency; /* Byte 70 */
513 unsigned char :8; /* Byte 71 */
514 unsigned char SCSITimeout; /* Byte 72 */
515 unsigned char :8; /* Byte 73 */
516 unsigned short MinFreeLines; /* Bytes 74-75 */
517 unsigned int :32; /* Bytes 76-79 */
518 unsigned int :32; /* Bytes 80-83 */
519 unsigned char RebuildRateConstant; /* Byte 84 */
520 unsigned char :8; /* Byte 85 */
521 unsigned char :8; /* Byte 86 */
522 unsigned char :8; /* Byte 87 */
523 unsigned int :32; /* Bytes 88-91 */
524 unsigned int :32; /* Bytes 92-95 */
525 unsigned short PhysicalDriveBlockSize; /* Bytes 96-97 */
526 unsigned short LogicalDriveBlockSize; /* Bytes 98-99 */
527 unsigned short MaxBlocksPerCommand; /* Bytes 100-101 */
528 unsigned short BlockFactor; /* Bytes 102-103 */
529 unsigned short CacheLineSize; /* Bytes 104-105 */
530 struct {
531 enum {
532 DAC960_V1_Narrow_8bit = 0x0,
533 DAC960_V1_Wide_16bit = 0x1,
534 DAC960_V1_Wide_32bit = 0x2
535 } __attribute__ ((packed)) BusWidth:2; /* Byte 106 Bits 0-1 */
536 enum {
537 DAC960_V1_Fast = 0x0,
538 DAC960_V1_Ultra = 0x1,
539 DAC960_V1_Ultra2 = 0x2
540 } __attribute__ ((packed)) BusSpeed:2; /* Byte 106 Bits 2-3 */
541 boolean Differential:1; /* Byte 106 Bit 4 */
542 unsigned char :3; /* Byte 106 Bits 5-7 */
543 } SCSICapability;
544 unsigned char :8; /* Byte 107 */
545 unsigned int :32; /* Bytes 108-111 */
546 unsigned short FirmwareBuildNumber; /* Bytes 112-113 */
547 enum {
548 DAC960_V1_AEMI = 0x01,
549 DAC960_V1_OEM1 = 0x02,
550 DAC960_V1_OEM2 = 0x04,
551 DAC960_V1_OEM3 = 0x08,
552 DAC960_V1_Conner = 0x10,
553 DAC960_V1_SAFTE = 0x20
554 } __attribute__ ((packed)) FaultManagementType; /* Byte 114 */
555 unsigned char :8; /* Byte 115 */
556 struct {
557 boolean Clustering:1; /* Byte 116 Bit 0 */
558 boolean MylexOnlineRAIDExpansion:1; /* Byte 116 Bit 1 */
559 boolean ReadAhead:1; /* Byte 116 Bit 2 */
560 boolean BackgroundInitialization:1; /* Byte 116 Bit 3 */
561 unsigned int :28; /* Bytes 116-119 */
562 } FirmwareFeatures;
563 unsigned int :32; /* Bytes 120-123 */
564 unsigned int :32; /* Bytes 124-127 */
565}
566DAC960_V1_Enquiry2_T;
567
568
569/*
570 Define the DAC960 V1 Firmware Logical Drive State type.
571*/
572
573typedef enum
574{
575 DAC960_V1_LogicalDrive_Online = 0x03,
576 DAC960_V1_LogicalDrive_Critical = 0x04,
577 DAC960_V1_LogicalDrive_Offline = 0xFF
578}
579__attribute__ ((packed))
580DAC960_V1_LogicalDriveState_T;
581
582
583/*
584 Define the DAC960 V1 Firmware Logical Drive Information structure.
585*/
586
587typedef struct DAC960_V1_LogicalDriveInformation
588{
589 unsigned int LogicalDriveSize; /* Bytes 0-3 */
590 DAC960_V1_LogicalDriveState_T LogicalDriveState; /* Byte 4 */
591 unsigned char RAIDLevel:7; /* Byte 5 Bits 0-6 */
592 boolean WriteBack:1; /* Byte 5 Bit 7 */
593 unsigned short :16; /* Bytes 6-7 */
594}
595DAC960_V1_LogicalDriveInformation_T;
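
/*
  Illustrative sketch (not part of the original header): a trivial predicate
  over the structure above, treating Online and Critical logical drives as
  usable and Offline drives as not.  The helper name is hypothetical.
*/

static inline boolean
DAC960_V1_ExampleLogicalDriveUsable(const DAC960_V1_LogicalDriveInformation_T
				     *LogicalDriveInformation)
{
  return LogicalDriveInformation->LogicalDriveState !=
	 DAC960_V1_LogicalDrive_Offline;
}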
596
597
598/*
599 Define the DAC960 V1 Firmware Get Logical Drive Information Command
600 reply structure.
601*/
602
603typedef DAC960_V1_LogicalDriveInformation_T
604 DAC960_V1_LogicalDriveInformationArray_T[DAC960_MaxLogicalDrives];
605
606
607/*
608 Define the DAC960 V1 Firmware Perform Event Log Operation Types.
609*/
610
611typedef enum
612{
613 DAC960_V1_GetEventLogEntry = 0x00
614}
615__attribute__ ((packed))
616DAC960_V1_PerformEventLogOpType_T;
617
618
619/*
620 Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure.
621*/
622
623typedef struct DAC960_V1_EventLogEntry
624{
625 unsigned char MessageType; /* Byte 0 */
626 unsigned char MessageLength; /* Byte 1 */
627 unsigned char TargetID:5; /* Byte 2 Bits 0-4 */
628 unsigned char Channel:3; /* Byte 2 Bits 5-7 */
629 unsigned char LogicalUnit:6; /* Byte 3 Bits 0-5 */
630 unsigned char :2; /* Byte 3 Bits 6-7 */
631 unsigned short SequenceNumber; /* Bytes 4-5 */
632 unsigned char ErrorCode:7; /* Byte 6 Bits 0-6 */
633 boolean Valid:1; /* Byte 6 Bit 7 */
634 unsigned char SegmentNumber; /* Byte 7 */
635 DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 8 Bits 0-3 */
636 unsigned char :1; /* Byte 8 Bit 4 */
637 boolean ILI:1; /* Byte 8 Bit 5 */
638 boolean EOM:1; /* Byte 8 Bit 6 */
639 boolean Filemark:1; /* Byte 8 Bit 7 */
640 unsigned char Information[4]; /* Bytes 9-12 */
641 unsigned char AdditionalSenseLength; /* Byte 13 */
642 unsigned char CommandSpecificInformation[4]; /* Bytes 14-17 */
643 unsigned char AdditionalSenseCode; /* Byte 18 */
644 unsigned char AdditionalSenseCodeQualifier; /* Byte 19 */
645 unsigned char Dummy[12]; /* Bytes 20-31 */
646}
647DAC960_V1_EventLogEntry_T;
648
649
650/*
651 Define the DAC960 V1 Firmware Physical Device State type.
652*/
653
654typedef enum
655{
656 DAC960_V1_Device_Dead = 0x00,
657 DAC960_V1_Device_WriteOnly = 0x02,
658 DAC960_V1_Device_Online = 0x03,
659 DAC960_V1_Device_Standby = 0x10
660}
661__attribute__ ((packed))
662DAC960_V1_PhysicalDeviceState_T;
663
664
665/*
666 Define the DAC960 V1 Firmware Get Device State Command reply structure.
667 The structure is padded by 2 bytes for compatibility with Version 2.xx
668 Firmware.
669*/
670
671typedef struct DAC960_V1_DeviceState
672{
673 boolean Present:1; /* Byte 0 Bit 0 */
674 unsigned char :7; /* Byte 0 Bits 1-7 */
675 enum {
676 DAC960_V1_OtherType = 0x0,
677 DAC960_V1_DiskType = 0x1,
678 DAC960_V1_SequentialType = 0x2,
679 DAC960_V1_CDROM_or_WORM_Type = 0x3
680 } __attribute__ ((packed)) DeviceType:2; /* Byte 1 Bits 0-1 */
681 boolean :1; /* Byte 1 Bit 2 */
682 boolean Fast20:1; /* Byte 1 Bit 3 */
683 boolean Sync:1; /* Byte 1 Bit 4 */
684 boolean Fast:1; /* Byte 1 Bit 5 */
685 boolean Wide:1; /* Byte 1 Bit 6 */
686 boolean TaggedQueuingSupported:1; /* Byte 1 Bit 7 */
687 DAC960_V1_PhysicalDeviceState_T DeviceState; /* Byte 2 */
688 unsigned char :8; /* Byte 3 */
689 unsigned char SynchronousMultiplier; /* Byte 4 */
690 unsigned char SynchronousOffset:5; /* Byte 5 Bits 0-4 */
691 unsigned char :3; /* Byte 5 Bits 5-7 */
692 unsigned int DiskSize __attribute__ ((packed)); /* Bytes 6-9 */
693 unsigned short :16; /* Bytes 10-11 */
694}
695DAC960_V1_DeviceState_T;
696
697
698/*
699 Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
700*/
701
702typedef struct DAC960_V1_RebuildProgress
703{
704 unsigned int LogicalDriveNumber; /* Bytes 0-3 */
705 unsigned int LogicalDriveSize; /* Bytes 4-7 */
706 unsigned int RemainingBlocks; /* Bytes 8-11 */
707}
708DAC960_V1_RebuildProgress_T;
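
/*
  Illustrative sketch (not part of the original header): deriving a percentage
  complete figure from the Get Rebuild Progress reply above.  The helper name
  is hypothetical; the arithmetic only relates RemainingBlocks to
  LogicalDriveSize.
*/

static inline unsigned int
DAC960_V1_ExampleRebuildPercentComplete(const DAC960_V1_RebuildProgress_T
					 *RebuildProgress)
{
  unsigned long long BlocksDone;
  if (RebuildProgress->LogicalDriveSize == 0)
    return 0;					/* Avoid dividing by zero. */
  BlocksDone = RebuildProgress->LogicalDriveSize
	       - RebuildProgress->RemainingBlocks;
  return (unsigned int)
	 ((BlocksDone * 100) / RebuildProgress->LogicalDriveSize);
}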
709
710
711/*
712 Define the DAC960 V1 Firmware Background Initialization Status Command
713 reply structure.
714*/
715
716typedef struct DAC960_V1_BackgroundInitializationStatus
717{
718 unsigned int LogicalDriveSize; /* Bytes 0-3 */
719 unsigned int BlocksCompleted; /* Bytes 4-7 */
720 unsigned char Reserved1[12]; /* Bytes 8-19 */
721 unsigned int LogicalDriveNumber; /* Bytes 20-23 */
722 unsigned char RAIDLevel; /* Byte 24 */
723 enum {
724 DAC960_V1_BackgroundInitializationInvalid = 0x00,
725 DAC960_V1_BackgroundInitializationStarted = 0x02,
726 DAC960_V1_BackgroundInitializationInProgress = 0x04,
727 DAC960_V1_BackgroundInitializationSuspended = 0x05,
728 DAC960_V1_BackgroundInitializationCancelled = 0x06
729 } __attribute__ ((packed)) Status; /* Byte 25 */
730 unsigned char Reserved2[6]; /* Bytes 26-31 */
731}
732DAC960_V1_BackgroundInitializationStatus_T;
733
734
735/*
736 Define the DAC960 V1 Firmware Error Table Entry structure.
737*/
738
739typedef struct DAC960_V1_ErrorTableEntry
740{
741 unsigned char ParityErrorCount; /* Byte 0 */
742 unsigned char SoftErrorCount; /* Byte 1 */
743 unsigned char HardErrorCount; /* Byte 2 */
744 unsigned char MiscErrorCount; /* Byte 3 */
745}
746DAC960_V1_ErrorTableEntry_T;
747
748
749/*
750 Define the DAC960 V1 Firmware Get Error Table Command reply structure.
751*/
752
753typedef struct DAC960_V1_ErrorTable
754{
755 DAC960_V1_ErrorTableEntry_T
756 ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
757}
758DAC960_V1_ErrorTable_T;
759
760
761/*
762 Define the DAC960 V1 Firmware Read Config2 Command reply structure.
763*/
764
765typedef struct DAC960_V1_Config2
766{
767 unsigned char :1; /* Byte 0 Bit 0 */
768 boolean ActiveNegationEnabled:1; /* Byte 0 Bit 1 */
769 unsigned char :5; /* Byte 0 Bits 2-6 */
770 boolean NoRescanIfResetReceivedDuringScan:1; /* Byte 0 Bit 7 */
771 boolean StorageWorksSupportEnabled:1; /* Byte 1 Bit 0 */
772 boolean HewlettPackardSupportEnabled:1; /* Byte 1 Bit 1 */
773 boolean NoDisconnectOnFirstCommand:1; /* Byte 1 Bit 2 */
774 unsigned char :2; /* Byte 1 Bits 3-4 */
775 boolean AEMI_ARM:1; /* Byte 1 Bit 5 */
776 boolean AEMI_OFM:1; /* Byte 1 Bit 6 */
777 unsigned char :1; /* Byte 1 Bit 7 */
778 enum {
779 DAC960_V1_OEMID_Mylex = 0x00,
780 DAC960_V1_OEMID_IBM = 0x08,
781 DAC960_V1_OEMID_HP = 0x0A,
782 DAC960_V1_OEMID_DEC = 0x0C,
783 DAC960_V1_OEMID_Siemens = 0x10,
784 DAC960_V1_OEMID_Intel = 0x12
785 } __attribute__ ((packed)) OEMID; /* Byte 2 */
786 unsigned char OEMModelNumber; /* Byte 3 */
787 unsigned char PhysicalSector; /* Byte 4 */
788 unsigned char LogicalSector; /* Byte 5 */
789 unsigned char BlockFactor; /* Byte 6 */
790 boolean ReadAheadEnabled:1; /* Byte 7 Bit 0 */
791 boolean LowBIOSDelay:1; /* Byte 7 Bit 1 */
792 unsigned char :2; /* Byte 7 Bits 2-3 */
793 boolean ReassignRestrictedToOneSector:1; /* Byte 7 Bit 4 */
794 unsigned char :1; /* Byte 7 Bit 5 */
795 boolean ForceUnitAccessDuringWriteRecovery:1; /* Byte 7 Bit 6 */
796 boolean EnableLeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
797 unsigned char DefaultRebuildRate; /* Byte 8 */
798 unsigned char :8; /* Byte 9 */
799 unsigned char BlocksPerCacheLine; /* Byte 10 */
800 unsigned char BlocksPerStripe; /* Byte 11 */
801 struct {
802 enum {
803 DAC960_V1_Async = 0x0,
804 DAC960_V1_Sync_8MHz = 0x1,
805 DAC960_V1_Sync_5MHz = 0x2,
806 DAC960_V1_Sync_10or20MHz = 0x3 /* Byte 11 Bits 0-1 */
807 } __attribute__ ((packed)) Speed:2;
808 boolean Force8Bit:1; /* Byte 11 Bit 2 */
809 boolean DisableFast20:1; /* Byte 11 Bit 3 */
810 unsigned char :3; /* Byte 11 Bits 4-6 */
811 boolean EnableTaggedQueuing:1; /* Byte 11 Bit 7 */
812 } __attribute__ ((packed)) ChannelParameters[6]; /* Bytes 12-17 */
813 unsigned char SCSIInitiatorID; /* Byte 18 */
814 unsigned char :8; /* Byte 19 */
815 enum {
816 DAC960_V1_StartupMode_ControllerSpinUp = 0x00,
817 DAC960_V1_StartupMode_PowerOnSpinUp = 0x01
818 } __attribute__ ((packed)) StartupMode; /* Byte 20 */
819 unsigned char SimultaneousDeviceSpinUpCount; /* Byte 21 */
820 unsigned char SecondsDelayBetweenSpinUps; /* Byte 22 */
821 unsigned char Reserved1[29]; /* Bytes 23-51 */
822 boolean BIOSDisabled:1; /* Byte 52 Bit 0 */
823 boolean CDROMBootEnabled:1; /* Byte 52 Bit 1 */
824 unsigned char :3; /* Byte 52 Bits 2-4 */
825 enum {
826 DAC960_V1_Geometry_128_32 = 0x0,
827 DAC960_V1_Geometry_255_63 = 0x1,
828 DAC960_V1_Geometry_Reserved1 = 0x2,
829 DAC960_V1_Geometry_Reserved2 = 0x3
830 } __attribute__ ((packed)) DriveGeometry:2; /* Byte 52 Bits 5-6 */
831 unsigned char :1; /* Byte 52 Bit 7 */
832 unsigned char Reserved2[9]; /* Bytes 53-61 */
833 unsigned short Checksum; /* Bytes 62-63 */
834}
835DAC960_V1_Config2_T;
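
/*
  Illustrative sketch (not part of the original header): mapping the Config2
  DriveGeometry field to a heads/sectors-per-track pair, as the enumerator
  names (128_32 and 255_63) suggest.  The helper name and the fallback used
  for the reserved encodings are assumptions.
*/

static inline void
DAC960_V1_ExampleConfig2Geometry(const DAC960_V1_Config2_T *Config2,
				 int *Heads, int *Sectors)
{
  if (Config2->DriveGeometry == DAC960_V1_Geometry_255_63)
    {
      *Heads = 255;
      *Sectors = 63;
    }
  else
    {
      /* DAC960_V1_Geometry_128_32; the reserved encodings also land here. */
      *Heads = 128;
      *Sectors = 32;
    }
}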
836
837
838/*
839 Define the DAC960 V1 Firmware DCDB request structure.
840*/
841
842typedef struct DAC960_V1_DCDB
843{
844 unsigned char TargetID:4; /* Byte 0 Bits 0-3 */
845 unsigned char Channel:4; /* Byte 0 Bits 4-7 */
846 enum {
847 DAC960_V1_DCDB_NoDataTransfer = 0,
848 DAC960_V1_DCDB_DataTransferDeviceToSystem = 1,
849 DAC960_V1_DCDB_DataTransferSystemToDevice = 2,
850 DAC960_V1_DCDB_IllegalDataTransfer = 3
851 } __attribute__ ((packed)) Direction:2; /* Byte 1 Bits 0-1 */
852 boolean EarlyStatus:1; /* Byte 1 Bit 2 */
853 unsigned char :1; /* Byte 1 Bit 3 */
854 enum {
855 DAC960_V1_DCDB_Timeout_24_hours = 0,
856 DAC960_V1_DCDB_Timeout_10_seconds = 1,
857 DAC960_V1_DCDB_Timeout_60_seconds = 2,
858 DAC960_V1_DCDB_Timeout_10_minutes = 3
859 } __attribute__ ((packed)) Timeout:2; /* Byte 1 Bits 4-5 */
860 boolean NoAutomaticRequestSense:1; /* Byte 1 Bit 6 */
861 boolean DisconnectPermitted:1; /* Byte 1 Bit 7 */
862 unsigned short TransferLength; /* Bytes 2-3 */
863 DAC960_BusAddress32_T BusAddress; /* Bytes 4-7 */
864 unsigned char CDBLength:4; /* Byte 8 Bits 0-3 */
865 unsigned char TransferLengthHigh4:4; /* Byte 8 Bits 4-7 */
866 unsigned char SenseLength; /* Byte 9 */
867 unsigned char CDB[12]; /* Bytes 10-21 */
868 unsigned char SenseData[64]; /* Bytes 22-85 */
869 unsigned char Status; /* Byte 86 */
870 unsigned char :8; /* Byte 87 */
871}
872DAC960_V1_DCDB_T;
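
/*
  Illustrative sketch (not part of the original header): the DCDB transfer
  length is a 20 bit quantity split across the 16 bit TransferLength field
  (Bytes 2-3) and the 4 bit TransferLengthHigh4 field (Byte 8 Bits 4-7).
  The helper name is hypothetical.
*/

static inline void
DAC960_V1_ExampleSetDCDBTransferLength(DAC960_V1_DCDB_T *DCDB,
				       unsigned int Length)
{
  DCDB->TransferLength = Length & 0xFFFF;	/* Low 16 bits. */
  DCDB->TransferLengthHigh4 = (Length >> 16) & 0xF; /* High 4 bits. */
}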
873
874
875/*
876 Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
877 32 Bit Byte Count structure.
878*/
879
880typedef struct DAC960_V1_ScatterGatherSegment
881{
882 DAC960_BusAddress32_T SegmentDataPointer; /* Bytes 0-3 */
883 DAC960_ByteCount32_T SegmentByteCount; /* Bytes 4-7 */
884}
885DAC960_V1_ScatterGatherSegment_T;
886
887
888/*
889 Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure. Bytes 13-15
890 are not used. The Command Mailbox structure is padded to 16 bytes for
891 efficient access.
892*/
893
894typedef union DAC960_V1_CommandMailbox
895{
896 unsigned int Words[4]; /* Words 0-3 */
897 unsigned char Bytes[16]; /* Bytes 0-15 */
898 struct {
899 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
900 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
901 unsigned char Dummy[14]; /* Bytes 2-15 */
902 } __attribute__ ((packed)) Common;
903 struct {
904 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
905 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
906 unsigned char Dummy1[6]; /* Bytes 2-7 */
907 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
908 unsigned char Dummy2[4]; /* Bytes 12-15 */
909 } __attribute__ ((packed)) Type3;
910 struct {
911 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
912 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
913 unsigned char CommandOpcode2; /* Byte 2 */
914 unsigned char Dummy1[5]; /* Bytes 3-7 */
915 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
916 unsigned char Dummy2[4]; /* Bytes 12-15 */
917 } __attribute__ ((packed)) Type3B;
918 struct {
919 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
920 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
921 unsigned char Dummy1[5]; /* Bytes 2-6 */
922 unsigned char LogicalDriveNumber:6; /* Byte 7 Bits 0-6 */
923 boolean AutoRestore:1; /* Byte 7 Bit 7 */
924 unsigned char Dummy2[8]; /* Bytes 8-15 */
925 } __attribute__ ((packed)) Type3C;
926 struct {
927 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
928 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
929 unsigned char Channel; /* Byte 2 */
930 unsigned char TargetID; /* Byte 3 */
931 DAC960_V1_PhysicalDeviceState_T DeviceState:5; /* Byte 4 Bits 0-4 */
932 unsigned char Modifier:3; /* Byte 4 Bits 5-7 */
933 unsigned char Dummy1[3]; /* Bytes 5-7 */
934 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
935 unsigned char Dummy2[4]; /* Bytes 12-15 */
936 } __attribute__ ((packed)) Type3D;
937 struct {
938 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
939 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
940 DAC960_V1_PerformEventLogOpType_T OperationType; /* Byte 2 */
941 unsigned char OperationQualifier; /* Byte 3 */
942 unsigned short SequenceNumber; /* Bytes 4-5 */
943 unsigned char Dummy1[2]; /* Bytes 6-7 */
944 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
945 unsigned char Dummy2[4]; /* Bytes 12-15 */
946 } __attribute__ ((packed)) Type3E;
947 struct {
948 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
949 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
950 unsigned char Dummy1[2]; /* Bytes 2-3 */
951 unsigned char RebuildRateConstant; /* Byte 4 */
952 unsigned char Dummy2[3]; /* Bytes 5-7 */
953 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
954 unsigned char Dummy3[4]; /* Bytes 12-15 */
955 } __attribute__ ((packed)) Type3R;
956 struct {
957 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
958 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
959 unsigned short TransferLength; /* Bytes 2-3 */
960 unsigned int LogicalBlockAddress; /* Bytes 4-7 */
961 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
962 unsigned char LogicalDriveNumber; /* Byte 12 */
963 unsigned char Dummy[3]; /* Bytes 13-15 */
964 } __attribute__ ((packed)) Type4;
965 struct {
966 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
967 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
968 struct {
969 unsigned short TransferLength:11; /* Bytes 2-3 */
970 unsigned char LogicalDriveNumber:5; /* Byte 3 Bits 3-7 */
971 } __attribute__ ((packed)) LD;
972 unsigned int LogicalBlockAddress; /* Bytes 4-7 */
973 DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
974 unsigned char ScatterGatherCount:6; /* Byte 12 Bits 0-5 */
975 enum {
976 DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0,
977 DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1,
978 DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2,
979 DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3
980 } __attribute__ ((packed)) ScatterGatherType:2; /* Byte 12 Bits 6-7 */
981 unsigned char Dummy[3]; /* Bytes 13-15 */
982 } __attribute__ ((packed)) Type5;
983 struct {
984 DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
985 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
986 unsigned char CommandOpcode2; /* Byte 2 */
987 unsigned char :8; /* Byte 3 */
988 DAC960_BusAddress32_T CommandMailboxesBusAddress; /* Bytes 4-7 */
989 DAC960_BusAddress32_T StatusMailboxesBusAddress; /* Bytes 8-11 */
990 unsigned char Dummy[4]; /* Bytes 12-15 */
991 } __attribute__ ((packed)) TypeX;
992}
993DAC960_V1_CommandMailbox_T;
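
/*
  Illustrative sketch (not part of the original header): filling in a Type3D
  Command Mailbox for a Get Device State command.  Clearing the mailbox
  through the Words member and passing the reply buffer's 32 bit bus address
  are assumptions about how a caller would use the union above; the helper
  name is hypothetical.
*/

static inline void
DAC960_V1_ExampleBuildGetDeviceState(DAC960_V1_CommandMailbox_T *CommandMailbox,
				     DAC960_V1_CommandIdentifier_T CommandIdentifier,
				     unsigned char Channel,
				     unsigned char TargetID,
				     DAC960_BusAddress32_T ReplyBusAddress)
{
  CommandMailbox->Words[0] = 0;
  CommandMailbox->Words[1] = 0;
  CommandMailbox->Words[2] = 0;
  CommandMailbox->Words[3] = 0;
  CommandMailbox->Type3D.CommandOpcode = DAC960_V1_GetDeviceState;
  CommandMailbox->Type3D.CommandIdentifier = CommandIdentifier;
  CommandMailbox->Type3D.Channel = Channel;
  CommandMailbox->Type3D.TargetID = TargetID;
  CommandMailbox->Type3D.BusAddress = ReplyBusAddress;
}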
994
995
996/*
997 Define the DAC960 V2 Firmware Command Opcodes.
998*/
999
1000typedef enum
1001{
1002 DAC960_V2_MemCopy = 0x01,
1003 DAC960_V2_SCSI_10_Passthru = 0x02,
1004 DAC960_V2_SCSI_255_Passthru = 0x03,
1005 DAC960_V2_SCSI_10 = 0x04,
1006 DAC960_V2_SCSI_256 = 0x05,
1007 DAC960_V2_IOCTL = 0x20
1008}
1009__attribute__ ((packed))
1010DAC960_V2_CommandOpcode_T;
1011
1012
1013/*
1014 Define the DAC960 V2 Firmware IOCTL Opcodes.
1015*/
1016
1017typedef enum
1018{
1019 DAC960_V2_GetControllerInfo = 0x01,
1020 DAC960_V2_GetLogicalDeviceInfoValid = 0x03,
1021 DAC960_V2_GetPhysicalDeviceInfoValid = 0x05,
1022 DAC960_V2_GetHealthStatus = 0x11,
1023 DAC960_V2_GetEvent = 0x15,
1024 DAC960_V2_StartDiscovery = 0x81,
1025 DAC960_V2_SetDeviceState = 0x82,
1026 DAC960_V2_RebuildDeviceStart = 0x88,
1027 DAC960_V2_RebuildDeviceStop = 0x89,
1028 DAC960_V2_ConsistencyCheckStart = 0x8C,
1029 DAC960_V2_ConsistencyCheckStop = 0x8D,
1030 DAC960_V2_SetMemoryMailbox = 0x8E,
1031 DAC960_V2_PauseDevice = 0x92,
1032 DAC960_V2_TranslatePhysicalToLogicalDevice = 0xC5
1033}
1034__attribute__ ((packed))
1035DAC960_V2_IOCTL_Opcode_T;
1036
1037
1038/*
1039 Define the DAC960 V2 Firmware Command Identifier type.
1040*/
1041
1042typedef unsigned short DAC960_V2_CommandIdentifier_T;
1043
1044
1045/*
1046 Define the DAC960 V2 Firmware Command Status Codes.
1047*/
1048
1049#define DAC960_V2_NormalCompletion 0x00
1050#define DAC960_V2_AbormalCompletion 0x02
1051#define DAC960_V2_DeviceBusy 0x08
1052#define DAC960_V2_DeviceNonresponsive 0x0E
1053#define DAC960_V2_DeviceNonresponsive2 0x0F
1054#define DAC960_V2_DeviceRevervationConflict 0x18
1055
1056typedef unsigned char DAC960_V2_CommandStatus_T;
1057
1058
1059/*
1060 Define the DAC960 V2 Firmware Memory Type structure.
1061*/
1062
1063typedef struct DAC960_V2_MemoryType
1064{
1065 enum {
1066 DAC960_V2_MemoryType_Reserved = 0x00,
1067 DAC960_V2_MemoryType_DRAM = 0x01,
1068 DAC960_V2_MemoryType_EDRAM = 0x02,
1069 DAC960_V2_MemoryType_EDO = 0x03,
1070 DAC960_V2_MemoryType_SDRAM = 0x04,
1071 DAC960_V2_MemoryType_Last = 0x1F
1072 } __attribute__ ((packed)) MemoryType:5; /* Byte 0 Bits 0-4 */
1073 boolean :1; /* Byte 0 Bit 5 */
1074 boolean MemoryParity:1; /* Byte 0 Bit 6 */
1075 boolean MemoryECC:1; /* Byte 0 Bit 7 */
1076}
1077DAC960_V2_MemoryType_T;
1078
1079
1080/*
1081 Define the DAC960 V2 Firmware Processor Type structure.
1082*/
1083
1084typedef enum
1085{
1086 DAC960_V2_ProcessorType_i960CA = 0x01,
1087 DAC960_V2_ProcessorType_i960RD = 0x02,
1088 DAC960_V2_ProcessorType_i960RN = 0x03,
1089 DAC960_V2_ProcessorType_i960RP = 0x04,
1090 DAC960_V2_ProcessorType_NorthBay = 0x05,
1091 DAC960_V2_ProcessorType_StrongArm = 0x06,
1092 DAC960_V2_ProcessorType_i960RM = 0x07
1093}
1094__attribute__ ((packed))
1095DAC960_V2_ProcessorType_T;
1096
1097
1098/*
1099 Define the DAC960 V2 Firmware Get Controller Info reply structure.
1100*/
1101
1102typedef struct DAC960_V2_ControllerInfo
1103{
1104 unsigned char :8; /* Byte 0 */
1105 enum {
1106 DAC960_V2_SCSI_Bus = 0x00,
1107 DAC960_V2_Fibre_Bus = 0x01,
1108 DAC960_V2_PCI_Bus = 0x03
1109 } __attribute__ ((packed)) BusInterfaceType; /* Byte 1 */
1110 enum {
1111 DAC960_V2_DAC960E = 0x01,
1112 DAC960_V2_DAC960M = 0x08,
1113 DAC960_V2_DAC960PD = 0x10,
1114 DAC960_V2_DAC960PL = 0x11,
1115 DAC960_V2_DAC960PU = 0x12,
1116 DAC960_V2_DAC960PE = 0x13,
1117 DAC960_V2_DAC960PG = 0x14,
1118 DAC960_V2_DAC960PJ = 0x15,
1119 DAC960_V2_DAC960PTL0 = 0x16,
1120 DAC960_V2_DAC960PR = 0x17,
1121 DAC960_V2_DAC960PRL = 0x18,
1122 DAC960_V2_DAC960PT = 0x19,
1123 DAC960_V2_DAC1164P = 0x1A,
1124 DAC960_V2_DAC960PTL1 = 0x1B,
1125 DAC960_V2_EXR2000P = 0x1C,
1126 DAC960_V2_EXR3000P = 0x1D,
1127 DAC960_V2_AcceleRAID352 = 0x1E,
1128 DAC960_V2_AcceleRAID170 = 0x1F,
1129 DAC960_V2_AcceleRAID160 = 0x20,
1130 DAC960_V2_DAC960S = 0x60,
1131 DAC960_V2_DAC960SU = 0x61,
1132 DAC960_V2_DAC960SX = 0x62,
1133 DAC960_V2_DAC960SF = 0x63,
1134 DAC960_V2_DAC960SS = 0x64,
1135 DAC960_V2_DAC960FL = 0x65,
1136 DAC960_V2_DAC960LL = 0x66,
1137 DAC960_V2_DAC960FF = 0x67,
1138 DAC960_V2_DAC960HP = 0x68,
1139 DAC960_V2_RAIDBRICK = 0x69,
1140 DAC960_V2_METEOR_FL = 0x6A,
1141 DAC960_V2_METEOR_FF = 0x6B
1142 } __attribute__ ((packed)) ControllerType; /* Byte 2 */
1143 unsigned char :8; /* Byte 3 */
1144 unsigned short BusInterfaceSpeedMHz; /* Bytes 4-5 */
1145 unsigned char BusWidthBits; /* Byte 6 */
1146 unsigned char FlashCodeTypeOrProductID; /* Byte 7 */
1147 unsigned char NumberOfHostPortsPresent; /* Byte 8 */
1148 unsigned char Reserved1[7]; /* Bytes 9-15 */
1149 unsigned char BusInterfaceName[16]; /* Bytes 16-31 */
1150 unsigned char ControllerName[16]; /* Bytes 32-47 */
1151 unsigned char Reserved2[16]; /* Bytes 48-63 */
1152 /* Firmware Release Information */
1153 unsigned char FirmwareMajorVersion; /* Byte 64 */
1154 unsigned char FirmwareMinorVersion; /* Byte 65 */
1155 unsigned char FirmwareTurnNumber; /* Byte 66 */
1156 unsigned char FirmwareBuildNumber; /* Byte 67 */
1157 unsigned char FirmwareReleaseDay; /* Byte 68 */
1158 unsigned char FirmwareReleaseMonth; /* Byte 69 */
1159 unsigned char FirmwareReleaseYearHigh2Digits; /* Byte 70 */
1160 unsigned char FirmwareReleaseYearLow2Digits; /* Byte 71 */
1161 /* Hardware Release Information */
1162 unsigned char HardwareRevision; /* Byte 72 */
1163 unsigned int :24; /* Bytes 73-75 */
1164 unsigned char HardwareReleaseDay; /* Byte 76 */
1165 unsigned char HardwareReleaseMonth; /* Byte 77 */
1166 unsigned char HardwareReleaseYearHigh2Digits; /* Byte 78 */
1167 unsigned char HardwareReleaseYearLow2Digits; /* Byte 79 */
1168 /* Hardware Manufacturing Information */
1169 unsigned char ManufacturingBatchNumber; /* Byte 80 */
1170 unsigned char :8; /* Byte 81 */
1171 unsigned char ManufacturingPlantNumber; /* Byte 82 */
1172 unsigned char :8; /* Byte 83 */
1173 unsigned char HardwareManufacturingDay; /* Byte 84 */
1174 unsigned char HardwareManufacturingMonth; /* Byte 85 */
1175 unsigned char HardwareManufacturingYearHigh2Digits; /* Byte 86 */
1176 unsigned char HardwareManufacturingYearLow2Digits; /* Byte 87 */
1177 unsigned char MaximumNumberOfPDDperXLD; /* Byte 88 */
1178 unsigned char MaximumNumberOfILDperXLD; /* Byte 89 */
1179 unsigned short NonvolatileMemorySizeKB; /* Bytes 90-91 */
1180 unsigned char MaximumNumberOfXLD; /* Byte 92 */
1181 unsigned int :24; /* Bytes 93-95 */
1182 /* Unique Information per Controller */
1183 unsigned char ControllerSerialNumber[16]; /* Bytes 96-111 */
1184 unsigned char Reserved3[16]; /* Bytes 112-127 */
1185 /* Vendor Information */
1186 unsigned int :24; /* Bytes 128-130 */
1187 unsigned char OEM_Code; /* Byte 131 */
1188 unsigned char VendorName[16]; /* Bytes 132-147 */
1189 /* Other Physical/Controller/Operation Information */
1190 boolean BBU_Present:1; /* Byte 148 Bit 0 */
1191 boolean ActiveActiveClusteringMode:1; /* Byte 148 Bit 1 */
1192 unsigned char :6; /* Byte 148 Bits 2-7 */
1193 unsigned char :8; /* Byte 149 */
1194 unsigned short :16; /* Bytes 150-151 */
1195 /* Physical Device Scan Information */
1196 boolean PhysicalScanActive:1; /* Byte 152 Bit 0 */
1197 unsigned char :7; /* Byte 152 Bits 1-7 */
1198 unsigned char PhysicalDeviceChannelNumber; /* Byte 153 */
1199 unsigned char PhysicalDeviceTargetID; /* Byte 154 */
1200 unsigned char PhysicalDeviceLogicalUnit; /* Byte 155 */
1201 /* Maximum Command Data Transfer Sizes */
1202 unsigned short MaximumDataTransferSizeInBlocks; /* Bytes 156-157 */
1203 unsigned short MaximumScatterGatherEntries; /* Bytes 158-159 */
1204 /* Logical/Physical Device Counts */
1205 unsigned short LogicalDevicesPresent; /* Bytes 160-161 */
1206 unsigned short LogicalDevicesCritical; /* Bytes 162-163 */
1207 unsigned short LogicalDevicesOffline; /* Bytes 164-165 */
1208 unsigned short PhysicalDevicesPresent; /* Bytes 166-167 */
1209 unsigned short PhysicalDisksPresent; /* Bytes 168-169 */
1210 unsigned short PhysicalDisksCritical; /* Bytes 170-171 */
1211 unsigned short PhysicalDisksOffline; /* Bytes 172-173 */
1212 unsigned short MaximumParallelCommands; /* Bytes 174-175 */
1213 /* Channel and Target ID Information */
1214 unsigned char NumberOfPhysicalChannelsPresent; /* Byte 176 */
1215 unsigned char NumberOfVirtualChannelsPresent; /* Byte 177 */
1216 unsigned char NumberOfPhysicalChannelsPossible; /* Byte 178 */
1217 unsigned char NumberOfVirtualChannelsPossible; /* Byte 179 */
1218 unsigned char MaximumTargetsPerChannel[16]; /* Bytes 180-195 */
1219 unsigned char Reserved4[12]; /* Bytes 196-207 */
1220 /* Memory/Cache Information */
1221 unsigned short MemorySizeMB; /* Bytes 208-209 */
1222 unsigned short CacheSizeMB; /* Bytes 210-211 */
1223 unsigned int ValidCacheSizeInBytes; /* Bytes 212-215 */
1224 unsigned int DirtyCacheSizeInBytes; /* Bytes 216-219 */
1225 unsigned short MemorySpeedMHz; /* Bytes 220-221 */
1226 unsigned char MemoryDataWidthBits; /* Byte 222 */
1227 DAC960_V2_MemoryType_T MemoryType; /* Byte 223 */
1228 unsigned char CacheMemoryTypeName[16]; /* Bytes 224-239 */
1229 /* Execution Memory Information */
1230 unsigned short ExecutionMemorySizeMB; /* Bytes 240-241 */
1231 unsigned short ExecutionL2CacheSizeMB; /* Bytes 242-243 */
1232 unsigned char Reserved5[8]; /* Bytes 244-251 */
1233 unsigned short ExecutionMemorySpeedMHz; /* Bytes 252-253 */
1234 unsigned char ExecutionMemoryDataWidthBits; /* Byte 254 */
1235 DAC960_V2_MemoryType_T ExecutionMemoryType; /* Byte 255 */
1236 unsigned char ExecutionMemoryTypeName[16]; /* Bytes 256-271 */
1237 /* First CPU Type Information */
1238 unsigned short FirstProcessorSpeedMHz; /* Bytes 272-273 */
1239 DAC960_V2_ProcessorType_T FirstProcessorType; /* Byte 274 */
1240 unsigned char FirstProcessorCount; /* Byte 275 */
1241 unsigned char Reserved6[12]; /* Bytes 276-287 */
1242 unsigned char FirstProcessorName[16]; /* Bytes 288-303 */
1243 /* Second CPU Type Information */
1244 unsigned short SecondProcessorSpeedMHz; /* Bytes 304-305 */
1245 DAC960_V2_ProcessorType_T SecondProcessorType; /* Byte 306 */
1246 unsigned char SecondProcessorCount; /* Byte 307 */
1247 unsigned char Reserved7[12]; /* Bytes 308-319 */
1248 unsigned char SecondProcessorName[16]; /* Bytes 320-335 */
1249 /* Debugging/Profiling/Command Time Tracing Information */
1250 unsigned short CurrentProfilingDataPageNumber; /* Bytes 336-337 */
1251 unsigned short ProgramsAwaitingProfilingData; /* Bytes 338-339 */
1252 unsigned short CurrentCommandTimeTraceDataPageNumber; /* Bytes 340-341 */
1253 unsigned short ProgramsAwaitingCommandTimeTraceData; /* Bytes 342-343 */
1254 unsigned char Reserved8[8]; /* Bytes 344-351 */
1255 /* Error Counters on Physical Devices */
1256 unsigned short PhysicalDeviceBusResets; /* Bytes 352-353 */
1257  unsigned short PhysicalDeviceParityErrors;	/* Bytes 354-355 */
1258 unsigned short PhysicalDeviceSoftErrors; /* Bytes 356-357 */
1259 unsigned short PhysicalDeviceCommandsFailed; /* Bytes 358-359 */
1260 unsigned short PhysicalDeviceMiscellaneousErrors; /* Bytes 360-361 */
1261 unsigned short PhysicalDeviceCommandTimeouts; /* Bytes 362-363 */
1262 unsigned short PhysicalDeviceSelectionTimeouts; /* Bytes 364-365 */
1263 unsigned short PhysicalDeviceRetriesDone; /* Bytes 366-367 */
1264 unsigned short PhysicalDeviceAbortsDone; /* Bytes 368-369 */
1265 unsigned short PhysicalDeviceHostCommandAbortsDone; /* Bytes 370-371 */
1266 unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */
1267 unsigned short PhysicalDeviceHostCommandsFailed; /* Bytes 374-375 */
1268 unsigned short PhysicalDeviceHardErrors; /* Bytes 376-377 */
1269 unsigned char Reserved9[6]; /* Bytes 378-383 */
1270 /* Error Counters on Logical Devices */
1271 unsigned short LogicalDeviceSoftErrors; /* Bytes 384-385 */
1272 unsigned short LogicalDeviceCommandsFailed; /* Bytes 386-387 */
1273 unsigned short LogicalDeviceHostCommandAbortsDone; /* Bytes 388-389 */
1274 unsigned short :16; /* Bytes 390-391 */
1275 /* Error Counters on Controller */
1276 unsigned short ControllerMemoryErrors; /* Bytes 392-393 */
1277 unsigned short ControllerHostCommandAbortsDone; /* Bytes 394-395 */
1278 unsigned int :32; /* Bytes 396-399 */
1279 /* Long Duration Activity Information */
1280 unsigned short BackgroundInitializationsActive; /* Bytes 400-401 */
1281 unsigned short LogicalDeviceInitializationsActive; /* Bytes 402-403 */
1282 unsigned short PhysicalDeviceInitializationsActive; /* Bytes 404-405 */
1283 unsigned short ConsistencyChecksActive; /* Bytes 406-407 */
1284 unsigned short RebuildsActive; /* Bytes 408-409 */
1285 unsigned short OnlineExpansionsActive; /* Bytes 410-411 */
1286 unsigned short PatrolActivitiesActive; /* Bytes 412-413 */
1287 unsigned short :16; /* Bytes 414-415 */
1288 /* Flash ROM Information */
1289 unsigned char FlashType; /* Byte 416 */
1290 unsigned char :8; /* Byte 417 */
1291 unsigned short FlashSizeMB; /* Bytes 418-419 */
1292 unsigned int FlashLimit; /* Bytes 420-423 */
1293 unsigned int FlashCount; /* Bytes 424-427 */
1294 unsigned int :32; /* Bytes 428-431 */
1295 unsigned char FlashTypeName[16]; /* Bytes 432-447 */
1296 /* Firmware Run Time Information */
1297 unsigned char RebuildRate; /* Byte 448 */
1298 unsigned char BackgroundInitializationRate; /* Byte 449 */
1299 unsigned char ForegroundInitializationRate; /* Byte 450 */
1300 unsigned char ConsistencyCheckRate; /* Byte 451 */
1301 unsigned int :32; /* Bytes 452-455 */
1302 unsigned int MaximumDP; /* Bytes 456-459 */
1303 unsigned int FreeDP; /* Bytes 460-463 */
1304 unsigned int MaximumIOP; /* Bytes 464-467 */
1305 unsigned int FreeIOP; /* Bytes 468-471 */
1306 unsigned short MaximumCombLengthInBlocks; /* Bytes 472-473 */
1307 unsigned short NumberOfConfigurationGroups; /* Bytes 474-475 */
1308 boolean InstallationAbortStatus:1; /* Byte 476 Bit 0 */
1309 boolean MaintenanceModeStatus:1; /* Byte 476 Bit 1 */
1310 unsigned int :24; /* Bytes 476-479 */
1311 unsigned char Reserved10[32]; /* Bytes 480-511 */
1312 unsigned char Reserved11[512]; /* Bytes 512-1023 */
1313}
1314DAC960_V2_ControllerInfo_T;
1315
1316
1317/*
1318 Define the DAC960 V2 Firmware Logical Device State type.
1319*/
1320
1321typedef enum
1322{
1323 DAC960_V2_LogicalDevice_Online = 0x01,
1324 DAC960_V2_LogicalDevice_Offline = 0x08,
1325 DAC960_V2_LogicalDevice_Critical = 0x09
1326}
1327__attribute__ ((packed))
1328DAC960_V2_LogicalDeviceState_T;
1329
1330
1331/*
1332 Define the DAC960 V2 Firmware Get Logical Device Info reply structure.
1333*/
1334
1335typedef struct DAC960_V2_LogicalDeviceInfo
1336{
1337 unsigned char :8; /* Byte 0 */
1338 unsigned char Channel; /* Byte 1 */
1339 unsigned char TargetID; /* Byte 2 */
1340 unsigned char LogicalUnit; /* Byte 3 */
1341 DAC960_V2_LogicalDeviceState_T LogicalDeviceState; /* Byte 4 */
1342 unsigned char RAIDLevel; /* Byte 5 */
1343 unsigned char StripeSize; /* Byte 6 */
1344 unsigned char CacheLineSize; /* Byte 7 */
1345 struct {
1346 enum {
1347 DAC960_V2_ReadCacheDisabled = 0x0,
1348 DAC960_V2_ReadCacheEnabled = 0x1,
1349 DAC960_V2_ReadAheadEnabled = 0x2,
1350 DAC960_V2_IntelligentReadAheadEnabled = 0x3,
1351 DAC960_V2_ReadCache_Last = 0x7
1352 } __attribute__ ((packed)) ReadCache:3; /* Byte 8 Bits 0-2 */
1353 enum {
1354 DAC960_V2_WriteCacheDisabled = 0x0,
1355 DAC960_V2_LogicalDeviceReadOnly = 0x1,
1356 DAC960_V2_WriteCacheEnabled = 0x2,
1357 DAC960_V2_IntelligentWriteCacheEnabled = 0x3,
1358 DAC960_V2_WriteCache_Last = 0x7
1359 } __attribute__ ((packed)) WriteCache:3; /* Byte 8 Bits 3-5 */
1360 boolean :1; /* Byte 8 Bit 6 */
1361 boolean LogicalDeviceInitialized:1; /* Byte 8 Bit 7 */
1362 } LogicalDeviceControl; /* Byte 8 */
1363 /* Logical Device Operations Status */
1364 boolean ConsistencyCheckInProgress:1; /* Byte 9 Bit 0 */
1365 boolean RebuildInProgress:1; /* Byte 9 Bit 1 */
1366 boolean BackgroundInitializationInProgress:1; /* Byte 9 Bit 2 */
1367 boolean ForegroundInitializationInProgress:1; /* Byte 9 Bit 3 */
1368 boolean DataMigrationInProgress:1; /* Byte 9 Bit 4 */
1369 boolean PatrolOperationInProgress:1; /* Byte 9 Bit 5 */
1370 unsigned char :2; /* Byte 9 Bits 6-7 */
1371 unsigned char RAID5WriteUpdate; /* Byte 10 */
1372 unsigned char RAID5Algorithm; /* Byte 11 */
1373 unsigned short LogicalDeviceNumber; /* Bytes 12-13 */
1374 /* BIOS Info */
1375 boolean BIOSDisabled:1; /* Byte 14 Bit 0 */
1376 boolean CDROMBootEnabled:1; /* Byte 14 Bit 1 */
1377 boolean DriveCoercionEnabled:1; /* Byte 14 Bit 2 */
1378 boolean WriteSameDisabled:1; /* Byte 14 Bit 3 */
1379 boolean HBA_ModeEnabled:1; /* Byte 14 Bit 4 */
1380 enum {
1381 DAC960_V2_Geometry_128_32 = 0x0,
1382 DAC960_V2_Geometry_255_63 = 0x1,
1383 DAC960_V2_Geometry_Reserved1 = 0x2,
1384 DAC960_V2_Geometry_Reserved2 = 0x3
1385 } __attribute__ ((packed)) DriveGeometry:2; /* Byte 14 Bits 5-6 */
1386 boolean SuperReadAheadEnabled:1; /* Byte 14 Bit 7 */
1387 unsigned char :8; /* Byte 15 */
1388 /* Error Counters */
1389 unsigned short SoftErrors; /* Bytes 16-17 */
1390 unsigned short CommandsFailed; /* Bytes 18-19 */
1391 unsigned short HostCommandAbortsDone; /* Bytes 20-21 */
1392 unsigned short DeferredWriteErrors; /* Bytes 22-23 */
1393 unsigned int :32; /* Bytes 24-27 */
1394 unsigned int :32; /* Bytes 28-31 */
1395 /* Device Size Information */
1396 unsigned short :16; /* Bytes 32-33 */
1397 unsigned short DeviceBlockSizeInBytes; /* Bytes 34-35 */
1398 unsigned int OriginalDeviceSize; /* Bytes 36-39 */
1399 unsigned int ConfigurableDeviceSize; /* Bytes 40-43 */
1400 unsigned int :32; /* Bytes 44-47 */
1401 unsigned char LogicalDeviceName[32]; /* Bytes 48-79 */
1402 unsigned char SCSI_InquiryData[36]; /* Bytes 80-115 */
1403 unsigned char Reserved1[12]; /* Bytes 116-127 */
1404 DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 128-135 */
1405 DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 136-143 */
1406 DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 144-151 */
1407 DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 152-159 */
1408 DAC960_ByteCount64_T BackgroundInitializationBlockNumber; /* Bytes 160-167 */
1409 DAC960_ByteCount64_T ForegroundInitializationBlockNumber; /* Bytes 168-175 */
1410 DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 176-183 */
1411 DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 184-191 */
1412 unsigned char Reserved2[64]; /* Bytes 192-255 */
1413}
1414DAC960_V2_LogicalDeviceInfo_T;
1415
1416
1417/*
1418 Define the DAC960 V2 Firmware Physical Device State type.
1419*/
1420
1421typedef enum
1422{
1423 DAC960_V2_Device_Unconfigured = 0x00,
1424 DAC960_V2_Device_Online = 0x01,
1425 DAC960_V2_Device_Rebuild = 0x03,
1426 DAC960_V2_Device_Missing = 0x04,
1427 DAC960_V2_Device_Critical = 0x05,
1428 DAC960_V2_Device_Dead = 0x08,
1429 DAC960_V2_Device_SuspectedDead = 0x0C,
1430 DAC960_V2_Device_CommandedOffline = 0x10,
1431 DAC960_V2_Device_Standby = 0x21,
1432 DAC960_V2_Device_InvalidState = 0xFF
1433}
1434__attribute__ ((packed))
1435DAC960_V2_PhysicalDeviceState_T;
1436
1437
1438/*
1439 Define the DAC960 V2 Firmware Get Physical Device Info reply structure.
1440*/
1441
1442typedef struct DAC960_V2_PhysicalDeviceInfo
1443{
1444 unsigned char :8; /* Byte 0 */
1445 unsigned char Channel; /* Byte 1 */
1446 unsigned char TargetID; /* Byte 2 */
1447 unsigned char LogicalUnit; /* Byte 3 */
1448 /* Configuration Status Bits */
1449 boolean PhysicalDeviceFaultTolerant:1; /* Byte 4 Bit 0 */
1450 boolean PhysicalDeviceConnected:1; /* Byte 4 Bit 1 */
1451 boolean PhysicalDeviceLocalToController:1; /* Byte 4 Bit 2 */
1452 unsigned char :5; /* Byte 4 Bits 3-7 */
1453 /* Multiple Host/Controller Status Bits */
1454 boolean RemoteHostSystemDead:1; /* Byte 5 Bit 0 */
1455 boolean RemoteControllerDead:1; /* Byte 5 Bit 1 */
1456 unsigned char :6; /* Byte 5 Bits 2-7 */
1457 DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState; /* Byte 6 */
1458 unsigned char NegotiatedDataWidthBits; /* Byte 7 */
1459 unsigned short NegotiatedSynchronousMegaTransfers; /* Bytes 8-9 */
1460 /* Multiported Physical Device Information */
1461 unsigned char NumberOfPortConnections; /* Byte 10 */
1462 unsigned char DriveAccessibilityBitmap; /* Byte 11 */
1463 unsigned int :32; /* Bytes 12-15 */
1464 unsigned char NetworkAddress[16]; /* Bytes 16-31 */
1465 unsigned short MaximumTags; /* Bytes 32-33 */
1466 /* Physical Device Operations Status */
1467 boolean ConsistencyCheckInProgress:1; /* Byte 34 Bit 0 */
1468 boolean RebuildInProgress:1; /* Byte 34 Bit 1 */
1469 boolean MakingDataConsistentInProgress:1; /* Byte 34 Bit 2 */
1470 boolean PhysicalDeviceInitializationInProgress:1; /* Byte 34 Bit 3 */
1471 boolean DataMigrationInProgress:1; /* Byte 34 Bit 4 */
1472 boolean PatrolOperationInProgress:1; /* Byte 34 Bit 5 */
1473 unsigned char :2; /* Byte 34 Bits 6-7 */
1474 unsigned char LongOperationStatus; /* Byte 35 */
1475 unsigned char ParityErrors; /* Byte 36 */
1476 unsigned char SoftErrors; /* Byte 37 */
1477 unsigned char HardErrors; /* Byte 38 */
1478 unsigned char MiscellaneousErrors; /* Byte 39 */
1479 unsigned char CommandTimeouts; /* Byte 40 */
1480 unsigned char Retries; /* Byte 41 */
1481 unsigned char Aborts; /* Byte 42 */
1482 unsigned char PredictedFailuresDetected; /* Byte 43 */
1483 unsigned int :32; /* Bytes 44-47 */
1484 unsigned short :16; /* Bytes 48-49 */
1485 unsigned short DeviceBlockSizeInBytes; /* Bytes 50-51 */
1486 unsigned int OriginalDeviceSize; /* Bytes 52-55 */
1487 unsigned int ConfigurableDeviceSize; /* Bytes 56-59 */
1488 unsigned int :32; /* Bytes 60-63 */
1489 unsigned char PhysicalDeviceName[16]; /* Bytes 64-79 */
1490 unsigned char Reserved1[16]; /* Bytes 80-95 */
1491 unsigned char Reserved2[32]; /* Bytes 96-127 */
1492 unsigned char SCSI_InquiryData[36]; /* Bytes 128-163 */
1493 unsigned char Reserved3[20]; /* Bytes 164-183 */
1494 unsigned char Reserved4[8]; /* Bytes 184-191 */
1495 DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 192-199 */
1496 DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 200-207 */
1497 DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 208-215 */
1498 DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 216-223 */
1499 DAC960_ByteCount64_T MakingDataConsistentBlockNumber; /* Bytes 224-231 */
1500 DAC960_ByteCount64_T DeviceInitializationBlockNumber; /* Bytes 232-239 */
1501 DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 240-247 */
1502 DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 248-255 */
1503 unsigned char Reserved5[256]; /* Bytes 256-511 */
1504}
1505DAC960_V2_PhysicalDeviceInfo_T;
1506
1507
1508/*
1509 Define the DAC960 V2 Firmware Health Status Buffer structure.
1510*/
1511
1512typedef struct DAC960_V2_HealthStatusBuffer
1513{
1514 unsigned int MicrosecondsFromControllerStartTime; /* Bytes 0-3 */
1515 unsigned int MillisecondsFromControllerStartTime; /* Bytes 4-7 */
1516 unsigned int SecondsFrom1January1970; /* Bytes 8-11 */
1517 unsigned int :32; /* Bytes 12-15 */
1518 unsigned int StatusChangeCounter; /* Bytes 16-19 */
1519 unsigned int :32; /* Bytes 20-23 */
1520 unsigned int DebugOutputMessageBufferIndex; /* Bytes 24-27 */
1521 unsigned int CodedMessageBufferIndex; /* Bytes 28-31 */
1522 unsigned int CurrentTimeTracePageNumber; /* Bytes 32-35 */
1523 unsigned int CurrentProfilerPageNumber; /* Bytes 36-39 */
1524 unsigned int NextEventSequenceNumber; /* Bytes 40-43 */
1525 unsigned int :32; /* Bytes 44-47 */
1526 unsigned char Reserved1[16]; /* Bytes 48-63 */
1527 unsigned char Reserved2[64]; /* Bytes 64-127 */
1528}
1529DAC960_V2_HealthStatusBuffer_T;
1530
1531
1532/*
1533 Define the DAC960 V2 Firmware Get Event reply structure.
1534*/
1535
1536typedef struct DAC960_V2_Event
1537{
1538 unsigned int EventSequenceNumber; /* Bytes 0-3 */
1539 unsigned int EventTime; /* Bytes 4-7 */
1540 unsigned int EventCode; /* Bytes 8-11 */
1541 unsigned char :8; /* Byte 12 */
1542 unsigned char Channel; /* Byte 13 */
1543 unsigned char TargetID; /* Byte 14 */
1544 unsigned char LogicalUnit; /* Byte 15 */
1545 unsigned int :32; /* Bytes 16-19 */
1546 unsigned int EventSpecificParameter; /* Bytes 20-23 */
1547 unsigned char RequestSenseData[40]; /* Bytes 24-63 */
1548}
1549DAC960_V2_Event_T;
1550
1551
1552/*
1553 Define the DAC960 V2 Firmware Command Control Bits structure.
1554*/
1555
1556typedef struct DAC960_V2_CommandControlBits
1557{
1558 boolean ForceUnitAccess:1; /* Byte 0 Bit 0 */
1559 boolean DisablePageOut:1; /* Byte 0 Bit 1 */
1560 boolean :1; /* Byte 0 Bit 2 */
1561 boolean AdditionalScatterGatherListMemory:1; /* Byte 0 Bit 3 */
1562 boolean DataTransferControllerToHost:1; /* Byte 0 Bit 4 */
1563 boolean :1; /* Byte 0 Bit 5 */
1564 boolean NoAutoRequestSense:1; /* Byte 0 Bit 6 */
1565 boolean DisconnectProhibited:1; /* Byte 0 Bit 7 */
1566}
1567DAC960_V2_CommandControlBits_T;
1568
1569
1570/*
1571 Define the DAC960 V2 Firmware Command Timeout structure.
1572*/
1573
1574typedef struct DAC960_V2_CommandTimeout
1575{
1576 unsigned char TimeoutValue:6; /* Byte 0 Bits 0-5 */
1577 enum {
1578 DAC960_V2_TimeoutScale_Seconds = 0,
1579 DAC960_V2_TimeoutScale_Minutes = 1,
1580 DAC960_V2_TimeoutScale_Hours = 2,
1581 DAC960_V2_TimeoutScale_Reserved = 3
1582 } __attribute__ ((packed)) TimeoutScale:2; /* Byte 0 Bits 6-7 */
1583}
1584DAC960_V2_CommandTimeout_T;
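
/*
  Illustrative sketch (not part of the original header): encoding a one minute
  command timeout with the structure above.  TimeoutValue is a 6 bit count and
  TimeoutScale selects its unit; the helper name is hypothetical.
*/

static inline DAC960_V2_CommandTimeout_T
DAC960_V2_ExampleOneMinuteTimeout(void)
{
  DAC960_V2_CommandTimeout_T CommandTimeout;
  CommandTimeout.TimeoutValue = 1;
  CommandTimeout.TimeoutScale = DAC960_V2_TimeoutScale_Minutes;
  return CommandTimeout;
}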
1585
1586
1587/*
1588 Define the DAC960 V2 Firmware Physical Device structure.
1589*/
1590
1591typedef struct DAC960_V2_PhysicalDevice
1592{
1593 unsigned char LogicalUnit; /* Byte 0 */
1594 unsigned char TargetID; /* Byte 1 */
1595 unsigned char Channel:3; /* Byte 2 Bits 0-2 */
1596 unsigned char Controller:5; /* Byte 2 Bits 3-7 */
1597}
1598__attribute__ ((packed))
1599DAC960_V2_PhysicalDevice_T;
1600
1601
1602/*
1603 Define the DAC960 V2 Firmware Logical Device structure.
1604*/
1605
1606typedef struct DAC960_V2_LogicalDevice
1607{
1608 unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
1609 unsigned char :3; /* Byte 2 Bits 0-2 */
1610 unsigned char Controller:5; /* Byte 2 Bits 3-7 */
1611}
1612__attribute__ ((packed))
1613DAC960_V2_LogicalDevice_T;
1614
1615
1616/*
1617 Define the DAC960 V2 Firmware Operation Device type.
1618*/
1619
1620typedef enum
1621{
1622 DAC960_V2_Physical_Device = 0x00,
1623 DAC960_V2_RAID_Device = 0x01,
1624 DAC960_V2_Physical_Channel = 0x02,
1625 DAC960_V2_RAID_Channel = 0x03,
1626 DAC960_V2_Physical_Controller = 0x04,
1627 DAC960_V2_RAID_Controller = 0x05,
1628 DAC960_V2_Configuration_Group = 0x10,
1629 DAC960_V2_Enclosure = 0x11
1630}
1631__attribute__ ((packed))
1632DAC960_V2_OperationDevice_T;
1633
1634
1635/*
1636 Define the DAC960 V2 Firmware Translate Physical To Logical Device structure.
1637*/
1638
1639typedef struct DAC960_V2_PhysicalToLogicalDevice
1640{
1641 unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
1642 unsigned short :16; /* Bytes 2-3 */
1643 unsigned char PreviousBootController; /* Byte 4 */
1644 unsigned char PreviousBootChannel; /* Byte 5 */
1645 unsigned char PreviousBootTargetID; /* Byte 6 */
1646 unsigned char PreviousBootLogicalUnit; /* Byte 7 */
1647}
1648DAC960_V2_PhysicalToLogicalDevice_T;
1649
1650
1651
1652/*
1653 Define the DAC960 V2 Firmware Scatter/Gather List Entry structure.
1654*/
1655
1656typedef struct DAC960_V2_ScatterGatherSegment
1657{
1658 DAC960_BusAddress64_T SegmentDataPointer; /* Bytes 0-7 */
1659 DAC960_ByteCount64_T SegmentByteCount; /* Bytes 8-15 */
1660}
1661DAC960_V2_ScatterGatherSegment_T;
1662
1663
1664/*
1665 Define the DAC960 V2 Firmware Data Transfer Memory Address structure.
1666*/
1667
1668typedef union DAC960_V2_DataTransferMemoryAddress
1669{
1670 DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */
1671 struct {
1672 unsigned short ScatterGatherList0Length; /* Bytes 0-1 */
1673 unsigned short ScatterGatherList1Length; /* Bytes 2-3 */
1674 unsigned short ScatterGatherList2Length; /* Bytes 4-5 */
1675 unsigned short :16; /* Bytes 6-7 */
1676 DAC960_BusAddress64_T ScatterGatherList0Address; /* Bytes 8-15 */
1677 DAC960_BusAddress64_T ScatterGatherList1Address; /* Bytes 16-23 */
1678 DAC960_BusAddress64_T ScatterGatherList2Address; /* Bytes 24-31 */
1679 } ExtendedScatterGather;
1680}
1681DAC960_V2_DataTransferMemoryAddress_T;
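
/*
  Illustrative sketch (not part of the original header): describing a single
  contiguous buffer with the union above by filling the first inline
  scatter/gather segment.  The helper name is hypothetical; a real caller
  would supply a DMA-able bus address and byte count for the buffer.
*/

static inline void
DAC960_V2_ExampleSetSingleSegment(DAC960_V2_DataTransferMemoryAddress_T
				    *DataTransferMemoryAddress,
				  DAC960_BusAddress64_T DataBusAddress,
				  DAC960_ByteCount64_T DataByteCount)
{
  DataTransferMemoryAddress->ScatterGatherSegments[0].SegmentDataPointer =
    DataBusAddress;
  DataTransferMemoryAddress->ScatterGatherSegments[0].SegmentByteCount =
    DataByteCount;
}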
1682
1683
1684/*
1685 Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure.
1686*/
1687
1688typedef union DAC960_V2_CommandMailbox
1689{
1690 unsigned int Words[16]; /* Words 0-15 */
1691 struct {
1692 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1693 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1694 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1695 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1696 unsigned char DataTransferPageNumber; /* Byte 7 */
1697 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1698 unsigned int :24; /* Bytes 16-18 */
1699 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1700 unsigned char RequestSenseSize; /* Byte 20 */
1701 unsigned char IOCTL_Opcode; /* Byte 21 */
1702 unsigned char Reserved[10]; /* Bytes 22-31 */
1703 DAC960_V2_DataTransferMemoryAddress_T
1704 DataTransferMemoryAddress; /* Bytes 32-63 */
1705 } Common;
1706 struct {
1707 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1708 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1709 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1710 DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
1711 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1712 DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
1713 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1714 unsigned char RequestSenseSize; /* Byte 20 */
1715 unsigned char CDBLength; /* Byte 21 */
1716 unsigned char SCSI_CDB[10]; /* Bytes 22-31 */
1717 DAC960_V2_DataTransferMemoryAddress_T
1718 DataTransferMemoryAddress; /* Bytes 32-63 */
1719 } SCSI_10;
1720 struct {
1721 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1722 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1723 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1724 DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
1725 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1726 DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
1727 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1728 unsigned char RequestSenseSize; /* Byte 20 */
1729 unsigned char CDBLength; /* Byte 21 */
1730 unsigned short :16; /* Bytes 22-23 */
1731 DAC960_BusAddress64_T SCSI_CDB_BusAddress; /* Bytes 24-31 */
1732 DAC960_V2_DataTransferMemoryAddress_T
1733 DataTransferMemoryAddress; /* Bytes 32-63 */
1734 } SCSI_255;
1735 struct {
1736 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1737 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1738 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1739 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1740 unsigned char DataTransferPageNumber; /* Byte 7 */
1741 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1742 unsigned short :16; /* Bytes 16-17 */
1743 unsigned char ControllerNumber; /* Byte 18 */
1744 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1745 unsigned char RequestSenseSize; /* Byte 20 */
1746 unsigned char IOCTL_Opcode; /* Byte 21 */
1747 unsigned char Reserved[10]; /* Bytes 22-31 */
1748 DAC960_V2_DataTransferMemoryAddress_T
1749 DataTransferMemoryAddress; /* Bytes 32-63 */
1750 } ControllerInfo;
1751 struct {
1752 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1753 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1754 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1755 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1756 unsigned char DataTransferPageNumber; /* Byte 7 */
1757 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1758 DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
1759 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1760 unsigned char RequestSenseSize; /* Byte 20 */
1761 unsigned char IOCTL_Opcode; /* Byte 21 */
1762 unsigned char Reserved[10]; /* Bytes 22-31 */
1763 DAC960_V2_DataTransferMemoryAddress_T
1764 DataTransferMemoryAddress; /* Bytes 32-63 */
1765 } LogicalDeviceInfo;
1766 struct {
1767 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1768 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1769 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1770 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1771 unsigned char DataTransferPageNumber; /* Byte 7 */
1772 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1773 DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
1774 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1775 unsigned char RequestSenseSize; /* Byte 20 */
1776 unsigned char IOCTL_Opcode; /* Byte 21 */
1777 unsigned char Reserved[10]; /* Bytes 22-31 */
1778 DAC960_V2_DataTransferMemoryAddress_T
1779 DataTransferMemoryAddress; /* Bytes 32-63 */
1780 } PhysicalDeviceInfo;
1781 struct {
1782 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1783 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1784 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1785 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1786 unsigned char DataTransferPageNumber; /* Byte 7 */
1787 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1788 unsigned short EventSequenceNumberHigh16; /* Bytes 16-17 */
1789 unsigned char ControllerNumber; /* Byte 18 */
1790 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1791 unsigned char RequestSenseSize; /* Byte 20 */
1792 unsigned char IOCTL_Opcode; /* Byte 21 */
1793 unsigned short EventSequenceNumberLow16; /* Bytes 22-23 */
1794 unsigned char Reserved[8]; /* Bytes 24-31 */
1795 DAC960_V2_DataTransferMemoryAddress_T
1796 DataTransferMemoryAddress; /* Bytes 32-63 */
1797 } GetEvent;
1798 struct {
1799 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1800 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1801 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1802 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1803 unsigned char DataTransferPageNumber; /* Byte 7 */
1804 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1805 DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
1806 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1807 unsigned char RequestSenseSize; /* Byte 20 */
1808 unsigned char IOCTL_Opcode; /* Byte 21 */
1809 union {
1810 DAC960_V2_LogicalDeviceState_T LogicalDeviceState;
1811 DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState;
1812 } DeviceState; /* Byte 22 */
1813 unsigned char Reserved[9]; /* Bytes 23-31 */
1814 DAC960_V2_DataTransferMemoryAddress_T
1815 DataTransferMemoryAddress; /* Bytes 32-63 */
1816 } SetDeviceState;
1817 struct {
1818 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1819 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1820 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1821 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1822 unsigned char DataTransferPageNumber; /* Byte 7 */
1823 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1824 DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
1825 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1826 unsigned char RequestSenseSize; /* Byte 20 */
1827 unsigned char IOCTL_Opcode; /* Byte 21 */
1828 boolean RestoreConsistency:1; /* Byte 22 Bit 0 */
1829 boolean InitializedAreaOnly:1; /* Byte 22 Bit 1 */
1830 unsigned char :6; /* Byte 22 Bits 2-7 */
1831 unsigned char Reserved[9]; /* Bytes 23-31 */
1832 DAC960_V2_DataTransferMemoryAddress_T
1833 DataTransferMemoryAddress; /* Bytes 32-63 */
1834 } ConsistencyCheck;
1835 struct {
1836 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1837 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1838 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1839 unsigned char FirstCommandMailboxSizeKB; /* Byte 4 */
1840 unsigned char FirstStatusMailboxSizeKB; /* Byte 5 */
1841 unsigned char SecondCommandMailboxSizeKB; /* Byte 6 */
1842 unsigned char SecondStatusMailboxSizeKB; /* Byte 7 */
1843 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1844 unsigned int :24; /* Bytes 16-18 */
1845 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1846 unsigned char RequestSenseSize; /* Byte 20 */
1847 unsigned char IOCTL_Opcode; /* Byte 21 */
1848 unsigned char HealthStatusBufferSizeKB; /* Byte 22 */
1849 unsigned char :8; /* Byte 23 */
1850 DAC960_BusAddress64_T HealthStatusBufferBusAddress; /* Bytes 24-31 */
1851 DAC960_BusAddress64_T FirstCommandMailboxBusAddress; /* Bytes 32-39 */
1852 DAC960_BusAddress64_T FirstStatusMailboxBusAddress; /* Bytes 40-47 */
1853 DAC960_BusAddress64_T SecondCommandMailboxBusAddress; /* Bytes 48-55 */
1854 DAC960_BusAddress64_T SecondStatusMailboxBusAddress; /* Bytes 56-63 */
1855 } SetMemoryMailbox;
1856 struct {
1857 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
1858 DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
1859 DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
1860 DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
1861 unsigned char DataTransferPageNumber; /* Byte 7 */
1862 DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
1863 DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
1864 DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
1865 unsigned char RequestSenseSize; /* Byte 20 */
1866 unsigned char IOCTL_Opcode; /* Byte 21 */
1867 DAC960_V2_OperationDevice_T OperationDevice; /* Byte 22 */
1868 unsigned char Reserved[9]; /* Bytes 23-31 */
1869 DAC960_V2_DataTransferMemoryAddress_T
1870 DataTransferMemoryAddress; /* Bytes 32-63 */
1871 } DeviceOperation;
1872}
1873DAC960_V2_CommandMailbox_T;
1874
1875
1876/*
1877 Define the DAC960 Driver IOCTL requests.
1878*/
1879
1880#define DAC960_IOCTL_GET_CONTROLLER_COUNT 0xDAC001
1881#define DAC960_IOCTL_GET_CONTROLLER_INFO 0xDAC002
1882#define DAC960_IOCTL_V1_EXECUTE_COMMAND 0xDAC003
1883#define DAC960_IOCTL_V2_EXECUTE_COMMAND 0xDAC004
1884#define DAC960_IOCTL_V2_GET_HEALTH_STATUS 0xDAC005
1885
1886
1887/*
1888 Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure.
1889*/
1890
1891typedef struct DAC960_ControllerInfo
1892{
1893 unsigned char ControllerNumber;
1894 unsigned char FirmwareType;
1895 unsigned char Channels;
1896 unsigned char Targets;
1897 unsigned char PCI_Bus;
1898 unsigned char PCI_Device;
1899 unsigned char PCI_Function;
1900 unsigned char IRQ_Channel;
1901 DAC960_PCI_Address_T PCI_Address;
1902 unsigned char ModelName[20];
1903 unsigned char FirmwareVersion[12];
1904}
1905DAC960_ControllerInfo_T;
1906
1907
1908/*
1909 Define the User Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
1910*/
1911
1912typedef struct DAC960_V1_UserCommand
1913{
1914 unsigned char ControllerNumber;
1915 DAC960_V1_CommandMailbox_T CommandMailbox;
1916 int DataTransferLength;
1917 void __user *DataTransferBuffer;
1918 DAC960_V1_DCDB_T __user *DCDB;
1919}
1920DAC960_V1_UserCommand_T;
1921
1922
1923/*
1924 Define the Kernel Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
1925*/
1926
1927typedef struct DAC960_V1_KernelCommand
1928{
1929 unsigned char ControllerNumber;
1930 DAC960_V1_CommandMailbox_T CommandMailbox;
1931 int DataTransferLength;
1932 void *DataTransferBuffer;
1933 DAC960_V1_DCDB_T *DCDB;
1934 DAC960_V1_CommandStatus_T CommandStatus;
1935 void (*CompletionFunction)(struct DAC960_V1_KernelCommand *);
1936 void *CompletionData;
1937}
1938DAC960_V1_KernelCommand_T;
1939
1940
1941/*
1942 Define the User Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
1943*/
1944
1945typedef struct DAC960_V2_UserCommand
1946{
1947 unsigned char ControllerNumber;
1948 DAC960_V2_CommandMailbox_T CommandMailbox;
1949 int DataTransferLength;
1950 int RequestSenseLength;
1951 void __user *DataTransferBuffer;
1952 void __user *RequestSenseBuffer;
1953}
1954DAC960_V2_UserCommand_T;
1955
1956
1957/*
1958 Define the Kernel Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
1959*/
1960
1961typedef struct DAC960_V2_KernelCommand
1962{
1963 unsigned char ControllerNumber;
1964 DAC960_V2_CommandMailbox_T CommandMailbox;
1965 int DataTransferLength;
1966 int RequestSenseLength;
1967 void *DataTransferBuffer;
1968 void *RequestSenseBuffer;
1969 DAC960_V2_CommandStatus_T CommandStatus;
1970 void (*CompletionFunction)(struct DAC960_V2_KernelCommand *);
1971 void *CompletionData;
1972}
1973DAC960_V2_KernelCommand_T;
1974
1975
1976/*
1977 Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure.
1978*/
1979
1980typedef struct DAC960_V2_GetHealthStatus
1981{
1982 unsigned char ControllerNumber;
1983 DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer;
1984}
1985DAC960_V2_GetHealthStatus_T;
1986
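/*
  Illustrative sketch only (not part of the driver): a user space program
  might exercise the ioctl interface above roughly as follows.  The device
  node name "/dev/dac960_gam" and the return conventions shown (controller
  count returned directly, controller information copied back in place) are
  assumptions about the DAC960.c implementation and should be checked
  against the driver source.
*/

#if 0  /* illustrative example only, never compiled */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "DAC960.h"

int main(void)
{
  /* Ask controller 0 for its identity via the driver ioctl interface. */
  DAC960_ControllerInfo_T ControllerInfo = { .ControllerNumber = 0 };
  int ControllerCount, fd = open("/dev/dac960_gam", O_RDONLY);
  if (fd < 0) return 1;
  ControllerCount = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);
  if (ControllerCount > 0 &&
      ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_INFO, &ControllerInfo) == 0)
    printf("%d controller(s); controller 0: %.20s firmware %.12s\n",
           ControllerCount, ControllerInfo.ModelName,
           ControllerInfo.FirmwareVersion);
  close(fd);
  return 0;
}
#endif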
1987
1988/*
1989 Import the Kernel Mode IOCTL interface.
1990*/
1991
1992extern int DAC960_KernelIOCTL(unsigned int Request, void *Argument);
1993
1994
1995/*
 1996  DAC960_DriverVersion, defined only by the DAC960 driver itself, protects the private portion of this file.
1997*/
1998
1999#ifdef DAC960_DriverVersion
2000
2001
2002/*
2003 Define the maximum Driver Queue Depth and Controller Queue Depth supported
2004 by DAC960 V1 and V2 Firmware Controllers.
2005*/
2006
2007#define DAC960_MaxDriverQueueDepth 511
2008#define DAC960_MaxControllerQueueDepth 512
2009
2010
2011/*
2012 Define the maximum number of Scatter/Gather Segments supported for any
2013 DAC960 V1 and V2 Firmware controller.
2014*/
2015
2016#define DAC960_V1_ScatterGatherLimit 33
2017#define DAC960_V2_ScatterGatherLimit 128
2018
2019
2020/*
2021 Define the number of Command Mailboxes and Status Mailboxes used by the
2022 DAC960 V1 and V2 Firmware Memory Mailbox Interface.
2023*/
2024
2025#define DAC960_V1_CommandMailboxCount 256
2026#define DAC960_V1_StatusMailboxCount 1024
2027#define DAC960_V2_CommandMailboxCount 512
2028#define DAC960_V2_StatusMailboxCount 512
2029
2030
2031/*
2032 Define the DAC960 Controller Monitoring Timer Interval.
2033*/
2034
2035#define DAC960_MonitoringTimerInterval (10 * HZ)
2036
2037
2038/*
2039 Define the DAC960 Controller Secondary Monitoring Interval.
2040*/
2041
2042#define DAC960_SecondaryMonitoringInterval (60 * HZ)
2043
2044
2045/*
2046 Define the DAC960 Controller Health Status Monitoring Interval.
2047*/
2048
2049#define DAC960_HealthStatusMonitoringInterval (1 * HZ)
2050
2051
2052/*
2053 Define the DAC960 Controller Progress Reporting Interval.
2054*/
2055
2056#define DAC960_ProgressReportingInterval (60 * HZ)
2057
2058
2059/*
2060 Define the maximum number of Partitions allowed for each Logical Drive.
2061*/
2062
2063#define DAC960_MaxPartitions 8
2064#define DAC960_MaxPartitionsBits 3
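/*
  Note (assumption, not stated in this header): DAC960_MaxPartitions is
  1 << DAC960_MaxPartitionsBits, and the bits value is presumably used when
  assigning block device minor numbers (one range of 8 minors per logical
  drive).
*/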
2065
2066/*
2067 Define the DAC960 Controller fixed Block Size and Block Size Bits.
2068*/
2069
2070#define DAC960_BlockSize 512
2071#define DAC960_BlockSizeBits 9
2072
2073
2074/*
2075 Define the number of Command structures that should be allocated as a
2076 group to optimize kernel memory allocation.
2077*/
2078
2079#define DAC960_V1_CommandAllocationGroupSize 11
2080#define DAC960_V2_CommandAllocationGroupSize 29
2081
2082
2083/*
2084 Define the Controller Line Buffer, Progress Buffer, User Message, and
2085 Initial Status Buffer sizes.
2086*/
2087
2088#define DAC960_LineBufferSize 100
2089#define DAC960_ProgressBufferSize 200
2090#define DAC960_UserMessageSize 200
2091#define DAC960_InitialStatusBufferSize (8192-32)
2092
2093
2094/*
2095 Define the DAC960 Controller Firmware Types.
2096*/
2097
2098typedef enum
2099{
2100 DAC960_V1_Controller = 1,
2101 DAC960_V2_Controller = 2
2102}
2103DAC960_FirmwareType_T;
2104
2105
2106/*
2107 Define the DAC960 Controller Hardware Types.
2108*/
2109
2110typedef enum
2111{
2112 DAC960_BA_Controller = 1, /* eXtremeRAID 2000 */
2113 DAC960_LP_Controller = 2, /* AcceleRAID 352 */
2114 DAC960_LA_Controller = 3, /* DAC1164P */
2115 DAC960_PG_Controller = 4, /* DAC960PTL/PJ/PG */
2116 DAC960_PD_Controller = 5, /* DAC960PU/PD/PL/P */
2117 DAC960_P_Controller = 6 /* DAC960PU/PD/PL/P */
2118}
2119DAC960_HardwareType_T;
2120
2121
2122/*
2123 Define the Driver Message Levels.
2124*/
2125
2126typedef enum DAC960_MessageLevel
2127{
2128 DAC960_AnnounceLevel = 0,
2129 DAC960_InfoLevel = 1,
2130 DAC960_NoticeLevel = 2,
2131 DAC960_WarningLevel = 3,
2132 DAC960_ErrorLevel = 4,
2133 DAC960_ProgressLevel = 5,
2134 DAC960_CriticalLevel = 6,
2135 DAC960_UserCriticalLevel = 7
2136}
2137DAC960_MessageLevel_T;
2138
2139static char
2140 *DAC960_MessageLevelMap[] =
2141 { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING,
2142 KERN_ERR, KERN_CRIT, KERN_CRIT, KERN_CRIT };
2143
2144
2145/*
2146 Define Driver Message macros.
2147*/
2148
2149#define DAC960_Announce(Format, Arguments...) \
2150 DAC960_Message(DAC960_AnnounceLevel, Format, ##Arguments)
2151
2152#define DAC960_Info(Format, Arguments...) \
2153 DAC960_Message(DAC960_InfoLevel, Format, ##Arguments)
2154
2155#define DAC960_Notice(Format, Arguments...) \
2156 DAC960_Message(DAC960_NoticeLevel, Format, ##Arguments)
2157
2158#define DAC960_Warning(Format, Arguments...) \
2159 DAC960_Message(DAC960_WarningLevel, Format, ##Arguments)
2160
2161#define DAC960_Error(Format, Arguments...) \
2162 DAC960_Message(DAC960_ErrorLevel, Format, ##Arguments)
2163
2164#define DAC960_Progress(Format, Arguments...) \
2165 DAC960_Message(DAC960_ProgressLevel, Format, ##Arguments)
2166
2167#define DAC960_Critical(Format, Arguments...) \
2168 DAC960_Message(DAC960_CriticalLevel, Format, ##Arguments)
2169
2170#define DAC960_UserCritical(Format, Arguments...) \
2171 DAC960_Message(DAC960_UserCriticalLevel, Format, ##Arguments)
2172
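/*
  Usage note (illustrative, assuming the DAC960_Message() helper defined in
  DAC960.c): the Controller pointer is conventionally passed as the final
  argument so the message can be prefixed with the controller name, e.g.

      DAC960_Error("Unable to allocate DMA mapped memory\n", Controller);
*/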
2173
2174struct DAC960_privdata {
2175 DAC960_HardwareType_T HardwareType;
2176 DAC960_FirmwareType_T FirmwareType;
2177 irqreturn_t (*InterruptHandler)(int, void *, struct pt_regs *);
2178 unsigned int MemoryWindowSize;
2179};
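/*
  DAC960.c is expected to define one DAC960_privdata instance per supported
  board family and reference it from its PCI device ID table.  A minimal
  sketch for the BA series follows; DAC960_BA_InterruptHandler is assumed to
  be declared in the driver source, not in this header.

      static struct DAC960_privdata DAC960_BA_privdata = {
            .HardwareType =     DAC960_BA_Controller,
            .FirmwareType =     DAC960_V2_Controller,
            .InterruptHandler = DAC960_BA_InterruptHandler,
            .MemoryWindowSize = DAC960_BA_RegisterWindowSize,
      };
*/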
2180
2181
2182/*
2183 Define the DAC960 V1 Firmware Controller Status Mailbox structure.
2184*/
2185
2186typedef union DAC960_V1_StatusMailbox
2187{
2188 unsigned int Word; /* Word 0 */
2189 struct {
2190 DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 0 */
2191 unsigned char :7; /* Byte 1 Bits 0-6 */
2192 boolean Valid:1; /* Byte 1 Bit 7 */
2193 DAC960_V1_CommandStatus_T CommandStatus; /* Bytes 2-3 */
2194 } Fields;
2195}
2196DAC960_V1_StatusMailbox_T;
2197
2198
2199/*
2200 Define the DAC960 V2 Firmware Controller Status Mailbox structure.
2201*/
2202
2203typedef union DAC960_V2_StatusMailbox
2204{
2205 unsigned int Words[2]; /* Words 0-1 */
2206 struct {
2207 DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
2208 DAC960_V2_CommandStatus_T CommandStatus; /* Byte 2 */
2209 unsigned char RequestSenseLength; /* Byte 3 */
2210 int DataTransferResidue; /* Bytes 4-7 */
2211 } Fields;
2212}
2213DAC960_V2_StatusMailbox_T;
2214
2215
2216/*
2217 Define the DAC960 Driver Command Types.
2218*/
2219
2220typedef enum
2221{
2222 DAC960_ReadCommand = 1,
2223 DAC960_WriteCommand = 2,
2224 DAC960_ReadRetryCommand = 3,
2225 DAC960_WriteRetryCommand = 4,
2226 DAC960_MonitoringCommand = 5,
2227 DAC960_ImmediateCommand = 6,
2228 DAC960_QueuedCommand = 7
2229}
2230DAC960_CommandType_T;
2231
2232
2233/*
2234 Define the DAC960 Driver Command structure.
2235*/
2236
2237typedef struct DAC960_Command
2238{
2239 int CommandIdentifier;
2240 DAC960_CommandType_T CommandType;
2241 struct DAC960_Controller *Controller;
2242 struct DAC960_Command *Next;
2243 struct completion *Completion;
2244 unsigned int LogicalDriveNumber;
2245 unsigned int BlockNumber;
2246 unsigned int BlockCount;
2247 unsigned int SegmentCount;
2248 int DmaDirection;
2249 struct scatterlist *cmd_sglist;
2250 struct request *Request;
2251 union {
2252 struct {
2253 DAC960_V1_CommandMailbox_T CommandMailbox;
2254 DAC960_V1_KernelCommand_T *KernelCommand;
2255 DAC960_V1_CommandStatus_T CommandStatus;
2256 DAC960_V1_ScatterGatherSegment_T *ScatterGatherList;
2257 dma_addr_t ScatterGatherListDMA;
2258 struct scatterlist ScatterList[DAC960_V1_ScatterGatherLimit];
2259 unsigned int EndMarker[0];
2260 } V1;
2261 struct {
2262 DAC960_V2_CommandMailbox_T CommandMailbox;
2263 DAC960_V2_KernelCommand_T *KernelCommand;
2264 DAC960_V2_CommandStatus_T CommandStatus;
2265 unsigned char RequestSenseLength;
2266 int DataTransferResidue;
2267 DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
2268 dma_addr_t ScatterGatherListDMA;
2269 DAC960_SCSI_RequestSense_T *RequestSense;
2270 dma_addr_t RequestSenseDMA;
2271 struct scatterlist ScatterList[DAC960_V2_ScatterGatherLimit];
2272 unsigned int EndMarker[0];
2273 } V2;
2274 } FW;
2275}
2276DAC960_Command_T;
2277
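/*
  The zero length EndMarker[] members above presumably allow the driver to
  size its command allocations with offsetof(DAC960_Command_T, V1.EndMarker)
  or offsetof(DAC960_Command_T, V2.EndMarker), so that V1 controllers do not
  pay for the larger V2 scatter/gather and request sense fields.  This is an
  assumption about DAC960.c, not something stated in this header.
*/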
2278
2279/*
2280 Define the DAC960 Driver Controller structure.
2281*/
2282
2283typedef struct DAC960_Controller
2284{
2285 void __iomem *BaseAddress;
2286 void __iomem *MemoryMappedAddress;
2287 DAC960_FirmwareType_T FirmwareType;
2288 DAC960_HardwareType_T HardwareType;
2289 DAC960_IO_Address_T IO_Address;
2290 DAC960_PCI_Address_T PCI_Address;
2291 struct pci_dev *PCIDevice;
2292 unsigned char ControllerNumber;
2293 unsigned char ControllerName[4];
2294 unsigned char ModelName[20];
2295 unsigned char FullModelName[28];
2296 unsigned char FirmwareVersion[12];
2297 unsigned char Bus;
2298 unsigned char Device;
2299 unsigned char Function;
2300 unsigned char IRQ_Channel;
2301 unsigned char Channels;
2302 unsigned char Targets;
2303 unsigned char MemorySize;
2304 unsigned char LogicalDriveCount;
2305 unsigned short CommandAllocationGroupSize;
2306 unsigned short ControllerQueueDepth;
2307 unsigned short DriverQueueDepth;
2308 unsigned short MaxBlocksPerCommand;
2309 unsigned short ControllerScatterGatherLimit;
2310 unsigned short DriverScatterGatherLimit;
2311 u64 BounceBufferLimit;
2312 unsigned int CombinedStatusBufferLength;
2313 unsigned int InitialStatusLength;
2314 unsigned int CurrentStatusLength;
2315 unsigned int ProgressBufferLength;
2316 unsigned int UserStatusLength;
2317 struct dma_loaf DmaPages;
2318 unsigned long MonitoringTimerCount;
2319 unsigned long PrimaryMonitoringTime;
2320 unsigned long SecondaryMonitoringTime;
2321 unsigned long ShutdownMonitoringTimer;
2322 unsigned long LastProgressReportTime;
2323 unsigned long LastCurrentStatusTime;
2324 boolean ControllerInitialized;
2325 boolean MonitoringCommandDeferred;
2326 boolean EphemeralProgressMessage;
2327 boolean DriveSpinUpMessageDisplayed;
2328 boolean MonitoringAlertMode;
2329 boolean SuppressEnclosureMessages;
2330 struct timer_list MonitoringTimer;
2331 struct gendisk *disks[DAC960_MaxLogicalDrives];
2332 struct pci_pool *ScatterGatherPool;
2333 DAC960_Command_T *FreeCommands;
2334 unsigned char *CombinedStatusBuffer;
2335 unsigned char *CurrentStatusBuffer;
2336 struct request_queue *RequestQueue[DAC960_MaxLogicalDrives];
2337 int req_q_index;
2338 spinlock_t queue_lock;
2339 wait_queue_head_t CommandWaitQueue;
2340 wait_queue_head_t HealthStatusWaitQueue;
2341 DAC960_Command_T InitialCommand;
2342 DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth];
2343 struct proc_dir_entry *ControllerProcEntry;
2344 boolean LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives];
2345 void (*QueueCommand)(DAC960_Command_T *Command);
2346 boolean (*ReadControllerConfiguration)(struct DAC960_Controller *);
2347 boolean (*ReadDeviceConfiguration)(struct DAC960_Controller *);
2348 boolean (*ReportDeviceConfiguration)(struct DAC960_Controller *);
2349 void (*QueueReadWriteCommand)(DAC960_Command_T *Command);
2350 union {
2351 struct {
2352 unsigned char GeometryTranslationHeads;
2353 unsigned char GeometryTranslationSectors;
2354 unsigned char PendingRebuildFlag;
2355 unsigned short StripeSize;
2356 unsigned short SegmentSize;
2357 unsigned short NewEventLogSequenceNumber;
2358 unsigned short OldEventLogSequenceNumber;
2359 unsigned short DeviceStateChannel;
2360 unsigned short DeviceStateTargetID;
2361 boolean DualModeMemoryMailboxInterface;
2362 boolean BackgroundInitializationStatusSupported;
2363 boolean SAFTE_EnclosureManagementEnabled;
2364 boolean NeedLogicalDriveInformation;
2365 boolean NeedErrorTableInformation;
2366 boolean NeedDeviceStateInformation;
2367 boolean NeedDeviceInquiryInformation;
2368 boolean NeedDeviceSerialNumberInformation;
2369 boolean NeedRebuildProgress;
2370 boolean NeedConsistencyCheckProgress;
2371 boolean NeedBackgroundInitializationStatus;
2372 boolean StartDeviceStateScan;
2373 boolean RebuildProgressFirst;
2374 boolean RebuildFlagPending;
2375 boolean RebuildStatusPending;
2376
2377 dma_addr_t FirstCommandMailboxDMA;
2378 DAC960_V1_CommandMailbox_T *FirstCommandMailbox;
2379 DAC960_V1_CommandMailbox_T *LastCommandMailbox;
2380 DAC960_V1_CommandMailbox_T *NextCommandMailbox;
2381 DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1;
2382 DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2;
2383
2384 dma_addr_t FirstStatusMailboxDMA;
2385 DAC960_V1_StatusMailbox_T *FirstStatusMailbox;
2386 DAC960_V1_StatusMailbox_T *LastStatusMailbox;
2387 DAC960_V1_StatusMailbox_T *NextStatusMailbox;
2388
2389 DAC960_V1_DCDB_T *MonitoringDCDB;
2390 dma_addr_t MonitoringDCDB_DMA;
2391
2392 DAC960_V1_Enquiry_T Enquiry;
2393 DAC960_V1_Enquiry_T *NewEnquiry;
2394 dma_addr_t NewEnquiryDMA;
2395
2396 DAC960_V1_ErrorTable_T ErrorTable;
2397 DAC960_V1_ErrorTable_T *NewErrorTable;
2398 dma_addr_t NewErrorTableDMA;
2399
2400 DAC960_V1_EventLogEntry_T *EventLogEntry;
2401 dma_addr_t EventLogEntryDMA;
2402
2403 DAC960_V1_RebuildProgress_T *RebuildProgress;
2404 dma_addr_t RebuildProgressDMA;
2405 DAC960_V1_CommandStatus_T LastRebuildStatus;
2406 DAC960_V1_CommandStatus_T PendingRebuildStatus;
2407
2408 DAC960_V1_LogicalDriveInformationArray_T LogicalDriveInformation;
2409 DAC960_V1_LogicalDriveInformationArray_T *NewLogicalDriveInformation;
2410 dma_addr_t NewLogicalDriveInformationDMA;
2411
2412 DAC960_V1_BackgroundInitializationStatus_T
2413 *BackgroundInitializationStatus;
2414 dma_addr_t BackgroundInitializationStatusDMA;
2415 DAC960_V1_BackgroundInitializationStatus_T
2416 LastBackgroundInitializationStatus;
2417
2418 DAC960_V1_DeviceState_T
2419 DeviceState[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
2420 DAC960_V1_DeviceState_T *NewDeviceState;
2421 dma_addr_t NewDeviceStateDMA;
2422
2423 DAC960_SCSI_Inquiry_T
2424 InquiryStandardData[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
2425 DAC960_SCSI_Inquiry_T *NewInquiryStandardData;
2426 dma_addr_t NewInquiryStandardDataDMA;
2427
2428 DAC960_SCSI_Inquiry_UnitSerialNumber_T
2429 InquiryUnitSerialNumber[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
2430 DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
2431 dma_addr_t NewInquiryUnitSerialNumberDMA;
2432
2433 int DeviceResetCount[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
2434 boolean DirectCommandActive[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
2435 } V1;
2436 struct {
2437 unsigned int StatusChangeCounter;
2438 unsigned int NextEventSequenceNumber;
2439 unsigned int PhysicalDeviceIndex;
2440 boolean NeedLogicalDeviceInformation;
2441 boolean NeedPhysicalDeviceInformation;
2442 boolean NeedDeviceSerialNumberInformation;
2443 boolean StartLogicalDeviceInformationScan;
2444 boolean StartPhysicalDeviceInformationScan;
2445 struct pci_pool *RequestSensePool;
2446
2447 dma_addr_t FirstCommandMailboxDMA;
2448 DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
2449 DAC960_V2_CommandMailbox_T *LastCommandMailbox;
2450 DAC960_V2_CommandMailbox_T *NextCommandMailbox;
2451 DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1;
2452 DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2;
2453
2454 dma_addr_t FirstStatusMailboxDMA;
2455 DAC960_V2_StatusMailbox_T *FirstStatusMailbox;
2456 DAC960_V2_StatusMailbox_T *LastStatusMailbox;
2457 DAC960_V2_StatusMailbox_T *NextStatusMailbox;
2458
2459 dma_addr_t HealthStatusBufferDMA;
2460 DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer;
2461
2462 DAC960_V2_ControllerInfo_T ControllerInformation;
2463 DAC960_V2_ControllerInfo_T *NewControllerInformation;
2464 dma_addr_t NewControllerInformationDMA;
2465
2466 DAC960_V2_LogicalDeviceInfo_T
2467 *LogicalDeviceInformation[DAC960_MaxLogicalDrives];
2468 DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation;
2469 dma_addr_t NewLogicalDeviceInformationDMA;
2470
2471 DAC960_V2_PhysicalDeviceInfo_T
2472 *PhysicalDeviceInformation[DAC960_V2_MaxPhysicalDevices];
2473 DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation;
2474 dma_addr_t NewPhysicalDeviceInformationDMA;
2475
2476 DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
2477 dma_addr_t NewInquiryUnitSerialNumberDMA;
2478 DAC960_SCSI_Inquiry_UnitSerialNumber_T
2479 *InquiryUnitSerialNumber[DAC960_V2_MaxPhysicalDevices];
2480
2481 DAC960_V2_Event_T *Event;
2482 dma_addr_t EventDMA;
2483
2484 DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice;
2485 dma_addr_t PhysicalToLogicalDeviceDMA;
2486
2487 DAC960_V2_PhysicalDevice_T
2488 LogicalDriveToVirtualDevice[DAC960_MaxLogicalDrives];
2489 boolean LogicalDriveFoundDuringScan[DAC960_MaxLogicalDrives];
2490 } V2;
2491 } FW;
2492 unsigned char ProgressBuffer[DAC960_ProgressBufferSize];
2493 unsigned char UserStatusBuffer[DAC960_UserMessageSize];
2494}
2495DAC960_Controller_T;
2496
2497
2498/*
2499 Simplify access to Firmware Version Dependent Data Structure Components
2500 and Functions.
2501*/
2502
2503#define V1 FW.V1
2504#define V2 FW.V2
2505#define DAC960_QueueCommand(Command) \
2506 (Controller->QueueCommand)(Command)
2507#define DAC960_ReadControllerConfiguration(Controller) \
2508 (Controller->ReadControllerConfiguration)(Controller)
2509#define DAC960_ReadDeviceConfiguration(Controller) \
2510 (Controller->ReadDeviceConfiguration)(Controller)
2511#define DAC960_ReportDeviceConfiguration(Controller) \
2512 (Controller->ReportDeviceConfiguration)(Controller)
2513#define DAC960_QueueReadWriteCommand(Command) \
2514 (Controller->QueueReadWriteCommand)(Command)
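/*
  With the shorthands above, firmware version dependent members can be
  written uniformly; for example Controller->V2.NextStatusMailbox expands to
  Controller->FW.V2.NextStatusMailbox, and DAC960_QueueCommand(Command)
  calls through the per-controller QueueCommand method (a variable named
  Controller must be in scope at the call site).
*/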
2515
2516/*
2517 * dma_addr_writeql is provided to write dma_addr_t types
2518 * to a 64-bit pci address space register. The controller
2519 * will accept having the register written as two 32-bit
2520 * values.
2521 *
2522 * In HIGHMEM kernels, dma_addr_t is a 64-bit value.
 2523 * Without HIGHMEM, dma_addr_t is a 32-bit value.
2524 *
2525 * The compiler should always fix up the assignment
2526 * to u.wq appropriately, depending upon the size of
2527 * dma_addr_t.
2528 */
2529static inline
2530void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
2531{
2532 union {
2533 u64 wq;
2534 uint wl[2];
2535 } u;
2536
2537 u.wq = addr;
2538
2539 writel(u.wl[0], write_address);
2540 writel(u.wl[1], write_address + 4);
2541}
2542
2543/*
2544 Define the DAC960 BA Series Controller Interface Register Offsets.
2545*/
2546
2547#define DAC960_BA_RegisterWindowSize 0x80
2548
2549typedef enum
2550{
2551 DAC960_BA_InboundDoorBellRegisterOffset = 0x60,
2552 DAC960_BA_OutboundDoorBellRegisterOffset = 0x61,
2553 DAC960_BA_InterruptStatusRegisterOffset = 0x30,
2554 DAC960_BA_InterruptMaskRegisterOffset = 0x34,
2555 DAC960_BA_CommandMailboxBusAddressOffset = 0x50,
2556 DAC960_BA_CommandStatusOffset = 0x58,
2557 DAC960_BA_ErrorStatusRegisterOffset = 0x63
2558}
2559DAC960_BA_RegisterOffsets_T;
2560
2561
2562/*
2563 Define the structure of the DAC960 BA Series Inbound Door Bell Register.
2564*/
2565
2566typedef union DAC960_BA_InboundDoorBellRegister
2567{
2568 unsigned char All;
2569 struct {
2570 boolean HardwareMailboxNewCommand:1; /* Bit 0 */
2571 boolean AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
2572 boolean GenerateInterrupt:1; /* Bit 2 */
2573 boolean ControllerReset:1; /* Bit 3 */
2574 boolean MemoryMailboxNewCommand:1; /* Bit 4 */
2575 unsigned char :3; /* Bits 5-7 */
2576 } Write;
2577 struct {
2578 boolean HardwareMailboxEmpty:1; /* Bit 0 */
2579 boolean InitializationNotInProgress:1; /* Bit 1 */
2580 unsigned char :6; /* Bits 2-7 */
2581 } Read;
2582}
2583DAC960_BA_InboundDoorBellRegister_T;
2584
2585
2586/*
2587 Define the structure of the DAC960 BA Series Outbound Door Bell Register.
2588*/
2589
2590typedef union DAC960_BA_OutboundDoorBellRegister
2591{
2592 unsigned char All;
2593 struct {
2594 boolean AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
2595 boolean AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
2596 unsigned char :6; /* Bits 2-7 */
2597 } Write;
2598 struct {
2599 boolean HardwareMailboxStatusAvailable:1; /* Bit 0 */
2600 boolean MemoryMailboxStatusAvailable:1; /* Bit 1 */
2601 unsigned char :6; /* Bits 2-7 */
2602 } Read;
2603}
2604DAC960_BA_OutboundDoorBellRegister_T;
2605
2606
2607/*
2608 Define the structure of the DAC960 BA Series Interrupt Mask Register.
2609*/
2610
2611typedef union DAC960_BA_InterruptMaskRegister
2612{
2613 unsigned char All;
2614 struct {
2615 unsigned int :2; /* Bits 0-1 */
2616 boolean DisableInterrupts:1; /* Bit 2 */
2617 boolean DisableInterruptsI2O:1; /* Bit 3 */
2618 unsigned int :4; /* Bits 4-7 */
2619 } Bits;
2620}
2621DAC960_BA_InterruptMaskRegister_T;
2622
2623
2624/*
2625 Define the structure of the DAC960 BA Series Error Status Register.
2626*/
2627
2628typedef union DAC960_BA_ErrorStatusRegister
2629{
2630 unsigned char All;
2631 struct {
2632 unsigned int :2; /* Bits 0-1 */
2633 boolean ErrorStatusPending:1; /* Bit 2 */
2634 unsigned int :5; /* Bits 3-7 */
2635 } Bits;
2636}
2637DAC960_BA_ErrorStatusRegister_T;
2638
2639
2640/*
2641 Define inline functions to provide an abstraction for reading and writing the
2642 DAC960 BA Series Controller Interface Registers.
2643*/
2644
2645static inline
2646void DAC960_BA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
2647{
2648 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2649 InboundDoorBellRegister.All = 0;
2650 InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
2651 writeb(InboundDoorBellRegister.All,
2652 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2653}
2654
2655static inline
2656void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
2657{
2658 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2659 InboundDoorBellRegister.All = 0;
2660 InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
2661 writeb(InboundDoorBellRegister.All,
2662 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2663}
2664
2665static inline
2666void DAC960_BA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
2667{
2668 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2669 InboundDoorBellRegister.All = 0;
2670 InboundDoorBellRegister.Write.GenerateInterrupt = true;
2671 writeb(InboundDoorBellRegister.All,
2672 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2673}
2674
2675static inline
2676void DAC960_BA_ControllerReset(void __iomem *ControllerBaseAddress)
2677{
2678 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2679 InboundDoorBellRegister.All = 0;
2680 InboundDoorBellRegister.Write.ControllerReset = true;
2681 writeb(InboundDoorBellRegister.All,
2682 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2683}
2684
2685static inline
2686void DAC960_BA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
2687{
2688 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2689 InboundDoorBellRegister.All = 0;
2690 InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
2691 writeb(InboundDoorBellRegister.All,
2692 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2693}
2694
2695static inline
2696boolean DAC960_BA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
2697{
2698 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2699 InboundDoorBellRegister.All =
2700 readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2701 return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
2702}
2703
2704static inline
2705boolean DAC960_BA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
2706{
2707 DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
2708 InboundDoorBellRegister.All =
2709 readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
2710 return !InboundDoorBellRegister.Read.InitializationNotInProgress;
2711}
2712
2713static inline
2714void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
2715{
2716 DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
2717 OutboundDoorBellRegister.All = 0;
2718 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
2719 writeb(OutboundDoorBellRegister.All,
2720 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
2721}
2722
2723static inline
2724void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
2725{
2726 DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
2727 OutboundDoorBellRegister.All = 0;
2728 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
2729 writeb(OutboundDoorBellRegister.All,
2730 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
2731}
2732
2733static inline
2734void DAC960_BA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
2735{
2736 DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
2737 OutboundDoorBellRegister.All = 0;
2738 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
2739 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
2740 writeb(OutboundDoorBellRegister.All,
2741 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
2742}
2743
2744static inline
2745boolean DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
2746{
2747 DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
2748 OutboundDoorBellRegister.All =
2749 readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
2750 return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
2751}
2752
2753static inline
2754boolean DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
2755{
2756 DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
2757 OutboundDoorBellRegister.All =
2758 readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
2759 return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
2760}
2761
2762static inline
2763void DAC960_BA_EnableInterrupts(void __iomem *ControllerBaseAddress)
2764{
2765 DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
2766 InterruptMaskRegister.All = 0xFF;
2767 InterruptMaskRegister.Bits.DisableInterrupts = false;
2768 InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
2769 writeb(InterruptMaskRegister.All,
2770 ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
2771}
2772
2773static inline
2774void DAC960_BA_DisableInterrupts(void __iomem *ControllerBaseAddress)
2775{
2776 DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
2777 InterruptMaskRegister.All = 0xFF;
2778 InterruptMaskRegister.Bits.DisableInterrupts = true;
2779 InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
2780 writeb(InterruptMaskRegister.All,
2781 ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
2782}
2783
2784static inline
2785boolean DAC960_BA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
2786{
2787 DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
2788 InterruptMaskRegister.All =
2789 readb(ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
2790 return !InterruptMaskRegister.Bits.DisableInterrupts;
2791}
2792
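/*
  Ordering note: the routine below copies Words[1..15] of the command first
  and stores Words[0] (Command Identifier and Opcode) last, separated by
  write barriers, so the controller never observes a partially written
  memory mailbox entry.
*/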
2793static inline
2794void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
2795 *MemoryCommandMailbox,
2796 DAC960_V2_CommandMailbox_T
2797 *CommandMailbox)
2798{
2799 memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
2800 sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
2801 wmb();
2802 MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
2803 mb();
2804}
2805
2806
2807static inline
2808void DAC960_BA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
2809 dma_addr_t CommandMailboxDMA)
2810{
2811 dma_addr_writeql(CommandMailboxDMA,
2812 ControllerBaseAddress +
2813 DAC960_BA_CommandMailboxBusAddressOffset);
2814}
2815
2816static inline DAC960_V2_CommandIdentifier_T
2817DAC960_BA_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
2818{
2819 return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset);
2820}
2821
2822static inline DAC960_V2_CommandStatus_T
2823DAC960_BA_ReadCommandStatus(void __iomem *ControllerBaseAddress)
2824{
2825 return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset + 2);
2826}
2827
2828static inline boolean
2829DAC960_BA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
2830 unsigned char *ErrorStatus,
2831 unsigned char *Parameter0,
2832 unsigned char *Parameter1)
2833{
2834 DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister;
2835 ErrorStatusRegister.All =
2836 readb(ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
2837 if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
2838 ErrorStatusRegister.Bits.ErrorStatusPending = false;
2839 *ErrorStatus = ErrorStatusRegister.All;
2840 *Parameter0 =
2841 readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 0);
2842 *Parameter1 =
2843 readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 1);
2844 writeb(0xFF, ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
2845 return true;
2846}
2847
2848
2849/*
2850 Define the DAC960 LP Series Controller Interface Register Offsets.
2851*/
2852
2853#define DAC960_LP_RegisterWindowSize 0x80
2854
2855typedef enum
2856{
2857 DAC960_LP_InboundDoorBellRegisterOffset = 0x20,
2858 DAC960_LP_OutboundDoorBellRegisterOffset = 0x2C,
2859 DAC960_LP_InterruptStatusRegisterOffset = 0x30,
2860 DAC960_LP_InterruptMaskRegisterOffset = 0x34,
2861 DAC960_LP_CommandMailboxBusAddressOffset = 0x10,
2862 DAC960_LP_CommandStatusOffset = 0x18,
2863 DAC960_LP_ErrorStatusRegisterOffset = 0x2E
2864}
2865DAC960_LP_RegisterOffsets_T;
2866
2867
2868/*
2869 Define the structure of the DAC960 LP Series Inbound Door Bell Register.
2870*/
2871
2872typedef union DAC960_LP_InboundDoorBellRegister
2873{
2874 unsigned char All;
2875 struct {
2876 boolean HardwareMailboxNewCommand:1; /* Bit 0 */
2877 boolean AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
2878 boolean GenerateInterrupt:1; /* Bit 2 */
2879 boolean ControllerReset:1; /* Bit 3 */
2880 boolean MemoryMailboxNewCommand:1; /* Bit 4 */
2881 unsigned char :3; /* Bits 5-7 */
2882 } Write;
2883 struct {
2884 boolean HardwareMailboxFull:1; /* Bit 0 */
2885 boolean InitializationInProgress:1; /* Bit 1 */
2886 unsigned char :6; /* Bits 2-7 */
2887 } Read;
2888}
2889DAC960_LP_InboundDoorBellRegister_T;
2890
2891
2892/*
2893 Define the structure of the DAC960 LP Series Outbound Door Bell Register.
2894*/
2895
2896typedef union DAC960_LP_OutboundDoorBellRegister
2897{
2898 unsigned char All;
2899 struct {
2900 boolean AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
2901 boolean AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
2902 unsigned char :6; /* Bits 2-7 */
2903 } Write;
2904 struct {
2905 boolean HardwareMailboxStatusAvailable:1; /* Bit 0 */
2906 boolean MemoryMailboxStatusAvailable:1; /* Bit 1 */
2907 unsigned char :6; /* Bits 2-7 */
2908 } Read;
2909}
2910DAC960_LP_OutboundDoorBellRegister_T;
2911
2912
2913/*
2914 Define the structure of the DAC960 LP Series Interrupt Mask Register.
2915*/
2916
2917typedef union DAC960_LP_InterruptMaskRegister
2918{
2919 unsigned char All;
2920 struct {
2921 unsigned int :2; /* Bits 0-1 */
2922 boolean DisableInterrupts:1; /* Bit 2 */
2923 unsigned int :5; /* Bits 3-7 */
2924 } Bits;
2925}
2926DAC960_LP_InterruptMaskRegister_T;
2927
2928
2929/*
2930 Define the structure of the DAC960 LP Series Error Status Register.
2931*/
2932
2933typedef union DAC960_LP_ErrorStatusRegister
2934{
2935 unsigned char All;
2936 struct {
2937 unsigned int :2; /* Bits 0-1 */
2938 boolean ErrorStatusPending:1; /* Bit 2 */
2939 unsigned int :5; /* Bits 3-7 */
2940 } Bits;
2941}
2942DAC960_LP_ErrorStatusRegister_T;
2943
2944
2945/*
2946 Define inline functions to provide an abstraction for reading and writing the
2947 DAC960 LP Series Controller Interface Registers.
2948*/
2949
2950static inline
2951void DAC960_LP_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
2952{
2953 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
2954 InboundDoorBellRegister.All = 0;
2955 InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
2956 writeb(InboundDoorBellRegister.All,
2957 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
2958}
2959
2960static inline
2961void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
2962{
2963 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
2964 InboundDoorBellRegister.All = 0;
2965 InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
2966 writeb(InboundDoorBellRegister.All,
2967 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
2968}
2969
2970static inline
2971void DAC960_LP_GenerateInterrupt(void __iomem *ControllerBaseAddress)
2972{
2973 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
2974 InboundDoorBellRegister.All = 0;
2975 InboundDoorBellRegister.Write.GenerateInterrupt = true;
2976 writeb(InboundDoorBellRegister.All,
2977 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
2978}
2979
2980static inline
2981void DAC960_LP_ControllerReset(void __iomem *ControllerBaseAddress)
2982{
2983 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
2984 InboundDoorBellRegister.All = 0;
2985 InboundDoorBellRegister.Write.ControllerReset = true;
2986 writeb(InboundDoorBellRegister.All,
2987 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
2988}
2989
2990static inline
2991void DAC960_LP_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
2992{
2993 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
2994 InboundDoorBellRegister.All = 0;
2995 InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
2996 writeb(InboundDoorBellRegister.All,
2997 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
2998}
2999
3000static inline
3001boolean DAC960_LP_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
3002{
3003 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
3004 InboundDoorBellRegister.All =
3005 readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
3006 return InboundDoorBellRegister.Read.HardwareMailboxFull;
3007}
3008
3009static inline
3010boolean DAC960_LP_InitializationInProgressP(void __iomem *ControllerBaseAddress)
3011{
3012 DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
3013 InboundDoorBellRegister.All =
3014 readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
3015 return InboundDoorBellRegister.Read.InitializationInProgress;
3016}
3017
3018static inline
3019void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
3020{
3021 DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3022 OutboundDoorBellRegister.All = 0;
3023 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3024 writeb(OutboundDoorBellRegister.All,
3025 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
3026}
3027
3028static inline
3029void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
3030{
3031 DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3032 OutboundDoorBellRegister.All = 0;
3033 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3034 writeb(OutboundDoorBellRegister.All,
3035 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
3036}
3037
3038static inline
3039void DAC960_LP_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
3040{
3041 DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3042 OutboundDoorBellRegister.All = 0;
3043 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3044 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3045 writeb(OutboundDoorBellRegister.All,
3046 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
3047}
3048
3049static inline
3050boolean DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3051{
3052 DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3053 OutboundDoorBellRegister.All =
3054 readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
3055 return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
3056}
3057
3058static inline
3059boolean DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3060{
3061 DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3062 OutboundDoorBellRegister.All =
3063 readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
3064 return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
3065}
3066
3067static inline
3068void DAC960_LP_EnableInterrupts(void __iomem *ControllerBaseAddress)
3069{
3070 DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
3071 InterruptMaskRegister.All = 0xFF;
3072 InterruptMaskRegister.Bits.DisableInterrupts = false;
3073 writeb(InterruptMaskRegister.All,
3074 ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
3075}
3076
3077static inline
3078void DAC960_LP_DisableInterrupts(void __iomem *ControllerBaseAddress)
3079{
3080 DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
3081 InterruptMaskRegister.All = 0xFF;
3082 InterruptMaskRegister.Bits.DisableInterrupts = true;
3083 writeb(InterruptMaskRegister.All,
3084 ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
3085}
3086
3087static inline
3088boolean DAC960_LP_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
3089{
3090 DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
3091 InterruptMaskRegister.All =
3092 readb(ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
3093 return !InterruptMaskRegister.Bits.DisableInterrupts;
3094}
3095
3096static inline
3097void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
3098 *MemoryCommandMailbox,
3099 DAC960_V2_CommandMailbox_T
3100 *CommandMailbox)
3101{
3102 memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
3103 sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
3104 wmb();
3105 MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
3106 mb();
3107}
3108
3109static inline
3110void DAC960_LP_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
3111 dma_addr_t CommandMailboxDMA)
3112{
3113 dma_addr_writeql(CommandMailboxDMA,
3114 ControllerBaseAddress +
3115 DAC960_LP_CommandMailboxBusAddressOffset);
3116}
3117
3118static inline DAC960_V2_CommandIdentifier_T
3119DAC960_LP_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
3120{
3121 return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset);
3122}
3123
3124static inline DAC960_V2_CommandStatus_T
3125DAC960_LP_ReadCommandStatus(void __iomem *ControllerBaseAddress)
3126{
3127 return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset + 2);
3128}
3129
3130static inline boolean
3131DAC960_LP_ReadErrorStatus(void __iomem *ControllerBaseAddress,
3132 unsigned char *ErrorStatus,
3133 unsigned char *Parameter0,
3134 unsigned char *Parameter1)
3135{
3136 DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister;
3137 ErrorStatusRegister.All =
3138 readb(ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
3139 if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
3140 ErrorStatusRegister.Bits.ErrorStatusPending = false;
3141 *ErrorStatus = ErrorStatusRegister.All;
3142 *Parameter0 =
3143 readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 0);
3144 *Parameter1 =
3145 readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 1);
3146 writeb(0xFF, ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
3147 return true;
3148}
3149
3150
3151/*
3152 Define the DAC960 LA Series Controller Interface Register Offsets.
3153*/
3154
3155#define DAC960_LA_RegisterWindowSize 0x80
3156
3157typedef enum
3158{
3159 DAC960_LA_InboundDoorBellRegisterOffset = 0x60,
3160 DAC960_LA_OutboundDoorBellRegisterOffset = 0x61,
3161 DAC960_LA_InterruptMaskRegisterOffset = 0x34,
3162 DAC960_LA_CommandOpcodeRegisterOffset = 0x50,
3163 DAC960_LA_CommandIdentifierRegisterOffset = 0x51,
3164 DAC960_LA_MailboxRegister2Offset = 0x52,
3165 DAC960_LA_MailboxRegister3Offset = 0x53,
3166 DAC960_LA_MailboxRegister4Offset = 0x54,
3167 DAC960_LA_MailboxRegister5Offset = 0x55,
3168 DAC960_LA_MailboxRegister6Offset = 0x56,
3169 DAC960_LA_MailboxRegister7Offset = 0x57,
3170 DAC960_LA_MailboxRegister8Offset = 0x58,
3171 DAC960_LA_MailboxRegister9Offset = 0x59,
3172 DAC960_LA_MailboxRegister10Offset = 0x5A,
3173 DAC960_LA_MailboxRegister11Offset = 0x5B,
3174 DAC960_LA_MailboxRegister12Offset = 0x5C,
3175 DAC960_LA_StatusCommandIdentifierRegOffset = 0x5D,
3176 DAC960_LA_StatusRegisterOffset = 0x5E,
3177 DAC960_LA_ErrorStatusRegisterOffset = 0x63
3178}
3179DAC960_LA_RegisterOffsets_T;
3180
3181
3182/*
3183 Define the structure of the DAC960 LA Series Inbound Door Bell Register.
3184*/
3185
3186typedef union DAC960_LA_InboundDoorBellRegister
3187{
3188 unsigned char All;
3189 struct {
3190 boolean HardwareMailboxNewCommand:1; /* Bit 0 */
3191 boolean AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
3192 boolean GenerateInterrupt:1; /* Bit 2 */
3193 boolean ControllerReset:1; /* Bit 3 */
3194 boolean MemoryMailboxNewCommand:1; /* Bit 4 */
3195 unsigned char :3; /* Bits 5-7 */
3196 } Write;
3197 struct {
3198 boolean HardwareMailboxEmpty:1; /* Bit 0 */
3199 boolean InitializationNotInProgress:1; /* Bit 1 */
3200 unsigned char :6; /* Bits 2-7 */
3201 } Read;
3202}
3203DAC960_LA_InboundDoorBellRegister_T;
3204
3205
3206/*
3207 Define the structure of the DAC960 LA Series Outbound Door Bell Register.
3208*/
3209
3210typedef union DAC960_LA_OutboundDoorBellRegister
3211{
3212 unsigned char All;
3213 struct {
3214 boolean AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
3215 boolean AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
3216 unsigned char :6; /* Bits 2-7 */
3217 } Write;
3218 struct {
3219 boolean HardwareMailboxStatusAvailable:1; /* Bit 0 */
3220 boolean MemoryMailboxStatusAvailable:1; /* Bit 1 */
3221 unsigned char :6; /* Bits 2-7 */
3222 } Read;
3223}
3224DAC960_LA_OutboundDoorBellRegister_T;
3225
3226
3227/*
3228 Define the structure of the DAC960 LA Series Interrupt Mask Register.
3229*/
3230
3231typedef union DAC960_LA_InterruptMaskRegister
3232{
3233 unsigned char All;
3234 struct {
3235 unsigned char :2; /* Bits 0-1 */
3236 boolean DisableInterrupts:1; /* Bit 2 */
3237 unsigned char :5; /* Bits 3-7 */
3238 } Bits;
3239}
3240DAC960_LA_InterruptMaskRegister_T;
3241
3242
3243/*
3244 Define the structure of the DAC960 LA Series Error Status Register.
3245*/
3246
3247typedef union DAC960_LA_ErrorStatusRegister
3248{
3249 unsigned char All;
3250 struct {
3251 unsigned int :2; /* Bits 0-1 */
3252 boolean ErrorStatusPending:1; /* Bit 2 */
3253 unsigned int :5; /* Bits 3-7 */
3254 } Bits;
3255}
3256DAC960_LA_ErrorStatusRegister_T;
3257
3258
3259/*
3260 Define inline functions to provide an abstraction for reading and writing the
3261 DAC960 LA Series Controller Interface Registers.
3262*/
3263
3264static inline
3265void DAC960_LA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
3266{
3267 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3268 InboundDoorBellRegister.All = 0;
3269 InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
3270 writeb(InboundDoorBellRegister.All,
3271 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3272}
3273
3274static inline
3275void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
3276{
3277 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3278 InboundDoorBellRegister.All = 0;
3279 InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
3280 writeb(InboundDoorBellRegister.All,
3281 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3282}
3283
3284static inline
3285void DAC960_LA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
3286{
3287 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3288 InboundDoorBellRegister.All = 0;
3289 InboundDoorBellRegister.Write.GenerateInterrupt = true;
3290 writeb(InboundDoorBellRegister.All,
3291 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3292}
3293
3294static inline
3295void DAC960_LA_ControllerReset(void __iomem *ControllerBaseAddress)
3296{
3297 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3298 InboundDoorBellRegister.All = 0;
3299 InboundDoorBellRegister.Write.ControllerReset = true;
3300 writeb(InboundDoorBellRegister.All,
3301 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3302}
3303
3304static inline
3305void DAC960_LA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
3306{
3307 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3308 InboundDoorBellRegister.All = 0;
3309 InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
3310 writeb(InboundDoorBellRegister.All,
3311 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3312}
3313
3314static inline
3315boolean DAC960_LA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
3316{
3317 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3318 InboundDoorBellRegister.All =
3319 readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3320 return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
3321}
3322
3323static inline
3324boolean DAC960_LA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
3325{
3326 DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
3327 InboundDoorBellRegister.All =
3328 readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
3329 return !InboundDoorBellRegister.Read.InitializationNotInProgress;
3330}
3331
3332static inline
3333void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
3334{
3335 DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3336 OutboundDoorBellRegister.All = 0;
3337 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3338 writeb(OutboundDoorBellRegister.All,
3339 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
3340}
3341
3342static inline
3343void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
3344{
3345 DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3346 OutboundDoorBellRegister.All = 0;
3347 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3348 writeb(OutboundDoorBellRegister.All,
3349 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
3350}
3351
3352static inline
3353void DAC960_LA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
3354{
3355 DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3356 OutboundDoorBellRegister.All = 0;
3357 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3358 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3359 writeb(OutboundDoorBellRegister.All,
3360 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
3361}
3362
3363static inline
3364boolean DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3365{
3366 DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3367 OutboundDoorBellRegister.All =
3368 readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
3369 return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
3370}
3371
3372static inline
3373boolean DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3374{
3375 DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3376 OutboundDoorBellRegister.All =
3377 readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
3378 return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
3379}
3380
3381static inline
3382void DAC960_LA_EnableInterrupts(void __iomem *ControllerBaseAddress)
3383{
3384 DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
3385 InterruptMaskRegister.All = 0xFF;
3386 InterruptMaskRegister.Bits.DisableInterrupts = false;
3387 writeb(InterruptMaskRegister.All,
3388 ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
3389}
3390
3391static inline
3392void DAC960_LA_DisableInterrupts(void __iomem *ControllerBaseAddress)
3393{
3394 DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
3395 InterruptMaskRegister.All = 0xFF;
3396 InterruptMaskRegister.Bits.DisableInterrupts = true;
3397 writeb(InterruptMaskRegister.All,
3398 ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
3399}
3400
3401static inline
3402boolean DAC960_LA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
3403{
3404 DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
3405 InterruptMaskRegister.All =
3406 readb(ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
3407 return !InterruptMaskRegister.Bits.DisableInterrupts;
3408}
3409
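/*
  Editorial note, not part of the original header: in the memory mailbox
  helper below, words 1 through 3 of the in-memory mailbox are filled in
  first, and word 0 (the word carrying the command opcode and identifier) is
  written only after a write barrier, apparently so that a controller polling
  word 0 never sees a partially written command.  The PG series helper later
  in this file follows the same pattern.
*/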
3410static inline
3411void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
3412 *MemoryCommandMailbox,
3413 DAC960_V1_CommandMailbox_T
3414 *CommandMailbox)
3415{
3416 MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
3417 MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
3418 MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
3419 wmb();
3420 MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
3421 mb();
3422}
3423
3424static inline
3425void DAC960_LA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
3426 DAC960_V1_CommandMailbox_T *CommandMailbox)
3427{
3428 writel(CommandMailbox->Words[0],
3429 ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
3430 writel(CommandMailbox->Words[1],
3431 ControllerBaseAddress + DAC960_LA_MailboxRegister4Offset);
3432 writel(CommandMailbox->Words[2],
3433 ControllerBaseAddress + DAC960_LA_MailboxRegister8Offset);
3434 writeb(CommandMailbox->Bytes[12],
3435 ControllerBaseAddress + DAC960_LA_MailboxRegister12Offset);
3436}
3437
3438static inline DAC960_V1_CommandIdentifier_T
3439DAC960_LA_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
3440{
3441 return readb(ControllerBaseAddress
3442 + DAC960_LA_StatusCommandIdentifierRegOffset);
3443}
3444
3445static inline DAC960_V1_CommandStatus_T
3446DAC960_LA_ReadStatusRegister(void __iomem *ControllerBaseAddress)
3447{
3448 return readw(ControllerBaseAddress + DAC960_LA_StatusRegisterOffset);
3449}
3450
3451static inline boolean
3452DAC960_LA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
3453 unsigned char *ErrorStatus,
3454 unsigned char *Parameter0,
3455 unsigned char *Parameter1)
3456{
3457 DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister;
3458 ErrorStatusRegister.All =
3459 readb(ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
3460 if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
3461 ErrorStatusRegister.Bits.ErrorStatusPending = false;
3462 *ErrorStatus = ErrorStatusRegister.All;
3463 *Parameter0 =
3464 readb(ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
3465 *Parameter1 =
3466 readb(ControllerBaseAddress + DAC960_LA_CommandIdentifierRegisterOffset);
3467 writeb(0xFF, ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
3468 return true;
3469}
3470
3471/*
3472 Define the DAC960 PG Series Controller Interface Register Offsets.
3473*/
3474
3475#define DAC960_PG_RegisterWindowSize 0x2000
3476
3477typedef enum
3478{
3479 DAC960_PG_InboundDoorBellRegisterOffset = 0x0020,
3480 DAC960_PG_OutboundDoorBellRegisterOffset = 0x002C,
3481 DAC960_PG_InterruptMaskRegisterOffset = 0x0034,
3482 DAC960_PG_CommandOpcodeRegisterOffset = 0x1000,
3483 DAC960_PG_CommandIdentifierRegisterOffset = 0x1001,
3484 DAC960_PG_MailboxRegister2Offset = 0x1002,
3485 DAC960_PG_MailboxRegister3Offset = 0x1003,
3486 DAC960_PG_MailboxRegister4Offset = 0x1004,
3487 DAC960_PG_MailboxRegister5Offset = 0x1005,
3488 DAC960_PG_MailboxRegister6Offset = 0x1006,
3489 DAC960_PG_MailboxRegister7Offset = 0x1007,
3490 DAC960_PG_MailboxRegister8Offset = 0x1008,
3491 DAC960_PG_MailboxRegister9Offset = 0x1009,
3492 DAC960_PG_MailboxRegister10Offset = 0x100A,
3493 DAC960_PG_MailboxRegister11Offset = 0x100B,
3494 DAC960_PG_MailboxRegister12Offset = 0x100C,
3495 DAC960_PG_StatusCommandIdentifierRegOffset = 0x1018,
3496 DAC960_PG_StatusRegisterOffset = 0x101A,
3497 DAC960_PG_ErrorStatusRegisterOffset = 0x103F
3498}
3499DAC960_PG_RegisterOffsets_T;
3500
3501
3502/*
3503 Define the structure of the DAC960 PG Series Inbound Door Bell Register.
3504*/
3505
3506typedef union DAC960_PG_InboundDoorBellRegister
3507{
3508 unsigned int All;
3509 struct {
3510 boolean HardwareMailboxNewCommand:1; /* Bit 0 */
3511 boolean AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
3512 boolean GenerateInterrupt:1; /* Bit 2 */
3513 boolean ControllerReset:1; /* Bit 3 */
3514 boolean MemoryMailboxNewCommand:1; /* Bit 4 */
3515 unsigned int :27; /* Bits 5-31 */
3516 } Write;
3517 struct {
3518 boolean HardwareMailboxFull:1; /* Bit 0 */
3519 boolean InitializationInProgress:1; /* Bit 1 */
3520 unsigned int :30; /* Bits 2-31 */
3521 } Read;
3522}
3523DAC960_PG_InboundDoorBellRegister_T;
3524
3525
3526/*
3527 Define the structure of the DAC960 PG Series Outbound Door Bell Register.
3528*/
3529
3530typedef union DAC960_PG_OutboundDoorBellRegister
3531{
3532 unsigned int All;
3533 struct {
3534 boolean AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
3535 boolean AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
3536 unsigned int :30; /* Bits 2-31 */
3537 } Write;
3538 struct {
3539 boolean HardwareMailboxStatusAvailable:1; /* Bit 0 */
3540 boolean MemoryMailboxStatusAvailable:1; /* Bit 1 */
3541 unsigned int :30; /* Bits 2-31 */
3542 } Read;
3543}
3544DAC960_PG_OutboundDoorBellRegister_T;
3545
3546
3547/*
3548 Define the structure of the DAC960 PG Series Interrupt Mask Register.
3549*/
3550
3551typedef union DAC960_PG_InterruptMaskRegister
3552{
3553 unsigned int All;
3554 struct {
3555 unsigned int MessageUnitInterruptMask1:2; /* Bits 0-1 */
3556 boolean DisableInterrupts:1; /* Bit 2 */
3557 unsigned int MessageUnitInterruptMask2:5; /* Bits 3-7 */
3558 unsigned int Reserved0:24; /* Bits 8-31 */
3559 } Bits;
3560}
3561DAC960_PG_InterruptMaskRegister_T;
3562
3563
3564/*
3565 Define the structure of the DAC960 PG Series Error Status Register.
3566*/
3567
3568typedef union DAC960_PG_ErrorStatusRegister
3569{
3570 unsigned char All;
3571 struct {
3572 unsigned int :2; /* Bits 0-1 */
3573 boolean ErrorStatusPending:1; /* Bit 2 */
3574 unsigned int :5; /* Bits 3-7 */
3575 } Bits;
3576}
3577DAC960_PG_ErrorStatusRegister_T;
3578
3579
3580/*
3581 Define inline functions to provide an abstraction for reading and writing the
3582 DAC960 PG Series Controller Interface Registers.
3583*/
3584
3585static inline
3586void DAC960_PG_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
3587{
3588 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3589 InboundDoorBellRegister.All = 0;
3590 InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
3591 writel(InboundDoorBellRegister.All,
3592 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3593}
3594
3595static inline
3596void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
3597{
3598 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3599 InboundDoorBellRegister.All = 0;
3600 InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
3601 writel(InboundDoorBellRegister.All,
3602 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3603}
3604
3605static inline
3606void DAC960_PG_GenerateInterrupt(void __iomem *ControllerBaseAddress)
3607{
3608 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3609 InboundDoorBellRegister.All = 0;
3610 InboundDoorBellRegister.Write.GenerateInterrupt = true;
3611 writel(InboundDoorBellRegister.All,
3612 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3613}
3614
3615static inline
3616void DAC960_PG_ControllerReset(void __iomem *ControllerBaseAddress)
3617{
3618 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3619 InboundDoorBellRegister.All = 0;
3620 InboundDoorBellRegister.Write.ControllerReset = true;
3621 writel(InboundDoorBellRegister.All,
3622 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3623}
3624
3625static inline
3626void DAC960_PG_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
3627{
3628 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3629 InboundDoorBellRegister.All = 0;
3630 InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
3631 writel(InboundDoorBellRegister.All,
3632 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3633}
3634
3635static inline
3636boolean DAC960_PG_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
3637{
3638 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3639 InboundDoorBellRegister.All =
3640 readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3641 return InboundDoorBellRegister.Read.HardwareMailboxFull;
3642}
3643
3644static inline
3645boolean DAC960_PG_InitializationInProgressP(void __iomem *ControllerBaseAddress)
3646{
3647 DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
3648 InboundDoorBellRegister.All =
3649 readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
3650 return InboundDoorBellRegister.Read.InitializationInProgress;
3651}
3652
3653static inline
3654void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
3655{
3656 DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3657 OutboundDoorBellRegister.All = 0;
3658 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3659 writel(OutboundDoorBellRegister.All,
3660 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
3661}
3662
3663static inline
3664void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
3665{
3666 DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3667 OutboundDoorBellRegister.All = 0;
3668 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3669 writel(OutboundDoorBellRegister.All,
3670 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
3671}
3672
3673static inline
3674void DAC960_PG_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
3675{
3676 DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3677 OutboundDoorBellRegister.All = 0;
3678 OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
3679 OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
3680 writel(OutboundDoorBellRegister.All,
3681 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
3682}
3683
3684static inline
3685boolean DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3686{
3687 DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3688 OutboundDoorBellRegister.All =
3689 readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
3690 return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
3691}
3692
3693static inline
3694boolean DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
3695{
3696 DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3697 OutboundDoorBellRegister.All =
3698 readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
3699 return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
3700}
3701
3702static inline
3703void DAC960_PG_EnableInterrupts(void __iomem *ControllerBaseAddress)
3704{
3705 DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
3706 InterruptMaskRegister.All = 0;
3707 InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
3708 InterruptMaskRegister.Bits.DisableInterrupts = false;
3709 InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
3710 writel(InterruptMaskRegister.All,
3711 ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
3712}
3713
3714static inline
3715void DAC960_PG_DisableInterrupts(void __iomem *ControllerBaseAddress)
3716{
3717 DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
3718 InterruptMaskRegister.All = 0;
3719 InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
3720 InterruptMaskRegister.Bits.DisableInterrupts = true;
3721 InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
3722 writel(InterruptMaskRegister.All,
3723 ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
3724}
3725
3726static inline
3727boolean DAC960_PG_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
3728{
3729 DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
3730 InterruptMaskRegister.All =
3731 readl(ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
3732 return !InterruptMaskRegister.Bits.DisableInterrupts;
3733}
3734
3735static inline
3736void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
3737 *MemoryCommandMailbox,
3738 DAC960_V1_CommandMailbox_T
3739 *CommandMailbox)
3740{
3741 MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
3742 MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
3743 MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
3744 wmb();
3745 MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
3746 mb();
3747}
3748
3749static inline
3750void DAC960_PG_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
3751 DAC960_V1_CommandMailbox_T *CommandMailbox)
3752{
3753 writel(CommandMailbox->Words[0],
3754 ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
3755 writel(CommandMailbox->Words[1],
3756 ControllerBaseAddress + DAC960_PG_MailboxRegister4Offset);
3757 writel(CommandMailbox->Words[2],
3758 ControllerBaseAddress + DAC960_PG_MailboxRegister8Offset);
3759 writeb(CommandMailbox->Bytes[12],
3760 ControllerBaseAddress + DAC960_PG_MailboxRegister12Offset);
3761}
3762
3763static inline DAC960_V1_CommandIdentifier_T
3764DAC960_PG_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
3765{
3766 return readb(ControllerBaseAddress
3767 + DAC960_PG_StatusCommandIdentifierRegOffset);
3768}
3769
3770static inline DAC960_V1_CommandStatus_T
3771DAC960_PG_ReadStatusRegister(void __iomem *ControllerBaseAddress)
3772{
3773 return readw(ControllerBaseAddress + DAC960_PG_StatusRegisterOffset);
3774}
3775
3776static inline boolean
3777DAC960_PG_ReadErrorStatus(void __iomem *ControllerBaseAddress,
3778 unsigned char *ErrorStatus,
3779 unsigned char *Parameter0,
3780 unsigned char *Parameter1)
3781{
3782 DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister;
3783 ErrorStatusRegister.All =
3784 readb(ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
3785 if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
3786 ErrorStatusRegister.Bits.ErrorStatusPending = false;
3787 *ErrorStatus = ErrorStatusRegister.All;
3788 *Parameter0 =
3789 readb(ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
3790 *Parameter1 =
3791 readb(ControllerBaseAddress + DAC960_PG_CommandIdentifierRegisterOffset);
3792 writeb(0, ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
3793 return true;
3794}
3795
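/*
  Editorial sketch, not part of the original header: the PG helpers above are
  the building blocks for pushing one command through the hardware mailbox.
  Assuming plain polling with no timeout handling, and with placeholder
  variable names, a single submission would look roughly like this (DAC960.c
  is expected to add timeouts and to prefer the memory mailbox path for
  normal operation):

    while (DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress))
      udelay(1);
    DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
    DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
    while (!DAC960_PG_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
      udelay(1);
    CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress);
    DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
    DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
*/
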
3796/*
3797 Define the DAC960 PD Series Controller Interface Register Offsets.
3798*/
3799
3800#define DAC960_PD_RegisterWindowSize 0x80
3801
3802typedef enum
3803{
3804 DAC960_PD_CommandOpcodeRegisterOffset = 0x00,
3805 DAC960_PD_CommandIdentifierRegisterOffset = 0x01,
3806 DAC960_PD_MailboxRegister2Offset = 0x02,
3807 DAC960_PD_MailboxRegister3Offset = 0x03,
3808 DAC960_PD_MailboxRegister4Offset = 0x04,
3809 DAC960_PD_MailboxRegister5Offset = 0x05,
3810 DAC960_PD_MailboxRegister6Offset = 0x06,
3811 DAC960_PD_MailboxRegister7Offset = 0x07,
3812 DAC960_PD_MailboxRegister8Offset = 0x08,
3813 DAC960_PD_MailboxRegister9Offset = 0x09,
3814 DAC960_PD_MailboxRegister10Offset = 0x0A,
3815 DAC960_PD_MailboxRegister11Offset = 0x0B,
3816 DAC960_PD_MailboxRegister12Offset = 0x0C,
3817 DAC960_PD_StatusCommandIdentifierRegOffset = 0x0D,
3818 DAC960_PD_StatusRegisterOffset = 0x0E,
3819 DAC960_PD_ErrorStatusRegisterOffset = 0x3F,
3820 DAC960_PD_InboundDoorBellRegisterOffset = 0x40,
3821 DAC960_PD_OutboundDoorBellRegisterOffset = 0x41,
3822 DAC960_PD_InterruptEnableRegisterOffset = 0x43
3823}
3824DAC960_PD_RegisterOffsets_T;
3825
3826
3827/*
3828 Define the structure of the DAC960 PD Series Inbound Door Bell Register.
3829*/
3830
3831typedef union DAC960_PD_InboundDoorBellRegister
3832{
3833 unsigned char All;
3834 struct {
3835 boolean NewCommand:1; /* Bit 0 */
3836 boolean AcknowledgeStatus:1; /* Bit 1 */
3837 boolean GenerateInterrupt:1; /* Bit 2 */
3838 boolean ControllerReset:1; /* Bit 3 */
3839 unsigned char :4; /* Bits 4-7 */
3840 } Write;
3841 struct {
3842 boolean MailboxFull:1; /* Bit 0 */
3843 boolean InitializationInProgress:1; /* Bit 1 */
3844 unsigned char :6; /* Bits 2-7 */
3845 } Read;
3846}
3847DAC960_PD_InboundDoorBellRegister_T;
3848
3849
3850/*
3851 Define the structure of the DAC960 PD Series Outbound Door Bell Register.
3852*/
3853
3854typedef union DAC960_PD_OutboundDoorBellRegister
3855{
3856 unsigned char All;
3857 struct {
3858 boolean AcknowledgeInterrupt:1; /* Bit 0 */
3859 unsigned char :7; /* Bits 1-7 */
3860 } Write;
3861 struct {
3862 boolean StatusAvailable:1; /* Bit 0 */
3863 unsigned char :7; /* Bits 1-7 */
3864 } Read;
3865}
3866DAC960_PD_OutboundDoorBellRegister_T;
3867
3868
3869/*
3870 Define the structure of the DAC960 PD Series Interrupt Enable Register.
3871*/
3872
3873typedef union DAC960_PD_InterruptEnableRegister
3874{
3875 unsigned char All;
3876 struct {
3877 boolean EnableInterrupts:1; /* Bit 0 */
3878 unsigned char :7; /* Bits 1-7 */
3879 } Bits;
3880}
3881DAC960_PD_InterruptEnableRegister_T;
3882
3883
3884/*
3885 Define the structure of the DAC960 PD Series Error Status Register.
3886*/
3887
3888typedef union DAC960_PD_ErrorStatusRegister
3889{
3890 unsigned char All;
3891 struct {
3892 unsigned int :2; /* Bits 0-1 */
3893 boolean ErrorStatusPending:1; /* Bit 2 */
3894 unsigned int :5; /* Bits 3-7 */
3895 } Bits;
3896}
3897DAC960_PD_ErrorStatusRegister_T;
3898
3899
3900/*
3901 Define inline functions to provide an abstraction for reading and writing the
3902 DAC960 PD Series Controller Interface Registers.
3903*/
3904
3905static inline
3906void DAC960_PD_NewCommand(void __iomem *ControllerBaseAddress)
3907{
3908 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3909 InboundDoorBellRegister.All = 0;
3910 InboundDoorBellRegister.Write.NewCommand = true;
3911 writeb(InboundDoorBellRegister.All,
3912 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3913}
3914
3915static inline
3916void DAC960_PD_AcknowledgeStatus(void __iomem *ControllerBaseAddress)
3917{
3918 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3919 InboundDoorBellRegister.All = 0;
3920 InboundDoorBellRegister.Write.AcknowledgeStatus = true;
3921 writeb(InboundDoorBellRegister.All,
3922 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3923}
3924
3925static inline
3926void DAC960_PD_GenerateInterrupt(void __iomem *ControllerBaseAddress)
3927{
3928 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3929 InboundDoorBellRegister.All = 0;
3930 InboundDoorBellRegister.Write.GenerateInterrupt = true;
3931 writeb(InboundDoorBellRegister.All,
3932 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3933}
3934
3935static inline
3936void DAC960_PD_ControllerReset(void __iomem *ControllerBaseAddress)
3937{
3938 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3939 InboundDoorBellRegister.All = 0;
3940 InboundDoorBellRegister.Write.ControllerReset = true;
3941 writeb(InboundDoorBellRegister.All,
3942 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3943}
3944
3945static inline
3946boolean DAC960_PD_MailboxFullP(void __iomem *ControllerBaseAddress)
3947{
3948 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3949 InboundDoorBellRegister.All =
3950 readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3951 return InboundDoorBellRegister.Read.MailboxFull;
3952}
3953
3954static inline
3955boolean DAC960_PD_InitializationInProgressP(void __iomem *ControllerBaseAddress)
3956{
3957 DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
3958 InboundDoorBellRegister.All =
3959 readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
3960 return InboundDoorBellRegister.Read.InitializationInProgress;
3961}
3962
3963static inline
3964void DAC960_PD_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
3965{
3966 DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3967 OutboundDoorBellRegister.All = 0;
3968 OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true;
3969 writeb(OutboundDoorBellRegister.All,
3970 ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
3971}
3972
3973static inline
3974boolean DAC960_PD_StatusAvailableP(void __iomem *ControllerBaseAddress)
3975{
3976 DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
3977 OutboundDoorBellRegister.All =
3978 readb(ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
3979 return OutboundDoorBellRegister.Read.StatusAvailable;
3980}
3981
3982static inline
3983void DAC960_PD_EnableInterrupts(void __iomem *ControllerBaseAddress)
3984{
3985 DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
3986 InterruptEnableRegister.All = 0;
3987 InterruptEnableRegister.Bits.EnableInterrupts = true;
3988 writeb(InterruptEnableRegister.All,
3989 ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
3990}
3991
3992static inline
3993void DAC960_PD_DisableInterrupts(void __iomem *ControllerBaseAddress)
3994{
3995 DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
3996 InterruptEnableRegister.All = 0;
3997 InterruptEnableRegister.Bits.EnableInterrupts = false;
3998 writeb(InterruptEnableRegister.All,
3999 ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
4000}
4001
4002static inline
4003boolean DAC960_PD_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
4004{
4005 DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
4006 InterruptEnableRegister.All =
4007 readb(ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
4008 return InterruptEnableRegister.Bits.EnableInterrupts;
4009}
4010
4011static inline
4012void DAC960_PD_WriteCommandMailbox(void __iomem *ControllerBaseAddress,
4013 DAC960_V1_CommandMailbox_T *CommandMailbox)
4014{
4015 writel(CommandMailbox->Words[0],
4016 ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
4017 writel(CommandMailbox->Words[1],
4018 ControllerBaseAddress + DAC960_PD_MailboxRegister4Offset);
4019 writel(CommandMailbox->Words[2],
4020 ControllerBaseAddress + DAC960_PD_MailboxRegister8Offset);
4021 writeb(CommandMailbox->Bytes[12],
4022 ControllerBaseAddress + DAC960_PD_MailboxRegister12Offset);
4023}
4024
4025static inline DAC960_V1_CommandIdentifier_T
4026DAC960_PD_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
4027{
4028 return readb(ControllerBaseAddress
4029 + DAC960_PD_StatusCommandIdentifierRegOffset);
4030}
4031
4032static inline DAC960_V1_CommandStatus_T
4033DAC960_PD_ReadStatusRegister(void __iomem *ControllerBaseAddress)
4034{
4035 return readw(ControllerBaseAddress + DAC960_PD_StatusRegisterOffset);
4036}
4037
4038static inline boolean
4039DAC960_PD_ReadErrorStatus(void __iomem *ControllerBaseAddress,
4040 unsigned char *ErrorStatus,
4041 unsigned char *Parameter0,
4042 unsigned char *Parameter1)
4043{
4044 DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister;
4045 ErrorStatusRegister.All =
4046 readb(ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
4047 if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
4048 ErrorStatusRegister.Bits.ErrorStatusPending = false;
4049 *ErrorStatus = ErrorStatusRegister.All;
4050 *Parameter0 =
4051 readb(ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
4052 *Parameter1 =
4053 readb(ControllerBaseAddress + DAC960_PD_CommandIdentifierRegisterOffset);
4054 writeb(0, ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
4055 return true;
4056}
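
/*
  Editorial note, not part of the original header: the PD primitives compose
  the same way as the PG sequence sketched earlier, only without the separate
  hardware/memory mailbox paths.  With placeholder names and no timeout
  handling, roughly:

    while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
      udelay(1);
    DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, &CommandMailbox);
    DAC960_PD_NewCommand(ControllerBaseAddress);
    while (!DAC960_PD_StatusAvailableP(ControllerBaseAddress))
      udelay(1);
    CommandStatus = DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
    DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
    DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
*/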
4057
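/*
  Editorial note on the helpers that follow: the DAC960 P series apparently
  uses an older layout for Enquiry data, Device State data and read/write
  command mailboxes than the PD-style format the driver builds internally, so
  commands are translated one way and returned data the other.  Judging from
  the byte shuffling alone, the read/write translation mainly relocates the
  logical drive number between byte 7 (P format) and the bit field in byte 3
  (PD format), moving the two displaced bits along with it.
*/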
4058static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
4059{
4060 memcpy(Enquiry + 132, Enquiry + 36, 64);
4061 memset(Enquiry + 36, 0, 96);
4062}
4063
4064static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
4065{
4066 memcpy(DeviceState + 2, DeviceState + 3, 1);
4067 memcpy(DeviceState + 4, DeviceState + 5, 2);
4068 memcpy(DeviceState + 6, DeviceState + 8, 4);
4069}
4070
4071static inline
4072void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
4073 *CommandMailbox)
4074{
4075 int LogicalDriveNumber = CommandMailbox->Type5.LD.LogicalDriveNumber;
4076 CommandMailbox->Bytes[3] &= 0x7;
4077 CommandMailbox->Bytes[3] |= CommandMailbox->Bytes[7] << 6;
4078 CommandMailbox->Bytes[7] = LogicalDriveNumber;
4079}
4080
4081static inline
4082void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
4083 *CommandMailbox)
4084{
4085 int LogicalDriveNumber = CommandMailbox->Bytes[7];
4086 CommandMailbox->Bytes[7] = CommandMailbox->Bytes[3] >> 6;
4087 CommandMailbox->Bytes[3] &= 0x7;
4088 CommandMailbox->Bytes[3] |= LogicalDriveNumber << 3;
4089}
4090
4091
4092/*
4093 Define prototypes for the forward referenced DAC960 Driver Internal Functions.
4094*/
4095
4096static void DAC960_FinalizeController(DAC960_Controller_T *);
4097static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *);
4098static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *);
4099static void DAC960_RequestFunction(struct request_queue *);
4100static irqreturn_t DAC960_BA_InterruptHandler(int, void *, struct pt_regs *);
4101static irqreturn_t DAC960_LP_InterruptHandler(int, void *, struct pt_regs *);
4102static irqreturn_t DAC960_LA_InterruptHandler(int, void *, struct pt_regs *);
4103static irqreturn_t DAC960_PG_InterruptHandler(int, void *, struct pt_regs *);
4104static irqreturn_t DAC960_PD_InterruptHandler(int, void *, struct pt_regs *);
4105static irqreturn_t DAC960_P_InterruptHandler(int, void *, struct pt_regs *);
4106static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
4107static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
4108static void DAC960_MonitoringTimerFunction(unsigned long);
4109static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
4110 DAC960_Controller_T *, ...);
4111static void DAC960_CreateProcEntries(DAC960_Controller_T *);
4112static void DAC960_DestroyProcEntries(DAC960_Controller_T *);
4113
4114#endif /* DAC960_DriverVersion */
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
new file mode 100644
index 000000000000..e83a1e2e8b15
--- /dev/null
+++ b/drivers/block/Kconfig
@@ -0,0 +1,509 @@
1#
2# Block device driver configuration
3#
4
5menu "Block devices"
6
7config BLK_DEV_FD
8 tristate "Normal floppy disk support"
9 depends on (!ARCH_S390 && !M68K && !IA64 && !UML) || Q40 || (SUN3X && BROKEN)
10 ---help---
11 If you want to use the floppy disk drive(s) of your PC under Linux,
12 say Y. Information about this driver, especially important for IBM
13 Thinkpad users, is contained in <file:Documentation/floppy.txt>.
14 That file also contains the location of the Floppy driver FAQ as
15 well as location of the fdutils package used to configure additional
16 parameters of the driver at run time.
17
18 To compile this driver as a module, choose M here: the
19 module will be called floppy.
20
21config AMIGA_FLOPPY
22 tristate "Amiga floppy support"
23 depends on AMIGA
24
25config ATARI_FLOPPY
26 tristate "Atari floppy support"
27 depends on ATARI
28
29config BLK_DEV_SWIM_IOP
30 bool "Macintosh IIfx/Quadra 900/Quadra 950 floppy support (EXPERIMENTAL)"
31 depends on MAC && EXPERIMENTAL && BROKEN
32 help
33 Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
34 floppy controller on the Macintosh IIfx and Quadra 900/950.
35
36config MAC_FLOPPY
37 tristate "Support for PowerMac floppy"
38 depends on PPC_PMAC && !PPC_PMAC64
39 help
40 If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
41 floppy controller, say Y here. Most commonly found in PowerMacs.
42
43config BLK_DEV_PS2
44 tristate "PS/2 ESDI hard disk support"
45 depends on MCA && MCA_LEGACY && BROKEN
46 help
47 Say Y here if you have a PS/2 machine with a MCA bus and an ESDI
48 hard disk.
49
50 To compile this driver as a module, choose M here: the
51 module will be called ps2esdi.
52
53config AMIGA_Z2RAM
54 tristate "Amiga Zorro II ramdisk support"
55 depends on ZORRO
56 help
57 This enables support for using Chip RAM and Zorro II RAM as a
58 ramdisk or as a swap partition. Say Y if you want to include this
59 driver in the kernel.
60
61 To compile this driver as a module, choose M here: the
62 module will be called z2ram.
63
64config ATARI_ACSI
65 tristate "Atari ACSI support"
66 depends on ATARI && BROKEN
67 ---help---
68 This enables support for the Atari ACSI interface. The driver
69 supports hard disks and CD-ROMs, which have 512-byte sectors, or can
70 be switched to that mode. Due to the ACSI command format, only disks
71 up to 1 GB are supported. Special support for certain ACSI to SCSI
72 adapters, which could relax that, isn't included yet. The ACSI
73 driver is also the basis for certain other drivers for devices
74 attached to the ACSI bus: Atari SLM laser printer, BioNet-100
75 Ethernet, and PAMsNet Ethernet. If you want to use one of these
76 devices, you need ACSI support, too.
77
78 To compile this driver as a module, choose M here: the
79 module will be called acsi.
80
81comment "Some devices (e.g. CD jukebox) support multiple LUNs"
82 depends on ATARI && ATARI_ACSI
83
84config ACSI_MULTI_LUN
85 bool "Probe all LUNs on each ACSI device"
86 depends on ATARI_ACSI
87 help
88	  If you have an ACSI device that supports more than one LUN (Logical
89 Unit Number), e.g. a CD jukebox, you should say Y here so that all
90 will be found by the ACSI driver. An ACSI device with multiple LUNs
91 acts logically like multiple ACSI devices. The vast majority of ACSI
92 devices have only one LUN, and so most people can say N here and
93 should in fact do so, because it is safer.
94
95config ATARI_SLM
96 tristate "Atari SLM laser printer support"
97 depends on ATARI && ATARI_ACSI!=n
98 help
99 If you have an Atari SLM laser printer, say Y to include support for
100 it in the kernel. Otherwise, say N. This driver is also available as
101 a module ( = code which can be inserted in and removed from the
102 running kernel whenever you want). The module will be called
103	  acsi_slm. Be warned: the driver needs a lot of ST-RAM and can cause
104 problems due to that fact!
105
106config BLK_DEV_XD
107 tristate "XT hard disk support"
108 depends on ISA
109 help
110 Very old 8 bit hard disk controllers used in the IBM XT computer
111 will be supported if you say Y here.
112
113 To compile this driver as a module, choose M here: the
114 module will be called xd.
115
116 It's pretty unlikely that you have one of these: say N.
117
118config PARIDE
119 tristate "Parallel port IDE device support"
120 depends on PARPORT
121 ---help---
122 There are many external CD-ROM and disk devices that connect through
123 your computer's parallel port. Most of them are actually IDE devices
124 using a parallel port IDE adapter. This option enables the PARIDE
125 subsystem which contains drivers for many of these external drives.
126 Read <file:Documentation/paride.txt> for more information.
127
128 If you have said Y to the "Parallel-port support" configuration
129 option, you may share a single port between your printer and other
130 parallel port devices. Answer Y to build PARIDE support into your
131 kernel, or M if you would like to build it as a loadable module. If
132 your parallel port support is in a loadable module, you must build
133 PARIDE as a module. If you built PARIDE support into your kernel,
134 you may still build the individual protocol modules and high-level
135 drivers as loadable modules. If you build this support as a module,
136 it will be called paride.
137
138 To use the PARIDE support, you must say Y or M here and also to at
139 least one high-level driver (e.g. "Parallel port IDE disks",
140 "Parallel port ATAPI CD-ROMs", "Parallel port ATAPI disks" etc.) and
141 to at least one protocol driver (e.g. "ATEN EH-100 protocol",
142 "MicroSolutions backpack protocol", "DataStor Commuter protocol"
143 etc.).
144
145source "drivers/block/paride/Kconfig"
146
147config BLK_CPQ_DA
148 tristate "Compaq SMART2 support"
149 depends on PCI
150 help
151 This is the driver for Compaq Smart Array controllers. Everyone
152 using these boards should say Y here. See the file
153 <file:Documentation/cpqarray.txt> for the current list of boards
154 supported by this driver, and for further information on the use of
155 this driver.
156
157config BLK_CPQ_CISS_DA
158 tristate "Compaq Smart Array 5xxx support"
159 depends on PCI
160 help
161 This is the driver for Compaq Smart Array 5xxx controllers.
162 Everyone using these boards should say Y here.
163 See <file:Documentation/cciss.txt> for the current list of
164 boards supported by this driver, and for further information
165 on the use of this driver.
166
167config CISS_SCSI_TAPE
168 bool "SCSI tape drive support for Smart Array 5xxx"
169 depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS
170 help
171 When enabled (Y), this option allows SCSI tape drives and SCSI medium
172 changers (tape robots) to be accessed via a Compaq 5xxx array
173 controller. (See <file:Documentation/cciss.txt> for more details.)
174
175 "SCSI support" and "SCSI tape support" must also be enabled for this
176 option to work.
177
178 When this option is disabled (N), the SCSI portion of the driver
179 is not compiled.
180
181config BLK_DEV_DAC960
182 tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
183 depends on PCI
184 help
185 This driver adds support for the Mylex DAC960, AcceleRAID, and
186 eXtremeRAID PCI RAID controllers. See the file
187 <file:Documentation/README.DAC960> for further information about
188 this driver.
189
190 To compile this driver as a module, choose M here: the
191 module will be called DAC960.
192
193config BLK_DEV_UMEM
194 tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)"
195 depends on PCI && EXPERIMENTAL
196 ---help---
197 Saying Y here will include support for the MM5415 family of
198	  battery-backed (non-volatile) RAM cards.
199 <http://www.umem.com/>
200
201 The cards appear as block devices that can be partitioned into
202 as many as 15 partitions.
203
204 To compile this driver as a module, choose M here: the
205 module will be called umem.
206
207 The umem driver has not yet been allocated a MAJOR number, so
208 one is chosen dynamically. Use "devfs" or look in /proc/devices
209	  for the device number.
210
211config BLK_DEV_UBD
212 bool "Virtual block device"
213 depends on UML
214 ---help---
215 The User-Mode Linux port includes a driver called UBD which will let
216 you access arbitrary files on the host computer as block devices.
217	  Unless you know that you do not need such virtual block devices, say
218 Y here.
219
220config BLK_DEV_UBD_SYNC
221 bool "Always do synchronous disk IO for UBD"
222 depends on BLK_DEV_UBD
223 ---help---
224 Writes to the virtual block device are not immediately written to the
225 host's disk; this may cause problems if, for example, the User-Mode
226 Linux 'Virtual Machine' uses a journalling filesystem and the host
227 computer crashes.
228
229 Synchronous operation (i.e. always writing data to the host's disk
230 immediately) is configurable on a per-UBD basis by using a special
231 kernel command line option. Alternatively, you can say Y here to
232 turn on synchronous operation by default for all block devices.
233
234 If you're running a journalling file system (like reiserfs, for
235 example) in your virtual machine, you will want to say Y here. If
236 you care for the safety of the data in your virtual machine, Y is a
237 wise choice too. In all other cases (for example, if you're just
238 playing around with User-Mode Linux) you can choose N.
239
240config BLK_DEV_COW_COMMON
241 bool
242 default BLK_DEV_UBD
243
244config MMAPPER
245 tristate "Example IO memory driver (BROKEN)"
246 depends on UML && BROKEN
247 ---help---
248 The User-Mode Linux port can provide support for IO Memory
249 emulation with this option. This allows a host file to be
250 specified as an I/O region on the kernel command line. That file
251 will be mapped into UML's kernel address space where a driver can
252 locate it and do whatever it wants with the memory, including
253 providing an interface to it for UML processes to use.
254
255 For more information, see
256 <http://user-mode-linux.sourceforge.net/iomem.html>.
257
258 If you'd like to be able to provide a simulated IO port space for
259 User-Mode Linux processes, say Y. If unsure, say N.
260
261config BLK_DEV_LOOP
262 tristate "Loopback device support"
263 ---help---
264 Saying Y here will allow you to use a regular file as a block
265 device; you can then create a file system on that block device and
266 mount it just as you would mount other block devices such as hard
267 drive partitions, CD-ROM drives or floppy drives. The loop devices
268 are block special device files with major number 7 and typically
269 called /dev/loop0, /dev/loop1 etc.
270
271 This is useful if you want to check an ISO 9660 file system before
272 burning the CD, or if you want to use floppy images without first
273 writing them to floppy. Furthermore, some Linux distributions avoid
274 the need for a dedicated Linux partition by keeping their complete
275 root file system inside a DOS FAT file using this loop device
276 driver.
277
278 To use the loop device, you need the losetup utility, found in the
279 util-linux package, see
280 <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>.
281
282 The loop device driver can also be used to "hide" a file system in
283 a disk partition, floppy, or regular file, either using encryption
284 (scrambling the data) or steganography (hiding the data in the low
285 bits of, say, a sound file). This is also safe if the file resides
286 on a remote file server.
287
288 There are several ways of encrypting disks. Some of these require
289 kernel patches. The vanilla kernel offers the cryptoloop option
290 and a Device Mapper target (which is superior, as it supports all
291 file systems). If you want to use the cryptoloop, say Y to both
292 LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
293 or later) version of util-linux. Additionally, be aware that
294 the cryptoloop is not safe for storing journaled filesystems.
295
296 Note that this loop device has nothing to do with the loopback
297 device used for network connections from the machine to itself.
298
299 To compile this driver as a module, choose M here: the
300 module will be called loop.
301
302 Most users will answer N here.
303
304config BLK_DEV_CRYPTOLOOP
305 tristate "Cryptoloop Support"
306 select CRYPTO
307 depends on BLK_DEV_LOOP
308 ---help---
309 Say Y here if you want to be able to use the ciphers that are
310 provided by the CryptoAPI as loop transformation. This might be
311 used as hard disk encryption.
312
313 WARNING: This device is not safe for journaled file systems like
314 ext3 or Reiserfs. Please use the Device Mapper crypto module
315 instead, which can be configured to be on-disk compatible with the
316 cryptoloop device.
317
318config BLK_DEV_NBD
319 tristate "Network block device support"
320 depends on NET
321 ---help---
322 Saying Y here will allow your computer to be a client for network
323 block devices, i.e. it will be able to use block devices exported by
324 servers (mount file systems on them etc.). Communication between
325 client and server works over TCP/IP networking, but to the client
326 program this is hidden: it looks like a regular local file access to
327 a block device special file such as /dev/nd0.
328
329	  Network block devices also allow you to run a block device in
330 userland (making server and client physically the same computer,
331 communicating using the loopback network device).
332
333 Read <file:Documentation/nbd.txt> for more information, especially
334 about where to find the server code, which runs in user space and
335 does not need special kernel support.
336
337 Note that this has nothing to do with the network file systems NFS
338 or Coda; you can say N here even if you intend to use NFS or Coda.
339
340 To compile this driver as a module, choose M here: the
341 module will be called nbd.
342
343 If unsure, say N.
344
345config BLK_DEV_SX8
346 tristate "Promise SATA SX8 support"
347 depends on PCI
348 ---help---
349 Saying Y or M here will enable support for the
350 Promise SATA SX8 controllers.
351
352 Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
353
354config BLK_DEV_UB
355 tristate "Low Performance USB Block driver"
356 depends on USB
357 help
358 This driver supports certain USB attached storage devices
359 such as flash keys.
360
361 Warning: Enabling this cripples the usb-storage driver.
362
363 If unsure, say N.
364
365config BLK_DEV_RAM
366 tristate "RAM disk support"
367 ---help---
368	  Saying Y here will allow you to use a portion of your RAM as
369 a block device, so that you can make file systems on it, read and
370 write to it and do all the other things that you can do with normal
371 block devices (such as hard drives). It is usually used to load and
372 store a copy of a minimal root file system off of a floppy into RAM
373 during the initial install of Linux.
374
375 Note that the kernel command line option "ramdisk=XX" is now
376 obsolete. For details, read <file:Documentation/ramdisk.txt>.
377
378 To compile this driver as a module, choose M here: the
379 module will be called rd.
380
381 Most normal users won't need the RAM disk functionality, and can
382 thus say N here.
383
384config BLK_DEV_RAM_COUNT
385 int "Default number of RAM disks" if BLK_DEV_RAM
386 default "16"
387 help
388	  The default value is 16 RAM disks. Change this if you know what you
389 are doing. If you boot from a filesystem that needs to be extracted
390 in memory, you will need at least one RAM disk (e.g. root on cramfs).
391
392config BLK_DEV_RAM_SIZE
393 int "Default RAM disk size (kbytes)"
394 depends on BLK_DEV_RAM
395 default "4096"
396 help
397 The default value is 4096 kilobytes. Only change this if you know
398	  what you are doing. If you are using IBM S/390, then set this to
399 8192.
400
401config BLK_DEV_INITRD
402 bool "Initial RAM disk (initrd) support"
403 depends on BLK_DEV_RAM=y
404 help
405 The initial RAM disk is a RAM disk that is loaded by the boot loader
406 (loadlin or lilo) and that is mounted as root before the normal boot
407 procedure. It is typically used to load modules needed to mount the
408 "real" root file system, etc. See <file:Documentation/initrd.txt>
409 for details.
410
411config INITRAMFS_SOURCE
412 string "Initramfs source file(s)"
413 default ""
414 help
415 This can be either a single cpio archive with a .cpio suffix or a
416 space-separated list of directories and files for building the
417 initramfs image. A cpio archive should contain a filesystem archive
418 to be used as an initramfs image. Directories should contain a
419 filesystem layout to be included in the initramfs image. Files
420 should contain entries according to the format described by the
421 "usr/gen_init_cpio" program in the kernel tree.
422
423 When multiple directories and files are specified then the
424 initramfs image will be the aggregate of all of them.
425
426	  See <file:Documentation/early-userspace/README> for more details.
427
428 If you are not sure, leave it blank.
429
430config INITRAMFS_ROOT_UID
431 int "User ID to map to 0 (user root)"
432 depends on INITRAMFS_SOURCE!=""
433 default "0"
434 help
435	  This setting is only meaningful if the INITRAMFS_SOURCE
436 contains a directory. Setting this user ID (UID) to something
437 other than "0" will cause all files owned by that UID to be
438 owned by user root in the initial ramdisk image.
439
440 If you are not sure, leave it set to "0".
441
442config INITRAMFS_ROOT_GID
443 int "Group ID to map to 0 (group root)"
444 depends on INITRAMFS_SOURCE!=""
445 default "0"
446 help
447	  This setting is only meaningful if the INITRAMFS_SOURCE
448 contains a directory. Setting this group ID (GID) to something
449 other than "0" will cause all files owned by that GID to be
450 owned by group root in the initial ramdisk image.
451
452 If you are not sure, leave it set to "0".
453
454#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
455#for instance.
456config LBD
457 bool "Support for Large Block Devices"
458 depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML
459 help
460	  Say Y here if you want to attach large (bigger than 2TB) disks to
461 your machine, or if you want to have a raid or loopback device
462 bigger than 2TB. Otherwise say N.
463
464config CDROM_PKTCDVD
465 tristate "Packet writing on CD/DVD media"
466 depends on !UML
467 help
468 If you have a CDROM drive that supports packet writing, say Y to
469 include preliminary support. It should work with any MMC/Mt Fuji
470 compliant ATAPI or SCSI drive, which is just about any newer CD
471 writer.
472
473 Currently only writing to CD-RW, DVD-RW and DVD+RW discs is possible.
474	  DVD-RW discs must be in restricted overwrite mode.
475
476 To compile this driver as a module, choose M here: the
477 module will be called pktcdvd.
478
479config CDROM_PKTCDVD_BUFFERS
480 int "Free buffers for data gathering"
481 depends on CDROM_PKTCDVD
482 default "8"
483 help
484 This controls the maximum number of active concurrent packets. More
485 concurrent packets can increase write performance, but also require
486	  more memory. Each concurrent packet will require approximately 64KB
487 of non-swappable kernel memory, memory which will be allocated at
488 pktsetup time.
489
490config CDROM_PKTCDVD_WCACHE
491 bool "Enable write caching"
492 depends on CDROM_PKTCDVD
493 help
494 If enabled, write caching will be set for the CD-R/W device. For now
495 this option is dangerous unless the CD-RW media is known good, as we
496 don't do deferred write error handling yet.
497
498source "drivers/s390/block/Kconfig"
499
500source "drivers/block/Kconfig.iosched"
501
502config ATA_OVER_ETH
503 tristate "ATA over Ethernet support"
504 depends on NET
505 help
506	  This driver provides support for ATA over Ethernet block
507 devices like the Coraid EtherDrive (R) Storage Blade.
508
509endmenu
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
new file mode 100644
index 000000000000..6070a480600b
--- /dev/null
+++ b/drivers/block/Kconfig.iosched
@@ -0,0 +1,41 @@
1
2menu "IO Schedulers"
3
4config IOSCHED_NOOP
5 bool
6 default y
7 ---help---
8 The no-op I/O scheduler is a minimal scheduler that does basic merging
9 and sorting. Its main uses include non-disk based block devices like
10 memory devices, and specialised software or hardware environments
11 that do their own scheduling and require only minimal assistance from
12 the kernel.
13
14config IOSCHED_AS
15 tristate "Anticipatory I/O scheduler"
16 default y
17 ---help---
18 The anticipatory I/O scheduler is the default disk scheduler. It is
19 generally a good choice for most environments, but is quite large and
20	  complex when compared to the deadline I/O scheduler; it can also be
21	  slower in some cases, especially under some database loads.
22
23config IOSCHED_DEADLINE
24 tristate "Deadline I/O scheduler"
25 default y
26 ---help---
27 The deadline I/O scheduler is simple and compact, and is often as
28 good as the anticipatory I/O scheduler, and in some database
29 workloads, better. In the case of a single process performing I/O to
30 a disk at any one time, its behaviour is almost identical to the
31 anticipatory I/O scheduler and so is a good choice.
32
33config IOSCHED_CFQ
34 tristate "CFQ I/O scheduler"
35 default y
36 ---help---
37 The CFQ I/O scheduler tries to distribute bandwidth equally
38 among all processes in the system. It should provide a fair
39 working environment, suitable for desktop systems.
40
41endmenu
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
new file mode 100644
index 000000000000..1cf09a1c065b
--- /dev/null
+++ b/drivers/block/Makefile
@@ -0,0 +1,47 @@
1#
2# Makefile for the kernel block device drivers.
3#
4# 12 June 2000, Christoph Hellwig <hch@infradead.org>
5# Rewritten to use lists instead of if-statements.
6#
7# Note : at this point, these files are compiled on all systems.
8# In the future, some of these should be built conditionally.
9#
10
11#
12# NOTE that ll_rw_blk.c must come early in linkage order - it starts the
13# kblockd threads
14#
15
16obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
17
18obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
19obj-$(CONFIG_IOSCHED_AS) += as-iosched.o
20obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
21obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
22obj-$(CONFIG_MAC_FLOPPY) += swim3.o
23obj-$(CONFIG_BLK_DEV_FD) += floppy.o
24obj-$(CONFIG_BLK_DEV_FD98) += floppy98.o
25obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o
26obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
27obj-$(CONFIG_BLK_DEV_SWIM_IOP) += swim_iop.o
28obj-$(CONFIG_ATARI_ACSI) += acsi.o
29obj-$(CONFIG_ATARI_SLM) += acsi_slm.o
30obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
31obj-$(CONFIG_BLK_DEV_RAM) += rd.o
32obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
33obj-$(CONFIG_BLK_DEV_PS2) += ps2esdi.o
34obj-$(CONFIG_BLK_DEV_XD) += xd.o
35obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
36obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
37obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
38obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
39
40obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
41obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
42obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
43
44obj-$(CONFIG_VIODASD) += viodasd.o
45obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
46obj-$(CONFIG_BLK_DEV_UB) += ub.o
47
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
new file mode 100644
index 000000000000..ce933de48084
--- /dev/null
+++ b/drivers/block/acsi.c
@@ -0,0 +1,1829 @@
1/*
2 * acsi.c -- Device driver for Atari ACSI hard disks
3 *
4 * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
5 *
6 * Some parts are based on hd.c by Linus Torvalds
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive for
10 * more details.
11 *
12 */
13
14/*
15 * Still to do in this file:
16 * - If a command ends with an error status (!= 0), the following
17 * REQUEST SENSE commands (4 to fill the ST-DMA FIFO) are done by
18 * polling the _IRQ signal (not interrupt-driven). This should be
19 * avoided in the future because it takes up a non-negligible time in
20 * the interrupt service routine while interrupts are disabled.
21 * Maybe a timer interrupt will get lost :-(
22 */
23
24/*
25 * General notes:
26 *
27 * - All ACSI devices (disks, CD-ROMs, ...) use major number 28.
28 * Minors are organized like it is with SCSI: The upper 4 bits
29 * identify the device, the lower 4 bits the partition.
30 * The device numbers (the upper 4 bits) are given in the same
31 * order as the devices are found on the bus.
32 * - Up to 8 LUNs are supported for each target (if CONFIG_ACSI_MULTI_LUN
33 * is defined), but only a total of 16 devices (due to minor
34 * numbers...). Note that Atari allows only a maximum of 4 targets
35 * (i.e. controllers, not devices) on the ACSI bus!
36 * - An optimizing scheme similar to SCSI scatter-gather is implemented.
37 * - Removable media are supported. After a medium change the device
38 * is reinitialized (partition check etc.). Also, if the device
39 * knows the PREVENT/ALLOW MEDIUM REMOVAL command, the door should
40 * be locked and unlocked when mounting the first or unmounting the
41 * last filesystem on the device. The code is untested, because I
42 * don't have a removable hard disk.
43 *
44 */
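/* Editorial example of the minor numbering described above: minor 0x23 would
 * select partition 3 on the third ACSI device found, i.e. device number 2. */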
45
46#include <linux/config.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/signal.h>
50#include <linux/sched.h>
51#include <linux/timer.h>
52#include <linux/fs.h>
53#include <linux/kernel.h>
54#include <linux/genhd.h>
55#include <linux/delay.h>
56#include <linux/mm.h>
57#include <linux/major.h>
58#include <linux/slab.h>
59#include <linux/interrupt.h>
60#include <scsi/scsi.h> /* for SCSI_IOCTL_GET_IDLUN */
61typedef void Scsi_Device; /* hack to avoid including scsi.h */
62#include <scsi/scsi_ioctl.h>
63#include <linux/hdreg.h> /* for HDIO_GETGEO */
64#include <linux/blkpg.h>
65#include <linux/buffer_head.h>
66#include <linux/blkdev.h>
67
68#include <asm/setup.h>
69#include <asm/pgtable.h>
70#include <asm/system.h>
71#include <asm/uaccess.h>
72#include <asm/atarihw.h>
73#include <asm/atariints.h>
74#include <asm/atari_acsi.h>
75#include <asm/atari_stdma.h>
76#include <asm/atari_stram.h>
77
78static void (*do_acsi)(void) = NULL;
79static struct request_queue *acsi_queue;
80#define QUEUE (acsi_queue)
81#define CURRENT elv_next_request(acsi_queue)
82
83#define DEBUG
84#undef DEBUG_DETECT
85#undef NO_WRITE
86
87#define MAX_ERRORS 8 /* Max read/write errors/sector */
88#define MAX_LUN 8 /* Max LUNs per target */
89#define MAX_DEV 16
90
91#define ACSI_BUFFER_SIZE (16*1024) /* "normal" ACSI buffer size */
92#define ACSI_BUFFER_MINSIZE (2048) /* min. buf size if ext. DMA */
93#define ACSI_BUFFER_SIZE_ORDER 2 /* order size for above */
94#define ACSI_BUFFER_MINSIZE_ORDER 0 /* order size for above */
95#define ACSI_BUFFER_SECTORS (ACSI_BUFFER_SIZE/512)
96
97#define ACSI_BUFFER_ORDER \
98 (ATARIHW_PRESENT(EXTD_DMA) ? \
99 ACSI_BUFFER_MINSIZE_ORDER : \
100 ACSI_BUFFER_SIZE_ORDER)
101
102#define ACSI_TIMEOUT (4*HZ)
103
104/* minimum delay between two commands */
105
106#define COMMAND_DELAY 500
107
108typedef enum {
109 NONE, HARDDISK, CDROM
110} ACSI_TYPE;
111
112struct acsi_info_struct {
113 ACSI_TYPE type; /* type of device */
114 unsigned target; /* target number */
115 unsigned lun; /* LUN in target controller */
116 unsigned removable : 1; /* Flag for removable media */
117 unsigned read_only : 1; /* Flag for read only devices */
118 unsigned old_atari_disk : 1; /* Is an old Atari disk */
119 unsigned changed : 1; /* Medium has been changed */
120 unsigned long size; /* #blocks */
121 int access_count;
122} acsi_info[MAX_DEV];
123
124/*
125 * SENSE KEYS
126 */
127
128#define NO_SENSE 0x00
129#define RECOVERED_ERROR 0x01
130#define NOT_READY 0x02
131#define MEDIUM_ERROR 0x03
132#define HARDWARE_ERROR 0x04
133#define ILLEGAL_REQUEST 0x05
134#define UNIT_ATTENTION 0x06
135#define DATA_PROTECT 0x07
136#define BLANK_CHECK 0x08
137#define COPY_ABORTED 0x0a
138#define ABORTED_COMMAND 0x0b
139#define VOLUME_OVERFLOW 0x0d
140#define MISCOMPARE 0x0e
141
142
143/*
144 * DEVICE TYPES
145 */
146
147#define TYPE_DISK 0x00
148#define TYPE_TAPE 0x01
149#define TYPE_WORM 0x04
150#define TYPE_ROM 0x05
151#define TYPE_MOD 0x07
152#define TYPE_NO_LUN 0x7f
153
154/* The data returned by MODE SENSE differ between the old Atari
155 * hard disks and SCSI disks connected to ACSI. In the following, both
156 * formats are defined and some macros to operate on them portably.
157 */
158
159typedef struct {
160 unsigned long dummy[2];
161 unsigned long sector_size;
162 unsigned char format_code;
163#define ATARI_SENSE_FORMAT_FIX 1
164#define ATARI_SENSE_FORMAT_CHNG 2
165 unsigned char cylinders_h;
166 unsigned char cylinders_l;
167 unsigned char heads;
168 unsigned char reduced_h;
169 unsigned char reduced_l;
170 unsigned char precomp_h;
171 unsigned char precomp_l;
172 unsigned char landing_zone;
173 unsigned char steprate;
174 unsigned char type;
175#define ATARI_SENSE_TYPE_FIXCHNG_MASK 4
176#define ATARI_SENSE_TYPE_SOFTHARD_MASK 8
177#define ATARI_SENSE_TYPE_FIX 4
178#define ATARI_SENSE_TYPE_CHNG 0
179#define ATARI_SENSE_TYPE_SOFT 0
180#define ATARI_SENSE_TYPE_HARD 8
181 unsigned char sectors;
182} ATARI_SENSE_DATA;
183
184#define ATARI_CAPACITY(sd) \
185 (((int)((sd).cylinders_h<<8)|(sd).cylinders_l) * \
186 (sd).heads * (sd).sectors)
187
188
189typedef struct {
190 unsigned char dummy1;
191 unsigned char medium_type;
192 unsigned char dummy2;
193 unsigned char descriptor_size;
194 unsigned long block_count;
195 unsigned long sector_size;
196 /* Page 0 data */
197 unsigned char page_code;
198 unsigned char page_size;
199 unsigned char page_flags;
200 unsigned char qualifier;
201} SCSI_SENSE_DATA;
202
203#define SCSI_CAPACITY(sd) ((sd).block_count & 0xffffff)
204
205
206typedef union {
207 ATARI_SENSE_DATA atari;
208 SCSI_SENSE_DATA scsi;
209} SENSE_DATA;
210
211#define SENSE_TYPE_UNKNOWN 0
212#define SENSE_TYPE_ATARI 1
213#define SENSE_TYPE_SCSI 2
214
215#define SENSE_TYPE(sd) \
216 (((sd).atari.dummy[0] == 8 && \
217 ((sd).atari.format_code == 1 || \
218 (sd).atari.format_code == 2)) ? SENSE_TYPE_ATARI : \
219 ((sd).scsi.dummy1 >= 11) ? SENSE_TYPE_SCSI : \
220 SENSE_TYPE_UNKNOWN)
221
222#define CAPACITY(sd) \
223 (SENSE_TYPE(sd) == SENSE_TYPE_ATARI ? \
224 ATARI_CAPACITY((sd).atari) : \
225 SCSI_CAPACITY((sd).scsi))
226
227#define SECTOR_SIZE(sd) \
228 (SENSE_TYPE(sd) == SENSE_TYPE_ATARI ? \
229 (sd).atari.sector_size : \
230 (sd).scsi.sector_size & 0xffffff)
231
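/*
 * Worked example (geometry values assumed for illustration): an old
 * SH204-class Atari disk reporting 615 cylinders, 4 heads and 17 sectors
 * per track gives ATARI_CAPACITY() = 615 * 4 * 17 = 41820 blocks, i.e.
 * roughly 20 MB at 512 bytes per block.  For a SCSI-type reply CAPACITY()
 * is just the 24-bit block_count field, and SECTOR_SIZE() picks the sector
 * size field of whichever format SENSE_TYPE() detected.
 */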
232/* Default size if capacity cannot be determined (1 GByte) */
233#define DEFAULT_SIZE 0x1fffff
234
235#define CARTRCH_STAT(aip,buf) \
236 (aip->old_atari_disk ? \
237 (((buf)[0] & 0x7f) == 0x28) : \
238 ((((buf)[0] & 0x70) == 0x70) ? \
239 (((buf)[2] & 0x0f) == 0x06) : \
240 (((buf)[0] & 0x0f) == 0x06))) \
241
242/* These two are also exported to other drivers that work on the ACSI bus and
243 * need an ST-RAM buffer. */
244char *acsi_buffer;
245unsigned long phys_acsi_buffer;
246
247static int NDevices;
248
249static int CurrentNReq;
250static int CurrentNSect;
251static char *CurrentBuffer;
252
253static DEFINE_SPINLOCK(acsi_lock);
254
255
256#define SET_TIMER() mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
257#define CLEAR_TIMER() del_timer(&acsi_timer)
258
259static unsigned long STramMask;
260#define STRAM_ADDR(a) (((a) & STramMask) == 0)
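/*
 * For illustration: without extended DMA, STramMask is set to 0xff000000
 * (see acsi_init()), so STRAM_ADDR() accepts only physical addresses below
 * 16 MB, i.e. buffers located in ST-RAM.  A buffer at 0x0007f000 qualifies
 * (0x0007f000 & 0xff000000 == 0), while one in TT-RAM at 0x01000000 does
 * not and has to be bounced through acsi_buffer instead.
 */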
261
262
263
264/* ACSI commands */
265
266static char tur_cmd[6] = { 0x00, 0, 0, 0, 0, 0 };
267static char modesense_cmd[6] = { 0x1a, 0, 0, 0, 24, 0 };
268static char modeselect_cmd[6] = { 0x15, 0, 0, 0, 12, 0 };
269static char inquiry_cmd[6] = { 0x12, 0, 0, 0,255, 0 };
270static char reqsense_cmd[6] = { 0x03, 0, 0, 0, 4, 0 };
271static char read_cmd[6] = { 0x08, 0, 0, 0, 0, 0 };
272static char write_cmd[6] = { 0x0a, 0, 0, 0, 0, 0 };
273static char pa_med_rem_cmd[6] = { 0x1e, 0, 0, 0, 0, 0 };
274
275#define CMDSET_TARG_LUN(cmd,targ,lun) \
276 do { \
277 cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5; \
278 cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5; \
279 } while(0)
280
281#define CMDSET_BLOCK(cmd,blk) \
282 do { \
283 unsigned long __blk = (blk); \
284 cmd[3] = __blk; __blk >>= 8; \
285 cmd[2] = __blk; __blk >>= 8; \
286 cmd[1] = (cmd[1] & 0xe0) | (__blk & 0x1f); \
287 } while(0)
288
289#define CMDSET_LEN(cmd,len) \
290 do { \
291 cmd[4] = (len); \
292 } while(0)
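#if 0
/* Minimal usage sketch (values chosen for illustration only): build a
 * READ(6) for 2 sectors starting at block 0x1234 on target 1, lun 0.
 * Afterwards read_cmd[] holds 28 00 12 34 02 00 -- the target number in
 * the top three bits of byte 0, the 21-bit block address spread over
 * bytes 1-3, and the transfer length in byte 4.
 */
CMDSET_TARG_LUN( read_cmd, 1, 0 );
CMDSET_BLOCK( read_cmd, 0x1234 );
CMDSET_LEN( read_cmd, 2 );
#endif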
293
294/* ACSI errors (from REQUEST SENSE); There are two tables, one for the
295 * old Atari disks and one for SCSI on ACSI disks.
296 */
297
298struct acsi_error {
299 unsigned char code;
300 const char *text;
301} atari_acsi_errors[] = {
302 { 0x00, "No error (??)" },
303 { 0x01, "No index pulses" },
304 { 0x02, "Seek not complete" },
305 { 0x03, "Write fault" },
306 { 0x04, "Drive not ready" },
307 { 0x06, "No Track 00 signal" },
308 { 0x10, "ECC error in ID field" },
309 { 0x11, "Uncorrectable data error" },
310 { 0x12, "ID field address mark not found" },
311 { 0x13, "Data field address mark not found" },
312 { 0x14, "Record not found" },
313 { 0x15, "Seek error" },
314 { 0x18, "Data check in no retry mode" },
315 { 0x19, "ECC error during verify" },
316 { 0x1a, "Access to bad block" },
317 { 0x1c, "Unformatted or bad format" },
318 { 0x20, "Invalid command" },
319 { 0x21, "Invalid block address" },
320 { 0x23, "Volume overflow" },
321 { 0x24, "Invalid argument" },
322 { 0x25, "Invalid drive number" },
323 { 0x26, "Byte zero parity check" },
324	{ 0x28, "Cartridge changed" },
325 { 0x2c, "Error count overflow" },
326 { 0x30, "Controller selftest failed" }
327},
328
329 scsi_acsi_errors[] = {
330 { 0x00, "No error (??)" },
331 { 0x01, "Recovered error" },
332 { 0x02, "Drive not ready" },
333 { 0x03, "Uncorrectable medium error" },
334 { 0x04, "Hardware error" },
335 { 0x05, "Illegal request" },
336 { 0x06, "Unit attention (Reset or cartridge changed)" },
337 { 0x07, "Data protection" },
338 { 0x08, "Blank check" },
339 { 0x0b, "Aborted Command" },
340 { 0x0d, "Volume overflow" }
341};
342
343
344
345/***************************** Prototypes *****************************/
346
347static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int
348 rwflag, int enable);
349static int acsi_reqsense( char *buffer, int targ, int lun);
350static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip);
351static irqreturn_t acsi_interrupt (int irq, void *data, struct pt_regs *fp);
352static void unexpected_acsi_interrupt( void );
353static void bad_rw_intr( void );
354static void read_intr( void );
355static void write_intr( void);
356static void acsi_times_out( unsigned long dummy );
357static void copy_to_acsibuffer( void );
358static void copy_from_acsibuffer( void );
359static void do_end_requests( void );
360static void do_acsi_request( request_queue_t * );
361static void redo_acsi_request( void );
362static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int
363 cmd, unsigned long arg );
364static int acsi_open( struct inode * inode, struct file * filp );
365static int acsi_release( struct inode * inode, struct file * file );
366static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag );
367static int acsi_change_blk_size( int target, int lun);
368static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd );
369static int acsi_revalidate (struct gendisk *disk);
370
371/************************* End of Prototypes **************************/
372
373
374struct timer_list acsi_timer = TIMER_INITIALIZER(acsi_times_out, 0, 0);
375
376
377#ifdef CONFIG_ATARI_SLM
378
379extern int attach_slm( int target, int lun );
380extern int slm_init( void );
381
382#endif
383
384
385
386/***********************************************************************
387 *
388 * ACSI primitives
389 *
390 **********************************************************************/
391
392
393/*
394 * The following two functions wait for _IRQ to become Low or High,
395 * resp., with a timeout. The 'timeout' parameter is in jiffies
396 * (10ms).
397 * If the functions are called with timer interrupts on (int level <
398 * 6), the timeout is based on the 'jiffies' variable to provide exact
399 * timeouts for device probing etc.
400 * If interrupts are disabled, the number of tries is based on the
401 * 'loops_per_jiffy' variable. A rough estimation is sufficient here...
402 */
403
404#define INT_LEVEL \
405 ({ unsigned __sr; \
406 __asm__ __volatile__ ( "movew %/sr,%0" : "=dm" (__sr) ); \
407 (__sr >> 8) & 7; \
408 })
409
410int acsi_wait_for_IRQ( unsigned timeout )
411
412{
413 if (INT_LEVEL < 6) {
414 unsigned long maxjif = jiffies + timeout;
415 while (time_before(jiffies, maxjif))
416 if (!(mfp.par_dt_reg & 0x20)) return( 1 );
417 }
418 else {
419 long tries = loops_per_jiffy / 8 * timeout;
420 while( --tries >= 0 )
421 if (!(mfp.par_dt_reg & 0x20)) return( 1 );
422 }
423 return( 0 ); /* timeout! */
424}
425
426
427int acsi_wait_for_noIRQ( unsigned timeout )
428
429{
430 if (INT_LEVEL < 6) {
431 unsigned long maxjif = jiffies + timeout;
432 while (time_before(jiffies, maxjif))
433 if (mfp.par_dt_reg & 0x20) return( 1 );
434 }
435 else {
436 long tries = loops_per_jiffy * timeout / 8;
437 while( tries-- >= 0 )
438 if (mfp.par_dt_reg & 0x20) return( 1 );
439 }
440 return( 0 ); /* timeout! */
441}
442
443static struct timeval start_time;
444
445void
446acsi_delay_start(void)
447{
448 do_gettimeofday(&start_time);
449}
450
451/* wait until 'usec' microseconds (< 1E6) have elapsed since acsi_delay_start() */
452
453void
454acsi_delay_end(long usec)
455{
456 struct timeval end_time;
457 long deltau,deltas;
458 do_gettimeofday(&end_time);
459 deltau=end_time.tv_usec - start_time.tv_usec;
460 deltas=end_time.tv_sec - start_time.tv_sec;
461 if (deltas > 1 || deltas < 0)
462 return;
463 if (deltas > 0)
464 deltau += 1000*1000;
465 if (deltau >= usec)
466 return;
467 udelay(usec-deltau);
468}
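#if 0
/* Usage sketch (illustrative only): the two delay helpers bracket the gap
 * between two commands.  acsi_getstatus() calls acsi_delay_start() when a
 * command finishes, and the next command then burns whatever is left of
 * COMMAND_DELAY (500 usec) before touching the bus again:
 */
acsi_delay_start();			/* command finished, remember the time */
/* ... other work ... */
acsi_delay_end(COMMAND_DELAY);		/* ensure >= COMMAND_DELAY usec have passed */
#endif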
469
470/* acsicmd_dma() sends an ACSI command and sets up the DMA to transfer
471 * 'blocks' blocks of 512 bytes from/to 'buffer'.
472 * Because the _IRQ signal is used for handshaking the command bytes,
473 * the ACSI interrupt has to be disabled in this function. If the end
474 * of the operation should be signalled by a real interrupt, it has to be
475 * reenabled afterwards.
476 */
477
478static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int rwflag, int enable)
479
480{ unsigned long flags, paddr;
481 int i;
482
483#ifdef NO_WRITE
484 if (rwflag || *cmd == 0x0a) {
485 printk( "ACSI: Write commands disabled!\n" );
486 return( 0 );
487 }
488#endif
489
490 rwflag = rwflag ? 0x100 : 0;
491 paddr = virt_to_phys( buffer );
492
493 acsi_delay_end(COMMAND_DELAY);
494 DISABLE_IRQ();
495
496 local_irq_save(flags);
497 /* Low on A1 */
498 dma_wd.dma_mode_status = 0x88 | rwflag;
499 MFPDELAY();
500
501 /* set DMA address */
502 dma_wd.dma_lo = (unsigned char)paddr;
503 paddr >>= 8;
504 MFPDELAY();
505 dma_wd.dma_md = (unsigned char)paddr;
506 paddr >>= 8;
507 MFPDELAY();
508 if (ATARIHW_PRESENT(EXTD_DMA))
509 st_dma_ext_dmahi = (unsigned short)paddr;
510 else
511 dma_wd.dma_hi = (unsigned char)paddr;
512 MFPDELAY();
513 local_irq_restore(flags);
514
515 /* send the command bytes except the last */
516 for( i = 0; i < 5; ++i ) {
517 DMA_LONG_WRITE( *cmd++, 0x8a | rwflag );
518 udelay(20);
519 if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
520 }
521
522 /* Clear FIFO and switch DMA to correct direction */
523 dma_wd.dma_mode_status = 0x92 | (rwflag ^ 0x100);
524 MFPDELAY();
525 dma_wd.dma_mode_status = 0x92 | rwflag;
526 MFPDELAY();
527
528 /* How many sectors for DMA */
529 dma_wd.fdc_acces_seccount = blocks;
530 MFPDELAY();
531
532 /* send last command byte */
533 dma_wd.dma_mode_status = 0x8a | rwflag;
534 MFPDELAY();
535 DMA_LONG_WRITE( *cmd++, 0x0a | rwflag );
536 if (enable)
537 ENABLE_IRQ();
538 udelay(80);
539
540 return( 1 );
541}
542
543
544/*
545 * acsicmd_nodma() sends an ACSI command that requires no DMA.
546 */
547
548int acsicmd_nodma( const char *cmd, int enable)
549
550{ int i;
551
552 acsi_delay_end(COMMAND_DELAY);
553 DISABLE_IRQ();
554
555 /* send first command byte */
556 dma_wd.dma_mode_status = 0x88;
557 MFPDELAY();
558 DMA_LONG_WRITE( *cmd++, 0x8a );
559 udelay(20);
560 if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
561
562 /* send the intermediate command bytes */
563 for( i = 0; i < 4; ++i ) {
564 DMA_LONG_WRITE( *cmd++, 0x8a );
565 udelay(20);
566 if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
567 }
568
569 /* send last command byte */
570 DMA_LONG_WRITE( *cmd++, 0x0a );
571 if (enable)
572 ENABLE_IRQ();
573 udelay(80);
574
575 return( 1 );
576 /* Note that the ACSI interrupt is still disabled after this
577 * function. If you want to get the IRQ delivered, enable it manually!
578 */
579}
580
581
582static int acsi_reqsense( char *buffer, int targ, int lun)
583
584{
585 CMDSET_TARG_LUN( reqsense_cmd, targ, lun);
586 if (!acsicmd_dma( reqsense_cmd, buffer, 1, 0, 0 )) return( 0 );
587 if (!acsi_wait_for_IRQ( 10 )) return( 0 );
588 acsi_getstatus();
589 if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
590 if (!acsi_wait_for_IRQ( 10 )) return( 0 );
591 acsi_getstatus();
592 if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
593 if (!acsi_wait_for_IRQ( 10 )) return( 0 );
594 acsi_getstatus();
595 if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
596 if (!acsi_wait_for_IRQ( 10 )) return( 0 );
597 acsi_getstatus();
598 dma_cache_maintenance( virt_to_phys(buffer), 16, 0 );
599
600 return( 1 );
601}
602
603
604/*
605 * ACSI status phase: get the status byte from the bus
606 *
607 * I've seen several times that a 0xff status is read, probably due to
608 * a timing error. In this case, the procedure is repeated after the
609 * next _IRQ edge.
610 */
611
612int acsi_getstatus( void )
613
614{ int status;
615
616 DISABLE_IRQ();
617 for(;;) {
618 if (!acsi_wait_for_IRQ( 100 )) {
619 acsi_delay_start();
620 return( -1 );
621 }
622 dma_wd.dma_mode_status = 0x8a;
623 MFPDELAY();
624 status = dma_wd.fdc_acces_seccount;
625 if (status != 0xff) break;
626#ifdef DEBUG
627 printk("ACSI: skipping 0xff status byte\n" );
628#endif
629 udelay(40);
630 acsi_wait_for_noIRQ( 20 );
631 }
632 dma_wd.dma_mode_status = 0x80;
633 udelay(40);
634 acsi_wait_for_noIRQ( 20 );
635
636 acsi_delay_start();
637	return( status & 0x1f );	/* mask off the device # */
638}
639
640
641#if (defined(CONFIG_ATARI_SLM) || defined(CONFIG_ATARI_SLM_MODULE))
642
643/* Receive data in an extended status phase. Needed by SLM printer. */
644
645int acsi_extstatus( char *buffer, int cnt )
646
647{ int status;
648
649 DISABLE_IRQ();
650 udelay(80);
651 while( cnt-- > 0 ) {
652 if (!acsi_wait_for_IRQ( 40 )) return( 0 );
653 dma_wd.dma_mode_status = 0x8a;
654 MFPDELAY();
655 status = dma_wd.fdc_acces_seccount;
656 MFPDELAY();
657 *buffer++ = status & 0xff;
658 udelay(40);
659 }
660 return( 1 );
661}
662
663
664/* Finish an extended status phase */
665
666void acsi_end_extstatus( void )
667
668{
669 dma_wd.dma_mode_status = 0x80;
670 udelay(40);
671 acsi_wait_for_noIRQ( 20 );
672 acsi_delay_start();
673}
674
675
676/* Send data in an extended command phase */
677
678int acsi_extcmd( unsigned char *buffer, int cnt )
679
680{
681 while( cnt-- > 0 ) {
682 DMA_LONG_WRITE( *buffer++, 0x8a );
683 udelay(20);
684 if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
685 }
686 return( 1 );
687}
688
689#endif
690
691
692static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip)
693
694{ int atari_err, i, errcode;
695 struct acsi_error *arr;
696
697 atari_err = aip->old_atari_disk;
698 if (atari_err)
699 errcode = errblk[0] & 0x7f;
700 else
701 if ((errblk[0] & 0x70) == 0x70)
702 errcode = errblk[2] & 0x0f;
703 else
704 errcode = errblk[0] & 0x0f;
705
706 printk( KERN_ERR "ACSI error 0x%02x", errcode );
707
708 if (errblk[0] & 0x80)
709 printk( " for sector %d",
710 ((errblk[1] & 0x1f) << 16) |
711			(errblk[2] << 8) | errblk[3] );
712
713 arr = atari_err ? atari_acsi_errors : scsi_acsi_errors;
714 i = atari_err ? sizeof(atari_acsi_errors)/sizeof(*atari_acsi_errors) :
715 sizeof(scsi_acsi_errors)/sizeof(*scsi_acsi_errors);
716
717 for( --i; i >= 0; --i )
718 if (arr[i].code == errcode) break;
719 if (i >= 0)
720 printk( ": %s\n", arr[i].text );
721}
722
723/*******************************************************************
724 *
725 * ACSI interrupt routine
726 * Test whether this is an ACSI interrupt and call the irq handler;
727 * otherwise ignore the interrupt.
728 *
729 *******************************************************************/
730
731static irqreturn_t acsi_interrupt(int irq, void *data, struct pt_regs *fp )
732
733{ void (*acsi_irq_handler)(void) = do_acsi;
734
735 do_acsi = NULL;
736 CLEAR_TIMER();
737
738 if (!acsi_irq_handler)
739 acsi_irq_handler = unexpected_acsi_interrupt;
740 acsi_irq_handler();
741 return IRQ_HANDLED;
742}
743
744
745/******************************************************************
746 *
747 * The Interrupt handlers
748 *
749 *******************************************************************/
750
751
752static void unexpected_acsi_interrupt( void )
753
754{
755 printk( KERN_WARNING "Unexpected ACSI interrupt\n" );
756}
757
758
759/* This function is called in case of errors. Because we cannot reset
760 * the ACSI bus or a single device, there is no other choice than
761 * retrying several times :-(
762 */
763
764static void bad_rw_intr( void )
765
766{
767 if (!CURRENT)
768 return;
769
770 if (++CURRENT->errors >= MAX_ERRORS)
771 end_request(CURRENT, 0);
772 /* Otherwise just retry */
773}
774
775
776static void read_intr( void )
777
778{ int status;
779
780 status = acsi_getstatus();
781 if (status != 0) {
782 struct gendisk *disk = CURRENT->rq_disk;
783 struct acsi_info_struct *aip = disk->private_data;
784 printk(KERN_ERR "%s: ", disk->disk_name);
785 if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun))
786 printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
787 else {
788 acsi_print_error(acsi_buffer, aip);
789 if (CARTRCH_STAT(aip, acsi_buffer))
790 aip->changed = 1;
791 }
792 ENABLE_IRQ();
793 bad_rw_intr();
794 redo_acsi_request();
795 return;
796 }
797
798 dma_cache_maintenance( virt_to_phys(CurrentBuffer), CurrentNSect*512, 0 );
799 if (CurrentBuffer == acsi_buffer)
800 copy_from_acsibuffer();
801
802 do_end_requests();
803 redo_acsi_request();
804}
805
806
807static void write_intr(void)
808
809{ int status;
810
811 status = acsi_getstatus();
812 if (status != 0) {
813 struct gendisk *disk = CURRENT->rq_disk;
814 struct acsi_info_struct *aip = disk->private_data;
815 printk( KERN_ERR "%s: ", disk->disk_name);
816 if (!acsi_reqsense( acsi_buffer, aip->target, aip->lun))
817 printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
818 else {
819 acsi_print_error(acsi_buffer, aip);
820 if (CARTRCH_STAT(aip, acsi_buffer))
821 aip->changed = 1;
822 }
823 bad_rw_intr();
824 redo_acsi_request();
825 return;
826 }
827
828 do_end_requests();
829 redo_acsi_request();
830}
831
832
833static void acsi_times_out( unsigned long dummy )
834
835{
836 DISABLE_IRQ();
837 if (!do_acsi) return;
838
839 do_acsi = NULL;
840 printk( KERN_ERR "ACSI timeout\n" );
841 if (!CURRENT)
842 return;
843 if (++CURRENT->errors >= MAX_ERRORS) {
844#ifdef DEBUG
845 printk( KERN_ERR "ACSI: too many errors.\n" );
846#endif
847 end_request(CURRENT, 0);
848 }
849
850 redo_acsi_request();
851}
852
853
854
855/***********************************************************************
856 *
857 * Scatter-gather utility functions
858 *
859 ***********************************************************************/
860
861
862static void copy_to_acsibuffer( void )
863
864{ int i;
865 char *src, *dst;
866 struct buffer_head *bh;
867
868 src = CURRENT->buffer;
869 dst = acsi_buffer;
870 bh = CURRENT->bh;
871
872 if (!bh)
873 memcpy( dst, src, CurrentNSect*512 );
874 else
875 for( i = 0; i < CurrentNReq; ++i ) {
876 memcpy( dst, src, bh->b_size );
877 dst += bh->b_size;
878 if ((bh = bh->b_reqnext))
879 src = bh->b_data;
880 }
881}
882
883
884static void copy_from_acsibuffer( void )
885
886{ int i;
887 char *src, *dst;
888 struct buffer_head *bh;
889
890 dst = CURRENT->buffer;
891 src = acsi_buffer;
892 bh = CURRENT->bh;
893
894 if (!bh)
895 memcpy( dst, src, CurrentNSect*512 );
896 else
897 for( i = 0; i < CurrentNReq; ++i ) {
898 memcpy( dst, src, bh->b_size );
899 src += bh->b_size;
900 if ((bh = bh->b_reqnext))
901 dst = bh->b_data;
902 }
903}
904
905
906static void do_end_requests( void )
907
908{ int i, n;
909
910 if (!CURRENT->bh) {
911 CURRENT->nr_sectors -= CurrentNSect;
912 CURRENT->current_nr_sectors -= CurrentNSect;
913 CURRENT->sector += CurrentNSect;
914 if (CURRENT->nr_sectors == 0)
915 end_request(CURRENT, 1);
916 }
917 else {
918 for( i = 0; i < CurrentNReq; ++i ) {
919 n = CURRENT->bh->b_size >> 9;
920 CURRENT->nr_sectors -= n;
921 CURRENT->current_nr_sectors -= n;
922 CURRENT->sector += n;
923 end_request(CURRENT, 1);
924 }
925 }
926}
927
928
929
930
931/***********************************************************************
932 *
933 * do_acsi_request and friends
934 *
935 ***********************************************************************/
936
937static void do_acsi_request( request_queue_t * q )
938
939{
940 stdma_lock( acsi_interrupt, NULL );
941 redo_acsi_request();
942}
943
944
945static void redo_acsi_request( void )
946{
947 unsigned block, target, lun, nsect;
948 char *buffer;
949 unsigned long pbuffer;
950 struct buffer_head *bh;
951 struct gendisk *disk;
952 struct acsi_info_struct *aip;
953
954 repeat:
955 CLEAR_TIMER();
956
957 if (do_acsi)
958 return;
959
960 if (!CURRENT) {
961 do_acsi = NULL;
962 ENABLE_IRQ();
963 stdma_release();
964 return;
965 }
966
967 disk = CURRENT->rq_disk;
968 aip = disk->private_data;
969 if (CURRENT->bh) {
970		if (!buffer_locked(CURRENT->bh))
971 panic("ACSI: block not locked");
972 }
973
974 block = CURRENT->sector;
975 if (block+CURRENT->nr_sectors >= get_capacity(disk)) {
976#ifdef DEBUG
977 printk( "%s: attempted access for blocks %d...%ld past end of device at block %ld.\n",
978 disk->disk_name,
979 block, block + CURRENT->nr_sectors - 1,
980 get_capacity(disk));
981#endif
982 end_request(CURRENT, 0);
983 goto repeat;
984 }
985 if (aip->changed) {
986 printk( KERN_NOTICE "%s: request denied because cartridge has "
987 "been changed.\n", disk->disk_name);
988 end_request(CURRENT, 0);
989 goto repeat;
990 }
991
992 target = aip->target;
993 lun = aip->lun;
994
995 /* Find out how many sectors should be transferred from/to
996 * consecutive buffers and thus can be done with a single command.
997 */
998 buffer = CURRENT->buffer;
999 pbuffer = virt_to_phys(buffer);
1000 nsect = CURRENT->current_nr_sectors;
1001 CurrentNReq = 1;
1002
1003 if ((bh = CURRENT->bh) && bh != CURRENT->bhtail) {
1004 if (!STRAM_ADDR(pbuffer)) {
1005 /* If transfer is done via the ACSI buffer anyway, we can
1006			 * assemble as many bh's as fit in the buffer.
1007 */
1008 while( (bh = bh->b_reqnext) ) {
1009 if (nsect + (bh->b_size>>9) > ACSI_BUFFER_SECTORS) break;
1010 nsect += bh->b_size >> 9;
1011 ++CurrentNReq;
1012 if (bh == CURRENT->bhtail) break;
1013 }
1014 buffer = acsi_buffer;
1015 pbuffer = phys_acsi_buffer;
1016 }
1017 else {
1018 unsigned long pendadr, pnewadr;
1019 pendadr = pbuffer + nsect*512;
1020 while( (bh = bh->b_reqnext) ) {
1021 pnewadr = virt_to_phys(bh->b_data);
1022 if (!STRAM_ADDR(pnewadr) || pendadr != pnewadr) break;
1023 nsect += bh->b_size >> 9;
1024 pendadr = pnewadr + bh->b_size;
1025 ++CurrentNReq;
1026 if (bh == CURRENT->bhtail) break;
1027 }
1028 }
1029 }
1030 else {
1031 if (!STRAM_ADDR(pbuffer)) {
1032 buffer = acsi_buffer;
1033 pbuffer = phys_acsi_buffer;
1034 if (nsect > ACSI_BUFFER_SECTORS)
1035 nsect = ACSI_BUFFER_SECTORS;
1036 }
1037 }
1038 CurrentBuffer = buffer;
1039 CurrentNSect = nsect;
1040
1041 if (rq_data_dir(CURRENT) == WRITE) {
1042 CMDSET_TARG_LUN( write_cmd, target, lun );
1043 CMDSET_BLOCK( write_cmd, block );
1044 CMDSET_LEN( write_cmd, nsect );
1045 if (buffer == acsi_buffer)
1046 copy_to_acsibuffer();
1047 dma_cache_maintenance( pbuffer, nsect*512, 1 );
1048 do_acsi = write_intr;
1049 if (!acsicmd_dma( write_cmd, buffer, nsect, 1, 1)) {
1050 do_acsi = NULL;
1051 printk( KERN_ERR "ACSI (write): Timeout in command block\n" );
1052 bad_rw_intr();
1053 goto repeat;
1054 }
1055 SET_TIMER();
1056 return;
1057 }
1058 if (rq_data_dir(CURRENT) == READ) {
1059 CMDSET_TARG_LUN( read_cmd, target, lun );
1060 CMDSET_BLOCK( read_cmd, block );
1061 CMDSET_LEN( read_cmd, nsect );
1062 do_acsi = read_intr;
1063 if (!acsicmd_dma( read_cmd, buffer, nsect, 0, 1)) {
1064 do_acsi = NULL;
1065 printk( KERN_ERR "ACSI (read): Timeout in command block\n" );
1066 bad_rw_intr();
1067 goto repeat;
1068 }
1069 SET_TIMER();
1070 return;
1071 }
1072 panic("unknown ACSI command");
1073}
1074
1075
1076
1077/***********************************************************************
1078 *
1079 * Misc functions: ioctl, open, release, check_change, ...
1080 *
1081 ***********************************************************************/
1082
1083
1084static int acsi_ioctl( struct inode *inode, struct file *file,
1085 unsigned int cmd, unsigned long arg )
1086{
1087 struct gendisk *disk = inode->i_bdev->bd_disk;
1088 struct acsi_info_struct *aip = disk->private_data;
1089 switch (cmd) {
1090 case HDIO_GETGEO:
1091 /* HDIO_GETGEO is supported more for getting the partition's
1092 * start sector... */
1093 { struct hd_geometry *geo = (struct hd_geometry *)arg;
1094 /* just fake some geometry here, it's nonsense anyway; to make it
1095 * easy, use Adaptec's usual 64/32 mapping */
1096 put_user( 64, &geo->heads );
1097 put_user( 32, &geo->sectors );
1098 put_user( aip->size >> 11, &geo->cylinders );
1099 put_user(get_start_sect(inode->i_bdev), &geo->start);
1100 return 0;
1101 }
1102 case SCSI_IOCTL_GET_IDLUN:
1103 /* SCSI compatible GET_IDLUN call to get target's ID and LUN number */
1104 put_user( aip->target | (aip->lun << 8),
1105 &((Scsi_Idlun *) arg)->dev_id );
1106 put_user( 0, &((Scsi_Idlun *) arg)->host_unique_id );
1107 return 0;
1108 default:
1109 return -EINVAL;
1110 }
1111}
1112
1113
1114/*
1115 * Open a device, check for read-only and lock the medium if it is
1116 * removable.
1117 *
1118 * Changes by Martin Rogge, 9th Aug 1995:
1119 * Check whether check_disk_change (and therefore revalidate_acsidisk)
1120 * was successful. They fail when there is no medium in the drive.
1121 *
1122 * The problem of media being changed during an operation can be
1123 * ignored because of the prevent_removal code.
1124 *
1125 * Added check for the validity of the device number.
1126 *
1127 */
1128
1129static int acsi_open( struct inode * inode, struct file * filp )
1130{
1131 struct gendisk *disk = inode->i_bdev->bd_disk;
1132 struct acsi_info_struct *aip = disk->private_data;
1133
1134 if (aip->access_count == 0 && aip->removable) {
1135#if 0
1136 aip->changed = 1; /* safety first */
1137#endif
1138 check_disk_change( inode->i_bdev );
1139 if (aip->changed) /* revalidate was not successful (no medium) */
1140 return -ENXIO;
1141 acsi_prevent_removal(aip, 1);
1142 }
1143 aip->access_count++;
1144
1145 if (filp && filp->f_mode) {
1146 check_disk_change( inode->i_bdev );
1147 if (filp->f_mode & 2) {
1148 if (aip->read_only) {
1149 acsi_release( inode, filp );
1150 return -EROFS;
1151 }
1152 }
1153 }
1154
1155 return 0;
1156}
1157
1158/*
1159 * Releasing a block device means we sync() it, so that it can safely
1160 * be forgotten about...
1161 */
1162
1163static int acsi_release( struct inode * inode, struct file * file )
1164{
1165 struct gendisk *disk = inode->i_bdev->bd_disk;
1166 struct acsi_info_struct *aip = disk->private_data;
1167 if (--aip->access_count == 0 && aip->removable)
1168 acsi_prevent_removal(aip, 0);
1169 return( 0 );
1170}
1171
1172/*
1173 * Prevent or allow a media change for removable devices.
1174 */
1175
1176static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag)
1177{
1178 stdma_lock( NULL, NULL );
1179
1180 CMDSET_TARG_LUN(pa_med_rem_cmd, aip->target, aip->lun);
1181 CMDSET_LEN( pa_med_rem_cmd, flag );
1182
1183 if (acsicmd_nodma(pa_med_rem_cmd, 0) && acsi_wait_for_IRQ(3*HZ))
1184 acsi_getstatus();
1185 /* Do not report errors -- some devices may not know this command. */
1186
1187 ENABLE_IRQ();
1188 stdma_release();
1189}
1190
1191static int acsi_media_change(struct gendisk *disk)
1192{
1193 struct acsi_info_struct *aip = disk->private_data;
1194
1195 if (!aip->removable)
1196 return 0;
1197
1198 if (aip->changed)
1199 /* We can be sure that the medium has been changed -- REQUEST
1200 * SENSE has reported this earlier.
1201 */
1202 return 1;
1203
1204 /* If the flag isn't set, make a test by reading block 0.
1205 * If errors happen, it seems to be better to say "changed"...
1206 */
1207 stdma_lock( NULL, NULL );
1208 CMDSET_TARG_LUN(read_cmd, aip->target, aip->lun);
1209 CMDSET_BLOCK( read_cmd, 0 );
1210 CMDSET_LEN( read_cmd, 1 );
1211 if (acsicmd_dma(read_cmd, acsi_buffer, 1, 0, 0) &&
1212 acsi_wait_for_IRQ(3*HZ)) {
1213 if (acsi_getstatus()) {
1214 if (acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
1215 if (CARTRCH_STAT(aip, acsi_buffer))
1216 aip->changed = 1;
1217 }
1218 else {
1219 printk( KERN_ERR "%s: REQUEST SENSE failed in test for "
1220 "medium change; assuming a change\n", disk->disk_name );
1221 aip->changed = 1;
1222 }
1223 }
1224 }
1225 else {
1226 printk( KERN_ERR "%s: Test for medium changed timed out; "
1227 "assuming a change\n", disk->disk_name);
1228 aip->changed = 1;
1229 }
1230 ENABLE_IRQ();
1231 stdma_release();
1232
1233 /* Now, after reading a block, the changed status is surely valid. */
1234 return aip->changed;
1235}
1236
1237
1238static int acsi_change_blk_size( int target, int lun)
1239
1240{ int i;
1241
1242 for (i=0; i<12; i++)
1243 acsi_buffer[i] = 0;
1244
1245 acsi_buffer[3] = 8;
1246 acsi_buffer[10] = 2;
1247 CMDSET_TARG_LUN( modeselect_cmd, target, lun);
1248
1249 if (!acsicmd_dma( modeselect_cmd, acsi_buffer, 1,1,0) ||
1250 !acsi_wait_for_IRQ( 3*HZ ) ||
1251 acsi_getstatus() != 0 ) {
1252 return(0);
1253 }
1254 return(1);
1255}
1256
1257
1258static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd )
1259
1260{
1261 int page;
1262
1263 CMDSET_TARG_LUN( modesense_cmd, target, lun );
1264 for (page=0; page<4; page++) {
1265 modesense_cmd[2] = page;
1266 if (!acsicmd_dma( modesense_cmd, acsi_buffer, 1, 0, 0 ) ||
1267 !acsi_wait_for_IRQ( 3*HZ ) ||
1268 acsi_getstatus())
1269 continue;
1270
1271 /* read twice to jump over the second 16-byte border! */
1272 udelay(300);
1273 if (acsi_wait_for_noIRQ( 20 ) &&
1274 acsicmd_nodma( modesense_cmd, 0 ) &&
1275 acsi_wait_for_IRQ( 3*HZ ) &&
1276 acsi_getstatus() == 0)
1277 break;
1278 }
1279 if (page == 4) {
1280 return(0);
1281 }
1282
1283 dma_cache_maintenance( phys_acsi_buffer, sizeof(SENSE_DATA), 0 );
1284 *sd = *(SENSE_DATA *)acsi_buffer;
1285
1286 /* Validity check, depending on type of data */
1287
1288 switch( SENSE_TYPE(*sd) ) {
1289
1290 case SENSE_TYPE_ATARI:
1291 if (CAPACITY(*sd) == 0)
1292 goto invalid_sense;
1293 break;
1294
1295 case SENSE_TYPE_SCSI:
1296 if (sd->scsi.descriptor_size != 8)
1297 goto invalid_sense;
1298 break;
1299
1300 case SENSE_TYPE_UNKNOWN:
1301
1302 printk( KERN_ERR "ACSI target %d, lun %d: Cannot interpret "
1303 "sense data\n", target, lun );
1304
1305 invalid_sense:
1306
1307#ifdef DEBUG
1308 { int i;
1309 printk( "Mode sense data for ACSI target %d, lun %d seem not valid:",
1310 target, lun );
1311 for( i = 0; i < sizeof(SENSE_DATA); ++i )
1312 printk( "%02x ", (unsigned char)acsi_buffer[i] );
1313 printk( "\n" );
1314 }
1315#endif
1316 return( 0 );
1317 }
1318
1319 return( 1 );
1320}
1321
1322
1323
1324/*******************************************************************
1325 *
1326 * Initialization
1327 *
1328 ********************************************************************/
1329
1330
1331extern struct block_device_operations acsi_fops;
1332
1333static struct gendisk *acsi_gendisk[MAX_DEV];
1334
1335#define MAX_SCSI_DEVICE_CODE 10
1336
1337static const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
1338{
1339 "Direct-Access ",
1340 "Sequential-Access",
1341 "Printer ",
1342 "Processor ",
1343 "WORM ",
1344 "CD-ROM ",
1345 "Scanner ",
1346 "Optical Device ",
1347 "Medium Changer ",
1348 "Communications "
1349};
1350
1351static void print_inquiry(unsigned char *data)
1352{
1353 int i;
1354
1355 printk(KERN_INFO " Vendor: ");
1356 for (i = 8; i < 16; i++)
1357 {
1358 if (data[i] >= 0x20 && i < data[4] + 5)
1359 printk("%c", data[i]);
1360 else
1361 printk(" ");
1362 }
1363
1364 printk(" Model: ");
1365 for (i = 16; i < 32; i++)
1366 {
1367 if (data[i] >= 0x20 && i < data[4] + 5)
1368 printk("%c", data[i]);
1369 else
1370 printk(" ");
1371 }
1372
1373 printk(" Rev: ");
1374 for (i = 32; i < 36; i++)
1375 {
1376 if (data[i] >= 0x20 && i < data[4] + 5)
1377 printk("%c", data[i]);
1378 else
1379 printk(" ");
1380 }
1381
1382 printk("\n");
1383
1384 i = data[0] & 0x1f;
1385
1386 printk(KERN_INFO " Type: %s ", (i < MAX_SCSI_DEVICE_CODE
1387 ? scsi_device_types[i]
1388 : "Unknown "));
1389 printk(" ANSI SCSI revision: %02x", data[2] & 0x07);
1390 if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
1391 printk(" CCS\n");
1392 else
1393 printk("\n");
1394}
1395
1396
1397/*
1398 * Changes by Martin Rogge, 9th Aug 1995:
1399 * acsi_devinit has been taken out of acsi_geninit, because it needs
1400 * to be called from revalidate_acsidisk. The result of request sense
1401 * is now checked for DRIVE NOT READY.
1402 *
1403 * The structure *aip is only valid when acsi_devinit returns
1404 * DEV_SUPPORTED.
1405 *
1406 */
1407
1408#define DEV_NONE 0
1409#define DEV_UNKNOWN 1
1410#define DEV_SUPPORTED 2
1411#define DEV_SLM 3
1412
1413static int acsi_devinit(struct acsi_info_struct *aip)
1414{
1415 int status, got_inquiry;
1416 SENSE_DATA sense;
1417 unsigned char reqsense, extsense;
1418
1419 /*****************************************************************/
1420 /* Do a TEST UNIT READY command to test the presence of a device */
1421 /*****************************************************************/
1422
1423 CMDSET_TARG_LUN(tur_cmd, aip->target, aip->lun);
1424 if (!acsicmd_nodma(tur_cmd, 0)) {
1425 /* timed out -> no device here */
1426#ifdef DEBUG_DETECT
1427 printk("target %d lun %d: timeout\n", aip->target, aip->lun);
1428#endif
1429 return DEV_NONE;
1430 }
1431
1432 /*************************/
1433 /* Read the ACSI status. */
1434 /*************************/
1435
1436 status = acsi_getstatus();
1437 if (status) {
1438 if (status == 0x12) {
1439 /* The SLM printer should be the only device that
1440 * responds with the error code in the status byte. In
1441 * correct status bytes, bit 4 is never set.
1442 */
1443 printk( KERN_INFO "Detected SLM printer at id %d lun %d\n",
1444 aip->target, aip->lun);
1445 return DEV_SLM;
1446 }
1447 /* ignore CHECK CONDITION, since some devices send a
1448 UNIT ATTENTION */
1449 if ((status & 0x1e) != 0x2) {
1450#ifdef DEBUG_DETECT
1451 printk("target %d lun %d: status %d\n",
1452 aip->target, aip->lun, status);
1453#endif
1454 return DEV_UNKNOWN;
1455 }
1456 }
1457
1458 /*******************************/
1459 /* Do a REQUEST SENSE command. */
1460 /*******************************/
1461
1462 if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
1463 printk( KERN_WARNING "acsi_reqsense failed\n");
1464 acsi_buffer[0] = 0;
1465 acsi_buffer[2] = UNIT_ATTENTION;
1466 }
1467 reqsense = acsi_buffer[0];
1468 extsense = acsi_buffer[2] & 0xf;
1469 if (status) {
1470 if ((reqsense & 0x70) == 0x70) { /* extended sense */
1471 if (extsense != UNIT_ATTENTION &&
1472 extsense != NOT_READY) {
1473#ifdef DEBUG_DETECT
1474 printk("target %d lun %d: extended sense %d\n",
1475 aip->target, aip->lun, extsense);
1476#endif
1477 return DEV_UNKNOWN;
1478 }
1479 }
1480 else {
1481 if (reqsense & 0x7f) {
1482#ifdef DEBUG_DETECT
1483 printk("target %d lun %d: sense %d\n",
1484 aip->target, aip->lun, reqsense);
1485#endif
1486 return DEV_UNKNOWN;
1487 }
1488 }
1489 }
1490 else
1491 if (reqsense == 0x4) { /* SH204 Bug workaround */
1492#ifdef DEBUG_DETECT
1493 printk("target %d lun %d status=0 sense=4\n",
1494 aip->target, aip->lun);
1495#endif
1496 return DEV_UNKNOWN;
1497 }
1498
1499 /***********************************************************/
1500 /* Do an INQUIRY command to get more infos on this device. */
1501 /***********************************************************/
1502
1503 /* Assume default values */
1504 aip->removable = 1;
1505 aip->read_only = 0;
1506 aip->old_atari_disk = 0;
1507 aip->changed = (extsense == NOT_READY); /* medium inserted? */
1508 aip->size = DEFAULT_SIZE;
1509 got_inquiry = 0;
1510 /* Fake inquiry result for old atari disks */
1511 memcpy(acsi_buffer, "\000\000\001\000 Adaptec 40xx"
1512 " ", 40);
1513 CMDSET_TARG_LUN(inquiry_cmd, aip->target, aip->lun);
1514 if (acsicmd_dma(inquiry_cmd, acsi_buffer, 1, 0, 0) &&
1515 acsi_getstatus() == 0) {
1516 acsicmd_nodma(inquiry_cmd, 0);
1517 acsi_getstatus();
1518 dma_cache_maintenance( phys_acsi_buffer, 256, 0 );
1519 got_inquiry = 1;
1520 aip->removable = !!(acsi_buffer[1] & 0x80);
1521 }
1522 if (aip->type == NONE) /* only at boot time */
1523 print_inquiry(acsi_buffer);
1524 switch(acsi_buffer[0]) {
1525 case TYPE_DISK:
1526 aip->type = HARDDISK;
1527 break;
1528 case TYPE_ROM:
1529 aip->type = CDROM;
1530 aip->read_only = 1;
1531 break;
1532 default:
1533 return DEV_UNKNOWN;
1534 }
1535 /****************************/
1536 /* Do a MODE SENSE command. */
1537 /****************************/
1538
1539 if (!acsi_mode_sense(aip->target, aip->lun, &sense)) {
1540 printk( KERN_WARNING "No mode sense data.\n" );
1541 return DEV_UNKNOWN;
1542 }
1543 if ((SECTOR_SIZE(sense) != 512) &&
1544 ((aip->type != CDROM) ||
1545 !acsi_change_blk_size(aip->target, aip->lun) ||
1546 !acsi_mode_sense(aip->target, aip->lun, &sense) ||
1547 (SECTOR_SIZE(sense) != 512))) {
1548 printk( KERN_WARNING "Sector size != 512 not supported.\n" );
1549 return DEV_UNKNOWN;
1550 }
1551 /* There are disks out there that claim to have 0 sectors... */
1552 if (CAPACITY(sense))
1553 aip->size = CAPACITY(sense); /* else keep DEFAULT_SIZE */
1554 if (!got_inquiry && SENSE_TYPE(sense) == SENSE_TYPE_ATARI) {
1555 /* If INQUIRY failed and the sense data suggest an old
1556 * Atari disk (SH20x, Megafile), the disk is not removable
1557 */
1558 aip->removable = 0;
1559 aip->old_atari_disk = 1;
1560 }
1561
1562 /******************/
1563 /* We've done it. */
1564 /******************/
1565
1566 return DEV_SUPPORTED;
1567}
1568
1569EXPORT_SYMBOL(acsi_delay_start);
1570EXPORT_SYMBOL(acsi_delay_end);
1571EXPORT_SYMBOL(acsi_wait_for_IRQ);
1572EXPORT_SYMBOL(acsi_wait_for_noIRQ);
1573EXPORT_SYMBOL(acsicmd_nodma);
1574EXPORT_SYMBOL(acsi_getstatus);
1575EXPORT_SYMBOL(acsi_buffer);
1576EXPORT_SYMBOL(phys_acsi_buffer);
1577
1578#ifdef CONFIG_ATARI_SLM_MODULE
1579void acsi_attach_SLMs( int (*attach_func)( int, int ) );
1580
1581EXPORT_SYMBOL(acsi_extstatus);
1582EXPORT_SYMBOL(acsi_end_extstatus);
1583EXPORT_SYMBOL(acsi_extcmd);
1584EXPORT_SYMBOL(acsi_attach_SLMs);
1585
1586/* to remember IDs of SLM devices, SLM module is loaded later
1587 * (index is target#, contents is lun#, -1 means "no SLM") */
1588int SLM_devices[8];
1589#endif
1590
1591static struct block_device_operations acsi_fops = {
1592 .owner = THIS_MODULE,
1593 .open = acsi_open,
1594 .release = acsi_release,
1595 .ioctl = acsi_ioctl,
1596 .media_changed = acsi_media_change,
1597 .revalidate_disk= acsi_revalidate,
1598};
1599
1600#ifdef CONFIG_ATARI_SLM_MODULE
1601/* call attach_slm() for each device that is a printer; needed when the SLM
1602 * driver is built as a module, since it is not yet present when acsi.c is
1603 * initialized and the bus gets scanned. */
1604void acsi_attach_SLMs( int (*attach_func)( int, int ) )
1605{
1606 int i, n = 0;
1607
1608 for( i = 0; i < 8; ++i )
1609 if (SLM_devices[i] >= 0)
1610 n += (*attach_func)( i, SLM_devices[i] );
1611 printk( KERN_INFO "Found %d SLM printer(s) total.\n", n );
1612}
1613#endif /* CONFIG_ATARI_SLM_MODULE */
1614
1615
1616int acsi_init( void )
1617{
1618 int err = 0;
1619 int i, target, lun;
1620 struct acsi_info_struct *aip;
1621#ifdef CONFIG_ATARI_SLM
1622 int n_slm = 0;
1623#endif
1624 if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ACSI))
1625 return 0;
1626 if (register_blkdev(ACSI_MAJOR, "ad")) {
1627 err = -EBUSY;
1628 goto out1;
1629 }
1630 if (!(acsi_buffer =
1631 (char *)atari_stram_alloc(ACSI_BUFFER_SIZE, "acsi"))) {
1632 err = -ENOMEM;
1633 printk( KERN_ERR "Unable to get ACSI ST-Ram buffer.\n" );
1634 goto out2;
1635 }
1636 phys_acsi_buffer = virt_to_phys( acsi_buffer );
1637 STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
1638
1639 acsi_queue = blk_init_queue(do_acsi_request, &acsi_lock);
1640 if (!acsi_queue) {
1641 err = -ENOMEM;
1642 goto out2a;
1643 }
1644#ifdef CONFIG_ATARI_SLM
1645 err = slm_init();
1646#endif
1647 if (err)
1648 goto out3;
1649
1650 printk( KERN_INFO "Probing ACSI devices:\n" );
1651 NDevices = 0;
1652#ifdef CONFIG_ATARI_SLM_MODULE
1653 for( i = 0; i < 8; ++i )
1654 SLM_devices[i] = -1;
1655#endif
1656 stdma_lock(NULL, NULL);
1657
1658 for (target = 0; target < 8 && NDevices < MAX_DEV; ++target) {
1659 lun = 0;
1660 do {
1661 aip = &acsi_info[NDevices];
1662 aip->type = NONE;
1663 aip->target = target;
1664 aip->lun = lun;
1665 i = acsi_devinit(aip);
1666 switch (i) {
1667 case DEV_SUPPORTED:
1668 printk( KERN_INFO "Detected ");
1669 switch (aip->type) {
1670 case HARDDISK:
1671 printk("disk");
1672 break;
1673 case CDROM:
1674 printk("cdrom");
1675 break;
1676				default: break;
1677 }
1678 printk(" ad%c at id %d lun %d ",
1679 'a' + NDevices, target, lun);
1680 if (aip->removable)
1681 printk("(removable) ");
1682 if (aip->read_only)
1683 printk("(read-only) ");
1684 if (aip->size == DEFAULT_SIZE)
1685					printk(" unknown size, using default ");
1686 printk("%ld MByte\n",
1687 (aip->size*512+1024*1024/2)/(1024*1024));
1688 NDevices++;
1689 break;
1690 case DEV_SLM:
1691#ifdef CONFIG_ATARI_SLM
1692 n_slm += attach_slm( target, lun );
1693 break;
1694#endif
1695#ifdef CONFIG_ATARI_SLM_MODULE
1696 SLM_devices[target] = lun;
1697 break;
1698#endif
1699 /* neither of the above: fall through to unknown device */
1700 case DEV_UNKNOWN:
1701 printk( KERN_INFO "Detected unsupported device at "
1702 "id %d lun %d\n", target, lun);
1703 break;
1704 }
1705 }
1706#ifdef CONFIG_ACSI_MULTI_LUN
1707 while (i != DEV_NONE && ++lun < MAX_LUN);
1708#else
1709 while (0);
1710#endif
1711 }
1712
1713 /* reenable interrupt */
1714 ENABLE_IRQ();
1715 stdma_release();
1716
1717#ifndef CONFIG_ATARI_SLM
1718 printk( KERN_INFO "Found %d ACSI device(s) total.\n", NDevices );
1719#else
1720 printk( KERN_INFO "Found %d ACSI device(s) and %d SLM printer(s) total.\n",
1721 NDevices, n_slm );
1722#endif
1723 err = -ENOMEM;
1724 for( i = 0; i < NDevices; ++i ) {
1725 acsi_gendisk[i] = alloc_disk(16);
1726 if (!acsi_gendisk[i])
1727 goto out4;
1728 }
1729
1730 for( i = 0; i < NDevices; ++i ) {
1731 struct gendisk *disk = acsi_gendisk[i];
1732 sprintf(disk->disk_name, "ad%c", 'a'+i);
1733		aip = &acsi_info[i];
1734 sprintf(disk->devfs_name, "ad/target%d/lun%d", aip->target, aip->lun);
1735 disk->major = ACSI_MAJOR;
1736 disk->first_minor = i << 4;
1737 if (acsi_info[i].type != HARDDISK) {
1738 disk->minors = 1;
1739 strcat(disk->devfs_name, "/disc");
1740 }
1741 disk->fops = &acsi_fops;
1742 disk->private_data = &acsi_info[i];
1743 set_capacity(disk, acsi_info[i].size);
1744 disk->queue = acsi_queue;
1745 add_disk(disk);
1746 }
1747 return 0;
1748out4:
1749 while (i--)
1750 put_disk(acsi_gendisk[i]);
1751out3:
1752 blk_cleanup_queue(acsi_queue);
1753out2a:
1754 atari_stram_free( acsi_buffer );
1755out2:
1756 unregister_blkdev( ACSI_MAJOR, "ad" );
1757out1:
1758 return err;
1759}
1760
1761
1762#ifdef MODULE
1763
1764MODULE_LICENSE("GPL");
1765
1766int init_module(void)
1767{
1768 int err;
1769
1770 if ((err = acsi_init()))
1771 return( err );
1772 printk( KERN_INFO "ACSI driver loaded as module.\n");
1773 return( 0 );
1774}
1775
1776void cleanup_module(void)
1777{
1778 int i;
1779 del_timer( &acsi_timer );
1780 blk_cleanup_queue(acsi_queue);
1781 atari_stram_free( acsi_buffer );
1782
1783 if (unregister_blkdev( ACSI_MAJOR, "ad" ) != 0)
1784 printk( KERN_ERR "acsi: cleanup_module failed\n");
1785
1786 for (i = 0; i < NDevices; i++) {
1787 del_gendisk(acsi_gendisk[i]);
1788 put_disk(acsi_gendisk[i]);
1789 }
1790}
1791#endif
1792
1793/*
1794 * This routine is called to flush all partitions and partition tables
1795 * for a changed scsi disk, and then re-read the new partition table.
1796 * If we are revalidating a disk because of a media change, then we
1797 * enter with usage == 0. If we are using an ioctl, we automatically have
1798 * usage == 1 (we need an open channel to use an ioctl :-), so this
1799 * is our limit.
1800 *
1801 * Changes by Martin Rogge, 9th Aug 1995:
1802 * got cd-roms to work by calling acsi_devinit. There are only two problems:
1803 * First, if there is no medium inserted, the status will remain "changed".
1804 * That is no problem at all, but simply a consequence of our design of
1805 * three-valued logic (medium changed, medium not changed, no medium inserted).
1806 * Secondly, the check could fail completely and the drive could deliver
1807 * nonsensical data, which could mess up the acsi_info[] structure. In
1808 * that case we try to make the entry safe.
1809 *
1810 */
1811
1812static int acsi_revalidate(struct gendisk *disk)
1813{
1814 struct acsi_info_struct *aip = disk->private_data;
1815 stdma_lock( NULL, NULL );
1816 if (acsi_devinit(aip) != DEV_SUPPORTED) {
1817 printk( KERN_ERR "ACSI: revalidate failed for target %d lun %d\n",
1818 aip->target, aip->lun);
1819 aip->size = 0;
1820 aip->read_only = 1;
1821 aip->removable = 1;
1822 aip->changed = 1; /* next acsi_open will try again... */
1823 }
1824
1825 ENABLE_IRQ();
1826 stdma_release();
1827 set_capacity(disk, aip->size);
1828 return 0;
1829}
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
new file mode 100644
index 000000000000..e3be8c31a74c
--- /dev/null
+++ b/drivers/block/acsi_slm.c
@@ -0,0 +1,1045 @@
1/*
2 * acsi_slm.c -- Device driver for the Atari SLM laser printer
3 *
4 * Copyright 1995 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive for
8 * more details.
9 *
10 */
11
12/*
13
14Notes:
15
16The major number for SLM printers is 28 (like ACSI), but as a character
17device, not block device. The minor number is the number of the printer (if
18you have more than one SLM; currently max. 2 (#define-constant) SLMs are
19supported). The device can be opened for reading and writing. Reading it
20returns some status info (MODE SENSE data); writing is used for the data to
21be printed. Some ioctls allow getting the printer status and tuning printer
22modes and some internal variables.
23
24A special problem of the SLM driver is the timing and thus the buffering of
25the print data. The problem is that all the data for one page must be present
26in memory when printing starts, else --when swapping occurs-- the timing could
27not be guaranteed. There are several ways to assure this:
28
29 1) Reserve a buffer of 1196k (maximum page size) statically by
30 atari_stram_alloc(). The data are collected there until they're complete,
31 and then printing starts. Since the buffer is reserved, no further
32 considerations about memory and swapping are needed. So this is the
33 simplest method, but it needs a lot of memory for just the SLM.
34
35 A striking advantage of this method is (provided the SLM_CONT_CNT_REPROG
36 method works, see there) that there are no timing problems with the DMA
37 anymore.
38
39 2) The other method would be to reserve the buffer dynamically each time
40 printing is required. I could think of looking at mem_map where the
41 largest unallocated ST-RAM area is, taking the area, and then extending it
42 by swapping out the neighbouring pages, until the needed size is reached.
43 This requires some mm hacking, but seems possible. The only obstacle could
44 be pages that cannot be swapped out (reserved pages)...
45
46 3) Another possibility would be to leave the real data in user space and to
47 work with two dribble buffers of about 32k in the driver: While the one
48 buffer is DMAed to the SLM, the other can be filled with new data. But
49 to keep the timing, that requires that the user data remain in memory and
50 are not swapped out. Requires mm hacking, too, but maybe not so bad as
51 method 2).
52
53*/
54
55#include <linux/module.h>
56
57#include <linux/errno.h>
58#include <linux/sched.h>
59#include <linux/timer.h>
60#include <linux/fs.h>
61#include <linux/major.h>
62#include <linux/kernel.h>
63#include <linux/delay.h>
64#include <linux/interrupt.h>
65#include <linux/time.h>
66#include <linux/mm.h>
67#include <linux/slab.h>
68#include <linux/devfs_fs_kernel.h>
69#include <linux/smp_lock.h>
70
71#include <asm/pgtable.h>
72#include <asm/system.h>
73#include <asm/uaccess.h>
74#include <asm/atarihw.h>
75#include <asm/atariints.h>
76#include <asm/atari_acsi.h>
77#include <asm/atari_stdma.h>
78#include <asm/atari_stram.h>
79#include <asm/atari_SLM.h>
80
81
82#undef DEBUG
83
84/* Define this if the page data are contiguous in physical memory. That
85 * requires less reprogramming of the ST-DMA */
86#define SLM_CONTINUOUS_DMA
87
88/* Use continuous reprogramming of the ST-DMA counter register. This is
89 * --strictly speaking-- not allowed, Atari recommends not to look at the
90 * counter register while a DMA is going on. But I don't know if that applies
91 * only to reading the register or also to writing it; writing, at least,
92 * works fine for me... The advantage is that the timing becomes absolutely
93 * uncritical: just update the counter register to its maximum every 200 ms or so,
94 * and the DMA will work until the status byte interrupt occurs.
95 */
96#define SLM_CONT_CNT_REPROG
97
98#define CMDSET_TARG_LUN(cmd,targ,lun) \
99 do { \
100 cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5; \
101 cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5; \
102 } while(0)
103
104#define START_TIMER(to) mod_timer(&slm_timer, jiffies + (to))
105#define STOP_TIMER() del_timer(&slm_timer)
106
107
108static char slmreqsense_cmd[6] = { 0x03, 0, 0, 0, 0, 0 };
109static char slmprint_cmd[6] = { 0x0a, 0, 0, 0, 0, 0 };
110static char slminquiry_cmd[6] = { 0x12, 0, 0, 0, 0, 0x80 };
111static char slmmsense_cmd[6] = { 0x1a, 0, 0, 0, 255, 0 };
112#if 0
113static char slmmselect_cmd[6] = { 0x15, 0, 0, 0, 0, 0 };
114#endif
115
116
117#define MAX_SLM 2
118
119static struct slm {
120 unsigned target; /* target number */
121 unsigned lun; /* LUN in target controller */
122 atomic_t wr_ok; /* set to 0 if output part busy */
123 atomic_t rd_ok; /* set to 0 if status part busy */
124} slm_info[MAX_SLM];
125
126int N_SLM_Printers = 0;
127
128/* printer buffer */
129static unsigned char *SLMBuffer; /* start of buffer */
130static unsigned char *BufferP; /* current position in buffer */
131static int BufferSize; /* length of buffer for page size */
132
133typedef enum { IDLE, FILLING, PRINTING } SLMSTATE;
134static SLMSTATE SLMState;
135static int SLMBufOwner; /* SLM# currently using the buffer */
136
137/* DMA variables */
138#ifndef SLM_CONT_CNT_REPROG
139static unsigned long SLMCurAddr; /* current base addr of DMA chunk */
140static unsigned long SLMEndAddr; /* expected end addr */
141static unsigned long SLMSliceSize; /* size of one DMA chunk */
142#endif
143static int SLMError;
144
145/* wait queues */
146static DECLARE_WAIT_QUEUE_HEAD(slm_wait); /* waiting for buffer */
147static DECLARE_WAIT_QUEUE_HEAD(print_wait); /* waiting for printing finished */
148
149/* status codes */
150#define SLMSTAT_OK 0x00
151#define SLMSTAT_ORNERY 0x02
152#define SLMSTAT_TONER 0x03
153#define SLMSTAT_WARMUP 0x04
154#define SLMSTAT_PAPER 0x05
155#define SLMSTAT_DRUM 0x06
156#define SLMSTAT_INJAM 0x07
157#define SLMSTAT_THRJAM 0x08
158#define SLMSTAT_OUTJAM 0x09
159#define SLMSTAT_COVER 0x0a
160#define SLMSTAT_FUSER 0x0b
161#define SLMSTAT_IMAGER 0x0c
162#define SLMSTAT_MOTOR 0x0d
163#define SLMSTAT_VIDEO 0x0e
164#define SLMSTAT_SYSTO 0x10
165#define SLMSTAT_OPCODE 0x12
166#define SLMSTAT_DEVNUM 0x15
167#define SLMSTAT_PARAM 0x1a
168#define SLMSTAT_ACSITO 0x1b /* driver defined */
169#define SLMSTAT_NOTALL 0x1c /* driver defined */
170
171static char *SLMErrors[] = {
172 /* 0x00 */ "OK and ready",
173 /* 0x01 */ NULL,
174 /* 0x02 */ "ornery printer",
175 /* 0x03 */ "toner empty",
176 /* 0x04 */ "warming up",
177 /* 0x05 */ "paper empty",
178 /* 0x06 */ "drum empty",
179 /* 0x07 */ "input jam",
180 /* 0x08 */ "through jam",
181 /* 0x09 */ "output jam",
182 /* 0x0a */ "cover open",
183 /* 0x0b */ "fuser malfunction",
184 /* 0x0c */ "imager malfunction",
185 /* 0x0d */ "motor malfunction",
186 /* 0x0e */ "video malfunction",
187 /* 0x0f */ NULL,
188 /* 0x10 */ "printer system timeout",
189 /* 0x11 */ NULL,
190 /* 0x12 */ "invalid operation code",
191 /* 0x13 */ NULL,
192 /* 0x14 */ NULL,
193 /* 0x15 */ "invalid device number",
194 /* 0x16 */ NULL,
195 /* 0x17 */ NULL,
196 /* 0x18 */ NULL,
197 /* 0x19 */ NULL,
198 /* 0x1a */ "invalid parameter list",
199 /* 0x1b */ "ACSI timeout",
200 /* 0x1c */ "not all printed"
201};
202
203#define N_ERRORS (sizeof(SLMErrors)/sizeof(*SLMErrors))
204
205/* real (driver caused) error? */
206#define IS_REAL_ERROR(x) (x > 0x10)
207
208
209static struct {
210 char *name;
211 int w, h;
212} StdPageSize[] = {
213 { "Letter", 2400, 3180 },
214 { "Legal", 2400, 4080 },
215 { "A4", 2336, 3386 },
216 { "B5", 2016, 2914 }
217};
218
219#define N_STD_SIZES (sizeof(StdPageSize)/sizeof(*StdPageSize))
220
221#define SLM_BUFFER_SIZE (2336*3386/8) /* A4 for now */
222#define SLM_DMA_AMOUNT 255 /* #sectors to program the DMA for */
223
224#ifdef SLM_CONTINUOUS_DMA
225# define SLM_DMA_INT_OFFSET 0 /* DMA goes until seccnt 0, no offs */
226# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
227# define SLM_SLICE_SIZE(w) (255*512)
228#else
229# define SLM_DMA_INT_OFFSET 32 /* 32 Byte ST-DMA FIFO */
230# define SLM_DMA_END_OFFSET 32 /* 32 Byte ST-DMA FIFO */
231# define SLM_SLICE_SIZE(w) ((254*512)/(w/8)*(w/8))
232#endif
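/*
 * For illustration: without continuous DMA a slice is rounded down to a
 * whole number of scan lines.  An A4 page 2336 pixels wide has 2336/8 = 292
 * bytes per line, so SLM_SLICE_SIZE(2336) = (254*512)/292*292 = 129940
 * bytes per DMA slice; with SLM_CONTINUOUS_DMA the full 255*512 bytes are
 * used instead.
 */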
233
234/* calculate the number of jiffies to wait for 'n' bytes */
235#ifdef SLM_CONT_CNT_REPROG
236#define DMA_TIME_FOR(n) 50
237#define DMA_STARTUP_TIME 0
238#else
239#define DMA_TIME_FOR(n) (n/1400-1)
240#define DMA_STARTUP_TIME 650
241#endif
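/*
 * Rough numbers for illustration (assuming HZ == 100): without continuous
 * counter reprogramming DMA_TIME_FOR(n) = n/1400 - 1 budgets about 1400
 * bytes per jiffy, i.e. some 140 kB/s of printer DMA, so a full 255*512 =
 * 130560 byte programming unit comes out at roughly 92 jiffies.  With
 * SLM_CONT_CNT_REPROG the value is a flat 50 jiffies, i.e. the sector
 * counter is simply topped up about twice a second until the status
 * interrupt arrives.
 */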
242
243/***************************** Prototypes *****************************/
244
245static char *slm_errstr( int stat );
246static int slm_getstats( char *buffer, int device );
247static ssize_t slm_read( struct file* file, char *buf, size_t count, loff_t
248 *ppos );
249static void start_print( int device );
250static irqreturn_t slm_interrupt(int irc, void *data, struct pt_regs *fp);
251static void slm_test_ready( unsigned long dummy );
252static void set_dma_addr( unsigned long paddr );
253static unsigned long get_dma_addr( void );
254static ssize_t slm_write( struct file *file, const char *buf, size_t count,
255 loff_t *ppos );
256static int slm_ioctl( struct inode *inode, struct file *file, unsigned int
257 cmd, unsigned long arg );
258static int slm_open( struct inode *inode, struct file *file );
259static int slm_release( struct inode *inode, struct file *file );
260static int slm_req_sense( int device );
261static int slm_mode_sense( int device, char *buffer, int abs_flag );
262#if 0
263static int slm_mode_select( int device, char *buffer, int len, int
264 default_flag );
265#endif
266static int slm_get_pagesize( int device, int *w, int *h );
267
268/************************* End of Prototypes **************************/
269
270
271static struct timer_list slm_timer = TIMER_INITIALIZER(slm_test_ready, 0, 0);
272
273static struct file_operations slm_fops = {
274 .owner = THIS_MODULE,
275 .read = slm_read,
276 .write = slm_write,
277 .ioctl = slm_ioctl,
278 .open = slm_open,
279 .release = slm_release,
280};
281
282
283/* ---------------------------------------------------------------------- */
284/* Status Functions */
285
286
287static char *slm_errstr( int stat )
288
289{ char *p;
290 static char str[22];
291
292 stat &= 0x1f;
293 if (stat >= 0 && stat < N_ERRORS && (p = SLMErrors[stat]))
294 return( p );
295 sprintf( str, "unknown status 0x%02x", stat );
296 return( str );
297}
298
299
300static int slm_getstats( char *buffer, int device )
301
302{ int len = 0, stat, i, w, h;
303 unsigned char buf[256];
304
305 stat = slm_mode_sense( device, buf, 0 );
306 if (IS_REAL_ERROR(stat))
307 return( -EIO );
308
309#define SHORTDATA(i) ((buf[i] << 8) | buf[i+1])
310#define BOOLDATA(i,mask) ((buf[i] & mask) ? "on" : "off")
311
312 w = SHORTDATA( 3 );
313 h = SHORTDATA( 1 );
314
315 len += sprintf( buffer+len, "Status\t\t%s\n",
316 slm_errstr( stat ) );
317 len += sprintf( buffer+len, "Page Size\t%dx%d",
318 w, h );
319
320 for( i = 0; i < N_STD_SIZES; ++i ) {
321 if (w == StdPageSize[i].w && h == StdPageSize[i].h)
322 break;
323 }
324 if (i < N_STD_SIZES)
325 len += sprintf( buffer+len, " (%s)", StdPageSize[i].name );
326 buffer[len++] = '\n';
327
328 len += sprintf( buffer+len, "Top/Left Margin\t%d/%d\n",
329 SHORTDATA( 5 ), SHORTDATA( 7 ) );
330 len += sprintf( buffer+len, "Manual Feed\t%s\n",
331 BOOLDATA( 9, 0x01 ) );
332 len += sprintf( buffer+len, "Input Select\t%d\n",
333 (buf[9] >> 1) & 7 );
334 len += sprintf( buffer+len, "Auto Select\t%s\n",
335 BOOLDATA( 9, 0x10 ) );
336 len += sprintf( buffer+len, "Prefeed Paper\t%s\n",
337 BOOLDATA( 9, 0x20 ) );
338 len += sprintf( buffer+len, "Thick Pixels\t%s\n",
339 BOOLDATA( 9, 0x40 ) );
340 len += sprintf( buffer+len, "H/V Resol.\t%d/%d dpi\n",
341 SHORTDATA( 12 ), SHORTDATA( 10 ) );
342 len += sprintf( buffer+len, "System Timeout\t%d\n",
343 buf[14] );
344 len += sprintf( buffer+len, "Scan Time\t%d\n",
345 SHORTDATA( 15 ) );
346 len += sprintf( buffer+len, "Page Count\t%d\n",
347 SHORTDATA( 17 ) );
348 len += sprintf( buffer+len, "In/Out Cap.\t%d/%d\n",
349 SHORTDATA( 19 ), SHORTDATA( 21 ) );
350 len += sprintf( buffer+len, "Stagger Output\t%s\n",
351 BOOLDATA( 23, 0x01 ) );
352 len += sprintf( buffer+len, "Output Select\t%d\n",
353 (buf[23] >> 1) & 7 );
354 len += sprintf( buffer+len, "Duplex Print\t%s\n",
355 BOOLDATA( 23, 0x10 ) );
356 len += sprintf( buffer+len, "Color Sep.\t%s\n",
357 BOOLDATA( 23, 0x20 ) );
358
359 return( len );
360}
361
362
363static ssize_t slm_read( struct file *file, char *buf, size_t count,
364 loff_t *ppos )
365
366{
367 struct inode *node = file->f_dentry->d_inode;
368 unsigned long page;
369 int length;
370 int end;
371
372 if (count < 0)
373 return( -EINVAL );
374 if (!(page = __get_free_page( GFP_KERNEL )))
375 return( -ENOMEM );
376
377 length = slm_getstats( (char *)page, iminor(node) );
378 if (length < 0) {
379 count = length;
380 goto out;
381 }
382 if (file->f_pos >= length) {
383 count = 0;
384 goto out;
385 }
386 if (count + file->f_pos > length)
387 count = length - file->f_pos;
388 end = count + file->f_pos;
389 if (copy_to_user(buf, (char *)page + file->f_pos, count)) {
390 count = -EFAULT;
391 goto out;
392 }
393 file->f_pos = end;
394out: free_page( page );
395 return( count );
396}
397
398
399/* ---------------------------------------------------------------------- */
400/* Printing */
401
402
403static void start_print( int device )
404
405{ struct slm *sip = &slm_info[device];
406 unsigned char *cmd;
407 unsigned long paddr;
408 int i;
409
410 stdma_lock( slm_interrupt, NULL );
411
412 CMDSET_TARG_LUN( slmprint_cmd, sip->target, sip->lun );
413 cmd = slmprint_cmd;
414 paddr = virt_to_phys( SLMBuffer );
415 dma_cache_maintenance( paddr, virt_to_phys(BufferP)-paddr, 1 );
416 DISABLE_IRQ();
417
418 /* Low on A1 */
419 dma_wd.dma_mode_status = 0x88;
420 MFPDELAY();
421
422 /* send the command bytes except the last */
423 for( i = 0; i < 5; ++i ) {
424 DMA_LONG_WRITE( *cmd++, 0x8a );
425 udelay(20);
426 if (!acsi_wait_for_IRQ( HZ/2 )) {
427 SLMError = 1;
428 return; /* timeout */
429 }
430 }
431 /* last command byte */
432 DMA_LONG_WRITE( *cmd++, 0x82 );
433 MFPDELAY();
434 /* set DMA address */
435 set_dma_addr( paddr );
436 /* program DMA for write and select sector counter reg */
437 dma_wd.dma_mode_status = 0x192;
438 MFPDELAY();
439 /* program for 255*512 bytes and start DMA */
440 DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
441
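	/* Without continuous counter reprogramming the page is transferred in
	 * slices; remember where the current slice ends so slm_test_ready()
	 * can check the DMA progress against it. */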
442#ifndef SLM_CONT_CNT_REPROG
443 SLMCurAddr = paddr;
444 SLMEndAddr = paddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
445#endif
446 START_TIMER( DMA_STARTUP_TIME + DMA_TIME_FOR( SLMSliceSize ));
447#if !defined(SLM_CONT_CNT_REPROG) && defined(DEBUG)
448 printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
449 SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
450#endif
451
452 ENABLE_IRQ();
453}
454
455
456/* Only called when an error happened or at the end of a page */
457
458static irqreturn_t slm_interrupt(int irc, void *data, struct pt_regs *fp)
459
460{ unsigned long addr;
461 int stat;
462
463 STOP_TIMER();
464 addr = get_dma_addr();
465 stat = acsi_getstatus();
466 SLMError = (stat < 0) ? SLMSTAT_ACSITO :
467 (addr < virt_to_phys(BufferP)) ? SLMSTAT_NOTALL :
468 stat;
469
470 dma_wd.dma_mode_status = 0x80;
471 MFPDELAY();
472#ifdef DEBUG
473 printk( "SLM: interrupt, addr=%#lx, error=%d\n", addr, SLMError );
474#endif
475
476 wake_up( &print_wait );
477 stdma_release();
478 ENABLE_IRQ();
479 return IRQ_HANDLED;
480}
481
482
483static void slm_test_ready( unsigned long dummy )
484
485{
486#ifdef SLM_CONT_CNT_REPROG
487 /* program for 255*512 bytes again */
488 dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
489 START_TIMER( DMA_TIME_FOR(0) );
490#ifdef DEBUG
491 printk( "SLM: reprogramming timer for %d jiffies, addr=%#lx\n",
492 DMA_TIME_FOR(0), get_dma_addr() );
493#endif
494
495#else /* !SLM_CONT_CNT_REPROG */
496
497 unsigned long flags, addr;
498 int d, ti;
499#ifdef DEBUG
500 struct timeval start_tm, end_tm;
501 int did_wait = 0;
502#endif
503
504 local_irq_save(flags);
505
506 addr = get_dma_addr();
507 if ((d = SLMEndAddr - addr) > 0) {
508 local_irq_restore(flags);
509
510 /* slice not yet finished, decide whether to start another timer or to
511 * busy-wait */
512 ti = DMA_TIME_FOR( d );
513 if (ti > 0) {
514#ifdef DEBUG
515 printk( "SLM: reprogramming timer for %d jiffies, rest %d bytes\n",
516 ti, d );
517#endif
518 START_TIMER( ti );
519 return;
520 }
521 /* wait for desired end address to be reached */
522#ifdef DEBUG
523 do_gettimeofday( &start_tm );
524 did_wait = 1;
525#endif
526 local_irq_disable();
527 while( get_dma_addr() < SLMEndAddr )
528 barrier();
529 }
530
531 /* slice finished, start next one */
532 SLMCurAddr += SLMSliceSize;
533
534#ifdef SLM_CONTINUOUS_DMA
535 /* program for 255*512 bytes again */
536 dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
537#else
538 /* set DMA address;
539 * add 2 bytes for the ones in the SLM controller FIFO! */
540 set_dma_addr( SLMCurAddr + 2 );
541 /* toggle DMA to write and select sector counter reg */
542 dma_wd.dma_mode_status = 0x92;
543 MFPDELAY();
544 dma_wd.dma_mode_status = 0x192;
545 MFPDELAY();
546 /* program for 255*512 bytes and start DMA */
547 DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
548#endif
549
550 local_irq_restore(flags);
551
552#ifdef DEBUG
553 if (did_wait) {
554 int ms;
555 do_gettimeofday( &end_tm );
556 ms = (end_tm.tv_sec*1000000+end_tm.tv_usec) -
557 (start_tm.tv_sec*1000000+start_tm.tv_usec);
558 printk( "SLM: did %ld.%ld ms busy waiting for %d bytes\n",
559 ms/1000, ms%1000, d );
560 }
561 else
562 printk( "SLM: didn't wait (!)\n" );
563#endif
564
565 if ((unsigned char *)PTOV( SLMCurAddr + SLMSliceSize ) >= BufferP) {
566 /* will be last slice, no timer necessary */
567#ifdef DEBUG
568 printk( "SLM: CurAddr=%#lx EndAddr=%#lx last slice -> no timer\n",
569 SLMCurAddr, SLMEndAddr );
570#endif
571 }
572 else {
573 /* not last slice */
574 SLMEndAddr = SLMCurAddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
575 START_TIMER( DMA_TIME_FOR( SLMSliceSize ));
576#ifdef DEBUG
577 printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
578 SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
579#endif
580 }
581#endif /* SLM_CONT_CNT_REPROG */
582}
583
584
585static void set_dma_addr( unsigned long paddr )
586
587{ unsigned long flags;
588
589 local_irq_save(flags);
590 dma_wd.dma_lo = (unsigned char)paddr;
591 paddr >>= 8;
592 MFPDELAY();
593 dma_wd.dma_md = (unsigned char)paddr;
594 paddr >>= 8;
595 MFPDELAY();
596 if (ATARIHW_PRESENT( EXTD_DMA ))
597 st_dma_ext_dmahi = (unsigned short)paddr;
598 else
599 dma_wd.dma_hi = (unsigned char)paddr;
600 MFPDELAY();
601 local_irq_restore(flags);
602}
603
604
605static unsigned long get_dma_addr( void )
606
607{ unsigned long addr;
608
609 addr = dma_wd.dma_lo & 0xff;
610 MFPDELAY();
611 addr |= (dma_wd.dma_md & 0xff) << 8;
612 MFPDELAY();
613 addr |= (dma_wd.dma_hi & 0xff) << 16;
614 MFPDELAY();
615
616 return( addr );
617}
618
619
620static ssize_t slm_write( struct file *file, const char *buf, size_t count,
621 loff_t *ppos )
622
623{
624 struct inode *node = file->f_dentry->d_inode;
625 int device = iminor(node);
626 int n, filled, w, h;
627
628 while( SLMState == PRINTING ||
629 (SLMState == FILLING && SLMBufOwner != device) ) {
630 interruptible_sleep_on( &slm_wait );
631 if (signal_pending(current))
632 return( -ERESTARTSYS );
633 }
634 if (SLMState == IDLE) {
635 /* first data of page: get current page size */
636 if (slm_get_pagesize( device, &w, &h ))
637 return( -EIO );
638 BufferSize = w*h/8;
639 if (BufferSize > SLM_BUFFER_SIZE)
640 return( -ENOMEM );
641
642 SLMState = FILLING;
643 SLMBufOwner = device;
644 }
645
646 n = count;
647 filled = BufferP - SLMBuffer;
648 if (filled + n > BufferSize)
649 n = BufferSize - filled;
650
651 if (copy_from_user(BufferP, buf, n))
652 return -EFAULT;
653 BufferP += n;
654 filled += n;
655
656 if (filled == BufferSize) {
657		/* Check the paper size again! The user may have switched it between
658		 * starting and finishing the data, which would result in a garbage
659		 * page... */
660 if (slm_get_pagesize( device, &w, &h ))
661 return( -EIO );
662 if (BufferSize != w*h/8) {
663 printk( KERN_NOTICE "slm%d: page size changed while printing\n",
664 device );
665 return( -EAGAIN );
666 }
667
668 SLMState = PRINTING;
669 /* choose a slice size that is a multiple of the line size */
670#ifndef SLM_CONT_CNT_REPROG
671 SLMSliceSize = SLM_SLICE_SIZE(w);
672#endif
673
674 start_print( device );
675 sleep_on( &print_wait );
676 if (SLMError && IS_REAL_ERROR(SLMError)) {
677 printk( KERN_ERR "slm%d: %s\n", device, slm_errstr(SLMError) );
678 n = -EIO;
679 }
680
681 SLMState = IDLE;
682 BufferP = SLMBuffer;
683 wake_up_interruptible( &slm_wait );
684 }
685
686 return( n );
687}
688
689
690/* ---------------------------------------------------------------------- */
691/* ioctl Functions */
692
693
694static int slm_ioctl( struct inode *inode, struct file *file,
695 unsigned int cmd, unsigned long arg )
696
697{ int device = iminor(inode), err;
698
699 /* I can think of setting:
700 * - manual feed
701 * - paper format
702 * - copy count
703 * - ...
704 * but haven't implemented that yet :-)
705	 * BTW, does anybody have better docs on the MODE SENSE/MODE SELECT data?
706 */
707 switch( cmd ) {
708
709 case SLMIORESET: /* reset buffer, i.e. empty the buffer */
710 if (!(file->f_mode & 2))
711 return( -EINVAL );
712 if (SLMState == PRINTING)
713 return( -EBUSY );
714 SLMState = IDLE;
715 BufferP = SLMBuffer;
716 wake_up_interruptible( &slm_wait );
717 return( 0 );
718
719 case SLMIOGSTAT: { /* get status */
720 int stat;
721 char *str;
722
723 stat = slm_req_sense( device );
724 if (arg) {
725 str = slm_errstr( stat );
726 if (put_user(stat,
727 (long *)&((struct SLM_status *)arg)->stat))
728 return -EFAULT;
729 if (copy_to_user( ((struct SLM_status *)arg)->str, str,
730 strlen(str) + 1))
731 return -EFAULT;
732 }
733 return( stat );
734 }
735
736 case SLMIOGPSIZE: { /* get paper size */
737 int w, h;
738
739 if ((err = slm_get_pagesize( device, &w, &h ))) return( err );
740
741 if (put_user(w, (long *)&((struct SLM_paper_size *)arg)->width))
742 return -EFAULT;
743 if (put_user(h, (long *)&((struct SLM_paper_size *)arg)->height))
744 return -EFAULT;
745 return( 0 );
746 }
747
748 case SLMIOGMFEED: /* get manual feed */
749 return( -EINVAL );
750
751 case SLMIOSPSIZE: /* set paper size */
752 return( -EINVAL );
753
754 case SLMIOSMFEED: /* set manual feed */
755 return( -EINVAL );
756
757 }
758 return( -EINVAL );
759}
760
761
762/* ---------------------------------------------------------------------- */
763/* Opening and Closing */
764
765
766static int slm_open( struct inode *inode, struct file *file )
767
768{ int device;
769 struct slm *sip;
770
771 device = iminor(inode);
772 if (device >= N_SLM_Printers)
773 return( -ENXIO );
774 sip = &slm_info[device];
775
776 if (file->f_mode & 2) {
777 /* open for writing is exclusive */
778 if ( !atomic_dec_and_test(&sip->wr_ok) ) {
779 atomic_inc(&sip->wr_ok);
780 return( -EBUSY );
781 }
782 }
783 if (file->f_mode & 1) {
784 /* open for reading is exclusive */
785 if ( !atomic_dec_and_test(&sip->rd_ok) ) {
786 atomic_inc(&sip->rd_ok);
787 return( -EBUSY );
788 }
789 }
790
791 return( 0 );
792}
793
794
795static int slm_release( struct inode *inode, struct file *file )
796
797{ int device;
798 struct slm *sip;
799
800 device = iminor(inode);
801 sip = &slm_info[device];
802
803 if (file->f_mode & 2)
804 atomic_inc( &sip->wr_ok );
805 if (file->f_mode & 1)
806 atomic_inc( &sip->rd_ok );
807
808 return( 0 );
809}
810
811
812/* ---------------------------------------------------------------------- */
813/* ACSI Primitives for the SLM */
814
815
816static int slm_req_sense( int device )
817
818{ int stat, rv;
819 struct slm *sip = &slm_info[device];
820
821 stdma_lock( NULL, NULL );
822
823 CMDSET_TARG_LUN( slmreqsense_cmd, sip->target, sip->lun );
824 if (!acsicmd_nodma( slmreqsense_cmd, 0 ) ||
825 (stat = acsi_getstatus()) < 0)
826 rv = SLMSTAT_ACSITO;
827 else
828 rv = stat & 0x1f;
829
830 ENABLE_IRQ();
831 stdma_release();
832 return( rv );
833}
834
835
836static int slm_mode_sense( int device, char *buffer, int abs_flag )
837
838{ unsigned char stat, len;
839 int rv = 0;
840 struct slm *sip = &slm_info[device];
841
842 stdma_lock( NULL, NULL );
843
844 CMDSET_TARG_LUN( slmmsense_cmd, sip->target, sip->lun );
845 slmmsense_cmd[5] = abs_flag ? 0x80 : 0;
846 if (!acsicmd_nodma( slmmsense_cmd, 0 )) {
847 rv = SLMSTAT_ACSITO;
848 goto the_end;
849 }
850
851 if (!acsi_extstatus( &stat, 1 )) {
852 acsi_end_extstatus();
853 rv = SLMSTAT_ACSITO;
854 goto the_end;
855 }
856
857 if (!acsi_extstatus( &len, 1 )) {
858 acsi_end_extstatus();
859 rv = SLMSTAT_ACSITO;
860 goto the_end;
861 }
862 buffer[0] = len;
863 if (!acsi_extstatus( buffer+1, len )) {
864 acsi_end_extstatus();
865 rv = SLMSTAT_ACSITO;
866 goto the_end;
867 }
868
869 acsi_end_extstatus();
870 rv = stat & 0x1f;
871
872 the_end:
873 ENABLE_IRQ();
874 stdma_release();
875 return( rv );
876}
877
878
879#if 0
880/* currently unused */
881static int slm_mode_select( int device, char *buffer, int len,
882 int default_flag )
883
884{ int stat, rv;
885 struct slm *sip = &slm_info[device];
886
887 stdma_lock( NULL, NULL );
888
889 CMDSET_TARG_LUN( slmmselect_cmd, sip->target, sip->lun );
890 slmmselect_cmd[5] = default_flag ? 0x80 : 0;
891 if (!acsicmd_nodma( slmmselect_cmd, 0 )) {
892 rv = SLMSTAT_ACSITO;
893 goto the_end;
894 }
895
896 if (!default_flag) {
897 unsigned char c = len;
898 if (!acsi_extcmd( &c, 1 )) {
899 rv = SLMSTAT_ACSITO;
900 goto the_end;
901 }
902 if (!acsi_extcmd( buffer, len )) {
903 rv = SLMSTAT_ACSITO;
904 goto the_end;
905 }
906 }
907
908 stat = acsi_getstatus();
909 rv = (stat < 0 ? SLMSTAT_ACSITO : stat);
910
911 the_end:
912 ENABLE_IRQ();
913 stdma_release();
914 return( rv );
915}
916#endif
917
918
919static int slm_get_pagesize( int device, int *w, int *h )
920
921{ char buf[256];
922 int stat;
923
924 stat = slm_mode_sense( device, buf, 0 );
925 ENABLE_IRQ();
926 stdma_release();
927
928 if (stat != SLMSTAT_OK)
929 return( -EIO );
930
931 *w = (buf[3] << 8) | buf[4];
932 *h = (buf[1] << 8) | buf[2];
933 return( 0 );
934}
935
936
937/* ---------------------------------------------------------------------- */
938/* Initialization */
939
940
941int attach_slm( int target, int lun )
942
943{ static int did_register;
944 int len;
945
946 if (N_SLM_Printers >= MAX_SLM) {
947		printk( KERN_WARNING "Too many SLMs\n" );
948 return( 0 );
949 }
950
951 /* do an INQUIRY */
952 udelay(100);
953 CMDSET_TARG_LUN( slminquiry_cmd, target, lun );
954 if (!acsicmd_nodma( slminquiry_cmd, 0 )) {
955 inq_timeout:
956 printk( KERN_ERR "SLM inquiry command timed out.\n" );
957 inq_fail:
958 acsi_end_extstatus();
959 return( 0 );
960 }
961 /* read status and header of return data */
962 if (!acsi_extstatus( SLMBuffer, 6 ))
963 goto inq_timeout;
964
965 if (SLMBuffer[1] != 2) { /* device type == printer? */
966 printk( KERN_ERR "SLM inquiry returned device type != printer\n" );
967 goto inq_fail;
968 }
969 len = SLMBuffer[5];
970
971 /* read id string */
972 if (!acsi_extstatus( SLMBuffer, len ))
973 goto inq_timeout;
974 acsi_end_extstatus();
975 SLMBuffer[len] = 0;
976
977 if (!did_register) {
978 did_register = 1;
979 }
980
981 slm_info[N_SLM_Printers].target = target;
982 slm_info[N_SLM_Printers].lun = lun;
983 atomic_set(&slm_info[N_SLM_Printers].wr_ok, 1 );
984 atomic_set(&slm_info[N_SLM_Printers].rd_ok, 1 );
985
986 printk( KERN_INFO " Printer: %s\n", SLMBuffer );
987 printk( KERN_INFO "Detected slm%d at id %d lun %d\n",
988 N_SLM_Printers, target, lun );
989 N_SLM_Printers++;
990 return( 1 );
991}
992
993int slm_init( void )
994
995{
996 int i;
997 if (register_chrdev( ACSI_MAJOR, "slm", &slm_fops )) {
998 printk( KERN_ERR "Unable to get major %d for ACSI SLM\n", ACSI_MAJOR );
999 return -EBUSY;
1000 }
1001
1002 if (!(SLMBuffer = atari_stram_alloc( SLM_BUFFER_SIZE, "SLM" ))) {
1003 printk( KERN_ERR "Unable to get SLM ST-Ram buffer.\n" );
1004 unregister_chrdev( ACSI_MAJOR, "slm" );
1005 return -ENOMEM;
1006 }
1007 BufferP = SLMBuffer;
1008 SLMState = IDLE;
1009
1010 devfs_mk_dir("slm");
1011 for (i = 0; i < MAX_SLM; i++) {
1012 devfs_mk_cdev(MKDEV(ACSI_MAJOR, i),
1013 S_IFCHR|S_IRUSR|S_IWUSR, "slm/%d", i);
1014 }
1015 return 0;
1016}
1017
1018#ifdef MODULE
1019
1020/* from acsi.c */
1021void acsi_attach_SLMs( int (*attach_func)( int, int ) );
1022
1023int init_module(void)
1024{
1025 int err;
1026
1027 if ((err = slm_init()))
1028 return( err );
1029 /* This calls attach_slm() for every target/lun where acsi.c detected a
1030 * printer */
1031 acsi_attach_SLMs( attach_slm );
1032 return( 0 );
1033}
1034
1035void cleanup_module(void)
1036{
1037 int i;
1038 for (i = 0; i < MAX_SLM; i++)
1039 devfs_remove("slm/%d", i);
1040 devfs_remove("slm");
1041 if (unregister_chrdev( ACSI_MAJOR, "slm" ) != 0)
1042 printk( KERN_ERR "acsi_slm: cleanup_module failed\n");
1043 atari_stram_free( SLMBuffer );
1044}
1045#endif
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
new file mode 100644
index 000000000000..1468e8cf712d
--- /dev/null
+++ b/drivers/block/amiflop.c
@@ -0,0 +1,1850 @@
1/*
2 * linux/amiga/amiflop.c
3 *
4 * Copyright (C) 1993 Greg Harp
5 * Portions of this driver are based on code contributed by Brad Pepers
6 *
7 * revised 28.5.95 by Joerg Dorchain
8 * - now no bugs(?) any more for both HD & DD
9 * - added support for 40 Track 5.25" drives, 80-track hopefully behaves
10 * like 3.5" dd (no way to test - are there any 5.25" drives out there
11 * that work on an A4000?)
12 * - wrote formatting routine (maybe dirty, but works)
13 *
14 * june/july 1995 added ms-dos support by Joerg Dorchain
15 * (portions based on messydos.device and various contributors)
16 * - currently only 9 and 18 sector disks
17 *
18 * - fixed a bug with the internal trackbuffer when using multiple
 19 *          disks at the same time
20 * - made formatting a bit safer
21 * - added command line and machine based default for "silent" df0
22 *
23 * december 1995 adapted for 1.2.13pl4 by Joerg Dorchain
24 * - works but I think it's inefficient. (look in redo_fd_request)
25 * But the changes were very efficient. (only three and a half lines)
26 *
27 * january 1996 added special ioctl for tracking down read/write problems
 28 *          - usage: ioctl(d, RAW_TRACK, ptr); the raw track buffer (MFM-encoded data)
 29 *            is copied to the area ptr points to. (The area should be large enough,
 30 *            since no checking is done - 30K is currently sufficient.) Returns the
 31 *            actual size of the trackbuffer.
32 * - replaced udelays() by a timer (CIAA timer B) for the waits
33 * needed for the disk mechanic.
34 *
35 * february 1996 fixed error recovery and multiple disk access
36 * - both got broken the first time I tampered with the driver :-(
37 * - still not safe, but better than before
38 *
 39 * revised March 3rd, 1996 by Jes Sorensen for use in the 1.3.28 kernel.
40 * - Minor changes to accept the kdev_t.
41 * - Replaced some more udelays with ms_delays. Udelay is just a loop,
42 * and so the delay will be different depending on the given
43 * processor :-(
44 * - The driver could use a major cleanup because of the new
45 * major/minor handling that came with kdev_t. It seems to work for
46 * the time being, but I can't guarantee that it will stay like
47 * that when we start using 16 (24?) bit minors.
48 *
49 * restructured jan 1997 by Joerg Dorchain
50 * - Fixed Bug accessing multiple disks
51 * - some code cleanup
52 * - added trackbuffer for each drive to speed things up
53 * - fixed some race conditions (who finds the next may send it to me ;-)
54 */
55
56#include <linux/module.h>
57
58#include <linux/fd.h>
59#include <linux/hdreg.h>
60#include <linux/delay.h>
61#include <linux/init.h>
62#include <linux/amifdreg.h>
63#include <linux/amifd.h>
64#include <linux/buffer_head.h>
65#include <linux/blkdev.h>
66#include <linux/elevator.h>
67
68#include <asm/setup.h>
69#include <asm/uaccess.h>
70#include <asm/amigahw.h>
71#include <asm/amigaints.h>
72#include <asm/irq.h>
73
74#undef DEBUG /* print _LOTS_ of infos */
75
76#define RAW_IOCTL
77#ifdef RAW_IOCTL
78#define IOCTL_RAW_TRACK 0x5254524B /* 'RTRK' */
79#endif
80
81/*
82 * Defines
83 */
84
85/*
86 * Error codes
87 */
88#define FD_OK 0 /* operation succeeded */
89#define FD_ERROR -1 /* general error (seek, read, write, etc) */
90#define FD_NOUNIT 1 /* unit does not exist */
91#define FD_UNITBUSY 2 /* unit already active */
92#define FD_NOTACTIVE 3 /* unit is not active */
93#define FD_NOTREADY 4 /* unit is not ready (motor not on/no disk) */
94
95#define MFM_NOSYNC 1
96#define MFM_HEADER 2
97#define MFM_DATA 3
98#define MFM_TRACK 4
99
100/*
101 * Floppy ID values
102 */
103#define FD_NODRIVE 0x00000000 /* response when no unit is present */
104#define FD_DD_3 0xffffffff /* double-density 3.5" (880K) drive */
105#define FD_HD_3 0x55555555 /* high-density 3.5" (1760K) drive */
106#define FD_DD_5 0xaaaaaaaa /* double-density 5.25" (440K) drive */
107
108static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */
109
110module_param(fd_def_df0, ulong, 0);
111MODULE_LICENSE("GPL");
112
113static struct request_queue *floppy_queue;
114#define QUEUE (floppy_queue)
115#define CURRENT elv_next_request(floppy_queue)
116
117/*
118 * Macros
119 */
120#define MOTOR_ON (ciab.prb &= ~DSKMOTOR)
121#define MOTOR_OFF (ciab.prb |= DSKMOTOR)
122#define SELECT(mask) (ciab.prb &= ~mask)
123#define DESELECT(mask) (ciab.prb |= mask)
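/* the drive select lines /SEL0../SEL3 sit on CIA-B PRB bits 3-6; they are
 * active low, which is why SELECT() clears the bit */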
124#define SELMASK(drive) (1 << (3 + (drive & 3)))
125
126static struct fd_drive_type drive_types[] = {
127/* code name tr he rdsz wrsz sm pc1 pc2 sd st st*/
128/* warning: times are now in milliseconds (ms) */
129{ FD_DD_3, "DD 3.5", 80, 2, 14716, 13630, 1, 80,161, 3, 18, 1},
130{ FD_HD_3, "HD 3.5", 80, 2, 28344, 27258, 2, 80,161, 3, 18, 1},
131{ FD_DD_5, "DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2},
132{ FD_NODRIVE, "No Drive", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
133};
134static int num_dr_types = sizeof(drive_types) / sizeof(drive_types[0]);
135
136static int amiga_read(int), dos_read(int);
137static void amiga_write(int), dos_write(int);
138static struct fd_data_type data_types[] = {
139 { "Amiga", 11 , amiga_read, amiga_write},
140 { "MS-Dos", 9, dos_read, dos_write}
141};
142
143/* current info on each unit */
144static struct amiga_floppy_struct unit[FD_MAX_UNITS];
145
146static struct timer_list flush_track_timer[FD_MAX_UNITS];
147static struct timer_list post_write_timer;
148static struct timer_list motor_on_timer;
149static struct timer_list motor_off_timer[FD_MAX_UNITS];
150static int on_attempts;
151
152/* Synchronization of FDC access */
153/* request loop (trackbuffer) */
154static volatile int fdc_busy = -1;
155static volatile int fdc_nested;
156static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
157
158static DECLARE_WAIT_QUEUE_HEAD(motor_wait);
159
160static volatile int selected = -1; /* currently selected drive */
161
162static int writepending;
163static int writefromint;
164static char *raw_buf;
165
166static DEFINE_SPINLOCK(amiflop_lock);
167
168#define RAW_BUF_SIZE 30000 /* size of raw disk data */
169
170/*
171 * These are global variables, as that's the easiest way to give
172 * information to interrupts. They are the data used for the current
173 * request.
174 */
175static volatile char block_flag;
176static DECLARE_WAIT_QUEUE_HEAD(wait_fd_block);
177
178/* MS-Dos MFM Coding tables (should go quick and easy) */
179static unsigned char mfmencode[16]={
180 0x2a, 0x29, 0x24, 0x25, 0x12, 0x11, 0x14, 0x15,
181 0x4a, 0x49, 0x44, 0x45, 0x52, 0x51, 0x54, 0x55
182};
183static unsigned char mfmdecode[128];
184
185/* floppy internal millisecond timer stuff */
186static volatile int ms_busy = -1;
187static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
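/* CIA E-clock ticks per millisecond, rounded to the nearest tick */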
188#define MS_TICKS ((amiga_eclock+50)/1000)
189
190/*
191 * Note that MAX_ERRORS=X doesn't imply that we retry every bad read
192 * max X times - some types of errors increase the errorcount by 2 or
193 * even 3, so we might actually retry only X/2 times before giving up.
194 */
195#define MAX_ERRORS 12
196
197/* Prevent "aliased" accesses. */
198static int fd_ref[4] = { 0,0,0,0 };
199static int fd_device[4] = { 0, 0, 0, 0 };
200
201/*
202 * Here come the actual hardware access and helper functions.
203 * They are not reentrant and single threaded because all drives
204 * share the same hardware and the same trackbuffer.
205 */
206
207/* Milliseconds timer */
208
209static irqreturn_t ms_isr(int irq, void *dummy, struct pt_regs *fp)
210{
211 ms_busy = -1;
212 wake_up(&ms_wait);
213 return IRQ_HANDLED;
214}
215
216/* all waits are queued up;
217   a more generic routine would schedule them a la timer.device */
218static void ms_delay(int ms)
219{
220 unsigned long flags;
221 int ticks;
222 if (ms > 0) {
223 local_irq_save(flags);
224 while (ms_busy == 0)
225 sleep_on(&ms_wait);
226 ms_busy = 0;
227 local_irq_restore(flags);
228 ticks = MS_TICKS*ms-1;
229 ciaa.tblo=ticks%256;
230 ciaa.tbhi=ticks/256;
231		ciaa.crb=0x19; /* count eclock, force load, one-shot, start */
232 sleep_on(&ms_wait);
233 }
234}
235
236/* Hardware semaphore */
237
238/* returns true when we would get the semaphore */
239static inline int try_fdc(int drive)
240{
241 drive &= 3;
242 return ((fdc_busy < 0) || (fdc_busy == drive));
243}
244
245static void get_fdc(int drive)
246{
247 unsigned long flags;
248
249 drive &= 3;
250#ifdef DEBUG
251 printk("get_fdc: drive %d fdc_busy %d fdc_nested %d\n",drive,fdc_busy,fdc_nested);
252#endif
253 local_irq_save(flags);
254 while (!try_fdc(drive))
255 sleep_on(&fdc_wait);
256 fdc_busy = drive;
257 fdc_nested++;
258 local_irq_restore(flags);
259}
260
261static inline void rel_fdc(void)
262{
263#ifdef DEBUG
264 if (fdc_nested == 0)
265 printk("fd: unmatched rel_fdc\n");
266 printk("rel_fdc: fdc_busy %d fdc_nested %d\n",fdc_busy,fdc_nested);
267#endif
268 fdc_nested--;
269 if (fdc_nested == 0) {
270 fdc_busy = -1;
271 wake_up(&fdc_wait);
272 }
273}
274
275static void fd_select (int drive)
276{
277 unsigned char prb = ~0;
278
279 drive&=3;
280#ifdef DEBUG
281 printk("selecting %d\n",drive);
282#endif
283 if (drive == selected)
284 return;
285 get_fdc(drive);
286 selected = drive;
287
288 if (unit[drive].track % 2 != 0)
289 prb &= ~DSKSIDE;
290 if (unit[drive].motor == 1)
291 prb &= ~DSKMOTOR;
292 ciab.prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
293 ciab.prb = prb;
294 prb &= ~SELMASK(drive);
295 ciab.prb = prb;
296 rel_fdc();
297}
298
299static void fd_deselect (int drive)
300{
301 unsigned char prb;
302 unsigned long flags;
303
304 drive&=3;
305#ifdef DEBUG
306 printk("deselecting %d\n",drive);
307#endif
308 if (drive != selected) {
309 printk(KERN_WARNING "Deselecting drive %d while %d was selected!\n",drive,selected);
310 return;
311 }
312
313 get_fdc(drive);
314 local_irq_save(flags);
315
316 selected = -1;
317
318 prb = ciab.prb;
319 prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
320 ciab.prb = prb;
321
322 local_irq_restore (flags);
323 rel_fdc();
324
325}
326
327static void motor_on_callback(unsigned long nr)
328{
329 if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
330 wake_up (&motor_wait);
331 } else {
332 motor_on_timer.expires = jiffies + HZ/10;
333 add_timer(&motor_on_timer);
334 }
335}
336
337static int fd_motor_on(int nr)
338{
339 nr &= 3;
340
341 del_timer(motor_off_timer + nr);
342
343 if (!unit[nr].motor) {
344 unit[nr].motor = 1;
345 fd_select(nr);
346
347 motor_on_timer.data = nr;
348 mod_timer(&motor_on_timer, jiffies + HZ/2);
349
350 on_attempts = 10;
351 sleep_on (&motor_wait);
352 fd_deselect(nr);
353 }
354
355 if (on_attempts == 0) {
356 on_attempts = -1;
357#if 0
358 printk (KERN_ERR "motor_on failed, turning motor off\n");
359 fd_motor_off (nr);
360 return 0;
361#else
362		printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming the drive is spinning anyway\n");
363#endif
364 }
365
366 return 1;
367}
368
369static void fd_motor_off(unsigned long drive)
370{
371 long calledfromint;
372#ifdef MODULE
373 long decusecount;
374
375 decusecount = drive & 0x40000000;
376#endif
377 calledfromint = drive & 0x80000000;
378 drive&=3;
379 if (calledfromint && !try_fdc(drive)) {
380 /* We would be blocked in an interrupt, so try again later */
381 motor_off_timer[drive].expires = jiffies + 1;
382 add_timer(motor_off_timer + drive);
383 return;
384 }
385 unit[drive].motor = 0;
386 fd_select(drive);
387 udelay (1);
388 fd_deselect(drive);
389}
390
391static void floppy_off (unsigned int nr)
392{
393 int drive;
394
395 drive = nr & 3;
396 /* called this way it is always from interrupt */
397 motor_off_timer[drive].data = nr | 0x80000000;
398 mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
399}
400
401static int fd_calibrate(int drive)
402{
403 unsigned char prb;
404 int n;
405
406 drive &= 3;
407 get_fdc(drive);
408 if (!fd_motor_on (drive))
409 return 0;
410 fd_select (drive);
411 prb = ciab.prb;
412 prb |= DSKSIDE;
413 prb &= ~DSKDIREC;
414 ciab.prb = prb;
415 for (n = unit[drive].type->tracks/2; n != 0; --n) {
416 if (ciaa.pra & DSKTRACK0)
417 break;
418 prb &= ~DSKSTEP;
419 ciab.prb = prb;
420 prb |= DSKSTEP;
421 udelay (2);
422 ciab.prb = prb;
423 ms_delay(unit[drive].type->step_delay);
424 }
425 ms_delay (unit[drive].type->settle_time);
426 prb |= DSKDIREC;
427 n = unit[drive].type->tracks + 20;
428 for (;;) {
429 prb &= ~DSKSTEP;
430 ciab.prb = prb;
431 prb |= DSKSTEP;
432 udelay (2);
433 ciab.prb = prb;
434 ms_delay(unit[drive].type->step_delay + 1);
435 if ((ciaa.pra & DSKTRACK0) == 0)
436 break;
437 if (--n == 0) {
438 printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
439 fd_motor_off (drive);
440 unit[drive].track = -1;
441 rel_fdc();
442 return 0;
443 }
444 }
445 unit[drive].track = 0;
446 ms_delay(unit[drive].type->settle_time);
447
448 rel_fdc();
449 fd_deselect(drive);
450 return 1;
451}
452
453static int fd_seek(int drive, int track)
454{
455 unsigned char prb;
456 int cnt;
457
458#ifdef DEBUG
459 printk("seeking drive %d to track %d\n",drive,track);
460#endif
461 drive &= 3;
462 get_fdc(drive);
463 if (unit[drive].track == track) {
464 rel_fdc();
465 return 1;
466 }
467 if (!fd_motor_on(drive)) {
468 rel_fdc();
469 return 0;
470 }
471 if (unit[drive].track < 0 && !fd_calibrate(drive)) {
472 rel_fdc();
473 return 0;
474 }
475
476 fd_select (drive);
477 cnt = unit[drive].track/2 - track/2;
478 prb = ciab.prb;
479 prb |= DSKSIDE | DSKDIREC;
480 if (track % 2 != 0)
481 prb &= ~DSKSIDE;
482 if (cnt < 0) {
483 cnt = - cnt;
484 prb &= ~DSKDIREC;
485 }
486 ciab.prb = prb;
487 if (track % 2 != unit[drive].track % 2)
488 ms_delay (unit[drive].type->side_time);
489 unit[drive].track = track;
490 if (cnt == 0) {
491 rel_fdc();
492 fd_deselect(drive);
493 return 1;
494 }
495 do {
496 prb &= ~DSKSTEP;
497 ciab.prb = prb;
498 prb |= DSKSTEP;
499 udelay (1);
500 ciab.prb = prb;
501 ms_delay (unit[drive].type->step_delay);
502 } while (--cnt != 0);
503 ms_delay (unit[drive].type->settle_time);
504
505 rel_fdc();
506 fd_deselect(drive);
507 return 1;
508}
509
510static unsigned long fd_get_drive_id(int drive)
511{
512 int i;
513 ulong id = 0;
514
515 drive&=3;
516 get_fdc(drive);
517 /* set up for ID */
518 MOTOR_ON;
519 udelay(2);
520 SELECT(SELMASK(drive));
521 udelay(2);
522 DESELECT(SELMASK(drive));
523 udelay(2);
524 MOTOR_OFF;
525 udelay(2);
526 SELECT(SELMASK(drive));
527 udelay(2);
528 DESELECT(SELMASK(drive));
529 udelay(2);
530
531 /* loop and read disk ID */
532 for (i=0; i<32; i++) {
533 SELECT(SELMASK(drive));
534 udelay(2);
535
536 /* read and store value of DSKRDY */
537 id <<= 1;
538 id |= (ciaa.pra & DSKRDY) ? 0 : 1; /* cia regs are low-active! */
539
540 DESELECT(SELMASK(drive));
541 }
542
543 rel_fdc();
544
545 /*
546 * RB: At least A500/A2000's df0: don't identify themselves.
547 * As every (real) Amiga has at least a 3.5" DD drive as df0:
548 * we default to that if df0: doesn't identify as a certain
549 * type.
550 */
551 if(drive == 0 && id == FD_NODRIVE)
552 {
553 id = fd_def_df0;
554 printk(KERN_NOTICE "fd: drive 0 didn't identify, setting default %08lx\n", (ulong)fd_def_df0);
555 }
556 /* return the ID value */
557 return (id);
558}
559
560static irqreturn_t fd_block_done(int irq, void *dummy, struct pt_regs *fp)
561{
562 if (block_flag)
563 custom.dsklen = 0x4000;
564
565 if (block_flag == 2) { /* writing */
566 writepending = 2;
567 post_write_timer.expires = jiffies + 1; /* at least 2 ms */
568 post_write_timer.data = selected;
569 add_timer(&post_write_timer);
570 }
571 else { /* reading */
572 block_flag = 0;
573 wake_up (&wait_fd_block);
574 }
575 return IRQ_HANDLED;
576}
577
578static void raw_read(int drive)
579{
580 drive&=3;
581 get_fdc(drive);
582 while (block_flag)
583 sleep_on(&wait_fd_block);
584 fd_select(drive);
585 /* setup adkcon bits correctly */
586 custom.adkcon = ADK_MSBSYNC;
587 custom.adkcon = ADK_SETCLR|ADK_WORDSYNC|ADK_FAST;
588
589 custom.dsksync = MFM_SYNC;
590
591 custom.dsklen = 0;
592 custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
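	/* disk DMA only starts after DSKLEN has been written twice with
	 * DSKLEN_DMAEN set (a hardware safety interlock), so the duplicated
	 * write below is intentional */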
593 custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
594 custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
595
596 block_flag = 1;
597
598 while (block_flag)
599 sleep_on (&wait_fd_block);
600
601 custom.dsklen = 0;
602 fd_deselect(drive);
603 rel_fdc();
604}
605
606static int raw_write(int drive)
607{
608 ushort adk;
609
610 drive&=3;
611 get_fdc(drive); /* corresponds to rel_fdc() in post_write() */
612 if ((ciaa.pra & DSKPROT) == 0) {
613 rel_fdc();
614 return 0;
615 }
616 while (block_flag)
617 sleep_on(&wait_fd_block);
618 fd_select(drive);
619 /* clear adkcon bits */
620 custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC;
621 /* set appropriate adkcon bits */
622 adk = ADK_SETCLR|ADK_FAST;
623 if ((ulong)unit[drive].track >= unit[drive].type->precomp2)
624 adk |= ADK_PRECOMP1;
625 else if ((ulong)unit[drive].track >= unit[drive].type->precomp1)
626 adk |= ADK_PRECOMP0;
627 custom.adkcon = adk;
628
629 custom.dsklen = DSKLEN_WRITE;
630 custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
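	/* as in raw_read(): the hardware requires DSKLEN to be written twice
	 * before the write DMA actually starts */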
631 custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
632 custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
633
634 block_flag = 2;
635 return 1;
636}
637
638/*
639 * to be called at least 2ms after the write has finished but before any
640 * other access to the hardware.
641 */
642static void post_write (unsigned long drive)
643{
644#ifdef DEBUG
645 printk("post_write for drive %ld\n",drive);
646#endif
647 drive &= 3;
648 custom.dsklen = 0;
649 block_flag = 0;
650 writepending = 0;
651 writefromint = 0;
652 unit[drive].dirty = 0;
653 wake_up(&wait_fd_block);
654 fd_deselect(drive);
655 rel_fdc(); /* corresponds to get_fdc() in raw_write */
656}
657
658
659/*
660 * The following functions are to convert the block contents into raw data
661 * written to disk and vice versa.
662 * (Add other formats here ;-))
663 */
664
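/* advance to the next 0x4489 MFM sync word and skip any repeated sync
 * words; returns 0 if no sync mark is found before 'end' */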
665static unsigned long scan_sync(unsigned long raw, unsigned long end)
666{
667 ushort *ptr = (ushort *)raw, *endp = (ushort *)end;
668
669 while (ptr < endp && *ptr++ != 0x4489)
670 ;
671 if (ptr < endp) {
672 while (*ptr == 0x4489 && ptr < endp)
673 ptr++;
674 return (ulong)ptr;
675 }
676 return 0;
677}
678
679static inline unsigned long checksum(unsigned long *addr, int len)
680{
681 unsigned long csum = 0;
682
683 len /= sizeof(*addr);
684 while (len-- > 0)
685 csum ^= *addr++;
686 csum = ((csum>>1) & 0x55555555) ^ (csum & 0x55555555);
687
688 return csum;
689}
690
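/* the Amiga track format stores all the odd bits of a data block first,
 * followed by all the even bits; recombine them into plain data here */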
691static unsigned long decode (unsigned long *data, unsigned long *raw,
692 int len)
693{
694 ulong *odd, *even;
695
696 /* convert length from bytes to longwords */
697 len >>= 2;
698 odd = raw;
699 even = odd + len;
700
701 /* prepare return pointer */
702 raw += len * 2;
703
704 do {
705 *data++ = ((*odd++ & 0x55555555) << 1) | (*even++ & 0x55555555);
706 } while (--len != 0);
707
708 return (ulong)raw;
709}
710
711struct header {
712 unsigned char magic;
713 unsigned char track;
714 unsigned char sect;
715 unsigned char ord;
716 unsigned char labels[16];
717 unsigned long hdrchk;
718 unsigned long datachk;
719};
720
721static int amiga_read(int drive)
722{
723 unsigned long raw;
724 unsigned long end;
725 int scnt;
726 unsigned long csum;
727 struct header hdr;
728
729 drive&=3;
730 raw = (long) raw_buf;
731 end = raw + unit[drive].type->read_size;
732
733 for (scnt = 0;scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
734 if (!(raw = scan_sync(raw, end))) {
735 printk (KERN_INFO "can't find sync for sector %d\n", scnt);
736 return MFM_NOSYNC;
737 }
738
739 raw = decode ((ulong *)&hdr.magic, (ulong *)raw, 4);
740 raw = decode ((ulong *)&hdr.labels, (ulong *)raw, 16);
741 raw = decode ((ulong *)&hdr.hdrchk, (ulong *)raw, 4);
742 raw = decode ((ulong *)&hdr.datachk, (ulong *)raw, 4);
743 csum = checksum((ulong *)&hdr,
744 (char *)&hdr.hdrchk-(char *)&hdr);
745
746#ifdef DEBUG
747 printk ("(%x,%d,%d,%d) (%lx,%lx,%lx,%lx) %lx %lx\n",
748 hdr.magic, hdr.track, hdr.sect, hdr.ord,
749 *(ulong *)&hdr.labels[0], *(ulong *)&hdr.labels[4],
750 *(ulong *)&hdr.labels[8], *(ulong *)&hdr.labels[12],
751 hdr.hdrchk, hdr.datachk);
752#endif
753
754 if (hdr.hdrchk != csum) {
755 printk(KERN_INFO "MFM_HEADER: %08lx,%08lx\n", hdr.hdrchk, csum);
756 return MFM_HEADER;
757 }
758
759 /* verify track */
760 if (hdr.track != unit[drive].track) {
761 printk(KERN_INFO "MFM_TRACK: %d, %d\n", hdr.track, unit[drive].track);
762 return MFM_TRACK;
763 }
764
765 raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512),
766 (ulong *)raw, 512);
767 csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512);
768
769 if (hdr.datachk != csum) {
770 printk(KERN_INFO "MFM_DATA: (%x:%d:%d:%d) sc=%d %lx, %lx\n",
771 hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt,
772 hdr.datachk, csum);
773 printk (KERN_INFO "data=(%lx,%lx,%lx,%lx)\n",
774 ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0],
775 ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1],
776 ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2],
777 ((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]);
778 return MFM_DATA;
779 }
780 }
781
782 return 0;
783}
784
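/* MFM-encode one longword worth of data bits (the caller passes either the
 * odd or the even bit half): a clock bit is inserted between every two data
 * bits and is 1 only when both neighbouring data bits are 0; the peek at
 * dest[-1] handles the clock bit at the longword boundary */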
785static void encode(unsigned long data, unsigned long *dest)
786{
787 unsigned long data2;
788
789 data &= 0x55555555;
790 data2 = data ^ 0x55555555;
791 data |= ((data2 >> 1) | 0x80000000) & (data2 << 1);
792
793 if (*(dest - 1) & 0x00000001)
794 data &= 0x7FFFFFFF;
795
796 *dest = data;
797}
798
799static void encode_block(unsigned long *dest, unsigned long *src, int len)
800{
801 int cnt, to_cnt = 0;
802 unsigned long data;
803
804 /* odd bits */
805 for (cnt = 0; cnt < len / 4; cnt++) {
806 data = src[cnt] >> 1;
807 encode(data, dest + to_cnt++);
808 }
809
810 /* even bits */
811 for (cnt = 0; cnt < len / 4; cnt++) {
812 data = src[cnt];
813 encode(data, dest + to_cnt++);
814 }
815}
816
817static unsigned long *putsec(int disk, unsigned long *raw, int cnt)
818{
819 struct header hdr;
820 int i;
821
822 disk&=3;
823 *raw = (raw[-1]&1) ? 0x2AAAAAAA : 0xAAAAAAAA;
824 raw++;
825 *raw++ = 0x44894489;
826
827 hdr.magic = 0xFF;
828 hdr.track = unit[disk].track;
829 hdr.sect = cnt;
830 hdr.ord = unit[disk].dtype->sects * unit[disk].type->sect_mult - cnt;
831 for (i = 0; i < 16; i++)
832 hdr.labels[i] = 0;
833 hdr.hdrchk = checksum((ulong *)&hdr,
834 (char *)&hdr.hdrchk-(char *)&hdr);
835 hdr.datachk = checksum((ulong *)(unit[disk].trackbuf+cnt*512), 512);
836
837 encode_block(raw, (ulong *)&hdr.magic, 4);
838 raw += 2;
839 encode_block(raw, (ulong *)&hdr.labels, 16);
840 raw += 8;
841 encode_block(raw, (ulong *)&hdr.hdrchk, 4);
842 raw += 2;
843 encode_block(raw, (ulong *)&hdr.datachk, 4);
844 raw += 2;
845 encode_block(raw, (ulong *)(unit[disk].trackbuf+cnt*512), 512);
846 raw += 256;
847
848 return raw;
849}
850
851static void amiga_write(int disk)
852{
853 unsigned int cnt;
854 unsigned long *ptr = (unsigned long *)raw_buf;
855
856 disk&=3;
857 /* gap space */
858 for (cnt = 0; cnt < 415 * unit[disk].type->sect_mult; cnt++)
859 *ptr++ = 0xaaaaaaaa;
860
861 /* sectors */
862 for (cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
863 ptr = putsec (disk, ptr, cnt);
864 *(ushort *)ptr = (ptr[-1]&1) ? 0x2AA8 : 0xAAA8;
865}
866
867
868struct dos_header {
869 unsigned char track, /* 0-80 */
870 side, /* 0-1 */
871 sec, /* 0-...*/
872 len_desc;/* 2 */
873 unsigned short crc; /* on 68000 we got an alignment problem,
 874				     but this compiler solves it by silently
 875				     adding a pad byte, so the data won't fit -
 876				     and that took about 3h to discover.... */
877 unsigned char gap1[22]; /* for longword-alignedness (0x4e) */
878};
879
880/* crc routines are borrowed from the messydos-handler */
881
882/* excerpt from the messydos-device
883; The CRC is computed not only over the actual data, but including
884; the SYNC mark (3 * $a1) and the 'ID/DATA - Address Mark' ($fe/$fb).
885; As we don't read or encode these fields into our buffers, we have to
886; preload the registers containing the CRC with the values they would have
887; after stepping over these fields.
888;
889; How CRCs "really" work:
890;
891; First, you should regard a bitstring as a series of coefficients of
892; polynomials. We calculate with these polynomials in modulo-2
893; arithmetic, in which both add and subtract are done the same as
894; exclusive-or. Now, we modify our data (a very long polynomial) in
895; such a way that it becomes divisible by the CCITT-standard 16-bit
896; 16 12 5
897; polynomial: x + x + x + 1, represented by $11021. The easiest
898; way to do this would be to multiply (using proper arithmetic) our
899; datablock with $11021. So we have:
900; data * $11021 =
901; data * ($10000 + $1021) =
902; data * $10000 + data * $1021
903; The left part of this is simple: Just add two 0 bytes. But then
904; the right part (data $1021) remains difficult and even could have
905; a carry into the left part. The solution is to use a modified
906; multiplication, which has a result that is not correct, but with
907; a difference of any multiple of $11021. We then only need to keep
908; the 16 least significant bits of the result.
909;
910; The following algorithm does this for us:
911;
912; unsigned char *data, c, crclo, crchi;
913; while (not done) {
914; c = *data++ + crchi;
915; crchi = (@ c) >> 8 + crclo;
916; crclo = @ c;
917; }
918;
919; Remember, + is done with EOR, the @ operator is in two tables (high
920; and low byte separately), which is calculated as
921;
922; $1021 * (c & $F0)
923; xor $1021 * (c & $0F)
924; xor $1021 * (c >> 4) (* is regular multiplication)
925;
926;
927; Anyway, the end result is the same as the remainder of the division of
928; the data by $11021. I am afraid I need to study theory a bit more...
929
930
931my only work was to recode this from Manx to C....
932
933*/
934
935static ushort dos_crc(void * data_a3, int data_d0, int data_d1, int data_d3)
936{
937 static unsigned char CRCTable1[] = {
938 0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70,0x81,0x91,0xa1,0xb1,0xc1,0xd1,0xe1,0xf1,
939 0x12,0x02,0x32,0x22,0x52,0x42,0x72,0x62,0x93,0x83,0xb3,0xa3,0xd3,0xc3,0xf3,0xe3,
940 0x24,0x34,0x04,0x14,0x64,0x74,0x44,0x54,0xa5,0xb5,0x85,0x95,0xe5,0xf5,0xc5,0xd5,
941 0x36,0x26,0x16,0x06,0x76,0x66,0x56,0x46,0xb7,0xa7,0x97,0x87,0xf7,0xe7,0xd7,0xc7,
942 0x48,0x58,0x68,0x78,0x08,0x18,0x28,0x38,0xc9,0xd9,0xe9,0xf9,0x89,0x99,0xa9,0xb9,
943 0x5a,0x4a,0x7a,0x6a,0x1a,0x0a,0x3a,0x2a,0xdb,0xcb,0xfb,0xeb,0x9b,0x8b,0xbb,0xab,
944 0x6c,0x7c,0x4c,0x5c,0x2c,0x3c,0x0c,0x1c,0xed,0xfd,0xcd,0xdd,0xad,0xbd,0x8d,0x9d,
945 0x7e,0x6e,0x5e,0x4e,0x3e,0x2e,0x1e,0x0e,0xff,0xef,0xdf,0xcf,0xbf,0xaf,0x9f,0x8f,
946 0x91,0x81,0xb1,0xa1,0xd1,0xc1,0xf1,0xe1,0x10,0x00,0x30,0x20,0x50,0x40,0x70,0x60,
947 0x83,0x93,0xa3,0xb3,0xc3,0xd3,0xe3,0xf3,0x02,0x12,0x22,0x32,0x42,0x52,0x62,0x72,
948 0xb5,0xa5,0x95,0x85,0xf5,0xe5,0xd5,0xc5,0x34,0x24,0x14,0x04,0x74,0x64,0x54,0x44,
949 0xa7,0xb7,0x87,0x97,0xe7,0xf7,0xc7,0xd7,0x26,0x36,0x06,0x16,0x66,0x76,0x46,0x56,
950 0xd9,0xc9,0xf9,0xe9,0x99,0x89,0xb9,0xa9,0x58,0x48,0x78,0x68,0x18,0x08,0x38,0x28,
951 0xcb,0xdb,0xeb,0xfb,0x8b,0x9b,0xab,0xbb,0x4a,0x5a,0x6a,0x7a,0x0a,0x1a,0x2a,0x3a,
952 0xfd,0xed,0xdd,0xcd,0xbd,0xad,0x9d,0x8d,0x7c,0x6c,0x5c,0x4c,0x3c,0x2c,0x1c,0x0c,
953 0xef,0xff,0xcf,0xdf,0xaf,0xbf,0x8f,0x9f,0x6e,0x7e,0x4e,0x5e,0x2e,0x3e,0x0e,0x1e
954 };
955
956 static unsigned char CRCTable2[] = {
957 0x00,0x21,0x42,0x63,0x84,0xa5,0xc6,0xe7,0x08,0x29,0x4a,0x6b,0x8c,0xad,0xce,0xef,
958 0x31,0x10,0x73,0x52,0xb5,0x94,0xf7,0xd6,0x39,0x18,0x7b,0x5a,0xbd,0x9c,0xff,0xde,
959 0x62,0x43,0x20,0x01,0xe6,0xc7,0xa4,0x85,0x6a,0x4b,0x28,0x09,0xee,0xcf,0xac,0x8d,
960 0x53,0x72,0x11,0x30,0xd7,0xf6,0x95,0xb4,0x5b,0x7a,0x19,0x38,0xdf,0xfe,0x9d,0xbc,
961 0xc4,0xe5,0x86,0xa7,0x40,0x61,0x02,0x23,0xcc,0xed,0x8e,0xaf,0x48,0x69,0x0a,0x2b,
962 0xf5,0xd4,0xb7,0x96,0x71,0x50,0x33,0x12,0xfd,0xdc,0xbf,0x9e,0x79,0x58,0x3b,0x1a,
963 0xa6,0x87,0xe4,0xc5,0x22,0x03,0x60,0x41,0xae,0x8f,0xec,0xcd,0x2a,0x0b,0x68,0x49,
964 0x97,0xb6,0xd5,0xf4,0x13,0x32,0x51,0x70,0x9f,0xbe,0xdd,0xfc,0x1b,0x3a,0x59,0x78,
965 0x88,0xa9,0xca,0xeb,0x0c,0x2d,0x4e,0x6f,0x80,0xa1,0xc2,0xe3,0x04,0x25,0x46,0x67,
966 0xb9,0x98,0xfb,0xda,0x3d,0x1c,0x7f,0x5e,0xb1,0x90,0xf3,0xd2,0x35,0x14,0x77,0x56,
967 0xea,0xcb,0xa8,0x89,0x6e,0x4f,0x2c,0x0d,0xe2,0xc3,0xa0,0x81,0x66,0x47,0x24,0x05,
968 0xdb,0xfa,0x99,0xb8,0x5f,0x7e,0x1d,0x3c,0xd3,0xf2,0x91,0xb0,0x57,0x76,0x15,0x34,
969 0x4c,0x6d,0x0e,0x2f,0xc8,0xe9,0x8a,0xab,0x44,0x65,0x06,0x27,0xc0,0xe1,0x82,0xa3,
970 0x7d,0x5c,0x3f,0x1e,0xf9,0xd8,0xbb,0x9a,0x75,0x54,0x37,0x16,0xf1,0xd0,0xb3,0x92,
971 0x2e,0x0f,0x6c,0x4d,0xaa,0x8b,0xe8,0xc9,0x26,0x07,0x64,0x45,0xa2,0x83,0xe0,0xc1,
972 0x1f,0x3e,0x5d,0x7c,0x9b,0xba,0xd9,0xf8,0x17,0x36,0x55,0x74,0x93,0xb2,0xd1,0xf0
973 };
974
975/* look at the asm code - what looks a bit strange in C is almost as good as handmade */
976 register int i;
977 register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl;
978
979 CRCT1=CRCTable1;
980 CRCT2=CRCTable2;
981 data=data_a3;
982 crcl=data_d1;
983 crch=data_d0;
984 for (i=data_d3; i>=0; i--) {
985 c = (*data++) ^ crch;
986 crch = CRCT1[c] ^ crcl;
987 crcl = CRCT2[c];
988 }
989 return (crch<<8)|crcl;
990}
991
992static inline ushort dos_hdr_crc (struct dos_header *hdr)
993{
994 return dos_crc(&(hdr->track), 0xb2, 0x30, 3); /* precomputed magic */
995}
996
997static inline ushort dos_data_crc(unsigned char *data)
998{
999 return dos_crc(data, 0xe2, 0x95 ,511); /* precomputed magic */
1000}
1001
1002static inline unsigned char dos_decode_byte(ushort word)
1003{
1004 register ushort w2;
1005 register unsigned char byte;
1006 register unsigned char *dec = mfmdecode;
1007
1008 w2=word;
1009 w2>>=8;
1010 w2&=127;
1011 byte = dec[w2];
1012 byte <<= 4;
1013 w2 = word & 127;
1014 byte |= dec[w2];
1015 return byte;
1016}
1017
1018static unsigned long dos_decode(unsigned char *data, unsigned short *raw, int len)
1019{
1020 int i;
1021
1022 for (i = 0; i < len; i++)
1023 *data++=dos_decode_byte(*raw++);
1024 return ((ulong)raw);
1025}
1026
1027#ifdef DEBUG
1028static void dbg(unsigned long ptr)
1029{
1030 printk("raw data @%08lx: %08lx, %08lx ,%08lx, %08lx\n", ptr,
1031 ((ulong *)ptr)[0], ((ulong *)ptr)[1],
1032 ((ulong *)ptr)[2], ((ulong *)ptr)[3]);
1033}
1034#endif
1035
1036static int dos_read(int drive)
1037{
1038 unsigned long end;
1039 unsigned long raw;
1040 int scnt;
1041 unsigned short crc,data_crc[2];
1042 struct dos_header hdr;
1043
1044 drive&=3;
1045 raw = (long) raw_buf;
1046 end = raw + unit[drive].type->read_size;
1047
1048 for (scnt=0; scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
1049 do { /* search for the right sync of each sec-hdr */
1050 if (!(raw = scan_sync (raw, end))) {
1051 printk(KERN_INFO "dos_read: no hdr sync on "
1052 "track %d, unit %d for sector %d\n",
1053 unit[drive].track,drive,scnt);
1054 return MFM_NOSYNC;
1055 }
1056#ifdef DEBUG
1057 dbg(raw);
1058#endif
 1059		} while (*((ushort *)raw)!=0x5554); /* usually loops only once */
1060 raw+=2; /* skip over headermark */
1061 raw = dos_decode((unsigned char *)&hdr,(ushort *) raw,8);
1062 crc = dos_hdr_crc(&hdr);
1063
1064#ifdef DEBUG
1065 printk("(%3d,%d,%2d,%d) %x\n", hdr.track, hdr.side,
1066 hdr.sec, hdr.len_desc, hdr.crc);
1067#endif
1068
1069 if (crc != hdr.crc) {
1070 printk(KERN_INFO "dos_read: MFM_HEADER %04x,%04x\n",
1071 hdr.crc, crc);
1072 return MFM_HEADER;
1073 }
1074 if (hdr.track != unit[drive].track/unit[drive].type->heads) {
1075 printk(KERN_INFO "dos_read: MFM_TRACK %d, %d\n",
1076 hdr.track,
1077 unit[drive].track/unit[drive].type->heads);
1078 return MFM_TRACK;
1079 }
1080
1081 if (hdr.side != unit[drive].track%unit[drive].type->heads) {
1082 printk(KERN_INFO "dos_read: MFM_SIDE %d, %d\n",
1083 hdr.side,
1084 unit[drive].track%unit[drive].type->heads);
1085 return MFM_TRACK;
1086 }
1087
1088 if (hdr.len_desc != 2) {
1089 printk(KERN_INFO "dos_read: unknown sector len "
1090 "descriptor %d\n", hdr.len_desc);
1091 return MFM_DATA;
1092 }
1093#ifdef DEBUG
1094 printk("hdr accepted\n");
1095#endif
1096 if (!(raw = scan_sync (raw, end))) {
1097 printk(KERN_INFO "dos_read: no data sync on track "
1098 "%d, unit %d for sector%d, disk sector %d\n",
1099 unit[drive].track, drive, scnt, hdr.sec);
1100 return MFM_NOSYNC;
1101 }
1102#ifdef DEBUG
1103 dbg(raw);
1104#endif
1105
1106 if (*((ushort *)raw)!=0x5545) {
1107 printk(KERN_INFO "dos_read: no data mark after "
1108 "sync (%d,%d,%d,%d) sc=%d\n",
1109 hdr.track,hdr.side,hdr.sec,hdr.len_desc,scnt);
1110 return MFM_NOSYNC;
1111 }
1112
1113 raw+=2; /* skip data mark (included in checksum) */
1114 raw = dos_decode((unsigned char *)(unit[drive].trackbuf + (hdr.sec - 1) * 512), (ushort *) raw, 512);
1115 raw = dos_decode((unsigned char *)data_crc,(ushort *) raw,4);
1116 crc = dos_data_crc(unit[drive].trackbuf + (hdr.sec - 1) * 512);
1117
1118 if (crc != data_crc[0]) {
1119 printk(KERN_INFO "dos_read: MFM_DATA (%d,%d,%d,%d) "
1120 "sc=%d, %x %x\n", hdr.track, hdr.side,
1121 hdr.sec, hdr.len_desc, scnt,data_crc[0], crc);
1122 printk(KERN_INFO "data=(%lx,%lx,%lx,%lx,...)\n",
1123 ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[0],
1124 ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[1],
1125 ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[2],
1126 ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[3]);
1127 return MFM_DATA;
1128 }
1129 }
1130 return 0;
1131}
1132
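/* encode one data byte as a 16-bit MFM word: each nibble is looked up in
 * mfmencode (giving interleaved clock/data bits), and the clock bit at the
 * nibble boundary (bit 7) is set only when the data bits on both sides of
 * it are 0 */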
1133static inline ushort dos_encode_byte(unsigned char byte)
1134{
1135 register unsigned char *enc, b2, b1;
1136 register ushort word;
1137
1138 enc=mfmencode;
1139 b1=byte;
1140 b2=b1>>4;
1141 b1&=15;
1142 word=enc[b2] <<8 | enc [b1];
1143 return (word|((word&(256|64)) ? 0: 128));
1144}
1145
1146static void dos_encode_block(ushort *dest, unsigned char *src, int len)
1147{
1148 int i;
1149
1150 for (i = 0; i < len; i++) {
1151 *dest=dos_encode_byte(*src++);
1152 *dest|=((dest[-1]&1)||(*dest&0x4000))? 0: 0x8000;
1153 dest++;
1154 }
1155}
1156
1157static unsigned long *ms_putsec(int drive, unsigned long *raw, int cnt)
1158{
1159 static struct dos_header hdr={0,0,0,2,0,
1160 {78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78}};
1161 int i;
1162 static ushort crc[2]={0,0x4e4e};
1163
1164 drive&=3;
1165/* id gap 1 */
1166/* the MFM word before is always 9254 */
1167 for(i=0;i<6;i++)
1168 *raw++=0xaaaaaaaa;
1169/* 3 sync + 1 headermark */
1170 *raw++=0x44894489;
1171 *raw++=0x44895554;
1172
1173/* fill in the variable parts of the header */
1174 hdr.track=unit[drive].track/unit[drive].type->heads;
1175 hdr.side=unit[drive].track%unit[drive].type->heads;
1176 hdr.sec=cnt+1;
1177 hdr.crc=dos_hdr_crc(&hdr);
1178
1179/* header (without "magic") and id gap 2*/
1180 dos_encode_block((ushort *)raw,(unsigned char *) &hdr.track,28);
1181 raw+=14;
1182
1183/*id gap 3 */
1184 for(i=0;i<6;i++)
1185 *raw++=0xaaaaaaaa;
1186
1187/* 3 syncs and 1 datamark */
1188 *raw++=0x44894489;
1189 *raw++=0x44895545;
1190
1191/* data */
1192 dos_encode_block((ushort *)raw,
1193 (unsigned char *)unit[drive].trackbuf+cnt*512,512);
1194 raw+=256;
1195
1196/*data crc + jd's special gap (long words :-/) */
1197 crc[0]=dos_data_crc(unit[drive].trackbuf+cnt*512);
1198 dos_encode_block((ushort *) raw,(unsigned char *)crc,4);
1199 raw+=2;
1200
1201/* data gap */
1202 for(i=0;i<38;i++)
1203 *raw++=0x92549254;
1204
1205 return raw; /* wrote 652 MFM words */
1206}
1207
1208static void dos_write(int disk)
1209{
1210 int cnt;
1211 unsigned long raw = (unsigned long) raw_buf;
1212 unsigned long *ptr=(unsigned long *)raw;
1213
1214 disk&=3;
1215/* really gap4 + indexgap , but we write it first and round it up */
1216 for (cnt=0;cnt<425;cnt++)
1217 *ptr++=0x92549254;
1218
1219/* the following is just guessed */
1220 if (unit[disk].type->sect_mult==2) /* check for HD-Disks */
1221 for(cnt=0;cnt<473;cnt++)
1222 *ptr++=0x92549254;
1223
1224/* now the index marks...*/
1225 for (cnt=0;cnt<20;cnt++)
1226 *ptr++=0x92549254;
1227 for (cnt=0;cnt<6;cnt++)
1228 *ptr++=0xaaaaaaaa;
1229 *ptr++=0x52245224;
1230 *ptr++=0x52245552;
1231 for (cnt=0;cnt<20;cnt++)
1232 *ptr++=0x92549254;
1233
1234/* sectors */
1235 for(cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
1236 ptr=ms_putsec(disk,ptr,cnt);
1237
1238 *(ushort *)ptr = 0xaaa8; /* MFM word before is always 0x9254 */
1239}
1240
1241/*
1242 * Here comes the high level stuff (i.e. the filesystem interface)
1243 * and helper functions.
1244 * Normally this should be the only part that has to be adapted to
1245 * different kernel versions.
1246 */
1247
1248/* FIXME: this assumes the drive is still spinning -
1249 * which is only true if we complete writing a track within three seconds
1250 */
1251static void flush_track_callback(unsigned long nr)
1252{
1253 nr&=3;
1254 writefromint = 1;
1255 if (!try_fdc(nr)) {
1256 /* we might block in an interrupt, so try again later */
1257 flush_track_timer[nr].expires = jiffies + 1;
1258 add_timer(flush_track_timer + nr);
1259 return;
1260 }
1261 get_fdc(nr);
1262 (*unit[nr].dtype->write_fkt)(nr);
1263 if (!raw_write(nr)) {
1264 printk (KERN_NOTICE "floppy disk write protected\n");
1265 writefromint = 0;
1266 writepending = 0;
1267 }
1268 rel_fdc();
1269}
1270
1271static int non_int_flush_track (unsigned long nr)
1272{
1273 unsigned long flags;
1274
1275 nr&=3;
1276 writefromint = 0;
1277 del_timer(&post_write_timer);
1278 get_fdc(nr);
1279 if (!fd_motor_on(nr)) {
1280 writepending = 0;
1281 rel_fdc();
1282 return 0;
1283 }
1284 local_irq_save(flags);
1285 if (writepending != 2) {
1286 local_irq_restore(flags);
1287 (*unit[nr].dtype->write_fkt)(nr);
1288 if (!raw_write(nr)) {
1289 printk (KERN_NOTICE "floppy disk write protected "
1290 "in write!\n");
1291 writepending = 0;
1292 return 0;
1293 }
1294 while (block_flag == 2)
1295 sleep_on (&wait_fd_block);
1296 }
1297 else {
1298 local_irq_restore(flags);
1299 ms_delay(2); /* 2 ms post_write delay */
1300 post_write(nr);
1301 }
1302 rel_fdc();
1303 return 1;
1304}
1305
1306static int get_track(int drive, int track)
1307{
1308 int error, errcnt;
1309
1310 drive&=3;
1311 if (unit[drive].track == track)
1312 return 0;
1313 get_fdc(drive);
1314 if (!fd_motor_on(drive)) {
1315 rel_fdc();
1316 return -1;
1317 }
1318
1319 if (unit[drive].dirty == 1) {
1320 del_timer (flush_track_timer + drive);
1321 non_int_flush_track (drive);
1322 }
1323 errcnt = 0;
1324 while (errcnt < MAX_ERRORS) {
1325 if (!fd_seek(drive, track))
1326 return -1;
1327 raw_read(drive);
1328 error = (*unit[drive].dtype->read_fkt)(drive);
1329 if (error == 0) {
1330 rel_fdc();
1331 return 0;
1332 }
1333 /* Read Error Handling: recalibrate and try again */
1334 unit[drive].track = -1;
1335 errcnt++;
1336 }
1337 rel_fdc();
1338 return -1;
1339}
1340
1341static void redo_fd_request(void)
1342{
1343 unsigned int cnt, block, track, sector;
1344 int drive;
1345 struct amiga_floppy_struct *floppy;
1346 char *data;
1347 unsigned long flags;
1348
1349 repeat:
1350 if (!CURRENT) {
1351 /* Nothing left to do */
1352 return;
1353 }
1354
1355 floppy = CURRENT->rq_disk->private_data;
1356 drive = floppy - unit;
1357
1358 /* Here someone could investigate to be more efficient */
1359 for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
1360#ifdef DEBUG
1361 printk("fd: sector %ld + %d requested for %s\n",
1362 CURRENT->sector,cnt,
1363 (CURRENT->cmd==READ)?"read":"write");
1364#endif
1365 block = CURRENT->sector + cnt;
1366 if ((int)block > floppy->blocks) {
1367 end_request(CURRENT, 0);
1368 goto repeat;
1369 }
1370
1371 track = block / (floppy->dtype->sects * floppy->type->sect_mult);
1372 sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
1373 data = CURRENT->buffer + 512 * cnt;
1374#ifdef DEBUG
1375 printk("access to track %d, sector %d, with buffer at "
1376 "0x%08lx\n", track, sector, data);
1377#endif
1378
1379 if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
1380 printk(KERN_WARNING "do_fd_request: unknown command\n");
1381 end_request(CURRENT, 0);
1382 goto repeat;
1383 }
1384 if (get_track(drive, track) == -1) {
1385 end_request(CURRENT, 0);
1386 goto repeat;
1387 }
1388
1389 switch (rq_data_dir(CURRENT)) {
1390 case READ:
1391 memcpy(data, floppy->trackbuf + sector * 512, 512);
1392 break;
1393
1394 case WRITE:
1395 memcpy(floppy->trackbuf + sector * 512, data, 512);
1396
1397 /* keep the drive spinning while writes are scheduled */
1398 if (!fd_motor_on(drive)) {
1399 end_request(CURRENT, 0);
1400 goto repeat;
1401 }
1402 /*
1403 * setup a callback to write the track buffer
1404 * after a short (1 tick) delay.
1405 */
1406 local_irq_save(flags);
1407
1408 floppy->dirty = 1;
1409 /* reset the timer */
1410 mod_timer (flush_track_timer + drive, jiffies + 1);
1411 local_irq_restore(flags);
1412 break;
1413 }
1414 }
1415 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
1416 CURRENT->sector += CURRENT->current_nr_sectors;
1417
1418 end_request(CURRENT, 1);
1419 goto repeat;
1420}
1421
1422static void do_fd_request(request_queue_t * q)
1423{
1424 redo_fd_request();
1425}
1426
1427static int fd_ioctl(struct inode *inode, struct file *filp,
1428 unsigned int cmd, unsigned long param)
1429{
1430 int drive = iminor(inode) & 3;
1431 static struct floppy_struct getprm;
1432
1433 switch(cmd){
1434 case HDIO_GETGEO:
1435 {
1436 struct hd_geometry loc;
1437 loc.heads = unit[drive].type->heads;
1438 loc.sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
1439 loc.cylinders = unit[drive].type->tracks;
1440 loc.start = 0;
1441 if (copy_to_user((void *)param, (void *)&loc,
1442 sizeof(struct hd_geometry)))
1443 return -EFAULT;
1444 break;
1445 }
1446 case FDFMTBEG:
1447 get_fdc(drive);
1448 if (fd_ref[drive] > 1) {
1449 rel_fdc();
1450 return -EBUSY;
1451 }
1452 fsync_bdev(inode->i_bdev);
1453 if (fd_motor_on(drive) == 0) {
1454 rel_fdc();
1455 return -ENODEV;
1456 }
1457 if (fd_calibrate(drive) == 0) {
1458 rel_fdc();
1459 return -ENXIO;
1460 }
1461 floppy_off(drive);
1462 rel_fdc();
1463 break;
1464 case FDFMTTRK:
1465 if (param < unit[drive].type->tracks * unit[drive].type->heads)
1466 {
1467 get_fdc(drive);
1468 if (fd_seek(drive,param) != 0){
1469 memset(unit[drive].trackbuf, FD_FILL_BYTE,
1470 unit[drive].dtype->sects * unit[drive].type->sect_mult * 512);
1471 non_int_flush_track(drive);
1472 }
1473 floppy_off(drive);
1474 rel_fdc();
1475 }
1476 else
1477 return -EINVAL;
1478 break;
1479 case FDFMTEND:
1480 floppy_off(drive);
1481 invalidate_bdev(inode->i_bdev, 0);
1482 break;
1483 case FDGETPRM:
1484 memset((void *)&getprm, 0, sizeof (getprm));
1485 getprm.track=unit[drive].type->tracks;
1486 getprm.head=unit[drive].type->heads;
1487 getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
1488 getprm.size=unit[drive].blocks;
1489 if (copy_to_user((void *)param,
1490 (void *)&getprm,
1491 sizeof(struct floppy_struct)))
1492 return -EFAULT;
1493 break;
1494 case FDSETPRM:
1495 case FDDEFPRM:
1496 return -EINVAL;
1497 case FDFLUSH: /* unconditionally, even if not needed */
1498 del_timer (flush_track_timer + drive);
1499 non_int_flush_track(drive);
1500 break;
1501#ifdef RAW_IOCTL
1502 case IOCTL_RAW_TRACK:
1503 if (copy_to_user((void *)param, raw_buf,
1504 unit[drive].type->read_size))
1505 return -EFAULT;
1506 else
1507 return unit[drive].type->read_size;
1508#endif
1509 default:
1510 printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.",
1511 cmd, drive);
1512 return -ENOSYS;
1513 }
1514 return 0;
1515}
1516
1517static void fd_probe(int dev)
1518{
1519 unsigned long code;
1520 int type;
1521 int drive;
1522
1523 drive = dev & 3;
1524 code = fd_get_drive_id(drive);
1525
1526 /* get drive type */
1527 for (type = 0; type < num_dr_types; type++)
1528 if (drive_types[type].code == code)
1529 break;
1530
1531 if (type >= num_dr_types) {
1532 printk(KERN_WARNING "fd_probe: unsupported drive type "
1533 "%08lx found\n", code);
1534 unit[drive].type = &drive_types[num_dr_types-1]; /* FD_NODRIVE */
1535 return;
1536 }
1537
1538 unit[drive].type = drive_types + type;
1539 unit[drive].track = -1;
1540
1541 unit[drive].disk = -1;
1542 unit[drive].motor = 0;
1543 unit[drive].busy = 0;
1544 unit[drive].status = -1;
1545}
1546
1547/*
1548 * floppy_open checks for aliasing (/dev/fd0 can be the same as
1549 * /dev/PS0 etc), and disallows simultaneous access to the same
1550 * drive with different device numbers.
1551 */
1552static int floppy_open(struct inode *inode, struct file *filp)
1553{
1554 int drive = iminor(inode) & 3;
1555 int system = (iminor(inode) & 4) >> 2;
1556 int old_dev;
1557 unsigned long flags;
1558
1559 old_dev = fd_device[drive];
1560
1561 if (fd_ref[drive] && old_dev != system)
1562 return -EBUSY;
1563
1564 if (filp && filp->f_mode & 3) {
1565 check_disk_change(inode->i_bdev);
1566 if (filp->f_mode & 2 ) {
1567 int wrprot;
1568
1569 get_fdc(drive);
1570 fd_select (drive);
1571 wrprot = !(ciaa.pra & DSKPROT);
1572 fd_deselect (drive);
1573 rel_fdc();
1574
1575 if (wrprot)
1576 return -EROFS;
1577 }
1578 }
1579
1580 local_irq_save(flags);
1581 fd_ref[drive]++;
1582 fd_device[drive] = system;
1583 local_irq_restore(flags);
1584
1585 unit[drive].dtype=&data_types[system];
1586 unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks*
1587 data_types[system].sects*unit[drive].type->sect_mult;
1588 set_capacity(unit[drive].gendisk, unit[drive].blocks);
1589
1590 printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
1591 unit[drive].type->name, data_types[system].name);
1592
1593 return 0;
1594}
1595
1596static int floppy_release(struct inode * inode, struct file * filp)
1597{
1598 int drive = iminor(inode) & 3;
1599
1600 if (unit[drive].dirty == 1) {
1601 del_timer (flush_track_timer + drive);
1602 non_int_flush_track (drive);
1603 }
1604
1605 if (!fd_ref[drive]--) {
1606 printk(KERN_CRIT "floppy_release with fd_ref == 0");
1607 fd_ref[drive] = 0;
1608 }
1609#ifdef MODULE
1610/* the mod_use counter is handled this way */
1611 floppy_off (drive | 0x40000000);
1612#endif
1613 return 0;
1614}
1615
1616/*
1617 * floppy-change is never called from an interrupt, so we can relax a bit
1618 * here, sleep etc. Note that floppy-on tries to set current_DOR to point
1619 * to the desired drive, but it will probably not survive the sleep if
1620 * several floppies are used at the same time: thus the loop.
1621 */
1622static int amiga_floppy_change(struct gendisk *disk)
1623{
1624 struct amiga_floppy_struct *p = disk->private_data;
1625 int drive = p - unit;
1626 int changed;
1627 static int first_time = 1;
1628
1629 if (first_time)
1630 changed = first_time--;
1631 else {
1632 get_fdc(drive);
1633 fd_select (drive);
1634 changed = !(ciaa.pra & DSKCHANGE);
1635 fd_deselect (drive);
1636 rel_fdc();
1637 }
1638
1639 if (changed) {
1640 fd_probe(drive);
1641 p->track = -1;
1642 p->dirty = 0;
1643 writepending = 0; /* if this was true before, too bad! */
1644 writefromint = 0;
1645 return 1;
1646 }
1647 return 0;
1648}
1649
1650static struct block_device_operations floppy_fops = {
1651 .owner = THIS_MODULE,
1652 .open = floppy_open,
1653 .release = floppy_release,
1654 .ioctl = fd_ioctl,
1655 .media_changed = amiga_floppy_change,
1656};
1657
1658void __init amiga_floppy_setup (char *str, int *ints)
1659{
1660 printk (KERN_INFO "amiflop: Setting default df0 to %x\n", ints[1]);
1661 fd_def_df0 = ints[1];
1662}
1663
1664static int __init fd_probe_drives(void)
1665{
1666 int drive,drives,nomem;
1667
1668 printk(KERN_INFO "FD: probing units\n" KERN_INFO "found ");
1669 drives=0;
1670 nomem=0;
1671 for(drive=0;drive<FD_MAX_UNITS;drive++) {
1672 struct gendisk *disk;
1673 fd_probe(drive);
1674 if (unit[drive].type->code == FD_NODRIVE)
1675 continue;
1676 disk = alloc_disk(1);
1677 if (!disk) {
1678 unit[drive].type->code = FD_NODRIVE;
1679 continue;
1680 }
1681 unit[drive].gendisk = disk;
1682 drives++;
1683 if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
1684 printk("no mem for ");
1685 unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
1686 drives--;
1687 nomem = 1;
1688 }
1689 printk("fd%d ",drive);
1690 disk->major = FLOPPY_MAJOR;
1691 disk->first_minor = drive;
1692 disk->fops = &floppy_fops;
1693 sprintf(disk->disk_name, "fd%d", drive);
1694 disk->private_data = &unit[drive];
1695 disk->queue = floppy_queue;
1696 set_capacity(disk, 880*2);
1697 add_disk(disk);
1698 }
1699 if ((drives > 0) || (nomem == 0)) {
1700 if (drives == 0)
1701 printk("no drives");
1702 printk("\n");
1703 return drives;
1704 }
1705 printk("\n");
1706 return -ENOMEM;
1707}
1708
1709static struct kobject *floppy_find(dev_t dev, int *part, void *data)
1710{
1711 int drive = *part & 3;
1712 if (unit[drive].type->code == FD_NODRIVE)
1713 return NULL;
1714 *part = 0;
1715 return get_disk(unit[drive].gendisk);
1716}
1717
1718int __init amiga_floppy_init(void)
1719{
1720 int i, ret;
1721
1722 if (!AMIGAHW_PRESENT(AMI_FLOPPY))
1723 return -ENXIO;
1724
1725 if (register_blkdev(FLOPPY_MAJOR,"fd"))
1726 return -EBUSY;
1727
1728 /*
1729 * We request DSKPTR, DSKLEN and DSKDATA only, because the other
1730 * floppy registers are too spread out over the custom register space
1731 */
1732 ret = -EBUSY;
1733 if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
1734 printk("fd: cannot get floppy registers\n");
1735 goto out_blkdev;
1736 }
1737
1738 ret = -ENOMEM;
1739 if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
1740 NULL) {
1741 printk("fd: cannot get chip mem buffer\n");
1742 goto out_memregion;
1743 }
1744
1745 ret = -EBUSY;
1746 if (request_irq(IRQ_AMIGA_DSKBLK, fd_block_done, 0, "floppy_dma", NULL)) {
1747 printk("fd: cannot get irq for dma\n");
1748 goto out_irq;
1749 }
1750
1751 if (request_irq(IRQ_AMIGA_CIAA_TB, ms_isr, 0, "floppy_timer", NULL)) {
1752 printk("fd: cannot get irq for timer\n");
1753 goto out_irq2;
1754 }
1755
1756 ret = -ENOMEM;
1757 floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock);
1758 if (!floppy_queue)
1759 goto out_queue;
1760
1761 ret = -ENXIO;
1762 if (fd_probe_drives() < 1) /* No usable drives */
1763 goto out_probe;
1764
1765 blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
1766 floppy_find, NULL, NULL);
1767
1768 /* initialize variables */
1769 init_timer(&motor_on_timer);
1770 motor_on_timer.expires = 0;
1771 motor_on_timer.data = 0;
1772 motor_on_timer.function = motor_on_callback;
1773 for (i = 0; i < FD_MAX_UNITS; i++) {
1774 init_timer(&motor_off_timer[i]);
1775 motor_off_timer[i].expires = 0;
1776 motor_off_timer[i].data = i|0x80000000;
1777 motor_off_timer[i].function = fd_motor_off;
1778 init_timer(&flush_track_timer[i]);
1779 flush_track_timer[i].expires = 0;
1780 flush_track_timer[i].data = i;
1781 flush_track_timer[i].function = flush_track_callback;
1782
1783 unit[i].track = -1;
1784 }
1785
1786 init_timer(&post_write_timer);
1787 post_write_timer.expires = 0;
1788 post_write_timer.data = 0;
1789 post_write_timer.function = post_write;
1790
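	/* build mfmdecode as the inverse of the mfmencode table;
	 * values with no encoding are left at 255 */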
1791 for (i = 0; i < 128; i++)
1792 mfmdecode[i]=255;
1793 for (i = 0; i < 16; i++)
1794 mfmdecode[mfmencode[i]]=i;
1795
1796 /* make sure that disk DMA is enabled */
1797 custom.dmacon = DMAF_SETCLR | DMAF_DISK;
1798
1799 /* init ms timer */
1800 ciaa.crb = 8; /* one-shot, stop */
1801 return 0;
1802
1803out_probe:
1804 blk_cleanup_queue(floppy_queue);
1805out_queue:
1806 free_irq(IRQ_AMIGA_CIAA_TB, NULL);
1807out_irq2:
1808 free_irq(IRQ_AMIGA_DSKBLK, NULL);
1809out_irq:
1810 amiga_chip_free(raw_buf);
1811out_memregion:
1812 release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
1813out_blkdev:
1814 unregister_blkdev(FLOPPY_MAJOR,"fd");
1815 return ret;
1816}
1817
1818#ifdef MODULE
1819#include <linux/version.h>
1820
1821int init_module(void)
1822{
1823 if (!MACH_IS_AMIGA)
1824 return -ENXIO;
1825 return amiga_floppy_init();
1826}
1827
1828#if 0 /* not safe to unload */
1829void cleanup_module(void)
1830{
1831 int i;
1832
1833 for( i = 0; i < FD_MAX_UNITS; i++) {
1834 if (unit[i].type->code != FD_NODRIVE) {
1835 del_gendisk(unit[i].gendisk);
1836 put_disk(unit[i].gendisk);
1837 kfree(unit[i].trackbuf);
1838 }
1839 }
1840 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
1841 free_irq(IRQ_AMIGA_CIAA_TB, NULL);
1842 free_irq(IRQ_AMIGA_DSKBLK, NULL);
1843 custom.dmacon = DMAF_DISK; /* disable DMA */
1844 amiga_chip_free(raw_buf);
1845 blk_cleanup_queue(floppy_queue);
1846 release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
1847 unregister_blkdev(FLOPPY_MAJOR, "fd");
1848}
1849#endif
1850#endif
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
new file mode 100644
index 000000000000..e76d997183c6
--- /dev/null
+++ b/drivers/block/aoe/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for ATA over Ethernet
3#
4
5obj-$(CONFIG_ATA_OVER_ETH) += aoe.o
6aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
new file mode 100644
index 000000000000..db78f826d40c
--- /dev/null
+++ b/drivers/block/aoe/aoe.h
@@ -0,0 +1,165 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2#define VERSION "5"
3#define AOE_MAJOR 152
4#define DEVICE_NAME "aoe"
5#ifndef AOE_PARTITIONS
6#define AOE_PARTITIONS 16
7#endif
8#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * 10 + (aoeminor))
9#define AOEMAJOR(sysminor) ((sysminor) / 10)
10#define AOEMINOR(sysminor) ((sysminor) % 10)
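/* e.g. AoE address 1.2 (shelf 1, slot 2) maps to sysminor 1 * 10 + 2 == 12 */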
11#define WHITESPACE " \t\v\f\n"
12
13enum {
14 AOECMD_ATA,
15 AOECMD_CFG,
16
17 AOEFL_RSP = (1<<3),
18 AOEFL_ERR = (1<<2),
19
20 AOEAFL_EXT = (1<<6),
21 AOEAFL_DEV = (1<<4),
22 AOEAFL_ASYNC = (1<<1),
23 AOEAFL_WRITE = (1<<0),
24
25 AOECCMD_READ = 0,
26 AOECCMD_TEST,
27 AOECCMD_PTEST,
28 AOECCMD_SET,
29 AOECCMD_FSET,
30
31 AOE_HVER = 0x10,
32};
33
34struct aoe_hdr {
35 unsigned char dst[6];
36 unsigned char src[6];
37 unsigned char type[2];
38 unsigned char verfl;
39 unsigned char err;
40 unsigned char major[2];
41 unsigned char minor;
42 unsigned char cmd;
43 unsigned char tag[4];
44};
45
46struct aoe_atahdr {
47 unsigned char aflags;
48 unsigned char errfeat;
49 unsigned char scnt;
50 unsigned char cmdstat;
51 unsigned char lba0;
52 unsigned char lba1;
53 unsigned char lba2;
54 unsigned char lba3;
55 unsigned char lba4;
56 unsigned char lba5;
57 unsigned char res[2];
58};
59
60struct aoe_cfghdr {
61 unsigned char bufcnt[2];
62 unsigned char fwver[2];
63 unsigned char res;
64 unsigned char aoeccmd;
65 unsigned char cslen[2];
66};
67
68enum {
69 DEVFL_UP = 1, /* device is installed in system and ready for AoE->ATA commands */
70 DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */
71 DEVFL_EXT = (1<<2), /* device accepts lba48 commands */
72 DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */
73 DEVFL_WC_UPDATE = (1<<4), /* this device needs to update write cache status */
74 DEVFL_WORKON = (1<<4),
75
76 BUFFL_FAIL = 1,
77};
78
79enum {
80 MAXATADATA = 1024,
81 NPERSHELF = 10,
82 FREETAG = -1,
83 MIN_BUFS = 8,
84};
85
86struct buf {
87 struct list_head bufs;
88 ulong flags;
89 ulong nframesout;
90 char *bufaddr;
91 ulong resid;
92 ulong bv_resid;
93 sector_t sector;
94 struct bio *bio;
95 struct bio_vec *bv;
96};
97
98struct frame {
99 int tag;
100 ulong waited;
101 struct buf *buf;
102 char *bufaddr;
103 int writedatalen;
104 int ndata;
105
106 /* largest possible */
107 unsigned char data[sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr)];
108};
109
110struct aoedev {
111 struct aoedev *next;
112 unsigned char addr[6]; /* remote mac addr */
113 ushort flags;
114 ulong sysminor;
115 ulong aoemajor;
116 ulong aoeminor;
117 ulong nopen; /* (bd_openers isn't available without sleeping) */
118 ulong rttavg; /* round trip average of requests/responses */
119 u16 fw_ver; /* version of blade's firmware */
120 struct work_struct work;/* disk create work struct */
121 struct gendisk *gd;
122 request_queue_t blkq;
123 struct hd_geometry geo;
124 sector_t ssize;
125 struct timer_list timer;
126 spinlock_t lock;
127	struct net_device *ifp;	/* interface the device is attached to */
128 struct sk_buff *skblist;/* packets needing to be sent */
129 mempool_t *bufpool; /* for deadlock-free Buf allocation */
130 struct list_head bufq; /* queue of bios to work on */
131 struct buf *inprocess; /* the one we're currently working on */
132 ulong lasttag; /* last tag sent */
133 ulong nframes; /* number of frames below */
134 struct frame *frames;
135};
136
137
138int aoeblk_init(void);
139void aoeblk_exit(void);
140void aoeblk_gdalloc(void *);
141void aoedisk_rm_sysfs(struct aoedev *d);
142
143int aoechr_init(void);
144void aoechr_exit(void);
145void aoechr_error(char *);
146
147void aoecmd_work(struct aoedev *d);
148void aoecmd_cfg(ushort, unsigned char);
149void aoecmd_ata_rsp(struct sk_buff *);
150void aoecmd_cfg_rsp(struct sk_buff *);
151
152int aoedev_init(void);
153void aoedev_exit(void);
154struct aoedev *aoedev_bymac(unsigned char *);
155void aoedev_downdev(struct aoedev *d);
156struct aoedev *aoedev_set(ulong, unsigned char *, struct net_device *, ulong);
157int aoedev_busy(void);
158
159int aoenet_init(void);
160void aoenet_exit(void);
161void aoenet_xmit(struct sk_buff *);
162int is_aoe_netif(struct net_device *ifp);
163int set_aoe_iflist(const char __user *str, size_t size);
164
165u64 mac_addr(char addr[6]);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
new file mode 100644
index 000000000000..63561b280bc5
--- /dev/null
+++ b/drivers/block/aoe/aoeblk.c
@@ -0,0 +1,267 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoeblk.c
4 * block device routines
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/fs.h>
10#include <linux/ioctl.h>
11#include <linux/genhd.h>
12#include <linux/netdevice.h>
13#include "aoe.h"
14
15static kmem_cache_t *buf_pool_cache;
16
17/* add attributes for our block devices in sysfs */
18static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
19{
20 struct aoedev *d = disk->private_data;
21
22 return snprintf(page, PAGE_SIZE,
23 "%s%s\n",
24 (d->flags & DEVFL_UP) ? "up" : "down",
25 (d->flags & DEVFL_CLOSEWAIT) ? ",closewait" : "");
26}
27static ssize_t aoedisk_show_mac(struct gendisk * disk, char *page)
28{
29 struct aoedev *d = disk->private_data;
30
31 return snprintf(page, PAGE_SIZE, "%012llx\n",
32 (unsigned long long)mac_addr(d->addr));
33}
34static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
35{
36 struct aoedev *d = disk->private_data;
37
38 return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
39}
40
41static struct disk_attribute disk_attr_state = {
42 .attr = {.name = "state", .mode = S_IRUGO },
43 .show = aoedisk_show_state
44};
45static struct disk_attribute disk_attr_mac = {
46 .attr = {.name = "mac", .mode = S_IRUGO },
47 .show = aoedisk_show_mac
48};
49static struct disk_attribute disk_attr_netif = {
50 .attr = {.name = "netif", .mode = S_IRUGO },
51 .show = aoedisk_show_netif
52};
53
54static void
55aoedisk_add_sysfs(struct aoedev *d)
56{
57 sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr);
58 sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr);
59 sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr);
60}
61void
62aoedisk_rm_sysfs(struct aoedev *d)
63{
64 sysfs_remove_link(&d->gd->kobj, "state");
65 sysfs_remove_link(&d->gd->kobj, "mac");
66 sysfs_remove_link(&d->gd->kobj, "netif");
67}
68
69static int
70aoeblk_open(struct inode *inode, struct file *filp)
71{
72 struct aoedev *d;
73 ulong flags;
74
75 d = inode->i_bdev->bd_disk->private_data;
76
77 spin_lock_irqsave(&d->lock, flags);
78 if (d->flags & DEVFL_UP) {
79 d->nopen++;
80 spin_unlock_irqrestore(&d->lock, flags);
81 return 0;
82 }
83 spin_unlock_irqrestore(&d->lock, flags);
84 return -ENODEV;
85}
86
87static int
88aoeblk_release(struct inode *inode, struct file *filp)
89{
90 struct aoedev *d;
91 ulong flags;
92
93 d = inode->i_bdev->bd_disk->private_data;
94
95 spin_lock_irqsave(&d->lock, flags);
96
97 if (--d->nopen == 0 && (d->flags & DEVFL_CLOSEWAIT)) {
98 d->flags &= ~DEVFL_CLOSEWAIT;
99 spin_unlock_irqrestore(&d->lock, flags);
100 aoecmd_cfg(d->aoemajor, d->aoeminor);
101 return 0;
102 }
103 spin_unlock_irqrestore(&d->lock, flags);
104
105 return 0;
106}
107
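/*
 * Wrap the incoming bio in a struct buf, queue it on the device's bufq,
 * let aoecmd_work() build AoE frames for it, and transmit whatever frames
 * were collected on d->skblist.
 */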
108static int
109aoeblk_make_request(request_queue_t *q, struct bio *bio)
110{
111 struct aoedev *d;
112 struct buf *buf;
113 struct sk_buff *sl;
114 ulong flags;
115
116 blk_queue_bounce(q, &bio);
117
118 d = bio->bi_bdev->bd_disk->private_data;
119 buf = mempool_alloc(d->bufpool, GFP_NOIO);
120 if (buf == NULL) {
121 printk(KERN_INFO "aoe: aoeblk_make_request: buf allocation "
122 "failure\n");
123 bio_endio(bio, bio->bi_size, -ENOMEM);
124 return 0;
125 }
126 memset(buf, 0, sizeof(*buf));
127 INIT_LIST_HEAD(&buf->bufs);
128 buf->bio = bio;
129 buf->resid = bio->bi_size;
130 buf->sector = bio->bi_sector;
131 buf->bv = buf->bio->bi_io_vec;
132 buf->bv_resid = buf->bv->bv_len;
133 buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
134
135 spin_lock_irqsave(&d->lock, flags);
136
137 if ((d->flags & DEVFL_UP) == 0) {
138 printk(KERN_INFO "aoe: aoeblk_make_request: device %ld.%ld is not up\n",
139 d->aoemajor, d->aoeminor);
140 spin_unlock_irqrestore(&d->lock, flags);
141 mempool_free(buf, d->bufpool);
142 bio_endio(bio, bio->bi_size, -ENXIO);
143 return 0;
144 }
145
146 list_add_tail(&buf->bufs, &d->bufq);
147 aoecmd_work(d);
148
149 sl = d->skblist;
150 d->skblist = NULL;
151
152 spin_unlock_irqrestore(&d->lock, flags);
153
154 aoenet_xmit(sl);
155 return 0;
156}
157
158/* This ioctl implementation expects userland to have the device node
159 * permissions set so that only privileged users can open an aoe
160 * block device directly.
161 */
162static int
163aoeblk_ioctl(struct inode *inode, struct file *filp, uint cmd, ulong arg)
164{
165 struct aoedev *d;
166
167 if (!arg)
168 return -EINVAL;
169
170 d = inode->i_bdev->bd_disk->private_data;
171 if ((d->flags & DEVFL_UP) == 0) {
172 printk(KERN_ERR "aoe: aoeblk_ioctl: disk not up\n");
173 return -ENODEV;
174 }
175
176 if (cmd == HDIO_GETGEO) {
177 d->geo.start = get_start_sect(inode->i_bdev);
178 if (!copy_to_user((void __user *) arg, &d->geo, sizeof d->geo))
179 return 0;
180 return -EFAULT;
181 }
182 printk(KERN_INFO "aoe: aoeblk_ioctl: unknown ioctl %d\n", cmd);
183 return -EINVAL;
184}
185
186static struct block_device_operations aoe_bdops = {
187 .open = aoeblk_open,
188 .release = aoeblk_release,
189 .ioctl = aoeblk_ioctl,
190 .owner = THIS_MODULE,
191};
192
193/* alloc_disk and add_disk can sleep */
194void
195aoeblk_gdalloc(void *vp)
196{
197 struct aoedev *d = vp;
198 struct gendisk *gd;
199 ulong flags;
200
201 gd = alloc_disk(AOE_PARTITIONS);
202 if (gd == NULL) {
203 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate disk "
204 "structure for %ld.%ld\n", d->aoemajor, d->aoeminor);
205 spin_lock_irqsave(&d->lock, flags);
206 d->flags &= ~DEVFL_WORKON;
207 spin_unlock_irqrestore(&d->lock, flags);
208 return;
209 }
210
211 d->bufpool = mempool_create(MIN_BUFS,
212 mempool_alloc_slab, mempool_free_slab,
213 buf_pool_cache);
214 if (d->bufpool == NULL) {
215 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
216 "for %ld.%ld\n", d->aoemajor, d->aoeminor);
217 put_disk(gd);
218 spin_lock_irqsave(&d->lock, flags);
219 d->flags &= ~DEVFL_WORKON;
220 spin_unlock_irqrestore(&d->lock, flags);
221 return;
222 }
223
224 spin_lock_irqsave(&d->lock, flags);
225 blk_queue_make_request(&d->blkq, aoeblk_make_request);
226 gd->major = AOE_MAJOR;
227 gd->first_minor = d->sysminor * AOE_PARTITIONS;
228 gd->fops = &aoe_bdops;
229 gd->private_data = d;
230 gd->capacity = d->ssize;
231 snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
232 d->aoemajor, d->aoeminor);
233
234 gd->queue = &d->blkq;
235 d->gd = gd;
236 d->flags &= ~DEVFL_WORKON;
237 d->flags |= DEVFL_UP;
238
239 spin_unlock_irqrestore(&d->lock, flags);
240
241 add_disk(gd);
242 aoedisk_add_sysfs(d);
243
244 printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
245 "sectors\n", (unsigned long long)mac_addr(d->addr),
246 d->aoemajor, d->aoeminor,
247 d->fw_ver, (long long)d->ssize);
248}
249
250void
251aoeblk_exit(void)
252{
253 kmem_cache_destroy(buf_pool_cache);
254}
255
256int __init
257aoeblk_init(void)
258{
259 buf_pool_cache = kmem_cache_create("aoe_bufs",
260 sizeof(struct buf),
261 0, 0, NULL, NULL);
262 if (buf_pool_cache == NULL)
263 return -ENOMEM;
264
265 return 0;
266}
267
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
new file mode 100644
index 000000000000..14aeca3e2e8c
--- /dev/null
+++ b/drivers/block/aoe/aoechr.c
@@ -0,0 +1,244 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoechr.c
4 * AoE character device driver
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include "aoe.h"
10
11enum {
12 //MINOR_STAT = 1, (moved to sysfs)
13 MINOR_ERR = 2,
14 MINOR_DISCOVER,
15 MINOR_INTERFACES,
16 MSGSZ = 2048,
17 NARGS = 10,
18 NMSG = 100, /* message backlog to retain */
19};
20
21struct aoe_chardev {
22 ulong minor;
23 char name[32];
24};
25
26enum { EMFL_VALID = 1 };
27
28struct ErrMsg {
29 short flags;
30 short len;
31 char *msg;
32};
33
34static struct ErrMsg emsgs[NMSG];
35static int emsgs_head_idx, emsgs_tail_idx;
36static struct semaphore emsgs_sema;
37static spinlock_t emsgs_lock;
38static int nblocked_emsgs_readers;
39static struct class_simple *aoe_class;
40static struct aoe_chardev chardevs[] = {
41 { MINOR_ERR, "err" },
42 { MINOR_DISCOVER, "discover" },
43 { MINOR_INTERFACES, "interfaces" },
44};
45
46static int
47discover(void)
48{
49 aoecmd_cfg(0xffff, 0xff);
50 return 0;
51}
52
53static int
54interfaces(const char __user *str, size_t size)
55{
56 if (set_aoe_iflist(str, size)) {
57 printk(KERN_CRIT
58 "%s: could not set interface list: %s\n",
59 __FUNCTION__, "too many interfaces");
60 return -EINVAL;
61 }
62 return 0;
63}
64
65void
66aoechr_error(char *msg)
67{
68 struct ErrMsg *em;
69 char *mp;
70 ulong flags, n;
71
72 n = strlen(msg);
73
74 spin_lock_irqsave(&emsgs_lock, flags);
75
76 em = emsgs + emsgs_tail_idx;
77 if ((em->flags & EMFL_VALID)) {
78bail: spin_unlock_irqrestore(&emsgs_lock, flags);
79 return;
80 }
81
82 mp = kmalloc(n, GFP_ATOMIC);
83 if (mp == NULL) {
84 printk(KERN_CRIT "aoe: aoechr_error: allocation failure, len=%ld\n", n);
85 goto bail;
86 }
87
88 memcpy(mp, msg, n);
89 em->msg = mp;
90 em->flags |= EMFL_VALID;
91 em->len = n;
92
93 emsgs_tail_idx++;
94 emsgs_tail_idx %= ARRAY_SIZE(emsgs);
95
96 spin_unlock_irqrestore(&emsgs_lock, flags);
97
98 if (nblocked_emsgs_readers)
99 up(&emsgs_sema);
100}
101
102static ssize_t
103aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
104{
105 int ret = -EINVAL;
106
107 switch ((unsigned long) filp->private_data) {
108 default:
109 printk(KERN_INFO "aoe: aoechr_write: can't write to that file.\n");
110 break;
111 case MINOR_DISCOVER:
112 ret = discover();
113 break;
114 case MINOR_INTERFACES:
115 ret = interfaces(buf, cnt);
116 break;
117 }
118 if (ret == 0)
119 ret = cnt;
120 return ret;
121}
122
123static int
124aoechr_open(struct inode *inode, struct file *filp)
125{
126 int n, i;
127
128 n = MINOR(inode->i_rdev);
129 filp->private_data = (void *) (unsigned long) n;
130
131 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
132 if (chardevs[i].minor == n)
133 return 0;
134 return -EINVAL;
135}
136
137static int
138aoechr_rel(struct inode *inode, struct file *filp)
139{
140 return 0;
141}
142
143static ssize_t
144aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
145{
146 unsigned long n;
147 char *mp;
148 struct ErrMsg *em;
149 ssize_t len;
150 ulong flags;
151
152 n = (unsigned long) filp->private_data;
153 switch (n) {
154 case MINOR_ERR:
155 spin_lock_irqsave(&emsgs_lock, flags);
156loop:
157 em = emsgs + emsgs_head_idx;
158 if ((em->flags & EMFL_VALID) == 0) {
159 if (filp->f_flags & O_NDELAY) {
160 spin_unlock_irqrestore(&emsgs_lock, flags);
161 return -EAGAIN;
162 }
163 nblocked_emsgs_readers++;
164
165 spin_unlock_irqrestore(&emsgs_lock, flags);
166
167 n = down_interruptible(&emsgs_sema);
168
169 spin_lock_irqsave(&emsgs_lock, flags);
170
171 nblocked_emsgs_readers--;
172
173 if (n) {
174 spin_unlock_irqrestore(&emsgs_lock, flags);
175 return -ERESTARTSYS;
176 }
177 goto loop;
178 }
179 if (em->len > cnt) {
180 spin_unlock_irqrestore(&emsgs_lock, flags);
181 return -EAGAIN;
182 }
183 mp = em->msg;
184 len = em->len;
185 em->msg = NULL;
186 em->flags &= ~EMFL_VALID;
187
188 emsgs_head_idx++;
189 emsgs_head_idx %= ARRAY_SIZE(emsgs);
190
191 spin_unlock_irqrestore(&emsgs_lock, flags);
192
193 n = copy_to_user(buf, mp, len);
194 kfree(mp);
195 return n == 0 ? len : -EFAULT;
196 default:
197 return -EFAULT;
198 }
199}
200
201static struct file_operations aoe_fops = {
202 .write = aoechr_write,
203 .read = aoechr_read,
204 .open = aoechr_open,
205 .release = aoechr_rel,
206 .owner = THIS_MODULE,
207};
208
209int __init
210aoechr_init(void)
211{
212 int n, i;
213
214 n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
215 if (n < 0) {
216 printk(KERN_ERR "aoe: aoechr_init: can't register char device\n");
217 return n;
218 }
219 sema_init(&emsgs_sema, 0);
220 spin_lock_init(&emsgs_lock);
221 aoe_class = class_simple_create(THIS_MODULE, "aoe");
222 if (IS_ERR(aoe_class)) {
223 unregister_chrdev(AOE_MAJOR, "aoechr");
224 return PTR_ERR(aoe_class);
225 }
226 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
227 class_simple_device_add(aoe_class,
228 MKDEV(AOE_MAJOR, chardevs[i].minor),
229 NULL, chardevs[i].name);
230
231 return 0;
232}
233
234void
235aoechr_exit(void)
236{
237 int i;
238
239 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
240 class_simple_device_remove(MKDEV(AOE_MAJOR, chardevs[i].minor));
241 class_simple_destroy(aoe_class);
242 unregister_chrdev(AOE_MAJOR, "aoechr");
243}
244
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
new file mode 100644
index 000000000000..fb6d942a4565
--- /dev/null
+++ b/drivers/block/aoe/aoecmd.c
@@ -0,0 +1,629 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoecmd.c
4 * Filesystem request handling methods
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/skbuff.h>
10#include <linux/netdevice.h>
11#include "aoe.h"
12
13#define TIMERTICK (HZ / 10)
14#define MINTIMER (2 * TIMERTICK)
15#define MAXTIMER (HZ << 1)
16#define MAXWAIT (60 * 3) /* After MAXWAIT seconds, give up and fail dev */
17
18static struct sk_buff *
19new_skb(struct net_device *if_dev, ulong len)
20{
21 struct sk_buff *skb;
22
23 skb = alloc_skb(len, GFP_ATOMIC);
24 if (skb) {
25 skb->nh.raw = skb->mac.raw = skb->data;
26 skb->dev = if_dev;
27 skb->protocol = __constant_htons(ETH_P_AOE);
28 skb->priority = 0;
29 skb_put(skb, len);
30 skb->next = skb->prev = NULL;
31
32 /* tell the network layer not to perform IP checksums
33 * or to get the NIC to do it
34 */
35 skb->ip_summed = CHECKSUM_NONE;
36 }
37 return skb;
38}
39
40static struct sk_buff *
41skb_prepare(struct aoedev *d, struct frame *f)
42{
43 struct sk_buff *skb;
44 char *p;
45
46 skb = new_skb(d->ifp, f->ndata + f->writedatalen);
47 if (!skb) {
48 printk(KERN_INFO "aoe: skb_prepare: failure to allocate skb\n");
49 return NULL;
50 }
51
52 p = skb->mac.raw;
53 memcpy(p, f->data, f->ndata);
54
55 if (f->writedatalen) {
56 p += sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
57 memcpy(p, f->bufaddr, f->writedatalen);
58 }
59
60 return skb;
61}
62
63static struct frame *
64getframe(struct aoedev *d, int tag)
65{
66 struct frame *f, *e;
67
68 f = d->frames;
69 e = f + d->nframes;
70 for (; f<e; f++)
71 if (f->tag == tag)
72 return f;
73 return NULL;
74}
75
76/*
77 * Leave the top bit clear so we have tagspace for userland.
78 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
79 * This driver reserves tag -1 to mean "unused frame."
80 */
81static int
82newtag(struct aoedev *d)
83{
84 register ulong n;
85
86 n = jiffies & 0xffff;
87 return n |= (++d->lasttag & 0x7fff) << 16;
88}
89
90static int
91aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
92{
93 u16 type = __constant_cpu_to_be16(ETH_P_AOE);
94 u16 aoemajor = __cpu_to_be16(d->aoemajor);
95 u32 host_tag = newtag(d);
96 u32 tag = __cpu_to_be32(host_tag);
97
98 memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
99 memcpy(h->dst, d->addr, sizeof h->dst);
100 memcpy(h->type, &type, sizeof type);
101 h->verfl = AOE_HVER;
102 memcpy(h->major, &aoemajor, sizeof aoemajor);
103 h->minor = d->aoeminor;
104 h->cmd = AOECMD_ATA;
105 memcpy(h->tag, &tag, sizeof tag);
106
107 return host_tag;
108}
109
110static void
111aoecmd_ata_rw(struct aoedev *d, struct frame *f)
112{
113 struct aoe_hdr *h;
114 struct aoe_atahdr *ah;
115 struct buf *buf;
116 struct sk_buff *skb;
117 ulong bcnt;
118 register sector_t sector;
119 char writebit, extbit;
120
121 writebit = 0x10;
122 extbit = 0x4;
123
124 buf = d->inprocess;
125
126 sector = buf->sector;
127 bcnt = buf->bv_resid;
128 if (bcnt > MAXATADATA)
129 bcnt = MAXATADATA;
130
131 /* initialize the headers & frame */
132 h = (struct aoe_hdr *) f->data;
133 ah = (struct aoe_atahdr *) (h+1);
134 f->ndata = sizeof *h + sizeof *ah;
135 memset(h, 0, f->ndata);
136 f->tag = aoehdr_atainit(d, h);
137 f->waited = 0;
138 f->buf = buf;
139 f->bufaddr = buf->bufaddr;
140
141 /* set up ata header */
142 ah->scnt = bcnt >> 9;
143 ah->lba0 = sector;
144 ah->lba1 = sector >>= 8;
145 ah->lba2 = sector >>= 8;
146 ah->lba3 = sector >>= 8;
147 if (d->flags & DEVFL_EXT) {
148 ah->aflags |= AOEAFL_EXT;
149 ah->lba4 = sector >>= 8;
150 ah->lba5 = sector >>= 8;
151 } else {
152 extbit = 0;
153 ah->lba3 &= 0x0f;
154 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
155 }
156
157 if (bio_data_dir(buf->bio) == WRITE) {
158 ah->aflags |= AOEAFL_WRITE;
159 f->writedatalen = bcnt;
160 } else {
161 writebit = 0;
162 f->writedatalen = 0;
163 }
164
165 ah->cmdstat = WIN_READ | writebit | extbit;
166
167 /* mark all tracking fields and load out */
168 buf->nframesout += 1;
169 buf->bufaddr += bcnt;
170 buf->bv_resid -= bcnt;
171/* printk(KERN_INFO "aoe: bv_resid=%ld\n", buf->bv_resid); */
172 buf->resid -= bcnt;
173 buf->sector += bcnt >> 9;
174 if (buf->resid == 0) {
175 d->inprocess = NULL;
176 } else if (buf->bv_resid == 0) {
177 buf->bv++;
178 buf->bv_resid = buf->bv->bv_len;
179 buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
180 }
181
182 skb = skb_prepare(d, f);
183 if (skb) {
184 skb->next = d->skblist;
185 d->skblist = skb;
186 }
187}
188
189/* enters with d->lock held */
190void
191aoecmd_work(struct aoedev *d)
192{
193 struct frame *f;
194 struct buf *buf;
195loop:
196 f = getframe(d, FREETAG);
197 if (f == NULL)
198 return;
199 if (d->inprocess == NULL) {
200 if (list_empty(&d->bufq))
201 return;
202 buf = container_of(d->bufq.next, struct buf, bufs);
203 list_del(d->bufq.next);
204/*printk(KERN_INFO "aoecmd_work: bi_size=%ld\n", buf->bio->bi_size); */
205 d->inprocess = buf;
206 }
207 aoecmd_ata_rw(d, f);
208 goto loop;
209}
210
211static void
212rexmit(struct aoedev *d, struct frame *f)
213{
214 struct sk_buff *skb;
215 struct aoe_hdr *h;
216 char buf[128];
217 u32 n;
218 u32 net_tag;
219
220 n = newtag(d);
221
222 snprintf(buf, sizeof buf,
223 "%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
224 "retransmit",
225 d->aoemajor, d->aoeminor, f->tag, jiffies, n);
226 aoechr_error(buf);
227
228 h = (struct aoe_hdr *) f->data;
229 f->tag = n;
230 net_tag = __cpu_to_be32(n);
231 memcpy(h->tag, &net_tag, sizeof net_tag);
232
233 skb = skb_prepare(d, f);
234 if (skb) {
235 skb->next = d->skblist;
236 d->skblist = skb;
237 }
238}
239
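/* ticks elapsed since a frame was sent: the low 16 bits of a tag hold
 * (jiffies & 0xffff) at transmit time, so subtract and wrap mod 1<<16 */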
240static int
241tsince(int tag)
242{
243 int n;
244
245 n = jiffies & 0xffff;
246 n -= tag & 0xffff;
247 if (n < 0)
248 n += 1<<16;
249 return n;
250}
251
252static void
253rexmit_timer(ulong vp)
254{
255 struct aoedev *d;
256 struct frame *f, *e;
257 struct sk_buff *sl;
258 register long timeout;
259 ulong flags, n;
260
261 d = (struct aoedev *) vp;
262 sl = NULL;
263
264 /* timeout is always ~150% of the moving average */
265 timeout = d->rttavg;
266 timeout += timeout >> 1;
267
268 spin_lock_irqsave(&d->lock, flags);
269
270 if (d->flags & DEVFL_TKILL) {
271tdie: spin_unlock_irqrestore(&d->lock, flags);
272 return;
273 }
274 f = d->frames;
275 e = f + d->nframes;
276 for (; f<e; f++) {
277 if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
278 n = f->waited += timeout;
279 n /= HZ;
280 if (n > MAXWAIT) { /* waited too long. device failure. */
281 aoedev_downdev(d);
282 goto tdie;
283 }
284 rexmit(d, f);
285 }
286 }
287
288 sl = d->skblist;
289 d->skblist = NULL;
290 if (sl) {
291 n = d->rttavg <<= 1;
292 if (n > MAXTIMER)
293 d->rttavg = MAXTIMER;
294 }
295
296 d->timer.expires = jiffies + TIMERTICK;
297 add_timer(&d->timer);
298
299 spin_unlock_irqrestore(&d->lock, flags);
300
301 aoenet_xmit(sl);
302}
303
304static void
305ataid_complete(struct aoedev *d, unsigned char *id)
306{
307 u64 ssize;
308 u16 n;
309
310 /* word 83: command set supported */
311 n = __le16_to_cpu(*((u16 *) &id[83<<1]));
312
313 /* word 86: command set/feature enabled */
314 n |= __le16_to_cpu(*((u16 *) &id[86<<1]));
315
316 if (n & (1<<10)) { /* bit 10: LBA 48 */
317 d->flags |= DEVFL_EXT;
318
319 /* word 100: number lba48 sectors */
320 ssize = __le64_to_cpu(*((u64 *) &id[100<<1]));
321
322 /* set as in ide-disk.c:init_idedisk_capacity */
323 d->geo.cylinders = ssize;
324 d->geo.cylinders /= (255 * 63);
325 d->geo.heads = 255;
326 d->geo.sectors = 63;
327 } else {
328 d->flags &= ~DEVFL_EXT;
329
330 /* number lba28 sectors */
331 ssize = __le32_to_cpu(*((u32 *) &id[60<<1]));
332
333 /* NOTE: obsolete in ATA 6 */
334 d->geo.cylinders = __le16_to_cpu(*((u16 *) &id[54<<1]));
335 d->geo.heads = __le16_to_cpu(*((u16 *) &id[55<<1]));
336 d->geo.sectors = __le16_to_cpu(*((u16 *) &id[56<<1]));
337 }
338 d->ssize = ssize;
339 d->geo.start = 0;
340 if (d->gd != NULL) {
341 d->gd->capacity = ssize;
342 d->flags |= DEVFL_UP;
343 return;
344 }
345 if (d->flags & DEVFL_WORKON) {
346 printk(KERN_INFO "aoe: ataid_complete: can't schedule work, it's already on! "
347 "(This really shouldn't happen).\n");
348 return;
349 }
350 INIT_WORK(&d->work, aoeblk_gdalloc, d);
351 schedule_work(&d->work);
352 d->flags |= DEVFL_WORKON;
353}
354
355static void
356calc_rttavg(struct aoedev *d, int rtt)
357{
358 register long n;
359
360 n = rtt;
361 if (n < MINTIMER)
362 n = MINTIMER;
363 else if (n > MAXTIMER)
364 n = MAXTIMER;
365
366 /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
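	/* i.e. rttavg += (clamped rtt - rttavg) / 4 */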
367 n -= d->rttavg;
368 d->rttavg += n >> 2;
369}
370
371void
372aoecmd_ata_rsp(struct sk_buff *skb)
373{
374 struct aoedev *d;
375 struct aoe_hdr *hin;
376 struct aoe_atahdr *ahin, *ahout;
377 struct frame *f;
378 struct buf *buf;
379 struct sk_buff *sl;
380 register long n;
381 ulong flags;
382 char ebuf[128];
383
384 hin = (struct aoe_hdr *) skb->mac.raw;
385 d = aoedev_bymac(hin->src);
386 if (d == NULL) {
387 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
388 "for unknown device %d.%d\n",
389 __be16_to_cpu(*((u16 *) hin->major)),
390 hin->minor);
391 aoechr_error(ebuf);
392 return;
393 }
394
395 spin_lock_irqsave(&d->lock, flags);
396
397 f = getframe(d, __be32_to_cpu(*((u32 *) hin->tag)));
398 if (f == NULL) {
399 spin_unlock_irqrestore(&d->lock, flags);
400 snprintf(ebuf, sizeof ebuf,
401 "%15s e%d.%d tag=%08x@%08lx\n",
402 "unexpected rsp",
403 __be16_to_cpu(*((u16 *) hin->major)),
404 hin->minor,
405 __be32_to_cpu(*((u32 *) hin->tag)),
406 jiffies);
407 aoechr_error(ebuf);
408 return;
409 }
410
411 calc_rttavg(d, tsince(f->tag));
412
413 ahin = (struct aoe_atahdr *) (hin+1);
414 ahout = (struct aoe_atahdr *) (f->data + sizeof(struct aoe_hdr));
415 buf = f->buf;
416
417 if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
418 printk(KERN_CRIT "aoe: aoecmd_ata_rsp: ata error cmd=%2.2Xh "
419 "stat=%2.2Xh from e%ld.%ld\n",
420 ahout->cmdstat, ahin->cmdstat,
421 d->aoemajor, d->aoeminor);
422 if (buf)
423 buf->flags |= BUFFL_FAIL;
424 } else {
425 switch (ahout->cmdstat) {
426 case WIN_READ:
427 case WIN_READ_EXT:
428 n = ahout->scnt << 9;
429 if (skb->len - sizeof *hin - sizeof *ahin < n) {
430 printk(KERN_CRIT "aoe: aoecmd_ata_rsp: runt "
431 "ata data size in read. skb->len=%d\n",
432 skb->len);
433 /* fail frame f? just returning will rexmit. */
434 spin_unlock_irqrestore(&d->lock, flags);
435 return;
436 }
437 memcpy(f->bufaddr, ahin+1, n);
438 case WIN_WRITE:
439 case WIN_WRITE_EXT:
440 break;
441 case WIN_IDENTIFY:
442 if (skb->len - sizeof *hin - sizeof *ahin < 512) {
443 printk(KERN_INFO "aoe: aoecmd_ata_rsp: runt data size "
444 "in ataid. skb->len=%d\n", skb->len);
445 spin_unlock_irqrestore(&d->lock, flags);
446 return;
447 }
448 ataid_complete(d, (char *) (ahin+1));
449 /* d->flags |= DEVFL_WC_UPDATE; */
450 break;
451 default:
452 printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
453 "outbound ata command %2.2Xh for %d.%d\n",
454 ahout->cmdstat,
455 __be16_to_cpu(*((u16 *) hin->major)),
456 hin->minor);
457 }
458 }
459
460 if (buf) {
461 buf->nframesout -= 1;
462 if (buf->nframesout == 0 && buf->resid == 0) {
463 n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
464 bio_endio(buf->bio, buf->bio->bi_size, n);
465 mempool_free(buf, d->bufpool);
466 }
467 }
468
469 f->buf = NULL;
470 f->tag = FREETAG;
471
472 aoecmd_work(d);
473
474 sl = d->skblist;
475 d->skblist = NULL;
476
477 spin_unlock_irqrestore(&d->lock, flags);
478
479 aoenet_xmit(sl);
480}
481
482void
483aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
484{
485 struct aoe_hdr *h;
486 struct aoe_cfghdr *ch;
487 struct sk_buff *skb, *sl;
488 struct net_device *ifp;
489 u16 aoe_type = __constant_cpu_to_be16(ETH_P_AOE);
490 u16 net_aoemajor = __cpu_to_be16(aoemajor);
491
492 sl = NULL;
493
494 read_lock(&dev_base_lock);
495 for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
496 dev_hold(ifp);
497 if (!is_aoe_netif(ifp))
498 continue;
499
500 skb = new_skb(ifp, sizeof *h + sizeof *ch);
501 if (skb == NULL) {
502 printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
503 continue;
504 }
505 h = (struct aoe_hdr *) skb->mac.raw;
506 memset(h, 0, sizeof *h + sizeof *ch);
507
508 memset(h->dst, 0xff, sizeof h->dst);
509 memcpy(h->src, ifp->dev_addr, sizeof h->src);
510 memcpy(h->type, &aoe_type, sizeof aoe_type);
511 h->verfl = AOE_HVER;
512 memcpy(h->major, &net_aoemajor, sizeof net_aoemajor);
513 h->minor = aoeminor;
514 h->cmd = AOECMD_CFG;
515
516 skb->next = sl;
517 sl = skb;
518 }
519 read_unlock(&dev_base_lock);
520
521 aoenet_xmit(sl);
522}
523
524/*
525 * Since we only call this in one place (and it only prepares one frame)
526 * we just return the skb. Usually we'd chain it up to the d->skblist.
527 */
528static struct sk_buff *
529aoecmd_ata_id(struct aoedev *d)
530{
531 struct aoe_hdr *h;
532 struct aoe_atahdr *ah;
533 struct frame *f;
534 struct sk_buff *skb;
535
536 f = getframe(d, FREETAG);
537 if (f == NULL) {
538 printk(KERN_CRIT "aoe: aoecmd_ata_id: can't get a frame. "
539 "This shouldn't happen.\n");
540 return NULL;
541 }
542
543 /* initialize the headers & frame */
544 h = (struct aoe_hdr *) f->data;
545 ah = (struct aoe_atahdr *) (h+1);
546 f->ndata = sizeof *h + sizeof *ah;
547 memset(h, 0, f->ndata);
548 f->tag = aoehdr_atainit(d, h);
549 f->waited = 0;
550 f->writedatalen = 0;
551
552 /* this message initializes the device, so we reset the rttavg */
553 d->rttavg = MAXTIMER;
554
555 /* set up ata header */
556 ah->scnt = 1;
557 ah->cmdstat = WIN_IDENTIFY;
558 ah->lba3 = 0xa0;
559
560 skb = skb_prepare(d, f);
561
562 /* we now want to start the rexmit tracking */
563 d->flags &= ~DEVFL_TKILL;
564 d->timer.data = (ulong) d;
565 d->timer.function = rexmit_timer;
566 d->timer.expires = jiffies + TIMERTICK;
567 add_timer(&d->timer);
568
569 return skb;
570}
571
572void
573aoecmd_cfg_rsp(struct sk_buff *skb)
574{
575 struct aoedev *d;
576 struct aoe_hdr *h;
577 struct aoe_cfghdr *ch;
578 ulong flags, bufcnt, sysminor, aoemajor;
579 struct sk_buff *sl;
580 enum { MAXFRAMES = 8, MAXSYSMINOR = 255 };
581
582 h = (struct aoe_hdr *) skb->mac.raw;
583 ch = (struct aoe_cfghdr *) (h+1);
584
585 /*
586 * Enough people have their dip switches set backwards to
587 * warrant a loud message for this special case.
588 */
589 aoemajor = __be16_to_cpu(*((u16 *) h->major));
590 if (aoemajor == 0xfff) {
591 printk(KERN_CRIT "aoe: aoecmd_cfg_rsp: Warning: shelf "
592 "address is all ones. Check shelf dip switches\n");
593 return;
594 }
595
596 sysminor = SYSMINOR(aoemajor, h->minor);
597 if (sysminor > MAXSYSMINOR) {
598 printk(KERN_INFO "aoe: aoecmd_cfg_rsp: sysminor %ld too "
599 "large\n", sysminor);
600 return;
601 }
602
603 bufcnt = __be16_to_cpu(*((u16 *) ch->bufcnt));
604 if (bufcnt > MAXFRAMES) /* keep it reasonable */
605 bufcnt = MAXFRAMES;
606
607 d = aoedev_set(sysminor, h->src, skb->dev, bufcnt);
608 if (d == NULL) {
609 printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device set failure\n");
610 return;
611 }
612
613 spin_lock_irqsave(&d->lock, flags);
614
615 if (d->flags & (DEVFL_UP | DEVFL_CLOSEWAIT)) {
616 spin_unlock_irqrestore(&d->lock, flags);
617 return;
618 }
619
620 d->fw_ver = __be16_to_cpu(*((u16 *) ch->fwver));
621
622 /* we get here only if the device is new */
623 sl = aoecmd_ata_id(d);
624
625 spin_unlock_irqrestore(&d->lock, flags);
626
627 aoenet_xmit(sl);
628}
629
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
new file mode 100644
index 000000000000..240abaec159b
--- /dev/null
+++ b/drivers/block/aoe/aoedev.c
@@ -0,0 +1,180 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoedev.c
4 * AoE device utility functions; maintains device list.
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/netdevice.h>
10#include "aoe.h"
11
12static struct aoedev *devlist;
13static spinlock_t devlist_lock;
14
15struct aoedev *
16aoedev_bymac(unsigned char *macaddr)
17{
18 struct aoedev *d;
19 ulong flags;
20
21 spin_lock_irqsave(&devlist_lock, flags);
22
23 for (d=devlist; d; d=d->next)
24 if (!memcmp(d->addr, macaddr, 6))
25 break;
26
27 spin_unlock_irqrestore(&devlist_lock, flags);
28 return d;
29}
30
31/* called with devlist lock held */
32static struct aoedev *
33aoedev_newdev(ulong nframes)
34{
35 struct aoedev *d;
36 struct frame *f, *e;
37
38 d = kcalloc(1, sizeof *d, GFP_ATOMIC);
39 if (d == NULL)
40 return NULL;
41 f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
42 if (f == NULL) {
43 kfree(d);
44 return NULL;
45 }
46
47 d->nframes = nframes;
48 d->frames = f;
49 e = f + nframes;
50 for (; f<e; f++)
51 f->tag = FREETAG;
52
53 spin_lock_init(&d->lock);
54 init_timer(&d->timer);
55 d->bufpool = NULL; /* defer to aoeblk_gdalloc */
56 INIT_LIST_HEAD(&d->bufq);
57 d->next = devlist;
58 devlist = d;
59
60 return d;
61}
62
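/*
 * Take the device down: stop the retransmit timer, fail every frame in
 * flight and every queued bio with -EIO, and clear DEVFL_UP.
 */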
63void
64aoedev_downdev(struct aoedev *d)
65{
66 struct frame *f, *e;
67 struct buf *buf;
68 struct bio *bio;
69
70 d->flags |= DEVFL_TKILL;
71 del_timer(&d->timer);
72
73 f = d->frames;
74 e = f + d->nframes;
75 for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) {
76 if (f->tag == FREETAG || f->buf == NULL)
77 continue;
78 buf = f->buf;
79 bio = buf->bio;
80 if (--buf->nframesout == 0) {
81 mempool_free(buf, d->bufpool);
82 bio_endio(bio, bio->bi_size, -EIO);
83 }
84 }
85 d->inprocess = NULL;
86
87 while (!list_empty(&d->bufq)) {
88 buf = container_of(d->bufq.next, struct buf, bufs);
89 list_del(d->bufq.next);
90 bio = buf->bio;
91 mempool_free(buf, d->bufpool);
92 bio_endio(bio, bio->bi_size, -EIO);
93 }
94
95 if (d->nopen)
96 d->flags |= DEVFL_CLOSEWAIT;
97 if (d->gd)
98 d->gd->capacity = 0;
99
100 d->flags &= ~DEVFL_UP;
101}
102
103struct aoedev *
104aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bufcnt)
105{
106 struct aoedev *d;
107 ulong flags;
108
109 spin_lock_irqsave(&devlist_lock, flags);
110
111 for (d=devlist; d; d=d->next)
112 if (d->sysminor == sysminor
113 || memcmp(d->addr, addr, sizeof d->addr) == 0)
114 break;
115
116 if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) {
117 spin_unlock_irqrestore(&devlist_lock, flags);
118 printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
119 return NULL;
120 }
121
122 spin_unlock_irqrestore(&devlist_lock, flags);
123 spin_lock_irqsave(&d->lock, flags);
124
125 d->ifp = ifp;
126
127 if (d->sysminor != sysminor
128 || memcmp(d->addr, addr, sizeof d->addr)
129 || (d->flags & DEVFL_UP) == 0) {
130 aoedev_downdev(d); /* flushes outstanding frames */
131 memcpy(d->addr, addr, sizeof d->addr);
132 d->sysminor = sysminor;
133 d->aoemajor = AOEMAJOR(sysminor);
134 d->aoeminor = AOEMINOR(sysminor);
135 }
136
137 spin_unlock_irqrestore(&d->lock, flags);
138 return d;
139}
140
141static void
142aoedev_freedev(struct aoedev *d)
143{
144 if (d->gd) {
145 aoedisk_rm_sysfs(d);
146 del_gendisk(d->gd);
147 put_disk(d->gd);
148 }
149 kfree(d->frames);
150 mempool_destroy(d->bufpool);
151 kfree(d);
152}
153
154void
155aoedev_exit(void)
156{
157 struct aoedev *d;
158 ulong flags;
159
160 flush_scheduled_work();
161
162 while ((d = devlist)) {
163 devlist = d->next;
164
165 spin_lock_irqsave(&d->lock, flags);
166 aoedev_downdev(d);
167 spin_unlock_irqrestore(&d->lock, flags);
168
169 del_timer_sync(&d->timer);
170 aoedev_freedev(d);
171 }
172}
173
174int __init
175aoedev_init(void)
176{
177 spin_lock_init(&devlist_lock);
178 return 0;
179}
180
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
new file mode 100644
index 000000000000..387588a3f4ba
--- /dev/null
+++ b/drivers/block/aoe/aoemain.c
@@ -0,0 +1,112 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoemain.c
4 * Module initialization routines, discover timer
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/module.h>
10#include "aoe.h"
11
12MODULE_LICENSE("GPL");
13MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>");
14MODULE_DESCRIPTION("AoE block/char driver for 2.6.[0-9]+");
15MODULE_VERSION(VERSION);
16
17enum { TINIT, TRUN, TKILL };
18
19static void
20discover_timer(ulong vp)
21{
22 static struct timer_list t;
23 static volatile ulong die;
24 static spinlock_t lock;
25 ulong flags;
26 enum { DTIMERTICK = HZ * 60 }; /* one minute */
27
28 switch (vp) {
29 case TINIT:
30 init_timer(&t);
31 spin_lock_init(&lock);
32 t.data = TRUN;
33 t.function = discover_timer;
34 die = 0;
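		/* fall through: TINIT also arms the timer and sends the
		 * first discovery broadcast */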
35 case TRUN:
36 spin_lock_irqsave(&lock, flags);
37 if (!die) {
38 t.expires = jiffies + DTIMERTICK;
39 add_timer(&t);
40 }
41 spin_unlock_irqrestore(&lock, flags);
42
43 aoecmd_cfg(0xffff, 0xff);
44 return;
45 case TKILL:
46 spin_lock_irqsave(&lock, flags);
47 die = 1;
48 spin_unlock_irqrestore(&lock, flags);
49
50 del_timer_sync(&t);
51 default:
52 return;
53 }
54}
55
56static void
57aoe_exit(void)
58{
59 discover_timer(TKILL);
60
61 aoenet_exit();
62 unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
63 aoechr_exit();
64 aoedev_exit();
65 aoeblk_exit(); /* free cache after de-allocating bufs */
66}
67
68static int __init
69aoe_init(void)
70{
71 int ret;
72
73 ret = aoedev_init();
74 if (ret)
75 return ret;
76 ret = aoechr_init();
77 if (ret)
78 goto chr_fail;
79 ret = aoeblk_init();
80 if (ret)
81 goto blk_fail;
82 ret = aoenet_init();
83 if (ret)
84 goto net_fail;
85 ret = register_blkdev(AOE_MAJOR, DEVICE_NAME);
86 if (ret < 0) {
87 printk(KERN_ERR "aoe: aoeblk_init: can't register major\n");
88 goto blkreg_fail;
89 }
90
91 printk(KERN_INFO
92 "aoe: aoe_init: AoE v2.6-%s initialised.\n",
93 VERSION);
94 discover_timer(TINIT);
95 return 0;
96
97 blkreg_fail:
98 aoenet_exit();
99 net_fail:
100 aoeblk_exit();
101 blk_fail:
102 aoechr_exit();
103 chr_fail:
104 aoedev_exit();
105
106 printk(KERN_INFO "aoe: aoe_init: initialisation failure.\n");
107 return ret;
108}
109
110module_init(aoe_init);
111module_exit(aoe_exit);
112
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
new file mode 100644
index 000000000000..cc1945b8d52b
--- /dev/null
+++ b/drivers/block/aoe/aoenet.c
@@ -0,0 +1,172 @@
1/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoenet.c
4 * Ethernet portion of AoE driver
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/netdevice.h>
10#include "aoe.h"
11
12#define NECODES 5
13
14static char *aoe_errlist[] =
15{
16 "no such error",
17 "unrecognized command code",
18 "bad argument parameter",
19 "device unavailable",
20 "config string present",
21 "unsupported version"
22};
23
24enum {
25 IFLISTSZ = 1024,
26};
27
28static char aoe_iflist[IFLISTSZ];
29
30int
31is_aoe_netif(struct net_device *ifp)
32{
33 register char *p, *q;
34 register int len;
35
36 if (aoe_iflist[0] == '\0')
37 return 1;
38
39 for (p = aoe_iflist; *p; p = q + strspn(q, WHITESPACE)) {
40 q = p + strcspn(p, WHITESPACE);
41 if (q != p)
42 len = q - p;
43 else
44 len = strlen(p); /* last token in aoe_iflist */
45
46 if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
47 return 1;
48 if (q == p)
49 break;
50 }
51
52 return 0;
53}
54
55int
56set_aoe_iflist(const char __user *user_str, size_t size)
57{
58 if (size >= IFLISTSZ)
59 return -EINVAL;
60
61 if (copy_from_user(aoe_iflist, user_str, size)) {
62 printk(KERN_INFO "aoe: %s: copy from user failed\n", __FUNCTION__);
63 return -EFAULT;
64 }
65 aoe_iflist[size] = 0x00;
66 return 0;
67}
68
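/* pack the six MAC bytes into the low 48 bits of a u64 so callers can
 * print the address with a single %012llx */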
69u64
70mac_addr(char addr[6])
71{
72 u64 n = 0;
73 char *p = (char *) &n;
74
75	memcpy(p + 2, addr, 6);	/* can't use sizeof addr: the array parameter decays to a pointer */
76
77 return __be64_to_cpu(n);
78}
79
80static struct sk_buff *
81skb_check(struct sk_buff *skb)
82{
83 if (skb_is_nonlinear(skb))
84 if ((skb = skb_share_check(skb, GFP_ATOMIC)))
85 if (skb_linearize(skb, GFP_ATOMIC) < 0) {
86 dev_kfree_skb(skb);
87 return NULL;
88 }
89 return skb;
90}
91
92void
93aoenet_xmit(struct sk_buff *sl)
94{
95 struct sk_buff *skb;
96
97 while ((skb = sl)) {
98 sl = sl->next;
99 skb->next = skb->prev = NULL;
100 dev_queue_xmit(skb);
101 }
102}
103
104/*
105 * (1) on receive, skb->len doesn't include the Ethernet header; we want it included, so push it back on below.
106 */
107static int
108aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt)
109{
110 struct aoe_hdr *h;
111 ulong n;
112
113 skb = skb_check(skb);
114 if (!skb)
115 return 0;
116
117 if (!is_aoe_netif(ifp))
118 goto exit;
119
120 //skb->len += ETH_HLEN; /* (1) */
121 skb_push(skb, ETH_HLEN); /* (1) */
122
123 h = (struct aoe_hdr *) skb->mac.raw;
124 n = __be32_to_cpu(*((u32 *) h->tag));
125 if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
126 goto exit;
127
128 if (h->verfl & AOEFL_ERR) {
129 n = h->err;
130 if (n > NECODES)
131 n = 0;
132 if (net_ratelimit())
133 printk(KERN_ERR "aoe: aoenet_rcv: error packet from %d.%d; "
134 "ecode=%d '%s'\n",
135 __be16_to_cpu(*((u16 *) h->major)), h->minor,
136 h->err, aoe_errlist[n]);
137 goto exit;
138 }
139
140 switch (h->cmd) {
141 case AOECMD_ATA:
142 aoecmd_ata_rsp(skb);
143 break;
144 case AOECMD_CFG:
145 aoecmd_cfg_rsp(skb);
146 break;
147 default:
148 printk(KERN_INFO "aoe: aoenet_rcv: unknown cmd %d\n", h->cmd);
149 }
150exit:
151 dev_kfree_skb(skb);
152 return 0;
153}
154
155static struct packet_type aoe_pt = {
156 .type = __constant_htons(ETH_P_AOE),
157 .func = aoenet_rcv,
158};
159
160int __init
161aoenet_init(void)
162{
163 dev_add_pack(&aoe_pt);
164 return 0;
165}
166
167void
168aoenet_exit(void)
169{
170 dev_remove_pack(&aoe_pt);
171}
172
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
new file mode 100644
index 000000000000..a9575bb58a5e
--- /dev/null
+++ b/drivers/block/as-iosched.c
@@ -0,0 +1,2136 @@
1/*
2 * linux/drivers/block/as-iosched.c
3 *
4 * Anticipatory & deadline i/o scheduler.
5 *
6 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
7 * Nick Piggin <piggin@cyberone.com.au>
8 *
9 */
10#include <linux/kernel.h>
11#include <linux/fs.h>
12#include <linux/blkdev.h>
13#include <linux/elevator.h>
14#include <linux/bio.h>
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/compiler.h>
20#include <linux/hash.h>
21#include <linux/rbtree.h>
22#include <linux/interrupt.h>
23
24#define REQ_SYNC 1
25#define REQ_ASYNC 0
26
27/*
28 * See Documentation/block/as-iosched.txt
29 */
30
31/*
32 * max time before a read is submitted.
33 */
34#define default_read_expire (HZ / 8)
35
36/*
37 * ditto for writes, these limits are not hard, even
38 * if the disk is capable of satisfying them.
39 */
40#define default_write_expire (HZ / 4)
41
42/*
43 * read_batch_expire describes how long we will allow a stream of reads to
44 * persist before looking to see whether it is time to switch over to writes.
45 */
46#define default_read_batch_expire (HZ / 2)
47
48/*
49 * write_batch_expire describes how long we want a stream of writes to run for.
50 * This is not a hard limit, but a target we set for the auto-tuning thingy.
51 * See, the problem is: we can send a lot of writes to disk cache / TCQ in
52 * a short amount of time...
53 */
54#define default_write_batch_expire (HZ / 8)
55
56/*
57 * max time we may wait to anticipate a read (default around 6ms)
58 */
59#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
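/* e.g. with HZ == 1000 this is 6 jiffies (~6 ms); the conditional keeps it
 * at least one jiffy when HZ / 150 truncates to zero (e.g. HZ == 100) */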
60
61/*
62 * Keep track of up to 20ms thinktimes. We can go as big as we like here,
63 * however huge values tend to interfere and not decay fast enough. A program
64 * might be in a non-io phase of operation. Waiting on user input for example,
65 * or doing a lengthy computation. A small penalty can be justified there, and
66 * will still catch out those processes that constantly have large thinktimes.
67 */
68#define MAX_THINKTIME (HZ/50UL)
69
70/* Bits in as_io_context.state */
71enum as_io_states {
72	AS_TASK_RUNNING=0,	/* Process has not exited */
73 AS_TASK_IOSTARTED, /* Process has started some IO */
74 AS_TASK_IORUNNING, /* Process has completed some IO */
75};
76
77enum anticipation_status {
78 ANTIC_OFF=0, /* Not anticipating (normal operation) */
79 ANTIC_WAIT_REQ, /* The last read has not yet completed */
80 ANTIC_WAIT_NEXT, /* Currently anticipating a request vs
81 last read (which has completed) */
82 ANTIC_FINISHED, /* Anticipating but have found a candidate
83 * or timed out */
84};
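Roughly, these states are driven as follows by the helpers further down (as_antic_waitreq, as_completed_request, as_antic_stop, as_antic_timeout and as_move_to_dispatch); this is a reading of that code, not an exhaustive list of transitions:

/*
 * ANTIC_OFF       -> ANTIC_WAIT_REQ   as_antic_waitreq(), anticipated read still in flight
 * ANTIC_OFF       -> ANTIC_WAIT_NEXT  as_antic_waitreq() when that read has already finished
 * ANTIC_WAIT_REQ  -> ANTIC_WAIT_NEXT  as_completed_request() on the anticipated read
 * ANTIC_WAIT_*    -> ANTIC_FINISHED   as_antic_stop() (good request found) or as_antic_timeout()
 * ANTIC_FINISHED  -> ANTIC_OFF        as_move_to_dispatch() sends the chosen request out
 */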
85
86struct as_data {
87 /*
88 * run time data
89 */
90
91 struct request_queue *q; /* the "owner" queue */
92
93 /*
94 * requests (as_rq s) are present on both sort_list and fifo_list
95 */
96 struct rb_root sort_list[2];
97 struct list_head fifo_list[2];
98
99 struct as_rq *next_arq[2]; /* next in sort order */
100 sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
101 struct list_head *dispatch; /* driver dispatch queue */
102 struct list_head *hash; /* request hash */
103
104 unsigned long exit_prob; /* probability a task will exit while
105 being waited on */
106 unsigned long new_ttime_total; /* mean thinktime on new proc */
107 unsigned long new_ttime_mean;
108 u64 new_seek_total; /* mean seek on new proc */
109 sector_t new_seek_mean;
110
111 unsigned long current_batch_expires;
112 unsigned long last_check_fifo[2];
113 int changed_batch; /* 1: waiting for old batch to end */
114 int new_batch; /* 1: waiting on first read complete */
115 int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */
116 int write_batch_count; /* max # of reqs in a write batch */
117 int current_write_count; /* how many requests left this batch */
118 int write_batch_idled; /* has the write batch gone idle? */
119 mempool_t *arq_pool;
120
121 enum anticipation_status antic_status;
122 unsigned long antic_start; /* jiffies: when it started */
123 struct timer_list antic_timer; /* anticipatory scheduling timer */
124 struct work_struct antic_work; /* Deferred unplugging */
125 struct io_context *io_context; /* Identify the expected process */
126 int ioc_finished; /* IO associated with io_context is finished */
127 int nr_dispatched;
128
129 /*
130 * settings that change how the i/o scheduler behaves
131 */
132 unsigned long fifo_expire[2];
133 unsigned long batch_expire[2];
134 unsigned long antic_expire;
135};
136
137#define list_entry_fifo(ptr) list_entry((ptr), struct as_rq, fifo)
138
139/*
140 * per-request data.
141 */
142enum arq_state {
143 AS_RQ_NEW=0, /* New - not referenced and not on any lists */
144 AS_RQ_QUEUED, /* In the request queue. It belongs to the
145 scheduler */
146 AS_RQ_DISPATCHED, /* On the dispatch list. It belongs to the
147 driver now */
148 AS_RQ_PRESCHED, /* Debug poisoning for requests being used */
149 AS_RQ_REMOVED,
150 AS_RQ_MERGED,
151 AS_RQ_POSTSCHED, /* when they shouldn't be */
152};
153
154struct as_rq {
155 /*
156 * rbtree index, key is the starting offset
157 */
158 struct rb_node rb_node;
159 sector_t rb_key;
160
161 struct request *request;
162
163 struct io_context *io_context; /* The submitting task */
164
165 /*
166 * request hash, key is the ending offset (for back merge lookup)
167 */
168 struct list_head hash;
169 unsigned int on_hash;
170
171 /*
172 * expire fifo
173 */
174 struct list_head fifo;
175 unsigned long expires;
176
177 unsigned int is_sync;
178 enum arq_state state;
179};
180
181#define RQ_DATA(rq) ((struct as_rq *) (rq)->elevator_private)
182
183static kmem_cache_t *arq_pool;
184
185/*
186 * IO Context helper functions
187 */
188
189/* Called to deallocate the as_io_context */
190static void free_as_io_context(struct as_io_context *aic)
191{
192 kfree(aic);
193}
194
195/* Called when the task exits */
196static void exit_as_io_context(struct as_io_context *aic)
197{
198 WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
199 clear_bit(AS_TASK_RUNNING, &aic->state);
200}
201
202static struct as_io_context *alloc_as_io_context(void)
203{
204 struct as_io_context *ret;
205
206 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
207 if (ret) {
208 ret->dtor = free_as_io_context;
209 ret->exit = exit_as_io_context;
210 ret->state = 1 << AS_TASK_RUNNING;
211 atomic_set(&ret->nr_queued, 0);
212 atomic_set(&ret->nr_dispatched, 0);
213 spin_lock_init(&ret->lock);
214 ret->ttime_total = 0;
215 ret->ttime_samples = 0;
216 ret->ttime_mean = 0;
217 ret->seek_total = 0;
218 ret->seek_samples = 0;
219 ret->seek_mean = 0;
220 }
221
222 return ret;
223}
224
225/*
226 * If the current task has no AS IO context then create one and initialise it.
227 * Then take a ref on the task's io context and return it.
228 */
229static struct io_context *as_get_io_context(void)
230{
231 struct io_context *ioc = get_io_context(GFP_ATOMIC);
232 if (ioc && !ioc->aic) {
233 ioc->aic = alloc_as_io_context();
234 if (!ioc->aic) {
235 put_io_context(ioc);
236 ioc = NULL;
237 }
238 }
239 return ioc;
240}
241
242/*
243 * the back merge hash support functions
244 */
245static const int as_hash_shift = 6;
246#define AS_HASH_BLOCK(sec) ((sec) >> 3)
247#define AS_HASH_FN(sec) (hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
248#define AS_HASH_ENTRIES (1 << as_hash_shift)
249#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
250#define list_entry_hash(ptr) list_entry((ptr), struct as_rq, hash)
251
252static inline void __as_del_arq_hash(struct as_rq *arq)
253{
254 arq->on_hash = 0;
255 list_del_init(&arq->hash);
256}
257
258static inline void as_del_arq_hash(struct as_rq *arq)
259{
260 if (arq->on_hash)
261 __as_del_arq_hash(arq);
262}
263
264static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
265{
266 as_del_arq_hash(arq);
267
268 if (q->last_merge == arq->request)
269 q->last_merge = NULL;
270}
271
272static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
273{
274 struct request *rq = arq->request;
275
276 BUG_ON(arq->on_hash);
277
278 arq->on_hash = 1;
279 list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
280}
281
282/*
283 * move hot entry to front of chain
284 */
285static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
286{
287 struct request *rq = arq->request;
288 struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
289
290 if (!arq->on_hash) {
291 WARN_ON(1);
292 return;
293 }
294
295 if (arq->hash.prev != head) {
296 list_del(&arq->hash);
297 list_add(&arq->hash, head);
298 }
299}
300
301static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
302{
303 struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
304 struct list_head *entry, *next = hash_list->next;
305
306 while ((entry = next) != hash_list) {
307 struct as_rq *arq = list_entry_hash(entry);
308 struct request *__rq = arq->request;
309
310 next = entry->next;
311
312 BUG_ON(!arq->on_hash);
313
314 if (!rq_mergeable(__rq)) {
315 as_remove_merge_hints(ad->q, arq);
316 continue;
317 }
318
319 if (rq_hash_key(__rq) == offset)
320 return __rq;
321 }
322
323 return NULL;
324}
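The hash above is keyed on a request's end sector, so a bio whose start sector equals some queued request's end can be found quickly and back-merged onto it. A minimal userspace sketch of that idea follows; the types, bucket count and hash function are simplified stand-ins, not the kernel ones:

#include <stdio.h>

#define NR_BUCKETS 64

struct simple_rq {
	unsigned long long sector;		/* start sector */
	unsigned long long nr_sectors;		/* length in sectors */
	struct simple_rq *hash_next;
};

static struct simple_rq *buckets[NR_BUCKETS];

/* hash on the END of the request, like rq_hash_key()/AS_HASH_FN above */
static unsigned hash_end(unsigned long long end_sector)
{
	return (unsigned)((end_sector >> 3) % NR_BUCKETS);
}

static void add_rq(struct simple_rq *rq)
{
	unsigned b = hash_end(rq->sector + rq->nr_sectors);

	rq->hash_next = buckets[b];
	buckets[b] = rq;
}

/* find a queued request whose end matches the start of a new bio */
static struct simple_rq *find_back_merge(unsigned long long bio_start)
{
	struct simple_rq *rq;

	for (rq = buckets[hash_end(bio_start)]; rq; rq = rq->hash_next)
		if (rq->sector + rq->nr_sectors == bio_start)
			return rq;
	return NULL;
}

int main(void)
{
	struct simple_rq rq = { .sector = 1000, .nr_sectors = 8 };

	add_rq(&rq);
	printf("bio at 1008 back-merges: %s\n", find_back_merge(1008) ? "yes" : "no");
	printf("bio at 1016 back-merges: %s\n", find_back_merge(1016) ? "yes" : "no");
	return 0;
}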
325
326/*
327 * rb tree support functions
328 */
329#define RB_NONE (2)
330#define RB_EMPTY(root) ((root)->rb_node == NULL)
331#define ON_RB(node) ((node)->rb_color != RB_NONE)
332#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
333#define rb_entry_arq(node) rb_entry((node), struct as_rq, rb_node)
334#define ARQ_RB_ROOT(ad, arq) (&(ad)->sort_list[(arq)->is_sync])
335#define rq_rb_key(rq) (rq)->sector
336
337/*
338 * as_find_first_arq finds the first (lowest sector numbered) request
339 * for the specified data_dir. Used to sweep back to the start of the disk
340 * (1-way elevator) after we process the last (highest sector) request.
341 */
342static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
343{
344 struct rb_node *n = ad->sort_list[data_dir].rb_node;
345
346 if (n == NULL)
347 return NULL;
348
349 for (;;) {
350 if (n->rb_left == NULL)
351 return rb_entry_arq(n);
352
353 n = n->rb_left;
354 }
355}
356
357/*
358 * Add the request to the rb tree if it is unique. If there is an alias (an
359 * existing request against the same sector), which can happen when using
360 * direct IO, then return the alias.
361 */
362static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
363{
364 struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
365 struct rb_node *parent = NULL;
366 struct as_rq *__arq;
367 struct request *rq = arq->request;
368
369 arq->rb_key = rq_rb_key(rq);
370
371 while (*p) {
372 parent = *p;
373 __arq = rb_entry_arq(parent);
374
375 if (arq->rb_key < __arq->rb_key)
376 p = &(*p)->rb_left;
377 else if (arq->rb_key > __arq->rb_key)
378 p = &(*p)->rb_right;
379 else
380 return __arq;
381 }
382
383 rb_link_node(&arq->rb_node, parent, p);
384 rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
385
386 return NULL;
387}
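A stripped-down illustration of the alias case follows, using a plain binary search tree keyed by start sector instead of the kernel rbtree; the names here are illustrative only:

#include <stddef.h>

struct node {
	unsigned long long key;			/* request start sector */
	struct node *left, *right;
};

/*
 * Insert n into the tree at *root. If a node with the same start sector
 * already exists (e.g. two direct-IO requests against the same block),
 * leave the tree untouched and return the existing node - the "alias" -
 * which mirrors what as_add_arq_rb() does.
 */
static struct node *insert_or_alias(struct node **root, struct node *n)
{
	struct node **p = root;

	while (*p) {
		if (n->key < (*p)->key)
			p = &(*p)->left;
		else if (n->key > (*p)->key)
			p = &(*p)->right;
		else
			return *p;
	}
	n->left = n->right = NULL;
	*p = n;
	return NULL;
}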
388
389static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
390{
391 if (!ON_RB(&arq->rb_node)) {
392 WARN_ON(1);
393 return;
394 }
395
396 rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
397 RB_CLEAR(&arq->rb_node);
398}
399
400static struct request *
401as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
402{
403 struct rb_node *n = ad->sort_list[data_dir].rb_node;
404 struct as_rq *arq;
405
406 while (n) {
407 arq = rb_entry_arq(n);
408
409 if (sector < arq->rb_key)
410 n = n->rb_left;
411 else if (sector > arq->rb_key)
412 n = n->rb_right;
413 else
414 return arq->request;
415 }
416
417 return NULL;
418}
419
420/*
421 * IO Scheduler proper
422 */
423
424#define MAXBACK (1024 * 1024) /*
425 * Maximum distance the disk will go backward
426 * for a request.
427 */
428
429#define BACK_PENALTY 2
430
431/*
432 * as_choose_req selects the preferred one of two requests of the same data_dir
433 * ignoring time - eg. timeouts, which is the job of as_dispatch_request
434 */
435static struct as_rq *
436as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
437{
438 int data_dir;
439 sector_t last, s1, s2, d1, d2;
440 int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */
441 const sector_t maxback = MAXBACK;
442
443 if (arq1 == NULL || arq1 == arq2)
444 return arq2;
445 if (arq2 == NULL)
446 return arq1;
447
448 data_dir = arq1->is_sync;
449
450 last = ad->last_sector[data_dir];
451 s1 = arq1->request->sector;
452 s2 = arq2->request->sector;
453
454 BUG_ON(data_dir != arq2->is_sync);
455
456 /*
457 * Strict one way elevator _except_ in the case where we allow
458 * short backward seeks which are biased as twice the cost of a
459 * similar forward seek.
460 */
461 if (s1 >= last)
462 d1 = s1 - last;
463 else if (s1+maxback >= last)
464 d1 = (last - s1)*BACK_PENALTY;
465 else {
466 r1_wrap = 1;
467 d1 = 0; /* shut up, gcc */
468 }
469
470 if (s2 >= last)
471 d2 = s2 - last;
472 else if (s2+maxback >= last)
473 d2 = (last - s2)*BACK_PENALTY;
474 else {
475 r2_wrap = 1;
476 d2 = 0;
477 }
478
479 /* Found required data */
480 if (!r1_wrap && r2_wrap)
481 return arq1;
482 else if (!r2_wrap && r1_wrap)
483 return arq2;
484 else if (r1_wrap && r2_wrap) {
485 /* both behind the head */
486 if (s1 <= s2)
487 return arq1;
488 else
489 return arq2;
490 }
491
492 /* Both requests in front of the head */
493 if (d1 < d2)
494 return arq1;
495 else if (d2 < d1)
496 return arq2;
497 else {
498 if (s1 >= s2)
499 return arq1;
500 else
501 return arq2;
502 }
503}
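The effect of the backward-seek penalty is easier to see with concrete numbers. Below is a minimal userspace sketch of the distance calculation only (MAXBACK and BACK_PENALTY copied from above, sector_t simplified to unsigned long long; the tie-breaking rules are left out):

#include <stdio.h>

#define MAXBACK (1024 * 1024)	/* max backward distance, in sectors */
#define BACK_PENALTY 2

/*
 * Cost of moving the head from last to s, or -1 if the request is too far
 * behind the head and must wait for the next sweep of the one-way elevator.
 */
static long long seek_cost(unsigned long long last, unsigned long long s)
{
	if (s >= last)
		return (long long)(s - last);
	if (s + MAXBACK >= last)
		return (long long)(last - s) * BACK_PENALTY;
	return -1;
}

int main(void)
{
	unsigned long long last = 2000000;

	/*
	 * 100 sectors ahead costs 100, 100 sectors behind costs 200, so the
	 * forward request wins even though the raw distances are equal.
	 */
	printf("ahead     : %lld\n", seek_cost(last, last + 100));
	printf("behind    : %lld\n", seek_cost(last, last - 100));

	/* more than MAXBACK behind the head: left for the next sweep */
	printf("far behind: %lld\n", seek_cost(last, 100));
	return 0;
}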
504
505/*
506 * as_find_next_arq finds the next request after @prev in elevator order.
507 * this with as_choose_req form the basis for how the scheduler chooses
508 * what request to process next. Anticipation works on top of this.
509 */
510static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
511{
512 const int data_dir = last->is_sync;
513 struct as_rq *ret;
514 struct rb_node *rbnext = rb_next(&last->rb_node);
515 struct rb_node *rbprev = rb_prev(&last->rb_node);
516 struct as_rq *arq_next, *arq_prev;
517
518 BUG_ON(!ON_RB(&last->rb_node));
519
520 if (rbprev)
521 arq_prev = rb_entry_arq(rbprev);
522 else
523 arq_prev = NULL;
524
525 if (rbnext)
526 arq_next = rb_entry_arq(rbnext);
527 else {
528 arq_next = as_find_first_arq(ad, data_dir);
529 if (arq_next == last)
530 arq_next = NULL;
531 }
532
533 ret = as_choose_req(ad, arq_next, arq_prev);
534
535 return ret;
536}
537
538/*
539 * anticipatory scheduling functions follow
540 */
541
542/*
543 * as_antic_expired tells us when we have anticipated too long.
544 * The funny "absolute difference" math on the elapsed time is to handle
545 * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
546 */
547static int as_antic_expired(struct as_data *ad)
548{
549 long delta_jif;
550
551 delta_jif = jiffies - ad->antic_start;
552 if (unlikely(delta_jif < 0))
553 delta_jif = -delta_jif;
554 if (delta_jif < ad->antic_expire)
555 return 0;
556
557 return 1;
558}
559
560/*
561 * as_antic_waitnext starts anticipating that a nice request will soon be
562 * submitted. See also as_antic_waitreq
563 */
564static void as_antic_waitnext(struct as_data *ad)
565{
566 unsigned long timeout;
567
568 BUG_ON(ad->antic_status != ANTIC_OFF
569 && ad->antic_status != ANTIC_WAIT_REQ);
570
571 timeout = ad->antic_start + ad->antic_expire;
572
573 mod_timer(&ad->antic_timer, timeout);
574
575 ad->antic_status = ANTIC_WAIT_NEXT;
576}
577
578/*
579 * as_antic_waitreq starts anticipating. We don't start timing the anticipation
580 * until the request that we're anticipating on has finished. This means we
581 * are, hopefully, timing from when the candidate process wakes up.
582 */
583static void as_antic_waitreq(struct as_data *ad)
584{
585 BUG_ON(ad->antic_status == ANTIC_FINISHED);
586 if (ad->antic_status == ANTIC_OFF) {
587 if (!ad->io_context || ad->ioc_finished)
588 as_antic_waitnext(ad);
589 else
590 ad->antic_status = ANTIC_WAIT_REQ;
591 }
592}
593
594/*
595 * This is called directly by the functions in this file to stop anticipation.
596 * We kill the timer and schedule a call to the request_fn asap.
597 */
598static void as_antic_stop(struct as_data *ad)
599{
600 int status = ad->antic_status;
601
602 if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
603 if (status == ANTIC_WAIT_NEXT)
604 del_timer(&ad->antic_timer);
605 ad->antic_status = ANTIC_FINISHED;
606 /* see as_work_handler */
607 kblockd_schedule_work(&ad->antic_work);
608 }
609}
610
611/*
612 * as_antic_timeout is the timer function set by as_antic_waitnext.
613 */
614static void as_antic_timeout(unsigned long data)
615{
616 struct request_queue *q = (struct request_queue *)data;
617 struct as_data *ad = q->elevator->elevator_data;
618 unsigned long flags;
619
620 spin_lock_irqsave(q->queue_lock, flags);
621 if (ad->antic_status == ANTIC_WAIT_REQ
622 || ad->antic_status == ANTIC_WAIT_NEXT) {
623 struct as_io_context *aic = ad->io_context->aic;
624
625 ad->antic_status = ANTIC_FINISHED;
626 kblockd_schedule_work(&ad->antic_work);
627
628 if (aic->ttime_samples == 0) {
629 /* process anticipated on has exited or timed out */
630 ad->exit_prob = (7*ad->exit_prob + 256)/8;
631 }
632 }
633 spin_unlock_irqrestore(q->queue_lock, flags);
634}
635
636/*
637 * as_close_req decides if one request is considered "close" to the
638 * previous one issued.
639 */
640static int as_close_req(struct as_data *ad, struct as_rq *arq)
641{
642 unsigned long delay; /* milliseconds */
643 sector_t last = ad->last_sector[ad->batch_data_dir];
644 sector_t next = arq->request->sector;
645 sector_t delta; /* acceptable close offset (in sectors) */
646
647 if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
648 delay = 0;
649 else
650 delay = ((jiffies - ad->antic_start) * 1000) / HZ;
651
652 if (delay <= 1)
653 delta = 64;
654 else if (delay <= 20 && delay <= ad->antic_expire)
655 delta = 64 << (delay-1);
656 else
657 return 1;
658
659 return (last - (delta>>1) <= next) && (next <= last + delta);
660}
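As a worked example: if the last issued request ended at sector 10000 and about 2ms have elapsed since anticipation started (with 2 still inside the limits checked above), delta becomes 64 << 1 = 128, so any request starting between sector 9936 and sector 10128 counts as close; once the elapsed time exceeds those limits, every request is treated as close.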
661
662/*
663 * as_can_break_anticipation returns true if we have been anticipating this
664 * request.
665 *
666 * It also returns true if the process against which we are anticipating
667 * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
668 * dispatch it ASAP, because we know that application will not be submitting
669 * any new reads.
670 *
671 * If the task which has submitted the request has exited, break anticipation.
672 *
673 * If this task has queued some other IO, do not enter anticipation.
674 */
675static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
676{
677 struct io_context *ioc;
678 struct as_io_context *aic;
679 sector_t s;
680
681 ioc = ad->io_context;
682 BUG_ON(!ioc);
683
684 if (arq && ioc == arq->io_context) {
685 /* request from same process */
686 return 1;
687 }
688
689 if (ad->ioc_finished && as_antic_expired(ad)) {
690 /*
691 * In this situation status should really be FINISHED,
692 * however the timer hasn't had the chance to run yet.
693 */
694 return 1;
695 }
696
697 aic = ioc->aic;
698 if (!aic)
699 return 0;
700
701 if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
702 /* process anticipated on has exited */
703 if (aic->ttime_samples == 0)
704 ad->exit_prob = (7*ad->exit_prob + 256)/8;
705 return 1;
706 }
707
708 if (atomic_read(&aic->nr_queued) > 0) {
709 /* process has more requests queued */
710 return 1;
711 }
712
713 if (atomic_read(&aic->nr_dispatched) > 0) {
714 /* process has more requests dispatched */
715 return 1;
716 }
717
718 if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
719 /*
720 * Found a close request that is not one of ours.
721 *
722 * This makes close requests from another process reset
723 * our thinktime delay. This is generally useful when there
724 * are two or more cooperating processes working in the same
725 * area.
726 */
727 spin_lock(&aic->lock);
728 aic->last_end_request = jiffies;
729 spin_unlock(&aic->lock);
730 return 1;
731 }
732
733
734 if (aic->ttime_samples == 0) {
735 if (ad->new_ttime_mean > ad->antic_expire)
736 return 1;
737 if (ad->exit_prob > 128)
738 return 1;
739 } else if (aic->ttime_mean > ad->antic_expire) {
740 /* the process thinks too much between requests */
741 return 1;
742 }
743
744 if (!arq)
745 return 0;
746
747 if (ad->last_sector[REQ_SYNC] < arq->request->sector)
748 s = arq->request->sector - ad->last_sector[REQ_SYNC];
749 else
750 s = ad->last_sector[REQ_SYNC] - arq->request->sector;
751
752 if (aic->seek_samples == 0) {
753 /*
754 * Process has just started IO. Use past statistics to
755 * gauge the likelihood of success
756 */
757 if (ad->new_seek_mean > s) {
758 /* this request is better than what we're expecting */
759 return 1;
760 }
761
762 } else {
763 if (aic->seek_mean > s) {
764 /* this request is better than what we're expecting */
765 return 1;
766 }
767 }
768
769 return 0;
770}
771
772/*
773 * as_can_anticipate indicates whether we should either run arq
774 * or keep anticipating a better request.
775 */
776static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
777{
778 if (!ad->io_context)
779 /*
780 * Last request submitted was a write
781 */
782 return 0;
783
784 if (ad->antic_status == ANTIC_FINISHED)
785 /*
786 * Don't restart if we have just finished. Run the next request
787 */
788 return 0;
789
790 if (as_can_break_anticipation(ad, arq))
791 /*
792 * This request is a good candidate. Don't keep anticipating,
793 * run it.
794 */
795 return 0;
796
797 /*
798 * OK from here, we haven't finished, and don't have a decent request!
799 * Status is either ANTIC_OFF so start waiting,
800 * ANTIC_WAIT_REQ so continue waiting for request to finish
801 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
802 *
803 */
804
805 return 1;
806}
807
808static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
809{
810 /* fixed point: 1.0 == 1<<8 */
811 if (aic->ttime_samples == 0) {
812 ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
813 ad->new_ttime_mean = ad->new_ttime_total / 256;
814
815 ad->exit_prob = (7*ad->exit_prob)/8;
816 }
817 aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
818 aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
819 aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
820}
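The averages above are fixed point with 1.0 == 1<<8, so ttime_samples saturates at 256 and ttime_mean behaves like an exponentially weighted mean with a 7/8 decay per sample. A small userspace sketch of the same arithmetic, fed with made-up thinktimes:

#include <stdio.h>

int main(void)
{
	unsigned long samples = 0, total = 0, mean = 0;
	unsigned long ttimes[] = { 10, 10, 10, 2, 2, 2, 2, 2 };
	size_t i;

	for (i = 0; i < sizeof(ttimes) / sizeof(ttimes[0]); i++) {
		samples = (7 * samples + 256) / 8;	/* saturates at 256 */
		total   = (7 * total + 256 * ttimes[i]) / 8;
		mean    = (total + 128) / samples;
		printf("after ttime %2lu: mean = %lu\n", ttimes[i], mean);
	}
	return 0;
}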
821
822static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
823{
824 u64 total;
825
826 if (aic->seek_samples == 0) {
827 ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
828 ad->new_seek_mean = ad->new_seek_total / 256;
829 }
830
831 /*
832 * Don't allow the seek distance to get too large from the
833 * odd fragment, pagein, etc
834 */
835 if (aic->seek_samples <= 60) /* second&third seek */
836 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
837 else
838 sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
839
840 aic->seek_samples = (7*aic->seek_samples + 256) / 8;
841 aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
842 total = aic->seek_total + (aic->seek_samples/2);
843 do_div(total, aic->seek_samples);
844 aic->seek_mean = (sector_t)total;
845}
846
847/*
848 * as_update_iohist keeps a decaying histogram of IO thinktimes, and
849 * updates @aic->ttime_mean based on that. It is called when a new
850 * request is queued.
851 */
852static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
853{
854 struct as_rq *arq = RQ_DATA(rq);
855 int data_dir = arq->is_sync;
856 unsigned long thinktime;
857 sector_t seek_dist;
858
859 if (aic == NULL)
860 return;
861
862 if (data_dir == REQ_SYNC) {
863 unsigned long in_flight = atomic_read(&aic->nr_queued)
864 + atomic_read(&aic->nr_dispatched);
865 spin_lock(&aic->lock);
866 if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
867 test_bit(AS_TASK_IOSTARTED, &aic->state)) {
868 /* Calculate read -> read thinktime */
869 if (test_bit(AS_TASK_IORUNNING, &aic->state)
870 && in_flight == 0) {
871 thinktime = jiffies - aic->last_end_request;
872 thinktime = min(thinktime, MAX_THINKTIME-1);
873 } else
874 thinktime = 0;
875 as_update_thinktime(ad, aic, thinktime);
876
877 /* Calculate read -> read seek distance */
878 if (aic->last_request_pos < rq->sector)
879 seek_dist = rq->sector - aic->last_request_pos;
880 else
881 seek_dist = aic->last_request_pos - rq->sector;
882 as_update_seekdist(ad, aic, seek_dist);
883 }
884 aic->last_request_pos = rq->sector + rq->nr_sectors;
885 set_bit(AS_TASK_IOSTARTED, &aic->state);
886 spin_unlock(&aic->lock);
887 }
888}
889
890/*
891 * as_update_arq must be called whenever a request (arq) is added to
892 * the sort_list. This function keeps caches up to date, and checks if the
893 * request might be one we are "anticipating"
894 */
895static void as_update_arq(struct as_data *ad, struct as_rq *arq)
896{
897 const int data_dir = arq->is_sync;
898
899 /* keep the next_arq cache up to date */
900 ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
901
902 /*
903 * have we been anticipating this request?
904 * or does it come from the same process as the one we are anticipating
905 * for?
906 */
907 if (ad->antic_status == ANTIC_WAIT_REQ
908 || ad->antic_status == ANTIC_WAIT_NEXT) {
909 if (as_can_break_anticipation(ad, arq))
910 as_antic_stop(ad);
911 }
912}
913
914/*
915 * Gathers timings and resizes the write batch automatically
916 */
917static void update_write_batch(struct as_data *ad)
918{
919 unsigned long batch = ad->batch_expire[REQ_ASYNC];
920 long write_time;
921
922 write_time = (jiffies - ad->current_batch_expires) + batch;
923 if (write_time < 0)
924 write_time = 0;
925
926 if (write_time > batch && !ad->write_batch_idled) {
927 if (write_time > batch * 3)
928 ad->write_batch_count /= 2;
929 else
930 ad->write_batch_count--;
931 } else if (write_time < batch && ad->current_write_count == 0) {
932 if (batch > write_time * 3)
933 ad->write_batch_count *= 2;
934 else
935 ad->write_batch_count++;
936 }
937
938 if (ad->write_batch_count < 1)
939 ad->write_batch_count = 1;
940}
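As a concrete illustration (assuming HZ=1000 purely for the arithmetic, so the default write batch target is 125ms): a batch that did not go idle and measures about 400ms (more than three times the target) halves write_batch_count, one at about 150ms decrements it by one; a batch whose write_batch_count requests all complete in about 30ms doubles the count, and one finishing in about 100ms increments it by one. The count is never allowed to drop below 1.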
941
942/*
943 * as_completed_request is to be called when a request has completed and
944 * returned something to the requesting process, be it an error or data.
945 */
946static void as_completed_request(request_queue_t *q, struct request *rq)
947{
948 struct as_data *ad = q->elevator->elevator_data;
949 struct as_rq *arq = RQ_DATA(rq);
950
951 WARN_ON(!list_empty(&rq->queuelist));
952
953 if (arq->state == AS_RQ_PRESCHED) {
954 WARN_ON(arq->io_context);
955 goto out;
956 }
957
958 if (arq->state == AS_RQ_MERGED)
959 goto out_ioc;
960
961 if (arq->state != AS_RQ_REMOVED) {
962 printk("arq->state %d\n", arq->state);
963 WARN_ON(1);
964 goto out;
965 }
966
967 if (!blk_fs_request(rq))
968 goto out;
969
970 if (ad->changed_batch && ad->nr_dispatched == 1) {
971 kblockd_schedule_work(&ad->antic_work);
972 ad->changed_batch = 0;
973
974 if (ad->batch_data_dir == REQ_SYNC)
975 ad->new_batch = 1;
976 }
977 WARN_ON(ad->nr_dispatched == 0);
978 ad->nr_dispatched--;
979
980 /*
981 * Start counting the batch from when a request of that direction is
982 * actually serviced. This should help devices with big TCQ windows
983 * and writeback caches
984 */
985 if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
986 update_write_batch(ad);
987 ad->current_batch_expires = jiffies +
988 ad->batch_expire[REQ_SYNC];
989 ad->new_batch = 0;
990 }
991
992 if (ad->io_context == arq->io_context && ad->io_context) {
993 ad->antic_start = jiffies;
994 ad->ioc_finished = 1;
995 if (ad->antic_status == ANTIC_WAIT_REQ) {
996 /*
997 * We were waiting on this request, now anticipate
998 * the next one
999 */
1000 as_antic_waitnext(ad);
1001 }
1002 }
1003
1004out_ioc:
1005 if (!arq->io_context)
1006 goto out;
1007
1008 if (arq->is_sync == REQ_SYNC) {
1009 struct as_io_context *aic = arq->io_context->aic;
1010 if (aic) {
1011 spin_lock(&aic->lock);
1012 set_bit(AS_TASK_IORUNNING, &aic->state);
1013 aic->last_end_request = jiffies;
1014 spin_unlock(&aic->lock);
1015 }
1016 }
1017
1018 put_io_context(arq->io_context);
1019out:
1020 arq->state = AS_RQ_POSTSCHED;
1021}
1022
1023/*
1024 * as_remove_queued_request removes a request from the pre dispatch queue
1025 * without updating refcounts. It is expected the caller will drop the
1026 * reference unless it replaces the request at some part of the elevator
1027 * (ie. the dispatch queue)
1028 */
1029static void as_remove_queued_request(request_queue_t *q, struct request *rq)
1030{
1031 struct as_rq *arq = RQ_DATA(rq);
1032 const int data_dir = arq->is_sync;
1033 struct as_data *ad = q->elevator->elevator_data;
1034
1035 WARN_ON(arq->state != AS_RQ_QUEUED);
1036
1037 if (arq->io_context && arq->io_context->aic) {
1038 BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
1039 atomic_dec(&arq->io_context->aic->nr_queued);
1040 }
1041
1042 /*
1043 * Update the "next_arq" cache if we are about to remove its
1044 * entry
1045 */
1046 if (ad->next_arq[data_dir] == arq)
1047 ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
1048
1049 list_del_init(&arq->fifo);
1050 as_remove_merge_hints(q, arq);
1051 as_del_arq_rb(ad, arq);
1052}
1053
1054/*
1055 * as_remove_dispatched_request is called to remove a request which has gone
1056 * to the dispatch list.
1057 */
1058static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
1059{
1060 struct as_rq *arq = RQ_DATA(rq);
1061 struct as_io_context *aic;
1062
1063 if (!arq) {
1064 WARN_ON(1);
1065 return;
1066 }
1067
1068 WARN_ON(arq->state != AS_RQ_DISPATCHED);
1069 WARN_ON(ON_RB(&arq->rb_node));
1070 if (arq->io_context && arq->io_context->aic) {
1071 aic = arq->io_context->aic;
1072 if (aic) {
1073 WARN_ON(!atomic_read(&aic->nr_dispatched));
1074 atomic_dec(&aic->nr_dispatched);
1075 }
1076 }
1077}
1078
1079/*
1080 * as_remove_request is called when a driver has finished with a request.
1081 * This should only be called for dispatched requests, but for some reason
1082 * on a POWER4 box running hwscan it is not.
1083 */
1084static void as_remove_request(request_queue_t *q, struct request *rq)
1085{
1086 struct as_rq *arq = RQ_DATA(rq);
1087
1088 if (unlikely(arq->state == AS_RQ_NEW))
1089 goto out;
1090
1091 if (ON_RB(&arq->rb_node)) {
1092 if (arq->state != AS_RQ_QUEUED) {
1093 printk("arq->state %d\n", arq->state);
1094 WARN_ON(1);
1095 goto out;
1096 }
1097 /*
1098 * We'll lose the aliased request(s) here. I don't think this
1099 * will ever happen, but if it does, hopefully someone will
1100 * report it.
1101 */
1102 WARN_ON(!list_empty(&rq->queuelist));
1103 as_remove_queued_request(q, rq);
1104 } else {
1105 if (arq->state != AS_RQ_DISPATCHED) {
1106 printk("arq->state %d\n", arq->state);
1107 WARN_ON(1);
1108 goto out;
1109 }
1110 as_remove_dispatched_request(q, rq);
1111 }
1112out:
1113 arq->state = AS_RQ_REMOVED;
1114}
1115
1116/*
1117 * as_fifo_expired returns 0 if there are no expired reads on the fifo,
1118 * 1 otherwise. It is ratelimited so that we only perform the check once per
1119 * `fifo_expire' interval. Otherwise a large number of expired requests
1120 * would create a hopeless seekstorm.
1121 *
1122 * See as_antic_expired comment.
1123 */
1124static int as_fifo_expired(struct as_data *ad, int adir)
1125{
1126 struct as_rq *arq;
1127 long delta_jif;
1128
1129 delta_jif = jiffies - ad->last_check_fifo[adir];
1130 if (unlikely(delta_jif < 0))
1131 delta_jif = -delta_jif;
1132 if (delta_jif < ad->fifo_expire[adir])
1133 return 0;
1134
1135 ad->last_check_fifo[adir] = jiffies;
1136
1137 if (list_empty(&ad->fifo_list[adir]))
1138 return 0;
1139
1140 arq = list_entry_fifo(ad->fifo_list[adir].next);
1141
1142 return time_after(jiffies, arq->expires);
1143}
1144
1145/*
1146 * as_batch_expired returns true if the current batch has expired. A batch
1147 * is a set of reads or a set of writes.
1148 */
1149static inline int as_batch_expired(struct as_data *ad)
1150{
1151 if (ad->changed_batch || ad->new_batch)
1152 return 0;
1153
1154 if (ad->batch_data_dir == REQ_SYNC)
1155 /* TODO! add a check so a complete fifo gets written? */
1156 return time_after(jiffies, ad->current_batch_expires);
1157
1158 return time_after(jiffies, ad->current_batch_expires)
1159 || ad->current_write_count == 0;
1160}
1161
1162/*
1163 * move an entry to dispatch queue
1164 */
1165static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
1166{
1167 struct request *rq = arq->request;
1168 struct list_head *insert;
1169 const int data_dir = arq->is_sync;
1170
1171 BUG_ON(!ON_RB(&arq->rb_node));
1172
1173 as_antic_stop(ad);
1174 ad->antic_status = ANTIC_OFF;
1175
1176 /*
1177 * This has to be set in order to be correctly updated by
1178 * as_find_next_arq
1179 */
1180 ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
1181
1182 if (data_dir == REQ_SYNC) {
1183 /* In case we have to anticipate after this */
1184 copy_io_context(&ad->io_context, &arq->io_context);
1185 } else {
1186 if (ad->io_context) {
1187 put_io_context(ad->io_context);
1188 ad->io_context = NULL;
1189 }
1190
1191 if (ad->current_write_count != 0)
1192 ad->current_write_count--;
1193 }
1194 ad->ioc_finished = 0;
1195
1196 ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
1197
1198 /*
1199 * take it off the sort and fifo list, add to dispatch queue
1200 */
1201 insert = ad->dispatch->prev;
1202
1203 while (!list_empty(&rq->queuelist)) {
1204 struct request *__rq = list_entry_rq(rq->queuelist.next);
1205 struct as_rq *__arq = RQ_DATA(__rq);
1206
1207 list_move_tail(&__rq->queuelist, ad->dispatch);
1208
1209 if (__arq->io_context && __arq->io_context->aic)
1210 atomic_inc(&__arq->io_context->aic->nr_dispatched);
1211
1212 WARN_ON(__arq->state != AS_RQ_QUEUED);
1213 __arq->state = AS_RQ_DISPATCHED;
1214
1215 ad->nr_dispatched++;
1216 }
1217
1218 as_remove_queued_request(ad->q, rq);
1219 WARN_ON(arq->state != AS_RQ_QUEUED);
1220
1221 list_add(&rq->queuelist, insert);
1222 arq->state = AS_RQ_DISPATCHED;
1223 if (arq->io_context && arq->io_context->aic)
1224 atomic_inc(&arq->io_context->aic->nr_dispatched);
1225 ad->nr_dispatched++;
1226}
1227
1228/*
1229 * as_dispatch_request selects the best request according to
1230 * read/write expire, batch expire, etc, and moves it to the dispatch
1231 * queue. Returns 1 if a request was found, 0 otherwise.
1232 */
1233static int as_dispatch_request(struct as_data *ad)
1234{
1235 struct as_rq *arq;
1236 const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
1237 const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
1238
1239 /* Signal that the write batch was uncontended, so we can't time it */
1240 if (ad->batch_data_dir == REQ_ASYNC && !reads) {
1241 if (ad->current_write_count == 0 || !writes)
1242 ad->write_batch_idled = 1;
1243 }
1244
1245 if (!(reads || writes)
1246 || ad->antic_status == ANTIC_WAIT_REQ
1247 || ad->antic_status == ANTIC_WAIT_NEXT
1248 || ad->changed_batch)
1249 return 0;
1250
1251 if (!(reads && writes && as_batch_expired(ad)) ) {
1252 /*
1253 * batch is still running or no reads or no writes
1254 */
1255 arq = ad->next_arq[ad->batch_data_dir];
1256
1257 if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
1258 if (as_fifo_expired(ad, REQ_SYNC))
1259 goto fifo_expired;
1260
1261 if (as_can_anticipate(ad, arq)) {
1262 as_antic_waitreq(ad);
1263 return 0;
1264 }
1265 }
1266
1267 if (arq) {
1268 /* we have a "next request" */
1269 if (reads && !writes)
1270 ad->current_batch_expires =
1271 jiffies + ad->batch_expire[REQ_SYNC];
1272 goto dispatch_request;
1273 }
1274 }
1275
1276 /*
1277 * at this point we are not running a batch. select the appropriate
1278 * data direction (read / write)
1279 */
1280
1281 if (reads) {
1282 BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
1283
1284 if (writes && ad->batch_data_dir == REQ_SYNC)
1285 /*
1286 * Last batch was a read, switch to writes
1287 */
1288 goto dispatch_writes;
1289
1290 if (ad->batch_data_dir == REQ_ASYNC) {
1291 WARN_ON(ad->new_batch);
1292 ad->changed_batch = 1;
1293 }
1294 ad->batch_data_dir = REQ_SYNC;
1295 arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
1296 ad->last_check_fifo[ad->batch_data_dir] = jiffies;
1297 goto dispatch_request;
1298 }
1299
1300 /*
1301 * the last batch was a read
1302 */
1303
1304 if (writes) {
1305dispatch_writes:
1306 BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
1307
1308 if (ad->batch_data_dir == REQ_SYNC) {
1309 ad->changed_batch = 1;
1310
1311 /*
1312 * new_batch might be 1 when the queue runs out of
1313 * reads. A subsequent submission of a write might
1314 * cause a change of batch before the read is finished.
1315 */
1316 ad->new_batch = 0;
1317 }
1318 ad->batch_data_dir = REQ_ASYNC;
1319 ad->current_write_count = ad->write_batch_count;
1320 ad->write_batch_idled = 0;
1321 arq = ad->next_arq[ad->batch_data_dir];
1322 goto dispatch_request;
1323 }
1324
1325 BUG();
1326 return 0;
1327
1328dispatch_request:
1329 /*
1330 * If a request has expired, service it.
1331 */
1332
1333 if (as_fifo_expired(ad, ad->batch_data_dir)) {
1334fifo_expired:
1335 arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
1336 BUG_ON(arq == NULL);
1337 }
1338
1339 if (ad->changed_batch) {
1340 WARN_ON(ad->new_batch);
1341
1342 if (ad->nr_dispatched)
1343 return 0;
1344
1345 if (ad->batch_data_dir == REQ_ASYNC)
1346 ad->current_batch_expires = jiffies +
1347 ad->batch_expire[REQ_ASYNC];
1348 else
1349 ad->new_batch = 1;
1350
1351 ad->changed_batch = 0;
1352 }
1353
1354 /*
1355 * arq is the selected appropriate request.
1356 */
1357 as_move_to_dispatch(ad, arq);
1358
1359 return 1;
1360}
1361
1362static struct request *as_next_request(request_queue_t *q)
1363{
1364 struct as_data *ad = q->elevator->elevator_data;
1365 struct request *rq = NULL;
1366
1367 /*
1368 * if there are still requests on the dispatch queue, grab the first
1369 */
1370 if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
1371 rq = list_entry_rq(ad->dispatch->next);
1372
1373 return rq;
1374}
1375
1376/*
1377 * Add arq to a list behind alias
1378 */
1379static inline void
1380as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
1381{
1382 struct request *req = arq->request;
1383 struct list_head *insert = alias->request->queuelist.prev;
1384
1385 /*
1386 * Transfer list of aliases
1387 */
1388 while (!list_empty(&req->queuelist)) {
1389 struct request *__rq = list_entry_rq(req->queuelist.next);
1390 struct as_rq *__arq = RQ_DATA(__rq);
1391
1392 list_move_tail(&__rq->queuelist, &alias->request->queuelist);
1393
1394 WARN_ON(__arq->state != AS_RQ_QUEUED);
1395 }
1396
1397 /*
1398 * Another request with the same start sector on the rbtree.
1399 * Link this request to that sector. They are untangled in
1400 * as_move_to_dispatch
1401 */
1402 list_add(&arq->request->queuelist, insert);
1403
1404 /*
1405 * Don't want to have to handle merges.
1406 */
1407 as_remove_merge_hints(ad->q, arq);
1408}
1409
1410/*
1411 * add arq to rbtree and fifo
1412 */
1413static void as_add_request(struct as_data *ad, struct as_rq *arq)
1414{
1415 struct as_rq *alias;
1416 int data_dir;
1417
1418 if (rq_data_dir(arq->request) == READ
1419 || current->flags&PF_SYNCWRITE)
1420 arq->is_sync = 1;
1421 else
1422 arq->is_sync = 0;
1423 data_dir = arq->is_sync;
1424
1425 arq->io_context = as_get_io_context();
1426
1427 if (arq->io_context) {
1428 as_update_iohist(ad, arq->io_context->aic, arq->request);
1429 atomic_inc(&arq->io_context->aic->nr_queued);
1430 }
1431
1432 alias = as_add_arq_rb(ad, arq);
1433 if (!alias) {
1434 /*
1435 * set expire time (only used for reads) and add to fifo list
1436 */
1437 arq->expires = jiffies + ad->fifo_expire[data_dir];
1438 list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
1439
1440 if (rq_mergeable(arq->request)) {
1441 as_add_arq_hash(ad, arq);
1442
1443 if (!ad->q->last_merge)
1444 ad->q->last_merge = arq->request;
1445 }
1446 as_update_arq(ad, arq); /* keep state machine up to date */
1447
1448 } else {
1449 as_add_aliased_request(ad, arq, alias);
1450
1451 /*
1452 * have we been anticipating this request?
1453 * or does it come from the same process as the one we are
1454 * anticipating for?
1455 */
1456 if (ad->antic_status == ANTIC_WAIT_REQ
1457 || ad->antic_status == ANTIC_WAIT_NEXT) {
1458 if (as_can_break_anticipation(ad, arq))
1459 as_antic_stop(ad);
1460 }
1461 }
1462
1463 arq->state = AS_RQ_QUEUED;
1464}
1465
1466static void as_deactivate_request(request_queue_t *q, struct request *rq)
1467{
1468 struct as_data *ad = q->elevator->elevator_data;
1469 struct as_rq *arq = RQ_DATA(rq);
1470
1471 if (arq) {
1472 if (arq->state == AS_RQ_REMOVED) {
1473 arq->state = AS_RQ_DISPATCHED;
1474 if (arq->io_context && arq->io_context->aic)
1475 atomic_inc(&arq->io_context->aic->nr_dispatched);
1476 }
1477 } else
1478 WARN_ON(blk_fs_request(rq)
1479 && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
1480
1481 /* Stop anticipating - let this request get through */
1482 as_antic_stop(ad);
1483}
1484
1485/*
1486 * requeue the request. The request has not been completed, nor is it a
1487 * new request, so don't touch accounting.
1488 */
1489static void as_requeue_request(request_queue_t *q, struct request *rq)
1490{
1491 as_deactivate_request(q, rq);
1492 list_add(&rq->queuelist, &q->queue_head);
1493}
1494
1495/*
1496 * Account a request that is inserted directly onto the dispatch queue.
1497 * arq->io_context->aic->nr_dispatched should not need to be incremented
1498 * because only new requests should come through here: requeues go through
1499 * our explicit requeue handler.
1500 */
1501static void as_account_queued_request(struct as_data *ad, struct request *rq)
1502{
1503 if (blk_fs_request(rq)) {
1504 struct as_rq *arq = RQ_DATA(rq);
1505 arq->state = AS_RQ_DISPATCHED;
1506 ad->nr_dispatched++;
1507 }
1508}
1509
1510static void
1511as_insert_request(request_queue_t *q, struct request *rq, int where)
1512{
1513 struct as_data *ad = q->elevator->elevator_data;
1514 struct as_rq *arq = RQ_DATA(rq);
1515
1516 if (arq) {
1517 if (arq->state != AS_RQ_PRESCHED) {
1518 printk("arq->state: %d\n", arq->state);
1519 WARN_ON(1);
1520 }
1521 arq->state = AS_RQ_NEW;
1522 }
1523
1524 /* barriers must flush the reorder queue */
1525 if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
1526 && where == ELEVATOR_INSERT_SORT)) {
1527 WARN_ON(1);
1528 where = ELEVATOR_INSERT_BACK;
1529 }
1530
1531 switch (where) {
1532 case ELEVATOR_INSERT_BACK:
1533 while (ad->next_arq[REQ_SYNC])
1534 as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
1535
1536 while (ad->next_arq[REQ_ASYNC])
1537 as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
1538
1539 list_add_tail(&rq->queuelist, ad->dispatch);
1540 as_account_queued_request(ad, rq);
1541 as_antic_stop(ad);
1542 break;
1543 case ELEVATOR_INSERT_FRONT:
1544 list_add(&rq->queuelist, ad->dispatch);
1545 as_account_queued_request(ad, rq);
1546 as_antic_stop(ad);
1547 break;
1548 case ELEVATOR_INSERT_SORT:
1549 BUG_ON(!blk_fs_request(rq));
1550 as_add_request(ad, arq);
1551 break;
1552 default:
1553 BUG();
1554 return;
1555 }
1556}
1557
1558/*
1559 * as_queue_empty tells us if there are requests left in the device. It may
1560 * not be the case that a driver can get the next request even if the queue
1561 * is not empty - it is used in the block layer to check for plugging and
1562 * merging opportunities
1563 */
1564static int as_queue_empty(request_queue_t *q)
1565{
1566 struct as_data *ad = q->elevator->elevator_data;
1567
1568 if (!list_empty(&ad->fifo_list[REQ_ASYNC])
1569 || !list_empty(&ad->fifo_list[REQ_SYNC])
1570 || !list_empty(ad->dispatch))
1571 return 0;
1572
1573 return 1;
1574}
1575
1576static struct request *
1577as_former_request(request_queue_t *q, struct request *rq)
1578{
1579 struct as_rq *arq = RQ_DATA(rq);
1580 struct rb_node *rbprev = rb_prev(&arq->rb_node);
1581 struct request *ret = NULL;
1582
1583 if (rbprev)
1584 ret = rb_entry_arq(rbprev)->request;
1585
1586 return ret;
1587}
1588
1589static struct request *
1590as_latter_request(request_queue_t *q, struct request *rq)
1591{
1592 struct as_rq *arq = RQ_DATA(rq);
1593 struct rb_node *rbnext = rb_next(&arq->rb_node);
1594 struct request *ret = NULL;
1595
1596 if (rbnext)
1597 ret = rb_entry_arq(rbnext)->request;
1598
1599 return ret;
1600}
1601
1602static int
1603as_merge(request_queue_t *q, struct request **req, struct bio *bio)
1604{
1605 struct as_data *ad = q->elevator->elevator_data;
1606 sector_t rb_key = bio->bi_sector + bio_sectors(bio);
1607 struct request *__rq;
1608 int ret;
1609
1610 /*
1611 * try last_merge to avoid going to hash
1612 */
1613 ret = elv_try_last_merge(q, bio);
1614 if (ret != ELEVATOR_NO_MERGE) {
1615 __rq = q->last_merge;
1616 goto out_insert;
1617 }
1618
1619 /*
1620 * see if the merge hash can satisfy a back merge
1621 */
1622 __rq = as_find_arq_hash(ad, bio->bi_sector);
1623 if (__rq) {
1624 BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
1625
1626 if (elv_rq_merge_ok(__rq, bio)) {
1627 ret = ELEVATOR_BACK_MERGE;
1628 goto out;
1629 }
1630 }
1631
1632 /*
1633 * check for front merge
1634 */
1635 __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
1636 if (__rq) {
1637 BUG_ON(rb_key != rq_rb_key(__rq));
1638
1639 if (elv_rq_merge_ok(__rq, bio)) {
1640 ret = ELEVATOR_FRONT_MERGE;
1641 goto out;
1642 }
1643 }
1644
1645 return ELEVATOR_NO_MERGE;
1646out:
1647 if (rq_mergeable(__rq))
1648 q->last_merge = __rq;
1649out_insert:
1650 if (ret) {
1651 if (rq_mergeable(__rq))
1652 as_hot_arq_hash(ad, RQ_DATA(__rq));
1653 }
1654 *req = __rq;
1655 return ret;
1656}
1657
1658static void as_merged_request(request_queue_t *q, struct request *req)
1659{
1660 struct as_data *ad = q->elevator->elevator_data;
1661 struct as_rq *arq = RQ_DATA(req);
1662
1663 /*
1664 * hash always needs to be repositioned, key is end sector
1665 */
1666 as_del_arq_hash(arq);
1667 as_add_arq_hash(ad, arq);
1668
1669 /*
1670 * if the merge was a front merge, we need to reposition request
1671 */
1672 if (rq_rb_key(req) != arq->rb_key) {
1673 struct as_rq *alias, *next_arq = NULL;
1674
1675 if (ad->next_arq[arq->is_sync] == arq)
1676 next_arq = as_find_next_arq(ad, arq);
1677
1678 /*
1679 * Note! We should really be moving any old aliased requests
1680 * off this request and try to insert them into the rbtree. We
1681 * currently don't bother. Ditto the next function.
1682 */
1683 as_del_arq_rb(ad, arq);
1684 if ((alias = as_add_arq_rb(ad, arq)) ) {
1685 list_del_init(&arq->fifo);
1686 as_add_aliased_request(ad, arq, alias);
1687 if (next_arq)
1688 ad->next_arq[arq->is_sync] = next_arq;
1689 }
1690 /*
1691 * Note! At this stage of this and the next function, our next
1692 * request may not be optimal - eg the request may have "grown"
1693 * behind the disk head. We currently don't bother adjusting.
1694 */
1695 }
1696
1697 if (arq->on_hash)
1698 q->last_merge = req;
1699}
1700
1701static void
1702as_merged_requests(request_queue_t *q, struct request *req,
1703 struct request *next)
1704{
1705 struct as_data *ad = q->elevator->elevator_data;
1706 struct as_rq *arq = RQ_DATA(req);
1707 struct as_rq *anext = RQ_DATA(next);
1708
1709 BUG_ON(!arq);
1710 BUG_ON(!anext);
1711
1712 /*
1713 * reposition arq (this is the merged request) in hash, and in rbtree
1714 * in case of a front merge
1715 */
1716 as_del_arq_hash(arq);
1717 as_add_arq_hash(ad, arq);
1718
1719 if (rq_rb_key(req) != arq->rb_key) {
1720 struct as_rq *alias, *next_arq = NULL;
1721
1722 if (ad->next_arq[arq->is_sync] == arq)
1723 next_arq = as_find_next_arq(ad, arq);
1724
1725 as_del_arq_rb(ad, arq);
1726 if ((alias = as_add_arq_rb(ad, arq)) ) {
1727 list_del_init(&arq->fifo);
1728 as_add_aliased_request(ad, arq, alias);
1729 if (next_arq)
1730 ad->next_arq[arq->is_sync] = next_arq;
1731 }
1732 }
1733
1734 /*
1735 * if anext expires before arq, assign its expire time to arq
1736 * and move into anext position (anext will be deleted) in fifo
1737 */
1738 if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
1739 if (time_before(anext->expires, arq->expires)) {
1740 list_move(&arq->fifo, &anext->fifo);
1741 arq->expires = anext->expires;
1742 /*
1743 * Don't copy here but swap, because when anext is
1744 * removed below, it must contain the unused context
1745 */
1746 swap_io_context(&arq->io_context, &anext->io_context);
1747 }
1748 }
1749
1750 /*
1751 * Transfer list of aliases
1752 */
1753 while (!list_empty(&next->queuelist)) {
1754 struct request *__rq = list_entry_rq(next->queuelist.next);
1755 struct as_rq *__arq = RQ_DATA(__rq);
1756
1757 list_move_tail(&__rq->queuelist, &req->queuelist);
1758
1759 WARN_ON(__arq->state != AS_RQ_QUEUED);
1760 }
1761
1762 /*
1763 * kill knowledge of next, this one is a goner
1764 */
1765 as_remove_queued_request(q, next);
1766
1767 anext->state = AS_RQ_MERGED;
1768}
1769
1770/*
1771 * This is executed in a "deferred" process context, by kblockd. It calls the
1772 * driver's request_fn so the driver can submit that request.
1773 *
1774 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
1775 * state before calling, and don't rely on any state over calls.
1776 *
1777 * FIXME! dispatch queue is not a queue at all!
1778 */
1779static void as_work_handler(void *data)
1780{
1781 struct request_queue *q = data;
1782 unsigned long flags;
1783
1784 spin_lock_irqsave(q->queue_lock, flags);
1785 if (as_next_request(q))
1786 q->request_fn(q);
1787 spin_unlock_irqrestore(q->queue_lock, flags);
1788}
1789
1790static void as_put_request(request_queue_t *q, struct request *rq)
1791{
1792 struct as_data *ad = q->elevator->elevator_data;
1793 struct as_rq *arq = RQ_DATA(rq);
1794
1795 if (!arq) {
1796 WARN_ON(1);
1797 return;
1798 }
1799
1800 if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
1801 printk("arq->state %d\n", arq->state);
1802 WARN_ON(1);
1803 }
1804
1805 mempool_free(arq, ad->arq_pool);
1806 rq->elevator_private = NULL;
1807}
1808
1809static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
1810{
1811 struct as_data *ad = q->elevator->elevator_data;
1812 struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
1813
1814 if (arq) {
1815 memset(arq, 0, sizeof(*arq));
1816 RB_CLEAR(&arq->rb_node);
1817 arq->request = rq;
1818 arq->state = AS_RQ_PRESCHED;
1819 arq->io_context = NULL;
1820 INIT_LIST_HEAD(&arq->hash);
1821 arq->on_hash = 0;
1822 INIT_LIST_HEAD(&arq->fifo);
1823 rq->elevator_private = arq;
1824 return 0;
1825 }
1826
1827 return 1;
1828}
1829
1830static int as_may_queue(request_queue_t *q, int rw)
1831{
1832 int ret = ELV_MQUEUE_MAY;
1833 struct as_data *ad = q->elevator->elevator_data;
1834 struct io_context *ioc;
1835 if (ad->antic_status == ANTIC_WAIT_REQ ||
1836 ad->antic_status == ANTIC_WAIT_NEXT) {
1837 ioc = as_get_io_context();
1838 if (ad->io_context == ioc)
1839 ret = ELV_MQUEUE_MUST;
1840 put_io_context(ioc);
1841 }
1842
1843 return ret;
1844}
1845
1846static void as_exit_queue(elevator_t *e)
1847{
1848 struct as_data *ad = e->elevator_data;
1849
1850 del_timer_sync(&ad->antic_timer);
1851 kblockd_flush();
1852
1853 BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
1854 BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
1855
1856 mempool_destroy(ad->arq_pool);
1857 put_io_context(ad->io_context);
1858 kfree(ad->hash);
1859 kfree(ad);
1860}
1861
1862/*
1863 * initialize elevator private data (as_data), and alloc a arq for
1864 * each request on the free lists
1865 */
1866static int as_init_queue(request_queue_t *q, elevator_t *e)
1867{
1868 struct as_data *ad;
1869 int i;
1870
1871 if (!arq_pool)
1872 return -ENOMEM;
1873
1874 ad = kmalloc(sizeof(*ad), GFP_KERNEL);
1875 if (!ad)
1876 return -ENOMEM;
1877 memset(ad, 0, sizeof(*ad));
1878
1879 ad->q = q; /* Identify what queue the data belongs to */
1880
1881 ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL);
1882 if (!ad->hash) {
1883 kfree(ad);
1884 return -ENOMEM;
1885 }
1886
1887 ad->arq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, arq_pool);
1888 if (!ad->arq_pool) {
1889 kfree(ad->hash);
1890 kfree(ad);
1891 return -ENOMEM;
1892 }
1893
1894 /* anticipatory scheduling helpers */
1895 ad->antic_timer.function = as_antic_timeout;
1896 ad->antic_timer.data = (unsigned long)q;
1897 init_timer(&ad->antic_timer);
1898 INIT_WORK(&ad->antic_work, as_work_handler, q);
1899
1900 for (i = 0; i < AS_HASH_ENTRIES; i++)
1901 INIT_LIST_HEAD(&ad->hash[i]);
1902
1903 INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
1904 INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
1905 ad->sort_list[REQ_SYNC] = RB_ROOT;
1906 ad->sort_list[REQ_ASYNC] = RB_ROOT;
1907 ad->dispatch = &q->queue_head;
1908 ad->fifo_expire[REQ_SYNC] = default_read_expire;
1909 ad->fifo_expire[REQ_ASYNC] = default_write_expire;
1910 ad->antic_expire = default_antic_expire;
1911 ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
1912 ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
1913 e->elevator_data = ad;
1914
1915 ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
1916 ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
1917 if (ad->write_batch_count < 2)
1918 ad->write_batch_count = 2;
1919
1920 return 0;
1921}
1922
1923/*
1924 * sysfs parts below
1925 */
1926struct as_fs_entry {
1927 struct attribute attr;
1928 ssize_t (*show)(struct as_data *, char *);
1929 ssize_t (*store)(struct as_data *, const char *, size_t);
1930};
1931
1932static ssize_t
1933as_var_show(unsigned int var, char *page)
1934{
1935 var = (var * 1000) / HZ;
1936 return sprintf(page, "%d\n", var);
1937}
1938
1939static ssize_t
1940as_var_store(unsigned long *var, const char *page, size_t count)
1941{
1942 unsigned long tmp;
1943 char *p = (char *) page;
1944
1945 tmp = simple_strtoul(p, &p, 10);
1946 if (tmp != 0) {
1947 tmp = (tmp * HZ) / 1000;
1948 if (tmp == 0)
1949 tmp = 1;
1950 }
1951 *var = tmp;
1952 return count;
1953}
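The store path above parses a value in milliseconds and converts it to jiffies, rounding any non-zero value up to at least one jiffy. A tiny userspace sketch of that conversion (HZ fixed at 250 here purely for illustration; this is not the kernel helper itself):

#include <stdio.h>

#define HZ 250

static unsigned long ms_to_jiffies(unsigned long ms)
{
	unsigned long j = (ms * HZ) / 1000;

	return (ms && !j) ? 1 : j;	/* non-zero input never becomes 0 jiffies */
}

int main(void)
{
	printf("3ms   -> %lu jiffies\n", ms_to_jiffies(3));	/* rounds up to 1 */
	printf("125ms -> %lu jiffies\n", ms_to_jiffies(125));	/* 31 */
	return 0;
}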
1954
1955static ssize_t as_est_show(struct as_data *ad, char *page)
1956{
1957 int pos = 0;
1958
1959 pos += sprintf(page+pos, "%lu %% exit probability\n", 100*ad->exit_prob/256);
1960 pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
1961 pos += sprintf(page+pos, "%llu sectors new seek distance\n", (unsigned long long)ad->new_seek_mean);
1962
1963 return pos;
1964}
1965
1966#define SHOW_FUNCTION(__FUNC, __VAR) \
1967static ssize_t __FUNC(struct as_data *ad, char *page) \
1968{ \
1969 return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
1970}
1971SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
1972SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
1973SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
1974SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
1975SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
1976#undef SHOW_FUNCTION
1977
1978#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
1979static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
1980{ \
1981 int ret = as_var_store(__PTR, (page), count); \
1982 if (*(__PTR) < (MIN)) \
1983 *(__PTR) = (MIN); \
1984 else if (*(__PTR) > (MAX)) \
1985 *(__PTR) = (MAX); \
1986 *(__PTR) = msecs_to_jiffies(*(__PTR)); \
1987 return ret; \
1988}
1989STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
1990STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
1991STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
1992STORE_FUNCTION(as_read_batchexpire_store,
1993 &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
1994STORE_FUNCTION(as_write_batchexpire_store,
1995 &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
1996#undef STORE_FUNCTION
1997
1998static struct as_fs_entry as_est_entry = {
1999 .attr = {.name = "est_time", .mode = S_IRUGO },
2000 .show = as_est_show,
2001};
2002static struct as_fs_entry as_readexpire_entry = {
2003 .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
2004 .show = as_readexpire_show,
2005 .store = as_readexpire_store,
2006};
2007static struct as_fs_entry as_writeexpire_entry = {
2008 .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
2009 .show = as_writeexpire_show,
2010 .store = as_writeexpire_store,
2011};
2012static struct as_fs_entry as_anticexpire_entry = {
2013 .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
2014 .show = as_anticexpire_show,
2015 .store = as_anticexpire_store,
2016};
2017static struct as_fs_entry as_read_batchexpire_entry = {
2018 .attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
2019 .show = as_read_batchexpire_show,
2020 .store = as_read_batchexpire_store,
2021};
2022static struct as_fs_entry as_write_batchexpire_entry = {
2023 .attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
2024 .show = as_write_batchexpire_show,
2025 .store = as_write_batchexpire_store,
2026};
2027
2028static struct attribute *default_attrs[] = {
2029 &as_est_entry.attr,
2030 &as_readexpire_entry.attr,
2031 &as_writeexpire_entry.attr,
2032 &as_anticexpire_entry.attr,
2033 &as_read_batchexpire_entry.attr,
2034 &as_write_batchexpire_entry.attr,
2035 NULL,
2036};
2037
2038#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
2039
2040static ssize_t
2041as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2042{
2043 elevator_t *e = container_of(kobj, elevator_t, kobj);
2044 struct as_fs_entry *entry = to_as(attr);
2045
2046 if (!entry->show)
2047 return 0;
2048
2049 return entry->show(e->elevator_data, page);
2050}
2051
2052static ssize_t
2053as_attr_store(struct kobject *kobj, struct attribute *attr,
2054 const char *page, size_t length)
2055{
2056 elevator_t *e = container_of(kobj, elevator_t, kobj);
2057 struct as_fs_entry *entry = to_as(attr);
2058
2059 if (!entry->store)
2060 return -EINVAL;
2061
2062 return entry->store(e->elevator_data, page, length);
2063}
2064
2065static struct sysfs_ops as_sysfs_ops = {
2066 .show = as_attr_show,
2067 .store = as_attr_store,
2068};
2069
2070static struct kobj_type as_ktype = {
2071 .sysfs_ops = &as_sysfs_ops,
2072 .default_attrs = default_attrs,
2073};
2074
2075static struct elevator_type iosched_as = {
2076 .ops = {
2077 .elevator_merge_fn = as_merge,
2078 .elevator_merged_fn = as_merged_request,
2079 .elevator_merge_req_fn = as_merged_requests,
2080 .elevator_next_req_fn = as_next_request,
2081 .elevator_add_req_fn = as_insert_request,
2082 .elevator_remove_req_fn = as_remove_request,
2083 .elevator_requeue_req_fn = as_requeue_request,
2084 .elevator_deactivate_req_fn = as_deactivate_request,
2085 .elevator_queue_empty_fn = as_queue_empty,
2086 .elevator_completed_req_fn = as_completed_request,
2087 .elevator_former_req_fn = as_former_request,
2088 .elevator_latter_req_fn = as_latter_request,
2089 .elevator_set_req_fn = as_set_request,
2090 .elevator_put_req_fn = as_put_request,
2091 .elevator_may_queue_fn = as_may_queue,
2092 .elevator_init_fn = as_init_queue,
2093 .elevator_exit_fn = as_exit_queue,
2094 },
2095
2096 .elevator_ktype = &as_ktype,
2097 .elevator_name = "anticipatory",
2098 .elevator_owner = THIS_MODULE,
2099};
2100
2101static int __init as_init(void)
2102{
2103 int ret;
2104
2105 arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
2106 0, 0, NULL, NULL);
2107 if (!arq_pool)
2108 return -ENOMEM;
2109
2110 ret = elv_register(&iosched_as);
2111 if (!ret) {
2112 /*
2113 * don't allow AS to get unregistered, since we would have
2114 * to browse all tasks in the system and release their
2115 * as_io_context first
2116 */
2117 __module_get(THIS_MODULE);
2118 return 0;
2119 }
2120
2121 kmem_cache_destroy(arq_pool);
2122 return ret;
2123}
2124
2125static void __exit as_exit(void)
2126{
2127 kmem_cache_destroy(arq_pool);
2128 elv_unregister(&iosched_as);
2129}
2130
2131module_init(as_init);
2132module_exit(as_exit);
2133
2134MODULE_AUTHOR("Nick Piggin");
2135MODULE_LICENSE("GPL");
2136MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
new file mode 100644
index 000000000000..db05a5a99f35
--- /dev/null
+++ b/drivers/block/ataflop.c
@@ -0,0 +1,2006 @@
1/*
2 * drivers/block/ataflop.c
3 *
4 * Copyright (C) 1993 Greg Harp
5 * Atari Support by Bjoern Brauel, Roman Hodek
6 *
7 * Big cleanup Sep 11..14 1994 Roman Hodek:
8 * - Driver now works interrupt driven
9 * - Support for two drives; should work, but I cannot test that :-(
 10 *  - Reading is done in whole tracks and buffered to speed things up
11 * - Disk change detection and drive deselecting after motor-off
12 * similar to TOS
13 * - Autodetection of disk format (DD/HD); untested yet, because I
14 * don't have an HD drive :-(
15 *
16 * Fixes Nov 13 1994 Martin Schaller:
17 * - Autodetection works now
18 * - Support for 5 1/4'' disks
19 * - Removed drive type (unknown on atari)
 20 *  - Do seeks with 8 MHz
21 *
22 * Changes by Andreas Schwab:
23 * - After errors in multiple read mode try again reading single sectors
24 * (Feb 1995):
25 * - Clean up error handling
26 * - Set blk_size for proper size checking
27 * - Initialize track register when testing presence of floppy
28 * - Implement some ioctl's
29 *
30 * Changes by Torsten Lang:
31 * - When probing the floppies we should add the FDCCMDADD_H flag since
32 * the FDC will otherwise wait forever when no disk is inserted...
33 *
34 * ++ Freddi Aschwanden (fa) 20.9.95 fixes for medusa:
35 * - MFPDELAY() after each FDC access -> atari
36 * - more/other disk formats
37 * - DMA to the block buffer directly if we have a 32bit DMA
38 * - for medusa, the step rate is always 3ms
39 * - on medusa, use only cache_push()
40 * Roman:
41 * - Make disk format numbering independent from minors
42 * - Let user set max. supported drive type (speeds up format
43 * detection, saves buffer space)
44 *
45 * Roman 10/15/95:
46 * - implement some more ioctls
47 * - disk formatting
48 *
49 * Andreas 95/12/12:
50 * - increase gap size at start of track for HD/ED disks
51 *
52 * Michael (MSch) 11/07/96:
53 * - implemented FDSETPRM and FDDEFPRM ioctl
54 *
55 * Andreas (97/03/19):
56 * - implemented missing BLK* ioctls
57 *
58 * Things left to do:
59 * - Formatting
60 * - Maybe a better strategy for disk change detection (does anyone
61 * know one?)
62 */
63
64#include <linux/module.h>
65
66#include <linux/fd.h>
67#include <linux/delay.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70
71#include <asm/atafd.h>
72#include <asm/atafdreg.h>
73#include <asm/atariints.h>
74#include <asm/atari_stdma.h>
75#include <asm/atari_stram.h>
76
77#define FD_MAX_UNITS 2
78
79#undef DEBUG
80
81static struct request_queue *floppy_queue;
82
83#define QUEUE (floppy_queue)
84#define CURRENT elv_next_request(floppy_queue)
85
86/* Disk types: DD, HD, ED */
87static struct atari_disk_type {
88 const char *name;
89 unsigned spt; /* sectors per track */
90 unsigned blocks; /* total number of blocks */
91 unsigned fdc_speed; /* fdc_speed setting */
92 unsigned stretch; /* track doubling ? */
93} disk_type[] = {
94 { "d360", 9, 720, 0, 0}, /* 0: 360kB diskette */
95 { "D360", 9, 720, 0, 1}, /* 1: 360kb in 720k or 1.2MB drive */
96 { "D720", 9,1440, 0, 0}, /* 2: 720kb in 720k or 1.2MB drive */
97 { "D820", 10,1640, 0, 0}, /* 3: DD disk with 82 tracks/10 sectors */
98/* formats above are probed for type DD */
99#define MAX_TYPE_DD 3
100 { "h1200",15,2400, 3, 0}, /* 4: 1.2MB diskette */
101 { "H1440",18,2880, 3, 0}, /* 5: 1.4 MB diskette (HD) */
102 { "H1640",20,3280, 3, 0}, /* 6: 1.64MB diskette (fat HD) 82 tr 20 sec */
103/* formats above are probed for types DD and HD */
104#define MAX_TYPE_HD 6
105 { "E2880",36,5760, 3, 0}, /* 7: 2.8 MB diskette (ED) */
106 { "E3280",40,6560, 3, 0}, /* 8: 3.2 MB diskette (fat ED) 82 tr 40 sec */
107/* formats above are probed for types DD, HD and ED */
108#define MAX_TYPE_ED 8
109/* types below are never autoprobed */
110 { "H1680",21,3360, 3, 0}, /* 9: 1.68MB diskette (fat HD) 80 tr 21 sec */
111 { "h410",10,820, 0, 1}, /* 10: 410k diskette 41 tr 10 sec, stretch */
112 { "h1476",18,2952, 3, 0}, /* 11: 1.48MB diskette 82 tr 18 sec */
113 { "H1722",21,3444, 3, 0}, /* 12: 1.72MB diskette 82 tr 21 sec */
114 { "h420",10,840, 0, 1}, /* 13: 420k diskette 42 tr 10 sec, stretch */
115 { "H830",10,1660, 0, 0}, /* 14: 820k diskette 83 tr 10 sec */
116 { "h1494",18,2952, 3, 0}, /* 15: 1.49MB diskette 83 tr 18 sec */
117 { "H1743",21,3486, 3, 0}, /* 16: 1.74MB diskette 83 tr 21 sec */
118 { "h880",11,1760, 0, 0}, /* 17: 880k diskette 80 tr 11 sec */
119 { "D1040",13,2080, 0, 0}, /* 18: 1.04MB diskette 80 tr 13 sec */
120 { "D1120",14,2240, 0, 0}, /* 19: 1.12MB diskette 80 tr 14 sec */
121 { "h1600",20,3200, 3, 0}, /* 20: 1.60MB diskette 80 tr 20 sec */
122 { "H1760",22,3520, 3, 0}, /* 21: 1.76MB diskette 80 tr 22 sec */
123 { "H1920",24,3840, 3, 0}, /* 22: 1.92MB diskette 80 tr 24 sec */
124 { "E3200",40,6400, 3, 0}, /* 23: 3.2MB diskette 80 tr 40 sec */
125 { "E3520",44,7040, 3, 0}, /* 24: 3.52MB diskette 80 tr 44 sec */
126 { "E3840",48,7680, 3, 0}, /* 25: 3.84MB diskette 80 tr 48 sec */
127 { "H1840",23,3680, 3, 0}, /* 26: 1.84MB diskette 80 tr 23 sec */
128 { "D800",10,1600, 0, 0}, /* 27: 800k diskette 80 tr 10 sec */
129};
130
131static int StartDiskType[] = {
132 MAX_TYPE_DD,
133 MAX_TYPE_HD,
134 MAX_TYPE_ED
135};
136
137#define TYPE_DD 0
138#define TYPE_HD 1
139#define TYPE_ED 2
140
141static int DriveType = TYPE_HD;
142
143static DEFINE_SPINLOCK(ataflop_lock);
144
145/* Array for translating minors into disk formats */
146static struct {
147 int index;
148 unsigned drive_types;
149} minor2disktype[] = {
150 { 0, TYPE_DD }, /* 1: d360 */
151 { 4, TYPE_HD }, /* 2: h1200 */
152 { 1, TYPE_DD }, /* 3: D360 */
153 { 2, TYPE_DD }, /* 4: D720 */
154 { 1, TYPE_DD }, /* 5: h360 = D360 */
155 { 2, TYPE_DD }, /* 6: h720 = D720 */
156 { 5, TYPE_HD }, /* 7: H1440 */
157 { 7, TYPE_ED }, /* 8: E2880 */
158/* some PC formats :-) */
159 { 8, TYPE_ED }, /* 9: E3280 <- was "CompaQ" == E2880 for PC */
160 { 5, TYPE_HD }, /* 10: h1440 = H1440 */
161 { 9, TYPE_HD }, /* 11: H1680 */
162 { 10, TYPE_DD }, /* 12: h410 */
163 { 3, TYPE_DD }, /* 13: H820 <- == D820, 82x10 */
164 { 11, TYPE_HD }, /* 14: h1476 */
165 { 12, TYPE_HD }, /* 15: H1722 */
166 { 13, TYPE_DD }, /* 16: h420 */
167 { 14, TYPE_DD }, /* 17: H830 */
168 { 15, TYPE_HD }, /* 18: h1494 */
169 { 16, TYPE_HD }, /* 19: H1743 */
170 { 17, TYPE_DD }, /* 20: h880 */
171 { 18, TYPE_DD }, /* 21: D1040 */
172 { 19, TYPE_DD }, /* 22: D1120 */
173 { 20, TYPE_HD }, /* 23: h1600 */
174 { 21, TYPE_HD }, /* 24: H1760 */
175 { 22, TYPE_HD }, /* 25: H1920 */
176 { 23, TYPE_ED }, /* 26: E3200 */
177 { 24, TYPE_ED }, /* 27: E3520 */
178 { 25, TYPE_ED }, /* 28: E3840 */
179 { 26, TYPE_HD }, /* 29: H1840 */
180 { 27, TYPE_DD }, /* 30: D800 */
181 { 6, TYPE_HD }, /* 31: H1640 <- was H1600 == h1600 for PC */
182};
183
184#define NUM_DISK_MINORS (sizeof(minor2disktype)/sizeof(*minor2disktype))
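/*
 * How a device minor selects a format (a summary of the lookups done by
 * floppy_find()/floppy_open() and fd_ioctl()/redo_fd_request() below):
 * drive = minor & 3, format index = minor >> 2.  Index 0 means "autoprobe";
 * otherwise (index - 1) picks an entry of minor2disktype[], whose .index
 * field in turn selects the disk_type[] entry to use.
 */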
185
186/*
187 * Maximum disk size (in kilobytes). This default is used whenever the
188 * current disk size is unknown.
189 */
190#define MAX_DISK_SIZE 3280
191
192/*
193 * MSch: User-provided type information. 'drive' points to
194 * the respective entry of this array. Set by FDSETPRM ioctls.
195 */
196static struct atari_disk_type user_params[FD_MAX_UNITS];
197
198/*
199 * User-provided permanent type information. 'drive' points to
200 * the respective entry of this array. Set by FDDEFPRM ioctls,
201 * restored upon disk change by floppy_revalidate() if valid (as seen by
202 * default_params[].blocks > 0 - a bit in unit[].flags might be used for this?)
203 */
204static struct atari_disk_type default_params[FD_MAX_UNITS];
205
206/* current info on each unit */
207static struct atari_floppy_struct {
208 int connected; /* !=0 : drive is connected */
209 int autoprobe; /* !=0 : do autoprobe */
210
211 struct atari_disk_type *disktype; /* current type of disk */
212
213 int track; /* current head position or -1 if
214 unknown */
215 unsigned int steprate; /* steprate setting */
216 unsigned int wpstat; /* current state of WP signal (for
217 disk change detection) */
218 int flags; /* flags */
219 struct gendisk *disk;
220 int ref;
221 int type;
222} unit[FD_MAX_UNITS];
223
224#define UD unit[drive]
225#define UDT unit[drive].disktype
226#define SUD unit[SelectedDrive]
227#define SUDT unit[SelectedDrive].disktype
228
229
230#define FDC_READ(reg) ({ \
231 /* unsigned long __flags; */ \
232 unsigned short __val; \
233 /* local_irq_save(__flags); */ \
234 dma_wd.dma_mode_status = 0x80 | (reg); \
235 udelay(25); \
236 __val = dma_wd.fdc_acces_seccount; \
237 MFPDELAY(); \
238 /* local_irq_restore(__flags); */ \
239 __val & 0xff; \
240})
241
242#define FDC_WRITE(reg,val) \
243 do { \
244 /* unsigned long __flags; */ \
245 /* local_irq_save(__flags); */ \
246 dma_wd.dma_mode_status = 0x80 | (reg); \
247 udelay(25); \
248 dma_wd.fdc_acces_seccount = (val); \
249 MFPDELAY(); \
250 /* local_irq_restore(__flags); */ \
251 } while(0)
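/*
 * Both macros funnel FDC register access through the ST-DMA chip: the
 * register selector is written to dma_mode_status (0x80 | reg), a 25 us
 * delay gives the FDC time to react, and the value itself is then moved
 * through the shared fdc_acces_seccount port.  MFPDELAY() adds the extra
 * bus delay noted in the Medusa fixes above; the commented-out irq
 * save/restore suggests callers are expected to hold the FDC exclusively
 * (see the stdma_lock() users below).
 */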
252
253
254/* Buffering variables:
255 * First, there is a DMA buffer in ST-RAM that is used for floppy DMA
256 * operations. Second, a track buffer is used to cache a whole track
257 * of the disk to save read operations. These are two separate buffers
258 * because that allows write operations without clearing the track buffer.
259 */
260
261static int MaxSectors[] = {
262 11, 22, 44
263};
264static int BufferSize[] = {
265 15*512, 30*512, 60*512
266};
267
268#define BUFFER_SIZE (BufferSize[DriveType])
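/*
 * Note: BUFFER_SIZE is larger than MaxSectors[] * 512 because the same
 * buffer also has to hold a raw track image (sector data plus gaps and
 * address marks) when formatting; see do_format()/fd_writetrack() below.
 */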
269
270unsigned char *DMABuffer; /* buffer for writes */
271static unsigned long PhysDMABuffer; /* physical address */
272
273static int UseTrackbuffer = -1; /* Do track buffering? */
274MODULE_PARM(UseTrackbuffer, "i");
275
276unsigned char *TrackBuffer; /* buffer for reads */
277static unsigned long PhysTrackBuffer; /* physical address */
278static int BufferDrive, BufferSide, BufferTrack;
279static int read_track; /* non-zero if we are reading whole tracks */
280
281#define SECTOR_BUFFER(sec) (TrackBuffer + ((sec)-1)*512)
282#define IS_BUFFERED(drive,side,track) \
283 (BufferDrive == (drive) && BufferSide == (side) && BufferTrack == (track))
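/*
 * Illustrative use of the track cache (a sketch of what do_fd_action()
 * does below): a read that hits the cached track is served from memory,
 *
 *	if (IS_BUFFERED(drive, ReqSide, ReqTrack) && ReqCmd == READ)
 *		copy_buffer(SECTOR_BUFFER(ReqSector), ReqData);
 *
 * while a write going through the hardware must also update
 * SECTOR_BUFFER(ReqSector) so the cached track stays consistent.
 */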
284
285/*
286 * These are global variables, as that's the easiest way to give
287 * information to interrupts. They are the data used for the current
288 * request.
289 */
290static int SelectedDrive = 0;
291static int ReqCmd, ReqBlock;
292static int ReqSide, ReqTrack, ReqSector, ReqCnt;
293static int HeadSettleFlag = 0;
294static unsigned char *ReqData, *ReqBuffer;
295static int MotorOn = 0, MotorOffTrys;
296static int IsFormatting = 0, FormatError;
297
298static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
299MODULE_PARM(UserSteprate, "1-" __MODULE_STRING(FD_MAX_UNITS) "i");
300
301/* Synchronization of FDC access. */
302static volatile int fdc_busy = 0;
303static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
304static DECLARE_WAIT_QUEUE_HEAD(format_wait);
305
306static unsigned long changed_floppies = 0xff, fake_change = 0;
307#define CHECK_CHANGE_DELAY HZ/2
308
309#define FD_MOTOR_OFF_DELAY (3*HZ)
310#define FD_MOTOR_OFF_MAXTRY (10*20)
311
312#define FLOPPY_TIMEOUT (6*HZ)
313#define RECALIBRATE_ERRORS 4 /* After this many errors the drive
314 * will be recalibrated. */
315#define MAX_ERRORS 8 /* After this many errors the driver
316 * will give up. */
317
318
319/*
320 * The driver is trying to determine the correct media format
321 * while Probing is set. fd_rwsec_done() clears it after a
322 * successful access.
323 */
324static int Probing = 0;
325
326/* This flag is set when a dummy seek is necessary to make the WP
327 * status bit accessible.
328 */
329static int NeedSeek = 0;
330
331
332#ifdef DEBUG
333#define DPRINT(a) printk a
334#else
335#define DPRINT(a)
336#endif
337
338/***************************** Prototypes *****************************/
339
340static void fd_select_side( int side );
341static void fd_select_drive( int drive );
342static void fd_deselect( void );
343static void fd_motor_off_timer( unsigned long dummy );
344static void check_change( unsigned long dummy );
345static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp);
346static void fd_error( void );
347static int do_format(int drive, int type, struct atari_format_descr *desc);
348static void do_fd_action( int drive );
349static void fd_calibrate( void );
350static void fd_calibrate_done( int status );
351static void fd_seek( void );
352static void fd_seek_done( int status );
353static void fd_rwsec( void );
354static void fd_readtrack_check( unsigned long dummy );
355static void fd_rwsec_done( int status );
356static void fd_rwsec_done1(int status);
357static void fd_writetrack( void );
358static void fd_writetrack_done( int status );
359static void fd_times_out( unsigned long dummy );
360static void finish_fdc( void );
361static void finish_fdc_done( int dummy );
362static void setup_req_params( int drive );
363static void redo_fd_request( void);
364static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
365 cmd, unsigned long param);
366static void fd_probe( int drive );
367static int fd_test_drive_present( int drive );
368static void config_types( void );
369static int floppy_open( struct inode *inode, struct file *filp );
370static int floppy_release( struct inode * inode, struct file * filp );
371
372/************************* End of Prototypes **************************/
373
374static struct timer_list motor_off_timer =
375 TIMER_INITIALIZER(fd_motor_off_timer, 0, 0);
376static struct timer_list readtrack_timer =
377 TIMER_INITIALIZER(fd_readtrack_check, 0, 0);
378
379static struct timer_list timeout_timer =
380 TIMER_INITIALIZER(fd_times_out, 0, 0);
381
382static struct timer_list fd_timer =
383 TIMER_INITIALIZER(check_change, 0, 0);
384
385static inline void start_motor_off_timer(void)
386{
387 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
388 MotorOffTrys = 0;
389}
390
391static inline void start_check_change_timer( void )
392{
393 mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
394}
395
396static inline void start_timeout(void)
397{
398 mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
399}
400
401static inline void stop_timeout(void)
402{
403 del_timer(&timeout_timer);
404}
405
406/* Select the side to use. */
407
408static void fd_select_side( int side )
409{
410 unsigned long flags;
411
412 /* protect against various other ints mucking around with the PSG */
413 local_irq_save(flags);
414
415 sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
416 sound_ym.wd_data = (side == 0) ? sound_ym.rd_data_reg_sel | 0x01 :
417 sound_ym.rd_data_reg_sel & 0xfe;
418
419 local_irq_restore(flags);
420}
421
422
423/* Select a drive, update the FDC's track register and set the correct
424 * clock speed for this disk's type.
425 */
426
427static void fd_select_drive( int drive )
428{
429 unsigned long flags;
430 unsigned char tmp;
431
432 if (drive == SelectedDrive)
433 return;
434
435 /* protect against various other ints mucking around with the PSG */
436 local_irq_save(flags);
437 sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
438 tmp = sound_ym.rd_data_reg_sel;
439 sound_ym.wd_data = (tmp | DSKDRVNONE) & ~(drive == 0 ? DSKDRV0 : DSKDRV1);
440 atari_dont_touch_floppy_select = 1;
441 local_irq_restore(flags);
442
443 /* restore track register to saved value */
444 FDC_WRITE( FDCREG_TRACK, UD.track );
445 udelay(25);
446
447 /* select 8/16 MHz */
448 if (UDT)
449 if (ATARIHW_PRESENT(FDCSPEED))
450 dma_wd.fdc_speed = UDT->fdc_speed;
451
452 SelectedDrive = drive;
453}
454
455
456/* Deselect both drives. */
457
458static void fd_deselect( void )
459{
460 unsigned long flags;
461
462 /* protect against various other ints mucking around with the PSG */
463 local_irq_save(flags);
464 atari_dont_touch_floppy_select = 0;
465 sound_ym.rd_data_reg_sel=14; /* Select PSG Port A */
466 sound_ym.wd_data = (sound_ym.rd_data_reg_sel |
467 (MACH_IS_FALCON ? 3 : 7)); /* no drives selected */
468 /* On Falcon, the drive B select line is used on the printer port, so
469 * leave it alone... */
470 SelectedDrive = -1;
471 local_irq_restore(flags);
472}
473
474
 475/* This timer function deselects the drives when the FDC has switched the
476 * motor off. The deselection cannot happen earlier because the FDC
477 * counts the index signals, which arrive only if one drive is selected.
478 */
479
480static void fd_motor_off_timer( unsigned long dummy )
481{
482 unsigned char status;
483
484 if (SelectedDrive < 0)
485 /* no drive selected, needn't deselect anyone */
486 return;
487
488 if (stdma_islocked())
489 goto retry;
490
491 status = FDC_READ( FDCREG_STATUS );
492
493 if (!(status & 0x80)) {
494 /* motor already turned off by FDC -> deselect drives */
495 MotorOn = 0;
496 fd_deselect();
497 return;
498 }
499 /* not yet off, try again */
500
501 retry:
502 /* Test again later; if tested too often, it seems there is no disk
503 * in the drive and the FDC will leave the motor on forever (or,
504 * at least until a disk is inserted). So we'll test only twice
505 * per second from then on...
506 */
507 mod_timer(&motor_off_timer,
508 jiffies + (MotorOffTrys++ < FD_MOTOR_OFF_MAXTRY ? HZ/20 : HZ/2));
509}
510
511
 512/* This function is repeatedly called to detect disk changes (as well
513 * as possible) and keep track of the current state of the write protection.
514 */
515
516static void check_change( unsigned long dummy )
517{
518 static int drive = 0;
519
520 unsigned long flags;
521 unsigned char old_porta;
522 int stat;
523
524 if (++drive > 1 || !UD.connected)
525 drive = 0;
526
527 /* protect against various other ints mucking around with the PSG */
528 local_irq_save(flags);
529
530 if (!stdma_islocked()) {
531 sound_ym.rd_data_reg_sel = 14;
532 old_porta = sound_ym.rd_data_reg_sel;
533 sound_ym.wd_data = (old_porta | DSKDRVNONE) &
534 ~(drive == 0 ? DSKDRV0 : DSKDRV1);
535 stat = !!(FDC_READ( FDCREG_STATUS ) & FDCSTAT_WPROT);
536 sound_ym.wd_data = old_porta;
537
538 if (stat != UD.wpstat) {
539 DPRINT(( "wpstat[%d] = %d\n", drive, stat ));
540 UD.wpstat = stat;
541 set_bit (drive, &changed_floppies);
542 }
543 }
544 local_irq_restore(flags);
545
546 start_check_change_timer();
547}
548
549
550/* Handling of the Head Settling Flag: This flag should be set after each
551 * seek operation, because we don't use seeks with verify.
552 */
553
554static inline void set_head_settle_flag(void)
555{
556 HeadSettleFlag = FDCCMDADD_E;
557}
558
559static inline int get_head_settle_flag(void)
560{
561 int tmp = HeadSettleFlag;
562 HeadSettleFlag = 0;
563 return( tmp );
564}
565
566static inline void copy_buffer(void *from, void *to)
567{
568 ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
569 int cnt;
570
571 for (cnt = 512/4; cnt; cnt--)
572 *p2++ = *p1++;
573}
574
575
576
577
578/* General Interrupt Handling */
579
580static void (*FloppyIRQHandler)( int status ) = NULL;
581
582static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp)
583{
584 unsigned char status;
585 void (*handler)( int );
586
587 handler = xchg(&FloppyIRQHandler, NULL);
588
589 if (handler) {
590 nop();
591 status = FDC_READ( FDCREG_STATUS );
592 DPRINT(("FDC irq, status = %02x handler = %08lx\n",status,(unsigned long)handler));
593 handler( status );
594 }
595 else {
596 DPRINT(("FDC irq, no handler\n"));
597 }
598 return IRQ_HANDLED;
599}
600
601
602/* Error handling: If some error happened, retry some times, then
603 * recalibrate, then try again, and fail after MAX_ERRORS.
604 */
605
606static void fd_error( void )
607{
608 if (IsFormatting) {
609 IsFormatting = 0;
610 FormatError = 1;
611 wake_up( &format_wait );
612 return;
613 }
614
615 if (!CURRENT)
616 return;
617
618 CURRENT->errors++;
619 if (CURRENT->errors >= MAX_ERRORS) {
620 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
621 end_request(CURRENT, 0);
622 }
623 else if (CURRENT->errors == RECALIBRATE_ERRORS) {
624 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
625 if (SelectedDrive != -1)
626 SUD.track = -1;
627 }
628 redo_fd_request();
629}
630
631
632
633#define SET_IRQ_HANDLER(proc) do { FloppyIRQHandler = (proc); } while(0)
634
635
636/* ---------- Formatting ---------- */
637
638#define FILL(n,val) \
639 do { \
640 memset( p, val, n ); \
641 p += n; \
642 } while(0)
643
644static int do_format(int drive, int type, struct atari_format_descr *desc)
645{
646 unsigned char *p;
647 int sect, nsect;
648 unsigned long flags;
649
650 DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
651 drive, desc->track, desc->head, desc->sect_offset ));
652
653 local_irq_save(flags);
654 while( fdc_busy ) sleep_on( &fdc_wait );
655 fdc_busy = 1;
656 stdma_lock(floppy_irq, NULL);
657 atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
658 local_irq_restore(flags);
659
660 if (type) {
661 if (--type >= NUM_DISK_MINORS ||
662 minor2disktype[type].drive_types > DriveType) {
663 redo_fd_request();
664 return -EINVAL;
665 }
666 type = minor2disktype[type].index;
667 UDT = &disk_type[type];
668 }
669
670 if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
671 redo_fd_request();
672 return -EINVAL;
673 }
674
675 nsect = UDT->spt;
676 p = TrackBuffer;
677 /* The track buffer is used for the raw track data, so its
678 contents become invalid! */
679 BufferDrive = -1;
680 /* stop deselect timer */
681 del_timer( &motor_off_timer );
682
683 FILL( 60 * (nsect / 9), 0x4e );
684 for( sect = 0; sect < nsect; ++sect ) {
685 FILL( 12, 0 );
686 FILL( 3, 0xf5 );
687 *p++ = 0xfe;
688 *p++ = desc->track;
689 *p++ = desc->head;
690 *p++ = (nsect + sect - desc->sect_offset) % nsect + 1;
691 *p++ = 2;
692 *p++ = 0xf7;
693 FILL( 22, 0x4e );
694 FILL( 12, 0 );
695 FILL( 3, 0xf5 );
696 *p++ = 0xfb;
697 FILL( 512, 0xe5 );
698 *p++ = 0xf7;
699 FILL( 40, 0x4e );
700 }
701 FILL( TrackBuffer+BUFFER_SIZE-p, 0x4e );
702
703 IsFormatting = 1;
704 FormatError = 0;
705 ReqTrack = desc->track;
706 ReqSide = desc->head;
707 do_fd_action( drive );
708
709 sleep_on( &format_wait );
710
711 redo_fd_request();
712 return( FormatError ? -EIO : 0 );
713}
714
715
716/* do_fd_action() is the general procedure for a fd request: All
717 * required parameter settings (drive select, side select, track
718 * position) are checked and set if needed. For each of these
719 * parameters and the actual reading or writing exist two functions:
720 * one that starts the setting (or skips it if possible) and one
721 * callback for the "done" interrupt. Each done func calls the next
722 * set function to propagate the request down to fd_rwsec_done().
723 */
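/*
 * Resulting call chain for an ordinary read or write (a sketch; every
 * "done" step runs from the FDC interrupt via FloppyIRQHandler):
 *
 *	do_fd_action()
 *	  -> fd_select_drive()                      (synchronous)
 *	  -> fd_calibrate() -> fd_calibrate_done()  (only if track unknown)
 *	  -> fd_seek()      -> fd_seek_done()       (only if the head must move)
 *	  -> fd_rwsec()     -> fd_rwsec_done() -> fd_rwsec_done1()
 *
 * When formatting, the last stage is fd_writetrack()/fd_writetrack_done()
 * instead.
 */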
724
725static void do_fd_action( int drive )
726{
727 DPRINT(("do_fd_action\n"));
728
729 if (UseTrackbuffer && !IsFormatting) {
730 repeat:
731 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
732 if (ReqCmd == READ) {
733 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
734 if (++ReqCnt < CURRENT->current_nr_sectors) {
735 /* read next sector */
736 setup_req_params( drive );
737 goto repeat;
738 }
739 else {
740 /* all sectors finished */
741 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
742 CURRENT->sector += CURRENT->current_nr_sectors;
743 end_request(CURRENT, 1);
744 redo_fd_request();
745 return;
746 }
747 }
748 else {
749 /* cmd == WRITE, pay attention to track buffer
750 * consistency! */
751 copy_buffer( ReqData, SECTOR_BUFFER(ReqSector) );
752 }
753 }
754 }
755
756 if (SelectedDrive != drive)
757 fd_select_drive( drive );
758
759 if (UD.track == -1)
760 fd_calibrate();
761 else if (UD.track != ReqTrack << UDT->stretch)
762 fd_seek();
763 else if (IsFormatting)
764 fd_writetrack();
765 else
766 fd_rwsec();
767}
768
769
770/* Seek to track 0 if the current track is unknown */
771
772static void fd_calibrate( void )
773{
774 if (SUD.track >= 0) {
775 fd_calibrate_done( 0 );
776 return;
777 }
778
779 if (ATARIHW_PRESENT(FDCSPEED))
 780		dma_wd.fdc_speed = 0;	/* always seek with 8 MHz */
781 DPRINT(("fd_calibrate\n"));
782 SET_IRQ_HANDLER( fd_calibrate_done );
783 /* we can't verify, since the speed may be incorrect */
784 FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | SUD.steprate );
785
786 NeedSeek = 1;
787 MotorOn = 1;
788 start_timeout();
789 /* wait for IRQ */
790}
791
792
793static void fd_calibrate_done( int status )
794{
795 DPRINT(("fd_calibrate_done()\n"));
796 stop_timeout();
797
798 /* set the correct speed now */
799 if (ATARIHW_PRESENT(FDCSPEED))
800 dma_wd.fdc_speed = SUDT->fdc_speed;
801 if (status & FDCSTAT_RECNF) {
802 printk(KERN_ERR "fd%d: restore failed\n", SelectedDrive );
803 fd_error();
804 }
805 else {
806 SUD.track = 0;
807 fd_seek();
808 }
809}
810
811
812/* Seek the drive to the requested track. The drive must have been
813 * calibrated at some point before this.
814 */
815
816static void fd_seek( void )
817{
818 if (SUD.track == ReqTrack << SUDT->stretch) {
819 fd_seek_done( 0 );
820 return;
821 }
822
823 if (ATARIHW_PRESENT(FDCSPEED)) {
 824		dma_wd.fdc_speed = 0;	/* always seek with 8 MHz */
825 MFPDELAY();
826 }
827
828 DPRINT(("fd_seek() to track %d\n",ReqTrack));
829 FDC_WRITE( FDCREG_DATA, ReqTrack << SUDT->stretch);
830 udelay(25);
831 SET_IRQ_HANDLER( fd_seek_done );
832 FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK | SUD.steprate );
833
834 MotorOn = 1;
835 set_head_settle_flag();
836 start_timeout();
837 /* wait for IRQ */
838}
839
840
841static void fd_seek_done( int status )
842{
843 DPRINT(("fd_seek_done()\n"));
844 stop_timeout();
845
846 /* set the correct speed */
847 if (ATARIHW_PRESENT(FDCSPEED))
848 dma_wd.fdc_speed = SUDT->fdc_speed;
849 if (status & FDCSTAT_RECNF) {
850 printk(KERN_ERR "fd%d: seek error (to track %d)\n",
851 SelectedDrive, ReqTrack );
852 /* we don't know exactly which track we are on now! */
853 SUD.track = -1;
854 fd_error();
855 }
856 else {
857 SUD.track = ReqTrack << SUDT->stretch;
858 NeedSeek = 0;
859 if (IsFormatting)
860 fd_writetrack();
861 else
862 fd_rwsec();
863 }
864}
865
866
867/* This does the actual reading/writing after positioning the head
868 * over the correct track.
869 */
870
871static int MultReadInProgress = 0;
872
873
874static void fd_rwsec( void )
875{
876 unsigned long paddr, flags;
877 unsigned int rwflag, old_motoron;
878 unsigned int track;
879
880 DPRINT(("fd_rwsec(), Sec=%d, Access=%c\n",ReqSector, ReqCmd == WRITE ? 'w' : 'r' ));
881 if (ReqCmd == WRITE) {
882 if (ATARIHW_PRESENT(EXTD_DMA)) {
883 paddr = virt_to_phys(ReqData);
884 }
885 else {
886 copy_buffer( ReqData, DMABuffer );
887 paddr = PhysDMABuffer;
888 }
889 dma_cache_maintenance( paddr, 512, 1 );
890 rwflag = 0x100;
891 }
892 else {
893 if (read_track)
894 paddr = PhysTrackBuffer;
895 else
896 paddr = ATARIHW_PRESENT(EXTD_DMA) ?
897 virt_to_phys(ReqData) : PhysDMABuffer;
898 rwflag = 0;
899 }
900
901 fd_select_side( ReqSide );
902
903 /* Start sector of this operation */
904 FDC_WRITE( FDCREG_SECTOR, read_track ? 1 : ReqSector );
905 MFPDELAY();
906 /* Cheat for track if stretch != 0 */
907 if (SUDT->stretch) {
908 track = FDC_READ( FDCREG_TRACK);
909 MFPDELAY();
910 FDC_WRITE( FDCREG_TRACK, track >> SUDT->stretch);
911 }
912 udelay(25);
913
914 /* Setup DMA */
915 local_irq_save(flags);
916 dma_wd.dma_lo = (unsigned char)paddr;
917 MFPDELAY();
918 paddr >>= 8;
919 dma_wd.dma_md = (unsigned char)paddr;
920 MFPDELAY();
921 paddr >>= 8;
922 if (ATARIHW_PRESENT(EXTD_DMA))
923 st_dma_ext_dmahi = (unsigned short)paddr;
924 else
925 dma_wd.dma_hi = (unsigned char)paddr;
926 MFPDELAY();
927 local_irq_restore(flags);
928
929 /* Clear FIFO and switch DMA to correct mode */
930 dma_wd.dma_mode_status = 0x90 | rwflag;
931 MFPDELAY();
932 dma_wd.dma_mode_status = 0x90 | (rwflag ^ 0x100);
933 MFPDELAY();
934 dma_wd.dma_mode_status = 0x90 | rwflag;
935 MFPDELAY();
936
937 /* How many sectors for DMA */
938 dma_wd.fdc_acces_seccount = read_track ? SUDT->spt : 1;
939
940 udelay(25);
941
942 /* Start operation */
943 dma_wd.dma_mode_status = FDCSELREG_STP | rwflag;
944 udelay(25);
945 SET_IRQ_HANDLER( fd_rwsec_done );
946 dma_wd.fdc_acces_seccount =
947 (get_head_settle_flag() |
948 (rwflag ? FDCCMD_WRSEC : (FDCCMD_RDSEC | (read_track ? FDCCMDADD_M : 0))));
949
950 old_motoron = MotorOn;
951 MotorOn = 1;
952 NeedSeek = 1;
953 /* wait for interrupt */
954
955 if (read_track) {
956 /* If reading a whole track, wait about one disk rotation and
957 * then check if all sectors are read. The FDC will even
958 * search for the first non-existent sector and need 1 sec to
959 * recognise that it isn't present :-(
960 */
961 MultReadInProgress = 1;
962 mod_timer(&readtrack_timer,
963 /* 1 rot. + 5 rot.s if motor was off */
964 jiffies + HZ/5 + (old_motoron ? 0 : HZ));
965 }
966 start_timeout();
967}
968
969
970static void fd_readtrack_check( unsigned long dummy )
971{
972 unsigned long flags, addr, addr2;
973
974 local_irq_save(flags);
975
976 if (!MultReadInProgress) {
977 /* This prevents a race condition that could arise if the
978 * interrupt is triggered while the calling of this timer
979 * callback function takes place. The IRQ function then has
980 * already cleared 'MultReadInProgress' when flow of control
981 * gets here.
982 */
983 local_irq_restore(flags);
984 return;
985 }
986
987 /* get the current DMA address */
988 /* ++ f.a. read twice to avoid being fooled by switcher */
989 addr = 0;
990 do {
991 addr2 = addr;
992 addr = dma_wd.dma_lo & 0xff;
993 MFPDELAY();
994 addr |= (dma_wd.dma_md & 0xff) << 8;
995 MFPDELAY();
996 if (ATARIHW_PRESENT( EXTD_DMA ))
997 addr |= (st_dma_ext_dmahi & 0xffff) << 16;
998 else
999 addr |= (dma_wd.dma_hi & 0xff) << 16;
1000 MFPDELAY();
1001 } while(addr != addr2);
1002
1003 if (addr >= PhysTrackBuffer + SUDT->spt*512) {
1004 /* already read enough data, force an FDC interrupt to stop
1005 * the read operation
1006 */
1007 SET_IRQ_HANDLER( NULL );
1008 MultReadInProgress = 0;
1009 local_irq_restore(flags);
1010 DPRINT(("fd_readtrack_check(): done\n"));
1011 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
1012 udelay(25);
1013
1014 /* No error until now -- the FDC would have interrupted
1015 * otherwise!
1016 */
1017 fd_rwsec_done1(0);
1018 }
1019 else {
1020 /* not yet finished, wait another tenth rotation */
1021 local_irq_restore(flags);
1022 DPRINT(("fd_readtrack_check(): not yet finished\n"));
1023 mod_timer(&readtrack_timer, jiffies + HZ/5/10);
1024 }
1025}
1026
1027
1028static void fd_rwsec_done( int status )
1029{
1030 DPRINT(("fd_rwsec_done()\n"));
1031
1032 if (read_track) {
1033 del_timer(&readtrack_timer);
1034 if (!MultReadInProgress)
1035 return;
1036 MultReadInProgress = 0;
1037 }
1038 fd_rwsec_done1(status);
1039}
1040
1041static void fd_rwsec_done1(int status)
1042{
1043 unsigned int track;
1044
1045 stop_timeout();
1046
1047 /* Correct the track if stretch != 0 */
1048 if (SUDT->stretch) {
1049 track = FDC_READ( FDCREG_TRACK);
1050 MFPDELAY();
1051 FDC_WRITE( FDCREG_TRACK, track << SUDT->stretch);
1052 }
1053
1054 if (!UseTrackbuffer) {
1055 dma_wd.dma_mode_status = 0x90;
1056 MFPDELAY();
1057 if (!(dma_wd.dma_mode_status & 0x01)) {
1058 printk(KERN_ERR "fd%d: DMA error\n", SelectedDrive );
1059 goto err_end;
1060 }
1061 }
1062 MFPDELAY();
1063
1064 if (ReqCmd == WRITE && (status & FDCSTAT_WPROT)) {
1065 printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
1066 goto err_end;
1067 }
1068 if ((status & FDCSTAT_RECNF) &&
1069 /* RECNF is no error after a multiple read when the FDC
1070 searched for a non-existent sector! */
1071 !(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) {
1072 if (Probing) {
1073 if (SUDT > disk_type) {
1074 if (SUDT[-1].blocks > ReqBlock) {
1075 /* try another disk type */
1076 SUDT--;
1077 set_capacity(unit[SelectedDrive].disk,
1078 SUDT->blocks);
1079 } else
1080 Probing = 0;
1081 }
1082 else {
1083 if (SUD.flags & FTD_MSG)
1084 printk(KERN_INFO "fd%d: Auto-detected floppy type %s\n",
1085 SelectedDrive, SUDT->name );
1086 Probing=0;
1087 }
1088 } else {
1089/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
1090 if (SUD.autoprobe) {
1091 SUDT = disk_type + StartDiskType[DriveType];
1092 set_capacity(unit[SelectedDrive].disk,
1093 SUDT->blocks);
1094 Probing = 1;
1095 }
1096 }
1097 if (Probing) {
1098 if (ATARIHW_PRESENT(FDCSPEED)) {
1099 dma_wd.fdc_speed = SUDT->fdc_speed;
1100 MFPDELAY();
1101 }
1102 setup_req_params( SelectedDrive );
1103 BufferDrive = -1;
1104 do_fd_action( SelectedDrive );
1105 return;
1106 }
1107
1108 printk(KERN_ERR "fd%d: sector %d not found (side %d, track %d)\n",
1109 SelectedDrive, FDC_READ (FDCREG_SECTOR), ReqSide, ReqTrack );
1110 goto err_end;
1111 }
1112 if (status & FDCSTAT_CRC) {
1113 printk(KERN_ERR "fd%d: CRC error (side %d, track %d, sector %d)\n",
1114 SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
1115 goto err_end;
1116 }
1117 if (status & FDCSTAT_LOST) {
1118 printk(KERN_ERR "fd%d: lost data (side %d, track %d, sector %d)\n",
1119 SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
1120 goto err_end;
1121 }
1122
1123 Probing = 0;
1124
1125 if (ReqCmd == READ) {
1126 if (!read_track) {
1127 void *addr;
1128 addr = ATARIHW_PRESENT( EXTD_DMA ) ? ReqData : DMABuffer;
1129 dma_cache_maintenance( virt_to_phys(addr), 512, 0 );
1130 if (!ATARIHW_PRESENT( EXTD_DMA ))
1131 copy_buffer (addr, ReqData);
1132 } else {
1133 dma_cache_maintenance( PhysTrackBuffer, MaxSectors[DriveType] * 512, 0 );
1134 BufferDrive = SelectedDrive;
1135 BufferSide = ReqSide;
1136 BufferTrack = ReqTrack;
1137 copy_buffer (SECTOR_BUFFER (ReqSector), ReqData);
1138 }
1139 }
1140
1141 if (++ReqCnt < CURRENT->current_nr_sectors) {
1142 /* read next sector */
1143 setup_req_params( SelectedDrive );
1144 do_fd_action( SelectedDrive );
1145 }
1146 else {
1147 /* all sectors finished */
1148 CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
1149 CURRENT->sector += CURRENT->current_nr_sectors;
1150 end_request(CURRENT, 1);
1151 redo_fd_request();
1152 }
1153 return;
1154
1155 err_end:
1156 BufferDrive = -1;
1157 fd_error();
1158}
1159
1160
1161static void fd_writetrack( void )
1162{
1163 unsigned long paddr, flags;
1164 unsigned int track;
1165
1166 DPRINT(("fd_writetrack() Tr=%d Si=%d\n", ReqTrack, ReqSide ));
1167
1168 paddr = PhysTrackBuffer;
1169 dma_cache_maintenance( paddr, BUFFER_SIZE, 1 );
1170
1171 fd_select_side( ReqSide );
1172
1173 /* Cheat for track if stretch != 0 */
1174 if (SUDT->stretch) {
1175 track = FDC_READ( FDCREG_TRACK);
1176 MFPDELAY();
1177 FDC_WRITE(FDCREG_TRACK,track >> SUDT->stretch);
1178 }
1179 udelay(40);
1180
1181 /* Setup DMA */
1182 local_irq_save(flags);
1183 dma_wd.dma_lo = (unsigned char)paddr;
1184 MFPDELAY();
1185 paddr >>= 8;
1186 dma_wd.dma_md = (unsigned char)paddr;
1187 MFPDELAY();
1188 paddr >>= 8;
1189 if (ATARIHW_PRESENT( EXTD_DMA ))
1190 st_dma_ext_dmahi = (unsigned short)paddr;
1191 else
1192 dma_wd.dma_hi = (unsigned char)paddr;
1193 MFPDELAY();
1194 local_irq_restore(flags);
1195
1196 /* Clear FIFO and switch DMA to correct mode */
1197 dma_wd.dma_mode_status = 0x190;
1198 MFPDELAY();
1199 dma_wd.dma_mode_status = 0x90;
1200 MFPDELAY();
1201 dma_wd.dma_mode_status = 0x190;
1202 MFPDELAY();
1203
1204 /* How many sectors for DMA */
1205 dma_wd.fdc_acces_seccount = BUFFER_SIZE/512;
1206 udelay(40);
1207
1208 /* Start operation */
1209 dma_wd.dma_mode_status = FDCSELREG_STP | 0x100;
1210 udelay(40);
1211 SET_IRQ_HANDLER( fd_writetrack_done );
1212 dma_wd.fdc_acces_seccount = FDCCMD_WRTRA | get_head_settle_flag();
1213
1214 MotorOn = 1;
1215 start_timeout();
1216 /* wait for interrupt */
1217}
1218
1219
1220static void fd_writetrack_done( int status )
1221{
1222 DPRINT(("fd_writetrack_done()\n"));
1223
1224 stop_timeout();
1225
1226 if (status & FDCSTAT_WPROT) {
1227 printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
1228 goto err_end;
1229 }
1230 if (status & FDCSTAT_LOST) {
1231 printk(KERN_ERR "fd%d: lost data (side %d, track %d)\n",
1232 SelectedDrive, ReqSide, ReqTrack );
1233 goto err_end;
1234 }
1235
1236 wake_up( &format_wait );
1237 return;
1238
1239 err_end:
1240 fd_error();
1241}
1242
1243static void fd_times_out( unsigned long dummy )
1244{
1245 atari_disable_irq( IRQ_MFP_FDC );
1246 if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
1247 * before we came here... */
1248
1249 SET_IRQ_HANDLER( NULL );
1250 /* If the timeout occurred while the readtrack_check timer was
1251 * active, we need to cancel it, else bad things will happen */
1252 if (UseTrackbuffer)
1253 del_timer( &readtrack_timer );
1254 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
1255 udelay( 25 );
1256
1257 printk(KERN_ERR "floppy timeout\n" );
1258 fd_error();
1259 end:
1260 atari_enable_irq( IRQ_MFP_FDC );
1261}
1262
1263
1264/* The (noop) seek operation here is needed to make the WP bit in the
1265 * FDC status register accessible for check_change. If the last disk
1266 * operation would have been a RDSEC, this bit would always read as 0
1267 * no matter what :-( To save time, the seek goes to the track we're
1268 * already on.
1269 */
1270
1271static void finish_fdc( void )
1272{
1273 if (!NeedSeek) {
1274 finish_fdc_done( 0 );
1275 }
1276 else {
1277 DPRINT(("finish_fdc: dummy seek started\n"));
1278 FDC_WRITE (FDCREG_DATA, SUD.track);
1279 SET_IRQ_HANDLER( finish_fdc_done );
1280 FDC_WRITE (FDCREG_CMD, FDCCMD_SEEK);
1281 MotorOn = 1;
1282 start_timeout();
1283 /* we must wait for the IRQ here, because the ST-DMA
1284 is released immediately afterwards and the interrupt
1285 may be delivered to the wrong driver. */
1286 }
1287}
1288
1289
1290static void finish_fdc_done( int dummy )
1291{
1292 unsigned long flags;
1293
1294 DPRINT(("finish_fdc_done entered\n"));
1295 stop_timeout();
1296 NeedSeek = 0;
1297
1298 if (timer_pending(&fd_timer) && time_before(fd_timer.expires, jiffies + 5))
1299 /* If the check for a disk change is done too early after this
1300 * last seek command, the WP bit still reads wrong :-((
1301 */
1302 mod_timer(&fd_timer, jiffies + 5);
1303 else
1304 start_check_change_timer();
1305 start_motor_off_timer();
1306
1307 local_irq_save(flags);
1308 stdma_release();
1309 fdc_busy = 0;
1310 wake_up( &fdc_wait );
1311 local_irq_restore(flags);
1312
1313 DPRINT(("finish_fdc() finished\n"));
1314}
1315
1316/* The detection of disk changes is a dark chapter in Atari history :-(
1317 * Because the "Drive ready" signal isn't present in the Atari
1318 * hardware, one has to rely on the "Write Protect". This works fine,
1319 * as long as no write protected disks are used. TOS solves this
1320 * problem by introducing tri-state logic ("maybe changed") and
1321 * looking at the serial number in block 0. This isn't possible for
1322 * Linux, since the floppy driver can't make assumptions about the
1323 * filesystem used on the disk and thus the contents of block 0. I've
1324 * chosen the method to always say "The disk was changed" if it is
1325 * unsure whether it was. This implies that every open or mount
1326 * invalidates the disk buffers if you work with write protected
1327 * disks. But at least this is better than working with incorrect data
1328 * due to unrecognised disk changes.
1329 */
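/*
 * Concretely, check_floppy_change() below reports "changed" in three cases:
 * a simulated change (fake_change, set after formatting or FDFLUSH), a WP
 * transition observed by check_change() (changed_floppies), or simply a
 * write-protected disk (UD.wpstat), where a change can never be ruled out.
 */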
1330
1331static int check_floppy_change(struct gendisk *disk)
1332{
1333 struct atari_floppy_struct *p = disk->private_data;
1334 unsigned int drive = p - unit;
1335 if (test_bit (drive, &fake_change)) {
1336 /* simulated change (e.g. after formatting) */
1337 return 1;
1338 }
1339 if (test_bit (drive, &changed_floppies)) {
1340 /* surely changed (the WP signal changed at least once) */
1341 return 1;
1342 }
1343 if (UD.wpstat) {
1344 /* WP is on -> could be changed: to be sure, buffers should be
1345 * invalidated...
1346 */
1347 return 1;
1348 }
1349
1350 return 0;
1351}
1352
1353static int floppy_revalidate(struct gendisk *disk)
1354{
1355 struct atari_floppy_struct *p = disk->private_data;
1356 unsigned int drive = p - unit;
1357
1358 if (test_bit(drive, &changed_floppies) ||
1359 test_bit(drive, &fake_change) ||
1360 p->disktype == 0) {
1361 if (UD.flags & FTD_MSG)
1362 printk(KERN_ERR "floppy: clear format %p!\n", UDT);
1363 BufferDrive = -1;
1364 clear_bit(drive, &fake_change);
1365 clear_bit(drive, &changed_floppies);
1366 /* MSch: clearing geometry makes sense only for autoprobe
1367 formats, for 'permanent user-defined' parameter:
1368 restore default_params[] here if flagged valid! */
1369 if (default_params[drive].blocks == 0)
1370 UDT = 0;
1371 else
1372 UDT = &default_params[drive];
1373 }
1374 return 0;
1375}
1376
1377
1378/* This sets up the global variables describing the current request. */
1379
1380static void setup_req_params( int drive )
1381{
1382 int block = ReqBlock + ReqCnt;
1383
1384 ReqTrack = block / UDT->spt;
1385 ReqSector = block - ReqTrack * UDT->spt + 1;
1386 ReqSide = ReqTrack & 1;
1387 ReqTrack >>= 1;
1388 ReqData = ReqBuffer + 512 * ReqCnt;
1389
1390 if (UseTrackbuffer)
1391 read_track = (ReqCmd == READ && CURRENT->errors == 0);
1392 else
1393 read_track = 0;
1394
1395 DPRINT(("Request params: Si=%d Tr=%d Se=%d Data=%08lx\n",ReqSide,
1396 ReqTrack, ReqSector, (unsigned long)ReqData ));
1397}
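/*
 * Worked example (hypothetical request, D720 format, spt = 9): logical
 * block 100 yields ReqTrack = 100 / 9 = 11 and ReqSector = 100 - 99 + 1 = 2;
 * the side is the low bit of that value (11 & 1 = 1) and the physical
 * cylinder becomes 11 >> 1 = 5, since both sides of a cylinder appear as
 * consecutive logical tracks.
 */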
1398
1399
1400static void redo_fd_request(void)
1401{
1402 int drive, type;
1403 struct atari_floppy_struct *floppy;
1404
1405 DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
1406 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
1407 CURRENT ? CURRENT->sector : 0 ));
1408
1409 IsFormatting = 0;
1410
1411repeat:
1412
1413 if (!CURRENT)
1414 goto the_end;
1415
1416 floppy = CURRENT->rq_disk->private_data;
1417 drive = floppy - unit;
1418 type = floppy->type;
1419
1420 if (!UD.connected) {
1421 /* drive not connected */
1422 printk(KERN_ERR "Unknown Device: fd%d\n", drive );
1423 end_request(CURRENT, 0);
1424 goto repeat;
1425 }
1426
1427 if (type == 0) {
1428 if (!UDT) {
1429 Probing = 1;
1430 UDT = disk_type + StartDiskType[DriveType];
1431 set_capacity(floppy->disk, UDT->blocks);
1432 UD.autoprobe = 1;
1433 }
1434 }
1435 else {
1436 /* user supplied disk type */
1437 if (--type >= NUM_DISK_MINORS) {
 1438			printk(KERN_WARNING "fd%d: invalid disk format\n", drive );
1439 end_request(CURRENT, 0);
1440 goto repeat;
1441 }
1442 if (minor2disktype[type].drive_types > DriveType) {
 1443			printk(KERN_WARNING "fd%d: unsupported disk format\n", drive );
1444 end_request(CURRENT, 0);
1445 goto repeat;
1446 }
1447 type = minor2disktype[type].index;
1448 UDT = &disk_type[type];
1449 set_capacity(floppy->disk, UDT->blocks);
1450 UD.autoprobe = 0;
1451 }
1452
1453 if (CURRENT->sector + 1 > UDT->blocks) {
1454 end_request(CURRENT, 0);
1455 goto repeat;
1456 }
1457
1458 /* stop deselect timer */
1459 del_timer( &motor_off_timer );
1460
1461 ReqCnt = 0;
1462 ReqCmd = rq_data_dir(CURRENT);
1463 ReqBlock = CURRENT->sector;
1464 ReqBuffer = CURRENT->buffer;
1465 setup_req_params( drive );
1466 do_fd_action( drive );
1467
1468 return;
1469
1470 the_end:
1471 finish_fdc();
1472}
1473
1474
1475void do_fd_request(request_queue_t * q)
1476{
1477 unsigned long flags;
1478
1479 DPRINT(("do_fd_request for pid %d\n",current->pid));
1480 while( fdc_busy ) sleep_on( &fdc_wait );
1481 fdc_busy = 1;
1482 stdma_lock(floppy_irq, NULL);
1483
1484 atari_disable_irq( IRQ_MFP_FDC );
1485 local_save_flags(flags); /* The request function is called with ints
1486 local_irq_disable(); * disabled... so must save the IPL for later */
1487 redo_fd_request();
1488 local_irq_restore(flags);
1489 atari_enable_irq( IRQ_MFP_FDC );
1490}
1491
1492static int fd_ioctl(struct inode *inode, struct file *filp,
1493 unsigned int cmd, unsigned long param)
1494{
1495 struct gendisk *disk = inode->i_bdev->bd_disk;
1496 struct atari_floppy_struct *floppy = disk->private_data;
1497 int drive = floppy - unit;
1498 int type = floppy->type;
1499 struct atari_format_descr fmt_desc;
1500 struct atari_disk_type *dtp;
1501 struct floppy_struct getprm;
1502 int settype;
1503 struct floppy_struct setprm;
1504
1505 switch (cmd) {
1506 case FDGETPRM:
1507 if (type) {
1508 if (--type >= NUM_DISK_MINORS)
1509 return -ENODEV;
1510 if (minor2disktype[type].drive_types > DriveType)
1511 return -ENODEV;
1512 type = minor2disktype[type].index;
1513 dtp = &disk_type[type];
1514 if (UD.flags & FTD_MSG)
1515 printk (KERN_ERR "floppy%d: found dtp %p name %s!\n",
1516 drive, dtp, dtp->name);
1517 }
1518 else {
1519 if (!UDT)
1520 return -ENXIO;
1521 else
1522 dtp = UDT;
1523 }
1524 memset((void *)&getprm, 0, sizeof(getprm));
1525 getprm.size = dtp->blocks;
1526 getprm.sect = dtp->spt;
1527 getprm.head = 2;
1528 getprm.track = dtp->blocks/dtp->spt/2;
1529 getprm.stretch = dtp->stretch;
1530 if (copy_to_user((void *)param, &getprm, sizeof(getprm)))
1531 return -EFAULT;
1532 return 0;
1533 }
1534 switch (cmd) {
1535 case FDSETPRM:
1536 case FDDEFPRM:
1537 /*
1538 * MSch 7/96: simple 'set geometry' case: just set the
1539 * 'default' device params (minor == 0).
1540 * Currently, the drive geometry is cleared after each
1541 * disk change and subsequent revalidate()! simple
1542 * implementation of FDDEFPRM: save geometry from a
1543 * FDDEFPRM call and restore it in floppy_revalidate() !
1544 */
1545
1546 /* get the parameters from user space */
1547 if (floppy->ref != 1 && floppy->ref != -1)
1548 return -EBUSY;
1549 if (copy_from_user(&setprm, (void *) param, sizeof(setprm)))
1550 return -EFAULT;
1551 /*
1552 * first of all: check for floppy change and revalidate,
1553 * or the next access will revalidate - and clear UDT :-(
1554 */
1555
1556 if (check_floppy_change(disk))
1557 floppy_revalidate(disk);
1558
1559 if (UD.flags & FTD_MSG)
1560 printk (KERN_INFO "floppy%d: setting size %d spt %d str %d!\n",
1561 drive, setprm.size, setprm.sect, setprm.stretch);
1562
1563 /* what if type > 0 here? Overwrite specified entry ? */
1564 if (type) {
1565 /* refuse to re-set a predefined type for now */
1566 redo_fd_request();
1567 return -EINVAL;
1568 }
1569
1570 /*
1571 * type == 0: first look for a matching entry in the type list,
 1572	 * and set the UD.disktype field to use the predefined entry.
1573 * TODO: add user-defined format to head of autoprobe list ?
1574 * Useful to include the user-type for future autodetection!
1575 */
1576
1577 for (settype = 0; settype < NUM_DISK_MINORS; settype++) {
1578 int setidx = 0;
1579 if (minor2disktype[settype].drive_types > DriveType) {
1580 /* skip this one, invalid for drive ... */
1581 continue;
1582 }
1583 setidx = minor2disktype[settype].index;
1584 dtp = &disk_type[setidx];
1585
1586 /* found matching entry ?? */
1587 if ( dtp->blocks == setprm.size
1588 && dtp->spt == setprm.sect
1589 && dtp->stretch == setprm.stretch ) {
1590 if (UD.flags & FTD_MSG)
1591 printk (KERN_INFO "floppy%d: setting %s %p!\n",
1592 drive, dtp->name, dtp);
1593 UDT = dtp;
1594 set_capacity(floppy->disk, UDT->blocks);
1595
1596 if (cmd == FDDEFPRM) {
1597 /* save settings as permanent default type */
1598 default_params[drive].name = dtp->name;
1599 default_params[drive].spt = dtp->spt;
1600 default_params[drive].blocks = dtp->blocks;
1601 default_params[drive].fdc_speed = dtp->fdc_speed;
1602 default_params[drive].stretch = dtp->stretch;
1603 }
1604
1605 return 0;
1606 }
1607
1608 }
1609
1610 /* no matching disk type found above - setting user_params */
1611
1612 if (cmd == FDDEFPRM) {
1613 /* set permanent type */
1614 dtp = &default_params[drive];
1615 } else
1616 /* set user type (reset by disk change!) */
1617 dtp = &user_params[drive];
1618
1619 dtp->name = "user format";
1620 dtp->blocks = setprm.size;
1621 dtp->spt = setprm.sect;
1622 if (setprm.sect > 14)
1623 dtp->fdc_speed = 3;
1624 else
1625 dtp->fdc_speed = 0;
1626 dtp->stretch = setprm.stretch;
1627
1628 if (UD.flags & FTD_MSG)
1629 printk (KERN_INFO "floppy%d: blk %d spt %d str %d!\n",
1630 drive, dtp->blocks, dtp->spt, dtp->stretch);
1631
1632 /* sanity check */
1633 if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 ||
1634 setprm.head != 2) {
1635 redo_fd_request();
1636 return -EINVAL;
1637 }
1638
1639 UDT = dtp;
1640 set_capacity(floppy->disk, UDT->blocks);
1641
1642 return 0;
1643 case FDMSGON:
1644 UD.flags |= FTD_MSG;
1645 return 0;
1646 case FDMSGOFF:
1647 UD.flags &= ~FTD_MSG;
1648 return 0;
1649 case FDSETEMSGTRESH:
1650 return -EINVAL;
1651 case FDFMTBEG:
1652 return 0;
1653 case FDFMTTRK:
1654 if (floppy->ref != 1 && floppy->ref != -1)
1655 return -EBUSY;
1656 if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc)))
1657 return -EFAULT;
1658 return do_format(drive, type, &fmt_desc);
1659 case FDCLRPRM:
1660 UDT = NULL;
1661 /* MSch: invalidate default_params */
1662 default_params[drive].blocks = 0;
1663 set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
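		/* fall through: also invalidate the track buffer and force a
		 * disk-change check, as for FDFMTEND/FDFLUSH */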
1664 case FDFMTEND:
1665 case FDFLUSH:
1666 /* invalidate the buffer track to force a reread */
1667 BufferDrive = -1;
1668 set_bit(drive, &fake_change);
1669 check_disk_change(inode->i_bdev);
1670 return 0;
1671 default:
1672 return -EINVAL;
1673 }
1674}
1675
1676
1677/* Initialize the 'unit' variable for drive 'drive' */
1678
1679static void __init fd_probe( int drive )
1680{
1681 UD.connected = 0;
1682 UDT = NULL;
1683
1684 if (!fd_test_drive_present( drive ))
1685 return;
1686
1687 UD.connected = 1;
1688 UD.track = 0;
1689 switch( UserSteprate[drive] ) {
1690 case 2:
1691 UD.steprate = FDCSTEP_2;
1692 break;
1693 case 3:
1694 UD.steprate = FDCSTEP_3;
1695 break;
1696 case 6:
1697 UD.steprate = FDCSTEP_6;
1698 break;
1699 case 12:
1700 UD.steprate = FDCSTEP_12;
1701 break;
1702 default: /* should be -1 for "not set by user" */
1703 if (ATARIHW_PRESENT( FDCSPEED ) || MACH_IS_MEDUSA)
1704 UD.steprate = FDCSTEP_3;
1705 else
1706 UD.steprate = FDCSTEP_6;
1707 break;
1708 }
1709 MotorOn = 1; /* from probe restore operation! */
1710}
1711
1712
1713/* This function tests the physical presence of a floppy drive (not
1714 * whether a disk is inserted). This is done by issuing a restore
1715 * command, waiting max. 2 seconds (that should be enough to move the
1716 * head across the whole disk) and looking at the state of the "TR00"
1717 * signal. This should now be raised if there is a drive connected
1718 * (and there is no hardware failure :-) Otherwise, the drive is
1719 * declared absent.
1720 */
1721
1722static int __init fd_test_drive_present( int drive )
1723{
1724 unsigned long timeout;
1725 unsigned char status;
1726 int ok;
1727
1728 if (drive >= (MACH_IS_FALCON ? 1 : 2)) return( 0 );
1729 fd_select_drive( drive );
1730
1731 /* disable interrupt temporarily */
1732 atari_turnoff_irq( IRQ_MFP_FDC );
1733 FDC_WRITE (FDCREG_TRACK, 0xff00);
1734 FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | FDCCMDADD_H | FDCSTEP_6 );
1735
1736 timeout = jiffies + 2*HZ+HZ/2;
1737 while (time_before(jiffies, timeout))
1738 if (!(mfp.par_dt_reg & 0x20))
1739 break;
1740
1741 status = FDC_READ( FDCREG_STATUS );
1742 ok = (status & FDCSTAT_TR00) != 0;
1743
1744 /* force interrupt to abort restore operation (FDC would try
1745 * about 50 seconds!) */
1746 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
1747 udelay(500);
1748 status = FDC_READ( FDCREG_STATUS );
1749 udelay(20);
1750
1751 if (ok) {
1752 /* dummy seek command to make WP bit accessible */
1753 FDC_WRITE( FDCREG_DATA, 0 );
1754 FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK );
1755 while( mfp.par_dt_reg & 0x20 )
1756 ;
1757 status = FDC_READ( FDCREG_STATUS );
1758 }
1759
1760 atari_turnon_irq( IRQ_MFP_FDC );
1761 return( ok );
1762}
1763
1764
1765/* Look how many and which kind of drives are connected. If there are
1766 * floppies, additionally start the disk-change and motor-off timers.
1767 */
1768
1769static void __init config_types( void )
1770{
1771 int drive, cnt = 0;
1772
1773 /* for probing drives, set the FDC speed to 8 MHz */
1774 if (ATARIHW_PRESENT(FDCSPEED))
1775 dma_wd.fdc_speed = 0;
1776
1777 printk(KERN_INFO "Probing floppy drive(s):\n");
1778 for( drive = 0; drive < FD_MAX_UNITS; drive++ ) {
1779 fd_probe( drive );
1780 if (UD.connected) {
1781 printk(KERN_INFO "fd%d\n", drive);
1782 ++cnt;
1783 }
1784 }
1785
1786 if (FDC_READ( FDCREG_STATUS ) & FDCSTAT_BUSY) {
1787 /* If FDC is still busy from probing, give it another FORCI
1788 * command to abort the operation. If this isn't done, the FDC
1789 * will interrupt later and its IRQ line stays low, because
1790 * the status register isn't read. And this will block any
1791 * interrupts on this IRQ line :-(
1792 */
1793 FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
1794 udelay(500);
1795 FDC_READ( FDCREG_STATUS );
1796 udelay(20);
1797 }
1798
1799 if (cnt > 0) {
1800 start_motor_off_timer();
1801 if (cnt == 1) fd_select_drive( 0 );
1802 start_check_change_timer();
1803 }
1804}
1805
1806/*
 1807 * floppy_open checks for aliasing (/dev/fd0 can be the same as
1808 * /dev/PS0 etc), and disallows simultaneous access to the same
1809 * drive with different device numbers.
1810 */
1811
1812static int floppy_open( struct inode *inode, struct file *filp )
1813{
1814 struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
1815 int type = iminor(inode) >> 2;
1816
1817 DPRINT(("fd_open: type=%d\n",type));
1818 if (p->ref && p->type != type)
1819 return -EBUSY;
1820
1821 if (p->ref == -1 || (p->ref && filp->f_flags & O_EXCL))
1822 return -EBUSY;
1823
1824 if (filp->f_flags & O_EXCL)
1825 p->ref = -1;
1826 else
1827 p->ref++;
1828
1829 p->type = type;
1830
1831 if (filp->f_flags & O_NDELAY)
1832 return 0;
1833
1834 if (filp->f_mode & 3) {
1835 check_disk_change(inode->i_bdev);
1836 if (filp->f_mode & 2) {
1837 if (p->wpstat) {
1838 if (p->ref < 0)
1839 p->ref = 0;
1840 else
1841 p->ref--;
1842 floppy_release(inode, filp);
1843 return -EROFS;
1844 }
1845 }
1846 }
1847 return 0;
1848}
1849
1850
1851static int floppy_release( struct inode * inode, struct file * filp )
1852{
1853 struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
1854 if (p->ref < 0)
1855 p->ref = 0;
1856 else if (!p->ref--) {
 1857		printk(KERN_ERR "floppy_release with fd_ref == 0\n");
1858 p->ref = 0;
1859 }
1860 return 0;
1861}
1862
1863static struct block_device_operations floppy_fops = {
1864 .owner = THIS_MODULE,
1865 .open = floppy_open,
1866 .release = floppy_release,
1867 .ioctl = fd_ioctl,
1868 .media_changed = check_floppy_change,
1869 .revalidate_disk= floppy_revalidate,
1870};
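/*
 * Note: media_changed/revalidate_disk are how the change detection above is
 * surfaced to the block layer; check_disk_change() (called from floppy_open()
 * and the FDFLUSH/FDFMTEND ioctls) queries check_floppy_change() and, when a
 * change is reported, ends up invalidating buffers and calling
 * floppy_revalidate().
 */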
1871
1872static struct kobject *floppy_find(dev_t dev, int *part, void *data)
1873{
1874 int drive = *part & 3;
1875 int type = *part >> 2;
1876 if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
1877 return NULL;
1878 *part = 0;
1879 return get_disk(unit[drive].disk);
1880}
1881
1882static int __init atari_floppy_init (void)
1883{
1884 int i;
1885
1886 if (!MACH_IS_ATARI)
1887 /* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
1888 return -ENXIO;
1889
1890 if (MACH_IS_HADES)
1891 /* Hades doesn't have Atari-compatible floppy */
1892 return -ENXIO;
1893
1894 if (register_blkdev(FLOPPY_MAJOR,"fd"))
1895 return -EBUSY;
1896
1897 for (i = 0; i < FD_MAX_UNITS; i++) {
1898 unit[i].disk = alloc_disk(1);
1899 if (!unit[i].disk)
1900 goto Enomem;
1901 }
1902
1903 if (UseTrackbuffer < 0)
1904 /* not set by user -> use default: for now, we turn
1905 track buffering off for all Medusas, though it
1906 could be used with ones that have a counter
1907 card. But the test is too hard :-( */
1908 UseTrackbuffer = !MACH_IS_MEDUSA;
1909
1910 /* initialize variables */
1911 SelectedDrive = -1;
1912 BufferDrive = -1;
1913
1914 DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
1915 if (!DMABuffer) {
1916 printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
1917 goto Enomem;
1918 }
1919 TrackBuffer = DMABuffer + 512;
1920 PhysDMABuffer = virt_to_phys(DMABuffer);
1921 PhysTrackBuffer = virt_to_phys(TrackBuffer);
1922 BufferDrive = BufferSide = BufferTrack = -1;
1923
1924 floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock);
1925 if (!floppy_queue)
1926 goto Enomem;
1927
1928 for (i = 0; i < FD_MAX_UNITS; i++) {
1929 unit[i].track = -1;
1930 unit[i].flags = 0;
1931 unit[i].disk->major = FLOPPY_MAJOR;
1932 unit[i].disk->first_minor = i;
1933 sprintf(unit[i].disk->disk_name, "fd%d", i);
1934 unit[i].disk->fops = &floppy_fops;
1935 unit[i].disk->private_data = &unit[i];
1936 unit[i].disk->queue = floppy_queue;
1937 set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
1938 add_disk(unit[i].disk);
1939 }
1940
1941 blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
1942 floppy_find, NULL, NULL);
1943
1944 printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
1945 DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
1946 UseTrackbuffer ? "" : "no ");
1947 config_types();
1948
1949 return 0;
1950Enomem:
1951 while (i--)
1952 put_disk(unit[i].disk);
1953 if (floppy_queue)
1954 blk_cleanup_queue(floppy_queue);
1955 unregister_blkdev(FLOPPY_MAJOR, "fd");
1956 return -ENOMEM;
1957}
1958
1959
1960void __init atari_floppy_setup( char *str, int *ints )
1961{
1962 int i;
1963
1964 if (ints[0] < 1) {
1965 printk(KERN_ERR "ataflop_setup: no arguments!\n" );
1966 return;
1967 }
1968 else if (ints[0] > 2+FD_MAX_UNITS) {
1969 printk(KERN_ERR "ataflop_setup: too many arguments\n" );
1970 }
1971
1972 if (ints[1] < 0 || ints[1] > 2)
1973 printk(KERN_ERR "ataflop_setup: bad drive type\n" );
1974 else
1975 DriveType = ints[1];
1976
1977 if (ints[0] >= 2)
1978 UseTrackbuffer = (ints[2] > 0);
1979
1980 for( i = 3; i <= ints[0] && i-3 < FD_MAX_UNITS; ++i ) {
1981 if (ints[i] != 2 && ints[i] != 3 && ints[i] != 6 && ints[i] != 12)
1982 printk(KERN_ERR "ataflop_setup: bad steprate\n" );
1983 else
1984 UserSteprate[i-3] = ints[i];
1985 }
1986}
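
/*
 * Illustrative example (the boot-option name itself is wired up by the
 * m68k setup code, which is not shown here): an argument vector parsed
 * as ints[] = { 3, 1, 1, 3 } selects DriveType 1 (HD), enables track
 * buffering, and sets UserSteprate[0] = 3 (the allowed values being
 * 2, 3, 6 and 12) for the first unit.
 */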
1987
1988static void atari_floppy_exit(void)
1989{
1990 int i;
1991 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
1992 for (i = 0; i < FD_MAX_UNITS; i++) {
1993 del_gendisk(unit[i].disk);
1994 put_disk(unit[i].disk);
1995 }
1996 unregister_blkdev(FLOPPY_MAJOR, "fd");
1997
1998 blk_cleanup_queue(floppy_queue);
1999 del_timer_sync(&fd_timer);
2000 atari_stram_free( DMABuffer );
2001}
2002
2003module_init(atari_floppy_init)
2004module_exit(atari_floppy_exit)
2005
2006MODULE_LICENSE("GPL");
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
new file mode 100644
index 000000000000..8f7c1a1ed7f4
--- /dev/null
+++ b/drivers/block/cciss.c
@@ -0,0 +1,2976 @@
1/*
2 * Disk Array driver for HP SA 5xxx and 6xxx Controllers
3 * Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22
23#include <linux/config.h> /* CONFIG_PROC_FS */
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/types.h>
27#include <linux/pci.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/bio.h>
34#include <linux/blkpg.h>
35#include <linux/timer.h>
36#include <linux/proc_fs.h>
37#include <linux/init.h>
38#include <linux/hdreg.h>
39#include <linux/spinlock.h>
40#include <linux/compat.h>
41#include <asm/uaccess.h>
42#include <asm/io.h>
43
44#include <linux/blkdev.h>
45#include <linux/genhd.h>
46#include <linux/completion.h>
47
48#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
49#define DRIVER_NAME "HP CISS Driver (v 2.6.6)"
50#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6)
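/* Example: CCISS_DRIVER_VERSION(2,6,6) packs to (2<<16)|(6<<8)|6 = 0x020606. */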
51
52/* Embedded module documentation macros - see modules.h */
53MODULE_AUTHOR("Hewlett-Packard Company");
54MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6");
55MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
56 " SA6i P600 P800 E400");
57MODULE_LICENSE("GPL");
58
59#include "cciss_cmd.h"
60#include "cciss.h"
61#include <linux/cciss_ioctl.h>
62
63/* define the PCI info for the cards we can control */
64static const struct pci_device_id cciss_pci_device_id[] = {
65 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
66 0x0E11, 0x4070, 0, 0, 0},
67 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
68 0x0E11, 0x4080, 0, 0, 0},
69 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
70 0x0E11, 0x4082, 0, 0, 0},
71 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
72 0x0E11, 0x4083, 0, 0, 0},
73 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
74 0x0E11, 0x409A, 0, 0, 0},
75 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
76 0x0E11, 0x409B, 0, 0, 0},
77 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
78 0x0E11, 0x409C, 0, 0, 0},
79 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
80 0x0E11, 0x409D, 0, 0, 0},
81 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
82 0x0E11, 0x4091, 0, 0, 0},
83 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
84 0x103C, 0x3225, 0, 0, 0},
85 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
86 0x103c, 0x3223, 0, 0, 0},
87 { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
88 0x103c, 0x3231, 0, 0, 0},
89 {0,}
90};
91MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
92
93#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
94
95/* board_id = Subsystem Device ID & Vendor ID
96 * product = Marketing Name for the board
97 * access = Address of the struct of function pointers
98 */
99static struct board_type products[] = {
100 { 0x40700E11, "Smart Array 5300", &SA5_access },
101 { 0x40800E11, "Smart Array 5i", &SA5B_access},
102 { 0x40820E11, "Smart Array 532", &SA5B_access},
103 { 0x40830E11, "Smart Array 5312", &SA5B_access},
104 { 0x409A0E11, "Smart Array 641", &SA5_access},
105 { 0x409B0E11, "Smart Array 642", &SA5_access},
106 { 0x409C0E11, "Smart Array 6400", &SA5_access},
107 { 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
108 { 0x40910E11, "Smart Array 6i", &SA5_access},
109 { 0x3225103C, "Smart Array P600", &SA5_access},
110 { 0x3223103C, "Smart Array P800", &SA5_access},
111 { 0x3231103C, "Smart Array E400", &SA5_access},
112};
113
114/* How long to wait (in milliseconds) for board to go into simple mode */
115#define MAX_CONFIG_WAIT 30000
116#define MAX_IOCTL_CONFIG_WAIT 1000
117
118/* define how many times we will try a command because of bus resets */
119#define MAX_CMD_RETRIES 3
120
121#define READ_AHEAD 1024
122#define NR_CMDS 384 /* #commands that can be outstanding */
123#define MAX_CTLR 32
124
125/* Originally the cciss driver only supported 8 major numbers */
126#define MAX_CTLR_ORIG 8
127
128
129#define CCISS_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
130
131static ctlr_info_t *hba[MAX_CTLR];
132
133static void do_cciss_request(request_queue_t *q);
134static int cciss_open(struct inode *inode, struct file *filep);
135static int cciss_release(struct inode *inode, struct file *filep);
136static int cciss_ioctl(struct inode *inode, struct file *filep,
137 unsigned int cmd, unsigned long arg);
138
139static int revalidate_allvol(ctlr_info_t *host);
140static int cciss_revalidate(struct gendisk *disk);
141static int deregister_disk(struct gendisk *disk);
142static int register_new_disk(ctlr_info_t *h);
143
144static void cciss_getgeometry(int cntl_num);
145
146static void start_io( ctlr_info_t *h);
147static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
148 unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
149 unsigned char *scsi3addr, int cmd_type);
150
151#ifdef CONFIG_PROC_FS
152static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
153 int length, int *eof, void *data);
154static void cciss_procinit(int i);
155#else
156static void cciss_procinit(int i) {}
157#endif /* CONFIG_PROC_FS */
158
159#ifdef CONFIG_COMPAT
160static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
161#endif
162
163static struct block_device_operations cciss_fops = {
164 .owner = THIS_MODULE,
165 .open = cciss_open,
166 .release = cciss_release,
167 .ioctl = cciss_ioctl,
168#ifdef CONFIG_COMPAT
169 .compat_ioctl = cciss_compat_ioctl,
170#endif
171 .revalidate_disk= cciss_revalidate,
172};
173
174/*
175 * Enqueuing and dequeuing functions for cmdlists.
176 */
177static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
178{
179 if (*Qptr == NULL) {
180 *Qptr = c;
181 c->next = c->prev = c;
182 } else {
183 c->prev = (*Qptr)->prev;
184 c->next = (*Qptr);
185 (*Qptr)->prev->next = c;
186 (*Qptr)->prev = c;
187 }
188}
189
190static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
191 CommandList_struct *c)
192{
193 if (c && c->next != c) {
194 if (*Qptr == c) *Qptr = c->next;
195 c->prev->next = c->next;
196 c->next->prev = c->prev;
197 } else {
198 *Qptr = NULL;
199 }
200 return c;
201}
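
/*
 * Usage sketch (illustrative; this is the pattern used by the ioctl and
 * request paths later in this file): commands are queued and issued
 * under the per-controller lock:
 *
 *	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
 *	addQ(&h->reqQ, c);
 *	h->Qdepth++;
 *	start_io(h);
 *	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
 */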
202
203#include "cciss_scsi.c" /* For SCSI tape support */
204
205#ifdef CONFIG_PROC_FS
206
207/*
208 * Report information about this controller.
209 */
210#define ENG_GIG 1000000000
211#define ENG_GIG_FACTOR (ENG_GIG/512)
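/*
 * ENG_GIG_FACTOR is the number of 512-byte sectors per "engineering"
 * gigabyte (10^9 bytes), i.e. 1953125.  Example: a volume of 4194304
 * sectors is reported as 4194304 / 1953125 = 2 with a remainder of
 * 288054 sectors, and (288054 * 100) / 1953125 = 14, so /proc shows
 * "2.14GB".
 */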
212#define RAID_UNKNOWN 6
213static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
214 "UNKNOWN"};
215
216static struct proc_dir_entry *proc_cciss;
217
218static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
219 int length, int *eof, void *data)
220{
221 off_t pos = 0;
222 off_t len = 0;
223 int size, i, ctlr;
224 ctlr_info_t *h = (ctlr_info_t*)data;
225 drive_info_struct *drv;
226 unsigned long flags;
227 sector_t vol_sz, vol_sz_frac;
228
229 ctlr = h->ctlr;
230
231 /* prevent displaying bogus info during configuration
232 * or deconfiguration of a logical volume
233 */
234 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
235 if (h->busy_configuring) {
236 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
237 return -EBUSY;
238 }
239 h->busy_configuring = 1;
240 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
241
242 size = sprintf(buffer, "%s: HP %s Controller\n"
243 "Board ID: 0x%08lx\n"
244 "Firmware Version: %c%c%c%c\n"
245 "IRQ: %d\n"
246 "Logical drives: %d\n"
247 "Current Q depth: %d\n"
248 "Current # commands on controller: %d\n"
249 "Max Q depth since init: %d\n"
250 "Max # commands on controller since init: %d\n"
251 "Max SG entries since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
256 (unsigned int)h->intr,
257 h->num_luns,
258 h->Qdepth, h->commands_outstanding,
259 h->maxQsinceinit, h->max_outstanding, h->maxSG);
260
261 pos += size; len += size;
262 cciss_proc_tape_report(ctlr, buffer, &pos, &len);
263 for(i=0; i<=h->highest_lun; i++) {
264
265 drv = &h->drv[i];
266 if (drv->block_size == 0)
267 continue;
268
269 vol_sz = drv->nr_blocks;
270 vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
271 vol_sz_frac *= 100;
272 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
273
274 if (drv->raid_level > 5)
275 drv->raid_level = RAID_UNKNOWN;
276 size = sprintf(buffer+len, "cciss/c%dd%d:"
277 "\t%4u.%02uGB\tRAID %s\n",
278 ctlr, i, (int)vol_sz, (int)vol_sz_frac,
279 raid_label[drv->raid_level]);
280 pos += size; len += size;
281 }
282
283 *eof = 1;
284 *start = buffer+offset;
285 len -= offset;
286 if (len>length)
287 len = length;
288 h->busy_configuring = 0;
289 return len;
290}
291
292static int
293cciss_proc_write(struct file *file, const char __user *buffer,
294 unsigned long count, void *data)
295{
296 unsigned char cmd[80];
297 int len;
298#ifdef CONFIG_CISS_SCSI_TAPE
299 ctlr_info_t *h = (ctlr_info_t *) data;
300 int rc;
301#endif
302
303 if (count > sizeof(cmd)-1) return -EINVAL;
304 if (copy_from_user(cmd, buffer, count)) return -EFAULT;
305 cmd[count] = '\0';
306 len = strlen(cmd); // above 3 lines ensure safety
307 if (len && cmd[len-1] == '\n')
308 cmd[--len] = '\0';
309# ifdef CONFIG_CISS_SCSI_TAPE
310 if (strcmp("engage scsi", cmd)==0) {
311 rc = cciss_engage_scsi(h->ctlr);
312 if (rc != 0) return -rc;
313 return count;
314 }
315 /* might be nice to have "disengage" too, but it's not
316 safely possible. (only 1 module use count, lock issues.) */
317# endif
318 return -EINVAL;
319}
320
321/*
322 * Get us a file in /proc/cciss that says something about each controller.
323 * Create /proc/cciss if it doesn't exist yet.
324 */
325static void __devinit cciss_procinit(int i)
326{
327 struct proc_dir_entry *pde;
328
329 if (proc_cciss == NULL) {
330 proc_cciss = proc_mkdir("cciss", proc_root_driver);
331 if (!proc_cciss)
332 return;
333 }
334
335 pde = create_proc_read_entry(hba[i]->devname,
336 S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
337 proc_cciss, cciss_proc_get_info, hba[i]);
338 pde->write_proc = cciss_proc_write;
339}
340#endif /* CONFIG_PROC_FS */
341
342/*
343 * For operations that cannot sleep, a command block is allocated at init,
344 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
345 * which ones are free or in use. For operations that can wait for kmalloc
346 * to possibly sleep, this routine can be called with get_from_pool set to 0.
347 * cmd_free() MUST use the matching got_from_pool value (see sketch below).
348 */
349static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
350{
351 CommandList_struct *c;
352 int i;
353 u64bit temp64;
354 dma_addr_t cmd_dma_handle, err_dma_handle;
355
356 if (!get_from_pool)
357 {
358 c = (CommandList_struct *) pci_alloc_consistent(
359 h->pdev, sizeof(CommandList_struct), &cmd_dma_handle);
360 if(c==NULL)
361 return NULL;
362 memset(c, 0, sizeof(CommandList_struct));
363
364 c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
365 h->pdev, sizeof(ErrorInfo_struct),
366 &err_dma_handle);
367
368 if (c->err_info == NULL)
369 {
370 pci_free_consistent(h->pdev,
371 sizeof(CommandList_struct), c, cmd_dma_handle);
372 return NULL;
373 }
374 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
375 } else /* get it out of the controller's pool */
376 {
377 do {
378 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
379 if (i == NR_CMDS)
380 return NULL;
381 } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
382#ifdef CCISS_DEBUG
383 printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
384#endif
385 c = h->cmd_pool + i;
386 memset(c, 0, sizeof(CommandList_struct));
387 cmd_dma_handle = h->cmd_pool_dhandle
388 + i*sizeof(CommandList_struct);
389 c->err_info = h->errinfo_pool + i;
390 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
391 err_dma_handle = h->errinfo_pool_dhandle
392 + i*sizeof(ErrorInfo_struct);
393 h->nr_allocs++;
394 }
395
396 c->busaddr = (__u32) cmd_dma_handle;
397 temp64.val = (__u64) err_dma_handle;
398 c->ErrDesc.Addr.lower = temp64.val32.lower;
399 c->ErrDesc.Addr.upper = temp64.val32.upper;
400 c->ErrDesc.Len = sizeof(ErrorInfo_struct);
401
402 c->ctlr = h->ctlr;
403 return c;
404
405
406}
407
408/*
409 * Frees a command block that was previously allocated with cmd_alloc().
410 */
411static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
412{
413 int i;
414 u64bit temp64;
415
416 if( !got_from_pool)
417 {
418 temp64.val32.lower = c->ErrDesc.Addr.lower;
419 temp64.val32.upper = c->ErrDesc.Addr.upper;
420 pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
421 c->err_info, (dma_addr_t) temp64.val);
422 pci_free_consistent(h->pdev, sizeof(CommandList_struct),
423 c, (dma_addr_t) c->busaddr);
424 } else
425 {
426 i = c - h->cmd_pool;
427 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
428 h->nr_frees++;
429 }
430}
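
/*
 * Usage sketch (illustrative only): the get_from_pool/got_from_pool flags
 * must match between allocation and free.  Contexts that can wait use the
 * dynamically allocated path (flag 0); paths that cannot sleep use the
 * preallocated pool (flag 1), as sendcmd() does at init time:
 *
 *	CommandList_struct *c = cmd_alloc(h, 0);
 *	if (c == NULL)
 *		return -ENOMEM;
 *	... fill in and issue the command ...
 *	cmd_free(h, c, 0);
 */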
431
432static inline ctlr_info_t *get_host(struct gendisk *disk)
433{
434 return disk->queue->queuedata;
435}
436
437static inline drive_info_struct *get_drv(struct gendisk *disk)
438{
439 return disk->private_data;
440}
441
442/*
443 * Open. Make sure the device is really there.
444 */
445static int cciss_open(struct inode *inode, struct file *filep)
446{
447 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
448 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
449
450#ifdef CCISS_DEBUG
451 printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
452#endif /* CCISS_DEBUG */
453
454 /*
455 * Root is allowed to open raw volume zero even if it's not configured
456 * so array config can still work. Root is also allowed to open any
457 * volume that has a LUN ID, so it can issue IOCTL to reread the
458 * disk information. I don't think I really like this,
459 * but I'm already using way too many device nodes to claim another one
460 * for "raw controller".
461 */
462 if (drv->nr_blocks == 0) {
463 if (iminor(inode) != 0) { /* not node 0? */
464 /* if not node 0 make sure it is a partition = 0 */
465 if (iminor(inode) & 0x0f) {
466 return -ENXIO;
467 /* if it is, make sure we have a LUN ID */
468 } else if (drv->LunID == 0) {
469 return -ENXIO;
470 }
471 }
472 if (!capable(CAP_SYS_ADMIN))
473 return -EPERM;
474 }
475 drv->usage_count++;
476 host->usage_count++;
477 return 0;
478}
479/*
480 * Close. Sync first.
481 */
482static int cciss_release(struct inode *inode, struct file *filep)
483{
484 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
485 drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
486
487#ifdef CCISS_DEBUG
488 printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
489#endif /* CCISS_DEBUG */
490
491 drv->usage_count--;
492 host->usage_count--;
493 return 0;
494}
495
496#ifdef CONFIG_COMPAT
497
498static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
499{
500 int ret;
501 lock_kernel();
502 ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
503 unlock_kernel();
504 return ret;
505}
506
507static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
508static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
509
510static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
511{
512 switch (cmd) {
513 case CCISS_GETPCIINFO:
514 case CCISS_GETINTINFO:
515 case CCISS_SETINTINFO:
516 case CCISS_GETNODENAME:
517 case CCISS_SETNODENAME:
518 case CCISS_GETHEARTBEAT:
519 case CCISS_GETBUSTYPES:
520 case CCISS_GETFIRMVER:
521 case CCISS_GETDRIVVER:
522 case CCISS_REVALIDVOLS:
523 case CCISS_DEREGDISK:
524 case CCISS_REGNEWDISK:
525 case CCISS_REGNEWD:
526 case CCISS_RESCANDISK:
527 case CCISS_GETLUNINFO:
528 return do_ioctl(f, cmd, arg);
529
530 case CCISS_PASSTHRU32:
531 return cciss_ioctl32_passthru(f, cmd, arg);
532 case CCISS_BIG_PASSTHRU32:
533 return cciss_ioctl32_big_passthru(f, cmd, arg);
534
535 default:
536 return -ENOIOCTLCMD;
537 }
538}
539
540static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
541{
542 IOCTL32_Command_struct __user *arg32 =
543 (IOCTL32_Command_struct __user *) arg;
544 IOCTL_Command_struct arg64;
545 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
546 int err;
547 u32 cp;
548
549 err = 0;
550 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
551 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
552 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
553 err |= get_user(arg64.buf_size, &arg32->buf_size);
554 err |= get_user(cp, &arg32->buf);
555 arg64.buf = compat_ptr(cp);
556 err |= copy_to_user(p, &arg64, sizeof(arg64));
557
558 if (err)
559 return -EFAULT;
560
561 err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
562 if (err)
563 return err;
564 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
565 if (err)
566 return -EFAULT;
567 return err;
568}
569
570static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
571{
572 BIG_IOCTL32_Command_struct __user *arg32 =
573 (BIG_IOCTL32_Command_struct __user *) arg;
574 BIG_IOCTL_Command_struct arg64;
575 BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
576 int err;
577 u32 cp;
578
579 err = 0;
580 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
581 err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
582 err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
583 err |= get_user(arg64.buf_size, &arg32->buf_size);
584 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
585 err |= get_user(cp, &arg32->buf);
586 arg64.buf = compat_ptr(cp);
587 err |= copy_to_user(p, &arg64, sizeof(arg64));
588
589 if (err)
590 return -EFAULT;
591
592 err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
593 if (err)
594 return err;
595 err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
596 if (err)
597 return -EFAULT;
598 return err;
599}
600#endif
601/*
602 * ioctl
603 */
604static int cciss_ioctl(struct inode *inode, struct file *filep,
605 unsigned int cmd, unsigned long arg)
606{
607 struct block_device *bdev = inode->i_bdev;
608 struct gendisk *disk = bdev->bd_disk;
609 ctlr_info_t *host = get_host(disk);
610 drive_info_struct *drv = get_drv(disk);
611 int ctlr = host->ctlr;
612 void __user *argp = (void __user *)arg;
613
614#ifdef CCISS_DEBUG
615 printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
616#endif /* CCISS_DEBUG */
617
618 switch(cmd) {
619 case HDIO_GETGEO:
620 {
621 struct hd_geometry driver_geo;
622 if (drv->cylinders) {
623 driver_geo.heads = drv->heads;
624 driver_geo.sectors = drv->sectors;
625 driver_geo.cylinders = drv->cylinders;
626 } else
627 return -ENXIO;
628 driver_geo.start= get_start_sect(inode->i_bdev);
629 if (copy_to_user(argp, &driver_geo, sizeof(struct hd_geometry)))
630 return -EFAULT;
631 return(0);
632 }
633
634 case CCISS_GETPCIINFO:
635 {
636 cciss_pci_info_struct pciinfo;
637
638 if (!arg) return -EINVAL;
639 pciinfo.bus = host->pdev->bus->number;
640 pciinfo.dev_fn = host->pdev->devfn;
641 pciinfo.board_id = host->board_id;
642 if (copy_to_user(argp, &pciinfo, sizeof( cciss_pci_info_struct )))
643 return -EFAULT;
644 return(0);
645 }
646 case CCISS_GETINTINFO:
647 {
648 cciss_coalint_struct intinfo;
649 if (!arg) return -EINVAL;
650 intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
651 intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
652 if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
653 return -EFAULT;
654 return(0);
655 }
656 case CCISS_SETINTINFO:
657 {
658 cciss_coalint_struct intinfo;
659 unsigned long flags;
660 int i;
661
662 if (!arg) return -EINVAL;
663 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
664 if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
665 return -EFAULT;
666 if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
667
668 {
669// printk("cciss_ioctl: delay and count cannot be 0\n");
670 return( -EINVAL);
671 }
672 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
673 /* Update the field, and then ring the doorbell */
674 writel( intinfo.delay,
675 &(host->cfgtable->HostWrite.CoalIntDelay));
676 writel( intinfo.count,
677 &(host->cfgtable->HostWrite.CoalIntCount));
678 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
679
680 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
681 if (!(readl(host->vaddr + SA5_DOORBELL)
682 & CFGTBL_ChangeReq))
683 break;
684 /* delay and try again */
685 udelay(1000);
686 }
687 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
688 if (i >= MAX_IOCTL_CONFIG_WAIT)
689 return -EAGAIN;
690 return(0);
691 }
692 case CCISS_GETNODENAME:
693 {
694 NodeName_type NodeName;
695 int i;
696
697 if (!arg) return -EINVAL;
698 for(i=0;i<16;i++)
699 NodeName[i] = readb(&host->cfgtable->ServerName[i]);
700 if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
701 return -EFAULT;
702 return(0);
703 }
704 case CCISS_SETNODENAME:
705 {
706 NodeName_type NodeName;
707 unsigned long flags;
708 int i;
709
710 if (!arg) return -EINVAL;
711 if (!capable(CAP_SYS_ADMIN)) return -EPERM;
712
713 if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
714 return -EFAULT;
715
716 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
717
718 /* Update the field, and then ring the doorbell */
719 for(i=0;i<16;i++)
720 writeb( NodeName[i], &host->cfgtable->ServerName[i]);
721
722 writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
723
724 for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
725 if (!(readl(host->vaddr + SA5_DOORBELL)
726 & CFGTBL_ChangeReq))
727 break;
728 /* delay and try again */
729 udelay(1000);
730 }
731 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
732 if (i >= MAX_IOCTL_CONFIG_WAIT)
733 return -EAGAIN;
734 return(0);
735 }
736
737 case CCISS_GETHEARTBEAT:
738 {
739 Heartbeat_type heartbeat;
740
741 if (!arg) return -EINVAL;
742 heartbeat = readl(&host->cfgtable->HeartBeat);
743 if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
744 return -EFAULT;
745 return(0);
746 }
747 case CCISS_GETBUSTYPES:
748 {
749 BusTypes_type BusTypes;
750
751 if (!arg) return -EINVAL;
752 BusTypes = readl(&host->cfgtable->BusTypes);
753 if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
754 return -EFAULT;
755 return(0);
756 }
757 case CCISS_GETFIRMVER:
758 {
759 FirmwareVer_type firmware;
760
761 if (!arg) return -EINVAL;
762 memcpy(firmware, host->firm_ver, 4);
763
764 if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
765 return -EFAULT;
766 return(0);
767 }
768 case CCISS_GETDRIVVER:
769 {
770 DriverVer_type DriverVer = DRIVER_VERSION;
771
772 if (!arg) return -EINVAL;
773
774 if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
775 return -EFAULT;
776 return(0);
777 }
778
779 case CCISS_REVALIDVOLS:
780 if (bdev != bdev->bd_contains || drv != host->drv)
781 return -ENXIO;
782 return revalidate_allvol(host);
783
784 case CCISS_GETLUNINFO: {
785 LogvolInfo_struct luninfo;
786 int i;
787
788 luninfo.LunID = drv->LunID;
789 luninfo.num_opens = drv->usage_count;
790 luninfo.num_parts = 0;
791 /* count partitions 1 to 15 with sizes > 0 */
792 for (i = 0; i < MAX_PART - 1; i++) {
793 if (!disk->part[i])
794 continue;
795 if (disk->part[i]->nr_sects != 0)
796 luninfo.num_parts++;
797 }
798 if (copy_to_user(argp, &luninfo,
799 sizeof(LogvolInfo_struct)))
800 return -EFAULT;
801 return(0);
802 }
803 case CCISS_DEREGDISK:
804 return deregister_disk(disk);
805
806 case CCISS_REGNEWD:
807 return register_new_disk(host);
808
809 case CCISS_PASSTHRU:
810 {
811 IOCTL_Command_struct iocommand;
812 CommandList_struct *c;
813 char *buff = NULL;
814 u64bit temp64;
815 unsigned long flags;
816 DECLARE_COMPLETION(wait);
817
818 if (!arg) return -EINVAL;
819
820 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
821
822 if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
823 return -EFAULT;
824 if((iocommand.buf_size < 1) &&
825 (iocommand.Request.Type.Direction != XFER_NONE))
826 {
827 return -EINVAL;
828 }
829#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
830 /* Check kmalloc limits */
831 if(iocommand.buf_size > 128000)
832 return -EINVAL;
833#endif
834 if(iocommand.buf_size > 0)
835 {
836 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
837 if( buff == NULL)
838 return -EFAULT;
839 }
840 if (iocommand.Request.Type.Direction == XFER_WRITE)
841 {
842 /* Copy the data into the buffer we created */
843 if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
844 {
845 kfree(buff);
846 return -EFAULT;
847 }
848 } else {
849 memset(buff, 0, iocommand.buf_size);
850 }
851 if ((c = cmd_alloc(host , 0)) == NULL)
852 {
853 kfree(buff);
854 return -ENOMEM;
855 }
856 // Fill in the command type
857 c->cmd_type = CMD_IOCTL_PEND;
858 // Fill in Command Header
859 c->Header.ReplyQueue = 0; // unused in simple mode
860 if( iocommand.buf_size > 0) // buffer to fill
861 {
862 c->Header.SGList = 1;
863 c->Header.SGTotal= 1;
864 } else // no buffers to fill
865 {
866 c->Header.SGList = 0;
867 c->Header.SGTotal= 0;
868 }
869 c->Header.LUN = iocommand.LUN_info;
870 c->Header.Tag.lower = c->busaddr; // use the kernel address of the cmd block for tag
871
872 // Fill in Request block
873 c->Request = iocommand.Request;
874
875 // Fill in the scatter gather information
876 if (iocommand.buf_size > 0 )
877 {
878 temp64.val = pci_map_single( host->pdev, buff,
879 iocommand.buf_size,
880 PCI_DMA_BIDIRECTIONAL);
881 c->SG[0].Addr.lower = temp64.val32.lower;
882 c->SG[0].Addr.upper = temp64.val32.upper;
883 c->SG[0].Len = iocommand.buf_size;
884 c->SG[0].Ext = 0; // we are not chaining
885 }
886 c->waiting = &wait;
887
888 /* Put the request on the tail of the request queue */
889 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
890 addQ(&host->reqQ, c);
891 host->Qdepth++;
892 start_io(host);
893 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
894
895 wait_for_completion(&wait);
896
897 /* unlock the buffers from DMA */
898 temp64.val32.lower = c->SG[0].Addr.lower;
899 temp64.val32.upper = c->SG[0].Addr.upper;
900 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
901 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
902
903 /* Copy the error information out */
904 iocommand.error_info = *(c->err_info);
905 if ( copy_to_user(argp, &iocommand, sizeof( IOCTL_Command_struct) ) )
906 {
907 kfree(buff);
908 cmd_free(host, c, 0);
909 return( -EFAULT);
910 }
911
912 if (iocommand.Request.Type.Direction == XFER_READ)
913 {
914 /* Copy the data out of the buffer we created */
915 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
916 {
917 kfree(buff);
918 cmd_free(host, c, 0);
919 return -EFAULT;
920 }
921 }
922 kfree(buff);
923 cmd_free(host, c, 0);
924 return(0);
925 }
926 case CCISS_BIG_PASSTHRU: {
927 BIG_IOCTL_Command_struct *ioc;
928 CommandList_struct *c;
929 unsigned char **buff = NULL;
930 int *buff_size = NULL;
931 u64bit temp64;
932 unsigned long flags;
933 BYTE sg_used = 0;
934 int status = 0;
935 int i;
936 DECLARE_COMPLETION(wait);
937 __u32 left;
938 __u32 sz;
939 BYTE __user *data_ptr;
940
941 if (!arg)
942 return -EINVAL;
943 if (!capable(CAP_SYS_RAWIO))
944 return -EPERM;
945 ioc = (BIG_IOCTL_Command_struct *)
946 kmalloc(sizeof(*ioc), GFP_KERNEL);
947 if (!ioc) {
948 status = -ENOMEM;
949 goto cleanup1;
950 }
951 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
952 status = -EFAULT;
953 goto cleanup1;
954 }
955 if ((ioc->buf_size < 1) &&
956 (ioc->Request.Type.Direction != XFER_NONE)) {
957 status = -EINVAL;
958 goto cleanup1;
959 }
960 /* Check kmalloc limits using all SGs */
961 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
962 status = -EINVAL;
963 goto cleanup1;
964 }
965 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
966 status = -EINVAL;
967 goto cleanup1;
968 }
969 buff = (unsigned char **) kmalloc(MAXSGENTRIES *
970 sizeof(char *), GFP_KERNEL);
971 if (!buff) {
972 status = -ENOMEM;
973 goto cleanup1;
974 }
975 memset(buff, 0, MAXSGENTRIES * sizeof(char *));
976 buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int),
977 GFP_KERNEL);
978 if (!buff_size) {
979 status = -ENOMEM;
980 goto cleanup1;
981 }
982 left = ioc->buf_size;
983 data_ptr = ioc->buf;
984 while (left) {
985 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
986 buff_size[sg_used] = sz;
987 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
988 if (buff[sg_used] == NULL) {
989 status = -ENOMEM;
990 goto cleanup1;
991 }
992 if (ioc->Request.Type.Direction != XFER_WRITE) {
993 memset(buff[sg_used], 0, sz);
994 } else if (copy_from_user(buff[sg_used], data_ptr, sz)) {
995 /* copying the user's write data failed */
996 status = -EFAULT;
997 goto cleanup1;
998 }
999 left -= sz;
1000 data_ptr += sz;
1001 sg_used++;
1002 }
1003 if ((c = cmd_alloc(host , 0)) == NULL) {
1004 status = -ENOMEM;
1005 goto cleanup1;
1006 }
1007 c->cmd_type = CMD_IOCTL_PEND;
1008 c->Header.ReplyQueue = 0;
1009
1010 if( ioc->buf_size > 0) {
1011 c->Header.SGList = sg_used;
1012 c->Header.SGTotal= sg_used;
1013 } else {
1014 c->Header.SGList = 0;
1015 c->Header.SGTotal= 0;
1016 }
1017 c->Header.LUN = ioc->LUN_info;
1018 c->Header.Tag.lower = c->busaddr;
1019
1020 c->Request = ioc->Request;
1021 if (ioc->buf_size > 0 ) {
1022 int i;
1023 for(i=0; i<sg_used; i++) {
1024 temp64.val = pci_map_single( host->pdev, buff[i],
1025 buff_size[i],
1026 PCI_DMA_BIDIRECTIONAL);
1027 c->SG[i].Addr.lower = temp64.val32.lower;
1028 c->SG[i].Addr.upper = temp64.val32.upper;
1029 c->SG[i].Len = buff_size[i];
1030 c->SG[i].Ext = 0; /* we are not chaining */
1031 }
1032 }
1033 c->waiting = &wait;
1034 /* Put the request on the tail of the request queue */
1035 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1036 addQ(&host->reqQ, c);
1037 host->Qdepth++;
1038 start_io(host);
1039 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1040 wait_for_completion(&wait);
1041 /* unlock the buffers from DMA */
1042 for(i=0; i<sg_used; i++) {
1043 temp64.val32.lower = c->SG[i].Addr.lower;
1044 temp64.val32.upper = c->SG[i].Addr.upper;
1045 pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
1046 buff_size[i], PCI_DMA_BIDIRECTIONAL);
1047 }
1048 /* Copy the error information out */
1049 ioc->error_info = *(c->err_info);
1050 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
1051 cmd_free(host, c, 0);
1052 status = -EFAULT;
1053 goto cleanup1;
1054 }
1055 if (ioc->Request.Type.Direction == XFER_READ) {
1056 /* Copy the data out of the buffer we created */
1057 BYTE __user *ptr = ioc->buf;
1058 for(i=0; i< sg_used; i++) {
1059 if (copy_to_user(ptr, buff[i], buff_size[i])) {
1060 cmd_free(host, c, 0);
1061 status = -EFAULT;
1062 goto cleanup1;
1063 }
1064 ptr += buff_size[i];
1065 }
1066 }
1067 cmd_free(host, c, 0);
1068 status = 0;
1069cleanup1:
1070 if (buff) {
1071 for(i=0; i<sg_used; i++)
1072 if(buff[i] != NULL)
1073 kfree(buff[i]);
1074 kfree(buff);
1075 }
1076 if (buff_size)
1077 kfree(buff_size);
1078 if (ioc)
1079 kfree(ioc);
1080 return(status);
1081 }
1082 default:
1083 return -ENOTTY;
1084 }
1085
1086}
1087
1088/*
1089 * revalidate_allvol is for online array config utilities. After a
1090 * utility reconfigures the drives in the array, it can use this function
1091 * (through an ioctl) to make the driver zap any previous disk structs for
1092 * that controller and get new ones.
1093 *
1094 * Right now I'm using the getgeometry() function to do this, but this
1095 * function should probably be finer grained and allow you to revalidate one
1096 * particular logical volume (instead of all of them on a particular
1097 * controller).
1098 */
1099static int revalidate_allvol(ctlr_info_t *host)
1100{
1101 int ctlr = host->ctlr, i;
1102 unsigned long flags;
1103
1104 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1105 if (host->usage_count > 1) {
1106 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1107 printk(KERN_WARNING "cciss: Device busy for volume"
1108 " revalidation (usage=%d)\n", host->usage_count);
1109 return -EBUSY;
1110 }
1111 host->usage_count++;
1112 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1113
1114 for(i=0; i< NWD; i++) {
1115 struct gendisk *disk = host->gendisk[i];
1116 if (disk->flags & GENHD_FL_UP)
1117 del_gendisk(disk);
1118 }
1119
1120 /*
1121 * Set the partition and block size structures for all volumes
1122 * on this controller to zero. We will reread all of this data
1123 */
1124 memset(host->drv, 0, sizeof(drive_info_struct)
1125 * CISS_MAX_LUN);
1126 /*
1127 * Tell the array controller not to give us any interrupts while
1128 * we check the new geometry. Then turn interrupts back on when
1129 * we're done.
1130 */
1131 host->access.set_intr_mask(host, CCISS_INTR_OFF);
1132 cciss_getgeometry(ctlr);
1133 host->access.set_intr_mask(host, CCISS_INTR_ON);
1134
1135 /* Loop through each real device */
1136 for (i = 0; i < NWD; i++) {
1137 struct gendisk *disk = host->gendisk[i];
1138 drive_info_struct *drv = &(host->drv[i]);
1139 /* we must register the controller even if no disks exist */
1140 /* this is for the online array utilities */
1141 if (!drv->heads && i)
1142 continue;
1143 blk_queue_hardsect_size(host->queue, drv->block_size);
1144 set_capacity(disk, drv->nr_blocks);
1145 add_disk(disk);
1146 }
1147 host->usage_count--;
1148 return 0;
1149}
1150
1151static int deregister_disk(struct gendisk *disk)
1152{
1153 unsigned long flags;
1154 ctlr_info_t *h = get_host(disk);
1155 drive_info_struct *drv = get_drv(disk);
1156 int ctlr = h->ctlr;
1157
1158 if (!capable(CAP_SYS_RAWIO))
1159 return -EPERM;
1160
1161 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1162 /* make sure logical volume is NOT in use */
1163 if( drv->usage_count > 1) {
1164 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1165 return -EBUSY;
1166 }
1167 drv->usage_count++;
1168 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1169
1170 /* invalidate the devices and deregister the disk */
1171 if (disk->flags & GENHD_FL_UP)
1172 del_gendisk(disk);
1173 /* check to see if it was the last disk */
1174 if (drv == h->drv + h->highest_lun) {
1175 /* if so, find the new highest lun */
1176 int i, newhighest =-1;
1177 for(i=0; i<h->highest_lun; i++) {
1178 /* if the disk has size > 0, it is available */
1179 if (h->drv[i].nr_blocks)
1180 newhighest = i;
1181 }
1182 h->highest_lun = newhighest;
1183
1184 }
1185 --h->num_luns;
1186 /* zero out the disk size info */
1187 drv->nr_blocks = 0;
1188 drv->block_size = 0;
1189 drv->cylinders = 0;
1190 drv->LunID = 0;
1191 return(0);
1192}
1193static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
1194 size_t size,
1195 unsigned int use_unit_num, /* 0: address the controller,
1196 1: address logical volume log_unit,
1197 2: periph device address is scsi3addr */
1198 unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
1199 int cmd_type)
1200{
1201 ctlr_info_t *h= hba[ctlr];
1202 u64bit buff_dma_handle;
1203 int status = IO_OK;
1204
1205 c->cmd_type = CMD_IOCTL_PEND;
1206 c->Header.ReplyQueue = 0;
1207 if( buff != NULL) {
1208 c->Header.SGList = 1;
1209 c->Header.SGTotal= 1;
1210 } else {
1211 c->Header.SGList = 0;
1212 c->Header.SGTotal= 0;
1213 }
1214 c->Header.Tag.lower = c->busaddr;
1215
1216 c->Request.Type.Type = cmd_type;
1217 if (cmd_type == TYPE_CMD) {
1218 switch(cmd) {
1219 case CISS_INQUIRY:
1220 /* If use_unit_num == 0, this is addressed to the
1221 controller, so it's a physical command:
1222 mode = 0, target = 0, and there is nothing to write.
1223 Otherwise, if use_unit_num == 1,
1224 mode = 1 (volume set addressing), target = LunID;
1225 if use_unit_num == 2,
1226 mode = 0 (periph dev addressing), target = scsi3addr */
1227 if (use_unit_num == 1) {
1228 c->Header.LUN.LogDev.VolId=
1229 h->drv[log_unit].LunID;
1230 c->Header.LUN.LogDev.Mode = 1;
1231 } else if (use_unit_num == 2) {
1232 memcpy(c->Header.LUN.LunAddrBytes,scsi3addr,8);
1233 c->Header.LUN.LogDev.Mode = 0;
1234 }
1235 /* are we trying to read a vital product page */
1236 if(page_code != 0) {
1237 c->Request.CDB[1] = 0x01;
1238 c->Request.CDB[2] = page_code;
1239 }
1240 c->Request.CDBLen = 6;
1241 c->Request.Type.Attribute = ATTR_SIMPLE;
1242 c->Request.Type.Direction = XFER_READ;
1243 c->Request.Timeout = 0;
1244 c->Request.CDB[0] = CISS_INQUIRY;
1245 c->Request.CDB[4] = size & 0xFF;
1246 break;
1247 case CISS_REPORT_LOG:
1248 case CISS_REPORT_PHYS:
1249 /* Talking to the controller, so it's a physical command:
1250 mode = 00, target = 0. Nothing to write.
1251 */
1252 c->Request.CDBLen = 12;
1253 c->Request.Type.Attribute = ATTR_SIMPLE;
1254 c->Request.Type.Direction = XFER_READ;
1255 c->Request.Timeout = 0;
1256 c->Request.CDB[0] = cmd;
1257 c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB
1258 c->Request.CDB[7] = (size >> 16) & 0xFF;
1259 c->Request.CDB[8] = (size >> 8) & 0xFF;
1260 c->Request.CDB[9] = size & 0xFF;
1261 break;
1262
1263 case CCISS_READ_CAPACITY:
1264 c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
1265 c->Header.LUN.LogDev.Mode = 1;
1266 c->Request.CDBLen = 10;
1267 c->Request.Type.Attribute = ATTR_SIMPLE;
1268 c->Request.Type.Direction = XFER_READ;
1269 c->Request.Timeout = 0;
1270 c->Request.CDB[0] = cmd;
1271 break;
1272 case CCISS_CACHE_FLUSH:
1273 c->Request.CDBLen = 12;
1274 c->Request.Type.Attribute = ATTR_SIMPLE;
1275 c->Request.Type.Direction = XFER_WRITE;
1276 c->Request.Timeout = 0;
1277 c->Request.CDB[0] = BMIC_WRITE;
1278 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
1279 break;
1280 default:
1281 printk(KERN_WARNING
1282 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd);
1283 return(IO_ERROR);
1284 }
1285 } else if (cmd_type == TYPE_MSG) {
1286 switch (cmd) {
1287 case 3: /* No-Op message */
1288 c->Request.CDBLen = 1;
1289 c->Request.Type.Attribute = ATTR_SIMPLE;
1290 c->Request.Type.Direction = XFER_WRITE;
1291 c->Request.Timeout = 0;
1292 c->Request.CDB[0] = cmd;
1293 break;
1294 default:
1295 printk(KERN_WARNING
1296 "cciss%d: unknown message type %d\n",
1297 ctlr, cmd);
1298 return IO_ERROR;
1299 }
1300 } else {
1301 printk(KERN_WARNING
1302 "cciss%d: unknown command type %d\n", ctlr, cmd_type);
1303 return IO_ERROR;
1304 }
1305 /* Fill in the scatter gather information */
1306 if (size > 0) {
1307 buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
1308 buff, size, PCI_DMA_BIDIRECTIONAL);
1309 c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
1310 c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
1311 c->SG[0].Len = size;
1312 c->SG[0].Ext = 0; /* we are not chaining */
1313 }
1314 return status;
1315}
1316static int sendcmd_withirq(__u8 cmd,
1317 int ctlr,
1318 void *buff,
1319 size_t size,
1320 unsigned int use_unit_num,
1321 unsigned int log_unit,
1322 __u8 page_code,
1323 int cmd_type)
1324{
1325 ctlr_info_t *h = hba[ctlr];
1326 CommandList_struct *c;
1327 u64bit buff_dma_handle;
1328 unsigned long flags;
1329 int return_status;
1330 DECLARE_COMPLETION(wait);
1331
1332 if ((c = cmd_alloc(h , 0)) == NULL)
1333 return -ENOMEM;
1334 return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1335 log_unit, page_code, NULL, cmd_type);
1336 if (return_status != IO_OK) {
1337 cmd_free(h, c, 0);
1338 return return_status;
1339 }
1340resend_cmd2:
1341 c->waiting = &wait;
1342
1343 /* Put the request on the tail of the queue and send it */
1344 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1345 addQ(&h->reqQ, c);
1346 h->Qdepth++;
1347 start_io(h);
1348 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1349
1350 wait_for_completion(&wait);
1351
1352 if(c->err_info->CommandStatus != 0)
1353 { /* an error has occurred */
1354 switch(c->err_info->CommandStatus)
1355 {
1356 case CMD_TARGET_STATUS:
1357 printk(KERN_WARNING "cciss: cmd %p has "
1358 " completed with errors\n", c);
1359 if( c->err_info->ScsiStatus)
1360 {
1361 printk(KERN_WARNING "cciss: cmd %p "
1362 "has SCSI Status = %x\n",
1363 c,
1364 c->err_info->ScsiStatus);
1365 }
1366
1367 break;
1368 case CMD_DATA_UNDERRUN:
1369 case CMD_DATA_OVERRUN:
1370 /* expected for inquiry and report lun commands */
1371 break;
1372 case CMD_INVALID:
1373 printk(KERN_WARNING "cciss: Cmd %p is "
1374 "reported invalid\n", c);
1375 return_status = IO_ERROR;
1376 break;
1377 case CMD_PROTOCOL_ERR:
1378 printk(KERN_WARNING "cciss: cmd %p has "
1379 "protocol error \n", c);
1380 return_status = IO_ERROR;
1381 break;
1382 case CMD_HARDWARE_ERR:
1383 printk(KERN_WARNING "cciss: cmd %p had "
1384 "hardware error\n", c);
1385 return_status = IO_ERROR;
1386 break;
1387 case CMD_CONNECTION_LOST:
1388 printk(KERN_WARNING "cciss: cmd %p had "
1389 "connection lost\n", c);
1390 return_status = IO_ERROR;
1391 break;
1392 case CMD_ABORTED:
1393 printk(KERN_WARNING "cciss: cmd %p was "
1394 "aborted\n", c);
1395 return_status = IO_ERROR;
1396 break;
1397 case CMD_ABORT_FAILED:
1398 printk(KERN_WARNING "cciss: cmd %p reports "
1399 "abort failed\n", c);
1400 return_status = IO_ERROR;
1401 break;
1402 case CMD_UNSOLICITED_ABORT:
1403 printk(KERN_WARNING
1404 "cciss%d: unsolicited abort %p\n",
1405 ctlr, c);
1406 if (c->retry_count < MAX_CMD_RETRIES) {
1407 printk(KERN_WARNING
1408 "cciss%d: retrying %p\n",
1409 ctlr, c);
1410 c->retry_count++;
1411 /* erase the old error information */
1412 memset(c->err_info, 0,
1413 sizeof(ErrorInfo_struct));
1414 return_status = IO_OK;
1415 INIT_COMPLETION(wait);
1416 goto resend_cmd2;
1417 }
1418 return_status = IO_ERROR;
1419 break;
1420 default:
1421 printk(KERN_WARNING "cciss: cmd %p returned "
1422 "unknown status %x\n", c,
1423 c->err_info->CommandStatus);
1424 return_status = IO_ERROR;
1425 }
1426 }
1427 /* unlock the buffers from DMA */
1428 pci_unmap_single( h->pdev, (dma_addr_t) buff_dma_handle.val,
1429 size, PCI_DMA_BIDIRECTIONAL);
1430 cmd_free(h, c, 0);
1431 return(return_status);
1432
1433}
1434static void cciss_geometry_inquiry(int ctlr, int logvol,
1435 int withirq, unsigned int total_size,
1436 unsigned int block_size, InquiryData_struct *inq_buff,
1437 drive_info_struct *drv)
1438{
1439 int return_code;
1440 memset(inq_buff, 0, sizeof(InquiryData_struct));
1441 if (withirq)
1442 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
1443 inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
1444 else
1445 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
1446 sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
1447 if (return_code == IO_OK) {
1448 if(inq_buff->data_byte[8] == 0xFF) {
1449 printk(KERN_WARNING
1450 "cciss: reading geometry failed, volume "
1451 "does not support reading geometry\n");
1452 drv->block_size = block_size;
1453 drv->nr_blocks = total_size;
1454 drv->heads = 255;
1455 drv->sectors = 32; // Sectors per track
1456 drv->cylinders = total_size / 255 / 32;
1457 } else {
1458 unsigned int t;
1459
1460 drv->block_size = block_size;
1461 drv->nr_blocks = total_size;
1462 drv->heads = inq_buff->data_byte[6];
1463 drv->sectors = inq_buff->data_byte[7];
1464 drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
1465 drv->cylinders += inq_buff->data_byte[5];
1466 drv->raid_level = inq_buff->data_byte[8];
1467 t = drv->heads * drv->sectors;
1468 if (t > 1) {
1469 drv->cylinders = total_size/t;
1470 }
1471 }
1472 } else { /* Get geometry failed */
1473 printk(KERN_WARNING "cciss: reading geometry failed\n");
1474 }
1475 printk(KERN_INFO " heads= %d, sectors= %d, cylinders= %d\n\n",
1476 drv->heads, drv->sectors, drv->cylinders);
1477}
1478static void
1479cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
1480 int withirq, unsigned int *total_size, unsigned int *block_size)
1481{
1482 int return_code;
1483 memset(buf, 0, sizeof(*buf));
1484 if (withirq)
1485 return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
1486 ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
1487 else
1488 return_code = sendcmd(CCISS_READ_CAPACITY,
1489 ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
1490 if (return_code == IO_OK) {
1491 *total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
1492 *block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
1493 } else { /* read capacity command failed */
1494 printk(KERN_WARNING "cciss: read capacity failed\n");
1495 *total_size = 0;
1496 *block_size = BLOCK_SIZE;
1497 }
1498 printk(KERN_INFO " blocks= %u block_size= %d\n",
1499 *total_size, *block_size);
1500 return;
1501}
1502
1503static int register_new_disk(ctlr_info_t *h)
1504{
1505 struct gendisk *disk;
1506 int ctlr = h->ctlr;
1507 int i;
1508 int num_luns;
1509 int logvol;
1510 int new_lun_found = 0;
1511 int new_lun_index = 0;
1512 int free_index_found = 0;
1513 int free_index = 0;
1514 ReportLunData_struct *ld_buff = NULL;
1515 ReadCapdata_struct *size_buff = NULL;
1516 InquiryData_struct *inq_buff = NULL;
1517 int return_code;
1518 int listlength = 0;
1519 __u32 lunid = 0;
1520 unsigned int block_size;
1521 unsigned int total_size;
1522
1523 if (!capable(CAP_SYS_RAWIO))
1524 return -EPERM;
1525 /* if we have no space in our disk array left to add anything */
1526 if( h->num_luns >= CISS_MAX_LUN)
1527 return -EINVAL;
1528
1529 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
1530 if (ld_buff == NULL)
1531 goto mem_msg;
1532 memset(ld_buff, 0, sizeof(ReportLunData_struct));
1533 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1534 if (size_buff == NULL)
1535 goto mem_msg;
1536 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1537 if (inq_buff == NULL)
1538 goto mem_msg;
1539
1540 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
1541 sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
1542
1543 if( return_code == IO_OK)
1544 {
1545
1546 // printk("LUN Data\n--------------------------\n");
1547
1548 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
1549 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
1550 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
1551 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
1552 } else /* reading number of logical volumes failed */
1553 {
1554 printk(KERN_WARNING "cciss: report logical volume"
1555 " command failed\n");
1556 listlength = 0;
1557 goto free_err;
1558 }
1559 num_luns = listlength / 8; // 8 bytes per entry
1560 if (num_luns > CISS_MAX_LUN)
1561 {
1562 num_luns = CISS_MAX_LUN;
1563 }
1564#ifdef CCISS_DEBUG
1565 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
1566 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
1567 ld_buff->LUNListLength[3], num_luns);
1568#endif
1569 for(i=0; i< num_luns; i++)
1570 {
1571 int j;
1572 int lunID_found = 0;
1573
1574 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
1575 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
1576 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
1577 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
1578
1579 /* check to see if this is a new lun */
1580 for(j=0; j <= h->highest_lun; j++)
1581 {
1582#ifdef CCISS_DEBUG
1583 printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
1584 lunid);
1585#endif /* CCISS_DEBUG */
1586 if (h->drv[j].LunID == lunid)
1587 {
1588 lunID_found = 1;
1589 break;
1590 }
1591
1592 }
1593 if( lunID_found == 1)
1594 continue;
1595 else
1596 { /* It is the new lun we have been looking for */
1597#ifdef CCISS_DEBUG
1598 printk("new lun found at %d\n", i);
1599#endif /* CCISS_DEBUG */
1600 new_lun_index = i;
1601 new_lun_found = 1;
1602 break;
1603 }
1604 }
1605 if (!new_lun_found)
1606 {
1607 printk(KERN_WARNING "cciss: New Logical Volume not found\n");
1608 goto free_err;
1609 }
1610 /* Now find the free index */
1611 for(i=0; i <CISS_MAX_LUN; i++)
1612 {
1613#ifdef CCISS_DEBUG
1614 printk("Checking Index %d\n", i);
1615#endif /* CCISS_DEBUG */
1616 if(h->drv[i].LunID == 0)
1617 {
1618#ifdef CCISS_DEBUG
1619 printk("free index found at %d\n", i);
1620#endif /* CCISS_DEBUG */
1621 free_index_found = 1;
1622 free_index = i;
1623 break;
1624 }
1625 }
1626 if (!free_index_found)
1627 {
1628 printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
1629 goto free_err;
1630 }
1631
1632 logvol = free_index;
1633 h->drv[logvol].LunID = lunid;
1634 /* there could be gaps in lun numbers, track highest */
1635 if(h->highest_lun < logvol)
1636 h->highest_lun = logvol;
1637 cciss_read_capacity(ctlr, logvol, size_buff, 1,
1638 &total_size, &block_size);
1639 cciss_geometry_inquiry(ctlr, logvol, 1, total_size, block_size,
1640 inq_buff, &h->drv[logvol]);
1641 h->drv[logvol].usage_count = 0;
1642 ++h->num_luns;
1643 /* setup partitions per disk */
1644 disk = h->gendisk[logvol];
1645 set_capacity(disk, h->drv[logvol].nr_blocks);
1646 /* if it's the controller it's already added */
1647 if(logvol)
1648 add_disk(disk);
1649freeret:
1650 kfree(ld_buff);
1651 kfree(size_buff);
1652 kfree(inq_buff);
1653 return (logvol);
1654mem_msg:
1655 printk(KERN_ERR "cciss: out of memory\n");
1656free_err:
1657 logvol = -1;
1658 goto freeret;
1659}
1660
1661static int cciss_revalidate(struct gendisk *disk)
1662{
1663 ctlr_info_t *h = get_host(disk);
1664 drive_info_struct *drv = get_drv(disk);
1665 int logvol;
1666 int FOUND=0;
1667 unsigned int block_size;
1668 unsigned int total_size;
1669 ReadCapdata_struct *size_buff = NULL;
1670 InquiryData_struct *inq_buff = NULL;
1671
1672 for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
1673 {
1674 if(h->drv[logvol].LunID == drv->LunID) {
1675 FOUND=1;
1676 break;
1677 }
1678 }
1679
1680 if (!FOUND) return 1;
1681
1682 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
1683 if (size_buff == NULL)
1684 {
1685 printk(KERN_WARNING "cciss: out of memory\n");
1686 return 1;
1687 }
1688 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1689 if (inq_buff == NULL)
1690 {
1691 printk(KERN_WARNING "cciss: out of memory\n");
1692 kfree(size_buff);
1693 return 1;
1694 }
1695
1696 cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
1697 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
1698
1699 blk_queue_hardsect_size(h->queue, drv->block_size);
1700 set_capacity(disk, drv->nr_blocks);
1701
1702 kfree(size_buff);
1703 kfree(inq_buff);
1704 return 0;
1705}
1706
1707/*
1708 * Wait polling for a command to complete.
1709 * The memory mapped FIFO is polled for the completion.
1710 * Used only at init time; interrupts from the HBA are disabled.
1711 */
1712static unsigned long pollcomplete(int ctlr)
1713{
1714 unsigned long done;
1715 int i;
1716
1717 /* Wait (up to 20 seconds) for a command to complete */
1718
1719 for (i = 20 * HZ; i > 0; i--) {
1720 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1721 if (done == FIFO_EMPTY) {
1722 set_current_state(TASK_UNINTERRUPTIBLE);
1723 schedule_timeout(1);
1724 } else
1725 return (done);
1726 }
1727 /* Invalid address to tell caller we ran out of time */
1728 return 1;
1729}
1730/*
1731 * Send a command to the controller, and wait for it to complete.
1732 * Only used at init time.
1733 */
1734static int sendcmd(
1735 __u8 cmd,
1736 int ctlr,
1737 void *buff,
1738 size_t size,
1739 unsigned int use_unit_num, /* 0: address the controller,
1740 1: address logical volume log_unit,
1741 2: periph device address is scsi3addr */
1742 unsigned int log_unit,
1743 __u8 page_code,
1744 unsigned char *scsi3addr,
1745 int cmd_type)
1746{
1747 CommandList_struct *c;
1748 int i;
1749 unsigned long complete;
1750 ctlr_info_t *info_p= hba[ctlr];
1751 u64bit buff_dma_handle;
1752 int status;
1753
1754 if ((c = cmd_alloc(info_p, 1)) == NULL) {
1755 printk(KERN_WARNING "cciss: unable to get memory");
1756 return(IO_ERROR);
1757 }
1758 status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
1759 log_unit, page_code, scsi3addr, cmd_type);
1760 if (status != IO_OK) {
1761 cmd_free(info_p, c, 1);
1762 return status;
1763 }
1764resend_cmd1:
1765 /*
1766 * Disable interrupt
1767 */
1768#ifdef CCISS_DEBUG
1769 printk(KERN_DEBUG "cciss: turning intr off\n");
1770#endif /* CCISS_DEBUG */
1771 info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
1772
1773 /* Make sure there is room in the command FIFO */
1774 /* Actually it should be completely empty at this time. */
1775 for (i = 200000; i > 0; i--)
1776 {
1777 /* if fifo isn't full go */
1778 if (!(info_p->access.fifo_full(info_p)))
1779 {
1780
1781 break;
1782 }
1783 udelay(10);
1784 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
1785 " waiting!\n", ctlr);
1786 }
1787 /*
1788 * Send the cmd
1789 */
1790 info_p->access.submit_command(info_p, c);
1791 complete = pollcomplete(ctlr);
1792
1793#ifdef CCISS_DEBUG
1794 printk(KERN_DEBUG "cciss: command completed\n");
1795#endif /* CCISS_DEBUG */
1796
1797 if (complete != 1) {
1798 if ( (complete & CISS_ERROR_BIT)
1799 && (complete & ~CISS_ERROR_BIT) == c->busaddr)
1800 {
1801 /* if data overrun or underrun on Report command,
1802 ignore it
1803 */
1804 if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
1805 (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
1806 (c->Request.CDB[0] == CISS_INQUIRY)) &&
1807 ((c->err_info->CommandStatus ==
1808 CMD_DATA_OVERRUN) ||
1809 (c->err_info->CommandStatus ==
1810 CMD_DATA_UNDERRUN)
1811 ))
1812 {
1813 complete = c->busaddr;
1814 } else {
1815 if (c->err_info->CommandStatus ==
1816 CMD_UNSOLICITED_ABORT) {
1817 printk(KERN_WARNING "cciss%d: "
1818 "unsolicited abort %p\n",
1819 ctlr, c);
1820 if (c->retry_count < MAX_CMD_RETRIES) {
1821 printk(KERN_WARNING
1822 "cciss%d: retrying %p\n",
1823 ctlr, c);
1824 c->retry_count++;
1825 /* erase the old error */
1826 /* information */
1827 memset(c->err_info, 0,
1828 sizeof(ErrorInfo_struct));
1829 goto resend_cmd1;
1830 } else {
1831 printk(KERN_WARNING
1832 "cciss%d: retried %p too "
1833 "many times\n", ctlr, c);
1834 status = IO_ERROR;
1835 goto cleanup1;
1836 }
1837 }
1838 printk(KERN_WARNING "ciss ciss%d: sendcmd"
1839 " Error %x \n", ctlr,
1840 c->err_info->CommandStatus);
1841 printk(KERN_WARNING "ciss ciss%d: sendcmd"
1842 " offensive info\n"
1843 " size %x\n num %x value %x\n", ctlr,
1844 c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
1845 c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
1846 c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
1847 status = IO_ERROR;
1848 goto cleanup1;
1849 }
1850 }
1851 if (complete != c->busaddr) {
1852 printk( KERN_WARNING "cciss cciss%d: SendCmd "
1853 "Invalid command list address returned! (%lx)\n",
1854 ctlr, complete);
1855 status = IO_ERROR;
1856 goto cleanup1;
1857 }
1858 } else {
1859 printk( KERN_WARNING
1860 "cciss cciss%d: SendCmd Timeout out, "
1861 "No command list address returned!\n",
1862 ctlr);
1863 status = IO_ERROR;
1864 }
1865
1866cleanup1:
1867 /* unlock the data buffer from DMA */
1868 pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
1869 size, PCI_DMA_BIDIRECTIONAL);
1870 cmd_free(info_p, c, 1);
1871 return (status);
1872}
1873/*
1874 * Map (physical) PCI mem into (virtual) kernel space
1875 */
1876static void __iomem *remap_pci_mem(ulong base, ulong size)
1877{
1878 ulong page_base = ((ulong) base) & PAGE_MASK;
1879 ulong page_offs = ((ulong) base) - page_base;
1880 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
1881
1882 return page_remapped ? (page_remapped + page_offs) : NULL;
1883}
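/*
 * Illustrative sketch only (not part of the driver): remap_pci_mem() rounds
 * the physical address down to a page boundary for ioremap() and then adds
 * the offset back in, so callers may hand it an address that is not page
 * aligned.  The base address and offset below are made-up example values.
 */
static void __iomem *example_map_cfgtable(void)
{
	ulong bar = 0xfebf0000UL;	/* hypothetical memory BAR */
	ulong cfg_offset = 0x40;	/* hypothetical table offset within it */

	/* ioremap()s from 0xfebf0000 and returns a pointer 0x40 bytes in */
	return remap_pci_mem(bar + cfg_offset, sizeof(CfgTable_struct));
}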
1884
1885/*
1886 * Takes jobs off the request queue and sends them to the hardware, then
1887 * puts them on the completion queue to wait for completion.
1888 */
1889static void start_io( ctlr_info_t *h)
1890{
1891 CommandList_struct *c;
1892
1893 while(( c = h->reqQ) != NULL )
1894 {
1895 /* can't do anything if fifo is full */
1896 if ((h->access.fifo_full(h))) {
1897 printk(KERN_WARNING "cciss: fifo full\n");
1898 break;
1899 }
1900
1901		/* Get the first entry from the Request Q */
1902 removeQ(&(h->reqQ), c);
1903 h->Qdepth--;
1904
1905		/* Tell the controller to execute the command */
1906 h->access.submit_command(h, c);
1907
1908 /* Put job onto the completed Q */
1909 addQ (&(h->cmpQ), c);
1910 }
1911}
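/*
 * Sketch only: reqQ and cmpQ are circular, doubly linked lists threaded
 * through the prev/next members of CommandList_struct.  The driver's real
 * addQ()/removeQ() helpers are defined earlier in this file; the fragment
 * below is just a minimal illustration of that list discipline, not the
 * driver's implementation.
 */
static inline void example_queue_add(CommandList_struct **Qptr,
				     CommandList_struct *c)
{
	if (*Qptr == NULL) {
		/* empty queue: the element points at itself */
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		/* link c in just before the head, i.e. at the tail */
		c->prev = (*Qptr)->prev;
		c->next = *Qptr;
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}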
1912
1913static inline void complete_buffers(struct bio *bio, int status)
1914{
1915 while (bio) {
1916 struct bio *xbh = bio->bi_next;
1917 int nr_sectors = bio_sectors(bio);
1918
1919 bio->bi_next = NULL;
1920		blk_finished_io(nr_sectors);
1921 bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
1922 bio = xbh;
1923 }
1924
1925}
1926/* Assumes that CCISS_LOCK(h->ctlr) is held. */
1927/* Zeros out the error record and then resends the command back */
1928/* to the controller */
1929static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
1930{
1931 /* erase the old error information */
1932 memset(c->err_info, 0, sizeof(ErrorInfo_struct));
1933
1934 /* add it to software queue and then send it to the controller */
1935 addQ(&(h->reqQ),c);
1936 h->Qdepth++;
1937 if(h->Qdepth > h->maxQsinceinit)
1938 h->maxQsinceinit = h->Qdepth;
1939
1940 start_io(h);
1941}
1942/* Checks the status of the job and calls complete_buffers() to mark all
1943 * buffers for the completed job.
1944 */
1945static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
1946 int timeout)
1947{
1948 int status = 1;
1949 int i;
1950 int retry_cmd = 0;
1951 u64bit temp64;
1952
1953 if (timeout)
1954 status = 0;
1955
1956 if(cmd->err_info->CommandStatus != 0)
1957 { /* an error has occurred */
1958 switch(cmd->err_info->CommandStatus)
1959 {
1960 unsigned char sense_key;
1961 case CMD_TARGET_STATUS:
1962 status = 0;
1963
1964 if( cmd->err_info->ScsiStatus == 0x02)
1965 {
1966 printk(KERN_WARNING "cciss: cmd %p "
1967 "has CHECK CONDITION "
1968 " byte 2 = 0x%x\n", cmd,
1969 cmd->err_info->SenseInfo[2]
1970 );
1971 /* check the sense key */
1972 sense_key = 0xf &
1973 cmd->err_info->SenseInfo[2];
1974 /* no status or recovered error */
1975 if((sense_key == 0x0) ||
1976 (sense_key == 0x1))
1977 {
1978 status = 1;
1979 }
1980 } else
1981 {
1982 printk(KERN_WARNING "cciss: cmd %p "
1983 "has SCSI Status 0x%x\n",
1984 cmd, cmd->err_info->ScsiStatus);
1985 }
1986 break;
1987 case CMD_DATA_UNDERRUN:
1988 printk(KERN_WARNING "cciss: cmd %p has"
1989 " completed with data underrun "
1990 "reported\n", cmd);
1991 break;
1992 case CMD_DATA_OVERRUN:
1993 printk(KERN_WARNING "cciss: cmd %p has"
1994 " completed with data overrun "
1995 "reported\n", cmd);
1996 break;
1997 case CMD_INVALID:
1998 printk(KERN_WARNING "cciss: cmd %p is "
1999 "reported invalid\n", cmd);
2000 status = 0;
2001 break;
2002 case CMD_PROTOCOL_ERR:
2003 printk(KERN_WARNING "cciss: cmd %p has "
2004 "protocol error \n", cmd);
2005 status = 0;
2006 break;
2007 case CMD_HARDWARE_ERR:
2008 printk(KERN_WARNING "cciss: cmd %p had "
2009 " hardware error\n", cmd);
2010 status = 0;
2011 break;
2012 case CMD_CONNECTION_LOST:
2013 printk(KERN_WARNING "cciss: cmd %p had "
2014 "connection lost\n", cmd);
2015 status=0;
2016 break;
2017 case CMD_ABORTED:
2018 printk(KERN_WARNING "cciss: cmd %p was "
2019 "aborted\n", cmd);
2020 status=0;
2021 break;
2022 case CMD_ABORT_FAILED:
2023 printk(KERN_WARNING "cciss: cmd %p reports "
2024 "abort failed\n", cmd);
2025 status=0;
2026 break;
2027 case CMD_UNSOLICITED_ABORT:
2028 printk(KERN_WARNING "cciss%d: unsolicited "
2029 "abort %p\n", h->ctlr, cmd);
2030 if (cmd->retry_count < MAX_CMD_RETRIES) {
2031 retry_cmd=1;
2032 printk(KERN_WARNING
2033 "cciss%d: retrying %p\n",
2034 h->ctlr, cmd);
2035 cmd->retry_count++;
2036 } else
2037 printk(KERN_WARNING
2038 "cciss%d: %p retried too "
2039 "many times\n", h->ctlr, cmd);
2040 status=0;
2041 break;
2042 case CMD_TIMEOUT:
2043			printk(KERN_WARNING "cciss: cmd %p timed out\n",
2044 cmd);
2045 status=0;
2046 break;
2047 default:
2048 printk(KERN_WARNING "cciss: cmd %p returned "
2049 "unknown status %x\n", cmd,
2050 cmd->err_info->CommandStatus);
2051 status=0;
2052 }
2053 }
2054	/* Resend the command and return if it needs to be retried */
2055 if(retry_cmd) {
2056 resend_cciss_cmd(h,cmd);
2057 return;
2058 }
2059 /* command did not need to be retried */
2060 /* unmap the DMA mapping for all the scatter gather elements */
2061 for(i=0; i<cmd->Header.SGList; i++) {
2062 temp64.val32.lower = cmd->SG[i].Addr.lower;
2063 temp64.val32.upper = cmd->SG[i].Addr.upper;
2064 pci_unmap_page(hba[cmd->ctlr]->pdev,
2065 temp64.val, cmd->SG[i].Len,
2066 (cmd->Request.Type.Direction == XFER_READ) ?
2067 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
2068 }
2069 complete_buffers(cmd->rq->bio, status);
2070
2071#ifdef CCISS_DEBUG
2072 printk("Done with %p\n", cmd->rq);
2073#endif /* CCISS_DEBUG */
2074
2075 end_that_request_last(cmd->rq);
2076 cmd_free(h,cmd,1);
2077}
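/*
 * Note (illustration only): for CMD_TARGET_STATUS with ScsiStatus 0x02
 * (CHECK CONDITION), the sense key is the low nibble of SenseInfo[2];
 * keys 0x0 (NO SENSE) and 0x1 (RECOVERED ERROR) are still treated as
 * success above, while every other key fails the request.
 */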
2078
2079/*
2080 * Get a request and submit it to the controller.
2081 */
2082static void do_cciss_request(request_queue_t *q)
2083{
2084 ctlr_info_t *h= q->queuedata;
2085 CommandList_struct *c;
2086 int start_blk, seg;
2087 struct request *creq;
2088 u64bit temp64;
2089 struct scatterlist tmp_sg[MAXSGENTRIES];
2090 drive_info_struct *drv;
2091 int i, dir;
2092
2093 /* We call start_io here in case there is a command waiting on the
2094 * queue that has not been sent.
2095 */
2096 if (blk_queue_plugged(q))
2097 goto startio;
2098
2099queue:
2100 creq = elv_next_request(q);
2101 if (!creq)
2102 goto startio;
2103
2104 if (creq->nr_phys_segments > MAXSGENTRIES)
2105 BUG();
2106
2107 if (( c = cmd_alloc(h, 1)) == NULL)
2108 goto full;
2109
2110 blkdev_dequeue_request(creq);
2111
2112 spin_unlock_irq(q->queue_lock);
2113
2114 c->cmd_type = CMD_RWREQ;
2115 c->rq = creq;
2116
2117 /* fill in the request */
2118 drv = creq->rq_disk->private_data;
2119 c->Header.ReplyQueue = 0; // unused in simple mode
2120	c->Header.Tag.lower = c->busaddr;  // use the physical address of the cmd block for the tag
2121 c->Header.LUN.LogDev.VolId= drv->LunID;
2122 c->Header.LUN.LogDev.Mode = 1;
2123 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
2124 c->Request.Type.Type = TYPE_CMD; // It is a command.
2125 c->Request.Type.Attribute = ATTR_SIMPLE;
2126 c->Request.Type.Direction =
2127 (rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE;
2128 c->Request.Timeout = 0; // Don't time out
2129 c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
2130 start_blk = creq->sector;
2131#ifdef CCISS_DEBUG
2132	printk(KERN_DEBUG "cciss: sector=%d nr_sectors=%d\n",(int) creq->sector,
2133 (int) creq->nr_sectors);
2134#endif /* CCISS_DEBUG */
2135
2136 seg = blk_rq_map_sg(q, creq, tmp_sg);
2137
2138 /* get the DMA records for the setup */
2139 if (c->Request.Type.Direction == XFER_READ)
2140 dir = PCI_DMA_FROMDEVICE;
2141 else
2142 dir = PCI_DMA_TODEVICE;
2143
2144 for (i=0; i<seg; i++)
2145 {
2146 c->SG[i].Len = tmp_sg[i].length;
2147 temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
2148 tmp_sg[i].offset, tmp_sg[i].length,
2149 dir);
2150 c->SG[i].Addr.lower = temp64.val32.lower;
2151 c->SG[i].Addr.upper = temp64.val32.upper;
2152 c->SG[i].Ext = 0; // we are not chaining
2153 }
2154 /* track how many SG entries we are using */
2155 if( seg > h->maxSG)
2156 h->maxSG = seg;
2157
2158#ifdef CCISS_DEBUG
2159 printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
2160#endif /* CCISS_DEBUG */
2161
2162 c->Header.SGList = c->Header.SGTotal = seg;
2163 c->Request.CDB[1]= 0;
2164 c->Request.CDB[2]= (start_blk >> 24) & 0xff; //MSB
2165 c->Request.CDB[3]= (start_blk >> 16) & 0xff;
2166 c->Request.CDB[4]= (start_blk >> 8) & 0xff;
2167 c->Request.CDB[5]= start_blk & 0xff;
2168 c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
2169 c->Request.CDB[7]= (creq->nr_sectors >> 8) & 0xff;
2170 c->Request.CDB[8]= creq->nr_sectors & 0xff;
2171 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
2172
2173 spin_lock_irq(q->queue_lock);
2174
2175 addQ(&(h->reqQ),c);
2176 h->Qdepth++;
2177 if(h->Qdepth > h->maxQsinceinit)
2178 h->maxQsinceinit = h->Qdepth;
2179
2180 goto queue;
2181full:
2182 blk_stop_queue(q);
2183startio:
2184	/* We will already have the driver lock here, so there is no need
2185	 * to take it.
2186 */
2187 start_io(h);
2188}
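/*
 * Worked example (illustration only) of the READ(10)/WRITE(10) CDB built in
 * do_cciss_request() above: for a request starting at sector 0x00123456 with
 * 16 sectors, the shifts serialize both fields most-significant byte first:
 *
 *   CDB[0]    = CCISS_READ (0x28) or CCISS_WRITE (0x2a)
 *   CDB[2..5] = 0x00 0x12 0x34 0x56    (start block, big endian)
 *   CDB[7..8] = 0x00 0x10              (transfer length, big endian)
 *
 * with the remaining bytes left zero.
 */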
2189
2190static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
2191{
2192 ctlr_info_t *h = dev_id;
2193 CommandList_struct *c;
2194 unsigned long flags;
2195 __u32 a, a1;
2196 int j;
2197 int start_queue = h->next_to_run;
2198
2199 /* Is this interrupt for us? */
2200 if (( h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
2201 return IRQ_NONE;
2202
2203 /*
2204 * If there are completed commands in the completion queue,
2205 * we had better do something about it.
2206 */
2207 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
2208 while( h->access.intr_pending(h))
2209 {
2210 while((a = h->access.command_completed(h)) != FIFO_EMPTY)
2211 {
2212 a1 = a;
2213 a &= ~3;
2214 if ((c = h->cmpQ) == NULL)
2215 {
2216 printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
2217 continue;
2218 }
2219 while(c->busaddr != a) {
2220 c = c->next;
2221 if (c == h->cmpQ)
2222 break;
2223 }
2224 /*
2225 * If we've found the command, take it off the
2226 * completion Q and free it
2227 */
2228 if (c->busaddr == a) {
2229 removeQ(&h->cmpQ, c);
2230 if (c->cmd_type == CMD_RWREQ) {
2231 complete_command(h, c, 0);
2232 } else if (c->cmd_type == CMD_IOCTL_PEND) {
2233 complete(c->waiting);
2234 }
2235# ifdef CONFIG_CISS_SCSI_TAPE
2236 else if (c->cmd_type == CMD_SCSI)
2237 complete_scsi_command(c, 0, a1);
2238# endif
2239 continue;
2240 }
2241 }
2242 }
2243
2244 /* check to see if we have maxed out the number of commands that can
2245 * be placed on the queue. If so then exit. We do this check here
2246 * in case the interrupt we serviced was from an ioctl and did not
2247 * free any new commands.
2248 */
2249 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2250 goto cleanup;
2251
2252 /* We have room on the queue for more commands. Now we need to queue
2253 * them up. We will also keep track of the next queue to run so
2254 * that every queue gets a chance to be started first.
2255 */
2256 for (j=0; j < NWD; j++){
2257 int curr_queue = (start_queue + j) % NWD;
2258 /* make sure the disk has been added and the drive is real
2259 * because this can be called from the middle of init_one.
2260 */
2261 if(!(h->gendisk[curr_queue]->queue) ||
2262 !(h->drv[curr_queue].heads))
2263 continue;
2264 blk_start_queue(h->gendisk[curr_queue]->queue);
2265
2266 /* check to see if we have maxed out the number of commands
2267 * that can be placed on the queue.
2268 */
2269 if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
2270 {
2271 if (curr_queue == start_queue){
2272 h->next_to_run = (start_queue + 1) % NWD;
2273 goto cleanup;
2274 } else {
2275 h->next_to_run = curr_queue;
2276 goto cleanup;
2277 }
2278 } else {
2279 curr_queue = (curr_queue + 1) % NWD;
2280 }
2281 }
2282
2283cleanup:
2284 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
2285 return IRQ_HANDLED;
2286}
2287
2288/*
2289 * We cannot read the structure directly; for portability we must use
2290 * the io functions.
2291 * This is for debug only.
2292 */
2293#ifdef CCISS_DEBUG
2294static void print_cfg_table( CfgTable_struct *tb)
2295{
2296 int i;
2297 char temp_name[17];
2298
2299 printk("Controller Configuration information\n");
2300 printk("------------------------------------\n");
2301 for(i=0;i<4;i++)
2302 temp_name[i] = readb(&(tb->Signature[i]));
2303 temp_name[4]='\0';
2304 printk(" Signature = %s\n", temp_name);
2305 printk(" Spec Number = %d\n", readl(&(tb->SpecValence)));
2306 printk(" Transport methods supported = 0x%x\n",
2307 readl(&(tb-> TransportSupport)));
2308 printk(" Transport methods active = 0x%x\n",
2309 readl(&(tb->TransportActive)));
2310 printk(" Requested transport Method = 0x%x\n",
2311 readl(&(tb->HostWrite.TransportRequest)));
2312	printk("   Coalesce Interrupt Delay = 0x%x\n",
2313	       readl(&(tb->HostWrite.CoalIntDelay)));
2314	printk("   Coalesce Interrupt Count = 0x%x\n",
2315 readl(&(tb->HostWrite.CoalIntCount)));
2316	printk("   Max outstanding commands = %d\n",
2317 readl(&(tb->CmdsOutMax)));
2318 printk(" Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
2319 for(i=0;i<16;i++)
2320 temp_name[i] = readb(&(tb->ServerName[i]));
2321 temp_name[16] = '\0';
2322 printk(" Server Name = %s\n", temp_name);
2323 printk(" Heartbeat Counter = 0x%x\n\n\n",
2324 readl(&(tb->HeartBeat)));
2325}
2326#endif /* CCISS_DEBUG */
2327
2328static void release_io_mem(ctlr_info_t *c)
2329{
2330 /* if IO mem was not protected do nothing */
2331 if( c->io_mem_addr == 0)
2332 return;
2333 release_region(c->io_mem_addr, c->io_mem_length);
2334 c->io_mem_addr = 0;
2335 c->io_mem_length = 0;
2336}
2337
2338static int find_PCI_BAR_index(struct pci_dev *pdev,
2339 unsigned long pci_bar_addr)
2340{
2341 int i, offset, mem_type, bar_type;
2342 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
2343 return 0;
2344 offset = 0;
2345 for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
2346 bar_type = pci_resource_flags(pdev, i) &
2347 PCI_BASE_ADDRESS_SPACE;
2348 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
2349 offset += 4;
2350 else {
2351 mem_type = pci_resource_flags(pdev, i) &
2352 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
2353 switch (mem_type) {
2354 case PCI_BASE_ADDRESS_MEM_TYPE_32:
2355 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
2356 offset += 4; /* 32 bit */
2357 break;
2358 case PCI_BASE_ADDRESS_MEM_TYPE_64:
2359 offset += 8;
2360 break;
2361 default: /* reserved in PCI 2.2 */
2362 printk(KERN_WARNING "Base address is invalid\n");
2363 return -1;
2364 break;
2365 }
2366 }
2367 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
2368 return i+1;
2369 }
2370 return -1;
2371}
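/*
 * Worked example (illustration only) for find_PCI_BAR_index() above: the
 * argument is a config-space BAR offset (0x10, 0x14, ...).  If resource 0 is
 * a 64-bit memory BAR it occupies config offsets 0x10-0x17, so after i == 0
 * the running offset is 8; asking for the BAR at config offset 0x18
 * (pci_bar_addr - PCI_BASE_ADDRESS_0 == 8) then returns resource index 1.
 * With a 32-bit BAR 0 and a 32-bit BAR 1, offset 0x18 is only reached after
 * i == 1 and the function returns 2 instead.
 */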
2372
2373static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
2374{
2375 ushort subsystem_vendor_id, subsystem_device_id, command;
2376 __u32 board_id, scratchpad = 0;
2377 __u64 cfg_offset;
2378 __u32 cfg_base_addr;
2379 __u64 cfg_base_addr_index;
2380 int i;
2381
2382 /* check to see if controller has been disabled */
2383 /* BEFORE trying to enable it */
2384 (void) pci_read_config_word(pdev, PCI_COMMAND,&command);
2385 if(!(command & 0x02))
2386 {
2387 printk(KERN_WARNING "cciss: controller appears to be disabled\n");
2388 return(-1);
2389 }
2390
2391 if (pci_enable_device(pdev))
2392 {
2393 printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
2394 return( -1);
2395 }
2396 if (pci_set_dma_mask(pdev, CCISS_DMA_MASK ) != 0)
2397 {
2398 printk(KERN_ERR "cciss: Unable to set DMA mask\n");
2399 return(-1);
2400 }
2401
2402 subsystem_vendor_id = pdev->subsystem_vendor;
2403 subsystem_device_id = pdev->subsystem_device;
2404 board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
2405 subsystem_vendor_id);
2406
2407 /* search for our IO range so we can protect it */
2408 for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
2409 {
2410 /* is this an IO range */
2411 if( pci_resource_flags(pdev, i) & 0x01 ) {
2412 c->io_mem_addr = pci_resource_start(pdev, i);
2413 c->io_mem_length = pci_resource_end(pdev, i) -
2414 pci_resource_start(pdev, i) +1;
2415#ifdef CCISS_DEBUG
2416 printk("IO value found base_addr[%d] %lx %lx\n", i,
2417 c->io_mem_addr, c->io_mem_length);
2418#endif /* CCISS_DEBUG */
2419 /* register the IO range */
2420 if(!request_region( c->io_mem_addr,
2421 c->io_mem_length, "cciss"))
2422 {
2423 printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
2424 c->io_mem_addr, c->io_mem_length);
2425 c->io_mem_addr= 0;
2426 c->io_mem_length = 0;
2427 }
2428 break;
2429 }
2430 }
2431
2432#ifdef CCISS_DEBUG
2433 printk("command = %x\n", command);
2434 printk("irq = %x\n", pdev->irq);
2435 printk("board_id = %x\n", board_id);
2436#endif /* CCISS_DEBUG */
2437
2438 c->intr = pdev->irq;
2439
2440 /*
2441	 * The memory base address comes first; the second address points to
2442	 * the config table.
2443 */
2444
2445 c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
2446#ifdef CCISS_DEBUG
2447 printk("address 0 = %x\n", c->paddr);
2448#endif /* CCISS_DEBUG */
2449 c->vaddr = remap_pci_mem(c->paddr, 200);
2450
2451 /* Wait for the board to become ready. (PCI hotplug needs this.)
2452 * We poll for up to 120 secs, once per 100ms. */
2453 for (i=0; i < 1200; i++) {
2454 scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
2455 if (scratchpad == CCISS_FIRMWARE_READY)
2456 break;
2457 set_current_state(TASK_INTERRUPTIBLE);
2458 schedule_timeout(HZ / 10); /* wait 100ms */
2459 }
2460 if (scratchpad != CCISS_FIRMWARE_READY) {
2461 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
2462 return -1;
2463 }
2464
2465 /* get the address index number */
2466 cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
2467 cfg_base_addr &= (__u32) 0x0000ffff;
2468#ifdef CCISS_DEBUG
2469 printk("cfg base address = %x\n", cfg_base_addr);
2470#endif /* CCISS_DEBUG */
2471 cfg_base_addr_index =
2472 find_PCI_BAR_index(pdev, cfg_base_addr);
2473#ifdef CCISS_DEBUG
2474 printk("cfg base address index = %x\n", cfg_base_addr_index);
2475#endif /* CCISS_DEBUG */
2476 if (cfg_base_addr_index == -1) {
2477 printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
2478 release_io_mem(c);
2479 return -1;
2480 }
2481
2482 cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
2483#ifdef CCISS_DEBUG
2484 printk("cfg offset = %x\n", cfg_offset);
2485#endif /* CCISS_DEBUG */
2486 c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
2487 cfg_base_addr_index) + cfg_offset,
2488 sizeof(CfgTable_struct));
2489 c->board_id = board_id;
2490
2491#ifdef CCISS_DEBUG
2492 print_cfg_table(c->cfgtable);
2493#endif /* CCISS_DEBUG */
2494
2495 for(i=0; i<NR_PRODUCTS; i++) {
2496 if (board_id == products[i].board_id) {
2497 c->product_name = products[i].product_name;
2498 c->access = *(products[i].access);
2499 break;
2500 }
2501 }
2502 if (i == NR_PRODUCTS) {
2503 printk(KERN_WARNING "cciss: Sorry, I don't know how"
2504 " to access the Smart Array controller %08lx\n",
2505 (unsigned long)board_id);
2506 return -1;
2507 }
2508 if ( (readb(&c->cfgtable->Signature[0]) != 'C') ||
2509 (readb(&c->cfgtable->Signature[1]) != 'I') ||
2510 (readb(&c->cfgtable->Signature[2]) != 'S') ||
2511 (readb(&c->cfgtable->Signature[3]) != 'S') )
2512 {
2513 printk("Does not appear to be a valid CISS config table\n");
2514 return -1;
2515 }
2516
2517#ifdef CONFIG_X86
2518{
2519 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
2520 __u32 prefetch;
2521 prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
2522 prefetch |= 0x100;
2523 writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
2524}
2525#endif
2526
2527#ifdef CCISS_DEBUG
2528 printk("Trying to put board into Simple mode\n");
2529#endif /* CCISS_DEBUG */
2530 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
2531 /* Update the field, and then ring the doorbell */
2532 writel( CFGTBL_Trans_Simple,
2533 &(c->cfgtable->HostWrite.TransportRequest));
2534 writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
2535
2536	/* under certain very rare conditions, this can take a while.
2537 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
2538 * as we enter this code.) */
2539 for(i=0;i<MAX_CONFIG_WAIT;i++) {
2540 if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
2541 break;
2542 /* delay and try again */
2543 set_current_state(TASK_INTERRUPTIBLE);
2544 schedule_timeout(10);
2545 }
2546
2547#ifdef CCISS_DEBUG
2548 printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
2549#endif /* CCISS_DEBUG */
2550#ifdef CCISS_DEBUG
2551 print_cfg_table(c->cfgtable);
2552#endif /* CCISS_DEBUG */
2553
2554 if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
2555 {
2556 printk(KERN_WARNING "cciss: unable to get board into"
2557 " simple mode\n");
2558 return -1;
2559 }
2560 return 0;
2561
2562}
2563
2564/*
2565 * Gets information about the local volumes attached to the controller.
2566 */
2567static void cciss_getgeometry(int cntl_num)
2568{
2569 ReportLunData_struct *ld_buff;
2570 ReadCapdata_struct *size_buff;
2571 InquiryData_struct *inq_buff;
2572 int return_code;
2573 int i;
2574 int listlength = 0;
2575 __u32 lunid = 0;
2576 int block_size;
2577 int total_size;
2578
2579 ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
2580 if (ld_buff == NULL)
2581 {
2582 printk(KERN_ERR "cciss: out of memory\n");
2583 return;
2584 }
2585 memset(ld_buff, 0, sizeof(ReportLunData_struct));
2586 size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
2587 if (size_buff == NULL)
2588 {
2589 printk(KERN_ERR "cciss: out of memory\n");
2590 kfree(ld_buff);
2591 return;
2592 }
2593 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
2594 if (inq_buff == NULL)
2595 {
2596 printk(KERN_ERR "cciss: out of memory\n");
2597 kfree(ld_buff);
2598 kfree(size_buff);
2599 return;
2600 }
2601 /* Get the firmware version */
2602 return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff,
2603 sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
2604 if (return_code == IO_OK)
2605 {
2606 hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
2607 hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
2608 hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
2609 hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
2610 } else /* send command failed */
2611 {
2612 printk(KERN_WARNING "cciss: unable to determine firmware"
2613 " version of controller\n");
2614 }
2615 /* Get the number of logical volumes */
2616 return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff,
2617 sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
2618
2619 if( return_code == IO_OK)
2620 {
2621#ifdef CCISS_DEBUG
2622 printk("LUN Data\n--------------------------\n");
2623#endif /* CCISS_DEBUG */
2624
2625 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
2626 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
2627 listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;
2628 listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
2629 } else /* reading number of logical volumes failed */
2630 {
2631 printk(KERN_WARNING "cciss: report logical volume"
2632 " command failed\n");
2633 listlength = 0;
2634 }
2635	hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
2636 if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
2637 {
2638		printk(KERN_ERR "cciss: only %d logical volumes are supported\n",
2639 CISS_MAX_LUN);
2640 hba[cntl_num]->num_luns = CISS_MAX_LUN;
2641 }
2642#ifdef CCISS_DEBUG
2643 printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
2644 ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
2645 ld_buff->LUNListLength[3], hba[cntl_num]->num_luns);
2646#endif /* CCISS_DEBUG */
2647
2648 hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
2649 for(i=0; i< hba[cntl_num]->num_luns; i++)
2650 {
2651
2652 lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
2653 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
2654 lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
2655 lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
2656
2657 hba[cntl_num]->drv[i].LunID = lunid;
2658
2659
2660#ifdef CCISS_DEBUG
2661 printk(KERN_DEBUG "LUN[%d]: %x %x %x %x = %x\n", i,
2662 ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2],
2663 ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
2664#endif /* CCISS_DEBUG */
2665 cciss_read_capacity(cntl_num, i, size_buff, 0,
2666 &total_size, &block_size);
2667 cciss_geometry_inquiry(cntl_num, i, 0, total_size, block_size,
2668 inq_buff, &hba[cntl_num]->drv[i]);
2669 }
2670 kfree(ld_buff);
2671 kfree(size_buff);
2672 kfree(inq_buff);
2673}
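/*
 * Byte-order note (illustration only) for cciss_getgeometry() above: the
 * CISS_REPORT_LOG list length is assembled most-significant byte first,
 * while the LunID is taken from the first four bytes of each 8-byte LUN
 * entry, least-significant byte first.  For example, LUNListLength bytes
 * 00 00 00 18 give listlength = 0x18 = 24, i.e. 24 / 8 = 3 logical volumes,
 * and a LUN entry beginning 01 00 00 00 yields LunID = 0x00000001.
 */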
2674
2675/* Function to find the first free pointer into our hba[] array */
2676/* Returns -1 if no free entries are left. */
2677static int alloc_cciss_hba(void)
2678{
2679 struct gendisk *disk[NWD];
2680 int i, n;
2681 for (n = 0; n < NWD; n++) {
2682 disk[n] = alloc_disk(1 << NWD_SHIFT);
2683 if (!disk[n])
2684 goto out;
2685 }
2686
2687 for(i=0; i< MAX_CTLR; i++) {
2688 if (!hba[i]) {
2689 ctlr_info_t *p;
2690 p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
2691 if (!p)
2692 goto Enomem;
2693 memset(p, 0, sizeof(ctlr_info_t));
2694 for (n = 0; n < NWD; n++)
2695 p->gendisk[n] = disk[n];
2696 hba[i] = p;
2697 return i;
2698 }
2699 }
2700 printk(KERN_WARNING "cciss: This driver supports a maximum"
2701 " of %d controllers.\n", MAX_CTLR);
2702 goto out;
2703Enomem:
2704 printk(KERN_ERR "cciss: out of memory.\n");
2705out:
2706 while (n--)
2707 put_disk(disk[n]);
2708 return -1;
2709}
2710
2711static void free_hba(int i)
2712{
2713 ctlr_info_t *p = hba[i];
2714 int n;
2715
2716 hba[i] = NULL;
2717 for (n = 0; n < NWD; n++)
2718 put_disk(p->gendisk[n]);
2719 kfree(p);
2720}
2721
2722/*
2723 * This is it. Find all the controllers and register them. I really hate
2724 * stealing all these major device numbers.
2725 * returns the number of block devices registered.
2726 */
2727static int __devinit cciss_init_one(struct pci_dev *pdev,
2728 const struct pci_device_id *ent)
2729{
2730 request_queue_t *q;
2731 int i;
2732 int j;
2733 int rc;
2734
2735 printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
2736 " bus %d dev %d func %d\n",
2737 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
2738 PCI_FUNC(pdev->devfn));
2739 i = alloc_cciss_hba();
2740 if(i < 0)
2741 return (-1);
2742 if (cciss_pci_init(hba[i], pdev) != 0)
2743 goto clean1;
2744
2745 sprintf(hba[i]->devname, "cciss%d", i);
2746 hba[i]->ctlr = i;
2747 hba[i]->pdev = pdev;
2748
2749 /* configure PCI DMA stuff */
2750 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
2751 printk("cciss: using DAC cycles\n");
2752 else if (!pci_set_dma_mask(pdev, 0xffffffff))
2753 printk("cciss: not using DAC cycles\n");
2754 else {
2755 printk("cciss: no suitable DMA available\n");
2756 goto clean1;
2757 }
2758
2759 /*
2760 * register with the major number, or get a dynamic major number
2761	 * by passing 0 as argument.  This is done to support more than
2762	 * 8 controllers.
2763 */
2764 if (i < MAX_CTLR_ORIG)
2765 hba[i]->major = MAJOR_NR + i;
2766 rc = register_blkdev(hba[i]->major, hba[i]->devname);
2767 if(rc == -EBUSY || rc == -EINVAL) {
2768 printk(KERN_ERR
2769 "cciss: Unable to get major number %d for %s "
2770 "on hba %d\n", hba[i]->major, hba[i]->devname, i);
2771 goto clean1;
2772 }
2773 else {
2774 if (i >= MAX_CTLR_ORIG)
2775 hba[i]->major = rc;
2776 }
2777
2778 /* make sure the board interrupts are off */
2779 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
2780 if( request_irq(hba[i]->intr, do_cciss_intr,
2781 SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM,
2782 hba[i]->devname, hba[i])) {
2783 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
2784 hba[i]->intr, hba[i]->devname);
2785 goto clean2;
2786 }
2787 hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
2788 hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
2789 hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
2790 &(hba[i]->cmd_pool_dhandle));
2791 hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
2792 hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
2793 &(hba[i]->errinfo_pool_dhandle));
2794 if((hba[i]->cmd_pool_bits == NULL)
2795 || (hba[i]->cmd_pool == NULL)
2796 || (hba[i]->errinfo_pool == NULL)) {
2797 printk( KERN_ERR "cciss: out of memory");
2798 goto clean4;
2799 }
2800
2801 spin_lock_init(&hba[i]->lock);
2802 q = blk_init_queue(do_cciss_request, &hba[i]->lock);
2803 if (!q)
2804 goto clean4;
2805
2806 q->backing_dev_info.ra_pages = READ_AHEAD;
2807 hba[i]->queue = q;
2808 q->queuedata = hba[i];
2809
2810 /* Initialize the pdev driver private data.
2811 have it point to hba[i]. */
2812 pci_set_drvdata(pdev, hba[i]);
2813 /* command and error info recs zeroed out before
2814 they are used */
2815 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
2816
2817#ifdef CCISS_DEBUG
2818 printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
2819#endif /* CCISS_DEBUG */
2820
2821 cciss_getgeometry(i);
2822
2823 cciss_scsi_setup(i);
2824
2825 /* Turn the interrupts on so we can service requests */
2826 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
2827
2828 cciss_procinit(i);
2829
2830 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
2831
2832 /* This is a hardware imposed limit. */
2833 blk_queue_max_hw_segments(q, MAXSGENTRIES);
2834
2835 /* This is a limit in the driver and could be eliminated. */
2836 blk_queue_max_phys_segments(q, MAXSGENTRIES);
2837
2838 blk_queue_max_sectors(q, 512);
2839
2840
2841 for(j=0; j<NWD; j++) {
2842 drive_info_struct *drv = &(hba[i]->drv[j]);
2843 struct gendisk *disk = hba[i]->gendisk[j];
2844
2845 sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
2846 sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
2847 disk->major = hba[i]->major;
2848 disk->first_minor = j << NWD_SHIFT;
2849 disk->fops = &cciss_fops;
2850 disk->queue = hba[i]->queue;
2851 disk->private_data = drv;
2852 /* we must register the controller even if no disks exist */
2853 /* this is for the online array utilities */
2854 if(!drv->heads && j)
2855 continue;
2856 blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
2857 set_capacity(disk, drv->nr_blocks);
2858 add_disk(disk);
2859 }
2860 return(1);
2861
2862clean4:
2863 if(hba[i]->cmd_pool_bits)
2864 kfree(hba[i]->cmd_pool_bits);
2865 if(hba[i]->cmd_pool)
2866 pci_free_consistent(hba[i]->pdev,
2867 NR_CMDS * sizeof(CommandList_struct),
2868 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
2869 if(hba[i]->errinfo_pool)
2870 pci_free_consistent(hba[i]->pdev,
2871 NR_CMDS * sizeof( ErrorInfo_struct),
2872 hba[i]->errinfo_pool,
2873 hba[i]->errinfo_pool_dhandle);
2874 free_irq(hba[i]->intr, hba[i]);
2875clean2:
2876 unregister_blkdev(hba[i]->major, hba[i]->devname);
2877clean1:
2878 release_io_mem(hba[i]);
2879 free_hba(i);
2880 return(-1);
2881}
2882
2883static void __devexit cciss_remove_one (struct pci_dev *pdev)
2884{
2885 ctlr_info_t *tmp_ptr;
2886 int i, j;
2887 char flush_buf[4];
2888 int return_code;
2889
2890 if (pci_get_drvdata(pdev) == NULL)
2891 {
2892 printk( KERN_ERR "cciss: Unable to remove device \n");
2893 return;
2894 }
2895 tmp_ptr = pci_get_drvdata(pdev);
2896 i = tmp_ptr->ctlr;
2897 if (hba[i] == NULL)
2898 {
2899 printk(KERN_ERR "cciss: device appears to "
2900 "already be removed \n");
2901 return;
2902 }
2903 /* Turn board interrupts off and send the flush cache command */
2904 /* sendcmd will turn off interrupt, and send the flush...
2905 * To write all data in the battery backed cache to disks */
2906 memset(flush_buf, 0, 4);
2907 return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
2908 TYPE_CMD);
2909 if(return_code != IO_OK)
2910 {
2911 printk(KERN_WARNING "Error Flushing cache on controller %d\n",
2912 i);
2913 }
2914 free_irq(hba[i]->intr, hba[i]);
2915 pci_set_drvdata(pdev, NULL);
2916 iounmap(hba[i]->vaddr);
2917 cciss_unregister_scsi(i); /* unhook from SCSI subsystem */
2918 unregister_blkdev(hba[i]->major, hba[i]->devname);
2919 remove_proc_entry(hba[i]->devname, proc_cciss);
2920
2921 /* remove it from the disk list */
2922 for (j = 0; j < NWD; j++) {
2923 struct gendisk *disk = hba[i]->gendisk[j];
2924 if (disk->flags & GENHD_FL_UP)
2925 del_gendisk(disk);
2926 }
2927
2928 blk_cleanup_queue(hba[i]->queue);
2929 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
2930 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
2931 pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
2932 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
2933 kfree(hba[i]->cmd_pool_bits);
2934 release_io_mem(hba[i]);
2935 free_hba(i);
2936}
2937
2938static struct pci_driver cciss_pci_driver = {
2939 .name = "cciss",
2940 .probe = cciss_init_one,
2941 .remove = __devexit_p(cciss_remove_one),
2942 .id_table = cciss_pci_device_id, /* id_table */
2943};
2944
2945/*
2946 * This is it.  Register the PCI driver information for the cards we control;
2947 * the OS will call our registered routines when it finds one of our cards.
2948 */
2949static int __init cciss_init(void)
2950{
2951 printk(KERN_INFO DRIVER_NAME "\n");
2952
2953 /* Register for our PCI devices */
2954 return pci_module_init(&cciss_pci_driver);
2955}
2956
2957static void __exit cciss_cleanup(void)
2958{
2959 int i;
2960
2961 pci_unregister_driver(&cciss_pci_driver);
2962	/* double check that all controller entries have been removed */
2963 for (i=0; i< MAX_CTLR; i++)
2964 {
2965 if (hba[i] != NULL)
2966 {
2967 printk(KERN_WARNING "cciss: had to remove"
2968 " controller %d\n", i);
2969 cciss_remove_one(hba[i]->pdev);
2970 }
2971 }
2972 remove_proc_entry("cciss", proc_root_driver);
2973}
2974
2975module_init(cciss_init);
2976module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
new file mode 100644
index 000000000000..8fb19206eddb
--- /dev/null
+++ b/drivers/block/cciss.h
@@ -0,0 +1,266 @@
1#ifndef CCISS_H
2#define CCISS_H
3
4#include <linux/genhd.h>
5
6#include "cciss_cmd.h"
7
8
9#define NWD 16
10#define NWD_SHIFT 4
11#define MAX_PART (1 << NWD_SHIFT)
12
13#define IO_OK 0
14#define IO_ERROR 1
15
16#define MAJOR_NR COMPAQ_CISS_MAJOR
17
18struct ctlr_info;
19typedef struct ctlr_info ctlr_info_t;
20
21struct access_method {
22 void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
23 void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
24 unsigned long (*fifo_full)(ctlr_info_t *h);
25 unsigned long (*intr_pending)(ctlr_info_t *h);
26 unsigned long (*command_completed)(ctlr_info_t *h);
27};
28typedef struct _drive_info_struct
29{
30 __u32 LunID;
31 int usage_count;
32 sector_t nr_blocks;
33 int block_size;
34 int heads;
35 int sectors;
36 int cylinders;
37 int raid_level;
38} drive_info_struct;
39
40struct ctlr_info
41{
42 int ctlr;
43 char devname[8];
44 char *product_name;
45 char firm_ver[4]; // Firmware version
46 struct pci_dev *pdev;
47 __u32 board_id;
48 void __iomem *vaddr;
49 unsigned long paddr;
50 unsigned long io_mem_addr;
51 unsigned long io_mem_length;
52 CfgTable_struct __iomem *cfgtable;
53 unsigned int intr;
54 int interrupts_enabled;
55 int major;
56 int max_commands;
57 int commands_outstanding;
58 int max_outstanding; /* Debug */
59 int num_luns;
60 int highest_lun;
61	int 	usage_count;   /* number of opens on all minor devices */
62
63 // information about each logical volume
64 drive_info_struct drv[CISS_MAX_LUN];
65
66 struct access_method access;
67
68 /* queue and queue Info */
69 CommandList_struct *reqQ;
70 CommandList_struct *cmpQ;
71 unsigned int Qdepth;
72 unsigned int maxQsinceinit;
73 unsigned int maxSG;
74 spinlock_t lock;
75 struct request_queue *queue;
76
77	/* pointers to command and error info pool */
78 CommandList_struct *cmd_pool;
79 dma_addr_t cmd_pool_dhandle;
80 ErrorInfo_struct *errinfo_pool;
81 dma_addr_t errinfo_pool_dhandle;
82 unsigned long *cmd_pool_bits;
83 int nr_allocs;
84 int nr_frees;
85 int busy_configuring;
86
87 /* This element holds the zero based queue number of the last
88 * queue to be started. It is used for fairness.
89 */
90 int next_to_run;
91
92 // Disk structures we need to pass back
93 struct gendisk *gendisk[NWD];
94#ifdef CONFIG_CISS_SCSI_TAPE
95 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
96#endif
97};
98
99/* Defining the different access methods */
100/*
101 * Memory mapped FIFO interface (SMART 53xx cards)
102 */
103#define SA5_DOORBELL 0x20
104#define SA5_REQUEST_PORT_OFFSET 0x40
105#define SA5_REPLY_INTR_MASK_OFFSET 0x34
106#define SA5_REPLY_PORT_OFFSET 0x44
107#define SA5_INTR_STATUS 0x30
108#define SA5_SCRATCHPAD_OFFSET 0xB0
109
110#define SA5_CTCFG_OFFSET 0xB4
111#define SA5_CTMEM_OFFSET 0xB8
112
113#define SA5_INTR_OFF 0x08
114#define SA5B_INTR_OFF 0x04
115#define SA5_INTR_PENDING 0x08
116#define SA5B_INTR_PENDING 0x04
117#define FIFO_EMPTY 0xffffffff
118#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
119
120#define CISS_ERROR_BIT 0x02
121
122#define CCISS_INTR_ON 1
123#define CCISS_INTR_OFF 0
124/*
125 Send the command to the hardware
126*/
127static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
128{
129#ifdef CCISS_DEBUG
130 printk("Sending %x - down to controller\n", c->busaddr );
131#endif /* CCISS_DEBUG */
132 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
133 h->commands_outstanding++;
134 if ( h->commands_outstanding > h->max_outstanding)
135 h->max_outstanding = h->commands_outstanding;
136}
137
138/*
139 * This card is the opposite of the other cards.
140 * 0 turns interrupts on...
141 * 0x08 turns them off...
142 */
143static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
144{
145 if (val)
146 { /* Turn interrupts on */
147 h->interrupts_enabled = 1;
148 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
149 } else /* Turn them off */
150 {
151 h->interrupts_enabled = 0;
152 writel( SA5_INTR_OFF,
153 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
154 }
155}
156/*
157 * This card is the opposite of the other cards.
158 * 0 turns interrupts on...
159 * 0x04 turns them off...
160 */
161static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
162{
163 if (val)
164 { /* Turn interrupts on */
165 h->interrupts_enabled = 1;
166 writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
167 } else /* Turn them off */
168 {
169 h->interrupts_enabled = 0;
170 writel( SA5B_INTR_OFF,
171 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
172 }
173}
174/*
175 * Returns true if fifo is full.
176 *
177 */
178static unsigned long SA5_fifo_full(ctlr_info_t *h)
179{
180 if( h->commands_outstanding >= h->max_commands)
181 return(1);
182 else
183 return(0);
184
185}
186/*
187 * returns value read from hardware.
188 * returns FIFO_EMPTY if there is nothing to read
189 */
190static unsigned long SA5_completed(ctlr_info_t *h)
191{
192 unsigned long register_value
193 = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
194 if(register_value != FIFO_EMPTY)
195 {
196 h->commands_outstanding--;
197#ifdef CCISS_DEBUG
198 printk("cciss: Read %lx back from board\n", register_value);
199#endif /* CCISS_DEBUG */
200 }
201#ifdef CCISS_DEBUG
202 else
203 {
204 printk("cciss: FIFO Empty read\n");
205 }
206#endif
207 return ( register_value);
208
209}
210/*
211 * Returns true if an interrupt is pending..
212 */
213static unsigned long SA5_intr_pending(ctlr_info_t *h)
214{
215 unsigned long register_value =
216 readl(h->vaddr + SA5_INTR_STATUS);
217#ifdef CCISS_DEBUG
218 printk("cciss: intr_pending %lx\n", register_value);
219#endif /* CCISS_DEBUG */
220 if( register_value & SA5_INTR_PENDING)
221 return 1;
222 return 0 ;
223}
224
225/*
226 * Returns true if an interrupt is pending..
227 */
228static unsigned long SA5B_intr_pending(ctlr_info_t *h)
229{
230 unsigned long register_value =
231 readl(h->vaddr + SA5_INTR_STATUS);
232#ifdef CCISS_DEBUG
233 printk("cciss: intr_pending %lx\n", register_value);
234#endif /* CCISS_DEBUG */
235 if( register_value & SA5B_INTR_PENDING)
236 return 1;
237 return 0 ;
238}
239
240
241static struct access_method SA5_access = {
242 SA5_submit_command,
243 SA5_intr_mask,
244 SA5_fifo_full,
245 SA5_intr_pending,
246 SA5_completed,
247};
248
249static struct access_method SA5B_access = {
250 SA5_submit_command,
251 SA5B_intr_mask,
252 SA5_fifo_full,
253 SA5B_intr_pending,
254 SA5_completed,
255};
256
257struct board_type {
258 __u32 board_id;
259 char *product_name;
260 struct access_method *access;
261};
262
263#define CCISS_LOCK(i) (hba[i]->queue->queue_lock)
264
265#endif /* CCISS_H */
266
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
new file mode 100644
index 000000000000..a88a88817623
--- /dev/null
+++ b/drivers/block/cciss_cmd.h
@@ -0,0 +1,271 @@
1#ifndef CCISS_CMD_H
2#define CCISS_CMD_H
3//###########################################################################
4//DEFINES
5//###########################################################################
6#define CISS_VERSION "1.00"
7
8//general boundary definitions
9#define SENSEINFOBYTES          32 //note that this value may vary between host implementations
10#define MAXSGENTRIES 31
11#define MAXREPLYQS 256
12
13//Command Status value
14#define CMD_SUCCESS 0x0000
15#define CMD_TARGET_STATUS 0x0001
16#define CMD_DATA_UNDERRUN 0x0002
17#define CMD_DATA_OVERRUN 0x0003
18#define CMD_INVALID 0x0004
19#define CMD_PROTOCOL_ERR 0x0005
20#define CMD_HARDWARE_ERR 0x0006
21#define CMD_CONNECTION_LOST 0x0007
22#define CMD_ABORTED 0x0008
23#define CMD_ABORT_FAILED 0x0009
24#define CMD_UNSOLICITED_ABORT 0x000A
25#define CMD_TIMEOUT 0x000B
26#define CMD_UNABORTABLE 0x000C
27
28//transfer direction
29#define XFER_NONE 0x00
30#define XFER_WRITE 0x01
31#define XFER_READ 0x02
32#define XFER_RSVD 0x03
33
34//task attribute
35#define ATTR_UNTAGGED 0x00
36#define ATTR_SIMPLE 0x04
37#define ATTR_HEADOFQUEUE 0x05
38#define ATTR_ORDERED 0x06
39#define ATTR_ACA 0x07
40
41//cdb type
42#define TYPE_CMD 0x00
43#define TYPE_MSG 0x01
44
45//config space register offsets
46#define CFG_VENDORID 0x00
47#define CFG_DEVICEID 0x02
48#define CFG_I2OBAR 0x10
49#define CFG_MEM1BAR 0x14
50
51//i2o space register offsets
52#define I2O_IBDB_SET 0x20
53#define I2O_IBDB_CLEAR 0x70
54#define I2O_INT_STATUS 0x30
55#define I2O_INT_MASK 0x34
56#define I2O_IBPOST_Q 0x40
57#define I2O_OBPOST_Q 0x44
58
59//Configuration Table
60#define CFGTBL_ChangeReq 0x00000001l
61#define CFGTBL_AccCmds 0x00000001l
62
63#define CFGTBL_Trans_Simple 0x00000002l
64
65#define CFGTBL_BusType_Ultra2 0x00000001l
66#define CFGTBL_BusType_Ultra3 0x00000002l
67#define CFGTBL_BusType_Fibre1G 0x00000100l
68#define CFGTBL_BusType_Fibre2G 0x00000200l
69typedef struct _vals32
70{
71 __u32 lower;
72 __u32 upper;
73} vals32;
74
75typedef union _u64bit
76{
77 vals32 val32;
78 __u64 val;
79} u64bit;
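/* Usage sketch (illustration only): u64bit is how the driver splits a 64-bit
 * DMA address into the two 32-bit halves the controller expects, e.g.
 *
 *	u64bit tmp;
 *	tmp.val = (__u64) dma_handle;		// from pci_map_page()/_single()
 *	sg->Addr.lower = tmp.val32.lower;
 *	sg->Addr.upper = tmp.val32.upper;
 *
 * where dma_handle and sg stand in for the caller's own variables.
 */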
80
81// Type defs used in the following structs
82#define BYTE __u8
83#define WORD __u16
84#define HWORD __u16
85#define DWORD __u32
86#define QWORD vals32
87
88//###########################################################################
89//STRUCTURES
90//###########################################################################
91#define CISS_MAX_LUN 16
92#define CISS_MAX_PHYS_LUN 1024
93// SCSI-3 Commands
94
95#pragma pack(1)
96
97#define CISS_INQUIRY 0x12
98//Data returned
99typedef struct _InquiryData_struct
100{
101 BYTE data_byte[36];
102} InquiryData_struct;
103
104#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
105#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
106// Data returned
107typedef struct _ReportLUNdata_struct
108{
109 BYTE LUNListLength[4];
110 DWORD reserved;
111 BYTE LUN[CISS_MAX_LUN][8];
112} ReportLunData_struct;
113
114#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
115typedef struct _ReadCapdata_struct
116{
117 BYTE total_size[4]; // Total size in blocks
118 BYTE block_size[4]; // Size of blocks in bytes
119} ReadCapdata_struct;
120
121// 12 byte commands not implemented in firmware yet.
122// #define CCISS_READ 0xa8 // Read(12)
123// #define CCISS_WRITE 0xaa // Write(12)
124 #define CCISS_READ 0x28 // Read(10)
125 #define CCISS_WRITE 0x2a // Write(10)
126
127// BMIC commands
128#define BMIC_READ 0x26
129#define BMIC_WRITE 0x27
130#define BMIC_CACHE_FLUSH 0xc2
131#define CCISS_CACHE_FLUSH 0x01 //C2 was already being used by CCISS
132
133//Command List Structure
134typedef union _SCSI3Addr_struct {
135 struct {
136 BYTE Dev;
137 BYTE Bus:6;
138 BYTE Mode:2; // b00
139 } PeripDev;
140 struct {
141 BYTE DevLSB;
142 BYTE DevMSB:6;
143 BYTE Mode:2; // b01
144 } LogDev;
145 struct {
146 BYTE Dev:5;
147 BYTE Bus:3;
148 BYTE Targ:6;
149 BYTE Mode:2; // b10
150 } LogUnit;
151} SCSI3Addr_struct;
152
153typedef struct _PhysDevAddr_struct {
154 DWORD TargetId:24;
155 DWORD Bus:6;
156 DWORD Mode:2;
157 SCSI3Addr_struct Target[2]; //2 level target device addr
158} PhysDevAddr_struct;
159
160typedef struct _LogDevAddr_struct {
161 DWORD VolId:30;
162 DWORD Mode:2;
163 BYTE reserved[4];
164} LogDevAddr_struct;
165
166typedef union _LUNAddr_struct {
167 BYTE LunAddrBytes[8];
168 SCSI3Addr_struct SCSI3Lun[4];
169 PhysDevAddr_struct PhysDev;
170 LogDevAddr_struct LogDev;
171} LUNAddr_struct;
172
173typedef struct _CommandListHeader_struct {
174 BYTE ReplyQueue;
175 BYTE SGList;
176 HWORD SGTotal;
177 QWORD Tag;
178 LUNAddr_struct LUN;
179} CommandListHeader_struct;
180typedef struct _RequestBlock_struct {
181 BYTE CDBLen;
182 struct {
183 BYTE Type:3;
184 BYTE Attribute:3;
185 BYTE Direction:2;
186 } Type;
187 HWORD Timeout;
188 BYTE CDB[16];
189} RequestBlock_struct;
190typedef struct _ErrDescriptor_struct {
191 QWORD Addr;
192 DWORD Len;
193} ErrDescriptor_struct;
194typedef struct _SGDescriptor_struct {
195 QWORD Addr;
196 DWORD Len;
197 DWORD Ext;
198} SGDescriptor_struct;
199
200typedef union _MoreErrInfo_struct{
201 struct {
202 BYTE Reserved[3];
203 BYTE Type;
204 DWORD ErrorInfo;
205 }Common_Info;
206 struct{
207 BYTE Reserved[2];
208 BYTE offense_size;//size of offending entry
209 BYTE offense_num; //byte # of offense 0-base
210 DWORD offense_value;
211 }Invalid_Cmd;
212}MoreErrInfo_struct;
213typedef struct _ErrorInfo_struct {
214 BYTE ScsiStatus;
215 BYTE SenseLen;
216 HWORD CommandStatus;
217 DWORD ResidualCnt;
218 MoreErrInfo_struct MoreErrInfo;
219 BYTE SenseInfo[SENSEINFOBYTES];
220} ErrorInfo_struct;
221
222/* Command types */
223#define CMD_RWREQ 0x00
224#define CMD_IOCTL_PEND 0x01
225#define CMD_SCSI 0x03
226#define CMD_MSG_DONE 0x04
227#define CMD_MSG_TIMEOUT 0x05
228
229typedef struct _CommandList_struct {
230 CommandListHeader_struct Header;
231 RequestBlock_struct Request;
232 ErrDescriptor_struct ErrDesc;
233 SGDescriptor_struct SG[MAXSGENTRIES];
234 /* information associated with the command */
235 __u32 busaddr; /* physical address of this record */
236 ErrorInfo_struct * err_info; /* pointer to the allocated mem */
237 int ctlr;
238 int cmd_type;
239 struct _CommandList_struct *prev;
240 struct _CommandList_struct *next;
241 struct request * rq;
242 struct completion *waiting;
243 int retry_count;
244#ifdef CONFIG_CISS_SCSI_TAPE
245 void * scsi_cmd;
246#endif
247} CommandList_struct;
248
249//Configuration Table Structure
250typedef struct _HostWrite_struct {
251 DWORD TransportRequest;
252 DWORD Reserved;
253 DWORD CoalIntDelay;
254 DWORD CoalIntCount;
255} HostWrite_struct;
256
257typedef struct _CfgTable_struct {
258 BYTE Signature[4];
259 DWORD SpecValence;
260 DWORD TransportSupport;
261 DWORD TransportActive;
262 HostWrite_struct HostWrite;
263 DWORD CmdsOutMax;
264 DWORD BusTypes;
265 DWORD Reserved;
266 BYTE ServerName[16];
267 DWORD HeartBeat;
268 DWORD SCSI_Prefetch;
269} CfgTable_struct;
270#pragma pack()
271#endif // CCISS_CMD_H
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
new file mode 100644
index 000000000000..f16e3caed58a
--- /dev/null
+++ b/drivers/block/cciss_scsi.c
@@ -0,0 +1,1417 @@
1/*
2 * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
3 * Copyright 2001 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 * Author: Stephen M. Cameron
22 */
23#ifdef CONFIG_CISS_SCSI_TAPE
24
25/* Here we have code to present the driver as a scsi driver
26 as it is simultaneously presented as a block driver. The
27 reason for doing this is to allow access to SCSI tape drives
28 through the array controller. Note in particular, neither
29 physical nor logical disks are presented through the scsi layer. */
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <asm/atomic.h>
36#include <linux/timer.h>
37#include <linux/completion.h>
38
39#include "cciss_scsi.h"
40
41/* some prototypes... */
42static int sendcmd(
43 __u8 cmd,
44 int ctlr,
45 void *buff,
46 size_t size,
47 unsigned int use_unit_num, /* 0: address the controller,
48 1: address logical volume log_unit,
49 2: address is in scsi3addr */
50 unsigned int log_unit,
51 __u8 page_code,
52 unsigned char *scsi3addr,
53 int cmd_type);
54
55
56static int cciss_scsi_proc_info(
57 struct Scsi_Host *sh,
58 char *buffer, /* data buffer */
59 char **start, /* where data in buffer starts */
60 off_t offset, /* offset from start of imaginary file */
61 int length, /* length of data in buffer */
62 int func); /* 0 == read, 1 == write */
63
64static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
65 void (* done)(struct scsi_cmnd *));
66
67static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
68 { .name = "cciss0", .ndevices = 0 },
69 { .name = "cciss1", .ndevices = 0 },
70 { .name = "cciss2", .ndevices = 0 },
71 { .name = "cciss3", .ndevices = 0 },
72 { .name = "cciss4", .ndevices = 0 },
73 { .name = "cciss5", .ndevices = 0 },
74 { .name = "cciss6", .ndevices = 0 },
75 { .name = "cciss7", .ndevices = 0 },
76};
77
78static struct scsi_host_template cciss_driver_template = {
79 .module = THIS_MODULE,
80 .name = "cciss",
81 .proc_name = "cciss",
82 .proc_info = cciss_scsi_proc_info,
83 .queuecommand = cciss_scsi_queue_command,
84 .can_queue = SCSI_CCISS_CAN_QUEUE,
85 .this_id = 7,
86 .sg_tablesize = MAXSGENTRIES,
87 .cmd_per_lun = 1,
88 .use_clustering = DISABLE_CLUSTERING,
89};
90
91#pragma pack(1)
92struct cciss_scsi_cmd_stack_elem_t {
93 CommandList_struct cmd;
94 ErrorInfo_struct Err;
95 __u32 busaddr;
96};
97
98#pragma pack()
99
100#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
101 CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
102 // plus two for init time usage
103
104#pragma pack(1)
105struct cciss_scsi_cmd_stack_t {
106 struct cciss_scsi_cmd_stack_elem_t *pool;
107 struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
108 dma_addr_t cmd_pool_handle;
109 int top;
110};
111#pragma pack()
112
113struct cciss_scsi_adapter_data_t {
114 struct Scsi_Host *scsi_host;
115 struct cciss_scsi_cmd_stack_t cmd_stack;
116 int registered;
117 spinlock_t lock; // to protect ccissscsi[ctlr];
118};
119
120#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
121 &(((struct cciss_scsi_adapter_data_t *) \
122 hba[ctlr]->scsi_ctlr)->lock), flags);
123#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
124 &(((struct cciss_scsi_adapter_data_t *) \
125 hba[ctlr]->scsi_ctlr)->lock), flags);
126
127static CommandList_struct *
128scsi_cmd_alloc(ctlr_info_t *h)
129{
130 /* assume only one process in here at a time, locking done by caller. */
131 /* use CCISS_LOCK(ctlr) */
132 /* might be better to rewrite how we allocate scsi commands in a way that */
133 /* needs no locking at all. */
134
135 /* take the top memory chunk off the stack and return it, if any. */
136 struct cciss_scsi_cmd_stack_elem_t *c;
137 struct cciss_scsi_adapter_data_t *sa;
138 struct cciss_scsi_cmd_stack_t *stk;
139 u64bit temp64;
140
141 sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
142 stk = &sa->cmd_stack;
143
144 if (stk->top < 0)
145 return NULL;
146 c = stk->elem[stk->top];
147 /* memset(c, 0, sizeof(*c)); */
148 memset(&c->cmd, 0, sizeof(c->cmd));
149 memset(&c->Err, 0, sizeof(c->Err));
150 /* set physical addr of cmd and addr of scsi parameters */
151 c->cmd.busaddr = c->busaddr;
152 /* (__u32) (stk->cmd_pool_handle +
153 (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
154
155 temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
156 /* (__u64) (stk->cmd_pool_handle +
157 (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
158 sizeof(CommandList_struct)); */
159 stk->top--;
160 c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
161 c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
162 c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
163
164 c->cmd.ctlr = h->ctlr;
165 c->cmd.err_info = &c->Err;
166
167 return (CommandList_struct *) c;
168}
169
170static void
171scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
172{
173 /* assume only one process in here at a time, locking done by caller. */
174 /* use CCISS_LOCK(ctlr) */
175 /* drop the free memory chunk on top of the stack. */
176
177 struct cciss_scsi_adapter_data_t *sa;
178 struct cciss_scsi_cmd_stack_t *stk;
179
180 sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
181 stk = &sa->cmd_stack;
182 if (stk->top >= CMD_STACK_SIZE) {
183 printk("cciss: scsi_cmd_free called too many times.\n");
184 BUG();
185 }
186 stk->top++;
187 stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
188}
189
190static int
191scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
192{
193 int i;
194 struct cciss_scsi_cmd_stack_t *stk;
195 size_t size;
196
197 stk = &sa->cmd_stack;
198 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
199
200 // pci_alloc_consistent guarantees 32-bit DMA address will
201 // be used
202
203 stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
204 pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
205
206 if (stk->pool == NULL) {
207 printk("stk->pool is null\n");
208 return -1;
209 }
210
211 for (i=0; i<CMD_STACK_SIZE; i++) {
212 stk->elem[i] = &stk->pool[i];
213 stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
214 (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
215 }
216 stk->top = CMD_STACK_SIZE-1;
217 return 0;
218}
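/*
 * Layout note (illustration only): the elements above sit contiguously in
 * the pci_alloc_consistent() pool, so element i has bus address
 *
 *	stk->cmd_pool_handle + i * sizeof(struct cciss_scsi_cmd_stack_elem_t)
 *
 * which is what gets stored in elem[i]->busaddr.  scsi_cmd_alloc() later
 * copies that into cmd.busaddr and points the error descriptor at
 * busaddr + sizeof(CommandList_struct), i.e. at the Err member that
 * immediately follows the command in each packed element.
 */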
219
220static void
221scsi_cmd_stack_free(int ctlr)
222{
223 struct cciss_scsi_adapter_data_t *sa;
224 struct cciss_scsi_cmd_stack_t *stk;
225 size_t size;
226
227 sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
228 stk = &sa->cmd_stack;
229 if (stk->top != CMD_STACK_SIZE-1) {
230 printk( "cciss: %d scsi commands are still outstanding.\n",
231			CMD_STACK_SIZE - 1 - stk->top);
232 // BUG();
233 printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
234 }
235 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
236
237 pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
238 stk->pool = NULL;
239}
240
241/* scsi_device_types comes from scsi.h */
242#define DEVICETYPE(n) ((n) < 0 || (n) >= MAX_SCSI_DEVICE_CODE ? \
243	"Unknown" : scsi_device_types[n])
244
245#if 0
246static int xmargin=8;
247static int amargin=60;
248
249static void
250print_bytes (unsigned char *c, int len, int hex, int ascii)
251{
252
253 int i;
254 unsigned char *x;
255
256 if (hex)
257 {
258 x = c;
259 for (i=0;i<len;i++)
260 {
261 if ((i % xmargin) == 0 && i>0) printk("\n");
262 if ((i % xmargin) == 0) printk("0x%04x:", i);
263 printk(" %02x", *x);
264 x++;
265 }
266 printk("\n");
267 }
268 if (ascii)
269 {
270 x = c;
271 for (i=0;i<len;i++)
272 {
273 if ((i % amargin) == 0 && i>0) printk("\n");
274 if ((i % amargin) == 0) printk("0x%04x:", i);
275 if (*x > 26 && *x < 128) printk("%c", *x);
276 else printk(".");
277 x++;
278 }
279 printk("\n");
280 }
281}
282
283static void
284print_cmd(CommandList_struct *cp)
285{
286 printk("queue:%d\n", cp->Header.ReplyQueue);
287 printk("sglist:%d\n", cp->Header.SGList);
288 printk("sgtot:%d\n", cp->Header.SGTotal);
289 printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
290 cp->Header.Tag.lower);
291 printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
292 cp->Header.LUN.LunAddrBytes[0],
293 cp->Header.LUN.LunAddrBytes[1],
294 cp->Header.LUN.LunAddrBytes[2],
295 cp->Header.LUN.LunAddrBytes[3],
296 cp->Header.LUN.LunAddrBytes[4],
297 cp->Header.LUN.LunAddrBytes[5],
298 cp->Header.LUN.LunAddrBytes[6],
299 cp->Header.LUN.LunAddrBytes[7]);
300 printk("CDBLen:%d\n", cp->Request.CDBLen);
301 printk("Type:%d\n",cp->Request.Type.Type);
302 printk("Attr:%d\n",cp->Request.Type.Attribute);
303 printk(" Dir:%d\n",cp->Request.Type.Direction);
304 printk("Timeout:%d\n",cp->Request.Timeout);
305 printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
306 " %02x %02x %02x %02x %02x %02x %02x %02x\n",
307 cp->Request.CDB[0], cp->Request.CDB[1],
308 cp->Request.CDB[2], cp->Request.CDB[3],
309 cp->Request.CDB[4], cp->Request.CDB[5],
310 cp->Request.CDB[6], cp->Request.CDB[7],
311 cp->Request.CDB[8], cp->Request.CDB[9],
312 cp->Request.CDB[10], cp->Request.CDB[11],
313 cp->Request.CDB[12], cp->Request.CDB[13],
314		cp->Request.CDB[14], cp->Request.CDB[15]);
315	printk("edesc.Addr: 0x%08x/0x%08x, Len = %d\n",
316 cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
317 cp->ErrDesc.Len);
318 printk("sgs..........Errorinfo:\n");
319 printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
320 printk("senselen:%d\n", cp->err_info->SenseLen);
321 printk("cmd status:%d\n", cp->err_info->CommandStatus);
322 printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
323 printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
324 printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
325 printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
326
327}
328
329#endif
330
331static int
332find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
333{
334 /* finds an unused bus, target, lun for a new device */
335 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
336 int i, found=0;
337 unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
338
339 memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
340
341 target_taken[SELF_SCSI_ID] = 1;
342 for (i=0;i<ccissscsi[ctlr].ndevices;i++)
343 target_taken[ccissscsi[ctlr].dev[i].target] = 1;
344
345 for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
346 if (!target_taken[i]) {
347 *bus = 0; *target=i; *lun = 0; found=1;
348 break;
349 }
350 }
351 return (!found);
352}
353
354static int
355cciss_scsi_add_entry(int ctlr, int hostno,
356 unsigned char *scsi3addr, int devtype)
357{
358 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
359 int n = ccissscsi[ctlr].ndevices;
360 struct cciss_scsi_dev_t *sd;
361
362 if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
363 printk("cciss%d: Too many devices, "
364 "some will be inaccessible.\n", ctlr);
365 return -1;
366 }
367 sd = &ccissscsi[ctlr].dev[n];
368 if (find_bus_target_lun(ctlr, &sd->bus, &sd->target, &sd->lun) != 0)
369 return -1;
370 memcpy(&sd->scsi3addr[0], scsi3addr, 8);
371 sd->devtype = devtype;
372 ccissscsi[ctlr].ndevices++;
373
374	/* initially (before registering with the scsi layer) we don't
375	   know our hostno and we don't want to print anything the first
376	   time anyway (the scsi layer's inquiries will show that info) */
377 if (hostno != -1)
378 printk("cciss%d: %s device c%db%dt%dl%d added.\n",
379 ctlr, DEVICETYPE(sd->devtype), hostno,
380 sd->bus, sd->target, sd->lun);
381 return 0;
382}
383
384static void
385cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
386{
387 /* assumes hba[ctlr]->scsi_ctlr->lock is held */
388 int i;
389 struct cciss_scsi_dev_t sd;
390
391 if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
392 sd = ccissscsi[ctlr].dev[entry];
393 for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
394 ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
395 ccissscsi[ctlr].ndevices--;
396 printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
397 ctlr, DEVICETYPE(sd.devtype), hostno,
398 sd.bus, sd.target, sd.lun);
399}
400
401
402#define SCSI3ADDR_EQ(a,b) ( \
403 (a)[7] == (b)[7] && \
404 (a)[6] == (b)[6] && \
405 (a)[5] == (b)[5] && \
406 (a)[4] == (b)[4] && \
407 (a)[3] == (b)[3] && \
408 (a)[2] == (b)[2] && \
409 (a)[1] == (b)[1] && \
410 (a)[0] == (b)[0])
411
412static int
413adjust_cciss_scsi_table(int ctlr, int hostno,
414 struct cciss_scsi_dev_t sd[], int nsds)
415{
416 /* sd contains scsi3 addresses and devtypes, but
417	   bus, target and lun are not filled in. This function
418 takes what's in sd to be the current and adjusts
419 ccissscsi[] to be in line with what's in sd. */
420
421 int i,j, found, changes=0;
422 struct cciss_scsi_dev_t *csd;
423 unsigned long flags;
424
425 CPQ_TAPE_LOCK(ctlr, flags);
426
427 /* find any devices in ccissscsi[] that are not in
428 sd[] and remove them from ccissscsi[] */
429
430 i = 0;
431 while(i<ccissscsi[ctlr].ndevices) {
432 csd = &ccissscsi[ctlr].dev[i];
433 found=0;
434 for (j=0;j<nsds;j++) {
435 if (SCSI3ADDR_EQ(sd[j].scsi3addr,
436 csd->scsi3addr)) {
437 if (sd[j].devtype == csd->devtype)
438 found=2;
439 else
440 found=1;
441 break;
442 }
443 }
444
445 if (found == 0) { /* device no longer present. */
446 changes++;
447 /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
448 ctlr, DEVICETYPE(csd->devtype), hostno,
449 csd->bus, csd->target, csd->lun); */
450 cciss_scsi_remove_entry(ctlr, hostno, i);
451 /* note, i not incremented */
452 }
453 else if (found == 1) { /* device is different kind */
454 changes++;
455 printk("cciss%d: device c%db%dt%dl%d type changed "
456 "(device type now %s).\n",
457 ctlr, hostno, csd->bus, csd->target, csd->lun,
458 DEVICETYPE(csd->devtype));
459 csd->devtype = sd[j].devtype;
460 i++; /* so just move along. */
461 } else /* device is same as it ever was, */
462 i++; /* so just move along. */
463 }
464
465 /* Now, make sure every device listed in sd[] is also
466 listed in ccissscsi[], adding them if they aren't found */
467
468 for (i=0;i<nsds;i++) {
469 found=0;
470 for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
471 csd = &ccissscsi[ctlr].dev[j];
472 if (SCSI3ADDR_EQ(sd[i].scsi3addr,
473 csd->scsi3addr)) {
474 if (sd[i].devtype == csd->devtype)
475 found=2; /* found device */
476 else
477 found=1; /* found a bug. */
478 break;
479 }
480 }
481 if (!found) {
482 changes++;
483 if (cciss_scsi_add_entry(ctlr, hostno,
484 &sd[i].scsi3addr[0], sd[i].devtype) != 0)
485 break;
486 } else if (found == 1) {
487 /* should never happen... */
488 changes++;
489 printk("cciss%d: device unexpectedly changed type\n",
490 ctlr);
491 /* but if it does happen, we just ignore that device */
492 }
493 }
494 CPQ_TAPE_UNLOCK(ctlr, flags);
495
496 if (!changes)
497 printk("cciss%d: No device changes detected.\n", ctlr);
498
499 return 0;
500}
501
502static int
503lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
504{
505 int i;
506 struct cciss_scsi_dev_t *sd;
507 unsigned long flags;
508
509 CPQ_TAPE_LOCK(ctlr, flags);
510 for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
511 sd = &ccissscsi[ctlr].dev[i];
512 if (sd->bus == bus &&
513 sd->target == target &&
514 sd->lun == lun) {
515 memcpy(scsi3addr, &sd->scsi3addr[0], 8);
516 CPQ_TAPE_UNLOCK(ctlr, flags);
517 return 0;
518 }
519 }
520 CPQ_TAPE_UNLOCK(ctlr, flags);
521 return -1;
522}
523
524static void
525cciss_scsi_setup(int cntl_num)
526{
527 struct cciss_scsi_adapter_data_t * shba;
528
529 ccissscsi[cntl_num].ndevices = 0;
530 shba = (struct cciss_scsi_adapter_data_t *)
531 kmalloc(sizeof(*shba), GFP_KERNEL);
532 if (shba == NULL)
533 return;
534 shba->scsi_host = NULL;
535 spin_lock_init(&shba->lock);
536 shba->registered = 0;
537 if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
538 kfree(shba);
539 shba = NULL;
540 }
541 hba[cntl_num]->scsi_ctlr = (void *) shba;
542 return;
543}
544
545static void
546complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
547{
548 struct scsi_cmnd *cmd;
549 ctlr_info_t *ctlr;
550 u64bit addr64;
551 ErrorInfo_struct *ei;
552
553 ei = cp->err_info;
554
555 /* First, see if it was a message rather than a command */
556 if (cp->Request.Type.Type == TYPE_MSG) {
557 cp->cmd_type = CMD_MSG_DONE;
558 return;
559 }
560
561 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
562 ctlr = hba[cp->ctlr];
563
564 /* undo the DMA mappings */
565
566 if (cmd->use_sg) {
567 pci_unmap_sg(ctlr->pdev,
568 cmd->buffer, cmd->use_sg,
569 cmd->sc_data_direction);
570 }
571 else if (cmd->request_bufflen) {
572 addr64.val32.lower = cp->SG[0].Addr.lower;
573 addr64.val32.upper = cp->SG[0].Addr.upper;
574 pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
575 cmd->request_bufflen,
576 cmd->sc_data_direction);
577 }
578
579 cmd->result = (DID_OK << 16); /* host byte */
580 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
581 /* cmd->result |= (GOOD < 1); */ /* status byte */
582
583 cmd->result |= (ei->ScsiStatus);
584 /* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */
585
586 /* copy the sense data whether we need to or not. */
587
588 memcpy(cmd->sense_buffer, ei->SenseInfo,
589 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
590 SCSI_SENSE_BUFFERSIZE :
591 ei->SenseLen);
592 cmd->resid = ei->ResidualCnt;
593
594 if(ei->CommandStatus != 0)
595 { /* an error has occurred */
596 switch(ei->CommandStatus)
597 {
598 case CMD_TARGET_STATUS:
599 /* Pass it up to the upper layers... */
600 if( ei->ScsiStatus)
601 {
602#if 0
603 printk(KERN_WARNING "cciss: cmd %p "
604 "has SCSI Status = %x\n",
605 cp,
606 ei->ScsiStatus);
607#endif
608 cmd->result |= (ei->ScsiStatus < 1);
609 }
610 else { /* scsi status is zero??? How??? */
611
612 /* Ordinarily, this case should never happen, but there is a bug
613 in some released firmware revisions that allows it to happen
614 if, for example, a 4100 backplane loses power and the tape
615 drive is in it. We assume that it's a fatal error of some
616 kind because we can't show that it wasn't. We will make it
617 look like selection timeout since that is the most common
618 reason for this to occur, and it's severe enough. */
619
620 cmd->result = DID_NO_CONNECT << 16;
621 }
622 break;
623 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
624 break;
625 case CMD_DATA_OVERRUN:
626 printk(KERN_WARNING "cciss: cp %p has"
627 " completed with data overrun "
628 "reported\n", cp);
629 break;
630 case CMD_INVALID: {
631 /* print_bytes(cp, sizeof(*cp), 1, 0);
632 print_cmd(cp); */
633 /* We get CMD_INVALID if you address a non-existent tape drive instead
634 of a selection timeout (no response). You will see this if you yank
635 out a tape drive, then try to access it. This is kind of a shame
636 because it means that any other CMD_INVALID (e.g. driver bug) will
637 get interpreted as a missing target. */
638 cmd->result = DID_NO_CONNECT << 16;
639 }
640 break;
641 case CMD_PROTOCOL_ERR:
642 printk(KERN_WARNING "cciss: cp %p has "
643 "protocol error \n", cp);
644 break;
645 case CMD_HARDWARE_ERR:
646 cmd->result = DID_ERROR << 16;
647 printk(KERN_WARNING "cciss: cp %p had "
648					"hardware error\n", cp);
649 break;
650 case CMD_CONNECTION_LOST:
651 cmd->result = DID_ERROR << 16;
652 printk(KERN_WARNING "cciss: cp %p had "
653 "connection lost\n", cp);
654 break;
655 case CMD_ABORTED:
656 cmd->result = DID_ABORT << 16;
657 printk(KERN_WARNING "cciss: cp %p was "
658 "aborted\n", cp);
659 break;
660 case CMD_ABORT_FAILED:
661 cmd->result = DID_ERROR << 16;
662 printk(KERN_WARNING "cciss: cp %p reports "
663 "abort failed\n", cp);
664 break;
665 case CMD_UNSOLICITED_ABORT:
666 cmd->result = DID_ABORT << 16;
667 printk(KERN_WARNING "cciss: cp %p aborted "
668				"due to an unsolicited abort\n", cp);
669 break;
670 case CMD_TIMEOUT:
671 cmd->result = DID_TIME_OUT << 16;
672			cmd->result = DID_TIME_OUT << 16;
673			printk(KERN_WARNING "cciss: cp %p timed out\n",
673 cp);
674 break;
675 default:
676 cmd->result = DID_ERROR << 16;
677 printk(KERN_WARNING "cciss: cp %p returned "
678 "unknown status %x\n", cp,
679 ei->CommandStatus);
680 }
681 }
682 // printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel,
683 // cmd->target, cmd->lun);
684 cmd->scsi_done(cmd);
685 scsi_cmd_free(ctlr, cp);
686}
687
688static int
689cciss_scsi_detect(int ctlr)
690{
691 struct Scsi_Host *sh;
692 int error;
693
694 sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
695 if (sh == NULL)
696 goto fail;
697 sh->io_port = 0; // good enough? FIXME,
698 sh->n_io_port = 0; // I don't think we use these two...
699 sh->this_id = SELF_SCSI_ID;
700
701 ((struct cciss_scsi_adapter_data_t *)
702 hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
703 sh->hostdata[0] = (unsigned long) hba[ctlr];
704 sh->irq = hba[ctlr]->intr;
705 sh->unique_id = sh->irq;
706 error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
707 if (error)
708 goto fail_host_put;
709 scsi_scan_host(sh);
710 return 1;
711
712 fail_host_put:
713 scsi_host_put(sh);
714 fail:
715 return 0;
716}
717
718static void
719cciss_unmap_one(struct pci_dev *pdev,
720 CommandList_struct *cp,
721 size_t buflen,
722 int data_direction)
723{
724 u64bit addr64;
725
726 addr64.val32.lower = cp->SG[0].Addr.lower;
727 addr64.val32.upper = cp->SG[0].Addr.upper;
728 pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
729}
730
731static void
732cciss_map_one(struct pci_dev *pdev,
733 CommandList_struct *cp,
734 unsigned char *buf,
735 size_t buflen,
736 int data_direction)
737{
738 __u64 addr64;
739
740 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
741 cp->SG[0].Addr.lower =
742 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
743 cp->SG[0].Addr.upper =
744 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
745 cp->SG[0].Len = buflen;
746 cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
747 cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
748}
749
750static int
751cciss_scsi_do_simple_cmd(ctlr_info_t *c,
752 CommandList_struct *cp,
753 unsigned char *scsi3addr,
754 unsigned char *cdb,
755 unsigned char cdblen,
756 unsigned char *buf, int bufsize,
757 int direction)
758{
759 unsigned long flags;
760 DECLARE_COMPLETION(wait);
761
762 cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl
763 cp->scsi_cmd = NULL;
764 cp->Header.ReplyQueue = 0; // unused in simple mode
765 memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
766	cp->Header.Tag.lower = cp->busaddr;	// use the command's bus address as the tag
767 // Fill in the request block...
768
769	/* printk("Using scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
770 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
771 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
772
773 memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
774 memcpy(cp->Request.CDB, cdb, cdblen);
775 cp->Request.Timeout = 0;
776 cp->Request.CDBLen = cdblen;
777 cp->Request.Type.Type = TYPE_CMD;
778 cp->Request.Type.Attribute = ATTR_SIMPLE;
779 cp->Request.Type.Direction = direction;
780
781 /* Fill in the SG list and do dma mapping */
782 cciss_map_one(c->pdev, cp, (unsigned char *) buf,
783 bufsize, DMA_FROM_DEVICE);
784
785 cp->waiting = &wait;
786
787 /* Put the request on the tail of the request queue */
788 spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
789 addQ(&c->reqQ, cp);
790 c->Qdepth++;
791 start_io(c);
792 spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
793
794 wait_for_completion(&wait);
795
796 /* undo the dma mapping */
797 cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
798 return(0);
799}
800
801static void
802cciss_scsi_interpret_error(CommandList_struct *cp)
803{
804 ErrorInfo_struct *ei;
805
806 ei = cp->err_info;
807 switch(ei->CommandStatus)
808 {
809 case CMD_TARGET_STATUS:
810 printk(KERN_WARNING "cciss: cmd %p has "
811 "completed with errors\n", cp);
812 printk(KERN_WARNING "cciss: cmd %p "
813 "has SCSI Status = %x\n",
814 cp,
815 ei->ScsiStatus);
816 if (ei->ScsiStatus == 0)
817 printk(KERN_WARNING
818				"cciss: SCSI status is abnormally zero. "
819 "(probably indicates selection timeout "
820 "reported incorrectly due to a known "
821 "firmware bug, circa July, 2001.)\n");
822 break;
823 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
824 printk("UNDERRUN\n");
825 break;
826 case CMD_DATA_OVERRUN:
827 printk(KERN_WARNING "cciss: cp %p has"
828 " completed with data overrun "
829 "reported\n", cp);
830 break;
831 case CMD_INVALID: {
832 /* controller unfortunately reports SCSI passthru's */
833 /* to non-existent targets as invalid commands. */
834 printk(KERN_WARNING "cciss: cp %p is "
835 "reported invalid (probably means "
836 "target device no longer present)\n",
837 cp);
838 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
839 print_cmd(cp); */
840 }
841 break;
842 case CMD_PROTOCOL_ERR:
843 printk(KERN_WARNING "cciss: cp %p has "
844 "protocol error \n", cp);
845 break;
846 case CMD_HARDWARE_ERR:
847 /* cmd->result = DID_ERROR << 16; */
848 printk(KERN_WARNING "cciss: cp %p had "
849			"hardware error\n", cp);
850 break;
851 case CMD_CONNECTION_LOST:
852 printk(KERN_WARNING "cciss: cp %p had "
853 "connection lost\n", cp);
854 break;
855 case CMD_ABORTED:
856 printk(KERN_WARNING "cciss: cp %p was "
857 "aborted\n", cp);
858 break;
859 case CMD_ABORT_FAILED:
860 printk(KERN_WARNING "cciss: cp %p reports "
861 "abort failed\n", cp);
862 break;
863 case CMD_UNSOLICITED_ABORT:
864 printk(KERN_WARNING "cciss: cp %p aborted "
865			"due to an unsolicited abort\n", cp);
866 break;
867 case CMD_TIMEOUT:
868		printk(KERN_WARNING "cciss: cp %p timed out\n",
869 cp);
870 break;
871 default:
872 printk(KERN_WARNING "cciss: cp %p returned "
873 "unknown status %x\n", cp,
874 ei->CommandStatus);
875 }
876}
877
878static int
879cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
880 InquiryData_struct *buf)
881{
882 int rc;
883 CommandList_struct *cp;
884 char cdb[6];
885 ErrorInfo_struct *ei;
886 unsigned long flags;
887
888 spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
889 cp = scsi_cmd_alloc(c);
890 spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
891
892 if (cp == NULL) { /* trouble... */
893		printk("scsi_cmd_alloc returned NULL!\n");
894 return -1;
895 }
896
897 ei = cp->err_info;
898
899 cdb[0] = CISS_INQUIRY;
900 cdb[1] = 0;
901 cdb[2] = 0;
902 cdb[3] = 0;
903 cdb[4] = sizeof(*buf) & 0xff;
904 cdb[5] = 0;
905 rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb,
906 6, (unsigned char *) buf,
907 sizeof(*buf), XFER_READ);
908
909 if (rc != 0) return rc; /* something went wrong */
910
911 if (ei->CommandStatus != 0 &&
912 ei->CommandStatus != CMD_DATA_UNDERRUN) {
913 cciss_scsi_interpret_error(cp);
914 rc = -1;
915 }
916 spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
917 scsi_cmd_free(c, cp);
918 spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
919 return rc;
920}
921
922static int
923cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
924 ReportLunData_struct *buf, int bufsize)
925{
926 int rc;
927 CommandList_struct *cp;
928 unsigned char cdb[12];
929 unsigned char scsi3addr[8];
930 ErrorInfo_struct *ei;
931 unsigned long flags;
932
933 spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
934 cp = scsi_cmd_alloc(c);
935 spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
936 if (cp == NULL) { /* trouble... */
937		printk("scsi_cmd_alloc returned NULL!\n");
938 return -1;
939 }
940
941 memset(&scsi3addr[0], 0, 8); /* address the controller */
942 cdb[0] = CISS_REPORT_PHYS;
943 cdb[1] = 0;
944 cdb[2] = 0;
945 cdb[3] = 0;
946 cdb[4] = 0;
947 cdb[5] = 0;
948 cdb[6] = (bufsize >> 24) & 0xFF; //MSB
949 cdb[7] = (bufsize >> 16) & 0xFF;
950 cdb[8] = (bufsize >> 8) & 0xFF;
951 cdb[9] = bufsize & 0xFF;
952 cdb[10] = 0;
953 cdb[11] = 0;
954
955 rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr,
956 cdb, 12,
957 (unsigned char *) buf,
958 bufsize, XFER_READ);
959
960 if (rc != 0) return rc; /* something went wrong */
961
962 ei = cp->err_info;
963 if (ei->CommandStatus != 0 &&
964 ei->CommandStatus != CMD_DATA_UNDERRUN) {
965 cciss_scsi_interpret_error(cp);
966 rc = -1;
967 }
968 spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
969 scsi_cmd_free(c, cp);
970 spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
971 return rc;
972}
973
974static void
975cciss_update_non_disk_devices(int cntl_num, int hostno)
976{
977 /* the idea here is we could get notified from /proc
978 that some devices have changed, so we do a report
979 physical luns cmd, and adjust our list of devices
980	   accordingly. (We can't rely on the scsi mid-layer just
981	   doing inquiries, because the "busses" that the scsi
982	   mid-layer probes are totally fabricated by this driver,
983	   so new devices wouldn't show up.)
984
985 the scsi3addr's of devices won't change so long as the
986 adapter is not reset. That means we can rescan and
987 tell which devices we already know about, vs. new
988 devices, vs. disappearing devices.
989
990 Also, if you yank out a tape drive, then put in a disk
991	   in its place (say, a configured volume from another
992	   array controller, for instance), _don't_ poke this driver
993	   (so it thinks it's still a tape), but _do_ poke the scsi
994 mid layer, so it does an inquiry... the scsi mid layer
995 will see the physical disk. This would be bad. Need to
996 think about how to prevent that. One idea would be to
997	   snoop all scsi responses and if an inquiry response comes
998	   back that reports a disk, chuck it and return selection
999	   timeout instead and adjust our table... Not sure I like
1000 that though.
1001
1002 */
1003
1004 ReportLunData_struct *ld_buff;
1005 InquiryData_struct *inq_buff;
1006 unsigned char scsi3addr[8];
1007 ctlr_info_t *c;
1008 __u32 num_luns=0;
1009 unsigned char *ch;
1010 /* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
1011 struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
1012 int ncurrent=0;
1013 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
1014 int i;
1015
1016 c = (ctlr_info_t *) hba[cntl_num];
1017 ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
1018 if (ld_buff == NULL) {
1019 printk(KERN_ERR "cciss: out of memory\n");
1020 return;
1021 }
1022 memset(ld_buff, 0, reportlunsize);
1023 inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
1024 if (inq_buff == NULL) {
1025 printk(KERN_ERR "cciss: out of memory\n");
1026 kfree(ld_buff);
1027 return;
1028 }
1029
1030 if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
1031 ch = &ld_buff->LUNListLength[0];
1032 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
1033 if (num_luns > CISS_MAX_PHYS_LUN) {
1034 printk(KERN_WARNING
1035 "cciss: Maximum physical LUNs (%d) exceeded. "
1036 "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
1037 num_luns - CISS_MAX_PHYS_LUN);
1038 num_luns = CISS_MAX_PHYS_LUN;
1039 }
1040 }
1041 else {
1042 printk(KERN_ERR "cciss: Report physical LUNs failed.\n");
1043 goto out;
1044 }
1045
1046
1047 /* adjust our table of devices */
1048 for(i=0; i<num_luns; i++)
1049 {
1050 int devtype;
1051
1052 /* for each physical lun, do an inquiry */
1053 if (ld_buff->LUN[i][3] & 0xC0) continue;
1054 memset(inq_buff, 0, sizeof(InquiryData_struct));
1055 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
1056
1057 if (cciss_scsi_do_inquiry(hba[cntl_num],
1058 scsi3addr, inq_buff) != 0)
1059 {
1060 /* Inquiry failed (msg printed already) */
1061 devtype = 0; /* so we will skip this device. */
1062 } else /* what kind of device is this? */
1063 devtype = (inq_buff->data_byte[0] & 0x1f);
1064
1065 switch (devtype)
1066 {
1067 case 0x01: /* sequential access, (tape) */
1068 case 0x08: /* medium changer */
1069 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
1070 printk(KERN_INFO "cciss%d: %s ignored, "
1071 "too many devices.\n", cntl_num,
1072 DEVICETYPE(devtype));
1073 break;
1074 }
1075 memcpy(&currentsd[ncurrent].scsi3addr[0],
1076 &scsi3addr[0], 8);
1077 currentsd[ncurrent].devtype = devtype;
1078 currentsd[ncurrent].bus = -1;
1079 currentsd[ncurrent].target = -1;
1080 currentsd[ncurrent].lun = -1;
1081 ncurrent++;
1082 break;
1083 default:
1084 break;
1085 }
1086 }
1087
1088 adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
1089out:
1090 kfree(inq_buff);
1091 kfree(ld_buff);
1092 return;
1093}
1094
1095static int
1096is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c
1097{
1098 int verb_len = strlen(verb);
1099 if (len >= verb_len && !memcmp(verb,ptr,verb_len))
1100 return verb_len;
1101 else
1102 return 0;
1103}
1104
1105static int
1106cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
1107{
1108 int arg_len;
1109
1110 if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
1111 cciss_update_non_disk_devices(ctlr, hostno);
1112 else
1113 return -EINVAL;
1114 return length;
1115}
1116
1117
1118static int
1119cciss_scsi_proc_info(struct Scsi_Host *sh,
1120 char *buffer, /* data buffer */
1121 char **start, /* where data in buffer starts */
1122 off_t offset, /* offset from start of imaginary file */
1123 int length, /* length of data in buffer */
1124 int func) /* 0 == read, 1 == write */
1125{
1126
1127 int buflen, datalen;
1128 ctlr_info_t *ci;
1129 int cntl_num;
1130
1131
1132 ci = (ctlr_info_t *) sh->hostdata[0];
1133 if (ci == NULL) /* This really shouldn't ever happen. */
1134 return -EINVAL;
1135
1136 cntl_num = ci->ctlr; /* Get our index into the hba[] array */
1137
1138	if (func == 0) {	/* User is reading from /proc/scsi/cciss*?/?* */
1139 buflen = sprintf(buffer, "hostnum=%d\n", sh->host_no);
1140
1141 datalen = buflen - offset;
1142 if (datalen < 0) { /* they're reading past EOF. */
1143 datalen = 0;
1144 *start = buffer+buflen;
1145 } else
1146 *start = buffer + offset;
1147 return(datalen);
1148 } else /* User is writing to /proc/scsi/cciss*?/?* ... */
1149 return cciss_scsi_user_command(cntl_num, sh->host_no,
1150 buffer, length);
1151}
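/*
 * Illustrative usage note: the write side of the proc handler above is
 * what lets user space trigger a rescan of tape drives / medium changers.
 * The exact file name depends on the host number the SCSI core assigns,
 * but the idea is roughly:
 *
 *	echo rescan > /proc/scsi/cciss/<host_no>
 *
 * which lands in cciss_scsi_user_command() and, via the "rescan" keyword,
 * in cciss_update_non_disk_devices().
 */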
1152
1153/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1154 dma mapping and fills in the scatter gather entries of the
1155 cciss command, cp. */
1156
1157static void
1158cciss_scatter_gather(struct pci_dev *pdev,
1159 CommandList_struct *cp,
1160 struct scsi_cmnd *cmd)
1161{
1162 unsigned int use_sg, nsegs=0, len;
1163 struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
1164 __u64 addr64;
1165
1166 /* is it just one virtual address? */
1167 if (!cmd->use_sg) {
1168 if (cmd->request_bufflen) { /* anything to xfer? */
1169
1170 addr64 = (__u64) pci_map_single(pdev,
1171 cmd->request_buffer,
1172 cmd->request_bufflen,
1173 cmd->sc_data_direction);
1174
1175 cp->SG[0].Addr.lower =
1176 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1177 cp->SG[0].Addr.upper =
1178 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1179 cp->SG[0].Len = cmd->request_bufflen;
1180 nsegs=1;
1181 }
1182 } /* else, must be a list of virtual addresses.... */
1183 else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
1184
1185 use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg,
1186 cmd->sc_data_direction);
1187
1188 for (nsegs=0; nsegs < use_sg; nsegs++) {
1189 addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
1190 len = sg_dma_len(&scatter[nsegs]);
1191 cp->SG[nsegs].Addr.lower =
1192 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1193 cp->SG[nsegs].Addr.upper =
1194 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1195 cp->SG[nsegs].Len = len;
1196 cp->SG[nsegs].Ext = 0; // we are not chaining
1197 }
1198 } else BUG();
1199
1200 cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */
1201 cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
1202 return;
1203}
1204
1205
1206static int
1207cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
1208{
1209 ctlr_info_t **c;
1210 int ctlr, rc;
1211 unsigned char scsi3addr[8];
1212 CommandList_struct *cp;
1213 unsigned long flags;
1214
1215 // Get the ptr to our adapter structure (hba[i]) out of cmd->host.
1216 // We violate cmd->host privacy here. (Is there another way?)
1217 c = (ctlr_info_t **) &cmd->device->host->hostdata[0];
1218 ctlr = (*c)->ctlr;
1219
1220 rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id,
1221 cmd->device->lun, scsi3addr);
1222 if (rc != 0) {
1223 /* the scsi nexus does not match any that we presented... */
1224 /* pretend to mid layer that we got selection timeout */
1225 cmd->result = DID_NO_CONNECT << 16;
1226 done(cmd);
1227 /* we might want to think about registering controller itself
1228 as a processor device on the bus so sg binds to it. */
1229 return 0;
1230 }
1231
1232 /* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n",
1233 cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
1234 // printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel,
1235 // cmd->target, cmd->lun);
1236
1237 /* Ok, we have a reasonable scsi nexus, so send the cmd down, and
1238 see what the device thinks of it. */
1239
1240 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1241 cp = scsi_cmd_alloc(*c);
1242 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1243 if (cp == NULL) { /* trouble... */
1244 printk("scsi_cmd_alloc returned NULL!\n");
1245 /* FIXME: next 3 lines are -> BAD! <- */
1246 cmd->result = DID_NO_CONNECT << 16;
1247 done(cmd);
1248 return 0;
1249 }
1250
1251 // Fill in the command list header
1252
1253 cmd->scsi_done = done; // save this for use by completion code
1254
1255 // save cp in case we have to abort it
1256 cmd->host_scribble = (unsigned char *) cp;
1257
1258 cp->cmd_type = CMD_SCSI;
1259 cp->scsi_cmd = cmd;
1260 cp->Header.ReplyQueue = 0; // unused in simple mode
1261 memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
1262	cp->Header.Tag.lower = cp->busaddr;	// use the command's bus address as the tag
1263
1264 // Fill in the request block...
1265
1266 cp->Request.Timeout = 0;
1267 memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
1268 if (cmd->cmd_len > sizeof(cp->Request.CDB)) BUG();
1269 cp->Request.CDBLen = cmd->cmd_len;
1270 memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
1271 cp->Request.Type.Type = TYPE_CMD;
1272 cp->Request.Type.Attribute = ATTR_SIMPLE;
1273 switch(cmd->sc_data_direction)
1274 {
1275 case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
1276 case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
1277 case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
1278 case DMA_BIDIRECTIONAL:
1279 // This can happen if a buggy application does a scsi passthru
1280 // and sets both inlen and outlen to non-zero. ( see
1281 // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
1282
1283 cp->Request.Type.Direction = XFER_RSVD;
1284 // This is technically wrong, and cciss controllers should
1285 // reject it with CMD_INVALID, which is the most correct
1286 // response, but non-fibre backends appear to let it
1287 // slide by, and give the same results as if this field
1288 // were set correctly. Either way is acceptable for
1289 // our purposes here.
1290
1291 break;
1292
1293 default:
1294 printk("cciss: unknown data direction: %d\n",
1295 cmd->sc_data_direction);
1296 BUG();
1297 break;
1298 }
1299
1300 cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
1301
1302 /* Put the request on the tail of the request queue */
1303
1304 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1305 addQ(&(*c)->reqQ, cp);
1306 (*c)->Qdepth++;
1307 start_io(*c);
1308 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1309
1310 /* the cmd'll come back via intr handler in complete_scsi_command() */
1311 return 0;
1312}
1313
1314static void
1315cciss_unregister_scsi(int ctlr)
1316{
1317 struct cciss_scsi_adapter_data_t *sa;
1318 struct cciss_scsi_cmd_stack_t *stk;
1319 unsigned long flags;
1320
1321 /* we are being forcibly unloaded, and may not refuse. */
1322
1323 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1324 sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
1325 stk = &sa->cmd_stack;
1326
1327 /* if we weren't ever actually registered, don't unregister */
1328 if (sa->registered) {
1329 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1330 scsi_remove_host(sa->scsi_host);
1331 scsi_host_put(sa->scsi_host);
1332 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1333 }
1334
1335 /* set scsi_host to NULL so our detect routine will
1336 find us on register */
1337 sa->scsi_host = NULL;
1338 scsi_cmd_stack_free(ctlr);
1339 kfree(sa);
1340 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1341}
1342
1343static int
1344cciss_register_scsi(int ctlr)
1345{
1346 unsigned long flags;
1347
1348 CPQ_TAPE_LOCK(ctlr, flags);
1349
1350 /* Since this is really a block driver, the SCSI core may not be
1351 initialized at init time, in which case, calling scsi_register_host
1352 would hang. Instead, we do it later, via /proc filesystem
1353 and rc scripts, when we know SCSI core is good to go. */
1354
1355 /* Only register if SCSI devices are detected. */
1356 if (ccissscsi[ctlr].ndevices != 0) {
1357 ((struct cciss_scsi_adapter_data_t *)
1358 hba[ctlr]->scsi_ctlr)->registered = 1;
1359 CPQ_TAPE_UNLOCK(ctlr, flags);
1360 return cciss_scsi_detect(ctlr);
1361 }
1362 CPQ_TAPE_UNLOCK(ctlr, flags);
1363 printk(KERN_INFO
1364 "cciss%d: No appropriate SCSI device detected, "
1365 "SCSI subsystem not engaged.\n", ctlr);
1366 return 0;
1367}
1368
1369static int
1370cciss_engage_scsi(int ctlr)
1371{
1372 struct cciss_scsi_adapter_data_t *sa;
1373 struct cciss_scsi_cmd_stack_t *stk;
1374 unsigned long flags;
1375
1376 spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
1377 sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
1378 stk = &sa->cmd_stack;
1379
1380 if (((struct cciss_scsi_adapter_data_t *)
1381 hba[ctlr]->scsi_ctlr)->registered) {
1382 printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
1383 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1384 return ENXIO;
1385 }
1386 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
1387 cciss_update_non_disk_devices(ctlr, -1);
1388 cciss_register_scsi(ctlr);
1389 return 0;
1390}
1391
1392static void
1393cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
1394{
1395 unsigned long flags;
1396 int size;
1397
1398 *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
1399
1400 CPQ_TAPE_LOCK(ctlr, flags);
1401 size = sprintf(buffer + *len,
1402 " Sequential access devices: %d\n\n",
1403 ccissscsi[ctlr].ndevices);
1404 CPQ_TAPE_UNLOCK(ctlr, flags);
1405 *pos += size; *len += size;
1406}
1407
1408#else /* no CONFIG_CISS_SCSI_TAPE */
1409
1410/* If no tape support, then these become defined out of existence */
1411
1412#define cciss_scsi_setup(cntl_num)
1413#define cciss_unregister_scsi(ctlr)
1414#define cciss_register_scsi(ctlr)
1415#define cciss_proc_tape_report(ctlr, buffer, pos, len)
1416
1417#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
new file mode 100644
index 000000000000..5e7e06c07d6c
--- /dev/null
+++ b/drivers/block/cciss_scsi.h
@@ -0,0 +1,79 @@
1/*
2 * Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
3 * Copyright 2001 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#ifdef CONFIG_CISS_SCSI_TAPE
23#ifndef _CCISS_SCSI_H_
24#define _CCISS_SCSI_H_
25
26#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
27
28 // the scsi id of the adapter...
29#define SELF_SCSI_ID 15
30 // 15 is somewhat arbitrary, since the scsi-2 bus
31 // that's presented by the driver to the OS is
32 // fabricated. The "real" scsi-3 bus the
33 // hardware presents is fabricated too.
34 // The actual, honest-to-goodness physical
35 // bus that the devices are attached to is not
36	//       addressable natively, and may in fact turn
37 // out to be not scsi at all.
38
39#define SCSI_CCISS_CAN_QUEUE 2
40
41/*
42
43Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
44Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
45
46If the upper scsi layer tries to track how many commands we have
47outstanding, it will be operating under the misapprehension that it is
48the only one sending us requests. We also have the block interface,
49which is where most requests must surely come from, so the upper layer's
50notion of how many requests we have outstanding will be wrong most or
51all of the time.
52
53Note, the normal SCSI mid-layer error handling doesn't work well
54for this driver because 1) it takes the io_request_lock before
55calling error handlers and uses a local variable to store flags,
56so the io_request_lock cannot be released and interrupts enabled
57inside the error handlers, and 2) the error handlers cannot poll
58for command completion because they might get commands from the
59block half of the driver completing, and not know what to do
60with them. That's what we get for making a hybrid scsi/block
61driver, I suppose.
62
63*/
64
65struct cciss_scsi_dev_t {
66 int devtype;
67 int bus, target, lun; /* as presented to the OS */
68 unsigned char scsi3addr[8]; /* as presented to the HW */
69};
70
71struct cciss_scsi_hba_t {
72 char *name;
73 int ndevices;
74#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
75 struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
76};
77
78#endif /* _CCISS_SCSI_H_ */
79#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
new file mode 100644
index 000000000000..0ef7a0065ece
--- /dev/null
+++ b/drivers/block/cfq-iosched.c
@@ -0,0 +1,1856 @@
1/*
2 * linux/drivers/block/cfq-iosched.c
3 *
4 * CFQ, or complete fairness queueing, disk scheduler.
5 *
6 * Based on ideas from a previously unfinished io
7 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
8 *
9 * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
10 */
11#include <linux/kernel.h>
12#include <linux/fs.h>
13#include <linux/blkdev.h>
14#include <linux/elevator.h>
15#include <linux/bio.h>
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/compiler.h>
21#include <linux/hash.h>
22#include <linux/rbtree.h>
23#include <linux/mempool.h>
24
25static unsigned long max_elapsed_crq;
26static unsigned long max_elapsed_dispatch;
27
28/*
29 * tunables
30 */
31static int cfq_quantum = 4; /* max queue in one round of service */
32static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
33static int cfq_service = HZ; /* period over which service is avg */
34static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */
35static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */
36static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */
37static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
38static int cfq_back_penalty = 2; /* penalty of a backwards seek */
39
40/*
41 * for the hash of cfqq inside the cfqd
42 */
43#define CFQ_QHASH_SHIFT 6
44#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
45#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
46
47/*
48 * for the hash of crq inside the cfqd (request merge hash)
49 */
50#define CFQ_MHASH_SHIFT 6
51#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
52#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
53#define CFQ_MHASH_FN(sec) hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
54#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
55#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
56
57#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
58
59#define RQ_DATA(rq) (rq)->elevator_private
60
61/*
62 * rb-tree defines
63 */
64#define RB_NONE (2)
65#define RB_EMPTY(node) ((node)->rb_node == NULL)
66#define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE
67#define RB_CLEAR(node) do { \
68 (node)->rb_parent = NULL; \
69 RB_CLEAR_COLOR((node)); \
70 (node)->rb_right = NULL; \
71 (node)->rb_left = NULL; \
72} while (0)
73#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
74#define ON_RB(node) ((node)->rb_color != RB_NONE)
75#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
76#define rq_rb_key(rq) (rq)->sector
77
78/*
79 * threshold for switching off non-tag accounting
80 */
81#define CFQ_MAX_TAG (4)
82
83/*
84 * sort key types and names
85 */
86enum {
87 CFQ_KEY_PGID,
88 CFQ_KEY_TGID,
89 CFQ_KEY_UID,
90 CFQ_KEY_GID,
91 CFQ_KEY_LAST,
92};
93
94static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
95
96static kmem_cache_t *crq_pool;
97static kmem_cache_t *cfq_pool;
98static kmem_cache_t *cfq_ioc_pool;
99
100struct cfq_data {
101 struct list_head rr_list;
102 struct list_head empty_list;
103
104 struct hlist_head *cfq_hash;
105 struct hlist_head *crq_hash;
106
107	/* queues on rr_list (ie they have pending requests) */
108 unsigned int busy_queues;
109
110 unsigned int max_queued;
111
112 atomic_t ref;
113
114 int key_type;
115
116 mempool_t *crq_pool;
117
118 request_queue_t *queue;
119
120 sector_t last_sector;
121
122 int rq_in_driver;
123
124 /*
125 * tunables, see top of file
126 */
127 unsigned int cfq_quantum;
128 unsigned int cfq_queued;
129 unsigned int cfq_fifo_expire_r;
130 unsigned int cfq_fifo_expire_w;
131 unsigned int cfq_fifo_batch_expire;
132 unsigned int cfq_back_penalty;
133 unsigned int cfq_back_max;
134 unsigned int find_best_crq;
135
136 unsigned int cfq_tagged;
137};
138
139struct cfq_queue {
140 /* reference count */
141 atomic_t ref;
142 /* parent cfq_data */
143 struct cfq_data *cfqd;
144	/* hash node linking this queue into cfqd->cfq_hash */
145 struct hlist_node cfq_hash;
146 /* hash key */
147 unsigned long key;
148 /* whether queue is on rr (or empty) list */
149 int on_rr;
150 /* on either rr or empty list of cfqd */
151 struct list_head cfq_list;
152 /* sorted list of pending requests */
153 struct rb_root sort_list;
154 /* if fifo isn't expired, next request to serve */
155 struct cfq_rq *next_crq;
156 /* requests queued in sort_list */
157 int queued[2];
158 /* currently allocated requests */
159 int allocated[2];
160 /* fifo list of requests in sort_list */
161 struct list_head fifo[2];
162 /* last time fifo expired */
163 unsigned long last_fifo_expire;
164
165 int key_type;
166
167 unsigned long service_start;
168 unsigned long service_used;
169
170 unsigned int max_rate;
171
172 /* number of requests that have been handed to the driver */
173 int in_flight;
174 /* number of currently allocated requests */
175 int alloc_limit[2];
176};
177
178struct cfq_rq {
179 struct rb_node rb_node;
180 sector_t rb_key;
181 struct request *request;
182 struct hlist_node hash;
183
184 struct cfq_queue *cfq_queue;
185 struct cfq_io_context *io_context;
186
187 unsigned long service_start;
188 unsigned long queue_start;
189
190 unsigned int in_flight : 1;
191 unsigned int accounted : 1;
192 unsigned int is_sync : 1;
193 unsigned int is_write : 1;
194};
195
196static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
197static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
198static void cfq_update_next_crq(struct cfq_rq *);
199static void cfq_put_cfqd(struct cfq_data *cfqd);
200
201/*
202 * what the fairness is based on (ie how processes are grouped and
203 * differentiated)
204 */
205static inline unsigned long
206cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
207{
208 /*
209 * optimize this so that ->key_type is the offset into the struct
210 */
211 switch (cfqd->key_type) {
212 case CFQ_KEY_PGID:
213 return process_group(tsk);
214 default:
215 case CFQ_KEY_TGID:
216 return tsk->tgid;
217 case CFQ_KEY_UID:
218 return tsk->uid;
219 case CFQ_KEY_GID:
220 return tsk->gid;
221 }
222}
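/*
 * Illustrative note: the key type above sets the granularity of fairness.
 * With "tgid" (also the fallback in the switch above) all threads of a
 * process share one cfq_queue; "pgid" widens that to a whole process
 * group, while "uid" and "gid" give one queue per user or per group
 * respectively.
 */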
223
224/*
225 * lots of deadline iosched dupes, can be abstracted later...
226 */
227static inline void cfq_del_crq_hash(struct cfq_rq *crq)
228{
229 hlist_del_init(&crq->hash);
230}
231
232static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
233{
234 cfq_del_crq_hash(crq);
235
236 if (q->last_merge == crq->request)
237 q->last_merge = NULL;
238
239 cfq_update_next_crq(crq);
240}
241
242static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
243{
244 const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
245
246 BUG_ON(!hlist_unhashed(&crq->hash));
247
248 hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
249}
250
251static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
252{
253 struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
254 struct hlist_node *entry, *next;
255
256 hlist_for_each_safe(entry, next, hash_list) {
257 struct cfq_rq *crq = list_entry_hash(entry);
258 struct request *__rq = crq->request;
259
260 BUG_ON(hlist_unhashed(&crq->hash));
261
262 if (!rq_mergeable(__rq)) {
263 cfq_del_crq_hash(crq);
264 continue;
265 }
266
267 if (rq_hash_key(__rq) == offset)
268 return __rq;
269 }
270
271 return NULL;
272}
273
274/*
275 * Lifted from AS - choose which of crq1 and crq2 is best served now.
276 * We choose the request that is closest to the head right now. Distance
277 * behind the head is penalized and only allowed to a certain extent.
278 */
279static struct cfq_rq *
280cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
281{
282 sector_t last, s1, s2, d1 = 0, d2 = 0;
283 int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
284 unsigned long back_max;
285
286 if (crq1 == NULL || crq1 == crq2)
287 return crq2;
288 if (crq2 == NULL)
289 return crq1;
290
291 s1 = crq1->request->sector;
292 s2 = crq2->request->sector;
293
294 last = cfqd->last_sector;
295
296#if 0
297 if (!list_empty(&cfqd->queue->queue_head)) {
298 struct list_head *entry = &cfqd->queue->queue_head;
299 unsigned long distance = ~0UL;
300 struct request *rq;
301
302 while ((entry = entry->prev) != &cfqd->queue->queue_head) {
303 rq = list_entry_rq(entry);
304
305 if (blk_barrier_rq(rq))
306 break;
307
308 if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
309 distance = abs(s1 - rq->sector +rq->nr_sectors);
310 last = rq->sector + rq->nr_sectors;
311 }
312 if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
313 distance = abs(s2 - rq->sector +rq->nr_sectors);
314 last = rq->sector + rq->nr_sectors;
315 }
316 }
317 }
318#endif
319
320 /*
321 * by definition, 1KiB is 2 sectors
322 */
323 back_max = cfqd->cfq_back_max * 2;
324
325 /*
326 * Strict one way elevator _except_ in the case where we allow
327 * short backward seeks which are biased as twice the cost of a
328 * similar forward seek.
329 */
330 if (s1 >= last)
331 d1 = s1 - last;
332 else if (s1 + back_max >= last)
333 d1 = (last - s1) * cfqd->cfq_back_penalty;
334 else
335 r1_wrap = 1;
336
337 if (s2 >= last)
338 d2 = s2 - last;
339 else if (s2 + back_max >= last)
340 d2 = (last - s2) * cfqd->cfq_back_penalty;
341 else
342 r2_wrap = 1;
343
344 /* Found required data */
345 if (!r1_wrap && r2_wrap)
346 return crq1;
347 else if (!r2_wrap && r1_wrap)
348 return crq2;
349 else if (r1_wrap && r2_wrap) {
350 /* both behind the head */
351 if (s1 <= s2)
352 return crq1;
353 else
354 return crq2;
355 }
356
357 /* Both requests in front of the head */
358 if (d1 < d2)
359 return crq1;
360 else if (d2 < d1)
361 return crq2;
362 else {
363 if (s1 >= s2)
364 return crq1;
365 else
366 return crq2;
367 }
368}
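/*
 * Worked example for cfq_choose_req(), using the default tunables above
 * (cfq_back_max = 16*1024 KiB, i.e. a 32768-sector window, and
 * cfq_back_penalty = 2): with last = 1000, a request at sector 1100 gets
 * d1 = 100, while one at sector 900 lies behind the head and is charged
 * d2 = (1000 - 900) * 2 = 200, so the forward request is chosen even
 * though the backward one is physically closer.
 */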
369
370/*
371 * would be nice to take fifo expire time into account as well
372 */
373static struct cfq_rq *
374cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
375 struct cfq_rq *last)
376{
377 struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
378 struct rb_node *rbnext, *rbprev;
379
380 if (!ON_RB(&last->rb_node))
381 return NULL;
382
383 if ((rbnext = rb_next(&last->rb_node)) == NULL)
384 rbnext = rb_first(&cfqq->sort_list);
385
386 rbprev = rb_prev(&last->rb_node);
387
388 if (rbprev)
389 crq_prev = rb_entry_crq(rbprev);
390 if (rbnext)
391 crq_next = rb_entry_crq(rbnext);
392
393 return cfq_choose_req(cfqd, crq_next, crq_prev);
394}
395
396static void cfq_update_next_crq(struct cfq_rq *crq)
397{
398 struct cfq_queue *cfqq = crq->cfq_queue;
399
400 if (cfqq->next_crq == crq)
401 cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
402}
403
404static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
405{
406 struct list_head *head = &cfqq->cfqd->rr_list;
407 struct list_head *next, *prev;
408
409 /*
410 * list might still be ordered
411 */
412 next = cfqq->cfq_list.next;
413 if (next != head) {
414 struct cfq_queue *cnext = list_entry_cfqq(next);
415
416 if (cfqq->service_used > cnext->service_used)
417 return 1;
418 }
419
420 prev = cfqq->cfq_list.prev;
421 if (prev != head) {
422 struct cfq_queue *cprev = list_entry_cfqq(prev);
423
424 if (cfqq->service_used < cprev->service_used)
425 return 1;
426 }
427
428 return 0;
429}
430
431static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
432{
433 struct list_head *entry = &cfqq->cfqd->rr_list;
434
435 if (!cfqq->on_rr)
436 return;
437 if (!new_queue && !cfq_check_sort_rr_list(cfqq))
438 return;
439
440 list_del(&cfqq->cfq_list);
441
442 /*
443 * sort by our mean service_used, sub-sort by in-flight requests
444 */
445 while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
446 struct cfq_queue *__cfqq = list_entry_cfqq(entry);
447
448 if (cfqq->service_used > __cfqq->service_used)
449 break;
450 else if (cfqq->service_used == __cfqq->service_used) {
451 struct list_head *prv;
452
453 while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
454 __cfqq = list_entry_cfqq(prv);
455
456 WARN_ON(__cfqq->service_used > cfqq->service_used);
457 if (cfqq->service_used != __cfqq->service_used)
458 break;
459 if (cfqq->in_flight > __cfqq->in_flight)
460 break;
461
462 entry = prv;
463 }
464 }
465 }
466
467 list_add(&cfqq->cfq_list, entry);
468}
469
470/*
471 * add to busy list of queues for service, trying to be fair in ordering
472 * the pending list according to requests serviced
473 */
474static inline void
475cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
476{
477 /*
478 * it's currently on the empty list
479 */
480 cfqq->on_rr = 1;
481 cfqd->busy_queues++;
482
483 if (time_after(jiffies, cfqq->service_start + cfq_service))
484 cfqq->service_used >>= 3;
485
486 cfq_sort_rr_list(cfqq, 1);
487}
488
489static inline void
490cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
491{
492 list_move(&cfqq->cfq_list, &cfqd->empty_list);
493 cfqq->on_rr = 0;
494
495 BUG_ON(!cfqd->busy_queues);
496 cfqd->busy_queues--;
497}
498
499/*
500 * rb tree support functions
501 */
502static inline void cfq_del_crq_rb(struct cfq_rq *crq)
503{
504 struct cfq_queue *cfqq = crq->cfq_queue;
505
506 if (ON_RB(&crq->rb_node)) {
507 struct cfq_data *cfqd = cfqq->cfqd;
508
509 BUG_ON(!cfqq->queued[crq->is_sync]);
510
511 cfq_update_next_crq(crq);
512
513 cfqq->queued[crq->is_sync]--;
514 rb_erase(&crq->rb_node, &cfqq->sort_list);
515 RB_CLEAR_COLOR(&crq->rb_node);
516
517 if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
518 cfq_del_cfqq_rr(cfqd, cfqq);
519 }
520}
521
522static struct cfq_rq *
523__cfq_add_crq_rb(struct cfq_rq *crq)
524{
525 struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
526 struct rb_node *parent = NULL;
527 struct cfq_rq *__crq;
528
529 while (*p) {
530 parent = *p;
531 __crq = rb_entry_crq(parent);
532
533 if (crq->rb_key < __crq->rb_key)
534 p = &(*p)->rb_left;
535 else if (crq->rb_key > __crq->rb_key)
536 p = &(*p)->rb_right;
537 else
538 return __crq;
539 }
540
541 rb_link_node(&crq->rb_node, parent, p);
542 return NULL;
543}
544
545static void cfq_add_crq_rb(struct cfq_rq *crq)
546{
547 struct cfq_queue *cfqq = crq->cfq_queue;
548 struct cfq_data *cfqd = cfqq->cfqd;
549 struct request *rq = crq->request;
550 struct cfq_rq *__alias;
551
552 crq->rb_key = rq_rb_key(rq);
553 cfqq->queued[crq->is_sync]++;
554
555 /*
556 * looks a little odd, but the first insert might return an alias.
557 * if that happens, put the alias on the dispatch list
558 */
559 while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
560 cfq_dispatch_sort(cfqd->queue, __alias);
561
562 rb_insert_color(&crq->rb_node, &cfqq->sort_list);
563
564 if (!cfqq->on_rr)
565 cfq_add_cfqq_rr(cfqd, cfqq);
566
567 /*
568 * check if this request is a better next-serve candidate
569 */
570 cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
571}
572
573static inline void
574cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
575{
576 if (ON_RB(&crq->rb_node)) {
577 rb_erase(&crq->rb_node, &cfqq->sort_list);
578 cfqq->queued[crq->is_sync]--;
579 }
580
581 cfq_add_crq_rb(crq);
582}
583
584static struct request *
585cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
586{
587 const unsigned long key = cfq_hash_key(cfqd, current);
588 struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
589 struct rb_node *n;
590
591 if (!cfqq)
592 goto out;
593
594 n = cfqq->sort_list.rb_node;
595 while (n) {
596 struct cfq_rq *crq = rb_entry_crq(n);
597
598 if (sector < crq->rb_key)
599 n = n->rb_left;
600 else if (sector > crq->rb_key)
601 n = n->rb_right;
602 else
603 return crq->request;
604 }
605
606out:
607 return NULL;
608}
609
610static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
611{
612 struct cfq_rq *crq = RQ_DATA(rq);
613
614 if (crq) {
615 struct cfq_queue *cfqq = crq->cfq_queue;
616
617 if (cfqq->cfqd->cfq_tagged) {
618 cfqq->service_used--;
619 cfq_sort_rr_list(cfqq, 0);
620 }
621
622 if (crq->accounted) {
623 crq->accounted = 0;
624 cfqq->cfqd->rq_in_driver--;
625 }
626 }
627}
628
629/*
630 * make sure the service time gets corrected on reissue of this request
631 */
632static void cfq_requeue_request(request_queue_t *q, struct request *rq)
633{
634 cfq_deactivate_request(q, rq);
635 list_add(&rq->queuelist, &q->queue_head);
636}
637
638static void cfq_remove_request(request_queue_t *q, struct request *rq)
639{
640 struct cfq_rq *crq = RQ_DATA(rq);
641
642 if (crq) {
643 cfq_remove_merge_hints(q, crq);
644 list_del_init(&rq->queuelist);
645
646 if (crq->cfq_queue)
647 cfq_del_crq_rb(crq);
648 }
649}
650
651static int
652cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
653{
654 struct cfq_data *cfqd = q->elevator->elevator_data;
655 struct request *__rq;
656 int ret;
657
658 ret = elv_try_last_merge(q, bio);
659 if (ret != ELEVATOR_NO_MERGE) {
660 __rq = q->last_merge;
661 goto out_insert;
662 }
663
664 __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
665 if (__rq) {
666 BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
667
668 if (elv_rq_merge_ok(__rq, bio)) {
669 ret = ELEVATOR_BACK_MERGE;
670 goto out;
671 }
672 }
673
674 __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
675 if (__rq) {
676 if (elv_rq_merge_ok(__rq, bio)) {
677 ret = ELEVATOR_FRONT_MERGE;
678 goto out;
679 }
680 }
681
682 return ELEVATOR_NO_MERGE;
683out:
684 q->last_merge = __rq;
685out_insert:
686 *req = __rq;
687 return ret;
688}
689
690static void cfq_merged_request(request_queue_t *q, struct request *req)
691{
692 struct cfq_data *cfqd = q->elevator->elevator_data;
693 struct cfq_rq *crq = RQ_DATA(req);
694
695 cfq_del_crq_hash(crq);
696 cfq_add_crq_hash(cfqd, crq);
697
698 if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
699 struct cfq_queue *cfqq = crq->cfq_queue;
700
701 cfq_update_next_crq(crq);
702 cfq_reposition_crq_rb(cfqq, crq);
703 }
704
705 q->last_merge = req;
706}
707
708static void
709cfq_merged_requests(request_queue_t *q, struct request *rq,
710 struct request *next)
711{
712 struct cfq_rq *crq = RQ_DATA(rq);
713 struct cfq_rq *cnext = RQ_DATA(next);
714
715 cfq_merged_request(q, rq);
716
717 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
718 if (time_before(cnext->queue_start, crq->queue_start)) {
719 list_move(&rq->queuelist, &next->queuelist);
720 crq->queue_start = cnext->queue_start;
721 }
722 }
723
724 cfq_update_next_crq(cnext);
725 cfq_remove_request(q, next);
726}
727
728/*
729 * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues;
730 * this function sector-sorts the selected request to minimize seeks. we start
731 * at cfqd->last_sector, not 0.
732 */
733static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
734{
735 struct cfq_data *cfqd = q->elevator->elevator_data;
736 struct cfq_queue *cfqq = crq->cfq_queue;
737 struct list_head *head = &q->queue_head, *entry = head;
738 struct request *__rq;
739 sector_t last;
740
741 cfq_del_crq_rb(crq);
742 cfq_remove_merge_hints(q, crq);
743 list_del(&crq->request->queuelist);
744
745 last = cfqd->last_sector;
746 while ((entry = entry->prev) != head) {
747 __rq = list_entry_rq(entry);
748
749 if (blk_barrier_rq(crq->request))
750 break;
751 if (!blk_fs_request(crq->request))
752 break;
753
754 if (crq->request->sector > __rq->sector)
755 break;
756 if (__rq->sector > last && crq->request->sector < last) {
757 last = crq->request->sector;
758 break;
759 }
760 }
761
762 cfqd->last_sector = last;
763 crq->in_flight = 1;
764 cfqq->in_flight++;
765 list_add(&crq->request->queuelist, entry);
766}
767
768/*
769 * return expired entry, or NULL to just start from scratch in rbtree
770 */
771static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
772{
773 struct cfq_data *cfqd = cfqq->cfqd;
774 const int reads = !list_empty(&cfqq->fifo[0]);
775 const int writes = !list_empty(&cfqq->fifo[1]);
776 unsigned long now = jiffies;
777 struct cfq_rq *crq;
778
779 if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
780 return NULL;
781
782 crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
783 if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
784 cfqq->last_fifo_expire = now;
785 return crq;
786 }
787
788 crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
789 if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
790 cfqq->last_fifo_expire = now;
791 return crq;
792 }
793
794 return NULL;
795}
796
797/*
798 * dispatch a single request from given queue
799 */
800static inline void
801cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
802 struct cfq_queue *cfqq)
803{
804 struct cfq_rq *crq;
805
806 /*
807 * follow expired path, else get first next available
808 */
809 if ((crq = cfq_check_fifo(cfqq)) == NULL) {
810 if (cfqd->find_best_crq)
811 crq = cfqq->next_crq;
812 else
813 crq = rb_entry_crq(rb_first(&cfqq->sort_list));
814 }
815
816 cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
817
818 /*
819 * finally, insert request into driver list
820 */
821 cfq_dispatch_sort(q, crq);
822}
823
824static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
825{
826 struct cfq_data *cfqd = q->elevator->elevator_data;
827 struct cfq_queue *cfqq;
828 struct list_head *entry, *tmp;
829 int queued, busy_queues, first_round;
830
831 if (list_empty(&cfqd->rr_list))
832 return 0;
833
834 queued = 0;
835 first_round = 1;
836restart:
837 busy_queues = 0;
838 list_for_each_safe(entry, tmp, &cfqd->rr_list) {
839 cfqq = list_entry_cfqq(entry);
840
841 BUG_ON(RB_EMPTY(&cfqq->sort_list));
842
843 /*
844 * first round of queueing, only select from queues that
845 * don't already have io in-flight
846 */
847 if (first_round && cfqq->in_flight)
848 continue;
849
850 cfq_dispatch_request(q, cfqd, cfqq);
851
852 if (!RB_EMPTY(&cfqq->sort_list))
853 busy_queues++;
854
855 queued++;
856 }
857
858 if ((queued < max_dispatch) && (busy_queues || first_round)) {
859 first_round = 0;
860 goto restart;
861 }
862
863 return queued;
864}
865
866static inline void cfq_account_dispatch(struct cfq_rq *crq)
867{
868 struct cfq_queue *cfqq = crq->cfq_queue;
869 struct cfq_data *cfqd = cfqq->cfqd;
870 unsigned long now, elapsed;
871
872 if (!blk_fs_request(crq->request))
873 return;
874
875 /*
876 * accounted bit is necessary since some drivers will call
877 * elv_next_request() many times for the same request (eg ide)
878 */
879 if (crq->accounted)
880 return;
881
882 now = jiffies;
883 if (cfqq->service_start == ~0UL)
884 cfqq->service_start = now;
885
886 /*
887 * on drives with tagged command queueing, command turn-around time
888 * doesn't necessarily reflect the time spent processing this very
889 * command inside the drive. so do the accounting differently there,
890 * by just sorting on the number of requests
891 */
892 if (cfqd->cfq_tagged) {
893 if (time_after(now, cfqq->service_start + cfq_service)) {
894 cfqq->service_start = now;
895 cfqq->service_used /= 10;
896 }
897
898 cfqq->service_used++;
899 cfq_sort_rr_list(cfqq, 0);
900 }
901
902 elapsed = now - crq->queue_start;
903 if (elapsed > max_elapsed_dispatch)
904 max_elapsed_dispatch = elapsed;
905
906 crq->accounted = 1;
907 crq->service_start = now;
908
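	/*
	 * once enough requests are in flight at once, assume the device does
	 * tagged command queueing and switch accounting modes
	 */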
909 if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
910 cfqq->cfqd->cfq_tagged = 1;
911 printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
912 }
913}
914
915static inline void
916cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
917{
918 struct cfq_data *cfqd = cfqq->cfqd;
919
920 if (!crq->accounted)
921 return;
922
923 WARN_ON(!cfqd->rq_in_driver);
924 cfqd->rq_in_driver--;
925
926 if (!cfqd->cfq_tagged) {
927 unsigned long now = jiffies;
928 unsigned long duration = now - crq->service_start;
929
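		/*
		 * periodically decay the accumulated service time so old
		 * history doesn't dominate when re-sorting the rr_list
		 */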
930 if (time_after(now, cfqq->service_start + cfq_service)) {
931 cfqq->service_start = now;
932 cfqq->service_used >>= 3;
933 }
934
935 cfqq->service_used += duration;
936 cfq_sort_rr_list(cfqq, 0);
937
938 if (duration > max_elapsed_crq)
939 max_elapsed_crq = duration;
940 }
941}
942
943static struct request *cfq_next_request(request_queue_t *q)
944{
945 struct cfq_data *cfqd = q->elevator->elevator_data;
946 struct request *rq;
947
948 if (!list_empty(&q->queue_head)) {
949 struct cfq_rq *crq;
950dispatch:
951 rq = list_entry_rq(q->queue_head.next);
952
953 if ((crq = RQ_DATA(rq)) != NULL) {
954 cfq_remove_merge_hints(q, crq);
955 cfq_account_dispatch(crq);
956 }
957
958 return rq;
959 }
960
961 if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
962 goto dispatch;
963
964 return NULL;
965}
966
967/*
968 * task holds one reference to the queue, dropped when task exits. each crq
969 * in-flight on this queue also holds a reference, dropped when crq is freed.
970 *
971 * queue lock must be held here.
972 */
973static void cfq_put_queue(struct cfq_queue *cfqq)
974{
975 BUG_ON(!atomic_read(&cfqq->ref));
976
977 if (!atomic_dec_and_test(&cfqq->ref))
978 return;
979
980 BUG_ON(rb_first(&cfqq->sort_list));
981 BUG_ON(cfqq->on_rr);
982
983 cfq_put_cfqd(cfqq->cfqd);
984
985 /*
986 * it's on the empty list and still hashed
987 */
988 list_del(&cfqq->cfq_list);
989 hlist_del(&cfqq->cfq_hash);
990 kmem_cache_free(cfq_pool, cfqq);
991}
992
993static inline struct cfq_queue *
994__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
995{
996 struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
997 struct hlist_node *entry, *next;
998
999 hlist_for_each_safe(entry, next, hash_list) {
1000 struct cfq_queue *__cfqq = list_entry_qhash(entry);
1001
1002 if (__cfqq->key == key)
1003 return __cfqq;
1004 }
1005
1006 return NULL;
1007}
1008
1009static struct cfq_queue *
1010cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
1011{
1012 return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
1013}
1014
1015static inline void
1016cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
1017 struct cfq_io_context *cic)
1018{
1019 unsigned long hashkey = cfq_hash_key(cfqd, current);
1020 unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
1021 struct cfq_queue *__cfqq;
1022 unsigned long flags;
1023
1024 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1025
1026 hlist_del(&(*cfqq)->cfq_hash);
1027
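	/*
	 * look up the queue under the new key: if none exists (or it is this
	 * queue), re-add ourselves under the new key; otherwise switch the io
	 * context to the existing queue and drop our reference
	 */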
1028 __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
1029 if (!__cfqq || __cfqq == *cfqq) {
1030 __cfqq = *cfqq;
1031 hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1032 __cfqq->key_type = cfqd->key_type;
1033 } else {
1034 atomic_inc(&__cfqq->ref);
1035 cic->cfqq = __cfqq;
1036 cfq_put_queue(*cfqq);
1037 *cfqq = __cfqq;
1038 }
1039
1040 cic->cfqq = __cfqq;
1041 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1042}
1043
1044static void cfq_free_io_context(struct cfq_io_context *cic)
1045{
1046 kmem_cache_free(cfq_ioc_pool, cic);
1047}
1048
1049/*
1050 * locking hierarchy is: io_context lock -> queue locks
1051 */
1052static void cfq_exit_io_context(struct cfq_io_context *cic)
1053{
1054 struct cfq_queue *cfqq = cic->cfqq;
1055 struct list_head *entry = &cic->list;
1056 request_queue_t *q;
1057 unsigned long flags;
1058
1059 /*
1060 * put the reference this task is holding to the various queues
1061 */
1062 spin_lock_irqsave(&cic->ioc->lock, flags);
1063 while ((entry = cic->list.next) != &cic->list) {
1064 struct cfq_io_context *__cic;
1065
1066 __cic = list_entry(entry, struct cfq_io_context, list);
1067 list_del(entry);
1068
1069 q = __cic->cfqq->cfqd->queue;
1070 spin_lock(q->queue_lock);
1071 cfq_put_queue(__cic->cfqq);
1072 spin_unlock(q->queue_lock);
1073 }
1074
1075 q = cfqq->cfqd->queue;
1076 spin_lock(q->queue_lock);
1077 cfq_put_queue(cfqq);
1078 spin_unlock(q->queue_lock);
1079
1080 cic->cfqq = NULL;
1081 spin_unlock_irqrestore(&cic->ioc->lock, flags);
1082}
1083
1084static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
1085{
1086 struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);
1087
1088 if (cic) {
1089 cic->dtor = cfq_free_io_context;
1090 cic->exit = cfq_exit_io_context;
1091 INIT_LIST_HEAD(&cic->list);
1092 cic->cfqq = NULL;
1093 }
1094
1095 return cic;
1096}
1097
1098/*
1099 * Setup general io context and cfq io context. There can be several cfq
1100 * io contexts per general io context, if this process is doing io to more
1101 * than one device managed by cfq. Note that caller is holding a reference to
1102 * cfqq, so we don't need to worry about it disappearing
1103 */
1104static struct cfq_io_context *
1105cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
1106{
1107 struct cfq_data *cfqd = (*cfqq)->cfqd;
1108 struct cfq_queue *__cfqq = *cfqq;
1109 struct cfq_io_context *cic;
1110 struct io_context *ioc;
1111
1112 might_sleep_if(gfp_flags & __GFP_WAIT);
1113
1114 ioc = get_io_context(gfp_flags);
1115 if (!ioc)
1116 return NULL;
1117
1118 if ((cic = ioc->cic) == NULL) {
1119 cic = cfq_alloc_io_context(gfp_flags);
1120
1121 if (cic == NULL)
1122 goto err;
1123
1124 ioc->cic = cic;
1125 cic->ioc = ioc;
1126 cic->cfqq = __cfqq;
1127 atomic_inc(&__cfqq->ref);
1128 } else {
1129 struct cfq_io_context *__cic;
1130 unsigned long flags;
1131
1132 /*
1133 * since the first cic on the list is actually the head
 1134		 * itself, need to check this here or we'll duplicate a
 1135		 * cic per ioc for no reason
1136 */
1137 if (cic->cfqq == __cfqq)
1138 goto out;
1139
1140 /*
1141 * cic exists, check if we already are there. linear search
1142 * should be ok here, the list will usually not be more than
1143 * 1 or a few entries long
1144 */
1145 spin_lock_irqsave(&ioc->lock, flags);
1146 list_for_each_entry(__cic, &cic->list, list) {
1147 /*
1148 * this process is already holding a reference to
1149 * this queue, so no need to get one more
1150 */
1151 if (__cic->cfqq == __cfqq) {
1152 cic = __cic;
1153 spin_unlock_irqrestore(&ioc->lock, flags);
1154 goto out;
1155 }
1156 }
1157 spin_unlock_irqrestore(&ioc->lock, flags);
1158
1159 /*
 1160	 * nope, process doesn't have a cic associated with this
1161 * cfqq yet. get a new one and add to list
1162 */
1163 __cic = cfq_alloc_io_context(gfp_flags);
1164 if (__cic == NULL)
1165 goto err;
1166
1167 __cic->ioc = ioc;
1168 __cic->cfqq = __cfqq;
1169 atomic_inc(&__cfqq->ref);
1170 spin_lock_irqsave(&ioc->lock, flags);
1171 list_add(&__cic->list, &cic->list);
1172 spin_unlock_irqrestore(&ioc->lock, flags);
1173
1174 cic = __cic;
1175 *cfqq = __cfqq;
1176 }
1177
1178out:
1179 /*
1180 * if key_type has been changed on the fly, we lazily rehash
1181 * each queue at lookup time
1182 */
1183 if ((*cfqq)->key_type != cfqd->key_type)
1184 cfq_rehash_cfqq(cfqd, cfqq, cic);
1185
1186 return cic;
1187err:
1188 put_io_context(ioc);
1189 return NULL;
1190}
1191
1192static struct cfq_queue *
1193__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
1194{
1195 const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
1196 struct cfq_queue *cfqq, *new_cfqq = NULL;
1197
1198retry:
1199 cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
1200
1201 if (!cfqq) {
1202 if (new_cfqq) {
1203 cfqq = new_cfqq;
1204 new_cfqq = NULL;
1205 } else if (gfp_mask & __GFP_WAIT) {
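			/*
			 * drop the queue lock so the allocation may sleep,
			 * then retry the lookup in case someone else set up
			 * the queue meanwhile
			 */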
1206 spin_unlock_irq(cfqd->queue->queue_lock);
1207 new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
1208 spin_lock_irq(cfqd->queue->queue_lock);
1209 goto retry;
1210 } else
1211 goto out;
1212
1213 memset(cfqq, 0, sizeof(*cfqq));
1214
1215 INIT_HLIST_NODE(&cfqq->cfq_hash);
1216 INIT_LIST_HEAD(&cfqq->cfq_list);
1217 RB_CLEAR_ROOT(&cfqq->sort_list);
1218 INIT_LIST_HEAD(&cfqq->fifo[0]);
1219 INIT_LIST_HEAD(&cfqq->fifo[1]);
1220
1221 cfqq->key = key;
1222 hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
1223 atomic_set(&cfqq->ref, 0);
1224 cfqq->cfqd = cfqd;
1225 atomic_inc(&cfqd->ref);
1226 cfqq->key_type = cfqd->key_type;
1227 cfqq->service_start = ~0UL;
1228 }
1229
1230 if (new_cfqq)
1231 kmem_cache_free(cfq_pool, new_cfqq);
1232
1233 atomic_inc(&cfqq->ref);
1234out:
1235 WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
1236 return cfqq;
1237}
1238
1239static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
1240{
1241 crq->is_sync = 0;
1242 if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
1243 crq->is_sync = 1;
1244
1245 cfq_add_crq_rb(crq);
1246 crq->queue_start = jiffies;
1247
1248 list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
1249}
1250
1251static void
1252cfq_insert_request(request_queue_t *q, struct request *rq, int where)
1253{
1254 struct cfq_data *cfqd = q->elevator->elevator_data;
1255 struct cfq_rq *crq = RQ_DATA(rq);
1256
1257 switch (where) {
1258 case ELEVATOR_INSERT_BACK:
1259 while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
1260 ;
1261 list_add_tail(&rq->queuelist, &q->queue_head);
1262 break;
1263 case ELEVATOR_INSERT_FRONT:
1264 list_add(&rq->queuelist, &q->queue_head);
1265 break;
1266 case ELEVATOR_INSERT_SORT:
1267 BUG_ON(!blk_fs_request(rq));
1268 cfq_enqueue(cfqd, crq);
1269 break;
1270 default:
 1271		printk("%s: bad insert point %d\n", __FUNCTION__, where);
1272 return;
1273 }
1274
1275 if (rq_mergeable(rq)) {
1276 cfq_add_crq_hash(cfqd, crq);
1277
1278 if (!q->last_merge)
1279 q->last_merge = rq;
1280 }
1281}
1282
1283static int cfq_queue_empty(request_queue_t *q)
1284{
1285 struct cfq_data *cfqd = q->elevator->elevator_data;
1286
1287 return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
1288}
1289
1290static void cfq_completed_request(request_queue_t *q, struct request *rq)
1291{
1292 struct cfq_rq *crq = RQ_DATA(rq);
1293 struct cfq_queue *cfqq;
1294
1295 if (unlikely(!blk_fs_request(rq)))
1296 return;
1297
1298 cfqq = crq->cfq_queue;
1299
1300 if (crq->in_flight) {
1301 WARN_ON(!cfqq->in_flight);
1302 cfqq->in_flight--;
1303 }
1304
1305 cfq_account_completion(cfqq, crq);
1306}
1307
1308static struct request *
1309cfq_former_request(request_queue_t *q, struct request *rq)
1310{
1311 struct cfq_rq *crq = RQ_DATA(rq);
1312 struct rb_node *rbprev = rb_prev(&crq->rb_node);
1313
1314 if (rbprev)
1315 return rb_entry_crq(rbprev)->request;
1316
1317 return NULL;
1318}
1319
1320static struct request *
1321cfq_latter_request(request_queue_t *q, struct request *rq)
1322{
1323 struct cfq_rq *crq = RQ_DATA(rq);
1324 struct rb_node *rbnext = rb_next(&crq->rb_node);
1325
1326 if (rbnext)
1327 return rb_entry_crq(rbnext)->request;
1328
1329 return NULL;
1330}
1331
1332static int cfq_may_queue(request_queue_t *q, int rw)
1333{
1334 struct cfq_data *cfqd = q->elevator->elevator_data;
1335 struct cfq_queue *cfqq;
1336 int ret = ELV_MQUEUE_MAY;
1337
1338 if (current->flags & PF_MEMALLOC)
1339 return ELV_MQUEUE_MAY;
1340
1341 cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
1342 if (cfqq) {
1343 int limit = cfqd->max_queued;
1344
1345 if (cfqq->allocated[rw] < cfqd->cfq_queued)
1346 return ELV_MQUEUE_MUST;
1347
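		/*
		 * scale the per-queue limit by the number of busy queues,
		 * clamped between cfq_queued and max_queued
		 */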
1348 if (cfqd->busy_queues)
1349 limit = q->nr_requests / cfqd->busy_queues;
1350
1351 if (limit < cfqd->cfq_queued)
1352 limit = cfqd->cfq_queued;
1353 else if (limit > cfqd->max_queued)
1354 limit = cfqd->max_queued;
1355
1356 if (cfqq->allocated[rw] >= limit) {
1357 if (limit > cfqq->alloc_limit[rw])
1358 cfqq->alloc_limit[rw] = limit;
1359
1360 ret = ELV_MQUEUE_NO;
1361 }
1362 }
1363
1364 return ret;
1365}
1366
1367static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
1368{
1369 struct request_list *rl = &q->rq;
1370 const int write = waitqueue_active(&rl->wait[WRITE]);
1371 const int read = waitqueue_active(&rl->wait[READ]);
1372
1373 if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
1374 wake_up(&rl->wait[READ]);
1375 if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
1376 wake_up(&rl->wait[WRITE]);
1377}
1378
1379/*
1380 * queue lock held here
1381 */
1382static void cfq_put_request(request_queue_t *q, struct request *rq)
1383{
1384 struct cfq_data *cfqd = q->elevator->elevator_data;
1385 struct cfq_rq *crq = RQ_DATA(rq);
1386
1387 if (crq) {
1388 struct cfq_queue *cfqq = crq->cfq_queue;
1389
1390 BUG_ON(q->last_merge == rq);
1391 BUG_ON(!hlist_unhashed(&crq->hash));
1392
1393 if (crq->io_context)
1394 put_io_context(crq->io_context->ioc);
1395
1396 BUG_ON(!cfqq->allocated[crq->is_write]);
1397 cfqq->allocated[crq->is_write]--;
1398
1399 mempool_free(crq, cfqd->crq_pool);
1400 rq->elevator_private = NULL;
1401
1402 smp_mb();
1403 cfq_check_waiters(q, cfqq);
1404 cfq_put_queue(cfqq);
1405 }
1406}
1407
1408/*
 1409 * Allocate cfq data structures associated with this request: a cfq_queue, the io context and a crq.
1410 */
1411static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
1412{
1413 struct cfq_data *cfqd = q->elevator->elevator_data;
1414 struct cfq_io_context *cic;
1415 const int rw = rq_data_dir(rq);
1416 struct cfq_queue *cfqq, *saved_cfqq;
1417 struct cfq_rq *crq;
1418 unsigned long flags;
1419
1420 might_sleep_if(gfp_mask & __GFP_WAIT);
1421
1422 spin_lock_irqsave(q->queue_lock, flags);
1423
1424 cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
1425 if (!cfqq)
1426 goto out_lock;
1427
1428repeat:
1429 if (cfqq->allocated[rw] >= cfqd->max_queued)
1430 goto out_lock;
1431
1432 cfqq->allocated[rw]++;
1433 spin_unlock_irqrestore(q->queue_lock, flags);
1434
1435 /*
1436 * if hashing type has changed, the cfq_queue might change here.
1437 */
1438 saved_cfqq = cfqq;
1439 cic = cfq_get_io_context(&cfqq, gfp_mask);
1440 if (!cic)
1441 goto err;
1442
1443 /*
1444 * repeat allocation checks on queue change
1445 */
1446 if (unlikely(saved_cfqq != cfqq)) {
1447 spin_lock_irqsave(q->queue_lock, flags);
1448 saved_cfqq->allocated[rw]--;
1449 goto repeat;
1450 }
1451
1452 crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
1453 if (crq) {
1454 RB_CLEAR(&crq->rb_node);
1455 crq->rb_key = 0;
1456 crq->request = rq;
1457 INIT_HLIST_NODE(&crq->hash);
1458 crq->cfq_queue = cfqq;
1459 crq->io_context = cic;
1460 crq->service_start = crq->queue_start = 0;
1461 crq->in_flight = crq->accounted = crq->is_sync = 0;
1462 crq->is_write = rw;
1463 rq->elevator_private = crq;
1464 cfqq->alloc_limit[rw] = 0;
1465 return 0;
1466 }
1467
1468 put_io_context(cic->ioc);
1469err:
1470 spin_lock_irqsave(q->queue_lock, flags);
1471 cfqq->allocated[rw]--;
1472 cfq_put_queue(cfqq);
1473out_lock:
1474 spin_unlock_irqrestore(q->queue_lock, flags);
1475 return 1;
1476}
1477
1478static void cfq_put_cfqd(struct cfq_data *cfqd)
1479{
1480 request_queue_t *q = cfqd->queue;
1481
1482 if (!atomic_dec_and_test(&cfqd->ref))
1483 return;
1484
1485 blk_put_queue(q);
1486
1487 mempool_destroy(cfqd->crq_pool);
1488 kfree(cfqd->crq_hash);
1489 kfree(cfqd->cfq_hash);
1490 kfree(cfqd);
1491}
1492
1493static void cfq_exit_queue(elevator_t *e)
1494{
1495 cfq_put_cfqd(e->elevator_data);
1496}
1497
1498static int cfq_init_queue(request_queue_t *q, elevator_t *e)
1499{
1500 struct cfq_data *cfqd;
1501 int i;
1502
1503 cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
1504 if (!cfqd)
1505 return -ENOMEM;
1506
1507 memset(cfqd, 0, sizeof(*cfqd));
1508 INIT_LIST_HEAD(&cfqd->rr_list);
1509 INIT_LIST_HEAD(&cfqd->empty_list);
1510
1511 cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
1512 if (!cfqd->crq_hash)
1513 goto out_crqhash;
1514
1515 cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
1516 if (!cfqd->cfq_hash)
1517 goto out_cfqhash;
1518
1519 cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
1520 if (!cfqd->crq_pool)
1521 goto out_crqpool;
1522
1523 for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
1524 INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
1525 for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
1526 INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
1527
1528 e->elevator_data = cfqd;
1529
1530 cfqd->queue = q;
1531 atomic_inc(&q->refcnt);
1532
1533 /*
1534 * just set it to some high value, we want anyone to be able to queue
1535 * some requests. fairness is handled differently
1536 */
1537 q->nr_requests = 1024;
1538 cfqd->max_queued = q->nr_requests / 16;
1539 q->nr_batching = cfq_queued;
1540 cfqd->key_type = CFQ_KEY_TGID;
1541 cfqd->find_best_crq = 1;
1542 atomic_set(&cfqd->ref, 1);
1543
1544 cfqd->cfq_queued = cfq_queued;
1545 cfqd->cfq_quantum = cfq_quantum;
1546 cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
1547 cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
1548 cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
1549 cfqd->cfq_back_max = cfq_back_max;
1550 cfqd->cfq_back_penalty = cfq_back_penalty;
1551
1552 return 0;
1553out_crqpool:
1554 kfree(cfqd->cfq_hash);
1555out_cfqhash:
1556 kfree(cfqd->crq_hash);
1557out_crqhash:
1558 kfree(cfqd);
1559 return -ENOMEM;
1560}
1561
1562static void cfq_slab_kill(void)
1563{
1564 if (crq_pool)
1565 kmem_cache_destroy(crq_pool);
1566 if (cfq_pool)
1567 kmem_cache_destroy(cfq_pool);
1568 if (cfq_ioc_pool)
1569 kmem_cache_destroy(cfq_ioc_pool);
1570}
1571
1572static int __init cfq_slab_setup(void)
1573{
1574 crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
1575 NULL, NULL);
1576 if (!crq_pool)
1577 goto fail;
1578
1579 cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
1580 NULL, NULL);
1581 if (!cfq_pool)
1582 goto fail;
1583
1584 cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
1585 sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
1586 if (!cfq_ioc_pool)
1587 goto fail;
1588
1589 return 0;
1590fail:
1591 cfq_slab_kill();
1592 return -ENOMEM;
1593}
1594
1595
1596/*
1597 * sysfs parts below -->
1598 */
1599struct cfq_fs_entry {
1600 struct attribute attr;
1601 ssize_t (*show)(struct cfq_data *, char *);
1602 ssize_t (*store)(struct cfq_data *, const char *, size_t);
1603};
1604
1605static ssize_t
1606cfq_var_show(unsigned int var, char *page)
1607{
1608 return sprintf(page, "%d\n", var);
1609}
1610
1611static ssize_t
1612cfq_var_store(unsigned int *var, const char *page, size_t count)
1613{
1614 char *p = (char *) page;
1615
1616 *var = simple_strtoul(p, &p, 10);
1617 return count;
1618}
1619
1620static ssize_t
1621cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
1622{
1623 max_elapsed_dispatch = max_elapsed_crq = 0;
1624 return count;
1625}
1626
1627static ssize_t
1628cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
1629{
1630 spin_lock_irq(cfqd->queue->queue_lock);
1631 if (!strncmp(page, "pgid", 4))
1632 cfqd->key_type = CFQ_KEY_PGID;
1633 else if (!strncmp(page, "tgid", 4))
1634 cfqd->key_type = CFQ_KEY_TGID;
1635 else if (!strncmp(page, "uid", 3))
1636 cfqd->key_type = CFQ_KEY_UID;
1637 else if (!strncmp(page, "gid", 3))
1638 cfqd->key_type = CFQ_KEY_GID;
1639 spin_unlock_irq(cfqd->queue->queue_lock);
1640 return count;
1641}
1642
1643static ssize_t
1644cfq_read_key_type(struct cfq_data *cfqd, char *page)
1645{
1646 ssize_t len = 0;
1647 int i;
1648
1649 for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
1650 if (cfqd->key_type == i)
1651 len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
1652 else
1653 len += sprintf(page+len, "%s ", cfq_key_types[i]);
1654 }
1655 len += sprintf(page+len, "\n");
1656 return len;
1657}
1658
1659#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
1660static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \
1661{ \
1662 unsigned int __data = __VAR; \
1663 if (__CONV) \
1664 __data = jiffies_to_msecs(__data); \
1665 return cfq_var_show(__data, (page)); \
1666}
1667SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
1668SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
1669SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
1670SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
1671SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
1672SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
1673SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
1674SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
1675#undef SHOW_FUNCTION
1676
1677#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
1678static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
1679{ \
1680 unsigned int __data; \
1681 int ret = cfq_var_store(&__data, (page), count); \
1682 if (__data < (MIN)) \
1683 __data = (MIN); \
1684 else if (__data > (MAX)) \
1685 __data = (MAX); \
1686 if (__CONV) \
1687 *(__PTR) = msecs_to_jiffies(__data); \
1688 else \
1689 *(__PTR) = __data; \
1690 return ret; \
1691}
1692STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
1693STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
1694STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
1695STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
1696STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
1697STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
1698STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
1699STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
1700#undef STORE_FUNCTION
1701
1702static struct cfq_fs_entry cfq_quantum_entry = {
1703 .attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
1704 .show = cfq_quantum_show,
1705 .store = cfq_quantum_store,
1706};
1707static struct cfq_fs_entry cfq_queued_entry = {
1708 .attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
1709 .show = cfq_queued_show,
1710 .store = cfq_queued_store,
1711};
1712static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
1713 .attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
1714 .show = cfq_fifo_expire_r_show,
1715 .store = cfq_fifo_expire_r_store,
1716};
1717static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
1718 .attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
1719 .show = cfq_fifo_expire_w_show,
1720 .store = cfq_fifo_expire_w_store,
1721};
1722static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
1723 .attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
1724 .show = cfq_fifo_batch_expire_show,
1725 .store = cfq_fifo_batch_expire_store,
1726};
1727static struct cfq_fs_entry cfq_find_best_entry = {
1728 .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
1729 .show = cfq_find_best_show,
1730 .store = cfq_find_best_store,
1731};
1732static struct cfq_fs_entry cfq_back_max_entry = {
1733 .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
1734 .show = cfq_back_max_show,
1735 .store = cfq_back_max_store,
1736};
1737static struct cfq_fs_entry cfq_back_penalty_entry = {
1738 .attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
1739 .show = cfq_back_penalty_show,
1740 .store = cfq_back_penalty_store,
1741};
1742static struct cfq_fs_entry cfq_clear_elapsed_entry = {
1743 .attr = {.name = "clear_elapsed", .mode = S_IWUSR },
1744 .store = cfq_clear_elapsed,
1745};
1746static struct cfq_fs_entry cfq_key_type_entry = {
1747 .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
1748 .show = cfq_read_key_type,
1749 .store = cfq_set_key_type,
1750};
1751
1752static struct attribute *default_attrs[] = {
1753 &cfq_quantum_entry.attr,
1754 &cfq_queued_entry.attr,
1755 &cfq_fifo_expire_r_entry.attr,
1756 &cfq_fifo_expire_w_entry.attr,
1757 &cfq_fifo_batch_expire_entry.attr,
1758 &cfq_key_type_entry.attr,
1759 &cfq_find_best_entry.attr,
1760 &cfq_back_max_entry.attr,
1761 &cfq_back_penalty_entry.attr,
1762 &cfq_clear_elapsed_entry.attr,
1763 NULL,
1764};
1765
1766#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
1767
1768static ssize_t
1769cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1770{
1771 elevator_t *e = container_of(kobj, elevator_t, kobj);
1772 struct cfq_fs_entry *entry = to_cfq(attr);
1773
1774 if (!entry->show)
1775 return 0;
1776
1777 return entry->show(e->elevator_data, page);
1778}
1779
1780static ssize_t
1781cfq_attr_store(struct kobject *kobj, struct attribute *attr,
1782 const char *page, size_t length)
1783{
1784 elevator_t *e = container_of(kobj, elevator_t, kobj);
1785 struct cfq_fs_entry *entry = to_cfq(attr);
1786
1787 if (!entry->store)
1788 return -EINVAL;
1789
1790 return entry->store(e->elevator_data, page, length);
1791}
1792
1793static struct sysfs_ops cfq_sysfs_ops = {
1794 .show = cfq_attr_show,
1795 .store = cfq_attr_store,
1796};
1797
1798static struct kobj_type cfq_ktype = {
1799 .sysfs_ops = &cfq_sysfs_ops,
1800 .default_attrs = default_attrs,
1801};
1802
1803static struct elevator_type iosched_cfq = {
1804 .ops = {
1805 .elevator_merge_fn = cfq_merge,
1806 .elevator_merged_fn = cfq_merged_request,
1807 .elevator_merge_req_fn = cfq_merged_requests,
1808 .elevator_next_req_fn = cfq_next_request,
1809 .elevator_add_req_fn = cfq_insert_request,
1810 .elevator_remove_req_fn = cfq_remove_request,
1811 .elevator_requeue_req_fn = cfq_requeue_request,
1812 .elevator_deactivate_req_fn = cfq_deactivate_request,
1813 .elevator_queue_empty_fn = cfq_queue_empty,
1814 .elevator_completed_req_fn = cfq_completed_request,
1815 .elevator_former_req_fn = cfq_former_request,
1816 .elevator_latter_req_fn = cfq_latter_request,
1817 .elevator_set_req_fn = cfq_set_request,
1818 .elevator_put_req_fn = cfq_put_request,
1819 .elevator_may_queue_fn = cfq_may_queue,
1820 .elevator_init_fn = cfq_init_queue,
1821 .elevator_exit_fn = cfq_exit_queue,
1822 },
1823 .elevator_ktype = &cfq_ktype,
1824 .elevator_name = "cfq",
1825 .elevator_owner = THIS_MODULE,
1826};
1827
1828static int __init cfq_init(void)
1829{
1830 int ret;
1831
1832 if (cfq_slab_setup())
1833 return -ENOMEM;
1834
1835 ret = elv_register(&iosched_cfq);
1836 if (!ret) {
1837 __module_get(THIS_MODULE);
1838 return 0;
1839 }
1840
1841 cfq_slab_kill();
1842 return ret;
1843}
1844
1845static void __exit cfq_exit(void)
1846{
1847 cfq_slab_kill();
1848 elv_unregister(&iosched_cfq);
1849}
1850
1851module_init(cfq_init);
1852module_exit(cfq_exit);
1853
1854MODULE_AUTHOR("Jens Axboe");
1855MODULE_LICENSE("GPL");
1856MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
new file mode 100644
index 000000000000..cf1822a6361c
--- /dev/null
+++ b/drivers/block/cpqarray.c
@@ -0,0 +1,1850 @@
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#include <linux/config.h> /* CONFIG_PROC_FS */
23#include <linux/module.h>
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/bio.h>
27#include <linux/interrupt.h>
28#include <linux/kernel.h>
29#include <linux/slab.h>
30#include <linux/delay.h>
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/blkpg.h>
34#include <linux/timer.h>
35#include <linux/proc_fs.h>
36#include <linux/devfs_fs_kernel.h>
37#include <linux/init.h>
38#include <linux/hdreg.h>
39#include <linux/spinlock.h>
40#include <linux/blkdev.h>
41#include <linux/genhd.h>
42#include <asm/uaccess.h>
43#include <asm/io.h>
44
45
46#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
47
48#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
49#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
50
51/* Embedded module documentation macros - see modules.h */
52/* Original author Chris Frantz - Compaq Computer Corporation */
53MODULE_AUTHOR("Compaq Computer Corporation");
54MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
55MODULE_LICENSE("GPL");
56
57#include "cpqarray.h"
58#include "ida_cmd.h"
59#include "smart1,2.h"
60#include "ida_ioctl.h"
61
62#define READ_AHEAD 128
63#define NR_CMDS 128 /* This could probably go as high as ~400 */
64
65#define MAX_CTLR 8
66#define CTLR_SHIFT 8
67
68#define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */
69
70static int nr_ctlr;
71static ctlr_info_t *hba[MAX_CTLR];
72
73static int eisa[8];
74
75#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
76
77/* board_id = Subsystem Device ID & Vendor ID
78 * product = Marketing Name for the board
79 * access = Address of the struct of function pointers
80 */
81static struct board_type products[] = {
82 { 0x0040110E, "IDA", &smart1_access },
83 { 0x0140110E, "IDA-2", &smart1_access },
84 { 0x1040110E, "IAES", &smart1_access },
85 { 0x2040110E, "SMART", &smart1_access },
86 { 0x3040110E, "SMART-2/E", &smart2e_access },
87 { 0x40300E11, "SMART-2/P", &smart2_access },
88 { 0x40310E11, "SMART-2SL", &smart2_access },
89 { 0x40320E11, "Smart Array 3200", &smart2_access },
90 { 0x40330E11, "Smart Array 3100ES", &smart2_access },
91 { 0x40340E11, "Smart Array 221", &smart2_access },
92 { 0x40400E11, "Integrated Array", &smart4_access },
93 { 0x40480E11, "Compaq Raid LC2", &smart4_access },
94 { 0x40500E11, "Smart Array 4200", &smart4_access },
95 { 0x40510E11, "Smart Array 4250ES", &smart4_access },
96 { 0x40580E11, "Smart Array 431", &smart4_access },
97};
98
99/* define the PCI info for the PCI cards this driver can control */
100static const struct pci_device_id cpqarray_pci_device_id[] =
101{
102 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
103 0x0E11, 0x4058, 0, 0, 0}, /* SA431 */
104 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
105 0x0E11, 0x4051, 0, 0, 0}, /* SA4250ES */
106 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
107 0x0E11, 0x4050, 0, 0, 0}, /* SA4200 */
108 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
109 0x0E11, 0x4048, 0, 0, 0}, /* LC2 */
110 { PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
111 0x0E11, 0x4040, 0, 0, 0}, /* Integrated Array */
112 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
113 0x0E11, 0x4034, 0, 0, 0}, /* SA 221 */
114 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
115 0x0E11, 0x4033, 0, 0, 0}, /* SA 3100ES*/
116 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
117 0x0E11, 0x4032, 0, 0, 0}, /* SA 3200*/
118 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
119 0x0E11, 0x4031, 0, 0, 0}, /* SA 2SL*/
120 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
121 0x0E11, 0x4030, 0, 0, 0}, /* SA 2P */
122 { 0 }
123};
124
125MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
126
127static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
128
129/* Debug... */
130#define DBG(s) do { s } while(0)
131/* Debug (general info)... */
132#define DBGINFO(s) do { } while(0)
133/* Debug Paranoid... */
134#define DBGP(s) do { } while(0)
135/* Debug Extra Paranoid... */
136#define DBGPX(s) do { } while(0)
137
138static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
139static void __iomem *remap_pci_mem(ulong base, ulong size);
140static int cpqarray_eisa_detect(void);
141static int pollcomplete(int ctlr);
142static void getgeometry(int ctlr);
143static void start_fwbk(int ctlr);
144
145static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
146static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
147
148static void free_hba(int i);
149static int alloc_cpqarray_hba(void);
150
151static int sendcmd(
152 __u8 cmd,
153 int ctlr,
154 void *buff,
155 size_t size,
156 unsigned int blk,
157 unsigned int blkcnt,
158 unsigned int log_unit );
159
160static int ida_open(struct inode *inode, struct file *filep);
161static int ida_release(struct inode *inode, struct file *filep);
162static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
163static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
164
165static void do_ida_request(request_queue_t *q);
166static void start_io(ctlr_info_t *h);
167
168static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
169static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
170static inline void complete_buffers(struct bio *bio, int ok);
171static inline void complete_command(cmdlist_t *cmd, int timeout);
172
173static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
174static void ida_timer(unsigned long tdata);
175static int ida_revalidate(struct gendisk *disk);
176static int revalidate_allvol(ctlr_info_t *host);
177static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
178
179#ifdef CONFIG_PROC_FS
180static void ida_procinit(int i);
181static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
182#else
183static void ida_procinit(int i) {}
184#endif
185
186static inline drv_info_t *get_drv(struct gendisk *disk)
187{
188 return disk->private_data;
189}
190
191static inline ctlr_info_t *get_host(struct gendisk *disk)
192{
193 return disk->queue->queuedata;
194}
195
196
197static struct block_device_operations ida_fops = {
198 .owner = THIS_MODULE,
199 .open = ida_open,
200 .release = ida_release,
201 .ioctl = ida_ioctl,
202 .revalidate_disk= ida_revalidate,
203};
204
205
206#ifdef CONFIG_PROC_FS
207
208static struct proc_dir_entry *proc_array;
209
210/*
211 * Get us a file in /proc/array that says something about each controller.
212 * Create /proc/array if it doesn't exist yet.
213 */
214static void __init ida_procinit(int i)
215{
216 if (proc_array == NULL) {
217 proc_array = proc_mkdir("cpqarray", proc_root_driver);
218 if (!proc_array) return;
219 }
220
221 create_proc_read_entry(hba[i]->devname, 0, proc_array,
222 ida_proc_get_info, hba[i]);
223}
224
225/*
226 * Report information about this controller.
227 */
228static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
229{
230 off_t pos = 0;
231 off_t len = 0;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c;
237 unsigned long flags;
238#endif
239
240 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n"
245 " Memory Address: 0x%08lx\n"
246 " I/O Port: 0x%04x\n"
247 " IRQ: %d\n"
248 " Logical drives: %d\n"
249 " Physical drives: %d\n\n"
250 " Current Q depth: %d\n"
251 " Max Q depth since init: %d\n\n",
252 h->devname,
253 h->product_name,
254 (unsigned long)h->board_id,
255 h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
256 (unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
257 (unsigned int) h->io_mem_addr, (unsigned int)h->intr,
258 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit);
260
261 pos += size; len += size;
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265
266 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 }
272
273#ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n");
276 pos += size; len += size;
277
278 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
280 if (c) c=c->next;
281 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c);
283 pos += size; len += size;
284 c=c->next;
285 }
286
287 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
289 if (c) c=c->next;
290 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c);
292 pos += size; len += size;
293 c=c->next;
294 }
295
296 size = sprintf(buffer+len, "\n"); pos += size; len += size;
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298#endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees);
301 pos += size; len += size;
302
303 *eof = 1;
304 *start = buffer+offset;
305 len -= offset;
306 if (len>length)
307 len = length;
308 return len;
309}
310#endif /* CONFIG_PROC_FS */
311
312module_param_array(eisa, int, NULL, 0);
313
314static void release_io_mem(ctlr_info_t *c)
315{
316 /* if IO mem was not protected do nothing */
317 if( c->io_mem_addr == 0)
318 return;
319 release_region(c->io_mem_addr, c->io_mem_length);
320 c->io_mem_addr = 0;
321 c->io_mem_length = 0;
322}
323
324static void __devexit cpqarray_remove_one(int i)
325{
326 int j;
327 char buff[4];
328
 329	/* sendcmd will turn off interrupts and send the flush
 330	 * to write all data in the battery-backed cache to disks.
 331	 * No data is returned, but we don't want to pass NULL to sendcmd. */
332 if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
333 {
334 printk(KERN_WARNING "Unable to flush cache on controller %d\n",
335 i);
336 }
337 free_irq(hba[i]->intr, hba[i]);
338 iounmap(hba[i]->vaddr);
339 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
340 del_timer(&hba[i]->timer);
341 remove_proc_entry(hba[i]->devname, proc_array);
342 pci_free_consistent(hba[i]->pci_dev,
343 NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
344 hba[i]->cmd_pool_dhandle);
345 kfree(hba[i]->cmd_pool_bits);
346 for(j = 0; j < NWD; j++) {
347 if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
348 del_gendisk(ida_gendisk[i][j]);
349 devfs_remove("ida/c%dd%d",i,j);
350 put_disk(ida_gendisk[i][j]);
351 }
352 blk_cleanup_queue(hba[i]->queue);
353 release_io_mem(hba[i]);
354 free_hba(i);
355}
356
357static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
358{
359 int i;
360 ctlr_info_t *tmp_ptr;
361
362 if (pci_get_drvdata(pdev) == NULL) {
 363		printk(KERN_ERR "cpqarray: Unable to remove device\n");
364 return;
365 }
366
367 tmp_ptr = pci_get_drvdata(pdev);
368 i = tmp_ptr->ctlr;
369 if (hba[i] == NULL) {
 370		printk(KERN_ERR "cpqarray: controller %d appears to have "
 371			"already been removed\n", i);
372 return;
373 }
374 pci_set_drvdata(pdev, NULL);
375
376 cpqarray_remove_one(i);
377}
378
379/* removing an instance that was not removed automatically..
380 * must be an eisa card.
381 */
382static void __devexit cpqarray_remove_one_eisa (int i)
383{
384 if (hba[i] == NULL) {
 385		printk(KERN_ERR "cpqarray: controller %d appears to have "
 386			"already been removed\n", i);
387 return;
388 }
389 cpqarray_remove_one(i);
390}
391
392/* pdev is NULL for eisa */
393static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
394{
395 request_queue_t *q;
396 int j;
397
398 /*
399 * register block devices
400 * Find disks and fill in structs
401 * Get an interrupt, set the Q depth and get into /proc
402 */
403
 404	/* If this is successful it should ensure that we are the only */
405 /* instance of the driver */
406 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
407 goto Enomem4;
408 }
409 hba[i]->access.set_intr_mask(hba[i], 0);
410 if (request_irq(hba[i]->intr, do_ida_intr,
411 SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
412 hba[i]->devname, hba[i]))
413 {
414 printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
415 hba[i]->intr, hba[i]->devname);
416 goto Enomem3;
417 }
418
419 for (j=0; j<NWD; j++) {
420 ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
421 if (!ida_gendisk[i][j])
422 goto Enomem2;
423 }
424
425 hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
426 hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
427 &(hba[i]->cmd_pool_dhandle));
428 hba[i]->cmd_pool_bits = kmalloc(
429 ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
430 GFP_KERNEL);
431
432 if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
433 goto Enomem1;
434
435 memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
436 memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
437 printk(KERN_INFO "cpqarray: Finding drives on %s",
438 hba[i]->devname);
439
440 spin_lock_init(&hba[i]->lock);
441 q = blk_init_queue(do_ida_request, &hba[i]->lock);
442 if (!q)
443 goto Enomem1;
444
445 hba[i]->queue = q;
446 q->queuedata = hba[i];
447
448 getgeometry(i);
449 start_fwbk(i);
450
451 ida_procinit(i);
452
453 if (pdev)
454 blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
455
456 /* This is a hardware imposed limit. */
457 blk_queue_max_hw_segments(q, SG_MAX);
458
459 /* This is a driver limit and could be eliminated. */
460 blk_queue_max_phys_segments(q, SG_MAX);
461
462 init_timer(&hba[i]->timer);
463 hba[i]->timer.expires = jiffies + IDA_TIMER;
464 hba[i]->timer.data = (unsigned long)hba[i];
465 hba[i]->timer.function = ida_timer;
466 add_timer(&hba[i]->timer);
467
468 /* Enable IRQ now that spinlock and rate limit timer are set up */
469 hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
470
471 for(j=0; j<NWD; j++) {
472 struct gendisk *disk = ida_gendisk[i][j];
473 drv_info_t *drv = &hba[i]->drv[j];
474 sprintf(disk->disk_name, "ida/c%dd%d", i, j);
475 disk->major = COMPAQ_SMART2_MAJOR + i;
476 disk->first_minor = j<<NWD_SHIFT;
477 disk->fops = &ida_fops;
478 if (j && !drv->nr_blks)
479 continue;
480 blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
481 set_capacity(disk, drv->nr_blks);
482 disk->queue = hba[i]->queue;
483 disk->private_data = drv;
484 add_disk(disk);
485 }
486
487 /* done ! */
488 return(i);
489
490Enomem1:
491 nr_ctlr = i;
492 kfree(hba[i]->cmd_pool_bits);
493 if (hba[i]->cmd_pool)
494 pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
495 hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
496Enomem2:
497 while (j--) {
498 put_disk(ida_gendisk[i][j]);
499 ida_gendisk[i][j] = NULL;
500 }
501 free_irq(hba[i]->intr, hba[i]);
502Enomem3:
503 unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
504Enomem4:
505 if (pdev)
506 pci_set_drvdata(pdev, NULL);
507 release_io_mem(hba[i]);
508 free_hba(i);
509
 510	printk(KERN_ERR "cpqarray: out of memory\n");
511
512 return -1;
513}
514
515static int __init cpqarray_init_one( struct pci_dev *pdev,
516 const struct pci_device_id *ent)
517{
518 int i;
519
520 printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
521 " bus %d dev %d func %d\n",
522 pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
523 PCI_FUNC(pdev->devfn));
524 i = alloc_cpqarray_hba();
525 if( i < 0 )
526 return (-1);
527 memset(hba[i], 0, sizeof(ctlr_info_t));
528 sprintf(hba[i]->devname, "ida%d", i);
529 hba[i]->ctlr = i;
530 /* Initialize the pdev driver private data */
531 pci_set_drvdata(pdev, hba[i]);
532
533 if (cpqarray_pci_init(hba[i], pdev) != 0) {
534 pci_set_drvdata(pdev, NULL);
535 release_io_mem(hba[i]);
536 free_hba(i);
537 return -1;
538 }
539
540 return (cpqarray_register_ctlr(i, pdev));
541}
542
543static struct pci_driver cpqarray_pci_driver = {
544 .name = "cpqarray",
545 .probe = cpqarray_init_one,
546 .remove = __devexit_p(cpqarray_remove_one_pci),
547 .id_table = cpqarray_pci_device_id,
548};
549
550/*
551 * This is it. Find all the controllers and register them.
552 * returns the number of block devices registered.
553 */
554static int __init cpqarray_init(void)
555{
556 int num_cntlrs_reg = 0;
557 int i;
558 int rc = 0;
559
560 /* detect controllers */
561 printk(DRIVER_NAME "\n");
562
563 rc = pci_register_driver(&cpqarray_pci_driver);
564 if (rc)
565 return rc;
566 cpqarray_eisa_detect();
567
568 for (i=0; i < MAX_CTLR; i++) {
569 if (hba[i] != NULL)
570 num_cntlrs_reg++;
571 }
572
573 return(num_cntlrs_reg);
574}
575
576/* Function to find the first free pointer into our hba[] array */
577/* Returns -1 if no free entries are left. */
578static int alloc_cpqarray_hba(void)
579{
580 int i;
581
582 for(i=0; i< MAX_CTLR; i++) {
583 if (hba[i] == NULL) {
584 hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
585 if(hba[i]==NULL) {
586 printk(KERN_ERR "cpqarray: out of memory.\n");
587 return (-1);
588 }
589 return (i);
590 }
591 }
592 printk(KERN_WARNING "cpqarray: This driver supports a maximum"
593 " of 8 controllers.\n");
594 return(-1);
595}
596
597static void free_hba(int i)
598{
599 kfree(hba[i]);
600 hba[i]=NULL;
601}
602
603/*
604 * Find the IO address of the controller, its IRQ and so forth. Fill
605 * in some basic stuff into the ctlr_info_t structure.
606 */
607static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
608{
609 ushort vendor_id, device_id, command;
610 unchar cache_line_size, latency_timer;
611 unchar irq, revision;
612 unsigned long addr[6];
613 __u32 board_id;
614
615 int i;
616
617 c->pci_dev = pdev;
618 if (pci_enable_device(pdev)) {
619 printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
620 return -1;
621 }
622 vendor_id = pdev->vendor;
623 device_id = pdev->device;
624 irq = pdev->irq;
625
626 for(i=0; i<6; i++)
627 addr[i] = pci_resource_start(pdev, i);
628
629 if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
630 {
631 printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
632 return -1;
633 }
634
635 pci_read_config_word(pdev, PCI_COMMAND, &command);
636 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
637 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
638 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
639
640 pci_read_config_dword(pdev, 0x2c, &board_id);
641
642 /* check to see if controller has been disabled */
643 if(!(command & 0x02)) {
644 printk(KERN_WARNING
645 "cpqarray: controller appears to be disabled\n");
646 return(-1);
647 }
648
649DBGINFO(
650 printk("vendor_id = %x\n", vendor_id);
651 printk("device_id = %x\n", device_id);
652 printk("command = %x\n", command);
653 for(i=0; i<6; i++)
654 printk("addr[%d] = %lx\n", i, addr[i]);
655 printk("revision = %x\n", revision);
656 printk("irq = %x\n", irq);
657 printk("cache_line_size = %x\n", cache_line_size);
658 printk("latency_timer = %x\n", latency_timer);
659 printk("board_id = %x\n", board_id);
660);
661
662 c->intr = irq;
663
664 for(i=0; i<6; i++) {
665 if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
666 { /* IO space */
667 c->io_mem_addr = addr[i];
668 c->io_mem_length = pci_resource_end(pdev, i)
669 - pci_resource_start(pdev, i) + 1;
670 if(!request_region( c->io_mem_addr, c->io_mem_length,
671 "cpqarray"))
672 {
673 printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
674 c->io_mem_addr = 0;
675 c->io_mem_length = 0;
676 }
677 break;
678 }
679 }
680
681 c->paddr = 0;
682 for(i=0; i<6; i++)
683 if (!(pci_resource_flags(pdev, i) &
684 PCI_BASE_ADDRESS_SPACE_IO)) {
685 c->paddr = pci_resource_start (pdev, i);
686 break;
687 }
688 if (!c->paddr)
689 return -1;
690 c->vaddr = remap_pci_mem(c->paddr, 128);
691 if (!c->vaddr)
692 return -1;
693 c->board_id = board_id;
694
695 for(i=0; i<NR_PRODUCTS; i++) {
696 if (board_id == products[i].board_id) {
697 c->product_name = products[i].product_name;
698 c->access = *(products[i].access);
699 break;
700 }
701 }
702 if (i == NR_PRODUCTS) {
703 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
704 " to access the SMART Array controller %08lx\n",
705 (unsigned long)board_id);
706 return -1;
707 }
708
709 return 0;
710}
711
712/*
713 * Map (physical) PCI mem into (virtual) kernel space
714 */
715static void __iomem *remap_pci_mem(ulong base, ulong size)
716{
717 ulong page_base = ((ulong) base) & PAGE_MASK;
718 ulong page_offs = ((ulong) base) - page_base;
719 void __iomem *page_remapped = ioremap(page_base, page_offs+size);
720
721 return (page_remapped ? (page_remapped + page_offs) : NULL);
722}
723
724#ifndef MODULE
725/*
726 * Config string is a comma separated set of i/o addresses of EISA cards.
727 */
728static int cpqarray_setup(char *str)
729{
730 int i, ints[9];
731
732 (void)get_options(str, ARRAY_SIZE(ints), ints);
733
734 for(i=0; i<ints[0] && i<8; i++)
735 eisa[i] = ints[i+1];
736 return 1;
737}
738
739__setup("smart2=", cpqarray_setup);
740
741#endif
742
743/*
744 * Find an EISA controller's signature. Set up an hba if we find it.
745 */
746static int cpqarray_eisa_detect(void)
747{
748 int i=0, j;
749 __u32 board_id;
750 int intr;
751 int ctlr;
752 int num_ctlr = 0;
753
754 while(i<8 && eisa[i]) {
755 ctlr = alloc_cpqarray_hba();
756 if(ctlr == -1)
757 break;
758 board_id = inl(eisa[i]+0xC80);
759 for(j=0; j < NR_PRODUCTS; j++)
760 if (board_id == products[j].board_id)
761 break;
762
763 if (j == NR_PRODUCTS) {
764 printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
765 " to access the SMART Array controller %08lx\n", (unsigned long)board_id);
766 continue;
767 }
768
769 memset(hba[ctlr], 0, sizeof(ctlr_info_t));
770 hba[ctlr]->io_mem_addr = eisa[i];
771 hba[ctlr]->io_mem_length = 0x7FF;
772 if(!request_region(hba[ctlr]->io_mem_addr,
773 hba[ctlr]->io_mem_length,
774 "cpqarray"))
775 {
776 printk(KERN_WARNING "cpqarray: I/O range already in "
777 "use addr = %lx length = %ld\n",
778 hba[ctlr]->io_mem_addr,
779 hba[ctlr]->io_mem_length);
780 free_hba(ctlr);
781 continue;
782 }
783
784 /*
785 * Read the config register to find our interrupt
786 */
787 intr = inb(eisa[i]+0xCC0) >> 4;
788 if (intr & 1) intr = 11;
789 else if (intr & 2) intr = 10;
790 else if (intr & 4) intr = 14;
791 else if (intr & 8) intr = 15;
792
793 hba[ctlr]->intr = intr;
794 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
795 hba[ctlr]->product_name = products[j].product_name;
796 hba[ctlr]->access = *(products[j].access);
797 hba[ctlr]->ctlr = ctlr;
798 hba[ctlr]->board_id = board_id;
799 hba[ctlr]->pci_dev = NULL; /* not PCI */
800
801DBGINFO(
802 printk("i = %d, j = %d\n", i, j);
803 printk("irq = %x\n", intr);
804 printk("product name = %s\n", products[j].product_name);
805 printk("board_id = %x\n", board_id);
806);
807
808 num_ctlr++;
809 i++;
810
811 if (cpqarray_register_ctlr(ctlr, NULL) == -1)
812 printk(KERN_WARNING
813 "cpqarray: Can't register EISA controller %d\n",
814 ctlr);
815
816 }
817
818 return num_ctlr;
819}
820
821/*
822 * Open. Make sure the device is really there.
823 */
824static int ida_open(struct inode *inode, struct file *filep)
825{
826 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
827 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
828
829 DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
830 /*
831 * Root is allowed to open raw volume zero even if it's not configured
832 * so array config can still work. I don't think I really like this,
 833	 * but I'm already using way too many device nodes to claim another one
834 * for "raw controller".
835 */
836 if (!drv->nr_blks) {
837 if (!capable(CAP_SYS_RAWIO))
838 return -ENXIO;
839 if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
840 return -ENXIO;
841 }
842 host->usage_count++;
843 return 0;
844}
845
846/*
847 * Close. Sync first.
848 */
849static int ida_release(struct inode *inode, struct file *filep)
850{
851 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
852 host->usage_count--;
853 return 0;
854}
855
856/*
857 * Enqueuing and dequeuing functions for cmdlists.
858 */
859static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
860{
861 if (*Qptr == NULL) {
862 *Qptr = c;
863 c->next = c->prev = c;
864 } else {
865 c->prev = (*Qptr)->prev;
866 c->next = (*Qptr);
867 (*Qptr)->prev->next = c;
868 (*Qptr)->prev = c;
869 }
870}
871
872static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
873{
874 if (c && c->next != c) {
875 if (*Qptr == c) *Qptr = c->next;
876 c->prev->next = c->next;
877 c->next->prev = c->prev;
878 } else {
879 *Qptr = NULL;
880 }
881 return c;
882}
883
884/*
885 * Get a request and submit it to the controller.
886 * This routine needs to grab all the requests it possibly can from the
887 * req Q and submit them. Interrupts are off (and need to be off) when you
888 * are in here (either via the dummy do_ida_request functions or by being
 889 * called from the interrupt handler).
890 */
891static void do_ida_request(request_queue_t *q)
892{
893 ctlr_info_t *h = q->queuedata;
894 cmdlist_t *c;
895 struct request *creq;
896 struct scatterlist tmp_sg[SG_MAX];
897 int i, dir, seg;
898
899 if (blk_queue_plugged(q))
900 goto startio;
901
902queue_next:
903 creq = elv_next_request(q);
904 if (!creq)
905 goto startio;
906
907 if (creq->nr_phys_segments > SG_MAX)
908 BUG();
909
910 if ((c = cmd_alloc(h,1)) == NULL)
911 goto startio;
912
913 blkdev_dequeue_request(creq);
914
915 c->ctlr = h->ctlr;
916 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
917 c->hdr.size = sizeof(rblk_t) >> 2;
918 c->size += sizeof(rblk_t);
919
920 c->req.hdr.blk = creq->sector;
921 c->rq = creq;
922DBGPX(
923 printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
924);
925 seg = blk_rq_map_sg(q, creq, tmp_sg);
926
927 /* Now do all the DMA Mappings */
928 if (rq_data_dir(creq) == READ)
929 dir = PCI_DMA_FROMDEVICE;
930 else
931 dir = PCI_DMA_TODEVICE;
932 for( i=0; i < seg; i++)
933 {
934 c->req.sg[i].size = tmp_sg[i].length;
935 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
936 tmp_sg[i].page,
937 tmp_sg[i].offset,
938 tmp_sg[i].length, dir);
939 }
940DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
941 c->req.hdr.sg_cnt = seg;
942 c->req.hdr.blk_cnt = creq->nr_sectors;
943 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
944 c->type = CMD_RWREQ;
945
946 /* Put the request on the tail of the request queue */
947 addQ(&h->reqQ, c);
948 h->Qdepth++;
949 if (h->Qdepth > h->maxQsinceinit)
950 h->maxQsinceinit = h->Qdepth;
951
952 goto queue_next;
953
954startio:
955 start_io(h);
956}
957
958/*
959 * start_io submits everything on a controller's request queue
960 * and moves it to the completion queue.
961 *
962 * Interrupts had better be off if you're in here
963 */
964static void start_io(ctlr_info_t *h)
965{
966 cmdlist_t *c;
967
968 while((c = h->reqQ) != NULL) {
969 /* Can't do anything if we're busy */
970 if (h->access.fifo_full(h) == 0)
971 return;
972
973 /* Get the first entry from the request Q */
974 removeQ(&h->reqQ, c);
975 h->Qdepth--;
976
977 /* Tell the controller to do our bidding */
978 h->access.submit_command(h, c);
979
980 /* Get onto the completion Q */
981 addQ(&h->cmpQ, c);
982 }
983}
984
985static inline void complete_buffers(struct bio *bio, int ok)
986{
987 struct bio *xbh;
988 while(bio) {
989 int nr_sectors = bio_sectors(bio);
990
991 xbh = bio->bi_next;
992 bio->bi_next = NULL;
993
994 blk_finished_io(nr_sectors);
995 bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
996
997 bio = xbh;
998 }
999}
1000/*
1001 * Mark all buffers that cmd was responsible for as completed
1002 */
1003static inline void complete_command(cmdlist_t *cmd, int timeout)
1004{
1005 int ok=1;
1006 int i, ddir;
1007
1008 if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1009 (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1010 printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1011 cmd->ctlr, cmd->hdr.unit);
1012 hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1013 }
1014 if (cmd->req.hdr.rcode & RCODE_FATAL) {
1015 printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1016 cmd->ctlr, cmd->hdr.unit);
1017 ok = 0;
1018 }
1019 if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1020 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1021 cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1022 cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1023 cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1024 ok = 0;
1025 }
1026 if (timeout) ok = 0;
1027 /* unmap the DMA mapping for all the scatter gather elements */
1028 if (cmd->req.hdr.cmd == IDA_READ)
1029 ddir = PCI_DMA_FROMDEVICE;
1030 else
1031 ddir = PCI_DMA_TODEVICE;
1032 for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1033 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1034 cmd->req.sg[i].size, ddir);
1035
1036 complete_buffers(cmd->rq->bio, ok);
1037
1038 DBGPX(printk("Done with %p\n", cmd->rq););
1039 end_that_request_last(cmd->rq);
1040}
1041
1042/*
1043 * The controller will interrupt us upon completion of commands.
1044 * Find the command on the completion queue, remove it, tell the OS and
1045 * try to queue up more IO
1046 */
1047static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
1048{
1049 ctlr_info_t *h = dev_id;
1050 cmdlist_t *c;
1051 unsigned long istat;
1052 unsigned long flags;
1053 __u32 a,a1;
1054
1055 istat = h->access.intr_pending(h);
1056 /* Is this interrupt for us? */
1057 if (istat == 0)
1058 return IRQ_NONE;
1059
1060 /*
1061 * If there are completed commands in the completion queue,
1062 * we had better do something about it.
1063 */
1064 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1065 if (istat & FIFO_NOT_EMPTY) {
1066 while((a = h->access.command_completed(h))) {
1067 a1 = a; a &= ~3;
1068 if ((c = h->cmpQ) == NULL)
1069 {
1070 printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1071 continue;
1072 }
1073 while(c->busaddr != a) {
1074 c = c->next;
1075 if (c == h->cmpQ)
1076 break;
1077 }
1078 /*
1079 * If we've found the command, take it off the
1080 * completion Q and free it
1081 */
1082 if (c->busaddr == a) {
1083 removeQ(&h->cmpQ, c);
1084 /* Check for invalid command.
1085 * The controller returns a command error,
1086 * but rcode = 0.
1087 */
1088
1089 if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1090 {
1091 c->req.hdr.rcode = RCODE_INVREQ;
1092 }
1093 if (c->type == CMD_RWREQ) {
1094 complete_command(c, 0);
1095 cmd_free(h, c, 1);
1096 } else if (c->type == CMD_IOCTL_PEND) {
1097 c->type = CMD_IOCTL_DONE;
1098 }
1099 continue;
1100 }
1101 }
1102 }
1103
1104 /*
1105 * See if we can queue up some more IO
1106 */
1107 do_ida_request(h->queue);
1108 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
1109 return IRQ_HANDLED;
1110}
1111
1112/*
1113 * This timer used to be for timing out requests that hadn't completed within
1114 * IDA_TIMEOUT. That wasn't such a good idea. Now it is used to
1115 * reset a flags structure so we don't flood the user with
1116 * "Non-Fatal error" messages.
1117 */
1118static void ida_timer(unsigned long tdata)
1119{
1120 ctlr_info_t *h = (ctlr_info_t*)tdata;
1121
1122 h->timer.expires = jiffies + IDA_TIMER;
1123 add_timer(&h->timer);
1124 h->misc_tflags = 0;
1125}
1126
1127/*
1128 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1129 * setting readahead and submitting commands from userspace to the controller.
1130 */
1131static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
1132{
1133 drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
1134 ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
1135 int error;
1136 int diskinfo[4];
1137 struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
1138 ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1139 ida_ioctl_t *my_io;
1140
1141 switch(cmd) {
1142 case HDIO_GETGEO:
1143 if (drv->cylinders) {
1144 diskinfo[0] = drv->heads;
1145 diskinfo[1] = drv->sectors;
1146 diskinfo[2] = drv->cylinders;
1147 } else {
1148 diskinfo[0] = 0xff;
1149 diskinfo[1] = 0x3f;
1150 diskinfo[2] = drv->nr_blks / (0xff*0x3f);
1151 }
1152 put_user(diskinfo[0], &geo->heads);
1153 put_user(diskinfo[1], &geo->sectors);
1154 put_user(diskinfo[2], &geo->cylinders);
1155 put_user(get_start_sect(inode->i_bdev), &geo->start);
1156 return 0;
1157 case IDAGETDRVINFO:
1158 if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1159 return -EFAULT;
1160 return 0;
1161 case IDAPASSTHRU:
1162 if (!capable(CAP_SYS_RAWIO))
1163 return -EPERM;
1164 my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1165 if (!my_io)
1166 return -ENOMEM;
1167 error = -EFAULT;
1168 if (copy_from_user(my_io, io, sizeof(*my_io)))
1169 goto out_passthru;
1170 error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1171 if (error)
1172 goto out_passthru;
1173 error = -EFAULT;
1174 if (copy_to_user(io, my_io, sizeof(*my_io)))
1175 goto out_passthru;
1176 error = 0;
1177out_passthru:
1178 kfree(my_io);
1179 return error;
1180 case IDAGETCTLRSIG:
1181 if (!arg) return -EINVAL;
1182 put_user(host->ctlr_sig, (int __user *)arg);
1183 return 0;
1184 case IDAREVALIDATEVOLS:
1185 if (iminor(inode) != 0)
1186 return -ENXIO;
1187 return revalidate_allvol(host);
1188 case IDADRIVERVERSION:
1189 if (!arg) return -EINVAL;
1190 put_user(DRIVER_VERSION, (unsigned long __user *)arg);
1191 return 0;
1192 case IDAGETPCIINFO:
1193 {
1194
1195 ida_pci_info_struct pciinfo;
1196
1197 if (!arg) return -EINVAL;
1198 pciinfo.bus = host->pci_dev->bus->number;
1199 pciinfo.dev_fn = host->pci_dev->devfn;
1200 pciinfo.board_id = host->board_id;
1201 if(copy_to_user((void __user *) arg, &pciinfo,
1202 sizeof( ida_pci_info_struct)))
1203 return -EFAULT;
1204 return(0);
1205 }
1206
1207 default:
1208 return -EINVAL;
1209 }
1210
1211}
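When a logical drive reports no geometry, the HDIO_GETGEO case above fabricates a 255-head, 63-sector layout and derives the cylinder count from the block count. A worked, standalone example of that arithmetic (the ~4 GiB volume size is just an assumed sample value):

#include <stdio.h>

int main(void)
{
	unsigned nr_blks = 8388608;		/* 512-byte blocks, ~4 GiB */
	unsigned heads = 0xff, sectors = 0x3f;	/* 255 heads, 63 sectors/track */
	unsigned cylinders = nr_blks / (heads * sectors);

	/* prints C/H/S = 522/255/63 */
	printf("C/H/S = %u/%u/%u\n", cylinders, heads, sectors);
	return 0;
}
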
1212/*
1213 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1214 * The command block (io) has already been copied to kernel space for us,
1215 * however, any elements in the sglist need to be copied to kernel space
1216 * or copied back to userspace.
1217 *
1218 * Only root may perform a controller passthru command; however, I'm not doing
1219 * any serious sanity checking on the arguments. Doing an IDA_WRITE_MEDIA and
1220 * putting a 64M buffer in the sglist is probably a *bad* idea.
1221 */
1222static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1223{
1224 int ctlr = h->ctlr;
1225 cmdlist_t *c;
1226 void *p = NULL;
1227 unsigned long flags;
1228 int error;
1229
1230 if ((c = cmd_alloc(h, 0)) == NULL)
1231 return -ENOMEM;
1232 c->ctlr = ctlr;
1233 c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1234 c->hdr.size = sizeof(rblk_t) >> 2;
1235 c->size += sizeof(rblk_t);
1236
1237 c->req.hdr.cmd = io->cmd;
1238 c->req.hdr.blk = io->blk;
1239 c->req.hdr.blk_cnt = io->blk_cnt;
1240 c->type = CMD_IOCTL_PEND;
1241
1242 /* Pre submit processing */
1243 switch(io->cmd) {
1244 case PASSTHRU_A:
1245 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1246 if (!p)
1247 {
1248 error = -ENOMEM;
1249 cmd_free(h, c, 0);
1250 return(error);
1251 }
1252 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1253 kfree(p);
1254 cmd_free(h, c, 0);
1255 return -EFAULT;
1256 }
1257 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
1258 sizeof(ida_ioctl_t),
1259 PCI_DMA_BIDIRECTIONAL);
1260 c->req.sg[0].size = io->sg[0].size;
1261 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1262 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1263 c->req.hdr.sg_cnt = 1;
1264 break;
1265 case IDA_READ:
1266 case READ_FLASH_ROM:
1267 case SENSE_CONTROLLER_PERFORMANCE:
1268 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1269 if (!p)
1270 {
1271 error = -ENOMEM;
1272 cmd_free(h, c, 0);
1273 return(error);
1274 }
1275
1276 c->req.sg[0].size = io->sg[0].size;
1277 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1278 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1279 c->req.hdr.sg_cnt = 1;
1280 break;
1281 case IDA_WRITE:
1282 case IDA_WRITE_MEDIA:
1283 case DIAG_PASS_THRU:
1284 case COLLECT_BUFFER:
1285 case WRITE_FLASH_ROM:
1286 p = kmalloc(io->sg[0].size, GFP_KERNEL);
1287 if (!p)
1288 {
1289 error = -ENOMEM;
1290 cmd_free(h, c, 0);
1291 return(error);
1292 }
1293 if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
1294 kfree(p);
1295 cmd_free(h, c, 0);
1296 return -EFAULT;
1297 }
1298 c->req.sg[0].size = io->sg[0].size;
1299 c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
1300 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1301 c->req.hdr.sg_cnt = 1;
1302 break;
1303 default:
1304 c->req.sg[0].size = sizeof(io->c);
1305 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
1306 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1307 c->req.hdr.sg_cnt = 1;
1308 }
1309
1310 /* Put the request on the tail of the request queue */
1311 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1312 addQ(&h->reqQ, c);
1313 h->Qdepth++;
1314 start_io(h);
1315 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1316
1317 /* Wait for completion */
1318 while(c->type != CMD_IOCTL_DONE)
1319 schedule();
1320
1321 /* Unmap the DMA */
1322 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
1323 PCI_DMA_BIDIRECTIONAL);
1324 /* Post submit processing */
1325 switch(io->cmd) {
1326 case PASSTHRU_A:
1327 pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1328 sizeof(ida_ioctl_t),
1329 PCI_DMA_BIDIRECTIONAL);
1330 case IDA_READ:
1331 case DIAG_PASS_THRU:
1332 case SENSE_CONTROLLER_PERFORMANCE:
1333 case READ_FLASH_ROM:
1334 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1335 kfree(p);
1336 return -EFAULT;
1337 }
1338 /* fall through and free p */
1339 case IDA_WRITE:
1340 case IDA_WRITE_MEDIA:
1341 case COLLECT_BUFFER:
1342 case WRITE_FLASH_ROM:
1343 kfree(p);
1344 break;
1345 default:;
1346 /* Nothing to do */
1347 }
1348
1349 io->rcode = c->req.hdr.rcode;
1350 cmd_free(h, c, 0);
1351 return(0);
1352}
1353
1354/*
1355 * Commands are pre-allocated in a large block. Here we use a simple bitmap
1356 * scheme to suballocate them to the driver. Operations that are not time
1357 * critical (and can wait for kmalloc and possibly sleep) can pass 0 as the
1358 * get_from_pool argument to get a freshly allocated command.
1359 */
1360static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1361{
1362 cmdlist_t * c;
1363 int i;
1364 dma_addr_t cmd_dhandle;
1365
1366 if (!get_from_pool) {
1367 c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
1368 sizeof(cmdlist_t), &cmd_dhandle);
1369 if(c==NULL)
1370 return NULL;
1371 } else {
1372 do {
1373 i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1374 if (i == NR_CMDS)
1375 return NULL;
1376 } while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1377 c = h->cmd_pool + i;
1378 cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1379 h->nr_allocs++;
1380 }
1381
1382 memset(c, 0, sizeof(cmdlist_t));
1383 c->busaddr = cmd_dhandle;
1384 return c;
1385}
1386
1387static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1388{
1389 int i;
1390
1391 if (!got_from_pool) {
1392 pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1393 c->busaddr);
1394 } else {
1395 i = c - h->cmd_pool;
1396 clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1397 h->nr_frees++;
1398 }
1399}
1400
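cmd_alloc() and cmd_free() above treat the pre-allocated command block as a pool and track which slots are busy in a bitmap; the kernel version scans with find_first_zero_bit() and claims a slot with test_and_set_bit() so concurrent allocators cannot race. A simplified, single-threaded sketch of the same idea (plain array scan, hypothetical slot_alloc()/slot_free() names):

#include <stdio.h>
#include <limits.h>

#define POOL_SIZE	128
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)
#define NWORDS		((POOL_SIZE + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long pool_bits[NWORDS];

/* return the index of a free slot and mark it busy, or -1 if exhausted */
static int slot_alloc(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		unsigned long *word = &pool_bits[i / BITS_PER_WORD];
		unsigned long mask = 1UL << (i % BITS_PER_WORD);

		if (!(*word & mask)) {
			*word |= mask;
			return i;
		}
	}
	return -1;
}

static void slot_free(int i)
{
	pool_bits[i / BITS_PER_WORD] &= ~(1UL << (i % BITS_PER_WORD));
}

int main(void)
{
	int a = slot_alloc(), b = slot_alloc();

	printf("allocated slots %d and %d\n", a, b);		/* 0 and 1 */
	slot_free(a);
	printf("slot %d is reused next: %d\n", a, slot_alloc());	/* 0 */
	return 0;
}
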
1401/***********************************************************************
1402 name: sendcmd
1403 Send a command to an IDA using the memory mapped FIFO interface
1404 and wait for it to complete.
1405 This routine should only be called at init time.
1406***********************************************************************/
1407static int sendcmd(
1408 __u8 cmd,
1409 int ctlr,
1410 void *buff,
1411 size_t size,
1412 unsigned int blk,
1413 unsigned int blkcnt,
1414 unsigned int log_unit )
1415{
1416 cmdlist_t *c;
1417 int complete;
1418 unsigned long temp;
1419 unsigned long i;
1420 ctlr_info_t *info_p = hba[ctlr];
1421
1422 c = cmd_alloc(info_p, 1);
1423 if(!c)
1424 return IO_ERROR;
1425 c->ctlr = ctlr;
1426 c->hdr.unit = log_unit;
1427 c->hdr.prio = 0;
1428 c->hdr.size = sizeof(rblk_t) >> 2;
1429 c->size += sizeof(rblk_t);
1430
1431 /* The request information. */
1432 c->req.hdr.next = 0;
1433 c->req.hdr.rcode = 0;
1434 c->req.bp = 0;
1435 c->req.hdr.sg_cnt = 1;
1436 c->req.hdr.reserved = 0;
1437
1438 if (size == 0)
1439 c->req.sg[0].size = 512;
1440 else
1441 c->req.sg[0].size = size;
1442
1443 c->req.hdr.blk = blk;
1444 c->req.hdr.blk_cnt = blkcnt;
1445 c->req.hdr.cmd = (unsigned char) cmd;
1446 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
1447 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1448 /*
1449 * Disable interrupt
1450 */
1451 info_p->access.set_intr_mask(info_p, 0);
1452 /* Make sure there is room in the command FIFO */
1453 /* Actually it should be completely empty at this time. */
1454 for (i = 200000; i > 0; i--) {
1455 temp = info_p->access.fifo_full(info_p);
1456 if (temp != 0) {
1457 break;
1458 }
1459 udelay(10);
1460DBG(
1461 printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1462 " waiting!\n", ctlr);
1463);
1464 }
1465 /*
1466 * Send the cmd
1467 */
1468 info_p->access.submit_command(info_p, c);
1469 complete = pollcomplete(ctlr);
1470
1471 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
1472 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1473 if (complete != 1) {
1474 if (complete != c->busaddr) {
1475 printk( KERN_WARNING
1476 "cpqarray ida%d: idaSendPciCmd "
1477 "Invalid command list address returned! (%08lx)\n",
1478 ctlr, (unsigned long)complete);
1479 cmd_free(info_p, c, 1);
1480 return (IO_ERROR);
1481 }
1482 } else {
1483 printk( KERN_WARNING
1484 "cpqarray ida%d: idaSendPciCmd timed out, "
1485 "No command list address returned!\n",
1486 ctlr);
1487 cmd_free(info_p, c, 1);
1488 return (IO_ERROR);
1489 }
1490
1491 if (c->req.hdr.rcode & 0x00FE) {
1492 if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1493 printk( KERN_WARNING
1494 "cpqarray ida%d: idaSendPciCmd, error: "
1495 "Controller failed at init time "
1496 "cmd: 0x%x, return code = 0x%x\n",
1497 ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1498
1499 cmd_free(info_p, c, 1);
1500 return (IO_ERROR);
1501 }
1502 }
1503 cmd_free(info_p, c, 1);
1504 return (IO_OK);
1505}
1506
1507/*
1508 * revalidate_allvol is for online array config utilities. After a
1509 * utility reconfigures the drives in the array, it can use this function
1510 * (through an ioctl) to make the driver zap any previous disk structs for
1511 * that controller and get new ones.
1512 *
1513 * Right now I'm using the getgeometry() function to do this, but this
1514 * function should probably be finer grained and allow you to revalidate one
1515 * particular logical volume (instead of all of them on a particular
1516 * controller).
1517 */
1518static int revalidate_allvol(ctlr_info_t *host)
1519{
1520 int ctlr = host->ctlr;
1521 int i;
1522 unsigned long flags;
1523
1524 spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1525 if (host->usage_count > 1) {
1526 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1527 printk(KERN_WARNING "cpqarray: Device busy for volume"
1528 " revalidation (usage=%d)\n", host->usage_count);
1529 return -EBUSY;
1530 }
1531 host->usage_count++;
1532 spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1533
1534 /*
1535 * Set the partition and block size structures for all volumes
1536 * on this controller to zero. We will reread all of this data.
1537 */
1538 set_capacity(ida_gendisk[ctlr][0], 0);
1539 for (i = 1; i < NWD; i++) {
1540 struct gendisk *disk = ida_gendisk[ctlr][i];
1541 if (disk->flags & GENHD_FL_UP)
1542 del_gendisk(disk);
1543 }
1544 memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1545
1546 /*
1547 * Tell the array controller not to give us any interrupts while
1548 * we check the new geometry. Then turn interrupts back on when
1549 * we're done.
1550 */
1551 host->access.set_intr_mask(host, 0);
1552 getgeometry(ctlr);
1553 host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1554
1555 for(i=0; i<NWD; i++) {
1556 struct gendisk *disk = ida_gendisk[ctlr][i];
1557 drv_info_t *drv = &host->drv[i];
1558 if (i && !drv->nr_blks)
1559 continue;
1560 blk_queue_hardsect_size(host->queue, drv->blk_size);
1561 set_capacity(disk, drv->nr_blks);
1562 disk->queue = host->queue;
1563 disk->private_data = drv;
1564 if (i)
1565 add_disk(disk);
1566 }
1567
1568 host->usage_count--;
1569 return 0;
1570}
1571
1572static int ida_revalidate(struct gendisk *disk)
1573{
1574 drv_info_t *drv = disk->private_data;
1575 set_capacity(disk, drv->nr_blks);
1576 return 0;
1577}
1578
1579/********************************************************************
1580 name: pollcomplete
1581 Wait, by polling, for a command to complete.
1582 The memory-mapped FIFO is polled for the completion.
1583 Used only at init time, with interrupts disabled.
1584 ********************************************************************/
1585static int pollcomplete(int ctlr)
1586{
1587 int done;
1588 int i;
1589
1590 /* Wait (up to 2 seconds) for a command to complete */
1591
1592 for (i = 200000; i > 0; i--) {
1593 done = hba[ctlr]->access.command_completed(hba[ctlr]);
1594 if (done == 0) {
1595 udelay(10); /* a short fixed delay */
1596 } else
1597 return (done);
1598 }
1599 /* Invalid address to tell caller we ran out of time */
1600 return 1;
1601}
1602/*****************************************************************
1603 start_fwbk
1604 Starts the controller firmware's background processing.
1605 Currently only the Integrated Raid controller needs this done.
1606 If the PCI mem address registers are written to after this,
1607 data corruption may occur
1608*****************************************************************/
1609static void start_fwbk(int ctlr)
1610{
1611 id_ctlr_t *id_ctlr_buf;
1612 int ret_code;
1613
1614 if( (hba[ctlr]->board_id != 0x40400E11)
1615 && (hba[ctlr]->board_id != 0x40480E11) )
1616
1617 /* Not an Integrated Raid, so there is nothing for us to do */
1618 return;
1619 printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1620 " processing\n");
1621 /* Command does not return anything, but sendcmd() needs a
1622 buffer */
1623 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1624 if(id_ctlr_buf==NULL)
1625 {
1626 printk(KERN_WARNING "cpqarray: Out of memory. "
1627 "Unable to start background processing.\n");
1628 return;
1629 }
1630 ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
1631 id_ctlr_buf, 0, 0, 0, 0);
1632 if(ret_code != IO_OK)
1633 printk(KERN_WARNING "cpqarray: Unable to start"
1634 " background processing\n");
1635
1636 kfree(id_ctlr_buf);
1637}
1638/*****************************************************************
1639 getgeometry
1640 Get ida logical volume geometry from the controller.
1641 This is a large bit of code which once existed in two flavors.
1642 It is used only at init time.
1643*****************************************************************/
1644static void getgeometry(int ctlr)
1645{
1646 id_log_drv_t *id_ldrive;
1647 id_ctlr_t *id_ctlr_buf;
1648 sense_log_drv_stat_t *id_lstatus_buf;
1649 config_t *sense_config_buf;
1650 unsigned int log_unit, log_index;
1651 int ret_code, size;
1652 drv_info_t *drv;
1653 ctlr_info_t *info_p = hba[ctlr];
1654 int i;
1655
1656 info_p->log_drv_map = 0;
1657
1658 id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1659 if(id_ldrive == NULL)
1660 {
1661 printk( KERN_ERR "cpqarray: out of memory.\n");
1662 return;
1663 }
1664
1665 id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1666 if(id_ctlr_buf == NULL)
1667 {
1668 kfree(id_ldrive);
1669 printk( KERN_ERR "cpqarray: out of memory.\n");
1670 return;
1671 }
1672
1673 id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1674 if(id_lstatus_buf == NULL)
1675 {
1676 kfree(id_ctlr_buf);
1677 kfree(id_ldrive);
1678 printk( KERN_ERR "cpqarray: out of memory.\n");
1679 return;
1680 }
1681
1682 sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
1683 if(sense_config_buf == NULL)
1684 {
1685 kfree(id_lstatus_buf);
1686 kfree(id_ctlr_buf);
1687 kfree(id_ldrive);
1688 printk( KERN_ERR "cpqarray: out of memory.\n");
1689 return;
1690 }
1691
1692 memset(id_ldrive, 0, sizeof(id_log_drv_t));
1693 memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
1694 memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
1695 memset(sense_config_buf, 0, sizeof(config_t));
1696
1697 info_p->phys_drives = 0;
1698 info_p->log_drv_map = 0;
1699 info_p->drv_assign_map = 0;
1700 info_p->drv_spare_map = 0;
1701 info_p->mp_failed_drv_map = 0; /* only initialized here */
1702 /* Get this controller's info */
1703 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1704 if (ret_code == IO_ERROR) {
1705 /*
1706 * If we can't get controller info, set the logical drive map to 0,
1707 * so the idastubopen will fail on all logical drives
1708 * on the controller.
1709 */
1710 /* Free all the buffers and return */
1711 printk(KERN_ERR "cpqarray: error sending ID controller\n");
1712 kfree(sense_config_buf);
1713 kfree(id_lstatus_buf);
1714 kfree(id_ctlr_buf);
1715 kfree(id_ldrive);
1716 return;
1717 }
1718
1719 info_p->log_drives = id_ctlr_buf->nr_drvs;
1720 for(i=0;i<4;i++)
1721 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1722 info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1723
1724 printk(" (%s)\n", info_p->product_name);
1725 /*
1726 * Initialize logical drive map to zero
1727 */
1728 log_index = 0;
1729 /*
1730 * Get drive geometry for all logical drives
1731 */
1732 if (id_ctlr_buf->nr_drvs > 16)
1733 printk(KERN_WARNING "cpqarray ida%d: This driver supports "
1734 "16 logical drives per controller. "
1735 "Additional drives will not be "
1736 "detected.\n", ctlr);
1737
1738 for (log_unit = 0;
1739 (log_index < id_ctlr_buf->nr_drvs)
1740 && (log_unit < NWD);
1741 log_unit++) {
1742 struct gendisk *disk = ida_gendisk[ctlr][log_unit];
1743
1744 size = sizeof(sense_log_drv_stat_t);
1745
1746 /*
1747 Send "Identify logical drive status" cmd
1748 */
1749 ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1750 ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1751 if (ret_code == IO_ERROR) {
1752 /*
1753 If we can't get the logical drive status, set
1754 the logical drive map to 0, so the
1755 idastubopen will fail for all logical drives
1756 on the controller.
1757 */
1758 info_p->log_drv_map = 0;
1759 printk( KERN_WARNING
1760 "cpqarray ida%d: idaGetGeometry - Controller"
1761 " failed to report status of logical drive %d\n"
1762 "Access to this controller has been disabled\n",
1763 ctlr, log_unit);
1764 /* Free all the buffers and return */
1765 kfree(sense_config_buf);
1766 kfree(id_lstatus_buf);
1767 kfree(id_ctlr_buf);
1768 kfree(id_ldrive);
1769 return;
1770 }
1771 /*
1772 Make sure the logical drive is configured
1773 */
1774 if (id_lstatus_buf->status != LOG_NOT_CONF) {
1775 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1776 sizeof(id_log_drv_t), 0, 0, log_unit);
1777 /*
1778 If error, the bit for this
1779 logical drive won't be set and
1780 idastubopen will return error.
1781 */
1782 if (ret_code != IO_ERROR) {
1783 drv = &info_p->drv[log_unit];
1784 drv->blk_size = id_ldrive->blk_size;
1785 drv->nr_blks = id_ldrive->nr_blks;
1786 drv->cylinders = id_ldrive->drv.cyl;
1787 drv->heads = id_ldrive->drv.heads;
1788 drv->sectors = id_ldrive->drv.sect_per_track;
1789 info_p->log_drv_map |= (1 << log_unit);
1790
1791 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1792 ctlr, log_unit, drv->blk_size, drv->nr_blks);
1793 ret_code = sendcmd(SENSE_CONFIG,
1794 ctlr, sense_config_buf,
1795 sizeof(config_t), 0, 0, log_unit);
1796 if (ret_code == IO_ERROR) {
1797 info_p->log_drv_map = 0;
1798 /* Free all the buffers and return */
1799 printk(KERN_ERR "cpqarray: error sending sense config\n");
1800 kfree(sense_config_buf);
1801 kfree(id_lstatus_buf);
1802 kfree(id_ctlr_buf);
1803 kfree(id_ldrive);
1804 return;
1805
1806 }
1807
1808 sprintf(disk->devfs_name, "ida/c%dd%d", ctlr, log_unit);
1809
1810 info_p->phys_drives =
1811 sense_config_buf->ctlr_phys_drv;
1812 info_p->drv_assign_map
1813 |= sense_config_buf->drv_asgn_map;
1814 info_p->drv_assign_map
1815 |= sense_config_buf->spare_asgn_map;
1816 info_p->drv_spare_map
1817 |= sense_config_buf->spare_asgn_map;
1818 } /* end of if no error on id_ldrive */
1819 log_index = log_index + 1;
1820 } /* end of if logical drive configured */
1821 } /* end of for log_unit */
1822 kfree(sense_config_buf);
1823 kfree(id_ldrive);
1824 kfree(id_lstatus_buf);
1825 kfree(id_ctlr_buf);
1826 return;
1827
1828}
1829
1830static void __exit cpqarray_exit(void)
1831{
1832 int i;
1833
1834 pci_unregister_driver(&cpqarray_pci_driver);
1835
1836 /* Double check that all controller entries have been removed */
1837 for(i=0; i<MAX_CTLR; i++) {
1838 if (hba[i] != NULL) {
1839 printk(KERN_WARNING "cpqarray: Removing EISA "
1840 "controller %d\n", i);
1841 cpqarray_remove_one_eisa(i);
1842 }
1843 }
1844
1845 devfs_remove("ida");
1846 remove_proc_entry("cpqarray", proc_root_driver);
1847}
1848
1849module_init(cpqarray_init)
1850module_exit(cpqarray_exit)
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
new file mode 100644
index 000000000000..be73e9d579c5
--- /dev/null
+++ b/drivers/block/cpqarray.h
@@ -0,0 +1,126 @@
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 * If you want to make changes, improve or add functionality to this
22 * driver, you'll probably need the Compaq Array Controller Interface
23 * Specification (Document number ECG086/1198)
24 */
25#ifndef CPQARRAY_H
26#define CPQARRAY_H
27
28#ifdef __KERNEL__
29#include <linux/blkdev.h>
30#include <linux/slab.h>
31#include <linux/proc_fs.h>
32#include <linux/timer.h>
33#endif
34
35#include "ida_cmd.h"
36
37#define IO_OK 0
38#define IO_ERROR 1
39#define NWD 16
40#define NWD_SHIFT 4
41
42#define IDA_TIMER (5*HZ)
43#define IDA_TIMEOUT (10*HZ)
44
45#define MISC_NONFATAL_WARN 0x01
46
47typedef struct {
48 unsigned blk_size;
49 unsigned nr_blks;
50 unsigned cylinders;
51 unsigned heads;
52 unsigned sectors;
53 int usage_count;
54} drv_info_t;
55
56#ifdef __KERNEL__
57
58struct ctlr_info;
59typedef struct ctlr_info ctlr_info_t;
60
61struct access_method {
62 void (*submit_command)(ctlr_info_t *h, cmdlist_t *c);
63 void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
64 unsigned long (*fifo_full)(ctlr_info_t *h);
65 unsigned long (*intr_pending)(ctlr_info_t *h);
66 unsigned long (*command_completed)(ctlr_info_t *h);
67};
68
69struct board_type {
70 __u32 board_id;
71 char *product_name;
72 struct access_method *access;
73};
74
75struct ctlr_info {
76 int ctlr;
77 char devname[8];
78 __u32 log_drv_map;
79 __u32 drv_assign_map;
80 __u32 drv_spare_map;
81 __u32 mp_failed_drv_map;
82
83 char firm_rev[4];
84 int ctlr_sig;
85
86 int log_drives;
87 int phys_drives;
88
89 struct pci_dev *pci_dev; /* NULL if EISA */
90 __u32 board_id;
91 char *product_name;
92
93 void __iomem *vaddr;
94 unsigned long paddr;
95 unsigned long io_mem_addr;
96 unsigned long io_mem_length;
97 int intr;
98 int usage_count;
99 drv_info_t drv[NWD];
100 struct proc_dir_entry *proc;
101
102 struct access_method access;
103
104 cmdlist_t *reqQ;
105 cmdlist_t *cmpQ;
106 cmdlist_t *cmd_pool;
107 dma_addr_t cmd_pool_dhandle;
108 unsigned long *cmd_pool_bits;
109 struct request_queue *queue;
110 spinlock_t lock;
111
112 unsigned int Qdepth;
113 unsigned int maxQsinceinit;
114
115 unsigned int nr_requests;
116 unsigned int nr_allocs;
117 unsigned int nr_frees;
118 struct timer_list timer;
119 unsigned int misc_tflags;
120};
121
122#define IDA_LOCK(i) (&hba[i]->lock)
123
124#endif
125
126#endif /* CPQARRAY_H */
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
new file mode 100644
index 000000000000..5be6f998d8c5
--- /dev/null
+++ b/drivers/block/cryptoloop.c
@@ -0,0 +1,268 @@
1/*
2 Linux loop encryption enabling module
3
4 Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
5 Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
6
7 This module is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This module is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this module; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/module.h>
23
24#include <linux/init.h>
25#include <linux/string.h>
26#include <linux/crypto.h>
27#include <linux/blkdev.h>
28#include <linux/loop.h>
29#include <asm/semaphore.h>
30#include <asm/uaccess.h>
31
32MODULE_LICENSE("GPL");
33MODULE_DESCRIPTION("loop blockdevice transfer function adaptor / CryptoAPI");
34MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
35
36#define LOOP_IV_SECTOR_BITS 9
37#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
38
39static int
40cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
41{
42 int err = -EINVAL;
43 char cms[LO_NAME_SIZE]; /* cipher-mode string */
44 char *cipher;
45 char *mode;
46 char *cmsp = cms; /* c-m string pointer */
47 struct crypto_tfm *tfm = NULL;
48
49 /* encryption breaks for non-sector-aligned offsets */
50
51 if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
52 goto out;
53
54 strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
55 cms[LO_NAME_SIZE - 1] = 0;
56 cipher = strsep(&cmsp, "-");
57 mode = strsep(&cmsp, "-");
58
59 if (mode == NULL || strcmp(mode, "cbc") == 0)
60 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
61 else if (strcmp(mode, "ecb") == 0)
62 tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
63 if (tfm == NULL)
64 return -EINVAL;
65
66 err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
67 info->lo_encrypt_key_size);
68
69 if (err != 0)
70 goto out_free_tfm;
71
72 lo->key_data = tfm;
73 return 0;
74
75 out_free_tfm:
76 crypto_free_tfm(tfm);
77
78 out:
79 return err;
80}
81
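cryptoloop_init() above splits the user-supplied cipher-mode string (for example "aes-cbc") with strsep(); when no mode is given, CBC is assumed. A small userspace demonstration of just the parsing step (the parse() helper is illustrative):

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

static void parse(const char *name)
{
	char cms[64];
	char *cmsp = cms, *cipher, *mode;

	strncpy(cms, name, sizeof(cms) - 1);
	cms[sizeof(cms) - 1] = '\0';
	cipher = strsep(&cmsp, "-");
	mode = strsep(&cmsp, "-");
	printf("%-10s -> cipher=%s mode=%s\n", name, cipher,
	       mode ? mode : "cbc (default)");
}

int main(void)
{
	parse("aes-cbc");	/* explicit CBC */
	parse("aes-ecb");	/* ECB */
	parse("serpent");	/* no mode given: falls back to CBC */
	return 0;
}
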
82
83typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
84 struct scatterlist *sg_out,
85 struct scatterlist *sg_in,
86 unsigned int nsg);
87
88
89static int
90cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
91 struct page *raw_page, unsigned raw_off,
92 struct page *loop_page, unsigned loop_off,
93 int size, sector_t IV)
94{
95 struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
96 struct scatterlist sg_out = { NULL, };
97 struct scatterlist sg_in = { NULL, };
98
99 encdec_ecb_t encdecfunc;
100 struct page *in_page, *out_page;
101 unsigned in_offs, out_offs;
102
103 if (cmd == READ) {
104 in_page = raw_page;
105 in_offs = raw_off;
106 out_page = loop_page;
107 out_offs = loop_off;
108 encdecfunc = tfm->crt_u.cipher.cit_decrypt;
109 } else {
110 in_page = loop_page;
111 in_offs = loop_off;
112 out_page = raw_page;
113 out_offs = raw_off;
114 encdecfunc = tfm->crt_u.cipher.cit_encrypt;
115 }
116
117 while (size > 0) {
118 const int sz = min(size, LOOP_IV_SECTOR_SIZE);
119
120 sg_in.page = in_page;
121 sg_in.offset = in_offs;
122 sg_in.length = sz;
123
124 sg_out.page = out_page;
125 sg_out.offset = out_offs;
126 sg_out.length = sz;
127
128 encdecfunc(tfm, &sg_out, &sg_in, sz);
129
130 size -= sz;
131 in_offs += sz;
132 out_offs += sz;
133 }
134
135 return 0;
136}
137
138typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
139 struct scatterlist *sg_out,
140 struct scatterlist *sg_in,
141 unsigned int nsg, u8 *iv);
142
143static int
144cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
145 struct page *raw_page, unsigned raw_off,
146 struct page *loop_page, unsigned loop_off,
147 int size, sector_t IV)
148{
149 struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
150 struct scatterlist sg_out = { NULL, };
151 struct scatterlist sg_in = { NULL, };
152
153 encdec_cbc_t encdecfunc;
154 struct page *in_page, *out_page;
155 unsigned in_offs, out_offs;
156
157 if (cmd == READ) {
158 in_page = raw_page;
159 in_offs = raw_off;
160 out_page = loop_page;
161 out_offs = loop_off;
162 encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
163 } else {
164 in_page = loop_page;
165 in_offs = loop_off;
166 out_page = raw_page;
167 out_offs = raw_off;
168 encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
169 }
170
171 while (size > 0) {
172 const int sz = min(size, LOOP_IV_SECTOR_SIZE);
173 u32 iv[4] = { 0, };
174 iv[0] = cpu_to_le32(IV & 0xffffffff);
175
176 sg_in.page = in_page;
177 sg_in.offset = in_offs;
178 sg_in.length = sz;
179
180 sg_out.page = out_page;
181 sg_out.offset = out_offs;
182 sg_out.length = sz;
183
184 encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
185
186 IV++;
187 size -= sz;
188 in_offs += sz;
189 out_offs += sz;
190 }
191
192 return 0;
193}
194
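cryptoloop_transfer_cbc() above derives a fresh CBC initialisation vector for every 512-byte sector from nothing but the sector number: the low 32 bits are stored little-endian in the first IV word, the rest of the IV is zero, and the counter is incremented per sector. A standalone sketch of that IV construction (the 16-byte IV size assumes a 128-bit block cipher such as AES):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* build the per-sector IV the way cryptoloop does: LE32 sector number,
 * zero padding for the remaining bytes */
static void sector_iv(uint64_t sector, unsigned char iv[16])
{
	uint32_t lo = (uint32_t)(sector & 0xffffffff);

	memset(iv, 0, 16);
	iv[0] = lo & 0xff;
	iv[1] = (lo >> 8) & 0xff;
	iv[2] = (lo >> 16) & 0xff;
	iv[3] = (lo >> 24) & 0xff;
}

int main(void)
{
	unsigned char iv[16];
	int i;

	sector_iv(0x12345678, iv);
	for (i = 0; i < 16; i++)
		printf("%02x", iv[i]);
	printf("\n");	/* 78563412 followed by 24 zero hex digits */
	return 0;
}
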
195static int
196cryptoloop_transfer(struct loop_device *lo, int cmd,
197 struct page *raw_page, unsigned raw_off,
198 struct page *loop_page, unsigned loop_off,
199 int size, sector_t IV)
200{
201 struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
202 if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
203 {
204 lo->transfer = cryptoloop_transfer_ecb;
205 return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
206 loop_page, loop_off, size, IV);
207 }
208 if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
209 {
210 lo->transfer = cryptoloop_transfer_cbc;
211 return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
212 loop_page, loop_off, size, IV);
213 }
214
215 /* This is not supposed to happen */
216
217 printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
218 return -EINVAL;
219}
220
221static int
222cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
223{
224 return -EINVAL;
225}
226
227static int
228cryptoloop_release(struct loop_device *lo)
229{
230 struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
231 if (tfm != NULL) {
232 crypto_free_tfm(tfm);
233 lo->key_data = NULL;
234 return 0;
235 }
236 printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
237 return -EINVAL;
238}
239
240static struct loop_func_table cryptoloop_funcs = {
241 .number = LO_CRYPT_CRYPTOAPI,
242 .init = cryptoloop_init,
243 .ioctl = cryptoloop_ioctl,
244 .transfer = cryptoloop_transfer,
245 .release = cryptoloop_release,
246 .owner = THIS_MODULE
247};
248
249static int __init
250init_cryptoloop(void)
251{
252 int rc = loop_register_transfer(&cryptoloop_funcs);
253
254 if (rc)
255 printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
256 return rc;
257}
258
259static void __exit
260cleanup_cryptoloop(void)
261{
262 if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
263 printk(KERN_ERR
264 "cryptoloop: loop_unregister_transfer failed\n");
265}
266
267module_init(init_cryptoloop);
268module_exit(cleanup_cryptoloop);
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
new file mode 100644
index 000000000000..d63d34c671f7
--- /dev/null
+++ b/drivers/block/deadline-iosched.c
@@ -0,0 +1,967 @@
1/*
2 * linux/drivers/block/deadline-iosched.c
3 *
4 * Deadline i/o scheduler.
5 *
6 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
7 */
8#include <linux/kernel.h>
9#include <linux/fs.h>
10#include <linux/blkdev.h>
11#include <linux/elevator.h>
12#include <linux/bio.h>
13#include <linux/config.h>
14#include <linux/module.h>
15#include <linux/slab.h>
16#include <linux/init.h>
17#include <linux/compiler.h>
18#include <linux/hash.h>
19#include <linux/rbtree.h>
20
21/*
22 * See Documentation/block/deadline-iosched.txt
23 */
24static int read_expire = HZ / 2; /* max time before a read is submitted. */
25static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
26static int writes_starved = 2; /* max times reads can starve a write */
27static int fifo_batch = 16; /* # of sequential requests treated as one
28 by the above parameters. For throughput. */
29
30static const int deadline_hash_shift = 5;
31#define DL_HASH_BLOCK(sec) ((sec) >> 3)
32#define DL_HASH_FN(sec) (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
33#define DL_HASH_ENTRIES (1 << deadline_hash_shift)
34#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
35#define list_entry_hash(ptr) list_entry((ptr), struct deadline_rq, hash)
36#define ON_HASH(drq) (drq)->on_hash
37
38struct deadline_data {
39 /*
40 * run time data
41 */
42
43 /*
44 * requests (deadline_rq s) are present on both sort_list and fifo_list
45 */
46 struct rb_root sort_list[2];
47 struct list_head fifo_list[2];
48
49 /*
50 * next drq in sort order; the read one, the write one, or both may be NULL
51 */
52 struct deadline_rq *next_drq[2];
53 struct list_head *dispatch; /* driver dispatch queue */
54 struct list_head *hash; /* request hash */
55 unsigned int batching; /* number of sequential requests made */
56 sector_t last_sector; /* head position */
57 unsigned int starved; /* times reads have starved writes */
58
59 /*
60 * settings that change how the i/o scheduler behaves
61 */
62 int fifo_expire[2];
63 int fifo_batch;
64 int writes_starved;
65 int front_merges;
66
67 mempool_t *drq_pool;
68};
69
70/*
71 * per-request data.
72 */
73struct deadline_rq {
74 /*
75 * rbtree index, key is the starting offset
76 */
77 struct rb_node rb_node;
78 sector_t rb_key;
79
80 struct request *request;
81
82 /*
83 * request hash, key is the ending offset (for back merge lookup)
84 */
85 struct list_head hash;
86 char on_hash;
87
88 /*
89 * expire fifo
90 */
91 struct list_head fifo;
92 unsigned long expires;
93};
94
95static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
96
97static kmem_cache_t *drq_pool;
98
99#define RQ_DATA(rq) ((struct deadline_rq *) (rq)->elevator_private)
100
101/*
102 * the back merge hash support functions
103 */
104static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
105{
106 drq->on_hash = 0;
107 list_del_init(&drq->hash);
108}
109
110static inline void deadline_del_drq_hash(struct deadline_rq *drq)
111{
112 if (ON_HASH(drq))
113 __deadline_del_drq_hash(drq);
114}
115
116static void
117deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
118{
119 deadline_del_drq_hash(drq);
120
121 if (q->last_merge == drq->request)
122 q->last_merge = NULL;
123}
124
125static inline void
126deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
127{
128 struct request *rq = drq->request;
129
130 BUG_ON(ON_HASH(drq));
131
132 drq->on_hash = 1;
133 list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
134}
135
136/*
137 * move hot entry to front of chain
138 */
139static inline void
140deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
141{
142 struct request *rq = drq->request;
143 struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
144
145 if (ON_HASH(drq) && drq->hash.prev != head) {
146 list_del(&drq->hash);
147 list_add(&drq->hash, head);
148 }
149}
150
151static struct request *
152deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
153{
154 struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
155 struct list_head *entry, *next = hash_list->next;
156
157 while ((entry = next) != hash_list) {
158 struct deadline_rq *drq = list_entry_hash(entry);
159 struct request *__rq = drq->request;
160
161 next = entry->next;
162
163 BUG_ON(!ON_HASH(drq));
164
165 if (!rq_mergeable(__rq)) {
166 __deadline_del_drq_hash(drq);
167 continue;
168 }
169
170 if (rq_hash_key(__rq) == offset)
171 return __rq;
172 }
173
174 return NULL;
175}
176
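The hash above indexes pending requests by their ending sector (rq_hash_key()), which is exactly the lookup a back merge needs: a new bio can be appended to a queued request precisely when it starts where that request ends. A tiny standalone illustration of that key relationship (struct req and the sample numbers are hypothetical):

#include <stdio.h>

struct req {
	unsigned long sector;		/* first sector of the request */
	unsigned long nr_sectors;	/* length in sectors */
};

/* same key the deadline hash uses: one past the last sector */
static unsigned long rq_hash_key_ex(const struct req *rq)
{
	return rq->sector + rq->nr_sectors;
}

int main(void)
{
	struct req rq = { .sector = 1024, .nr_sectors = 8 };
	unsigned long bio_sector = 1032;	/* candidate bio starts here */

	/* back-mergeable exactly when the bio begins where rq ends */
	printf("back-mergeable: %d\n", rq_hash_key_ex(&rq) == bio_sector);
	return 0;
}
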
177/*
178 * rb tree support functions
179 */
180#define RB_NONE (2)
181#define RB_EMPTY(root) ((root)->rb_node == NULL)
182#define ON_RB(node) ((node)->rb_color != RB_NONE)
183#define RB_CLEAR(node) ((node)->rb_color = RB_NONE)
184#define rb_entry_drq(node) rb_entry((node), struct deadline_rq, rb_node)
185#define DRQ_RB_ROOT(dd, drq) (&(dd)->sort_list[rq_data_dir((drq)->request)])
186#define rq_rb_key(rq) (rq)->sector
187
188static struct deadline_rq *
189__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
190{
191 struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
192 struct rb_node *parent = NULL;
193 struct deadline_rq *__drq;
194
195 while (*p) {
196 parent = *p;
197 __drq = rb_entry_drq(parent);
198
199 if (drq->rb_key < __drq->rb_key)
200 p = &(*p)->rb_left;
201 else if (drq->rb_key > __drq->rb_key)
202 p = &(*p)->rb_right;
203 else
204 return __drq;
205 }
206
207 rb_link_node(&drq->rb_node, parent, p);
208 return NULL;
209}
210
211static void
212deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
213{
214 struct deadline_rq *__alias;
215
216 drq->rb_key = rq_rb_key(drq->request);
217
218retry:
219 __alias = __deadline_add_drq_rb(dd, drq);
220 if (!__alias) {
221 rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
222 return;
223 }
224
225 deadline_move_request(dd, __alias);
226 goto retry;
227}
228
229static inline void
230deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
231{
232 const int data_dir = rq_data_dir(drq->request);
233
234 if (dd->next_drq[data_dir] == drq) {
235 struct rb_node *rbnext = rb_next(&drq->rb_node);
236
237 dd->next_drq[data_dir] = NULL;
238 if (rbnext)
239 dd->next_drq[data_dir] = rb_entry_drq(rbnext);
240 }
241
242 if (ON_RB(&drq->rb_node)) {
243 rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
244 RB_CLEAR(&drq->rb_node);
245 }
246}
247
248static struct request *
249deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
250{
251 struct rb_node *n = dd->sort_list[data_dir].rb_node;
252 struct deadline_rq *drq;
253
254 while (n) {
255 drq = rb_entry_drq(n);
256
257 if (sector < drq->rb_key)
258 n = n->rb_left;
259 else if (sector > drq->rb_key)
260 n = n->rb_right;
261 else
262 return drq->request;
263 }
264
265 return NULL;
266}
267
268/*
269 * deadline_find_first_drq finds the first (lowest sector numbered) request
270 * for the specified data_dir. Used to sweep back to the start of the disk
271 * (1-way elevator) after we process the last (highest sector) request.
272 */
273static struct deadline_rq *
274deadline_find_first_drq(struct deadline_data *dd, int data_dir)
275{
276 struct rb_node *n = dd->sort_list[data_dir].rb_node;
277
278 for (;;) {
279 if (n->rb_left == NULL)
280 return rb_entry_drq(n);
281
282 n = n->rb_left;
283 }
284}
285
286/*
287 * add drq to rbtree and fifo
288 */
289static inline void
290deadline_add_request(struct request_queue *q, struct request *rq)
291{
292 struct deadline_data *dd = q->elevator->elevator_data;
293 struct deadline_rq *drq = RQ_DATA(rq);
294
295 const int data_dir = rq_data_dir(drq->request);
296
297 deadline_add_drq_rb(dd, drq);
298 /*
299 * set expire time (only used for reads) and add to fifo list
300 */
301 drq->expires = jiffies + dd->fifo_expire[data_dir];
302 list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
303
304 if (rq_mergeable(rq)) {
305 deadline_add_drq_hash(dd, drq);
306
307 if (!q->last_merge)
308 q->last_merge = rq;
309 }
310}
311
312/*
313 * remove rq from rbtree, fifo, and hash
314 */
315static void deadline_remove_request(request_queue_t *q, struct request *rq)
316{
317 struct deadline_rq *drq = RQ_DATA(rq);
318
319 if (drq) {
320 struct deadline_data *dd = q->elevator->elevator_data;
321
322 list_del_init(&drq->fifo);
323 deadline_remove_merge_hints(q, drq);
324 deadline_del_drq_rb(dd, drq);
325 }
326}
327
328static int
329deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
330{
331 struct deadline_data *dd = q->elevator->elevator_data;
332 struct request *__rq;
333 int ret;
334
335 /*
336 * try last_merge to avoid going to hash
337 */
338 ret = elv_try_last_merge(q, bio);
339 if (ret != ELEVATOR_NO_MERGE) {
340 __rq = q->last_merge;
341 goto out_insert;
342 }
343
344 /*
345 * see if the merge hash can satisfy a back merge
346 */
347 __rq = deadline_find_drq_hash(dd, bio->bi_sector);
348 if (__rq) {
349 BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
350
351 if (elv_rq_merge_ok(__rq, bio)) {
352 ret = ELEVATOR_BACK_MERGE;
353 goto out;
354 }
355 }
356
357 /*
358 * check for front merge
359 */
360 if (dd->front_merges) {
361 sector_t rb_key = bio->bi_sector + bio_sectors(bio);
362
363 __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
364 if (__rq) {
365 BUG_ON(rb_key != rq_rb_key(__rq));
366
367 if (elv_rq_merge_ok(__rq, bio)) {
368 ret = ELEVATOR_FRONT_MERGE;
369 goto out;
370 }
371 }
372 }
373
374 return ELEVATOR_NO_MERGE;
375out:
376 q->last_merge = __rq;
377out_insert:
378 if (ret)
379 deadline_hot_drq_hash(dd, RQ_DATA(__rq));
380 *req = __rq;
381 return ret;
382}
383
384static void deadline_merged_request(request_queue_t *q, struct request *req)
385{
386 struct deadline_data *dd = q->elevator->elevator_data;
387 struct deadline_rq *drq = RQ_DATA(req);
388
389 /*
390 * hash always needs to be repositioned, key is end sector
391 */
392 deadline_del_drq_hash(drq);
393 deadline_add_drq_hash(dd, drq);
394
395 /*
396 * if the merge was a front merge, we need to reposition request
397 */
398 if (rq_rb_key(req) != drq->rb_key) {
399 deadline_del_drq_rb(dd, drq);
400 deadline_add_drq_rb(dd, drq);
401 }
402
403 q->last_merge = req;
404}
405
406static void
407deadline_merged_requests(request_queue_t *q, struct request *req,
408 struct request *next)
409{
410 struct deadline_data *dd = q->elevator->elevator_data;
411 struct deadline_rq *drq = RQ_DATA(req);
412 struct deadline_rq *dnext = RQ_DATA(next);
413
414 BUG_ON(!drq);
415 BUG_ON(!dnext);
416
417 /*
418 * reposition drq (this is the merged request) in hash, and in rbtree
419 * in case of a front merge
420 */
421 deadline_del_drq_hash(drq);
422 deadline_add_drq_hash(dd, drq);
423
424 if (rq_rb_key(req) != drq->rb_key) {
425 deadline_del_drq_rb(dd, drq);
426 deadline_add_drq_rb(dd, drq);
427 }
428
429 /*
430 * if dnext expires before drq, assign its expire time to drq
431 * and move into dnext position (dnext will be deleted) in fifo
432 */
433 if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
434 if (time_before(dnext->expires, drq->expires)) {
435 list_move(&drq->fifo, &dnext->fifo);
436 drq->expires = dnext->expires;
437 }
438 }
439
440 /*
441 * kill knowledge of next, this one is a goner
442 */
443 deadline_remove_request(q, next);
444}
445
446/*
447 * move request from sort list to dispatch queue.
448 */
449static inline void
450deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
451{
452 request_queue_t *q = drq->request->q;
453
454 deadline_remove_request(q, drq->request);
455 list_add_tail(&drq->request->queuelist, dd->dispatch);
456}
457
458/*
459 * move an entry to dispatch queue
460 */
461static void
462deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
463{
464 const int data_dir = rq_data_dir(drq->request);
465 struct rb_node *rbnext = rb_next(&drq->rb_node);
466
467 dd->next_drq[READ] = NULL;
468 dd->next_drq[WRITE] = NULL;
469
470 if (rbnext)
471 dd->next_drq[data_dir] = rb_entry_drq(rbnext);
472
473 dd->last_sector = drq->request->sector + drq->request->nr_sectors;
474
475 /*
476 * take it off the sort and fifo list, move
477 * to dispatch queue
478 */
479 deadline_move_to_dispatch(dd, drq);
480}
481
482#define list_entry_fifo(ptr) list_entry((ptr), struct deadline_rq, fifo)
483
484/*
485 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
486 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
487 */
488static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
489{
490 struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
491
492 /*
493 * drq is expired!
494 */
495 if (time_after(jiffies, drq->expires))
496 return 1;
497
498 return 0;
499}
500
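deadline_check_fifo() compares the head request's expiry time against jiffies with the kernel's time_after() macro, which stays correct even when the tick counter wraps. A minimal userspace model of that comparison (the macro is reproduced here for illustration; the values are chosen to straddle a wrap):

#include <stdio.h>
#include <limits.h>

/* wraparound-safe "a is later than b" for unsigned tick counters */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long expires = ULONG_MAX - 4;	/* deadline set just before wrap */
	unsigned long jiffies_now = 5;		/* counter has since wrapped */

	printf("expired: %d\n", time_after(jiffies_now, expires));	/* 1 */
	return 0;
}
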
501/*
502 * deadline_dispatch_requests selects the best request according to
503 * read/write expire, fifo_batch, etc
504 */
505static int deadline_dispatch_requests(struct deadline_data *dd)
506{
507 const int reads = !list_empty(&dd->fifo_list[READ]);
508 const int writes = !list_empty(&dd->fifo_list[WRITE]);
509 struct deadline_rq *drq;
510 int data_dir, other_dir;
511
512 /*
513 * batches are currently reads XOR writes
514 */
515 drq = NULL;
516
517 if (dd->next_drq[READ])
518 drq = dd->next_drq[READ];
519
520 if (dd->next_drq[WRITE])
521 drq = dd->next_drq[WRITE];
522
523 if (drq) {
524 /* we have a "next request" */
525
526 if (dd->last_sector != drq->request->sector)
527 /* end the batch on a non sequential request */
528 dd->batching += dd->fifo_batch;
529
530 if (dd->batching < dd->fifo_batch)
531 /* we are still entitled to batch */
532 goto dispatch_request;
533 }
534
535 /*
536 * at this point we are not running a batch. select the appropriate
537 * data direction (read / write)
538 */
539
540 if (reads) {
541 BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
542
543 if (writes && (dd->starved++ >= dd->writes_starved))
544 goto dispatch_writes;
545
546 data_dir = READ;
547 other_dir = WRITE;
548
549 goto dispatch_find_request;
550 }
551
552 /*
553 * either there are no reads, or writes have been starved
554 */
555
556 if (writes) {
557dispatch_writes:
558 BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
559
560 dd->starved = 0;
561
562 data_dir = WRITE;
563 other_dir = READ;
564
565 goto dispatch_find_request;
566 }
567
568 return 0;
569
570dispatch_find_request:
571 /*
572 * we are not running a batch, find best request for selected data_dir
573 */
574 if (deadline_check_fifo(dd, data_dir)) {
575 /* An expired request exists - satisfy it */
576 dd->batching = 0;
577 drq = list_entry_fifo(dd->fifo_list[data_dir].next);
578
579 } else if (dd->next_drq[data_dir]) {
580 /*
581 * The last req was the same dir and we have a next request in
582 * sort order. No expired requests so continue on from here.
583 */
584 drq = dd->next_drq[data_dir];
585 } else {
586 /*
587 * The last req was the other direction or we have run out of
588 * higher-sectored requests. Go back to the lowest sectored
589 * request (1 way elevator) and start a new batch.
590 */
591 dd->batching = 0;
592 drq = deadline_find_first_drq(dd, data_dir);
593 }
594
595dispatch_request:
596 /*
597 * drq is the selected appropriate request.
598 */
599 dd->batching++;
600 deadline_move_request(dd, drq);
601
602 return 1;
603}
604
605static struct request *deadline_next_request(request_queue_t *q)
606{
607 struct deadline_data *dd = q->elevator->elevator_data;
608 struct request *rq;
609
610 /*
611 * if there are still requests on the dispatch queue, grab the first one
612 */
613 if (!list_empty(dd->dispatch)) {
614dispatch:
615 rq = list_entry_rq(dd->dispatch->next);
616 return rq;
617 }
618
619 if (deadline_dispatch_requests(dd))
620 goto dispatch;
621
622 return NULL;
623}
624
625static void
626deadline_insert_request(request_queue_t *q, struct request *rq, int where)
627{
628 struct deadline_data *dd = q->elevator->elevator_data;
629
630 /* barriers must flush the reorder queue */
631 if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
632 && where == ELEVATOR_INSERT_SORT))
633 where = ELEVATOR_INSERT_BACK;
634
635 switch (where) {
636 case ELEVATOR_INSERT_BACK:
637 while (deadline_dispatch_requests(dd))
638 ;
639 list_add_tail(&rq->queuelist, dd->dispatch);
640 break;
641 case ELEVATOR_INSERT_FRONT:
642 list_add(&rq->queuelist, dd->dispatch);
643 break;
644 case ELEVATOR_INSERT_SORT:
645 BUG_ON(!blk_fs_request(rq));
646 deadline_add_request(q, rq);
647 break;
648 default:
649 printk("%s: bad insert point %d\n", __FUNCTION__,where);
650 return;
651 }
652}
653
654static int deadline_queue_empty(request_queue_t *q)
655{
656 struct deadline_data *dd = q->elevator->elevator_data;
657
658 if (!list_empty(&dd->fifo_list[WRITE])
659 || !list_empty(&dd->fifo_list[READ])
660 || !list_empty(dd->dispatch))
661 return 0;
662
663 return 1;
664}
665
666static struct request *
667deadline_former_request(request_queue_t *q, struct request *rq)
668{
669 struct deadline_rq *drq = RQ_DATA(rq);
670 struct rb_node *rbprev = rb_prev(&drq->rb_node);
671
672 if (rbprev)
673 return rb_entry_drq(rbprev)->request;
674
675 return NULL;
676}
677
678static struct request *
679deadline_latter_request(request_queue_t *q, struct request *rq)
680{
681 struct deadline_rq *drq = RQ_DATA(rq);
682 struct rb_node *rbnext = rb_next(&drq->rb_node);
683
684 if (rbnext)
685 return rb_entry_drq(rbnext)->request;
686
687 return NULL;
688}
689
690static void deadline_exit_queue(elevator_t *e)
691{
692 struct deadline_data *dd = e->elevator_data;
693
694 BUG_ON(!list_empty(&dd->fifo_list[READ]));
695 BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
696
697 mempool_destroy(dd->drq_pool);
698 kfree(dd->hash);
699 kfree(dd);
700}
701
702/*
703 * initialize elevator private data (deadline_data), and alloc a drq for
704 * each request on the free lists
705 */
706static int deadline_init_queue(request_queue_t *q, elevator_t *e)
707{
708 struct deadline_data *dd;
709 int i;
710
711 if (!drq_pool)
712 return -ENOMEM;
713
714 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
715 if (!dd)
716 return -ENOMEM;
717 memset(dd, 0, sizeof(*dd));
718
719 dd->hash = kmalloc(sizeof(struct list_head)*DL_HASH_ENTRIES,GFP_KERNEL);
720 if (!dd->hash) {
721 kfree(dd);
722 return -ENOMEM;
723 }
724
725 dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, drq_pool);
726 if (!dd->drq_pool) {
727 kfree(dd->hash);
728 kfree(dd);
729 return -ENOMEM;
730 }
731
732 for (i = 0; i < DL_HASH_ENTRIES; i++)
733 INIT_LIST_HEAD(&dd->hash[i]);
734
735 INIT_LIST_HEAD(&dd->fifo_list[READ]);
736 INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
737 dd->sort_list[READ] = RB_ROOT;
738 dd->sort_list[WRITE] = RB_ROOT;
739 dd->dispatch = &q->queue_head;
740 dd->fifo_expire[READ] = read_expire;
741 dd->fifo_expire[WRITE] = write_expire;
742 dd->writes_starved = writes_starved;
743 dd->front_merges = 1;
744 dd->fifo_batch = fifo_batch;
745 e->elevator_data = dd;
746 return 0;
747}
748
749static void deadline_put_request(request_queue_t *q, struct request *rq)
750{
751 struct deadline_data *dd = q->elevator->elevator_data;
752 struct deadline_rq *drq = RQ_DATA(rq);
753
754 if (drq) {
755 mempool_free(drq, dd->drq_pool);
756 rq->elevator_private = NULL;
757 }
758}
759
760static int
761deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
762{
763 struct deadline_data *dd = q->elevator->elevator_data;
764 struct deadline_rq *drq;
765
766 drq = mempool_alloc(dd->drq_pool, gfp_mask);
767 if (drq) {
768 memset(drq, 0, sizeof(*drq));
769 RB_CLEAR(&drq->rb_node);
770 drq->request = rq;
771
772 INIT_LIST_HEAD(&drq->hash);
773 drq->on_hash = 0;
774
775 INIT_LIST_HEAD(&drq->fifo);
776
777 rq->elevator_private = drq;
778 return 0;
779 }
780
781 return 1;
782}
783
784/*
785 * sysfs parts below
786 */
787struct deadline_fs_entry {
788 struct attribute attr;
789 ssize_t (*show)(struct deadline_data *, char *);
790 ssize_t (*store)(struct deadline_data *, const char *, size_t);
791};
792
793static ssize_t
794deadline_var_show(int var, char *page)
795{
796 return sprintf(page, "%d\n", var);
797}
798
799static ssize_t
800deadline_var_store(int *var, const char *page, size_t count)
801{
802 char *p = (char *) page;
803
804 *var = simple_strtol(p, &p, 10);
805 return count;
806}
807
808#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
809static ssize_t __FUNC(struct deadline_data *dd, char *page) \
810{ \
811 int __data = __VAR; \
812 if (__CONV) \
813 __data = jiffies_to_msecs(__data); \
814 return deadline_var_show(__data, (page)); \
815}
816SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
817SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
818SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
819SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
820SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
821#undef SHOW_FUNCTION
822
823#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
824static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
825{ \
826 int __data; \
827 int ret = deadline_var_store(&__data, (page), count); \
828 if (__data < (MIN)) \
829 __data = (MIN); \
830 else if (__data > (MAX)) \
831 __data = (MAX); \
832 if (__CONV) \
833 *(__PTR) = msecs_to_jiffies(__data); \
834 else \
835 *(__PTR) = __data; \
836 return ret; \
837}
838STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
839STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
840STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
841STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
842STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
843#undef STORE_FUNCTION
844
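For illustration only (a hand expansion of the macro above, not an extra function in the original file), STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1) generates roughly the following handler: it parses the sysfs input with deadline_var_store(), clamps the value to [0, INT_MAX], and, because __CONV is 1 for the expire attributes, converts milliseconds to jiffies before storing it:

static ssize_t deadline_readexpire_store(struct deadline_data *dd,
					 const char *page, size_t count)
{
	int __data;
	int ret = deadline_var_store(&__data, page, count);

	if (__data < 0)
		__data = 0;
	else if (__data > INT_MAX)
		__data = INT_MAX;
	/* __CONV == 1 for the expire attributes: value is given in msecs */
	*(&dd->fifo_expire[READ]) = msecs_to_jiffies(__data);
	return ret;
}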
845static struct deadline_fs_entry deadline_readexpire_entry = {
846 .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
847 .show = deadline_readexpire_show,
848 .store = deadline_readexpire_store,
849};
850static struct deadline_fs_entry deadline_writeexpire_entry = {
851 .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
852 .show = deadline_writeexpire_show,
853 .store = deadline_writeexpire_store,
854};
855static struct deadline_fs_entry deadline_writesstarved_entry = {
856 .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
857 .show = deadline_writesstarved_show,
858 .store = deadline_writesstarved_store,
859};
860static struct deadline_fs_entry deadline_frontmerges_entry = {
861 .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
862 .show = deadline_frontmerges_show,
863 .store = deadline_frontmerges_store,
864};
865static struct deadline_fs_entry deadline_fifobatch_entry = {
866 .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
867 .show = deadline_fifobatch_show,
868 .store = deadline_fifobatch_store,
869};
870
871static struct attribute *default_attrs[] = {
872 &deadline_readexpire_entry.attr,
873 &deadline_writeexpire_entry.attr,
874 &deadline_writesstarved_entry.attr,
875 &deadline_frontmerges_entry.attr,
876 &deadline_fifobatch_entry.attr,
877 NULL,
878};
879
880#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
881
882static ssize_t
883deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
884{
885 elevator_t *e = container_of(kobj, elevator_t, kobj);
886 struct deadline_fs_entry *entry = to_deadline(attr);
887
888 if (!entry->show)
889 return 0;
890
891 return entry->show(e->elevator_data, page);
892}
893
894static ssize_t
895deadline_attr_store(struct kobject *kobj, struct attribute *attr,
896 const char *page, size_t length)
897{
898 elevator_t *e = container_of(kobj, elevator_t, kobj);
899 struct deadline_fs_entry *entry = to_deadline(attr);
900
901 if (!entry->store)
902 return -EINVAL;
903
904 return entry->store(e->elevator_data, page, length);
905}
906
907static struct sysfs_ops deadline_sysfs_ops = {
908 .show = deadline_attr_show,
909 .store = deadline_attr_store,
910};
911
912static struct kobj_type deadline_ktype = {
913 .sysfs_ops = &deadline_sysfs_ops,
914 .default_attrs = default_attrs,
915};
916
917static struct elevator_type iosched_deadline = {
918 .ops = {
919 .elevator_merge_fn = deadline_merge,
920 .elevator_merged_fn = deadline_merged_request,
921 .elevator_merge_req_fn = deadline_merged_requests,
922 .elevator_next_req_fn = deadline_next_request,
923 .elevator_add_req_fn = deadline_insert_request,
924 .elevator_remove_req_fn = deadline_remove_request,
925 .elevator_queue_empty_fn = deadline_queue_empty,
926 .elevator_former_req_fn = deadline_former_request,
927 .elevator_latter_req_fn = deadline_latter_request,
928 .elevator_set_req_fn = deadline_set_request,
929 .elevator_put_req_fn = deadline_put_request,
930 .elevator_init_fn = deadline_init_queue,
931 .elevator_exit_fn = deadline_exit_queue,
932 },
933
934 .elevator_ktype = &deadline_ktype,
935 .elevator_name = "deadline",
936 .elevator_owner = THIS_MODULE,
937};
938
939static int __init deadline_init(void)
940{
941 int ret;
942
943 drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
944 0, 0, NULL, NULL);
945
946 if (!drq_pool)
947 return -ENOMEM;
948
949 ret = elv_register(&iosched_deadline);
950 if (ret)
951 kmem_cache_destroy(drq_pool);
952
953 return ret;
954}
955
956static void __exit deadline_exit(void)
957{
958 kmem_cache_destroy(drq_pool);
959 elv_unregister(&iosched_deadline);
960}
961
962module_init(deadline_init);
963module_exit(deadline_exit);
964
965MODULE_AUTHOR("Jens Axboe");
966MODULE_LICENSE("GPL");
967MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
new file mode 100644
index 000000000000..6b79b4314622
--- /dev/null
+++ b/drivers/block/elevator.c
@@ -0,0 +1,705 @@
1/*
2 * linux/drivers/block/elevator.c
3 *
4 * Block device elevator/IO-scheduler.
5 *
6 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
7 *
8 * 30042000 Jens Axboe <axboe@suse.de> :
9 *
10 * Split the elevator a bit so that it is possible to choose a different
11 * one or even write a new "plug in". There are three pieces:
12 * - elevator_fn, inserts a new request in the queue list
13 * - elevator_merge_fn, decides whether a new buffer can be merged with
14 * an existing request
15 * - elevator_dequeue_fn, called when a request is taken off the active list
16 *
17 * 20082000 Dave Jones <davej@suse.de> :
18 * Removed tests for max-bomb-segments, which was breaking elvtune
19 * when run without -bN
20 *
21 * Jens:
22 * - Rework again to work with bio instead of buffer_heads
23 * - lose bi_dev comparisons, partition handling is right now
24 * - completely modularize elevator setup and teardown
25 *
26 */
27#include <linux/kernel.h>
28#include <linux/fs.h>
29#include <linux/blkdev.h>
30#include <linux/elevator.h>
31#include <linux/bio.h>
32#include <linux/config.h>
33#include <linux/module.h>
34#include <linux/slab.h>
35#include <linux/init.h>
36#include <linux/compiler.h>
37
38#include <asm/uaccess.h>
39
40static DEFINE_SPINLOCK(elv_list_lock);
41static LIST_HEAD(elv_list);
42
43/*
44 * can we safely merge with this request?
45 */
46inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
47{
48 if (!rq_mergeable(rq))
49 return 0;
50
51 /*
52 * different data direction or already started, don't merge
53 */
54 if (bio_data_dir(bio) != rq_data_dir(rq))
55 return 0;
56
57 /*
58 * same device and no special stuff set, merge is ok
59 */
60 if (rq->rq_disk == bio->bi_bdev->bd_disk &&
61 !rq->waiting && !rq->special)
62 return 1;
63
64 return 0;
65}
66EXPORT_SYMBOL(elv_rq_merge_ok);
67
68inline int elv_try_merge(struct request *__rq, struct bio *bio)
69{
70 int ret = ELEVATOR_NO_MERGE;
71
72 /*
73 * we can merge and sequence is ok, check if it's possible
74 */
75 if (elv_rq_merge_ok(__rq, bio)) {
76 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
77 ret = ELEVATOR_BACK_MERGE;
78 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
79 ret = ELEVATOR_FRONT_MERGE;
80 }
81
82 return ret;
83}
84EXPORT_SYMBOL(elv_try_merge);
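As a worked example with hypothetical sector numbers (not taken from the source): if __rq->sector is 1000 and __rq->nr_sectors is 8, a bio whose bi_sector is 1008 passes the back-merge test (1000 + 8 == 1008), an 8-sector bio whose bi_sector is 992 passes the front-merge test (1000 - 8 == 992), and any other position falls through to ELEVATOR_NO_MERGE.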
85
86inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
87{
88 if (q->last_merge)
89 return elv_try_merge(q->last_merge, bio);
90
91 return ELEVATOR_NO_MERGE;
92}
93EXPORT_SYMBOL(elv_try_last_merge);
94
95static struct elevator_type *elevator_find(const char *name)
96{
97 struct elevator_type *e = NULL;
98 struct list_head *entry;
99
100 spin_lock_irq(&elv_list_lock);
101 list_for_each(entry, &elv_list) {
102 struct elevator_type *__e;
103
104 __e = list_entry(entry, struct elevator_type, list);
105
106 if (!strcmp(__e->elevator_name, name)) {
107 e = __e;
108 break;
109 }
110 }
111 spin_unlock_irq(&elv_list_lock);
112
113 return e;
114}
115
116static void elevator_put(struct elevator_type *e)
117{
118 module_put(e->elevator_owner);
119}
120
121static struct elevator_type *elevator_get(const char *name)
122{
123 struct elevator_type *e = elevator_find(name);
124
125 if (!e)
126 return NULL;
127 if (!try_module_get(e->elevator_owner))
128 return NULL;
129
130 return e;
131}
132
133static int elevator_attach(request_queue_t *q, struct elevator_type *e,
134 struct elevator_queue *eq)
135{
136 int ret = 0;
137
138 memset(eq, 0, sizeof(*eq));
139 eq->ops = &e->ops;
140 eq->elevator_type = e;
141
142 INIT_LIST_HEAD(&q->queue_head);
143 q->last_merge = NULL;
144 q->elevator = eq;
145
146 if (eq->ops->elevator_init_fn)
147 ret = eq->ops->elevator_init_fn(q, eq);
148
149 return ret;
150}
151
152static char chosen_elevator[16];
153
154static void elevator_setup_default(void)
155{
156 /*
157 * check if default is set and exists
158 */
159 if (chosen_elevator[0] && elevator_find(chosen_elevator))
160 return;
161
162#if defined(CONFIG_IOSCHED_AS)
163 strcpy(chosen_elevator, "anticipatory");
164#elif defined(CONFIG_IOSCHED_DEADLINE)
165 strcpy(chosen_elevator, "deadline");
166#elif defined(CONFIG_IOSCHED_CFQ)
167 strcpy(chosen_elevator, "cfq");
168#elif defined(CONFIG_IOSCHED_NOOP)
169 strcpy(chosen_elevator, "noop");
170#else
171#error "You must build at least 1 IO scheduler into the kernel"
172#endif
173}
174
175static int __init elevator_setup(char *str)
176{
177 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
178 return 0;
179}
180
181__setup("elevator=", elevator_setup);
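So the default scheduler can be chosen on the kernel command line: booting with, say, elevator=deadline copies "deadline" into chosen_elevator, and elevator_setup_default() then leaves that choice in place as long as a scheduler of that name has been registered.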
182
183int elevator_init(request_queue_t *q, char *name)
184{
185 struct elevator_type *e = NULL;
186 struct elevator_queue *eq;
187 int ret = 0;
188
189 elevator_setup_default();
190
191 if (!name)
192 name = chosen_elevator;
193
194 e = elevator_get(name);
195 if (!e)
196 return -EINVAL;
197
198 eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
199 if (!eq) {
200 elevator_put(e->elevator_type);
201 return -ENOMEM;
202 }
203
204 ret = elevator_attach(q, e, eq);
205 if (ret) {
206 kfree(eq);
207 elevator_put(e->elevator_type);
208 }
209
210 return ret;
211}
212
213void elevator_exit(elevator_t *e)
214{
215 if (e->ops->elevator_exit_fn)
216 e->ops->elevator_exit_fn(e);
217
218 elevator_put(e->elevator_type);
219 e->elevator_type = NULL;
220 kfree(e);
221}
222
223static int elevator_global_init(void)
224{
225 return 0;
226}
227
228int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
229{
230 elevator_t *e = q->elevator;
231
232 if (e->ops->elevator_merge_fn)
233 return e->ops->elevator_merge_fn(q, req, bio);
234
235 return ELEVATOR_NO_MERGE;
236}
237
238void elv_merged_request(request_queue_t *q, struct request *rq)
239{
240 elevator_t *e = q->elevator;
241
242 if (e->ops->elevator_merged_fn)
243 e->ops->elevator_merged_fn(q, rq);
244}
245
246void elv_merge_requests(request_queue_t *q, struct request *rq,
247 struct request *next)
248{
249 elevator_t *e = q->elevator;
250
251 if (q->last_merge == next)
252 q->last_merge = NULL;
253
254 if (e->ops->elevator_merge_req_fn)
255 e->ops->elevator_merge_req_fn(q, rq, next);
256}
257
258/*
259 * For careful internal use by the block layer. Essentially the same as
260 * a requeue in that it tells the io scheduler that this request is not
261 * active in the driver or hardware anymore, but we don't want the request
262 * added back to the scheduler. Function is not exported.
263 */
264void elv_deactivate_request(request_queue_t *q, struct request *rq)
265{
266 elevator_t *e = q->elevator;
267
268 /*
269 * it already went through dequeue, we need to decrement the
270 * in_flight count again
271 */
272 if (blk_account_rq(rq))
273 q->in_flight--;
274
275 rq->flags &= ~REQ_STARTED;
276
277 if (e->ops->elevator_deactivate_req_fn)
278 e->ops->elevator_deactivate_req_fn(q, rq);
279}
280
281void elv_requeue_request(request_queue_t *q, struct request *rq)
282{
283 elv_deactivate_request(q, rq);
284
285 /*
286 * if this is the flush, requeue the original instead and drop the flush
287 */
288 if (rq->flags & REQ_BAR_FLUSH) {
289 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
290 rq = rq->end_io_data;
291 }
292
293 /*
294 * if iosched has an explicit requeue hook, then use that. otherwise
295 * just put the request at the front of the queue
296 */
297 if (q->elevator->ops->elevator_requeue_req_fn)
298 q->elevator->ops->elevator_requeue_req_fn(q, rq);
299 else
300 __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
301}
302
303void __elv_add_request(request_queue_t *q, struct request *rq, int where,
304 int plug)
305{
306 /*
307 * barriers implicitly indicate back insertion
308 */
309 if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
310 where == ELEVATOR_INSERT_SORT)
311 where = ELEVATOR_INSERT_BACK;
312
313 if (plug)
314 blk_plug_device(q);
315
316 rq->q = q;
317
318 if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
319 q->elevator->ops->elevator_add_req_fn(q, rq, where);
320
321 if (blk_queue_plugged(q)) {
322 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
323 - q->in_flight;
324
325 if (nrq == q->unplug_thresh)
326 __generic_unplug_device(q);
327 }
328 } else
329 /*
330 * if drain is set, store the request "locally". when the drain
331	 * is finished, the requests will be handed to the io
332	 * scheduler in order
333 */
334 list_add_tail(&rq->queuelist, &q->drain_list);
335}
336
337void elv_add_request(request_queue_t *q, struct request *rq, int where,
338 int plug)
339{
340 unsigned long flags;
341
342 spin_lock_irqsave(q->queue_lock, flags);
343 __elv_add_request(q, rq, where, plug);
344 spin_unlock_irqrestore(q->queue_lock, flags);
345}
346
347static inline struct request *__elv_next_request(request_queue_t *q)
348{
349 struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
350
351 /*
352 * if this is a barrier write and the device has to issue a
353 * flush sequence to support it, check how far we are
354 */
355 if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
356 BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
357
358 if (q->ordered == QUEUE_ORDERED_FLUSH &&
359 !blk_barrier_preflush(rq))
360 rq = blk_start_pre_flush(q, rq);
361 }
362
363 return rq;
364}
365
366struct request *elv_next_request(request_queue_t *q)
367{
368 struct request *rq;
369 int ret;
370
371 while ((rq = __elv_next_request(q)) != NULL) {
372 /*
373 * just mark as started even if we don't start it, a request
374 * that has been delayed should not be passed by new incoming
375 * requests
376 */
377 rq->flags |= REQ_STARTED;
378
379 if (rq == q->last_merge)
380 q->last_merge = NULL;
381
382 if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
383 break;
384
385 ret = q->prep_rq_fn(q, rq);
386 if (ret == BLKPREP_OK) {
387 break;
388 } else if (ret == BLKPREP_DEFER) {
389 rq = NULL;
390 break;
391 } else if (ret == BLKPREP_KILL) {
392 int nr_bytes = rq->hard_nr_sectors << 9;
393
394 if (!nr_bytes)
395 nr_bytes = rq->data_len;
396
397 blkdev_dequeue_request(rq);
398 rq->flags |= REQ_QUIET;
399 end_that_request_chunk(rq, 0, nr_bytes);
400 end_that_request_last(rq);
401 } else {
402 printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
403 ret);
404 break;
405 }
406 }
407
408 return rq;
409}
410
411void elv_remove_request(request_queue_t *q, struct request *rq)
412{
413 elevator_t *e = q->elevator;
414
415 /*
416 * the time frame between a request being removed from the lists
417 * and when it is freed is accounted as io that is in progress on
418 * the driver side. note that we only account requests that the
419 * driver has seen (REQ_STARTED set), to avoid false accounting
420 * for request-request merges
421 */
422 if (blk_account_rq(rq))
423 q->in_flight++;
424
425 /*
426 * the main clearing point for q->last_merge is on retrieval of
427 * request by driver (it calls elv_next_request()), but it _can_
428 * also happen here if a request is added to the queue but later
429 * deleted without ever being given to driver (merged with another
430 * request).
431 */
432 if (rq == q->last_merge)
433 q->last_merge = NULL;
434
435 if (e->ops->elevator_remove_req_fn)
436 e->ops->elevator_remove_req_fn(q, rq);
437}
438
439int elv_queue_empty(request_queue_t *q)
440{
441 elevator_t *e = q->elevator;
442
443 if (e->ops->elevator_queue_empty_fn)
444 return e->ops->elevator_queue_empty_fn(q);
445
446 return list_empty(&q->queue_head);
447}
448
449struct request *elv_latter_request(request_queue_t *q, struct request *rq)
450{
451 struct list_head *next;
452
453 elevator_t *e = q->elevator;
454
455 if (e->ops->elevator_latter_req_fn)
456 return e->ops->elevator_latter_req_fn(q, rq);
457
458 next = rq->queuelist.next;
459 if (next != &q->queue_head && next != &rq->queuelist)
460 return list_entry_rq(next);
461
462 return NULL;
463}
464
465struct request *elv_former_request(request_queue_t *q, struct request *rq)
466{
467 struct list_head *prev;
468
469 elevator_t *e = q->elevator;
470
471 if (e->ops->elevator_former_req_fn)
472 return e->ops->elevator_former_req_fn(q, rq);
473
474 prev = rq->queuelist.prev;
475 if (prev != &q->queue_head && prev != &rq->queuelist)
476 return list_entry_rq(prev);
477
478 return NULL;
479}
480
481int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
482{
483 elevator_t *e = q->elevator;
484
485 if (e->ops->elevator_set_req_fn)
486 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
487
488 rq->elevator_private = NULL;
489 return 0;
490}
491
492void elv_put_request(request_queue_t *q, struct request *rq)
493{
494 elevator_t *e = q->elevator;
495
496 if (e->ops->elevator_put_req_fn)
497 e->ops->elevator_put_req_fn(q, rq);
498}
499
500int elv_may_queue(request_queue_t *q, int rw)
501{
502 elevator_t *e = q->elevator;
503
504 if (e->ops->elevator_may_queue_fn)
505 return e->ops->elevator_may_queue_fn(q, rw);
506
507 return ELV_MQUEUE_MAY;
508}
509
510void elv_completed_request(request_queue_t *q, struct request *rq)
511{
512 elevator_t *e = q->elevator;
513
514 /*
515 * request is released from the driver, io must be done
516 */
517 if (blk_account_rq(rq))
518 q->in_flight--;
519
520 if (e->ops->elevator_completed_req_fn)
521 e->ops->elevator_completed_req_fn(q, rq);
522}
523
524int elv_register_queue(struct request_queue *q)
525{
526 elevator_t *e = q->elevator;
527
528 e->kobj.parent = kobject_get(&q->kobj);
529 if (!e->kobj.parent)
530 return -EBUSY;
531
532 snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
533 e->kobj.ktype = e->elevator_type->elevator_ktype;
534
535 return kobject_register(&e->kobj);
536}
537
538void elv_unregister_queue(struct request_queue *q)
539{
540 if (q) {
541 elevator_t *e = q->elevator;
542 kobject_unregister(&e->kobj);
543 kobject_put(&q->kobj);
544 }
545}
546
547int elv_register(struct elevator_type *e)
548{
549 if (elevator_find(e->elevator_name))
550 BUG();
551
552 spin_lock_irq(&elv_list_lock);
553 list_add_tail(&e->list, &elv_list);
554 spin_unlock_irq(&elv_list_lock);
555
556 printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
557 if (!strcmp(e->elevator_name, chosen_elevator))
558 printk(" (default)");
559 printk("\n");
560 return 0;
561}
562EXPORT_SYMBOL_GPL(elv_register);
563
564void elv_unregister(struct elevator_type *e)
565{
566 spin_lock_irq(&elv_list_lock);
567 list_del_init(&e->list);
568 spin_unlock_irq(&elv_list_lock);
569}
570EXPORT_SYMBOL_GPL(elv_unregister);
571
572/*
573 * switch to new_e io scheduler. be careful not to introduce deadlocks -
574 * we don't free the old io scheduler, before we have allocated what we
575 * need for the new one. this way we have a chance of going back to the old
576 * one, if the new one fails init for some reason. we also do an intermediate
577 * switch to noop to ensure safety with stack-allocated requests, since they
578 * don't originate from the block layer allocator. noop is safe here, because
579 * it never needs to touch the elevator itself for completion events. DRAIN
580 * flags will make sure we don't touch it for additions either.
581 */
582static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
583{
584 elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
585 struct elevator_type *noop_elevator = NULL;
586 elevator_t *old_elevator;
587
588 if (!e)
589 goto error;
590
591 /*
592 * first step, drain requests from the block freelist
593 */
594 blk_wait_queue_drained(q, 0);
595
596 /*
597 * unregister old elevator data
598 */
599 elv_unregister_queue(q);
600 old_elevator = q->elevator;
601
602 /*
603 * next step, switch to noop since it uses no private rq structures
604 * and doesn't allocate any memory for anything. then wait for any
605 * non-fs requests in-flight
606 */
607 noop_elevator = elevator_get("noop");
608 spin_lock_irq(q->queue_lock);
609 elevator_attach(q, noop_elevator, e);
610 spin_unlock_irq(q->queue_lock);
611
612 blk_wait_queue_drained(q, 1);
613
614 /*
615 * attach and start new elevator
616 */
617 if (elevator_attach(q, new_e, e))
618 goto fail;
619
620 if (elv_register_queue(q))
621 goto fail_register;
622
623 /*
624 * finally exit old elevator and start queue again
625 */
626 elevator_exit(old_elevator);
627 blk_finish_queue_drain(q);
628 elevator_put(noop_elevator);
629 return;
630
631fail_register:
632 /*
633 * switch failed, exit the new io scheduler and reattach the old
634 * one again (along with re-adding the sysfs dir)
635 */
636 elevator_exit(e);
637fail:
638 q->elevator = old_elevator;
639 elv_register_queue(q);
640 blk_finish_queue_drain(q);
641error:
642 if (noop_elevator)
643 elevator_put(noop_elevator);
644 elevator_put(new_e);
645 printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
646}
647
648ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
649{
650 char elevator_name[ELV_NAME_MAX];
651 struct elevator_type *e;
652
653 memset(elevator_name, 0, sizeof(elevator_name));
654 strncpy(elevator_name, name, sizeof(elevator_name));
655
656 if (elevator_name[strlen(elevator_name) - 1] == '\n')
657 elevator_name[strlen(elevator_name) - 1] = '\0';
658
659 e = elevator_get(elevator_name);
660 if (!e) {
661 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
662 return -EINVAL;
663 }
664
665 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name))
666 return count;
667
668 elevator_switch(q, e);
669 return count;
670}
671
672ssize_t elv_iosched_show(request_queue_t *q, char *name)
673{
674 elevator_t *e = q->elevator;
675 struct elevator_type *elv = e->elevator_type;
676 struct list_head *entry;
677 int len = 0;
678
679 spin_lock_irq(q->queue_lock);
680 list_for_each(entry, &elv_list) {
681 struct elevator_type *__e;
682
683 __e = list_entry(entry, struct elevator_type, list);
684 if (!strcmp(elv->elevator_name, __e->elevator_name))
685 len += sprintf(name+len, "[%s] ", elv->elevator_name);
686 else
687 len += sprintf(name+len, "%s ", __e->elevator_name);
688 }
689 spin_unlock_irq(q->queue_lock);
690
691 len += sprintf(len+name, "\n");
692 return len;
693}
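These two handlers back the per-queue scheduler attribute in sysfs (typically /sys/block/<dev>/queue/scheduler; the exact path is given here only for illustration): reading it lists the registered schedulers with the active one in brackets, e.g. "noop [deadline] cfq", and writing a registered name to it goes through elv_iosched_store() and elevator_switch() above to swap schedulers at run time.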
694
695module_init(elevator_global_init);
696
697EXPORT_SYMBOL(elv_add_request);
698EXPORT_SYMBOL(__elv_add_request);
699EXPORT_SYMBOL(elv_requeue_request);
700EXPORT_SYMBOL(elv_next_request);
701EXPORT_SYMBOL(elv_remove_request);
702EXPORT_SYMBOL(elv_queue_empty);
703EXPORT_SYMBOL(elv_completed_request);
704EXPORT_SYMBOL(elevator_exit);
705EXPORT_SYMBOL(elevator_init);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
new file mode 100644
index 000000000000..42dfa281a880
--- /dev/null
+++ b/drivers/block/floppy.c
@@ -0,0 +1,4638 @@
1/*
2 * linux/drivers/block/floppy.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1993, 1994 Alain Knaff
6 * Copyright (C) 1998 Alan Cox
7 */
8/*
9 * 02.12.91 - Changed to static variables to indicate need for reset
10 * and recalibrate. This makes some things easier (output_byte reset
11 * checking etc), and means less interrupt jumping in case of errors,
12 * so the code is hopefully easier to understand.
13 */
14
15/*
16 * This file is certainly a mess. I've tried my best to get it working,
17 * but I don't like programming floppies, and I have only one anyway.
18 * Urgel. I should check for more errors, and do more graceful error
19 * recovery. Seems there are problems with several drives. I've tried to
20 * correct them. No promises.
21 */
22
23/*
24 * As with hd.c, all routines within this file can (and will) be called
25 * by interrupts, so extreme caution is needed. A hardware interrupt
26 * handler may not sleep, or a kernel panic will happen. Thus I cannot
27 * call "floppy-on" directly, but have to set a special timer interrupt
28 * etc.
29 */
30
31/*
32 * 28.02.92 - made track-buffering routines, based on the routines written
33 * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
34 */
35
36/*
37 * Automatic floppy-detection and formatting written by Werner Almesberger
38 * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
39 * the floppy-change signal detection.
40 */
41
42/*
43 * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
44 * FDC data overrun bug, added some preliminary stuff for vertical
45 * recording support.
46 *
47 * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
48 *
49 * TODO: Errors are still not counted properly.
50 */
51
52/* 1992/9/20
53 * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
54 * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
55 * Christoph H. Hochst\"atter.
56 * I have fixed the shift values to the ones I always use. Maybe a new
57 * ioctl() should be created to be able to modify them.
58 * There is a bug in the driver that makes it impossible to format a
59 * floppy as the first thing after bootup.
60 */
61
62/*
63 * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
64 * this helped the floppy driver as well. Much cleaner, and still seems to
65 * work.
66 */
67
68/* 1994/6/24 --bbroad-- added the floppy table entries and made
69 * minor modifications to allow 2.88 floppies to be run.
70 */
71
72/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
73 * disk types.
74 */
75
76/*
77 * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
78 * format bug fixes, but unfortunately some new bugs too...
79 */
80
81/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
82 * errors to allow safe writing by specialized programs.
83 */
84
85/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
86 * by defining bit 1 of the "stretch" parameter to mean put sectors on the
87 * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
88 * drives are "upside-down").
89 */
90
91/*
92 * 1995/8/26 -- Andreas Busse -- added Mips support.
93 */
94
95/*
96 * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
97 * features to asm/floppy.h.
98 */
99
100/*
101 * 1998/05/07 -- Russell King -- More portability cleanups; moved definition of
102 * interrupt and dma channel to asm/floppy.h. Cleaned up some formatting &
103 * use of '0' for NULL.
104 */
105
106/*
107 * 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation
108 * failures.
109 */
110
111/*
112 * 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives.
113 */
114
115/*
116 * 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24
117 * days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were
118 * being used to store jiffies, which are unsigned longs).
119 */
120
121/*
122 * 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
123 * - get rid of check_region
124 * - s/suser/capable/
125 */
126
127/*
128 * 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no
129 * floppy controller (lingering task on list after module is gone... boom.)
130 */
131
132/*
133 * 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range
134 * (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix
135 * requires many non-obvious changes in arch dependent code.
136 */
137
138/* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>.
139 * Better audit of register_blkdev.
140 */
141
142#define FLOPPY_SANITY_CHECK
143#undef FLOPPY_SILENT_DCL_CLEAR
144
145#define REALLY_SLOW_IO
146
147#define DEBUGT 2
148#define DCL_DEBUG /* debug disk change line */
149
150/* do print messages for unexpected interrupts */
151static int print_unex = 1;
152#include <linux/module.h>
153#include <linux/sched.h>
154#include <linux/fs.h>
155#include <linux/kernel.h>
156#include <linux/timer.h>
157#include <linux/workqueue.h>
158#define FDPATCHES
159#include <linux/fdreg.h>
160
161/*
162 * 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support
163 */
164
165#include <linux/fd.h>
166#include <linux/hdreg.h>
167
168#include <linux/errno.h>
169#include <linux/slab.h>
170#include <linux/mm.h>
171#include <linux/bio.h>
172#include <linux/string.h>
173#include <linux/fcntl.h>
174#include <linux/delay.h>
175#include <linux/mc146818rtc.h> /* CMOS defines */
176#include <linux/ioport.h>
177#include <linux/interrupt.h>
178#include <linux/init.h>
179#include <linux/devfs_fs_kernel.h>
180#include <linux/device.h>
181#include <linux/buffer_head.h> /* for invalidate_buffers() */
182
183/*
184 * PS/2 floppies have much slower step rates than regular floppies.
185 * It's been recommended to take about 1/4 of the default speed
186 * in some more extreme cases.
187 */
188static int slow_floppy;
189
190#include <asm/dma.h>
191#include <asm/irq.h>
192#include <asm/system.h>
193#include <asm/io.h>
194#include <asm/uaccess.h>
195
196static int FLOPPY_IRQ = 6;
197static int FLOPPY_DMA = 2;
198static int can_use_virtual_dma = 2;
199/* =======
200 * can use virtual DMA:
201 * 0 = use of virtual DMA disallowed by config
202 * 1 = use of virtual DMA prescribed by config
203 * 2 = no virtual DMA preference configured. By default try hard DMA,
204 * but fall back on virtual DMA when not enough memory available
205 */
206
207static int use_virtual_dma;
208/* =======
209 * use virtual DMA
210 * 0 using hard DMA
211 * 1 using virtual DMA
212 * This variable is set to virtual when a DMA mem problem arises, and
213 * reset back in floppy_grab_irq_and_dma.
214 * It is not safe to reset it in other circumstances, because the floppy
215 * driver may have several buffers in use at once, and we currently do not
216 * record each buffer's capabilities.
217 */
218
219static DEFINE_SPINLOCK(floppy_lock);
220static struct completion device_release;
221
222static unsigned short virtual_dma_port = 0x3f0;
223irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
224static int set_dor(int fdc, char mask, char data);
225static void register_devfs_entries(int drive) __init;
226
227#define K_64 0x10000 /* 64KB */
228
229/* the following is the mask of allowed drives. By default units 2 and
230 * 3 of both floppy controllers are disabled, because switching on the
231 * motor of these drives causes system hangs on some PCI computers. drive
232 * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
233 * a drive is allowed.
234 *
235 * NOTE: This must come before we include the arch floppy header because
236 * some ports reference this variable from there. -DaveM
237 */
238
239static int allowed_drive_mask = 0x33;
240
241#include <asm/floppy.h>
242
243static int irqdma_allocated;
244
245#define LOCAL_END_REQUEST
246#define DEVICE_NAME "floppy"
247
248#include <linux/blkdev.h>
249#include <linux/blkpg.h>
250#include <linux/cdrom.h> /* for the compatibility eject ioctl */
251#include <linux/completion.h>
252
253static struct request *current_req;
254static struct request_queue *floppy_queue;
255static void do_fd_request(request_queue_t * q);
256
257#ifndef fd_get_dma_residue
258#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
259#endif
260
261/* Dma Memory related stuff */
262
263#ifndef fd_dma_mem_free
264#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
265#endif
266
267#ifndef fd_dma_mem_alloc
268#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
269#endif
270
271static inline void fallback_on_nodma_alloc(char **addr, size_t l)
272{
273#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
274 if (*addr)
275 return; /* we have the memory */
276 if (can_use_virtual_dma != 2)
277 return; /* no fallback allowed */
278 printk
279 ("DMA memory shortage. Temporarily falling back on virtual DMA\n");
280 *addr = (char *)nodma_mem_alloc(l);
281#else
282 return;
283#endif
284}
285
286/* End dma memory related stuff */
287
288static unsigned long fake_change;
289static int initialising = 1;
290
291#define ITYPE(x) (((x)>>2) & 0x1f)
292#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
293#define UNIT(x) ((x) & 0x03) /* drive on fdc */
294#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
295#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
296 /* reverse mapping from unit and fdc to drive */
297#define DP (&drive_params[current_drive])
298#define DRS (&drive_state[current_drive])
299#define DRWE (&write_errors[current_drive])
300#define FDCS (&fdc_state[fdc])
301#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
302#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
303#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
304
305#define UDP (&drive_params[drive])
306#define UDRS (&drive_state[drive])
307#define UDRWE (&write_errors[drive])
308#define UFDCS (&fdc_state[FDC(drive)])
309#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
310#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
311#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
312
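A quick worked example of the mappings above (numbers chosen for illustration): drive 5 is unit 1 on the second controller (UNIT(5) == 1, FDC(5) == 1), REVDRIVE(1, 1) maps back to drive 5, and TOMINOR(5) evaluates to (5 & 3) | ((5 & 4) << 5) == 129.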
313#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
314
315#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
316#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
317
318#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
319
320/* read/write */
321#define COMMAND raw_cmd->cmd[0]
322#define DR_SELECT raw_cmd->cmd[1]
323#define TRACK raw_cmd->cmd[2]
324#define HEAD raw_cmd->cmd[3]
325#define SECTOR raw_cmd->cmd[4]
326#define SIZECODE raw_cmd->cmd[5]
327#define SECT_PER_TRACK raw_cmd->cmd[6]
328#define GAP raw_cmd->cmd[7]
329#define SIZECODE2 raw_cmd->cmd[8]
330#define NR_RW 9
331
332/* format */
333#define F_SIZECODE raw_cmd->cmd[2]
334#define F_SECT_PER_TRACK raw_cmd->cmd[3]
335#define F_GAP raw_cmd->cmd[4]
336#define F_FILL raw_cmd->cmd[5]
337#define NR_F 6
338
339/*
340 * Maximum disk size (in kilobytes). This default is used whenever the
341 * current disk size is unknown.
342 * [Now it is rather a minimum]
343 */
344#define MAX_DISK_SIZE 4 /* 3984 */
345
346/*
347 * globals used by 'result()'
348 */
349#define MAX_REPLIES 16
350static unsigned char reply_buffer[MAX_REPLIES];
351static int inr; /* size of reply buffer, when called from interrupt */
352#define ST0 (reply_buffer[0])
353#define ST1 (reply_buffer[1])
354#define ST2 (reply_buffer[2])
355#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
356#define R_TRACK (reply_buffer[3])
357#define R_HEAD (reply_buffer[4])
358#define R_SECTOR (reply_buffer[5])
359#define R_SIZECODE (reply_buffer[6])
360
361#define SEL_DLY (2*HZ/100)
362
363/*
364 * this struct defines the different floppy drive types.
365 */
366static struct {
367 struct floppy_drive_params params;
368 const char *name; /* name printed while booting */
369} default_drive_params[] = {
370/* NOTE: the time values in jiffies should be in msec!
371 CMOS drive type
372 | Maximum data rate supported by drive type
373 | | Head load time, msec
374 | | | Head unload time, msec (not used)
375 | | | | Step rate interval, usec
376 | | | | | Time needed for spinup time (jiffies)
377 | | | | | | Timeout for spinning down (jiffies)
378 | | | | | | | Spindown offset (where disk stops)
379 | | | | | | | | Select delay
380 | | | | | | | | | RPS
381 | | | | | | | | | | Max number of tracks
382 | | | | | | | | | | | Interrupt timeout
383 | | | | | | | | | | | | Max nonintlv. sectors
384 | | | | | | | | | | | | | -Max Errors- flags */
385{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
386 0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
387
388{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
389 0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
390
391{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
392 0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
393
394{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
395 0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
396
397{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
398 0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
399
400{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
401 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
402
403{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
404 0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
405/* | --autodetected formats--- | | |
406 * read_track | | Name printed when booting
407 * | Native format
408 * Frequency of disk change checks */
409};
410
411static struct floppy_drive_params drive_params[N_DRIVE];
412static struct floppy_drive_struct drive_state[N_DRIVE];
413static struct floppy_write_errors write_errors[N_DRIVE];
414static struct timer_list motor_off_timer[N_DRIVE];
415static struct gendisk *disks[N_DRIVE];
416static struct block_device *opened_bdev[N_DRIVE];
417static DECLARE_MUTEX(open_lock);
418static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
419
420/*
421 * This struct defines the different floppy types.
422 *
423 * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
424 * types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
425 * tells if the disk is in Commodore 1581 format, which means side 0 sectors
426 * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
427 * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
428 * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
429 * side 0 is on physical side 0 (but with the misnamed sector IDs).
430 * 'stretch' should probably be renamed to something more general, like
431 * 'options'. Other parameters should be self-explanatory (see also
432 * setfdprm(8)).
433 */
434/*
435 Size
436 | Sectors per track
437 | | Head
438 | | | Tracks
439 | | | | Stretch
440 | | | | | Gap 1 size
441 | | | | | | Data rate, | 0x40 for perp
442 | | | | | | | Spec1 (stepping rate, head unload
443 | | | | | | | | /fmt gap (gap2) */
444static struct floppy_struct floppy_type[32] = {
445 { 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
446 { 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
447 { 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
448 { 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
449 { 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
450 { 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
451 { 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
452 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
453 { 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
454 { 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */
455
456 { 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
457 { 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
458 { 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
459 { 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
460 { 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
461 { 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
462 { 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
463 { 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
464 { 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
465 { 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
466
467 { 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
468 { 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
469 { 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
470 { 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
471 { 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
472 { 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
473 { 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
474 { 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
475 { 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
476
477 { 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
478 { 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
479 { 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
480};
481
482#define NUMBER(x) (sizeof(x) / sizeof(*(x)))
483#define SECTSIZE (_FD_SECTSIZE(*floppy))
484
485/* Auto-detection: Disk type used until the next media change occurs. */
486static struct floppy_struct *current_type[N_DRIVE];
487
488/*
489 * User-provided type information. current_type points to
490 * the respective entry of this array.
491 */
492static struct floppy_struct user_params[N_DRIVE];
493
494static sector_t floppy_sizes[256];
495
496/*
497 * The driver is trying to determine the correct media format
498 * while probing is set. rw_interrupt() clears it after a
499 * successful access.
500 */
501static int probing;
502
503/* Synchronization of FDC access. */
504#define FD_COMMAND_NONE -1
505#define FD_COMMAND_ERROR 2
506#define FD_COMMAND_OKAY 3
507
508static volatile int command_status = FD_COMMAND_NONE;
509static unsigned long fdc_busy;
510static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
511static DECLARE_WAIT_QUEUE_HEAD(command_done);
512
513#define NO_SIGNAL (!interruptible || !signal_pending(current))
514#define CALL(x) if ((x) == -EINTR) return -EINTR
515#define ECALL(x) if ((ret = (x))) return ret;
516#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
517#define WAIT(x) _WAIT((x),interruptible)
518#define IWAIT(x) _WAIT((x),1)
519
520/* Errors during formatting are counted here. */
521static int format_errors;
522
523/* Format request descriptor. */
524static struct format_descr format_req;
525
526/*
527 * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps
528 * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
529 * H is head unload time (1=16ms, 2=32ms, etc)
530 */
531
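Decoding a concrete value per the encoding just described: the Spec1 byte 0xDF used by most entries in the floppy_type table above selects a 3 ms stepping rate (high nibble 0xD) and a 240 ms head unload time (low nibble 0xF, i.e. 15 * 16 ms).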
532/*
533 * Track buffer
534 * Because these are written to by the DMA controller, they must
535 * not contain a 64k byte boundary crossing, or data will be
536 * corrupted/lost.
537 */
538static char *floppy_track_buffer;
539static int max_buffer_sectors;
540
541static int *errors;
542typedef void (*done_f) (int);
543static struct cont_t {
544 void (*interrupt) (void); /* this is called after the interrupt of the
545 * main command */
546 void (*redo) (void); /* this is called to retry the operation */
547 void (*error) (void); /* this is called to tally an error */
548 done_f done; /* this is called to say if the operation has
549 * succeeded/failed */
550} *cont;
551
552static void floppy_ready(void);
553static void floppy_start(void);
554static void process_fd_request(void);
555static void recalibrate_floppy(void);
556static void floppy_shutdown(unsigned long);
557
558static int floppy_grab_irq_and_dma(void);
559static void floppy_release_irq_and_dma(void);
560
561/*
562 * The "reset" variable should be tested whenever an interrupt is scheduled,
563 * after the commands have been sent. This is to ensure that the driver doesn't
564 * get wedged when the interrupt doesn't come because of a failed command.
565 * reset doesn't need to be tested before sending commands, because
566 * output_byte is automatically disabled when reset is set.
567 */
568#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
569static void reset_fdc(void);
570
571/*
572 * These are global variables, as that's the easiest way to give
573 * information to interrupts. They are the data used for the current
574 * request.
575 */
576#define NO_TRACK -1
577#define NEED_1_RECAL -2
578#define NEED_2_RECAL -3
579
580static int usage_count;
581
582/* buffer related variables */
583static int buffer_track = -1;
584static int buffer_drive = -1;
585static int buffer_min = -1;
586static int buffer_max = -1;
587
588/* fdc related variables, should end up in a struct */
589static struct floppy_fdc_state fdc_state[N_FDC];
590static int fdc; /* current fdc */
591
592static struct floppy_struct *_floppy = floppy_type;
593static unsigned char current_drive;
594static long current_count_sectors;
595static unsigned char fsector_t; /* sector in track */
596static unsigned char in_sector_offset; /* offset within physical sector,
597 * expressed in units of 512 bytes */
598
599#ifndef fd_eject
600static inline int fd_eject(int drive)
601{
602 return -EINVAL;
603}
604#endif
605
606/*
607 * Debugging
608 * =========
609 */
610#ifdef DEBUGT
611static long unsigned debugtimer;
612
613static inline void set_debugt(void)
614{
615 debugtimer = jiffies;
616}
617
618static inline void debugt(const char *message)
619{
620 if (DP->flags & DEBUGT)
621 printk("%s dtime=%lu\n", message, jiffies - debugtimer);
622}
623#else
624static inline void set_debugt(void) { }
625static inline void debugt(const char *message) { }
626#endif /* DEBUGT */
627
628typedef void (*timeout_fn) (unsigned long);
629static struct timer_list fd_timeout = TIMER_INITIALIZER(floppy_shutdown, 0, 0);
630
631static const char *timeout_message;
632
633#ifdef FLOPPY_SANITY_CHECK
634static void is_alive(const char *message)
635{
636 /* this routine checks whether the floppy driver is "alive" */
637 if (test_bit(0, &fdc_busy) && command_status < 2
638 && !timer_pending(&fd_timeout)) {
639 DPRINT("timeout handler died: %s\n", message);
640 }
641}
642#endif
643
644static void (*do_floppy) (void) = NULL;
645
646#ifdef FLOPPY_SANITY_CHECK
647
648#define OLOGSIZE 20
649
650static void (*lasthandler) (void);
651static unsigned long interruptjiffies;
652static unsigned long resultjiffies;
653static int resultsize;
654static unsigned long lastredo;
655
656static struct output_log {
657 unsigned char data;
658 unsigned char status;
659 unsigned long jiffies;
660} output_log[OLOGSIZE];
661
662static int output_log_pos;
663#endif
664
665#define current_reqD -1
666#define MAXTIMEOUT -2
667
668static void __reschedule_timeout(int drive, const char *message, int marg)
669{
670 if (drive == current_reqD)
671 drive = current_drive;
672 del_timer(&fd_timeout);
673 if (drive < 0 || drive > N_DRIVE) {
674 fd_timeout.expires = jiffies + 20UL * HZ;
675 drive = 0;
676 } else
677 fd_timeout.expires = jiffies + UDP->timeout;
678 add_timer(&fd_timeout);
679 if (UDP->flags & FD_DEBUG) {
680 DPRINT("reschedule timeout ");
681 printk(message, marg);
682 printk("\n");
683 }
684 timeout_message = message;
685}
686
687static void reschedule_timeout(int drive, const char *message, int marg)
688{
689 unsigned long flags;
690
691 spin_lock_irqsave(&floppy_lock, flags);
692 __reschedule_timeout(drive, message, marg);
693 spin_unlock_irqrestore(&floppy_lock, flags);
694}
695
696#define INFBOUND(a,b) (a)=max_t(int, a, b)
697
698#define SUPBOUND(a,b) (a)=min_t(int, a, b)
699
700/*
701 * Bottom half floppy driver.
702 * ==========================
703 *
704 * This part of the file contains the code talking directly to the hardware,
705 * and also the main service loop (seek-configure-spinup-command)
706 */
707
708/*
709 * disk change.
710 * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
711 * and the last_checked date.
712 *
713 * last_checked is the date of the last check which showed 'no disk change'
714 * FD_DISK_CHANGE is set under two conditions:
715 * 1. The floppy has been changed after some i/o to that floppy already
716 * took place.
717 * 2. No floppy disk is in the drive. This is done in order to ensure that
718 * requests are quickly flushed in case there is no disk in the drive. It
719 * follows that FD_DISK_CHANGE can only be cleared if there is a disk in
720 * the drive.
721 *
722 * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
723 * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
724 * each seek. If a disk is present, the disk change line should also be
725 * cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
726 * change line is set, this means either that no disk is in the drive, or
727 * that it has been removed since the last seek.
728 *
729 * This means that we really have a third possibility too:
730 * The floppy has been changed after the last seek.
731 */
732
733static int disk_change(int drive)
734{
735 int fdc = FDC(drive);
736#ifdef FLOPPY_SANITY_CHECK
737 if (jiffies - UDRS->select_date < UDP->select_delay)
738 DPRINT("WARNING disk change called early\n");
739 if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
740 (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
741 DPRINT("probing disk change on unselected drive\n");
742 DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
743 (unsigned int)FDCS->dor);
744 }
745#endif
746
747#ifdef DCL_DEBUG
748 if (UDP->flags & FD_DEBUG) {
749 DPRINT("checking disk change line for drive %d\n", drive);
750 DPRINT("jiffies=%lu\n", jiffies);
751 DPRINT("disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
752 DPRINT("flags=%lx\n", UDRS->flags);
753 }
754#endif
755 if (UDP->flags & FD_BROKEN_DCL)
756 return UTESTF(FD_DISK_CHANGED);
757 if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
758 USETF(FD_VERIFY); /* verify write protection */
759 if (UDRS->maxblock) {
760 /* mark it changed */
761 USETF(FD_DISK_CHANGED);
762 }
763
764 /* invalidate its geometry */
765 if (UDRS->keep_data >= 0) {
766 if ((UDP->flags & FTD_MSG) &&
767 current_type[drive] != NULL)
768 DPRINT("Disk type is undefined after "
769 "disk change\n");
770 current_type[drive] = NULL;
771 floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
772 }
773
774 /*USETF(FD_DISK_NEWCHANGE); */
775 return 1;
776 } else {
777 UDRS->last_checked = jiffies;
778 UCLEARF(FD_DISK_NEWCHANGE);
779 }
780 return 0;
781}
782
783static inline int is_selected(int dor, int unit)
784{
785 return ((dor & (0x10 << unit)) && (dor & 3) == unit);
786}
787
788static int set_dor(int fdc, char mask, char data)
789{
790 register unsigned char drive, unit, newdor, olddor;
791
792 if (FDCS->address == -1)
793 return -1;
794
795 olddor = FDCS->dor;
796 newdor = (olddor & mask) | data;
797 if (newdor != olddor) {
798 unit = olddor & 0x3;
799 if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
800 drive = REVDRIVE(fdc, unit);
801#ifdef DCL_DEBUG
802 if (UDP->flags & FD_DEBUG) {
803 DPRINT("calling disk change from set_dor\n");
804 }
805#endif
806 disk_change(drive);
807 }
808 FDCS->dor = newdor;
809 fd_outb(newdor, FD_DOR);
810
811 unit = newdor & 0x3;
812 if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
813 drive = REVDRIVE(fdc, unit);
814 UDRS->select_date = jiffies;
815 }
816 }
817 /*
818 * We should propagate failures to grab the resources back
819 * nicely from here. Actually we ought to rewrite the fd
820 * driver some day too.
821 */
822 if (newdor & FLOPPY_MOTOR_MASK)
823 floppy_grab_irq_and_dma();
824 if (olddor & FLOPPY_MOTOR_MASK)
825 floppy_release_irq_and_dma();
826 return olddor;
827}
828
829static void twaddle(void)
830{
831 if (DP->select_delay)
832 return;
833 fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
834 fd_outb(FDCS->dor, FD_DOR);
835 DRS->select_date = jiffies;
836}
837
838/* reset all driver information about the current fdc. This is needed after
839 * a reset, and after a raw command. */
840static void reset_fdc_info(int mode)
841{
842 int drive;
843
844 FDCS->spec1 = FDCS->spec2 = -1;
845 FDCS->need_configure = 1;
846 FDCS->perp_mode = 1;
847 FDCS->rawcmd = 0;
848 for (drive = 0; drive < N_DRIVE; drive++)
849 if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL))
850 UDRS->track = NEED_2_RECAL;
851}
852
853/* selects the fdc and drive, and enables the fdc's input/dma. */
854static void set_fdc(int drive)
855{
856 if (drive >= 0 && drive < N_DRIVE) {
857 fdc = FDC(drive);
858 current_drive = drive;
859 }
860 if (fdc != 1 && fdc != 0) {
861 printk("bad fdc value\n");
862 return;
863 }
864 set_dor(fdc, ~0, 8);
865#if N_FDC > 1
866 set_dor(1 - fdc, ~8, 0);
867#endif
868 if (FDCS->rawcmd == 2)
869 reset_fdc_info(1);
870 if (fd_inb(FD_STATUS) != STATUS_READY)
871 FDCS->reset = 1;
872}
873
874/* locks the driver */
875static int _lock_fdc(int drive, int interruptible, int line)
876{
877 if (!usage_count) {
878 printk(KERN_ERR
879 "Trying to lock fdc while usage count=0 at line %d\n",
880 line);
881 return -1;
882 }
883 if (floppy_grab_irq_and_dma() == -1)
884 return -EBUSY;
885
886 if (test_and_set_bit(0, &fdc_busy)) {
887 DECLARE_WAITQUEUE(wait, current);
888 add_wait_queue(&fdc_wait, &wait);
889
890 for (;;) {
891 set_current_state(TASK_INTERRUPTIBLE);
892
893 if (!test_and_set_bit(0, &fdc_busy))
894 break;
895
896 schedule();
897
898 if (!NO_SIGNAL) {
899 remove_wait_queue(&fdc_wait, &wait);
900 return -EINTR;
901 }
902 }
903
904 set_current_state(TASK_RUNNING);
905 remove_wait_queue(&fdc_wait, &wait);
906 }
907 command_status = FD_COMMAND_NONE;
908
909 __reschedule_timeout(drive, "lock fdc", 0);
910 set_fdc(drive);
911 return 0;
912}
913
914#define lock_fdc(drive,interruptible) _lock_fdc(drive,interruptible, __LINE__)
915
916#define LOCK_FDC(drive,interruptible) \
917if (lock_fdc(drive,interruptible)) return -EINTR;
918
919/* unlocks the driver */
920static inline void unlock_fdc(void)
921{
922 unsigned long flags;
923
924 raw_cmd = NULL;
925 if (!test_bit(0, &fdc_busy))
926 DPRINT("FDC access conflict!\n");
927
928 if (do_floppy)
929 DPRINT("device interrupt still active at FDC release: %p!\n",
930 do_floppy);
931 command_status = FD_COMMAND_NONE;
932 spin_lock_irqsave(&floppy_lock, flags);
933 del_timer(&fd_timeout);
934 cont = NULL;
935 clear_bit(0, &fdc_busy);
936 if (elv_next_request(floppy_queue))
937 do_fd_request(floppy_queue);
938 spin_unlock_irqrestore(&floppy_lock, flags);
939 floppy_release_irq_and_dma();
940 wake_up(&fdc_wait);
941}
942
943/* switches the motor off after a given timeout */
944static void motor_off_callback(unsigned long nr)
945{
946 unsigned char mask = ~(0x10 << UNIT(nr));
947
948 set_dor(FDC(nr), mask, 0);
949}
950
951/* schedules motor off */
952static void floppy_off(unsigned int drive)
953{
954 unsigned long volatile delta;
955 register int fdc = FDC(drive);
956
957 if (!(FDCS->dor & (0x10 << UNIT(drive))))
958 return;
959
960 del_timer(motor_off_timer + drive);
961
962 /* make spindle stop in a position which minimizes spinup time
963 * next time */
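	/* delta below is the time elapsed since the first read, reduced
	 * modulo one disk revolution (HZ/rps jiffies); subtracting it from
	 * the spindown delay makes the motor-off point fall close to a
	 * whole number of revolutions after the first read, so the spindle
	 * tends to stop near the same position every time. */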
964 if (UDP->rps) {
965 delta = jiffies - UDRS->first_read_date + HZ -
966 UDP->spindown_offset;
967 delta = ((delta * UDP->rps) % HZ) / UDP->rps;
968 motor_off_timer[drive].expires =
969 jiffies + UDP->spindown - delta;
970 }
971 add_timer(motor_off_timer + drive);
972}
973
974/*
975 * cycle through all N_DRIVE floppy drives, for disk change testing,
976 * stopping at the current drive. This is done before any long operation,
977 * to be sure to have up-to-date disk change information.
978 */
979static void scandrives(void)
980{
981 int i, drive, saved_drive;
982
983 if (DP->select_delay)
984 return;
985
986 saved_drive = current_drive;
987 for (i = 0; i < N_DRIVE; i++) {
988 drive = (saved_drive + i + 1) % N_DRIVE;
989 if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
990 continue; /* skip closed drives */
991 set_fdc(drive);
992 if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
993 (0x10 << UNIT(drive))))
994 /* switch the motor off again, if it was off to
995 * begin with */
996 set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
997 }
998 set_fdc(saved_drive);
999}
1000
1001static void empty(void)
1002{
1003}
1004
1005static DECLARE_WORK(floppy_work, NULL, NULL);
1006
1007static void schedule_bh(void (*handler) (void))
1008{
1009 PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
1010 schedule_work(&floppy_work);
1011}
1012
1013static struct timer_list fd_timer = TIMER_INITIALIZER(NULL, 0, 0);
1014
1015static void cancel_activity(void)
1016{
1017 unsigned long flags;
1018
1019 spin_lock_irqsave(&floppy_lock, flags);
1020 do_floppy = NULL;
1021 PREPARE_WORK(&floppy_work, (void *)empty, NULL);
1022 del_timer(&fd_timer);
1023 spin_unlock_irqrestore(&floppy_lock, flags);
1024}
1025
1026/* this function makes sure that the disk stays in the drive during the
1027 * transfer */
1028static void fd_watchdog(void)
1029{
1030#ifdef DCL_DEBUG
1031 if (DP->flags & FD_DEBUG) {
1032 DPRINT("calling disk change from watchdog\n");
1033 }
1034#endif
1035
1036 if (disk_change(current_drive)) {
1037 DPRINT("disk removed during i/o\n");
1038 cancel_activity();
1039 cont->done(0);
1040 reset_fdc();
1041 } else {
1042 del_timer(&fd_timer);
1043 fd_timer.function = (timeout_fn) fd_watchdog;
1044 fd_timer.expires = jiffies + HZ / 10;
1045 add_timer(&fd_timer);
1046 }
1047}
1048
1049static void main_command_interrupt(void)
1050{
1051 del_timer(&fd_timer);
1052 cont->interrupt();
1053}
1054
1055/* waits for a delay (spinup or select) to pass */
1056static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
1057{
1058 if (FDCS->reset) {
1059 reset_fdc(); /* do the reset during sleep to win time
1060 * if we don't need to sleep, it's a good
1061 * occasion anyway. */
1062 return 1;
1063 }
1064
1065 if ((signed)(jiffies - delay) < 0) {
1066 del_timer(&fd_timer);
1067 fd_timer.function = function;
1068 fd_timer.expires = delay;
1069 add_timer(&fd_timer);
1070 return 1;
1071 }
1072 return 0;
1073}
1074
1075static DEFINE_SPINLOCK(floppy_hlt_lock);
1076static int hlt_disabled;
1077static void floppy_disable_hlt(void)
1078{
1079 unsigned long flags;
1080
1081 spin_lock_irqsave(&floppy_hlt_lock, flags);
1082 if (!hlt_disabled) {
1083 hlt_disabled = 1;
1084#ifdef HAVE_DISABLE_HLT
1085 disable_hlt();
1086#endif
1087 }
1088 spin_unlock_irqrestore(&floppy_hlt_lock, flags);
1089}
1090
1091static void floppy_enable_hlt(void)
1092{
1093 unsigned long flags;
1094
1095 spin_lock_irqsave(&floppy_hlt_lock, flags);
1096 if (hlt_disabled) {
1097 hlt_disabled = 0;
1098#ifdef HAVE_DISABLE_HLT
1099 enable_hlt();
1100#endif
1101 }
1102 spin_unlock_irqrestore(&floppy_hlt_lock, flags);
1103}
1104
1105static void setup_DMA(void)
1106{
1107 unsigned long f;
1108
1109#ifdef FLOPPY_SANITY_CHECK
1110 if (raw_cmd->length == 0) {
1111 int i;
1112
1113 printk("zero dma transfer size:");
1114 for (i = 0; i < raw_cmd->cmd_count; i++)
1115 printk("%x,", raw_cmd->cmd[i]);
1116 printk("\n");
1117 cont->done(0);
1118 FDCS->reset = 1;
1119 return;
1120 }
1121 if (((unsigned long)raw_cmd->kernel_data) % 512) {
1122 printk("non aligned address: %p\n", raw_cmd->kernel_data);
1123 cont->done(0);
1124 FDCS->reset = 1;
1125 return;
1126 }
1127#endif
1128 f = claim_dma_lock();
1129 fd_disable_dma();
1130#ifdef fd_dma_setup
1131 if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
1132 (raw_cmd->flags & FD_RAW_READ) ?
1133 DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
1134 release_dma_lock(f);
1135 cont->done(0);
1136 FDCS->reset = 1;
1137 return;
1138 }
1139 release_dma_lock(f);
1140#else
1141 fd_clear_dma_ff();
1142 fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
1143 fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
1144 DMA_MODE_READ : DMA_MODE_WRITE);
1145 fd_set_dma_addr(raw_cmd->kernel_data);
1146 fd_set_dma_count(raw_cmd->length);
1147 virtual_dma_port = FDCS->address;
1148 fd_enable_dma();
1149 release_dma_lock(f);
1150#endif
1151 floppy_disable_hlt();
1152}
1153
1154static void show_floppy(void);
1155
1156/* waits until the fdc becomes ready */
1157static int wait_til_ready(void)
1158{
1159 int counter, status;
1160 if (FDCS->reset)
1161 return -1;
1162 for (counter = 0; counter < 10000; counter++) {
1163 status = fd_inb(FD_STATUS);
1164 if (status & STATUS_READY)
1165 return status;
1166 }
1167 if (!initialising) {
1168 DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
1169 show_floppy();
1170 }
1171 FDCS->reset = 1;
1172 return -1;
1173}
1174
1175/* sends a command byte to the fdc */
1176static int output_byte(char byte)
1177{
1178 int status;
1179
1180 if ((status = wait_til_ready()) < 0)
1181 return -1;
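	/* RQM set with DIO and NDMA clear means the FDC is waiting for a
	 * byte from the CPU, so the data register may be written. */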
1182 if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY) {
1183 fd_outb(byte, FD_DATA);
1184#ifdef FLOPPY_SANITY_CHECK
1185 output_log[output_log_pos].data = byte;
1186 output_log[output_log_pos].status = status;
1187 output_log[output_log_pos].jiffies = jiffies;
1188 output_log_pos = (output_log_pos + 1) % OLOGSIZE;
1189#endif
1190 return 0;
1191 }
1192 FDCS->reset = 1;
1193 if (!initialising) {
1194 DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
1195 byte, fdc, status);
1196 show_floppy();
1197 }
1198 return -1;
1199}
1200
1201#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
1202
1203/* gets the response from the fdc */
1204static int result(void)
1205{
1206 int i, status = 0;
1207
1208 for (i = 0; i < MAX_REPLIES; i++) {
1209 if ((status = wait_til_ready()) < 0)
1210 break;
1211 status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
1212 if ((status & ~STATUS_BUSY) == STATUS_READY) {
1213#ifdef FLOPPY_SANITY_CHECK
1214 resultjiffies = jiffies;
1215 resultsize = i;
1216#endif
1217 return i;
1218 }
1219 if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
1220 reply_buffer[i] = fd_inb(FD_DATA);
1221 else
1222 break;
1223 }
1224 if (!initialising) {
1225 DPRINT
1226 ("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
1227 fdc, status, i);
1228 show_floppy();
1229 }
1230 FDCS->reset = 1;
1231 return -1;
1232}
1233
1234#define MORE_OUTPUT -2
1235/* does the fdc need more output? */
1236static int need_more_output(void)
1237{
1238 int status;
1239 if ((status = wait_til_ready()) < 0)
1240 return -1;
1241 if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY)
1242 return MORE_OUTPUT;
1243 return result();
1244}
1245
1246/* Set perpendicular mode as required, based on data rate, if supported.
1247 * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
1248 */
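/* Bit 0x40 of raw_cmd->rate requests perpendicular recording (as used
 * by 2.88MB ED media); the low two bits encode the data rate and select
 * the argument byte sent with the FD_PERPENDICULAR command below. */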
1249static inline void perpendicular_mode(void)
1250{
1251 unsigned char perp_mode;
1252
1253 if (raw_cmd->rate & 0x40) {
1254 switch (raw_cmd->rate & 3) {
1255 case 0:
1256 perp_mode = 2;
1257 break;
1258 case 3:
1259 perp_mode = 3;
1260 break;
1261 default:
1262 DPRINT("Invalid data rate for perpendicular mode!\n");
1263 cont->done(0);
1264 FDCS->reset = 1; /* convenient way to return to
1265 * redo without too much hassle (deep
1266 * stack et al.). */
1267 return;
1268 }
1269 } else
1270 perp_mode = 0;
1271
1272 if (FDCS->perp_mode == perp_mode)
1273 return;
1274 if (FDCS->version >= FDC_82077_ORIG) {
1275 output_byte(FD_PERPENDICULAR);
1276 output_byte(perp_mode);
1277 FDCS->perp_mode = perp_mode;
1278 } else if (perp_mode) {
1279 DPRINT("perpendicular mode not supported by this FDC.\n");
1280 }
1281} /* perpendicular_mode */
1282
1283static int fifo_depth = 0xa;
1284static int no_fifo;
1285
1286static int fdc_configure(void)
1287{
1288 /* Turn on FIFO */
1289 output_byte(FD_CONFIGURE);
1290 if (need_more_output() != MORE_OUTPUT)
1291 return 0;
1292 output_byte(0);
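	/* configuration byte: bit 4 turns off drive polling, bit 5 (no_fifo)
	 * turns off the FIFO, and the low nibble is the FIFO threshold. */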
1293 output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
1294 output_byte(0); /* pre-compensation from track
1295 0 upwards */
1296 return 1;
1297}
1298
1299#define NOMINAL_DTR 500
1300
1301/* Issue a "SPECIFY" command to set the step rate time, head unload time,
1302 * head load time, and DMA disable flag to values needed by floppy.
1303 *
1304 * The value "dtr" is the data transfer rate in Kbps. It is needed
1305 * to account for the data rate-based scaling done by the 82072 and 82077
1306 * FDC types. This parameter is ignored for other types of FDCs (i.e.
1307 * 8272a).
1308 *
1309 * Note that changing the data transfer rate has a (probably deleterious)
1310 * effect on the parameters subject to scaling for 82072/82077 FDCs, so
1311 * fdc_specify is called again after each data transfer rate
1312 * change.
1313 *
1314 * srt: 1000 to 16000 in microseconds
1315 * hut: 16 to 240 milliseconds
1316 * hlt: 2 to 254 milliseconds
1317 *
1318 * These values are rounded up to the next highest available delay time.
1319 */
1320static void fdc_specify(void)
1321{
1322 unsigned char spec1, spec2;
1323 unsigned long srt, hlt, hut;
1324 unsigned long dtr = NOMINAL_DTR;
1325 unsigned long scale_dtr = NOMINAL_DTR;
1326 int hlt_max_code = 0x7f;
1327 int hut_max_code = 0xf;
1328
1329 if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
1330 fdc_configure();
1331 FDCS->need_configure = 0;
1332 /*DPRINT("FIFO enabled\n"); */
1333 }
1334
1335 switch (raw_cmd->rate & 0x03) {
1336 case 3:
1337 dtr = 1000;
1338 break;
1339 case 1:
1340 dtr = 300;
1341 if (FDCS->version >= FDC_82078) {
1342 /* chose the default rate table, not the one
1343 * where 1 = 2 Mbps */
1344 output_byte(FD_DRIVESPEC);
1345 if (need_more_output() == MORE_OUTPUT) {
1346 output_byte(UNIT(current_drive));
1347 output_byte(0xc0);
1348 }
1349 }
1350 break;
1351 case 2:
1352 dtr = 250;
1353 break;
1354 }
1355
1356 if (FDCS->version >= FDC_82072) {
1357 scale_dtr = dtr;
1358 hlt_max_code = 0x00; /* 0==256msec*dtr0/dtr (not linear!) */
1359 hut_max_code = 0x0; /* 0==256msec*dtr0/dtr (not linear!) */
1360 }
1361
1362 /* Convert step rate from microseconds to milliseconds and 4 bits */
1363 srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR;
1364 if (slow_floppy) {
1365 srt = srt / 4;
1366 }
1367 SUPBOUND(srt, 0xf);
1368 INFBOUND(srt, 0);
1369
1370 hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR;
1371 if (hlt < 0x01)
1372 hlt = 0x01;
1373 else if (hlt > 0x7f)
1374 hlt = hlt_max_code;
1375
1376 hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR;
1377 if (hut < 0x1)
1378 hut = 0x1;
1379 else if (hut > 0xf)
1380 hut = hut_max_code;
1381
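	/* Pack the SPECIFY parameter bytes: SRT in the high nibble and HUT
	 * in the low nibble of the first byte, HLT in bits 7..1 and the
	 * non-DMA flag in bit 0 of the second. */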
1382 spec1 = (srt << 4) | hut;
1383 spec2 = (hlt << 1) | (use_virtual_dma & 1);
1384
1385 /* Issue the SPECIFY command only if the parameters actually changed */
1386 if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
1387 /* Go ahead and set spec1 and spec2 */
1388 output_byte(FD_SPECIFY);
1389 output_byte(FDCS->spec1 = spec1);
1390 output_byte(FDCS->spec2 = spec2);
1391 }
1392} /* fdc_specify */
1393
1394/* Set the FDC's data transfer rate on behalf of the specified drive.
1395 * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
1396 * of the specify command (i.e. using the fdc_specify function).
1397 */
1398static int fdc_dtr(void)
1399{
1400 /* If data rate not already set to desired value, set it. */
1401 if ((raw_cmd->rate & 3) == FDCS->dtr)
1402 return 0;
1403
1404 /* Set dtr */
1405 fd_outb(raw_cmd->rate & 3, FD_DCR);
1406
1407 /* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
1408 * need a stabilization period of several milliseconds to be
1409 * enforced after data rate changes before R/W operations.
1410 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
1411 */
1412 FDCS->dtr = raw_cmd->rate & 3;
1413 return (fd_wait_for_completion(jiffies + 2UL * HZ / 100,
1414 (timeout_fn) floppy_ready));
1415} /* fdc_dtr */
1416
1417static void tell_sector(void)
1418{
1419 printk(": track %d, head %d, sector %d, size %d",
1420 R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
1421} /* tell_sector */
1422
1423/*
1424 * OK, this error interpreting routine is called after a DMA
1425 * read/write has succeeded or failed, so we check the results
1426 * and copy any buffers.
1427 * hhb: Added better error reporting.
1428 * ak: Made this into a separate routine.
1429 */
1430static int interpret_errors(void)
1431{
1432 char bad;
1433
1434 if (inr != 7) {
1435 DPRINT("-- FDC reply error");
1436 FDCS->reset = 1;
1437 return 1;
1438 }
1439
1440 /* check IC to find cause of interrupt */
1441 switch (ST0 & ST0_INTR) {
1442 case 0x40: /* error occurred during command execution */
1443 if (ST1 & ST1_EOC)
1444 return 0; /* occurs with pseudo-DMA */
1445 bad = 1;
1446 if (ST1 & ST1_WP) {
1447 DPRINT("Drive is write protected\n");
1448 CLEARF(FD_DISK_WRITABLE);
1449 cont->done(0);
1450 bad = 2;
1451 } else if (ST1 & ST1_ND) {
1452 SETF(FD_NEED_TWADDLE);
1453 } else if (ST1 & ST1_OR) {
1454 if (DP->flags & FTD_MSG)
1455 DPRINT("Over/Underrun - retrying\n");
1456 bad = 0;
1457 } else if (*errors >= DP->max_errors.reporting) {
1458 DPRINT("");
1459 if (ST0 & ST0_ECE) {
1460 printk("Recalibrate failed!");
1461 } else if (ST2 & ST2_CRC) {
1462 printk("data CRC error");
1463 tell_sector();
1464 } else if (ST1 & ST1_CRC) {
1465 printk("CRC error");
1466 tell_sector();
1467 } else if ((ST1 & (ST1_MAM | ST1_ND))
1468 || (ST2 & ST2_MAM)) {
1469 if (!probing) {
1470 printk("sector not found");
1471 tell_sector();
1472 } else
1473 printk("probe failed...");
1474 } else if (ST2 & ST2_WC) { /* seek error */
1475 printk("wrong cylinder");
1476 } else if (ST2 & ST2_BC) { /* cylinder marked as bad */
1477 printk("bad cylinder");
1478 } else {
1479 printk
1480 ("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
1481 ST0, ST1, ST2);
1482 tell_sector();
1483 }
1484 printk("\n");
1485
1486 }
1487 if (ST2 & ST2_WC || ST2 & ST2_BC)
1488 /* wrong cylinder => recal */
1489 DRS->track = NEED_2_RECAL;
1490 return bad;
1491 case 0x80: /* invalid command given */
1492 DPRINT("Invalid FDC command given!\n");
1493 cont->done(0);
1494 return 2;
1495 case 0xc0:
1496 DPRINT("Abnormal termination caused by polling\n");
1497 cont->error();
1498 return 2;
1499 default: /* (0) Normal command termination */
1500 return 0;
1501 }
1502}
1503
1504/*
1505 * This routine is called when everything should be correctly set up
1506 * for the transfer (i.e. floppy motor is on, the correct floppy is
1507 * selected, and the head is sitting on the right track).
1508 */
1509static void setup_rw_floppy(void)
1510{
1511 int i, r, flags, dflags;
1512 unsigned long ready_date;
1513 timeout_fn function;
1514
1515 flags = raw_cmd->flags;
1516 if (flags & (FD_RAW_READ | FD_RAW_WRITE))
1517 flags |= FD_RAW_INTR;
1518
1519 if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
1520 ready_date = DRS->spinup_date + DP->spinup;
1521 /* If spinup will take a long time, rerun scandrives
1522 * just before spinup completion. Beware that
1523 * after scandrives, we must again wait for selection.
1524 */
1525 if ((signed)(ready_date - jiffies) > DP->select_delay) {
1526 ready_date -= DP->select_delay;
1527 function = (timeout_fn) floppy_start;
1528 } else
1529 function = (timeout_fn) setup_rw_floppy;
1530
1531 /* wait until the floppy is spinning fast enough */
1532 if (fd_wait_for_completion(ready_date, function))
1533 return;
1534 }
1535 dflags = DRS->flags;
1536
1537 if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
1538 setup_DMA();
1539
1540 if (flags & FD_RAW_INTR)
1541 do_floppy = main_command_interrupt;
1542
1543 r = 0;
1544 for (i = 0; i < raw_cmd->cmd_count; i++)
1545 r |= output_byte(raw_cmd->cmd[i]);
1546
1547 debugt("rw_command: ");
1548
1549 if (r) {
1550 cont->error();
1551 reset_fdc();
1552 return;
1553 }
1554
1555 if (!(flags & FD_RAW_INTR)) {
1556 inr = result();
1557 cont->interrupt();
1558 } else if (flags & FD_RAW_NEED_DISK)
1559 fd_watchdog();
1560}
1561
1562static int blind_seek;
1563
1564/*
1565 * This is the routine called after every seek (or recalibrate) interrupt
1566 * from the floppy controller.
1567 */
1568static void seek_interrupt(void)
1569{
1570 debugt("seek interrupt:");
1571 if (inr != 2 || (ST0 & 0xF8) != 0x20) {
1572 DPRINT("seek failed\n");
1573 DRS->track = NEED_2_RECAL;
1574 cont->error();
1575 cont->redo();
1576 return;
1577 }
1578 if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
1579#ifdef DCL_DEBUG
1580 if (DP->flags & FD_DEBUG) {
1581 DPRINT
1582 ("clearing NEWCHANGE flag because of effective seek\n");
1583 DPRINT("jiffies=%lu\n", jiffies);
1584 }
1585#endif
1586 CLEARF(FD_DISK_NEWCHANGE); /* effective seek */
1587 DRS->select_date = jiffies;
1588 }
1589 DRS->track = ST1;
1590 floppy_ready();
1591}
1592
1593static void check_wp(void)
1594{
1595 if (TESTF(FD_VERIFY)) {
1596 /* check write protection */
1597 output_byte(FD_GETSTATUS);
1598 output_byte(UNIT(current_drive));
1599 if (result() != 1) {
1600 FDCS->reset = 1;
1601 return;
1602 }
1603 CLEARF(FD_VERIFY);
1604 CLEARF(FD_NEED_TWADDLE);
1605#ifdef DCL_DEBUG
1606 if (DP->flags & FD_DEBUG) {
1607 DPRINT("checking whether disk is write protected\n");
1608 DPRINT("wp=%x\n", ST3 & 0x40);
1609 }
1610#endif
1611 if (!(ST3 & 0x40))
1612 SETF(FD_DISK_WRITABLE);
1613 else
1614 CLEARF(FD_DISK_WRITABLE);
1615 }
1616}
1617
1618static void seek_floppy(void)
1619{
1620 int track;
1621
1622 blind_seek = 0;
1623
1624#ifdef DCL_DEBUG
1625 if (DP->flags & FD_DEBUG) {
1626 DPRINT("calling disk change from seek\n");
1627 }
1628#endif
1629
1630 if (!TESTF(FD_DISK_NEWCHANGE) &&
1631 disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
1632 /* the media changed flag should be cleared after the seek.
1633 * If it isn't, this means that there is really no disk in
1634 * the drive.
1635 */
1636 SETF(FD_DISK_CHANGED);
1637 cont->done(0);
1638 cont->redo();
1639 return;
1640 }
1641 if (DRS->track <= NEED_1_RECAL) {
1642 recalibrate_floppy();
1643 return;
1644 } else if (TESTF(FD_DISK_NEWCHANGE) &&
1645 (raw_cmd->flags & FD_RAW_NEED_DISK) &&
1646 (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
1647 /* we seek to clear the media-changed condition. Does anybody
1648 * know a more elegant way, which works on all drives? */
1649 if (raw_cmd->track)
1650 track = raw_cmd->track - 1;
1651 else {
1652 if (DP->flags & FD_SILENT_DCL_CLEAR) {
1653 set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
1654 blind_seek = 1;
1655 raw_cmd->flags |= FD_RAW_NEED_SEEK;
1656 }
1657 track = 1;
1658 }
1659 } else {
1660 check_wp();
1661 if (raw_cmd->track != DRS->track &&
1662 (raw_cmd->flags & FD_RAW_NEED_SEEK))
1663 track = raw_cmd->track;
1664 else {
1665 setup_rw_floppy();
1666 return;
1667 }
1668 }
1669
1670 do_floppy = seek_interrupt;
1671 output_byte(FD_SEEK);
1672 output_byte(UNIT(current_drive));
1673 LAST_OUT(track);
1674 debugt("seek command:");
1675}
1676
1677static void recal_interrupt(void)
1678{
1679 debugt("recal interrupt:");
1680 if (inr != 2)
1681 FDCS->reset = 1;
1682 else if (ST0 & ST0_ECE) {
1683 switch (DRS->track) {
1684 case NEED_1_RECAL:
1685 debugt("recal interrupt need 1 recal:");
1686 /* after a second recalibrate, we still haven't
1687 * reached track 0. Probably no drive. Raise an
1688 * error, as failing immediately might upset
1689 * computers possessed by the Devil :-) */
1690 cont->error();
1691 cont->redo();
1692 return;
1693 case NEED_2_RECAL:
1694 debugt("recal interrupt need 2 recal:");
1695 /* If we already did a recalibrate,
1696 * and we are not at track 0, this
1697 * means we have moved. (The only way
1698 * not to move at recalibration is to
1699 * be already at track 0.) Clear the
1700 * new change flag */
1701#ifdef DCL_DEBUG
1702 if (DP->flags & FD_DEBUG) {
1703 DPRINT
1704 ("clearing NEWCHANGE flag because of second recalibrate\n");
1705 }
1706#endif
1707
1708 CLEARF(FD_DISK_NEWCHANGE);
1709 DRS->select_date = jiffies;
1710 /* fall through */
1711 default:
1712 debugt("recal interrupt default:");
1713 /* Recalibrate moves the head by at
1714 * most 80 steps. If after one
1715 * recalibrate we haven't reached
1716 * track 0, this might mean that we
1717 * started beyond track 80. Try
1718 * again. */
1719 DRS->track = NEED_1_RECAL;
1720 break;
1721 }
1722 } else
1723 DRS->track = ST1;
1724 floppy_ready();
1725}
1726
1727static void print_result(char *message, int inr)
1728{
1729 int i;
1730
1731 DPRINT("%s ", message);
1732 if (inr >= 0)
1733 for (i = 0; i < inr; i++)
1734 printk("repl[%d]=%x ", i, reply_buffer[i]);
1735 printk("\n");
1736}
1737
1738/* interrupt handler. Note that this can be called externally on the Sparc */
1739irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1740{
1741 void (*handler) (void) = do_floppy;
1742 int do_print;
1743 unsigned long f;
1744
1745 lasthandler = handler;
1746 interruptjiffies = jiffies;
1747
1748 f = claim_dma_lock();
1749 fd_disable_dma();
1750 release_dma_lock(f);
1751
1752 floppy_enable_hlt();
1753 do_floppy = NULL;
1754 if (fdc >= N_FDC || FDCS->address == -1) {
1755 /* we don't even know which FDC is the culprit */
1756 printk("DOR0=%x\n", fdc_state[0].dor);
1757 printk("floppy interrupt on bizarre fdc %d\n", fdc);
1758 printk("handler=%p\n", handler);
1759 is_alive("bizarre fdc");
1760 return IRQ_NONE;
1761 }
1762
1763 FDCS->reset = 0;
1764 /* We have to clear the reset flag here, because apparently on boxes
1765 * with level triggered interrupts (PS/2, Sparc, ...), SENSEI commands
1766 * must be emitted to clear the interrupt line, and FDCS->reset blocks
1767 * the emission of those SENSEI's.
1768 * It is OK to emit floppy commands because we are in an interrupt
1769 * handler here, and thus need not fear interference from other
1770 * activity.
1771 */
1772
1773 do_print = !handler && print_unex && !initialising;
1774
1775 inr = result();
1776 if (do_print)
1777 print_result("unexpected interrupt", inr);
1778 if (inr == 0) {
1779 int max_sensei = 4;
1780 do {
1781 output_byte(FD_SENSEI);
1782 inr = result();
1783 if (do_print)
1784 print_result("sensei", inr);
1785 max_sensei--;
1786 } while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2
1787 && max_sensei);
1788 }
1789 if (!handler) {
1790 FDCS->reset = 1;
1791 return IRQ_NONE;
1792 }
1793 schedule_bh(handler);
1794 is_alive("normal interrupt end");
1795
1796 /* FIXME! Was it really for us? */
1797 return IRQ_HANDLED;
1798}
1799
1800static void recalibrate_floppy(void)
1801{
1802 debugt("recalibrate floppy:");
1803 do_floppy = recal_interrupt;
1804 output_byte(FD_RECALIBRATE);
1805 LAST_OUT(UNIT(current_drive));
1806}
1807
1808/*
1809 * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
1810 */
1811static void reset_interrupt(void)
1812{
1813 debugt("reset interrupt:");
1814 result(); /* get the status ready for set_fdc */
1815 if (FDCS->reset) {
1816 printk("reset set in interrupt, calling %p\n", cont->error);
1817 cont->error(); /* a reset just after a reset. BAD! */
1818 }
1819 cont->redo();
1820}
1821
1822/*
1823 * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
1824 * or by setting the self clearing bit 7 of STATUS (newer FDCs)
1825 */
1826static void reset_fdc(void)
1827{
1828 unsigned long flags;
1829
1830 do_floppy = reset_interrupt;
1831 FDCS->reset = 0;
1832 reset_fdc_info(0);
1833
1834 /* Pseudo-DMA may intercept 'reset finished' interrupt. */
1835 /* Irrelevant for systems with true DMA (i386). */
1836
1837 flags = claim_dma_lock();
1838 fd_disable_dma();
1839 release_dma_lock(flags);
1840
1841 if (FDCS->version >= FDC_82072A)
1842 fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
1843 else {
1844 fd_outb(FDCS->dor & ~0x04, FD_DOR);
1845 udelay(FD_RESET_DELAY);
1846 fd_outb(FDCS->dor, FD_DOR);
1847 }
1848}
1849
1850static void show_floppy(void)
1851{
1852 int i;
1853
1854 printk("\n");
1855 printk("floppy driver state\n");
1856 printk("-------------------\n");
1857 printk("now=%lu last interrupt=%lu diff=%lu last called handler=%p\n",
1858 jiffies, interruptjiffies, jiffies - interruptjiffies,
1859 lasthandler);
1860
1861#ifdef FLOPPY_SANITY_CHECK
1862 printk("timeout_message=%s\n", timeout_message);
1863 printk("last output bytes:\n");
1864 for (i = 0; i < OLOGSIZE; i++)
1865 printk("%2x %2x %lu\n",
1866 output_log[(i + output_log_pos) % OLOGSIZE].data,
1867 output_log[(i + output_log_pos) % OLOGSIZE].status,
1868 output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
1869 printk("last result at %lu\n", resultjiffies);
1870 printk("last redo_fd_request at %lu\n", lastredo);
1871 for (i = 0; i < resultsize; i++) {
1872 printk("%2x ", reply_buffer[i]);
1873 }
1874 printk("\n");
1875#endif
1876
1877 printk("status=%x\n", fd_inb(FD_STATUS));
1878 printk("fdc_busy=%lu\n", fdc_busy);
1879 if (do_floppy)
1880 printk("do_floppy=%p\n", do_floppy);
1881 if (floppy_work.pending)
1882 printk("floppy_work.func=%p\n", floppy_work.func);
1883 if (timer_pending(&fd_timer))
1884 printk("fd_timer.function=%p\n", fd_timer.function);
1885 if (timer_pending(&fd_timeout)) {
1886 printk("timer_function=%p\n", fd_timeout.function);
1887 printk("expires=%lu\n", fd_timeout.expires - jiffies);
1888 printk("now=%lu\n", jiffies);
1889 }
1890 printk("cont=%p\n", cont);
1891 printk("current_req=%p\n", current_req);
1892 printk("command_status=%d\n", command_status);
1893 printk("\n");
1894}
1895
1896static void floppy_shutdown(unsigned long data)
1897{
1898 unsigned long flags;
1899
1900 if (!initialising)
1901 show_floppy();
1902 cancel_activity();
1903
1904 floppy_enable_hlt();
1905
1906 flags = claim_dma_lock();
1907 fd_disable_dma();
1908 release_dma_lock(flags);
1909
1910 /* avoid dma going to a random drive after shutdown */
1911
1912 if (!initialising)
1913 DPRINT("floppy timeout called\n");
1914 FDCS->reset = 1;
1915 if (cont) {
1916 cont->done(0);
1917 cont->redo(); /* this will recall reset when needed */
1918 } else {
1919 printk("no cont in shutdown!\n");
1920 process_fd_request();
1921 }
1922 is_alive("floppy shutdown");
1923}
1924
1925/*typedef void (*timeout_fn)(unsigned long);*/
1926
1927/* start motor, check media-changed condition and write protection */
1928static int start_motor(void (*function) (void))
1929{
1930 int mask, data;
1931
1932 mask = 0xfc;
1933 data = UNIT(current_drive);
1934 if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
1935 if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
1936 set_debugt();
1937 /* no read since this drive is running */
1938 DRS->first_read_date = 0;
1939 /* note motor start time if motor is not yet running */
1940 DRS->spinup_date = jiffies;
1941 data |= (0x10 << UNIT(current_drive));
1942 }
1943 } else if (FDCS->dor & (0x10 << UNIT(current_drive)))
1944 mask &= ~(0x10 << UNIT(current_drive));
1945
1946 /* starts motor and selects floppy */
1947 del_timer(motor_off_timer + current_drive);
1948 set_dor(fdc, mask, data);
1949
1950 /* wait_for_completion also schedules reset if needed. */
1951 return (fd_wait_for_completion(DRS->select_date + DP->select_delay,
1952 (timeout_fn) function));
1953}
1954
1955static void floppy_ready(void)
1956{
1957 CHECK_RESET;
1958 if (start_motor(floppy_ready))
1959 return;
1960 if (fdc_dtr())
1961 return;
1962
1963#ifdef DCL_DEBUG
1964 if (DP->flags & FD_DEBUG) {
1965 DPRINT("calling disk change from floppy_ready\n");
1966 }
1967#endif
1968 if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
1969 disk_change(current_drive) && !DP->select_delay)
1970 twaddle(); /* this clears the dcl on certain drive/controller
1971 * combinations */
1972
1973#ifdef fd_chose_dma_mode
1974 if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
1975 unsigned long flags = claim_dma_lock();
1976 fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
1977 release_dma_lock(flags);
1978 }
1979#endif
1980
1981 if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
1982 perpendicular_mode();
1983 fdc_specify(); /* must be done here because of hut, hlt ... */
1984 seek_floppy();
1985 } else {
1986 if ((raw_cmd->flags & FD_RAW_READ) ||
1987 (raw_cmd->flags & FD_RAW_WRITE))
1988 fdc_specify();
1989 setup_rw_floppy();
1990 }
1991}
1992
1993static void floppy_start(void)
1994{
1995 reschedule_timeout(current_reqD, "floppy start", 0);
1996
1997 scandrives();
1998#ifdef DCL_DEBUG
1999 if (DP->flags & FD_DEBUG) {
2000 DPRINT("setting NEWCHANGE in floppy_start\n");
2001 }
2002#endif
2003 SETF(FD_DISK_NEWCHANGE);
2004 floppy_ready();
2005}
2006
2007/*
2008 * ========================================================================
2009 * here ends the bottom half. Exported routines are:
2010 * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
2011 * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
2012 * Initialization also uses output_byte, result, set_dor and
2013 * floppy_interrupt.
2014 * ========================================================================
2015 */
2016/*
2017 * General purpose continuations.
2018 * ==============================
2019 */
2020
2021static void do_wakeup(void)
2022{
2023 reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
2024 cont = NULL;
2025 command_status += 2;
2026 wake_up(&command_done);
2027}
2028
2029static struct cont_t wakeup_cont = {
2030 .interrupt = empty,
2031 .redo = do_wakeup,
2032 .error = empty,
2033 .done = (done_f) empty
2034};
2035
2036static struct cont_t intr_cont = {
2037 .interrupt = empty,
2038 .redo = process_fd_request,
2039 .error = empty,
2040 .done = (done_f) empty
2041};
2042
2043static int wait_til_done(void (*handler) (void), int interruptible)
2044{
2045 int ret;
2046
2047 schedule_bh(handler);
2048
2049 if (command_status < 2 && NO_SIGNAL) {
2050 DECLARE_WAITQUEUE(wait, current);
2051
2052 add_wait_queue(&command_done, &wait);
2053 for (;;) {
2054 set_current_state(interruptible ?
2055 TASK_INTERRUPTIBLE :
2056 TASK_UNINTERRUPTIBLE);
2057
2058 if (command_status >= 2 || !NO_SIGNAL)
2059 break;
2060
2061 is_alive("wait_til_done");
2062
2063 schedule();
2064 }
2065
2066 set_current_state(TASK_RUNNING);
2067 remove_wait_queue(&command_done, &wait);
2068 }
2069
2070 if (command_status < 2) {
2071 cancel_activity();
2072 cont = &intr_cont;
2073 reset_fdc();
2074 return -EINTR;
2075 }
2076
2077 if (FDCS->reset)
2078 command_status = FD_COMMAND_ERROR;
2079 if (command_status == FD_COMMAND_OKAY)
2080 ret = 0;
2081 else
2082 ret = -EIO;
2083 command_status = FD_COMMAND_NONE;
2084 return ret;
2085}
2086
2087static void generic_done(int result)
2088{
2089 command_status = result;
2090 cont = &wakeup_cont;
2091}
2092
2093static void generic_success(void)
2094{
2095 cont->done(1);
2096}
2097
2098static void generic_failure(void)
2099{
2100 cont->done(0);
2101}
2102
2103static void success_and_wakeup(void)
2104{
2105 generic_success();
2106 cont->redo();
2107}
2108
2109/*
2110 * formatting and rw support.
2111 * ==========================
2112 */
2113
2114static int next_valid_format(void)
2115{
2116 int probed_format;
2117
2118 probed_format = DRS->probed_format;
2119 while (1) {
2120 if (probed_format >= 8 || !DP->autodetect[probed_format]) {
2121 DRS->probed_format = 0;
2122 return 1;
2123 }
2124 if (floppy_type[DP->autodetect[probed_format]].sect) {
2125 DRS->probed_format = probed_format;
2126 return 0;
2127 }
2128 probed_format++;
2129 }
2130}
2131
2132static void bad_flp_intr(void)
2133{
2134 int err_count;
2135
2136 if (probing) {
2137 DRS->probed_format++;
2138 if (!next_valid_format())
2139 return;
2140 }
2141 err_count = ++(*errors);
2142 INFBOUND(DRWE->badness, err_count);
2143 if (err_count > DP->max_errors.abort)
2144 cont->done(0);
2145 if (err_count > DP->max_errors.reset)
2146 FDCS->reset = 1;
2147 else if (err_count > DP->max_errors.recal)
2148 DRS->track = NEED_2_RECAL;
2149}
2150
2151static void set_floppy(int drive)
2152{
2153 int type = ITYPE(UDRS->fd_device);
2154 if (type)
2155 _floppy = floppy_type + type;
2156 else
2157 _floppy = current_type[drive];
2158}
2159
2160/*
2161 * formatting support.
2162 * ===================
2163 */
2164static void format_interrupt(void)
2165{
2166 switch (interpret_errors()) {
2167 case 1:
2168 cont->error();
2169 case 2:
2170 break;
2171 case 0:
2172 cont->done(1);
2173 }
2174 cont->redo();
2175}
2176
2177#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)
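/* CODE2SIZE sets ssize to the physical sector size in 512-byte units
 * (a sector is 128 << SIZECODE bytes), rounded up to at least 1. */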
2178#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
2179#define CT(x) ((x) | 0xc0)
2180static void setup_format_params(int track)
2181{
2182 struct fparm {
2183 unsigned char track, head, sect, size;
2184 } *here = (struct fparm *)floppy_track_buffer;
2185 int il, n;
2186 int count, head_shift, track_shift;
2187
2188 raw_cmd = &default_raw_cmd;
2189 raw_cmd->track = track;
2190
2191 raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
2192 FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
2193 raw_cmd->rate = _floppy->rate & 0x43;
2194 raw_cmd->cmd_count = NR_F;
2195 COMMAND = FM_MODE(_floppy, FD_FORMAT);
2196 DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
2197 F_SIZECODE = FD_SIZECODE(_floppy);
2198 F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
2199 F_GAP = _floppy->fmt_gap;
2200 F_FILL = FD_FILL_BYTE;
2201
2202 raw_cmd->kernel_data = floppy_track_buffer;
2203 raw_cmd->length = 4 * F_SECT_PER_TRACK;
2204
2205 /* allow for about 30ms for data transport per track */
2206 head_shift = (F_SECT_PER_TRACK + 5) / 6;
2207
2208 /* a ``cylinder'' is two tracks plus a little stepping time */
2209 track_shift = 2 * head_shift + 3;
2210
2211 /* position of logical sector 1 on this track */
2212 n = (track_shift * format_req.track + head_shift * format_req.head)
2213 % F_SECT_PER_TRACK;
2214
2215 /* determine interleave */
2216 il = 1;
2217 if (_floppy->fmt_gap < 0x22)
2218 il++;
2219
2220 /* initialize field */
2221 for (count = 0; count < F_SECT_PER_TRACK; ++count) {
2222 here[count].track = format_req.track;
2223 here[count].head = format_req.head;
2224 here[count].sect = 0;
2225 here[count].size = F_SIZECODE;
2226 }
2227 /* place logical sectors */
2228 for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
2229 here[n].sect = count;
2230 n = (n + il) % F_SECT_PER_TRACK;
2231 if (here[n].sect) { /* sector busy, find next free sector */
2232 ++n;
2233 if (n >= F_SECT_PER_TRACK) {
2234 n -= F_SECT_PER_TRACK;
2235 while (here[n].sect)
2236 ++n;
2237 }
2238 }
2239 }
2240 if (_floppy->stretch & FD_ZEROBASED) {
2241 for (count = 0; count < F_SECT_PER_TRACK; count++)
2242 here[count].sect--;
2243 }
2244}
2245
2246static void redo_format(void)
2247{
2248 buffer_track = -1;
2249 setup_format_params(format_req.track << STRETCH(_floppy));
2250 floppy_start();
2251 debugt("queue format request");
2252}
2253
2254static struct cont_t format_cont = {
2255 .interrupt = format_interrupt,
2256 .redo = redo_format,
2257 .error = bad_flp_intr,
2258 .done = generic_done
2259};
2260
2261static int do_format(int drive, struct format_descr *tmp_format_req)
2262{
2263 int ret;
2264
2265 LOCK_FDC(drive, 1);
2266 set_floppy(drive);
2267 if (!_floppy ||
2268 _floppy->track > DP->tracks ||
2269 tmp_format_req->track >= _floppy->track ||
2270 tmp_format_req->head >= _floppy->head ||
2271 (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
2272 !_floppy->fmt_gap) {
2273 process_fd_request();
2274 return -EINVAL;
2275 }
2276 format_req = *tmp_format_req;
2277 format_errors = 0;
2278 cont = &format_cont;
2279 errors = &format_errors;
2280 IWAIT(redo_format);
2281 process_fd_request();
2282 return ret;
2283}
2284
2285/*
2286 * Buffer read/write and support
2287 * =============================
2288 */
2289
2290static void floppy_end_request(struct request *req, int uptodate)
2291{
2292 unsigned int nr_sectors = current_count_sectors;
2293
2294 /* current_count_sectors can be zero if transfer failed */
2295 if (!uptodate)
2296 nr_sectors = req->current_nr_sectors;
2297 if (end_that_request_first(req, uptodate, nr_sectors))
2298 return;
2299 add_disk_randomness(req->rq_disk);
2300 floppy_off((long)req->rq_disk->private_data);
2301 blkdev_dequeue_request(req);
2302 end_that_request_last(req);
2303
2304 /* We're done with the request */
2305 current_req = NULL;
2306}
2307
2308/* new request_done. Can handle physical sectors which are smaller than a
2309 * logical buffer */
2310static void request_done(int uptodate)
2311{
2312 struct request_queue *q = floppy_queue;
2313 struct request *req = current_req;
2314 unsigned long flags;
2315 int block;
2316
2317 probing = 0;
2318 reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
2319
2320 if (!req) {
2321 printk("floppy.c: no request in request_done\n");
2322 return;
2323 }
2324
2325 if (uptodate) {
2326 /* maintain values for invalidation on geometry
2327 * change */
2328 block = current_count_sectors + req->sector;
2329 INFBOUND(DRS->maxblock, block);
2330 if (block > _floppy->sect)
2331 DRS->maxtrack = 1;
2332
2333 /* unlock chained buffers */
2334 spin_lock_irqsave(q->queue_lock, flags);
2335 floppy_end_request(req, 1);
2336 spin_unlock_irqrestore(q->queue_lock, flags);
2337 } else {
2338 if (rq_data_dir(req) == WRITE) {
2339 /* record write error information */
2340 DRWE->write_errors++;
2341 if (DRWE->write_errors == 1) {
2342 DRWE->first_error_sector = req->sector;
2343 DRWE->first_error_generation = DRS->generation;
2344 }
2345 DRWE->last_error_sector = req->sector;
2346 DRWE->last_error_generation = DRS->generation;
2347 }
2348 spin_lock_irqsave(q->queue_lock, flags);
2349 floppy_end_request(req, 0);
2350 spin_unlock_irqrestore(q->queue_lock, flags);
2351 }
2352}
2353
2354/* Interrupt handler evaluating the result of the r/w operation */
2355static void rw_interrupt(void)
2356{
2357 int nr_sectors, ssize, eoc, heads;
2358
2359 if (R_HEAD >= 2) {
2360 /* some Toshiba floppy controllers occasionally seem to
2361 * return bogus interrupts after read/write operations, which
2362 * can be recognized by a bad head number (>= 2) */
2363 return;
2364 }
2365
2366 if (!DRS->first_read_date)
2367 DRS->first_read_date = jiffies;
2368
2369 nr_sectors = 0;
2370 CODE2SIZE;
2371
2372 if (ST1 & ST1_EOC)
2373 eoc = 1;
2374 else
2375 eoc = 0;
2376
2377 if (COMMAND & 0x80)
2378 heads = 2;
2379 else
2380 heads = 1;
2381
2382 nr_sectors = (((R_TRACK - TRACK) * heads +
2383 R_HEAD - HEAD) * SECT_PER_TRACK +
2384 R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
2385
2386#ifdef FLOPPY_SANITY_CHECK
2387 if (nr_sectors / ssize >
2388 (in_sector_offset + current_count_sectors + ssize - 1) / ssize) {
2389 DPRINT("long rw: %x instead of %lx\n",
2390 nr_sectors, current_count_sectors);
2391 printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
2392 printk("rh=%d h=%d\n", R_HEAD, HEAD);
2393 printk("rt=%d t=%d\n", R_TRACK, TRACK);
2394 printk("heads=%d eoc=%d\n", heads, eoc);
2395 printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
2396 fsector_t, ssize);
2397 printk("in_sector_offset=%d\n", in_sector_offset);
2398 }
2399#endif
2400
2401 nr_sectors -= in_sector_offset;
2402 INFBOUND(nr_sectors, 0);
2403 SUPBOUND(current_count_sectors, nr_sectors);
2404
2405 switch (interpret_errors()) {
2406 case 2:
2407 cont->redo();
2408 return;
2409 case 1:
2410 if (!current_count_sectors) {
2411 cont->error();
2412 cont->redo();
2413 return;
2414 }
2415 break;
2416 case 0:
2417 if (!current_count_sectors) {
2418 cont->redo();
2419 return;
2420 }
2421 current_type[current_drive] = _floppy;
2422 floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
2423 break;
2424 }
2425
2426 if (probing) {
2427 if (DP->flags & FTD_MSG)
2428 DPRINT("Auto-detected floppy type %s in fd%d\n",
2429 _floppy->name, current_drive);
2430 current_type[current_drive] = _floppy;
2431 floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
2432 probing = 0;
2433 }
2434
2435 if (CT(COMMAND) != FD_READ ||
2436 raw_cmd->kernel_data == current_req->buffer) {
2437 /* transfer directly from buffer */
2438 cont->done(1);
2439 } else if (CT(COMMAND) == FD_READ) {
2440 buffer_track = raw_cmd->track;
2441 buffer_drive = current_drive;
2442 INFBOUND(buffer_max, nr_sectors + fsector_t);
2443 }
2444 cont->redo();
2445}
2446
2447/* Compute maximal contiguous buffer size. */
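/* Walks the bio segments of the current request and counts how many
 * 512-byte sectors are physically contiguous with the start of the
 * first segment; the walk stops at the first gap. */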
2448static int buffer_chain_size(void)
2449{
2450 struct bio *bio;
2451 struct bio_vec *bv;
2452 int size, i;
2453 char *base;
2454
2455 base = bio_data(current_req->bio);
2456 size = 0;
2457
2458 rq_for_each_bio(bio, current_req) {
2459 bio_for_each_segment(bv, bio, i) {
2460 if (page_address(bv->bv_page) + bv->bv_offset !=
2461 base + size)
2462 break;
2463
2464 size += bv->bv_len;
2465 }
2466 }
2467
2468 return size >> 9;
2469}
2470
2471/* Compute the maximal transfer size */
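/* max_sector is clipped to the end of the request and then rounded down
 * so the transfer ends on an ssize boundary within the track;
 * current_count_sectors is set as a side effect and the clipped value
 * is returned. */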
2472static int transfer_size(int ssize, int max_sector, int max_size)
2473{
2474 SUPBOUND(max_sector, fsector_t + max_size);
2475
2476 /* alignment */
2477 max_sector -= (max_sector % _floppy->sect) % ssize;
2478
2479 /* transfer size, beginning not aligned */
2480 current_count_sectors = max_sector - fsector_t;
2481
2482 return max_sector;
2483}
2484
2485/*
2486 * Move data from/to the track buffer to/from the buffer cache.
2487 */
2488static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2489{
2490 int remaining; /* number of transferred 512-byte sectors */
2491 struct bio_vec *bv;
2492 struct bio *bio;
2493 char *buffer, *dma_buffer;
2494 int size, i;
2495
2496 max_sector = transfer_size(ssize,
2497 min(max_sector, max_sector_2),
2498 current_req->nr_sectors);
2499
2500 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
2501 buffer_max > fsector_t + current_req->nr_sectors)
2502 current_count_sectors = min_t(int, buffer_max - fsector_t,
2503 current_req->nr_sectors);
2504
2505 remaining = current_count_sectors << 9;
2506#ifdef FLOPPY_SANITY_CHECK
2507 if ((remaining >> 9) > current_req->nr_sectors &&
2508 CT(COMMAND) == FD_WRITE) {
2509 DPRINT("in copy buffer\n");
2510 printk("current_count_sectors=%ld\n", current_count_sectors);
2511 printk("remaining=%d\n", remaining >> 9);
2512 printk("current_req->nr_sectors=%ld\n",
2513 current_req->nr_sectors);
2514 printk("current_req->current_nr_sectors=%u\n",
2515 current_req->current_nr_sectors);
2516 printk("max_sector=%d\n", max_sector);
2517 printk("ssize=%d\n", ssize);
2518 }
2519#endif
2520
2521 buffer_max = max(max_sector, buffer_max);
2522
2523 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
2524
2525 size = current_req->current_nr_sectors << 9;
2526
2527 rq_for_each_bio(bio, current_req) {
2528 bio_for_each_segment(bv, bio, i) {
2529 if (!remaining)
2530 break;
2531
2532 size = bv->bv_len;
2533 SUPBOUND(size, remaining);
2534
2535 buffer = page_address(bv->bv_page) + bv->bv_offset;
2536#ifdef FLOPPY_SANITY_CHECK
2537 if (dma_buffer + size >
2538 floppy_track_buffer + (max_buffer_sectors << 10) ||
2539 dma_buffer < floppy_track_buffer) {
2540 DPRINT("buffer overrun in copy buffer %d\n",
2541 (int)((floppy_track_buffer -
2542 dma_buffer) >> 9));
2543 printk("fsector_t=%d buffer_min=%d\n",
2544 fsector_t, buffer_min);
2545 printk("current_count_sectors=%ld\n",
2546 current_count_sectors);
2547 if (CT(COMMAND) == FD_READ)
2548 printk("read\n");
2549 if (CT(COMMAND) == FD_WRITE)
2550 printk("write\n");
2551 break;
2552 }
2553 if (((unsigned long)buffer) % 512)
2554 DPRINT("%p buffer not aligned\n", buffer);
2555#endif
2556 if (CT(COMMAND) == FD_READ)
2557 memcpy(buffer, dma_buffer, size);
2558 else
2559 memcpy(dma_buffer, buffer, size);
2560
2561 remaining -= size;
2562 dma_buffer += size;
2563 }
2564 }
2565#ifdef FLOPPY_SANITY_CHECK
2566 if (remaining) {
2567 if (remaining > 0)
2568 max_sector -= remaining >> 9;
2569 DPRINT("weirdness: remaining %d\n", remaining >> 9);
2570 }
2571#endif
2572}
2573
2574#if 0
2575static inline int check_dma_crossing(char *start,
2576 unsigned long length, char *message)
2577{
2578 if (CROSS_64KB(start, length)) {
2579 printk("DMA xfer crosses 64KB boundary in %s %p-%p\n",
2580 message, start, start + length);
2581 return 1;
2582 } else
2583 return 0;
2584}
2585#endif
2586
2587 /* work around a bug in pseudo DMA:
2588 * on some FDCs, pseudo DMA does not stop when the CPU stops
2589 * sending data. Hence we need a different way to signal the
2590 * transfer length: We use SECT_PER_TRACK. Unfortunately, this
2591 * does not work with MT, hence we can only transfer one head at
2592 * a time
2593 */
2594static void virtualdmabug_workaround(void)
2595{
2596 int hard_sectors, end_sector;
2597
2598 if (CT(COMMAND) == FD_WRITE) {
2599 COMMAND &= ~0x80; /* switch off multiple track mode */
2600
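		/* raw_cmd->length is in bytes and each physical sector is
		 * 128 << SIZECODE bytes, so shifting by (7 + SIZECODE)
		 * yields the number of hardware sectors in the transfer. */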
2601 hard_sectors = raw_cmd->length >> (7 + SIZECODE);
2602 end_sector = SECTOR + hard_sectors - 1;
2603#ifdef FLOPPY_SANITY_CHECK
2604 if (end_sector > SECT_PER_TRACK) {
2605 printk("too many sectors %d > %d\n",
2606 end_sector, SECT_PER_TRACK);
2607 return;
2608 }
2609#endif
2610 SECT_PER_TRACK = end_sector; /* make sure SECT_PER_TRACK points
2611 * to end of transfer */
2612 }
2613}
2614
2615/*
2616 * Formulate a read/write request.
2617 * This routine decides where to load the data (directly to buffer, or to
2618 * tmp floppy area), how much data to load (the size of the buffer, the whole
2619 * track, or a single sector)
2620 * All floppy_track_buffer handling goes in here. If we ever add track buffer
2621 * allocation on the fly, it should be done here. No other part should need
2622 * modification.
2623 */
2624
2625static int make_raw_rw_request(void)
2626{
2627 int aligned_sector_t;
2628 int max_sector, max_size, tracksize, ssize;
2629
2630 if (max_buffer_sectors == 0) {
2631 printk("VFS: Block I/O scheduled on unopened device\n");
2632 return 0;
2633 }
2634
2635 set_fdc((long)current_req->rq_disk->private_data);
2636
2637 raw_cmd = &default_raw_cmd;
2638 raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK |
2639 FD_RAW_NEED_SEEK;
2640 raw_cmd->cmd_count = NR_RW;
2641 if (rq_data_dir(current_req) == READ) {
2642 raw_cmd->flags |= FD_RAW_READ;
2643 COMMAND = FM_MODE(_floppy, FD_READ);
2644 } else if (rq_data_dir(current_req) == WRITE) {
2645 raw_cmd->flags |= FD_RAW_WRITE;
2646 COMMAND = FM_MODE(_floppy, FD_WRITE);
2647 } else {
2648 DPRINT("make_raw_rw_request: unknown command\n");
2649 return 0;
2650 }
2651
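	/* At this point max_sector is the number of 512-byte sectors per
	 * cylinder; TRACK becomes the cylinder number and fsector_t the
	 * sector offset within that cylinder, from which HEAD is derived. */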
2652 max_sector = _floppy->sect * _floppy->head;
2653
2654 TRACK = (int)current_req->sector / max_sector;
2655 fsector_t = (int)current_req->sector % max_sector;
2656 if (_floppy->track && TRACK >= _floppy->track) {
2657 if (current_req->current_nr_sectors & 1) {
2658 current_count_sectors = 1;
2659 return 1;
2660 } else
2661 return 0;
2662 }
2663 HEAD = fsector_t / _floppy->sect;
2664
2665 if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) ||
2666 TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
2667 max_sector = _floppy->sect;
2668
2669 /* 2M disks have phantom sectors on the first track */
2670 if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
2671 max_sector = 2 * _floppy->sect / 3;
2672 if (fsector_t >= max_sector) {
2673 current_count_sectors =
2674 min_t(int, _floppy->sect - fsector_t,
2675 current_req->nr_sectors);
2676 return 1;
2677 }
2678 SIZECODE = 2;
2679 } else
2680 SIZECODE = FD_SIZECODE(_floppy);
2681 raw_cmd->rate = _floppy->rate & 0x43;
2682 if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
2683 raw_cmd->rate = 1;
2684
2685 if (SIZECODE)
2686 SIZECODE2 = 0xff;
2687 else
2688 SIZECODE2 = 0x80;
2689 raw_cmd->track = TRACK << STRETCH(_floppy);
2690 DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
2691 GAP = _floppy->gap;
2692 CODE2SIZE;
2693 SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
2694 SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
2695 ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1);
2696
2697 /* tracksize is the part of the track (in 512-byte units) which can
2698 * be filled up with sectors of size ssize.
2699 */
2700 tracksize = _floppy->sect - _floppy->sect % ssize;
2701 if (tracksize < _floppy->sect) {
2702 SECT_PER_TRACK++;
2703 if (tracksize <= fsector_t % _floppy->sect)
2704 SECTOR--;
2705
2706 /* if we are beyond tracksize, fill up using smaller sectors */
2707 while (tracksize <= fsector_t % _floppy->sect) {
2708 while (tracksize + ssize > _floppy->sect) {
2709 SIZECODE--;
2710 ssize >>= 1;
2711 }
2712 SECTOR++;
2713 SECT_PER_TRACK++;
2714 tracksize += ssize;
2715 }
2716 max_sector = HEAD * _floppy->sect + tracksize;
2717 } else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) {
2718 max_sector = _floppy->sect;
2719 } else if (!HEAD && CT(COMMAND) == FD_WRITE) {
2720 /* for virtual DMA bug workaround */
2721 max_sector = _floppy->sect;
2722 }
2723
2724 in_sector_offset = (fsector_t % _floppy->sect) % ssize;
2725 aligned_sector_t = fsector_t - in_sector_offset;
2726 max_size = current_req->nr_sectors;
2727 if ((raw_cmd->track == buffer_track) &&
2728 (current_drive == buffer_drive) &&
2729 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
2730 /* data already in track buffer */
2731 if (CT(COMMAND) == FD_READ) {
2732 copy_buffer(1, max_sector, buffer_max);
2733 return 1;
2734 }
2735 } else if (in_sector_offset || current_req->nr_sectors < ssize) {
2736 if (CT(COMMAND) == FD_WRITE) {
2737 if (fsector_t + current_req->nr_sectors > ssize &&
2738 fsector_t + current_req->nr_sectors < ssize + ssize)
2739 max_size = ssize + ssize;
2740 else
2741 max_size = ssize;
2742 }
2743 raw_cmd->flags &= ~FD_RAW_WRITE;
2744 raw_cmd->flags |= FD_RAW_READ;
2745 COMMAND = FM_MODE(_floppy, FD_READ);
2746 } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) {
2747 unsigned long dma_limit;
2748 int direct, indirect;
2749
2750 indirect =
2751 transfer_size(ssize, max_sector,
2752 max_buffer_sectors * 2) - fsector_t;
2753
2754 /*
2755 * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
2756 * on a 64 bit machine!
2757 */
2758 max_size = buffer_chain_size();
2759 dma_limit =
2760 (MAX_DMA_ADDRESS -
2761 ((unsigned long)current_req->buffer)) >> 9;
2762 if ((unsigned long)max_size > dma_limit) {
2763 max_size = dma_limit;
2764 }
2765 /* 64 kb boundaries */
2766 if (CROSS_64KB(current_req->buffer, max_size << 9))
2767 max_size = (K_64 -
2768 ((unsigned long)current_req->buffer) %
2769 K_64) >> 9;
2770 direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
2771 /*
2772 * We try to read tracks, but if we get too many errors, we
2773 * go back to reading just one sector at a time.
2774 *
2775 * This means we should be able to read a sector even if there
2776 * are other bad sectors on this track.
2777 */
2778 if (!direct ||
2779 (indirect * 2 > direct * 3 &&
2780 *errors < DP->max_errors.read_track &&
2781 /*!TESTF(FD_NEED_TWADDLE) && */
2782 ((!probing
2783 || (DP->read_track & (1 << DRS->probed_format)))))) {
2784 max_size = current_req->nr_sectors;
2785 } else {
2786 raw_cmd->kernel_data = current_req->buffer;
2787 raw_cmd->length = current_count_sectors << 9;
2788 if (raw_cmd->length == 0) {
2789 DPRINT
2790 ("zero dma transfer attempted from make_raw_request\n");
2791 DPRINT("indirect=%d direct=%d fsector_t=%d",
2792 indirect, direct, fsector_t);
2793 return 0;
2794 }
2795/* check_dma_crossing(raw_cmd->kernel_data,
2796 raw_cmd->length,
2797 "end of make_raw_request [1]");*/
2798
2799 virtualdmabug_workaround();
2800 return 2;
2801 }
2802 }
2803
2804 if (CT(COMMAND) == FD_READ)
2805 max_size = max_sector; /* unbounded */
2806
2807 /* claim buffer track if needed */
2808 if (buffer_track != raw_cmd->track || /* bad track */
2809 buffer_drive != current_drive || /* bad drive */
2810 fsector_t > buffer_max ||
2811 fsector_t < buffer_min ||
2812 ((CT(COMMAND) == FD_READ ||
2813 (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
2814 max_sector > 2 * max_buffer_sectors + buffer_min &&
2815 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
2816 /* not enough space */
2817 ) {
2818 buffer_track = -1;
2819 buffer_drive = current_drive;
2820 buffer_max = buffer_min = aligned_sector_t;
2821 }
2822 raw_cmd->kernel_data = floppy_track_buffer +
2823 ((aligned_sector_t - buffer_min) << 9);
2824
2825 if (CT(COMMAND) == FD_WRITE) {
2826 /* copy write buffer to track buffer.
2827 * if we get here, we know that the write
2828 * is either aligned or the data already in the buffer
2829 * (buffer will be overwritten) */
2830#ifdef FLOPPY_SANITY_CHECK
2831 if (in_sector_offset && buffer_track == -1)
2832 DPRINT("internal error offset !=0 on write\n");
2833#endif
2834 buffer_track = raw_cmd->track;
2835 buffer_drive = current_drive;
2836 copy_buffer(ssize, max_sector,
2837 2 * max_buffer_sectors + buffer_min);
2838 } else
2839 transfer_size(ssize, max_sector,
2840 2 * max_buffer_sectors + buffer_min -
2841 aligned_sector_t);
2842
2843 /* round up current_count_sectors to get dma xfer size */
2844 raw_cmd->length = in_sector_offset + current_count_sectors;
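	/* ((n - 1) | (ssize - 1)) + 1 rounds n up to the next multiple of
	 * ssize (a power of two). */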
2845 raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
2846 raw_cmd->length <<= 9;
2847#ifdef FLOPPY_SANITY_CHECK
2848 /*check_dma_crossing(raw_cmd->kernel_data, raw_cmd->length,
2849 "end of make_raw_request"); */
2850 if ((raw_cmd->length < current_count_sectors << 9) ||
2851 (raw_cmd->kernel_data != current_req->buffer &&
2852 CT(COMMAND) == FD_WRITE &&
2853 (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
2854 aligned_sector_t < buffer_min)) ||
2855 raw_cmd->length % (128 << SIZECODE) ||
2856 raw_cmd->length <= 0 || current_count_sectors <= 0) {
2857 DPRINT("fractional current count b=%lx s=%lx\n",
2858 raw_cmd->length, current_count_sectors);
2859 if (raw_cmd->kernel_data != current_req->buffer)
2860 printk("addr=%d, length=%ld\n",
2861 (int)((raw_cmd->kernel_data -
2862 floppy_track_buffer) >> 9),
2863 current_count_sectors);
2864 printk("st=%d ast=%d mse=%d msi=%d\n",
2865 fsector_t, aligned_sector_t, max_sector, max_size);
2866 printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
2867 printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
2868 COMMAND, SECTOR, HEAD, TRACK);
2869 printk("buffer drive=%d\n", buffer_drive);
2870 printk("buffer track=%d\n", buffer_track);
2871 printk("buffer_min=%d\n", buffer_min);
2872 printk("buffer_max=%d\n", buffer_max);
2873 return 0;
2874 }
2875
2876 if (raw_cmd->kernel_data != current_req->buffer) {
2877 if (raw_cmd->kernel_data < floppy_track_buffer ||
2878 current_count_sectors < 0 ||
2879 raw_cmd->length < 0 ||
2880 raw_cmd->kernel_data + raw_cmd->length >
2881 floppy_track_buffer + (max_buffer_sectors << 10)) {
2882 DPRINT("buffer overrun in schedule dma\n");
2883 printk("fsector_t=%d buffer_min=%d current_count=%ld\n",
2884 fsector_t, buffer_min, raw_cmd->length >> 9);
2885 printk("current_count_sectors=%ld\n",
2886 current_count_sectors);
2887 if (CT(COMMAND) == FD_READ)
2888 printk("read\n");
2889 if (CT(COMMAND) == FD_WRITE)
2890 printk("write\n");
2891 return 0;
2892 }
2893 } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
2894 current_count_sectors > current_req->nr_sectors) {
2895 DPRINT("buffer overrun in direct transfer\n");
2896 return 0;
2897 } else if (raw_cmd->length < current_count_sectors << 9) {
2898 DPRINT("more sectors than bytes\n");
2899 printk("bytes=%ld\n", raw_cmd->length >> 9);
2900 printk("sectors=%ld\n", current_count_sectors);
2901 }
2902 if (raw_cmd->length == 0) {
2903 DPRINT("zero dma transfer attempted from make_raw_request\n");
2904 return 0;
2905 }
2906#endif
2907
2908 virtualdmabug_workaround();
2909 return 2;
2910}
2911
2912static void redo_fd_request(void)
2913{
2914#define REPEAT {request_done(0); continue; }
2915 int drive;
2916 int tmp;
2917
2918 lastredo = jiffies;
2919 if (current_drive < N_DRIVE)
2920 floppy_off(current_drive);
2921
2922 for (;;) {
2923 if (!current_req) {
2924 struct request *req;
2925
2926 spin_lock_irq(floppy_queue->queue_lock);
2927 req = elv_next_request(floppy_queue);
2928 spin_unlock_irq(floppy_queue->queue_lock);
2929 if (!req) {
2930 do_floppy = NULL;
2931 unlock_fdc();
2932 return;
2933 }
2934 current_req = req;
2935 }
2936 drive = (long)current_req->rq_disk->private_data;
2937 set_fdc(drive);
2938 reschedule_timeout(current_reqD, "redo fd request", 0);
2939
2940 set_floppy(drive);
2941 raw_cmd = &default_raw_cmd;
2942 raw_cmd->flags = 0;
2943 if (start_motor(redo_fd_request))
2944 return;
2945 disk_change(current_drive);
2946 if (test_bit(current_drive, &fake_change) ||
2947 TESTF(FD_DISK_CHANGED)) {
2948 DPRINT("disk absent or changed during operation\n");
2949 REPEAT;
2950 }
2951 if (!_floppy) { /* Autodetection */
2952 if (!probing) {
2953 DRS->probed_format = 0;
2954 if (next_valid_format()) {
2955 DPRINT("no autodetectable formats\n");
2956 _floppy = NULL;
2957 REPEAT;
2958 }
2959 }
2960 probing = 1;
2961 _floppy =
2962 floppy_type + DP->autodetect[DRS->probed_format];
2963 } else
2964 probing = 0;
2965 errors = &(current_req->errors);
2966 tmp = make_raw_rw_request();
2967 if (tmp < 2) {
2968 request_done(tmp);
2969 continue;
2970 }
2971
2972 if (TESTF(FD_NEED_TWADDLE))
2973 twaddle();
2974 schedule_bh(floppy_start);
2975 debugt("queue fd request");
2976 return;
2977 }
2978#undef REPEAT
2979}
2980
2981static struct cont_t rw_cont = {
2982 .interrupt = rw_interrupt,
2983 .redo = redo_fd_request,
2984 .error = bad_flp_intr,
2985 .done = request_done
2986};
2987
2988static void process_fd_request(void)
2989{
2990 cont = &rw_cont;
2991 schedule_bh(redo_fd_request);
2992}
2993
2994static void do_fd_request(request_queue_t * q)
2995{
2996 if (max_buffer_sectors == 0) {
2997 printk("VFS: do_fd_request called on non-open device\n");
2998 return;
2999 }
3000
3001 if (usage_count == 0) {
3002 printk("warning: usage count=0, current_req=%p exiting\n",
3003 current_req);
3004 printk("sect=%ld flags=%lx\n", (long)current_req->sector,
3005 current_req->flags);
3006 return;
3007 }
3008 if (test_bit(0, &fdc_busy)) {
3009 /* fdc busy, this new request will be handled when the
3010 current one is done */
3011 is_alive("do fd request, old request running");
3012 return;
3013 }
3014 lock_fdc(MAXTIMEOUT, 0);
3015 process_fd_request();
3016 is_alive("do fd request");
3017}
3018
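/*
 * Request flow, as wired up above: the block layer calls do_fd_request(),
 * which takes the fdc lock and calls process_fd_request(); that installs
 * rw_cont and schedules redo_fd_request() from the bottom half.
 * redo_fd_request() then pulls requests off floppy_queue, builds a raw
 * command with make_raw_rw_request() and kicks the hardware state machine
 * via schedule_bh(floppy_start).
 */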
3019static struct cont_t poll_cont = {
3020 .interrupt = success_and_wakeup,
3021 .redo = floppy_ready,
3022 .error = generic_failure,
3023 .done = generic_done
3024};
3025
3026static int poll_drive(int interruptible, int flag)
3027{
3028 int ret;
3029 /* no auto-sense, just clear dcl */
3030 raw_cmd = &default_raw_cmd;
3031 raw_cmd->flags = flag;
3032 raw_cmd->track = 0;
3033 raw_cmd->cmd_count = 0;
3034 cont = &poll_cont;
3035#ifdef DCL_DEBUG
3036 if (DP->flags & FD_DEBUG) {
3037 DPRINT("setting NEWCHANGE in poll_drive\n");
3038 }
3039#endif
3040 SETF(FD_DISK_NEWCHANGE);
3041 WAIT(floppy_ready);
3042 return ret;
3043}
3044
3045/*
3046 * User triggered reset
3047 * ====================
3048 */
3049
3050static void reset_intr(void)
3051{
3052 printk("weird, reset interrupt called\n");
3053}
3054
3055static struct cont_t reset_cont = {
3056 .interrupt = reset_intr,
3057 .redo = success_and_wakeup,
3058 .error = generic_failure,
3059 .done = generic_done
3060};
3061
3062static int user_reset_fdc(int drive, int arg, int interruptible)
3063{
3064 int ret;
3065
3066 ret = 0;
3067 LOCK_FDC(drive, interruptible);
3068 if (arg == FD_RESET_ALWAYS)
3069 FDCS->reset = 1;
3070 if (FDCS->reset) {
3071 cont = &reset_cont;
3072 WAIT(reset_fdc);
3073 }
3074 process_fd_request();
3075 return ret;
3076}
3077
3078/*
3079 * Misc Ioctl's and support
3080 * ========================
3081 */
3082static inline int fd_copyout(void __user *param, const void *address,
3083 unsigned long size)
3084{
3085 return copy_to_user(param, address, size) ? -EFAULT : 0;
3086}
3087
3088static inline int fd_copyin(void __user *param, void *address, unsigned long size)
3089{
3090 return copy_from_user(address, param, size) ? -EFAULT : 0;
3091}
3092
3093#define _COPYOUT(x) (copy_to_user((void __user *)param, &(x), sizeof(x)) ? -EFAULT : 0)
3094#define _COPYIN(x) (copy_from_user(&(x), (void __user *)param, sizeof(x)) ? -EFAULT : 0)
3095
3096#define COPYOUT(x) ECALL(_COPYOUT(x))
3097#define COPYIN(x) ECALL(_COPYIN(x))
3098
3099static inline const char *drive_name(int type, int drive)
3100{
3101 struct floppy_struct *floppy;
3102
3103 if (type)
3104 floppy = floppy_type + type;
3105 else {
3106 if (UDP->native_format)
3107 floppy = floppy_type + UDP->native_format;
3108 else
3109 return "(null)";
3110 }
3111 if (floppy->name)
3112 return floppy->name;
3113 else
3114 return "(null)";
3115}
3116
3117/* raw commands */
3118static void raw_cmd_done(int flag)
3119{
3120 int i;
3121
3122 if (!flag) {
3123 raw_cmd->flags |= FD_RAW_FAILURE;
3124 raw_cmd->flags |= FD_RAW_HARDFAILURE;
3125 } else {
3126 raw_cmd->reply_count = inr;
3127 if (raw_cmd->reply_count > MAX_REPLIES)
3128 raw_cmd->reply_count = 0;
3129 for (i = 0; i < raw_cmd->reply_count; i++)
3130 raw_cmd->reply[i] = reply_buffer[i];
3131
3132 if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
3133 unsigned long flags;
3134 flags = claim_dma_lock();
3135 raw_cmd->length = fd_get_dma_residue();
3136 release_dma_lock(flags);
3137 }
3138
3139 if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
3140 (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
3141 raw_cmd->flags |= FD_RAW_FAILURE;
3142
3143 if (disk_change(current_drive))
3144 raw_cmd->flags |= FD_RAW_DISK_CHANGE;
3145 else
3146 raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
3147 if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
3148 motor_off_callback(current_drive);
3149
3150 if (raw_cmd->next &&
3151 (!(raw_cmd->flags & FD_RAW_FAILURE) ||
3152 !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
3153 ((raw_cmd->flags & FD_RAW_FAILURE) ||
3154 !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
3155 raw_cmd = raw_cmd->next;
3156 return;
3157 }
3158 }
3159 generic_done(flag);
3160}
3161
3162static struct cont_t raw_cmd_cont = {
3163 .interrupt = success_and_wakeup,
3164 .redo = floppy_start,
3165 .error = generic_failure,
3166 .done = raw_cmd_done
3167};
3168
3169static inline int raw_cmd_copyout(int cmd, char __user *param,
3170 struct floppy_raw_cmd *ptr)
3171{
3172 int ret;
3173
3174 while (ptr) {
3175 COPYOUT(*ptr);
3176 param += sizeof(struct floppy_raw_cmd);
3177 if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
3178 if (ptr->length >= 0
3179 && ptr->length <= ptr->buffer_length)
3180 ECALL(fd_copyout
3181 (ptr->data, ptr->kernel_data,
3182 ptr->buffer_length - ptr->length));
3183 }
3184 ptr = ptr->next;
3185 }
3186 return 0;
3187}
3188
3189static void raw_cmd_free(struct floppy_raw_cmd **ptr)
3190{
3191 struct floppy_raw_cmd *next, *this;
3192
3193 this = *ptr;
3194 *ptr = NULL;
3195 while (this) {
3196 if (this->buffer_length) {
3197 fd_dma_mem_free((unsigned long)this->kernel_data,
3198 this->buffer_length);
3199 this->buffer_length = 0;
3200 }
3201 next = this->next;
3202 kfree(this);
3203 this = next;
3204 }
3205}
3206
3207static inline int raw_cmd_copyin(int cmd, char __user *param,
3208 struct floppy_raw_cmd **rcmd)
3209{
3210 struct floppy_raw_cmd *ptr;
3211 int ret;
3212 int i;
3213
3214 *rcmd = NULL;
3215 while (1) {
3216 ptr = (struct floppy_raw_cmd *)
3217 kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
3218 if (!ptr)
3219 return -ENOMEM;
3220 *rcmd = ptr;
3221 COPYIN(*ptr);
3222 ptr->next = NULL;
3223 ptr->buffer_length = 0;
3224 param += sizeof(struct floppy_raw_cmd);
3225 if (ptr->cmd_count > 33)
3226 /* the command may now also take up the space
3227 * initially intended for the reply & the
3228 * reply count. Needed for long 82078 commands
3229 * such as RESTORE, which takes ... 17 command
3230 * bytes. Murphy's law #137: When you reserve
3231 * 16 bytes for a structure, you'll one day
3232 * discover that you really need 17...
3233 */
3234 return -EINVAL;
3235
3236 for (i = 0; i < 16; i++)
3237 ptr->reply[i] = 0;
3238 ptr->resultcode = 0;
3239 ptr->kernel_data = NULL;
3240
3241 if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
3242 if (ptr->length <= 0)
3243 return -EINVAL;
3244 ptr->kernel_data =
3245 (char *)fd_dma_mem_alloc(ptr->length);
3246 fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
3247 if (!ptr->kernel_data)
3248 return -ENOMEM;
3249 ptr->buffer_length = ptr->length;
3250 }
3251 if (ptr->flags & FD_RAW_WRITE)
3252 ECALL(fd_copyin(ptr->data, ptr->kernel_data,
3253 ptr->length));
3254 rcmd = &(ptr->next);
3255 if (!(ptr->flags & FD_RAW_MORE))
3256 return 0;
3257 ptr->rate &= 0x43;
3258 }
3259}
3260
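/*
 * Illustrative note on the copyin loop above: raw_cmd_copyin() consumes an
 * array of struct floppy_raw_cmd from user space, copying one more element
 * as long as FD_RAW_MORE is set in the current one's flags and linking the
 * kernel copies through ->next.  A user of FDRAWCMD therefore passes
 * something like "struct floppy_raw_cmd rc[n]" with FD_RAW_MORE set on
 * rc[0]..rc[n-2] and clear on rc[n-1]; the whole chain runs as one unit and
 * the replies are copied back element by element by raw_cmd_copyout().
 */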
3261static int raw_cmd_ioctl(int cmd, void __user *param)
3262{
3263 int drive, ret, ret2;
3264 struct floppy_raw_cmd *my_raw_cmd;
3265
3266 if (FDCS->rawcmd <= 1)
3267 FDCS->rawcmd = 1;
3268 for (drive = 0; drive < N_DRIVE; drive++) {
3269 if (FDC(drive) != fdc)
3270 continue;
3271 if (drive == current_drive) {
3272 if (UDRS->fd_ref > 1) {
3273 FDCS->rawcmd = 2;
3274 break;
3275 }
3276 } else if (UDRS->fd_ref) {
3277 FDCS->rawcmd = 2;
3278 break;
3279 }
3280 }
3281
3282 if (FDCS->reset)
3283 return -EIO;
3284
3285 ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
3286 if (ret) {
3287 raw_cmd_free(&my_raw_cmd);
3288 return ret;
3289 }
3290
3291 raw_cmd = my_raw_cmd;
3292 cont = &raw_cmd_cont;
3293 ret = wait_til_done(floppy_start, 1);
3294#ifdef DCL_DEBUG
3295 if (DP->flags & FD_DEBUG) {
3296 DPRINT("calling disk change from raw_cmd ioctl\n");
3297 }
3298#endif
3299
3300 if (ret != -EINTR && FDCS->reset)
3301 ret = -EIO;
3302
3303 DRS->track = NO_TRACK;
3304
3305 ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
3306 if (!ret)
3307 ret = ret2;
3308 raw_cmd_free(&my_raw_cmd);
3309 return ret;
3310}
3311
3312static int invalidate_drive(struct block_device *bdev)
3313{
3314 /* invalidate the buffer track to force a reread */
3315 set_bit((long)bdev->bd_disk->private_data, &fake_change);
3316 process_fd_request();
3317 check_disk_change(bdev);
3318 return 0;
3319}
3320
3321static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
3322 int drive, int type, struct block_device *bdev)
3323{
3324 int cnt;
3325
3326 /* sanity checking for parameters. */
3327 if (g->sect <= 0 ||
3328 g->head <= 0 ||
3329 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
3330 /* check if reserved bits are set */
3331 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0)
3332 return -EINVAL;
3333 if (type) {
3334 if (!capable(CAP_SYS_ADMIN))
3335 return -EPERM;
3336 down(&open_lock);
3337 LOCK_FDC(drive, 1);
3338 floppy_type[type] = *g;
3339 floppy_type[type].name = "user format";
3340 for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
3341 floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
3342 floppy_type[type].size + 1;
3343 process_fd_request();
3344 for (cnt = 0; cnt < N_DRIVE; cnt++) {
3345 struct block_device *bdev = opened_bdev[cnt];
3346 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
3347 continue;
3348 __invalidate_device(bdev, 0);
3349 }
3350 up(&open_lock);
3351 } else {
3352 int oldStretch;
3353 LOCK_FDC(drive, 1);
3354 if (cmd != FDDEFPRM)
3355 /* notice a disk change immediately, else
3356 * we lose our settings immediately */
3357 CALL(poll_drive(1, FD_RAW_NEED_DISK));
3358 oldStretch = g->stretch;
3359 user_params[drive] = *g;
3360 if (buffer_drive == drive)
3361 SUPBOUND(buffer_max, user_params[drive].sect);
3362 current_type[drive] = &user_params[drive];
3363 floppy_sizes[drive] = user_params[drive].size;
3364 if (cmd == FDDEFPRM)
3365 DRS->keep_data = -1;
3366 else
3367 DRS->keep_data = 1;
3368 /* invalidation. Invalidate only when needed, i.e.
3369 * when there are already sectors in the buffer cache
3370 * whose number will change. This is useful, because
3371 * mtools often changes the geometry of the disk after
3372 * looking at the boot block */
3373 if (DRS->maxblock > user_params[drive].sect ||
3374 DRS->maxtrack ||
3375 ((user_params[drive].sect ^ oldStretch) &
3376 (FD_SWAPSIDES | FD_ZEROBASED)))
3377 invalidate_drive(bdev);
3378 else
3379 process_fd_request();
3380 }
3381 return 0;
3382}
3383
3384/* handle obsolete ioctl's */
3385static int ioctl_table[] = {
3386 FDCLRPRM,
3387 FDSETPRM,
3388 FDDEFPRM,
3389 FDGETPRM,
3390 FDMSGON,
3391 FDMSGOFF,
3392 FDFMTBEG,
3393 FDFMTTRK,
3394 FDFMTEND,
3395 FDSETEMSGTRESH,
3396 FDFLUSH,
3397 FDSETMAXERRS,
3398 FDGETMAXERRS,
3399 FDGETDRVTYP,
3400 FDSETDRVPRM,
3401 FDGETDRVPRM,
3402 FDGETDRVSTAT,
3403 FDPOLLDRVSTAT,
3404 FDRESET,
3405 FDGETFDCSTAT,
3406 FDWERRORCLR,
3407 FDWERRORGET,
3408 FDRAWCMD,
3409 FDEJECT,
3410 FDTWADDLE
3411};
3412
3413static inline int normalize_ioctl(int *cmd, int *size)
3414{
3415 int i;
3416
3417 for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
3418 if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) {
3419 *size = _IOC_SIZE(*cmd);
3420 *cmd = ioctl_table[i];
3421 if (*size > _IOC_SIZE(*cmd)) {
3422 printk("ioctl not yet supported\n");
3423 return -EFAULT;
3424 }
3425 return 0;
3426 }
3427 }
3428 return -EINVAL;
3429}
3430
3431static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3432{
3433 if (type)
3434 *g = &floppy_type[type];
3435 else {
3436 LOCK_FDC(drive, 0);
3437 CALL(poll_drive(0, 0));
3438 process_fd_request();
3439 *g = current_type[drive];
3440 }
3441 if (!*g)
3442 return -ENODEV;
3443 return 0;
3444}
3445
3446static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
3447 unsigned long param)
3448{
3449#define FD_IOCTL_ALLOWED ((filp) && (filp)->private_data)
3450#define OUT(c,x) case c: outparam = (const char *) (x); break
3451#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
3452
3453 int drive = (long)inode->i_bdev->bd_disk->private_data;
3454 int i, type = ITYPE(UDRS->fd_device);
3455 int ret;
3456 int size;
3457 union inparam {
3458 struct floppy_struct g; /* geometry */
3459 struct format_descr f;
3460 struct floppy_max_errors max_errors;
3461 struct floppy_drive_params dp;
3462 } inparam; /* parameters coming from user space */
3463 const char *outparam; /* parameters passed back to user space */
3464
3465 /* convert compatibility eject ioctls into floppy eject ioctl.
3466 * We do this in order to provide a means to eject floppy disks before
3467 * installing the new fdutils package */
3468 if (cmd == CDROMEJECT || /* CD-ROM eject */
3469 cmd == 0x6470 /* SunOS floppy eject */ ) {
3470 DPRINT("obsolete eject ioctl\n");
3471 DPRINT("please use floppycontrol --eject\n");
3472 cmd = FDEJECT;
3473 }
3474
3475 /* generic block device ioctls */
3476 switch (cmd) {
3477 /* the following have been inspired by the corresponding
3478 * code for other block devices. */
3479 struct floppy_struct *g;
3480 case HDIO_GETGEO:
3481 {
3482 struct hd_geometry loc;
3483 ECALL(get_floppy_geometry(drive, type, &g));
3484 loc.heads = g->head;
3485 loc.sectors = g->sect;
3486 loc.cylinders = g->track;
3487 loc.start = 0;
3488 return _COPYOUT(loc);
3489 }
3490 }
3491
3492 /* convert the old style command into a new style command */
3493 if ((cmd & 0xff00) == 0x0200) {
3494 ECALL(normalize_ioctl(&cmd, &size));
3495 } else
3496 return -EINVAL;
3497
3498 /* permission checks */
3499 if (((cmd & 0x40) && !FD_IOCTL_ALLOWED) ||
3500 ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
3501 return -EPERM;
3502
3503 /* copyin */
3504 CLEARSTRUCT(&inparam);
3505 if (_IOC_DIR(cmd) & _IOC_WRITE)
3506 ECALL(fd_copyin((void __user *)param, &inparam, size))
3507
3508 switch (cmd) {
3509 case FDEJECT:
3510 if (UDRS->fd_ref != 1)
3511 /* somebody else has this drive open */
3512 return -EBUSY;
3513 LOCK_FDC(drive, 1);
3514
3515 /* do the actual eject. Fails on
3516 * non-Sparc architectures */
3517 ret = fd_eject(UNIT(drive));
3518
3519 USETF(FD_DISK_CHANGED);
3520 USETF(FD_VERIFY);
3521 process_fd_request();
3522 return ret;
3523 case FDCLRPRM:
3524 LOCK_FDC(drive, 1);
3525 current_type[drive] = NULL;
3526 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
3527 UDRS->keep_data = 0;
3528 return invalidate_drive(inode->i_bdev);
3529 case FDSETPRM:
3530 case FDDEFPRM:
3531 return set_geometry(cmd, &inparam.g,
3532 drive, type, inode->i_bdev);
3533 case FDGETPRM:
3534 ECALL(get_floppy_geometry(drive, type,
3535 (struct floppy_struct **)
3536 &outparam));
3537 break;
3538
3539 case FDMSGON:
3540 UDP->flags |= FTD_MSG;
3541 return 0;
3542 case FDMSGOFF:
3543 UDP->flags &= ~FTD_MSG;
3544 return 0;
3545
3546 case FDFMTBEG:
3547 LOCK_FDC(drive, 1);
3548 CALL(poll_drive(1, FD_RAW_NEED_DISK));
3549 ret = UDRS->flags;
3550 process_fd_request();
3551 if (ret & FD_VERIFY)
3552 return -ENODEV;
3553 if (!(ret & FD_DISK_WRITABLE))
3554 return -EROFS;
3555 return 0;
3556 case FDFMTTRK:
3557 if (UDRS->fd_ref != 1)
3558 return -EBUSY;
3559 return do_format(drive, &inparam.f);
3560 case FDFMTEND:
3561 case FDFLUSH:
3562 LOCK_FDC(drive, 1);
3563 return invalidate_drive(inode->i_bdev);
3564
3565 case FDSETEMSGTRESH:
3566 UDP->max_errors.reporting =
3567 (unsigned short)(param & 0x0f);
3568 return 0;
3569 OUT(FDGETMAXERRS, &UDP->max_errors);
3570 IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
3571
3572 case FDGETDRVTYP:
3573 outparam = drive_name(type, drive);
3574 SUPBOUND(size, strlen(outparam) + 1);
3575 break;
3576
3577 IN(FDSETDRVPRM, UDP, dp);
3578 OUT(FDGETDRVPRM, UDP);
3579
3580 case FDPOLLDRVSTAT:
3581 LOCK_FDC(drive, 1);
3582 CALL(poll_drive(1, FD_RAW_NEED_DISK));
3583 process_fd_request();
3584 /* fall through */
3585 OUT(FDGETDRVSTAT, UDRS);
3586
3587 case FDRESET:
3588 return user_reset_fdc(drive, (int)param, 1);
3589
3590 OUT(FDGETFDCSTAT, UFDCS);
3591
3592 case FDWERRORCLR:
3593 CLEARSTRUCT(UDRWE);
3594 return 0;
3595 OUT(FDWERRORGET, UDRWE);
3596
3597 case FDRAWCMD:
3598 if (type)
3599 return -EINVAL;
3600 LOCK_FDC(drive, 1);
3601 set_floppy(drive);
3602 CALL(i = raw_cmd_ioctl(cmd, (void __user *)param));
3603 process_fd_request();
3604 return i;
3605
3606 case FDTWADDLE:
3607 LOCK_FDC(drive, 1);
3608 twaddle();
3609 process_fd_request();
3610 return 0;
3611
3612 default:
3613 return -EINVAL;
3614 }
3615
3616 if (_IOC_DIR(cmd) & _IOC_READ)
3617 return fd_copyout((void __user *)param, outparam, size);
3618 else
3619 return 0;
3620#undef OUT
3621#undef IN
3622}
3623
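/*
 * Illustrative userspace sketch of the ioctl path above (a hypothetical
 * program, not part of this file): FDGETPRM returns the struct
 * floppy_struct that set_geometry()/get_floppy_geometry() manage, and
 * O_NDELAY lets the open succeed even without media, as floppy_open()
 * below shows.
 */
#if 0	/* sketch only, not compiled into the driver */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fd.h>

int main(void)
{
	struct floppy_struct g;
	int fd = open("/dev/fd0", O_RDONLY | O_NDELAY);

	if (fd < 0 || ioctl(fd, FDGETPRM, &g) < 0) {
		perror("FDGETPRM");
		return 1;
	}
	/* size is the total capacity in 512-byte sectors */
	printf("%u sectors, %u sect/trk, %u heads, %u tracks\n",
	       g.size, g.sect, g.head, g.track);
	return 0;
}
#endif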
3624static void __init config_types(void)
3625{
3626 int first = 1;
3627 int drive;
3628
3629 /* read drive info out of physical CMOS */
3630 drive = 0;
3631 if (!UDP->cmos)
3632 UDP->cmos = FLOPPY0_TYPE;
3633 drive = 1;
3634 if (!UDP->cmos && FLOPPY1_TYPE)
3635 UDP->cmos = FLOPPY1_TYPE;
3636
3637 /* XXX */
3638 /* additional physical CMOS drive detection should go here */
3639
3640 for (drive = 0; drive < N_DRIVE; drive++) {
3641 unsigned int type = UDP->cmos;
3642 struct floppy_drive_params *params;
3643 const char *name = NULL;
3644 static char temparea[32];
3645
3646 if (type < NUMBER(default_drive_params)) {
3647 params = &default_drive_params[type].params;
3648 if (type) {
3649 name = default_drive_params[type].name;
3650 allowed_drive_mask |= 1 << drive;
3651 } else
3652 allowed_drive_mask &= ~(1 << drive);
3653 } else {
3654 params = &default_drive_params[0].params;
3655 sprintf(temparea, "unknown type %d (usb?)", type);
3656 name = temparea;
3657 }
3658 if (name) {
3659 const char *prepend = ",";
3660 if (first) {
3661 prepend = KERN_INFO "Floppy drive(s):";
3662 first = 0;
3663 }
3664 printk("%s fd%d is %s", prepend, drive, name);
3665 register_devfs_entries(drive);
3666 }
3667 *UDP = *params;
3668 }
3669 if (!first)
3670 printk("\n");
3671}
3672
3673static int floppy_release(struct inode *inode, struct file *filp)
3674{
3675 int drive = (long)inode->i_bdev->bd_disk->private_data;
3676
3677 down(&open_lock);
3678 if (UDRS->fd_ref < 0)
3679 UDRS->fd_ref = 0;
3680 else if (!UDRS->fd_ref--) {
3681 DPRINT("floppy_release with fd_ref == 0");
3682 UDRS->fd_ref = 0;
3683 }
3684 if (!UDRS->fd_ref)
3685 opened_bdev[drive] = NULL;
3686 floppy_release_irq_and_dma();
3687 up(&open_lock);
3688 return 0;
3689}
3690
3691/*
3692 * floppy_open checks for aliasing (/dev/fd0 can be the same as
3693 * /dev/PS0 etc), and disallows simultaneous access to the same
3694 * drive with different device numbers.
3695 */
3696static int floppy_open(struct inode *inode, struct file *filp)
3697{
3698 int drive = (long)inode->i_bdev->bd_disk->private_data;
3699 int old_dev;
3700 int try;
3701 int res = -EBUSY;
3702 char *tmp;
3703
3704 filp->private_data = (void *)0;
3705 down(&open_lock);
3706 old_dev = UDRS->fd_device;
3707 if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev)
3708 goto out2;
3709
3710 if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
3711 USETF(FD_DISK_CHANGED);
3712 USETF(FD_VERIFY);
3713 }
3714
3715 if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
3716 goto out2;
3717
3718 if (floppy_grab_irq_and_dma())
3719 goto out2;
3720
3721 if (filp->f_flags & O_EXCL)
3722 UDRS->fd_ref = -1;
3723 else
3724 UDRS->fd_ref++;
3725
3726 opened_bdev[drive] = inode->i_bdev;
3727
3728 res = -ENXIO;
3729
3730 if (!floppy_track_buffer) {
3731 /* if opening an ED drive, reserve a big buffer,
3732 * else reserve a small one */
3733 if ((UDP->cmos == 6) || (UDP->cmos == 5))
3734 try = 64; /* Only 48 actually useful */
3735 else
3736 try = 32; /* Only 24 actually useful */
3737
3738 tmp = (char *)fd_dma_mem_alloc(1024 * try);
3739 if (!tmp && !floppy_track_buffer) {
3740 try >>= 1; /* buffer only one side */
3741 INFBOUND(try, 16);
3742 tmp = (char *)fd_dma_mem_alloc(1024 * try);
3743 }
3744 if (!tmp && !floppy_track_buffer) {
3745 fallback_on_nodma_alloc(&tmp, 2048 * try);
3746 }
3747 if (!tmp && !floppy_track_buffer) {
3748 DPRINT("Unable to allocate DMA memory\n");
3749 goto out;
3750 }
3751 if (floppy_track_buffer) {
3752 if (tmp)
3753 fd_dma_mem_free((unsigned long)tmp, try * 1024);
3754 } else {
3755 buffer_min = buffer_max = -1;
3756 floppy_track_buffer = tmp;
3757 max_buffer_sectors = try;
3758 }
3759 }
3760
3761 UDRS->fd_device = iminor(inode);
3762 set_capacity(disks[drive], floppy_sizes[iminor(inode)]);
3763 if (old_dev != -1 && old_dev != iminor(inode)) {
3764 if (buffer_drive == drive)
3765 buffer_track = -1;
3766 }
3767
3768 /* Allow ioctls if we have write permission, even if opened read-only.
3769 * Needed so that programs such as fdrawcmd can still work on
3770 * write-protected disks */
3771 if (filp->f_mode & 2
3772 || permission(filp->f_dentry->d_inode, 2, NULL) == 0)
3773 filp->private_data = (void *)8;
3774
3775 if (UFDCS->rawcmd == 1)
3776 UFDCS->rawcmd = 2;
3777
3778 if (!(filp->f_flags & O_NDELAY)) {
3779 if (filp->f_mode & 3) {
3780 UDRS->last_checked = 0;
3781 check_disk_change(inode->i_bdev);
3782 if (UTESTF(FD_DISK_CHANGED))
3783 goto out;
3784 }
3785 res = -EROFS;
3786 if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
3787 goto out;
3788 }
3789 up(&open_lock);
3790 return 0;
3791out:
3792 if (UDRS->fd_ref < 0)
3793 UDRS->fd_ref = 0;
3794 else
3795 UDRS->fd_ref--;
3796 if (!UDRS->fd_ref)
3797 opened_bdev[drive] = NULL;
3798 floppy_release_irq_and_dma();
3799out2:
3800 up(&open_lock);
3801 return res;
3802}
3803
3804/*
3805 * Check if the disk has been changed or if a change has been faked.
3806 */
3807static int check_floppy_change(struct gendisk *disk)
3808{
3809 int drive = (long)disk->private_data;
3810
3811 if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
3812 return 1;
3813
3814 if (UDP->checkfreq < (int)(jiffies - UDRS->last_checked)) {
3815 if (floppy_grab_irq_and_dma()) {
3816 return 1;
3817 }
3818
3819 lock_fdc(drive, 0);
3820 poll_drive(0, 0);
3821 process_fd_request();
3822 floppy_release_irq_and_dma();
3823 }
3824
3825 if (UTESTF(FD_DISK_CHANGED) ||
3826 UTESTF(FD_VERIFY) ||
3827 test_bit(drive, &fake_change) ||
3828 (!ITYPE(UDRS->fd_device) && !current_type[drive]))
3829 return 1;
3830 return 0;
3831}
3832
3833/*
3834 * This implements "read block 0" for floppy_revalidate().
3835 * Needed for format autodetection, checking whether there is
3836 * a disk in the drive, and whether that disk is writable.
3837 */
3838
3839static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
3840 int err)
3841{
3842 if (bio->bi_size)
3843 return 1;
3844
3845 complete((struct completion *)bio->bi_private);
3846 return 0;
3847}
3848
3849static int __floppy_read_block_0(struct block_device *bdev)
3850{
3851 struct bio bio;
3852 struct bio_vec bio_vec;
3853 struct completion complete;
3854 struct page *page;
3855 size_t size;
3856
3857 page = alloc_page(GFP_NOIO);
3858 if (!page) {
3859 process_fd_request();
3860 return -ENOMEM;
3861 }
3862
3863 size = bdev->bd_block_size;
3864 if (!size)
3865 size = 1024;
3866
3867 bio_init(&bio);
3868 bio.bi_io_vec = &bio_vec;
3869 bio_vec.bv_page = page;
3870 bio_vec.bv_len = size;
3871 bio_vec.bv_offset = 0;
3872 bio.bi_vcnt = 1;
3873 bio.bi_idx = 0;
3874 bio.bi_size = size;
3875 bio.bi_bdev = bdev;
3876 bio.bi_sector = 0;
3877 init_completion(&complete);
3878 bio.bi_private = &complete;
3879 bio.bi_end_io = floppy_rb0_complete;
3880
3881 submit_bio(READ, &bio);
3882 generic_unplug_device(bdev_get_queue(bdev));
3883 process_fd_request();
3884 wait_for_completion(&complete);
3885
3886 __free_page(page);
3887
3888 return 0;
3889}
3890
3891/* revalidate the floppy disk, i.e. trigger format autodetection by reading
3892 * the bootblock (block 0). "Autodetection" is also needed to check whether
3893 * there is a disk in the drive at all... Thus we also do it for fixed
3894 * geometry formats */
3895static int floppy_revalidate(struct gendisk *disk)
3896{
3897 int drive = (long)disk->private_data;
3898#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device))
3899 int cf;
3900 int res = 0;
3901
3902 if (UTESTF(FD_DISK_CHANGED) ||
3903 UTESTF(FD_VERIFY) || test_bit(drive, &fake_change) || NO_GEOM) {
3904 if (usage_count == 0) {
3905 printk("VFS: revalidate called on non-open device.\n");
3906 return -EFAULT;
3907 }
3908 lock_fdc(drive, 0);
3909 cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
3910 if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) {
3911 process_fd_request(); /*already done by another thread */
3912 return 0;
3913 }
3914 UDRS->maxblock = 0;
3915 UDRS->maxtrack = 0;
3916 if (buffer_drive == drive)
3917 buffer_track = -1;
3918 clear_bit(drive, &fake_change);
3919 UCLEARF(FD_DISK_CHANGED);
3920 if (cf)
3921 UDRS->generation++;
3922 if (NO_GEOM) {
3923 /* auto-sensing */
3924 res = __floppy_read_block_0(opened_bdev[drive]);
3925 } else {
3926 if (cf)
3927 poll_drive(0, FD_RAW_NEED_DISK);
3928 process_fd_request();
3929 }
3930 }
3931 set_capacity(disk, floppy_sizes[UDRS->fd_device]);
3932 return res;
3933}
3934
3935static struct block_device_operations floppy_fops = {
3936 .owner = THIS_MODULE,
3937 .open = floppy_open,
3938 .release = floppy_release,
3939 .ioctl = fd_ioctl,
3940 .media_changed = check_floppy_change,
3941 .revalidate_disk = floppy_revalidate,
3942};
3943static char *table[] = {
3944 "", "d360", "h1200", "u360", "u720", "h360", "h720",
3945 "u1440", "u2880", "CompaQ", "h1440", "u1680", "h410",
3946 "u820", "h1476", "u1722", "h420", "u830", "h1494", "u1743",
3947 "h880", "u1040", "u1120", "h1600", "u1760", "u1920",
3948 "u3200", "u3520", "u3840", "u1840", "u800", "u1600",
3949 NULL
3950};
3951static int t360[] = { 1, 0 },
3952 t1200[] = { 2, 5, 6, 10, 12, 14, 16, 18, 20, 23, 0 },
3953 t3in[] = { 8, 9, 26, 27, 28, 7, 11, 15, 19, 24, 25, 29, 31, 3, 4, 13,
3954 17, 21, 22, 30, 0 };
3955static int *table_sup[] =
3956 { NULL, t360, t1200, t3in + 5 + 8, t3in + 5, t3in, t3in };
3957
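/*
 * Minor-number layout implied by these tables and by floppy_find() further
 * down: bits 1:0 select the drive (bit 7 moves to drives 4-7 on a second
 * FDC, matching the "124 + drive" base below) and bits 6:2 index
 * floppy_type[]/table[].  For example, minor 28 is (7 << 2) | 0, i.e.
 * drive 0 locked to format "u1440" (devfs name floppy/0u1440, classically
 * /dev/fd0u1440), while minor 0 is the auto-detecting /dev/fd0.
 */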
3958static void __init register_devfs_entries(int drive)
3959{
3960 int base_minor = (drive < 4) ? drive : (124 + drive);
3961
3962 if (UDP->cmos < NUMBER(default_drive_params)) {
3963 int i = 0;
3964 do {
3965 int minor = base_minor + (table_sup[UDP->cmos][i] << 2);
3966
3967 devfs_mk_bdev(MKDEV(FLOPPY_MAJOR, minor),
3968 S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |
3969 S_IWGRP, "floppy/%d%s", drive,
3970 table[table_sup[UDP->cmos][i]]);
3971 } while (table_sup[UDP->cmos][i++]);
3972 }
3973}
3974
3975/*
3976 * Floppy Driver initialization
3977 * =============================
3978 */
3979
3980/* Determine the floppy disk controller type */
3981/* This routine was written by David C. Niemi */
3982static char __init get_fdc_version(void)
3983{
3984 int r;
3985
3986 output_byte(FD_DUMPREGS); /* 82072 and better know DUMPREGS */
3987 if (FDCS->reset)
3988 return FDC_NONE;
3989 if ((r = result()) <= 0x00)
3990 return FDC_NONE; /* No FDC present ??? */
3991 if ((r == 1) && (reply_buffer[0] == 0x80)) {
3992 printk(KERN_INFO "FDC %d is an 8272A\n", fdc);
3993 return FDC_8272A; /* 8272a/765 don't know DUMPREGS */
3994 }
3995 if (r != 10) {
3996 printk
3997 ("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
3998 fdc, r);
3999 return FDC_UNKNOWN;
4000 }
4001
4002 if (!fdc_configure()) {
4003 printk(KERN_INFO "FDC %d is an 82072\n", fdc);
4004 return FDC_82072; /* 82072 doesn't know CONFIGURE */
4005 }
4006
4007 output_byte(FD_PERPENDICULAR);
4008 if (need_more_output() == MORE_OUTPUT) {
4009 output_byte(0);
4010 } else {
4011 printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
4012 return FDC_82072A; /* 82072A as found on Sparcs. */
4013 }
4014
4015 output_byte(FD_UNLOCK);
4016 r = result();
4017 if ((r == 1) && (reply_buffer[0] == 0x80)) {
4018 printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
4019 return FDC_82077_ORIG; /* Pre-1991 82077, doesn't know
4020 * LOCK/UNLOCK */
4021 }
4022 if ((r != 1) || (reply_buffer[0] != 0x00)) {
4023 printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
4024 fdc, r);
4025 return FDC_UNKNOWN;
4026 }
4027 output_byte(FD_PARTID);
4028 r = result();
4029 if (r != 1) {
4030 printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
4031 fdc, r);
4032 return FDC_UNKNOWN;
4033 }
4034 if (reply_buffer[0] == 0x80) {
4035 printk(KERN_INFO "FDC %d is a post-1991 82077\n", fdc);
4036 return FDC_82077; /* Revised 82077AA passes all the tests */
4037 }
4038 switch (reply_buffer[0] >> 5) {
4039 case 0x0:
4040 /* Either a 82078-1 or a 82078SL running at 5Volt */
4041 printk(KERN_INFO "FDC %d is an 82078.\n", fdc);
4042 return FDC_82078;
4043 case 0x1:
4044 printk(KERN_INFO "FDC %d is a 44pin 82078\n", fdc);
4045 return FDC_82078;
4046 case 0x2:
4047 printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
4048 return FDC_S82078B;
4049 case 0x3:
4050 printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n",
4051 fdc);
4052 return FDC_87306;
4053 default:
4054 printk(KERN_INFO
4055 "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
4056 fdc, reply_buffer[0] >> 5);
4057 return FDC_82078_UNKN;
4058 }
4059} /* get_fdc_version */
4060
4061/* lilo configuration */
4062
4063static void __init floppy_set_flags(int *ints, int param, int param2)
4064{
4065 int i;
4066
4067 for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
4068 if (param)
4069 default_drive_params[i].params.flags |= param2;
4070 else
4071 default_drive_params[i].params.flags &= ~param2;
4072 }
4073 DPRINT("%s flag 0x%x\n", param2 ? "Setting" : "Clearing", param);
4074}
4075
4076static void __init daring(int *ints, int param, int param2)
4077{
4078 int i;
4079
4080 for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
4081 if (param) {
4082 default_drive_params[i].params.select_delay = 0;
4083 default_drive_params[i].params.flags |=
4084 FD_SILENT_DCL_CLEAR;
4085 } else {
4086 default_drive_params[i].params.select_delay =
4087 2 * HZ / 100;
4088 default_drive_params[i].params.flags &=
4089 ~FD_SILENT_DCL_CLEAR;
4090 }
4091 }
4092 DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
4093}
4094
4095static void __init set_cmos(int *ints, int dummy, int dummy2)
4096{
4097 int current_drive = 0;
4098
4099 if (ints[0] != 2) {
4100 DPRINT("wrong number of parameters for CMOS\n");
4101 return;
4102 }
4103 current_drive = ints[1];
4104 if (current_drive < 0 || current_drive >= 8) {
4105 DPRINT("bad drive for set_cmos\n");
4106 return;
4107 }
4108#if N_FDC > 1
4109 if (current_drive >= 4 && !FDC2)
4110 FDC2 = 0x370;
4111#endif
4112 DP->cmos = ints[2];
4113 DPRINT("setting CMOS code to %d\n", ints[2]);
4114}
4115
4116static struct param_table {
4117 const char *name;
4118 void (*fn) (int *ints, int param, int param2);
4119 int *var;
4120 int def_param;
4121 int param2;
4122} config_params[] __initdata = {
4123 {"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
4124 {"all_drives", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
4125 {"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
4126 {"irq", NULL, &FLOPPY_IRQ, 6, 0},
4127 {"dma", NULL, &FLOPPY_DMA, 2, 0},
4128 {"daring", daring, NULL, 1, 0},
4129#if N_FDC > 1
4130 {"two_fdc", NULL, &FDC2, 0x370, 0},
4131 {"one_fdc", NULL, &FDC2, 0, 0},
4132#endif
4133 {"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
4134 {"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
4135 {"messages", floppy_set_flags, NULL, 1, FTD_MSG},
4136 {"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
4137 {"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
4138 {"nodma", NULL, &can_use_virtual_dma, 1, 0},
4139 {"omnibook", NULL, &can_use_virtual_dma, 1, 0},
4140 {"yesdma", NULL, &can_use_virtual_dma, 0, 0},
4141 {"fifo_depth", NULL, &fifo_depth, 0xa, 0},
4142 {"nofifo", NULL, &no_fifo, 0x20, 0},
4143 {"usefifo", NULL, &no_fifo, 0, 0},
4144 {"cmos", set_cmos, NULL, 0, 0},
4145 {"slow", NULL, &slow_floppy, 1, 0},
4146 {"unexpected_interrupts", NULL, &print_unex, 1, 0},
4147 {"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
4148 {"L40SX", NULL, &print_unex, 0, 0}
4149
4150 EXTRA_FLOPPY_PARAMS
4151};
4152
4153static int __init floppy_setup(char *str)
4154{
4155 int i;
4156 int param;
4157 int ints[11];
4158
4159 str = get_options(str, ARRAY_SIZE(ints), ints);
4160 if (str) {
4161 for (i = 0; i < ARRAY_SIZE(config_params); i++) {
4162 if (strcmp(str, config_params[i].name) == 0) {
4163 if (ints[0])
4164 param = ints[1];
4165 else
4166 param = config_params[i].def_param;
4167 if (config_params[i].fn)
4168 config_params[i].
4169 fn(ints, param,
4170 config_params[i].param2);
4171 if (config_params[i].var) {
4172 DPRINT("%s=%d\n", str, param);
4173 *config_params[i].var = param;
4174 }
4175 return 1;
4176 }
4177 }
4178 }
4179 if (str) {
4180 DPRINT("unknown floppy option [%s]\n", str);
4181
4182 DPRINT("allowed options are:");
4183 for (i = 0; i < ARRAY_SIZE(config_params); i++)
4184 printk(" %s", config_params[i].name);
4185 printk("\n");
4186 } else
4187 DPRINT("botched floppy option\n");
4188 DPRINT("Read Documentation/floppy.txt\n");
4189 return 0;
4190}
4191
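/*
 * Worked examples of the option parser above (the same syntax is accepted
 * on the kernel command line and, via parse_floppy_cfg_string() below, in
 * the module's "floppy=" parameter; see Documentation/floppy.txt):
 *
 *   floppy=thinkpad       -> floppy_set_flags(): set FD_INVERTED_DCL
 *   floppy=daring         -> daring(): select_delay = 0, FD_SILENT_DCL_CLEAR
 *   floppy=0,4,cmos       -> set_cmos(): drive 0 is forced to CMOS type 4
 *   floppy=16,fifo_depth  -> entry with a "var" pointer: fifo_depth = 16
 */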
4192static int have_no_fdc = -ENODEV;
4193
4194static void floppy_device_release(struct device *dev)
4195{
4196 complete(&device_release);
4197}
4198
4199static struct platform_device floppy_device = {
4200 .name = "floppy",
4201 .id = 0,
4202 .dev = {
4203 .release = floppy_device_release,
4204 }
4205};
4206
4207static struct kobject *floppy_find(dev_t dev, int *part, void *data)
4208{
4209 int drive = (*part & 3) | ((*part & 0x80) >> 5);
4210 if (drive >= N_DRIVE ||
4211 !(allowed_drive_mask & (1 << drive)) ||
4212 fdc_state[FDC(drive)].version == FDC_NONE)
4213 return NULL;
4214 if (((*part >> 2) & 0x1f) >= NUMBER(floppy_type))
4215 return NULL;
4216 *part = 0;
4217 return get_disk(disks[drive]);
4218}
4219
4220static int __init floppy_init(void)
4221{
4222 int i, unit, drive;
4223 int err, dr;
4224
4225 raw_cmd = NULL;
4226
4227 for (dr = 0; dr < N_DRIVE; dr++) {
4228 disks[dr] = alloc_disk(1);
4229 if (!disks[dr]) {
4230 err = -ENOMEM;
4231 goto out_put_disk;
4232 }
4233
4234 disks[dr]->major = FLOPPY_MAJOR;
4235 disks[dr]->first_minor = TOMINOR(dr);
4236 disks[dr]->fops = &floppy_fops;
4237 sprintf(disks[dr]->disk_name, "fd%d", dr);
4238
4239 init_timer(&motor_off_timer[dr]);
4240 motor_off_timer[dr].data = dr;
4241 motor_off_timer[dr].function = motor_off_callback;
4242 }
4243
4244 devfs_mk_dir("floppy");
4245
4246 err = register_blkdev(FLOPPY_MAJOR, "fd");
4247 if (err)
4248 goto out_devfs_remove;
4249
4250 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
4251 if (!floppy_queue) {
4252 err = -ENOMEM;
4253 goto out_unreg_blkdev;
4254 }
4255 blk_queue_max_sectors(floppy_queue, 64);
4256
4257 blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
4258 floppy_find, NULL, NULL);
4259
4260 for (i = 0; i < 256; i++)
4261 if (ITYPE(i))
4262 floppy_sizes[i] = floppy_type[ITYPE(i)].size;
4263 else
4264 floppy_sizes[i] = MAX_DISK_SIZE << 1;
4265
4266 reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
4267 config_types();
4268
4269 for (i = 0; i < N_FDC; i++) {
4270 fdc = i;
4271 CLEARSTRUCT(FDCS);
4272 FDCS->dtr = -1;
4273 FDCS->dor = 0x4;
4274#if defined(__sparc__) || defined(__mc68000__)
4275 /* sparcs/sun3x don't have a DOR reset which we can fall back on */
4276#ifdef __mc68000__
4277 if (MACH_IS_SUN3X)
4278#endif
4279 FDCS->version = FDC_82072A;
4280#endif
4281 }
4282
4283 use_virtual_dma = can_use_virtual_dma & 1;
4284#if defined(CONFIG_PPC64)
4285 if (check_legacy_ioport(FDC1)) {
4286 del_timer(&fd_timeout);
4287 err = -ENODEV;
4288 goto out_unreg_region;
4289 }
4290#endif
4291 fdc_state[0].address = FDC1;
4292 if (fdc_state[0].address == -1) {
4293 del_timer(&fd_timeout);
4294 err = -ENODEV;
4295 goto out_unreg_region;
4296 }
4297#if N_FDC > 1
4298 fdc_state[1].address = FDC2;
4299#endif
4300
4301 fdc = 0; /* reset fdc in case of unexpected interrupt */
4302 err = floppy_grab_irq_and_dma();
4303 if (err) {
4304 del_timer(&fd_timeout);
4305 err = -EBUSY;
4306 goto out_unreg_region;
4307 }
4308
4309 /* initialise drive state */
4310 for (drive = 0; drive < N_DRIVE; drive++) {
4311 CLEARSTRUCT(UDRS);
4312 CLEARSTRUCT(UDRWE);
4313 USETF(FD_DISK_NEWCHANGE);
4314 USETF(FD_DISK_CHANGED);
4315 USETF(FD_VERIFY);
4316 UDRS->fd_device = -1;
4317 floppy_track_buffer = NULL;
4318 max_buffer_sectors = 0;
4319 }
4320 /*
4321 * Small 10 msec delay to let through any interrupt that
4322 * initialization might have triggered, to not
4323 * confuse detection:
4324 */
4325 msleep(10);
4326
4327 for (i = 0; i < N_FDC; i++) {
4328 fdc = i;
4329 FDCS->driver_version = FD_DRIVER_VERSION;
4330 for (unit = 0; unit < 4; unit++)
4331 FDCS->track[unit] = 0;
4332 if (FDCS->address == -1)
4333 continue;
4334 FDCS->rawcmd = 2;
4335 if (user_reset_fdc(-1, FD_RESET_ALWAYS, 0)) {
4336 /* free ioports reserved by floppy_grab_irq_and_dma() */
4337 release_region(FDCS->address + 2, 4);
4338 release_region(FDCS->address + 7, 1);
4339 FDCS->address = -1;
4340 FDCS->version = FDC_NONE;
4341 continue;
4342 }
4343 /* Try to determine the floppy controller type */
4344 FDCS->version = get_fdc_version();
4345 if (FDCS->version == FDC_NONE) {
4346 /* free ioports reserved by floppy_grab_irq_and_dma() */
4347 release_region(FDCS->address + 2, 4);
4348 release_region(FDCS->address + 7, 1);
4349 FDCS->address = -1;
4350 continue;
4351 }
4352 if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A)
4353 can_use_virtual_dma = 0;
4354
4355 have_no_fdc = 0;
4356 /* Not all FDCs seem to be able to handle the version command
4357 * properly, so force a reset for the standard FDC clones,
4358 * to avoid interrupt garbage.
4359 */
4360 user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
4361 }
4362 fdc = 0;
4363 del_timer(&fd_timeout);
4364 current_drive = 0;
4365 floppy_release_irq_and_dma();
4366 initialising = 0;
4367 if (have_no_fdc) {
4368 DPRINT("no floppy controllers found\n");
4369 err = have_no_fdc;
4370 goto out_flush_work;
4371 }
4372
4373 err = platform_device_register(&floppy_device);
4374 if (err)
4375 goto out_flush_work;
4376
4377 for (drive = 0; drive < N_DRIVE; drive++) {
4378 if (!(allowed_drive_mask & (1 << drive)))
4379 continue;
4380 if (fdc_state[FDC(drive)].version == FDC_NONE)
4381 continue;
4382 /* to be cleaned up... */
4383 disks[drive]->private_data = (void *)(long)drive;
4384 disks[drive]->queue = floppy_queue;
4385 disks[drive]->flags |= GENHD_FL_REMOVABLE;
4386 disks[drive]->driverfs_dev = &floppy_device.dev;
4387 add_disk(disks[drive]);
4388 }
4389
4390 return 0;
4391
4392out_flush_work:
4393 flush_scheduled_work();
4394 if (usage_count)
4395 floppy_release_irq_and_dma();
4396out_unreg_region:
4397 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4398 blk_cleanup_queue(floppy_queue);
4399out_unreg_blkdev:
4400 unregister_blkdev(FLOPPY_MAJOR, "fd");
4401out_devfs_remove:
4402 devfs_remove("floppy");
4403out_put_disk:
4404 while (dr--) {
4405 del_timer(&motor_off_timer[dr]);
4406 put_disk(disks[dr]);
4407 }
4408 return err;
4409}
4410
4411static DEFINE_SPINLOCK(floppy_usage_lock);
4412
4413static int floppy_grab_irq_and_dma(void)
4414{
4415 unsigned long flags;
4416
4417 spin_lock_irqsave(&floppy_usage_lock, flags);
4418 if (usage_count++) {
4419 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4420 return 0;
4421 }
4422 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4423 if (fd_request_irq()) {
4424 DPRINT("Unable to grab IRQ%d for the floppy driver\n",
4425 FLOPPY_IRQ);
4426 spin_lock_irqsave(&floppy_usage_lock, flags);
4427 usage_count--;
4428 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4429 return -1;
4430 }
4431 if (fd_request_dma()) {
4432 DPRINT("Unable to grab DMA%d for the floppy driver\n",
4433 FLOPPY_DMA);
4434 fd_free_irq();
4435 spin_lock_irqsave(&floppy_usage_lock, flags);
4436 usage_count--;
4437 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4438 return -1;
4439 }
4440
4441 for (fdc = 0; fdc < N_FDC; fdc++) {
4442 if (FDCS->address != -1) {
4443 if (!request_region(FDCS->address + 2, 4, "floppy")) {
4444 DPRINT("Floppy io-port 0x%04lx in use\n",
4445 FDCS->address + 2);
4446 goto cleanup1;
4447 }
4448 if (!request_region(FDCS->address + 7, 1, "floppy DIR")) {
4449 DPRINT("Floppy io-port 0x%04lx in use\n",
4450 FDCS->address + 7);
4451 goto cleanup2;
4452 }
4453 /* address + 6 is reserved, and may be taken by IDE.
4454 * Unfortunately, Adaptec doesn't know this :-(, */
4455 }
4456 }
4457 for (fdc = 0; fdc < N_FDC; fdc++) {
4458 if (FDCS->address != -1) {
4459 reset_fdc_info(1);
4460 fd_outb(FDCS->dor, FD_DOR);
4461 }
4462 }
4463 fdc = 0;
4464 set_dor(0, ~0, 8); /* avoid immediate interrupt */
4465
4466 for (fdc = 0; fdc < N_FDC; fdc++)
4467 if (FDCS->address != -1)
4468 fd_outb(FDCS->dor, FD_DOR);
4469 /*
4470 * The driver will try to free resources and relies on us
4471 * to know if they were allocated or not.
4472 */
4473 fdc = 0;
4474 irqdma_allocated = 1;
4475 return 0;
4476cleanup2:
4477 release_region(FDCS->address + 2, 4);
4478cleanup1:
4479 fd_free_irq();
4480 fd_free_dma();
4481 while (--fdc >= 0) {
4482 release_region(FDCS->address + 2, 4);
4483 release_region(FDCS->address + 7, 1);
4484 }
4485 spin_lock_irqsave(&floppy_usage_lock, flags);
4486 usage_count--;
4487 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4488 return -1;
4489}
4490
4491static void floppy_release_irq_and_dma(void)
4492{
4493 int old_fdc;
4494#ifdef FLOPPY_SANITY_CHECK
4495#ifndef __sparc__
4496 int drive;
4497#endif
4498#endif
4499 long tmpsize;
4500 unsigned long tmpaddr;
4501 unsigned long flags;
4502
4503 spin_lock_irqsave(&floppy_usage_lock, flags);
4504 if (--usage_count) {
4505 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4506 return;
4507 }
4508 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4509 if (irqdma_allocated) {
4510 fd_disable_dma();
4511 fd_free_dma();
4512 fd_free_irq();
4513 irqdma_allocated = 0;
4514 }
4515 set_dor(0, ~0, 8);
4516#if N_FDC > 1
4517 set_dor(1, ~8, 0);
4518#endif
4519 floppy_enable_hlt();
4520
4521 if (floppy_track_buffer && max_buffer_sectors) {
4522 tmpsize = max_buffer_sectors * 1024;
4523 tmpaddr = (unsigned long)floppy_track_buffer;
4524 floppy_track_buffer = NULL;
4525 max_buffer_sectors = 0;
4526 buffer_min = buffer_max = -1;
4527 fd_dma_mem_free(tmpaddr, tmpsize);
4528 }
4529#ifdef FLOPPY_SANITY_CHECK
4530#ifndef __sparc__
4531 for (drive = 0; drive < N_FDC * 4; drive++)
4532 if (timer_pending(motor_off_timer + drive))
4533 printk("motor off timer %d still active\n", drive);
4534#endif
4535
4536 if (timer_pending(&fd_timeout))
4537 printk("floppy timer still active:%s\n", timeout_message);
4538 if (timer_pending(&fd_timer))
4539 printk("auxiliary floppy timer still active\n");
4540 if (floppy_work.pending)
4541 printk("work still pending\n");
4542#endif
4543 old_fdc = fdc;
4544 for (fdc = 0; fdc < N_FDC; fdc++)
4545 if (FDCS->address != -1) {
4546 release_region(FDCS->address + 2, 4);
4547 release_region(FDCS->address + 7, 1);
4548 }
4549 fdc = old_fdc;
4550}
4551
4552#ifdef MODULE
4553
4554static char *floppy;
4555
4556static void unregister_devfs_entries(int drive)
4557{
4558 int i;
4559
4560 if (UDP->cmos < NUMBER(default_drive_params)) {
4561 i = 0;
4562 do {
4563 devfs_remove("floppy/%d%s", drive,
4564 table[table_sup[UDP->cmos][i]]);
4565 } while (table_sup[UDP->cmos][i++]);
4566 }
4567}
4568
4569static void __init parse_floppy_cfg_string(char *cfg)
4570{
4571 char *ptr;
4572
4573 while (*cfg) {
4574 for (ptr = cfg; *cfg && *cfg != ' ' && *cfg != '\t'; cfg++) ;
4575 if (*cfg) {
4576 *cfg = '\0';
4577 cfg++;
4578 }
4579 if (*ptr)
4580 floppy_setup(ptr);
4581 }
4582}
4583
4584int init_module(void)
4585{
4586 if (floppy)
4587 parse_floppy_cfg_string(floppy);
4588 return floppy_init();
4589}
4590
4591void cleanup_module(void)
4592{
4593 int drive;
4594
4595 init_completion(&device_release);
4596 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
4597 unregister_blkdev(FLOPPY_MAJOR, "fd");
4598
4599 for (drive = 0; drive < N_DRIVE; drive++) {
4600 del_timer_sync(&motor_off_timer[drive]);
4601
4602 if ((allowed_drive_mask & (1 << drive)) &&
4603 fdc_state[FDC(drive)].version != FDC_NONE) {
4604 del_gendisk(disks[drive]);
4605 unregister_devfs_entries(drive);
4606 }
4607 put_disk(disks[drive]);
4608 }
4609 platform_device_unregister(&floppy_device);
4610 devfs_remove("floppy");
4611
4612 del_timer_sync(&fd_timeout);
4613 del_timer_sync(&fd_timer);
4614 blk_cleanup_queue(floppy_queue);
4615
4616 if (usage_count)
4617 floppy_release_irq_and_dma();
4618
4619 /* eject disk, if any */
4620 fd_eject(0);
4621
4622 wait_for_completion(&device_release);
4623}
4624
4625module_param(floppy, charp, 0);
4626module_param(FLOPPY_IRQ, int, 0);
4627module_param(FLOPPY_DMA, int, 0);
4628MODULE_AUTHOR("Alain L. Knaff");
4629MODULE_SUPPORTED_DEVICE("fd");
4630MODULE_LICENSE("GPL");
4631
4632#else
4633
4634__setup("floppy=", floppy_setup);
4635module_init(floppy_init)
4636#endif
4637
4638MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
new file mode 100644
index 000000000000..ab4db71375e0
--- /dev/null
+++ b/drivers/block/genhd.c
@@ -0,0 +1,685 @@
1/*
2 * gendisk handling
3 */
4
5#include <linux/config.h>
6#include <linux/module.h>
7#include <linux/fs.h>
8#include <linux/genhd.h>
9#include <linux/kernel.h>
10#include <linux/blkdev.h>
11#include <linux/init.h>
12#include <linux/spinlock.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/kmod.h>
16#include <linux/kobj_map.h>
17
18#define MAX_PROBE_HASH 255 /* random */
19
20static struct subsystem block_subsys;
21
22static DECLARE_MUTEX(block_subsys_sem);
23
24/*
25 * Can be deleted altogether. Later.
26 *
27 */
28static struct blk_major_name {
29 struct blk_major_name *next;
30 int major;
31 char name[16];
32} *major_names[MAX_PROBE_HASH];
33
34/* index in the above - for now: assume no multimajor ranges */
35static inline int major_to_index(int major)
36{
37 return major % MAX_PROBE_HASH;
38}
39
40#ifdef CONFIG_PROC_FS
41/* get block device names in somewhat random order */
42int get_blkdev_list(char *p)
43{
44 struct blk_major_name *n;
45 int i, len;
46
47 len = sprintf(p, "\nBlock devices:\n");
48
49 down(&block_subsys_sem);
50 for (i = 0; i < ARRAY_SIZE(major_names); i++) {
51 for (n = major_names[i]; n; n = n->next)
52 len += sprintf(p+len, "%3d %s\n",
53 n->major, n->name);
54 }
55 up(&block_subsys_sem);
56
57 return len;
58}
59#endif
60
61int register_blkdev(unsigned int major, const char *name)
62{
63 struct blk_major_name **n, *p;
64 int index, ret = 0;
65
66 down(&block_subsys_sem);
67
68 /* temporary */
69 if (major == 0) {
70 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
71 if (major_names[index] == NULL)
72 break;
73 }
74
75 if (index == 0) {
76 printk("register_blkdev: failed to get major for %s\n",
77 name);
78 ret = -EBUSY;
79 goto out;
80 }
81 major = index;
82 ret = major;
83 }
84
85 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
86 if (p == NULL) {
87 ret = -ENOMEM;
88 goto out;
89 }
90
91 p->major = major;
92 strlcpy(p->name, name, sizeof(p->name));
93 p->next = NULL;
94 index = major_to_index(major);
95
96 for (n = &major_names[index]; *n; n = &(*n)->next) {
97 if ((*n)->major == major)
98 break;
99 }
100 if (!*n)
101 *n = p;
102 else
103 ret = -EBUSY;
104
105 if (ret < 0) {
106 printk("register_blkdev: cannot get major %d for %s\n",
107 major, name);
108 kfree(p);
109 }
110out:
111 up(&block_subsys_sem);
112 return ret;
113}
114
115EXPORT_SYMBOL(register_blkdev);
116
117/* todo: make void - error printk here */
118int unregister_blkdev(unsigned int major, const char *name)
119{
120 struct blk_major_name **n;
121 struct blk_major_name *p = NULL;
122 int index = major_to_index(major);
123 int ret = 0;
124
125 down(&block_subsys_sem);
126 for (n = &major_names[index]; *n; n = &(*n)->next)
127 if ((*n)->major == major)
128 break;
129 if (!*n || strcmp((*n)->name, name))
130 ret = -EINVAL;
131 else {
132 p = *n;
133 *n = p->next;
134 }
135 up(&block_subsys_sem);
136 kfree(p);
137
138 return ret;
139}
140
141EXPORT_SYMBOL(unregister_blkdev);
142
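/*
 * Hedged sketch of how a driver typically uses the two registration calls
 * above (the driver name "mydrv" and the functions are hypothetical).
 * Passing major == 0 asks for a free major number, which is then returned;
 * requesting a fixed major returns 0 on success.
 */
#if 0	/* sketch only */
static int my_major;

static int __init my_init(void)
{
	my_major = register_blkdev(0, "mydrv");
	if (my_major < 0)
		return my_major;
	/* ... allocate queue and gendisk, then add_disk() ... */
	return 0;
}

static void __exit my_exit(void)
{
	unregister_blkdev(my_major, "mydrv");
}
#endif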
143static struct kobj_map *bdev_map;
144
145/*
146 * Register device numbers dev..(dev+range-1)
147 * range must be nonzero
148 * The hash chain is sorted on range, so that subranges can override.
149 */
150void blk_register_region(dev_t dev, unsigned long range, struct module *module,
151 struct kobject *(*probe)(dev_t, int *, void *),
152 int (*lock)(dev_t, void *), void *data)
153{
154 kobj_map(bdev_map, dev, range, module, probe, lock, data);
155}
156
157EXPORT_SYMBOL(blk_register_region);
158
159void blk_unregister_region(dev_t dev, unsigned long range)
160{
161 kobj_unmap(bdev_map, dev, range);
162}
163
164EXPORT_SYMBOL(blk_unregister_region);
165
166static struct kobject *exact_match(dev_t dev, int *part, void *data)
167{
168 struct gendisk *p = data;
169 return &p->kobj;
170}
171
172static int exact_lock(dev_t dev, void *data)
173{
174 struct gendisk *p = data;
175
176 if (!get_disk(p))
177 return -1;
178 return 0;
179}
180
181/**
182 * add_disk - add partitioning information to kernel list
183 * @disk: per-device partitioning information
184 *
185 * This function registers the partitioning information in @disk
186 * with the kernel.
187 */
188void add_disk(struct gendisk *disk)
189{
190 disk->flags |= GENHD_FL_UP;
191 blk_register_region(MKDEV(disk->major, disk->first_minor),
192 disk->minors, NULL, exact_match, exact_lock, disk);
193 register_disk(disk);
194 blk_register_queue(disk);
195}
196
197EXPORT_SYMBOL(add_disk);
198EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
199
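/*
 * Sketch of the usual sequence around add_disk(), mirroring what
 * floppy_init() does in drivers/block/floppy.c earlier in this commit
 * (MY_MAJOR, my_fops, my_queue and nr_sectors are hypothetical names).
 */
#if 0	/* sketch only */
static struct gendisk *my_disk;

static int my_attach(void)
{
	my_disk = alloc_disk(1);		/* 1 minor: no partition support */
	if (!my_disk)
		return -ENOMEM;
	my_disk->major = MY_MAJOR;
	my_disk->first_minor = 0;
	my_disk->fops = &my_fops;
	my_disk->queue = my_queue;
	sprintf(my_disk->disk_name, "mydrv0");
	set_capacity(my_disk, nr_sectors);	/* capacity in 512-byte sectors */
	add_disk(my_disk);			/* from here on the disk is live */
	return 0;
}
#endif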
200void unlink_gendisk(struct gendisk *disk)
201{
202 blk_unregister_queue(disk);
203 blk_unregister_region(MKDEV(disk->major, disk->first_minor),
204 disk->minors);
205}
206
207#define to_disk(obj) container_of(obj,struct gendisk,kobj)
208
209/**
210 * get_gendisk - get partitioning information for a given device
211 * @dev: device to get partitioning information for
212 *
213 * This function gets the structure containing partitioning
214 * information for the given device @dev.
215 */
216struct gendisk *get_gendisk(dev_t dev, int *part)
217{
218 struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
219 return kobj ? to_disk(kobj) : NULL;
220}
221
222#ifdef CONFIG_PROC_FS
223/* iterator */
224static void *part_start(struct seq_file *part, loff_t *pos)
225{
226 struct list_head *p;
227 loff_t l = *pos;
228
229 down(&block_subsys_sem);
230 list_for_each(p, &block_subsys.kset.list)
231 if (!l--)
232 return list_entry(p, struct gendisk, kobj.entry);
233 return NULL;
234}
235
236static void *part_next(struct seq_file *part, void *v, loff_t *pos)
237{
238 struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
239 ++*pos;
240 return p==&block_subsys.kset.list ? NULL :
241 list_entry(p, struct gendisk, kobj.entry);
242}
243
244static void part_stop(struct seq_file *part, void *v)
245{
246 up(&block_subsys_sem);
247}
248
249static int show_partition(struct seq_file *part, void *v)
250{
251 struct gendisk *sgp = v;
252 int n;
253 char buf[BDEVNAME_SIZE];
254
255 if (&sgp->kobj.entry == block_subsys.kset.list.next)
256 seq_puts(part, "major minor #blocks name\n\n");
257
258 /* Don't show non-partitionable removable devices or empty devices */
259 if (!get_capacity(sgp) ||
260 (sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
261 return 0;
262 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
263 return 0;
264
265 /* show the full disk and all non-0 size partitions of it */
266 seq_printf(part, "%4d %4d %10llu %s\n",
267 sgp->major, sgp->first_minor,
268 (unsigned long long)get_capacity(sgp) >> 1,
269 disk_name(sgp, 0, buf));
270 for (n = 0; n < sgp->minors - 1; n++) {
271 if (!sgp->part[n])
272 continue;
273 if (sgp->part[n]->nr_sects == 0)
274 continue;
275 seq_printf(part, "%4d %4d %10llu %s\n",
276 sgp->major, n + 1 + sgp->first_minor,
277 (unsigned long long)sgp->part[n]->nr_sects >> 1 ,
278 disk_name(sgp, n + 1, buf));
279 }
280
281 return 0;
282}
283
284struct seq_operations partitions_op = {
285 .start =part_start,
286 .next = part_next,
287 .stop = part_stop,
288 .show = show_partition
289};
290#endif
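/*
 * Sample of the /proc/partitions output produced above (values are
 * hypothetical).  #blocks is in 1 KiB units, i.e. the capacity in 512-byte
 * sectors shifted right by one.  Note that single-minor removable devices,
 * such as the floppy drives registered earlier in this commit, are
 * filtered out by the check above.
 *
 *   major minor  #blocks  name
 *
 *      3     0   39082680 hda
 *      3     1     104391 hda1
 */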
291
292
293extern int blk_dev_init(void);
294
295static struct kobject *base_probe(dev_t dev, int *part, void *data)
296{
297 if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
298 /* Make old-style 2.4 aliases work */
299 request_module("block-major-%d", MAJOR(dev));
300 return NULL;
301}
302
303static int __init genhd_device_init(void)
304{
305 bdev_map = kobj_map_init(base_probe, &block_subsys_sem);
306 blk_dev_init();
307 subsystem_register(&block_subsys);
308 return 0;
309}
310
311subsys_initcall(genhd_device_init);
312
313
314
315/*
316 * kobject & sysfs bindings for block devices
317 */
318static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
319 char *page)
320{
321 struct gendisk *disk = to_disk(kobj);
322 struct disk_attribute *disk_attr =
323 container_of(attr,struct disk_attribute,attr);
324 ssize_t ret = 0;
325
326 if (disk_attr->show)
327 ret = disk_attr->show(disk,page);
328 return ret;
329}
330
331static struct sysfs_ops disk_sysfs_ops = {
332 .show = &disk_attr_show,
333};
334
335static ssize_t disk_dev_read(struct gendisk * disk, char *page)
336{
337 dev_t base = MKDEV(disk->major, disk->first_minor);
338 return print_dev_t(page, base);
339}
340static ssize_t disk_range_read(struct gendisk * disk, char *page)
341{
342 return sprintf(page, "%d\n", disk->minors);
343}
344static ssize_t disk_removable_read(struct gendisk * disk, char *page)
345{
346 return sprintf(page, "%d\n",
347 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
348
349}
350static ssize_t disk_size_read(struct gendisk * disk, char *page)
351{
352 return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
353}
354
355static ssize_t disk_stats_read(struct gendisk * disk, char *page)
356{
357 preempt_disable();
358 disk_round_stats(disk);
359 preempt_enable();
360 return sprintf(page,
361 "%8u %8u %8llu %8u "
362 "%8u %8u %8llu %8u "
363 "%8u %8u %8u"
364 "\n",
365 disk_stat_read(disk, reads), disk_stat_read(disk, read_merges),
366 (unsigned long long)disk_stat_read(disk, read_sectors),
367 jiffies_to_msecs(disk_stat_read(disk, read_ticks)),
368 disk_stat_read(disk, writes),
369 disk_stat_read(disk, write_merges),
370 (unsigned long long)disk_stat_read(disk, write_sectors),
371 jiffies_to_msecs(disk_stat_read(disk, write_ticks)),
372 disk->in_flight,
373 jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
374 jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
375}
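/*
 * The "stat" attribute above thus exposes eleven fields, in this order:
 * reads, read merges, sectors read, milliseconds spent reading, writes,
 * write merges, sectors written, milliseconds spent writing, I/Os
 * currently in flight, milliseconds spent doing I/O, and weighted
 * milliseconds in queue (time_in_queue).
 */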
376static struct disk_attribute disk_attr_dev = {
377 .attr = {.name = "dev", .mode = S_IRUGO },
378 .show = disk_dev_read
379};
380static struct disk_attribute disk_attr_range = {
381 .attr = {.name = "range", .mode = S_IRUGO },
382 .show = disk_range_read
383};
384static struct disk_attribute disk_attr_removable = {
385 .attr = {.name = "removable", .mode = S_IRUGO },
386 .show = disk_removable_read
387};
388static struct disk_attribute disk_attr_size = {
389 .attr = {.name = "size", .mode = S_IRUGO },
390 .show = disk_size_read
391};
392static struct disk_attribute disk_attr_stat = {
393 .attr = {.name = "stat", .mode = S_IRUGO },
394 .show = disk_stats_read
395};
396
397static struct attribute * default_attrs[] = {
398 &disk_attr_dev.attr,
399 &disk_attr_range.attr,
400 &disk_attr_removable.attr,
401 &disk_attr_size.attr,
402 &disk_attr_stat.attr,
403 NULL,
404};
405
406static void disk_release(struct kobject * kobj)
407{
408 struct gendisk *disk = to_disk(kobj);
409 kfree(disk->random);
410 kfree(disk->part);
411 free_disk_stats(disk);
412 kfree(disk);
413}
414
415static struct kobj_type ktype_block = {
416 .release = disk_release,
417 .sysfs_ops = &disk_sysfs_ops,
418 .default_attrs = default_attrs,
419};
420
421extern struct kobj_type ktype_part;
422
423static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
424{
425 struct kobj_type *ktype = get_ktype(kobj);
426
427 return ((ktype == &ktype_block) || (ktype == &ktype_part));
428}
429
430static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
431 int num_envp, char *buffer, int buffer_size)
432{
433 struct kobj_type *ktype = get_ktype(kobj);
434 struct device *physdev;
435 struct gendisk *disk;
436 struct hd_struct *part;
437 int length = 0;
438 int i = 0;
439
440 if (ktype == &ktype_block) {
441 disk = container_of(kobj, struct gendisk, kobj);
442 add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
443 &length, "MINOR=%u", disk->first_minor);
444 } else if (ktype == &ktype_part) {
445 disk = container_of(kobj->parent, struct gendisk, kobj);
446 part = container_of(kobj, struct hd_struct, kobj);
447 add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
448 &length, "MINOR=%u",
449 disk->first_minor + part->partno);
450 } else
451 return 0;
452
453 add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
454 "MAJOR=%u", disk->major);
455
456 /* add physical device, backing this device */
457 physdev = disk->driverfs_dev;
458 if (physdev) {
459 char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
460
461 add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
462 &length, "PHYSDEVPATH=%s", path);
463 kfree(path);
464
465 if (physdev->bus)
466 add_hotplug_env_var(envp, num_envp, &i,
467 buffer, buffer_size, &length,
468 "PHYSDEVBUS=%s",
469 physdev->bus->name);
470
471 if (physdev->driver)
472 add_hotplug_env_var(envp, num_envp, &i,
473 buffer, buffer_size, &length,
474 "PHYSDEVDRIVER=%s",
475 physdev->driver->name);
476 }
477
478 /* terminate, set to next free slot, shrink available space */
479 envp[i] = NULL;
480 envp = &envp[i];
481 num_envp -= i;
482 buffer = &buffer[length];
483 buffer_size -= length;
484
485 return 0;
486}
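/*
 * For a hypothetical sda1 partition event, the environment assembled
 * above would end up containing variables along these lines (values
 * purely illustrative):
 *
 *	MINOR=1
 *	MAJOR=8
 *	PHYSDEVPATH=/devices/pci0000:00/0000:00:10.0/host0/target0:0:0/0:0:0:0
 *	PHYSDEVBUS=scsi
 *	PHYSDEVDRIVER=sd
 */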
487
488static struct kset_hotplug_ops block_hotplug_ops = {
489 .filter = block_hotplug_filter,
490 .hotplug = block_hotplug,
491};
492
493/* declare block_subsys. */
494static decl_subsys(block, &ktype_block, &block_hotplug_ops);
495
496
497/*
498 * aggregate disk stat collector. Uses the same stats that the sysfs
499 * entries do, above, but makes them available through one seq_file.
500 * Watching a few disks may be efficient through sysfs, but watching
501 * all of them will be more efficient through this interface.
502 *
503 * The output looks suspiciously like /proc/partitions with a bunch of
504 * extra fields.
505 */
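/*
 * A rough sketch of one line of this output, assuming a hypothetical
 * disk "hda" on major 3. The fourteen fields follow the seq_printf()
 * format used in diskstats_show() below: major, minor, name, reads,
 * read merges, read sectors, read ticks (ms), writes, write merges,
 * write sectors, write ticks (ms), I/Os in flight, io_ticks (ms),
 * time_in_queue (ms):
 *
 *	   3    0 hda 4512 1023 180370 21030 1810 2404 33712 14980 0 16120 36010
 */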
506
507/* iterator */
508static void *diskstats_start(struct seq_file *part, loff_t *pos)
509{
510 loff_t k = *pos;
511 struct list_head *p;
512
513 down(&block_subsys_sem);
514 list_for_each(p, &block_subsys.kset.list)
515 if (!k--)
516 return list_entry(p, struct gendisk, kobj.entry);
517 return NULL;
518}
519
520static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
521{
522 struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
523 ++*pos;
524 return p==&block_subsys.kset.list ? NULL :
525 list_entry(p, struct gendisk, kobj.entry);
526}
527
528static void diskstats_stop(struct seq_file *part, void *v)
529{
530 up(&block_subsys_sem);
531}
532
533static int diskstats_show(struct seq_file *s, void *v)
534{
535 struct gendisk *gp = v;
536 char buf[BDEVNAME_SIZE];
537 int n = 0;
538
539 /*
540 if (&sgp->kobj.entry == block_subsys.kset.list.next)
541 seq_puts(s, "major minor name"
542 " rio rmerge rsect ruse wio wmerge "
543 "wsect wuse running use aveq"
544 "\n\n");
545 */
546
547 preempt_disable();
548 disk_round_stats(gp);
549 preempt_enable();
550 seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
551 gp->major, n + gp->first_minor, disk_name(gp, n, buf),
552 disk_stat_read(gp, reads), disk_stat_read(gp, read_merges),
553 (unsigned long long)disk_stat_read(gp, read_sectors),
554 jiffies_to_msecs(disk_stat_read(gp, read_ticks)),
555 disk_stat_read(gp, writes), disk_stat_read(gp, write_merges),
556 (unsigned long long)disk_stat_read(gp, write_sectors),
557 jiffies_to_msecs(disk_stat_read(gp, write_ticks)),
558 gp->in_flight,
559 jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
560 jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
561
562 /* now show all non-0 size partitions of it */
563 for (n = 0; n < gp->minors - 1; n++) {
564 struct hd_struct *hd = gp->part[n];
565
566 if (hd && hd->nr_sects)
567 seq_printf(s, "%4d %4d %s %u %u %u %u\n",
568 gp->major, n + gp->first_minor + 1,
569 disk_name(gp, n + 1, buf),
570 hd->reads, hd->read_sectors,
571 hd->writes, hd->write_sectors);
572 }
573
574 return 0;
575}
576
577struct seq_operations diskstats_op = {
578 .start = diskstats_start,
579 .next = diskstats_next,
580 .stop = diskstats_stop,
581 .show = diskstats_show
582};
583
584
585struct gendisk *alloc_disk(int minors)
586{
587 struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
588 if (disk) {
589 memset(disk, 0, sizeof(struct gendisk));
590 if (!init_disk_stats(disk)) {
591 kfree(disk);
592 return NULL;
593 }
594 if (minors > 1) {
595 int size = (minors - 1) * sizeof(struct hd_struct *);
596 disk->part = kmalloc(size, GFP_KERNEL);
597 if (!disk->part) {
598 kfree(disk);
599 return NULL;
600 }
601 memset(disk->part, 0, size);
602 }
603 disk->minors = minors;
604 kobj_set_kset_s(disk,block_subsys);
605 kobject_init(&disk->kobj);
606 rand_initialize_disk(disk);
607 }
608 return disk;
609}
610
611EXPORT_SYMBOL(alloc_disk);
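/*
 * A minimal usage sketch (all names other than the block-layer calls
 * are made up for illustration): a driver allocates a gendisk with room
 * for the whole-disk node plus partitions, fills in the identity
 * fields, and registers it with add_disk() once the queue and fops are
 * in place.
 *
 *	struct gendisk *gd = alloc_disk(16);
 *
 *	if (!gd)
 *		return -ENOMEM;
 *	gd->major = MY_MAJOR;
 *	gd->first_minor = 0;
 *	gd->fops = &my_block_fops;
 *	gd->queue = my_queue;
 *	sprintf(gd->disk_name, "myblk0");
 *	set_capacity(gd, nsectors);
 *	add_disk(gd);
 */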
612
613struct kobject *get_disk(struct gendisk *disk)
614{
615 struct module *owner;
616 struct kobject *kobj;
617
618 if (!disk->fops)
619 return NULL;
620 owner = disk->fops->owner;
621 if (owner && !try_module_get(owner))
622 return NULL;
623 kobj = kobject_get(&disk->kobj);
624 if (kobj == NULL) {
625 module_put(owner);
626 return NULL;
627 }
628 return kobj;
629
630}
631
632EXPORT_SYMBOL(get_disk);
633
634void put_disk(struct gendisk *disk)
635{
636 if (disk)
637 kobject_put(&disk->kobj);
638}
639
640EXPORT_SYMBOL(put_disk);
641
642void set_device_ro(struct block_device *bdev, int flag)
643{
644 if (bdev->bd_contains != bdev)
645 bdev->bd_part->policy = flag;
646 else
647 bdev->bd_disk->policy = flag;
648}
649
650EXPORT_SYMBOL(set_device_ro);
651
652void set_disk_ro(struct gendisk *disk, int flag)
653{
654 int i;
655 disk->policy = flag;
656 for (i = 0; i < disk->minors - 1; i++)
657 if (disk->part[i]) disk->part[i]->policy = flag;
658}
659
660EXPORT_SYMBOL(set_disk_ro);
661
662int bdev_read_only(struct block_device *bdev)
663{
664 if (!bdev)
665 return 0;
666 else if (bdev->bd_contains != bdev)
667 return bdev->bd_part->policy;
668 else
669 return bdev->bd_disk->policy;
670}
671
672EXPORT_SYMBOL(bdev_read_only);
673
674int invalidate_partition(struct gendisk *disk, int index)
675{
676 int res = 0;
677 struct block_device *bdev = bdget_disk(disk, index);
678 if (bdev) {
679 res = __invalidate_device(bdev, 1);
680 bdput(bdev);
681 }
682 return res;
683}
684
685EXPORT_SYMBOL(invalidate_partition);
diff --git a/drivers/block/ida_cmd.h b/drivers/block/ida_cmd.h
new file mode 100644
index 000000000000..98b5746b3089
--- /dev/null
+++ b/drivers/block/ida_cmd.h
@@ -0,0 +1,349 @@
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#ifndef ARRAYCMD_H
23#define ARRAYCMD_H
24
25#include <asm/types.h>
26#if 0
27#include <linux/blkdev.h>
28#endif
29
30/* for the Smart Array 42XX cards */
31#define S42XX_REQUEST_PORT_OFFSET 0x40
32#define S42XX_REPLY_INTR_MASK_OFFSET 0x34
33#define S42XX_REPLY_PORT_OFFSET 0x44
34#define S42XX_INTR_STATUS 0x30
35
36#define S42XX_INTR_OFF 0x08
37#define S42XX_INTR_PENDING 0x08
38
39#define COMMAND_FIFO 0x04
40#define COMMAND_COMPLETE_FIFO 0x08
41#define INTR_MASK 0x0C
42#define INTR_STATUS 0x10
43#define INTR_PENDING 0x14
44
45#define FIFO_NOT_EMPTY 0x01
46#define FIFO_NOT_FULL 0x02
47
48#define BIG_PROBLEM 0x40
49#define LOG_NOT_CONF 2
50
51#pragma pack(1)
52typedef struct {
53 __u32 size;
54 __u32 addr;
55} sg_t;
56
57#define RCODE_NONFATAL 0x02
58#define RCODE_FATAL 0x04
59#define RCODE_INVREQ 0x10
60typedef struct {
61 __u16 next;
62 __u8 cmd;
63 __u8 rcode;
64 __u32 blk;
65 __u16 blk_cnt;
66 __u8 sg_cnt;
67 __u8 reserved;
68} rhdr_t;
69
70#define SG_MAX 32
71typedef struct {
72 rhdr_t hdr;
73 sg_t sg[SG_MAX];
74 __u32 bp;
75} rblk_t;
76
77typedef struct {
78 __u8 unit;
79 __u8 prio;
80 __u16 size;
81} chdr_t;
82
83#define CMD_RWREQ 0x00
84#define CMD_IOCTL_PEND 0x01
85#define CMD_IOCTL_DONE 0x02
86
87typedef struct cmdlist {
88 chdr_t hdr;
89 rblk_t req;
90 __u32 size;
91 int retry_cnt;
92 __u32 busaddr;
93 int ctlr;
94 struct cmdlist *prev;
95 struct cmdlist *next;
96 struct request *rq;
97 int type;
98} cmdlist_t;
99
100#define ID_CTLR 0x11
101typedef struct {
102 __u8 nr_drvs;
103 __u32 cfg_sig;
104 __u8 firm_rev[4];
105 __u8 rom_rev[4];
106 __u8 hw_rev;
107 __u32 bb_rev;
108 __u32 drv_present_map;
109 __u32 ext_drv_map;
110 __u32 board_id;
111 __u8 cfg_error;
112 __u32 non_disk_bits;
113 __u8 bad_ram_addr;
114 __u8 cpu_rev;
115 __u8 pdpi_rev;
116 __u8 epic_rev;
117 __u8 wcxc_rev;
118 __u8 marketing_rev;
119 __u8 ctlr_flags;
120 __u8 host_flags;
121 __u8 expand_dis;
122 __u8 scsi_chips;
123 __u32 max_req_blocks;
124 __u32 ctlr_clock;
125 __u8 drvs_per_bus;
126 __u16 big_drv_present_map[8];
127 __u16 big_ext_drv_map[8];
128 __u16 big_non_disk_map[8];
129 __u16 task_flags;
130 __u8 icl_bus;
131 __u8 red_modes;
132 __u8 cur_red_mode;
133 __u8 red_ctlr_stat;
134 __u8 red_fail_reason;
135 __u8 reserved[403];
136} id_ctlr_t;
137
138typedef struct {
139 __u16 cyl;
140 __u8 heads;
141 __u8 xsig;
142 __u8 psectors;
143 __u16 wpre;
144 __u8 maxecc;
145 __u8 drv_ctrl;
146 __u16 pcyls;
147 __u8 pheads;
148 __u16 landz;
149 __u8 sect_per_track;
150 __u8 cksum;
151} drv_param_t;
152
153#define ID_LOG_DRV 0x10
154typedef struct {
155 __u16 blk_size;
156 __u32 nr_blks;
157 drv_param_t drv;
158 __u8 fault_tol;
159 __u8 reserved;
160 __u8 bios_disable;
161} id_log_drv_t;
162
163#define ID_LOG_DRV_EXT 0x18
164typedef struct {
165 __u32 log_drv_id;
166 __u8 log_drv_label[64];
167 __u8 reserved[418];
168} id_log_drv_ext_t;
169
170#define SENSE_LOG_DRV_STAT 0x12
171typedef struct {
172 __u8 status;
173 __u32 fail_map;
174 __u16 read_err[32];
175 __u16 write_err[32];
176 __u8 drv_err_data[256];
177 __u8 drq_timeout[32];
178 __u32 blks_to_recover;
179 __u8 drv_recovering;
180 __u16 remap_cnt[32];
181 __u32 replace_drv_map;
182 __u32 act_spare_map;
183 __u8 spare_stat;
184 __u8 spare_repl_map[32];
185 __u32 repl_ok_map;
186 __u8 media_exch;
187 __u8 cache_fail;
188 __u8 expn_fail;
189 __u8 unit_flags;
190 __u16 big_fail_map[8];
191 __u16 big_remap_map[128];
192 __u16 big_repl_map[8];
193 __u16 big_act_spare_map[8];
194 __u8 big_spar_repl_map[128];
195 __u16 big_repl_ok_map[8];
196 __u8 big_drv_rebuild;
197 __u8 reserved[36];
198} sense_log_drv_stat_t;
199
200#define START_RECOVER 0x13
201
202#define ID_PHYS_DRV 0x15
203typedef struct {
204 __u8 scsi_bus;
205 __u8 scsi_id;
206 __u16 blk_size;
207 __u32 nr_blks;
208 __u32 rsvd_blks;
209 __u8 drv_model[40];
210 __u8 drv_sn[40];
211 __u8 drv_fw[8];
212 __u8 scsi_iq_bits;
213 __u8 compaq_drv_stmp;
214 __u8 last_fail;
215 __u8 phys_drv_flags;
216 __u8 phys_drv_flags1;
217 __u8 scsi_lun;
218 __u8 phys_drv_flags2;
219 __u8 reserved;
220 __u32 spi_speed_rules;
221 __u8 phys_connector[2];
222 __u8 phys_box_on_bus;
223 __u8 phys_bay_in_box;
224} id_phys_drv_t;
225
226#define BLINK_DRV_LEDS 0x16
227typedef struct {
228 __u32 blink_duration;
229 __u32 reserved;
230 __u8 blink[256];
231 __u8 reserved1[248];
232} blink_drv_leds_t;
233
234#define SENSE_BLINK_LEDS 0x17
235typedef struct {
236 __u32 blink_duration;
237 __u32 btime_elap;
238 __u8 blink[256];
239 __u8 reserved1[248];
240} sense_blink_leds_t;
241
242#define IDA_READ 0x20
243#define IDA_WRITE 0x30
244#define IDA_WRITE_MEDIA 0x31
245#define RESET_TO_DIAG 0x40
246#define DIAG_PASS_THRU 0x41
247
248#define SENSE_CONFIG 0x50
249#define SET_CONFIG 0x51
250typedef struct {
251 __u32 cfg_sig;
252 __u16 compat_port;
253 __u8 data_dist_mode;
254 __u8 surf_an_ctrl;
255 __u16 ctlr_phys_drv;
256 __u16 log_unit_phys_drv;
257 __u16 fault_tol_mode;
258 __u8 phys_drv_param[16];
259 drv_param_t drv;
260 __u32 drv_asgn_map;
261 __u16 dist_factor;
262 __u32 spare_asgn_map;
263 __u8 reserved[6];
264 __u16 os;
265 __u8 ctlr_order;
266 __u8 extra_info;
267 __u32 data_offs;
268 __u8 parity_backedout_write_drvs;
269 __u8 parity_dist_mode;
270 __u8 parity_shift_fact;
271 __u8 bios_disable_flag;
272 __u32 blks_on_vol;
273 __u32 blks_per_drv;
274 __u8 scratch[16];
275 __u16 big_drv_map[8];
276 __u16 big_spare_map[8];
277 __u8 ss_source_vol;
278 __u8 mix_drv_cap_range;
279 struct {
280 __u16 big_drv_map[8];
281 __u32 blks_per_drv;
282 __u16 fault_tol_mode;
283 __u16 dist_factor;
284 } MDC_range[4];
285 __u8 reserved1[248];
286} config_t;
287
288#define BYPASS_VOL_STATE 0x52
289#define SS_CREATE_VOL 0x53
290#define CHANGE_CONFIG 0x54
291#define SENSE_ORIG_CONF 0x55
292#define REORDER_LOG_DRV 0x56
293typedef struct {
294 __u8 old_units[32];
295} reorder_log_drv_t;
296
297#define LABEL_LOG_DRV 0x57
298typedef struct {
299 __u8 log_drv_label[64];
300} label_log_drv_t;
301
302#define SS_TO_VOL 0x58
303
304#define SET_SURF_DELAY 0x60
305typedef struct {
306 __u16 delay;
307 __u8 reserved[510];
308} surf_delay_t;
309
310#define SET_OVERHEAT_DELAY 0x61
311typedef struct {
312 __u16 delay;
313} overhead_delay_t;
314
315#define SET_MP_DELAY
316typedef struct {
317 __u16 delay;
318 __u8 reserved[510];
319} mp_delay_t;
320
321#define PASSTHRU_A 0x91
322typedef struct {
323 __u8 target;
324 __u8 bus;
325 __u8 lun;
326 __u32 timeout;
327 __u32 flags;
328 __u8 status;
329 __u8 error;
330 __u8 cdb_len;
331 __u8 sense_error;
332 __u8 sense_key;
333 __u32 sense_info;
334 __u8 sense_code;
335 __u8 sense_qual;
336 __u32 residual;
337 __u8 reserved[4];
338 __u8 cdb[12];
339} scsi_param_t;
340
341#define RESUME_BACKGROUND_ACTIVITY 0x99
342#define SENSE_CONTROLLER_PERFORMANCE 0xa8
343#define FLUSH_CACHE 0xc2
344#define COLLECT_BUFFER 0xd2
345#define READ_FLASH_ROM 0xf6
346#define WRITE_FLASH_ROM 0xf7
347#pragma pack()
348
349#endif /* ARRAYCMD_H */
diff --git a/drivers/block/ida_ioctl.h b/drivers/block/ida_ioctl.h
new file mode 100644
index 000000000000..888fff9caed0
--- /dev/null
+++ b/drivers/block/ida_ioctl.h
@@ -0,0 +1,87 @@
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 */
22#ifndef IDA_IOCTL_H
23#define IDA_IOCTL_H
24
25#include "ida_cmd.h"
26#include "cpqarray.h"
27
28#define IDAGETDRVINFO 0x27272828
29#define IDAPASSTHRU 0x28282929
30#define IDAGETCTLRSIG 0x29293030
31#define IDAREVALIDATEVOLS 0x30303131
32#define IDADRIVERVERSION 0x31313232
33#define IDAGETPCIINFO 0x32323333
34
35typedef struct _ida_pci_info_struct
36{
37 unsigned char bus;
38 unsigned char dev_fn;
39 __u32 board_id;
40} ida_pci_info_struct;
41/*
42 * Normally, the ioctl determines the logical unit for this command by
43 * the major,minor number of the fd passed to ioctl. If you need to send
44 * a command to a different/nonexistent unit (such as during config), you
45 * can override the normal behavior by setting the unit valid bit. (Normally,
46 * it should be zero.) The controller the command is sent to is still
47 * determined by the major number of the open device.
48 */
49
50#define UNITVALID 0x80
51typedef struct {
52 __u8 cmd;
53 __u8 rcode;
54 __u8 unit;
55 __u32 blk;
56 __u16 blk_cnt;
57
58/* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */
59 struct {
60 void __user *addr;
61 size_t size;
62 } sg[SG_MAX];
63 int sg_cnt;
64
65 union ctlr_cmds {
66 drv_info_t drv;
67 unsigned char buf[1024];
68
69 id_ctlr_t id_ctlr;
70 drv_param_t drv_param;
71 id_log_drv_t id_log_drv;
72 id_log_drv_ext_t id_log_drv_ext;
73 sense_log_drv_stat_t sense_log_drv_stat;
74 id_phys_drv_t id_phys_drv;
75 blink_drv_leds_t blink_drv_leds;
76 sense_blink_leds_t sense_blink_leds;
77 config_t config;
78 reorder_log_drv_t reorder_log_drv;
79 label_log_drv_t label_log_drv;
80 surf_delay_t surf_delay;
81 overhead_delay_t overhead_delay;
82 mp_delay_t mp_delay;
83 scsi_param_t scsi_param;
84 } c;
85} ida_ioctl_t;
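/*
 * A rough userspace sketch of the UNITVALID override described above
 * (the file descriptor and unit number are illustrative, and the
 * elided fields are filled in as for a normal request): the command
 * still goes to the controller that owns the open device, but is
 * directed at an explicitly chosen logical drive rather than the one
 * implied by the minor number.
 *
 *	ida_ioctl_t io;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.cmd = ID_LOG_DRV;
 *	io.unit = 2 | UNITVALID;	address logical drive 2 explicitly
 *	... fill in the remaining fields as usual ...
 *	if (ioctl(fd, IDAPASSTHRU, &io) < 0)
 *		perror("IDAPASSTHRU");
 */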
86
87#endif /* IDA_IOCTL_H */
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
new file mode 100644
index 000000000000..5e03f5157ef9
--- /dev/null
+++ b/drivers/block/ioctl.c
@@ -0,0 +1,239 @@
1#include <linux/sched.h> /* for capable() */
2#include <linux/blkdev.h>
3#include <linux/blkpg.h>
4#include <linux/backing-dev.h>
5#include <linux/buffer_head.h>
6#include <linux/smp_lock.h>
7#include <asm/uaccess.h>
8
9static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
10{
11 struct block_device *bdevp;
12 struct gendisk *disk;
13 struct blkpg_ioctl_arg a;
14 struct blkpg_partition p;
15 long long start, length;
16 int part;
17 int i;
18
19 if (!capable(CAP_SYS_ADMIN))
20 return -EACCES;
21 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
22 return -EFAULT;
23 if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
24 return -EFAULT;
25 disk = bdev->bd_disk;
26 if (bdev != bdev->bd_contains)
27 return -EINVAL;
28 part = p.pno;
29 if (part <= 0 || part >= disk->minors)
30 return -EINVAL;
31 switch (a.op) {
32 case BLKPG_ADD_PARTITION:
33 start = p.start >> 9;
34 length = p.length >> 9;
35 /* check for fit in a hd_struct */
36 if (sizeof(sector_t) == sizeof(long) &&
37 sizeof(long long) > sizeof(long)) {
38 long pstart = start, plength = length;
39 if (pstart != start || plength != length
40 || pstart < 0 || plength < 0)
41 return -EINVAL;
42 }
43 /* partition number in use? */
44 down(&bdev->bd_sem);
45 if (disk->part[part - 1]) {
46 up(&bdev->bd_sem);
47 return -EBUSY;
48 }
49 /* overlap? */
50 for (i = 0; i < disk->minors - 1; i++) {
51 struct hd_struct *s = disk->part[i];
52
53 if (!s)
54 continue;
55 if (!(start+length <= s->start_sect ||
56 start >= s->start_sect + s->nr_sects)) {
57 up(&bdev->bd_sem);
58 return -EBUSY;
59 }
60 }
61 /* all seems OK */
62 add_partition(disk, part, start, length);
63 up(&bdev->bd_sem);
64 return 0;
65 case BLKPG_DEL_PARTITION:
66 if (!disk->part[part-1])
67 return -ENXIO;
68 if (disk->part[part - 1]->nr_sects == 0)
69 return -ENXIO;
70 bdevp = bdget_disk(disk, part);
71 if (!bdevp)
72 return -ENOMEM;
73 down(&bdevp->bd_sem);
74 if (bdevp->bd_openers) {
75 up(&bdevp->bd_sem);
76 bdput(bdevp);
77 return -EBUSY;
78 }
79 /* all seems OK */
80 fsync_bdev(bdevp);
81 invalidate_bdev(bdevp, 0);
82
83 down(&bdev->bd_sem);
84 delete_partition(disk, part);
85 up(&bdev->bd_sem);
86 up(&bdevp->bd_sem);
87 bdput(bdevp);
88
89 return 0;
90 default:
91 return -EINVAL;
92 }
93}
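/*
 * A rough userspace sketch of driving this through the BLKPG ioctl
 * (fd refers to the opened whole-disk device; the offsets are
 * illustrative, and <linux/blkpg.h> provides the structures). Note
 * that blkpg_partition.start and .length are given in bytes, not
 * sectors -- the >> 9 above does the conversion.
 *
 *	struct blkpg_partition part = {
 *		.start  = 1048576,		1 MiB into the disk
 *		.length = 104857600,		100 MiB partition
 *		.pno    = 1,
 *	};
 *	struct blkpg_ioctl_arg arg = {
 *		.op      = BLKPG_ADD_PARTITION,
 *		.datalen = sizeof(part),
 *		.data    = &part,
 *	};
 *
 *	if (ioctl(fd, BLKPG, &arg) < 0)
 *		perror("BLKPG_ADD_PARTITION");
 */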
94
95static int blkdev_reread_part(struct block_device *bdev)
96{
97 struct gendisk *disk = bdev->bd_disk;
98 int res;
99
100 if (disk->minors == 1 || bdev != bdev->bd_contains)
101 return -EINVAL;
102 if (!capable(CAP_SYS_ADMIN))
103 return -EACCES;
104 if (down_trylock(&bdev->bd_sem))
105 return -EBUSY;
106 res = rescan_partitions(disk, bdev);
107 up(&bdev->bd_sem);
108 return res;
109}
110
111static int put_ushort(unsigned long arg, unsigned short val)
112{
113 return put_user(val, (unsigned short __user *)arg);
114}
115
116static int put_int(unsigned long arg, int val)
117{
118 return put_user(val, (int __user *)arg);
119}
120
121static int put_long(unsigned long arg, long val)
122{
123 return put_user(val, (long __user *)arg);
124}
125
126static int put_ulong(unsigned long arg, unsigned long val)
127{
128 return put_user(val, (unsigned long __user *)arg);
129}
130
131static int put_u64(unsigned long arg, u64 val)
132{
133 return put_user(val, (u64 __user *)arg);
134}
135
136int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
137 unsigned long arg)
138{
139 struct block_device *bdev = inode->i_bdev;
140 struct gendisk *disk = bdev->bd_disk;
141 struct backing_dev_info *bdi;
142 int ret, n;
143
144 switch (cmd) {
145 case BLKRAGET:
146 case BLKFRAGET:
147 if (!arg)
148 return -EINVAL;
149 bdi = blk_get_backing_dev_info(bdev);
150 if (bdi == NULL)
151 return -ENOTTY;
152 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
153 case BLKROGET:
154 return put_int(arg, bdev_read_only(bdev) != 0);
155 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
156 return put_int(arg, block_size(bdev));
157 case BLKSSZGET: /* get block device hardware sector size */
158 return put_int(arg, bdev_hardsect_size(bdev));
159 case BLKSECTGET:
160 return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
161 case BLKRASET:
162 case BLKFRASET:
163		if (!capable(CAP_SYS_ADMIN))
164 return -EACCES;
165 bdi = blk_get_backing_dev_info(bdev);
166 if (bdi == NULL)
167 return -ENOTTY;
168 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
169 return 0;
170 case BLKBSZSET:
171 /* set the logical block size */
172 if (!capable(CAP_SYS_ADMIN))
173 return -EACCES;
174 if (!arg)
175 return -EINVAL;
176 if (get_user(n, (int __user *) arg))
177 return -EFAULT;
178 if (bd_claim(bdev, file) < 0)
179 return -EBUSY;
180 ret = set_blocksize(bdev, n);
181 bd_release(bdev);
182 return ret;
183 case BLKPG:
184 return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
185 case BLKRRPART:
186 return blkdev_reread_part(bdev);
187 case BLKGETSIZE:
188 if ((bdev->bd_inode->i_size >> 9) > ~0UL)
189 return -EFBIG;
190 return put_ulong(arg, bdev->bd_inode->i_size >> 9);
191 case BLKGETSIZE64:
192 return put_u64(arg, bdev->bd_inode->i_size);
193 case BLKFLSBUF:
194 if (!capable(CAP_SYS_ADMIN))
195 return -EACCES;
196 if (disk->fops->ioctl) {
197 ret = disk->fops->ioctl(inode, file, cmd, arg);
198 /* -EINVAL to handle old uncorrected drivers */
199 if (ret != -EINVAL && ret != -ENOTTY)
200 return ret;
201 }
202 fsync_bdev(bdev);
203 invalidate_bdev(bdev, 0);
204 return 0;
205 case BLKROSET:
206 if (disk->fops->ioctl) {
207 ret = disk->fops->ioctl(inode, file, cmd, arg);
208 /* -EINVAL to handle old uncorrected drivers */
209 if (ret != -EINVAL && ret != -ENOTTY)
210 return ret;
211 }
212 if (!capable(CAP_SYS_ADMIN))
213 return -EACCES;
214 if (get_user(n, (int __user *)(arg)))
215 return -EFAULT;
216 set_device_ro(bdev, n);
217 return 0;
218 default:
219 if (disk->fops->ioctl)
220 return disk->fops->ioctl(inode, file, cmd, arg);
221 }
222 return -ENOTTY;
223}
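/*
 * A small userspace sketch of the size queries handled above (the
 * device path is illustrative): BLKGETSIZE64 reports the capacity in
 * bytes, BLKSSZGET the hardware sector size.
 *
 *	unsigned long long bytes;
 *	int ssz;
 *	int fd = open("/dev/hda", O_RDONLY);
 *
 *	if (ioctl(fd, BLKGETSIZE64, &bytes) == 0 &&
 *	    ioctl(fd, BLKSSZGET, &ssz) == 0)
 *		printf("%llu bytes, %d-byte sectors\n", bytes, ssz);
 */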
224
225/* Most of the generic ioctls are handled in the normal fallback path.
226 This assumes the blkdev's low level compat_ioctl always returns
227 ENOIOCTLCMD for unknown ioctls. */
228long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
229{
230 struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
231 struct gendisk *disk = bdev->bd_disk;
232 int ret = -ENOIOCTLCMD;
233 if (disk->fops->compat_ioctl) {
234 lock_kernel();
235 ret = disk->fops->compat_ioctl(file, cmd, arg);
236 unlock_kernel();
237 }
238 return ret;
239}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
new file mode 100644
index 000000000000..02242e8ba996
--- /dev/null
+++ b/drivers/block/ll_rw_blk.c
@@ -0,0 +1,3642 @@
1/*
2 * linux/drivers/block/ll_rw_blk.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
6 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
7 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 8 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July 2000
 9 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
10 */
11
12/*
13 * This handles all read/write requests to block devices
14 */
15#include <linux/config.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/backing-dev.h>
19#include <linux/bio.h>
20#include <linux/blkdev.h>
21#include <linux/highmem.h>
22#include <linux/mm.h>
23#include <linux/kernel_stat.h>
24#include <linux/string.h>
25#include <linux/init.h>
26#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
27#include <linux/completion.h>
28#include <linux/slab.h>
29#include <linux/swap.h>
30#include <linux/writeback.h>
31
32/*
33 * for max sense size
34 */
35#include <scsi/scsi_cmnd.h>
36
37static void blk_unplug_work(void *data);
38static void blk_unplug_timeout(unsigned long data);
39
40/*
41 * For the allocated request tables
42 */
43static kmem_cache_t *request_cachep;
44
45/*
46 * For queue allocation
47 */
48static kmem_cache_t *requestq_cachep;
49
50/*
51 * For io context allocations
52 */
53static kmem_cache_t *iocontext_cachep;
54
55static wait_queue_head_t congestion_wqh[2] = {
56 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
57 __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
58 };
59
60/*
61 * Controlling structure to kblockd
62 */
63static struct workqueue_struct *kblockd_workqueue;
64
65unsigned long blk_max_low_pfn, blk_max_pfn;
66
67EXPORT_SYMBOL(blk_max_low_pfn);
68EXPORT_SYMBOL(blk_max_pfn);
69
70/* Amount of time in which a process may batch requests */
71#define BLK_BATCH_TIME (HZ/50UL)
72
73/* Number of requests a "batching" process may submit */
74#define BLK_BATCH_REQ 32
75
76/*
77 * Return the threshold (number of used requests) at which the queue is
78 * considered to be congested. It includes a little hysteresis to keep the
79 * context switch rate down.
80 */
81static inline int queue_congestion_on_threshold(struct request_queue *q)
82{
83 return q->nr_congestion_on;
84}
85
86/*
87 * The threshold at which a queue is considered to be uncongested
88 */
89static inline int queue_congestion_off_threshold(struct request_queue *q)
90{
91 return q->nr_congestion_off;
92}
93
94static void blk_queue_congestion_threshold(struct request_queue *q)
95{
96 int nr;
97
98 nr = q->nr_requests - (q->nr_requests / 8) + 1;
99 if (nr > q->nr_requests)
100 nr = q->nr_requests;
101 q->nr_congestion_on = nr;
102
103 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
104 if (nr < 1)
105 nr = 1;
106 q->nr_congestion_off = nr;
107}
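/*
 * A worked example of the hysteresis above, assuming the default of
 * q->nr_requests = 128: the queue is flagged congested once 113
 * requests are in use (128 - 16 + 1), and is not flagged uncongested
 * again until usage drops to 103 (128 - 16 - 8 - 1), so the two flags
 * do not flap around a single threshold.
 */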
108
109/*
110 * A queue has just exited congestion. Note this in the global counter of
111 * congested queues, and wake up anyone who was waiting for requests to be
112 * put back.
113 */
114static void clear_queue_congested(request_queue_t *q, int rw)
115{
116 enum bdi_state bit;
117 wait_queue_head_t *wqh = &congestion_wqh[rw];
118
119 bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
120 clear_bit(bit, &q->backing_dev_info.state);
121 smp_mb__after_clear_bit();
122 if (waitqueue_active(wqh))
123 wake_up(wqh);
124}
125
126/*
127 * A queue has just entered congestion. Flag that in the queue's VM-visible
128 * state flags and increment the global counter of congested queues.
129 */
130static void set_queue_congested(request_queue_t *q, int rw)
131{
132 enum bdi_state bit;
133
134 bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
135 set_bit(bit, &q->backing_dev_info.state);
136}
137
138/**
139 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
140 * @bdev: device
141 *
142 * Locates the passed device's request queue and returns the address of its
143 * backing_dev_info
144 *
145 * Will return NULL if the request queue cannot be located.
146 */
147struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
148{
149 struct backing_dev_info *ret = NULL;
150 request_queue_t *q = bdev_get_queue(bdev);
151
152 if (q)
153 ret = &q->backing_dev_info;
154 return ret;
155}
156
157EXPORT_SYMBOL(blk_get_backing_dev_info);
158
159void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
160{
161 q->activity_fn = fn;
162 q->activity_data = data;
163}
164
165EXPORT_SYMBOL(blk_queue_activity_fn);
166
167/**
168 * blk_queue_prep_rq - set a prepare_request function for queue
169 * @q: queue
170 * @pfn: prepare_request function
171 *
172 * It's possible for a queue to register a prepare_request callback which
173 * is invoked before the request is handed to the request_fn. The goal of
174 * the function is to prepare a request for I/O, it can be used to build a
175 * cdb from the request data for instance.
176 *
177 */
178void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
179{
180 q->prep_rq_fn = pfn;
181}
182
183EXPORT_SYMBOL(blk_queue_prep_rq);
184
185/**
186 * blk_queue_merge_bvec - set a merge_bvec function for queue
187 * @q: queue
188 * @mbfn: merge_bvec_fn
189 *
190 * Usually queues have static limitations on the max sectors or segments that
191 * we can put in a request. Stacking drivers may have some settings that
192 * are dynamic, and thus we have to query the queue whether it is ok to
193 * add a new bio_vec to a bio at a given offset or not. If the block device
194 * has such limitations, it needs to register a merge_bvec_fn to control
195 * the size of bio's sent to it. Note that a block device *must* allow a
196 * single page to be added to an empty bio. The block device driver may want
197 * to use the bio_split() function to deal with these bio's. By default
198 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
199 * honored.
200 */
201void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
202{
203 q->merge_bvec_fn = mbfn;
204}
205
206EXPORT_SYMBOL(blk_queue_merge_bvec);
207
208/**
209 * blk_queue_make_request - define an alternate make_request function for a device
210 * @q: the request queue for the device to be affected
211 * @mfn: the alternate make_request function
212 *
213 * Description:
214 * The normal way for &struct bios to be passed to a device
215 * driver is for them to be collected into requests on a request
216 * queue, and then to allow the device driver to select requests
217 * off that queue when it is ready. This works well for many block
218 * devices. However some block devices (typically virtual devices
219 * such as md or lvm) do not benefit from the processing on the
220 * request queue, and are served best by having the requests passed
221 * directly to them. This can be achieved by providing a function
222 * to blk_queue_make_request().
223 *
224 * Caveat:
225 * The driver that does this *must* be able to deal appropriately
226 * with buffers in "highmemory". This can be accomplished by either calling
227 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
228 * blk_queue_bounce() to create a buffer in normal memory.
229 **/
230void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
231{
232 /*
233 * set defaults
234 */
235 q->nr_requests = BLKDEV_MAX_RQ;
236 q->max_phys_segments = MAX_PHYS_SEGMENTS;
237 q->max_hw_segments = MAX_HW_SEGMENTS;
238 q->make_request_fn = mfn;
239 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
240 q->backing_dev_info.state = 0;
241 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
242 blk_queue_max_sectors(q, MAX_SECTORS);
243 blk_queue_hardsect_size(q, 512);
244 blk_queue_dma_alignment(q, 511);
245 blk_queue_congestion_threshold(q);
246 q->nr_batching = BLK_BATCH_REQ;
247
248 q->unplug_thresh = 4; /* hmm */
249 q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
250 if (q->unplug_delay == 0)
251 q->unplug_delay = 1;
252
253 INIT_WORK(&q->unplug_work, blk_unplug_work, q);
254
255 q->unplug_timer.function = blk_unplug_timeout;
256 q->unplug_timer.data = (unsigned long)q;
257
258 /*
259 * by default assume old behaviour and bounce for any highmem page
260 */
261 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
262
263 blk_queue_activity_fn(q, NULL, NULL);
264
265 INIT_LIST_HEAD(&q->drain_list);
266}
267
268EXPORT_SYMBOL(blk_queue_make_request);
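/*
 * A minimal sketch of the "virtual device" case described above (all
 * names other than the block-layer calls are made up): the driver
 * allocates a queue, installs its own make_request function, and
 * completes bios itself instead of letting them be collected into
 * requests.
 *
 *	static int my_make_request(request_queue_t *q, struct bio *bio)
 *	{
 *		... remap or service the bio here ...
 *		bio_endio(bio, bio->bi_size, 0);
 *		return 0;
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_make_request(q, my_make_request);
 */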
269
270static inline void rq_init(request_queue_t *q, struct request *rq)
271{
272 INIT_LIST_HEAD(&rq->queuelist);
273
274 rq->errors = 0;
275 rq->rq_status = RQ_ACTIVE;
276 rq->bio = rq->biotail = NULL;
277 rq->buffer = NULL;
278 rq->ref_count = 1;
279 rq->q = q;
280 rq->waiting = NULL;
281 rq->special = NULL;
282 rq->data_len = 0;
283 rq->data = NULL;
284 rq->sense = NULL;
285 rq->end_io = NULL;
286 rq->end_io_data = NULL;
287}
288
289/**
290 * blk_queue_ordered - does this queue support ordered writes
291 * @q: the request queue
292 * @flag: see below
293 *
294 * Description:
295 * For journalled file systems, doing ordered writes on a commit
296 * block instead of explicitly doing wait_on_buffer (which is bad
297 * for performance) can be a big win. Block drivers supporting this
298 * feature should call this function and indicate so.
299 *
300 **/
301void blk_queue_ordered(request_queue_t *q, int flag)
302{
303 switch (flag) {
304 case QUEUE_ORDERED_NONE:
305 if (q->flush_rq)
306 kmem_cache_free(request_cachep, q->flush_rq);
307 q->flush_rq = NULL;
308 q->ordered = flag;
309 break;
310 case QUEUE_ORDERED_TAG:
311 q->ordered = flag;
312 break;
313 case QUEUE_ORDERED_FLUSH:
314 q->ordered = flag;
315 if (!q->flush_rq)
316 q->flush_rq = kmem_cache_alloc(request_cachep,
317 GFP_KERNEL);
318 break;
319 default:
320 printk("blk_queue_ordered: bad value %d\n", flag);
321 break;
322 }
323}
324
325EXPORT_SYMBOL(blk_queue_ordered);
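/*
 * A hedged sketch of how a driver with a write-back cache might opt in
 * to flush-based ordering (the hook names are the driver's own):
 * besides selecting QUEUE_ORDERED_FLUSH here, the pre/post flush
 * machinery above expects the queue's prepare_flush_fn and
 * end_flush_fn to have been filled in by the driver.
 *
 *	blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
 *	q->prepare_flush_fn = mydrv_prepare_flush;
 *	q->end_flush_fn = mydrv_end_flush;
 */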
326
327/**
328 * blk_queue_issue_flush_fn - set function for issuing a flush
329 * @q: the request queue
330 * @iff: the function to be called issuing the flush
331 *
332 * Description:
333 * If a driver supports issuing a flush command, the support is notified
334 * to the block layer by defining it through this call.
335 *
336 **/
337void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
338{
339 q->issue_flush_fn = iff;
340}
341
342EXPORT_SYMBOL(blk_queue_issue_flush_fn);
343
344/*
345 * Cache flushing for ordered writes handling
346 */
347static void blk_pre_flush_end_io(struct request *flush_rq)
348{
349 struct request *rq = flush_rq->end_io_data;
350 request_queue_t *q = rq->q;
351
352 rq->flags |= REQ_BAR_PREFLUSH;
353
354 if (!flush_rq->errors)
355 elv_requeue_request(q, rq);
356 else {
357 q->end_flush_fn(q, flush_rq);
358 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
359 q->request_fn(q);
360 }
361}
362
363static void blk_post_flush_end_io(struct request *flush_rq)
364{
365 struct request *rq = flush_rq->end_io_data;
366 request_queue_t *q = rq->q;
367
368 rq->flags |= REQ_BAR_POSTFLUSH;
369
370 q->end_flush_fn(q, flush_rq);
371 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
372 q->request_fn(q);
373}
374
375struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
376{
377 struct request *flush_rq = q->flush_rq;
378
379 BUG_ON(!blk_barrier_rq(rq));
380
381 if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
382 return NULL;
383
384 rq_init(q, flush_rq);
385 flush_rq->elevator_private = NULL;
386 flush_rq->flags = REQ_BAR_FLUSH;
387 flush_rq->rq_disk = rq->rq_disk;
388 flush_rq->rl = NULL;
389
390 /*
391 * prepare_flush returns 0 if no flush is needed, just mark both
392 * pre and post flush as done in that case
393 */
394 if (!q->prepare_flush_fn(q, flush_rq)) {
395 rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
396 clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
397 return rq;
398 }
399
400 /*
401 * some drivers dequeue requests right away, some only after io
402 * completion. make sure the request is dequeued.
403 */
404 if (!list_empty(&rq->queuelist))
405 blkdev_dequeue_request(rq);
406
407 elv_deactivate_request(q, rq);
408
409 flush_rq->end_io_data = rq;
410 flush_rq->end_io = blk_pre_flush_end_io;
411
412 __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
413 return flush_rq;
414}
415
416static void blk_start_post_flush(request_queue_t *q, struct request *rq)
417{
418 struct request *flush_rq = q->flush_rq;
419
420 BUG_ON(!blk_barrier_rq(rq));
421
422 rq_init(q, flush_rq);
423 flush_rq->elevator_private = NULL;
424 flush_rq->flags = REQ_BAR_FLUSH;
425 flush_rq->rq_disk = rq->rq_disk;
426 flush_rq->rl = NULL;
427
428 if (q->prepare_flush_fn(q, flush_rq)) {
429 flush_rq->end_io_data = rq;
430 flush_rq->end_io = blk_post_flush_end_io;
431
432 __elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
433 q->request_fn(q);
434 }
435}
436
437static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
438 int sectors)
439{
440 if (sectors > rq->nr_sectors)
441 sectors = rq->nr_sectors;
442
443 rq->nr_sectors -= sectors;
444 return rq->nr_sectors;
445}
446
447static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
448 int sectors, int queue_locked)
449{
450 if (q->ordered != QUEUE_ORDERED_FLUSH)
451 return 0;
452 if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
453 return 0;
454 if (blk_barrier_postflush(rq))
455 return 0;
456
457 if (!blk_check_end_barrier(q, rq, sectors)) {
458 unsigned long flags = 0;
459
460 if (!queue_locked)
461 spin_lock_irqsave(q->queue_lock, flags);
462
463 blk_start_post_flush(q, rq);
464
465 if (!queue_locked)
466 spin_unlock_irqrestore(q->queue_lock, flags);
467 }
468
469 return 1;
470}
471
472/**
473 * blk_complete_barrier_rq - complete possible barrier request
474 * @q: the request queue for the device
475 * @rq: the request
476 * @sectors: number of sectors to complete
477 *
478 * Description:
479 * Used in driver end_io handling to determine whether to postpone
480 * completion of a barrier request until a post flush has been done. This
481 * is the unlocked variant, used if the caller doesn't already hold the
482 * queue lock.
483 **/
484int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
485{
486 return __blk_complete_barrier_rq(q, rq, sectors, 0);
487}
488EXPORT_SYMBOL(blk_complete_barrier_rq);
489
490/**
491 * blk_complete_barrier_rq_locked - complete possible barrier request
492 * @q: the request queue for the device
493 * @rq: the request
494 * @sectors: number of sectors to complete
495 *
496 * Description:
497 * See blk_complete_barrier_rq(). This variant must be used if the caller
498 * holds the queue lock.
499 **/
500int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
501 int sectors)
502{
503 return __blk_complete_barrier_rq(q, rq, sectors, 1);
504}
505EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
506
507/**
508 * blk_queue_bounce_limit - set bounce buffer limit for queue
509 * @q: the request queue for the device
510 * @dma_addr: bus address limit
511 *
512 * Description:
513 * Different hardware can have different requirements as to what pages
514 * it can do I/O directly to. A low level driver can call
515 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
516 * buffers for doing I/O to pages residing above @dma_addr. By default
517 * the block layer sets this to the highest numbered "low" memory page.
518 **/
519void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
520{
521 unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
522
523 /*
524 * set appropriate bounce gfp mask -- unfortunately we don't have a
525 * full 4GB zone, so we have to resort to low memory for any bounces.
526 * ISA has its own < 16MB zone.
527 */
528 if (bounce_pfn < blk_max_low_pfn) {
529 BUG_ON(dma_addr < BLK_BOUNCE_ISA);
530 init_emergency_isa_pool();
531 q->bounce_gfp = GFP_NOIO | GFP_DMA;
532 } else
533 q->bounce_gfp = GFP_NOIO;
534
535 q->bounce_pfn = bounce_pfn;
536}
537
538EXPORT_SYMBOL(blk_queue_bounce_limit);
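/*
 * A brief sketch, assuming an ISA-style controller that can only DMA
 * below 16MB: the driver asks for bounce buffering of anything above
 * that limit. A driver without such a restriction would instead pass
 * the mask matching its DMA capability, e.g. BLK_BOUNCE_HIGH or
 * BLK_BOUNCE_ANY.
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 */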
539
540/**
541 * blk_queue_max_sectors - set max sectors for a request for this queue
542 * @q: the request queue for the device
543 * @max_sectors: max sectors in the usual 512b unit
544 *
545 * Description:
546 * Enables a low level driver to set an upper limit on the size of
547 * received requests.
548 **/
549void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
550{
551 if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
552 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
553 printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
554 }
555
556 q->max_sectors = q->max_hw_sectors = max_sectors;
557}
558
559EXPORT_SYMBOL(blk_queue_max_sectors);
560
561/**
562 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
563 * @q: the request queue for the device
564 * @max_segments: max number of segments
565 *
566 * Description:
567 * Enables a low level driver to set an upper limit on the number of
568 * physical data segments in a request. This would be the largest sized
569 * scatter list the driver could handle.
570 **/
571void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
572{
573 if (!max_segments) {
574 max_segments = 1;
575 printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
576 }
577
578 q->max_phys_segments = max_segments;
579}
580
581EXPORT_SYMBOL(blk_queue_max_phys_segments);
582
583/**
584 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
585 * @q: the request queue for the device
586 * @max_segments: max number of segments
587 *
588 * Description:
589 * Enables a low level driver to set an upper limit on the number of
590 * hw data segments in a request. This would be the largest number of
591 * address/length pairs the host adapter can actually give at once
592 * to the device.
593 **/
594void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
595{
596 if (!max_segments) {
597 max_segments = 1;
598 printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
599 }
600
601 q->max_hw_segments = max_segments;
602}
603
604EXPORT_SYMBOL(blk_queue_max_hw_segments);
605
606/**
607 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
608 * @q: the request queue for the device
609 * @max_size: max size of segment in bytes
610 *
611 * Description:
612 * Enables a low level driver to set an upper limit on the size of a
613 * coalesced segment
614 **/
615void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
616{
617 if (max_size < PAGE_CACHE_SIZE) {
618 max_size = PAGE_CACHE_SIZE;
619 printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
620 }
621
622 q->max_segment_size = max_size;
623}
624
625EXPORT_SYMBOL(blk_queue_max_segment_size);
626
627/**
628 * blk_queue_hardsect_size - set hardware sector size for the queue
629 * @q: the request queue for the device
630 * @size: the hardware sector size, in bytes
631 *
632 * Description:
633 * This should typically be set to the lowest possible sector size
634 * that the hardware can operate on (without resorting even to
635 * internal read-modify-write operations). Usually the default
636 * of 512 covers most hardware.
637 **/
638void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
639{
640 q->hardsect_size = size;
641}
642
643EXPORT_SYMBOL(blk_queue_hardsect_size);
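/*
 * A short sketch of how these limit setters are typically combined in
 * a driver's probe path (mydrv_request, mydrv_lock and the numeric
 * limits are illustrative, not recommended values): the queue is
 * created first, then told what the hardware can handle per request.
 *
 *	q = blk_init_queue(mydrv_request, &mydrv_lock);
 *	blk_queue_max_sectors(q, 256);
 *	blk_queue_max_phys_segments(q, 32);
 *	blk_queue_max_hw_segments(q, 32);
 *	blk_queue_max_segment_size(q, 65536);
 *	blk_queue_hardsect_size(q, 512);
 */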
644
645/*
646 * Returns the minimum that is _not_ zero, unless both are zero.
647 */
648#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
649
650/**
651 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
652 * @t: the stacking driver (top)
653 * @b: the underlying device (bottom)
654 **/
655void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
656{
657 /* zero is "infinity" */
658 t->max_sectors = t->max_hw_sectors =
659 min_not_zero(t->max_sectors,b->max_sectors);
660
661 t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
662 t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
663 t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
664 t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
665}
666
667EXPORT_SYMBOL(blk_queue_stack_limits);
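/*
 * A rough sketch of the stacked-driver case (md/dm style; t_queue, c
 * and component_list are made-up names): the top-level queue inherits
 * the most restrictive non-zero limit from each underlying device, so
 * e.g. a 128-sector ceiling on one component caps the whole stacked
 * device at 128 sectors per request.
 *
 *	list_for_each_entry(c, &component_list, list)
 *		blk_queue_stack_limits(t_queue, bdev_get_queue(c->bdev));
 */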
668
669/**
670 * blk_queue_segment_boundary - set boundary rules for segment merging
671 * @q: the request queue for the device
672 * @mask: the memory boundary mask
673 **/
674void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
675{
676 if (mask < PAGE_CACHE_SIZE - 1) {
677 mask = PAGE_CACHE_SIZE - 1;
678 printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
679 }
680
681 q->seg_boundary_mask = mask;
682}
683
684EXPORT_SYMBOL(blk_queue_segment_boundary);
685
686/**
687 * blk_queue_dma_alignment - set dma length and memory alignment
688 * @q: the request queue for the device
689 * @mask: alignment mask
690 *
691 * description:
692 * set required memory and length alignment for direct dma transactions.
693 * this is used when building direct io requests for the queue.
694 *
695 **/
696void blk_queue_dma_alignment(request_queue_t *q, int mask)
697{
698 q->dma_alignment = mask;
699}
700
701EXPORT_SYMBOL(blk_queue_dma_alignment);
702
703/**
704 * blk_queue_find_tag - find a request by its tag and queue
705 *
706 * @q: The request queue for the device
707 * @tag: The tag of the request
708 *
709 * Notes:
710 * Should be used when a device returns a tag and you want to match
711 * it with a request.
712 *
713 * no locks need be held.
714 **/
715struct request *blk_queue_find_tag(request_queue_t *q, int tag)
716{
717 struct blk_queue_tag *bqt = q->queue_tags;
718
719 if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
720 return NULL;
721
722 return bqt->tag_index[tag];
723}
724
725EXPORT_SYMBOL(blk_queue_find_tag);
726
727/**
728 * __blk_queue_free_tags - release tag maintenance info
729 * @q: the request queue for the device
730 *
731 * Notes:
732 * blk_cleanup_queue() will take care of calling this function, if tagging
733 * has been used. So there's no need to call this directly.
734 **/
735static void __blk_queue_free_tags(request_queue_t *q)
736{
737 struct blk_queue_tag *bqt = q->queue_tags;
738
739 if (!bqt)
740 return;
741
742 if (atomic_dec_and_test(&bqt->refcnt)) {
743 BUG_ON(bqt->busy);
744 BUG_ON(!list_empty(&bqt->busy_list));
745
746 kfree(bqt->tag_index);
747 bqt->tag_index = NULL;
748
749 kfree(bqt->tag_map);
750 bqt->tag_map = NULL;
751
752 kfree(bqt);
753 }
754
755 q->queue_tags = NULL;
756 q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
757}
758
759/**
760 * blk_queue_free_tags - release tag maintenance info
761 * @q: the request queue for the device
762 *
763 * Notes:
764 * This is used to disable tagged queuing on a device, yet leave the
765 * queue in function.
766 **/
767void blk_queue_free_tags(request_queue_t *q)
768{
769 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
770}
771
772EXPORT_SYMBOL(blk_queue_free_tags);
773
774static int
775init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
776{
777 int bits, i;
778 struct request **tag_index;
779 unsigned long *tag_map;
780
781 if (depth > q->nr_requests * 2) {
782 depth = q->nr_requests * 2;
783 printk(KERN_ERR "%s: adjusted depth to %d\n",
784 __FUNCTION__, depth);
785 }
786
787 tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
788 if (!tag_index)
789 goto fail;
790
791 bits = (depth / BLK_TAGS_PER_LONG) + 1;
792 tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
793 if (!tag_map)
794 goto fail;
795
796 memset(tag_index, 0, depth * sizeof(struct request *));
797 memset(tag_map, 0, bits * sizeof(unsigned long));
798 tags->max_depth = depth;
799 tags->real_max_depth = bits * BITS_PER_LONG;
800 tags->tag_index = tag_index;
801 tags->tag_map = tag_map;
802
803 /*
804 * set the upper bits if the depth isn't a multiple of the word size
805 */
806 for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
807 __set_bit(i, tag_map);
808
809 return 0;
810fail:
811 kfree(tag_index);
812 return -ENOMEM;
813}
814
815/**
816 * blk_queue_init_tags - initialize the queue tag info
817 * @q: the request queue for the device
818 * @depth: the maximum queue depth supported
819 * @tags: the tag to use
820 **/
821int blk_queue_init_tags(request_queue_t *q, int depth,
822 struct blk_queue_tag *tags)
823{
824 int rc;
825
826 BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
827
828 if (!tags && !q->queue_tags) {
829 tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
830 if (!tags)
831 goto fail;
832
833 if (init_tag_map(q, tags, depth))
834 goto fail;
835
836 INIT_LIST_HEAD(&tags->busy_list);
837 tags->busy = 0;
838 atomic_set(&tags->refcnt, 1);
839 } else if (q->queue_tags) {
840 if ((rc = blk_queue_resize_tags(q, depth)))
841 return rc;
842 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
843 return 0;
844 } else
845 atomic_inc(&tags->refcnt);
846
847 /*
848 * assign it, all done
849 */
850 q->queue_tags = tags;
851 q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
852 return 0;
853fail:
854 kfree(tags);
855 return -ENOMEM;
856}
857
858EXPORT_SYMBOL(blk_queue_init_tags);
859
860/**
861 * blk_queue_resize_tags - change the queueing depth
862 * @q: the request queue for the device
863 * @new_depth: the new max command queueing depth
864 *
865 * Notes:
866 * Must be called with the queue lock held.
867 **/
868int blk_queue_resize_tags(request_queue_t *q, int new_depth)
869{
870 struct blk_queue_tag *bqt = q->queue_tags;
871 struct request **tag_index;
872 unsigned long *tag_map;
873 int bits, max_depth;
874
875 if (!bqt)
876 return -ENXIO;
877
878 /*
879 * don't bother sizing down
880 */
881 if (new_depth <= bqt->real_max_depth) {
882 bqt->max_depth = new_depth;
883 return 0;
884 }
885
886 /*
887 * save the old state info, so we can copy it back
888 */
889 tag_index = bqt->tag_index;
890 tag_map = bqt->tag_map;
891 max_depth = bqt->real_max_depth;
892
893 if (init_tag_map(q, bqt, new_depth))
894 return -ENOMEM;
895
896 memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
897 bits = max_depth / BLK_TAGS_PER_LONG;
898 memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
899
900 kfree(tag_index);
901 kfree(tag_map);
902 return 0;
903}
904
905EXPORT_SYMBOL(blk_queue_resize_tags);
906
907/**
908 * blk_queue_end_tag - end tag operations for a request
909 * @q: the request queue for the device
910 * @rq: the request that has completed
911 *
912 * Description:
913 * Typically called when end_that_request_first() returns 0, meaning
914 * all transfers have been done for a request. It's important to call
915 * this function before end_that_request_last(), as that will put the
916 * request back on the free list thus corrupting the internal tag list.
917 *
918 * Notes:
919 * queue lock must be held.
920 **/
921void blk_queue_end_tag(request_queue_t *q, struct request *rq)
922{
923 struct blk_queue_tag *bqt = q->queue_tags;
924 int tag = rq->tag;
925
926 BUG_ON(tag == -1);
927
928 if (unlikely(tag >= bqt->real_max_depth))
929 return;
930
931 if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
932 printk("attempt to clear non-busy tag (%d)\n", tag);
933 return;
934 }
935
936 list_del_init(&rq->queuelist);
937 rq->flags &= ~REQ_QUEUED;
938 rq->tag = -1;
939
940 if (unlikely(bqt->tag_index[tag] == NULL))
941 printk("tag %d is missing\n", tag);
942
943 bqt->tag_index[tag] = NULL;
944 bqt->busy--;
945}
946
947EXPORT_SYMBOL(blk_queue_end_tag);
948
949/**
950 * blk_queue_start_tag - find a free tag and assign it
951 * @q: the request queue for the device
952 * @rq: the block request that needs tagging
953 *
954 * Description:
955 * This can either be used as a stand-alone helper, or possibly be
956 * assigned as the queue &prep_rq_fn (in which case &struct request
957 * automagically gets a tag assigned). Note that this function
958 * assumes that any type of request can be queued! If this is not
959 * true for your device, you must check the request type before
960 * calling this function. The request will also be removed from
961 * the request queue, so it is the driver's responsibility to re-add
962 * it if it should need to be restarted for some reason.
963 *
964 * Notes:
965 * queue lock must be held.
966 **/
967int blk_queue_start_tag(request_queue_t *q, struct request *rq)
968{
969 struct blk_queue_tag *bqt = q->queue_tags;
970 unsigned long *map = bqt->tag_map;
971 int tag = 0;
972
973 if (unlikely((rq->flags & REQ_QUEUED))) {
974 printk(KERN_ERR
975 "request %p for device [%s] already tagged %d",
976 rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
977 BUG();
978 }
979
980 for (map = bqt->tag_map; *map == -1UL; map++) {
981 tag += BLK_TAGS_PER_LONG;
982
983 if (tag >= bqt->max_depth)
984 return 1;
985 }
986
987 tag += ffz(*map);
988 __set_bit(tag, bqt->tag_map);
989
990 rq->flags |= REQ_QUEUED;
991 rq->tag = tag;
992 bqt->tag_index[tag] = rq;
993 blkdev_dequeue_request(rq);
994 list_add(&rq->queuelist, &bqt->busy_list);
995 bqt->busy++;
996 return 0;
997}
998
999EXPORT_SYMBOL(blk_queue_start_tag);
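/*
 * A hedged sketch of the stand-alone usage mentioned above (the
 * hardware submission call mydrv_issue() is imaginary): the driver
 * pulls requests off the queue from its request_fn, tags each one, and
 * later ends the tag from its completion path before finishing the
 * request.
 *
 *	while ((rq = elv_next_request(q)) != NULL) {
 *		if (blk_queue_start_tag(q, rq))
 *			break;		out of tags, try again later
 *		mydrv_issue(rq, rq->tag);
 *	}
 *
 *	and on completion, with the queue lock held:
 *
 *	blk_queue_end_tag(q, rq);
 *	end_that_request_last(rq);
 */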
1000
1001/**
1002 * blk_queue_invalidate_tags - invalidate all pending tags
1003 * @q: the request queue for the device
1004 *
1005 * Description:
1006 * Hardware conditions may dictate a need to stop all pending requests.
1007 * In this case, we will safely clear the block side of the tag queue and
1008 * re-add all requests to the request queue in the right order.
1009 *
1010 * Notes:
1011 * queue lock must be held.
1012 **/
1013void blk_queue_invalidate_tags(request_queue_t *q)
1014{
1015 struct blk_queue_tag *bqt = q->queue_tags;
1016 struct list_head *tmp, *n;
1017 struct request *rq;
1018
1019 list_for_each_safe(tmp, n, &bqt->busy_list) {
1020 rq = list_entry_rq(tmp);
1021
1022 if (rq->tag == -1) {
1023 printk("bad tag found on list\n");
1024 list_del_init(&rq->queuelist);
1025 rq->flags &= ~REQ_QUEUED;
1026 } else
1027 blk_queue_end_tag(q, rq);
1028
1029 rq->flags &= ~REQ_STARTED;
1030 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
1031 }
1032}
1033
1034EXPORT_SYMBOL(blk_queue_invalidate_tags);
1035
1036static char *rq_flags[] = {
1037 "REQ_RW",
1038 "REQ_FAILFAST",
1039 "REQ_SOFTBARRIER",
1040 "REQ_HARDBARRIER",
1041 "REQ_CMD",
1042 "REQ_NOMERGE",
1043 "REQ_STARTED",
1044 "REQ_DONTPREP",
1045 "REQ_QUEUED",
1046 "REQ_PC",
1047 "REQ_BLOCK_PC",
1048 "REQ_SENSE",
1049 "REQ_FAILED",
1050 "REQ_QUIET",
1051 "REQ_SPECIAL",
1052 "REQ_DRIVE_CMD",
1053 "REQ_DRIVE_TASK",
1054 "REQ_DRIVE_TASKFILE",
1055 "REQ_PREEMPT",
1056 "REQ_PM_SUSPEND",
1057 "REQ_PM_RESUME",
1058 "REQ_PM_SHUTDOWN",
1059};
1060
1061void blk_dump_rq_flags(struct request *rq, char *msg)
1062{
1063 int bit;
1064
1065 printk("%s: dev %s: flags = ", msg,
1066 rq->rq_disk ? rq->rq_disk->disk_name : "?");
1067 bit = 0;
1068 do {
1069 if (rq->flags & (1 << bit))
1070 printk("%s ", rq_flags[bit]);
1071 bit++;
1072 } while (bit < __REQ_NR_BITS);
1073
1074 printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
1075 rq->nr_sectors,
1076 rq->current_nr_sectors);
1077 printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
1078
1079 if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
1080 printk("cdb: ");
1081 for (bit = 0; bit < sizeof(rq->cmd); bit++)
1082 printk("%02x ", rq->cmd[bit]);
1083 printk("\n");
1084 }
1085}
1086
1087EXPORT_SYMBOL(blk_dump_rq_flags);
1088
1089void blk_recount_segments(request_queue_t *q, struct bio *bio)
1090{
1091 struct bio_vec *bv, *bvprv = NULL;
1092 int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
1093 int high, highprv = 1;
1094
1095 if (unlikely(!bio->bi_io_vec))
1096 return;
1097
1098 cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
1099 hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
1100 bio_for_each_segment(bv, bio, i) {
1101 /*
1102 * the trick here is making sure that a high page is never
1103 * considered part of another segment, since that might
1104 * change with the bounce page.
1105 */
1106 high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
1107 if (high || highprv)
1108 goto new_hw_segment;
1109 if (cluster) {
1110 if (seg_size + bv->bv_len > q->max_segment_size)
1111 goto new_segment;
1112 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
1113 goto new_segment;
1114 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
1115 goto new_segment;
1116 if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
1117 goto new_hw_segment;
1118
1119 seg_size += bv->bv_len;
1120 hw_seg_size += bv->bv_len;
1121 bvprv = bv;
1122 continue;
1123 }
1124new_segment:
1125 if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
1126 !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
1127 hw_seg_size += bv->bv_len;
1128 } else {
1129new_hw_segment:
1130 if (hw_seg_size > bio->bi_hw_front_size)
1131 bio->bi_hw_front_size = hw_seg_size;
1132 hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
1133 nr_hw_segs++;
1134 }
1135
1136 nr_phys_segs++;
1137 bvprv = bv;
1138 seg_size = bv->bv_len;
1139 highprv = high;
1140 }
1141 if (hw_seg_size > bio->bi_hw_back_size)
1142 bio->bi_hw_back_size = hw_seg_size;
1143 if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
1144 bio->bi_hw_front_size = hw_seg_size;
1145 bio->bi_phys_segments = nr_phys_segs;
1146 bio->bi_hw_segments = nr_hw_segs;
1147 bio->bi_flags |= (1 << BIO_SEG_VALID);
1148}
1149
1150
1151int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
1152 struct bio *nxt)
1153{
1154 if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
1155 return 0;
1156
1157 if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
1158 return 0;
1159 if (bio->bi_size + nxt->bi_size > q->max_segment_size)
1160 return 0;
1161
1162 /*
 1163 * bio and nxt are contiguous in memory, check if the queue allows
1164 * these two to be merged into one
1165 */
1166 if (BIO_SEG_BOUNDARY(q, bio, nxt))
1167 return 1;
1168
1169 return 0;
1170}
1171
1172EXPORT_SYMBOL(blk_phys_contig_segment);
1173
1174int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
1175 struct bio *nxt)
1176{
1177 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1178 blk_recount_segments(q, bio);
1179 if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
1180 blk_recount_segments(q, nxt);
1181 if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
1182 BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
1183 return 0;
1184 if (bio->bi_size + nxt->bi_size > q->max_segment_size)
1185 return 0;
1186
1187 return 1;
1188}
1189
1190EXPORT_SYMBOL(blk_hw_contig_segment);
1191
1192/*
1193 * map a request to scatterlist, return number of sg entries setup. Caller
1194 * must make sure sg can hold rq->nr_phys_segments entries
1195 */
1196int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
1197{
1198 struct bio_vec *bvec, *bvprv;
1199 struct bio *bio;
1200 int nsegs, i, cluster;
1201
1202 nsegs = 0;
1203 cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
1204
1205 /*
1206 * for each bio in rq
1207 */
1208 bvprv = NULL;
1209 rq_for_each_bio(bio, rq) {
1210 /*
1211 * for each segment in bio
1212 */
1213 bio_for_each_segment(bvec, bio, i) {
1214 int nbytes = bvec->bv_len;
1215
1216 if (bvprv && cluster) {
1217 if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
1218 goto new_segment;
1219
1220 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
1221 goto new_segment;
1222 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
1223 goto new_segment;
1224
1225 sg[nsegs - 1].length += nbytes;
1226 } else {
1227new_segment:
1228 memset(&sg[nsegs],0,sizeof(struct scatterlist));
1229 sg[nsegs].page = bvec->bv_page;
1230 sg[nsegs].length = nbytes;
1231 sg[nsegs].offset = bvec->bv_offset;
1232
1233 nsegs++;
1234 }
1235 bvprv = bvec;
1236 } /* segments in bio */
1237 } /* bios in rq */
1238
1239 return nsegs;
1240}
1241
1242EXPORT_SYMBOL(blk_rq_map_sg);
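
/*
 * Illustrative sketch (not part of the original file): mapping a request to
 * a scatterlist for DMA. The sg table must have room for
 * rq->nr_phys_segments entries; EXAMPLE_MAX_SEGS and example_dma_start()
 * are assumptions made up for the example.
 */
#define EXAMPLE_MAX_SEGS	128

static int example_map_and_start(request_queue_t *q, struct request *rq)
{
	static struct scatterlist sg[EXAMPLE_MAX_SEGS];
	int nsegs;

	BUG_ON(rq->nr_phys_segments > EXAMPLE_MAX_SEGS);

	/* fill in page/offset/length for each physical segment */
	nsegs = blk_rq_map_sg(q, rq, sg);

	return example_dma_start(sg, nsegs);	/* hypothetical hw call */
}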
1243
1244/*
1245 * the standard queue merge functions, can be overridden with device
1246 * specific ones if so desired
1247 */
1248
1249static inline int ll_new_mergeable(request_queue_t *q,
1250 struct request *req,
1251 struct bio *bio)
1252{
1253 int nr_phys_segs = bio_phys_segments(q, bio);
1254
1255 if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
1256 req->flags |= REQ_NOMERGE;
1257 if (req == q->last_merge)
1258 q->last_merge = NULL;
1259 return 0;
1260 }
1261
1262 /*
1263 * A hw segment is just getting larger, bump just the phys
1264 * counter.
1265 */
1266 req->nr_phys_segments += nr_phys_segs;
1267 return 1;
1268}
1269
1270static inline int ll_new_hw_segment(request_queue_t *q,
1271 struct request *req,
1272 struct bio *bio)
1273{
1274 int nr_hw_segs = bio_hw_segments(q, bio);
1275 int nr_phys_segs = bio_phys_segments(q, bio);
1276
1277 if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
1278 || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
1279 req->flags |= REQ_NOMERGE;
1280 if (req == q->last_merge)
1281 q->last_merge = NULL;
1282 return 0;
1283 }
1284
1285 /*
1286 * This will form the start of a new hw segment. Bump both
1287 * counters.
1288 */
1289 req->nr_hw_segments += nr_hw_segs;
1290 req->nr_phys_segments += nr_phys_segs;
1291 return 1;
1292}
1293
1294static int ll_back_merge_fn(request_queue_t *q, struct request *req,
1295 struct bio *bio)
1296{
1297 int len;
1298
1299 if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
1300 req->flags |= REQ_NOMERGE;
1301 if (req == q->last_merge)
1302 q->last_merge = NULL;
1303 return 0;
1304 }
1305 if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
1306 blk_recount_segments(q, req->biotail);
1307 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1308 blk_recount_segments(q, bio);
1309 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
1310 if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
1311 !BIOVEC_VIRT_OVERSIZE(len)) {
1312 int mergeable = ll_new_mergeable(q, req, bio);
1313
1314 if (mergeable) {
1315 if (req->nr_hw_segments == 1)
1316 req->bio->bi_hw_front_size = len;
1317 if (bio->bi_hw_segments == 1)
1318 bio->bi_hw_back_size = len;
1319 }
1320 return mergeable;
1321 }
1322
1323 return ll_new_hw_segment(q, req, bio);
1324}
1325
1326static int ll_front_merge_fn(request_queue_t *q, struct request *req,
1327 struct bio *bio)
1328{
1329 int len;
1330
1331 if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
1332 req->flags |= REQ_NOMERGE;
1333 if (req == q->last_merge)
1334 q->last_merge = NULL;
1335 return 0;
1336 }
1337 len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
1338 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
1339 blk_recount_segments(q, bio);
1340 if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
1341 blk_recount_segments(q, req->bio);
1342 if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
1343 !BIOVEC_VIRT_OVERSIZE(len)) {
1344 int mergeable = ll_new_mergeable(q, req, bio);
1345
1346 if (mergeable) {
1347 if (bio->bi_hw_segments == 1)
1348 bio->bi_hw_front_size = len;
1349 if (req->nr_hw_segments == 1)
1350 req->biotail->bi_hw_back_size = len;
1351 }
1352 return mergeable;
1353 }
1354
1355 return ll_new_hw_segment(q, req, bio);
1356}
1357
1358static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
1359 struct request *next)
1360{
1361 int total_phys_segments = req->nr_phys_segments +next->nr_phys_segments;
1362 int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
1363
1364 /*
 1365	 * First check if either of the requests is a re-queued
 1366	 * request. Can't merge them if they are.
1367 */
1368 if (req->special || next->special)
1369 return 0;
1370
1371 /*
 1372	 * Will it become too large?
1373 */
1374 if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
1375 return 0;
1376
1377 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
1378 if (blk_phys_contig_segment(q, req->biotail, next->bio))
1379 total_phys_segments--;
1380
1381 if (total_phys_segments > q->max_phys_segments)
1382 return 0;
1383
1384 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
1385 if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
1386 int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
1387 /*
1388 * propagate the combined length to the end of the requests
1389 */
1390 if (req->nr_hw_segments == 1)
1391 req->bio->bi_hw_front_size = len;
1392 if (next->nr_hw_segments == 1)
1393 next->biotail->bi_hw_back_size = len;
1394 total_hw_segments--;
1395 }
1396
1397 if (total_hw_segments > q->max_hw_segments)
1398 return 0;
1399
1400 /* Merge is OK... */
1401 req->nr_phys_segments = total_phys_segments;
1402 req->nr_hw_segments = total_hw_segments;
1403 return 1;
1404}
1405
1406/*
1407 * "plug" the device if there are no outstanding requests: this will
1408 * force the transfer to start only after we have put all the requests
1409 * on the list.
1410 *
1411 * This is called with interrupts off and no requests on the queue and
1412 * with the queue lock held.
1413 */
1414void blk_plug_device(request_queue_t *q)
1415{
1416 WARN_ON(!irqs_disabled());
1417
1418 /*
1419 * don't plug a stopped queue, it must be paired with blk_start_queue()
1420 * which will restart the queueing
1421 */
1422 if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
1423 return;
1424
1425 if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1426 mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
1427}
1428
1429EXPORT_SYMBOL(blk_plug_device);
1430
1431/*
1432 * remove the queue from the plugged list, if present. called with
1433 * queue lock held and interrupts disabled.
1434 */
1435int blk_remove_plug(request_queue_t *q)
1436{
1437 WARN_ON(!irqs_disabled());
1438
1439 if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
1440 return 0;
1441
1442 del_timer(&q->unplug_timer);
1443 return 1;
1444}
1445
1446EXPORT_SYMBOL(blk_remove_plug);
1447
1448/*
1449 * remove the plug and let it rip..
1450 */
1451void __generic_unplug_device(request_queue_t *q)
1452{
1453 if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
1454 return;
1455
1456 if (!blk_remove_plug(q))
1457 return;
1458
1459 /*
1460 * was plugged, fire request_fn if queue has stuff to do
1461 */
1462 if (elv_next_request(q))
1463 q->request_fn(q);
1464}
1465EXPORT_SYMBOL(__generic_unplug_device);
1466
1467/**
1468 * generic_unplug_device - fire a request queue
1469 * @q: The &request_queue_t in question
1470 *
1471 * Description:
1472 * Linux uses plugging to build bigger requests queues before letting
1473 * the device have at them. If a queue is plugged, the I/O scheduler
1474 * is still adding and merging requests on the queue. Once the queue
1475 * gets unplugged, the request_fn defined for the queue is invoked and
1476 * transfers started.
1477 **/
1478void generic_unplug_device(request_queue_t *q)
1479{
1480 spin_lock_irq(q->queue_lock);
1481 __generic_unplug_device(q);
1482 spin_unlock_irq(q->queue_lock);
1483}
1484EXPORT_SYMBOL(generic_unplug_device);
1485
1486static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
1487 struct page *page)
1488{
1489 request_queue_t *q = bdi->unplug_io_data;
1490
1491 /*
1492 * devices don't necessarily have an ->unplug_fn defined
1493 */
1494 if (q->unplug_fn)
1495 q->unplug_fn(q);
1496}
1497
1498static void blk_unplug_work(void *data)
1499{
1500 request_queue_t *q = data;
1501
1502 q->unplug_fn(q);
1503}
1504
1505static void blk_unplug_timeout(unsigned long data)
1506{
1507 request_queue_t *q = (request_queue_t *)data;
1508
1509 kblockd_schedule_work(&q->unplug_work);
1510}
1511
1512/**
1513 * blk_start_queue - restart a previously stopped queue
1514 * @q: The &request_queue_t in question
1515 *
1516 * Description:
1517 * blk_start_queue() will clear the stop flag on the queue, and call
1518 * the request_fn for the queue if it was in a stopped state when
1519 * entered. Also see blk_stop_queue(). Queue lock must be held.
1520 **/
1521void blk_start_queue(request_queue_t *q)
1522{
1523 clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1524
1525 /*
1526 * one level of recursion is ok and is much faster than kicking
1527 * the unplug handling
1528 */
1529 if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
1530 q->request_fn(q);
1531 clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
1532 } else {
1533 blk_plug_device(q);
1534 kblockd_schedule_work(&q->unplug_work);
1535 }
1536}
1537
1538EXPORT_SYMBOL(blk_start_queue);
1539
1540/**
1541 * blk_stop_queue - stop a queue
1542 * @q: The &request_queue_t in question
1543 *
1544 * Description:
1545 * The Linux block layer assumes that a block driver will consume all
1546 * entries on the request queue when the request_fn strategy is called.
1547 * Often this will not happen, because of hardware limitations (queue
1548 * depth settings). If a device driver gets a 'queue full' response,
1549 * or if it simply chooses not to queue more I/O at one point, it can
1550 * call this function to prevent the request_fn from being called until
1551 * the driver has signalled it's ready to go again. This happens by calling
1552 * blk_start_queue() to restart queue operations. Queue lock must be held.
1553 **/
1554void blk_stop_queue(request_queue_t *q)
1555{
1556 blk_remove_plug(q);
1557 set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
1558}
1559EXPORT_SYMBOL(blk_stop_queue);
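
/*
 * Illustrative sketch (not part of the original file): a driver pausing its
 * queue on a 'queue full' condition and restarting it from the completion
 * interrupt. example_hw_full() and example_hw_issue() are assumptions for
 * the example; both blk_stop_queue() and blk_start_queue() need the queue
 * lock, which request_fn is already called with.
 */
static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (example_hw_full()) {
			/* no request_fn callbacks until we restart below */
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(rq);
		example_hw_issue(rq);
	}
}

static void example_irq_completion(request_queue_t *q)
{
	spin_lock(q->queue_lock);
	blk_start_queue(q);	/* clears the stopped flag, reruns request_fn */
	spin_unlock(q->queue_lock);
}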
1560
1561/**
1562 * blk_sync_queue - cancel any pending callbacks on a queue
1563 * @q: the queue
1564 *
1565 * Description:
1566 * The block layer may perform asynchronous callback activity
1567 * on a queue, such as calling the unplug function after a timeout.
1568 * A block device may call blk_sync_queue to ensure that any
1569 * such activity is cancelled, thus allowing it to release resources
 1570 * the callbacks might use. The caller must already have made sure
1571 * that its ->make_request_fn will not re-add plugging prior to calling
1572 * this function.
1573 *
1574 */
1575void blk_sync_queue(struct request_queue *q)
1576{
1577 del_timer_sync(&q->unplug_timer);
1578 kblockd_flush();
1579}
1580EXPORT_SYMBOL(blk_sync_queue);
1581
1582/**
1583 * blk_run_queue - run a single device queue
1584 * @q: The queue to run
1585 */
1586void blk_run_queue(struct request_queue *q)
1587{
1588 unsigned long flags;
1589
1590 spin_lock_irqsave(q->queue_lock, flags);
1591 blk_remove_plug(q);
1592 q->request_fn(q);
1593 spin_unlock_irqrestore(q->queue_lock, flags);
1594}
1595EXPORT_SYMBOL(blk_run_queue);
1596
1597/**
1598 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
1599 * @q: the request queue to be released
1600 *
1601 * Description:
1602 * blk_cleanup_queue is the pair to blk_init_queue() or
1603 * blk_queue_make_request(). It should be called when a request queue is
1604 * being released; typically when a block device is being de-registered.
 1605 * Currently, its primary task is to free all the &struct request
1606 * structures that were allocated to the queue and the queue itself.
1607 *
1608 * Caveat:
1609 * Hopefully the low level driver will have finished any
1610 * outstanding requests first...
1611 **/
1612void blk_cleanup_queue(request_queue_t * q)
1613{
1614 struct request_list *rl = &q->rq;
1615
1616 if (!atomic_dec_and_test(&q->refcnt))
1617 return;
1618
1619 if (q->elevator)
1620 elevator_exit(q->elevator);
1621
1622 blk_sync_queue(q);
1623
1624 if (rl->rq_pool)
1625 mempool_destroy(rl->rq_pool);
1626
1627 if (q->queue_tags)
1628 __blk_queue_free_tags(q);
1629
1630 blk_queue_ordered(q, QUEUE_ORDERED_NONE);
1631
1632 kmem_cache_free(requestq_cachep, q);
1633}
1634
1635EXPORT_SYMBOL(blk_cleanup_queue);
1636
1637static int blk_init_free_list(request_queue_t *q)
1638{
1639 struct request_list *rl = &q->rq;
1640
1641 rl->count[READ] = rl->count[WRITE] = 0;
1642 rl->starved[READ] = rl->starved[WRITE] = 0;
1643 init_waitqueue_head(&rl->wait[READ]);
1644 init_waitqueue_head(&rl->wait[WRITE]);
1645 init_waitqueue_head(&rl->drain);
1646
1647 rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
1648
1649 if (!rl->rq_pool)
1650 return -ENOMEM;
1651
1652 return 0;
1653}
1654
1655static int __make_request(request_queue_t *, struct bio *);
1656
1657request_queue_t *blk_alloc_queue(int gfp_mask)
1658{
1659 request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
1660
1661 if (!q)
1662 return NULL;
1663
1664 memset(q, 0, sizeof(*q));
1665 init_timer(&q->unplug_timer);
1666 atomic_set(&q->refcnt, 1);
1667
1668 q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
1669 q->backing_dev_info.unplug_io_data = q;
1670
1671 return q;
1672}
1673
1674EXPORT_SYMBOL(blk_alloc_queue);
1675
1676/**
1677 * blk_init_queue - prepare a request queue for use with a block device
1678 * @rfn: The function to be called to process requests that have been
1679 * placed on the queue.
1680 * @lock: Request queue spin lock
1681 *
1682 * Description:
1683 * If a block device wishes to use the standard request handling procedures,
1684 * which sorts requests and coalesces adjacent requests, then it must
1685 * call blk_init_queue(). The function @rfn will be called when there
1686 * are requests on the queue that need to be processed. If the device
1687 * supports plugging, then @rfn may not be called immediately when requests
1688 * are available on the queue, but may be called at some time later instead.
1689 * Plugged queues are generally unplugged when a buffer belonging to one
1690 * of the requests on the queue is needed, or due to memory pressure.
1691 *
1692 * @rfn is not required, or even expected, to remove all requests off the
1693 * queue, but only as many as it can handle at a time. If it does leave
1694 * requests on the queue, it is responsible for arranging that the requests
1695 * get dealt with eventually.
1696 *
1697 * The queue spin lock must be held while manipulating the requests on the
1698 * request queue.
1699 *
1700 * Function returns a pointer to the initialized request queue, or NULL if
1701 * it didn't succeed.
1702 *
1703 * Note:
1704 * blk_init_queue() must be paired with a blk_cleanup_queue() call
1705 * when the block device is deactivated (such as at module unload).
1706 **/
1707request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
1708{
1709 request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
1710
1711 if (!q)
1712 return NULL;
1713
1714 if (blk_init_free_list(q))
1715 goto out_init;
1716
1717 q->request_fn = rfn;
1718 q->back_merge_fn = ll_back_merge_fn;
1719 q->front_merge_fn = ll_front_merge_fn;
1720 q->merge_requests_fn = ll_merge_requests_fn;
1721 q->prep_rq_fn = NULL;
1722 q->unplug_fn = generic_unplug_device;
1723 q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
1724 q->queue_lock = lock;
1725
1726 blk_queue_segment_boundary(q, 0xffffffff);
1727
1728 blk_queue_make_request(q, __make_request);
1729 blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
1730
1731 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
1732 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
1733
1734 /*
1735 * all done
1736 */
1737 if (!elevator_init(q, NULL)) {
1738 blk_queue_congestion_threshold(q);
1739 return q;
1740 }
1741
1742 blk_cleanup_queue(q);
1743out_init:
1744 kmem_cache_free(requestq_cachep, q);
1745 return NULL;
1746}
1747
1748EXPORT_SYMBOL(blk_init_queue);
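
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * blk_init_queue() at driver load and blk_cleanup_queue() at unload.
 * example_lock, example_queue and example_request_fn are assumptions for
 * the example.
 */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static request_queue_t *example_queue;

static int __init example_init(void)
{
	example_queue = blk_init_queue(example_request_fn, &example_lock);
	if (!example_queue)
		return -ENOMEM;

	/* optional limits, e.g. blk_queue_max_sectors(example_queue, 128); */
	return 0;
}

static void __exit example_exit(void)
{
	blk_cleanup_queue(example_queue);
}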
1749
1750int blk_get_queue(request_queue_t *q)
1751{
1752 if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
1753 atomic_inc(&q->refcnt);
1754 return 0;
1755 }
1756
1757 return 1;
1758}
1759
1760EXPORT_SYMBOL(blk_get_queue);
1761
1762static inline void blk_free_request(request_queue_t *q, struct request *rq)
1763{
1764 elv_put_request(q, rq);
1765 mempool_free(rq, q->rq.rq_pool);
1766}
1767
1768static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
1769 int gfp_mask)
1770{
1771 struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
1772
1773 if (!rq)
1774 return NULL;
1775
1776 /*
1777 * first three bits are identical in rq->flags and bio->bi_rw,
1778 * see bio.h and blkdev.h
1779 */
1780 rq->flags = rw;
1781
1782 if (!elv_set_request(q, rq, gfp_mask))
1783 return rq;
1784
1785 mempool_free(rq, q->rq.rq_pool);
1786 return NULL;
1787}
1788
1789/*
1790 * ioc_batching returns true if the ioc is a valid batching request and
1791 * should be given priority access to a request.
1792 */
1793static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
1794{
1795 if (!ioc)
1796 return 0;
1797
1798 /*
1799 * Make sure the process is able to allocate at least 1 request
1800 * even if the batch times out, otherwise we could theoretically
1801 * lose wakeups.
1802 */
1803 return ioc->nr_batch_requests == q->nr_batching ||
1804 (ioc->nr_batch_requests > 0
1805 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
1806}
1807
1808/*
1809 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
1810 * will cause the process to be a "batcher" on all queues in the system. This
1811 * is the behaviour we want though - once it gets a wakeup it should be given
1812 * a nice run.
1813 */
1814void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
1815{
1816 if (!ioc || ioc_batching(q, ioc))
1817 return;
1818
1819 ioc->nr_batch_requests = q->nr_batching;
1820 ioc->last_waited = jiffies;
1821}
1822
1823static void __freed_request(request_queue_t *q, int rw)
1824{
1825 struct request_list *rl = &q->rq;
1826
1827 if (rl->count[rw] < queue_congestion_off_threshold(q))
1828 clear_queue_congested(q, rw);
1829
1830 if (rl->count[rw] + 1 <= q->nr_requests) {
1831 smp_mb();
1832 if (waitqueue_active(&rl->wait[rw]))
1833 wake_up(&rl->wait[rw]);
1834
1835 blk_clear_queue_full(q, rw);
1836 }
1837}
1838
1839/*
1840 * A request has just been released. Account for it, update the full and
1841 * congestion status, wake up any waiters. Called under q->queue_lock.
1842 */
1843static void freed_request(request_queue_t *q, int rw)
1844{
1845 struct request_list *rl = &q->rq;
1846
1847 rl->count[rw]--;
1848
1849 __freed_request(q, rw);
1850
1851 if (unlikely(rl->starved[rw ^ 1]))
1852 __freed_request(q, rw ^ 1);
1853
1854 if (!rl->count[READ] && !rl->count[WRITE]) {
1855 smp_mb();
1856 if (unlikely(waitqueue_active(&rl->drain)))
1857 wake_up(&rl->drain);
1858 }
1859}
1860
1861#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
1862/*
1863 * Get a free request, queue_lock must not be held
1864 */
1865static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
1866{
1867 struct request *rq = NULL;
1868 struct request_list *rl = &q->rq;
1869 struct io_context *ioc = get_io_context(gfp_mask);
1870
1871 if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
1872 goto out;
1873
1874 spin_lock_irq(q->queue_lock);
1875 if (rl->count[rw]+1 >= q->nr_requests) {
1876 /*
1877 * The queue will fill after this allocation, so set it as
1878 * full, and mark this process as "batching". This process
1879 * will be allowed to complete a batch of requests, others
1880 * will be blocked.
1881 */
1882 if (!blk_queue_full(q, rw)) {
1883 ioc_set_batching(q, ioc);
1884 blk_set_queue_full(q, rw);
1885 }
1886 }
1887
1888 switch (elv_may_queue(q, rw)) {
1889 case ELV_MQUEUE_NO:
1890 goto rq_starved;
1891 case ELV_MQUEUE_MAY:
1892 break;
1893 case ELV_MQUEUE_MUST:
1894 goto get_rq;
1895 }
1896
1897 if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
1898 /*
1899 * The queue is full and the allocating process is not a
1900 * "batcher", and not exempted by the IO scheduler
1901 */
1902 spin_unlock_irq(q->queue_lock);
1903 goto out;
1904 }
1905
1906get_rq:
1907 rl->count[rw]++;
1908 rl->starved[rw] = 0;
1909 if (rl->count[rw] >= queue_congestion_on_threshold(q))
1910 set_queue_congested(q, rw);
1911 spin_unlock_irq(q->queue_lock);
1912
1913 rq = blk_alloc_request(q, rw, gfp_mask);
1914 if (!rq) {
1915 /*
1916 * Allocation failed presumably due to memory. Undo anything
1917 * we might have messed up.
1918 *
1919 * Allocating task should really be put onto the front of the
1920 * wait queue, but this is pretty rare.
1921 */
1922 spin_lock_irq(q->queue_lock);
1923 freed_request(q, rw);
1924
1925 /*
1926 * in the very unlikely event that allocation failed and no
 1927	 * requests for this direction were pending, mark us starved
1928 * so that freeing of a request in the other direction will
1929 * notice us. another possible fix would be to split the
1930 * rq mempool into READ and WRITE
1931 */
1932rq_starved:
1933 if (unlikely(rl->count[rw] == 0))
1934 rl->starved[rw] = 1;
1935
1936 spin_unlock_irq(q->queue_lock);
1937 goto out;
1938 }
1939
1940 if (ioc_batching(q, ioc))
1941 ioc->nr_batch_requests--;
1942
1943 rq_init(q, rq);
1944 rq->rl = rl;
1945out:
1946 put_io_context(ioc);
1947 return rq;
1948}
1949
1950/*
1951 * No available requests for this queue, unplug the device and wait for some
1952 * requests to become available.
1953 */
1954static struct request *get_request_wait(request_queue_t *q, int rw)
1955{
1956 DEFINE_WAIT(wait);
1957 struct request *rq;
1958
1959 generic_unplug_device(q);
1960 do {
1961 struct request_list *rl = &q->rq;
1962
1963 prepare_to_wait_exclusive(&rl->wait[rw], &wait,
1964 TASK_UNINTERRUPTIBLE);
1965
1966 rq = get_request(q, rw, GFP_NOIO);
1967
1968 if (!rq) {
1969 struct io_context *ioc;
1970
1971 io_schedule();
1972
1973 /*
1974 * After sleeping, we become a "batching" process and
1975 * will be able to allocate at least one request, and
 1976			 * up to a big batch of them for a small period of time.
1977 * See ioc_batching, ioc_set_batching
1978 */
1979 ioc = get_io_context(GFP_NOIO);
1980 ioc_set_batching(q, ioc);
1981 put_io_context(ioc);
1982 }
1983 finish_wait(&rl->wait[rw], &wait);
1984 } while (!rq);
1985
1986 return rq;
1987}
1988
1989struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
1990{
1991 struct request *rq;
1992
1993 BUG_ON(rw != READ && rw != WRITE);
1994
1995 if (gfp_mask & __GFP_WAIT)
1996 rq = get_request_wait(q, rw);
1997 else
1998 rq = get_request(q, rw, gfp_mask);
1999
2000 return rq;
2001}
2002
2003EXPORT_SYMBOL(blk_get_request);
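
/*
 * Illustrative sketch (not part of the original file): allocating a request
 * for a driver-private command and releasing it again. With __GFP_WAIT the
 * call sleeps until a request is available, so it must not be used from
 * interrupt context.
 */
static int example_alloc_and_drop(request_queue_t *q)
{
	struct request *rq = blk_get_request(q, READ, __GFP_WAIT);

	if (!rq)
		return -ENOMEM;

	/* ... fill in rq->cmd / rq->special and queue it here ... */

	blk_put_request(rq);
	return 0;
}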
2004
2005/**
2006 * blk_requeue_request - put a request back on queue
2007 * @q: request queue where request should be inserted
2008 * @rq: request to be inserted
2009 *
2010 * Description:
2011 * Drivers often keep queueing requests until the hardware cannot accept
2012 * more, when that condition happens we need to put the request back
2013 * on the queue. Must be called with queue lock held.
2014 */
2015void blk_requeue_request(request_queue_t *q, struct request *rq)
2016{
2017 if (blk_rq_tagged(rq))
2018 blk_queue_end_tag(q, rq);
2019
2020 elv_requeue_request(q, rq);
2021}
2022
2023EXPORT_SYMBOL(blk_requeue_request);
2024
2025/**
2026 * blk_insert_request - insert a special request in to a request queue
2027 * @q: request queue where request should be inserted
2028 * @rq: request to be inserted
2029 * @at_head: insert request at head or tail of queue
2030 * @data: private data
 2031 * @reinsert: true if request is a reinsertion of a previously processed one
2032 *
2033 * Description:
2034 * Many block devices need to execute commands asynchronously, so they don't
2035 * block the whole kernel from preemption during request execution. This is
 2036 * accomplished normally by inserting artificial requests tagged as
2037 * REQ_SPECIAL in to the corresponding request queue, and letting them be
2038 * scheduled for actual execution by the request queue.
2039 *
 2040 * We have the option of inserting at the head or the tail of the queue.
2041 * Typically we use the tail for new ioctls and so forth. We use the head
2042 * of the queue for things like a QUEUE_FULL message from a device, or a
2043 * host that is unable to accept a particular command.
2044 */
2045void blk_insert_request(request_queue_t *q, struct request *rq,
2046 int at_head, void *data, int reinsert)
2047{
2048 unsigned long flags;
2049
2050 /*
2051 * tell I/O scheduler that this isn't a regular read/write (ie it
2052 * must not attempt merges on this) and that it acts as a soft
2053 * barrier
2054 */
2055 rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
2056
2057 rq->special = data;
2058
2059 spin_lock_irqsave(q->queue_lock, flags);
2060
2061 /*
2062 * If command is tagged, release the tag
2063 */
2064 if (reinsert)
2065 blk_requeue_request(q, rq);
2066 else {
2067 int where = ELEVATOR_INSERT_BACK;
2068
2069 if (at_head)
2070 where = ELEVATOR_INSERT_FRONT;
2071
2072 if (blk_rq_tagged(rq))
2073 blk_queue_end_tag(q, rq);
2074
2075 drive_stat_acct(rq, rq->nr_sectors, 1);
2076 __elv_add_request(q, rq, where, 0);
2077 }
2078 if (blk_queue_plugged(q))
2079 __generic_unplug_device(q);
2080 else
2081 q->request_fn(q);
2082 spin_unlock_irqrestore(q->queue_lock, flags);
2083}
2084
2085EXPORT_SYMBOL(blk_insert_request);
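
/*
 * Illustrative sketch (not part of the original file): pushing a
 * driver-private command to the front of the queue with
 * blk_insert_request(). The request later shows up in the driver's
 * request_fn flagged REQ_SPECIAL, with rq->special pointing at 'cmd';
 * the command pointer itself is an assumption for the example.
 */
static int example_send_special(request_queue_t *q, void *cmd)
{
	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);

	if (!rq)
		return -ENOMEM;

	blk_insert_request(q, rq, 1 /* at head */, cmd, 0 /* not a reinsert */);
	return 0;
}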
2086
2087/**
2088 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
2089 * @q: request queue where request should be inserted
2090 * @rw: READ or WRITE data
2091 * @ubuf: the user buffer
2092 * @len: length of user data
2093 *
2094 * Description:
2095 * Data will be mapped directly for zero copy io, if possible. Otherwise
2096 * a kernel bounce buffer is used.
2097 *
2098 * A matching blk_rq_unmap_user() must be issued at the end of io, while
2099 * still in process context.
2100 *
2101 * Note: The mapped bio may need to be bounced through blk_queue_bounce()
2102 * before being submitted to the device, as pages mapped may be out of
 2103 * reach. It's the caller's responsibility to make sure this happens. The
2104 * original bio must be passed back in to blk_rq_unmap_user() for proper
2105 * unmapping.
2106 */
2107struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
2108 unsigned int len)
2109{
2110 unsigned long uaddr;
2111 struct request *rq;
2112 struct bio *bio;
2113
2114 if (len > (q->max_sectors << 9))
2115 return ERR_PTR(-EINVAL);
2116 if ((!len && ubuf) || (len && !ubuf))
2117 return ERR_PTR(-EINVAL);
2118
2119 rq = blk_get_request(q, rw, __GFP_WAIT);
2120 if (!rq)
2121 return ERR_PTR(-ENOMEM);
2122
2123 /*
2124 * if alignment requirement is satisfied, map in user pages for
2125 * direct dma. else, set up kernel bounce buffers
2126 */
2127 uaddr = (unsigned long) ubuf;
2128 if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
2129 bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
2130 else
2131 bio = bio_copy_user(q, uaddr, len, rw == READ);
2132
2133 if (!IS_ERR(bio)) {
2134 rq->bio = rq->biotail = bio;
2135 blk_rq_bio_prep(q, rq, bio);
2136
2137 rq->buffer = rq->data = NULL;
2138 rq->data_len = len;
2139 return rq;
2140 }
2141
2142 /*
2143 * bio is the err-ptr
2144 */
2145 blk_put_request(rq);
2146 return (struct request *) bio;
2147}
2148
2149EXPORT_SYMBOL(blk_rq_map_user);
2150
2151/**
2152 * blk_rq_unmap_user - unmap a request with user data
2153 * @rq: request to be unmapped
2154 * @bio: bio for the request
2155 * @ulen: length of user buffer
2156 *
2157 * Description:
2158 * Unmap a request previously mapped by blk_rq_map_user().
2159 */
2160int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
2161{
2162 int ret = 0;
2163
2164 if (bio) {
2165 if (bio_flagged(bio, BIO_USER_MAPPED))
2166 bio_unmap_user(bio);
2167 else
2168 ret = bio_uncopy_user(bio);
2169 }
2170
2171 blk_put_request(rq);
2172 return ret;
2173}
2174
2175EXPORT_SYMBOL(blk_rq_unmap_user);
2176
2177/**
2178 * blk_execute_rq - insert a request into queue for execution
2179 * @q: queue to insert the request in
2180 * @bd_disk: matching gendisk
2181 * @rq: request to insert
2182 *
2183 * Description:
2184 * Insert a fully prepared request at the back of the io scheduler queue
2185 * for execution.
2186 */
2187int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
2188 struct request *rq)
2189{
2190 DECLARE_COMPLETION(wait);
2191 char sense[SCSI_SENSE_BUFFERSIZE];
2192 int err = 0;
2193
2194 rq->rq_disk = bd_disk;
2195
2196 /*
2197 * we need an extra reference to the request, so we can look at
2198 * it after io completion
2199 */
2200 rq->ref_count++;
2201
2202 if (!rq->sense) {
2203 memset(sense, 0, sizeof(sense));
2204 rq->sense = sense;
2205 rq->sense_len = 0;
2206 }
2207
2208 rq->flags |= REQ_NOMERGE;
2209 rq->waiting = &wait;
2210 rq->end_io = blk_end_sync_rq;
2211 elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
2212 generic_unplug_device(q);
2213 wait_for_completion(&wait);
2214 rq->waiting = NULL;
2215
2216 if (rq->errors)
2217 err = -EIO;
2218
2219 return err;
2220}
2221
2222EXPORT_SYMBOL(blk_execute_rq);
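
/*
 * Illustrative sketch (not part of the original file): the usual
 * blk_rq_map_user() / blk_execute_rq() / blk_rq_unmap_user() cycle for a
 * passthrough command. The 6-byte cdb length and the READ data direction
 * are assumptions for the example.
 */
static int example_passthrough(request_queue_t *q, struct gendisk *disk,
			       unsigned char *cdb, void __user *ubuf,
			       unsigned int len)
{
	struct request *rq;
	struct bio *bio;
	int err;

	rq = blk_rq_map_user(q, READ, ubuf, len);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	bio = rq->bio;			/* remember for unmapping */
	memcpy(rq->cmd, cdb, 6);
	rq->cmd_len = 6;
	rq->flags |= REQ_BLOCK_PC;
	rq->timeout = 60 * HZ;

	err = blk_execute_rq(q, disk, rq);	/* waits for completion */

	/* also drops the request reference taken by blk_rq_map_user() */
	return blk_rq_unmap_user(rq, bio, len) ? -EFAULT : err;
}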
2223
2224/**
2225 * blkdev_issue_flush - queue a flush
2226 * @bdev: blockdev to issue flush for
2227 * @error_sector: error sector
2228 *
2229 * Description:
2230 * Issue a flush for the block device in question. Caller can supply
2231 * room for storing the error offset in case of a flush error, if they
2232 * wish to. Caller must run wait_for_completion() on its own.
2233 */
2234int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
2235{
2236 request_queue_t *q;
2237
2238 if (bdev->bd_disk == NULL)
2239 return -ENXIO;
2240
2241 q = bdev_get_queue(bdev);
2242 if (!q)
2243 return -ENXIO;
2244 if (!q->issue_flush_fn)
2245 return -EOPNOTSUPP;
2246
2247 return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
2248}
2249
2250EXPORT_SYMBOL(blkdev_issue_flush);
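
/*
 * Illustrative sketch (not part of the original file): flushing a device's
 * write cache from filesystem or driver code. 'bdev' is assumed to be an
 * open block device; devices without a flush method report -EOPNOTSUPP.
 */
static int example_flush(struct block_device *bdev)
{
	sector_t error_sector;
	int err = blkdev_issue_flush(bdev, &error_sector);

	if (err == -EOPNOTSUPP)
		err = 0;	/* no flush support, nothing to do */

	return err;
}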
2251
2252/**
2253 * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2254 * @q: device queue
2255 * @disk: gendisk
2256 * @error_sector: error offset
2257 *
2258 * Description:
 2259 * Devices understanding the SCSI command set can use this function as
2260 * a helper for issuing a cache flush. Note: driver is required to store
2261 * the error offset (in case of error flushing) in ->sector of struct
2262 * request.
2263 */
2264int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2265 sector_t *error_sector)
2266{
2267 struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2268 int ret;
2269
2270 rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2271 rq->sector = 0;
2272 memset(rq->cmd, 0, sizeof(rq->cmd));
2273 rq->cmd[0] = 0x35;
2274 rq->cmd_len = 12;
2275 rq->data = NULL;
2276 rq->data_len = 0;
2277 rq->timeout = 60 * HZ;
2278
2279 ret = blk_execute_rq(q, disk, rq);
2280
2281 if (ret && error_sector)
2282 *error_sector = rq->sector;
2283
2284 blk_put_request(rq);
2285 return ret;
2286}
2287
2288EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
2289
2290void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2291{
2292 int rw = rq_data_dir(rq);
2293
2294 if (!blk_fs_request(rq) || !rq->rq_disk)
2295 return;
2296
2297 if (rw == READ) {
2298 __disk_stat_add(rq->rq_disk, read_sectors, nr_sectors);
2299 if (!new_io)
2300 __disk_stat_inc(rq->rq_disk, read_merges);
2301 } else if (rw == WRITE) {
2302 __disk_stat_add(rq->rq_disk, write_sectors, nr_sectors);
2303 if (!new_io)
2304 __disk_stat_inc(rq->rq_disk, write_merges);
2305 }
2306 if (new_io) {
2307 disk_round_stats(rq->rq_disk);
2308 rq->rq_disk->in_flight++;
2309 }
2310}
2311
2312/*
2313 * add-request adds a request to the linked list.
2314 * queue lock is held and interrupts disabled, as we muck with the
2315 * request queue list.
2316 */
2317static inline void add_request(request_queue_t * q, struct request * req)
2318{
2319 drive_stat_acct(req, req->nr_sectors, 1);
2320
2321 if (q->activity_fn)
2322 q->activity_fn(q->activity_data, rq_data_dir(req));
2323
2324 /*
2325 * elevator indicated where it wants this request to be
2326 * inserted at elevator_merge time
2327 */
2328 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
2329}
2330
2331/*
2332 * disk_round_stats() - Round off the performance stats on a struct
2333 * disk_stats.
2334 *
2335 * The average IO queue length and utilisation statistics are maintained
2336 * by observing the current state of the queue length and the amount of
2337 * time it has been in this state for.
2338 *
2339 * Normally, that accounting is done on IO completion, but that can result
2340 * in more than a second's worth of IO being accounted for within any one
2341 * second, leading to >100% utilisation. To deal with that, we call this
2342 * function to do a round-off before returning the results when reading
2343 * /proc/diskstats. This accounts immediately for all queue usage up to
2344 * the current jiffies and restarts the counters again.
2345 */
2346void disk_round_stats(struct gendisk *disk)
2347{
2348 unsigned long now = jiffies;
2349
2350 __disk_stat_add(disk, time_in_queue,
2351 disk->in_flight * (now - disk->stamp));
2352 disk->stamp = now;
2353
2354 if (disk->in_flight)
2355 __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
2356 disk->stamp_idle = now;
2357}
2358
2359/*
2360 * queue lock must be held
2361 */
2362static void __blk_put_request(request_queue_t *q, struct request *req)
2363{
2364 struct request_list *rl = req->rl;
2365
2366 if (unlikely(!q))
2367 return;
2368 if (unlikely(--req->ref_count))
2369 return;
2370
2371 req->rq_status = RQ_INACTIVE;
2372 req->q = NULL;
2373 req->rl = NULL;
2374
2375 /*
2376 * Request may not have originated from ll_rw_blk. if not,
2377 * it didn't come out of our reserved rq pools
2378 */
2379 if (rl) {
2380 int rw = rq_data_dir(req);
2381
2382 elv_completed_request(q, req);
2383
2384 BUG_ON(!list_empty(&req->queuelist));
2385
2386 blk_free_request(q, req);
2387 freed_request(q, rw);
2388 }
2389}
2390
2391void blk_put_request(struct request *req)
2392{
2393 /*
 2394	 * if req->rl isn't set, this request didn't originate from the
2395 * block layer, so it's safe to just disregard it
2396 */
2397 if (req->rl) {
2398 unsigned long flags;
2399 request_queue_t *q = req->q;
2400
2401 spin_lock_irqsave(q->queue_lock, flags);
2402 __blk_put_request(q, req);
2403 spin_unlock_irqrestore(q->queue_lock, flags);
2404 }
2405}
2406
2407EXPORT_SYMBOL(blk_put_request);
2408
2409/**
2410 * blk_end_sync_rq - executes a completion event on a request
2411 * @rq: request to complete
2412 */
2413void blk_end_sync_rq(struct request *rq)
2414{
2415 struct completion *waiting = rq->waiting;
2416
2417 rq->waiting = NULL;
2418 __blk_put_request(rq->q, rq);
2419
2420 /*
2421 * complete last, if this is a stack request the process (and thus
2422 * the rq pointer) could be invalid right after this complete()
2423 */
2424 complete(waiting);
2425}
2426EXPORT_SYMBOL(blk_end_sync_rq);
2427
2428/**
2429 * blk_congestion_wait - wait for a queue to become uncongested
2430 * @rw: READ or WRITE
2431 * @timeout: timeout in jiffies
2432 *
2433 * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
2434 * If no queues are congested then just wait for the next request to be
2435 * returned.
2436 */
2437long blk_congestion_wait(int rw, long timeout)
2438{
2439 long ret;
2440 DEFINE_WAIT(wait);
2441 wait_queue_head_t *wqh = &congestion_wqh[rw];
2442
2443 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
2444 ret = io_schedule_timeout(timeout);
2445 finish_wait(wqh, &wait);
2446 return ret;
2447}
2448
2449EXPORT_SYMBOL(blk_congestion_wait);
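
/*
 * Illustrative sketch (not part of the original file): throttling a
 * writeback-style loop on queue congestion, in the manner of the VM's
 * dirty-page writeout. example_more_dirty_data() and example_write_some()
 * are hypothetical helpers.
 */
static void example_writeout_loop(void)
{
	while (example_more_dirty_data()) {
		if (example_write_some())
			continue;

		/* nothing could be queued, back off for up to 100ms */
		blk_congestion_wait(WRITE, HZ / 10);
	}
}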
2450
2451/*
2452 * Has to be called with the request spinlock acquired
2453 */
2454static int attempt_merge(request_queue_t *q, struct request *req,
2455 struct request *next)
2456{
2457 if (!rq_mergeable(req) || !rq_mergeable(next))
2458 return 0;
2459
2460 /*
 2461	 * not contiguous
2462 */
2463 if (req->sector + req->nr_sectors != next->sector)
2464 return 0;
2465
2466 if (rq_data_dir(req) != rq_data_dir(next)
2467 || req->rq_disk != next->rq_disk
2468 || next->waiting || next->special)
2469 return 0;
2470
2471 /*
2472 * If we are allowed to merge, then append bio list
2473 * from next to rq and release next. merge_requests_fn
2474 * will have updated segment counts, update sector
2475 * counts here.
2476 */
2477 if (!q->merge_requests_fn(q, req, next))
2478 return 0;
2479
2480 /*
2481 * At this point we have either done a back merge
2482 * or front merge. We need the smaller start_time of
2483 * the merged requests to be the current request
2484 * for accounting purposes.
2485 */
2486 if (time_after(req->start_time, next->start_time))
2487 req->start_time = next->start_time;
2488
2489 req->biotail->bi_next = next->bio;
2490 req->biotail = next->biotail;
2491
2492 req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
2493
2494 elv_merge_requests(q, req, next);
2495
2496 if (req->rq_disk) {
2497 disk_round_stats(req->rq_disk);
2498 req->rq_disk->in_flight--;
2499 }
2500
2501 __blk_put_request(q, next);
2502 return 1;
2503}
2504
2505static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
2506{
2507 struct request *next = elv_latter_request(q, rq);
2508
2509 if (next)
2510 return attempt_merge(q, rq, next);
2511
2512 return 0;
2513}
2514
2515static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
2516{
2517 struct request *prev = elv_former_request(q, rq);
2518
2519 if (prev)
2520 return attempt_merge(q, prev, rq);
2521
2522 return 0;
2523}
2524
2525/**
2526 * blk_attempt_remerge - attempt to remerge active head with next request
2527 * @q: The &request_queue_t belonging to the device
2528 * @rq: The head request (usually)
2529 *
2530 * Description:
2531 * For head-active devices, the queue can easily be unplugged so quickly
2532 * that proper merging is not done on the front request. This may hurt
2533 * performance greatly for some devices. The block layer cannot safely
2534 * do merging on that first request for these queues, but the driver can
 2535 * call this function and make it happen anyway. Only the driver knows
2536 * when it is safe to do so.
2537 **/
2538void blk_attempt_remerge(request_queue_t *q, struct request *rq)
2539{
2540 unsigned long flags;
2541
2542 spin_lock_irqsave(q->queue_lock, flags);
2543 attempt_back_merge(q, rq);
2544 spin_unlock_irqrestore(q->queue_lock, flags);
2545}
2546
2547EXPORT_SYMBOL(blk_attempt_remerge);
2548
2549/*
2550 * Non-locking blk_attempt_remerge variant.
2551 */
2552void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
2553{
2554 attempt_back_merge(q, rq);
2555}
2556
2557EXPORT_SYMBOL(__blk_attempt_remerge);
2558
2559static int __make_request(request_queue_t *q, struct bio *bio)
2560{
2561 struct request *req, *freereq = NULL;
2562 int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err;
2563 sector_t sector;
2564
2565 sector = bio->bi_sector;
2566 nr_sectors = bio_sectors(bio);
2567 cur_nr_sectors = bio_cur_sectors(bio);
2568
2569 rw = bio_data_dir(bio);
2570
2571 /*
2572 * low level driver can indicate that it wants pages above a
2573 * certain limit bounced to low memory (ie for highmem, or even
2574 * ISA dma in theory)
2575 */
2576 blk_queue_bounce(q, &bio);
2577
2578 spin_lock_prefetch(q->queue_lock);
2579
2580 barrier = bio_barrier(bio);
2581 if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
2582 err = -EOPNOTSUPP;
2583 goto end_io;
2584 }
2585
2586again:
2587 spin_lock_irq(q->queue_lock);
2588
2589 if (elv_queue_empty(q)) {
2590 blk_plug_device(q);
2591 goto get_rq;
2592 }
2593 if (barrier)
2594 goto get_rq;
2595
2596 el_ret = elv_merge(q, &req, bio);
2597 switch (el_ret) {
2598 case ELEVATOR_BACK_MERGE:
2599 BUG_ON(!rq_mergeable(req));
2600
2601 if (!q->back_merge_fn(q, req, bio))
2602 break;
2603
2604 req->biotail->bi_next = bio;
2605 req->biotail = bio;
2606 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
2607 drive_stat_acct(req, nr_sectors, 0);
2608 if (!attempt_back_merge(q, req))
2609 elv_merged_request(q, req);
2610 goto out;
2611
2612 case ELEVATOR_FRONT_MERGE:
2613 BUG_ON(!rq_mergeable(req));
2614
2615 if (!q->front_merge_fn(q, req, bio))
2616 break;
2617
2618 bio->bi_next = req->bio;
2619 req->bio = bio;
2620
2621 /*
 2622			 * may not be valid. if the low level driver said
 2623			 * it didn't need a bounce buffer then it had better
 2624			 * not touch req->buffer either...
2625 */
2626 req->buffer = bio_data(bio);
2627 req->current_nr_sectors = cur_nr_sectors;
2628 req->hard_cur_sectors = cur_nr_sectors;
2629 req->sector = req->hard_sector = sector;
2630 req->nr_sectors = req->hard_nr_sectors += nr_sectors;
2631 drive_stat_acct(req, nr_sectors, 0);
2632 if (!attempt_front_merge(q, req))
2633 elv_merged_request(q, req);
2634 goto out;
2635
2636 /*
2637 * elevator says don't/can't merge. get new request
2638 */
2639 case ELEVATOR_NO_MERGE:
2640 break;
2641
2642 default:
2643 printk("elevator returned crap (%d)\n", el_ret);
2644 BUG();
2645 }
2646
2647 /*
2648 * Grab a free request from the freelist - if that is empty, check
2649 * if we are doing read ahead and abort instead of blocking for
2650 * a free slot.
2651 */
2652get_rq:
2653 if (freereq) {
2654 req = freereq;
2655 freereq = NULL;
2656 } else {
2657 spin_unlock_irq(q->queue_lock);
2658 if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
2659 /*
2660 * READA bit set
2661 */
2662 err = -EWOULDBLOCK;
2663 if (bio_rw_ahead(bio))
2664 goto end_io;
2665
2666 freereq = get_request_wait(q, rw);
2667 }
2668 goto again;
2669 }
2670
2671 req->flags |= REQ_CMD;
2672
2673 /*
2674 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
2675 */
2676 if (bio_rw_ahead(bio) || bio_failfast(bio))
2677 req->flags |= REQ_FAILFAST;
2678
2679 /*
2680 * REQ_BARRIER implies no merging, but lets make it explicit
2681 */
2682 if (barrier)
2683 req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
2684
2685 req->errors = 0;
2686 req->hard_sector = req->sector = sector;
2687 req->hard_nr_sectors = req->nr_sectors = nr_sectors;
2688 req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
2689 req->nr_phys_segments = bio_phys_segments(q, bio);
2690 req->nr_hw_segments = bio_hw_segments(q, bio);
2691 req->buffer = bio_data(bio); /* see ->buffer comment above */
2692 req->waiting = NULL;
2693 req->bio = req->biotail = bio;
2694 req->rq_disk = bio->bi_bdev->bd_disk;
2695 req->start_time = jiffies;
2696
2697 add_request(q, req);
2698out:
2699 if (freereq)
2700 __blk_put_request(q, freereq);
2701 if (bio_sync(bio))
2702 __generic_unplug_device(q);
2703
2704 spin_unlock_irq(q->queue_lock);
2705 return 0;
2706
2707end_io:
2708 bio_endio(bio, nr_sectors << 9, err);
2709 return 0;
2710}
2711
2712/*
2713 * If bio->bi_dev is a partition, remap the location
2714 */
2715static inline void blk_partition_remap(struct bio *bio)
2716{
2717 struct block_device *bdev = bio->bi_bdev;
2718
2719 if (bdev != bdev->bd_contains) {
2720 struct hd_struct *p = bdev->bd_part;
2721
2722 switch (bio->bi_rw) {
2723 case READ:
2724 p->read_sectors += bio_sectors(bio);
2725 p->reads++;
2726 break;
2727 case WRITE:
2728 p->write_sectors += bio_sectors(bio);
2729 p->writes++;
2730 break;
2731 }
2732 bio->bi_sector += p->start_sect;
2733 bio->bi_bdev = bdev->bd_contains;
2734 }
2735}
2736
2737void blk_finish_queue_drain(request_queue_t *q)
2738{
2739 struct request_list *rl = &q->rq;
2740 struct request *rq;
2741
2742 spin_lock_irq(q->queue_lock);
2743 clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2744
2745 while (!list_empty(&q->drain_list)) {
2746 rq = list_entry_rq(q->drain_list.next);
2747
2748 list_del_init(&rq->queuelist);
2749 __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
2750 }
2751
2752 spin_unlock_irq(q->queue_lock);
2753
2754 wake_up(&rl->wait[0]);
2755 wake_up(&rl->wait[1]);
2756 wake_up(&rl->drain);
2757}
2758
2759static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
2760{
2761 int wait = rl->count[READ] + rl->count[WRITE];
2762
2763 if (dispatch)
2764 wait += !list_empty(&q->queue_head);
2765
2766 return wait;
2767}
2768
2769/*
2770 * We rely on the fact that only requests allocated through blk_alloc_request()
2771 * have io scheduler private data structures associated with them. Any other
2772 * type of request (allocated on stack or through kmalloc()) should not go
2773 * to the io scheduler core, but be attached to the queue head instead.
2774 */
2775void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
2776{
2777 struct request_list *rl = &q->rq;
2778 DEFINE_WAIT(wait);
2779
2780 spin_lock_irq(q->queue_lock);
2781 set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
2782
2783 while (wait_drain(q, rl, wait_dispatch)) {
2784 prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
2785
2786 if (wait_drain(q, rl, wait_dispatch)) {
2787 __generic_unplug_device(q);
2788 spin_unlock_irq(q->queue_lock);
2789 io_schedule();
2790 spin_lock_irq(q->queue_lock);
2791 }
2792
2793 finish_wait(&rl->drain, &wait);
2794 }
2795
2796 spin_unlock_irq(q->queue_lock);
2797}
2798
2799/*
2800 * block waiting for the io scheduler being started again.
2801 */
2802static inline void block_wait_queue_running(request_queue_t *q)
2803{
2804 DEFINE_WAIT(wait);
2805
2806 while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
2807 struct request_list *rl = &q->rq;
2808
2809 prepare_to_wait_exclusive(&rl->drain, &wait,
2810 TASK_UNINTERRUPTIBLE);
2811
2812 /*
2813 * re-check the condition. avoids using prepare_to_wait()
2814 * in the fast path (queue is running)
2815 */
2816 if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
2817 io_schedule();
2818
2819 finish_wait(&rl->drain, &wait);
2820 }
2821}
2822
2823static void handle_bad_sector(struct bio *bio)
2824{
2825 char b[BDEVNAME_SIZE];
2826
2827 printk(KERN_INFO "attempt to access beyond end of device\n");
2828 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
2829 bdevname(bio->bi_bdev, b),
2830 bio->bi_rw,
2831 (unsigned long long)bio->bi_sector + bio_sectors(bio),
2832 (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
2833
2834 set_bit(BIO_EOF, &bio->bi_flags);
2835}
2836
2837/**
2838 * generic_make_request: hand a buffer to its device driver for I/O
2839 * @bio: The bio describing the location in memory and on the device.
2840 *
2841 * generic_make_request() is used to make I/O requests of block
2842 * devices. It is passed a &struct bio, which describes the I/O that needs
2843 * to be done.
2844 *
2845 * generic_make_request() does not return any status. The
2846 * success/failure status of the request, along with notification of
2847 * completion, is delivered asynchronously through the bio->bi_end_io
 2848 * function described (one day) elsewhere.
2849 *
2850 * The caller of generic_make_request must make sure that bi_io_vec
2851 * are set to describe the memory buffer, and that bi_dev and bi_sector are
2852 * set to describe the device address, and the
2853 * bi_end_io and optionally bi_private are set to describe how
2854 * completion notification should be signaled.
2855 *
2856 * generic_make_request and the drivers it calls may use bi_next if this
2857 * bio happens to be merged with someone else, and may change bi_dev and
2858 * bi_sector for remaps as it sees fit. So the values of these fields
2859 * should NOT be depended on after the call to generic_make_request.
2860 */
2861void generic_make_request(struct bio *bio)
2862{
2863 request_queue_t *q;
2864 sector_t maxsector;
2865 int ret, nr_sectors = bio_sectors(bio);
2866
2867 might_sleep();
2868 /* Test device or partition size, when known. */
2869 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
2870 if (maxsector) {
2871 sector_t sector = bio->bi_sector;
2872
2873 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
2874 /*
2875 * This may well happen - the kernel calls bread()
2876 * without checking the size of the device, e.g., when
2877 * mounting a device.
2878 */
2879 handle_bad_sector(bio);
2880 goto end_io;
2881 }
2882 }
2883
2884 /*
2885 * Resolve the mapping until finished. (drivers are
2886 * still free to implement/resolve their own stacking
2887 * by explicitly returning 0)
2888 *
2889 * NOTE: we don't repeat the blk_size check for each new device.
2890 * Stacking drivers are expected to know what they are doing.
2891 */
2892 do {
2893 char b[BDEVNAME_SIZE];
2894
2895 q = bdev_get_queue(bio->bi_bdev);
2896 if (!q) {
2897 printk(KERN_ERR
2898 "generic_make_request: Trying to access "
2899 "nonexistent block-device %s (%Lu)\n",
2900 bdevname(bio->bi_bdev, b),
2901 (long long) bio->bi_sector);
2902end_io:
2903 bio_endio(bio, bio->bi_size, -EIO);
2904 break;
2905 }
2906
2907 if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
2908 printk("bio too big device %s (%u > %u)\n",
2909 bdevname(bio->bi_bdev, b),
2910 bio_sectors(bio),
2911 q->max_hw_sectors);
2912 goto end_io;
2913 }
2914
2915 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
2916 goto end_io;
2917
2918 block_wait_queue_running(q);
2919
2920 /*
2921 * If this device has partitions, remap block n
2922 * of partition p to block n+start(p) of the disk.
2923 */
2924 blk_partition_remap(bio);
2925
2926 ret = q->make_request_fn(q, bio);
2927 } while (ret);
2928}
2929
2930EXPORT_SYMBOL(generic_make_request);
2931
2932/**
2933 * submit_bio: submit a bio to the block device layer for I/O
2934 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
2935 * @bio: The &struct bio which describes the I/O
2936 *
2937 * submit_bio() is very similar in purpose to generic_make_request(), and
2938 * uses that function to do most of the work. Both are fairly rough
2939 * interfaces, @bio must be presetup and ready for I/O.
2940 *
2941 */
2942void submit_bio(int rw, struct bio *bio)
2943{
2944 int count = bio_sectors(bio);
2945
2946 BIO_BUG_ON(!bio->bi_size);
2947 BIO_BUG_ON(!bio->bi_io_vec);
2948 bio->bi_rw = rw;
2949 if (rw & WRITE)
2950 mod_page_state(pgpgout, count);
2951 else
2952 mod_page_state(pgpgin, count);
2953
2954 if (unlikely(block_dump)) {
2955 char b[BDEVNAME_SIZE];
2956 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
2957 current->comm, current->pid,
2958 (rw & WRITE) ? "WRITE" : "READ",
2959 (unsigned long long)bio->bi_sector,
2960 bdevname(bio->bi_bdev,b));
2961 }
2962
2963 generic_make_request(bio);
2964}
2965
2966EXPORT_SYMBOL(submit_bio);
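
/*
 * Illustrative sketch (not part of the original file): building a one-page
 * bio by hand and submitting it. example_end_io() and the target sector are
 * assumptions; real callers must keep the page referenced until bi_end_io
 * runs.
 */
static int example_read_page(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = example_end_io;	/* hypothetical completion */

	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	return 0;
}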
2967
2968void blk_recalc_rq_segments(struct request *rq)
2969{
2970 struct bio *bio, *prevbio = NULL;
2971 int nr_phys_segs, nr_hw_segs;
2972 unsigned int phys_size, hw_size;
2973 request_queue_t *q = rq->q;
2974
2975 if (!rq->bio)
2976 return;
2977
2978 phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
2979 rq_for_each_bio(bio, rq) {
2980 /* Force bio hw/phys segs to be recalculated. */
2981 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
2982
2983 nr_phys_segs += bio_phys_segments(q, bio);
2984 nr_hw_segs += bio_hw_segments(q, bio);
2985 if (prevbio) {
2986 int pseg = phys_size + prevbio->bi_size + bio->bi_size;
2987 int hseg = hw_size + prevbio->bi_size + bio->bi_size;
2988
2989 if (blk_phys_contig_segment(q, prevbio, bio) &&
2990 pseg <= q->max_segment_size) {
2991 nr_phys_segs--;
2992 phys_size += prevbio->bi_size + bio->bi_size;
2993 } else
2994 phys_size = 0;
2995
2996 if (blk_hw_contig_segment(q, prevbio, bio) &&
2997 hseg <= q->max_segment_size) {
2998 nr_hw_segs--;
2999 hw_size += prevbio->bi_size + bio->bi_size;
3000 } else
3001 hw_size = 0;
3002 }
3003 prevbio = bio;
3004 }
3005
3006 rq->nr_phys_segments = nr_phys_segs;
3007 rq->nr_hw_segments = nr_hw_segs;
3008}
3009
3010void blk_recalc_rq_sectors(struct request *rq, int nsect)
3011{
3012 if (blk_fs_request(rq)) {
3013 rq->hard_sector += nsect;
3014 rq->hard_nr_sectors -= nsect;
3015
3016 /*
3017 * Move the I/O submission pointers ahead if required.
3018 */
3019 if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
3020 (rq->sector <= rq->hard_sector)) {
3021 rq->sector = rq->hard_sector;
3022 rq->nr_sectors = rq->hard_nr_sectors;
3023 rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
3024 rq->current_nr_sectors = rq->hard_cur_sectors;
3025 rq->buffer = bio_data(rq->bio);
3026 }
3027
3028 /*
3029 * if total number of sectors is less than the first segment
3030 * size, something has gone terribly wrong
3031 */
3032 if (rq->nr_sectors < rq->current_nr_sectors) {
3033 printk("blk: request botched\n");
3034 rq->nr_sectors = rq->current_nr_sectors;
3035 }
3036 }
3037}
3038
3039static int __end_that_request_first(struct request *req, int uptodate,
3040 int nr_bytes)
3041{
3042 int total_bytes, bio_nbytes, error, next_idx = 0;
3043 struct bio *bio;
3044
3045 /*
3046 * extend uptodate bool to allow < 0 value to be direct io error
3047 */
3048 error = 0;
3049 if (end_io_error(uptodate))
3050 error = !uptodate ? -EIO : uptodate;
3051
3052 /*
3053 * for a REQ_BLOCK_PC request, we want to carry any eventual
3054 * sense key with us all the way through
3055 */
3056 if (!blk_pc_request(req))
3057 req->errors = 0;
3058
3059 if (!uptodate) {
3060 if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
3061 printk("end_request: I/O error, dev %s, sector %llu\n",
3062 req->rq_disk ? req->rq_disk->disk_name : "?",
3063 (unsigned long long)req->sector);
3064 }
3065
3066 total_bytes = bio_nbytes = 0;
3067 while ((bio = req->bio) != NULL) {
3068 int nbytes;
3069
3070 if (nr_bytes >= bio->bi_size) {
3071 req->bio = bio->bi_next;
3072 nbytes = bio->bi_size;
3073 bio_endio(bio, nbytes, error);
3074 next_idx = 0;
3075 bio_nbytes = 0;
3076 } else {
3077 int idx = bio->bi_idx + next_idx;
3078
3079 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
3080 blk_dump_rq_flags(req, "__end_that");
3081 printk("%s: bio idx %d >= vcnt %d\n",
3082 __FUNCTION__,
3083 bio->bi_idx, bio->bi_vcnt);
3084 break;
3085 }
3086
3087 nbytes = bio_iovec_idx(bio, idx)->bv_len;
3088 BIO_BUG_ON(nbytes > bio->bi_size);
3089
3090 /*
3091 * not a complete bvec done
3092 */
3093 if (unlikely(nbytes > nr_bytes)) {
3094 bio_nbytes += nr_bytes;
3095 total_bytes += nr_bytes;
3096 break;
3097 }
3098
3099 /*
3100 * advance to the next vector
3101 */
3102 next_idx++;
3103 bio_nbytes += nbytes;
3104 }
3105
3106 total_bytes += nbytes;
3107 nr_bytes -= nbytes;
3108
3109 if ((bio = req->bio)) {
3110 /*
3111 * end more in this run, or just return 'not-done'
3112 */
3113 if (unlikely(nr_bytes <= 0))
3114 break;
3115 }
3116 }
3117
3118 /*
3119 * completely done
3120 */
3121 if (!req->bio)
3122 return 0;
3123
3124 /*
3125 * if the request wasn't completed, update state
3126 */
3127 if (bio_nbytes) {
3128 bio_endio(bio, bio_nbytes, error);
3129 bio->bi_idx += next_idx;
3130 bio_iovec(bio)->bv_offset += nr_bytes;
3131 bio_iovec(bio)->bv_len -= nr_bytes;
3132 }
3133
3134 blk_recalc_rq_sectors(req, total_bytes >> 9);
3135 blk_recalc_rq_segments(req);
3136 return 1;
3137}
3138
3139/**
3140 * end_that_request_first - end I/O on a request
3141 * @req: the request being processed
3142 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3143 * @nr_sectors: number of sectors to end I/O on
3144 *
3145 * Description:
3146 * Ends I/O on a number of sectors attached to @req, and sets it up
3147 * for the next range of segments (if any) in the cluster.
3148 *
3149 * Return:
3150 * 0 - we are done with this request, call end_that_request_last()
3151 * 1 - still buffers pending for this request
3152 **/
3153int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
3154{
3155 return __end_that_request_first(req, uptodate, nr_sectors << 9);
3156}
3157
3158EXPORT_SYMBOL(end_that_request_first);
3159
3160/**
3161 * end_that_request_chunk - end I/O on a request
3162 * @req: the request being processed
3163 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
3164 * @nr_bytes: number of bytes to complete
3165 *
3166 * Description:
3167 * Ends I/O on a number of bytes attached to @req, and sets it up
3168 * for the next range of segments (if any). Like end_that_request_first(),
3169 * but deals with bytes instead of sectors.
3170 *
3171 * Return:
3172 * 0 - we are done with this request, call end_that_request_last()
3173 * 1 - still buffers pending for this request
3174 **/
3175int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
3176{
3177 return __end_that_request_first(req, uptodate, nr_bytes);
3178}
3179
3180EXPORT_SYMBOL(end_that_request_chunk);
3181
3182/*
3183 * queue lock must be held
3184 */
3185void end_that_request_last(struct request *req)
3186{
3187 struct gendisk *disk = req->rq_disk;
3188
3189 if (unlikely(laptop_mode) && blk_fs_request(req))
3190 laptop_io_completion();
3191
3192 if (disk && blk_fs_request(req)) {
3193 unsigned long duration = jiffies - req->start_time;
3194 switch (rq_data_dir(req)) {
3195 case WRITE:
3196 __disk_stat_inc(disk, writes);
3197 __disk_stat_add(disk, write_ticks, duration);
3198 break;
3199 case READ:
3200 __disk_stat_inc(disk, reads);
3201 __disk_stat_add(disk, read_ticks, duration);
3202 break;
3203 }
3204 disk_round_stats(disk);
3205 disk->in_flight--;
3206 }
3207 if (req->end_io)
3208 req->end_io(req);
3209 else
3210 __blk_put_request(req->q, req);
3211}
3212
3213EXPORT_SYMBOL(end_that_request_last);
3214
3215void end_request(struct request *req, int uptodate)
3216{
3217 if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
3218 add_disk_randomness(req->rq_disk);
3219 blkdev_dequeue_request(req);
3220 end_that_request_last(req);
3221 }
3222}
3223
3224EXPORT_SYMBOL(end_request);
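/*
 * Completion sketch (hypothetical driver; my_request_fn() and
 * my_do_transfer() are illustrative names): the classic request_fn
 * pattern that end_that_request_first()/end_that_request_last() are
 * meant for.  end_request() above is the shorthand for finishing just
 * the current segment, and end_that_request_chunk() is the
 * byte-granular variant of the same loop.
 */
static int my_do_transfer(struct request *req);	/* hypothetical hardware I/O */

static void my_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		int uptodate = (my_do_transfer(req) == 0);

		/*
		 * request_fn runs with the queue lock held, as
		 * end_that_request_last() requires.
		 */
		if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
	}
}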
3225
3226void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
3227{
3228 /* first three bits are identical in rq->flags and bio->bi_rw */
3229 rq->flags |= (bio->bi_rw & 7);
3230
3231 rq->nr_phys_segments = bio_phys_segments(q, bio);
3232 rq->nr_hw_segments = bio_hw_segments(q, bio);
3233 rq->current_nr_sectors = bio_cur_sectors(bio);
3234 rq->hard_cur_sectors = rq->current_nr_sectors;
3235 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
3236 rq->buffer = bio_data(bio);
3237
3238 rq->bio = rq->biotail = bio;
3239}
3240
3241EXPORT_SYMBOL(blk_rq_bio_prep);
3242
3243int kblockd_schedule_work(struct work_struct *work)
3244{
3245 return queue_work(kblockd_workqueue, work);
3246}
3247
3248EXPORT_SYMBOL(kblockd_schedule_work);
3249
3250void kblockd_flush(void)
3251{
3252 flush_workqueue(kblockd_workqueue);
3253}
3254EXPORT_SYMBOL(kblockd_flush);
3255
3256int __init blk_dev_init(void)
3257{
3258 kblockd_workqueue = create_workqueue("kblockd");
3259 if (!kblockd_workqueue)
3260 panic("Failed to create kblockd\n");
3261
3262 request_cachep = kmem_cache_create("blkdev_requests",
3263 sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
3264
3265 requestq_cachep = kmem_cache_create("blkdev_queue",
3266 sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
3267
3268 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3269 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3270
3271 blk_max_low_pfn = max_low_pfn;
3272 blk_max_pfn = max_pfn;
3273
3274 return 0;
3275}
3276
3277/*
3278 * IO Context helper functions
3279 */
3280void put_io_context(struct io_context *ioc)
3281{
3282 if (ioc == NULL)
3283 return;
3284
3285 BUG_ON(atomic_read(&ioc->refcount) == 0);
3286
3287 if (atomic_dec_and_test(&ioc->refcount)) {
3288 if (ioc->aic && ioc->aic->dtor)
3289 ioc->aic->dtor(ioc->aic);
3290 if (ioc->cic && ioc->cic->dtor)
3291 ioc->cic->dtor(ioc->cic);
3292
3293 kmem_cache_free(iocontext_cachep, ioc);
3294 }
3295}
3296EXPORT_SYMBOL(put_io_context);
3297
3298/* Called by the exiting task */
3299void exit_io_context(void)
3300{
3301 unsigned long flags;
3302 struct io_context *ioc;
3303
3304 local_irq_save(flags);
3305 ioc = current->io_context;
3306 current->io_context = NULL;
3307 local_irq_restore(flags);
3308
3309 if (ioc->aic && ioc->aic->exit)
3310 ioc->aic->exit(ioc->aic);
3311 if (ioc->cic && ioc->cic->exit)
3312 ioc->cic->exit(ioc->cic);
3313
3314 put_io_context(ioc);
3315}
3316
3317/*
3318 * If the current task has no IO context then create one and initialise it.
3319 * If it does have a context, take a ref on it.
3320 *
3321 * This is always called in the context of the task which submitted the I/O.
3322 * But weird things happen, so we disable local interrupts to ensure exclusive
3323 * access to *current.
3324 */
3325struct io_context *get_io_context(int gfp_flags)
3326{
3327 struct task_struct *tsk = current;
3328 unsigned long flags;
3329 struct io_context *ret;
3330
3331 local_irq_save(flags);
3332 ret = tsk->io_context;
3333 if (ret)
3334 goto out;
3335
3336 local_irq_restore(flags);
3337
3338 ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
3339 if (ret) {
3340 atomic_set(&ret->refcount, 1);
3341 ret->pid = tsk->pid;
3342 ret->last_waited = jiffies; /* doesn't matter... */
3343 ret->nr_batch_requests = 0; /* because this is 0 */
3344 ret->aic = NULL;
3345 ret->cic = NULL;
3346 spin_lock_init(&ret->lock);
3347
3348 local_irq_save(flags);
3349
3350 /*
3351 * very unlikely, someone raced with us in setting up the task
3352 * io context. free new context and just grab a reference.
3353 */
3354 if (!tsk->io_context)
3355 tsk->io_context = ret;
3356 else {
3357 kmem_cache_free(iocontext_cachep, ret);
3358 ret = tsk->io_context;
3359 }
3360
3361out:
3362 atomic_inc(&ret->refcount);
3363 local_irq_restore(flags);
3364 }
3365
3366 return ret;
3367}
3368EXPORT_SYMBOL(get_io_context);
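/*
 * Refcounting sketch (hypothetical caller): every successful
 * get_io_context() must be paired with a put_io_context(), which frees
 * the context once the last reference is dropped.
 */
static void my_show_io_context(void)
{
	struct io_context *ioc = get_io_context(GFP_ATOMIC);

	if (ioc) {
		printk(KERN_DEBUG "io_context created by pid %d\n", ioc->pid);
		put_io_context(ioc);	/* drop the reference we just took */
	}
}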
3369
3370void copy_io_context(struct io_context **pdst, struct io_context **psrc)
3371{
3372 struct io_context *src = *psrc;
3373 struct io_context *dst = *pdst;
3374
3375 if (src) {
3376 BUG_ON(atomic_read(&src->refcount) == 0);
3377 atomic_inc(&src->refcount);
3378 put_io_context(dst);
3379 *pdst = src;
3380 }
3381}
3382EXPORT_SYMBOL(copy_io_context);
3383
3384void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
3385{
3386 struct io_context *temp;
3387 temp = *ioc1;
3388 *ioc1 = *ioc2;
3389 *ioc2 = temp;
3390}
3391EXPORT_SYMBOL(swap_io_context);
3392
3393/*
3394 * sysfs parts below
3395 */
3396struct queue_sysfs_entry {
3397 struct attribute attr;
3398 ssize_t (*show)(struct request_queue *, char *);
3399 ssize_t (*store)(struct request_queue *, const char *, size_t);
3400};
3401
3402static ssize_t
3403queue_var_show(unsigned int var, char *page)
3404{
3405 return sprintf(page, "%d\n", var);
3406}
3407
3408static ssize_t
3409queue_var_store(unsigned long *var, const char *page, size_t count)
3410{
3411 char *p = (char *) page;
3412
3413 *var = simple_strtoul(p, &p, 10);
3414 return count;
3415}
3416
3417static ssize_t queue_requests_show(struct request_queue *q, char *page)
3418{
3419 return queue_var_show(q->nr_requests, (page));
3420}
3421
3422static ssize_t
3423queue_requests_store(struct request_queue *q, const char *page, size_t count)
3424{
3425 struct request_list *rl = &q->rq;
3426
3427 int ret = queue_var_store(&q->nr_requests, page, count);
3428 if (q->nr_requests < BLKDEV_MIN_RQ)
3429 q->nr_requests = BLKDEV_MIN_RQ;
3430 blk_queue_congestion_threshold(q);
3431
3432 if (rl->count[READ] >= queue_congestion_on_threshold(q))
3433 set_queue_congested(q, READ);
3434 else if (rl->count[READ] < queue_congestion_off_threshold(q))
3435 clear_queue_congested(q, READ);
3436
3437 if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
3438 set_queue_congested(q, WRITE);
3439 else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
3440 clear_queue_congested(q, WRITE);
3441
3442 if (rl->count[READ] >= q->nr_requests) {
3443 blk_set_queue_full(q, READ);
3444 } else if (rl->count[READ]+1 <= q->nr_requests) {
3445 blk_clear_queue_full(q, READ);
3446 wake_up(&rl->wait[READ]);
3447 }
3448
3449 if (rl->count[WRITE] >= q->nr_requests) {
3450 blk_set_queue_full(q, WRITE);
3451 } else if (rl->count[WRITE]+1 <= q->nr_requests) {
3452 blk_clear_queue_full(q, WRITE);
3453 wake_up(&rl->wait[WRITE]);
3454 }
3455 return ret;
3456}
3457
3458static ssize_t queue_ra_show(struct request_queue *q, char *page)
3459{
3460 int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3461
3462 return queue_var_show(ra_kb, (page));
3463}
3464
3465static ssize_t
3466queue_ra_store(struct request_queue *q, const char *page, size_t count)
3467{
3468 unsigned long ra_kb;
3469 ssize_t ret = queue_var_store(&ra_kb, page, count);
3470
3471 spin_lock_irq(q->queue_lock);
3472 if (ra_kb > (q->max_sectors >> 1))
3473 ra_kb = (q->max_sectors >> 1);
3474
3475 q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
3476 spin_unlock_irq(q->queue_lock);
3477
3478 return ret;
3479}
3480
3481static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
3482{
3483 int max_sectors_kb = q->max_sectors >> 1;
3484
3485 return queue_var_show(max_sectors_kb, (page));
3486}
3487
3488static ssize_t
3489queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
3490{
3491 unsigned long max_sectors_kb,
3492 max_hw_sectors_kb = q->max_hw_sectors >> 1,
3493 page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
3494 ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
3495 int ra_kb;
3496
3497 if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
3498 return -EINVAL;
3499 /*
3500 * Take the queue lock to update the readahead and max_sectors
3501 * values synchronously:
3502 */
3503 spin_lock_irq(q->queue_lock);
3504 /*
3505 * Trim readahead window as well, if necessary:
3506 */
3507 ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
3508 if (ra_kb > max_sectors_kb)
3509 q->backing_dev_info.ra_pages =
3510 max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
3511
3512 q->max_sectors = max_sectors_kb << 1;
3513 spin_unlock_irq(q->queue_lock);
3514
3515 return ret;
3516}
3517
3518static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
3519{
3520 int max_hw_sectors_kb = q->max_hw_sectors >> 1;
3521
3522 return queue_var_show(max_hw_sectors_kb, (page));
3523}
3524
3525
3526static struct queue_sysfs_entry queue_requests_entry = {
3527 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
3528 .show = queue_requests_show,
3529 .store = queue_requests_store,
3530};
3531
3532static struct queue_sysfs_entry queue_ra_entry = {
3533 .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
3534 .show = queue_ra_show,
3535 .store = queue_ra_store,
3536};
3537
3538static struct queue_sysfs_entry queue_max_sectors_entry = {
3539 .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
3540 .show = queue_max_sectors_show,
3541 .store = queue_max_sectors_store,
3542};
3543
3544static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
3545 .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
3546 .show = queue_max_hw_sectors_show,
3547};
3548
3549static struct queue_sysfs_entry queue_iosched_entry = {
3550 .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
3551 .show = elv_iosched_show,
3552 .store = elv_iosched_store,
3553};
3554
3555static struct attribute *default_attrs[] = {
3556 &queue_requests_entry.attr,
3557 &queue_ra_entry.attr,
3558 &queue_max_hw_sectors_entry.attr,
3559 &queue_max_sectors_entry.attr,
3560 &queue_iosched_entry.attr,
3561 NULL,
3562};
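/*
 * Extension sketch (hypothetical attribute, illustrative names): a new
 * read-only queue attribute follows the same pattern as the entries
 * above -- a show routine plus a queue_sysfs_entry -- and would also
 * have to be listed in default_attrs[] above to become visible.
 */
static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->max_segment_size, (page));
}

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};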
3563
3564#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
3565
3566static ssize_t
3567queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3568{
3569 struct queue_sysfs_entry *entry = to_queue(attr);
3570 struct request_queue *q;
3571
3572 q = container_of(kobj, struct request_queue, kobj);
3573 if (!entry->show)
3574 return 0;
3575
3576 return entry->show(q, page);
3577}
3578
3579static ssize_t
3580queue_attr_store(struct kobject *kobj, struct attribute *attr,
3581 const char *page, size_t length)
3582{
3583 struct queue_sysfs_entry *entry = to_queue(attr);
3584 struct request_queue *q;
3585
3586 q = container_of(kobj, struct request_queue, kobj);
3587 if (!entry->store)
3588 return -EINVAL;
3589
3590 return entry->store(q, page, length);
3591}
3592
3593static struct sysfs_ops queue_sysfs_ops = {
3594 .show = queue_attr_show,
3595 .store = queue_attr_store,
3596};
3597
3598struct kobj_type queue_ktype = {
3599 .sysfs_ops = &queue_sysfs_ops,
3600 .default_attrs = default_attrs,
3601};
3602
3603int blk_register_queue(struct gendisk *disk)
3604{
3605 int ret;
3606
3607 request_queue_t *q = disk->queue;
3608
3609 if (!q || !q->request_fn)
3610 return -ENXIO;
3611
3612 q->kobj.parent = kobject_get(&disk->kobj);
3613 if (!q->kobj.parent)
3614 return -EBUSY;
3615
3616 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
3617 q->kobj.ktype = &queue_ktype;
3618
3619 ret = kobject_register(&q->kobj);
3620 if (ret < 0)
3621 return ret;
3622
3623 ret = elv_register_queue(q);
3624 if (ret) {
3625 kobject_unregister(&q->kobj);
3626 return ret;
3627 }
3628
3629 return 0;
3630}
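/*
 * Userspace sketch: once blk_register_queue() has run for a disk, the
 * attributes defined above appear as
 * /sys/block/<disk>/queue/{nr_requests,read_ahead_kb,max_sectors_kb,
 * max_hw_sectors_kb,scheduler}.  The disk name "hda" below is only an
 * assumption; the write needs root and is handled by
 * queue_max_sectors_store() above.
 */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/block/hda/queue/max_sectors_kb", "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("max_sectors_kb = %s", buf);
		fclose(f);
	}

	f = fopen("/sys/block/hda/queue/max_sectors_kb", "w");
	if (f) {
		fprintf(f, "128\n");
		fclose(f);
	}
	return 0;
}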
3631
3632void blk_unregister_queue(struct gendisk *disk)
3633{
3634 request_queue_t *q = disk->queue;
3635
3636 if (q && q->request_fn) {
3637 elv_unregister_queue(q);
3638
3639 kobject_unregister(&q->kobj);
3640 kobject_put(&disk->kobj);
3641 }
3642}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
new file mode 100644
index 000000000000..6f011d0d8e97
--- /dev/null
+++ b/drivers/block/loop.c
@@ -0,0 +1,1348 @@
1/*
2 * linux/drivers/block/loop.c
3 *
4 * Written by Theodore Ts'o, 3/29/93
5 *
6 * Copyright 1993 by Theodore Ts'o. Redistribution of this file is
7 * permitted under the GNU General Public License.
8 *
9 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
10 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
11 *
12 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
13 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
14 *
15 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
16 *
17 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
18 *
19 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
20 *
21 * Loadable modules and other fixes by AK, 1998
22 *
23 * Make real block number available to downstream transfer functions, enables
24 * CBC (and relatives) mode encryption requiring unique IVs per data block.
25 * Reed H. Petty, rhp@draper.net
26 *
27 * Maximum number of loop devices now dynamic via max_loop module parameter.
28 * Russell Kroll <rkroll@exploits.org> 19990701
29 *
30 * Maximum number of loop devices when compiled-in now selectable by passing
31 * max_loop=<1-255> to the kernel on boot.
32 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
33 *
34 * Completely rewrite request handling to be make_request_fn style and
 35 * non-blocking, pushing work to a helper thread. Lots of fixes from
36 * Al Viro too.
37 * Jens Axboe <axboe@suse.de>, Nov 2000
38 *
39 * Support up to 256 loop devices
40 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
41 *
42 * Support for falling back on the write file operation when the address space
43 * operations prepare_write and/or commit_write are not available on the
44 * backing filesystem.
45 * Anton Altaparmakov, 16 Feb 2005
46 *
47 * Still To Fix:
48 * - Advisory locking is ignored here.
49 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
50 *
51 */
52
53#include <linux/config.h>
54#include <linux/module.h>
55#include <linux/moduleparam.h>
56#include <linux/sched.h>
57#include <linux/fs.h>
58#include <linux/file.h>
59#include <linux/stat.h>
60#include <linux/errno.h>
61#include <linux/major.h>
62#include <linux/wait.h>
63#include <linux/blkdev.h>
64#include <linux/blkpg.h>
65#include <linux/init.h>
66#include <linux/devfs_fs_kernel.h>
67#include <linux/smp_lock.h>
68#include <linux/swap.h>
69#include <linux/slab.h>
70#include <linux/loop.h>
71#include <linux/suspend.h>
72#include <linux/writeback.h>
73#include <linux/buffer_head.h> /* for invalidate_bdev() */
74#include <linux/completion.h>
75#include <linux/highmem.h>
76#include <linux/gfp.h>
77
78#include <asm/uaccess.h>
79
80static int max_loop = 8;
81static struct loop_device *loop_dev;
82static struct gendisk **disks;
83
84/*
85 * Transfer functions
86 */
87static int transfer_none(struct loop_device *lo, int cmd,
88 struct page *raw_page, unsigned raw_off,
89 struct page *loop_page, unsigned loop_off,
90 int size, sector_t real_block)
91{
92 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
93 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
94
95 if (cmd == READ)
96 memcpy(loop_buf, raw_buf, size);
97 else
98 memcpy(raw_buf, loop_buf, size);
99
100 kunmap_atomic(raw_buf, KM_USER0);
101 kunmap_atomic(loop_buf, KM_USER1);
102 cond_resched();
103 return 0;
104}
105
106static int transfer_xor(struct loop_device *lo, int cmd,
107 struct page *raw_page, unsigned raw_off,
108 struct page *loop_page, unsigned loop_off,
109 int size, sector_t real_block)
110{
111 char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
112 char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
113 char *in, *out, *key;
114 int i, keysize;
115
116 if (cmd == READ) {
117 in = raw_buf;
118 out = loop_buf;
119 } else {
120 in = loop_buf;
121 out = raw_buf;
122 }
123
124 key = lo->lo_encrypt_key;
125 keysize = lo->lo_encrypt_key_size;
126 for (i = 0; i < size; i++)
127 *out++ = *in++ ^ key[(i & 511) % keysize];
128
129 kunmap_atomic(raw_buf, KM_USER0);
130 kunmap_atomic(loop_buf, KM_USER1);
131 cond_resched();
132 return 0;
133}
134
135static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
136{
137 if (unlikely(info->lo_encrypt_key_size <= 0))
138 return -EINVAL;
139 return 0;
140}
141
142static struct loop_func_table none_funcs = {
143 .number = LO_CRYPT_NONE,
144 .transfer = transfer_none,
145};
146
147static struct loop_func_table xor_funcs = {
148 .number = LO_CRYPT_XOR,
149 .transfer = transfer_xor,
150 .init = xor_init
151};
152
153/* xfer_funcs[0] is special - its release function is never called */
154static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
155 &none_funcs,
156 &xor_funcs
157};
158
159static loff_t get_loop_size(struct loop_device *lo, struct file *file)
160{
161 loff_t size, offset, loopsize;
162
163 /* Compute loopsize in bytes */
164 size = i_size_read(file->f_mapping->host);
165 offset = lo->lo_offset;
166 loopsize = size - offset;
167 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
168 loopsize = lo->lo_sizelimit;
169
170 /*
171 * Unfortunately, if we want to do I/O on the device,
172 * the number of 512-byte sectors has to fit into a sector_t.
173 */
174 return loopsize >> 9;
175}
176
177static int
178figure_loop_size(struct loop_device *lo)
179{
180 loff_t size = get_loop_size(lo, lo->lo_backing_file);
181 sector_t x = (sector_t)size;
182
183 if (unlikely((loff_t)x != size))
184 return -EFBIG;
185
186 set_capacity(disks[lo->lo_number], x);
187 return 0;
188}
189
190static inline int
191lo_do_transfer(struct loop_device *lo, int cmd,
192 struct page *rpage, unsigned roffs,
193 struct page *lpage, unsigned loffs,
194 int size, sector_t rblock)
195{
196 if (unlikely(!lo->transfer))
197 return 0;
198
199 return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
200}
201
202/**
203 * do_lo_send_aops - helper for writing data to a loop device
204 *
205 * This is the fast version for backing filesystems which implement the address
206 * space operations prepare_write and commit_write.
207 */
208static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
209 int bsize, loff_t pos, struct page *page)
210{
211 struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
212 struct address_space *mapping = file->f_mapping;
213 struct address_space_operations *aops = mapping->a_ops;
214 pgoff_t index;
215 unsigned offset, bv_offs;
216 int len, ret = 0;
217
218 down(&mapping->host->i_sem);
219 index = pos >> PAGE_CACHE_SHIFT;
220 offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
221 bv_offs = bvec->bv_offset;
222 len = bvec->bv_len;
223 while (len > 0) {
224 sector_t IV;
225 unsigned size;
226 int transfer_result;
227
228 IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
229 size = PAGE_CACHE_SIZE - offset;
230 if (size > len)
231 size = len;
232 page = grab_cache_page(mapping, index);
233 if (unlikely(!page))
234 goto fail;
235 if (unlikely(aops->prepare_write(file, page, offset,
236 offset + size)))
237 goto unlock;
238 transfer_result = lo_do_transfer(lo, WRITE, page, offset,
239 bvec->bv_page, bv_offs, size, IV);
240 if (unlikely(transfer_result)) {
241 char *kaddr;
242
243 /*
244 * The transfer failed, but we still write the data to
245 * keep prepare/commit calls balanced.
246 */
247 printk(KERN_ERR "loop: transfer error block %llu\n",
248 (unsigned long long)index);
249 kaddr = kmap_atomic(page, KM_USER0);
250 memset(kaddr + offset, 0, size);
251 kunmap_atomic(kaddr, KM_USER0);
252 }
253 flush_dcache_page(page);
254 if (unlikely(aops->commit_write(file, page, offset,
255 offset + size)))
256 goto unlock;
257 if (unlikely(transfer_result))
258 goto unlock;
259 bv_offs += size;
260 len -= size;
261 offset = 0;
262 index++;
263 pos += size;
264 unlock_page(page);
265 page_cache_release(page);
266 }
267out:
268 up(&mapping->host->i_sem);
269 return ret;
270unlock:
271 unlock_page(page);
272 page_cache_release(page);
273fail:
274 ret = -1;
275 goto out;
276}
277
278/**
279 * __do_lo_send_write - helper for writing data to a loop device
280 *
281 * This helper just factors out common code between do_lo_send_direct_write()
282 * and do_lo_send_write().
283 */
284static inline int __do_lo_send_write(struct file *file,
285 u8 __user *buf, const int len, loff_t pos)
286{
287 ssize_t bw;
288 mm_segment_t old_fs = get_fs();
289
290 set_fs(get_ds());
291 bw = file->f_op->write(file, buf, len, &pos);
292 set_fs(old_fs);
293 if (likely(bw == len))
294 return 0;
295 printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
296 (unsigned long long)pos, len);
297 if (bw >= 0)
298 bw = -EIO;
299 return bw;
300}
301
302/**
303 * do_lo_send_direct_write - helper for writing data to a loop device
304 *
305 * This is the fast, non-transforming version for backing filesystems which do
306 * not implement the address space operations prepare_write and commit_write.
307 * It uses the write file operation which should be present on all writeable
308 * filesystems.
309 */
310static int do_lo_send_direct_write(struct loop_device *lo,
311 struct bio_vec *bvec, int bsize, loff_t pos, struct page *page)
312{
313 ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
314 (u8 __user *)kmap(bvec->bv_page) + bvec->bv_offset,
315 bvec->bv_len, pos);
316 kunmap(bvec->bv_page);
317 cond_resched();
318 return bw;
319}
320
321/**
322 * do_lo_send_write - helper for writing data to a loop device
323 *
324 * This is the slow, transforming version for filesystems which do not
325 * implement the address space operations prepare_write and commit_write. It
326 * uses the write file operation which should be present on all writeable
327 * filesystems.
328 *
329 * Using fops->write is slower than using aops->{prepare,commit}_write in the
 330 * transforming case because we need to double buffer the data: we cannot do
 331 * the transformations in place, as we do not have direct access to the
332 * destination pages of the backing file.
333 */
334static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
335 int bsize, loff_t pos, struct page *page)
336{
337 int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
338 bvec->bv_offset, bvec->bv_len, pos >> 9);
339 if (likely(!ret))
340 return __do_lo_send_write(lo->lo_backing_file,
341 (u8 __user *)page_address(page), bvec->bv_len,
342 pos);
343 printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
344 "length %i.\n", (unsigned long long)pos, bvec->bv_len);
345 if (ret > 0)
346 ret = -EIO;
347 return ret;
348}
349
350static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
351 loff_t pos)
352{
353 int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t,
354 struct page *page);
355 struct bio_vec *bvec;
356 struct page *page = NULL;
357 int i, ret = 0;
358
359 do_lo_send = do_lo_send_aops;
360 if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
361 do_lo_send = do_lo_send_direct_write;
362 if (lo->transfer != transfer_none) {
363 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
364 if (unlikely(!page))
365 goto fail;
366 kmap(page);
367 do_lo_send = do_lo_send_write;
368 }
369 }
370 bio_for_each_segment(bvec, bio, i) {
371 ret = do_lo_send(lo, bvec, bsize, pos, page);
372 if (ret < 0)
373 break;
374 pos += bvec->bv_len;
375 }
376 if (page) {
377 kunmap(page);
378 __free_page(page);
379 }
380out:
381 return ret;
382fail:
383 printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
384 ret = -ENOMEM;
385 goto out;
386}
387
388struct lo_read_data {
389 struct loop_device *lo;
390 struct page *page;
391 unsigned offset;
392 int bsize;
393};
394
395static int
396lo_read_actor(read_descriptor_t *desc, struct page *page,
397 unsigned long offset, unsigned long size)
398{
399 unsigned long count = desc->count;
400 struct lo_read_data *p = desc->arg.data;
401 struct loop_device *lo = p->lo;
402 sector_t IV;
403
404 IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
405
406 if (size > count)
407 size = count;
408
409 if (lo_do_transfer(lo, READ, page, offset, p->page, p->offset, size, IV)) {
410 size = 0;
411 printk(KERN_ERR "loop: transfer error block %ld\n",
412 page->index);
413 desc->error = -EINVAL;
414 }
415
416 flush_dcache_page(p->page);
417
418 desc->count = count - size;
419 desc->written += size;
420 p->offset += size;
421 return size;
422}
423
424static int
425do_lo_receive(struct loop_device *lo,
426 struct bio_vec *bvec, int bsize, loff_t pos)
427{
428 struct lo_read_data cookie;
429 struct file *file;
430 int retval;
431
432 cookie.lo = lo;
433 cookie.page = bvec->bv_page;
434 cookie.offset = bvec->bv_offset;
435 cookie.bsize = bsize;
436 file = lo->lo_backing_file;
437 retval = file->f_op->sendfile(file, &pos, bvec->bv_len,
438 lo_read_actor, &cookie);
439 return (retval < 0)? retval: 0;
440}
441
442static int
443lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
444{
445 struct bio_vec *bvec;
446 int i, ret = 0;
447
448 bio_for_each_segment(bvec, bio, i) {
449 ret = do_lo_receive(lo, bvec, bsize, pos);
450 if (ret < 0)
451 break;
452 pos += bvec->bv_len;
453 }
454 return ret;
455}
456
457static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
458{
459 loff_t pos;
460 int ret;
461
462 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
463 if (bio_rw(bio) == WRITE)
464 ret = lo_send(lo, bio, lo->lo_blocksize, pos);
465 else
466 ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
467 return ret;
468}
469
470/*
471 * Add bio to back of pending list
472 */
473static void loop_add_bio(struct loop_device *lo, struct bio *bio)
474{
475 unsigned long flags;
476
477 spin_lock_irqsave(&lo->lo_lock, flags);
478 if (lo->lo_biotail) {
479 lo->lo_biotail->bi_next = bio;
480 lo->lo_biotail = bio;
481 } else
482 lo->lo_bio = lo->lo_biotail = bio;
483 spin_unlock_irqrestore(&lo->lo_lock, flags);
484
485 up(&lo->lo_bh_mutex);
486}
487
488/*
489 * Grab first pending buffer
490 */
491static struct bio *loop_get_bio(struct loop_device *lo)
492{
493 struct bio *bio;
494
495 spin_lock_irq(&lo->lo_lock);
496 if ((bio = lo->lo_bio)) {
497 if (bio == lo->lo_biotail)
498 lo->lo_biotail = NULL;
499 lo->lo_bio = bio->bi_next;
500 bio->bi_next = NULL;
501 }
502 spin_unlock_irq(&lo->lo_lock);
503
504 return bio;
505}
506
507static int loop_make_request(request_queue_t *q, struct bio *old_bio)
508{
509 struct loop_device *lo = q->queuedata;
510 int rw = bio_rw(old_bio);
511
512 if (!lo)
513 goto out;
514
515 spin_lock_irq(&lo->lo_lock);
516 if (lo->lo_state != Lo_bound)
517 goto inactive;
518 atomic_inc(&lo->lo_pending);
519 spin_unlock_irq(&lo->lo_lock);
520
521 if (rw == WRITE) {
522 if (lo->lo_flags & LO_FLAGS_READ_ONLY)
523 goto err;
524 } else if (rw == READA) {
525 rw = READ;
526 } else if (rw != READ) {
527 printk(KERN_ERR "loop: unknown command (%x)\n", rw);
528 goto err;
529 }
530 loop_add_bio(lo, old_bio);
531 return 0;
532err:
533 if (atomic_dec_and_test(&lo->lo_pending))
534 up(&lo->lo_bh_mutex);
535out:
536 bio_io_error(old_bio, old_bio->bi_size);
537 return 0;
538inactive:
539 spin_unlock_irq(&lo->lo_lock);
540 goto out;
541}
542
543/*
544 * kick off io on the underlying address space
545 */
546static void loop_unplug(request_queue_t *q)
547{
548 struct loop_device *lo = q->queuedata;
549
550 clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
551 blk_run_address_space(lo->lo_backing_file->f_mapping);
552}
553
554struct switch_request {
555 struct file *file;
556 struct completion wait;
557};
558
559static void do_loop_switch(struct loop_device *, struct switch_request *);
560
561static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
562{
563 int ret;
564
565 if (unlikely(!bio->bi_bdev)) {
566 do_loop_switch(lo, bio->bi_private);
567 bio_put(bio);
568 } else {
569 ret = do_bio_filebacked(lo, bio);
570 bio_endio(bio, bio->bi_size, ret);
571 }
572}
573
574/*
 575 * worker thread that handles reads/writes to file-backed loop devices,
 576 * to avoid blocking in our make_request_fn. It also does loop decrypting
 577 * on reads for block-backed loop, as that is too heavy to do from
578 * b_end_io context where irqs may be disabled.
579 */
580static int loop_thread(void *data)
581{
582 struct loop_device *lo = data;
583 struct bio *bio;
584
585 daemonize("loop%d", lo->lo_number);
586
587 /*
588 * loop can be used in an encrypted device,
589 * hence, it mustn't be stopped at all
590 * because it could be indirectly used during suspension
591 */
592 current->flags |= PF_NOFREEZE;
593
594 set_user_nice(current, -20);
595
596 lo->lo_state = Lo_bound;
597 atomic_inc(&lo->lo_pending);
598
599 /*
600 * up sem, we are running
601 */
602 up(&lo->lo_sem);
603
604 for (;;) {
605 down_interruptible(&lo->lo_bh_mutex);
606 /*
607 * could be upped because of tear-down, not because of
608 * pending work
609 */
610 if (!atomic_read(&lo->lo_pending))
611 break;
612
613 bio = loop_get_bio(lo);
614 if (!bio) {
615 printk("loop: missing bio\n");
616 continue;
617 }
618 loop_handle_bio(lo, bio);
619
620 /*
621 * upped both for pending work and tear-down, lo_pending
622 * will hit zero then
623 */
624 if (atomic_dec_and_test(&lo->lo_pending))
625 break;
626 }
627
628 up(&lo->lo_sem);
629 return 0;
630}
631
632/*
633 * loop_switch performs the hard work of switching a backing store.
 634 * First it needs to flush existing IO; it does this by sending a magic
635 * BIO down the pipe. The completion of this BIO does the actual switch.
636 */
637static int loop_switch(struct loop_device *lo, struct file *file)
638{
639 struct switch_request w;
640 struct bio *bio = bio_alloc(GFP_KERNEL, 1);
641 if (!bio)
642 return -ENOMEM;
643 init_completion(&w.wait);
644 w.file = file;
645 bio->bi_private = &w;
646 bio->bi_bdev = NULL;
647 loop_make_request(lo->lo_queue, bio);
648 wait_for_completion(&w.wait);
649 return 0;
650}
651
652/*
653 * Do the actual switch; called from the BIO completion routine
654 */
655static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
656{
657 struct file *file = p->file;
658 struct file *old_file = lo->lo_backing_file;
659 struct address_space *mapping = file->f_mapping;
660
661 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
662 lo->lo_backing_file = file;
663 lo->lo_blocksize = mapping->host->i_blksize;
664 lo->old_gfp_mask = mapping_gfp_mask(mapping);
665 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
666 complete(&p->wait);
667}
668
669
670/*
 671 * loop_change_fd switches the backing store of a loopback device to
672 * a new file. This is useful for operating system installers to free up
673 * the original file and in High Availability environments to switch to
674 * an alternative location for the content in case of server meltdown.
675 * This can only work if the loop device is used read-only, and if the
676 * new backing store is the same size and type as the old backing store.
677 */
678static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
679 struct block_device *bdev, unsigned int arg)
680{
681 struct file *file, *old_file;
682 struct inode *inode;
683 int error;
684
685 error = -ENXIO;
686 if (lo->lo_state != Lo_bound)
687 goto out;
688
689 /* the loop device has to be read-only */
690 error = -EINVAL;
691 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
692 goto out;
693
694 error = -EBADF;
695 file = fget(arg);
696 if (!file)
697 goto out;
698
699 inode = file->f_mapping->host;
700 old_file = lo->lo_backing_file;
701
702 error = -EINVAL;
703
704 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
705 goto out_putf;
706
707 /* new backing store needs to support loop (eg sendfile) */
708 if (!inode->i_fop->sendfile)
709 goto out_putf;
710
711 /* size of the new backing store needs to be the same */
712 if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
713 goto out_putf;
714
715 /* and ... switch */
716 error = loop_switch(lo, file);
717 if (error)
718 goto out_putf;
719
720 fput(old_file);
721 return 0;
722
723 out_putf:
724 fput(file);
725 out:
726 return error;
727}
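/*
 * Userspace sketch of the switch described above (paths and the helper
 * name are illustrative; assumes a <linux/loop.h> that defines
 * LOOP_CHANGE_FD): point a read-only loop device at a different backing
 * file of the same size.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int my_change_backing_file(const char *loop_path, const char *new_path)
{
	int loop_fd = open(loop_path, O_RDONLY);
	int new_fd = open(new_path, O_RDONLY);
	int ret = -1;

	if (loop_fd >= 0 && new_fd >= 0)
		ret = ioctl(loop_fd, LOOP_CHANGE_FD, new_fd);

	if (new_fd >= 0)
		close(new_fd);
	if (loop_fd >= 0)
		close(loop_fd);
	return ret;
}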
728
729static inline int is_loop_device(struct file *file)
730{
731 struct inode *i = file->f_mapping->host;
732
733 return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
734}
735
736static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
737 struct block_device *bdev, unsigned int arg)
738{
739 struct file *file, *f;
740 struct inode *inode;
741 struct address_space *mapping;
742 unsigned lo_blocksize;
743 int lo_flags = 0;
744 int error;
745 loff_t size;
746
747 /* This is safe, since we have a reference from open(). */
748 __module_get(THIS_MODULE);
749
750 error = -EBADF;
751 file = fget(arg);
752 if (!file)
753 goto out;
754
755 error = -EBUSY;
756 if (lo->lo_state != Lo_unbound)
757 goto out_putf;
758
759 /* Avoid recursion */
760 f = file;
761 while (is_loop_device(f)) {
762 struct loop_device *l;
763
764 if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev)
765 goto out_putf;
766
767 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
768 if (l->lo_state == Lo_unbound) {
769 error = -EINVAL;
770 goto out_putf;
771 }
772 f = l->lo_backing_file;
773 }
774
775 mapping = file->f_mapping;
776 inode = mapping->host;
777
778 if (!(file->f_mode & FMODE_WRITE))
779 lo_flags |= LO_FLAGS_READ_ONLY;
780
781 error = -EINVAL;
782 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
783 struct address_space_operations *aops = mapping->a_ops;
784 /*
785 * If we can't read - sorry. If we only can't write - well,
786 * it's going to be read-only.
787 */
788 if (!file->f_op->sendfile)
789 goto out_putf;
790 if (aops->prepare_write && aops->commit_write)
791 lo_flags |= LO_FLAGS_USE_AOPS;
792 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
793 lo_flags |= LO_FLAGS_READ_ONLY;
794
795 lo_blocksize = inode->i_blksize;
796 error = 0;
797 } else {
798 goto out_putf;
799 }
800
801 size = get_loop_size(lo, file);
802
803 if ((loff_t)(sector_t)size != size) {
804 error = -EFBIG;
805 goto out_putf;
806 }
807
808 if (!(lo_file->f_mode & FMODE_WRITE))
809 lo_flags |= LO_FLAGS_READ_ONLY;
810
811 set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
812
813 lo->lo_blocksize = lo_blocksize;
814 lo->lo_device = bdev;
815 lo->lo_flags = lo_flags;
816 lo->lo_backing_file = file;
817 lo->transfer = NULL;
818 lo->ioctl = NULL;
819 lo->lo_sizelimit = 0;
820 lo->old_gfp_mask = mapping_gfp_mask(mapping);
821 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
822
823 lo->lo_bio = lo->lo_biotail = NULL;
824
825 /*
826 * set queue make_request_fn, and add limits based on lower level
827 * device
828 */
829 blk_queue_make_request(lo->lo_queue, loop_make_request);
830 lo->lo_queue->queuedata = lo;
831 lo->lo_queue->unplug_fn = loop_unplug;
832
833 set_capacity(disks[lo->lo_number], size);
834 bd_set_size(bdev, size << 9);
835
836 set_blocksize(bdev, lo_blocksize);
837
838 kernel_thread(loop_thread, lo, CLONE_KERNEL);
839 down(&lo->lo_sem);
840 return 0;
841
842 out_putf:
843 fput(file);
844 out:
845 /* This is safe: open() is still holding a reference. */
846 module_put(THIS_MODULE);
847 return error;
848}
849
850static int
851loop_release_xfer(struct loop_device *lo)
852{
853 int err = 0;
854 struct loop_func_table *xfer = lo->lo_encryption;
855
856 if (xfer) {
857 if (xfer->release)
858 err = xfer->release(lo);
859 lo->transfer = NULL;
860 lo->lo_encryption = NULL;
861 module_put(xfer->owner);
862 }
863 return err;
864}
865
866static int
867loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
868 const struct loop_info64 *i)
869{
870 int err = 0;
871
872 if (xfer) {
873 struct module *owner = xfer->owner;
874
875 if (!try_module_get(owner))
876 return -EINVAL;
877 if (xfer->init)
878 err = xfer->init(lo, i);
879 if (err)
880 module_put(owner);
881 else
882 lo->lo_encryption = xfer;
883 }
884 return err;
885}
886
887static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
888{
889 struct file *filp = lo->lo_backing_file;
890 int gfp = lo->old_gfp_mask;
891
892 if (lo->lo_state != Lo_bound)
893 return -ENXIO;
894
895 if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
896 return -EBUSY;
897
898 if (filp == NULL)
899 return -EINVAL;
900
901 spin_lock_irq(&lo->lo_lock);
902 lo->lo_state = Lo_rundown;
903 if (atomic_dec_and_test(&lo->lo_pending))
904 up(&lo->lo_bh_mutex);
905 spin_unlock_irq(&lo->lo_lock);
906
907 down(&lo->lo_sem);
908
909 lo->lo_backing_file = NULL;
910
911 loop_release_xfer(lo);
912 lo->transfer = NULL;
913 lo->ioctl = NULL;
914 lo->lo_device = NULL;
915 lo->lo_encryption = NULL;
916 lo->lo_offset = 0;
917 lo->lo_sizelimit = 0;
918 lo->lo_encrypt_key_size = 0;
919 lo->lo_flags = 0;
920 memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
921 memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
922 memset(lo->lo_file_name, 0, LO_NAME_SIZE);
923 invalidate_bdev(bdev, 0);
924 set_capacity(disks[lo->lo_number], 0);
925 bd_set_size(bdev, 0);
926 mapping_set_gfp_mask(filp->f_mapping, gfp);
927 lo->lo_state = Lo_unbound;
928 fput(filp);
929 /* This is safe: open() is still holding a reference. */
930 module_put(THIS_MODULE);
931 return 0;
932}
933
934static int
935loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
936{
937 int err;
938 struct loop_func_table *xfer;
939
940 if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
941 !capable(CAP_SYS_ADMIN))
942 return -EPERM;
943 if (lo->lo_state != Lo_bound)
944 return -ENXIO;
945 if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
946 return -EINVAL;
947
948 err = loop_release_xfer(lo);
949 if (err)
950 return err;
951
952 if (info->lo_encrypt_type) {
953 unsigned int type = info->lo_encrypt_type;
954
955 if (type >= MAX_LO_CRYPT)
956 return -EINVAL;
957 xfer = xfer_funcs[type];
958 if (xfer == NULL)
959 return -EINVAL;
960 } else
961 xfer = NULL;
962
963 err = loop_init_xfer(lo, xfer, info);
964 if (err)
965 return err;
966
967 if (lo->lo_offset != info->lo_offset ||
968 lo->lo_sizelimit != info->lo_sizelimit) {
969 lo->lo_offset = info->lo_offset;
970 lo->lo_sizelimit = info->lo_sizelimit;
971 if (figure_loop_size(lo))
972 return -EFBIG;
973 }
974
975 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
976 memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
977 lo->lo_file_name[LO_NAME_SIZE-1] = 0;
978 lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
979
980 if (!xfer)
981 xfer = &none_funcs;
982 lo->transfer = xfer->transfer;
983 lo->ioctl = xfer->ioctl;
984
985 lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
986 lo->lo_init[0] = info->lo_init[0];
987 lo->lo_init[1] = info->lo_init[1];
988 if (info->lo_encrypt_key_size) {
989 memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
990 info->lo_encrypt_key_size);
991 lo->lo_key_owner = current->uid;
992 }
993
994 return 0;
995}
996
997static int
998loop_get_status(struct loop_device *lo, struct loop_info64 *info)
999{
1000 struct file *file = lo->lo_backing_file;
1001 struct kstat stat;
1002 int error;
1003
1004 if (lo->lo_state != Lo_bound)
1005 return -ENXIO;
1006 error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
1007 if (error)
1008 return error;
1009 memset(info, 0, sizeof(*info));
1010 info->lo_number = lo->lo_number;
1011 info->lo_device = huge_encode_dev(stat.dev);
1012 info->lo_inode = stat.ino;
1013 info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
1014 info->lo_offset = lo->lo_offset;
1015 info->lo_sizelimit = lo->lo_sizelimit;
1016 info->lo_flags = lo->lo_flags;
1017 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
1018 memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
1019 info->lo_encrypt_type =
1020 lo->lo_encryption ? lo->lo_encryption->number : 0;
1021 if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
1022 info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
1023 memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
1024 lo->lo_encrypt_key_size);
1025 }
1026 return 0;
1027}
1028
1029static void
1030loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
1031{
1032 memset(info64, 0, sizeof(*info64));
1033 info64->lo_number = info->lo_number;
1034 info64->lo_device = info->lo_device;
1035 info64->lo_inode = info->lo_inode;
1036 info64->lo_rdevice = info->lo_rdevice;
1037 info64->lo_offset = info->lo_offset;
1038 info64->lo_sizelimit = 0;
1039 info64->lo_encrypt_type = info->lo_encrypt_type;
1040 info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
1041 info64->lo_flags = info->lo_flags;
1042 info64->lo_init[0] = info->lo_init[0];
1043 info64->lo_init[1] = info->lo_init[1];
1044 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1045 memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
1046 else
1047 memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
1048 memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
1049}
1050
1051static int
1052loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
1053{
1054 memset(info, 0, sizeof(*info));
1055 info->lo_number = info64->lo_number;
1056 info->lo_device = info64->lo_device;
1057 info->lo_inode = info64->lo_inode;
1058 info->lo_rdevice = info64->lo_rdevice;
1059 info->lo_offset = info64->lo_offset;
1060 info->lo_encrypt_type = info64->lo_encrypt_type;
1061 info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
1062 info->lo_flags = info64->lo_flags;
1063 info->lo_init[0] = info64->lo_init[0];
1064 info->lo_init[1] = info64->lo_init[1];
1065 if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
1066 memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
1067 else
1068 memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
1069 memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
1070
1071 /* error in case values were truncated */
1072 if (info->lo_device != info64->lo_device ||
1073 info->lo_rdevice != info64->lo_rdevice ||
1074 info->lo_inode != info64->lo_inode ||
1075 info->lo_offset != info64->lo_offset)
1076 return -EOVERFLOW;
1077
1078 return 0;
1079}
1080
1081static int
1082loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
1083{
1084 struct loop_info info;
1085 struct loop_info64 info64;
1086
1087 if (copy_from_user(&info, arg, sizeof (struct loop_info)))
1088 return -EFAULT;
1089 loop_info64_from_old(&info, &info64);
1090 return loop_set_status(lo, &info64);
1091}
1092
1093static int
1094loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
1095{
1096 struct loop_info64 info64;
1097
1098 if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
1099 return -EFAULT;
1100 return loop_set_status(lo, &info64);
1101}
1102
1103static int
1104loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
1105 struct loop_info info;
1106 struct loop_info64 info64;
1107 int err = 0;
1108
1109 if (!arg)
1110 err = -EINVAL;
1111 if (!err)
1112 err = loop_get_status(lo, &info64);
1113 if (!err)
1114 err = loop_info64_to_old(&info64, &info);
1115 if (!err && copy_to_user(arg, &info, sizeof(info)))
1116 err = -EFAULT;
1117
1118 return err;
1119}
1120
1121static int
1122loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
1123 struct loop_info64 info64;
1124 int err = 0;
1125
1126 if (!arg)
1127 err = -EINVAL;
1128 if (!err)
1129 err = loop_get_status(lo, &info64);
1130 if (!err && copy_to_user(arg, &info64, sizeof(info64)))
1131 err = -EFAULT;
1132
1133 return err;
1134}
1135
1136static int lo_ioctl(struct inode * inode, struct file * file,
1137 unsigned int cmd, unsigned long arg)
1138{
1139 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1140 int err;
1141
1142 down(&lo->lo_ctl_mutex);
1143 switch (cmd) {
1144 case LOOP_SET_FD:
1145 err = loop_set_fd(lo, file, inode->i_bdev, arg);
1146 break;
1147 case LOOP_CHANGE_FD:
1148 err = loop_change_fd(lo, file, inode->i_bdev, arg);
1149 break;
1150 case LOOP_CLR_FD:
1151 err = loop_clr_fd(lo, inode->i_bdev);
1152 break;
1153 case LOOP_SET_STATUS:
1154 err = loop_set_status_old(lo, (struct loop_info __user *) arg);
1155 break;
1156 case LOOP_GET_STATUS:
1157 err = loop_get_status_old(lo, (struct loop_info __user *) arg);
1158 break;
1159 case LOOP_SET_STATUS64:
1160 err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
1161 break;
1162 case LOOP_GET_STATUS64:
1163 err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
1164 break;
1165 default:
1166 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
1167 }
1168 up(&lo->lo_ctl_mutex);
1169 return err;
1170}
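/*
 * Userspace sketch of the ioctl interface dispatched above: a bare-bones
 * losetup that binds a regular file to a loop device, labels it, and
 * later tears it down.  The paths are assumptions and error handling is
 * intentionally minimal.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int main(void)
{
	struct loop_info64 info;
	int file_fd = open("/tmp/backing.img", O_RDWR);
	int loop_fd = open("/dev/loop0", O_RDWR);

	if (file_fd < 0 || loop_fd < 0)
		return 1;

	if (ioctl(loop_fd, LOOP_SET_FD, file_fd) < 0)	/* -> loop_set_fd() */
		return 1;

	memset(&info, 0, sizeof(info));
	strncpy((char *) info.lo_file_name, "backing.img", LO_NAME_SIZE - 1);
	ioctl(loop_fd, LOOP_SET_STATUS64, &info);	/* -> loop_set_status64() */

	/* ... use /dev/loop0 as a block device, then: */
	ioctl(loop_fd, LOOP_CLR_FD, 0);			/* -> loop_clr_fd() */

	close(loop_fd);
	close(file_fd);
	return 0;
}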
1171
1172static int lo_open(struct inode *inode, struct file *file)
1173{
1174 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1175
1176 down(&lo->lo_ctl_mutex);
1177 lo->lo_refcnt++;
1178 up(&lo->lo_ctl_mutex);
1179
1180 return 0;
1181}
1182
1183static int lo_release(struct inode *inode, struct file *file)
1184{
1185 struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
1186
1187 down(&lo->lo_ctl_mutex);
1188 --lo->lo_refcnt;
1189 up(&lo->lo_ctl_mutex);
1190
1191 return 0;
1192}
1193
1194static struct block_device_operations lo_fops = {
1195 .owner = THIS_MODULE,
1196 .open = lo_open,
1197 .release = lo_release,
1198 .ioctl = lo_ioctl,
1199};
1200
1201/*
 1202 * And now the module code and kernel interface.
1203 */
1204module_param(max_loop, int, 0);
1205MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
1206MODULE_LICENSE("GPL");
1207MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
1208
1209int loop_register_transfer(struct loop_func_table *funcs)
1210{
1211 unsigned int n = funcs->number;
1212
1213 if (n >= MAX_LO_CRYPT || xfer_funcs[n])
1214 return -EINVAL;
1215 xfer_funcs[n] = funcs;
1216 return 0;
1217}
1218
1219int loop_unregister_transfer(int number)
1220{
1221 unsigned int n = number;
1222 struct loop_device *lo;
1223 struct loop_func_table *xfer;
1224
1225 if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
1226 return -EINVAL;
1227
1228 xfer_funcs[n] = NULL;
1229
1230 for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
1231 down(&lo->lo_ctl_mutex);
1232
1233 if (lo->lo_encryption == xfer)
1234 loop_release_xfer(lo);
1235
1236 up(&lo->lo_ctl_mutex);
1237 }
1238
1239 return 0;
1240}
1241
1242EXPORT_SYMBOL(loop_register_transfer);
1243EXPORT_SYMBOL(loop_unregister_transfer);
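/*
 * Module sketch (hypothetical; the my_* names are illustrative): wiring
 * an out-of-tree transfer table into the exports above, the same way the
 * built-in xor_funcs table is hooked up.  cryptoloop uses this interface
 * with the LO_CRYPT_CRYPTOAPI slot.
 */
#include <linux/module.h>
#include <linux/loop.h>

static int my_transfer(struct loop_device *lo, int cmd,
		       struct page *raw_page, unsigned raw_off,
		       struct page *loop_page, unsigned loop_off,
		       int size, sector_t real_block)
{
	/*
	 * Transform (or just copy, as transfer_none() does) between
	 * raw_page and loop_page here.
	 */
	return 0;
}

static struct loop_func_table my_funcs = {
	.number   = LO_CRYPT_CRYPTOAPI,	/* any unused slot < MAX_LO_CRYPT */
	.transfer = my_transfer,
	.owner    = THIS_MODULE,
};

static int __init my_xfer_init(void)
{
	return loop_register_transfer(&my_funcs);
}

static void __exit my_xfer_exit(void)
{
	loop_unregister_transfer(LO_CRYPT_CRYPTOAPI);
}

module_init(my_xfer_init);
module_exit(my_xfer_exit);
MODULE_LICENSE("GPL");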
1244
1245static int __init loop_init(void)
1246{
1247 int i;
1248
1249 if (max_loop < 1 || max_loop > 256) {
1250 printk(KERN_WARNING "loop: invalid max_loop (must be between"
1251 " 1 and 256), using default (8)\n");
1252 max_loop = 8;
1253 }
1254
1255 if (register_blkdev(LOOP_MAJOR, "loop"))
1256 return -EIO;
1257
1258 loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
1259 if (!loop_dev)
1260 goto out_mem1;
1261 memset(loop_dev, 0, max_loop * sizeof(struct loop_device));
1262
1263 disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL);
1264 if (!disks)
1265 goto out_mem2;
1266
1267 for (i = 0; i < max_loop; i++) {
1268 disks[i] = alloc_disk(1);
1269 if (!disks[i])
1270 goto out_mem3;
1271 }
1272
1273 devfs_mk_dir("loop");
1274
1275 for (i = 0; i < max_loop; i++) {
1276 struct loop_device *lo = &loop_dev[i];
1277 struct gendisk *disk = disks[i];
1278
1279 memset(lo, 0, sizeof(*lo));
1280 lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
1281 if (!lo->lo_queue)
1282 goto out_mem4;
1283 init_MUTEX(&lo->lo_ctl_mutex);
1284 init_MUTEX_LOCKED(&lo->lo_sem);
1285 init_MUTEX_LOCKED(&lo->lo_bh_mutex);
1286 lo->lo_number = i;
1287 spin_lock_init(&lo->lo_lock);
1288 disk->major = LOOP_MAJOR;
1289 disk->first_minor = i;
1290 disk->fops = &lo_fops;
1291 sprintf(disk->disk_name, "loop%d", i);
1292 sprintf(disk->devfs_name, "loop/%d", i);
1293 disk->private_data = lo;
1294 disk->queue = lo->lo_queue;
1295 }
1296
 1297	/* We cannot fail after we call this, so another loop! */
1298 for (i = 0; i < max_loop; i++)
1299 add_disk(disks[i]);
1300 printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
1301 return 0;
1302
1303out_mem4:
1304 while (i--)
1305 blk_put_queue(loop_dev[i].lo_queue);
1306 devfs_remove("loop");
1307 i = max_loop;
1308out_mem3:
1309 while (i--)
1310 put_disk(disks[i]);
1311 kfree(disks);
1312out_mem2:
1313 kfree(loop_dev);
1314out_mem1:
1315 unregister_blkdev(LOOP_MAJOR, "loop");
1316 printk(KERN_ERR "loop: ran out of memory\n");
1317 return -ENOMEM;
1318}
1319
1320static void loop_exit(void)
1321{
1322 int i;
1323
1324 for (i = 0; i < max_loop; i++) {
1325 del_gendisk(disks[i]);
1326 blk_put_queue(loop_dev[i].lo_queue);
1327 put_disk(disks[i]);
1328 }
1329 devfs_remove("loop");
1330 if (unregister_blkdev(LOOP_MAJOR, "loop"))
1331 printk(KERN_WARNING "loop: cannot unregister blkdev\n");
1332
1333 kfree(disks);
1334 kfree(loop_dev);
1335}
1336
1337module_init(loop_init);
1338module_exit(loop_exit);
1339
1340#ifndef MODULE
1341static int __init max_loop_setup(char *str)
1342{
1343 max_loop = simple_strtol(str, NULL, 0);
1344 return 1;
1345}
1346
1347__setup("max_loop=", max_loop_setup);
1348#endif
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
new file mode 100644
index 000000000000..efdf04450bf7
--- /dev/null
+++ b/drivers/block/nbd.c
@@ -0,0 +1,731 @@
1/*
2 * Network block device - make block devices work over TCP
3 *
4 * Note that you can not swap over this thing, yet. Seems to work but
5 * deadlocks sometimes - you can not swap over TCP in general.
6 *
7 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
9 *
10 * (part of code stolen from loop.c)
11 *
12 * 97-3-25 compiled 0-th version, not yet tested it
13 * (it did not work, BTW) (later that day) HEY! it works!
14 * (bit later) hmm, not that much... 2:00am next day:
15 * yes, it works, but it gives something like 50kB/sec
16 * 97-4-01 complete rewrite to make it possible for many requests at
17 * once to be processed
18 * 97-4-11 Making protocol independent of endianity etc.
19 * 97-9-13 Cosmetic changes
20 * 98-5-13 Attempt to make 64-bit-clean on 64-bit machines
21 * 99-1-11 Attempt to make 64-bit-clean on 32-bit machines <ankry@mif.pg.gda.pl>
22 * 01-2-27 Fix to store proper blockcount for kernel (calculated using
23 * BLOCK_SIZE_BITS, not device blocksize) <aga@permonline.ru>
24 * 01-3-11 Make nbd work with new Linux block layer code. It now supports
25 * plugging like all the other block devices. Also added in MSG_MORE to
26 * reduce number of partial TCP segments sent. <steve@chygwyn.com>
27 * 01-12-6 Fix deadlock condition by making queue locks independent of
28 * the transmit lock. <steve@chygwyn.com>
29 * 02-10-11 Allow hung xmit to be aborted via SIGKILL & various fixes.
30 * <Paul.Clements@SteelEye.com> <James.Bottomley@SteelEye.com>
31 * 03-06-22 Make nbd work with new linux 2.5 block layer design. This fixes
32 * memory corruption from module removal and possible memory corruption
33 * from sending/receiving disk data. <ldl@aros.net>
34 * 03-06-23 Cosmetic changes. <ldl@aros.net>
35 * 03-06-23 Enhance diagnostics support. <ldl@aros.net>
36 * 03-06-24 Remove unneeded blksize_bits field from nbd_device struct.
37 * <ldl@aros.net>
38 * 03-06-24 Cleanup PARANOIA usage & code. <ldl@aros.net>
39 * 04-02-19 Remove PARANOIA, plus various cleanups (Paul Clements)
40 * possible FIXME: make set_sock / set_blksize / set_size / do_it one syscall
41 * why not: would need access_ok and friends, would share yet another
42 * structure with userland
43 */
44
45#include <linux/major.h>
46
47#include <linux/blkdev.h>
48#include <linux/module.h>
49#include <linux/init.h>
50#include <linux/sched.h>
51#include <linux/fs.h>
52#include <linux/bio.h>
53#include <linux/stat.h>
54#include <linux/errno.h>
55#include <linux/file.h>
56#include <linux/ioctl.h>
57#include <net/sock.h>
58
59#include <linux/devfs_fs_kernel.h>
60
61#include <asm/uaccess.h>
62#include <asm/types.h>
63
64#include <linux/nbd.h>
65
66#define LO_MAGIC 0x68797548
67
68#ifdef NDEBUG
69#define dprintk(flags, fmt...)
70#else /* NDEBUG */
71#define dprintk(flags, fmt...) do { \
72 if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
73} while (0)
74#define DBG_IOCTL 0x0004
75#define DBG_INIT 0x0010
76#define DBG_EXIT 0x0020
77#define DBG_BLKDEV 0x0100
78#define DBG_RX 0x0200
79#define DBG_TX 0x0400
80static unsigned int debugflags;
81#endif /* NDEBUG */
82
83static struct nbd_device nbd_dev[MAX_NBD];
84
85/*
86 * Use just one lock (or at most 1 per NIC). Two arguments for this:
87 * 1. Each NIC is essentially a synchronization point for all servers
88 * accessed through that NIC so there's no need to have more locks
89 * than NICs anyway.
90 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
91 * down each lock to the point where they're actually slower than just
92 * a single lock.
93 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
94 */
95static DEFINE_SPINLOCK(nbd_lock);
96
97#ifndef NDEBUG
98static const char *ioctl_cmd_to_ascii(int cmd)
99{
100 switch (cmd) {
101 case NBD_SET_SOCK: return "set-sock";
102 case NBD_SET_BLKSIZE: return "set-blksize";
103 case NBD_SET_SIZE: return "set-size";
104 case NBD_DO_IT: return "do-it";
105 case NBD_CLEAR_SOCK: return "clear-sock";
106 case NBD_CLEAR_QUE: return "clear-que";
107 case NBD_PRINT_DEBUG: return "print-debug";
108 case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
109 case NBD_DISCONNECT: return "disconnect";
110 case BLKROSET: return "set-read-only";
111 case BLKFLSBUF: return "flush-buffer-cache";
112 }
113 return "unknown";
114}
115
116static const char *nbdcmd_to_ascii(int cmd)
117{
118 switch (cmd) {
119 case NBD_CMD_READ: return "read";
120 case NBD_CMD_WRITE: return "write";
121 case NBD_CMD_DISC: return "disconnect";
122 }
123 return "invalid";
124}
125#endif /* NDEBUG */
126
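/* Complete a request under the queue lock; uptodate is 0 when the server reported an error. */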
127static void nbd_end_request(struct request *req)
128{
129 int uptodate = (req->errors == 0) ? 1 : 0;
130 request_queue_t *q = req->q;
131 unsigned long flags;
132
133 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
134 req, uptodate? "done": "failed");
135
136 spin_lock_irqsave(q->queue_lock, flags);
137 if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
138 end_that_request_last(req);
139 }
140 spin_unlock_irqrestore(q->queue_lock, flags);
141}
142
143/*
144 * Send or receive packet.
145 */
146static int sock_xmit(struct socket *sock, int send, void *buf, int size,
147 int msg_flags)
148{
149 int result;
150 struct msghdr msg;
151 struct kvec iov;
152 unsigned long flags;
153 sigset_t oldset;
154
155 /* Allow interception of SIGKILL only
156 * Don't allow other signals to interrupt the transmission */
157 spin_lock_irqsave(&current->sighand->siglock, flags);
158 oldset = current->blocked;
159 sigfillset(&current->blocked);
160 sigdelsetmask(&current->blocked, sigmask(SIGKILL));
161 recalc_sigpending();
162 spin_unlock_irqrestore(&current->sighand->siglock, flags);
163
164 do {
165 sock->sk->sk_allocation = GFP_NOIO;
166 iov.iov_base = buf;
167 iov.iov_len = size;
168 msg.msg_name = NULL;
169 msg.msg_namelen = 0;
170 msg.msg_control = NULL;
171 msg.msg_controllen = 0;
173 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
174
175 if (send)
176 result = kernel_sendmsg(sock, &msg, &iov, 1, size);
177 else
178 result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
179
180 if (signal_pending(current)) {
181 siginfo_t info;
182 spin_lock_irqsave(&current->sighand->siglock, flags);
183 printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
184 current->pid, current->comm,
185 dequeue_signal(current, &current->blocked, &info));
186 spin_unlock_irqrestore(&current->sighand->siglock, flags);
187 result = -EINTR;
188 break;
189 }
190
191 if (result <= 0) {
192 if (result == 0)
193 result = -EPIPE; /* short read */
194 break;
195 }
196 size -= result;
197 buf += result;
198 } while (size > 0);
199
200 spin_lock_irqsave(&current->sighand->siglock, flags);
201 current->blocked = oldset;
202 recalc_sigpending();
203 spin_unlock_irqrestore(&current->sighand->siglock, flags);
204
205 return result;
206}
207
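/* kmap one bio segment and push it out over the socket. */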
208static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
209 int flags)
210{
211 int result;
212 void *kaddr = kmap(bvec->bv_page);
213 result = sock_xmit(sock, 1, kaddr + bvec->bv_offset, bvec->bv_len,
214 flags);
215 kunmap(bvec->bv_page);
216 return result;
217}
218
219static int nbd_send_req(struct nbd_device *lo, struct request *req)
220{
221 int result, i, flags;
222 struct nbd_request request;
223 unsigned long size = req->nr_sectors << 9;
224 struct socket *sock = lo->sock;
225
226 request.magic = htonl(NBD_REQUEST_MAGIC);
227 request.type = htonl(nbd_cmd(req));
228 request.from = cpu_to_be64((u64) req->sector << 9);
229 request.len = htonl(size);
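	/* The request pointer itself serves as the opaque handle echoed back in the reply. */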
230 memcpy(request.handle, &req, sizeof(req));
231
232 down(&lo->tx_lock);
233
234 if (!sock || !lo->sock) {
235 printk(KERN_ERR "%s: Attempted send on closed socket\n",
236 lo->disk->disk_name);
237 goto error_out;
238 }
239
240 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
241 lo->disk->disk_name, req,
242 nbdcmd_to_ascii(nbd_cmd(req)),
243 (unsigned long long)req->sector << 9,
244 req->nr_sectors << 9);
245 result = sock_xmit(sock, 1, &request, sizeof(request),
246 (nbd_cmd(req) == NBD_CMD_WRITE)? MSG_MORE: 0);
247 if (result <= 0) {
248 printk(KERN_ERR "%s: Send control failed (result %d)\n",
249 lo->disk->disk_name, result);
250 goto error_out;
251 }
252
253 if (nbd_cmd(req) == NBD_CMD_WRITE) {
254 struct bio *bio;
255 /*
256 * we are really probing at internals to determine
257 * whether to set MSG_MORE or not...
258 */
259 rq_for_each_bio(bio, req) {
260 struct bio_vec *bvec;
261 bio_for_each_segment(bvec, bio, i) {
262 flags = 0;
263 if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
264 flags = MSG_MORE;
265 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
266 lo->disk->disk_name, req,
267 bvec->bv_len);
268 result = sock_send_bvec(sock, bvec, flags);
269 if (result <= 0) {
270 printk(KERN_ERR "%s: Send data failed (result %d)\n",
271 lo->disk->disk_name,
272 result);
273 goto error_out;
274 }
275 }
276 }
277 }
278 up(&lo->tx_lock);
279 return 0;
280
281error_out:
282 up(&lo->tx_lock);
283 return 1;
284}
285
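/* Find the in-flight request matching a reply handle and unlink it from the device queue. */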
286static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
287{
288 struct request *req;
289 struct list_head *tmp;
290 struct request *xreq;
291
292 memcpy(&xreq, handle, sizeof(xreq));
293
294 spin_lock(&lo->queue_lock);
295 list_for_each(tmp, &lo->queue_head) {
296 req = list_entry(tmp, struct request, queuelist);
297 if (req != xreq)
298 continue;
299 list_del_init(&req->queuelist);
300 spin_unlock(&lo->queue_lock);
301 return req;
302 }
303 spin_unlock(&lo->queue_lock);
304 return NULL;
305}
306
307static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
308{
309 int result;
310 void *kaddr = kmap(bvec->bv_page);
311 result = sock_xmit(sock, 0, kaddr + bvec->bv_offset, bvec->bv_len,
312 MSG_WAITALL);
313 kunmap(bvec->bv_page);
314 return result;
315}
316
317/* NULL returned = something went wrong, inform userspace */
318static struct request *nbd_read_stat(struct nbd_device *lo)
319{
320 int result;
321 struct nbd_reply reply;
322 struct request *req;
323 struct socket *sock = lo->sock;
324
325 reply.magic = 0;
326 result = sock_xmit(sock, 0, &reply, sizeof(reply), MSG_WAITALL);
327 if (result <= 0) {
328 printk(KERN_ERR "%s: Receive control failed (result %d)\n",
329 lo->disk->disk_name, result);
330 goto harderror;
331 }
332 req = nbd_find_request(lo, reply.handle);
333 if (req == NULL) {
334 printk(KERN_ERR "%s: Unexpected reply (%p)\n",
335 lo->disk->disk_name, reply.handle);
336 result = -EBADR;
337 goto harderror;
338 }
339
340 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
341 printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
342 lo->disk->disk_name,
343 (unsigned long)ntohl(reply.magic));
344 result = -EPROTO;
345 goto harderror;
346 }
347 if (ntohl(reply.error)) {
348 printk(KERN_ERR "%s: Other side returned error (%d)\n",
349 lo->disk->disk_name, ntohl(reply.error));
350 req->errors++;
351 return req;
352 }
353
354 dprintk(DBG_RX, "%s: request %p: got reply\n",
355 lo->disk->disk_name, req);
356 if (nbd_cmd(req) == NBD_CMD_READ) {
357 int i;
358 struct bio *bio;
359 rq_for_each_bio(bio, req) {
360 struct bio_vec *bvec;
361 bio_for_each_segment(bvec, bio, i) {
362 result = sock_recv_bvec(sock, bvec);
363 if (result <= 0) {
364 printk(KERN_ERR "%s: Receive data failed (result %d)\n",
365 lo->disk->disk_name,
366 result);
367 goto harderror;
368 }
369 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
370 lo->disk->disk_name, req, bvec->bv_len);
371 }
372 }
373 }
374 return req;
375harderror:
376 lo->harderror = result;
377 return NULL;
378}
379
380static void nbd_do_it(struct nbd_device *lo)
381{
382 struct request *req;
383
384 BUG_ON(lo->magic != LO_MAGIC);
385
386 while ((req = nbd_read_stat(lo)) != NULL)
387 nbd_end_request(req);
388 return;
389}
390
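/* Fail and complete every request still sitting on the device queue. */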
391static void nbd_clear_que(struct nbd_device *lo)
392{
393 struct request *req;
394
395 BUG_ON(lo->magic != LO_MAGIC);
396
397 do {
398 req = NULL;
399 spin_lock(&lo->queue_lock);
400 if (!list_empty(&lo->queue_head)) {
401 req = list_entry(lo->queue_head.next, struct request, queuelist);
402 list_del_init(&req->queuelist);
403 }
404 spin_unlock(&lo->queue_lock);
405 if (req) {
406 req->errors++;
407 nbd_end_request(req);
408 }
409 } while (req);
410}
411
412/*
413 * We always wait for the result of a write, for now. It would be nice to make it
414 * optional in the future:
415 * if ((req->cmd == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
416 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
417 */
418
419static void do_nbd_request(request_queue_t * q)
420{
421 struct request *req;
422
423 while ((req = elv_next_request(q)) != NULL) {
424 struct nbd_device *lo;
425
426 blkdev_dequeue_request(req);
427 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
428 req->rq_disk->disk_name, req, req->flags);
429
430 if (!(req->flags & REQ_CMD))
431 goto error_out;
432
433 lo = req->rq_disk->private_data;
434
435 BUG_ON(lo->magic != LO_MAGIC);
436
437 if (!lo->file) {
438 printk(KERN_ERR "%s: Request when not-ready\n",
439 lo->disk->disk_name);
440 goto error_out;
441 }
442 nbd_cmd(req) = NBD_CMD_READ;
443 if (rq_data_dir(req) == WRITE) {
444 nbd_cmd(req) = NBD_CMD_WRITE;
445 if (lo->flags & NBD_READ_ONLY) {
446 printk(KERN_ERR "%s: Write on read-only\n",
447 lo->disk->disk_name);
448 goto error_out;
449 }
450 }
451
452 req->errors = 0;
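		/* Drop the queue lock while talking to the socket; the send path may sleep. */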
453 spin_unlock_irq(q->queue_lock);
454
455 spin_lock(&lo->queue_lock);
456
457 if (!lo->file) {
458 spin_unlock(&lo->queue_lock);
459 printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
460 lo->disk->disk_name);
461 req->errors++;
462 nbd_end_request(req);
463 spin_lock_irq(q->queue_lock);
464 continue;
465 }
466
467 list_add(&req->queuelist, &lo->queue_head);
468 spin_unlock(&lo->queue_lock);
469
470 if (nbd_send_req(lo, req) != 0) {
471 printk(KERN_ERR "%s: Request send failed\n",
472 lo->disk->disk_name);
473 if (nbd_find_request(lo, (char *)&req) != NULL) {
474 /* we still own req */
475 req->errors++;
476 nbd_end_request(req);
477 } else /* we're racing with nbd_clear_que */
478 printk(KERN_DEBUG "nbd: can't find req\n");
479 }
480
481 spin_lock_irq(q->queue_lock);
482 continue;
483
484error_out:
485 req->errors++;
486 spin_unlock(q->queue_lock);
487 nbd_end_request(req);
488 spin_lock(q->queue_lock);
489 }
490 return;
491}
492
493static int nbd_ioctl(struct inode *inode, struct file *file,
494 unsigned int cmd, unsigned long arg)
495{
496 struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
497 int error;
498 struct request sreq ;
499
500 if (!capable(CAP_SYS_ADMIN))
501 return -EPERM;
502
503 BUG_ON(lo->magic != LO_MAGIC);
504
505 /* Anyone capable of this syscall can do *real bad* things */
506 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
507 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
508
509 switch (cmd) {
510 case NBD_DISCONNECT:
511 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
512 sreq.flags = REQ_SPECIAL;
513 nbd_cmd(&sreq) = NBD_CMD_DISC;
514 /*
515 * Set these to sane values in case server implementation
516 * fails to check the request type first and also to keep
517 * debugging output cleaner.
518 */
519 sreq.sector = 0;
520 sreq.nr_sectors = 0;
521 if (!lo->sock)
522 return -EINVAL;
523 nbd_send_req(lo, &sreq);
524 return 0;
525
526 case NBD_CLEAR_SOCK:
527 error = 0;
528 down(&lo->tx_lock);
529 lo->sock = NULL;
530 up(&lo->tx_lock);
531 spin_lock(&lo->queue_lock);
532 file = lo->file;
533 lo->file = NULL;
534 spin_unlock(&lo->queue_lock);
535 nbd_clear_que(lo);
536 spin_lock(&lo->queue_lock);
537 if (!list_empty(&lo->queue_head)) {
538 printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
539 error = -EBUSY;
540 }
541 spin_unlock(&lo->queue_lock);
542 if (file)
543 fput(file);
544 return error;
545 case NBD_SET_SOCK:
546 if (lo->file)
547 return -EBUSY;
548 error = -EINVAL;
549 file = fget(arg);
550 if (file) {
551 inode = file->f_dentry->d_inode;
552 if (S_ISSOCK(inode->i_mode)) {
553 lo->file = file;
554 lo->sock = SOCKET_I(inode);
555 error = 0;
556 } else {
557 fput(file);
558 }
559 }
560 return error;
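	/* blksize is assumed to be a power of two; the size masks below rely on that. */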
561 case NBD_SET_BLKSIZE:
562 lo->blksize = arg;
563 lo->bytesize &= ~(lo->blksize-1);
564 inode->i_bdev->bd_inode->i_size = lo->bytesize;
565 set_blocksize(inode->i_bdev, lo->blksize);
566 set_capacity(lo->disk, lo->bytesize >> 9);
567 return 0;
568 case NBD_SET_SIZE:
569 lo->bytesize = arg & ~(lo->blksize-1);
570 inode->i_bdev->bd_inode->i_size = lo->bytesize;
571 set_blocksize(inode->i_bdev, lo->blksize);
572 set_capacity(lo->disk, lo->bytesize >> 9);
573 return 0;
574 case NBD_SET_SIZE_BLOCKS:
575 lo->bytesize = ((u64) arg) * lo->blksize;
576 inode->i_bdev->bd_inode->i_size = lo->bytesize;
577 set_blocksize(inode->i_bdev, lo->blksize);
578 set_capacity(lo->disk, lo->bytesize >> 9);
579 return 0;
580 case NBD_DO_IT:
581 if (!lo->file)
582 return -EINVAL;
583 nbd_do_it(lo);
584 /* on return tidy up in case we have a signal */
585 /* Forcibly shutdown the socket causing all listeners
586 * to error
587 *
588 * FIXME: This code is duplicated from sys_shutdown, but
589 * there should be a more generic interface rather than
590 * calling socket ops directly here */
591 down(&lo->tx_lock);
592 if (lo->sock) {
593 printk(KERN_WARNING "%s: shutting down socket\n",
594 lo->disk->disk_name);
595 lo->sock->ops->shutdown(lo->sock,
596 SEND_SHUTDOWN|RCV_SHUTDOWN);
597 lo->sock = NULL;
598 }
599 up(&lo->tx_lock);
600 spin_lock(&lo->queue_lock);
601 file = lo->file;
602 lo->file = NULL;
603 spin_unlock(&lo->queue_lock);
604 nbd_clear_que(lo);
605 printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
606 if (file)
607 fput(file);
608 return lo->harderror;
609 case NBD_CLEAR_QUE:
610 down(&lo->tx_lock);
611 if (lo->sock) {
612 up(&lo->tx_lock);
613 return 0; /* probably should be error, but that would
614 * break "nbd-client -d", so just return 0 */
615 }
616 up(&lo->tx_lock);
617 nbd_clear_que(lo);
618 return 0;
619 case NBD_PRINT_DEBUG:
620 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
621 inode->i_bdev->bd_disk->disk_name,
622 lo->queue_head.next, lo->queue_head.prev,
623 &lo->queue_head);
624 return 0;
625 }
626 return -EINVAL;
627}
628
629static struct block_device_operations nbd_fops =
630{
631 .owner = THIS_MODULE,
632 .ioctl = nbd_ioctl,
633};
634
635/*
636 * And here should be modules and kernel interface
637 * (Just smiley confuses emacs :-)
638 */
639
640static int __init nbd_init(void)
641{
642 int err = -ENOMEM;
643 int i;
644
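	/* The on-the-wire request header must be exactly 28 bytes; refuse to load otherwise. */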
645 if (sizeof(struct nbd_request) != 28) {
646 printk(KERN_CRIT "nbd: sizeof nbd_request needs to be 28 in order to work!\n" );
647 return -EIO;
648 }
649
650 for (i = 0; i < MAX_NBD; i++) {
651 struct gendisk *disk = alloc_disk(1);
652 if (!disk)
653 goto out;
654 nbd_dev[i].disk = disk;
655 /*
656 * The new linux 2.5 block layer implementation requires
657 * every gendisk to have its very own request_queue struct.
658 * These structs are big so we dynamically allocate them.
659 */
660 disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
661 if (!disk->queue) {
662 put_disk(disk);
663 goto out;
664 }
665 }
666
667 if (register_blkdev(NBD_MAJOR, "nbd")) {
668 err = -EIO;
669 goto out;
670 }
671
672 printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
673 dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
674
675 devfs_mk_dir("nbd");
676 for (i = 0; i < MAX_NBD; i++) {
677 struct gendisk *disk = nbd_dev[i].disk;
678 nbd_dev[i].file = NULL;
679 nbd_dev[i].magic = LO_MAGIC;
680 nbd_dev[i].flags = 0;
681 spin_lock_init(&nbd_dev[i].queue_lock);
682 INIT_LIST_HEAD(&nbd_dev[i].queue_head);
683 init_MUTEX(&nbd_dev[i].tx_lock);
684 nbd_dev[i].blksize = 1024;
685 nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
686 disk->major = NBD_MAJOR;
687 disk->first_minor = i;
688 disk->fops = &nbd_fops;
689 disk->private_data = &nbd_dev[i];
690 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
691 sprintf(disk->disk_name, "nbd%d", i);
692 sprintf(disk->devfs_name, "nbd/%d", i);
693 set_capacity(disk, 0x7ffffc00ULL << 1); /* 2 TB */
694 add_disk(disk);
695 }
696
697 return 0;
698out:
699 while (i--) {
700 blk_cleanup_queue(nbd_dev[i].disk->queue);
701 put_disk(nbd_dev[i].disk);
702 }
703 return err;
704}
705
706static void __exit nbd_cleanup(void)
707{
708 int i;
709 for (i = 0; i < MAX_NBD; i++) {
710 struct gendisk *disk = nbd_dev[i].disk;
711 if (disk) {
712 del_gendisk(disk);
713 blk_cleanup_queue(disk->queue);
714 put_disk(disk);
715 }
716 }
717 devfs_remove("nbd");
718 unregister_blkdev(NBD_MAJOR, "nbd");
719 printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
720}
721
722module_init(nbd_init);
723module_exit(nbd_cleanup);
724
725MODULE_DESCRIPTION("Network Block Device");
726MODULE_LICENSE("GPL");
727
728#ifndef NDEBUG
729module_param(debugflags, int, 0644);
730MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
731#endif
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
new file mode 100644
index 000000000000..888c477e02b3
--- /dev/null
+++ b/drivers/block/noop-iosched.c
@@ -0,0 +1,104 @@
1/*
2 * elevator noop
3 */
4#include <linux/blkdev.h>
5#include <linux/elevator.h>
6#include <linux/bio.h>
7#include <linux/module.h>
8#include <linux/init.h>
9
10/*
11 * See if we can find a request that this buffer can be coalesced with.
12 */
13static int elevator_noop_merge(request_queue_t *q, struct request **req,
14 struct bio *bio)
15{
16 struct list_head *entry = &q->queue_head;
17 struct request *__rq;
18 int ret;
19
20 if ((ret = elv_try_last_merge(q, bio))) {
21 *req = q->last_merge;
22 return ret;
23 }
24
25 while ((entry = entry->prev) != &q->queue_head) {
26 __rq = list_entry_rq(entry);
27
28 if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER))
29 break;
30 else if (__rq->flags & REQ_STARTED)
31 break;
32
33 if (!blk_fs_request(__rq))
34 continue;
35
36 if ((ret = elv_try_merge(__rq, bio))) {
37 *req = __rq;
38 q->last_merge = __rq;
39 return ret;
40 }
41 }
42
43 return ELEVATOR_NO_MERGE;
44}
45
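/* next has been merged into req; just drop it from the dispatch list. */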
46static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
47 struct request *next)
48{
49 list_del_init(&next->queuelist);
50}
51
52static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
53 int where)
54{
55 if (where == ELEVATOR_INSERT_FRONT)
56 list_add(&rq->queuelist, &q->queue_head);
57 else
58 list_add_tail(&rq->queuelist, &q->queue_head);
59
60 /*
61 * new merges must not precede this barrier
62 */
63 if (rq->flags & REQ_HARDBARRIER)
64 q->last_merge = NULL;
65 else if (!q->last_merge)
66 q->last_merge = rq;
67}
68
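/* FIFO dispatch: return whatever request sits at the head of the queue. */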
69static struct request *elevator_noop_next_request(request_queue_t *q)
70{
71 if (!list_empty(&q->queue_head))
72 return list_entry_rq(q->queue_head.next);
73
74 return NULL;
75}
76
77static struct elevator_type elevator_noop = {
78 .ops = {
79 .elevator_merge_fn = elevator_noop_merge,
80 .elevator_merge_req_fn = elevator_noop_merge_requests,
81 .elevator_next_req_fn = elevator_noop_next_request,
82 .elevator_add_req_fn = elevator_noop_add_request,
83 },
84 .elevator_name = "noop",
85 .elevator_owner = THIS_MODULE,
86};
87
88static int __init noop_init(void)
89{
90 return elv_register(&elevator_noop);
91}
92
93static void __exit noop_exit(void)
94{
95 elv_unregister(&elevator_noop);
96}
97
98module_init(noop_init);
99module_exit(noop_exit);
100
101
102MODULE_AUTHOR("Jens Axboe");
103MODULE_LICENSE("GPL");
104MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
new file mode 100644
index 000000000000..17ff40561257
--- /dev/null
+++ b/drivers/block/paride/Kconfig
@@ -0,0 +1,305 @@
1#
2# PARIDE configuration
3#
4# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
5# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option
6# controls the choices given to the user ...
7config PARIDE_PARPORT
8 tristate
9 depends on PARIDE!=n
10 default m if PARPORT=m
11 default y if PARPORT!=m
12
13comment "Parallel IDE high-level drivers"
14 depends on PARIDE
15
16config PARIDE_PD
17 tristate "Parallel port IDE disks"
18 depends on PARIDE
19 help
20 This option enables the high-level driver for IDE-type disk devices
21 connected through a parallel port. If you chose to build PARIDE
22 support into your kernel, you may answer Y here to build in the
23 parallel port IDE driver, otherwise you should answer M to build
24 it as a loadable module. The module will be called pd. You
25 must also have at least one parallel port protocol driver in your
26 system. Among the devices supported by this driver are the SyQuest
27 EZ-135, EZ-230 and SparQ drives, the Avatar Shark and the backpack
28 hard drives from MicroSolutions.
29
30config PARIDE_PCD
31 tristate "Parallel port ATAPI CD-ROMs"
32 depends on PARIDE
33 ---help---
34 This option enables the high-level driver for ATAPI CD-ROM devices
35 connected through a parallel port. If you chose to build PARIDE
36 support into your kernel, you may answer Y here to build in the
37 parallel port ATAPI CD-ROM driver, otherwise you should answer M to
38 build it as a loadable module. The module will be called pcd. You
39 must also have at least one parallel port protocol driver in your
40 system. Among the devices supported by this driver are the
41 MicroSolutions backpack CD-ROM drives and the Freecom Power CD. If
42 you have such a CD-ROM drive, you should also say Y or M to "ISO
43 9660 CD-ROM file system support" below, because that's the file
44 system used on CD-ROMs.
45
46config PARIDE_PF
47 tristate "Parallel port ATAPI disks"
48 depends on PARIDE
49 help
50 This option enables the high-level driver for ATAPI disk devices
51 connected through a parallel port. If you chose to build PARIDE
52 support into your kernel, you may answer Y here to build in the
53 parallel port ATAPI disk driver, otherwise you should answer M
54 to build it as a loadable module. The module will be called pf.
55 You must also have at least one parallel port protocol driver in
56 your system. Among the devices supported by this driver are the
57 MicroSolutions backpack PD/CD drive and the Imation Superdisk
58 LS-120 drive.
59
60config PARIDE_PT
61 tristate "Parallel port ATAPI tapes"
62 depends on PARIDE
63 help
64 This option enables the high-level driver for ATAPI tape devices
65 connected through a parallel port. If you chose to build PARIDE
66 support into your kernel, you may answer Y here to build in the
67 parallel port ATAPI disk driver, otherwise you should answer M
68 to build it as a loadable module. The module will be called pt.
69 You must also have at least one parallel port protocol driver in
70 your system. Among the devices supported by this driver is the
71 parallel port version of the HP 5GB drive.
72
73config PARIDE_PG
74 tristate "Parallel port generic ATAPI devices"
75 depends on PARIDE
76 ---help---
77 This option enables a special high-level driver for generic ATAPI
78 devices connected through a parallel port. The driver allows user
79 programs, such as cdrtools, to send ATAPI commands directly to a
80 device.
81
82 If you chose to build PARIDE support into your kernel, you may
83 answer Y here to build in the parallel port generic ATAPI driver,
84 otherwise you should answer M to build it as a loadable module. The
85 module will be called pg.
86
87 You must also have at least one parallel port protocol driver in
88 your system.
89
90 This driver implements an API loosely related to the generic SCSI
91 driver. See <file:include/linux/pg.h> for details.
92
93 You can obtain the most recent version of cdrtools from
94 <ftp://ftp.berlios.de/pub/cdrecord/>. Versions 1.6.1a3 and
95 later fully support this driver.
96
97comment "Parallel IDE protocol modules"
98 depends on PARIDE
99
100config PARIDE_ATEN
101 tristate "ATEN EH-100 protocol"
102 depends on PARIDE
103 help
104 This option enables support for the ATEN EH-100 parallel port IDE
105 protocol. This protocol is used in some inexpensive low performance
106 parallel port kits made in Hong Kong. If you chose to build PARIDE
107 support into your kernel, you may answer Y here to build in the
108 protocol driver, otherwise you should answer M to build it as a
109 loadable module. The module will be called aten. You must also
110 have a high-level driver for the type of device that you want to
111 support.
112
113config PARIDE_BPCK
114 tristate "MicroSolutions backpack (Series 5) protocol"
115 depends on PARIDE
116 ---help---
117 This option enables support for the Micro Solutions BACKPACK
118 parallel port Series 5 IDE protocol. (Most BACKPACK drives made
119 before 1999 were Series 5) Series 5 drives will NOT always have the
120 Series noted on the bottom of the drive. Series 6 drives will.
121
122 In other words, if your BACKPACK drive doesn't say "Series 6" on the
123 bottom, enable this option.
124
125 If you chose to build PARIDE support into your kernel, you may
126 answer Y here to build in the protocol driver, otherwise you should
127 answer M to build it as a loadable module. The module will be
128 called bpck. You must also have a high-level driver for the type
129 of device that you want to support.
130
131config PARIDE_BPCK6
132 tristate "MicroSolutions backpack (Series 6) protocol"
133 depends on PARIDE && !64BIT
134 ---help---
135 This option enables support for the Micro Solutions BACKPACK
136 parallel port Series 6 IDE protocol. (Most BACKPACK drives made
137 after 1999 were Series 6) Series 6 drives will have the Series noted
138 on the bottom of the drive. Series 5 drives don't always have it
139 noted.
140
141 In other words, if your BACKPACK drive says "Series 6" on the
142 bottom, enable this option.
143
144 If you chose to build PARIDE support into your kernel, you may
145 answer Y here to build in the protocol driver, otherwise you should
146 answer M to build it as a loadable module. The module will be
147 called bpck6. You must also have a high-level driver for the type
148 of device that you want to support.
149
150config PARIDE_COMM
151 tristate "DataStor Commuter protocol"
152 depends on PARIDE
153 help
154 This option enables support for the Commuter parallel port IDE
155 protocol from DataStor. If you chose to build PARIDE support
156 into your kernel, you may answer Y here to build in the protocol
157 driver, otherwise you should answer M to build it as a loadable
158 module. The module will be called comm. You must also have
159 a high-level driver for the type of device that you want to support.
160
161config PARIDE_DSTR
162 tristate "DataStor EP-2000 protocol"
163 depends on PARIDE
164 help
165 This option enables support for the EP-2000 parallel port IDE
166 protocol from DataStor. If you chose to build PARIDE support
167 into your kernel, you may answer Y here to build in the protocol
168 driver, otherwise you should answer M to build it as a loadable
169 module. The module will be called dstr. You must also have
170 a high-level driver for the type of device that you want to support.
171
172config PARIDE_FIT2
173 tristate "FIT TD-2000 protocol"
174 depends on PARIDE
175 help
176 This option enables support for the TD-2000 parallel port IDE
177 protocol from Fidelity International Technology. This is a simple
178 (low speed) adapter that is used in some portable hard drives. If
179 you chose to build PARIDE support into your kernel, you may answer Y
180 here to build in the protocol driver, otherwise you should answer M
181 to build it as a loadable module. The module will be called fit2.
182 You must also have a high-level driver for the type of device that
183 you want to support.
184
185config PARIDE_FIT3
186 tristate "FIT TD-3000 protocol"
187 depends on PARIDE
188 help
189 This option enables support for the TD-3000 parallel port IDE
190 protocol from Fidelity International Technology. This protocol is
191 used in newer models of their portable disk, CD-ROM and PD/CD
192 devices. If you chose to build PARIDE support into your kernel, you
193 may answer Y here to build in the protocol driver, otherwise you
194 should answer M to build it as a loadable module. The module will be
195 called fit3. You must also have a high-level driver for the type
196 of device that you want to support.
197
198config PARIDE_EPAT
199 tristate "Shuttle EPAT/EPEZ protocol"
200 depends on PARIDE
201 help
202 This option enables support for the EPAT parallel port IDE protocol.
203 EPAT is a parallel port IDE adapter manufactured by Shuttle
204 Technology and widely used in devices from major vendors such as
205 Hewlett-Packard, SyQuest, Imation and Avatar. If you chose to build
206 PARIDE support into your kernel, you may answer Y here to build in
207 the protocol driver, otherwise you should answer M to build it as a
208 loadable module. The module will be called epat. You must also
209 have a high-level driver for the type of device that you want to
210 support.
211
212config PARIDE_EPATC8
213 bool "Support c7/c8 chips (EXPERIMENTAL)"
214 depends on PARIDE_EPAT && EXPERIMENTAL
215 help
216 This option enables support for the newer Shuttle EP1284 (aka c7 and
217 c8) chip. You need this if you are using any recent Imation SuperDisk
218 (LS-120) drive.
219
220config PARIDE_EPIA
221 tristate "Shuttle EPIA protocol"
222 depends on PARIDE
223 help
224 This option enables support for the (obsolete) EPIA parallel port
225 IDE protocol from Shuttle Technology. This adapter can still be
226 found in some no-name kits. If you chose to build PARIDE support
227 into your kernel, you may answer Y here to build in the protocol
228 driver, otherwise you should answer M to build it as a loadable
229 module. The module will be called epia. You must also have a
230 high-level driver for the type of device that you want to support.
231
232config PARIDE_FRIQ
233 tristate "Freecom IQ ASIC-2 protocol"
234 depends on PARIDE
235 help
236 This option enables support for version 2 of the Freecom IQ parallel
237 port IDE adapter. This adapter is used by the Maxell Superdisk
238 drive. If you chose to build PARIDE support into your kernel, you
239 may answer Y here to build in the protocol driver, otherwise you
240 should answer M to build it as a loadable module. The module will be
241 called friq. You must also have a high-level driver for the type
242 of device that you want to support.
243
244config PARIDE_FRPW
245 tristate "FreeCom power protocol"
246 depends on PARIDE
247 help
248 This option enables support for the Freecom power parallel port IDE
249 protocol. If you chose to build PARIDE support into your kernel, you
250 may answer Y here to build in the protocol driver, otherwise you
251 should answer M to build it as a loadable module. The module will be
252 called frpw. You must also have a high-level driver for the type
253 of device that you want to support.
254
255config PARIDE_KBIC
256 tristate "KingByte KBIC-951A/971A protocols"
257 depends on PARIDE
258 help
259 This option enables support for the KBIC-951A and KBIC-971A parallel
260 port IDE protocols from KingByte Information Corp. KingByte's
261 adapters appear in many no-name portable disk and CD-ROM products,
262 especially in Europe. If you chose to build PARIDE support into your
263 kernel, you may answer Y here to build in the protocol driver,
264 otherwise you should answer M to build it as a loadable module. The
265 module will be called kbic. You must also have a high-level driver
266 for the type of device that you want to support.
267
268config PARIDE_KTTI
269 tristate "KT PHd protocol"
270 depends on PARIDE
271 help
272 This option enables support for the "PHd" parallel port IDE protocol
273 from KT Technology. This is a simple (low speed) adapter that is
274 used in some 2.5" portable hard drives. If you chose to build PARIDE
275 support into your kernel, you may answer Y here to build in the
276 protocol driver, otherwise you should answer M to build it as a
277 loadable module. The module will be called ktti. You must also
278 have a high-level driver for the type of device that you want to
279 support.
280
281config PARIDE_ON20
282 tristate "OnSpec 90c20 protocol"
283 depends on PARIDE
284 help
285 This option enables support for the (obsolete) 90c20 parallel port
286 IDE protocol from OnSpec (often marketed under the ValuStore brand
287 name). If you chose to build PARIDE support into your kernel, you
288 may answer Y here to build in the protocol driver, otherwise you
289 should answer M to build it as a loadable module. The module will
290 be called on20. You must also have a high-level driver for the
291 type of device that you want to support.
292
293config PARIDE_ON26
294 tristate "OnSpec 90c26 protocol"
295 depends on PARIDE
296 help
297 This option enables support for the 90c26 parallel port IDE protocol
298 from OnSpec Electronics (often marketed under the ValuStore brand
299 name). If you chose to build PARIDE support into your kernel, you
300 may answer Y here to build in the protocol driver, otherwise you
301 should answer M to build it as a loadable module. The module will be
302 called on26. You must also have a high-level driver for the type
303 of device that you want to support.
304
305#
diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile
new file mode 100644
index 000000000000..a539e004bb7a
--- /dev/null
+++ b/drivers/block/paride/Makefile
@@ -0,0 +1,28 @@
1#
2# Makefile for Parallel port IDE device drivers.
3#
4# 7 October 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
5# Rewritten to use lists instead of if-statements.
6#
7
8obj-$(CONFIG_PARIDE) += paride.o
9obj-$(CONFIG_PARIDE_ATEN) += aten.o
10obj-$(CONFIG_PARIDE_BPCK) += bpck.o
11obj-$(CONFIG_PARIDE_COMM) += comm.o
12obj-$(CONFIG_PARIDE_DSTR) += dstr.o
13obj-$(CONFIG_PARIDE_KBIC) += kbic.o
14obj-$(CONFIG_PARIDE_EPAT) += epat.o
15obj-$(CONFIG_PARIDE_EPIA) += epia.o
16obj-$(CONFIG_PARIDE_FRPW) += frpw.o
17obj-$(CONFIG_PARIDE_FRIQ) += friq.o
18obj-$(CONFIG_PARIDE_FIT2) += fit2.o
19obj-$(CONFIG_PARIDE_FIT3) += fit3.o
20obj-$(CONFIG_PARIDE_ON20) += on20.o
21obj-$(CONFIG_PARIDE_ON26) += on26.o
22obj-$(CONFIG_PARIDE_KTTI) += ktti.o
23obj-$(CONFIG_PARIDE_BPCK6) += bpck6.o
24obj-$(CONFIG_PARIDE_PD) += pd.o
25obj-$(CONFIG_PARIDE_PCD) += pcd.o
26obj-$(CONFIG_PARIDE_PF) += pf.o
27obj-$(CONFIG_PARIDE_PT) += pt.o
28obj-$(CONFIG_PARIDE_PG) += pg.o
diff --git a/drivers/block/paride/Transition-notes b/drivers/block/paride/Transition-notes
new file mode 100644
index 000000000000..70374907c020
--- /dev/null
+++ b/drivers/block/paride/Transition-notes
@@ -0,0 +1,128 @@
1Lemma 1:
2 If ps_tq is scheduled, ps_tq_active is 1. ps_tq_int() can be called
3 only when ps_tq_active is 1.
4Proof: All assignments to ps_tq_active and all scheduling of ps_tq happen
5 under ps_spinlock. There are three places where that can happen:
6 one in ps_set_intr() (A) and two in ps_tq_int() (B and C).
7 Consider the sequence of these events. A can not be preceded by
8 anything except B, since it is under if (!ps_tq_active) under
9 ps_spinlock. C is always preceded by B, since we can't reach it
10 other than through B and we don't drop ps_spinlock between them.
11 IOW, the sequence is A?(BA|BC|B)*. OTOH, number of B can not exceed
12 the sum of numbers of A and C, since each call of ps_tq_int() is
13 the result of ps_tq execution. Therefore, the sequence starts with
14 A and each B is preceded by either A or C. Moments when we enter
15 ps_tq_int() are sandwiched between {A,C} and B in that sequence,
16 since at any time number of B can not exceed the number of these
17 moments which, in turn, can not exceed the number of A and C.
18 In other words, the sequence of events is (A or C set ps_tq_active to
19 1 and schedule ps_tq, ps_tq is executed, ps_tq_int() is entered,
20 B resets ps_tq_active)*.
21
22
23consider the following area:
24 * in do_pd_request1(): to calls of pi_do_claimed() and return in
25 case when pd_req is NULL.
26 * in next_request(): to call of do_pd_request1()
27 * in do_pd_read(): to call of ps_set_intr()
28 * in do_pd_read_start(): to calls of pi_do_claimed(), next_request()
29and ps_set_intr()
30 * in do_pd_read_drq(): to calls of pi_do_claimed() and next_request()
31 * in do_pd_write(): to call of ps_set_intr()
32 * in do_pd_write_start(): to calls of pi_do_claimed(), next_request()
33and ps_set_intr()
34 * in do_pd_write_done(): to calls of pi_do_claimed() and next_request()
35 * in ps_set_intr(): to check for ps_tq_active and to scheduling
36 ps_tq if ps_tq_active was 0.
37 * in ps_tq_int(): from the moment when we get ps_spinlock() to the
38 return, call of con() or scheduling ps_tq.
39 * in pi_schedule_claimed() when called from pi_do_claimed() called from
40 pd.c, everything until returning 1 or setting ->claim_cont
41 on the path that returns 0
42 * in pi_do_claimed() when called from pd.c, everything until the call
43 of pi_do_claimed() plus everything until the call of cont() if
44 pi_do_claimed() has returned 1.
45 * in pi_wake_up() called for PIA that belongs to pd.c, everything from
46 the moment when pi_spinlock has been acquired.
47
48Lemma 2:
49 1) at any time at most one thread of execution can be in that area or
50 be preempted there.
51 2) When there is such a thread, pd_busy is set or pd_lock is held by
52 that thread.
53 3) When there is such a thread, ps_tq_active is 0 or ps_spinlock is
54 held by that thread.
55 4) When there is such a thread, all PIA belonging to pd.c have NULL
56 ->claim_cont or pi_spinlock is held by thread in question.
57
58Proof: consider the first moment when the above is not true.
59
60(1) can become not true if some thread enters that area while another is there.
61 a) do_pd_request1() can be called from next_request() or do_pd_request()
62 In the first case the thread was already in the area. In the second,
63 the thread was holding pd_lock and found pd_busy not set, which would
64 mean that (2) was already not true.
65 b) ps_set_intr() and pi_schedule_claimed() can be called only from the
66 area.
67 c) pi_do_claimed() is called by pd.c only from the area.
68 d) ps_tq_int() can enter the area only when the thread is holding
69 ps_spinlock and ps_tq_active is 1 (due to Lemma 1). It means that
70 (3) was already not true.
71 e) do_pd_{read,write}* could be called only from the area. The only
72 case that needs consideration is call from pi_wake_up() and there
73 we would have to be called for the PIA that got ->claimed_cont
74 from pd.c. That could happen only if pi_do_claimed() had been
75 called from pd.c for that PIA, which happens only for PIA belonging
76 to pd.c.
77 f) pi_wake_up() can enter the area only when the thread is holding
78 pi_spinlock and ->claimed_cont is non-NULL for PIA belonging to
79 pd.c. It means that (4) was already not true.
80
81(2) can become not true only when pd_lock is released by the thread in question.
82 Indeed, pd_busy is reset only in the area and the thread that resets
83 it is holding pd_lock. The only place within the area where we
84 release pd_lock is in pd_next_buf() (called from within the area).
85 But that code does not reset pd_busy, so pd_busy would have to be
86 0 when pd_next_buf() had acquired pd_lock. If it became 0 while
87 we were acquiring the lock, (1) would already be false, since
88 the thread that had reset it would be in the area simultaneously.
89 If it was 0 before we tried to acquire pd_lock, (2) would be
90 already false.
91
92For similar reasons, (3) can become not true only when ps_spinlock is released
93by the thread in question. However, all such places within the area are right
94after resetting ps_tq_active to 0.
95
96(4) is done the same way - all places where we release pi_spinlock within
97the area are either after resetting ->claimed_cont to NULL while holding
98 pi_spinlock, or after not touching ->claimed_cont since acquiring pi_spinlock
99also in the area. The only place where ->claimed_cont is made non-NULL is
100in the area, under pi_spinlock and we do not release it until after leaving
101the area.
102
103QED.
104
105
106Corollary 1: ps_tq_active can be killed. Indeed, the only place where we
107check its value is in ps_set_intr() and if it had been non-zero at that
108point, we would have violated either (2.1) (if it was set while ps_set_intr()
109was acquiring ps_spinlock) or (2.3) (if it was set when we started to
110acquire ps_spinlock).
111
112Corollary 2: ps_spinlock can be killed. Indeed, Lemma 1 and Lemma 2 show
113that the only possible contention is between scheduling ps_tq followed by
114immediate release of spinlock and beginning of execution of ps_tq on
115another CPU.
116
117Corollary 3: assignment to pd_busy in do_pd_read_start() and do_pd_write_start()
118can be killed. Indeed, we are not holding pd_lock and thus pd_busy is already
1191 here.
120
121Corollary 4: in ps_tq_int() uses of con can be replaced with uses of
122ps_continuation, since the latter is changed only from the area.
123We don't need to reset it to NULL, since we are guaranteed that there
124will be a call of ps_set_intr() before we look at ps_continuation again.
125We can remove the check for ps_continuation being NULL for the same
126reason - the value is guaranteed to be set by the last ps_set_intr() and
127 we never pass it NULL. Assignments at the beginning of ps_set_intr()
128can be taken to callers as long as they remain within the area.
diff --git a/drivers/block/paride/aten.c b/drivers/block/paride/aten.c
new file mode 100644
index 000000000000..c4d696d43dc1
--- /dev/null
+++ b/drivers/block/paride/aten.c
@@ -0,0 +1,162 @@
1/*
2 aten.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 aten.c is a low-level protocol driver for the ATEN EH-100
6 parallel port adapter. The EH-100 supports 4-bit and 8-bit
7 modes only. There is also an EH-132 which supports EPP mode
8 transfers. The EH-132 is not yet supported.
9
10*/
11
12/* Changes:
13
14 1.01 GRG 1998.05.05 init_proto, release_proto
15
16*/
17
18#define ATEN_VERSION "1.01"
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/kernel.h>
24#include <linux/wait.h>
25#include <linux/types.h>
26#include <asm/io.h>
27
28#include "paride.h"
29
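/* Reassemble one byte from the two nibbles read back in 4-bit mode. */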
30#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88)
31
32/* cont = 0 - access the IDE register file
33 cont = 1 - access the IDE command set
34*/
35
36static int cont_map[2] = { 0x08, 0x20 };
37
38static void aten_write_regr( PIA *pi, int cont, int regr, int val)
39
40{ int r;
41
42 r = regr + cont_map[cont] + 0x80;
43
44 w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc);
45}
46
47static int aten_read_regr( PIA *pi, int cont, int regr )
48
49{ int a, b, r;
50
51 r = regr + cont_map[cont] + 0x40;
52
53 switch (pi->mode) {
54
55 case 0: w0(r); w2(0xe); w2(6);
56 w2(7); w2(6); w2(0);
57 a = r1(); w0(0x10); b = r1(); w2(0xc);
58 return j44(a,b);
59
60 case 1: r |= 0x10;
61 w0(r); w2(0xe); w2(6); w0(0xff);
62 w2(0x27); w2(0x26); w2(0x20);
63 a = r0();
64 w2(0x26); w2(0xc);
65 return a;
66 }
67 return -1;
68}
69
70static void aten_read_block( PIA *pi, char * buf, int count )
71
72{ int k, a, b, c, d;
73
74 switch (pi->mode) {
75
76 case 0: w0(0x48); w2(0xe); w2(6);
77 for (k=0;k<count/2;k++) {
78 w2(7); w2(6); w2(2);
79 a = r1(); w0(0x58); b = r1();
80 w2(0); d = r1(); w0(0x48); c = r1();
81 buf[2*k] = j44(c,d);
82 buf[2*k+1] = j44(a,b);
83 }
84 w2(0xc);
85 break;
86
87 case 1: w0(0x58); w2(0xe); w2(6);
88 for (k=0;k<count/2;k++) {
89 w2(0x27); w2(0x26); w2(0x22);
90 a = r0(); w2(0x20); b = r0();
91 buf[2*k] = b; buf[2*k+1] = a;
92 }
93 w2(0x26); w2(0xc);
94 break;
95 }
96}
97
98static void aten_write_block( PIA *pi, char * buf, int count )
99
100{ int k;
101
102 w0(0x88); w2(0xe); w2(6);
103 for (k=0;k<count/2;k++) {
104 w0(buf[2*k+1]); w2(0xe); w2(6);
105 w0(buf[2*k]); w2(7); w2(6);
106 }
107 w2(0xc);
108}
109
110static void aten_connect ( PIA *pi )
111
112{ pi->saved_r0 = r0();
113 pi->saved_r2 = r2();
114 w2(0xc);
115}
116
117static void aten_disconnect ( PIA *pi )
118
119{ w0(pi->saved_r0);
120 w2(pi->saved_r2);
121}
122
123static void aten_log_adapter( PIA *pi, char * scratch, int verbose )
124
125{ char *mode_string[2] = {"4-bit","8-bit"};
126
127 printk("%s: aten %s, ATEN EH-100 at 0x%x, ",
128 pi->device,ATEN_VERSION,pi->port);
129 printk("mode %d (%s), delay %d\n",pi->mode,
130 mode_string[pi->mode],pi->delay);
131
132}
133
134static struct pi_protocol aten = {
135 .owner = THIS_MODULE,
136 .name = "aten",
137 .max_mode = 2,
138 .epp_first = 2,
139 .default_delay = 1,
140 .max_units = 1,
141 .write_regr = aten_write_regr,
142 .read_regr = aten_read_regr,
143 .write_block = aten_write_block,
144 .read_block = aten_read_block,
145 .connect = aten_connect,
146 .disconnect = aten_disconnect,
147 .log_adapter = aten_log_adapter,
148};
149
150static int __init aten_init(void)
151{
152 return pi_register(&aten)-1;
153}
154
155static void __exit aten_exit(void)
156{
157 pi_unregister( &aten );
158}
159
160MODULE_LICENSE("GPL");
161module_init(aten_init)
162module_exit(aten_exit)
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
new file mode 100644
index 000000000000..d462ff6b139d
--- /dev/null
+++ b/drivers/block/paride/bpck.c
@@ -0,0 +1,477 @@
1/*
2 bpck.c (c) 1996-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 bpck.c is a low-level protocol driver for the MicroSolutions
6 "backpack" parallel port IDE adapter.
7
8*/
9
10/* Changes:
11
12 1.01 GRG 1998.05.05 init_proto, release_proto, pi->delay
13 1.02 GRG 1998.08.15 default pi->delay returned to 4
14
15*/
16
17#define BPCK_VERSION "1.02"
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/wait.h>
25#include <asm/io.h>
26
27#include "paride.h"
28
29#undef r2
30#undef w2
31
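/* Shadow the last value written to control port 2 in pi->private so single bits can be toggled. */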
32#define PC pi->private
33#define r2() (PC=(in_p(2) & 0xff))
34#define w2(byte) {out_p(2,byte); PC = byte;}
35#define t2(pat) {PC ^= pat; out_p(2,PC);}
36#define e2() {PC &= 0xfe; out_p(2,PC);}
37#define o2() {PC |= 1; out_p(2,PC);}
38
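/* Reassemble one byte from the two nibbles read back in 4-bit mode. */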
39#define j44(l,h) (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80))
40
41/* cont = 0 - access the IDE register file
42 cont = 1 - access the IDE command set
43 cont = 2 - use internal bpck register addressing
44*/
45
46static int cont_map[3] = { 0x40, 0x48, 0 };
47
48static int bpck_read_regr( PIA *pi, int cont, int regr )
49
50{ int r, l, h;
51
52 r = regr + cont_map[cont];
53
54 switch (pi->mode) {
55
56 case 0: w0(r & 0xf); w0(r); t2(2); t2(4);
57 l = r1();
58 t2(4);
59 h = r1();
60 return j44(l,h);
61
62 case 1: w0(r & 0xf); w0(r); t2(2);
63 e2(); t2(0x20);
64 t2(4); h = r0();
65 t2(1); t2(0x20);
66 return h;
67
68 case 2:
69 case 3:
70 case 4: w0(r); w2(9); w2(0); w2(0x20);
71 h = r4();
72 w2(0);
73 return h;
74
75 }
76 return -1;
77}
78
79static void bpck_write_regr( PIA *pi, int cont, int regr, int val )
80
81{ int r;
82
83 r = regr + cont_map[cont];
84
85 switch (pi->mode) {
86
87 case 0:
88 case 1: w0(r);
89 t2(2);
90 w0(val);
91 o2(); t2(4); t2(1);
92 break;
93
94 case 2:
95 case 3:
96 case 4: w0(r); w2(9); w2(0);
97 w0(val); w2(1); w2(3); w2(0);
98 break;
99
100 }
101}
102
103/* These macros access the bpck registers in native addressing */
104
105#define WR(r,v) bpck_write_regr(pi,2,r,v)
106#define RR(r) (bpck_read_regr(pi,2,r))
107
108static void bpck_write_block( PIA *pi, char * buf, int count )
109
110{ int i;
111
112 switch (pi->mode) {
113
114 case 0: WR(4,0x40);
115 w0(0x40); t2(2); t2(1);
116 for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
117 WR(4,0);
118 break;
119
120 case 1: WR(4,0x50);
121 w0(0x40); t2(2); t2(1);
122 for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
123 WR(4,0x10);
124 break;
125
126 case 2: WR(4,0x48);
127 w0(0x40); w2(9); w2(0); w2(1);
128 for (i=0;i<count;i++) w4(buf[i]);
129 w2(0);
130 WR(4,8);
131 break;
132
133 case 3: WR(4,0x48);
134 w0(0x40); w2(9); w2(0); w2(1);
135 for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]);
136 w2(0);
137 WR(4,8);
138 break;
139
140 case 4: WR(4,0x48);
141 w0(0x40); w2(9); w2(0); w2(1);
142 for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]);
143 w2(0);
144 WR(4,8);
145 break;
146 }
147}
148
149static void bpck_read_block( PIA *pi, char * buf, int count )
150
151{ int i, l, h;
152
153 switch (pi->mode) {
154
155 case 0: WR(4,0x40);
156 w0(0x40); t2(2);
157 for (i=0;i<count;i++) {
158 t2(4); l = r1();
159 t2(4); h = r1();
160 buf[i] = j44(l,h);
161 }
162 WR(4,0);
163 break;
164
165 case 1: WR(4,0x50);
166 w0(0x40); t2(2); t2(0x20);
167 for(i=0;i<count;i++) { t2(4); buf[i] = r0(); }
168 t2(1); t2(0x20);
169 WR(4,0x10);
170 break;
171
172 case 2: WR(4,0x48);
173 w0(0x40); w2(9); w2(0); w2(0x20);
174 for (i=0;i<count;i++) buf[i] = r4();
175 w2(0);
176 WR(4,8);
177 break;
178
179 case 3: WR(4,0x48);
180 w0(0x40); w2(9); w2(0); w2(0x20);
181 for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w();
182 w2(0);
183 WR(4,8);
184 break;
185
186 case 4: WR(4,0x48);
187 w0(0x40); w2(9); w2(0); w2(0x20);
188 for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l();
189 w2(0);
190 WR(4,8);
191 break;
192
193 }
194}
195
196static int bpck_probe_unit ( PIA *pi )
197
198{ int o1, o0, f7, id;
199 int t, s;
200
201 id = pi->unit;
202 s = 0;
203 w2(4); w2(0xe); r2(); t2(2);
204 o1 = r1()&0xf8;
205 o0 = r0();
206 w0(255-id); w2(4); w0(id);
207 t2(8); t2(8); t2(8);
208 t2(2); t = r1()&0xf8;
209 f7 = ((id % 8) == 7);
210 if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; }
211 if ((t == o1) && ((!f7) || (s == o1))) {
212 w2(0x4c); w0(o0);
213 return 0;
214 }
215 t2(8); w0(0); t2(2); w2(0x4c); w0(o0);
216 return 1;
217}
218
219static void bpck_connect ( PIA *pi )
220
221{ pi->saved_r0 = r0();
222 w0(0xff-pi->unit); w2(4); w0(pi->unit);
223 t2(8); t2(8); t2(8);
224 t2(2); t2(2);
225
226 switch (pi->mode) {
227
228 case 0: t2(8); WR(4,0);
229 break;
230
231 case 1: t2(8); WR(4,0x10);
232 break;
233
234 case 2:
235 case 3:
236 case 4: w2(0); WR(4,8);
237 break;
238
239 }
240
241 WR(5,8);
242
243 if (pi->devtype == PI_PCD) {
244 WR(0x46,0x10); /* fiddle with ESS logic ??? */
245 WR(0x4c,0x38);
246 WR(0x4d,0x88);
247 WR(0x46,0xa0);
248 WR(0x41,0);
249 WR(0x4e,8);
250 }
251}
252
253static void bpck_disconnect ( PIA *pi )
254
255{ w0(0);
256 if (pi->mode >= 2) { w2(9); w2(0); } else t2(2);
257 w2(0x4c); w0(pi->saved_r0);
258}
259
260static void bpck_force_spp ( PIA *pi )
261
262/* This fakes the EPP protocol to turn off EPP ... */
263
264{ pi->saved_r0 = r0();
265 w0(0xff-pi->unit); w2(4); w0(pi->unit);
266 t2(8); t2(8); t2(8);
267 t2(2); t2(2);
268
269 w2(0);
270 w0(4); w2(9); w2(0);
271 w0(0); w2(1); w2(3); w2(0);
272 w0(0); w2(9); w2(0);
273 w2(0x4c); w0(pi->saved_r0);
274}
275
276#define TEST_LEN 16
277
278static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
279
280{ int i, e, l, h, om;
281 char buf[TEST_LEN];
282
283 bpck_force_spp(pi);
284
285 switch (pi->mode) {
286
287 case 0: bpck_connect(pi);
288 WR(0x13,0x7f);
289 w0(0x13); t2(2);
290 for(i=0;i<TEST_LEN;i++) {
291 t2(4); l = r1();
292 t2(4); h = r1();
293 buf[i] = j44(l,h);
294 }
295 bpck_disconnect(pi);
296 break;
297
298 case 1: bpck_connect(pi);
299 WR(0x13,0x7f);
300 w0(0x13); t2(2); t2(0x20);
301 for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); }
302 t2(1); t2(0x20);
303 bpck_disconnect(pi);
304 break;
305
306 case 2:
307 case 3:
308 case 4: om = pi->mode;
309 pi->mode = 0;
310 bpck_connect(pi);
311 WR(7,3);
312 WR(4,8);
313 bpck_disconnect(pi);
314
315 pi->mode = om;
316 bpck_connect(pi);
317 w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0);
318
319 switch (pi->mode) {
320 case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4();
321 break;
322 case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w();
323 break;
324 case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l();
325 break;
326 }
327
328 w2(0);
329 WR(7,0);
330 bpck_disconnect(pi);
331
332 break;
333
334 }
335
336 if (verbose) {
337 printk("%s: bpck: 0x%x unit %d mode %d: ",
338 pi->device,pi->port,pi->unit,pi->mode);
339 for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]);
340 printk("\n");
341 }
342
343 e = 0;
344 for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++;
345 return e;
346}
347
348static void bpck_read_eeprom ( PIA *pi, char * buf )
349
350{ int i,j,k,n,p,v,f, om, od;
351
352 bpck_force_spp(pi);
353
354 om = pi->mode; od = pi->delay;
355 pi->mode = 0; pi->delay = 6;
356
357 bpck_connect(pi);
358
359 n = 0;
360 WR(4,0);
361 for (i=0;i<64;i++) {
362 WR(6,8);
363 WR(6,0xc);
364 p = 0x100;
365 for (k=0;k<9;k++) {
366 f = (((i + 0x180) & p) != 0) * 2;
367 WR(6,f+0xc);
368 WR(6,f+0xd);
369 WR(6,f+0xc);
370 p = (p >> 1);
371 }
372 for (j=0;j<2;j++) {
373 v = 0;
374 for (k=0;k<8;k++) {
375 WR(6,0xc);
376 WR(6,0xd);
377 WR(6,0xc);
378 f = RR(0);
379 v = 2*v + (f == 0x84);
380 }
381 buf[2*i+1-j] = v;
382 }
383 }
384 WR(6,8);
385 WR(6,0);
386 WR(5,8);
387
388 bpck_disconnect(pi);
389
390 if (om >= 2) {
391 bpck_connect(pi);
392 WR(7,3);
393 WR(4,8);
394 bpck_disconnect(pi);
395 }
396
397 pi->mode = om; pi->delay = od;
398}
399
400static int bpck_test_port ( PIA *pi ) /* check for 8-bit port */
401
402{ int i, r, m;
403
404 w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i);
405 m = -1;
406 if (r == i) m = 2;
407 if (r == (255-i)) m = 0;
408
409 w2(0xc); i = r0(); w0(255-i); r = r0(); w0(i);
410 if (r != (255-i)) m = -1;
411
412 if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); }
413 if (m == 2) { w2(0x26); w2(0xc); }
414
415 if (m == -1) return 0;
416 return 5;
417}
418
419static void bpck_log_adapter( PIA *pi, char * scratch, int verbose )
420
421{ char *mode_string[5] = { "4-bit","8-bit","EPP-8",
422 "EPP-16","EPP-32" };
423
424#ifdef DUMP_EEPROM
425 int i;
426#endif
427
428 bpck_read_eeprom(pi,scratch);
429
430#ifdef DUMP_EEPROM
431 if (verbose) {
432 for(i=0;i<128;i++)
433 if ((scratch[i] < ' ') || (scratch[i] > '~'))
434 scratch[i] = '.';
435 printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch);
436 printk("%s: %64.64s\n",pi->device,&scratch[64]);
437 }
438#endif
439
440 printk("%s: bpck %s, backpack %8.8s unit %d",
441 pi->device,BPCK_VERSION,&scratch[110],pi->unit);
442 printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port,
443 pi->mode,mode_string[pi->mode],pi->delay);
444}
445
446static struct pi_protocol bpck = {
447 .owner = THIS_MODULE,
448 .name = "bpck",
449 .max_mode = 5,
450 .epp_first = 2,
451 .default_delay = 4,
452 .max_units = 255,
453 .write_regr = bpck_write_regr,
454 .read_regr = bpck_read_regr,
455 .write_block = bpck_write_block,
456 .read_block = bpck_read_block,
457 .connect = bpck_connect,
458 .disconnect = bpck_disconnect,
459 .test_port = bpck_test_port,
460 .probe_unit = bpck_probe_unit,
461 .test_proto = bpck_test_proto,
462 .log_adapter = bpck_log_adapter,
463};
464
465static int __init bpck_init(void)
466{
467 return pi_register(&bpck)-1;
468}
469
470static void __exit bpck_exit(void)
471{
472 pi_unregister(&bpck);
473}
474
475MODULE_LICENSE("GPL");
476module_init(bpck_init)
477module_exit(bpck_exit)
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
new file mode 100644
index 000000000000..08d858ad64db
--- /dev/null
+++ b/drivers/block/paride/bpck6.c
@@ -0,0 +1,282 @@
1/*
2 backpack.c (c) 2001 Micro Solutions Inc.
3 Released under the terms of the GNU General Public license
4
5 backpack.c is a low-level protocol driver for the Micro Solutions
6 "BACKPACK" parallel port IDE adapter
7 (Works on Series 6 drives)
8
9 Written by: Ken Hahn (linux-dev@micro-solutions.com)
10 Clive Turvey (linux-dev@micro-solutions.com)
11
12*/
13
14/*
15 This is Ken's linux wrapper for the PPC library
16 Version 1.0.0 is the backpack driver for which source is not available
17 Version 2.0.0 is the first to have source released
18 Version 2.0.1 is the "Cox-ified" source code
19 Version 2.0.2 - fixed version string usage, and made ppc functions static
20*/
21
22
23/* PARAMETERS */
24static int verbose; /* set this to 1 to see debugging messages and whatnot */
25
26#define BACKPACK_VERSION "2.0.2"
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/slab.h>
32#include <linux/types.h>
33#include <asm/io.h>
34
35#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT)
36#include <linux/parport.h>
37#endif
38
39#include "ppc6lnx.c"
40#include "paride.h"
41
42
43
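/* The per-adapter state used by the PPC library hangs off pi->private. */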
44#define PPCSTRUCT(pi) ((Interface *)(pi->private))
45
46/****************************************************************/
47/*
48 ATAPI CDROM DRIVE REGISTERS
49*/
50#define ATAPI_DATA 0 /* data port */
51#define ATAPI_ERROR 1 /* error register (read) */
52#define ATAPI_FEATURES 1 /* feature register (write) */
53#define ATAPI_INT_REASON 2 /* interrupt reason register */
54#define ATAPI_COUNT_LOW 4 /* byte count register (low) */
55#define ATAPI_COUNT_HIGH 5 /* byte count register (high) */
56#define ATAPI_DRIVE_SEL 6 /* drive select register */
57#define ATAPI_STATUS 7 /* status port (read) */
58#define ATAPI_COMMAND 7 /* command port (write) */
59#define ATAPI_ALT_STATUS 0x0e /* alternate status reg (read) */
60#define ATAPI_DEVICE_CONTROL 0x0e /* device control (write) */
61/****************************************************************/
62
63static int bpck6_read_regr(PIA *pi, int cont, int reg)
64{
65 unsigned int out;
66
67 /* check for bad settings */
68 if (reg<0 || reg>7 || cont<0 || cont>2)
69 {
70 return(-1);
71 }
72 out=ppc6_rd_port(PPCSTRUCT(pi),cont?reg|8:reg);
73 return(out);
74}
75
76static void bpck6_write_regr(PIA *pi, int cont, int reg, int val)
77{
78 /* check for bad settings */
79 if (reg>=0 && reg<=7 && cont>=0 && cont<=1)
80 {
81 ppc6_wr_port(PPCSTRUCT(pi),cont?reg|8:reg,(u8)val);
82 }
83}
84
85static void bpck6_write_block( PIA *pi, char * buf, int len )
86{
87 ppc6_wr_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1);
88}
89
90static void bpck6_read_block( PIA *pi, char * buf, int len )
91{
92 ppc6_rd_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1);
93}
94
95static void bpck6_connect ( PIA *pi )
96{
97 if(verbose)
98 {
99 printk(KERN_DEBUG "connect\n");
100 }
101
102 if(pi->mode >=2)
103 {
104 PPCSTRUCT(pi)->mode=4+pi->mode-2;
105 }
106 else if(pi->mode==1)
107 {
108 PPCSTRUCT(pi)->mode=3;
109 }
110 else
111 {
112 PPCSTRUCT(pi)->mode=1;
113 }
114
115 ppc6_open(PPCSTRUCT(pi));
116 ppc6_wr_extout(PPCSTRUCT(pi),0x3);
117}
118
119static void bpck6_disconnect ( PIA *pi )
120{
121 if(verbose)
122 {
123 printk("disconnect\n");
124 }
125 ppc6_wr_extout(PPCSTRUCT(pi),0x0);
126 ppc6_close(PPCSTRUCT(pi));
127}
128
129static int bpck6_test_port ( PIA *pi ) /* check for 8-bit port */
130{
131 if(verbose)
132 {
133 printk(KERN_DEBUG "PARPORT indicates modes=%x for lp=0x%lx\n",
134 ((struct pardevice*)(pi->pardev))->port->modes,
135 ((struct pardevice *)(pi->pardev))->port->base);
136 }
137
138 /*copy over duplicate stuff.. initialize state info*/
139 PPCSTRUCT(pi)->ppc_id=pi->unit;
140 PPCSTRUCT(pi)->lpt_addr=pi->port;
141
142#ifdef CONFIG_PARPORT_PC_MODULE
143#define CONFIG_PARPORT_PC
144#endif
145
146#ifdef CONFIG_PARPORT_PC
147	/* look at the parport device to see what modes we can use */
148 if(((struct pardevice *)(pi->pardev))->port->modes &
149 (PARPORT_MODE_EPP)
150 )
151 {
152 return 5; /* Can do EPP*/
153 }
154 else if(((struct pardevice *)(pi->pardev))->port->modes &
155 (PARPORT_MODE_TRISTATE)
156 )
157 {
158 return 2;
159 }
160 else /*Just flat SPP*/
161 {
162 return 1;
163 }
164#else
165	/* there is no way of knowing what kind of port we have;
166	   default to the highest mode possible */
167 return 5;
168#endif
169}
170
171static int bpck6_probe_unit ( PIA *pi )
172{
173 int out;
174
175 if(verbose)
176 {
177 printk(KERN_DEBUG "PROBE UNIT %x on port:%x\n",pi->unit,pi->port);
178 }
179
180 /*SET PPC UNIT NUMBER*/
181 PPCSTRUCT(pi)->ppc_id=pi->unit;
182
183 /*LOWER DOWN TO UNIDIRECTIONAL*/
184 PPCSTRUCT(pi)->mode=1;
185
186 out=ppc6_open(PPCSTRUCT(pi));
187
188 if(verbose)
189 {
190 printk(KERN_DEBUG "ppc_open returned %2x\n",out);
191 }
192
193 if(out)
194 {
195 ppc6_close(PPCSTRUCT(pi));
196 if(verbose)
197 {
198 printk(KERN_DEBUG "leaving probe\n");
199 }
200 return(1);
201 }
202 else
203 {
204 if(verbose)
205 {
206 printk(KERN_DEBUG "Failed open\n");
207 }
208 return(0);
209 }
210}
211
212static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose )
213{
214 char *mode_string[5]=
215 {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
216
217 printk("%s: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n",pi->device);
218 printk("%s: Copyright 2001 by Micro Solutions, Inc., DeKalb IL.\n",pi->device);
219 printk("%s: BACKPACK %s, Micro Solutions BACKPACK Drive at 0x%x\n",
220 pi->device,BACKPACK_VERSION,pi->port);
221 printk("%s: Unit: %d Mode:%d (%s) Delay %d\n",pi->device,
222 pi->unit,pi->mode,mode_string[pi->mode],pi->delay);
223}
224
225static int bpck6_init_proto(PIA *pi)
226{
227 Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL);
228
229 if (p) {
230 memset(p, 0, sizeof(Interface));
231 pi->private = (unsigned long)p;
232 return 0;
233 }
234
235 printk(KERN_ERR "%s: ERROR COULDN'T ALLOCATE MEMORY\n", pi->device);
236 return -1;
237}
238
239static void bpck6_release_proto(PIA *pi)
240{
241 kfree((void *)(pi->private));
242}
243
244static struct pi_protocol bpck6 = {
245 .owner = THIS_MODULE,
246 .name = "bpck6",
247 .max_mode = 5,
248 .epp_first = 2, /* 2-5 use epp (need 8 ports) */
249 .max_units = 255,
250 .write_regr = bpck6_write_regr,
251 .read_regr = bpck6_read_regr,
252 .write_block = bpck6_write_block,
253 .read_block = bpck6_read_block,
254 .connect = bpck6_connect,
255 .disconnect = bpck6_disconnect,
256 .test_port = bpck6_test_port,
257 .probe_unit = bpck6_probe_unit,
258 .log_adapter = bpck6_log_adapter,
259 .init_proto = bpck6_init_proto,
260 .release_proto = bpck6_release_proto,
261};
262
263static int __init bpck6_init(void)
264{
265 printk(KERN_INFO "bpck6: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n");
266 printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
267 if(verbose)
268 printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
269 return pi_register(&bpck6) - 1;
270}
271
272static void __exit bpck6_exit(void)
273{
274 pi_unregister(&bpck6);
275}
276
277MODULE_LICENSE("GPL");
278MODULE_AUTHOR("Micro Solutions Inc.");
279MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE");
280module_param(verbose, bool, 0644);
281module_init(bpck6_init)
282module_exit(bpck6_exit)
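bpck6_connect() above translates the paride mode index (0 = 4-bit, 1 = 8-bit, 2..4 = EPP-8/16/32, matching this file's mode_string) into the PPC library's own mode numbers: modes 2 and up become 4 + (mode - 2), mode 1 becomes 3, and mode 0 becomes 1. The standalone sketch below only tabulates that mapping; the PPC mode constants themselves live in ppc6lnx.c and are not interpreted here.

/* Sketch of the paride-mode -> PPC-library-mode translation used by
 * bpck6_connect(); purely illustrative, no hardware access. */
#include <stdio.h>

static int ppc_mode(int paride_mode)
{
	if (paride_mode >= 2)
		return 4 + paride_mode - 2;   /* EPP-8/16/32 -> 4/5/6 */
	if (paride_mode == 1)
		return 3;                     /* 8-bit */
	return 1;                             /* 4-bit */
}

int main(void)
{
	static const char *names[] =
		{ "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
	int m;

	for (m = 0; m < 5; m++)
		printf("paride mode %d (%s) -> ppc mode %d\n",
		       m, names[m], ppc_mode(m));
	return 0;
}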
diff --git a/drivers/block/paride/comm.c b/drivers/block/paride/comm.c
new file mode 100644
index 000000000000..d842956edf76
--- /dev/null
+++ b/drivers/block/paride/comm.c
@@ -0,0 +1,218 @@
1/*
2 comm.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 comm.c is a low-level protocol driver for some older models
6 of the DataStor "Commuter" parallel to IDE adapter. Some of
7 the parallel port devices marketed by Arista currently
8 use this adapter.
9*/
10
11/* Changes:
12
13 1.01 GRG 1998.05.05 init_proto, release_proto
14
15*/
16
17#define COMM_VERSION "1.01"
18
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/wait.h>
25#include <asm/io.h>
26
27#include "paride.h"
28
29/* mode codes: 0 nybble reads, 8-bit writes
30               1 8-bit reads and writes
31               2 8-bit EPP, 3 16-bit EPP, 4 32-bit EPP
32*/
33
34#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))
35
36#define P1 w2(5);w2(0xd);w2(0xd);w2(5);w2(4);
37#define P2 w2(5);w2(7);w2(7);w2(5);w2(4);
38
39/* cont = 0 - access the IDE register file
40 cont = 1 - access the IDE command set
41*/
42
43static int cont_map[2] = { 0x08, 0x10 };
44
45static int comm_read_regr( PIA *pi, int cont, int regr )
46
47{ int l, h, r;
48
49 r = regr + cont_map[cont];
50
51 switch (pi->mode) {
52
53 case 0: w0(r); P1; w0(0);
54 w2(6); l = r1(); w0(0x80); h = r1(); w2(4);
55 return j44(l,h);
56
57 case 1: w0(r+0x20); P1;
58 w0(0); w2(0x26); h = r0(); w2(4);
59 return h;
60
61 case 2:
62 case 3:
63 case 4: w3(r+0x20); r1();
64 w2(0x24); h = r4(); w2(4);
65 return h;
66
67 }
68 return -1;
69}
70
71static void comm_write_regr( PIA *pi, int cont, int regr, int val )
72
73{ int r;
74
75 r = regr + cont_map[cont];
76
77 switch (pi->mode) {
78
79 case 0:
80 case 1: w0(r); P1; w0(val); P2;
81 break;
82
83 case 2:
84 case 3:
85 case 4: w3(r); r1(); w4(val);
86 break;
87 }
88}
89
90static void comm_connect ( PIA *pi )
91
92{ pi->saved_r0 = r0();
93 pi->saved_r2 = r2();
94 w2(4); w0(0xff); w2(6);
95 w2(4); w0(0xaa); w2(6);
96 w2(4); w0(0x00); w2(6);
97 w2(4); w0(0x87); w2(6);
98 w2(4); w0(0xe0); w2(0xc); w2(0xc); w2(4);
99}
100
101static void comm_disconnect ( PIA *pi )
102
103{ w2(0); w2(0); w2(0); w2(4);
104 w0(pi->saved_r0);
105 w2(pi->saved_r2);
106}
107
108static void comm_read_block( PIA *pi, char * buf, int count )
109
110{ int i, l, h;
111
112 switch (pi->mode) {
113
114 case 0: w0(0x48); P1;
115 for(i=0;i<count;i++) {
116 w0(0); w2(6); l = r1();
117 w0(0x80); h = r1(); w2(4);
118 buf[i] = j44(l,h);
119 }
120 break;
121
122 case 1: w0(0x68); P1; w0(0);
123 for(i=0;i<count;i++) {
124 w2(0x26); buf[i] = r0(); w2(0x24);
125 }
126 w2(4);
127 break;
128
129 case 2: w3(0x68); r1(); w2(0x24);
130 for (i=0;i<count;i++) buf[i] = r4();
131 w2(4);
132 break;
133
134 case 3: w3(0x68); r1(); w2(0x24);
135 for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w();
136 w2(4);
137 break;
138
139 case 4: w3(0x68); r1(); w2(0x24);
140 for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l();
141 w2(4);
142 break;
143
144 }
145}
146
147/* NB: Watch out for the byte swapped writes ! */
148
149static void comm_write_block( PIA *pi, char * buf, int count )
150
151{ int k;
152
153 switch (pi->mode) {
154
155 case 0:
156 case 1: w0(0x68); P1;
157 for (k=0;k<count;k++) {
158 w2(5); w0(buf[k^1]); w2(7);
159 }
160 w2(5); w2(4);
161 break;
162
163 case 2: w3(0x48); r1();
164 for (k=0;k<count;k++) w4(buf[k^1]);
165 break;
166
167 case 3: w3(0x48); r1();
168 for (k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
169 break;
170
171 case 4: w3(0x48); r1();
172 for (k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
173 break;
174
175
176 }
177}
178
179static void comm_log_adapter( PIA *pi, char * scratch, int verbose )
180
181{ char *mode_string[5] = {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
182
183 printk("%s: comm %s, DataStor Commuter at 0x%x, ",
184 pi->device,COMM_VERSION,pi->port);
185 printk("mode %d (%s), delay %d\n",pi->mode,
186 mode_string[pi->mode],pi->delay);
187
188}
189
190static struct pi_protocol comm = {
191 .owner = THIS_MODULE,
192 .name = "comm",
193 .max_mode = 5,
194 .epp_first = 2,
195 .default_delay = 1,
196 .max_units = 1,
197 .write_regr = comm_write_regr,
198 .read_regr = comm_read_regr,
199 .write_block = comm_write_block,
200 .read_block = comm_read_block,
201 .connect = comm_connect,
202 .disconnect = comm_disconnect,
203 .log_adapter = comm_log_adapter,
204};
205
206static int __init comm_init(void)
207{
208 return pi_register(&comm)-1;
209}
210
211static void __exit comm_exit(void)
212{
213 pi_unregister(&comm);
214}
215
216MODULE_LICENSE("GPL");
217module_init(comm_init)
218module_exit(comm_exit)
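comm.c's j44(a,b) macro rebuilds one data byte from two status-register reads in nybble mode: bits 3..6 of the first read become the low nibble and bits 3..6 of the second become the high nibble. The sketch below is a user-space illustration only; pack_low()/pack_high() model how the adapter is assumed to present each nibble on those status bits, and the loop checks that j44 recovers every byte value.

/* User-space round-trip check of comm.c's j44() nibble recombination.
 * pack_low()/pack_high() model the adapter presenting each nibble on
 * status bits 3..6 (an assumption for illustration only). */
#include <assert.h>
#include <stdio.h>

#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))

static unsigned pack_low(unsigned v)  { return (v & 0x0f) << 3; }
static unsigned pack_high(unsigned v) { return (v & 0xf0) >> 1; }

int main(void)
{
	unsigned v;

	for (v = 0; v < 256; v++)
		assert(j44(pack_low(v), pack_high(v)) == v);

	printf("j44 round-trip OK for all 256 byte values\n");
	return 0;
}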
diff --git a/drivers/block/paride/dstr.c b/drivers/block/paride/dstr.c
new file mode 100644
index 000000000000..04d53bf58e8c
--- /dev/null
+++ b/drivers/block/paride/dstr.c
@@ -0,0 +1,233 @@
1/*
2 dstr.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 dstr.c is a low-level protocol driver for the
6 DataStor EP2000 parallel to IDE adapter chip.
7
8*/
9
10/* Changes:
11
12 1.01 GRG 1998.05.06 init_proto, release_proto
13
14*/
15
16#define DSTR_VERSION "1.01"
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/wait.h>
24#include <asm/io.h>
25
26#include "paride.h"
27
28/* mode codes: 0 nybble reads, 8-bit writes
29 1 8-bit reads and writes
30 2 8-bit EPP mode
31 3 EPP-16
32 4 EPP-32
33*/
34
35#define j44(a,b) (((a>>3)&0x07)|((~a>>4)&0x08)|((b<<1)&0x70)|((~b)&0x80))
36
37#define P1 w2(5);w2(0xd);w2(5);w2(4);
38#define P2 w2(5);w2(7);w2(5);w2(4);
39#define P3 w2(6);w2(4);w2(6);w2(4);
40
41/* cont = 0 - access the IDE register file
42 cont = 1 - access the IDE command set
43*/
44
45static int cont_map[2] = { 0x20, 0x40 };
46
47static int dstr_read_regr( PIA *pi, int cont, int regr )
48
49{ int a, b, r;
50
51 r = regr + cont_map[cont];
52
53 w0(0x81); P1;
54 if (pi->mode) { w0(0x11); } else { w0(1); }
55 P2; w0(r); P1;
56
57 switch (pi->mode) {
58
59 case 0: w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4);
60 return j44(a,b);
61
62 case 1: w0(0); w2(0x26); a = r0(); w2(4);
63 return a;
64
65 case 2:
66 case 3:
67 case 4: w2(0x24); a = r4(); w2(4);
68 return a;
69
70 }
71 return -1;
72}
73
74static void dstr_write_regr( PIA *pi, int cont, int regr, int val )
75
76{ int r;
77
78 r = regr + cont_map[cont];
79
80 w0(0x81); P1;
81 if (pi->mode >= 2) { w0(0x11); } else { w0(1); }
82 P2; w0(r); P1;
83
84 switch (pi->mode) {
85
86 case 0:
87 case 1: w0(val); w2(5); w2(7); w2(5); w2(4);
88 break;
89
90 case 2:
91 case 3:
92 case 4: w4(val);
93 break;
94 }
95}
96
97#define CCP(x) w0(0xff);w2(0xc);w2(4);\
98 w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);w0(0x78);\
99 w0(x);w2(5);w2(4);
100
101static void dstr_connect ( PIA *pi )
102
103{ pi->saved_r0 = r0();
104 pi->saved_r2 = r2();
105 w2(4); CCP(0xe0); w0(0xff);
106}
107
108static void dstr_disconnect ( PIA *pi )
109
110{ CCP(0x30);
111 w0(pi->saved_r0);
112 w2(pi->saved_r2);
113}
114
115static void dstr_read_block( PIA *pi, char * buf, int count )
116
117{ int k, a, b;
118
119 w0(0x81); P1;
120 if (pi->mode) { w0(0x19); } else { w0(9); }
121 P2; w0(0x82); P1; P3; w0(0x20); P1;
122
123 switch (pi->mode) {
124
125 case 0: for (k=0;k<count;k++) {
126 w2(6); a = r1(); w2(4);
127 w2(6); b = r1(); w2(4);
128 buf[k] = j44(a,b);
129 }
130 break;
131
132 case 1: w0(0);
133 for (k=0;k<count;k++) {
134 w2(0x26); buf[k] = r0(); w2(0x24);
135 }
136 w2(4);
137 break;
138
139 case 2: w2(0x24);
140 for (k=0;k<count;k++) buf[k] = r4();
141 w2(4);
142 break;
143
144 case 3: w2(0x24);
145 for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
146 w2(4);
147 break;
148
149 case 4: w2(0x24);
150 for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
151 w2(4);
152 break;
153
154 }
155}
156
157static void dstr_write_block( PIA *pi, char * buf, int count )
158
159{ int k;
160
161 w0(0x81); P1;
162 if (pi->mode) { w0(0x19); } else { w0(9); }
163 P2; w0(0x82); P1; P3; w0(0x20); P1;
164
165 switch (pi->mode) {
166
167 case 0:
168 case 1: for (k=0;k<count;k++) {
169 w2(5); w0(buf[k]); w2(7);
170 }
171 w2(5); w2(4);
172 break;
173
174 case 2: w2(0xc5);
175 for (k=0;k<count;k++) w4(buf[k]);
176 w2(0xc4);
177 break;
178
179 case 3: w2(0xc5);
180 for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
181 w2(0xc4);
182 break;
183
184 case 4: w2(0xc5);
185 for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
186 w2(0xc4);
187 break;
188
189 }
190}
191
192
193static void dstr_log_adapter( PIA *pi, char * scratch, int verbose )
194
195{ char *mode_string[5] = {"4-bit","8-bit","EPP-8",
196 "EPP-16","EPP-32"};
197
198 printk("%s: dstr %s, DataStor EP2000 at 0x%x, ",
199 pi->device,DSTR_VERSION,pi->port);
200 printk("mode %d (%s), delay %d\n",pi->mode,
201 mode_string[pi->mode],pi->delay);
202
203}
204
205static struct pi_protocol dstr = {
206 .owner = THIS_MODULE,
207 .name = "dstr",
208 .max_mode = 5,
209 .epp_first = 2,
210 .default_delay = 1,
211 .max_units = 1,
212 .write_regr = dstr_write_regr,
213 .read_regr = dstr_read_regr,
214 .write_block = dstr_write_block,
215 .read_block = dstr_read_block,
216 .connect = dstr_connect,
217 .disconnect = dstr_disconnect,
218 .log_adapter = dstr_log_adapter,
219};
220
221static int __init dstr_init(void)
222{
223 return pi_register(&dstr)-1;
224}
225
226static void __exit dstr_exit(void)
227{
228 pi_unregister(&dstr);
229}
230
231MODULE_LICENSE("GPL");
232module_init(dstr_init)
233module_exit(dstr_exit)
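Like most of the protocol modules here, dstr.c reaches the two halves of the IDE register space through a small offset table: the adapter-side register number is simply regr + cont_map[cont], with cont 0 selecting the task file and cont 1 the control/command block (offsets 0x20 and 0x40 for this chip). A trivial sketch of that address computation, using dstr's table values; the register labels in the comments follow the usual ATA layout and are for illustration only.

/* Sketch of the regr + cont_map[cont] addressing used by dstr.c
 * (and, with different offsets, by the other protocol modules). */
#include <stdio.h>

static const int cont_map[2] = { 0x20, 0x40 };	/* dstr.c values */

static int dstr_addr(int cont, int regr)
{
	return regr + cont_map[cont];
}

int main(void)
{
	/* task-file status register (reg 7) and alt-status/device-control
	 * (reg 6 of the control block), as examples */
	printf("status (cont=0, reg 7): 0x%02x\n", dstr_addr(0, 7));
	printf("devctl (cont=1, reg 6): 0x%02x\n", dstr_addr(1, 6));
	return 0;
}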
diff --git a/drivers/block/paride/epat.c b/drivers/block/paride/epat.c
new file mode 100644
index 000000000000..55d1c0a1fb90
--- /dev/null
+++ b/drivers/block/paride/epat.c
@@ -0,0 +1,340 @@
1/*
2 epat.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the low level protocol driver for the EPAT parallel
6 to IDE adapter from Shuttle Technologies. This adapter is
7 used in many popular parallel port disk products such as the
8 SyQuest EZ drives, the Avatar Shark and the Imation SuperDisk.
9
10*/
11
12/* Changes:
13
14 1.01 GRG 1998.05.06 init_proto, release_proto
15 1.02 Joshua b. Jore CPP(renamed), epat_connect, epat_disconnect
16
17*/
18
19#define EPAT_VERSION "1.02"
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/wait.h>
27#include <asm/io.h>
28
29#include "paride.h"
30
31#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0))
32#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0))
33
34static int epatc8;
35
36module_param(epatc8, int, 0);
37MODULE_PARM_DESC(epatc8, "support for the Shuttle EP1284 chip, "
38 "used in any recent Imation SuperDisk (LS-120) drive.");
39
40/* cont = 0 IDE register file
41 cont = 1 IDE control registers
42 cont = 2 internal EPAT registers
43*/
44
45static int cont_map[3] = { 0x18, 0x10, 0 };
46
47static void epat_write_regr( PIA *pi, int cont, int regr, int val)
48
49{ int r;
50
51 r = regr + cont_map[cont];
52
53 switch (pi->mode) {
54
55 case 0:
56 case 1:
57 case 2: w0(0x60+r); w2(1); w0(val); w2(4);
58 break;
59
60 case 3:
61 case 4:
62 case 5: w3(0x40+r); w4(val);
63 break;
64
65 }
66}
67
68static int epat_read_regr( PIA *pi, int cont, int regr )
69
70{ int a, b, r;
71
72 r = regr + cont_map[cont];
73
74 switch (pi->mode) {
75
76 case 0: w0(r); w2(1); w2(3);
77 a = r1(); w2(4); b = r1();
78 return j44(a,b);
79
80 case 1: w0(0x40+r); w2(1); w2(4);
81 a = r1(); b = r2(); w0(0xff);
82 return j53(a,b);
83
84 case 2: w0(0x20+r); w2(1); w2(0x25);
85 a = r0(); w2(4);
86 return a;
87
88 case 3:
89 case 4:
90 case 5: w3(r); w2(0x24); a = r4(); w2(4);
91 return a;
92
93 }
94 return -1; /* never gets here */
95}
96
97static void epat_read_block( PIA *pi, char * buf, int count )
98
99{ int k, ph, a, b;
100
101 switch (pi->mode) {
102
103 case 0: w0(7); w2(1); w2(3); w0(0xff);
104 ph = 0;
105 for(k=0;k<count;k++) {
106 if (k == count-1) w0(0xfd);
107 w2(6+ph); a = r1();
108 if (a & 8) b = a;
109 else { w2(4+ph); b = r1(); }
110 buf[k] = j44(a,b);
111 ph = 1 - ph;
112 }
113 w0(0); w2(4);
114 break;
115
116 case 1: w0(0x47); w2(1); w2(5); w0(0xff);
117 ph = 0;
118 for(k=0;k<count;k++) {
119 if (k == count-1) w0(0xfd);
120 w2(4+ph);
121 a = r1(); b = r2();
122 buf[k] = j53(a,b);
123 ph = 1 - ph;
124 }
125 w0(0); w2(4);
126 break;
127
128 case 2: w0(0x27); w2(1); w2(0x25); w0(0);
129 ph = 0;
130 for(k=0;k<count-1;k++) {
131 w2(0x24+ph);
132 buf[k] = r0();
133 ph = 1 - ph;
134 }
135 w2(0x26); w2(0x27); buf[count-1] = r0();
136 w2(0x25); w2(4);
137 break;
138
139 case 3: w3(0x80); w2(0x24);
140 for(k=0;k<count-1;k++) buf[k] = r4();
141 w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
142 w2(4);
143 break;
144
145 case 4: w3(0x80); w2(0x24);
146 for(k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
147 buf[count-2] = r4();
148 w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
149 w2(4);
150 break;
151
152 case 5: w3(0x80); w2(0x24);
153 for(k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
154 for(k=count-4;k<count-1;k++) buf[k] = r4();
155 w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
156 w2(4);
157 break;
158
159 }
160}
161
162static void epat_write_block( PIA *pi, char * buf, int count )
163
164{ int ph, k;
165
166 switch (pi->mode) {
167
168 case 0:
169 case 1:
170 case 2: w0(0x67); w2(1); w2(5);
171 ph = 0;
172 for(k=0;k<count;k++) {
173 w0(buf[k]);
174 w2(4+ph);
175 ph = 1 - ph;
176 }
177 w2(7); w2(4);
178 break;
179
180 case 3: w3(0xc0);
181 for(k=0;k<count;k++) w4(buf[k]);
182 w2(4);
183 break;
184
185 case 4: w3(0xc0);
186 for(k=0;k<(count/2);k++) w4w(((u16 *)buf)[k]);
187 w2(4);
188 break;
189
190 case 5: w3(0xc0);
191 for(k=0;k<(count/4);k++) w4l(((u32 *)buf)[k]);
192 w2(4);
193 break;
194
195 }
196}
197
198/* these macros access the EPAT registers in native addressing */
199
200#define WR(r,v) epat_write_regr(pi,2,r,v)
201#define RR(r) (epat_read_regr(pi,2,r))
202
203/* and these access the IDE task file */
204
205#define WRi(r,v) epat_write_regr(pi,0,r,v)
206#define RRi(r) (epat_read_regr(pi,0,r))
207
208/* FIXME: the CPP stuff should be fixed to handle multiple EPATs on a chain */
209
210#define CPP(x) w2(4);w0(0x22);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
211 w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
212
213static void epat_connect ( PIA *pi )
214
215{ pi->saved_r0 = r0();
216 pi->saved_r2 = r2();
217
218 /* Initialize the chip */
219 CPP(0);
220
221 if (epatc8) {
222 CPP(0x40);CPP(0xe0);
223 w0(0);w2(1);w2(4);
224 WR(0x8,0x12);WR(0xc,0x14);WR(0x12,0x10);
225 WR(0xe,0xf);WR(0xf,4);
226 /* WR(0xe,0xa);WR(0xf,4); */
227 WR(0xe,0xd);WR(0xf,0);
228 /* CPP(0x30); */
229 }
230
231 /* Connect to the chip */
232 CPP(0xe0);
233 w0(0);w2(1);w2(4); /* Idle into SPP */
234 if (pi->mode >= 3) {
235 w0(0);w2(1);w2(4);w2(0xc);
236 /* Request EPP */
237 w0(0x40);w2(6);w2(7);w2(4);w2(0xc);w2(4);
238 }
239
240 if (!epatc8) {
241 WR(8,0x10); WR(0xc,0x14); WR(0xa,0x38); WR(0x12,0x10);
242 }
243}
244
245static void epat_disconnect (PIA *pi)
246{ CPP(0x30);
247 w0(pi->saved_r0);
248 w2(pi->saved_r2);
249}
250
251static int epat_test_proto( PIA *pi, char * scratch, int verbose )
252
253{ int k, j, f, cc;
254 int e[2] = {0,0};
255
256 epat_connect(pi);
257 cc = RR(0xd);
258 epat_disconnect(pi);
259
260 epat_connect(pi);
261 for (j=0;j<2;j++) {
262 WRi(6,0xa0+j*0x10);
263 for (k=0;k<256;k++) {
264 WRi(2,k^0xaa);
265 WRi(3,k^0x55);
266 if (RRi(2) != (k^0xaa)) e[j]++;
267 }
268 }
269 epat_disconnect(pi);
270
271 f = 0;
272 epat_connect(pi);
273 WR(0x13,1); WR(0x13,0); WR(0xa,0x11);
274 epat_read_block(pi,scratch,512);
275
276 for (k=0;k<256;k++) {
277 if ((scratch[2*k] & 0xff) != k) f++;
278 if ((scratch[2*k+1] & 0xff) != (0xff-k)) f++;
279 }
280 epat_disconnect(pi);
281
282 if (verbose) {
283 printk("%s: epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n",
284 pi->device,pi->port,pi->mode,cc,e[0],e[1],f);
285 }
286
287 return (e[0] && e[1]) || f;
288}
289
290static void epat_log_adapter( PIA *pi, char * scratch, int verbose )
291
292{ int ver;
293 char *mode_string[6] =
294 {"4-bit","5/3","8-bit","EPP-8","EPP-16","EPP-32"};
295
296 epat_connect(pi);
297 WR(0xa,0x38); /* read the version code */
298 ver = RR(0xb);
299 epat_disconnect(pi);
300
301 printk("%s: epat %s, Shuttle EPAT chip %x at 0x%x, ",
302 pi->device,EPAT_VERSION,ver,pi->port);
303 printk("mode %d (%s), delay %d\n",pi->mode,
304 mode_string[pi->mode],pi->delay);
305
306}
307
308static struct pi_protocol epat = {
309 .owner = THIS_MODULE,
310 .name = "epat",
311 .max_mode = 6,
312 .epp_first = 3,
313 .default_delay = 1,
314 .max_units = 1,
315 .write_regr = epat_write_regr,
316 .read_regr = epat_read_regr,
317 .write_block = epat_write_block,
318 .read_block = epat_read_block,
319 .connect = epat_connect,
320 .disconnect = epat_disconnect,
321 .test_proto = epat_test_proto,
322 .log_adapter = epat_log_adapter,
323};
324
325static int __init epat_init(void)
326{
327#ifdef CONFIG_PARIDE_EPATC8
328 epatc8 = 1;
329#endif
330 return pi_register(&epat)-1;
331}
332
333static void __exit epat_exit(void)
334{
335 pi_unregister(&epat);
336}
337
338MODULE_LICENSE("GPL");
339module_init(epat_init)
340module_exit(epat_exit)
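epat_test_proto() above qualifies a mode by writing the complementary patterns k^0xAA and k^0x55 to the drive's sector-count and sector-number registers for every k and counting readback mismatches on the first of them; a non-zero count for both drive selects, or any mismatch in the 512-byte buffer read, fails the test. The sketch below reproduces just the pattern-loop bookkeeping against a fake register that can be flipped between "working" and "absent" (reads as 0xFF), so the expected error counts are 0 and 255.

/* Sketch of the k^0xAA / k^0x55 register read-back test used by
 * epat_test_proto() (and several other protocol modules).
 * fake_reg models one writable register; when "present" is 0 every
 * read returns 0xFF, as a missing device typically would. */
#include <stdio.h>

static int present;
static unsigned char fake_reg;

static void wr(unsigned char v)  { fake_reg = v; }
static unsigned char rd(void)    { return present ? fake_reg : 0xff; }

static int pattern_errors(void)
{
	int k, e = 0;

	for (k = 0; k < 256; k++) {
		wr(k ^ 0xaa);	/* the driver also writes k^0x55 to a   */
				/* second register; only one is read back */
		if (rd() != (k ^ 0xaa))
			e++;
	}
	return e;
}

int main(void)
{
	present = 1;
	printf("device present: %d errors\n", pattern_errors());   /* 0   */
	present = 0;
	printf("device absent : %d errors\n", pattern_errors());   /* 255 */
	return 0;
}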
diff --git a/drivers/block/paride/epia.c b/drivers/block/paride/epia.c
new file mode 100644
index 000000000000..0f2e0c292d82
--- /dev/null
+++ b/drivers/block/paride/epia.c
@@ -0,0 +1,316 @@
1/*
2 epia.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 epia.c is a low-level protocol driver for Shuttle Technologies
6 EPIA parallel to IDE adapter chip. This device is now obsolete
7 and has been replaced with the EPAT chip, which is supported
8 by epat.c, however, some devices based on EPIA are still
9 available.
10
11*/
12
13/* Changes:
14
15 1.01 GRG 1998.05.06 init_proto, release_proto
16 1.02 GRG 1998.06.17 support older versions of EPIA
17
18*/
19
20#define EPIA_VERSION "1.02"
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/wait.h>
28#include <asm/io.h>
29
30#include "paride.h"
31
32/* mode codes: 0 nybble reads on port 1, 8-bit writes
33 1 5/3 reads on ports 1 & 2, 8-bit writes
34 2 8-bit reads and writes
35 3 8-bit EPP mode
36 4 16-bit EPP
37 5 32-bit EPP
38*/
39
40#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0))
41#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0))
42
43/* cont = 0 IDE register file
44 cont = 1 IDE control registers
45*/
46
47static int cont_map[2] = { 0, 0x80 };
48
49static int epia_read_regr( PIA *pi, int cont, int regr )
50
51{ int a, b, r;
52
53 regr += cont_map[cont];
54
55 switch (pi->mode) {
56
57 case 0: r = regr^0x39;
58 w0(r); w2(1); w2(3); w0(r);
59 a = r1(); w2(1); b = r1(); w2(4);
60 return j44(a,b);
61
62 case 1: r = regr^0x31;
63 w0(r); w2(1); w0(r&0x37);
64 w2(3); w2(5); w0(r|0xf0);
65 a = r1(); b = r2(); w2(4);
66 return j53(a,b);
67
68 case 2: r = regr^0x29;
69 w0(r); w2(1); w2(0X21); w2(0x23);
70 a = r0(); w2(4);
71 return a;
72
73 case 3:
74 case 4:
75 case 5: w3(regr); w2(0x24); a = r4(); w2(4);
76 return a;
77
78 }
79 return -1;
80}
81
82static void epia_write_regr( PIA *pi, int cont, int regr, int val)
83
84{ int r;
85
86 regr += cont_map[cont];
87
88 switch (pi->mode) {
89
90 case 0:
91 case 1:
92 case 2: r = regr^0x19;
93 w0(r); w2(1); w0(val); w2(3); w2(4);
94 break;
95
96 case 3:
97 case 4:
98 case 5: r = regr^0x40;
99 w3(r); w4(val); w2(4);
100 break;
101 }
102}
103
104#define WR(r,v) epia_write_regr(pi,0,r,v)
105#define RR(r) (epia_read_regr(pi,0,r))
106
107/* The use of register 0x84 is entirely unclear - it seems to control
108 some EPP counters ... currently we know about 3 different block
109 sizes: the standard 512 byte reads and writes, 12 byte writes and
110   2048 byte reads (the last two being used in the CD-ROM drivers).
111*/
112
113static void epia_connect ( PIA *pi )
114
115{ pi->saved_r0 = r0();
116 pi->saved_r2 = r2();
117
118 w2(4); w0(0xa0); w0(0x50); w0(0xc0); w0(0x30); w0(0xa0); w0(0);
119 w2(1); w2(4);
120 if (pi->mode >= 3) {
121 w0(0xa); w2(1); w2(4); w0(0x82); w2(4); w2(0xc); w2(4);
122 w2(0x24); w2(0x26); w2(4);
123 }
124 WR(0x86,8);
125}
126
127static void epia_disconnect ( PIA *pi )
128
129{ /* WR(0x84,0x10); */
130 w0(pi->saved_r0);
131 w2(1); w2(4);
132 w0(pi->saved_r0);
133 w2(pi->saved_r2);
134}
135
136static void epia_read_block( PIA *pi, char * buf, int count )
137
138{ int k, ph, a, b;
139
140 switch (pi->mode) {
141
142 case 0: w0(0x81); w2(1); w2(3); w0(0xc1);
143 ph = 1;
144 for (k=0;k<count;k++) {
145 w2(2+ph); a = r1();
146 w2(4+ph); b = r1();
147 buf[k] = j44(a,b);
148 ph = 1 - ph;
149 }
150 w0(0); w2(4);
151 break;
152
153 case 1: w0(0x91); w2(1); w0(0x10); w2(3);
154 w0(0x51); w2(5); w0(0xd1);
155 ph = 1;
156 for (k=0;k<count;k++) {
157 w2(4+ph);
158 a = r1(); b = r2();
159 buf[k] = j53(a,b);
160 ph = 1 - ph;
161 }
162 w0(0); w2(4);
163 break;
164
165 case 2: w0(0x89); w2(1); w2(0x23); w2(0x21);
166 ph = 1;
167 for (k=0;k<count;k++) {
168 w2(0x24+ph);
169 buf[k] = r0();
170 ph = 1 - ph;
171 }
172 w2(6); w2(4);
173 break;
174
175 case 3: if (count > 512) WR(0x84,3);
176 w3(0); w2(0x24);
177 for (k=0;k<count;k++) buf[k] = r4();
178 w2(4); WR(0x84,0);
179 break;
180
181 case 4: if (count > 512) WR(0x84,3);
182 w3(0); w2(0x24);
183 for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
184 w2(4); WR(0x84,0);
185 break;
186
187 case 5: if (count > 512) WR(0x84,3);
188 w3(0); w2(0x24);
189 for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
190 w2(4); WR(0x84,0);
191 break;
192
193 }
194}
195
196static void epia_write_block( PIA *pi, char * buf, int count )
197
198{ int ph, k, last, d;
199
200 switch (pi->mode) {
201
202 case 0:
203 case 1:
204 case 2: w0(0xa1); w2(1); w2(3); w2(1); w2(5);
205 ph = 0; last = 0x8000;
206 for (k=0;k<count;k++) {
207 d = buf[k];
208 if (d != last) { last = d; w0(d); }
209 w2(4+ph);
210 ph = 1 - ph;
211 }
212 w2(7); w2(4);
213 break;
214
215 case 3: if (count < 512) WR(0x84,1);
216 w3(0x40);
217 for (k=0;k<count;k++) w4(buf[k]);
218 if (count < 512) WR(0x84,0);
219 break;
220
221 case 4: if (count < 512) WR(0x84,1);
222 w3(0x40);
223 for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
224 if (count < 512) WR(0x84,0);
225 break;
226
227 case 5: if (count < 512) WR(0x84,1);
228 w3(0x40);
229 for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
230 if (count < 512) WR(0x84,0);
231 break;
232
233 }
234
235}
236
237static int epia_test_proto( PIA *pi, char * scratch, int verbose )
238
239{ int j, k, f;
240 int e[2] = {0,0};
241
242 epia_connect(pi);
243 for (j=0;j<2;j++) {
244 WR(6,0xa0+j*0x10);
245 for (k=0;k<256;k++) {
246 WR(2,k^0xaa);
247 WR(3,k^0x55);
248 if (RR(2) != (k^0xaa)) e[j]++;
249 }
250 WR(2,1); WR(3,1);
251 }
252 epia_disconnect(pi);
253
254 f = 0;
255 epia_connect(pi);
256 WR(0x84,8);
257 epia_read_block(pi,scratch,512);
258 for (k=0;k<256;k++) {
259 if ((scratch[2*k] & 0xff) != ((k+1) & 0xff)) f++;
260 if ((scratch[2*k+1] & 0xff) != ((-2-k) & 0xff)) f++;
261 }
262 WR(0x84,0);
263 epia_disconnect(pi);
264
265 if (verbose) {
266 printk("%s: epia: port 0x%x, mode %d, test=(%d,%d,%d)\n",
267 pi->device,pi->port,pi->mode,e[0],e[1],f);
268 }
269
270 return (e[0] && e[1]) || f;
271
272}
273
274
275static void epia_log_adapter( PIA *pi, char * scratch, int verbose )
276
277{ char *mode_string[6] = {"4-bit","5/3","8-bit",
278 "EPP-8","EPP-16","EPP-32"};
279
280 printk("%s: epia %s, Shuttle EPIA at 0x%x, ",
281 pi->device,EPIA_VERSION,pi->port);
282 printk("mode %d (%s), delay %d\n",pi->mode,
283 mode_string[pi->mode],pi->delay);
284
285}
286
287static struct pi_protocol epia = {
288 .owner = THIS_MODULE,
289 .name = "epia",
290 .max_mode = 6,
291 .epp_first = 3,
292 .default_delay = 1,
293 .max_units = 1,
294 .write_regr = epia_write_regr,
295 .read_regr = epia_read_regr,
296 .write_block = epia_write_block,
297 .read_block = epia_read_block,
298 .connect = epia_connect,
299 .disconnect = epia_disconnect,
300 .test_proto = epia_test_proto,
301 .log_adapter = epia_log_adapter,
302};
303
304static int __init epia_init(void)
305{
306 return pi_register(&epia)-1;
307}
308
309static void __exit epia_exit(void)
310{
311 pi_unregister(&epia);
312}
313
314MODULE_LICENSE("GPL");
315module_init(epia_init)
316module_exit(epia_exit)
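In modes 0-2, epia_write_block() above only touches the data port when the outgoing byte actually changes (the `if (d != last)` test), presumably because the adapter latches the data lines and the control-line strobe alone advances the transfer; `last` starts at 0x8000, a value no byte can equal, so the first byte is always written. The sketch below models only that loop's bookkeeping, counting data-port writes versus per-byte strobes for a sample buffer.

/* Sketch of the "write the data port only when the byte changes" loop
 * in epia_write_block() (modes 0-2).  data_writes counts modelled w0()
 * calls and strobes counts the per-byte control-line toggles. */
#include <stdio.h>
#include <string.h>

static void count_writes(const char *buf, int count,
			 int *data_writes, int *strobes)
{
	int k, d, last = 0x8000;	/* sentinel: never equals a byte */

	*data_writes = 0;
	*strobes = 0;
	for (k = 0; k < count; k++) {
		d = buf[k];
		if (d != last) {
			last = d;
			(*data_writes)++;	/* models w0(d) */
		}
		(*strobes)++;			/* models w2(4+ph) */
	}
}

int main(void)
{
	char buf[16];
	int dw, st;

	memset(buf, 0, sizeof(buf));	/* a run of identical bytes */
	count_writes(buf, (int)sizeof(buf), &dw, &st);
	printf("all-zero buffer: %d data writes, %d strobes\n", dw, st);
	return 0;
}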
diff --git a/drivers/block/paride/fit2.c b/drivers/block/paride/fit2.c
new file mode 100644
index 000000000000..e0f0691d8bc2
--- /dev/null
+++ b/drivers/block/paride/fit2.c
@@ -0,0 +1,151 @@
1/*
2 fit2.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 fit2.c is a low-level protocol driver for the older version
6 of the Fidelity International Technology parallel port adapter.
7 This adapter is used in their TransDisk 2000 and older TransDisk
8 3000 portable hard-drives. As far as I can tell, this device
9 supports 4-bit mode _only_.
10
11 Newer models of the FIT products use an enhanced protocol.
12 The "fit3" protocol module should support current drives.
13
14*/
15
16#define FIT2_VERSION "1.0"
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/delay.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/wait.h>
24#include <asm/io.h>
25
26#include "paride.h"
27
28#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
29
30/* cont = 0 - access the IDE register file
31 cont = 1 - access the IDE command set
32
33NB: The FIT adapter does not appear to use the control registers.
34So, we map ALT_STATUS to STATUS and NO-OP writes to the device
35control register - this means that IDE reset will not work on these
36devices.
37
38*/
39
40static void fit2_write_regr( PIA *pi, int cont, int regr, int val)
41
42{ if (cont == 1) return;
43 w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4);
44}
45
46static int fit2_read_regr( PIA *pi, int cont, int regr )
47
48{ int a, b, r;
49
50 if (cont) {
51 if (regr != 6) return 0xff;
52 r = 7;
53 } else r = regr + 0x10;
54
55 w2(0xc); w0(r); w2(4); w2(5);
56 w0(0); a = r1();
57 w0(1); b = r1();
58 w2(4);
59
60 return j44(a,b);
61
62}
63
64static void fit2_read_block( PIA *pi, char * buf, int count )
65
66{ int k, a, b, c, d;
67
68 w2(0xc); w0(0x10);
69
70 for (k=0;k<count/4;k++) {
71
72 w2(4); w2(5);
73 w0(0); a = r1(); w0(1); b = r1();
74 w0(3); c = r1(); w0(2); d = r1();
75 buf[4*k+0] = j44(a,b);
76 buf[4*k+1] = j44(d,c);
77
78 w2(4); w2(5);
79 a = r1(); w0(3); b = r1();
80 w0(1); c = r1(); w0(0); d = r1();
81 buf[4*k+2] = j44(d,c);
82 buf[4*k+3] = j44(a,b);
83
84 }
85
86 w2(4);
87
88}
89
90static void fit2_write_block( PIA *pi, char * buf, int count )
91
92{ int k;
93
94
95 w2(0xc); w0(0);
96 for (k=0;k<count/2;k++) {
97 w2(4); w0(buf[2*k]);
98 w2(5); w0(buf[2*k+1]);
99 }
100 w2(4);
101}
102
103static void fit2_connect ( PIA *pi )
104
105{ pi->saved_r0 = r0();
106 pi->saved_r2 = r2();
107 w2(0xcc);
108}
109
110static void fit2_disconnect ( PIA *pi )
111
112{ w0(pi->saved_r0);
113 w2(pi->saved_r2);
114}
115
116static void fit2_log_adapter( PIA *pi, char * scratch, int verbose )
117
118{ printk("%s: fit2 %s, FIT 2000 adapter at 0x%x, delay %d\n",
119 pi->device,FIT2_VERSION,pi->port,pi->delay);
120
121}
122
123static struct pi_protocol fit2 = {
124 .owner = THIS_MODULE,
125 .name = "fit2",
126 .max_mode = 1,
127 .epp_first = 2,
128 .default_delay = 1,
129 .max_units = 1,
130 .write_regr = fit2_write_regr,
131 .read_regr = fit2_read_regr,
132 .write_block = fit2_write_block,
133 .read_block = fit2_read_block,
134 .connect = fit2_connect,
135 .disconnect = fit2_disconnect,
136 .log_adapter = fit2_log_adapter,
137};
138
139static int __init fit2_init(void)
140{
141 return pi_register(&fit2)-1;
142}
143
144static void __exit fit2_exit(void)
145{
146 pi_unregister(&fit2);
147}
148
149MODULE_LICENSE("GPL");
150module_init(fit2_init)
151module_exit(fit2_exit)
diff --git a/drivers/block/paride/fit3.c b/drivers/block/paride/fit3.c
new file mode 100644
index 000000000000..15400e7bc666
--- /dev/null
+++ b/drivers/block/paride/fit3.c
@@ -0,0 +1,211 @@
1/*
2 fit3.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 fit3.c is a low-level protocol driver for newer models
6 of the Fidelity International Technology parallel port adapter.
7 This adapter is used in their TransDisk 3000 portable
8 hard-drives, as well as CD-ROM, PD-CD and other devices.
9
10 The TD-2000 and certain older devices use a different protocol.
11 Try the fit2 protocol module with them.
12
13 NB: The FIT adapters do not appear to support the control
14 registers. So, we map ALT_STATUS to STATUS and NO-OP writes
15 to the device control register - this means that IDE reset
16 will not work on these devices.
17
18*/
19
20#define FIT3_VERSION "1.0"
21
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/delay.h>
25#include <linux/kernel.h>
26#include <linux/types.h>
27#include <linux/wait.h>
28#include <asm/io.h>
29
30#include "paride.h"
31
32#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))
33
34#define w7(byte) {out_p(7,byte);}
35#define r7() (in_p(7) & 0xff)
36
37/* cont = 0 - access the IDE register file
38 cont = 1 - access the IDE command set
39
40*/
41
42static void fit3_write_regr( PIA *pi, int cont, int regr, int val)
43
44{ if (cont == 1) return;
45
46 switch (pi->mode) {
47
48 case 0:
49 case 1: w2(0xc); w0(regr); w2(0x8); w2(0xc);
50 w0(val); w2(0xd);
51 w0(0); w2(0xc);
52 break;
53
54 case 2: w2(0xc); w0(regr); w2(0x8); w2(0xc);
55 w4(val); w4(0);
56 w2(0xc);
57 break;
58
59 }
60}
61
62static int fit3_read_regr( PIA *pi, int cont, int regr )
63
64{ int a, b;
65
66 if (cont) {
67 if (regr != 6) return 0xff;
68 regr = 7;
69 }
70
71 switch (pi->mode) {
72
73 case 0: w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc);
74 w2(0xd); a = r1();
75 w2(0xf); b = r1();
76 w2(0xc);
77 return j44(a,b);
78
79 case 1: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
80 w2(0xec); w2(0xee); w2(0xef); a = r0();
81 w2(0xc);
82 return a;
83
84 case 2: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
85 w2(0xec);
86 a = r4(); b = r4();
87 w2(0xc);
88 return a;
89
90 }
91 return -1;
92
93}
94
95static void fit3_read_block( PIA *pi, char * buf, int count )
96
97{ int k, a, b, c, d;
98
99 switch (pi->mode) {
100
101 case 0: w2(0xc); w0(0x10); w2(0x8); w2(0xc);
102 for (k=0;k<count/2;k++) {
103 w2(0xd); a = r1();
104 w2(0xf); b = r1();
105 w2(0xc); c = r1();
106 w2(0xe); d = r1();
107 buf[2*k ] = j44(a,b);
108 buf[2*k+1] = j44(c,d);
109 }
110 w2(0xc);
111 break;
112
113 case 1: w2(0xc); w0(0x90); w2(0x8); w2(0xc);
114 w2(0xec); w2(0xee);
115 for (k=0;k<count/2;k++) {
116 w2(0xef); a = r0();
117 w2(0xee); b = r0();
118 buf[2*k ] = a;
119 buf[2*k+1] = b;
120 }
121 w2(0xec);
122 w2(0xc);
123 break;
124
125 case 2: w2(0xc); w0(0x90); w2(0x8); w2(0xc);
126 w2(0xec);
127 for (k=0;k<count;k++) buf[k] = r4();
128 w2(0xc);
129 break;
130
131 }
132}
133
134static void fit3_write_block( PIA *pi, char * buf, int count )
135
136{ int k;
137
138 switch (pi->mode) {
139
140 case 0:
141 case 1: w2(0xc); w0(0); w2(0x8); w2(0xc);
142 for (k=0;k<count/2;k++) {
143 w0(buf[2*k ]); w2(0xd);
144 w0(buf[2*k+1]); w2(0xc);
145 }
146 break;
147
148 case 2: w2(0xc); w0(0); w2(0x8); w2(0xc);
149 for (k=0;k<count;k++) w4(buf[k]);
150 w2(0xc);
151 break;
152 }
153}
154
155static void fit3_connect ( PIA *pi )
156
157{ pi->saved_r0 = r0();
158 pi->saved_r2 = r2();
159 w2(0xc); w0(0); w2(0xa);
160 if (pi->mode == 2) {
161 w2(0xc); w0(0x9); w2(0x8); w2(0xc);
162 }
163}
164
165static void fit3_disconnect ( PIA *pi )
166
167{ w2(0xc); w0(0xa); w2(0x8); w2(0xc);
168 w0(pi->saved_r0);
169 w2(pi->saved_r2);
170}
171
172static void fit3_log_adapter( PIA *pi, char * scratch, int verbose )
173
174{ char *mode_string[3] = {"4-bit","8-bit","EPP"};
175
176 printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, "
177 "mode %d (%s), delay %d\n",
178 pi->device,FIT3_VERSION,pi->port,
179 pi->mode,mode_string[pi->mode],pi->delay);
180
181}
182
183static struct pi_protocol fit3 = {
184 .owner = THIS_MODULE,
185 .name = "fit3",
186 .max_mode = 3,
187 .epp_first = 2,
188 .default_delay = 1,
189 .max_units = 1,
190 .write_regr = fit3_write_regr,
191 .read_regr = fit3_read_regr,
192 .write_block = fit3_write_block,
193 .read_block = fit3_read_block,
194 .connect = fit3_connect,
195 .disconnect = fit3_disconnect,
196 .log_adapter = fit3_log_adapter,
197};
198
199static int __init fit3_init(void)
200{
201 return pi_register(&fit3)-1;
202}
203
204static void __exit fit3_exit(void)
205{
206 pi_unregister(&fit3);
207}
208
209MODULE_LICENSE("GPL");
210module_init(fit3_init)
211module_exit(fit3_exit)
diff --git a/drivers/block/paride/friq.c b/drivers/block/paride/friq.c
new file mode 100644
index 000000000000..5ea2904d2815
--- /dev/null
+++ b/drivers/block/paride/friq.c
@@ -0,0 +1,276 @@
1/*
2 friq.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License
4
5 friq.c is a low-level protocol driver for the Freecom "IQ"
6 parallel port IDE adapter. Early versions of this adapter
7 use the 'frpw' protocol.
8
9 Freecom uses this adapter in a battery powered external
10 CD-ROM drive. It is also used in LS-120 drives by
11 Maxell and Panasonic, and other devices.
12
13 The battery powered drive requires software support to
14 control the power to the drive. This module enables the
15 drive power when the high level driver (pcd) is loaded
16 and disables it when the module is unloaded. Note, if
17 the friq module is built in to the kernel, the power
18 will never be switched off, so other means should be
19 used to conserve battery power.
20
21*/
22
23/* Changes:
24
25 1.01 GRG 1998.12.20 Added support for soft power switch
26*/
27
28#define FRIQ_VERSION "1.01"
29
30#include <linux/module.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/kernel.h>
34#include <linux/types.h>
35#include <linux/wait.h>
36#include <asm/io.h>
37
38#include "paride.h"
39
40#define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\
41 w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x);
42
43#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))
44
45/* cont = 0 - access the IDE register file
46 cont = 1 - access the IDE command set
47*/
48
49static int cont_map[2] = { 0x08, 0x10 };
50
51static int friq_read_regr( PIA *pi, int cont, int regr )
52
53{ int h,l,r;
54
55 r = regr + cont_map[cont];
56
57 CMD(r);
58 w2(6); l = r1();
59 w2(4); h = r1();
60 w2(4);
61
62 return j44(l,h);
63
64}
65
66static void friq_write_regr( PIA *pi, int cont, int regr, int val)
67
68{ int r;
69
70 r = regr + cont_map[cont];
71
72 CMD(r);
73 w0(val);
74 w2(5);w2(7);w2(5);w2(4);
75}
76
77static void friq_read_block_int( PIA *pi, char * buf, int count, int regr )
78
79{ int h, l, k, ph;
80
81 switch(pi->mode) {
82
83 case 0: CMD(regr);
84 for (k=0;k<count;k++) {
85 w2(6); l = r1();
86 w2(4); h = r1();
87 buf[k] = j44(l,h);
88 }
89 w2(4);
90 break;
91
92 case 1: ph = 2;
93 CMD(regr+0xc0);
94 w0(0xff);
95 for (k=0;k<count;k++) {
96 w2(0xa4 + ph);
97 buf[k] = r0();
98 ph = 2 - ph;
99 }
100 w2(0xac); w2(0xa4); w2(4);
101 break;
102
103 case 2: CMD(regr+0x80);
104 for (k=0;k<count-2;k++) buf[k] = r4();
105 w2(0xac); w2(0xa4);
106 buf[count-2] = r4();
107 buf[count-1] = r4();
108 w2(4);
109 break;
110
111 case 3: CMD(regr+0x80);
112 for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
113 w2(0xac); w2(0xa4);
114 buf[count-2] = r4();
115 buf[count-1] = r4();
116 w2(4);
117 break;
118
119 case 4: CMD(regr+0x80);
120 for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
121 buf[count-4] = r4();
122 buf[count-3] = r4();
123 w2(0xac); w2(0xa4);
124 buf[count-2] = r4();
125 buf[count-1] = r4();
126 w2(4);
127 break;
128
129 }
130}
131
132static void friq_read_block( PIA *pi, char * buf, int count)
133
134{ friq_read_block_int(pi,buf,count,0x08);
135}
136
137static void friq_write_block( PIA *pi, char * buf, int count )
138
139{ int k;
140
141 switch(pi->mode) {
142
143 case 0:
144 case 1: CMD(8); w2(5);
145 for (k=0;k<count;k++) {
146 w0(buf[k]);
147 w2(7);w2(5);
148 }
149 w2(4);
150 break;
151
152 case 2: CMD(0xc8); w2(5);
153 for (k=0;k<count;k++) w4(buf[k]);
154 w2(4);
155 break;
156
157 case 3: CMD(0xc8); w2(5);
158 for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
159 w2(4);
160 break;
161
162 case 4: CMD(0xc8); w2(5);
163 for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
164 w2(4);
165 break;
166 }
167}
168
169static void friq_connect ( PIA *pi )
170
171{ pi->saved_r0 = r0();
172 pi->saved_r2 = r2();
173 w2(4);
174}
175
176static void friq_disconnect ( PIA *pi )
177
178{ CMD(0x20);
179 w0(pi->saved_r0);
180 w2(pi->saved_r2);
181}
182
183static int friq_test_proto( PIA *pi, char * scratch, int verbose )
184
185{ int j, k, r;
186 int e[2] = {0,0};
187
188 pi->saved_r0 = r0();
189 w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */
190 udelay(500);
191 w0(pi->saved_r0);
192
193 friq_connect(pi);
194 for (j=0;j<2;j++) {
195 friq_write_regr(pi,0,6,0xa0+j*0x10);
196 for (k=0;k<256;k++) {
197 friq_write_regr(pi,0,2,k^0xaa);
198 friq_write_regr(pi,0,3,k^0x55);
199 if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
200 }
201 }
202 friq_disconnect(pi);
203
204 friq_connect(pi);
205 friq_read_block_int(pi,scratch,512,0x10);
206 r = 0;
207 for (k=0;k<128;k++) if (scratch[k] != k) r++;
208 friq_disconnect(pi);
209
210 if (verbose) {
211 printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
212 pi->device,pi->port,pi->mode,e[0],e[1],r);
213 }
214
215 return (r || (e[0] && e[1]));
216}
217
218
219static void friq_log_adapter( PIA *pi, char * scratch, int verbose )
220
221{ char *mode_string[5] = {"4-bit","8-bit",
222 "EPP-8","EPP-16","EPP-32"};
223
224 printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device,
225 FRIQ_VERSION,pi->port);
226 printk("mode %d (%s), delay %d\n",pi->mode,
227 mode_string[pi->mode],pi->delay);
228
229 pi->private = 1;
230 friq_connect(pi);
231 CMD(0x9e); /* disable sleep timer */
232 friq_disconnect(pi);
233
234}
235
236static void friq_release_proto( PIA *pi)
237{
238 if (pi->private) { /* turn off the power */
239 friq_connect(pi);
240 CMD(0x1d); CMD(0x1e);
241 friq_disconnect(pi);
242 pi->private = 0;
243 }
244}
245
246static struct pi_protocol friq = {
247 .owner = THIS_MODULE,
248 .name = "friq",
249 .max_mode = 5,
250 .epp_first = 2,
251 .default_delay = 1,
252 .max_units = 1,
253 .write_regr = friq_write_regr,
254 .read_regr = friq_read_regr,
255 .write_block = friq_write_block,
256 .read_block = friq_read_block,
257 .connect = friq_connect,
258 .disconnect = friq_disconnect,
259 .test_proto = friq_test_proto,
260 .log_adapter = friq_log_adapter,
261 .release_proto = friq_release_proto,
262};
263
264static int __init friq_init(void)
265{
266 return pi_register(&friq)-1;
267}
268
269static void __exit friq_exit(void)
270{
271 pi_unregister(&friq);
272}
273
274MODULE_LICENSE("GPL");
275module_init(friq_init)
276module_exit(friq_exit)
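friq_log_adapter() sets pi->private to 1 after enabling the drive's power, and friq_release_proto() only issues the power-off commands while that flag is still set, clearing it afterwards so the teardown runs at most once per adapter instance. A minimal sketch of that flag-guarded teardown pattern, with power_off() as a hypothetical stand-in for the CMD(0x1d)/CMD(0x1e) sequence:

/* Sketch of friq's flag-guarded power teardown: power is switched off
 * only if it was switched on earlier, and at most once.
 * power_off() is a hypothetical stand-in for the driver's command
 * sequence. */
#include <stdio.h>

struct adapter {
	unsigned long private;	/* 1 = power was enabled */
};

static void power_off(void)
{
	printf("power off commands issued\n");
}

static void release_proto(struct adapter *a)
{
	if (a->private) {
		power_off();
		a->private = 0;
	}
}

int main(void)
{
	struct adapter a = { .private = 1 };

	release_proto(&a);	/* issues the power-off commands */
	release_proto(&a);	/* second call is a no-op */
	return 0;
}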
diff --git a/drivers/block/paride/frpw.c b/drivers/block/paride/frpw.c
new file mode 100644
index 000000000000..56b3824b1538
--- /dev/null
+++ b/drivers/block/paride/frpw.c
@@ -0,0 +1,313 @@
1/*
2 frpw.c (c) 1996-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License
4
5 frpw.c is a low-level protocol driver for the Freecom "Power"
6 parallel port IDE adapter.
7
8 Some applications of this adapter may require a "printer" reset
9 prior to loading the driver. This can be done by loading and
10 unloading the "lp" driver, or it can be done by this driver
11 if you define FRPW_HARD_RESET. The latter is not recommended
12 as it may upset devices on other ports.
13
14*/
15
16/* Changes:
17
18 1.01 GRG 1998.05.06 init_proto, release_proto
19 fix chip detect
20 added EPP-16 and EPP-32
21 1.02 GRG 1998.09.23 added hard reset to initialisation process
22 1.03 GRG 1998.12.14 made hard reset conditional
23
24*/
25
26#define FRPW_VERSION "1.03"
27
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/kernel.h>
32#include <linux/types.h>
33#include <linux/wait.h>
34#include <asm/io.h>
35
36#include "paride.h"
37
38#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
39#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))
40
41/* cont = 0 - access the IDE register file
42 cont = 1 - access the IDE command set
43*/
44
45static int cont_map[2] = { 0x08, 0x10 };
46
47static int frpw_read_regr( PIA *pi, int cont, int regr )
48
49{ int h,l,r;
50
51 r = regr + cont_map[cont];
52
53 w2(4);
54 w0(r); cec4;
55 w2(6); l = r1();
56 w2(4); h = r1();
57 w2(4);
58
59 return j44(l,h);
60
61}
62
63static void frpw_write_regr( PIA *pi, int cont, int regr, int val)
64
65{ int r;
66
67 r = regr + cont_map[cont];
68
69 w2(4); w0(r); cec4;
70 w0(val);
71 w2(5);w2(7);w2(5);w2(4);
72}
73
74static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr )
75
76{ int h, l, k, ph;
77
78 switch(pi->mode) {
79
80 case 0: w2(4); w0(regr); cec4;
81 for (k=0;k<count;k++) {
82 w2(6); l = r1();
83 w2(4); h = r1();
84 buf[k] = j44(l,h);
85 }
86 w2(4);
87 break;
88
89 case 1: ph = 2;
90 w2(4); w0(regr + 0xc0); cec4;
91 w0(0xff);
92 for (k=0;k<count;k++) {
93 w2(0xa4 + ph);
94 buf[k] = r0();
95 ph = 2 - ph;
96 }
97 w2(0xac); w2(0xa4); w2(4);
98 break;
99
100 case 2: w2(4); w0(regr + 0x80); cec4;
101 for (k=0;k<count;k++) buf[k] = r4();
102 w2(0xac); w2(0xa4);
103 w2(4);
104 break;
105
106 case 3: w2(4); w0(regr + 0x80); cec4;
107 for (k=0;k<count-2;k++) buf[k] = r4();
108 w2(0xac); w2(0xa4);
109 buf[count-2] = r4();
110 buf[count-1] = r4();
111 w2(4);
112 break;
113
114 case 4: w2(4); w0(regr + 0x80); cec4;
115 for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
116 w2(0xac); w2(0xa4);
117 buf[count-2] = r4();
118 buf[count-1] = r4();
119 w2(4);
120 break;
121
122 case 5: w2(4); w0(regr + 0x80); cec4;
123 for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
124 buf[count-4] = r4();
125 buf[count-3] = r4();
126 w2(0xac); w2(0xa4);
127 buf[count-2] = r4();
128 buf[count-1] = r4();
129 w2(4);
130 break;
131
132 }
133}
134
135static void frpw_read_block( PIA *pi, char * buf, int count)
136
137{ frpw_read_block_int(pi,buf,count,0x08);
138}
139
140static void frpw_write_block( PIA *pi, char * buf, int count )
141
142{ int k;
143
144 switch(pi->mode) {
145
146 case 0:
147 case 1:
148 case 2: w2(4); w0(8); cec4; w2(5);
149 for (k=0;k<count;k++) {
150 w0(buf[k]);
151 w2(7);w2(5);
152 }
153 w2(4);
154 break;
155
156 case 3: w2(4); w0(0xc8); cec4; w2(5);
157 for (k=0;k<count;k++) w4(buf[k]);
158 w2(4);
159 break;
160
161 case 4: w2(4); w0(0xc8); cec4; w2(5);
162 for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
163 w2(4);
164 break;
165
166 case 5: w2(4); w0(0xc8); cec4; w2(5);
167 for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
168 w2(4);
169 break;
170 }
171}
172
173static void frpw_connect ( PIA *pi )
174
175{ pi->saved_r0 = r0();
176 pi->saved_r2 = r2();
177 w2(4);
178}
179
180static void frpw_disconnect ( PIA *pi )
181
182{ w2(4); w0(0x20); cec4;
183 w0(pi->saved_r0);
184 w2(pi->saved_r2);
185}
186
187/* Stub logic to see if PNP string is available - used to distinguish
188 between the Xilinx and ASIC implementations of the Freecom adapter.
189*/
190
191static int frpw_test_pnp ( PIA *pi )
192
193/* returns chip_type: 0 = Xilinx, 1 = ASIC */
194
195{ int olddelay, a, b;
196
197#ifdef FRPW_HARD_RESET
198 w0(0); w2(8); udelay(50); w2(0xc); /* parallel bus reset */
199 mdelay(1500);
200#endif
201
202 olddelay = pi->delay;
203 pi->delay = 10;
204
205 pi->saved_r0 = r0();
206 pi->saved_r2 = r2();
207
208 w2(4); w0(4); w2(6); w2(7);
209 a = r1() & 0xff; w2(4); b = r1() & 0xff;
210 w2(0xc); w2(0xe); w2(4);
211
212 pi->delay = olddelay;
213 w0(pi->saved_r0);
214 w2(pi->saved_r2);
215
216 return ((~a&0x40) && (b&0x40));
217}
218
219/* We use the pi->private to remember the result of the PNP test.
220 To make this work, private = port*2 + chip. Yes, I know it's
221 a hack :-(
222*/
223
224static int frpw_test_proto( PIA *pi, char * scratch, int verbose )
225
226{ int j, k, r;
227 int e[2] = {0,0};
228
229 if ((pi->private>>1) != pi->port)
230 pi->private = frpw_test_pnp(pi) + 2*pi->port;
231
232 if (((pi->private%2) == 0) && (pi->mode > 2)) {
233 if (verbose)
234 printk("%s: frpw: Xilinx does not support mode %d\n",
235 pi->device, pi->mode);
236 return 1;
237 }
238
239 if (((pi->private%2) == 1) && (pi->mode == 2)) {
240 if (verbose)
241 printk("%s: frpw: ASIC does not support mode 2\n",
242 pi->device);
243 return 1;
244 }
245
246 frpw_connect(pi);
247 for (j=0;j<2;j++) {
248 frpw_write_regr(pi,0,6,0xa0+j*0x10);
249 for (k=0;k<256;k++) {
250 frpw_write_regr(pi,0,2,k^0xaa);
251 frpw_write_regr(pi,0,3,k^0x55);
252 if (frpw_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
253 }
254 }
255 frpw_disconnect(pi);
256
257 frpw_connect(pi);
258 frpw_read_block_int(pi,scratch,512,0x10);
259 r = 0;
260 for (k=0;k<128;k++) if (scratch[k] != k) r++;
261 frpw_disconnect(pi);
262
263 if (verbose) {
264 printk("%s: frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
265 pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r);
266 }
267
268 return (r || (e[0] && e[1]));
269}
270
271
272static void frpw_log_adapter( PIA *pi, char * scratch, int verbose )
273
274{ char *mode_string[6] = {"4-bit","8-bit","EPP",
275 "EPP-8","EPP-16","EPP-32"};
276
277 printk("%s: frpw %s, Freecom (%s) adapter at 0x%x, ", pi->device,
278 FRPW_VERSION,((pi->private%2) == 0)?"Xilinx":"ASIC",pi->port);
279 printk("mode %d (%s), delay %d\n",pi->mode,
280 mode_string[pi->mode],pi->delay);
281
282}
283
284static struct pi_protocol frpw = {
285 .owner = THIS_MODULE,
286 .name = "frpw",
287 .max_mode = 6,
288 .epp_first = 2,
289 .default_delay = 2,
290 .max_units = 1,
291 .write_regr = frpw_write_regr,
292 .read_regr = frpw_read_regr,
293 .write_block = frpw_write_block,
294 .read_block = frpw_read_block,
295 .connect = frpw_connect,
296 .disconnect = frpw_disconnect,
297 .test_proto = frpw_test_proto,
298 .log_adapter = frpw_log_adapter,
299};
300
301static int __init frpw_init(void)
302{
303 return pi_register(&frpw)-1;
304}
305
306static void __exit frpw_exit(void)
307{
308 pi_unregister(&frpw);
309}
310
311MODULE_LICENSE("GPL");
312module_init(frpw_init)
313module_exit(frpw_exit)
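frpw_test_proto() caches the result of the chip probe in pi->private using the admittedly hacky encoding private = 2*port + chip: the low bit holds the chip type (0 = Xilinx, 1 = ASIC) and the remaining bits hold the port the result belongs to, so a value recorded for a different port forces a re-probe. A small sketch of that encode/decode, with probe_chip() as a hypothetical stand-in for frpw_test_pnp():

/* Sketch of frpw's private = 2*port + chip caching of the chip probe.
 * probe_chip() is a hypothetical stand-in for frpw_test_pnp(). */
#include <stdio.h>

static int probe_count;

static int probe_chip(int port)
{
	probe_count++;
	return port == 0x378;	/* pretend: ASIC on 0x378, Xilinx elsewhere */
}

static int cached_chip(unsigned long *private, int port)
{
	if ((int)(*private >> 1) != port)		/* cache miss */
		*private = probe_chip(port) + 2UL * port;
	return (int)(*private % 2);			/* 0 = Xilinx, 1 = ASIC */
}

int main(void)
{
	unsigned long private = 0;

	printf("chip on 0x378: %d\n", cached_chip(&private, 0x378));
	printf("chip on 0x378: %d (cached, probes=%d)\n",
	       cached_chip(&private, 0x378), probe_count);
	printf("chip on 0x278: %d (probes=%d)\n",
	       cached_chip(&private, 0x278), probe_count);
	return 0;
}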
diff --git a/drivers/block/paride/jumbo b/drivers/block/paride/jumbo
new file mode 100644
index 000000000000..e793b9cb7e72
--- /dev/null
+++ b/drivers/block/paride/jumbo
@@ -0,0 +1,70 @@
1#!/bin/sh
2#
3# This script can be used to build "jumbo" modules that contain the
4# base PARIDE support, one protocol module and one high-level driver.
5#
6echo -n "High level driver [pcd] : "
7read X
8HLD=${X:-pcd}
9#
10echo -n "Protocol module [bpck] : "
11read X
12PROTO=${X:-bpck}
13#
14echo -n "Use MODVERSIONS [y] ? "
15read X
16UMODV=${X:-y}
17#
18echo -n "For SMP kernel [n] ? "
19read X
20USMP=${X:-n}
21#
22echo -n "Support PARPORT [n] ? "
23read X
24UPARP=${X:-n}
25#
26echo
27#
28case $USMP in
29 y* | Y* ) FSMP="-DCONFIG_SMP"
30 ;;
31 *) FSMP=""
32 ;;
33esac
34#
35MODI="-include ../../../include/linux/modversions.h"
36#
37case $UMODV in
38 y* | Y* ) FMODV="-DMODVERSIONS $MODI"
39 ;;
40 *) FMODV=""
41 ;;
42esac
43#
44case $UPARP in
45 y* | Y* ) FPARP="-DCONFIG_PARPORT"
46 ;;
47 *) FPARP=""
48 ;;
49esac
50#
51TARG=$HLD-$PROTO.o
52FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr [a-z] [A-Z]`
53FK="-D__KERNEL__ -I ../../../include"
54FLCH=-D_LINUX_CONFIG_H
55#
56echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
57cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
58#
59echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
60cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
61#
62echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
63cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
64#
65echo ld -r -o $TARG Jp.o Jb.o Jd.o
66ld -r -o $TARG Jp.o Jb.o Jd.o
67#
68#
69rm Jp.o Jb.o Jd.o
70#
diff --git a/drivers/block/paride/kbic.c b/drivers/block/paride/kbic.c
new file mode 100644
index 000000000000..d983bcea76fe
--- /dev/null
+++ b/drivers/block/paride/kbic.c
@@ -0,0 +1,297 @@
1/*
2 kbic.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is a low-level driver for the KBIC-951A and KBIC-971A
6 parallel to IDE adapter chips from KingByte Information Systems.
7
8    The chips are almost identical; however, the wakeup code
9 required for the 971A interferes with the correct operation of
10 the 951A, so this driver registers itself twice, once for
11 each chip.
12
13*/
14
15/* Changes:
16
17 1.01 GRG 1998.05.06 init_proto, release_proto
18
19*/
20
21#define KBIC_VERSION "1.01"
22
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/wait.h>
29#include <asm/io.h>
30
31#include "paride.h"
32
33#define r12w() (delay_p,inw(pi->port+1)&0xffff)
34
35#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88)
36#define j53(w) (((w>>3)&0x1f)|((w>>4)&0xe0))
37
38
39/* cont = 0 - access the IDE register file
40 cont = 1 - access the IDE command set
41*/
42
43static int cont_map[2] = { 0x80, 0x40 };
44
45static int kbic_read_regr( PIA *pi, int cont, int regr )
46
47{ int a, b, s;
48
49 s = cont_map[cont];
50
51 switch (pi->mode) {
52
53 case 0: w0(regr|0x18|s); w2(4); w2(6); w2(4); w2(1); w0(8);
54 a = r1(); w0(0x28); b = r1(); w2(4);
55 return j44(a,b);
56
57 case 1: w0(regr|0x38|s); w2(4); w2(6); w2(4); w2(5); w0(8);
58 a = r12w(); w2(4);
59 return j53(a);
60
61 case 2: w0(regr|0x08|s); w2(4); w2(6); w2(4); w2(0xa5); w2(0xa1);
62 a = r0(); w2(4);
63 return a;
64
65 case 3:
66 case 4:
67 case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr);
68 a = r4(); b = r4(); w2(4); w2(0); w2(4);
69 return a;
70
71 }
72 return -1;
73}
74
75static void kbic_write_regr( PIA *pi, int cont, int regr, int val)
76
77{ int s;
78
79 s = cont_map[cont];
80
81 switch (pi->mode) {
82
83 case 0:
84 case 1:
85 case 2: w0(regr|0x10|s); w2(4); w2(6); w2(4);
86 w0(val); w2(5); w2(4);
87 break;
88
89 case 3:
90 case 4:
91 case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr);
92 w4(val); w4(val);
93 w2(4); w2(0); w2(4);
94 break;
95
96 }
97}
98
99static void k951_connect ( PIA *pi )
100
101{ pi->saved_r0 = r0();
102 pi->saved_r2 = r2();
103 w2(4);
104}
105
106static void k951_disconnect ( PIA *pi )
107
108{ w0(pi->saved_r0);
109 w2(pi->saved_r2);
110}
111
112#define CCP(x) w2(0xc4);w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);\
113 w0(0x78);w0(x);w2(0xc5);w2(0xc4);w0(0xff);
114
115static void k971_connect ( PIA *pi )
116
117{ pi->saved_r0 = r0();
118 pi->saved_r2 = r2();
119 CCP(0x20);
120 w2(4);
121}
122
123static void k971_disconnect ( PIA *pi )
124
125{ CCP(0x30);
126 w0(pi->saved_r0);
127 w2(pi->saved_r2);
128}
129
130/* counts must be congruent to 0 MOD 4, but all known applications
131 have this property.
132*/
133
134static void kbic_read_block( PIA *pi, char * buf, int count )
135
136{ int k, a, b;
137
138 switch (pi->mode) {
139
140 case 0: w0(0x98); w2(4); w2(6); w2(4);
141 for (k=0;k<count/2;k++) {
142 w2(1); w0(8); a = r1();
143 w0(0x28); b = r1();
144 buf[2*k] = j44(a,b);
145 w2(5); b = r1();
146 w0(8); a = r1();
147 buf[2*k+1] = j44(a,b);
148 w2(4);
149 }
150 break;
151
152 case 1: w0(0xb8); w2(4); w2(6); w2(4);
153 for (k=0;k<count/4;k++) {
154 w0(0xb8);
155 w2(4); w2(5);
156 w0(8); buf[4*k] = j53(r12w());
157 w0(0xb8); buf[4*k+1] = j53(r12w());
158 w2(4); w2(5);
159 buf[4*k+3] = j53(r12w());
160 w0(8); buf[4*k+2] = j53(r12w());
161 }
162 w2(4);
163 break;
164
165 case 2: w0(0x88); w2(4); w2(6); w2(4);
166 for (k=0;k<count/2;k++) {
167 w2(0xa0); w2(0xa1); buf[2*k] = r0();
168 w2(0xa5); buf[2*k+1] = r0();
169 }
170 w2(4);
171 break;
172
173 case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
174 for (k=0;k<count;k++) buf[k] = r4();
175 w2(4); w2(0); w2(4);
176 break;
177
178 case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
179 for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
180 w2(4); w2(0); w2(4);
181 break;
182
183 case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
184 for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
185 w2(4); w2(0); w2(4);
186 break;
187
188
189 }
190}
191
192static void kbic_write_block( PIA *pi, char * buf, int count )
193
194{ int k;
195
196 switch (pi->mode) {
197
198 case 0:
199 case 1:
200 case 2: w0(0x90); w2(4); w2(6); w2(4);
201 for(k=0;k<count/2;k++) {
202 w0(buf[2*k+1]); w2(0); w2(4);
203 w0(buf[2*k]); w2(5); w2(4);
204 }
205 break;
206
207 case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
208 for(k=0;k<count/2;k++) {
209 w4(buf[2*k+1]);
210 w4(buf[2*k]);
211 }
212 w2(4); w2(0); w2(4);
213 break;
214
215 case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
216 for(k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
217 w2(4); w2(0); w2(4);
218 break;
219
220 case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
221 for(k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
222 w2(4); w2(0); w2(4);
223 break;
224
225 }
226
227}
228
229static void kbic_log_adapter( PIA *pi, char * scratch,
230 int verbose, char * chip )
231
232{ char *mode_string[6] = {"4-bit","5/3","8-bit",
233 "EPP-8","EPP-16","EPP-32"};
234
235 printk("%s: kbic %s, KingByte %s at 0x%x, ",
236 pi->device,KBIC_VERSION,chip,pi->port);
237 printk("mode %d (%s), delay %d\n",pi->mode,
238 mode_string[pi->mode],pi->delay);
239
240}
241
242static void k951_log_adapter( PIA *pi, char * scratch, int verbose )
243
244{ kbic_log_adapter(pi,scratch,verbose,"KBIC-951A");
245}
246
247static void k971_log_adapter( PIA *pi, char * scratch, int verbose )
248
249{ kbic_log_adapter(pi,scratch,verbose,"KBIC-971A");
250}
251
252static struct pi_protocol k951 = {
253 .owner = THIS_MODULE,
254 .name = "k951",
255 .max_mode = 6,
256 .epp_first = 3,
257 .default_delay = 1,
258 .max_units = 1,
259 .write_regr = kbic_write_regr,
260 .read_regr = kbic_read_regr,
261 .write_block = kbic_write_block,
262 .read_block = kbic_read_block,
263 .connect = k951_connect,
264 .disconnect = k951_disconnect,
265 .log_adapter = k951_log_adapter,
266};
267
268static struct pi_protocol k971 = {
269 .owner = THIS_MODULE,
270 .name = "k971",
271 .max_mode = 6,
272 .epp_first = 3,
273 .default_delay = 1,
274 .max_units = 1,
275 .write_regr = kbic_write_regr,
276 .read_regr = kbic_read_regr,
277 .write_block = kbic_write_block,
278 .read_block = kbic_read_block,
279 .connect = k971_connect,
280 .disconnect = k971_disconnect,
281 .log_adapter = k971_log_adapter,
282};
283
284static int __init kbic_init(void)
285{
286 return (pi_register(&k951)||pi_register(&k971))-1;
287}
288
289static void __exit kbic_exit(void)
290{
291 pi_unregister(&k951);
292 pi_unregister(&k971);
293}
294
295MODULE_LICENSE("GPL");
296module_init(kbic_init)
297module_exit(kbic_exit)
diff --git a/drivers/block/paride/ktti.c b/drivers/block/paride/ktti.c
new file mode 100644
index 000000000000..6c7edbfba9a0
--- /dev/null
+++ b/drivers/block/paride/ktti.c
@@ -0,0 +1,128 @@
1/*
2 ktti.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 ktti.c is a low-level protocol driver for the KT Technology
6 parallel port adapter. This adapter is used in the "PHd"
7 portable hard-drives. As far as I can tell, this device
8 supports 4-bit mode _only_.
9
10*/
11
12#define KTTI_VERSION "1.0"
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/wait.h>
20#include <asm/io.h>
21
22#include "paride.h"
23
24#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
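/* j44() joins two nibble reads into one data byte: the high nibble of
   the first status read supplies bits 0-3 and the high nibble of the
   second supplies bits 4-7. */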
25
26/* cont = 0 - access the IDE register file
27 cont = 1 - access the IDE command set
28*/
29
30static int cont_map[2] = { 0x10, 0x08 };
31
32static void ktti_write_regr( PIA *pi, int cont, int regr, int val)
33
34{ int r;
35
36 r = regr + cont_map[cont];
37
38 w0(r); w2(0xb); w2(0xa); w2(3); w2(6);
39 w0(val); w2(3); w0(0); w2(6); w2(0xb);
40}
41
42static int ktti_read_regr( PIA *pi, int cont, int regr )
43
44{ int a, b, r;
45
46 r = regr + cont_map[cont];
47
48 w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
49 a = r1(); w2(0xc); b = r1(); w2(9); w2(0xc); w2(9);
50 return j44(a,b);
51
52}
53
54static void ktti_read_block( PIA *pi, char * buf, int count )
55
56{ int k, a, b;
57
58 for (k=0;k<count/2;k++) {
59 w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
60 a = r1(); w2(0xc); b = r1(); w2(9);
61 buf[2*k] = j44(a,b);
62 a = r1(); w2(0xc); b = r1(); w2(9);
63 buf[2*k+1] = j44(a,b);
64 }
65}
66
67static void ktti_write_block( PIA *pi, char * buf, int count )
68
69{ int k;
70
71 for (k=0;k<count/2;k++) {
72 w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6);
73 w0(buf[2*k]); w2(3);
74 w0(buf[2*k+1]); w2(6);
75 w2(0xb);
76 }
77}
78
79static void ktti_connect ( PIA *pi )
80
81{ pi->saved_r0 = r0();
82 pi->saved_r2 = r2();
83 w2(0xb); w2(0xa); w0(0); w2(3); w2(6);
84}
85
86static void ktti_disconnect ( PIA *pi )
87
88{ w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
89 w0(pi->saved_r0);
90 w2(pi->saved_r2);
91}
92
93static void ktti_log_adapter( PIA *pi, char * scratch, int verbose )
94
95{ printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n",
96 pi->device,KTTI_VERSION,pi->port,pi->delay);
97
98}
99
100static struct pi_protocol ktti = {
101 .owner = THIS_MODULE,
102 .name = "ktti",
103 .max_mode = 1,
104 .epp_first = 2,
105 .default_delay = 1,
106 .max_units = 1,
107 .write_regr = ktti_write_regr,
108 .read_regr = ktti_read_regr,
109 .write_block = ktti_write_block,
110 .read_block = ktti_read_block,
111 .connect = ktti_connect,
112 .disconnect = ktti_disconnect,
113 .log_adapter = ktti_log_adapter,
114};
115
116static int __init ktti_init(void)
117{
118 return pi_register(&ktti)-1;
119}
120
121static void __exit ktti_exit(void)
122{
123 pi_unregister(&ktti);
124}
125
126MODULE_LICENSE("GPL");
127module_init(ktti_init)
128module_exit(ktti_exit)
diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd
new file mode 100644
index 000000000000..971f099b40aa
--- /dev/null
+++ b/drivers/block/paride/mkd
@@ -0,0 +1,30 @@
1#!/bin/bash
2#
3# mkd -- a script to create the device special files for the PARIDE subsystem
4#
5# block devices: pd (45), pcd (46), pf (47)
6# character devices: pt (96), pg (97)
7#
8function mkdev {
9 mknod $1 $2 $3 $4 ; chmod 0660 $1 ; chown root:disk $1
10}
11#
12function pd {
13 D=$( printf \\$( printf "x%03x" $[ $1 + 97 ] ) )
14 mkdev pd$D b 45 $[ $1 * 16 ]
15 for P in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
16 do mkdev pd$D$P b 45 $[ $1 * 16 + $P ]
17 done
18}
19#
20cd /dev
21#
22for u in 0 1 2 3 ; do pd $u ; done
23for u in 0 1 2 3 ; do mkdev pcd$u b 46 $u ; done
24for u in 0 1 2 3 ; do mkdev pf$u b 47 $u ; done
25for u in 0 1 2 3 ; do mkdev pt$u c 96 $u ; done
26for u in 0 1 2 3 ; do mkdev npt$u c 96 $[ $u + 128 ] ; done
27for u in 0 1 2 3 ; do mkdev pg$u c 97 $u ; done
28#
29# end of mkd
30
diff --git a/drivers/block/paride/on20.c b/drivers/block/paride/on20.c
new file mode 100644
index 000000000000..9f8e01096809
--- /dev/null
+++ b/drivers/block/paride/on20.c
@@ -0,0 +1,153 @@
1/*
2 on20.c (c) 1996-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 on20.c is a low-level protocol driver for the
6 Onspec 90c20 parallel to IDE adapter.
7*/
8
9/* Changes:
10
11 1.01 GRG 1998.05.06 init_proto, release_proto
12
13*/
14
15#define ON20_VERSION "1.01"
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/wait.h>
23#include <asm/io.h>
24
25#include "paride.h"
26
27#define op(f) w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
28#define vl(v) w2(4);w0(v);w2(5);w2(7);w2(5);w2(4);
29
30#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
31
32/* cont = 0 - access the IDE register file
33 cont = 1 - access the IDE command set
34*/
35
36static int on20_read_regr( PIA *pi, int cont, int regr )
37
38{ int h,l, r ;
39
40 r = (regr<<2) + 1 + cont;
41
42 op(1); vl(r); op(0);
43
44 switch (pi->mode) {
45
46 case 0: w2(4); w2(6); l = r1();
47 w2(4); w2(6); h = r1();
48 w2(4); w2(6); w2(4); w2(6); w2(4);
49 return j44(l,h);
50
51 case 1: w2(4); w2(0x26); r = r0();
52 w2(4); w2(0x26); w2(4);
53 return r;
54
55 }
56 return -1;
57}
58
59static void on20_write_regr( PIA *pi, int cont, int regr, int val )
60
61{ int r;
62
63 r = (regr<<2) + 1 + cont;
64
65 op(1); vl(r);
66 op(0); vl(val);
67 op(0); vl(val);
68}
69
70static void on20_connect ( PIA *pi)
71
72{ pi->saved_r0 = r0();
73 pi->saved_r2 = r2();
74
75 w2(4);w0(0);w2(0xc);w2(4);w2(6);w2(4);w2(6);w2(4);
76 if (pi->mode) { op(2); vl(8); op(2); vl(9); }
77 else { op(2); vl(0); op(2); vl(8); }
78}
79
80static void on20_disconnect ( PIA *pi )
81
82{ w2(4);w0(7);w2(4);w2(0xc);w2(4);
83 w0(pi->saved_r0);
84 w2(pi->saved_r2);
85}
86
87static void on20_read_block( PIA *pi, char * buf, int count )
88
89{ int k, l, h;
90
91 op(1); vl(1); op(0);
92
93 for (k=0;k<count;k++)
94 if (pi->mode) {
95 w2(4); w2(0x26); buf[k] = r0();
96 } else {
97 w2(6); l = r1(); w2(4);
98 w2(6); h = r1(); w2(4);
99 buf[k] = j44(l,h);
100 }
101 w2(4);
102}
103
104static void on20_write_block( PIA *pi, char * buf, int count )
105
106{ int k;
107
108 op(1); vl(1); op(0);
109
110 for (k=0;k<count;k++) { w2(5); w0(buf[k]); w2(7); }
111 w2(4);
112}
113
114static void on20_log_adapter( PIA *pi, char * scratch, int verbose )
115
116{ char *mode_string[2] = {"4-bit","8-bit"};
117
118 printk("%s: on20 %s, OnSpec 90c20 at 0x%x, ",
119 pi->device,ON20_VERSION,pi->port);
120 printk("mode %d (%s), delay %d\n",pi->mode,
121 mode_string[pi->mode],pi->delay);
122
123}
124
125static struct pi_protocol on20 = {
126 .owner = THIS_MODULE,
127 .name = "on20",
128 .max_mode = 2,
129 .epp_first = 2,
130 .default_delay = 1,
131 .max_units = 1,
132 .write_regr = on20_write_regr,
133 .read_regr = on20_read_regr,
134 .write_block = on20_write_block,
135 .read_block = on20_read_block,
136 .connect = on20_connect,
137 .disconnect = on20_disconnect,
138 .log_adapter = on20_log_adapter,
139};
140
141static int __init on20_init(void)
142{
143 return pi_register(&on20)-1;
144}
145
146static void __exit on20_exit(void)
147{
148 pi_unregister(&on20);
149}
150
151MODULE_LICENSE("GPL");
152module_init(on20_init)
153module_exit(on20_exit)
diff --git a/drivers/block/paride/on26.c b/drivers/block/paride/on26.c
new file mode 100644
index 000000000000..9f837d9a3639
--- /dev/null
+++ b/drivers/block/paride/on26.c
@@ -0,0 +1,319 @@
1/*
2 on26.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 on26.c is a low-level protocol driver for the
6 OnSpec 90c26 parallel to IDE adapter chip.
7
8*/
9
10/* Changes:
11
12 1.01 GRG 1998.05.06 init_proto, release_proto
13 1.02 GRG 1998.09.23 updates for the -E rev chip
14 1.03 GRG 1998.12.14 fix for slave drives
15 1.04 GRG 1998.12.20 yet another bug fix
16
17*/
18
19#define ON26_VERSION "1.04"
20
21#include <linux/module.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/kernel.h>
25#include <linux/types.h>
26#include <linux/wait.h>
27#include <asm/io.h>
28
29#include "paride.h"
30
31/* mode codes: 0 nybble reads, 8-bit writes
32 1 8-bit reads and writes
33 2 8-bit EPP mode
34 3 EPP-16
35 4 EPP-32
36*/
37
38#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
39
40#define P1 w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
41#define P2 w2(5);w2(7);w2(5);w2(4);
42
43/* cont = 0 - access the IDE register file
44 cont = 1 - access the IDE command set
45*/
46
47static int on26_read_regr( PIA *pi, int cont, int regr )
48
49{ int a, b, r;
50
51 r = (regr<<2) + 1 + cont;
52
53 switch (pi->mode) {
54
55 case 0: w0(1); P1; w0(r); P2; w0(0); P1;
56 w2(6); a = r1(); w2(4);
57 w2(6); b = r1(); w2(4);
58 w2(6); w2(4); w2(6); w2(4);
59 return j44(a,b);
60
61 case 1: w0(1); P1; w0(r); P2; w0(0); P1;
62 w2(0x26); a = r0(); w2(4); w2(0x26); w2(4);
63 return a;
64
65 case 2:
66 case 3:
67 case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
68 w3(0); w3(0); w2(0x24); a = r4(); w2(4);
69 w2(0x24); r4(); w2(4);
70 return a;
71
72 }
73 return -1;
74}
75
76static void on26_write_regr( PIA *pi, int cont, int regr, int val )
77
78{ int r;
79
80 r = (regr<<2) + 1 + cont;
81
82 switch (pi->mode) {
83
84 case 0:
85 case 1: w0(1); P1; w0(r); P2; w0(0); P1;
86 w0(val); P2; w0(val); P2;
87 break;
88
89 case 2:
90 case 3:
91 case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
92 w3(0); w3(0);
93 w2(5); w4(val); w2(4);
94 w2(5); w4(val); w2(4);
95 break;
96 }
97}
98
99#define CCP(x) w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
100 w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
101
102static void on26_connect ( PIA *pi )
103
104{ int x;
105
106 pi->saved_r0 = r0();
107 pi->saved_r2 = r2();
108
109 CCP(0x20);
110 x = 8; if (pi->mode) x = 9;
111
112 w0(2); P1; w0(8); P2;
113 w0(2); P1; w0(x); P2;
114}
115
116static void on26_disconnect ( PIA *pi )
117
118{ if (pi->mode >= 2) { w3(4); w3(4); w3(4); w3(4); }
119 else { w0(4); P1; w0(4); P1; }
120 CCP(0x30);
121 w0(pi->saved_r0);
122 w2(pi->saved_r2);
123}
124
125#define RESET_WAIT 200
126
127static int on26_test_port( PIA *pi) /* hard reset */
128
129{ int i, m, d, x=0, y=0;
130
131 pi->saved_r0 = r0();
132 pi->saved_r2 = r2();
133
134 d = pi->delay;
135 m = pi->mode;
136 pi->delay = 5;
137 pi->mode = 0;
138
139 w2(0xc);
140
141 CCP(0x30); CCP(0);
142
143 w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);
144 i = ((r1() & 0xf0) << 4); w0(0x87);
145 i |= (r1() & 0xf0); w0(0x78);
146 w0(0x20);w2(4);w2(5);
147 i |= ((r1() & 0xf0) >> 4);
148 w2(4);w0(0xff);
149
150 if (i == 0xb5f) {
151
152 w0(2); P1; w0(0); P2;
153 w0(3); P1; w0(0); P2;
154 w0(2); P1; w0(8); P2; udelay(100);
155 w0(2); P1; w0(0xa); P2; udelay(100);
156 w0(2); P1; w0(8); P2; udelay(1000);
157
158 on26_write_regr(pi,0,6,0xa0);
159
160 for (i=0;i<RESET_WAIT;i++) {
161 on26_write_regr(pi,0,6,0xa0);
162 x = on26_read_regr(pi,0,7);
163 on26_write_regr(pi,0,6,0xb0);
164 y = on26_read_regr(pi,0,7);
165 if (!((x&0x80)||(y&0x80))) break;
166 mdelay(100);
167 }
168
169 if (i == RESET_WAIT)
170 printk("on26: Device reset failed (%x,%x)\n",x,y);
171
172 w0(4); P1; w0(4); P1;
173 }
174
175 CCP(0x30);
176
177 pi->delay = d;
178 pi->mode = m;
179 w0(pi->saved_r0);
180 w2(pi->saved_r2);
181
182 return 5;
183}
184
185
186static void on26_read_block( PIA *pi, char * buf, int count )
187
188{ int k, a, b;
189
190 switch (pi->mode) {
191
192 case 0: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x18); P2; w0(0); P1;
193 udelay(10);
194 for (k=0;k<count;k++) {
195 w2(6); a = r1();
196 w2(4); b = r1();
197 buf[k] = j44(a,b);
198 }
199 w0(2); P1; w0(8); P2;
200 break;
201
202 case 1: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x19); P2; w0(0); P1;
203 udelay(10);
204 for (k=0;k<count/2;k++) {
205 w2(0x26); buf[2*k] = r0();
206 w2(0x24); buf[2*k+1] = r0();
207 }
208 w0(2); P1; w0(9); P2;
209 break;
210
211 case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
212 w3(0); w3(0); w2(0x24);
213 udelay(10);
214 for (k=0;k<count;k++) buf[k] = r4();
215 w2(4);
216 break;
217
218 case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
219 w3(0); w3(0); w2(0x24);
220 udelay(10);
221 for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
222 w2(4);
223 break;
224
225 case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
226 w3(0); w3(0); w2(0x24);
227 udelay(10);
228 for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
229 w2(4);
230 break;
231
232 }
233}
234
235static void on26_write_block( PIA *pi, char * buf, int count )
236
237{ int k;
238
239 switch (pi->mode) {
240
241 case 0:
242 case 1: w0(1); P1; w0(1); P2;
243 w0(2); P1; w0(0x18+pi->mode); P2; w0(0); P1;
244 udelay(10);
245 for (k=0;k<count/2;k++) {
246 w2(5); w0(buf[2*k]);
247 w2(7); w0(buf[2*k+1]);
248 }
249 w2(5); w2(4);
250 w0(2); P1; w0(8+pi->mode); P2;
251 break;
252
253 case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
254 w3(0); w3(0); w2(0xc5);
255 udelay(10);
256 for (k=0;k<count;k++) w4(buf[k]);
257 w2(0xc4);
258 break;
259
260 case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
261 w3(0); w3(0); w2(0xc5);
262 udelay(10);
263 for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
264 w2(0xc4);
265 break;
266
267 case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
268 w3(0); w3(0); w2(0xc5);
269 udelay(10);
270 for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
271 w2(0xc4);
272 break;
273
274 }
275
276}
277
278static void on26_log_adapter( PIA *pi, char * scratch, int verbose )
279
280{ char *mode_string[5] = {"4-bit","8-bit","EPP-8",
281 "EPP-16","EPP-32"};
282
283 printk("%s: on26 %s, OnSpec 90c26 at 0x%x, ",
284 pi->device,ON26_VERSION,pi->port);
285 printk("mode %d (%s), delay %d\n",pi->mode,
286 mode_string[pi->mode],pi->delay);
287
288}
289
290static struct pi_protocol on26 = {
291 .owner = THIS_MODULE,
292 .name = "on26",
293 .max_mode = 5,
294 .epp_first = 2,
295 .default_delay = 1,
296 .max_units = 1,
297 .write_regr = on26_write_regr,
298 .read_regr = on26_read_regr,
299 .write_block = on26_write_block,
300 .read_block = on26_read_block,
301 .connect = on26_connect,
302 .disconnect = on26_disconnect,
303 .test_port = on26_test_port,
304 .log_adapter = on26_log_adapter,
305};
306
307static int __init on26_init(void)
308{
309 return pi_register(&on26)-1;
310}
311
312static void __exit on26_exit(void)
313{
314 pi_unregister(&on26);
315}
316
317MODULE_LICENSE("GPL");
318module_init(on26_init)
319module_exit(on26_exit)
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
new file mode 100644
index 000000000000..1fef136c0e41
--- /dev/null
+++ b/drivers/block/paride/paride.c
@@ -0,0 +1,467 @@
1/*
2 paride.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the base module for the family of device drivers
6 that support parallel port IDE devices.
7
8*/
9
10/* Changes:
11
12 1.01 GRG 1998.05.03 Use spinlocks
13 1.02 GRG 1998.05.05 init_proto, release_proto, ktti
14 1.03 GRG 1998.08.15 eliminate compiler warning
15 1.04 GRG 1998.11.28 added support for FRIQ
16 1.05 TMW 2000.06.06 use parport_find_number instead of
17 parport_enumerate
18 1.06 TMW 2001.03.26 more sane parport-or-not resource management
19*/
20
21#define PI_VERSION "1.06"
22
23#include <linux/module.h>
24#include <linux/config.h>
25#include <linux/kmod.h>
26#include <linux/types.h>
27#include <linux/kernel.h>
28#include <linux/ioport.h>
29#include <linux/string.h>
30#include <linux/spinlock.h>
31#include <linux/wait.h>
32
33#ifdef CONFIG_PARPORT_MODULE
34#define CONFIG_PARPORT
35#endif
36
37#ifdef CONFIG_PARPORT
38#include <linux/parport.h>
39#endif
40
41#include "paride.h"
42
43MODULE_LICENSE("GPL");
44
45#define MAX_PROTOS 32
46
47static struct pi_protocol *protocols[MAX_PROTOS];
48
49static DEFINE_SPINLOCK(pi_spinlock);
50
51void pi_write_regr(PIA * pi, int cont, int regr, int val)
52{
53 pi->proto->write_regr(pi, cont, regr, val);
54}
55
56EXPORT_SYMBOL(pi_write_regr);
57
58int pi_read_regr(PIA * pi, int cont, int regr)
59{
60 return pi->proto->read_regr(pi, cont, regr);
61}
62
63EXPORT_SYMBOL(pi_read_regr);
64
65void pi_write_block(PIA * pi, char *buf, int count)
66{
67 pi->proto->write_block(pi, buf, count);
68}
69
70EXPORT_SYMBOL(pi_write_block);
71
72void pi_read_block(PIA * pi, char *buf, int count)
73{
74 pi->proto->read_block(pi, buf, count);
75}
76
77EXPORT_SYMBOL(pi_read_block);
78
79#ifdef CONFIG_PARPORT
80
81static void pi_wake_up(void *p)
82{
83 PIA *pi = (PIA *) p;
84 unsigned long flags;
85 void (*cont) (void) = NULL;
86
87 spin_lock_irqsave(&pi_spinlock, flags);
88
89 if (pi->claim_cont && !parport_claim(pi->pardev)) {
90 cont = pi->claim_cont;
91 pi->claim_cont = NULL;
92 pi->claimed = 1;
93 }
94
95 spin_unlock_irqrestore(&pi_spinlock, flags);
96
97 wake_up(&(pi->parq));
98
99 if (cont)
100 cont();
101}
102
103#endif
104
105int pi_schedule_claimed(PIA * pi, void (*cont) (void))
106{
107#ifdef CONFIG_PARPORT
108 unsigned long flags;
109
110 spin_lock_irqsave(&pi_spinlock, flags);
111 if (pi->pardev && parport_claim(pi->pardev)) {
112 pi->claim_cont = cont;
113 spin_unlock_irqrestore(&pi_spinlock, flags);
114 return 0;
115 }
116 pi->claimed = 1;
117 spin_unlock_irqrestore(&pi_spinlock, flags);
118#endif
119 return 1;
120}
121EXPORT_SYMBOL(pi_schedule_claimed);
122
123void pi_do_claimed(PIA * pi, void (*cont) (void))
124{
125 if (pi_schedule_claimed(pi, cont))
126 cont();
127}
128
129EXPORT_SYMBOL(pi_do_claimed);
130
131static void pi_claim(PIA * pi)
132{
133 if (pi->claimed)
134 return;
135 pi->claimed = 1;
136#ifdef CONFIG_PARPORT
137 if (pi->pardev)
138 wait_event(pi->parq,
139 !parport_claim((struct pardevice *) pi->pardev));
140#endif
141}
142
143static void pi_unclaim(PIA * pi)
144{
145 pi->claimed = 0;
146#ifdef CONFIG_PARPORT
147 if (pi->pardev)
148 parport_release((struct pardevice *) (pi->pardev));
149#endif
150}
151
152void pi_connect(PIA * pi)
153{
154 pi_claim(pi);
155 pi->proto->connect(pi);
156}
157
158EXPORT_SYMBOL(pi_connect);
159
160void pi_disconnect(PIA * pi)
161{
162 pi->proto->disconnect(pi);
163 pi_unclaim(pi);
164}
165
166EXPORT_SYMBOL(pi_disconnect);
167
168static void pi_unregister_parport(PIA * pi)
169{
170#ifdef CONFIG_PARPORT
171 if (pi->pardev) {
172 parport_unregister_device((struct pardevice *) (pi->pardev));
173 pi->pardev = NULL;
174 }
175#endif
176}
177
178void pi_release(PIA * pi)
179{
180 pi_unregister_parport(pi);
181#ifndef CONFIG_PARPORT
182 if (pi->reserved)
183 release_region(pi->port, pi->reserved);
184#endif /* !CONFIG_PARPORT */
185 if (pi->proto->release_proto)
186 pi->proto->release_proto(pi);
187 module_put(pi->proto->owner);
188}
189
190EXPORT_SYMBOL(pi_release);
191
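/* Default protocol test: for each of the master and slave device
   selections (register 6 = 0xa0 / 0xb0), write complementary patterns
   to IDE registers 2 and 3 and count read-back mismatches on register 2.
   The mode is rejected only if both selections show errors. */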
192static int default_test_proto(PIA * pi, char *scratch, int verbose)
193{
194 int j, k;
195 int e[2] = { 0, 0 };
196
197 pi->proto->connect(pi);
198
199 for (j = 0; j < 2; j++) {
200 pi_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
201 for (k = 0; k < 256; k++) {
202 pi_write_regr(pi, 0, 2, k ^ 0xaa);
203 pi_write_regr(pi, 0, 3, k ^ 0x55);
204 if (pi_read_regr(pi, 0, 2) != (k ^ 0xaa))
205 e[j]++;
206 }
207 }
208 pi->proto->disconnect(pi);
209
210 if (verbose)
211 printk("%s: %s: port 0x%x, mode %d, test=(%d,%d)\n",
212 pi->device, pi->proto->name, pi->port,
213 pi->mode, e[0], e[1]);
214
215 return (e[0] && e[1]); /* not here if both > 0 */
216}
217
218static int pi_test_proto(PIA * pi, char *scratch, int verbose)
219{
220 int res;
221
222 pi_claim(pi);
223 if (pi->proto->test_proto)
224 res = pi->proto->test_proto(pi, scratch, verbose);
225 else
226 res = default_test_proto(pi, scratch, verbose);
227 pi_unclaim(pi);
228
229 return res;
230}
231
232int pi_register(PIP * pr)
233{
234 int k;
235
236 for (k = 0; k < MAX_PROTOS; k++)
237 if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
238 printk("paride: %s protocol already registered\n",
239 pr->name);
240 return 0;
241 }
242 k = 0;
243 while ((k < MAX_PROTOS) && (protocols[k]))
244 k++;
245 if (k == MAX_PROTOS) {
246 printk("paride: protocol table full\n");
247 return 0;
248 }
249 protocols[k] = pr;
250 pr->index = k;
251 printk("paride: %s registered as protocol %d\n", pr->name, k);
252 return 1;
253}
254
255EXPORT_SYMBOL(pi_register);
256
257void pi_unregister(PIP * pr)
258{
259 if (!pr)
260 return;
261 if (protocols[pr->index] != pr) {
262 printk("paride: %s not registered\n", pr->name);
263 return;
264 }
265 protocols[pr->index] = NULL;
266}
267
268EXPORT_SYMBOL(pi_unregister);
269
270static int pi_register_parport(PIA * pi, int verbose)
271{
272#ifdef CONFIG_PARPORT
273
274 struct parport *port;
275
276 port = parport_find_base(pi->port);
277 if (!port)
278 return 0;
279
280 pi->pardev = parport_register_device(port,
281 pi->device, NULL,
282 pi_wake_up, NULL, 0, (void *) pi);
283 parport_put_port(port);
284 if (!pi->pardev)
285 return 0;
286
287 init_waitqueue_head(&pi->parq);
288
289 if (verbose)
290 printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
291
292 pi->parname = (char *) port->name;
293#endif
294
295 return 1;
296}
297
298static int pi_probe_mode(PIA * pi, int max, char *scratch, int verbose)
299{
300 int best, range;
301
302 if (pi->mode != -1) {
303 if (pi->mode >= max)
304 return 0;
305 range = 3;
306 if (pi->mode >= pi->proto->epp_first)
307 range = 8;
308 if ((range == 8) && (pi->port % 8))
309 return 0;
310 pi->reserved = range;
311 return (!pi_test_proto(pi, scratch, verbose));
312 }
313 best = -1;
314 for (pi->mode = 0; pi->mode < max; pi->mode++) {
315 range = 3;
316 if (pi->mode >= pi->proto->epp_first)
317 range = 8;
318 if ((range == 8) && (pi->port % 8))
319 break;
320 pi->reserved = range;
321 if (!pi_test_proto(pi, scratch, verbose))
322 best = pi->mode;
323 }
324 pi->mode = best;
325 return (best > -1);
326}
327
328static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
329{
330 int max, s, e;
331
332 s = unit;
333 e = s + 1;
334
335 if (s == -1) {
336 s = 0;
337 e = pi->proto->max_units;
338 }
339
340 if (!pi_register_parport(pi, verbose))
341 return 0;
342
343 if (pi->proto->test_port) {
344 pi_claim(pi);
345 max = pi->proto->test_port(pi);
346 pi_unclaim(pi);
347 } else
348 max = pi->proto->max_mode;
349
350 if (pi->proto->probe_unit) {
351 pi_claim(pi);
352 for (pi->unit = s; pi->unit < e; pi->unit++)
353 if (pi->proto->probe_unit(pi)) {
354 pi_unclaim(pi);
355 if (pi_probe_mode(pi, max, scratch, verbose))
356 return 1;
357 pi_unregister_parport(pi);
358 return 0;
359 }
360 pi_unclaim(pi);
361 pi_unregister_parport(pi);
362 return 0;
363 }
364
365 if (!pi_probe_mode(pi, max, scratch, verbose)) {
366 pi_unregister_parport(pi);
367 return 0;
368 }
369 return 1;
370
371}
372
373int pi_init(PIA * pi, int autoprobe, int port, int mode,
374 int unit, int protocol, int delay, char *scratch,
375 int devtype, int verbose, char *device)
376{
377 int p, k, s, e;
378 int lpts[7] = { 0x3bc, 0x378, 0x278, 0x268, 0x27c, 0x26c, 0 };
379
380 s = protocol;
381 e = s + 1;
382
383 if (!protocols[0])
384 request_module("paride_protocol");
385
386 if (autoprobe) {
387 s = 0;
388 e = MAX_PROTOS;
389 } else if ((s < 0) || (s >= MAX_PROTOS) || (port <= 0) ||
390 (!protocols[s]) || (unit < 0) ||
391 (unit >= protocols[s]->max_units)) {
392 printk("%s: Invalid parameters\n", device);
393 return 0;
394 }
395
396 for (p = s; p < e; p++) {
397 struct pi_protocol *proto = protocols[p];
398 if (!proto)
399 continue;
400 /* still racy */
401 if (!try_module_get(proto->owner))
402 continue;
403 pi->proto = proto;
404 pi->private = 0;
405 if (proto->init_proto && proto->init_proto(pi) < 0) {
406 pi->proto = NULL;
407 module_put(proto->owner);
408 continue;
409 }
410 if (delay == -1)
411 pi->delay = pi->proto->default_delay;
412 else
413 pi->delay = delay;
414 pi->devtype = devtype;
415 pi->device = device;
416
417 pi->parname = NULL;
418 pi->pardev = NULL;
419 init_waitqueue_head(&pi->parq);
420 pi->claimed = 0;
421 pi->claim_cont = NULL;
422
423 pi->mode = mode;
424 if (port != -1) {
425 pi->port = port;
426 if (pi_probe_unit(pi, unit, scratch, verbose))
427 break;
428 pi->port = 0;
429 } else {
430 k = 0;
431 while ((pi->port = lpts[k++]))
432 if (pi_probe_unit
433 (pi, unit, scratch, verbose))
434 break;
435 if (pi->port)
436 break;
437 }
438 if (pi->proto->release_proto)
439 pi->proto->release_proto(pi);
440 module_put(proto->owner);
441 }
442
443 if (!pi->port) {
444 if (autoprobe)
445 printk("%s: Autoprobe failed\n", device);
446 else
447 printk("%s: Adapter not found\n", device);
448 return 0;
449 }
450#ifndef CONFIG_PARPORT
451 if (!request_region(pi->port, pi->reserved, pi->device)) {
452 printk(KERN_WARNING "paride: Unable to request region 0x%x\n",
453 pi->port);
454 return 0;
455 }
456#endif /* !CONFIG_PARPORT */
457
458 if (pi->parname)
459 printk("%s: Sharing %s at 0x%x\n", pi->device,
460 pi->parname, pi->port);
461
462 pi->proto->log_adapter(pi, scratch, verbose);
463
464 return 1;
465}
466
467EXPORT_SYMBOL(pi_init);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
new file mode 100644
index 000000000000..c6d98ef09e48
--- /dev/null
+++ b/drivers/block/paride/paride.h
@@ -0,0 +1,170 @@
1#ifndef __DRIVERS_PARIDE_H__
2#define __DRIVERS_PARIDE_H__
3
4/*
5 paride.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
6 Under the terms of the GPL.
7
8 This file defines the interface between the high-level parallel
9 IDE device drivers (pd, pf, pcd, pt) and the adapter chips.
10
11*/
12
13/* Changes:
14
15 1.01 GRG 1998.05.05 init_proto, release_proto
16*/
17
18#define PARIDE_H_VERSION "1.01"
19
20/* Some adapters need to know what kind of device they are in
21
22 Values for devtype:
23*/
24
25#define PI_PD 0 /* IDE disk */
26#define PI_PCD 1 /* ATAPI CDrom */
27#define PI_PF 2 /* ATAPI disk */
28#define PI_PT 3 /* ATAPI tape */
29#define PI_PG 4 /* ATAPI generic */
30
31/* The paride module contains no state; instead, the drivers allocate
32 a pi_adapter data structure and pass it to paride in every operation.
33
34*/
35
36struct pi_adapter {
37
38 struct pi_protocol *proto; /* adapter protocol */
39 int port; /* base address of parallel port */
40 int mode; /* transfer mode in use */
41 int delay; /* adapter delay setting */
42 int devtype; /* device type: PI_PD etc. */
43 char *device; /* name of driver */
44 int unit; /* unit number for chained adapters */
45 int saved_r0; /* saved port state */
46 int saved_r2; /* saved port state */
47 int reserved; /* number of ports reserved */
48 unsigned long private; /* for protocol module */
49
50 wait_queue_head_t parq; /* semaphore for parport sharing */
51 void *pardev; /* pointer to pardevice */
52 char *parname; /* parport name */
53 int claimed; /* parport has already been claimed */
54 void (*claim_cont)(void); /* continuation for parport wait */
55};
56
57typedef struct pi_adapter PIA;
58
59/* functions exported by paride to the high level drivers */
60
61extern int pi_init(PIA *pi,
62 int autoprobe, /* 1 to autoprobe */
63 int port, /* base port address */
64 int mode, /* -1 for autoprobe */
65 int unit, /* unit number, if supported */
66 int protocol, /* protocol to use */
67 int delay, /* -1 to use adapter specific default */
68 char * scratch, /* address of 512 byte buffer */
69 int devtype, /* device type: PI_PD, PI_PCD, etc ... */
70 int verbose, /* log verbose data while probing */
71 char *device /* name of the driver */
72 ); /* returns 0 on failure, 1 on success */
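/* An illustrative sketch (following the pattern pcd.c uses in
   pcd_detect()): a driver that wants a full autoprobe passes -1 for the
   port, mode, unit, protocol and delay arguments, e.g.

       if (pi_init(pi, 1, -1, -1, -1, -1, -1, scratch,
                   PI_PD, verbose, "pd"))
               ... adapter found, ready for pi_connect() ...

   where scratch points to the caller's 512 byte buffer and "pd" is the
   name used in log messages. */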
73
74extern void pi_release(PIA *pi);
75
76/* registers are addressed as (cont,regr)
77
78 cont: 0 for command register file, 1 for control register(s)
79 regr: 0-7 for register number.
80
81*/
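/* For example, (0,7) addresses the ATA status register -- the pcd
   driver's read_reg(cd,7) -- while (1,6) is the register its
   status_reg() helper polls (conventionally the alternate status). */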
82
83extern void pi_write_regr(PIA *pi, int cont, int regr, int val);
84
85extern int pi_read_regr(PIA *pi, int cont, int regr);
86
87extern void pi_write_block(PIA *pi, char * buf, int count);
88
89extern void pi_read_block(PIA *pi, char * buf, int count);
90
91extern void pi_connect(PIA *pi);
92
93extern void pi_disconnect(PIA *pi);
94
95extern void pi_do_claimed(PIA *pi, void (*cont)(void));
96extern int pi_schedule_claimed(PIA *pi, void (*cont)(void));
97
98/* macros and functions exported to the protocol modules */
99
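/* These wrappers hide the raw port arithmetic: wN()/rN() write or read
   the parallel port register at offset N from the base address (0 data,
   1 status, 2 control, 3 EPP address, 4 EPP data), adding the
   adapter-specific delay to each access when pi->delay is set. */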
100#define delay_p (pi->delay?udelay(pi->delay):(void)0)
101#define out_p(offs,byte) outb(byte,pi->port+offs); delay_p;
102#define in_p(offs) (delay_p,inb(pi->port+offs))
103
104#define w0(byte) {out_p(0,byte);}
105#define r0() (in_p(0) & 0xff)
106#define w1(byte) {out_p(1,byte);}
107#define r1() (in_p(1) & 0xff)
108#define w2(byte) {out_p(2,byte);}
109#define r2() (in_p(2) & 0xff)
110#define w3(byte) {out_p(3,byte);}
111#define w4(byte) {out_p(4,byte);}
112#define r4() (in_p(4) & 0xff)
113#define w4w(data) {outw(data,pi->port+4); delay_p;}
114#define w4l(data) {outl(data,pi->port+4); delay_p;}
115#define r4w() (delay_p,inw(pi->port+4)&0xffff)
116#define r4l() (delay_p,inl(pi->port+4)&0xffffffff)
117
118static inline u16 pi_swab16( char *b, int k)
119
120{ union { u16 u; char t[2]; } r;
121
122 r.t[0]=b[2*k+1]; r.t[1]=b[2*k];
123 return r.u;
124}
125
126static inline u32 pi_swab32( char *b, int k)
127
128{ union { u32 u; char f[4]; } r;
129
130 r.f[0]=b[4*k+1]; r.f[1]=b[4*k];
131 r.f[2]=b[4*k+3]; r.f[3]=b[4*k+2];
132 return r.u;
133}
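/* pi_swab16()/pi_swab32() assemble a 16- or 32-bit word from the buffer
   with the bytes of each 16-bit half exchanged, without assuming any
   particular alignment of buf; kbic.c uses them for its EPP-16 and
   EPP-32 write paths. */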
134
135struct pi_protocol {
136
137 char name[8]; /* name for this protocol */
138 int index; /* index into protocol table */
139
140 int max_mode; /* max mode number */
141 int epp_first; /* modes >= this use 8 ports */
142
143 int default_delay; /* delay parameter if not specified */
144 int max_units; /* max chained units probed for */
145
146 void (*write_regr)(PIA *,int,int,int);
147 int (*read_regr)(PIA *,int,int);
148 void (*write_block)(PIA *,char *,int);
149 void (*read_block)(PIA *,char *,int);
150
151 void (*connect)(PIA *);
152 void (*disconnect)(PIA *);
153
154 int (*test_port)(PIA *);
155 int (*probe_unit)(PIA *);
156 int (*test_proto)(PIA *,char *,int);
157 void (*log_adapter)(PIA *,char *,int);
158
159 int (*init_proto)(PIA *);
160 void (*release_proto)(PIA *);
161 struct module *owner;
162};
163
164typedef struct pi_protocol PIP;
165
166extern int pi_register( PIP * );
167extern void pi_unregister ( PIP * );
168
169#endif /* __DRIVERS_PARIDE_H__ */
170/* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
new file mode 100644
index 000000000000..7289f67e9568
--- /dev/null
+++ b/drivers/block/paride/pcd.c
@@ -0,0 +1,971 @@
1/*
2 pcd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is a high-level driver for parallel port ATAPI CD-ROM
6 drives based on chips supported by the paride module.
7
8 By default, the driver will autoprobe for a single parallel
9 port ATAPI CD-ROM drive, but if individual drive parameters are
10 specified, the driver can handle up to 4 drives.
11
12 The behaviour of the pcd driver can be altered by setting
13 some parameters from the insmod command line. The following
14 parameters are adjustable:
15
16 drive0 These four arguments can be arrays of
17 drive1 1-6 integers as follows:
18 drive2
19 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
20
21 Where,
22
23 <prt> is the base of the parallel port address for
24 the corresponding drive. (required)
25
26 <pro> is the protocol number for the adapter that
27 supports this drive. These numbers are
28 logged by 'paride' when the protocol modules
29 are initialised. (0 if not given)
30
31 <uni> for those adapters that support chained
32 devices, this is the unit selector for the
33 chain of devices on the given port. It should
34 be zero for devices that don't support chaining.
35 (0 if not given)
36
37 <mod> this can be -1 to choose the best mode, or one
38 of the mode numbers supported by the adapter.
39 (-1 if not given)
40
41 <slv> ATAPI CD-ROMs can be jumpered to master or slave.
42 Set this to 0 to choose the master drive, 1 to
43 choose the slave, -1 (the default) to choose the
44 first drive found.
45
46 <dly> some parallel ports require the driver to
47 go more slowly. -1 sets a default value that
48 should work with the chosen protocol. Otherwise,
49 set this to a small integer; the larger it is,
50 the slower the port i/o. In some cases, setting
51 this to zero will speed up the device. (default -1)
52
53 major You may use this parameter to override the
54 default major number (46) that this driver
55 will use. Be sure to change the device
56 name as well.
57
58 name This parameter is a character string that
59 contains the name the kernel will use for this
60 device (in /proc output, for instance).
61 (default "pcd")
62
63 verbose This parameter controls the amount of logging
64 that the driver will do. Set it to 0 for
65 normal operation, 1 to see autoprobe progress
66 messages, or 2 to see additional debugging
67 output. (default 0)
68
69 nice This parameter controls the driver's use of
70 idle CPU time, at the expense of some speed.
71
72 If this driver is built into the kernel, you can use the
73 following kernel command line parameters, with the same values
74 as the corresponding module parameters listed above:
75
76 pcd.drive0
77 pcd.drive1
78 pcd.drive2
79 pcd.drive3
80 pcd.nice
81
82 In addition, you can use the parameter pcd.disable to disable
83 the driver entirely.
84
85*/
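/* For illustration only: a single drive on a port at 0x378, protocol
   number 0, everything else left at its default, could be configured
   with module parameters such as

       insmod pcd drive0=0x378,0,0,-1,-1,-1 verbose=1

   or, when the driver is built in, with pcd.drive0=0x378,0,0,-1,-1,-1
   on the kernel command line. The protocol number is just an example;
   use whatever number 'paride' logged when the matching protocol
   module was initialised.
*/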
86
87/* Changes:
88
89 1.01 GRG 1998.01.24 Added test unit ready support
90 1.02 GRG 1998.05.06 Changes to pcd_completion, ready_wait,
91 and loosen interpretation of ATAPI
92 standard for clearing error status.
93 Use spinlocks. Eliminate sti().
94 1.03 GRG 1998.06.16 Eliminated an Ugh
95 1.04 GRG 1998.08.15 Added extra debugging, improvements to
96 pcd_completion, use HZ in loop timing
97 1.05 GRG 1998.08.16 Conformed to "Uniform CD-ROM" standard
98 1.06 GRG 1998.08.19 Added audio ioctl support
99 1.07 GRG 1998.09.24 Increased reset timeout, added jumbo support
100
101*/
102
103#define PCD_VERSION "1.07"
104#define PCD_MAJOR 46
105#define PCD_NAME "pcd"
106#define PCD_UNITS 4
107
108/* Here are things one can override from the insmod command.
109 Most are autoprobed by paride unless set here. Verbose is off
110 by default.
111
112*/
113
114static int verbose = 0;
115static int major = PCD_MAJOR;
116static char *name = PCD_NAME;
117static int nice = 0;
118static int disable = 0;
119
120static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
121static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
122static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
123static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
124
125static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
126static int pcd_drive_count;
127
128enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
129
130/* end of parameters */
131
132#include <linux/module.h>
133#include <linux/init.h>
134#include <linux/errno.h>
135#include <linux/fs.h>
136#include <linux/kernel.h>
137#include <linux/delay.h>
138#include <linux/cdrom.h>
139#include <linux/spinlock.h>
140#include <linux/blkdev.h>
141#include <asm/uaccess.h>
142
143static spinlock_t pcd_lock;
144
145module_param(verbose, bool, 0644);
146module_param(major, int, 0);
147module_param(name, charp, 0);
148module_param(nice, int, 0);
149module_param_array(drive0, int, NULL, 0);
150module_param_array(drive1, int, NULL, 0);
151module_param_array(drive2, int, NULL, 0);
152module_param_array(drive3, int, NULL, 0);
153
154#include "paride.h"
155#include "pseudo.h"
156
157#define PCD_RETRIES 5
158#define PCD_TMO 800 /* timeout in jiffies */
159#define PCD_DELAY 50 /* spin delay in uS */
160#define PCD_READY_TMO 20 /* in seconds */
161#define PCD_RESET_TMO 100 /* in tenths of a second */
162
163#define PCD_SPIN (1000000*PCD_TMO)/(HZ*PCD_DELAY)
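/* i.e. the number of PCD_DELAY-microsecond polls that fit inside the
   PCD_TMO-jiffy timeout used by pcd_wait() below */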
164
165#define IDE_ERR 0x01
166#define IDE_DRQ 0x08
167#define IDE_READY 0x40
168#define IDE_BUSY 0x80
169
170static int pcd_open(struct cdrom_device_info *cdi, int purpose);
171static void pcd_release(struct cdrom_device_info *cdi);
172static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
173static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr);
174static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
175static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
176static int pcd_drive_reset(struct cdrom_device_info *cdi);
177static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn);
178static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
179 unsigned int cmd, void *arg);
180static int pcd_packet(struct cdrom_device_info *cdi,
181 struct packet_command *cgc);
182
183static int pcd_detect(void);
184static void pcd_probe_capabilities(void);
185static void do_pcd_read_drq(void);
186static void do_pcd_request(request_queue_t * q);
187static void do_pcd_read(void);
188
189struct pcd_unit {
190 struct pi_adapter pia; /* interface to paride layer */
191 struct pi_adapter *pi;
192 int drive; /* master/slave */
193 int last_sense; /* result of last request sense */
194 int changed; /* media change seen */
195 int present; /* does this unit exist ? */
196 char *name; /* pcd0, pcd1, etc */
197 struct cdrom_device_info info; /* uniform cdrom interface */
198 struct gendisk *disk;
199};
200
201static struct pcd_unit pcd[PCD_UNITS];
202
203static char pcd_scratch[64];
204static char pcd_buffer[2048]; /* raw block buffer */
205static int pcd_bufblk = -1; /* block in buffer, in CD units,
206 -1 for nothing there. See also
207 pd_unit.
208 */
209
210/* the variables below are used mainly in the I/O request engine, which
211 processes only one request at a time.
212*/
213
214static struct pcd_unit *pcd_current; /* current request's drive */
215static struct request *pcd_req;
216static int pcd_retries; /* retries on current request */
217static int pcd_busy; /* request being processed ? */
218static int pcd_sector; /* address of next requested sector */
219static int pcd_count; /* number of blocks still to do */
220static char *pcd_buf; /* buffer for request in progress */
221
222static int pcd_warned; /* Have we logged a phase warning ? */
223
224/* kernel glue structures */
225
226static int pcd_block_open(struct inode *inode, struct file *file)
227{
228 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
229 return cdrom_open(&cd->info, inode, file);
230}
231
232static int pcd_block_release(struct inode *inode, struct file *file)
233{
234 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
235 return cdrom_release(&cd->info, file);
236}
237
238static int pcd_block_ioctl(struct inode *inode, struct file *file,
239 unsigned cmd, unsigned long arg)
240{
241 struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
242 return cdrom_ioctl(file, &cd->info, inode, cmd, arg);
243}
244
245static int pcd_block_media_changed(struct gendisk *disk)
246{
247 struct pcd_unit *cd = disk->private_data;
248 return cdrom_media_changed(&cd->info);
249}
250
251static struct block_device_operations pcd_bdops = {
252 .owner = THIS_MODULE,
253 .open = pcd_block_open,
254 .release = pcd_block_release,
255 .ioctl = pcd_block_ioctl,
256 .media_changed = pcd_block_media_changed,
257};
258
259static struct cdrom_device_ops pcd_dops = {
260 .open = pcd_open,
261 .release = pcd_release,
262 .drive_status = pcd_drive_status,
263 .media_changed = pcd_media_changed,
264 .tray_move = pcd_tray_move,
265 .lock_door = pcd_lock_door,
266 .get_mcn = pcd_get_mcn,
267 .reset = pcd_drive_reset,
268 .audio_ioctl = pcd_audio_ioctl,
269 .generic_packet = pcd_packet,
270 .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
271 CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET |
272 CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R |
273 CDC_CD_RW,
274};
275
276static void pcd_init_units(void)
277{
278 struct pcd_unit *cd;
279 int unit;
280
281 pcd_drive_count = 0;
282 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
283 struct gendisk *disk = alloc_disk(1);
284 if (!disk)
285 continue;
286 cd->disk = disk;
287 cd->pi = &cd->pia;
288 cd->present = 0;
289 cd->last_sense = 0;
290 cd->changed = 1;
291 cd->drive = (*drives[unit])[D_SLV];
292 if ((*drives[unit])[D_PRT])
293 pcd_drive_count++;
294
295 cd->name = &cd->info.name[0];
296 snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
297 cd->info.ops = &pcd_dops;
298 cd->info.handle = cd;
299 cd->info.speed = 0;
300 cd->info.capacity = 1;
301 cd->info.mask = 0;
302 disk->major = major;
303 disk->first_minor = unit;
304 strcpy(disk->disk_name, cd->name); /* umm... */
305 disk->fops = &pcd_bdops;
306 }
307}
308
309static int pcd_open(struct cdrom_device_info *cdi, int purpose)
310{
311 struct pcd_unit *cd = cdi->handle;
312 if (!cd->present)
313 return -ENODEV;
314 return 0;
315}
316
317static void pcd_release(struct cdrom_device_info *cdi)
318{
319}
320
321static inline int status_reg(struct pcd_unit *cd)
322{
323 return pi_read_regr(cd->pi, 1, 6);
324}
325
326static inline int read_reg(struct pcd_unit *cd, int reg)
327{
328 return pi_read_regr(cd->pi, 0, reg);
329}
330
331static inline void write_reg(struct pcd_unit *cd, int reg, int val)
332{
333 pi_write_regr(cd->pi, 0, reg, val);
334}
335
336static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
337{
338 int j, r, e, s, p;
339
340 j = 0;
341 while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop))))
342 && (j++ < PCD_SPIN))
343 udelay(PCD_DELAY);
344
345 if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) {
346 s = read_reg(cd, 7);
347 e = read_reg(cd, 1);
348 p = read_reg(cd, 2);
349 if (j >= PCD_SPIN)
350 e |= 0x100;
351 if (fun)
352 printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
353 " loop=%d phase=%d\n",
354 cd->name, fun, msg, r, s, e, j, p);
355 return (s << 8) + r;
356 }
357 return 0;
358}
359
360static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun)
361{
362 pi_connect(cd->pi);
363
364 write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
365
366 if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) {
367 pi_disconnect(cd->pi);
368 return -1;
369 }
370
371 write_reg(cd, 4, dlen % 256);
372 write_reg(cd, 5, dlen / 256);
373 write_reg(cd, 7, 0xa0); /* ATAPI packet command */
374
375 if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) {
376 pi_disconnect(cd->pi);
377 return -1;
378 }
379
380 if (read_reg(cd, 2) != 1) {
381 printk("%s: %s: command phase error\n", cd->name, fun);
382 pi_disconnect(cd->pi);
383 return -1;
384 }
385
386 pi_write_block(cd->pi, cmd, 12);
387
388 return 0;
389}
390
391static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
392{
393 int r, d, p, n, k, j;
394
395 r = -1;
396 k = 0;
397 j = 0;
398
399 if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR,
400 fun, "completion")) {
401 r = 0;
402 while (read_reg(cd, 7) & IDE_DRQ) {
403 d = read_reg(cd, 4) + 256 * read_reg(cd, 5);
404 n = (d + 3) & 0xfffc;
405 p = read_reg(cd, 2) & 3;
406
407 if ((p == 2) && (n > 0) && (j == 0)) {
408 pi_read_block(cd->pi, buf, n);
409 if (verbose > 1)
410 printk("%s: %s: Read %d bytes\n",
411 cd->name, fun, n);
412 r = 0;
413 j++;
414 } else {
415 if (verbose > 1)
416 printk
417 ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
418 cd->name, fun, p, d, k);
419 if ((verbose < 2) && !pcd_warned) {
420 pcd_warned = 1;
421 printk
422 ("%s: WARNING: ATAPI phase errors\n",
423 cd->name);
424 }
425 mdelay(1);
426 }
427 if (k++ > PCD_TMO) {
428 printk("%s: Stuck DRQ\n", cd->name);
429 break;
430 }
431 if (pcd_wait
432 (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun,
433 "completion")) {
434 r = -1;
435 break;
436 }
437 }
438 }
439
440 pi_disconnect(cd->pi);
441
442 return r;
443}
444
445static void pcd_req_sense(struct pcd_unit *cd, char *fun)
446{
447 char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
448 char buf[16];
449 int r, c;
450
451 r = pcd_command(cd, rs_cmd, 16, "Request sense");
452 mdelay(1);
453 if (!r)
454 pcd_completion(cd, buf, "Request sense");
455
456 cd->last_sense = -1;
457 c = 2;
458 if (!r) {
459 if (fun)
460 printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n",
461 cd->name, fun, buf[2] & 0xf, buf[12], buf[13]);
462 c = buf[2] & 0xf;
463 cd->last_sense =
464 c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16);
465 }
466 if ((c == 2) || (c == 6))
467 cd->changed = 1;
468}
469
470static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun)
471{
472 int r;
473
474 r = pcd_command(cd, cmd, dlen, fun);
475 mdelay(1);
476 if (!r)
477 r = pcd_completion(cd, buf, fun);
478 if (r)
479 pcd_req_sense(cd, fun);
480
481 return r;
482}
483
484static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
485{
486 return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer,
487 "generic packet");
488}
489
490#define DBMSG(msg) ((verbose>1)?(msg):NULL)
491
492static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr)
493{
494 struct pcd_unit *cd = cdi->handle;
495 int res = cd->changed;
496 if (res)
497 cd->changed = 0;
498 return res;
499}
500
501static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
502{
503 char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 };
504
505 return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch,
506 lock ? "lock door" : "unlock door");
507}
508
509static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
510{
511 char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 };
512
513 return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch,
514 position ? "eject" : "close tray");
515}
516
517static void pcd_sleep(int cs)
518{
519 current->state = TASK_INTERRUPTIBLE;
520 schedule_timeout(cs);
521}
522
523static int pcd_reset(struct pcd_unit *cd)
524{
525 int i, k, flg;
526 int expect[5] = { 1, 1, 1, 0x14, 0xeb };
527
528 pi_connect(cd->pi);
529 write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
530 write_reg(cd, 7, 8);
531
532 pcd_sleep(20 * HZ / 1000); /* delay a bit */
533
534 k = 0;
535 while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY))
536 pcd_sleep(HZ / 10);
537
538 flg = 1;
539 for (i = 0; i < 5; i++)
540 flg &= (read_reg(cd, i + 1) == expect[i]);
541
542 if (verbose) {
543 printk("%s: Reset (%d) signature = ", cd->name, k);
544 for (i = 0; i < 5; i++)
545 printk("%3x", read_reg(cd, i + 1));
546 if (!flg)
547 printk(" (incorrect)");
548 printk("\n");
549 }
550
551 pi_disconnect(cd->pi);
552 return flg - 1;
553}
554
555static int pcd_drive_reset(struct cdrom_device_info *cdi)
556{
557 return pcd_reset(cdi->handle);
558}
559
560static int pcd_ready_wait(struct pcd_unit *cd, int tmo)
561{
562 char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
563 int k, p;
564
565 k = 0;
566 while (k < tmo) {
567 cd->last_sense = 0;
568 pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready"));
569 p = cd->last_sense;
570 if (!p)
571 return 0;
572 if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
573 return p;
574 k++;
575 pcd_sleep(HZ);
576 }
577 return 0x000020; /* timeout */
578}
579
580static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
581{
582 char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
583 struct pcd_unit *cd = cdi->handle;
584
585 if (pcd_ready_wait(cd, PCD_READY_TMO))
586 return CDS_DRIVE_NOT_READY;
587 if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media")))
588 return CDS_NO_DISC;
589 return CDS_DISC_OK;
590}
591
592static int pcd_identify(struct pcd_unit *cd, char *id)
593{
594 int k, s;
595 char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
596
597 pcd_bufblk = -1;
598
599 s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify");
600
601 if (s)
602 return -1;
603 if ((pcd_buffer[0] & 0x1f) != 5) {
604 if (verbose)
605 printk("%s: %s is not a CD-ROM\n",
606 cd->name, cd->drive ? "Slave" : "Master");
607 return -1;
608 }
609 memcpy(id, pcd_buffer + 16, 16);
610 id[16] = 0;
611 k = 16;
612 while ((k >= 0) && (id[k] <= 0x20)) {
613 id[k] = 0;
614 k--;
615 }
616
617 printk("%s: %s: %s\n", cd->name, cd->drive ? "Slave" : "Master", id);
618
619 return 0;
620}
621
622/*
623 * returns 0, with id set if drive is detected
624 * -1, if drive detection failed
625 */
626static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
627{
628 if (ms == -1) {
629 for (cd->drive = 0; cd->drive <= 1; cd->drive++)
630 if (!pcd_reset(cd) && !pcd_identify(cd, id))
631 return 0;
632 } else {
633 cd->drive = ms;
634 if (!pcd_reset(cd) && !pcd_identify(cd, id))
635 return 0;
636 }
637 return -1;
638}
639
640static void pcd_probe_capabilities(void)
641{
642 int unit, r;
643 char buffer[32];
644 char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
645 struct pcd_unit *cd;
646
647 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
648 if (!cd->present)
649 continue;
650 r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
651 if (r)
652 continue;
653 /* we should now have the cap page */
654 if ((buffer[11] & 1) == 0)
655 cd->info.mask |= CDC_CD_R;
656 if ((buffer[11] & 2) == 0)
657 cd->info.mask |= CDC_CD_RW;
658 if ((buffer[12] & 1) == 0)
659 cd->info.mask |= CDC_PLAY_AUDIO;
660 if ((buffer[14] & 1) == 0)
661 cd->info.mask |= CDC_LOCK;
662 if ((buffer[14] & 8) == 0)
663 cd->info.mask |= CDC_OPEN_TRAY;
664 if ((buffer[14] >> 6) == 0)
665 cd->info.mask |= CDC_CLOSE_TRAY;
666 }
667}
668
669static int pcd_detect(void)
670{
671 char id[18];
672 int k, unit;
673 struct pcd_unit *cd;
674
675 printk("%s: %s version %s, major %d, nice %d\n",
676 name, name, PCD_VERSION, major, nice);
677
678 k = 0;
679 if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
680 cd = pcd;
681 if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer,
682 PI_PCD, verbose, cd->name)) {
683 if (!pcd_probe(cd, -1, id) && cd->disk) {
684 cd->present = 1;
685 k++;
686 } else
687 pi_release(cd->pi);
688 }
689 } else {
690 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
691 int *conf = *drives[unit];
692 if (!conf[D_PRT])
693 continue;
694 if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
695 conf[D_UNI], conf[D_PRO], conf[D_DLY],
696 pcd_buffer, PI_PCD, verbose, cd->name))
697 continue;
698 if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) {
699 cd->present = 1;
700 k++;
701 } else
702 pi_release(cd->pi);
703 }
704 }
705 if (k)
706 return 0;
707
708 printk("%s: No CD-ROM drive found\n", name);
709 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
710 put_disk(cd->disk);
711 return -1;
712}
713
714/* I/O request processing */
715static struct request_queue *pcd_queue;
716
717static void do_pcd_request(request_queue_t * q)
718{
719 if (pcd_busy)
720 return;
721 while (1) {
722 pcd_req = elv_next_request(q);
723 if (!pcd_req)
724 return;
725
726 if (rq_data_dir(pcd_req) == READ) {
727 struct pcd_unit *cd = pcd_req->rq_disk->private_data;
728 if (cd != pcd_current)
729 pcd_bufblk = -1;
730 pcd_current = cd;
731 pcd_sector = pcd_req->sector;
732 pcd_count = pcd_req->current_nr_sectors;
733 pcd_buf = pcd_req->buffer;
734 pcd_busy = 1;
735 ps_set_intr(do_pcd_read, NULL, 0, nice);
736 return;
737 } else
738 end_request(pcd_req, 0);
739 }
740}
741
742static inline void next_request(int success)
743{
744 unsigned long saved_flags;
745
746 spin_lock_irqsave(&pcd_lock, saved_flags);
747 end_request(pcd_req, success);
748 pcd_busy = 0;
749 do_pcd_request(pcd_queue);
750 spin_unlock_irqrestore(&pcd_lock, saved_flags);
751}
752
753static int pcd_ready(void)
754{
755 return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ));
756}
757
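/* pcd_transfer() satisfies the request from the single cached 2048-byte
   CD frame: while the next 512-byte sector still falls inside block
   pcd_bufblk, copy it out of pcd_buffer and advance; anything beyond
   the cached frame is left for pcd_start() to fetch. */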
758static void pcd_transfer(void)
759{
760
761 while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) {
762 int o = (pcd_sector % 4) * 512;
763 memcpy(pcd_buf, pcd_buffer + o, 512);
764 pcd_count--;
765 pcd_buf += 512;
766 pcd_sector++;
767 }
768}
769
770static void pcd_start(void)
771{
772 int b, i;
773 char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 };
774
775 pcd_bufblk = pcd_sector / 4;
776 b = pcd_bufblk;
777 for (i = 0; i < 4; i++) {
778 rd_cmd[5 - i] = b & 0xff;
779 b = b >> 8;
780 }
781
782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
783 pcd_bufblk = -1;
784 next_request(0);
785 return;
786 }
787
788 mdelay(1);
789
790 ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice);
791}
792
793static void do_pcd_read(void)
794{
795 pcd_busy = 1;
796 pcd_retries = 0;
797 pcd_transfer();
798 if (!pcd_count) {
799 next_request(1);
800 return;
801 }
802
803 pi_do_claimed(pcd_current->pi, pcd_start);
804}
805
806static void do_pcd_read_drq(void)
807{
808 unsigned long saved_flags;
809
810 if (pcd_completion(pcd_current, pcd_buffer, "read block")) {
811 if (pcd_retries < PCD_RETRIES) {
812 mdelay(1);
813 pcd_retries++;
814 pi_do_claimed(pcd_current->pi, pcd_start);
815 return;
816 }
817 pcd_bufblk = -1;
818 next_request(0);
819 return;
820 }
821
822 do_pcd_read();
823 spin_lock_irqsave(&pcd_lock, saved_flags);
824 do_pcd_request(pcd_queue);
825 spin_unlock_irqrestore(&pcd_lock, saved_flags);
826}
827
828/* the audio_ioctl stuff is adapted from sr_ioctl.c */
829
830static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
831{
832 struct pcd_unit *cd = cdi->handle;
833
834 switch (cmd) {
835
836 case CDROMREADTOCHDR:
837
838 {
839 char cmd[12] =
840 { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
841 0, 0, 0 };
842 struct cdrom_tochdr *tochdr =
843 (struct cdrom_tochdr *) arg;
844 char buffer[32];
845 int r;
846
847 r = pcd_atapi(cd, cmd, 12, buffer, "read toc header");
848
849 tochdr->cdth_trk0 = buffer[2];
850 tochdr->cdth_trk1 = buffer[3];
851
852 return r ? -EIO : 0;
853 }
854
855 case CDROMREADTOCENTRY:
856
857 {
858 char cmd[12] =
859 { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
860 0, 0, 0 };
861
862 struct cdrom_tocentry *tocentry =
863 (struct cdrom_tocentry *) arg;
864 unsigned char buffer[32];
865 int r;
866
867 cmd[1] =
868 (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
869 cmd[6] = tocentry->cdte_track;
870
871 r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry");
872
873 tocentry->cdte_ctrl = buffer[5] & 0xf;
874 tocentry->cdte_adr = buffer[5] >> 4;
875 tocentry->cdte_datamode =
876 (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
877 if (tocentry->cdte_format == CDROM_MSF) {
878 tocentry->cdte_addr.msf.minute = buffer[9];
879 tocentry->cdte_addr.msf.second = buffer[10];
880 tocentry->cdte_addr.msf.frame = buffer[11];
881 } else
882 tocentry->cdte_addr.lba =
883 (((((buffer[8] << 8) + buffer[9]) << 8)
884 + buffer[10]) << 8) + buffer[11];
885
886 return r ? -EIO : 0;
887 }
888
889 default:
890
891 return -ENOSYS;
892 }
893}
894
895static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
896{
897 char cmd[12] =
898 { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 };
899 char buffer[32];
900
901 if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn"))
902 return -EIO;
903
904 memcpy(mcn->medium_catalog_number, buffer + 9, 13);
905 mcn->medium_catalog_number[13] = 0;
906
907 return 0;
908}
909
910static int __init pcd_init(void)
911{
912 struct pcd_unit *cd;
913 int unit;
914
915 if (disable)
916 return -1;
917
918 pcd_init_units();
919
920 if (pcd_detect())
921 return -1;
922
923 /* get the atapi capabilities page */
924 pcd_probe_capabilities();
925
926 if (register_blkdev(major, name)) {
927 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
928 put_disk(cd->disk);
929 return -1;
930 }
931
932 pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
933 if (!pcd_queue) {
934 unregister_blkdev(major, name);
935 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
936 put_disk(cd->disk);
937 return -1;
938 }
939
940 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
941 if (cd->present) {
942 register_cdrom(&cd->info);
943 cd->disk->private_data = cd;
944 cd->disk->queue = pcd_queue;
945 add_disk(cd->disk);
946 }
947 }
948
949 return 0;
950}
951
952static void __exit pcd_exit(void)
953{
954 struct pcd_unit *cd;
955 int unit;
956
957 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
958 if (cd->present) {
959 del_gendisk(cd->disk);
960 pi_release(cd->pi);
961 unregister_cdrom(&cd->info);
962 }
963 put_disk(cd->disk);
964 }
965 blk_cleanup_queue(pcd_queue);
966 unregister_blkdev(major, name);
967}
968
969MODULE_LICENSE("GPL");
970module_init(pcd_init)
971module_exit(pcd_exit)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
new file mode 100644
index 000000000000..202a5a74ad37
--- /dev/null
+++ b/drivers/block/paride/pd.c
@@ -0,0 +1,950 @@
1/*
2 pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the high-level driver for parallel port IDE hard
6 drives based on chips supported by the paride module.
7
8 By default, the driver will autoprobe for a single parallel
9	port IDE drive, but if the drives' individual parameters are
10 specified, the driver can handle up to 4 drives.
11
12 The behaviour of the pd driver can be altered by setting
13 some parameters from the insmod command line. The following
14 parameters are adjustable:
15
16 drive0 These four arguments can be arrays of
17 drive1 1-8 integers as follows:
18 drive2
19 drive3 <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
20
21 Where,
22
23 <prt> is the base of the parallel port address for
24 the corresponding drive. (required)
25
26 <pro> is the protocol number for the adapter that
27 supports this drive. These numbers are
28 logged by 'paride' when the protocol modules
29 are initialised. (0 if not given)
30
31 <uni> for those adapters that support chained
32 devices, this is the unit selector for the
33 chain of devices on the given port. It should
34 be zero for devices that don't support chaining.
35 (0 if not given)
36
37 <mod> this can be -1 to choose the best mode, or one
38 of the mode numbers supported by the adapter.
39 (-1 if not given)
40
41 <geo> this defaults to 0 to indicate that the driver
42 should use the CHS geometry provided by the drive
43 itself. If set to 1, the driver will provide
44 a logical geometry with 64 heads and 32 sectors
45 per track, to be consistent with most SCSI
46 drivers. (0 if not given)
47
48 <sby> set this to zero to disable the power saving
49 standby mode, if needed. (1 if not given)
50
51 <dly> some parallel ports require the driver to
52 go more slowly. -1 sets a default value that
53 should work with the chosen protocol. Otherwise,
54 set this to a small integer, the larger it is
55 the slower the port i/o. In some cases, setting
56 this to zero will speed up the device. (default -1)
57
58 <slv> IDE disks can be jumpered to master or slave.
59 Set this to 0 to choose the master drive, 1 to
60 choose the slave, -1 (the default) to choose the
61 first drive found.
62
63
64	    major	You may use this parameter to override the
65 default major number (45) that this driver
66 will use. Be sure to change the device
67 name as well.
68
69 name This parameter is a character string that
70 contains the name the kernel will use for this
71 device (in /proc output, for instance).
72 (default "pd")
73
74 cluster The driver will attempt to aggregate requests
75 for adjacent blocks into larger multi-block
76 clusters. The maximum cluster size (in 512
77 byte sectors) is set with this parameter.
78 (default 64)
79
80 verbose This parameter controls the amount of logging
81 that the driver will do. Set it to 0 for
82 normal operation, 1 to see autoprobe progress
83 messages, or 2 to see additional debugging
84 output. (default 0)
85
86 nice This parameter controls the driver's use of
87 idle CPU time, at the expense of some speed.
88
89	    If this driver is built into the kernel, you can use the
90	    following kernel command line parameters, with the same values
91 as the corresponding module parameters listed above:
92
93 pd.drive0
94 pd.drive1
95 pd.drive2
96 pd.drive3
97 pd.cluster
98 pd.nice
99
100 In addition, you can use the parameter pd.disable to disable
101 the driver entirely.
102
103*/
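/* Example (illustrative values only): a single drive on the parallel port
   at 0x378, with everything else left at its default, can be set up with

	insmod pd drive0=0x378

   or, when the driver is built into the kernel, with pd.drive0=0x378 on
   the kernel command line.  Use the address of the port the drive is
   actually attached to.
*/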
104
105/* Changes:
106
107 1.01 GRG 1997.01.24 Restored pd_reset()
108 Added eject ioctl
109 1.02 GRG 1998.05.06 SMP spinlock changes,
110 Added slave support
111 1.03 GRG 1998.06.16 Eliminate an Ugh.
112 1.04 GRG 1998.08.15 Extra debugging, use HZ in loop timing
113 1.05 GRG 1998.09.24 Added jumbo support
114
115*/
116
117#define PD_VERSION "1.05"
118#define PD_MAJOR 45
119#define PD_NAME "pd"
120#define PD_UNITS 4
121
122/* Here are things one can override from the insmod command.
123 Most are autoprobed by paride unless set here. Verbose is off
124 by default.
125
126*/
127
128static int verbose = 0;
129static int major = PD_MAJOR;
130static char *name = PD_NAME;
131static int cluster = 64;
132static int nice = 0;
133static int disable = 0;
134
135static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
136static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
137static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
138static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
139
140static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};
141
142enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
143
144/* end of parameters */
145
146#include <linux/init.h>
147#include <linux/module.h>
148#include <linux/fs.h>
149#include <linux/delay.h>
150#include <linux/hdreg.h>
151#include <linux/cdrom.h> /* for the eject ioctl */
152#include <linux/blkdev.h>
153#include <linux/blkpg.h>
154#include <asm/uaccess.h>
155#include <linux/sched.h>
156#include <linux/workqueue.h>
157
158static DEFINE_SPINLOCK(pd_lock);
159
160module_param(verbose, bool, 0);
161module_param(major, int, 0);
162module_param(name, charp, 0);
163module_param(cluster, int, 0);
164module_param(nice, int, 0);
165module_param_array(drive0, int, NULL, 0);
166module_param_array(drive1, int, NULL, 0);
167module_param_array(drive2, int, NULL, 0);
168module_param_array(drive3, int, NULL, 0);
169
170#include "paride.h"
171
172#define PD_BITS 4
173
174/* numbers for "SCSI" geometry */
175
176#define PD_LOG_HEADS 64
177#define PD_LOG_SECTS 32
178
179#define PD_ID_OFF 54
180#define PD_ID_LEN 14
181
182#define PD_MAX_RETRIES 5
183#define PD_TMO 800 /* interrupt timeout in jiffies */
184#define PD_SPIN_DEL 50 /* spin delay in micro-seconds */
185
186#define PD_SPIN (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
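/* PD_SPIN is the number of polling iterations pd_wait_for() makes before
   giving up: PD_TMO/HZ seconds expressed in microseconds, divided by the
   PD_SPIN_DEL microsecond delay per iteration.  With HZ=100, for example,
   that is (1000000*800)/(100*50) = 160000 iterations, i.e. about 8 seconds.
*/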
187
188#define STAT_ERR 0x00001
189#define STAT_INDEX 0x00002
190#define STAT_ECC 0x00004
191#define STAT_DRQ 0x00008
192#define STAT_SEEK 0x00010
193#define STAT_WRERR 0x00020
194#define STAT_READY 0x00040
195#define STAT_BUSY 0x00080
196
197#define ERR_AMNF 0x00100
198#define ERR_TK0NF 0x00200
199#define ERR_ABRT 0x00400
200#define ERR_MCR 0x00800
201#define ERR_IDNF 0x01000
202#define ERR_MC 0x02000
203#define ERR_UNC 0x04000
204#define ERR_TMO 0x10000
205
206#define IDE_READ 0x20
207#define IDE_WRITE 0x30
208#define IDE_READ_VRFY 0x40
209#define IDE_INIT_DEV_PARMS 0x91
210#define IDE_STANDBY 0x96
211#define IDE_ACKCHANGE 0xdb
212#define IDE_DOORLOCK 0xde
213#define IDE_DOORUNLOCK 0xdf
214#define IDE_IDENTIFY 0xec
215#define IDE_EJECT 0xed
216
217#define PD_NAMELEN 8
218
219struct pd_unit {
220 struct pi_adapter pia; /* interface to paride layer */
221 struct pi_adapter *pi;
222 int access; /* count of active opens ... */
223 int capacity; /* Size of this volume in sectors */
224 int heads; /* physical geometry */
225 int sectors;
226 int cylinders;
227 int can_lba;
228 int drive; /* master=0 slave=1 */
229 int changed; /* Have we seen a disk change ? */
230 int removable; /* removable media device ? */
231 int standby;
232 int alt_geom;
233 char name[PD_NAMELEN]; /* pda, pdb, etc ... */
234 struct gendisk *gd;
235};
236
237static struct pd_unit pd[PD_UNITS];
238
239static char pd_scratch[512]; /* scratch block buffer */
240
241static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
242 "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
243 "IDNF", "MC", "UNC", "???", "TMO"
244};
245
246static inline int status_reg(struct pd_unit *disk)
247{
248 return pi_read_regr(disk->pi, 1, 6);
249}
250
251static inline int read_reg(struct pd_unit *disk, int reg)
252{
253 return pi_read_regr(disk->pi, 0, reg);
254}
255
256static inline void write_status(struct pd_unit *disk, int val)
257{
258 pi_write_regr(disk->pi, 1, 6, val);
259}
260
261static inline void write_reg(struct pd_unit *disk, int reg, int val)
262{
263 pi_write_regr(disk->pi, 0, reg, val);
264}
265
266static inline u8 DRIVE(struct pd_unit *disk)
267{
268 return 0xa0+0x10*disk->drive;
269}
270
271/* ide command interface */
272
273static void pd_print_error(struct pd_unit *disk, char *msg, int status)
274{
275 int i;
276
277 printk("%s: %s: status = 0x%x =", disk->name, msg, status);
278 for (i = 0; i < 18; i++)
279 if (status & (1 << i))
280 printk(" %s", pd_errs[i]);
281 printk("\n");
282}
283
284static void pd_reset(struct pd_unit *disk)
285{ /* called only for MASTER drive */
286 write_status(disk, 4);
287 udelay(50);
288 write_status(disk, 0);
289 udelay(250);
290}
291
292#define DBMSG(msg) ((verbose>1)?(msg):NULL)
293
294static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
295{ /* polled wait */
296 int k, r, e;
297
298 k = 0;
299 while (k < PD_SPIN) {
300 r = status_reg(disk);
301 k++;
302 if (((r & w) == w) && !(r & STAT_BUSY))
303 break;
304 udelay(PD_SPIN_DEL);
305 }
306 e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
307 if (k >= PD_SPIN)
308 e |= ERR_TMO;
309 if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
310 pd_print_error(disk, msg, e);
311 return e;
312}
313
314static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
315{
316 write_reg(disk, 6, DRIVE(disk) + h);
317 write_reg(disk, 1, 0); /* the IDE task file */
318 write_reg(disk, 2, n);
319 write_reg(disk, 3, s);
320 write_reg(disk, 4, c0);
321 write_reg(disk, 5, c1);
322 write_reg(disk, 7, func);
323
324 udelay(1);
325}
326
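/* pd_ide_command() splits a linear block number across the IDE task file.
   In LBA mode the low 24 bits go out as sector, cylinder-low and
   cylinder-high, and bits 24-27 land in the drive/head register together
   with the 0x40 LBA flag; for illustration, block 0x00123456 becomes
   s=0x56, c0=0x34, c1=0x12, h=0x40.  In CHS mode the block number is
   decomposed using the drive's own sectors-per-track and head counts.
*/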
327static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
328{
329 int c1, c0, h, s;
330
331 if (disk->can_lba) {
332 s = block & 255;
333 c0 = (block >>= 8) & 255;
334 c1 = (block >>= 8) & 255;
335 h = ((block >>= 8) & 15) + 0x40;
336 } else {
337 s = (block % disk->sectors) + 1;
338 h = (block /= disk->sectors) % disk->heads;
339 c0 = (block /= disk->heads) % 256;
340 c1 = (block >>= 8);
341 }
342 pd_send_command(disk, count, s, h, c0, c1, func);
343}
344
345/* The i/o request engine */
346
347enum action {Fail = 0, Ok = 1, Hold, Wait};
348
349static struct request *pd_req; /* current request */
350static enum action (*phase)(void);
351
352static void run_fsm(void);
353
354static void ps_tq_int( void *data);
355
356static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
357
358static void schedule_fsm(void)
359{
360 if (!nice)
361 schedule_work(&fsm_tq);
362 else
363 schedule_delayed_work(&fsm_tq, nice-1);
364}
365
366static void ps_tq_int(void *data)
367{
368 run_fsm();
369}
370
371static enum action do_pd_io_start(void);
372static enum action pd_special(void);
373static enum action do_pd_read_start(void);
374static enum action do_pd_write_start(void);
375static enum action do_pd_read_drq(void);
376static enum action do_pd_write_done(void);
377
378static struct request_queue *pd_queue;
379static int pd_claimed;
380
381static struct pd_unit *pd_current; /* current request's drive */
382static PIA *pi_current; /* current request's PIA */
383
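/* run_fsm() pushes one request at a time through the phase functions
   (do_pd_io_start and friends).  A phase returns Ok or Fail to complete
   the current chunk: the port is disconnected and, if another request is
   queued, the fall-through to the Hold case re-schedules the state
   machine.  Hold means "poll me again later from the workqueue"; Wait
   releases the port and retries the same phase once it has been
   re-claimed.  pd_claimed tracks the port: 0 = not claimed, 1 = claim
   requested or held, 2 = claimed and connected.
*/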
384static void run_fsm(void)
385{
386 while (1) {
387 enum action res;
388 unsigned long saved_flags;
389 int stop = 0;
390
391 if (!phase) {
392 pd_current = pd_req->rq_disk->private_data;
393 pi_current = pd_current->pi;
394 phase = do_pd_io_start;
395 }
396
397 switch (pd_claimed) {
398 case 0:
399 pd_claimed = 1;
400 if (!pi_schedule_claimed(pi_current, run_fsm))
401 return;
402 case 1:
403 pd_claimed = 2;
404 pi_current->proto->connect(pi_current);
405 }
406
407 switch(res = phase()) {
408 case Ok: case Fail:
409 pi_disconnect(pi_current);
410 pd_claimed = 0;
411 phase = NULL;
412 spin_lock_irqsave(&pd_lock, saved_flags);
413 end_request(pd_req, res);
414 pd_req = elv_next_request(pd_queue);
415 if (!pd_req)
416 stop = 1;
417 spin_unlock_irqrestore(&pd_lock, saved_flags);
418 if (stop)
419 return;
420 case Hold:
421 schedule_fsm();
422 return;
423 case Wait:
424 pi_disconnect(pi_current);
425 pd_claimed = 0;
426 }
427 }
428}
429
430static int pd_retries = 0; /* i/o error retry count */
431static int pd_block; /* address of next requested block */
432static int pd_count; /* number of blocks still to do */
433static int pd_run; /* sectors in current cluster */
434static int pd_cmd; /* current command READ/WRITE */
435static char *pd_buf; /* buffer for request in progress */
436
437static enum action do_pd_io_start(void)
438{
439 if (pd_req->flags & REQ_SPECIAL) {
440 phase = pd_special;
441 return pd_special();
442 }
443
444 pd_cmd = rq_data_dir(pd_req);
445 if (pd_cmd == READ || pd_cmd == WRITE) {
446 pd_block = pd_req->sector;
447 pd_count = pd_req->current_nr_sectors;
448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
449 return Fail;
450 pd_run = pd_req->nr_sectors;
451 pd_buf = pd_req->buffer;
452 pd_retries = 0;
453 if (pd_cmd == READ)
454 return do_pd_read_start();
455 else
456 return do_pd_write_start();
457 }
458 return Fail;
459}
460
461static enum action pd_special(void)
462{
463 enum action (*func)(struct pd_unit *) = pd_req->special;
464 return func(pd_current);
465}
466
467static int pd_next_buf(void)
468{
469 unsigned long saved_flags;
470
471 pd_count--;
472 pd_run--;
473 pd_buf += 512;
474 pd_block++;
475 if (!pd_run)
476 return 1;
477 if (pd_count)
478 return 0;
479 spin_lock_irqsave(&pd_lock, saved_flags);
480 end_request(pd_req, 1);
481 pd_count = pd_req->current_nr_sectors;
482 pd_buf = pd_req->buffer;
483 spin_unlock_irqrestore(&pd_lock, saved_flags);
484 return 0;
485}
486
487static unsigned long pd_timeout;
488
489static enum action do_pd_read_start(void)
490{
491 if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
492 if (pd_retries < PD_MAX_RETRIES) {
493 pd_retries++;
494 return Wait;
495 }
496 return Fail;
497 }
498 pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
499 phase = do_pd_read_drq;
500 pd_timeout = jiffies + PD_TMO;
501 return Hold;
502}
503
504static enum action do_pd_write_start(void)
505{
506 if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
507 if (pd_retries < PD_MAX_RETRIES) {
508 pd_retries++;
509 return Wait;
510 }
511 return Fail;
512 }
513 pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
514 while (1) {
515 if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
516 if (pd_retries < PD_MAX_RETRIES) {
517 pd_retries++;
518 return Wait;
519 }
520 return Fail;
521 }
522 pi_write_block(pd_current->pi, pd_buf, 512);
523 if (pd_next_buf())
524 break;
525 }
526 phase = do_pd_write_done;
527 pd_timeout = jiffies + PD_TMO;
528 return Hold;
529}
530
531static inline int pd_ready(void)
532{
533 return !(status_reg(pd_current) & STAT_BUSY);
534}
535
536static enum action do_pd_read_drq(void)
537{
538 if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
539 return Hold;
540
541 while (1) {
542 if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
543 if (pd_retries < PD_MAX_RETRIES) {
544 pd_retries++;
545 phase = do_pd_read_start;
546 return Wait;
547 }
548 return Fail;
549 }
550 pi_read_block(pd_current->pi, pd_buf, 512);
551 if (pd_next_buf())
552 break;
553 }
554 return Ok;
555}
556
557static enum action do_pd_write_done(void)
558{
559 if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
560 return Hold;
561
562 if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
563 if (pd_retries < PD_MAX_RETRIES) {
564 pd_retries++;
565 phase = do_pd_write_start;
566 return Wait;
567 }
568 return Fail;
569 }
570 return Ok;
571}
572
573/* special io requests */
574
575/* According to the ATA standard, the default CHS geometry should be
576 available following a reset. Some Western Digital drives come up
577 in a mode where only LBA addresses are accepted until the device
578 parameters are initialised.
579*/
580
581static void pd_init_dev_parms(struct pd_unit *disk)
582{
583 pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
584 pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
585 IDE_INIT_DEV_PARMS);
586 udelay(300);
587 pd_wait_for(disk, 0, "Initialise device parameters");
588}
589
590static enum action pd_door_lock(struct pd_unit *disk)
591{
592 if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
593 pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
594 pd_wait_for(disk, STAT_READY, "Lock done");
595 }
596 return Ok;
597}
598
599static enum action pd_door_unlock(struct pd_unit *disk)
600{
601 if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
602 pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
603 pd_wait_for(disk, STAT_READY, "Lock done");
604 }
605 return Ok;
606}
607
608static enum action pd_eject(struct pd_unit *disk)
609{
610 pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
611 pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
612 pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
613 pd_wait_for(disk, 0, DBMSG("before eject"));
614 pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
615 pd_wait_for(disk, 0, DBMSG("after eject"));
616 return Ok;
617}
618
619static enum action pd_media_check(struct pd_unit *disk)
620{
621 int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
622 if (!(r & STAT_ERR)) {
623 pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
624 r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
625 } else
626 disk->changed = 1; /* say changed if other error */
627 if (r & ERR_MC) {
628 disk->changed = 1;
629 pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
630 pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
631 pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
632 r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
633 }
634 return Ok;
635}
636
637static void pd_standby_off(struct pd_unit *disk)
638{
639 pd_wait_for(disk, 0, DBMSG("before STANDBY"));
640 pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
641 pd_wait_for(disk, 0, DBMSG("after STANDBY"));
642}
643
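/* pd_identify() reads the 512-byte ATA IDENTIFY DEVICE page into
   pd_scratch.  The byte offsets used below are twice the word numbers in
   the ATA spec: word 1 (offset 2) default cylinders, word 3 (offset 6)
   heads, word 6 (offset 12) sectors per track, word 49 bit 9 (offset 99,
   bit 1) LBA supported, words 60-61 (offset 120) LBA capacity, and words
   27 onward (offset 54) part of the model string, whose characters are
   swapped within each 16-bit word (hence the id[j ^ 1] copy).
*/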
644static enum action pd_identify(struct pd_unit *disk)
645{
646 int j;
647 char id[PD_ID_LEN + 1];
648
649/* WARNING: here there may be dragons. reset() applies to both drives,
650 but we call it only on probing the MASTER. This should allow most
651 common configurations to work, but be warned that a reset can clear
652 settings on the SLAVE drive.
653*/
654
655 if (disk->drive == 0)
656 pd_reset(disk);
657
658 write_reg(disk, 6, DRIVE(disk));
659 pd_wait_for(disk, 0, DBMSG("before IDENT"));
660 pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);
661
662 if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
663 return Fail;
664 pi_read_block(disk->pi, pd_scratch, 512);
665 disk->can_lba = pd_scratch[99] & 2;
666 disk->sectors = le16_to_cpu(*(u16 *) (pd_scratch + 12));
667 disk->heads = le16_to_cpu(*(u16 *) (pd_scratch + 6));
668 disk->cylinders = le16_to_cpu(*(u16 *) (pd_scratch + 2));
669 if (disk->can_lba)
670 disk->capacity = le32_to_cpu(*(u32 *) (pd_scratch + 120));
671 else
672 disk->capacity = disk->sectors * disk->heads * disk->cylinders;
673
674 for (j = 0; j < PD_ID_LEN; j++)
675 id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
676 j = PD_ID_LEN - 1;
677 while ((j >= 0) && (id[j] <= 0x20))
678 j--;
679 j++;
680 id[j] = 0;
681
682 disk->removable = pd_scratch[0] & 0x80;
683
684 printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
685 disk->name, id,
686 disk->drive ? "slave" : "master",
687 disk->capacity, disk->capacity / 2048,
688 disk->cylinders, disk->heads, disk->sectors,
689 disk->removable ? "removable" : "fixed");
690
691 if (disk->capacity)
692 pd_init_dev_parms(disk);
693 if (!disk->standby)
694 pd_standby_off(disk);
695
696 return Ok;
697}
698
699/* end of io request engine */
700
701static void do_pd_request(request_queue_t * q)
702{
703 if (pd_req)
704 return;
705 pd_req = elv_next_request(q);
706 if (!pd_req)
707 return;
708
709 schedule_fsm();
710}
711
712static int pd_special_command(struct pd_unit *disk,
713 enum action (*func)(struct pd_unit *disk))
714{
715 DECLARE_COMPLETION(wait);
716 struct request rq;
717 int err = 0;
718
719 memset(&rq, 0, sizeof(rq));
720 rq.errors = 0;
721 rq.rq_status = RQ_ACTIVE;
722 rq.rq_disk = disk->gd;
723 rq.ref_count = 1;
724 rq.waiting = &wait;
725 rq.end_io = blk_end_sync_rq;
726 blk_insert_request(disk->gd->queue, &rq, 0, func, 0);
727 wait_for_completion(&wait);
728 rq.waiting = NULL;
729 if (rq.errors)
730 err = -EIO;
731 blk_put_request(&rq);
732 return err;
733}
734
735/* kernel glue structures */
736
737static int pd_open(struct inode *inode, struct file *file)
738{
739 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
740
741 disk->access++;
742
743 if (disk->removable) {
744 pd_special_command(disk, pd_media_check);
745 pd_special_command(disk, pd_door_lock);
746 }
747 return 0;
748}
749
750static int pd_ioctl(struct inode *inode, struct file *file,
751 unsigned int cmd, unsigned long arg)
752{
753 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
754 struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
755 struct hd_geometry g;
756
757 switch (cmd) {
758 case CDROMEJECT:
759 if (disk->access == 1)
760 pd_special_command(disk, pd_eject);
761 return 0;
762 case HDIO_GETGEO:
763 if (disk->alt_geom) {
764 g.heads = PD_LOG_HEADS;
765 g.sectors = PD_LOG_SECTS;
766 g.cylinders = disk->capacity / (g.heads * g.sectors);
767 } else {
768 g.heads = disk->heads;
769 g.sectors = disk->sectors;
770 g.cylinders = disk->cylinders;
771 }
772 g.start = get_start_sect(inode->i_bdev);
773 if (copy_to_user(geo, &g, sizeof(struct hd_geometry)))
774 return -EFAULT;
775 return 0;
776 default:
777 return -EINVAL;
778 }
779}
780
781static int pd_release(struct inode *inode, struct file *file)
782{
783 struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
784
785 if (!--disk->access && disk->removable)
786 pd_special_command(disk, pd_door_unlock);
787
788 return 0;
789}
790
791static int pd_check_media(struct gendisk *p)
792{
793 struct pd_unit *disk = p->private_data;
794 int r;
795 if (!disk->removable)
796 return 0;
797 pd_special_command(disk, pd_media_check);
798 r = disk->changed;
799 disk->changed = 0;
800 return r;
801}
802
803static int pd_revalidate(struct gendisk *p)
804{
805 struct pd_unit *disk = p->private_data;
806 if (pd_special_command(disk, pd_identify) == 0)
807 set_capacity(p, disk->capacity);
808 else
809 set_capacity(p, 0);
810 return 0;
811}
812
813static struct block_device_operations pd_fops = {
814 .owner = THIS_MODULE,
815 .open = pd_open,
816 .release = pd_release,
817 .ioctl = pd_ioctl,
818 .media_changed = pd_check_media,
819 .revalidate_disk= pd_revalidate
820};
821
822/* probing */
823
824static void pd_probe_drive(struct pd_unit *disk)
825{
826 struct gendisk *p = alloc_disk(1 << PD_BITS);
827 if (!p)
828 return;
829 strcpy(p->disk_name, disk->name);
830 p->fops = &pd_fops;
831 p->major = major;
832 p->first_minor = (disk - pd) << PD_BITS;
833 disk->gd = p;
834 p->private_data = disk;
835 p->queue = pd_queue;
836
837 if (disk->drive == -1) {
838 for (disk->drive = 0; disk->drive <= 1; disk->drive++)
839 if (pd_special_command(disk, pd_identify) == 0)
840 return;
841 } else if (pd_special_command(disk, pd_identify) == 0)
842 return;
843 disk->gd = NULL;
844 put_disk(p);
845}
846
847static int pd_detect(void)
848{
849 int found = 0, unit, pd_drive_count = 0;
850 struct pd_unit *disk;
851
852 for (unit = 0; unit < PD_UNITS; unit++) {
853 int *parm = *drives[unit];
854 struct pd_unit *disk = pd + unit;
855 disk->pi = &disk->pia;
856 disk->access = 0;
857 disk->changed = 1;
858 disk->capacity = 0;
859 disk->drive = parm[D_SLV];
860 snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
861 disk->alt_geom = parm[D_GEO];
862 disk->standby = parm[D_SBY];
863 if (parm[D_PRT])
864 pd_drive_count++;
865 }
866
867 if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
868 disk = pd;
869 if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
870 PI_PD, verbose, disk->name)) {
871 pd_probe_drive(disk);
872 if (!disk->gd)
873 pi_release(disk->pi);
874 }
875
876 } else {
877 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
878 int *parm = *drives[unit];
879 if (!parm[D_PRT])
880 continue;
881 if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
882 parm[D_UNI], parm[D_PRO], parm[D_DLY],
883 pd_scratch, PI_PD, verbose, disk->name)) {
884 pd_probe_drive(disk);
885 if (!disk->gd)
886 pi_release(disk->pi);
887 }
888 }
889 }
890 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
891 if (disk->gd) {
892 set_capacity(disk->gd, disk->capacity);
893 add_disk(disk->gd);
894 found = 1;
895 }
896 }
897 if (!found)
898 printk("%s: no valid drive found\n", name);
899 return found;
900}
901
902static int __init pd_init(void)
903{
904 if (disable)
905 goto out1;
906
907 pd_queue = blk_init_queue(do_pd_request, &pd_lock);
908 if (!pd_queue)
909 goto out1;
910
911 blk_queue_max_sectors(pd_queue, cluster);
912
913 if (register_blkdev(major, name))
914 goto out2;
915
916 printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
917 name, name, PD_VERSION, major, cluster, nice);
918 if (!pd_detect())
919 goto out3;
920
921 return 0;
922
923out3:
924 unregister_blkdev(major, name);
925out2:
926 blk_cleanup_queue(pd_queue);
927out1:
928 return -ENODEV;
929}
930
931static void __exit pd_exit(void)
932{
933 struct pd_unit *disk;
934 int unit;
935 unregister_blkdev(major, name);
936 for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
937 struct gendisk *p = disk->gd;
938 if (p) {
939 disk->gd = NULL;
940 del_gendisk(p);
941 put_disk(p);
942 pi_release(disk->pi);
943 }
944 }
945 blk_cleanup_queue(pd_queue);
946}
947
948MODULE_LICENSE("GPL");
949module_init(pd_init)
950module_exit(pd_exit)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
new file mode 100644
index 000000000000..060b1f2a91dd
--- /dev/null
+++ b/drivers/block/paride/pf.c
@@ -0,0 +1,982 @@
1/*
2 pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the high-level driver for parallel port ATAPI disk
6 drives based on chips supported by the paride module.
7
8 By default, the driver will autoprobe for a single parallel
9	port ATAPI disk drive, but if the drives' individual parameters are
10 specified, the driver can handle up to 4 drives.
11
12 The behaviour of the pf driver can be altered by setting
13 some parameters from the insmod command line. The following
14 parameters are adjustable:
15
16 drive0 These four arguments can be arrays of
17 drive1 1-7 integers as follows:
18 drive2
19 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
20
21 Where,
22
23 <prt> is the base of the parallel port address for
24 the corresponding drive. (required)
25
26 <pro> is the protocol number for the adapter that
27 supports this drive. These numbers are
28 logged by 'paride' when the protocol modules
29 are initialised. (0 if not given)
30
31 <uni> for those adapters that support chained
32 devices, this is the unit selector for the
33 chain of devices on the given port. It should
34 be zero for devices that don't support chaining.
35 (0 if not given)
36
37 <mod> this can be -1 to choose the best mode, or one
38 of the mode numbers supported by the adapter.
39 (-1 if not given)
40
41	    <slv>	ATAPI devices can be jumpered to master or slave.
42 Set this to 0 to choose the master drive, 1 to
43 choose the slave, -1 (the default) to choose the
44 first drive found.
45
46 <lun> Some ATAPI devices support multiple LUNs.
47 One example is the ATAPI PD/CD drive from
48 Matshita/Panasonic. This device has a
49 CD drive on LUN 0 and a PD drive on LUN 1.
50 By default, the driver will search for the
51 first LUN with a supported device. Set
52 this parameter to force it to use a specific
53 LUN. (default -1)
54
55 <dly> some parallel ports require the driver to
56 go more slowly. -1 sets a default value that
57 should work with the chosen protocol. Otherwise,
58 set this to a small integer, the larger it is
59 the slower the port i/o. In some cases, setting
60 this to zero will speed up the device. (default -1)
61
62	    major	You may use this parameter to override the
63 default major number (47) that this driver
64 will use. Be sure to change the device
65 name as well.
66
67 name This parameter is a character string that
68 contains the name the kernel will use for this
69 device (in /proc output, for instance).
70 (default "pf").
71
72 cluster The driver will attempt to aggregate requests
73 for adjacent blocks into larger multi-block
74 clusters. The maximum cluster size (in 512
75 byte sectors) is set with this parameter.
76 (default 64)
77
78 verbose This parameter controls the amount of logging
79 that the driver will do. Set it to 0 for
80 normal operation, 1 to see autoprobe progress
81 messages, or 2 to see additional debugging
82 output. (default 0)
83
84 nice This parameter controls the driver's use of
85 idle CPU time, at the expense of some speed.
86
87 If this driver is built into the kernel, you can use the
88 following command line parameters, with the same values
89 as the corresponding module parameters listed above:
90
91 pf.drive0
92 pf.drive1
93 pf.drive2
94 pf.drive3
95 pf.cluster
96 pf.nice
97
98 In addition, you can use the parameter pf.disable to disable
99 the driver entirely.
100
101*/
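/* Example (illustrative values only): a Matshita PD/CD drive on the
   parallel port at 0x378, forced to use the PD side on LUN 1, can be set
   up with

	insmod pf drive0=0x378,0,0,-1,-1,1

   that is, <prt>=0x378, <slv> left to autodetect and <lun>=1.  For a
   built-in driver the same values go on the kernel command line as
   pf.drive0=0x378,0,0,-1,-1,1.
*/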
102
103/* Changes:
104
105 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti().
106 Fix for drives that don't clear STAT_ERR
107 until after next CDB delivered.
108 Small change in pf_completion to round
109 up transfer size.
110 1.02 GRG 1998.06.16 Eliminated an Ugh
111 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging
112 1.04 GRG 1998.09.24 Added jumbo support
113
114*/
115
116#define PF_VERSION "1.04"
117#define PF_MAJOR 47
118#define PF_NAME "pf"
119#define PF_UNITS 4
120
121/* Here are things one can override from the insmod command.
122 Most are autoprobed by paride unless set here. Verbose is off
123 by default.
124
125*/
126
127static int verbose = 0;
128static int major = PF_MAJOR;
129static char *name = PF_NAME;
130static int cluster = 64;
131static int nice = 0;
132static int disable = 0;
133
134static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
135static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
136static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
137static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
138
139static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
140static int pf_drive_count;
141
142enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
143
144/* end of parameters */
145
146#include <linux/module.h>
147#include <linux/init.h>
148#include <linux/fs.h>
149#include <linux/delay.h>
150#include <linux/hdreg.h>
151#include <linux/cdrom.h>
152#include <linux/spinlock.h>
153#include <linux/blkdev.h>
154#include <linux/blkpg.h>
155#include <asm/uaccess.h>
156
157static DEFINE_SPINLOCK(pf_spin_lock);
158
159module_param(verbose, bool, 0644);
160module_param(major, int, 0);
161module_param(name, charp, 0);
162module_param(cluster, int, 0);
163module_param(nice, int, 0);
164module_param_array(drive0, int, NULL, 0);
165module_param_array(drive1, int, NULL, 0);
166module_param_array(drive2, int, NULL, 0);
167module_param_array(drive3, int, NULL, 0);
168
169#include "paride.h"
170#include "pseudo.h"
171
172/* constants for faking geometry numbers */
173
174#define PF_FD_MAX 8192 /* use FD geometry under this size */
175#define PF_FD_HDS 2
176#define PF_FD_SPT 18
177#define PF_HD_HDS 64
178#define PF_HD_SPT 32
179
180#define PF_MAX_RETRIES 5
181#define PF_TMO 800 /* interrupt timeout in jiffies */
182#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */
183
184#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)
185
186#define STAT_ERR 0x00001
187#define STAT_INDEX 0x00002
188#define STAT_ECC 0x00004
189#define STAT_DRQ 0x00008
190#define STAT_SEEK 0x00010
191#define STAT_WRERR 0x00020
192#define STAT_READY 0x00040
193#define STAT_BUSY 0x00080
194
195#define ATAPI_REQ_SENSE 0x03
196#define ATAPI_LOCK 0x1e
197#define ATAPI_DOOR 0x1b
198#define ATAPI_MODE_SENSE 0x5a
199#define ATAPI_CAPACITY 0x25
200#define ATAPI_IDENTIFY 0x12
201#define ATAPI_READ_10 0x28
202#define ATAPI_WRITE_10 0x2a
203
204static int pf_open(struct inode *inode, struct file *file);
205static void do_pf_request(request_queue_t * q);
206static int pf_ioctl(struct inode *inode, struct file *file,
207 unsigned int cmd, unsigned long arg);
208
209static int pf_release(struct inode *inode, struct file *file);
210
211static int pf_detect(void);
212static void do_pf_read(void);
213static void do_pf_read_start(void);
214static void do_pf_write(void);
215static void do_pf_write_start(void);
216static void do_pf_read_drq(void);
217static void do_pf_write_done(void);
218
219#define PF_NM 0
220#define PF_RO 1
221#define PF_RW 2
222
223#define PF_NAMELEN 8
224
225struct pf_unit {
226 struct pi_adapter pia; /* interface to paride layer */
227 struct pi_adapter *pi;
228 int removable; /* removable media device ? */
229 int media_status; /* media present ? WP ? */
230 int drive; /* drive */
231 int lun;
232 int access; /* count of active opens ... */
233 int present; /* device present ? */
234 char name[PF_NAMELEN]; /* pf0, pf1, ... */
235 struct gendisk *disk;
236};
237
238static struct pf_unit units[PF_UNITS];
239
240static int pf_identify(struct pf_unit *pf);
241static void pf_lock(struct pf_unit *pf, int func);
242static void pf_eject(struct pf_unit *pf);
243static int pf_check_media(struct gendisk *disk);
244
245static char pf_scratch[512]; /* scratch block buffer */
246
247/* the variables below are used mainly in the I/O request engine, which
248 processes only one request at a time.
249*/
250
251static int pf_retries = 0; /* i/o error retry count */
252static int pf_busy = 0; /* request being processed ? */
253static struct request *pf_req; /* current request */
254static int pf_block; /* address of next requested block */
255static int pf_count; /* number of blocks still to do */
256static int pf_run; /* sectors in current cluster */
257static int pf_cmd; /* current command READ/WRITE */
258static struct pf_unit *pf_current;/* unit of current request */
259static int pf_mask; /* stopper for pseudo-int */
260static char *pf_buf; /* buffer for request in progress */
261
262/* kernel glue structures */
263
264static struct block_device_operations pf_fops = {
265 .owner = THIS_MODULE,
266 .open = pf_open,
267 .release = pf_release,
268 .ioctl = pf_ioctl,
269 .media_changed = pf_check_media,
270};
271
272static void __init pf_init_units(void)
273{
274 struct pf_unit *pf;
275 int unit;
276
277 pf_drive_count = 0;
278 for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
279 struct gendisk *disk = alloc_disk(1);
280 if (!disk)
281 continue;
282 pf->disk = disk;
283 pf->pi = &pf->pia;
284 pf->media_status = PF_NM;
285 pf->drive = (*drives[unit])[D_SLV];
286 pf->lun = (*drives[unit])[D_LUN];
287 snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
288 disk->major = major;
289 disk->first_minor = unit;
290 strcpy(disk->disk_name, pf->name);
291 disk->fops = &pf_fops;
292		if ((*drives[unit])[D_PRT])
293 pf_drive_count++;
294 }
295}
296
297static int pf_open(struct inode *inode, struct file *file)
298{
299 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
300
301 pf_identify(pf);
302
303 if (pf->media_status == PF_NM)
304 return -ENODEV;
305
306 if ((pf->media_status == PF_RO) && (file->f_mode & 2))
307 return -EROFS;
308
309 pf->access++;
310 if (pf->removable)
311 pf_lock(pf, 1);
312
313 return 0;
314}
315
316static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
317{
318 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
319 struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
320 struct hd_geometry g;
321 sector_t capacity;
322
323 if (cmd == CDROMEJECT) {
324 if (pf->access == 1) {
325 pf_eject(pf);
326 return 0;
327 }
328 return -EBUSY;
329 }
330 if (cmd != HDIO_GETGEO)
331 return -EINVAL;
332 capacity = get_capacity(pf->disk);
333 if (capacity < PF_FD_MAX) {
334 g.cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
335 g.heads = PF_FD_HDS;
336 g.sectors = PF_FD_SPT;
337 } else {
338 g.cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
339 g.heads = PF_HD_HDS;
340 g.sectors = PF_HD_SPT;
341 }
342 if (copy_to_user(geo, &g, sizeof(g)))
343 return -EFAULT;
344 return 0;
345}
346
347static int pf_release(struct inode *inode, struct file *file)
348{
349 struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
350
351 if (pf->access <= 0)
352 return -EINVAL;
353
354 pf->access--;
355
356 if (!pf->access && pf->removable)
357 pf_lock(pf, 0);
358
359 return 0;
360
361}
362
363static int pf_check_media(struct gendisk *disk)
364{
365 return 1;
366}
367
368static inline int status_reg(struct pf_unit *pf)
369{
370 return pi_read_regr(pf->pi, 1, 6);
371}
372
373static inline int read_reg(struct pf_unit *pf, int reg)
374{
375 return pi_read_regr(pf->pi, 0, reg);
376}
377
378static inline void write_reg(struct pf_unit *pf, int reg, int val)
379{
380 pi_write_regr(pf->pi, 0, reg, val);
381}
382
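/* pf_wait() polls until all the bits in 'go' are clear and, when 'stop'
   is non-zero, at least one bit in 'stop' is set.  It returns 0 on
   success; on an error (status ERR, when STAT_ERR is part of 'stop') or
   on a timeout it returns (error register << 8) | status register, with
   0x100 or'ed into the error byte for the timeout case.  The read/write
   paths below test only the STAT_ERR bit of the result, while pf_command()
   treats any non-zero return as failure.
*/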
383static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
384{
385 int j, r, e, s, p;
386
387 j = 0;
388 while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
389 && (j++ < PF_SPIN))
390 udelay(PF_SPIN_DEL);
391
392 if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) {
393 s = read_reg(pf, 7);
394 e = read_reg(pf, 1);
395 p = read_reg(pf, 2);
396 if (j >= PF_SPIN)
397 e |= 0x100;
398 if (fun)
399 printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
400 " loop=%d phase=%d\n",
401 pf->name, fun, msg, r, s, e, j, p);
402 return (e << 8) + s;
403 }
404 return 0;
405}
406
407static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
408{
409 pi_connect(pf->pi);
410
411 write_reg(pf, 6, 0xa0+0x10*pf->drive);
412
413 if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
414 pi_disconnect(pf->pi);
415 return -1;
416 }
417
418 write_reg(pf, 4, dlen % 256);
419 write_reg(pf, 5, dlen / 256);
420 write_reg(pf, 7, 0xa0); /* ATAPI packet command */
421
422 if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
423 pi_disconnect(pf->pi);
424 return -1;
425 }
426
427 if (read_reg(pf, 2) != 1) {
428 printk("%s: %s: command phase error\n", pf->name, fun);
429 pi_disconnect(pf->pi);
430 return -1;
431 }
432
433 pi_write_block(pf->pi, cmd, 12);
434
435 return 0;
436}
437
438static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
439{
440 int r, s, n;
441
442 r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
443 fun, "completion");
444
445 if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
446 n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
447 3) & 0xfffc);
448 pi_read_block(pf->pi, buf, n);
449 }
450
451 s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
452
453 pi_disconnect(pf->pi);
454
455 return (r ? r : s);
456}
457
458static void pf_req_sense(struct pf_unit *pf, int quiet)
459{
460 char rs_cmd[12] =
461 { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
462 char buf[16];
463 int r;
464
465 r = pf_command(pf, rs_cmd, 16, "Request sense");
466 mdelay(1);
467 if (!r)
468 pf_completion(pf, buf, "Request sense");
469
470 if ((!r) && (!quiet))
471 printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
472 pf->name, buf[2] & 0xf, buf[12], buf[13]);
473}
474
475static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
476{
477 int r;
478
479 r = pf_command(pf, cmd, dlen, fun);
480 mdelay(1);
481 if (!r)
482 r = pf_completion(pf, buf, fun);
483 if (r)
484 pf_req_sense(pf, !fun);
485
486 return r;
487}
488
489#define DBMSG(msg) ((verbose>1)?(msg):NULL)
490
491static void pf_lock(struct pf_unit *pf, int func)
492{
493 char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
494
495 pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock");
496}
497
498static void pf_eject(struct pf_unit *pf)
499{
500 char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
501
502 pf_lock(pf, 0);
503 pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
504}
505
506#define PF_RESET_TMO 30 /* in tenths of a second */
507
508static void pf_sleep(int cs)
509{
510 current->state = TASK_INTERRUPTIBLE;
511 schedule_timeout(cs);
512}
513
514/* the ATAPI standard actually specifies the contents of all 7 registers
515 after a reset, but the specification is ambiguous concerning the last
516 two bytes, and different drives interpret the standard differently.
517 */
518
519static int pf_reset(struct pf_unit *pf)
520{
521 int i, k, flg;
522 int expect[5] = { 1, 1, 1, 0x14, 0xeb };
523
524 pi_connect(pf->pi);
525 write_reg(pf, 6, 0xa0+0x10*pf->drive);
526 write_reg(pf, 7, 8);
527
528 pf_sleep(20 * HZ / 1000);
529
530 k = 0;
531 while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
532 pf_sleep(HZ / 10);
533
534 flg = 1;
535 for (i = 0; i < 5; i++)
536 flg &= (read_reg(pf, i + 1) == expect[i]);
537
538 if (verbose) {
539 printk("%s: Reset (%d) signature = ", pf->name, k);
540 for (i = 0; i < 5; i++)
541 printk("%3x", read_reg(pf, i + 1));
542 if (!flg)
543 printk(" (incorrect)");
544 printk("\n");
545 }
546
547 pi_disconnect(pf->pi);
548 return flg - 1;
549}
550
551static void pf_mode_sense(struct pf_unit *pf)
552{
553 char ms_cmd[12] =
554 { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
555 char buf[8];
556
557 pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense"));
558 pf->media_status = PF_RW;
559 if (buf[3] & 0x80)
560 pf->media_status = PF_RO;
561}
562
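/* Two helpers for parsing ATAPI IDENTIFY and READ CAPACITY data: xs()
   copies an ASCII field while collapsing runs of spaces and dropping a
   single trailing space, and xl() assembles four bytes into a big-endian
   32-bit value, e.g. bytes 00 12 34 56 become 0x00123456.
*/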
563static void xs(char *buf, char *targ, int offs, int len)
564{
565 int j, k, l;
566
567 j = 0;
568 l = 0;
569 for (k = 0; k < len; k++)
570 if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
571 l = targ[j++] = buf[k + offs];
572 if (l == 0x20)
573 j--;
574 targ[j] = 0;
575}
576
577static int xl(char *buf, int offs)
578{
579 int v, k;
580
581 v = 0;
582 for (k = 0; k < 4; k++)
583 v = v * 256 + (buf[k + offs] & 0xff);
584 return v;
585}
586
587static void pf_get_capacity(struct pf_unit *pf)
588{
589 char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
590 char buf[8];
591 int bs;
592
593 if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) {
594 pf->media_status = PF_NM;
595 return;
596 }
597 set_capacity(pf->disk, xl(buf, 0) + 1);
598 bs = xl(buf, 4);
599 if (bs != 512) {
600 set_capacity(pf->disk, 0);
601 if (verbose)
602 printk("%s: Drive %d, LUN %d,"
603 " unsupported block size %d\n",
604 pf->name, pf->drive, pf->lun, bs);
605 }
606}
607
608static int pf_identify(struct pf_unit *pf)
609{
610 int dt, s;
611 char *ms[2] = { "master", "slave" };
612 char mf[10], id[18];
613 char id_cmd[12] =
614 { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
615 char buf[36];
616
617 s = pf_atapi(pf, id_cmd, 36, buf, "identify");
618 if (s)
619 return -1;
620
621 dt = buf[0] & 0x1f;
622 if ((dt != 0) && (dt != 7)) {
623 if (verbose)
624 printk("%s: Drive %d, LUN %d, unsupported type %d\n",
625 pf->name, pf->drive, pf->lun, dt);
626 return -1;
627 }
628
629 xs(buf, mf, 8, 8);
630 xs(buf, id, 16, 16);
631
632 pf->removable = (buf[1] & 0x80);
633
634 pf_mode_sense(pf);
635 pf_mode_sense(pf);
636 pf_mode_sense(pf);
637
638 pf_get_capacity(pf);
639
640 printk("%s: %s %s, %s LUN %d, type %d",
641 pf->name, mf, id, ms[pf->drive], pf->lun, dt);
642 if (pf->removable)
643 printk(", removable");
644 if (pf->media_status == PF_NM)
645 printk(", no media\n");
646 else {
647 if (pf->media_status == PF_RO)
648 printk(", RO");
649 printk(", %llu blocks\n",
650 (unsigned long long)get_capacity(pf->disk));
651 }
652 return 0;
653}
654
655/* returns 0, with id set if drive is detected
656 -1, if drive detection failed
657*/
658static int pf_probe(struct pf_unit *pf)
659{
660 if (pf->drive == -1) {
661 for (pf->drive = 0; pf->drive <= 1; pf->drive++)
662 if (!pf_reset(pf)) {
663 if (pf->lun != -1)
664 return pf_identify(pf);
665 else
666 for (pf->lun = 0; pf->lun < 8; pf->lun++)
667 if (!pf_identify(pf))
668 return 0;
669 }
670 } else {
671 if (pf_reset(pf))
672 return -1;
673 if (pf->lun != -1)
674 return pf_identify(pf);
675 for (pf->lun = 0; pf->lun < 8; pf->lun++)
676 if (!pf_identify(pf))
677 return 0;
678 }
679 return -1;
680}
681
682static int pf_detect(void)
683{
684 struct pf_unit *pf = units;
685 int k, unit;
686
687 printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
688 name, name, PF_VERSION, major, cluster, nice);
689
690 k = 0;
691 if (pf_drive_count == 0) {
692 if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
693 verbose, pf->name)) {
694 if (!pf_probe(pf) && pf->disk) {
695 pf->present = 1;
696 k++;
697 } else
698 pi_release(pf->pi);
699 }
700
701 } else
702 for (unit = 0; unit < PF_UNITS; unit++, pf++) {
703 int *conf = *drives[unit];
704 if (!conf[D_PRT])
705 continue;
706 if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
707 conf[D_UNI], conf[D_PRO], conf[D_DLY],
708 pf_scratch, PI_PF, verbose, pf->name)) {
709 if (!pf_probe(pf) && pf->disk) {
710 pf->present = 1;
711 k++;
712 } else
713 pi_release(pf->pi);
714 }
715 }
716 if (k)
717 return 0;
718
719 printk("%s: No ATAPI disk detected\n", name);
720 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
721 put_disk(pf->disk);
722 return -1;
723}
724
725/* The i/o request engine */
726
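/* pf_start() builds a READ(10)/WRITE(10) style packet: bytes 2-5 carry
   the starting 512-byte sector in big-endian order and bytes 7-8 the
   sector count (pf only drives media with a 512-byte block size, see
   pf_get_capacity).  The byte count handed to pf_command() is c*512.
*/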
727static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
728{
729 int i;
730 char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
731
732 for (i = 0; i < 4; i++) {
733 io_cmd[5 - i] = b & 0xff;
734 b = b >> 8;
735 }
736
737 io_cmd[8] = c & 0xff;
738 io_cmd[7] = (c >> 8) & 0xff;
739
740 i = pf_command(pf, io_cmd, c * 512, "start i/o");
741
742 mdelay(1);
743
744 return i;
745}
746
747static int pf_ready(void)
748{
749 return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
750}
751
752static struct request_queue *pf_queue;
753
754static void do_pf_request(request_queue_t * q)
755{
756 if (pf_busy)
757 return;
758repeat:
759 pf_req = elv_next_request(q);
760 if (!pf_req)
761 return;
762
763 pf_current = pf_req->rq_disk->private_data;
764 pf_block = pf_req->sector;
765 pf_run = pf_req->nr_sectors;
766 pf_count = pf_req->current_nr_sectors;
767
768 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
769 end_request(pf_req, 0);
770 goto repeat;
771 }
772
773 pf_cmd = rq_data_dir(pf_req);
774 pf_buf = pf_req->buffer;
775 pf_retries = 0;
776
777 pf_busy = 1;
778 if (pf_cmd == READ)
779 pi_do_claimed(pf_current->pi, do_pf_read);
780 else if (pf_cmd == WRITE)
781 pi_do_claimed(pf_current->pi, do_pf_write);
782 else {
783 pf_busy = 0;
784 end_request(pf_req, 0);
785 goto repeat;
786 }
787}
788
789static int pf_next_buf(void)
790{
791 unsigned long saved_flags;
792
793 pf_count--;
794 pf_run--;
795 pf_buf += 512;
796 pf_block++;
797 if (!pf_run)
798 return 0;
799 if (!pf_count)
800 return 1;
801 spin_lock_irqsave(&pf_spin_lock, saved_flags);
802 end_request(pf_req, 1);
803 pf_count = pf_req->current_nr_sectors;
804 pf_buf = pf_req->buffer;
805 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
806 return 1;
807}
808
809static inline void next_request(int success)
810{
811 unsigned long saved_flags;
812
813 spin_lock_irqsave(&pf_spin_lock, saved_flags);
814 end_request(pf_req, success);
815 pf_busy = 0;
816 do_pf_request(pf_queue);
817 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
818}
819
820/* detach from the calling context - in case the spinlock is held */
821static void do_pf_read(void)
822{
823 ps_set_intr(do_pf_read_start, NULL, 0, nice);
824}
825
826static void do_pf_read_start(void)
827{
828 pf_busy = 1;
829
830 if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
831 pi_disconnect(pf_current->pi);
832 if (pf_retries < PF_MAX_RETRIES) {
833 pf_retries++;
834 pi_do_claimed(pf_current->pi, do_pf_read_start);
835 return;
836 }
837 next_request(0);
838 return;
839 }
840 pf_mask = STAT_DRQ;
841 ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
842}
843
844static void do_pf_read_drq(void)
845{
846 while (1) {
847 if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
848 "read block", "completion") & STAT_ERR) {
849 pi_disconnect(pf_current->pi);
850 if (pf_retries < PF_MAX_RETRIES) {
851 pf_req_sense(pf_current, 0);
852 pf_retries++;
853 pi_do_claimed(pf_current->pi, do_pf_read_start);
854 return;
855 }
856 next_request(0);
857 return;
858 }
859 pi_read_block(pf_current->pi, pf_buf, 512);
860 if (pf_next_buf())
861 break;
862 }
863 pi_disconnect(pf_current->pi);
864 next_request(1);
865}
866
867static void do_pf_write(void)
868{
869 ps_set_intr(do_pf_write_start, NULL, 0, nice);
870}
871
872static void do_pf_write_start(void)
873{
874 pf_busy = 1;
875
876 if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
877 pi_disconnect(pf_current->pi);
878 if (pf_retries < PF_MAX_RETRIES) {
879 pf_retries++;
880 pi_do_claimed(pf_current->pi, do_pf_write_start);
881 return;
882 }
883 next_request(0);
884 return;
885 }
886
887 while (1) {
888 if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
889 "write block", "data wait") & STAT_ERR) {
890 pi_disconnect(pf_current->pi);
891 if (pf_retries < PF_MAX_RETRIES) {
892 pf_retries++;
893 pi_do_claimed(pf_current->pi, do_pf_write_start);
894 return;
895 }
896 next_request(0);
897 return;
898 }
899 pi_write_block(pf_current->pi, pf_buf, 512);
900 if (pf_next_buf())
901 break;
902 }
903 pf_mask = 0;
904 ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
905}
906
907static void do_pf_write_done(void)
908{
909 if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
910 pi_disconnect(pf_current->pi);
911 if (pf_retries < PF_MAX_RETRIES) {
912 pf_retries++;
913 pi_do_claimed(pf_current->pi, do_pf_write_start);
914 return;
915 }
916 next_request(0);
917 return;
918 }
919 pi_disconnect(pf_current->pi);
920 next_request(1);
921}
922
923static int __init pf_init(void)
924{ /* preliminary initialisation */
925 struct pf_unit *pf;
926 int unit;
927
928 if (disable)
929 return -1;
930
931 pf_init_units();
932
933 if (pf_detect())
934 return -1;
935 pf_busy = 0;
936
937 if (register_blkdev(major, name)) {
938 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
939 put_disk(pf->disk);
940 return -1;
941 }
942 pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
943 if (!pf_queue) {
944 unregister_blkdev(major, name);
945 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
946 put_disk(pf->disk);
947 return -1;
948 }
949
950 blk_queue_max_phys_segments(pf_queue, cluster);
951 blk_queue_max_hw_segments(pf_queue, cluster);
952
953 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
954 struct gendisk *disk = pf->disk;
955
956 if (!pf->present)
957 continue;
958 disk->private_data = pf;
959 disk->queue = pf_queue;
960 add_disk(disk);
961 }
962 return 0;
963}
964
965static void __exit pf_exit(void)
966{
967 struct pf_unit *pf;
968 int unit;
969 unregister_blkdev(major, name);
970 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
971 if (!pf->present)
972 continue;
973 del_gendisk(pf->disk);
974 put_disk(pf->disk);
975 pi_release(pf->pi);
976 }
977 blk_cleanup_queue(pf_queue);
978}
979
980MODULE_LICENSE("GPL");
981module_init(pf_init)
982module_exit(pf_exit)
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
new file mode 100644
index 000000000000..dbeb107bb971
--- /dev/null
+++ b/drivers/block/paride/pg.c
@@ -0,0 +1,723 @@
1/*
2 pg.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 The pg driver provides a simple character device interface for
6 sending ATAPI commands to a device. With the exception of the
7 ATAPI reset operation, all operations are performed by a pair
8 of read and write operations to the appropriate /dev/pgN device.
9 A write operation delivers a command and any outbound data in
10 a single buffer. Normally, the write will succeed unless the
11 device is offline or malfunctioning, or there is already another
12 command pending. If the write succeeds, it should be followed
13 immediately by a read operation, to obtain any returned data and
14 status information. A read will fail if there is no operation
15 in progress.
16
17 As a special case, the device can be reset with a write operation,
18 and in this case, no following read is expected, or permitted.
19
20 There are no ioctl() operations. Any single operation
21 may transfer at most PG_MAX_DATA bytes. Note that the driver must
22 copy the data through an internal buffer. In keeping with all
23 current ATAPI devices, command packets are assumed to be exactly
24 12 bytes in length.
25
26 To permit future changes to this interface, the headers in the
27 read and write buffers contain a single character "magic" flag.
28 Currently this flag must be the character "P".
29
30 By default, the driver will autoprobe for a single parallel
31	    port ATAPI device, but if the devices' individual parameters are
32 specified, the driver can handle up to 4 devices.
33
34 To use this device, you must have the following device
35 special files defined:
36
37 /dev/pg0 c 97 0
38 /dev/pg1 c 97 1
39 /dev/pg2 c 97 2
40 /dev/pg3 c 97 3
41
42 (You'll need to change the 97 to something else if you use
43 the 'major' parameter to install the driver on a different
44 major number.)
45
46 The behaviour of the pg driver can be altered by setting
47 some parameters from the insmod command line. The following
48 parameters are adjustable:
49
50 drive0 These four arguments can be arrays of
51 drive1 1-6 integers as follows:
52 drive2
53 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
54
55 Where,
56
57 <prt> is the base of the parallel port address for
58 the corresponding drive. (required)
59
60 <pro> is the protocol number for the adapter that
61 supports this drive. These numbers are
62 logged by 'paride' when the protocol modules
63 are initialised. (0 if not given)
64
65 <uni> for those adapters that support chained
66 devices, this is the unit selector for the
67 chain of devices on the given port. It should
68 be zero for devices that don't support chaining.
69 (0 if not given)
70
71 <mod> this can be -1 to choose the best mode, or one
72 of the mode numbers supported by the adapter.
73 (-1 if not given)
74
75 <slv> ATAPI devices can be jumpered to master or slave.
76 Set this to 0 to choose the master drive, 1 to
77 choose the slave, -1 (the default) to choose the
78 first drive found.
79
80 <dly> some parallel ports require the driver to
81 go more slowly. -1 sets a default value that
82 should work with the chosen protocol. Otherwise,
83 set this to a small integer, the larger it is
84 the slower the port i/o. In some cases, setting
85 this to zero will speed up the device. (default -1)
86
87	    major	You may use this parameter to override the
88 default major number (97) that this driver
89 will use. Be sure to change the device
90 name as well.
91
92 name This parameter is a character string that
93 contains the name the kernel will use for this
94 device (in /proc output, for instance).
95 (default "pg").
96
97 verbose This parameter controls the amount of logging
98 that is done by the driver. Set it to 0 for
99 quiet operation, to 1 to enable progress
100 messages while the driver probes for devices,
101 or to 2 for full debug logging. (default 0)
102
103 If this driver is built into the kernel, you can use
104 the following command line parameters, with the same values
105 as the corresponding module parameters listed above:
106
107 pg.drive0
108 pg.drive1
109 pg.drive2
110 pg.drive3
111
112 In addition, you can use the parameter pg.disable to disable
113 the driver entirely.
114
115*/
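
A minimal user-space sketch of the write-then-read sequence described above. It assumes the pg_write_hdr/pg_read_hdr layout and the PG_MAGIC/PG_COMMAND constants from <linux/pg.h>; the /dev/pg0 path, the 5 second timeout and the all-zero TEST UNIT READY packet are illustrative only.

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <linux/pg.h>

	int main(void)
	{
		struct pg_write_hdr whdr;
		struct pg_read_hdr rhdr;
		int fd = open("/dev/pg0", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&whdr, 0, sizeof(whdr));
		whdr.magic = PG_MAGIC;
		whdr.func = PG_COMMAND;
		whdr.dlen = 0;		/* no data expected back */
		whdr.timeout = 5;	/* seconds */
		/* whdr.packet stays all zero: an ATAPI TEST UNIT READY */

		if (write(fd, &whdr, sizeof(whdr)) < 0)	/* deliver the command */
			return 1;
		if (read(fd, &rhdr, sizeof(rhdr)) < 0)	/* collect the status */
			return 1;

		printf("sense key 0x%x, %d data bytes, %d s\n",
		       rhdr.scsi, rhdr.dlen, rhdr.duration);
		close(fd);
		return 0;
	}

The read fails with EINVAL if no command is pending; on success the sense key comes back in the scsi field of the read header.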
116
117/* Changes:
118
119 1.01 GRG 1998.06.16 Bug fixes
120 1.02 GRG 1998.09.24 Added jumbo support
121
122*/
123
124#define PG_VERSION "1.02"
125#define PG_MAJOR 97
126#define PG_NAME "pg"
127#define PG_UNITS 4
128
129#ifndef PI_PG
130#define PI_PG 4
131#endif
132
133/* Here are things one can override from the insmod command.
134 Most are autoprobed by paride unless set here. Verbose is 0
135 by default.
136
137*/
138
139static int verbose = 0;
140static int major = PG_MAJOR;
141static char *name = PG_NAME;
142static int disable = 0;
143
144static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
145static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
146static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
147static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
148
149static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
150static int pg_drive_count;
151
152enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
153
154/* end of parameters */
155
156#include <linux/module.h>
157#include <linux/init.h>
158#include <linux/fs.h>
159#include <linux/devfs_fs_kernel.h>
160#include <linux/delay.h>
161#include <linux/slab.h>
162#include <linux/mtio.h>
163#include <linux/pg.h>
164#include <linux/device.h>
165
166#include <asm/uaccess.h>
167
168module_param(verbose, bool, 0644);
169module_param(major, int, 0);
170module_param(name, charp, 0);
171module_param_array(drive0, int, NULL, 0);
172module_param_array(drive1, int, NULL, 0);
173module_param_array(drive2, int, NULL, 0);
174module_param_array(drive3, int, NULL, 0);
175
176#include "paride.h"
177
178#define PG_SPIN_DEL 50 /* spin delay in micro-seconds */
179#define PG_SPIN 200
180#define PG_TMO HZ
181#define PG_RESET_TMO 10*HZ
182
183#define STAT_ERR 0x01
184#define STAT_INDEX 0x02
185#define STAT_ECC 0x04
186#define STAT_DRQ 0x08
187#define STAT_SEEK 0x10
188#define STAT_WRERR 0x20
189#define STAT_READY 0x40
190#define STAT_BUSY 0x80
191
192#define ATAPI_IDENTIFY 0x12
193
194static int pg_open(struct inode *inode, struct file *file);
195static int pg_release(struct inode *inode, struct file *file);
196static ssize_t pg_read(struct file *filp, char __user *buf,
197 size_t count, loff_t * ppos);
198static ssize_t pg_write(struct file *filp, const char __user *buf,
199 size_t count, loff_t * ppos);
200static int pg_detect(void);
201
202#define PG_NAMELEN 8
203
204struct pg {
205 struct pi_adapter pia; /* interface to paride layer */
206 struct pi_adapter *pi;
207 int busy; /* write done, read expected */
208 int start; /* jiffies at command start */
209 int dlen; /* transfer size requested */
210 unsigned long timeout; /* timeout requested */
211 int status; /* last sense key */
212 int drive; /* drive */
213	unsigned long access;	/* bit 0 set while the device is open */
214 int present; /* device present ? */
215 char *bufptr;
216 char name[PG_NAMELEN]; /* pg0, pg1, ... */
217};
218
219static struct pg devices[PG_UNITS];
220
221static int pg_identify(struct pg *dev, int log);
222
223static char pg_scratch[512]; /* scratch block buffer */
224
225static struct class_simple *pg_class;
226
227/* kernel glue structures */
228
229static struct file_operations pg_fops = {
230 .owner = THIS_MODULE,
231 .read = pg_read,
232 .write = pg_write,
233 .open = pg_open,
234 .release = pg_release,
235};
236
237static void pg_init_units(void)
238{
239 int unit;
240
241 pg_drive_count = 0;
242 for (unit = 0; unit < PG_UNITS; unit++) {
243 int *parm = *drives[unit];
244 struct pg *dev = &devices[unit];
245 dev->pi = &dev->pia;
246 clear_bit(0, &dev->access);
247 dev->busy = 0;
248 dev->present = 0;
249 dev->bufptr = NULL;
250 dev->drive = parm[D_SLV];
251 snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit);
252 if (parm[D_PRT])
253 pg_drive_count++;
254 }
255}
256
257static inline int status_reg(struct pg *dev)
258{
259 return pi_read_regr(dev->pi, 1, 6);
260}
261
262static inline int read_reg(struct pg *dev, int reg)
263{
264 return pi_read_regr(dev->pi, 0, reg);
265}
266
267static inline void write_reg(struct pg *dev, int reg, int val)
268{
269 pi_write_regr(dev->pi, 0, reg, val);
270}
271
272static inline u8 DRIVE(struct pg *dev)
273{
274 return 0xa0+0x10*dev->drive;
275}
276
277static void pg_sleep(int cs)
278{
279 current->state = TASK_INTERRUPTIBLE;
280 schedule_timeout(cs);
281}
282
283static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
284{
285 int j, r, e, s, p, to;
286
287 dev->status = 0;
288
289 j = 0;
290 while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop))))
291 && time_before(jiffies, tmo)) {
292 if (j++ < PG_SPIN)
293 udelay(PG_SPIN_DEL);
294 else
295 pg_sleep(1);
296 }
297
298 to = time_after_eq(jiffies, tmo);
299
300 if ((r & (STAT_ERR & stop)) || to) {
301 s = read_reg(dev, 7);
302 e = read_reg(dev, 1);
303 p = read_reg(dev, 2);
304 if (verbose > 1)
305 printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n",
306 dev->name, msg, s, e, p, to ? " timeout" : "");
307 if (to)
308 e |= 0x100;
309 dev->status = (e >> 4) & 0xff;
310 return -1;
311 }
312 return 0;
313}
314
315static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo)
316{
317 int k;
318
319 pi_connect(dev->pi);
320
321 write_reg(dev, 6, DRIVE(dev));
322
323 if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command"))
324 goto fail;
325
326 write_reg(dev, 4, dlen % 256);
327 write_reg(dev, 5, dlen / 256);
328 write_reg(dev, 7, 0xa0); /* ATAPI packet command */
329
330 if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ"))
331 goto fail;
332
333 if (read_reg(dev, 2) != 1) {
334 printk("%s: command phase error\n", dev->name);
335 goto fail;
336 }
337
338 pi_write_block(dev->pi, cmd, 12);
339
340 if (verbose > 1) {
341 printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen);
342 for (k = 0; k < 12; k++)
343 printk("%02x ", cmd[k] & 0xff);
344 printk("\n");
345 }
346 return 0;
347fail:
348 pi_disconnect(dev->pi);
349 return -1;
350}
351
352static int pg_completion(struct pg *dev, char *buf, unsigned long tmo)
353{
354 int r, d, n, p;
355
356 r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
357 tmo, "completion");
358
359 dev->dlen = 0;
360
361 while (read_reg(dev, 7) & STAT_DRQ) {
362 d = (read_reg(dev, 4) + 256 * read_reg(dev, 5));
363 n = ((d + 3) & 0xfffc);
364 p = read_reg(dev, 2) & 3;
365 if (p == 0)
366 pi_write_block(dev->pi, buf, n);
367 if (p == 2)
368 pi_read_block(dev->pi, buf, n);
369 if (verbose > 1)
370 printk("%s: %s %d bytes\n", dev->name,
371 p ? "Read" : "Write", n);
372 dev->dlen += (1 - p) * d;
373 buf += d;
374 r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
375 tmo, "completion");
376 }
377
378 pi_disconnect(dev->pi);
379
380 return r;
381}
382
383static int pg_reset(struct pg *dev)
384{
385 int i, k, err;
386 int expect[5] = { 1, 1, 1, 0x14, 0xeb };
387 int got[5];
388
389 pi_connect(dev->pi);
390 write_reg(dev, 6, DRIVE(dev));
391 write_reg(dev, 7, 8);
392
393 pg_sleep(20 * HZ / 1000);
394
395 k = 0;
396 while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY))
397 pg_sleep(1);
398
399 for (i = 0; i < 5; i++)
400 got[i] = read_reg(dev, i + 1);
401
402 err = memcmp(expect, got, sizeof(got)) ? -1 : 0;
403
404 if (verbose) {
405 printk("%s: Reset (%d) signature = ", dev->name, k);
406 for (i = 0; i < 5; i++)
407 printk("%3x", got[i]);
408 if (err)
409 printk(" (incorrect)");
410 printk("\n");
411 }
412
413 pi_disconnect(dev->pi);
414 return err;
415}
416
417static void xs(char *buf, char *targ, int len)
418{
419 char l = '\0';
420 int k;
421
422 for (k = 0; k < len; k++) {
423 char c = *buf++;
424 if (c != ' ' || c != l)
425 l = *targ++ = c;
426 }
427 if (l == ' ')
428 targ--;
429 *targ = '\0';
430}
431
432static int pg_identify(struct pg *dev, int log)
433{
434 int s;
435 char *ms[2] = { "master", "slave" };
436 char mf[10], id[18];
437 char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
438 char buf[36];
439
440 s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO);
441 if (s)
442 return -1;
443 s = pg_completion(dev, buf, jiffies + PG_TMO);
444 if (s)
445 return -1;
446
447 if (log) {
448 xs(buf + 8, mf, 8);
449 xs(buf + 16, id, 16);
450 printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]);
451 }
452
453 return 0;
454}
455
456/*
457 * returns 0, with id set if drive is detected
458 * -1, if drive detection failed
459 */
460static int pg_probe(struct pg *dev)
461{
462 if (dev->drive == -1) {
463 for (dev->drive = 0; dev->drive <= 1; dev->drive++)
464 if (!pg_reset(dev))
465 return pg_identify(dev, 1);
466 } else {
467 if (!pg_reset(dev))
468 return pg_identify(dev, 1);
469 }
470 return -1;
471}
472
473static int pg_detect(void)
474{
475 struct pg *dev = &devices[0];
476 int k, unit;
477
478 printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
479
480 k = 0;
481 if (pg_drive_count == 0) {
482 if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
483 PI_PG, verbose, dev->name)) {
484 if (!pg_probe(dev)) {
485 dev->present = 1;
486 k++;
487 } else
488 pi_release(dev->pi);
489 }
490
491 } else
492 for (unit = 0; unit < PG_UNITS; unit++, dev++) {
493 int *parm = *drives[unit];
494 if (!parm[D_PRT])
495 continue;
496 if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD],
497 parm[D_UNI], parm[D_PRO], parm[D_DLY],
498 pg_scratch, PI_PG, verbose, dev->name)) {
499 if (!pg_probe(dev)) {
500 dev->present = 1;
501 k++;
502 } else
503 pi_release(dev->pi);
504 }
505 }
506
507 if (k)
508 return 0;
509
510 printk("%s: No ATAPI device detected\n", name);
511 return -1;
512}
513
514static int pg_open(struct inode *inode, struct file *file)
515{
516 int unit = iminor(inode) & 0x7f;
517 struct pg *dev = &devices[unit];
518
519 if ((unit >= PG_UNITS) || (!dev->present))
520 return -ENODEV;
521
522 if (test_and_set_bit(0, &dev->access))
523 return -EBUSY;
524
525 if (dev->busy) {
526 pg_reset(dev);
527 dev->busy = 0;
528 }
529
530 pg_identify(dev, (verbose > 1));
531
532 dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL);
533 if (dev->bufptr == NULL) {
534 clear_bit(0, &dev->access);
535 printk("%s: buffer allocation failed\n", dev->name);
536 return -ENOMEM;
537 }
538
539 file->private_data = dev;
540
541 return 0;
542}
543
544static int pg_release(struct inode *inode, struct file *file)
545{
546 struct pg *dev = file->private_data;
547
548 kfree(dev->bufptr);
549 dev->bufptr = NULL;
550 clear_bit(0, &dev->access);
551
552 return 0;
553}
554
555static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
556{
557 struct pg *dev = filp->private_data;
558 struct pg_write_hdr hdr;
559 int hs = sizeof (hdr);
560
561 if (dev->busy)
562 return -EBUSY;
563 if (count < hs)
564 return -EINVAL;
565
566 if (copy_from_user(&hdr, buf, hs))
567 return -EFAULT;
568
569 if (hdr.magic != PG_MAGIC)
570 return -EINVAL;
571 if (hdr.dlen > PG_MAX_DATA)
572 return -EINVAL;
573 if ((count - hs) > PG_MAX_DATA)
574 return -EINVAL;
575
576 if (hdr.func == PG_RESET) {
577 if (count != hs)
578 return -EINVAL;
579 if (pg_reset(dev))
580 return -EIO;
581 return count;
582 }
583
584 if (hdr.func != PG_COMMAND)
585 return -EINVAL;
586
587 dev->start = jiffies;
588 dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies;
589
590 if (pg_command(dev, hdr.packet, hdr.dlen, jiffies + PG_TMO)) {
591 if (dev->status & 0x10)
592 return -ETIME;
593 return -EIO;
594 }
595
596 dev->busy = 1;
597
598 if (copy_from_user(dev->bufptr, buf + hs, count - hs))
599 return -EFAULT;
600 return count;
601}
602
603static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
604{
605 struct pg *dev = filp->private_data;
606 struct pg_read_hdr hdr;
607 int hs = sizeof (hdr);
608 int copy;
609
610 if (!dev->busy)
611 return -EINVAL;
612 if (count < hs)
613 return -EINVAL;
614
615 dev->busy = 0;
616
617 if (pg_completion(dev, dev->bufptr, dev->timeout))
618 if (dev->status & 0x10)
619 return -ETIME;
620
621 hdr.magic = PG_MAGIC;
622 hdr.dlen = dev->dlen;
623 copy = 0;
624
625 if (hdr.dlen < 0) {
626 hdr.dlen = -1 * hdr.dlen;
627 copy = hdr.dlen;
628 if (copy > (count - hs))
629 copy = count - hs;
630 }
631
632 hdr.duration = (jiffies - dev->start + HZ / 2) / HZ;
633 hdr.scsi = dev->status & 0x0f;
634
635 if (copy_to_user(buf, &hdr, hs))
636 return -EFAULT;
637 if (copy > 0)
638 if (copy_to_user(buf + hs, dev->bufptr, copy))
639 return -EFAULT;
640 return copy + hs;
641}
642
643static int __init pg_init(void)
644{
645 int unit, err = 0;
646
647 if (disable){
648 err = -1;
649 goto out;
650 }
651
652 pg_init_units();
653
654 if (pg_detect()) {
655 err = -1;
656 goto out;
657 }
658
659 if (register_chrdev(major, name, &pg_fops)) {
660 printk("pg_init: unable to get major number %d\n", major);
661 for (unit = 0; unit < PG_UNITS; unit++) {
662 struct pg *dev = &devices[unit];
663 if (dev->present)
664 pi_release(dev->pi);
665 }
666 err = -1;
667 goto out;
668 }
669 pg_class = class_simple_create(THIS_MODULE, "pg");
670 if (IS_ERR(pg_class)) {
671 err = PTR_ERR(pg_class);
672 goto out_chrdev;
673 }
674 devfs_mk_dir("pg");
675 for (unit = 0; unit < PG_UNITS; unit++) {
676 struct pg *dev = &devices[unit];
677 if (dev->present) {
678 class_simple_device_add(pg_class, MKDEV(major, unit),
679 NULL, "pg%u", unit);
680 err = devfs_mk_cdev(MKDEV(major, unit),
681 S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u",
682 unit);
683 if (err)
684 goto out_class;
685 }
686 }
687 err = 0;
688 goto out;
689
690out_class:
691 class_simple_device_remove(MKDEV(major, unit));
692 class_simple_destroy(pg_class);
693out_chrdev:
694 unregister_chrdev(major, "pg");
695out:
696 return err;
697}
698
699static void __exit pg_exit(void)
700{
701 int unit;
702
703 for (unit = 0; unit < PG_UNITS; unit++) {
704 struct pg *dev = &devices[unit];
705 if (dev->present) {
706 class_simple_device_remove(MKDEV(major, unit));
707 devfs_remove("pg/%u", unit);
708 }
709 }
710 class_simple_destroy(pg_class);
711 devfs_remove("pg");
712 unregister_chrdev(major, name);
713
714 for (unit = 0; unit < PG_UNITS; unit++) {
715 struct pg *dev = &devices[unit];
716 if (dev->present)
717 pi_release(dev->pi);
718 }
719}
720
721MODULE_LICENSE("GPL");
722module_init(pg_init)
723module_exit(pg_exit)
diff --git a/drivers/block/paride/ppc6lnx.c b/drivers/block/paride/ppc6lnx.c
new file mode 100644
index 000000000000..5e5521d3b1dd
--- /dev/null
+++ b/drivers/block/paride/ppc6lnx.c
@@ -0,0 +1,726 @@
1/*
2 ppc6lnx.c (c) 2001 Micro Solutions Inc.
3 Released under the terms of the GNU General Public license
4
5	ppc6lnx.c is part of the protocol driver for the Micro Solutions
6 "BACKPACK" parallel port IDE adapter
7 (Works on Series 6 drives)
8
9*/
10
11//***************************************************************************
12
13// PPC 6 Code in C sanitized for LINUX
14// Original x86 ASM by Ron, Converted to C by Clive
15
16//***************************************************************************
17
18
19#define port_stb 1
20#define port_afd 2
21#define cmd_stb port_afd
22#define port_init 4
23#define data_stb port_init
24#define port_sel 8
25#define port_int 16
26#define port_dir 0x20
27
28#define ECR_EPP 0x80
29#define ECR_BI 0x20
30
31//***************************************************************************
32
33// 60772 Commands
34
35#define ACCESS_REG 0x00
36#define ACCESS_PORT 0x40
37
38#define ACCESS_READ 0x00
39#define ACCESS_WRITE 0x20
40
41// 60772 Command Prefix
42
43#define CMD_PREFIX_SET 0xe0 // Special command that modifies the next command's operation
44#define CMD_PREFIX_RESET 0xc0 // Resets current cmd modifier reg bits
45 #define PREFIX_IO16 0x01 // perform 16-bit wide I/O
46 #define PREFIX_FASTWR 0x04 // enable PPC mode fast-write
47 #define PREFIX_BLK 0x08 // enable block transfer mode
48
49// 60772 Registers
50
51#define REG_STATUS 0x00 // status register
52 #define STATUS_IRQA 0x01 // Peripheral IRQA line
53 #define STATUS_EEPROM_DO 0x40 // Serial EEPROM data bit
54#define REG_VERSION 0x01 // PPC version register (read)
55#define REG_HWCFG 0x02 // Hardware Config register
56#define REG_RAMSIZE 0x03 // Size of RAM Buffer
57 #define RAMSIZE_128K 0x02
58#define REG_EEPROM 0x06 // EEPROM control register
59 #define EEPROM_SK 0x01 // eeprom SK bit
60 #define EEPROM_DI 0x02 // eeprom DI bit
61 #define EEPROM_CS 0x04 // eeprom CS bit
62 #define EEPROM_EN 0x08 // eeprom output enable
63#define REG_BLKSIZE 0x08 // Block transfer len (24 bit)
64
65//***************************************************************************
66
67typedef struct ppc_storage {
68 u16 lpt_addr; // LPT base address
69 u8 ppc_id;
70 u8 mode; // operating mode
71 // 0 = PPC Uni SW
72 // 1 = PPC Uni FW
73 // 2 = PPC Bi SW
74 // 3 = PPC Bi FW
75 // 4 = EPP Byte
76 // 5 = EPP Word
77 // 6 = EPP Dword
78 u8 ppc_flags;
79 u8 org_data; // original LPT data port contents
80 u8 org_ctrl; // original LPT control port contents
81 u8 cur_ctrl; // current control port contents
82} Interface;
83
84//***************************************************************************
85
86// ppc_flags
87
88#define fifo_wait 0x10
89
90//***************************************************************************
91
92// DON'T CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
93
94#define PPCMODE_UNI_SW 0
95#define PPCMODE_UNI_FW 1
96#define PPCMODE_BI_SW 2
97#define PPCMODE_BI_FW 3
98#define PPCMODE_EPP_BYTE 4
99#define PPCMODE_EPP_WORD 5
100#define PPCMODE_EPP_DWORD 6
101
102//***************************************************************************
103
104static int ppc6_select(Interface *ppc);
105static void ppc6_deselect(Interface *ppc);
106static void ppc6_send_cmd(Interface *ppc, u8 cmd);
107static void ppc6_wr_data_byte(Interface *ppc, u8 data);
108static u8 ppc6_rd_data_byte(Interface *ppc);
109static u8 ppc6_rd_port(Interface *ppc, u8 port);
110static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
111static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
112static void ppc6_wait_for_fifo(Interface *ppc);
113static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
114static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
115static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
116static void ppc6_wr_extout(Interface *ppc, u8 regdata);
117static int ppc6_open(Interface *ppc);
118static void ppc6_close(Interface *ppc);
119
120//***************************************************************************
121
122static int ppc6_select(Interface *ppc)
123{
124 u8 i, j, k;
125
126 i = inb(ppc->lpt_addr + 1);
127
128 if (i & 1)
129 outb(i, ppc->lpt_addr + 1);
130
131 ppc->org_data = inb(ppc->lpt_addr);
132
133 ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
134
135 ppc->cur_ctrl = ppc->org_ctrl;
136
137 ppc->cur_ctrl |= port_sel;
138
139 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
140
141 if (ppc->org_data == 'b')
142 outb('x', ppc->lpt_addr);
143
144 outb('b', ppc->lpt_addr);
145 outb('p', ppc->lpt_addr);
146 outb(ppc->ppc_id, ppc->lpt_addr);
147 outb(~ppc->ppc_id,ppc->lpt_addr);
148
149 ppc->cur_ctrl &= ~port_sel;
150
151 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
152
153 ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
154
155 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
156
157 i = ppc->mode & 0x0C;
158
159 if (i == 0)
160 i = (ppc->mode & 2) | 1;
161
162 outb(i, ppc->lpt_addr);
163
164 ppc->cur_ctrl |= port_sel;
165
166 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
167
168 // DELAY
169
170 ppc->cur_ctrl |= port_afd;
171
172 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
173
174 j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
175
176 k = inb(ppc->lpt_addr + 1) & 0xB8;
177
178 if (j == k)
179 {
180 ppc->cur_ctrl &= ~port_afd;
181
182 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
183
184 k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
185
186 if (j == k)
187 {
188 if (i & 4) // EPP
189 ppc->cur_ctrl &= ~(port_sel | port_init);
190 else // PPC/ECP
191 ppc->cur_ctrl &= ~port_sel;
192
193 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
194
195 return(1);
196 }
197 }
198
199 outb(ppc->org_ctrl, ppc->lpt_addr + 2);
200
201 outb(ppc->org_data, ppc->lpt_addr);
202
203 return(0); // FAIL
204}
205
206//***************************************************************************
207
208static void ppc6_deselect(Interface *ppc)
209{
210 if (ppc->mode & 4) // EPP
211 ppc->cur_ctrl |= port_init;
212 else // PPC/ECP
213 ppc->cur_ctrl |= port_sel;
214
215 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
216
217 outb(ppc->org_data, ppc->lpt_addr);
218
219 outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
220
221 outb(ppc->org_ctrl, ppc->lpt_addr + 2);
222}
223
224//***************************************************************************
225
226static void ppc6_send_cmd(Interface *ppc, u8 cmd)
227{
228 switch(ppc->mode)
229 {
230 case PPCMODE_UNI_SW :
231 case PPCMODE_UNI_FW :
232 case PPCMODE_BI_SW :
233 case PPCMODE_BI_FW :
234 {
235 outb(cmd, ppc->lpt_addr);
236
237 ppc->cur_ctrl ^= cmd_stb;
238
239 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
240
241 break;
242 }
243
244 case PPCMODE_EPP_BYTE :
245 case PPCMODE_EPP_WORD :
246 case PPCMODE_EPP_DWORD :
247 {
248 outb(cmd, ppc->lpt_addr + 3);
249
250 break;
251 }
252 }
253}
254
255//***************************************************************************
256
257static void ppc6_wr_data_byte(Interface *ppc, u8 data)
258{
259 switch(ppc->mode)
260 {
261 case PPCMODE_UNI_SW :
262 case PPCMODE_UNI_FW :
263 case PPCMODE_BI_SW :
264 case PPCMODE_BI_FW :
265 {
266 outb(data, ppc->lpt_addr);
267
268 ppc->cur_ctrl ^= data_stb;
269
270 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
271
272 break;
273 }
274
275 case PPCMODE_EPP_BYTE :
276 case PPCMODE_EPP_WORD :
277 case PPCMODE_EPP_DWORD :
278 {
279 outb(data, ppc->lpt_addr + 4);
280
281 break;
282 }
283 }
284}
285
286//***************************************************************************
287
288static u8 ppc6_rd_data_byte(Interface *ppc)
289{
290 u8 data = 0;
291
292 switch(ppc->mode)
293 {
294 case PPCMODE_UNI_SW :
295 case PPCMODE_UNI_FW :
296 {
297 ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
298
299 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
300
301 // DELAY
302
303 data = inb(ppc->lpt_addr + 1);
304
305 data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
306
307 ppc->cur_ctrl |= port_stb;
308
309 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
310
311 // DELAY
312
313 data |= inb(ppc->lpt_addr + 1) & 0xB8;
314
315 break;
316 }
317
318 case PPCMODE_BI_SW :
319 case PPCMODE_BI_FW :
320 {
321 ppc->cur_ctrl |= port_dir;
322
323 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
324
325 ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
326
327 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
328
329 data = inb(ppc->lpt_addr);
330
331 ppc->cur_ctrl &= ~port_stb;
332
333 outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
334
335 ppc->cur_ctrl &= ~port_dir;
336
337 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
338
339 break;
340 }
341
342 case PPCMODE_EPP_BYTE :
343 case PPCMODE_EPP_WORD :
344 case PPCMODE_EPP_DWORD :
345 {
346 outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
347
348 data = inb(ppc->lpt_addr + 4);
349
350 outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
351
352 break;
353 }
354 }
355
356 return(data);
357}
358
359//***************************************************************************
360
361static u8 ppc6_rd_port(Interface *ppc, u8 port)
362{
363 ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
364
365 return(ppc6_rd_data_byte(ppc));
366}
367
368//***************************************************************************
369
370static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
371{
372 ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
373
374 ppc6_wr_data_byte(ppc, data);
375}
376
377//***************************************************************************
378
379static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
380{
381 switch(ppc->mode)
382 {
383 case PPCMODE_UNI_SW :
384 case PPCMODE_UNI_FW :
385 {
386 while(count)
387 {
388 u8 d;
389
390 ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
391
392 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
393
394 // DELAY
395
396 d = inb(ppc->lpt_addr + 1);
397
398 d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
399
400 ppc->cur_ctrl |= port_stb;
401
402 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
403
404 // DELAY
405
406 d |= inb(ppc->lpt_addr + 1) & 0xB8;
407
408 *data++ = d;
409 count--;
410 }
411
412 break;
413 }
414
415 case PPCMODE_BI_SW :
416 case PPCMODE_BI_FW :
417 {
418 ppc->cur_ctrl |= port_dir;
419
420 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
421
422 ppc->cur_ctrl |= port_stb;
423
424 while(count)
425 {
426 ppc->cur_ctrl ^= data_stb;
427
428 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
429
430 *data++ = inb(ppc->lpt_addr);
431 count--;
432 }
433
434 ppc->cur_ctrl &= ~port_stb;
435
436 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
437
438 ppc->cur_ctrl &= ~port_dir;
439
440 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
441
442 break;
443 }
444
445 case PPCMODE_EPP_BYTE :
446 {
447 outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
448
449 // DELAY
450
451 while(count)
452 {
453 *data++ = inb(ppc->lpt_addr + 4);
454 count--;
455 }
456
457 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
458
459 break;
460 }
461
462 case PPCMODE_EPP_WORD :
463 {
464 outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
465
466 // DELAY
467
468 while(count > 1)
469 {
470 *((u16 *)data) = inw(ppc->lpt_addr + 4);
471 data += 2;
472 count -= 2;
473 }
474
475 while(count)
476 {
477 *data++ = inb(ppc->lpt_addr + 4);
478 count--;
479 }
480
481 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
482
483 break;
484 }
485
486 case PPCMODE_EPP_DWORD :
487 {
488 outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
489
490 // DELAY
491
492 while(count > 3)
493 {
494 *((u32 *)data) = inl(ppc->lpt_addr + 4);
495 data += 4;
496 count -= 4;
497 }
498
499 while(count)
500 {
501 *data++ = inb(ppc->lpt_addr + 4);
502 count--;
503 }
504
505 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
506
507 break;
508 }
509 }
510
511}
512
513//***************************************************************************
514
515static void ppc6_wait_for_fifo(Interface *ppc)
516{
517 int i;
518
519 if (ppc->ppc_flags & fifo_wait)
520 {
521 for(i=0; i<20; i++)
522 inb(ppc->lpt_addr + 1);
523 }
524}
525
526//***************************************************************************
527
528static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
529{
530 switch(ppc->mode)
531 {
532 case PPCMODE_UNI_SW :
533 case PPCMODE_BI_SW :
534 {
535 while(count--)
536 {
537 outb(*data++, ppc->lpt_addr);
538
539 ppc->cur_ctrl ^= data_stb;
540
541 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
542 }
543
544 break;
545 }
546
547 case PPCMODE_UNI_FW :
548 case PPCMODE_BI_FW :
549 {
550 u8 this, last;
551
552 ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
553
554 ppc->cur_ctrl |= port_stb;
555
556 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
557
558 last = *data;
559
560 outb(last, ppc->lpt_addr);
561
562 while(count)
563 {
564 this = *data++;
565 count--;
566
567 if (this == last)
568 {
569 ppc->cur_ctrl ^= data_stb;
570
571 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
572 }
573 else
574 {
575 outb(this, ppc->lpt_addr);
576
577 last = this;
578 }
579 }
580
581 ppc->cur_ctrl &= ~port_stb;
582
583 outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
584
585 ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
586
587 break;
588 }
589
590 case PPCMODE_EPP_BYTE :
591 {
592 while(count)
593 {
594 outb(*data++,ppc->lpt_addr + 4);
595 count--;
596 }
597
598 ppc6_wait_for_fifo(ppc);
599
600 break;
601 }
602
603 case PPCMODE_EPP_WORD :
604 {
605 while(count > 1)
606 {
607 outw(*((u16 *)data),ppc->lpt_addr + 4);
608 data += 2;
609 count -= 2;
610 }
611
612 while(count)
613 {
614 outb(*data++,ppc->lpt_addr + 4);
615 count--;
616 }
617
618 ppc6_wait_for_fifo(ppc);
619
620 break;
621 }
622
623 case PPCMODE_EPP_DWORD :
624 {
625 while(count > 3)
626 {
627 outl(*((u32 *)data),ppc->lpt_addr + 4);
628 data += 4;
629 count -= 4;
630 }
631
632 while(count)
633 {
634 outb(*data++,ppc->lpt_addr + 4);
635 count--;
636 }
637
638 ppc6_wait_for_fifo(ppc);
639
640 break;
641 }
642 }
643}
644
645//***************************************************************************
646
647static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
648{
649 length = length << 1;
650
651 ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
652 ppc6_wr_data_byte(ppc,(u8)length);
653 ppc6_wr_data_byte(ppc,(u8)(length >> 8));
654 ppc6_wr_data_byte(ppc,0);
655
656 ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
657
658 ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
659
660 ppc6_rd_data_blk(ppc, data, length);
661
662 ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
663}
664
665//***************************************************************************
666
667static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
668{
669 length = length << 1;
670
671 ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
672 ppc6_wr_data_byte(ppc,(u8)length);
673 ppc6_wr_data_byte(ppc,(u8)(length >> 8));
674 ppc6_wr_data_byte(ppc,0);
675
676 ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
677
678 ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
679
680 ppc6_wr_data_blk(ppc, data, length);
681
682 ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
683}
684
685//***************************************************************************
686
687static void ppc6_wr_extout(Interface *ppc, u8 regdata)
688{
689 ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
690
691 ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
692}
693
694//***************************************************************************
695
696static int ppc6_open(Interface *ppc)
697{
698 int ret;
699
700 ret = ppc6_select(ppc);
701
702 if (ret == 0)
703 return(ret);
704
705 ppc->ppc_flags &= ~fifo_wait;
706
707 ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
708 ppc6_wr_data_byte(ppc, RAMSIZE_128K);
709
710 ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
711
712 if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
713 ppc->ppc_flags |= fifo_wait;
714
715 return(ret);
716}
717
718//***************************************************************************
719
720static void ppc6_close(Interface *ppc)
721{
722 ppc6_deselect(ppc);
723}
724
725//***************************************************************************
726
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
new file mode 100644
index 000000000000..932342d7a8eb
--- /dev/null
+++ b/drivers/block/paride/pseudo.h
@@ -0,0 +1,102 @@
1/*
2 pseudo.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the "pseudo-interrupt" logic for parallel port drivers.
6
7 This module is #included into each driver. It makes one
8 function available:
9
10 ps_set_intr( void (*continuation)(void),
11 int (*ready)(void),
12 int timeout,
13 int nice )
14
15 Which will arrange for ready() to be evaluated frequently and
16 when either it returns true, or timeout jiffies have passed,
17 continuation() will be invoked.
18
19	If nice is 1, the test will be done approximately once a
20 jiffy. If nice is 0, the test will also be done whenever
21 the scheduler runs (by adding it to a task queue). If
22 nice is greater than 1, the test will be done once every
23 (nice-1) jiffies.
24
25*/
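
A compact sketch of the calling pattern this header expects; my_drive_ready(), my_transfer_done() and the read_status()/issue_command() helpers below are hypothetical placeholders, not code from any of the paride drivers.

	/* hypothetical helpers standing in for real register I/O */
	static int read_status(void);
	static void issue_command(void);

	static int my_drive_ready(void)
	{
		/* poll a status register, e.g. until a BUSY bit clears */
		return !(read_status() & 0x80);
	}

	static void my_transfer_done(void)
	{
		/* runs once my_drive_ready() returns true or the timeout
		   below expires; complete or fail the request here */
	}

	static void my_start_transfer(void)
	{
		issue_command();
		/* re-test roughly once per jiffy (nice == 1), give up
		   after five seconds' worth of jiffies */
		ps_set_intr(my_transfer_done, my_drive_ready, 5 * HZ, 1);
	}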
26
27/* Changes:
28
29 1.01 1998.05.03 Switched from cli()/sti() to spinlocks
30 1.02 1998.12.14 Added support for nice > 1
31*/
32
33#define PS_VERSION "1.02"
34
35#include <linux/sched.h>
36#include <linux/workqueue.h>
37
38static void ps_tq_int( void *data);
39
40static void (* ps_continuation)(void);
41static int (* ps_ready)(void);
42static unsigned long ps_timeout;
43static int ps_tq_active = 0;
44static int ps_nice = 0;
45
46static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
47
48static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
49
50static void ps_set_intr(void (*continuation)(void),
51 int (*ready)(void),
52 int timeout, int nice)
53{
54 unsigned long flags;
55
56 spin_lock_irqsave(&ps_spinlock,flags);
57
58 ps_continuation = continuation;
59 ps_ready = ready;
60 ps_timeout = jiffies + timeout;
61 ps_nice = nice;
62
63 if (!ps_tq_active) {
64 ps_tq_active = 1;
65 if (!ps_nice)
66 schedule_work(&ps_tq);
67 else
68 schedule_delayed_work(&ps_tq, ps_nice-1);
69 }
70 spin_unlock_irqrestore(&ps_spinlock,flags);
71}
72
73static void ps_tq_int(void *data)
74{
75 void (*con)(void);
76 unsigned long flags;
77
78 spin_lock_irqsave(&ps_spinlock,flags);
79
80 con = ps_continuation;
81 ps_tq_active = 0;
82
83 if (!con) {
84 spin_unlock_irqrestore(&ps_spinlock,flags);
85 return;
86 }
87 if (!ps_ready || ps_ready() || time_after_eq(jiffies, ps_timeout)) {
88 ps_continuation = NULL;
89 spin_unlock_irqrestore(&ps_spinlock,flags);
90 con();
91 return;
92 }
93 ps_tq_active = 1;
94 if (!ps_nice)
95 schedule_work(&ps_tq);
96 else
97 schedule_delayed_work(&ps_tq, ps_nice-1);
98 spin_unlock_irqrestore(&ps_spinlock,flags);
99}
100
101/* end of pseudo.h */
102
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
new file mode 100644
index 000000000000..8fbd6922fe0d
--- /dev/null
+++ b/drivers/block/paride/pt.c
@@ -0,0 +1,1024 @@
1/*
2 pt.c (c) 1998 Grant R. Guenther <grant@torque.net>
3 Under the terms of the GNU General Public License.
4
5 This is the high-level driver for parallel port ATAPI tape
6 drives based on chips supported by the paride module.
7
8 The driver implements both rewinding and non-rewinding
9 devices, filemarks, and the rewind ioctl. It allocates
10 a small internal "bounce buffer" for each open device, but
11 otherwise expects buffering and blocking to be done at the
12 user level. As with most block-structured tapes, short
13 writes are padded to full tape blocks, so reading back a file
14 may return more data than was actually written.
15
16 By default, the driver will autoprobe for a single parallel
17	port ATAPI tape drive, but if the drives' individual parameters are
18 specified, the driver can handle up to 4 drives.
19
20 The rewinding devices are named /dev/pt0, /dev/pt1, ...
21 while the non-rewinding devices are /dev/npt0, /dev/npt1, etc.
22
23 The behaviour of the pt driver can be altered by setting
24 some parameters from the insmod command line. The following
25 parameters are adjustable:
26
27 drive0 These four arguments can be arrays of
28 drive1 1-6 integers as follows:
29 drive2
30 drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
31
32 Where,
33
34 <prt> is the base of the parallel port address for
35 the corresponding drive. (required)
36
37 <pro> is the protocol number for the adapter that
38 supports this drive. These numbers are
39 logged by 'paride' when the protocol modules
40 are initialised. (0 if not given)
41
42 <uni> for those adapters that support chained
43 devices, this is the unit selector for the
44 chain of devices on the given port. It should
45 be zero for devices that don't support chaining.
46 (0 if not given)
47
48 <mod> this can be -1 to choose the best mode, or one
49 of the mode numbers supported by the adapter.
50 (-1 if not given)
51
52 <slv> ATAPI devices can be jumpered to master or slave.
53 Set this to 0 to choose the master drive, 1 to
54 choose the slave, -1 (the default) to choose the
55 first drive found.
56
57 <dly> some parallel ports require the driver to
58 go more slowly. -1 sets a default value that
59 should work with the chosen protocol. Otherwise,
60 set this to a small integer, the larger it is
61 the slower the port i/o. In some cases, setting
62 this to zero will speed up the device. (default -1)
63
64	    major	You may use this parameter to override the
65 default major number (96) that this driver
66 will use. Be sure to change the device
67 name as well.
68
69 name This parameter is a character string that
70 contains the name the kernel will use for this
71 device (in /proc output, for instance).
72 (default "pt").
73
74 verbose This parameter controls the amount of logging
75 that the driver will do. Set it to 0 for
76 normal operation, 1 to see autoprobe progress
77 messages, or 2 to see additional debugging
78 output. (default 0)
79
80 If this driver is built into the kernel, you can use
81 the following command line parameters, with the same values
82 as the corresponding module parameters listed above:
83
84 pt.drive0
85 pt.drive1
86 pt.drive2
87 pt.drive3
88
89 In addition, you can use the parameter pt.disable to disable
90 the driver entirely.
91
92*/
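
A short user-space sketch of the interface described above: it rewinds the tape with the MTIOCTOP ioctl (MTREW and MTWEOF are the two operations this driver implements) and then reads one block. The /dev/pt0 path and the 16 KB buffer size are illustrative only.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mtio.h>

	int main(void)
	{
		char buf[16384];
		struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
		int fd = open("/dev/pt0", O_RDONLY);
		ssize_t n;

		if (fd < 0)
			return 1;
		if (ioctl(fd, MTIOCTOP, &op) < 0)	/* rewind first */
			return 1;

		n = read(fd, buf, sizeof(buf));	/* short writes are padded to
						   full tape blocks, so whole
						   blocks come back */
		printf("read %zd bytes\n", n);
		close(fd);
		return 0;
	}

Opening the non-rewinding node (/dev/npt0) instead skips the automatic rewind that the rewinding devices perform on close.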
93
94/* Changes:
95
96 1.01 GRG 1998.05.06 Round up transfer size, fix ready_wait,
97	                       loosened interpretation of the ATAPI standard
98 for clearing error status.
99 Eliminate sti();
100 1.02 GRG 1998.06.16 Eliminate an Ugh.
101 1.03 GRG 1998.08.15 Adjusted PT_TMO, use HZ in loop timing,
102 extra debugging
103 1.04 GRG 1998.09.24 Repair minor coding error, added jumbo support
104
105*/
106
107#define PT_VERSION "1.04"
108#define PT_MAJOR 96
109#define PT_NAME "pt"
110#define PT_UNITS 4
111
112/* Here are things one can override from the insmod command.
113   Most are autoprobed by paride unless set here.  Verbose is off
114 by default.
115
116*/
117
118static int verbose = 0;
119static int major = PT_MAJOR;
120static char *name = PT_NAME;
121static int disable = 0;
122
123static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
124static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
125static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
126static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
127
128static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
129
130#define D_PRT 0
131#define D_PRO 1
132#define D_UNI 2
133#define D_MOD 3
134#define D_SLV 4
135#define D_DLY 5
136
137#define DU (*drives[unit])
138
139/* end of parameters */
140
141#include <linux/module.h>
142#include <linux/init.h>
143#include <linux/fs.h>
144#include <linux/devfs_fs_kernel.h>
145#include <linux/delay.h>
146#include <linux/slab.h>
147#include <linux/mtio.h>
148#include <linux/device.h>
149
150#include <asm/uaccess.h>
151
152module_param(verbose, bool, 0);
153module_param(major, int, 0);
154module_param(name, charp, 0);
155module_param_array(drive0, int, NULL, 0);
156module_param_array(drive1, int, NULL, 0);
157module_param_array(drive2, int, NULL, 0);
158module_param_array(drive3, int, NULL, 0);
159
160#include "paride.h"
161
162#define PT_MAX_RETRIES 5
163#define PT_TMO 3000 /* interrupt timeout in jiffies */
164#define PT_SPIN_DEL 50 /* spin delay in micro-seconds */
165#define PT_RESET_TMO 30 /* 30 seconds */
166#define PT_READY_TMO 60 /* 60 seconds */
167#define PT_REWIND_TMO 1200 /* 20 minutes */
168
169#define PT_SPIN ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO)
170
171#define STAT_ERR 0x00001
172#define STAT_INDEX 0x00002
173#define STAT_ECC 0x00004
174#define STAT_DRQ 0x00008
175#define STAT_SEEK 0x00010
176#define STAT_WRERR 0x00020
177#define STAT_READY 0x00040
178#define STAT_BUSY 0x00080
179#define STAT_SENSE 0x1f000
180
181#define ATAPI_TEST_READY 0x00
182#define ATAPI_REWIND 0x01
183#define ATAPI_REQ_SENSE 0x03
184#define ATAPI_READ_6 0x08
185#define ATAPI_WRITE_6 0x0a
186#define ATAPI_WFM 0x10
187#define ATAPI_IDENTIFY 0x12
188#define ATAPI_MODE_SENSE 0x1a
189#define ATAPI_LOG_SENSE 0x4d
190
191static int pt_open(struct inode *inode, struct file *file);
192static int pt_ioctl(struct inode *inode, struct file *file,
193 unsigned int cmd, unsigned long arg);
194static int pt_release(struct inode *inode, struct file *file);
195static ssize_t pt_read(struct file *filp, char __user *buf,
196 size_t count, loff_t * ppos);
197static ssize_t pt_write(struct file *filp, const char __user *buf,
198 size_t count, loff_t * ppos);
199static int pt_detect(void);
200
201/* bits in tape->flags */
202
203#define PT_MEDIA 1
204#define PT_WRITE_OK 2
205#define PT_REWIND 4
206#define PT_WRITING 8
207#define PT_READING 16
208#define PT_EOF 32
209
210#define PT_NAMELEN 8
211#define PT_BUFSIZE 16384
212
213struct pt_unit {
214 struct pi_adapter pia; /* interface to paride layer */
215 struct pi_adapter *pi;
216 int flags; /* various state flags */
217 int last_sense; /* result of last request sense */
218 int drive; /* drive */
219 atomic_t available; /* 1 if access is available 0 otherwise */
220 int bs; /* block size */
221 int capacity; /* Size of tape in KB */
222 int present; /* device present ? */
223 char *bufptr;
224	char name[PT_NAMELEN];	/* pt0, pt1, ... */
225};
226
227static int pt_identify(struct pt_unit *tape);
228
229static struct pt_unit pt[PT_UNITS];
230
231static char pt_scratch[512]; /* scratch block buffer */
232
233/* kernel glue structures */
234
235static struct file_operations pt_fops = {
236 .owner = THIS_MODULE,
237 .read = pt_read,
238 .write = pt_write,
239 .ioctl = pt_ioctl,
240 .open = pt_open,
241 .release = pt_release,
242};
243
244/* sysfs class support */
245static struct class_simple *pt_class;
246
247static inline int status_reg(struct pi_adapter *pi)
248{
249 return pi_read_regr(pi, 1, 6);
250}
251
252static inline int read_reg(struct pi_adapter *pi, int reg)
253{
254 return pi_read_regr(pi, 0, reg);
255}
256
257static inline void write_reg(struct pi_adapter *pi, int reg, int val)
258{
259 pi_write_regr(pi, 0, reg, val);
260}
261
262static inline u8 DRIVE(struct pt_unit *tape)
263{
264 return 0xa0+0x10*tape->drive;
265}
266
267static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
268{
269 int j, r, e, s, p;
270 struct pi_adapter *pi = tape->pi;
271
272 j = 0;
273 while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop))))
274 && (j++ < PT_SPIN))
275 udelay(PT_SPIN_DEL);
276
277 if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) {
278 s = read_reg(pi, 7);
279 e = read_reg(pi, 1);
280 p = read_reg(pi, 2);
281 if (j >= PT_SPIN)
282 e |= 0x100;
283 if (fun)
284 printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
285 " loop=%d phase=%d\n",
286 tape->name, fun, msg, r, s, e, j, p);
287 return (e << 8) + s;
288 }
289 return 0;
290}
291
292static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun)
293{
294 struct pi_adapter *pi = tape->pi;
295 pi_connect(pi);
296
297 write_reg(pi, 6, DRIVE(tape));
298
299 if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
300 pi_disconnect(pi);
301 return -1;
302 }
303
304 write_reg(pi, 4, dlen % 256);
305 write_reg(pi, 5, dlen / 256);
306 write_reg(pi, 7, 0xa0); /* ATAPI packet command */
307
308 if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
309 pi_disconnect(pi);
310 return -1;
311 }
312
313 if (read_reg(pi, 2) != 1) {
314 printk("%s: %s: command phase error\n", tape->name, fun);
315 pi_disconnect(pi);
316 return -1;
317 }
318
319 pi_write_block(pi, cmd, 12);
320
321 return 0;
322}
323
324static int pt_completion(struct pt_unit *tape, char *buf, char *fun)
325{
326 struct pi_adapter *pi = tape->pi;
327 int r, s, n, p;
328
329 r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
330 fun, "completion");
331
332 if (read_reg(pi, 7) & STAT_DRQ) {
333 n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) +
334 3) & 0xfffc);
335 p = read_reg(pi, 2) & 3;
336 if (p == 0)
337 pi_write_block(pi, buf, n);
338 if (p == 2)
339 pi_read_block(pi, buf, n);
340 }
341
342 s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
343
344 pi_disconnect(pi);
345
346 return (r ? r : s);
347}
348
349static void pt_req_sense(struct pt_unit *tape, int quiet)
350{
351 char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
352 char buf[16];
353 int r;
354
355 r = pt_command(tape, rs_cmd, 16, "Request sense");
356 mdelay(1);
357 if (!r)
358 pt_completion(tape, buf, "Request sense");
359
360 tape->last_sense = -1;
361 if (!r) {
362 if (!quiet)
363 printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
364 tape->name, buf[2] & 0xf, buf[12], buf[13]);
365 tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8)
366 | ((buf[13] & 0xff) << 16);
367 }
368}
369
370static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun)
371{
372 int r;
373
374 r = pt_command(tape, cmd, dlen, fun);
375 mdelay(1);
376 if (!r)
377 r = pt_completion(tape, buf, fun);
378 if (r)
379 pt_req_sense(tape, !fun);
380
381 return r;
382}
383
384static void pt_sleep(int cs)
385{
386 current->state = TASK_INTERRUPTIBLE;
387 schedule_timeout(cs);
388}
389
390static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
391{
392 struct pi_adapter *pi = tape->pi;
393 int k, e, s;
394
395 k = 0;
396 e = 0;
397 s = 0;
398 while (k < tmo) {
399 pt_sleep(pause);
400 k++;
401 pi_connect(pi);
402 write_reg(pi, 6, DRIVE(tape));
403 s = read_reg(pi, 7);
404 e = read_reg(pi, 1);
405 pi_disconnect(pi);
406 if (s & (STAT_ERR | STAT_SEEK))
407 break;
408 }
409 if ((k >= tmo) || (s & STAT_ERR)) {
410 if (k >= tmo)
411 printk("%s: %s DSC timeout\n", tape->name, msg);
412 else
413 printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s,
414 e);
415 pt_req_sense(tape, 0);
416 return 0;
417 }
418 return 1;
419}
420
421static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun)
422{
423 if (pt_command(tape, cmd, 0, fun)) {
424 pt_req_sense(tape, 0);
425 return;
426 }
427 pi_disconnect(tape->pi);
428 pt_poll_dsc(tape, HZ, tmo, fun);
429}
430
431static void pt_rewind(struct pt_unit *tape)
432{
433 char rw_cmd[12] = { ATAPI_REWIND, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
434
435 pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind");
436}
437
438static void pt_write_fm(struct pt_unit *tape)
439{
440 char wm_cmd[12] = { ATAPI_WFM, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
441
442 pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark");
443}
444
445#define DBMSG(msg) ((verbose>1)?(msg):NULL)
446
447static int pt_reset(struct pt_unit *tape)
448{
449 struct pi_adapter *pi = tape->pi;
450 int i, k, flg;
451 int expect[5] = { 1, 1, 1, 0x14, 0xeb };
452
453 pi_connect(pi);
454 write_reg(pi, 6, DRIVE(tape));
455 write_reg(pi, 7, 8);
456
457 pt_sleep(20 * HZ / 1000);
458
459 k = 0;
460 while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY))
461 pt_sleep(HZ / 10);
462
463 flg = 1;
464 for (i = 0; i < 5; i++)
465 flg &= (read_reg(pi, i + 1) == expect[i]);
466
467 if (verbose) {
468 printk("%s: Reset (%d) signature = ", tape->name, k);
469 for (i = 0; i < 5; i++)
470 printk("%3x", read_reg(pi, i + 1));
471 if (!flg)
472 printk(" (incorrect)");
473 printk("\n");
474 }
475
476 pi_disconnect(pi);
477 return flg - 1;
478}
479
480static int pt_ready_wait(struct pt_unit *tape, int tmo)
481{
482 char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
483 int k, p;
484
485 k = 0;
486 while (k < tmo) {
487 tape->last_sense = 0;
488 pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready"));
489 p = tape->last_sense;
490 if (!p)
491 return 0;
492 if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
493 return p;
494 k++;
495 pt_sleep(HZ);
496 }
497 return 0x000020; /* timeout */
498}
499
500static void xs(char *buf, char *targ, int offs, int len)
501{
502 int j, k, l;
503
504 j = 0;
505 l = 0;
506 for (k = 0; k < len; k++)
507 if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
508 l = targ[j++] = buf[k + offs];
509 if (l == 0x20)
510 j--;
511 targ[j] = 0;
512}
513
514static int xn(char *buf, int offs, int size)
515{
516 int v, k;
517
518 v = 0;
519 for (k = 0; k < size; k++)
520 v = v * 256 + (buf[k + offs] & 0xff);
521 return v;
522}
523
524static int pt_identify(struct pt_unit *tape)
525{
526 int dt, s;
527 char *ms[2] = { "master", "slave" };
528 char mf[10], id[18];
529 char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
530 char ms_cmd[12] =
531 { ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
532 char ls_cmd[12] =
533 { ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 };
534 char buf[36];
535
536 s = pt_atapi(tape, id_cmd, 36, buf, "identify");
537 if (s)
538 return -1;
539
540 dt = buf[0] & 0x1f;
541 if (dt != 1) {
542 if (verbose)
543 printk("%s: Drive %d, unsupported type %d\n",
544 tape->name, tape->drive, dt);
545 return -1;
546 }
547
548 xs(buf, mf, 8, 8);
549 xs(buf, id, 16, 16);
550
551 tape->flags = 0;
552 tape->capacity = 0;
553 tape->bs = 0;
554
555 if (!pt_ready_wait(tape, PT_READY_TMO))
556 tape->flags |= PT_MEDIA;
557
558 if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) {
559 if (!(buf[2] & 0x80))
560 tape->flags |= PT_WRITE_OK;
561 tape->bs = xn(buf, 10, 2);
562 }
563
564 if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense"))
565 tape->capacity = xn(buf, 24, 4);
566
567 printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]);
568 if (!(tape->flags & PT_MEDIA))
569 printk(", no media\n");
570 else {
571 if (!(tape->flags & PT_WRITE_OK))
572 printk(", RO");
573 printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024);
574 }
575
576 return 0;
577}
578
579
580/*
581 * returns 0, with id set if drive is detected
582 * -1, if drive detection failed
583 */
584static int pt_probe(struct pt_unit *tape)
585{
586 if (tape->drive == -1) {
587 for (tape->drive = 0; tape->drive <= 1; tape->drive++)
588 if (!pt_reset(tape))
589 return pt_identify(tape);
590 } else {
591 if (!pt_reset(tape))
592 return pt_identify(tape);
593 }
594 return -1;
595}
596
597static int pt_detect(void)
598{
599 struct pt_unit *tape;
600 int specified = 0, found = 0;
601 int unit;
602
603 printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
604
605 specified = 0;
606 for (unit = 0; unit < PT_UNITS; unit++) {
607 struct pt_unit *tape = &pt[unit];
608 tape->pi = &tape->pia;
609 atomic_set(&tape->available, 1);
610 tape->flags = 0;
611 tape->last_sense = 0;
612 tape->present = 0;
613 tape->bufptr = NULL;
614 tape->drive = DU[D_SLV];
615 snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit);
616 if (!DU[D_PRT])
617 continue;
618 specified++;
619 if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI],
620 DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT,
621 verbose, tape->name)) {
622 if (!pt_probe(tape)) {
623 tape->present = 1;
624 found++;
625 } else
626 pi_release(tape->pi);
627 }
628 }
629 if (specified == 0) {
630 tape = pt;
631 if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch,
632 PI_PT, verbose, tape->name)) {
633 if (!pt_probe(tape)) {
634 tape->present = 1;
635 found++;
636 } else
637 pi_release(tape->pi);
638 }
639
640 }
641 if (found)
642 return 0;
643
644 printk("%s: No ATAPI tape drive detected\n", name);
645 return -1;
646}
647
648static int pt_open(struct inode *inode, struct file *file)
649{
650 int unit = iminor(inode) & 0x7F;
651 struct pt_unit *tape = pt + unit;
652 int err;
653
654 if (unit >= PT_UNITS || (!tape->present))
655 return -ENODEV;
656
657 err = -EBUSY;
658 if (!atomic_dec_and_test(&tape->available))
659 goto out;
660
661 pt_identify(tape);
662
663 err = -ENODEV;
664	if (!(tape->flags & PT_MEDIA))
665 goto out;
666
667 err = -EROFS;
668	if (!(tape->flags & PT_WRITE_OK) && (file->f_mode & 2))
669 goto out;
670
671 if (!(iminor(inode) & 128))
672 tape->flags |= PT_REWIND;
673
674 err = -ENOMEM;
675 tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL);
676 if (tape->bufptr == NULL) {
677 printk("%s: buffer allocation failed\n", tape->name);
678 goto out;
679 }
680
681 file->private_data = tape;
682 return 0;
683
684out:
685 atomic_inc(&tape->available);
686 return err;
687}
688
689static int pt_ioctl(struct inode *inode, struct file *file,
690 unsigned int cmd, unsigned long arg)
691{
692 struct pt_unit *tape = file->private_data;
693 struct mtop __user *p = (void __user *)arg;
694 struct mtop mtop;
695
696 switch (cmd) {
697 case MTIOCTOP:
698 if (copy_from_user(&mtop, p, sizeof(struct mtop)))
699 return -EFAULT;
700
701 switch (mtop.mt_op) {
702
703 case MTREW:
704 pt_rewind(tape);
705 return 0;
706
707 case MTWEOF:
708 pt_write_fm(tape);
709 return 0;
710
711 default:
712 printk("%s: Unimplemented mt_op %d\n", tape->name,
713 mtop.mt_op);
714 return -EINVAL;
715 }
716
717 default:
718 printk("%s: Unimplemented ioctl 0x%x\n", tape->name, cmd);
719 return -EINVAL;
720
721 }
722}
723
724static int
725pt_release(struct inode *inode, struct file *file)
726{
727 struct pt_unit *tape = file->private_data;
728
729 if (atomic_read(&tape->available) > 1)
730 return -EINVAL;
731
732 if (tape->flags & PT_WRITING)
733 pt_write_fm(tape);
734
735 if (tape->flags & PT_REWIND)
736 pt_rewind(tape);
737
738 kfree(tape->bufptr);
739 tape->bufptr = NULL;
740
741 atomic_inc(&tape->available);
742
743 return 0;
744
745}
746
747static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
748{
749 struct pt_unit *tape = filp->private_data;
750 struct pi_adapter *pi = tape->pi;
751 char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
752 int k, n, r, p, s, t, b;
753
754 if (!(tape->flags & (PT_READING | PT_WRITING))) {
755 tape->flags |= PT_READING;
756 if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead"))
757 return -EIO;
758 } else if (tape->flags & PT_WRITING)
759 return -EIO;
760
761 if (tape->flags & PT_EOF)
762 return 0;
763
764 t = 0;
765
766 while (count > 0) {
767
768 if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read"))
769 return -EIO;
770
771 n = count;
772 if (n > 32768)
773 n = 32768; /* max per command */
774 b = (n - 1 + tape->bs) / tape->bs;
775 n = b * tape->bs; /* rounded up to even block */
776
777 rd_cmd[4] = b;
778
779 r = pt_command(tape, rd_cmd, n, "read");
780
781 mdelay(1);
782
783 if (r) {
784 pt_req_sense(tape, 0);
785 return -EIO;
786 }
787
788 while (1) {
789
790 r = pt_wait(tape, STAT_BUSY,
791 STAT_DRQ | STAT_ERR | STAT_READY,
792 DBMSG("read DRQ"), "");
793
794 if (r & STAT_SENSE) {
795 pi_disconnect(pi);
796 pt_req_sense(tape, 0);
797 return -EIO;
798 }
799
800 if (r)
801 tape->flags |= PT_EOF;
802
803 s = read_reg(pi, 7);
804
805 if (!(s & STAT_DRQ))
806 break;
807
808 n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
809 p = (read_reg(pi, 2) & 3);
810 if (p != 2) {
811 pi_disconnect(pi);
812 printk("%s: Phase error on read: %d\n", tape->name,
813 p);
814 return -EIO;
815 }
816
817 while (n > 0) {
818 k = n;
819 if (k > PT_BUFSIZE)
820 k = PT_BUFSIZE;
821 pi_read_block(pi, tape->bufptr, k);
822 n -= k;
823 b = k;
824 if (b > count)
825 b = count;
826 if (copy_to_user(buf + t, tape->bufptr, b)) {
827 pi_disconnect(pi);
828 return -EFAULT;
829 }
830 t += b;
831 count -= b;
832 }
833
834 }
835 pi_disconnect(pi);
836 if (tape->flags & PT_EOF)
837 break;
838 }
839
840 return t;
841
842}
843
844static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
845{
846 struct pt_unit *tape = filp->private_data;
847 struct pi_adapter *pi = tape->pi;
848 char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
849 int k, n, r, p, s, t, b;
850
851 if (!(tape->flags & PT_WRITE_OK))
852 return -EROFS;
853
854 if (!(tape->flags & (PT_READING | PT_WRITING))) {
855 tape->flags |= PT_WRITING;
856 if (pt_atapi
857 (tape, wr_cmd, 0, NULL, "start buffer-available mode"))
858 return -EIO;
859 } else if (tape->flags & PT_READING)
860 return -EIO;
861
862 if (tape->flags & PT_EOF)
863 return -ENOSPC;
864
865 t = 0;
866
867 while (count > 0) {
868
869 if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write"))
870 return -EIO;
871
872 n = count;
873 if (n > 32768)
874 n = 32768; /* max per command */
875 b = (n - 1 + tape->bs) / tape->bs;
876 n = b * tape->bs; /* rounded up to even block */
877
878 wr_cmd[4] = b;
879
880 r = pt_command(tape, wr_cmd, n, "write");
881
882 mdelay(1);
883
884 if (r) { /* error delivering command only */
885 pt_req_sense(tape, 0);
886 return -EIO;
887 }
888
889 while (1) {
890
891 r = pt_wait(tape, STAT_BUSY,
892 STAT_DRQ | STAT_ERR | STAT_READY,
893 DBMSG("write DRQ"), NULL);
894
895 if (r & STAT_SENSE) {
896 pi_disconnect(pi);
897 pt_req_sense(tape, 0);
898 return -EIO;
899 }
900
901 if (r)
902 tape->flags |= PT_EOF;
903
904 s = read_reg(pi, 7);
905
906 if (!(s & STAT_DRQ))
907 break;
908
909 n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
910 p = (read_reg(pi, 2) & 3);
911 if (p != 0) {
912 pi_disconnect(pi);
913				printk("%s: Phase error on write: %d\n",
914 tape->name, p);
915 return -EIO;
916 }
917
918 while (n > 0) {
919 k = n;
920 if (k > PT_BUFSIZE)
921 k = PT_BUFSIZE;
922 b = k;
923 if (b > count)
924 b = count;
925 if (copy_from_user(tape->bufptr, buf + t, b)) {
926 pi_disconnect(pi);
927 return -EFAULT;
928 }
929 pi_write_block(pi, tape->bufptr, k);
930 t += b;
931 count -= b;
932 n -= k;
933 }
934
935 }
936 pi_disconnect(pi);
937 if (tape->flags & PT_EOF)
938 break;
939 }
940
941 return t;
942}
943
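/* Module init: bail out if disabled, probe for attached units, register the
 * char device major, create the "pt" class and devfs directory, and add the
 * pt%d and pt%dn device nodes for each unit found. */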
944static int __init pt_init(void)
945{
946 int unit, err = 0;
947
948 if (disable) {
949 err = -1;
950 goto out;
951 }
952
953 if (pt_detect()) {
954 err = -1;
955 goto out;
956 }
957
958 if (register_chrdev(major, name, &pt_fops)) {
959 printk("pt_init: unable to get major number %d\n", major);
960 for (unit = 0; unit < PT_UNITS; unit++)
961 if (pt[unit].present)
962 pi_release(pt[unit].pi);
963 err = -1;
964 goto out;
965 }
966 pt_class = class_simple_create(THIS_MODULE, "pt");
967 if (IS_ERR(pt_class)) {
968 err = PTR_ERR(pt_class);
969 goto out_chrdev;
970 }
971
972 devfs_mk_dir("pt");
973 for (unit = 0; unit < PT_UNITS; unit++)
974 if (pt[unit].present) {
975 class_simple_device_add(pt_class, MKDEV(major, unit),
976 NULL, "pt%d", unit);
977 err = devfs_mk_cdev(MKDEV(major, unit),
978 S_IFCHR | S_IRUSR | S_IWUSR,
979 "pt/%d", unit);
980 if (err) {
981 class_simple_device_remove(MKDEV(major, unit));
982 goto out_class;
983 }
984 class_simple_device_add(pt_class, MKDEV(major, unit + 128),
985 NULL, "pt%dn", unit);
986 err = devfs_mk_cdev(MKDEV(major, unit + 128),
987 S_IFCHR | S_IRUSR | S_IWUSR,
988 "pt/%dn", unit);
989 if (err) {
990 class_simple_device_remove(MKDEV(major, unit + 128));
991 goto out_class;
992 }
993 }
994 goto out;
995
996out_class:
997 class_simple_destroy(pt_class);
998out_chrdev:
999 unregister_chrdev(major, "pt");
1000out:
1001 return err;
1002}
1003
1004static void __exit pt_exit(void)
1005{
1006 int unit;
1007 for (unit = 0; unit < PT_UNITS; unit++)
1008 if (pt[unit].present) {
1009 class_simple_device_remove(MKDEV(major, unit));
1010 devfs_remove("pt/%d", unit);
1011 class_simple_device_remove(MKDEV(major, unit + 128));
1012 devfs_remove("pt/%dn", unit);
1013 }
1014 class_simple_destroy(pt_class);
1015 devfs_remove("pt");
1016 unregister_chrdev(major, name);
1017 for (unit = 0; unit < PT_UNITS; unit++)
1018 if (pt[unit].present)
1019 pi_release(pt[unit].pi);
1020}
1021
1022MODULE_LICENSE("GPL");
1023module_init(pt_init)
1024module_exit(pt_exit)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644
index 000000000000..1a1fa3ccb913
--- /dev/null
+++ b/drivers/block/pktcdvd.c
@@ -0,0 +1,2681 @@
1/*
2 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
3 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
4 *
5 * May be copied or modified under the terms of the GNU General Public
6 * License. See linux/COPYING for more information.
7 *
8 * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
9 * DVD-RW devices (aka an exercise in block layer masturbation)
10 *
11 *
 12 * TODO: (in rough order of when I will fix it)
13 * - Only able to write on CD-RW media right now.
14 * - check host application code on media and set it in write page
15 * - interface for UDF <-> packet to negotiate a new location when a write
16 * fails.
17 * - handle OPC, especially for -RW media
18 *
19 * Theory of operation:
20 *
21 * We use a custom make_request_fn function that forwards reads directly to
22 * the underlying CD device. Write requests are either attached directly to
23 * a live packet_data object, or simply stored sequentially in a list for
24 * later processing by the kcdrwd kernel thread. This driver doesn't use
 25 * any elevator functionality as defined by the elevator_s struct, but the
26 * underlying CD device uses a standard elevator.
27 *
28 * This strategy makes it possible to do very late merging of IO requests.
29 * A new bio sent to pkt_make_request can be merged with a live packet_data
30 * object even if the object is in the data gathering state.
31 *
32 *************************************************************************/
33
34#define VERSION_CODE "v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
35
36#include <linux/pktcdvd.h>
37#include <linux/config.h>
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/kernel.h>
41#include <linux/kthread.h>
42#include <linux/errno.h>
43#include <linux/spinlock.h>
44#include <linux/file.h>
45#include <linux/proc_fs.h>
46#include <linux/seq_file.h>
47#include <linux/miscdevice.h>
48#include <linux/suspend.h>
49#include <scsi/scsi_cmnd.h>
50#include <scsi/scsi_ioctl.h>
51
52#include <asm/uaccess.h>
53
54#if PACKET_DEBUG
55#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
56#else
57#define DPRINTK(fmt, args...)
58#endif
59
60#if PACKET_DEBUG > 1
61#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
62#else
63#define VPRINTK(fmt, args...)
64#endif
65
66#define MAX_SPEED 0xffff
67
68#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
69
70static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
71static struct proc_dir_entry *pkt_proc;
72static int pkt_major;
73static struct semaphore ctl_mutex; /* Serialize open/close/setup/teardown */
74static mempool_t *psd_pool;
75
76
77static void pkt_bio_finished(struct pktcdvd_device *pd)
78{
79 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
80 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
81 VPRINTK("pktcdvd: queue empty\n");
82 atomic_set(&pd->iosched.attention, 1);
83 wake_up(&pd->wqueue);
84 }
85}
86
87static void pkt_bio_destructor(struct bio *bio)
88{
89 kfree(bio->bi_io_vec);
90 kfree(bio);
91}
92
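/* Allocate a bio together with a private nr_iovecs-sized io_vec array; the
 * destructor above frees both when the bio is released. */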
93static struct bio *pkt_bio_alloc(int nr_iovecs)
94{
95 struct bio_vec *bvl = NULL;
96 struct bio *bio;
97
98 bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
99 if (!bio)
100 goto no_bio;
101 bio_init(bio);
102
103 bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
104 if (!bvl)
105 goto no_bvl;
106 memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
107
108 bio->bi_max_vecs = nr_iovecs;
109 bio->bi_io_vec = bvl;
110 bio->bi_destructor = pkt_bio_destructor;
111
112 return bio;
113
114 no_bvl:
115 kfree(bio);
116 no_bio:
117 return NULL;
118}
119
120/*
121 * Allocate a packet_data struct
122 */
123static struct packet_data *pkt_alloc_packet_data(void)
124{
125 int i;
126 struct packet_data *pkt;
127
128 pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
129 if (!pkt)
130 goto no_pkt;
131 memset(pkt, 0, sizeof(struct packet_data));
132
133 pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
134 if (!pkt->w_bio)
135 goto no_bio;
136
137 for (i = 0; i < PAGES_PER_PACKET; i++) {
138 pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
139 if (!pkt->pages[i])
140 goto no_page;
141 }
142
143 spin_lock_init(&pkt->lock);
144
145 for (i = 0; i < PACKET_MAX_SIZE; i++) {
146 struct bio *bio = pkt_bio_alloc(1);
147 if (!bio)
148 goto no_rd_bio;
149 pkt->r_bios[i] = bio;
150 }
151
152 return pkt;
153
154no_rd_bio:
155 for (i = 0; i < PACKET_MAX_SIZE; i++) {
156 struct bio *bio = pkt->r_bios[i];
157 if (bio)
158 bio_put(bio);
159 }
160
161no_page:
162 for (i = 0; i < PAGES_PER_PACKET; i++)
163 if (pkt->pages[i])
164 __free_page(pkt->pages[i]);
165 bio_put(pkt->w_bio);
166no_bio:
167 kfree(pkt);
168no_pkt:
169 return NULL;
170}
171
172/*
173 * Free a packet_data struct
174 */
175static void pkt_free_packet_data(struct packet_data *pkt)
176{
177 int i;
178
179 for (i = 0; i < PACKET_MAX_SIZE; i++) {
180 struct bio *bio = pkt->r_bios[i];
181 if (bio)
182 bio_put(bio);
183 }
184 for (i = 0; i < PAGES_PER_PACKET; i++)
185 __free_page(pkt->pages[i]);
186 bio_put(pkt->w_bio);
187 kfree(pkt);
188}
189
190static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
191{
192 struct packet_data *pkt, *next;
193
194 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
195
196 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
197 pkt_free_packet_data(pkt);
198 }
199}
200
201static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
202{
203 struct packet_data *pkt;
204
205 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
206 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
207 spin_lock_init(&pd->cdrw.active_list_lock);
208 while (nr_packets > 0) {
209 pkt = pkt_alloc_packet_data();
210 if (!pkt) {
211 pkt_shrink_pktlist(pd);
212 return 0;
213 }
214 pkt->id = nr_packets;
215 pkt->pd = pd;
216 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
217 nr_packets--;
218 }
219 return 1;
220}
221
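/* mempool callbacks for the pkt_rb_node entries that back the per-device
 * bio queue rb tree. */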
222static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
223{
224 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
225}
226
227static void pkt_rb_free(void *ptr, void *data)
228{
229 kfree(ptr);
230}
231
232static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
233{
234 struct rb_node *n = rb_next(&node->rb_node);
235 if (!n)
236 return NULL;
237 return rb_entry(n, struct pkt_rb_node, rb_node);
238}
239
240static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
241{
242 rb_erase(&node->rb_node, &pd->bio_queue);
243 mempool_free(node, pd->rb_pool);
244 pd->bio_queue_size--;
245 BUG_ON(pd->bio_queue_size < 0);
246}
247
248/*
249 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
250 */
251static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
252{
253 struct rb_node *n = pd->bio_queue.rb_node;
254 struct rb_node *next;
255 struct pkt_rb_node *tmp;
256
257 if (!n) {
258 BUG_ON(pd->bio_queue_size > 0);
259 return NULL;
260 }
261
262 for (;;) {
263 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
264 if (s <= tmp->bio->bi_sector)
265 next = n->rb_left;
266 else
267 next = n->rb_right;
268 if (!next)
269 break;
270 n = next;
271 }
272
273 if (s > tmp->bio->bi_sector) {
274 tmp = pkt_rbtree_next(tmp);
275 if (!tmp)
276 return NULL;
277 }
278 BUG_ON(s > tmp->bio->bi_sector);
279 return tmp;
280}
281
282/*
283 * Insert a node into the pd->bio_queue rb tree.
284 */
285static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
286{
287 struct rb_node **p = &pd->bio_queue.rb_node;
288 struct rb_node *parent = NULL;
289 sector_t s = node->bio->bi_sector;
290 struct pkt_rb_node *tmp;
291
292 while (*p) {
293 parent = *p;
294 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
295 if (s < tmp->bio->bi_sector)
296 p = &(*p)->rb_left;
297 else
298 p = &(*p)->rb_right;
299 }
300 rb_link_node(&node->rb_node, parent, p);
301 rb_insert_color(&node->rb_node, &pd->bio_queue);
302 pd->bio_queue_size++;
303}
304
305/*
306 * Add a bio to a single linked list defined by its head and tail pointers.
307 */
308static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
309{
310 bio->bi_next = NULL;
311 if (*list_tail) {
312 BUG_ON((*list_head) == NULL);
313 (*list_tail)->bi_next = bio;
314 (*list_tail) = bio;
315 } else {
316 BUG_ON((*list_head) != NULL);
317 (*list_head) = bio;
318 (*list_tail) = bio;
319 }
320}
321
322/*
323 * Remove and return the first bio from a single linked list defined by its
324 * head and tail pointers.
325 */
326static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
327{
328 struct bio *bio;
329
330 if (*list_head == NULL)
331 return NULL;
332
333 bio = *list_head;
334 *list_head = bio->bi_next;
335 if (*list_head == NULL)
336 *list_tail = NULL;
337
338 bio->bi_next = NULL;
339 return bio;
340}
341
342/*
343 * Send a packet_command to the underlying block device and
344 * wait for completion.
345 */
346static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
347{
348 char sense[SCSI_SENSE_BUFFERSIZE];
349 request_queue_t *q;
350 struct request *rq;
351 DECLARE_COMPLETION(wait);
352 int err = 0;
353
354 q = bdev_get_queue(pd->bdev);
355
356 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
357 __GFP_WAIT);
358 rq->errors = 0;
359 rq->rq_disk = pd->bdev->bd_disk;
360 rq->bio = NULL;
361 rq->buffer = NULL;
362 rq->timeout = 60*HZ;
363 rq->data = cgc->buffer;
364 rq->data_len = cgc->buflen;
365 rq->sense = sense;
366 memset(sense, 0, sizeof(sense));
367 rq->sense_len = 0;
368 rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
369 if (cgc->quiet)
370 rq->flags |= REQ_QUIET;
371 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
372 if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
373 memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
374
375 rq->ref_count++;
376 rq->flags |= REQ_NOMERGE;
377 rq->waiting = &wait;
378 rq->end_io = blk_end_sync_rq;
379 elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
380 generic_unplug_device(q);
381 wait_for_completion(&wait);
382
383 if (rq->errors)
384 err = -EIO;
385
386 blk_put_request(rq);
387 return err;
388}
389
390/*
391 * A generic sense dump / resolve mechanism should be implemented across
392 * all ATAPI + SCSI devices.
393 */
394static void pkt_dump_sense(struct packet_command *cgc)
395{
396 static char *info[9] = { "No sense", "Recovered error", "Not ready",
397 "Medium error", "Hardware error", "Illegal request",
398 "Unit attention", "Data protect", "Blank check" };
399 int i;
400 struct request_sense *sense = cgc->sense;
401
402 printk("pktcdvd:");
403 for (i = 0; i < CDROM_PACKET_SIZE; i++)
404 printk(" %02x", cgc->cmd[i]);
405 printk(" - ");
406
407 if (sense == NULL) {
408 printk("no sense\n");
409 return;
410 }
411
412 printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
413
414 if (sense->sense_key > 8) {
415 printk(" (INVALID)\n");
416 return;
417 }
418
419 printk(" (%s)\n", info[sense->sense_key]);
420}
421
422/*
423 * flush the drive cache to media
424 */
425static int pkt_flush_cache(struct pktcdvd_device *pd)
426{
427 struct packet_command cgc;
428
429 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
430 cgc.cmd[0] = GPCMD_FLUSH_CACHE;
431 cgc.quiet = 1;
432
433 /*
 434	 * the IMMED bit -- we default to not setting it; although setting it
 435	 * would allow a much faster close, leaving it clear is safer
436 */
437#if 0
438 cgc.cmd[1] = 1 << 1;
439#endif
440 return pkt_generic_packet(pd, &cgc);
441}
442
443/*
444 * speed is given as the normal factor, e.g. 4 for 4x
445 */
446static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
447{
448 struct packet_command cgc;
449 struct request_sense sense;
450 int ret;
451
452 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
453 cgc.sense = &sense;
454 cgc.cmd[0] = GPCMD_SET_SPEED;
455 cgc.cmd[2] = (read_speed >> 8) & 0xff;
456 cgc.cmd[3] = read_speed & 0xff;
457 cgc.cmd[4] = (write_speed >> 8) & 0xff;
458 cgc.cmd[5] = write_speed & 0xff;
459
460 if ((ret = pkt_generic_packet(pd, &cgc)))
461 pkt_dump_sense(&cgc);
462
463 return ret;
464}
465
466/*
467 * Queue a bio for processing by the low-level CD device. Must be called
468 * from process context.
469 */
470static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
471{
472 spin_lock(&pd->iosched.lock);
473 if (bio_data_dir(bio) == READ) {
474 pkt_add_list_last(bio, &pd->iosched.read_queue,
475 &pd->iosched.read_queue_tail);
476 if (high_prio_read)
477 pd->iosched.high_prio_read = 1;
478 } else {
479 pkt_add_list_last(bio, &pd->iosched.write_queue,
480 &pd->iosched.write_queue_tail);
481 }
482 spin_unlock(&pd->iosched.lock);
483
484 atomic_set(&pd->iosched.attention, 1);
485 wake_up(&pd->wqueue);
486}
487
488/*
489 * Process the queued read/write requests. This function handles special
490 * requirements for CDRW drives:
491 * - A cache flush command must be inserted before a read request if the
492 * previous request was a write.
 493 * - Switching between reading and writing is slow, so don't do it more often
494 * than necessary.
495 * - Set the read speed according to current usage pattern. When only reading
496 * from the device, it's best to use the highest possible read speed, but
497 * when switching often between reading and writing, it's better to have the
498 * same read and write speeds.
499 * - Reads originating from user space should have higher priority than reads
500 * originating from pkt_gather_data, because some process is usually waiting
501 * on reads of the first kind.
502 */
503static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
504{
505 request_queue_t *q;
506
507 if (atomic_read(&pd->iosched.attention) == 0)
508 return;
509 atomic_set(&pd->iosched.attention, 0);
510
511 q = bdev_get_queue(pd->bdev);
512
513 for (;;) {
514 struct bio *bio;
515 int reads_queued, writes_queued, high_prio_read;
516
517 spin_lock(&pd->iosched.lock);
518 reads_queued = (pd->iosched.read_queue != NULL);
519 writes_queued = (pd->iosched.write_queue != NULL);
520 if (!reads_queued)
521 pd->iosched.high_prio_read = 0;
522 high_prio_read = pd->iosched.high_prio_read;
523 spin_unlock(&pd->iosched.lock);
524
525 if (!reads_queued && !writes_queued)
526 break;
527
528 if (pd->iosched.writing) {
529 if (high_prio_read || (!writes_queued && reads_queued)) {
530 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
531 VPRINTK("pktcdvd: write, waiting\n");
532 break;
533 }
534 pkt_flush_cache(pd);
535 pd->iosched.writing = 0;
536 }
537 } else {
538 if (!reads_queued && writes_queued) {
539 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
540 VPRINTK("pktcdvd: read, waiting\n");
541 break;
542 }
543 pd->iosched.writing = 1;
544 }
545 }
546
547 spin_lock(&pd->iosched.lock);
548 if (pd->iosched.writing) {
549 bio = pkt_get_list_first(&pd->iosched.write_queue,
550 &pd->iosched.write_queue_tail);
551 } else {
552 bio = pkt_get_list_first(&pd->iosched.read_queue,
553 &pd->iosched.read_queue_tail);
554 }
555 spin_unlock(&pd->iosched.lock);
556
557 if (!bio)
558 continue;
559
560 if (bio_data_dir(bio) == READ)
561 pd->iosched.successive_reads += bio->bi_size >> 10;
562 else
563 pd->iosched.successive_reads = 0;
564 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
565 if (pd->read_speed == pd->write_speed) {
566 pd->read_speed = MAX_SPEED;
567 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
568 }
569 } else {
570 if (pd->read_speed != pd->write_speed) {
571 pd->read_speed = pd->write_speed;
572 pkt_set_speed(pd, pd->write_speed, pd->read_speed);
573 }
574 }
575
576 atomic_inc(&pd->cdrw.pending_bios);
577 generic_make_request(bio);
578 }
579}
580
581/*
582 * Special care is needed if the underlying block device has a small
583 * max_phys_segments value.
584 */
585static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
586{
587 if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
588 /*
589 * The cdrom device can handle one segment/frame
590 */
591 clear_bit(PACKET_MERGE_SEGS, &pd->flags);
592 return 0;
593 } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
594 /*
595 * We can handle this case at the expense of some extra memory
596 * copies during write operations
597 */
598 set_bit(PACKET_MERGE_SEGS, &pd->flags);
599 return 0;
600 } else {
601 printk("pktcdvd: cdrom max_phys_segments too small\n");
602 return -EIO;
603 }
604}
605
606/*
607 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
608 */
609static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
610{
611 unsigned int copy_size = CD_FRAMESIZE;
612
613 while (copy_size > 0) {
614 struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
615 void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
616 src_bvl->bv_offset + offs;
617 void *vto = page_address(dst_page) + dst_offs;
618 int len = min_t(int, copy_size, src_bvl->bv_len - offs);
619
620 BUG_ON(len < 0);
621 memcpy(vto, vfrom, len);
622 kunmap_atomic(vfrom, KM_USER0);
623
624 seg++;
625 offs = 0;
626 dst_offs += len;
627 copy_size -= len;
628 }
629}
630
631/*
632 * Copy all data for this packet to pkt->pages[], so that
633 * a) The number of required segments for the write bio is minimized, which
634 * is necessary for some scsi controllers.
635 * b) The data can be used as cache to avoid read requests if we receive a
636 * new write request for the same zone.
637 */
638static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
639{
640 int f, p, offs;
641
642 /* Copy all data to pkt->pages[] */
643 p = 0;
644 offs = 0;
645 for (f = 0; f < pkt->frames; f++) {
646 if (pages[f] != pkt->pages[p]) {
647 void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
648 void *vto = page_address(pkt->pages[p]) + offs;
649 memcpy(vto, vfrom, CD_FRAMESIZE);
650 kunmap_atomic(vfrom, KM_USER0);
651 pages[f] = pkt->pages[p];
652 offsets[f] = offs;
653 } else {
654 BUG_ON(offsets[f] != offs);
655 }
656 offs += CD_FRAMESIZE;
657 if (offs >= PAGE_SIZE) {
658 BUG_ON(offs > PAGE_SIZE);
659 offs = 0;
660 p++;
661 }
662 }
663}
664
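/* Completion handler for the per-frame read bios issued by pkt_gather_data;
 * wakes up kcdrwd once the last outstanding read for a packet finishes. */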
665static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
666{
667 struct packet_data *pkt = bio->bi_private;
668 struct pktcdvd_device *pd = pkt->pd;
669 BUG_ON(!pd);
670
671 if (bio->bi_size)
672 return 1;
673
674 VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
675 (unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
676
677 if (err)
678 atomic_inc(&pkt->io_errors);
679 if (atomic_dec_and_test(&pkt->io_wait)) {
680 atomic_inc(&pkt->run_sm);
681 wake_up(&pd->wqueue);
682 }
683 pkt_bio_finished(pd);
684
685 return 0;
686}
687
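/* Completion handler for a packet's assembled write bio (pkt->w_bio). */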
688static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
689{
690 struct packet_data *pkt = bio->bi_private;
691 struct pktcdvd_device *pd = pkt->pd;
692 BUG_ON(!pd);
693
694 if (bio->bi_size)
695 return 1;
696
697 VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
698
699 pd->stats.pkt_ended++;
700
701 pkt_bio_finished(pd);
702 atomic_dec(&pkt->io_wait);
703 atomic_inc(&pkt->run_sm);
704 wake_up(&pd->wqueue);
705 return 0;
706}
707
708/*
709 * Schedule reads for the holes in a packet
710 */
711static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
712{
713 int frames_read = 0;
714 struct bio *bio;
715 int f;
716 char written[PACKET_MAX_SIZE];
717
718 BUG_ON(!pkt->orig_bios);
719
720 atomic_set(&pkt->io_wait, 0);
721 atomic_set(&pkt->io_errors, 0);
722
723 if (pkt->cache_valid) {
724 VPRINTK("pkt_gather_data: zone %llx cached\n",
725 (unsigned long long)pkt->sector);
726 goto out_account;
727 }
728
729 /*
730 * Figure out which frames we need to read before we can write.
731 */
732 memset(written, 0, sizeof(written));
733 spin_lock(&pkt->lock);
734 for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
735 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
736 int num_frames = bio->bi_size / CD_FRAMESIZE;
737 BUG_ON(first_frame < 0);
738 BUG_ON(first_frame + num_frames > pkt->frames);
739 for (f = first_frame; f < first_frame + num_frames; f++)
740 written[f] = 1;
741 }
742 spin_unlock(&pkt->lock);
743
744 /*
745 * Schedule reads for missing parts of the packet.
746 */
747 for (f = 0; f < pkt->frames; f++) {
748 int p, offset;
749 if (written[f])
750 continue;
751 bio = pkt->r_bios[f];
752 bio_init(bio);
753 bio->bi_max_vecs = 1;
754 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
755 bio->bi_bdev = pd->bdev;
756 bio->bi_end_io = pkt_end_io_read;
757 bio->bi_private = pkt;
758
759 p = (f * CD_FRAMESIZE) / PAGE_SIZE;
760 offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
761 VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
762 f, pkt->pages[p], offset);
763 if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
764 BUG();
765
766 atomic_inc(&pkt->io_wait);
767 bio->bi_rw = READ;
768 pkt_queue_bio(pd, bio, 0);
769 frames_read++;
770 }
771
772out_account:
773 VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
774 frames_read, (unsigned long long)pkt->sector);
775 pd->stats.pkt_started++;
776 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
777 pd->stats.secs_w += pd->settings.size;
778}
779
780/*
781 * Find a packet matching zone, or the least recently used packet if
782 * there is no match.
783 */
784static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
785{
786 struct packet_data *pkt;
787
788 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
789 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
790 list_del_init(&pkt->list);
791 if (pkt->sector != zone)
792 pkt->cache_valid = 0;
793 break;
794 }
795 }
796 return pkt;
797}
798
799static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
800{
801 if (pkt->cache_valid) {
802 list_add(&pkt->list, &pd->cdrw.pkt_free_list);
803 } else {
804 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
805 }
806}
807
808/*
809 * recover a failed write, query for relocation if possible
810 *
811 * returns 1 if recovery is possible, or 0 if not
812 *
813 */
814static int pkt_start_recovery(struct packet_data *pkt)
815{
816 /*
817 * FIXME. We need help from the file system to implement
818 * recovery handling.
819 */
820 return 0;
821#if 0
822 struct request *rq = pkt->rq;
823 struct pktcdvd_device *pd = rq->rq_disk->private_data;
824 struct block_device *pkt_bdev;
825 struct super_block *sb = NULL;
826 unsigned long old_block, new_block;
827 sector_t new_sector;
828
829 pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
830 if (pkt_bdev) {
831 sb = get_super(pkt_bdev);
832 bdput(pkt_bdev);
833 }
834
835 if (!sb)
836 return 0;
837
838 if (!sb->s_op || !sb->s_op->relocate_blocks)
839 goto out;
840
841 old_block = pkt->sector / (CD_FRAMESIZE >> 9);
842 if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
843 goto out;
844
845 new_sector = new_block * (CD_FRAMESIZE >> 9);
846 pkt->sector = new_sector;
847
848 pkt->bio->bi_sector = new_sector;
849 pkt->bio->bi_next = NULL;
850 pkt->bio->bi_flags = 1 << BIO_UPTODATE;
851 pkt->bio->bi_idx = 0;
852
853 BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
854 BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
855 BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
856 BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
857 BUG_ON(pkt->bio->bi_private != pkt);
858
859 drop_super(sb);
860 return 1;
861
862out:
863 drop_super(sb);
864 return 0;
865#endif
866}
867
868static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
869{
870#if PACKET_DEBUG > 1
871 static const char *state_name[] = {
872 "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
873 };
874 enum packet_data_state old_state = pkt->state;
875 VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
876 state_name[old_state], state_name[state]);
877#endif
878 pkt->state = state;
879}
880
881/*
882 * Scan the work queue to see if we can start a new packet.
883 * returns non-zero if any work was done.
884 */
885static int pkt_handle_queue(struct pktcdvd_device *pd)
886{
887 struct packet_data *pkt, *p;
888 struct bio *bio = NULL;
889 sector_t zone = 0; /* Suppress gcc warning */
890 struct pkt_rb_node *node, *first_node;
891 struct rb_node *n;
892
893 VPRINTK("handle_queue\n");
894
895 atomic_set(&pd->scan_queue, 0);
896
897 if (list_empty(&pd->cdrw.pkt_free_list)) {
898 VPRINTK("handle_queue: no pkt\n");
899 return 0;
900 }
901
902 /*
903 * Try to find a zone we are not already working on.
904 */
905 spin_lock(&pd->lock);
906 first_node = pkt_rbtree_find(pd, pd->current_sector);
907 if (!first_node) {
908 n = rb_first(&pd->bio_queue);
909 if (n)
910 first_node = rb_entry(n, struct pkt_rb_node, rb_node);
911 }
912 node = first_node;
913 while (node) {
914 bio = node->bio;
915 zone = ZONE(bio->bi_sector, pd);
916 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
917 if (p->sector == zone)
918 goto try_next_bio;
919 }
920 break;
921try_next_bio:
922 node = pkt_rbtree_next(node);
923 if (!node) {
924 n = rb_first(&pd->bio_queue);
925 if (n)
926 node = rb_entry(n, struct pkt_rb_node, rb_node);
927 }
928 if (node == first_node)
929 node = NULL;
930 }
931 spin_unlock(&pd->lock);
932 if (!bio) {
933 VPRINTK("handle_queue: no bio\n");
934 return 0;
935 }
936
937 pkt = pkt_get_packet_data(pd, zone);
938 BUG_ON(!pkt);
939
940 pd->current_sector = zone + pd->settings.size;
941 pkt->sector = zone;
942 pkt->frames = pd->settings.size >> 2;
943 BUG_ON(pkt->frames > PACKET_MAX_SIZE);
944 pkt->write_size = 0;
945
946 /*
947 * Scan work queue for bios in the same zone and link them
948 * to this packet.
949 */
950 spin_lock(&pd->lock);
951 VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
952 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
953 bio = node->bio;
954 VPRINTK("pkt_handle_queue: found zone=%llx\n",
955 (unsigned long long)ZONE(bio->bi_sector, pd));
956 if (ZONE(bio->bi_sector, pd) != zone)
957 break;
958 pkt_rbtree_erase(pd, node);
959 spin_lock(&pkt->lock);
960 pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
961 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
962 spin_unlock(&pkt->lock);
963 }
964 spin_unlock(&pd->lock);
965
966 pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
967 pkt_set_state(pkt, PACKET_WAITING_STATE);
968 atomic_set(&pkt->run_sm, 1);
969
970 spin_lock(&pd->cdrw.active_list_lock);
971 list_add(&pkt->list, &pd->cdrw.pkt_active_list);
972 spin_unlock(&pd->cdrw.active_list_lock);
973
974 return 1;
975}
976
977/*
978 * Assemble a bio to write one packet and queue the bio for processing
979 * by the underlying block device.
980 */
981static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
982{
983 struct bio *bio;
984 struct page *pages[PACKET_MAX_SIZE];
985 int offsets[PACKET_MAX_SIZE];
986 int f;
987 int frames_write;
988
989 for (f = 0; f < pkt->frames; f++) {
990 pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
991 offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
992 }
993
994 /*
995 * Fill-in pages[] and offsets[] with data from orig_bios.
996 */
997 frames_write = 0;
998 spin_lock(&pkt->lock);
999 for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
1000 int segment = bio->bi_idx;
1001 int src_offs = 0;
1002 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
1003 int num_frames = bio->bi_size / CD_FRAMESIZE;
1004 BUG_ON(first_frame < 0);
1005 BUG_ON(first_frame + num_frames > pkt->frames);
1006 for (f = first_frame; f < first_frame + num_frames; f++) {
1007 struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
1008
1009 while (src_offs >= src_bvl->bv_len) {
1010 src_offs -= src_bvl->bv_len;
1011 segment++;
1012 BUG_ON(segment >= bio->bi_vcnt);
1013 src_bvl = bio_iovec_idx(bio, segment);
1014 }
1015
1016 if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
1017 pages[f] = src_bvl->bv_page;
1018 offsets[f] = src_bvl->bv_offset + src_offs;
1019 } else {
1020 pkt_copy_bio_data(bio, segment, src_offs,
1021 pages[f], offsets[f]);
1022 }
1023 src_offs += CD_FRAMESIZE;
1024 frames_write++;
1025 }
1026 }
1027 pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
1028 spin_unlock(&pkt->lock);
1029
1030 VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
1031 frames_write, (unsigned long long)pkt->sector);
1032 BUG_ON(frames_write != pkt->write_size);
1033
1034 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
1035 pkt_make_local_copy(pkt, pages, offsets);
1036 pkt->cache_valid = 1;
1037 } else {
1038 pkt->cache_valid = 0;
1039 }
1040
1041 /* Start the write request */
1042 bio_init(pkt->w_bio);
1043 pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
1044 pkt->w_bio->bi_sector = pkt->sector;
1045 pkt->w_bio->bi_bdev = pd->bdev;
1046 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1047 pkt->w_bio->bi_private = pkt;
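	/* Merge two adjacent frames into a single bio_vec when they are
	 * contiguous within the same page, otherwise add them one by one. */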
1048 for (f = 0; f < pkt->frames; f++) {
1049 if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
1050			    (offsets[f + 1] == offsets[f] + CD_FRAMESIZE)) {
1051 if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
1052 BUG();
1053 f++;
1054 } else {
1055 if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
1056 BUG();
1057 }
1058 }
1059 VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
1060
1061 atomic_set(&pkt->io_wait, 1);
1062 pkt->w_bio->bi_rw = WRITE;
1063 pkt_queue_bio(pd, pkt->w_bio, 0);
1064}
1065
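/* Complete all original bios attached to this packet with the result of the
 * packet write and detach them from the packet. */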
1066static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
1067{
1068 struct bio *bio, *next;
1069
1070 if (!uptodate)
1071 pkt->cache_valid = 0;
1072
1073 /* Finish all bios corresponding to this packet */
1074 bio = pkt->orig_bios;
1075 while (bio) {
1076 next = bio->bi_next;
1077 bio->bi_next = NULL;
1078 bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
1079 bio = next;
1080 }
1081 pkt->orig_bios = pkt->orig_bios_tail = NULL;
1082}
1083
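/* Advance a packet through its states: WAITING -> READ_WAIT -> WRITE_WAIT ->
 * FINISHED, entering RECOVERY if a read or write fails. */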
1084static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1085{
1086 int uptodate;
1087
1088 VPRINTK("run_state_machine: pkt %d\n", pkt->id);
1089
1090 for (;;) {
1091 switch (pkt->state) {
1092 case PACKET_WAITING_STATE:
1093 if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
1094 return;
1095
1096 pkt->sleep_time = 0;
1097 pkt_gather_data(pd, pkt);
1098 pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
1099 break;
1100
1101 case PACKET_READ_WAIT_STATE:
1102 if (atomic_read(&pkt->io_wait) > 0)
1103 return;
1104
1105 if (atomic_read(&pkt->io_errors) > 0) {
1106 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1107 } else {
1108 pkt_start_write(pd, pkt);
1109 }
1110 break;
1111
1112 case PACKET_WRITE_WAIT_STATE:
1113 if (atomic_read(&pkt->io_wait) > 0)
1114 return;
1115
1116 if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
1117 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1118 } else {
1119 pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1120 }
1121 break;
1122
1123 case PACKET_RECOVERY_STATE:
1124 if (pkt_start_recovery(pkt)) {
1125 pkt_start_write(pd, pkt);
1126 } else {
1127 VPRINTK("No recovery possible\n");
1128 pkt_set_state(pkt, PACKET_FINISHED_STATE);
1129 }
1130 break;
1131
1132 case PACKET_FINISHED_STATE:
1133 uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
1134 pkt_finish_packet(pkt, uptodate);
1135 return;
1136
1137 default:
1138 BUG();
1139 break;
1140 }
1141 }
1142}
1143
1144static void pkt_handle_packets(struct pktcdvd_device *pd)
1145{
1146 struct packet_data *pkt, *next;
1147
1148 VPRINTK("pkt_handle_packets\n");
1149
1150 /*
1151 * Run state machine for active packets
1152 */
1153 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1154 if (atomic_read(&pkt->run_sm) > 0) {
1155 atomic_set(&pkt->run_sm, 0);
1156 pkt_run_state_machine(pd, pkt);
1157 }
1158 }
1159
1160 /*
1161 * Move no longer active packets to the free list
1162 */
1163 spin_lock(&pd->cdrw.active_list_lock);
1164 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1165 if (pkt->state == PACKET_FINISHED_STATE) {
1166 list_del(&pkt->list);
1167 pkt_put_packet_data(pd, pkt);
1168 pkt_set_state(pkt, PACKET_IDLE_STATE);
1169 atomic_set(&pd->scan_queue, 1);
1170 }
1171 }
1172 spin_unlock(&pd->cdrw.active_list_lock);
1173}
1174
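/* Count the active packets in each state, for kcdrwd's debug output. */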
1175static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1176{
1177 struct packet_data *pkt;
1178 int i;
1179
1180	for (i = 0; i < PACKET_NUM_STATES; i++)
1181 states[i] = 0;
1182
1183 spin_lock(&pd->cdrw.active_list_lock);
1184 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1185 states[pkt->state]++;
1186 }
1187 spin_unlock(&pd->cdrw.active_list_lock);
1188}
1189
1190/*
1191 * kcdrwd is woken up when writes have been queued for one of our
1192 * registered devices
1193 */
1194static int kcdrwd(void *foobar)
1195{
1196 struct pktcdvd_device *pd = foobar;
1197 struct packet_data *pkt;
1198 long min_sleep_time, residue;
1199
1200 set_user_nice(current, -20);
1201
1202 for (;;) {
1203 DECLARE_WAITQUEUE(wait, current);
1204
1205 /*
1206 * Wait until there is something to do
1207 */
1208 add_wait_queue(&pd->wqueue, &wait);
1209 for (;;) {
1210 set_current_state(TASK_INTERRUPTIBLE);
1211
1212 /* Check if we need to run pkt_handle_queue */
1213 if (atomic_read(&pd->scan_queue) > 0)
1214 goto work_to_do;
1215
1216 /* Check if we need to run the state machine for some packet */
1217 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1218 if (atomic_read(&pkt->run_sm) > 0)
1219 goto work_to_do;
1220 }
1221
1222 /* Check if we need to process the iosched queues */
1223 if (atomic_read(&pd->iosched.attention) != 0)
1224 goto work_to_do;
1225
1226 /* Otherwise, go to sleep */
1227 if (PACKET_DEBUG > 1) {
1228 int states[PACKET_NUM_STATES];
1229 pkt_count_states(pd, states);
1230 VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1231 states[0], states[1], states[2], states[3],
1232 states[4], states[5]);
1233 }
1234
1235 min_sleep_time = MAX_SCHEDULE_TIMEOUT;
1236 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1237 if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
1238 min_sleep_time = pkt->sleep_time;
1239 }
1240
1241 generic_unplug_device(bdev_get_queue(pd->bdev));
1242
1243 VPRINTK("kcdrwd: sleeping\n");
1244 residue = schedule_timeout(min_sleep_time);
1245 VPRINTK("kcdrwd: wake up\n");
1246
1247 /* make swsusp happy with our thread */
1248 if (current->flags & PF_FREEZE)
1249 refrigerator(PF_FREEZE);
1250
1251 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1252 if (!pkt->sleep_time)
1253 continue;
1254 pkt->sleep_time -= min_sleep_time - residue;
1255 if (pkt->sleep_time <= 0) {
1256 pkt->sleep_time = 0;
1257 atomic_inc(&pkt->run_sm);
1258 }
1259 }
1260
1261 if (signal_pending(current)) {
1262 flush_signals(current);
1263 }
1264 if (kthread_should_stop())
1265 break;
1266 }
1267work_to_do:
1268 set_current_state(TASK_RUNNING);
1269 remove_wait_queue(&pd->wqueue, &wait);
1270
1271 if (kthread_should_stop())
1272 break;
1273
1274 /*
1275 * if pkt_handle_queue returns true, we can queue
1276 * another request.
1277 */
1278 while (pkt_handle_queue(pd))
1279 ;
1280
1281 /*
1282 * Handle packet state machine
1283 */
1284 pkt_handle_packets(pd);
1285
1286 /*
1287 * Handle iosched queues
1288 */
1289 pkt_iosched_process_queue(pd);
1290 }
1291
1292 return 0;
1293}
1294
1295static void pkt_print_settings(struct pktcdvd_device *pd)
1296{
1297 printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1298 printk("%u blocks, ", pd->settings.size >> 2);
1299 printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1300}
1301
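/* Thin wrappers around MODE SENSE(10) and MODE SELECT(10). */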
1302static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1303{
1304 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1305
1306 cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1307 cgc->cmd[2] = page_code | (page_control << 6);
1308 cgc->cmd[7] = cgc->buflen >> 8;
1309 cgc->cmd[8] = cgc->buflen & 0xff;
1310 cgc->data_direction = CGC_DATA_READ;
1311 return pkt_generic_packet(pd, cgc);
1312}
1313
1314static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1315{
1316 memset(cgc->cmd, 0, sizeof(cgc->cmd));
1317 memset(cgc->buffer, 0, 2);
1318 cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1319 cgc->cmd[1] = 0x10; /* PF */
1320 cgc->cmd[7] = cgc->buflen >> 8;
1321 cgc->cmd[8] = cgc->buflen & 0xff;
1322 cgc->data_direction = CGC_DATA_WRITE;
1323 return pkt_generic_packet(pd, cgc);
1324}
1325
1326static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1327{
1328 struct packet_command cgc;
1329 int ret;
1330
1331 /* set up command and get the disc info */
1332 init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1333 cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1334 cgc.cmd[8] = cgc.buflen = 2;
1335 cgc.quiet = 1;
1336
1337 if ((ret = pkt_generic_packet(pd, &cgc)))
1338 return ret;
1339
1340 /* not all drives have the same disc_info length, so requeue
1341 * packet with the length the drive tells us it can supply
1342 */
1343 cgc.buflen = be16_to_cpu(di->disc_information_length) +
1344 sizeof(di->disc_information_length);
1345
1346 if (cgc.buflen > sizeof(disc_information))
1347 cgc.buflen = sizeof(disc_information);
1348
1349 cgc.cmd[8] = cgc.buflen;
1350 return pkt_generic_packet(pd, &cgc);
1351}
1352
1353static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1354{
1355 struct packet_command cgc;
1356 int ret;
1357
1358 init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1359 cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1360 cgc.cmd[1] = type & 3;
1361 cgc.cmd[4] = (track & 0xff00) >> 8;
1362 cgc.cmd[5] = track & 0xff;
1363 cgc.cmd[8] = 8;
1364 cgc.quiet = 1;
1365
1366 if ((ret = pkt_generic_packet(pd, &cgc)))
1367 return ret;
1368
1369 cgc.buflen = be16_to_cpu(ti->track_information_length) +
1370 sizeof(ti->track_information_length);
1371
1372 if (cgc.buflen > sizeof(track_information))
1373 cgc.buflen = sizeof(track_information);
1374
1375 cgc.cmd[8] = cgc.buflen;
1376 return pkt_generic_packet(pd, &cgc);
1377}
1378
1379static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
1380{
1381 disc_information di;
1382 track_information ti;
1383 __u32 last_track;
1384 int ret = -1;
1385
1386 if ((ret = pkt_get_disc_info(pd, &di)))
1387 return ret;
1388
1389 last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1390 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1391 return ret;
1392
1393 /* if this track is blank, try the previous. */
1394 if (ti.blank) {
1395 last_track--;
1396 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1397 return ret;
1398 }
1399
1400 /* if last recorded field is valid, return it. */
1401 if (ti.lra_v) {
1402 *last_written = be32_to_cpu(ti.last_rec_address);
1403 } else {
1404 /* make it up instead */
1405 *last_written = be32_to_cpu(ti.track_start) +
1406 be32_to_cpu(ti.track_size);
1407 if (ti.free_blocks)
1408 *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1409 }
1410 return 0;
1411}
1412
1413/*
1414 * write mode select package based on pd->settings
1415 */
1416static int pkt_set_write_settings(struct pktcdvd_device *pd)
1417{
1418 struct packet_command cgc;
1419 struct request_sense sense;
1420 write_param_page *wp;
1421 char buffer[128];
1422 int ret, size;
1423
1424 /* doesn't apply to DVD+RW or DVD-RAM */
1425 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1426 return 0;
1427
1428 memset(buffer, 0, sizeof(buffer));
1429 init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1430 cgc.sense = &sense;
1431 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1432 pkt_dump_sense(&cgc);
1433 return ret;
1434 }
1435
1436 size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1437 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1438 if (size > sizeof(buffer))
1439 size = sizeof(buffer);
1440
1441 /*
1442 * now get it all
1443 */
1444 init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1445 cgc.sense = &sense;
1446 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1447 pkt_dump_sense(&cgc);
1448 return ret;
1449 }
1450
1451 /*
1452 * write page is offset header + block descriptor length
1453 */
1454 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1455
1456 wp->fp = pd->settings.fp;
1457 wp->track_mode = pd->settings.track_mode;
1458 wp->write_type = pd->settings.write_type;
1459 wp->data_block_type = pd->settings.block_mode;
1460
1461 wp->multi_session = 0;
1462
1463#ifdef PACKET_USE_LS
1464 wp->link_size = 7;
1465 wp->ls_v = 1;
1466#endif
1467
1468 if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1469 wp->session_format = 0;
1470 wp->subhdr2 = 0x20;
1471 } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1472 wp->session_format = 0x20;
1473 wp->subhdr2 = 8;
1474#if 0
1475 wp->mcn[0] = 0x80;
1476 memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1477#endif
1478 } else {
1479 /*
1480 * paranoia
1481 */
1482 printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
1483 return 1;
1484 }
1485 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1486
1487 cgc.buflen = cgc.cmd[8] = size;
1488 if ((ret = pkt_mode_select(pd, &cgc))) {
1489 pkt_dump_sense(&cgc);
1490 return ret;
1491 }
1492
1493 pkt_print_settings(pd);
1494 return 0;
1495}
1496
1497/*
1498 * 0 -- we can write to this track, 1 -- we can't
1499 */
1500static int pkt_good_track(track_information *ti)
1501{
1502 /*
1503 * only good for CD-RW at the moment, not DVD-RW
1504 */
1505
1506 /*
1507 * FIXME: only for FP
1508 */
1509 if (ti->fp == 0)
1510 return 0;
1511
1512 /*
1513 * "good" settings as per Mt Fuji.
1514 */
1515 if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1516 return 0;
1517
1518 if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1519 return 0;
1520
1521 if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1522 return 0;
1523
1524 printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1525 return 1;
1526}
1527
1528/*
1529 * 0 -- we can write to this disc, 1 -- we can't
1530 */
1531static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1532{
1533 switch (pd->mmc3_profile) {
1534 case 0x0a: /* CD-RW */
1535 case 0xffff: /* MMC3 not supported */
1536 break;
1537 case 0x1a: /* DVD+RW */
1538 case 0x13: /* DVD-RW */
1539 case 0x12: /* DVD-RAM */
1540 return 0;
1541 default:
1542 printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
1543 return 1;
1544 }
1545
1546 /*
1547 * for disc type 0xff we should probably reserve a new track.
1548 * but i'm not sure, should we leave this to user apps? probably.
1549 */
1550 if (di->disc_type == 0xff) {
1551 printk("pktcdvd: Unknown disc. No track?\n");
1552 return 1;
1553 }
1554
1555 if (di->disc_type != 0x20 && di->disc_type != 0) {
1556 printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1557 return 1;
1558 }
1559
1560 if (di->erasable == 0) {
1561 printk("pktcdvd: Disc not erasable\n");
1562 return 1;
1563 }
1564
1565 if (di->border_status == PACKET_SESSION_RESERVED) {
1566 printk("pktcdvd: Can't write to last track (reserved)\n");
1567 return 1;
1568 }
1569
1570 return 0;
1571}
1572
1573static int pkt_probe_settings(struct pktcdvd_device *pd)
1574{
1575 struct packet_command cgc;
1576 unsigned char buf[12];
1577 disc_information di;
1578 track_information ti;
1579 int ret, track;
1580
1581 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1582 cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1583 cgc.cmd[8] = 8;
1584 ret = pkt_generic_packet(pd, &cgc);
1585 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1586
1587 memset(&di, 0, sizeof(disc_information));
1588 memset(&ti, 0, sizeof(track_information));
1589
1590 if ((ret = pkt_get_disc_info(pd, &di))) {
1591 printk("failed get_disc\n");
1592 return ret;
1593 }
1594
1595 if (pkt_good_disc(pd, &di))
1596 return -ENXIO;
1597
1598 switch (pd->mmc3_profile) {
1599 case 0x1a: /* DVD+RW */
1600 printk("pktcdvd: inserted media is DVD+RW\n");
1601 break;
1602 case 0x13: /* DVD-RW */
1603 printk("pktcdvd: inserted media is DVD-RW\n");
1604 break;
1605 case 0x12: /* DVD-RAM */
1606 printk("pktcdvd: inserted media is DVD-RAM\n");
1607 break;
1608 default:
1609 printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1610 break;
1611 }
1612 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1613
1614 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1615 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
1616 printk("pktcdvd: failed get_track\n");
1617 return ret;
1618 }
1619
1620 if (pkt_good_track(&ti)) {
1621 printk("pktcdvd: can't write to this track\n");
1622 return -ENXIO;
1623 }
1624
1625 /*
1626 * we keep packet size in 512 byte units, makes it easier to
1627 * deal with request calculations.
1628 */
1629 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1630 if (pd->settings.size == 0) {
1631 printk("pktcdvd: detected zero packet size!\n");
1632 pd->settings.size = 128;
1633 }
1634 pd->settings.fp = ti.fp;
1635 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1636
1637 if (ti.nwa_v) {
1638 pd->nwa = be32_to_cpu(ti.next_writable);
1639 set_bit(PACKET_NWA_VALID, &pd->flags);
1640 }
1641
1642 /*
1643 * in theory we could use lra on -RW media as well and just zero
1644 * blocks that haven't been written yet, but in practice that
1645 * is just a no-go. we'll use that for -R, naturally.
1646 */
1647 if (ti.lra_v) {
1648 pd->lra = be32_to_cpu(ti.last_rec_address);
1649 set_bit(PACKET_LRA_VALID, &pd->flags);
1650 } else {
1651 pd->lra = 0xffffffff;
1652 set_bit(PACKET_LRA_VALID, &pd->flags);
1653 }
1654
1655 /*
1656 * fine for now
1657 */
1658 pd->settings.link_loss = 7;
1659 pd->settings.write_type = 0; /* packet */
1660 pd->settings.track_mode = ti.track_mode;
1661
1662 /*
1663 * mode1 or mode2 disc
1664 */
1665 switch (ti.data_mode) {
1666 case PACKET_MODE1:
1667 pd->settings.block_mode = PACKET_BLOCK_MODE1;
1668 break;
1669 case PACKET_MODE2:
1670 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1671 break;
1672 default:
1673 printk("pktcdvd: unknown data mode\n");
1674 return 1;
1675 }
1676 return 0;
1677}
1678
1679/*
1680 * enable/disable write caching on drive
1681 */
1682static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1683{
1684 struct packet_command cgc;
1685 struct request_sense sense;
1686 unsigned char buf[64];
1687 int ret;
1688
1689 memset(buf, 0, sizeof(buf));
1690 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1691 cgc.sense = &sense;
1692 cgc.buflen = pd->mode_offset + 12;
1693
1694 /*
1695 * caching mode page might not be there, so quiet this command
1696 */
1697 cgc.quiet = 1;
1698
1699 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
1700 return ret;
1701
1702 buf[pd->mode_offset + 10] |= (!!set << 2);
1703
1704 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1705 ret = pkt_mode_select(pd, &cgc);
1706 if (ret) {
1707 printk("pktcdvd: write caching control failed\n");
1708 pkt_dump_sense(&cgc);
1709	} else if (set)
1710 printk("pktcdvd: enabled write caching on %s\n", pd->name);
1711 return ret;
1712}
1713
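/* Lock or unlock the drive door (PREVENT ALLOW MEDIUM REMOVAL). */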
1714static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1715{
1716 struct packet_command cgc;
1717
1718 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1719 cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1720 cgc.cmd[4] = lockflag ? 1 : 0;
1721 return pkt_generic_packet(pd, &cgc);
1722}
1723
1724/*
1725 * Returns drive maximum write speed
1726 */
1727static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
1728{
1729 struct packet_command cgc;
1730 struct request_sense sense;
1731 unsigned char buf[256+18];
1732 unsigned char *cap_buf;
1733 int ret, offset;
1734
1735 memset(buf, 0, sizeof(buf));
1736 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1737 init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1738 cgc.sense = &sense;
1739
1740 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1741 if (ret) {
1742 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1743 sizeof(struct mode_page_header);
1744 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1745 if (ret) {
1746 pkt_dump_sense(&cgc);
1747 return ret;
1748 }
1749 }
1750
1751 offset = 20; /* Obsoleted field, used by older drives */
1752 if (cap_buf[1] >= 28)
1753 offset = 28; /* Current write speed selected */
1754 if (cap_buf[1] >= 30) {
1755 /* If the drive reports at least one "Logical Unit Write
1756 * Speed Performance Descriptor Block", use the information
1757 * in the first block. (contains the highest speed)
1758 */
1759 int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1760 if (num_spdb > 0)
1761 offset = 34;
1762 }
1763
1764 *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1765 return 0;
1766}
1767
1768/* These tables from cdrecord - I don't have orange book */
1769/* standard speed CD-RW (1-4x) */
1770static char clv_to_speed[16] = {
1771 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1772 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1773};
1774/* high speed CD-RW (-10x) */
1775static char hs_clv_to_speed[16] = {
1776 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1777 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1778};
1779/* ultra high speed CD-RW */
1780static char us_clv_to_speed[16] = {
1781 /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
1782 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1783};
1784
1785/*
1786 * reads the maximum media speed from ATIP
1787 */
1788static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
1789{
1790 struct packet_command cgc;
1791 struct request_sense sense;
1792 unsigned char buf[64];
1793 unsigned int size, st, sp;
1794 int ret;
1795
1796 init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1797 cgc.sense = &sense;
1798 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1799 cgc.cmd[1] = 2;
1800 cgc.cmd[2] = 4; /* READ ATIP */
1801 cgc.cmd[8] = 2;
1802 ret = pkt_generic_packet(pd, &cgc);
1803 if (ret) {
1804 pkt_dump_sense(&cgc);
1805 return ret;
1806 }
1807 size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
1808 if (size > sizeof(buf))
1809 size = sizeof(buf);
1810
1811 init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
1812 cgc.sense = &sense;
1813 cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1814 cgc.cmd[1] = 2;
1815 cgc.cmd[2] = 4;
1816 cgc.cmd[8] = size;
1817 ret = pkt_generic_packet(pd, &cgc);
1818 if (ret) {
1819 pkt_dump_sense(&cgc);
1820 return ret;
1821 }
1822
1823	if (!(buf[6] & 0x40)) {
1824 printk("pktcdvd: Disc type is not CD-RW\n");
1825 return 1;
1826 }
1827	if (!(buf[6] & 0x4)) {
1828 printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n");
1829 return 1;
1830 }
1831
1832 st = (buf[6] >> 3) & 0x7; /* disc sub-type */
1833
1834 sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
1835
1836 /* Info from cdrecord */
1837 switch (st) {
1838 case 0: /* standard speed */
1839 *speed = clv_to_speed[sp];
1840 break;
1841 case 1: /* high speed */
1842 *speed = hs_clv_to_speed[sp];
1843 break;
1844 case 2: /* ultra high speed */
1845 *speed = us_clv_to_speed[sp];
1846 break;
1847 default:
1848 printk("pktcdvd: Unknown disc sub-type %d\n",st);
1849 return 1;
1850 }
1851 if (*speed) {
1852 printk("pktcdvd: Max. media speed: %d\n",*speed);
1853 return 0;
1854 } else {
1855 printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st);
1856 return 1;
1857 }
1858}
1859
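/* Ask the drive to perform Optimum Power Calibration (SEND OPC). */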
1860static int pkt_perform_opc(struct pktcdvd_device *pd)
1861{
1862 struct packet_command cgc;
1863 struct request_sense sense;
1864 int ret;
1865
1866 VPRINTK("pktcdvd: Performing OPC\n");
1867
1868 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1869 cgc.sense = &sense;
1870 cgc.timeout = 60*HZ;
1871 cgc.cmd[0] = GPCMD_SEND_OPC;
1872 cgc.cmd[1] = 1;
1873 if ((ret = pkt_generic_packet(pd, &cgc)))
1874 pkt_dump_sense(&cgc);
1875 return ret;
1876}
1877
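/* Prepare the drive for packet writing: probe disc and track settings, write
 * the mode page, set write caching and read/write speeds, and run OPC. */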
1878static int pkt_open_write(struct pktcdvd_device *pd)
1879{
1880 int ret;
1881 unsigned int write_speed, media_write_speed, read_speed;
1882
1883 if ((ret = pkt_probe_settings(pd))) {
1884 DPRINTK("pktcdvd: %s failed probe\n", pd->name);
1885 return -EIO;
1886 }
1887
1888 if ((ret = pkt_set_write_settings(pd))) {
1889 DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
1890 return -EIO;
1891 }
1892
1893 pkt_write_caching(pd, USE_WCACHING);
1894
1895 if ((ret = pkt_get_max_speed(pd, &write_speed)))
1896 write_speed = 16 * 177;
1897 switch (pd->mmc3_profile) {
1898 case 0x13: /* DVD-RW */
1899 case 0x1a: /* DVD+RW */
1900 case 0x12: /* DVD-RAM */
1901 DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
1902 break;
1903 default:
1904 if ((ret = pkt_media_speed(pd, &media_write_speed)))
1905 media_write_speed = 16;
1906 write_speed = min(write_speed, media_write_speed * 177);
1907 DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176);
1908 break;
1909 }
1910 read_speed = write_speed;
1911
1912 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
1913 DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
1914 return -EIO;
1915 }
1916 pd->write_speed = write_speed;
1917 pd->read_speed = read_speed;
1918
1919 if ((ret = pkt_perform_opc(pd))) {
1920 DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name);
1921 }
1922
1923 return 0;
1924}
1925
1926/*
1927 * called at open time.
1928 */
1929static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1930{
1931 int ret;
1932 long lba;
1933 request_queue_t *q;
1934
1935 /*
1936 * We need to re-open the cdrom device without O_NONBLOCK to be able
1937 * to read/write from/to it. It is already opened in O_NONBLOCK mode
1938 * so bdget() can't fail.
1939 */
1940 bdget(pd->bdev->bd_dev);
1941 if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
1942 goto out;
1943
1944 if ((ret = pkt_get_last_written(pd, &lba))) {
1945 printk("pktcdvd: pkt_get_last_written failed\n");
1946 goto out_putdev;
1947 }
1948
1949 set_capacity(pd->disk, lba << 2);
1950 set_capacity(pd->bdev->bd_disk, lba << 2);
1951 bd_set_size(pd->bdev, (loff_t)lba << 11);
1952
1953 q = bdev_get_queue(pd->bdev);
1954 if (write) {
1955 if ((ret = pkt_open_write(pd)))
1956 goto out_putdev;
1957 /*
1958 * Some CDRW drives can not handle writes larger than one packet,
1959 * even if the size is a multiple of the packet size.
1960 */
1961 spin_lock_irq(q->queue_lock);
1962 blk_queue_max_sectors(q, pd->settings.size);
1963 spin_unlock_irq(q->queue_lock);
1964 set_bit(PACKET_WRITABLE, &pd->flags);
1965 } else {
1966 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1967 clear_bit(PACKET_WRITABLE, &pd->flags);
1968 }
1969
1970 if ((ret = pkt_set_segment_merging(pd, q)))
1971 goto out_putdev;
1972
1973 if (write)
1974 printk("pktcdvd: %lukB available on disc\n", lba << 1);
1975
1976 return 0;
1977
1978out_putdev:
1979 blkdev_put(pd->bdev);
1980out:
1981 return ret;
1982}
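/*
 * Illustrative sketch (not part of the driver): the shifts used in
 * pkt_open_dev() above.  The last-written LBA is counted in 2048-byte CD
 * frames, set_capacity() wants 512-byte sectors and bd_set_size() wants
 * bytes, hence the different shift amounts.  Hypothetical helper, for
 * illustration only.
 */
#if 0
static void example_capacity_units(long lba_frames)
{
	sector_t sectors  = (sector_t)lba_frames << 2;	/* 2048 / 512 = 4 sectors per frame */
	loff_t bytes      = (loff_t)lba_frames << 11;	/* * 2048 bytes per frame */
	unsigned long kb  = (unsigned long)lba_frames << 1;	/* * 2 kB per frame */
}
#endif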
1983
1984/*
1985 * called when the device is closed. makes sure that the device flushes
1986 * the internal cache before we close.
1987 */
1988static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1989{
1990 if (flush && pkt_flush_cache(pd))
1991 DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
1992
1993 pkt_lock_door(pd, 0);
1994
1995 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1996 blkdev_put(pd->bdev);
1997}
1998
1999static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
2000{
2001 if (dev_minor >= MAX_WRITERS)
2002 return NULL;
2003 return pkt_devs[dev_minor];
2004}
2005
2006static int pkt_open(struct inode *inode, struct file *file)
2007{
2008 struct pktcdvd_device *pd = NULL;
2009 int ret;
2010
2011 VPRINTK("pktcdvd: entering open\n");
2012
2013 down(&ctl_mutex);
2014 pd = pkt_find_dev_from_minor(iminor(inode));
2015 if (!pd) {
2016 ret = -ENODEV;
2017 goto out;
2018 }
2019 BUG_ON(pd->refcnt < 0);
2020
2021 pd->refcnt++;
2022 if (pd->refcnt == 1) {
2023 if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
2024 ret = -EIO;
2025 goto out_dec;
2026 }
2027 /*
2028 * needed here as well, since ext2 (among others) may change
2029 * the blocksize at mount time
2030 */
2031 set_blocksize(inode->i_bdev, CD_FRAMESIZE);
2032 }
2033
2034 up(&ctl_mutex);
2035 return 0;
2036
2037out_dec:
2038 pd->refcnt--;
2039out:
2040 VPRINTK("pktcdvd: failed open (%d)\n", ret);
2041 up(&ctl_mutex);
2042 return ret;
2043}
2044
2045static int pkt_close(struct inode *inode, struct file *file)
2046{
2047 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
2048 int ret = 0;
2049
2050 down(&ctl_mutex);
2051 pd->refcnt--;
2052 BUG_ON(pd->refcnt < 0);
2053 if (pd->refcnt == 0) {
2054 int flush = test_bit(PACKET_WRITABLE, &pd->flags);
2055 pkt_release_dev(pd, flush);
2056 }
2057 up(&ctl_mutex);
2058 return ret;
2059}
2060
2061
2062static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
2063{
2064 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
2065}
2066
2067static void psd_pool_free(void *ptr, void *data)
2068{
2069 kfree(ptr);
2070}
2071
2072static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
2073{
2074 struct packet_stacked_data *psd = bio->bi_private;
2075 struct pktcdvd_device *pd = psd->pd;
2076
2077 if (bio->bi_size)
2078 return 1;
2079
2080 bio_put(bio);
2081 bio_endio(psd->bio, psd->bio->bi_size, err);
2082 mempool_free(psd, psd_pool);
2083 pkt_bio_finished(pd);
2084 return 0;
2085}
2086
2087static int pkt_make_request(request_queue_t *q, struct bio *bio)
2088{
2089 struct pktcdvd_device *pd;
2090 char b[BDEVNAME_SIZE];
2091 sector_t zone;
2092 struct packet_data *pkt;
2093 int was_empty, blocked_bio;
2094 struct pkt_rb_node *node;
2095
2096 pd = q->queuedata;
2097 if (!pd) {
2098 printk("pktcdvd: %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
2099 goto end_io;
2100 }
2101
2102 /*
2103 * Clone READ bios so we can have our own bi_end_io callback.
2104 */
2105 if (bio_data_dir(bio) == READ) {
2106 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2107 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2108
2109 psd->pd = pd;
2110 psd->bio = bio;
2111 cloned_bio->bi_bdev = pd->bdev;
2112 cloned_bio->bi_private = psd;
2113 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2114 pd->stats.secs_r += bio->bi_size >> 9;
2115 pkt_queue_bio(pd, cloned_bio, 1);
2116 return 0;
2117 }
2118
2119 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2120 printk("pktcdvd: WRITE for ro device %s (%llu)\n",
2121 pd->name, (unsigned long long)bio->bi_sector);
2122 goto end_io;
2123 }
2124
2125 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2126 printk("pktcdvd: wrong bio size\n");
2127 goto end_io;
2128 }
2129
2130 blk_queue_bounce(q, &bio);
2131
2132 zone = ZONE(bio->bi_sector, pd);
2133 VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
2134 (unsigned long long)bio->bi_sector,
2135 (unsigned long long)(bio->bi_sector + bio_sectors(bio)));
2136
2137 /* Check if we have to split the bio */
2138 {
2139 struct bio_pair *bp;
2140 sector_t last_zone;
2141 int first_sectors;
2142
2143 last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
2144 if (last_zone != zone) {
2145 BUG_ON(last_zone != zone + pd->settings.size);
2146 first_sectors = last_zone - bio->bi_sector;
2147 bp = bio_split(bio, bio_split_pool, first_sectors);
2148 BUG_ON(!bp);
2149 pkt_make_request(q, &bp->bio1);
2150 pkt_make_request(q, &bp->bio2);
2151 bio_pair_release(bp);
2152 return 0;
2153 }
2154 }
2155
2156 /*
2157 * If we find a matching packet in state WAITING or READ_WAIT, we can
2158 * just append this bio to that packet.
2159 */
2160 spin_lock(&pd->cdrw.active_list_lock);
2161 blocked_bio = 0;
2162 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
2163 if (pkt->sector == zone) {
2164 spin_lock(&pkt->lock);
2165 if ((pkt->state == PACKET_WAITING_STATE) ||
2166 (pkt->state == PACKET_READ_WAIT_STATE)) {
2167 pkt_add_list_last(bio, &pkt->orig_bios,
2168 &pkt->orig_bios_tail);
2169 pkt->write_size += bio->bi_size / CD_FRAMESIZE;
2170 if ((pkt->write_size >= pkt->frames) &&
2171 (pkt->state == PACKET_WAITING_STATE)) {
2172 atomic_inc(&pkt->run_sm);
2173 wake_up(&pd->wqueue);
2174 }
2175 spin_unlock(&pkt->lock);
2176 spin_unlock(&pd->cdrw.active_list_lock);
2177 return 0;
2178 } else {
2179 blocked_bio = 1;
2180 }
2181 spin_unlock(&pkt->lock);
2182 }
2183 }
2184 spin_unlock(&pd->cdrw.active_list_lock);
2185
2186 /*
2187 * No matching packet found. Store the bio in the work queue.
2188 */
2189 node = mempool_alloc(pd->rb_pool, GFP_NOIO);
2190 BUG_ON(!node);
2191 node->bio = bio;
2192 spin_lock(&pd->lock);
2193 BUG_ON(pd->bio_queue_size < 0);
2194 was_empty = (pd->bio_queue_size == 0);
2195 pkt_rbtree_insert(pd, node);
2196 spin_unlock(&pd->lock);
2197
2198 /*
2199 * Wake up the worker thread.
2200 */
2201 atomic_set(&pd->scan_queue, 1);
2202 if (was_empty) {
2203 /* This wake_up is required for correct operation */
2204 wake_up(&pd->wqueue);
2205 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
2206 /*
2207 * This wake up is not required for correct operation,
2208 * but improves performance in some cases.
2209 */
2210 wake_up(&pd->wqueue);
2211 }
2212 return 0;
2213end_io:
2214 bio_io_error(bio, bio->bi_size);
2215 return 0;
2216}
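/*
 * Illustrative sketch (not part of the driver): the zone bookkeeping used in
 * pkt_make_request() above.  A "zone" is the packet-aligned start sector of
 * the fixed packet containing a given sector, so a write crossing a zone
 * boundary is split into two bios, one per packet.  ZONE() is the macro this
 * driver actually uses; the helper below merely restates the idea for the
 * no-start-offset case (packet sizes are powers of two in practice) and is
 * an assumption for illustration.
 */
#if 0
static sector_t example_zone(sector_t sector, unsigned packet_sectors)
{
	/* round down to the start of the enclosing packet */
	return sector - (sector % packet_sectors);
}
#endif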
2217
2218
2219
2220static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
2221{
2222 struct pktcdvd_device *pd = q->queuedata;
2223 sector_t zone = ZONE(bio->bi_sector, pd);
2224 int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
2225 int remaining = (pd->settings.size << 9) - used;
2226 int remaining2;
2227
2228 /*
2229 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
2230 * boundary, pkt_make_request() will split the bio.
2231 */
2232 remaining2 = PAGE_SIZE - bio->bi_size;
2233 remaining = max(remaining, remaining2);
2234
2235 BUG_ON(remaining < 0);
2236 return remaining;
2237}
2238
2239static void pkt_init_queue(struct pktcdvd_device *pd)
2240{
2241 request_queue_t *q = pd->disk->queue;
2242
2243 blk_queue_make_request(q, pkt_make_request);
2244 blk_queue_hardsect_size(q, CD_FRAMESIZE);
2245 blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
2246 blk_queue_merge_bvec(q, pkt_merge_bvec);
2247 q->queuedata = pd;
2248}
2249
2250static int pkt_seq_show(struct seq_file *m, void *p)
2251{
2252 struct pktcdvd_device *pd = m->private;
2253 char *msg;
2254 char bdev_buf[BDEVNAME_SIZE];
2255 int states[PACKET_NUM_STATES];
2256
2257 seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
2258 bdevname(pd->bdev, bdev_buf));
2259
2260 seq_printf(m, "\nSettings:\n");
2261 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
2262
2263 if (pd->settings.write_type == 0)
2264 msg = "Packet";
2265 else
2266 msg = "Unknown";
2267 seq_printf(m, "\twrite type:\t\t%s\n", msg);
2268
2269 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
2270 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
2271
2272 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
2273
2274 if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
2275 msg = "Mode 1";
2276 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
2277 msg = "Mode 2";
2278 else
2279 msg = "Unknown";
2280 seq_printf(m, "\tblock mode:\t\t%s\n", msg);
2281
2282 seq_printf(m, "\nStatistics:\n");
2283 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
2284 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
2285 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
2286 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
2287 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
2288
2289 seq_printf(m, "\nMisc:\n");
2290 seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
2291 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
2292 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
2293 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
2294 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
2295 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
2296
2297 seq_printf(m, "\nQueue state:\n");
2298 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
2299 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
2300 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
2301
2302 pkt_count_states(pd, states);
2303 seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
2304 states[0], states[1], states[2], states[3], states[4], states[5]);
2305
2306 return 0;
2307}
2308
2309static int pkt_seq_open(struct inode *inode, struct file *file)
2310{
2311 return single_open(file, pkt_seq_show, PDE(inode)->data);
2312}
2313
2314static struct file_operations pkt_proc_fops = {
2315 .open = pkt_seq_open,
2316 .read = seq_read,
2317 .llseek = seq_lseek,
2318 .release = single_release
2319};
2320
2321static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2322{
2323 int i;
2324 int ret = 0;
2325 char b[BDEVNAME_SIZE];
2326 struct proc_dir_entry *proc;
2327 struct block_device *bdev;
2328
2329 if (pd->pkt_dev == dev) {
2330 printk("pktcdvd: Recursive setup not allowed\n");
2331 return -EBUSY;
2332 }
2333 for (i = 0; i < MAX_WRITERS; i++) {
2334 struct pktcdvd_device *pd2 = pkt_devs[i];
2335 if (!pd2)
2336 continue;
2337 if (pd2->bdev->bd_dev == dev) {
2338 printk("pktcdvd: %s already setup\n", bdevname(pd2->bdev, b));
2339 return -EBUSY;
2340 }
2341 if (pd2->pkt_dev == dev) {
2342 printk("pktcdvd: Can't chain pktcdvd devices\n");
2343 return -EBUSY;
2344 }
2345 }
2346
2347 bdev = bdget(dev);
2348 if (!bdev)
2349 return -ENOMEM;
2350 ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
2351 if (ret)
2352 return ret;
2353
2354 /* This is safe, since we have a reference from open(). */
2355 __module_get(THIS_MODULE);
2356
2357 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
2358 printk("pktcdvd: not enough memory for buffers\n");
2359 ret = -ENOMEM;
2360 goto out_mem;
2361 }
2362
2363 pd->bdev = bdev;
2364 set_blocksize(bdev, CD_FRAMESIZE);
2365
2366 pkt_init_queue(pd);
2367
2368 atomic_set(&pd->cdrw.pending_bios, 0);
2369 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2370 if (IS_ERR(pd->cdrw.thread)) {
2371 printk("pktcdvd: can't start kernel thread\n");
2372 ret = -ENOMEM;
2373 goto out_thread;
2374 }
2375
2376 proc = create_proc_entry(pd->name, 0, pkt_proc);
2377 if (proc) {
2378 proc->data = pd;
2379 proc->proc_fops = &pkt_proc_fops;
2380 }
2381 DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2382 return 0;
2383
2384out_thread:
2385 pkt_shrink_pktlist(pd);
2386out_mem:
2387 blkdev_put(bdev);
2388 /* This is safe: open() is still holding a reference. */
2389 module_put(THIS_MODULE);
2390 return ret;
2391}
2392
2393static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2394{
2395 struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
2396
2397 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
2398 BUG_ON(!pd);
2399
2400 switch (cmd) {
2401 /*
2402 * forward selected CDROM ioctls to CD-ROM, for UDF
2403 */
2404 case CDROMMULTISESSION:
2405 case CDROMREADTOCENTRY:
2406 case CDROM_LAST_WRITTEN:
2407 case CDROM_SEND_PACKET:
2408 case SCSI_IOCTL_SEND_COMMAND:
2409 return ioctl_by_bdev(pd->bdev, cmd, arg);
2410
2411 case CDROMEJECT:
2412 /*
2413 * The door gets locked when the device is opened, so we
2414 * have to unlock it or else the eject command fails.
2415 */
2416 pkt_lock_door(pd, 0);
2417 return ioctl_by_bdev(pd->bdev, cmd, arg);
2418
2419 default:
2420 printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
2421 return -ENOTTY;
2422 }
2423
2424 return 0;
2425}
2426
2427static int pkt_media_changed(struct gendisk *disk)
2428{
2429 struct pktcdvd_device *pd = disk->private_data;
2430 struct gendisk *attached_disk;
2431
2432 if (!pd)
2433 return 0;
2434 if (!pd->bdev)
2435 return 0;
2436 attached_disk = pd->bdev->bd_disk;
2437 if (!attached_disk)
2438 return 0;
2439 return attached_disk->fops->media_changed(attached_disk);
2440}
2441
2442static struct block_device_operations pktcdvd_ops = {
2443 .owner = THIS_MODULE,
2444 .open = pkt_open,
2445 .release = pkt_close,
2446 .ioctl = pkt_ioctl,
2447 .media_changed = pkt_media_changed,
2448};
2449
2450/*
2451 * Set up mapping from pktcdvd device to CD-ROM device.
2452 */
2453static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2454{
2455 int idx;
2456 int ret = -ENOMEM;
2457 struct pktcdvd_device *pd;
2458 struct gendisk *disk;
2459 dev_t dev = new_decode_dev(ctrl_cmd->dev);
2460
2461 for (idx = 0; idx < MAX_WRITERS; idx++)
2462 if (!pkt_devs[idx])
2463 break;
2464 if (idx == MAX_WRITERS) {
2465 printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
2466 return -EBUSY;
2467 }
2468
2469 pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
2470 if (!pd)
2471 return ret;
2472 memset(pd, 0, sizeof(struct pktcdvd_device));
2473
2474 pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
2475 if (!pd->rb_pool)
2476 goto out_mem;
2477
2478 disk = alloc_disk(1);
2479 if (!disk)
2480 goto out_mem;
2481 pd->disk = disk;
2482
2483 spin_lock_init(&pd->lock);
2484 spin_lock_init(&pd->iosched.lock);
2485 sprintf(pd->name, "pktcdvd%d", idx);
2486 init_waitqueue_head(&pd->wqueue);
2487 pd->bio_queue = RB_ROOT;
2488
2489 disk->major = pkt_major;
2490 disk->first_minor = idx;
2491 disk->fops = &pktcdvd_ops;
2492 disk->flags = GENHD_FL_REMOVABLE;
2493 sprintf(disk->disk_name, "pktcdvd%d", idx);
2494 disk->private_data = pd;
2495 disk->queue = blk_alloc_queue(GFP_KERNEL);
2496 if (!disk->queue)
2497 goto out_mem2;
2498
2499 pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
2500 ret = pkt_new_dev(pd, dev);
2501 if (ret)
2502 goto out_new_dev;
2503
2504 add_disk(disk);
2505 pkt_devs[idx] = pd;
2506 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2507 return 0;
2508
2509out_new_dev:
2510 blk_put_queue(disk->queue);
2511out_mem2:
2512 put_disk(disk);
2513out_mem:
2514 if (pd->rb_pool)
2515 mempool_destroy(pd->rb_pool);
2516 kfree(pd);
2517 return ret;
2518}
2519
2520/*
2521 * Tear down mapping from pktcdvd device to CD-ROM device.
2522 */
2523static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2524{
2525 struct pktcdvd_device *pd;
2526 int idx;
2527 dev_t pkt_dev = new_decode_dev(ctrl_cmd->pkt_dev);
2528
2529 for (idx = 0; idx < MAX_WRITERS; idx++) {
2530 pd = pkt_devs[idx];
2531 if (pd && (pd->pkt_dev == pkt_dev))
2532 break;
2533 }
2534 if (idx == MAX_WRITERS) {
2535 DPRINTK("pktcdvd: dev not setup\n");
2536 return -ENXIO;
2537 }
2538
2539 if (pd->refcnt > 0)
2540 return -EBUSY;
2541
2542 if (!IS_ERR(pd->cdrw.thread))
2543 kthread_stop(pd->cdrw.thread);
2544
2545 blkdev_put(pd->bdev);
2546
2547 pkt_shrink_pktlist(pd);
2548
2549 remove_proc_entry(pd->name, pkt_proc);
2550 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
2551
2552 del_gendisk(pd->disk);
2553 blk_put_queue(pd->disk->queue);
2554 put_disk(pd->disk);
2555
2556 pkt_devs[idx] = NULL;
2557 mempool_destroy(pd->rb_pool);
2558 kfree(pd);
2559
2560 /* This is safe: open() is still holding a reference. */
2561 module_put(THIS_MODULE);
2562 return 0;
2563}
2564
2565static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
2566{
2567 struct pktcdvd_device *pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
2568 if (pd) {
2569 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
2570 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
2571 } else {
2572 ctrl_cmd->dev = 0;
2573 ctrl_cmd->pkt_dev = 0;
2574 }
2575 ctrl_cmd->num_devices = MAX_WRITERS;
2576}
2577
2578static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2579{
2580 void __user *argp = (void __user *)arg;
2581 struct pkt_ctrl_command ctrl_cmd;
2582 int ret = 0;
2583
2584 if (cmd != PACKET_CTRL_CMD)
2585 return -ENOTTY;
2586
2587 if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
2588 return -EFAULT;
2589
2590 switch (ctrl_cmd.command) {
2591 case PKT_CTRL_CMD_SETUP:
2592 if (!capable(CAP_SYS_ADMIN))
2593 return -EPERM;
2594 down(&ctl_mutex);
2595 ret = pkt_setup_dev(&ctrl_cmd);
2596 up(&ctl_mutex);
2597 break;
2598 case PKT_CTRL_CMD_TEARDOWN:
2599 if (!capable(CAP_SYS_ADMIN))
2600 return -EPERM;
2601 down(&ctl_mutex);
2602 ret = pkt_remove_dev(&ctrl_cmd);
2603 up(&ctl_mutex);
2604 break;
2605 case PKT_CTRL_CMD_STATUS:
2606 down(&ctl_mutex);
2607 pkt_get_status(&ctrl_cmd);
2608 up(&ctl_mutex);
2609 break;
2610 default:
2611 return -ENOTTY;
2612 }
2613
2614 if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
2615 return -EFAULT;
2616 return ret;
2617}
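/*
 * Illustrative user-space sketch (not part of the driver): how a setup tool
 * might drive the PACKET_CTRL_CMD ioctl handled above through the misc
 * control node ("pktcdvd/control", see pkt_misc below).  The device path,
 * the device number used and the error handling are assumptions for
 * illustration only.
 */
#if 0
	/* user space, not kernel code: */
	struct pkt_ctrl_command c;
	int fd = open("/dev/pktcdvd/control", O_RDONLY);

	memset(&c, 0, sizeof(c));
	c.command = PKT_CTRL_CMD_SETUP;
	c.dev = cdrom_dev;	/* CD-ROM device number, new (32-bit) encoding */
	if (ioctl(fd, PACKET_CTRL_CMD, &c) == 0)
		printf("mapped to pkt_dev 0x%x\n", c.pkt_dev);
	close(fd);
#endif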
2618
2619
2620static struct file_operations pkt_ctl_fops = {
2621 .ioctl = pkt_ctl_ioctl,
2622 .owner = THIS_MODULE,
2623};
2624
2625static struct miscdevice pkt_misc = {
2626 .minor = MISC_DYNAMIC_MINOR,
2627 .name = "pktcdvd",
2628 .devfs_name = "pktcdvd/control",
2629 .fops = &pkt_ctl_fops
2630};
2631
2632static int __init pkt_init(void)
2633{
2634 int ret;
2635
2636 psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
2637 if (!psd_pool)
2638 return -ENOMEM;
2639
2640 ret = register_blkdev(pkt_major, "pktcdvd");
2641 if (ret < 0) {
2642 printk("pktcdvd: Unable to register block device\n");
2643 goto out2;
2644 }
2645 if (!pkt_major)
2646 pkt_major = ret;
2647
2648 ret = misc_register(&pkt_misc);
2649 if (ret) {
2650 printk("pktcdvd: Unable to register misc device\n");
2651 goto out;
2652 }
2653
2654 init_MUTEX(&ctl_mutex);
2655
2656 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
2657
2658 DPRINTK("pktcdvd: %s\n", VERSION_CODE);
2659 return 0;
2660
2661out:
2662 unregister_blkdev(pkt_major, "pktcdvd");
2663out2:
2664 mempool_destroy(psd_pool);
2665 return ret;
2666}
2667
2668static void __exit pkt_exit(void)
2669{
2670 remove_proc_entry("pktcdvd", proc_root_driver);
2671 misc_deregister(&pkt_misc);
2672 unregister_blkdev(pkt_major, "pktcdvd");
2673 mempool_destroy(psd_pool);
2674}
2675
2676MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
2677MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
2678MODULE_LICENSE("GPL");
2679
2680module_init(pkt_init);
2681module_exit(pkt_exit);
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
new file mode 100644
index 000000000000..29548784cb7b
--- /dev/null
+++ b/drivers/block/ps2esdi.c
@@ -0,0 +1,1092 @@
1/* ps2esdi driver based on assembler code by Arindam Banerji,
2 written by Peter De Schrijver */
3/* Reassuring note to IBM : This driver was NOT developed by reverse
4 engineering the PS/2's BIOS */
5/* Dedicated to Wannes, Tofke, Ykke, Godot, Killroy and all those
6 other lovely fish out there... */
7/* This code was written during the long and boring WINA
8 elections 1994 */
9/* Thanks to Arindam Banerji for giving me the source of his driver */
10/* This code may be freely distributed and modified in any way,
11 as long as these notes remain intact */
12
13/* Revised: 05/07/94 by Arindam Banerji (axb@cse.nd.edu) */
14/* Revised: 09/08/94 by Peter De Schrijver (stud11@cc4.kuleuven.ac.be)
15   Thanks to Arindam Banerji for sending me the docs of the adapter */
16
17/* BA Modified for ThinkPad 720 by Boris Ashkinazi */
18/* (bash@vnet.ibm.com) 08/08/95 */
19
20/* Modified further for ThinkPad-720C by Uri Blumenthal */
21/* (uri@watson.ibm.com) Sep 11, 1995 */
22
23/* TODO :
24 + Timeouts
25 + Get disk parameters
26 + DMA above 16MB
27 + reset after read/write error
28 */
29
30#define DEVICE_NAME "PS/2 ESDI"
31
32#include <linux/config.h>
33#include <linux/major.h>
34#include <linux/errno.h>
35#include <linux/wait.h>
36#include <linux/interrupt.h>
37#include <linux/fs.h>
38#include <linux/kernel.h>
39#include <linux/genhd.h>
40#include <linux/ps2esdi.h>
41#include <linux/blkdev.h>
42#include <linux/mca-legacy.h>
43#include <linux/init.h>
44#include <linux/ioport.h>
45#include <linux/module.h>
46
47#include <asm/system.h>
48#include <asm/io.h>
49#include <asm/dma.h>
50#include <asm/mca_dma.h>
51#include <asm/uaccess.h>
52
53#define PS2ESDI_IRQ 14
54#define MAX_HD 2
55#define MAX_RETRIES 5
56#define MAX_16BIT 65536
57#define ESDI_TIMEOUT 0xf000
58#define ESDI_STAT_TIMEOUT 4
59
60#define TYPE_0_CMD_BLK_LENGTH 2
61#define TYPE_1_CMD_BLK_LENGTH 4
62
63static void reset_ctrl(void);
64
65static int ps2esdi_geninit(void);
66
67static void do_ps2esdi_request(request_queue_t * q);
68
69static void ps2esdi_readwrite(int cmd, struct request *req);
70
71static void ps2esdi_fill_cmd_block(u_short * cmd_blk, u_short cmd,
72u_short cyl, u_short head, u_short sector, u_short length, u_char drive);
73
74static int ps2esdi_out_cmd_blk(u_short * cmd_blk);
75
76static void ps2esdi_prep_dma(char *buffer, u_short length, u_char dma_xmode);
77
78static irqreturn_t ps2esdi_interrupt_handler(int irq, void *dev_id,
79 struct pt_regs *regs);
80static void (*current_int_handler) (u_int) = NULL;
81static void ps2esdi_normal_interrupt_handler(u_int);
82static void ps2esdi_initial_reset_int_handler(u_int);
83static void ps2esdi_geometry_int_handler(u_int);
84static int ps2esdi_ioctl(struct inode *inode, struct file *file,
85 u_int cmd, u_long arg);
86
87static int ps2esdi_read_status_words(int num_words, int max_words, u_short * buffer);
88
89static void dump_cmd_complete_status(u_int int_ret_code);
90
91static void ps2esdi_get_device_cfg(void);
92
93static void ps2esdi_reset_timer(unsigned long unused);
94
95static u_int dma_arb_level; /* DMA arbitration level */
96
97static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_int);
98
99static int no_int_yet;
100static int ps2esdi_drives;
101static u_short io_base;
102static struct timer_list esdi_timer =
103 TIMER_INITIALIZER(ps2esdi_reset_timer, 0, 0);
104static int reset_status;
105static int ps2esdi_slot = -1;
106static int tp720esdi = 0; /* Is it Integrated ESDI of ThinkPad-720? */
107static int intg_esdi = 0; /* If integrated adapter */
108struct ps2esdi_i_struct {
109 unsigned int head, sect, cyl, wpcom, lzone, ctl;
110};
111static DEFINE_SPINLOCK(ps2esdi_lock);
112static struct request_queue *ps2esdi_queue;
113static struct request *current_req;
114
115#if 0
116#if 0 /* try both - I don't know which one is better... UB */
117static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
118{
119 {4, 48, 1553, 0, 0, 0},
120 {0, 0, 0, 0, 0, 0}};
121#else
122static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
123{
124 {64, 32, 161, 0, 0, 0},
125 {0, 0, 0, 0, 0, 0}};
126#endif
127#endif
128static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
129{
130 {0, 0, 0, 0, 0, 0},
131 {0, 0, 0, 0, 0, 0}};
132
133static struct block_device_operations ps2esdi_fops =
134{
135 .owner = THIS_MODULE,
136 .ioctl = ps2esdi_ioctl,
137};
138
139static struct gendisk *ps2esdi_gendisk[2];
140
141/* initialization routine called by ll_rw_blk.c */
142static int __init ps2esdi_init(void)
143{
144
145 int error = 0;
146
147 /* register the device - pass the name and major number */
148 if (register_blkdev(PS2ESDI_MAJOR, "ed"))
149 return -EBUSY;
150
151 /* set up some global information - indicating device specific info */
152 ps2esdi_queue = blk_init_queue(do_ps2esdi_request, &ps2esdi_lock);
153 if (!ps2esdi_queue) {
154 unregister_blkdev(PS2ESDI_MAJOR, "ed");
155 return -ENOMEM;
156 }
157
158 /* some minor housekeeping - setup the global gendisk structure */
159 error = ps2esdi_geninit();
160 if (error) {
161 printk(KERN_WARNING "PS2ESDI: error initialising"
162 " device, releasing resources\n");
163 unregister_blkdev(PS2ESDI_MAJOR, "ed");
164 blk_cleanup_queue(ps2esdi_queue);
165 return error;
166 }
167 return 0;
168} /* ps2esdi_init */
169
170#ifndef MODULE
171
172module_init(ps2esdi_init);
173
174#else
175
176static int cyl[MAX_HD] = {-1,-1};
177static int head[MAX_HD] = {-1, -1};
178static int sect[MAX_HD] = {-1, -1};
179
180module_param(tp720esdi, bool, 0);
181module_param_array(cyl, int, NULL, 0);
182module_param_array(head, int, NULL, 0);
183module_param_array(sect, int, NULL, 0);
184MODULE_LICENSE("GPL");
185
186int init_module(void) {
187 int drive;
188
189 for(drive = 0; drive < MAX_HD; drive++) {
190 struct ps2esdi_i_struct *info = &ps2esdi_info[drive];
191
192 if (cyl[drive] != -1) {
193 info->cyl = info->lzone = cyl[drive];
194 info->wpcom = 0;
195 }
196 if (head[drive] != -1) {
197 info->head = head[drive];
198 info->ctl = (head[drive] > 8 ? 8 : 0);
199 }
200 if (sect[drive] != -1) info->sect = sect[drive];
201 }
202 return ps2esdi_init();
203}
204
205void
206cleanup_module(void) {
207 int i;
208	if (ps2esdi_slot >= 0) {
209 mca_mark_as_unused(ps2esdi_slot);
210 mca_set_adapter_procfn(ps2esdi_slot, NULL, NULL);
211 }
212 release_region(io_base, 4);
213 free_dma(dma_arb_level);
214 free_irq(PS2ESDI_IRQ, &ps2esdi_gendisk);
215 unregister_blkdev(PS2ESDI_MAJOR, "ed");
216 blk_cleanup_queue(ps2esdi_queue);
217 for (i = 0; i < ps2esdi_drives; i++) {
218 del_gendisk(ps2esdi_gendisk[i]);
219 put_disk(ps2esdi_gendisk[i]);
220 }
221}
222#endif /* MODULE */
223
224/* handles boot time command line parameters */
225void __init tp720_setup(char *str, int *ints)
226{
227 /* no params, just sets the tp720esdi flag if it exists */
228
229 printk("%s: TP 720 ESDI flag set\n", DEVICE_NAME);
230 tp720esdi = 1;
231}
232
233void __init ed_setup(char *str, int *ints)
234{
235 int hdind = 0;
236
237 /* handles 3 parameters only - corresponding to
238 1. Number of cylinders
239 2. Number of heads
240 3. Sectors/track
241 */
242
243 if (ints[0] != 3)
244 return;
245
246 /* print out the information - seen at boot time */
247 printk("%s: ints[0]=%d ints[1]=%d ints[2]=%d ints[3]=%d\n",
248 DEVICE_NAME, ints[0], ints[1], ints[2], ints[3]);
249
250 /* set the index into device specific information table */
251 if (ps2esdi_info[0].head != 0)
252 hdind = 1;
253
254 /* set up all the device information */
255 ps2esdi_info[hdind].head = ints[2];
256 ps2esdi_info[hdind].sect = ints[3];
257 ps2esdi_info[hdind].cyl = ints[1];
258 ps2esdi_info[hdind].wpcom = 0;
259 ps2esdi_info[hdind].lzone = ints[1];
260 ps2esdi_info[hdind].ctl = (ints[2] > 8 ? 8 : 0);
261#if 0 /* this may be needed for PS2/Mod.80, but it hurts ThinkPad! */
262 ps2esdi_drives = hdind + 1; /* increment index for the next time */
263#endif
264} /* ed_setup */
265
266static int ps2esdi_getinfo(char *buf, int slot, void *d)
267{
268 int len = 0;
269
270 len += sprintf(buf + len, "DMA Arbitration Level: %d\n",
271 dma_arb_level);
272 len += sprintf(buf + len, "IO Port: %x\n", io_base);
273 len += sprintf(buf + len, "IRQ: 14\n");
274 len += sprintf(buf + len, "Drives: %d\n", ps2esdi_drives);
275
276 return len;
277}
278
279/* ps2 esdi specific initialization - called thru the gendisk chain */
280static int __init ps2esdi_geninit(void)
281{
282 /*
283 The first part contains the initialization code
284 for the ESDI disk subsystem. All we really do
285 is search for the POS registers of the controller
286 to do some simple setup operations. First, we
287 must ensure that the controller is installed,
288 enabled, and configured as PRIMARY. Then we must
289 determine the DMA arbitration level being used by
290 the controller so we can handle data transfer
291 operations properly. If all of this works, then
292 we will set the INIT_FLAG to a non-zero value.
293 */
294
295 int slot = 0, i, reset_start, reset_end;
296 u_char status;
297 unsigned short adapterID;
298 int error = 0;
299
300 if ((slot = mca_find_adapter(INTG_ESDI_ID, 0)) != MCA_NOTFOUND) {
301 adapterID = INTG_ESDI_ID;
302 printk("%s: integrated ESDI adapter found in slot %d\n",
303 DEVICE_NAME, slot+1);
304#ifndef MODULE
305 mca_set_adapter_name(slot, "PS/2 Integrated ESDI");
306#endif
307 } else if ((slot = mca_find_adapter(NRML_ESDI_ID, 0)) != -1) {
308 adapterID = NRML_ESDI_ID;
309 printk("%s: normal ESDI adapter found in slot %d\n",
310 DEVICE_NAME, slot+1);
311 mca_set_adapter_name(slot, "PS/2 ESDI");
312 } else {
313 return -ENODEV;
314 }
315
316 ps2esdi_slot = slot;
317 mca_mark_as_used(slot);
318 mca_set_adapter_procfn(slot, (MCA_ProcFn) ps2esdi_getinfo, NULL);
319
320 /* Found the slot - read the POS register 2 to get the necessary
321 configuration and status information. POS register 2 has the
322 following information :
323 Bit Function
324 7 reserved = 0
325 6 arbitration method
326 0 - fairness enabled
327 1 - fairness disabled, linear priority assignment
328 5-2 arbitration level
329 1 alternate address
330 1 alternate address
331 0 - use addresses 0x3510 - 0x3517
332 0 adapter enable
333 */
334	 */
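#if 0
	/*
	 * Illustrative sketch (not part of the driver): decoding the POS
	 * register 2 layout documented above.  The raw masks mirror that
	 * description; the STATUS_* macros used below in the real code come
	 * from linux/ps2esdi.h.  Hypothetical, for illustration only.
	 */
	{
		u_char pos2 = mca_read_stored_pos(slot, 2);
		int enabled   = pos2 & 0x01;		/* bit 0: adapter enable */
		int alternate = pos2 & 0x02;		/* bit 1: alternate address */
		int arb_level = (pos2 >> 2) & 0x0f;	/* bits 5-2: arbitration level */
		int linear    = pos2 & 0x40;		/* bit 6: fairness disabled */
	}
#endif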
335 status = mca_read_stored_pos(slot, 2);
336 /* is it enabled ? */
337 if (!(status & STATUS_ENABLED)) {
338 printk("%s: ESDI adapter disabled\n", DEVICE_NAME);
339 error = -ENODEV;
340 goto err_out1;
341 }
342 /* try to grab IRQ, and try to grab a slow IRQ if it fails, so we can
343 share with the SCSI driver */
344 if (request_irq(PS2ESDI_IRQ, ps2esdi_interrupt_handler,
345 SA_INTERRUPT | SA_SHIRQ, "PS/2 ESDI", &ps2esdi_gendisk)
346 && request_irq(PS2ESDI_IRQ, ps2esdi_interrupt_handler,
347 SA_SHIRQ, "PS/2 ESDI", &ps2esdi_gendisk)
348 ) {
349 printk("%s: Unable to get IRQ %d\n", DEVICE_NAME, PS2ESDI_IRQ);
350 error = -EBUSY;
351 goto err_out1;
352 }
353 if (status & STATUS_ALTERNATE)
354 io_base = ALT_IO_BASE;
355 else
356 io_base = PRIMARY_IO_BASE;
357
358 if (!request_region(io_base, 4, "ed")) {
359 printk(KERN_WARNING"Unable to request region 0x%x\n", io_base);
360 error = -EBUSY;
361 goto err_out2;
362 }
363 /* get the dma arbitration level */
364 dma_arb_level = (status >> 2) & 0xf;
365
366 /* BA */
367 printk("%s: DMA arbitration level : %d\n",
368 DEVICE_NAME, dma_arb_level);
369
370 LITE_ON;
371 current_int_handler = ps2esdi_initial_reset_int_handler;
372 reset_ctrl();
373 reset_status = 0;
374 reset_start = jiffies;
375 while (!reset_status) {
376 init_timer(&esdi_timer);
377 esdi_timer.expires = jiffies + HZ;
378 esdi_timer.data = 0;
379 add_timer(&esdi_timer);
380 sleep_on(&ps2esdi_int);
381 }
382 reset_end = jiffies;
383 LITE_OFF;
384 printk("%s: reset interrupt after %d jiffies, %u.%02u secs\n",
385 DEVICE_NAME, reset_end - reset_start, (reset_end - reset_start) / HZ,
386 (reset_end - reset_start) % HZ);
387
388
389 /* Integrated ESDI Disk and Controller has only one drive! */
390 if (adapterID == INTG_ESDI_ID) {/* if not "normal" PS2 ESDI adapter */
391		ps2esdi_drives = 1; intg_esdi = 1;	/* then we have only one physical disk! */
392 }
393
394
395
396 /* finally this part sets up some global data structures etc. */
397
398 ps2esdi_get_device_cfg();
399
400 /* some annoyance in the above routine returns TWO drives?
401	   Is something else happening in the background?
402	   Regardless, we fix the # of drives again. AJK */
403 /* Integrated ESDI Disk and Controller has only one drive! */
404 if (adapterID == INTG_ESDI_ID) /* if not "normal" PS2 ESDI adapter */
405 ps2esdi_drives = 1; /* Not three or two, ONE DAMNIT! */
406
407 current_int_handler = ps2esdi_normal_interrupt_handler;
408
409 if (request_dma(dma_arb_level, "ed") !=0) {
410 printk(KERN_WARNING "PS2ESDI: Can't request dma-channel %d\n"
411 ,(int) dma_arb_level);
412 error = -EBUSY;
413 goto err_out3;
414 }
415 blk_queue_max_sectors(ps2esdi_queue, 128);
416
417 error = -ENOMEM;
418 for (i = 0; i < ps2esdi_drives; i++) {
419 struct gendisk *disk = alloc_disk(64);
420 if (!disk)
421 goto err_out4;
422 disk->major = PS2ESDI_MAJOR;
423 disk->first_minor = i<<6;
424 sprintf(disk->disk_name, "ed%c", 'a'+i);
425 sprintf(disk->devfs_name, "ed/target%d", i);
426 disk->fops = &ps2esdi_fops;
427 ps2esdi_gendisk[i] = disk;
428 }
429
430 for (i = 0; i < ps2esdi_drives; i++) {
431 struct gendisk *disk = ps2esdi_gendisk[i];
432 set_capacity(disk, ps2esdi_info[i].head * ps2esdi_info[i].sect *
433 ps2esdi_info[i].cyl);
434 disk->queue = ps2esdi_queue;
435 disk->private_data = &ps2esdi_info[i];
436 add_disk(disk);
437 }
438 return 0;
439err_out4:
440 while (i--)
441 put_disk(ps2esdi_gendisk[i]);
442err_out3:
443 release_region(io_base, 4);
444err_out2:
445 free_irq(PS2ESDI_IRQ, &ps2esdi_gendisk);
446err_out1:
447	if (ps2esdi_slot >= 0) {
448 mca_mark_as_unused(ps2esdi_slot);
449 mca_set_adapter_procfn(ps2esdi_slot, NULL, NULL);
450 }
451 return error;
452}
453
454static void __init ps2esdi_get_device_cfg(void)
455{
456 u_short cmd_blk[TYPE_0_CMD_BLK_LENGTH];
457
458 /*BA */ printk("%s: Drive 0\n", DEVICE_NAME);
459 current_int_handler = ps2esdi_geometry_int_handler;
460 cmd_blk[0] = CMD_GET_DEV_CONFIG | 0x600;
461 cmd_blk[1] = 0;
462 no_int_yet = TRUE;
463 ps2esdi_out_cmd_blk(cmd_blk);
464 if (no_int_yet)
465 sleep_on(&ps2esdi_int);
466
467 if (ps2esdi_drives > 1) {
468 printk("%s: Drive 1\n", DEVICE_NAME); /*BA */
469 cmd_blk[0] = CMD_GET_DEV_CONFIG | (1 << 5) | 0x600;
470 cmd_blk[1] = 0;
471 no_int_yet = TRUE;
472 ps2esdi_out_cmd_blk(cmd_blk);
473 if (no_int_yet)
474 sleep_on(&ps2esdi_int);
475 } /* if second physical drive is present */
476 return;
477}
478
479/* strategy routine that handles most of the IO requests */
480static void do_ps2esdi_request(request_queue_t * q)
481{
482 struct request *req;
483	/* this routine is called with interrupts disabled - they must
484	   still be disabled when it finishes */
485
486 req = elv_next_request(q);
487 if (!req)
488 return;
489
490#if 0
491 printk("%s:got request. device : %s command : %d sector : %ld count : %ld, buffer: %p\n",
492 DEVICE_NAME,
493 req->rq_disk->disk_name,
494 req->cmd, req->sector,
495 req->current_nr_sectors, req->buffer);
496#endif
497
498 /* check for above 16Mb dmas */
499 if (isa_virt_to_bus(req->buffer + req->current_nr_sectors * 512) > 16 * MB) {
500 printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
501 end_request(req, FAIL);
502 return;
503 }
504
505 if (req->sector+req->current_nr_sectors > get_capacity(req->rq_disk)) {
506 printk("Grrr. error. ps2esdi_drives: %d, %llu %llu\n",
507 ps2esdi_drives, req->sector,
508 (unsigned long long)get_capacity(req->rq_disk));
509 end_request(req, FAIL);
510 return;
511 }
512
513 switch (rq_data_dir(req)) {
514 case READ:
515 ps2esdi_readwrite(READ, req);
516 break;
517 case WRITE:
518 ps2esdi_readwrite(WRITE, req);
519 break;
520 default:
521 printk("%s: Unknown command\n", req->rq_disk->disk_name);
522 end_request(req, FAIL);
523 break;
524 } /* handle different commands */
525} /* main strategy routine */
526
527/* resets the ESDI adapter */
528static void reset_ctrl(void)
529{
530
531 u_long expire;
532 u_short status;
533
534 /* enable interrupts on the controller */
535 status = inb(ESDI_INTRPT);
536 outb((status & 0xe0) | ATT_EOI, ESDI_ATTN); /* to be sure we don't have
537 any interrupt pending... */
538 outb_p(CTRL_ENABLE_INTR, ESDI_CONTROL);
539
540 /* read the ESDI status port - if the controller is not busy,
541 simply do a soft reset (fast) - otherwise we'll have to do a
542 hard (slow) reset. */
543 if (!(inb_p(ESDI_STATUS) & STATUS_BUSY)) {
544 /*BA */ printk("%s: soft reset...\n", DEVICE_NAME);
545 outb_p(CTRL_SOFT_RESET, ESDI_ATTN);
546 }
547 /* soft reset */
548 else {
549 /*BA */
550 printk("%s: hard reset...\n", DEVICE_NAME);
551 outb_p(CTRL_HARD_RESET, ESDI_CONTROL);
552 expire = jiffies + 2*HZ;
553 while (time_before(jiffies, expire));
554 outb_p(1, ESDI_CONTROL);
555 } /* hard reset */
556
557
558} /* reset the controller */
559
560/* called by the strategy routine to handle read and write requests */
561static void ps2esdi_readwrite(int cmd, struct request *req)
562{
563 struct ps2esdi_i_struct *p = req->rq_disk->private_data;
564 unsigned block = req->sector;
565 unsigned count = req->current_nr_sectors;
566 int drive = p - ps2esdi_info;
567 u_short track, head, cylinder, sector;
568 u_short cmd_blk[TYPE_1_CMD_BLK_LENGTH];
569
570	/* do some relevant arithmetic */
571 track = block / p->sect;
572 head = track % p->head;
573 cylinder = track / p->head;
574 sector = block % p->sect;
575
576#if 0
577 printk("%s: cyl=%d head=%d sect=%d\n", DEVICE_NAME, cylinder, head, sector);
578#endif
579 /* call the routine that actually fills out a command block */
580 ps2esdi_fill_cmd_block
581 (cmd_blk,
582 (cmd == READ) ? CMD_READ : CMD_WRITE,
583 cylinder, head, sector, count, drive);
584
585 /* send the command block to the controller */
586 current_req = req;
587 spin_unlock_irq(&ps2esdi_lock);
588 if (ps2esdi_out_cmd_blk(cmd_blk)) {
589 spin_lock_irq(&ps2esdi_lock);
590 printk("%s: Controller failed\n", DEVICE_NAME);
591 if ((++req->errors) >= MAX_RETRIES)
592 end_request(req, FAIL);
593 }
594 /* check for failure to put out the command block */
595 else {
596 spin_lock_irq(&ps2esdi_lock);
597#if 0
598 printk("%s: waiting for xfer\n", DEVICE_NAME);
599#endif
600 /* turn disk lights on */
601 LITE_ON;
602 }
603
604} /* ps2esdi_readwrite */
605
606/* fill out the command block */
607static void ps2esdi_fill_cmd_block(u_short * cmd_blk, u_short cmd,
608 u_short cyl, u_short head, u_short sector, u_short length, u_char drive)
609{
610
611 cmd_blk[0] = (drive << 5) | cmd;
612 cmd_blk[1] = length;
613 cmd_blk[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
614 cmd_blk[3] = (cyl & 0x3E0) >> 5;
615
616} /* fill out the command block */
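/*
 * Illustrative sketch (not part of the driver): how the CHS address packed
 * by ps2esdi_fill_cmd_block() above would be unpacked again.  The cylinder
 * is split across two command words - its low 5 bits sit in bits 15-11 of
 * word 2, the higher bits in word 3 - matching the decode done later in
 * dump_cmd_complete_status().  Hypothetical helper, for illustration only.
 */
#if 0
static void example_unpack_chs(const u_short *cmd_blk,
			       u_short *cyl, u_short *head, u_short *sector)
{
	*sector = cmd_blk[2] & 0x1f;			/* bits 4-0 */
	*head   = (cmd_blk[2] >> 5) & 0x3f;		/* bits 10-5 */
	*cyl    = ((cmd_blk[2] >> 11) & 0x1f) |		/* low 5 cylinder bits */
		  ((cmd_blk[3] & 0x1f) << 5);		/* high cylinder bits */
}
#endif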
617
618/* write a command block to the controller */
619static int ps2esdi_out_cmd_blk(u_short * cmd_blk)
620{
621
622 int i;
623 unsigned long jif;
624 u_char status;
625
626 /* enable interrupts */
627 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
628
629 /* do not write to the controller, if it is busy */
630 for (jif = jiffies + ESDI_STAT_TIMEOUT;
631 time_after(jif, jiffies) &&
632 (inb(ESDI_STATUS) & STATUS_BUSY); )
633 ;
634
635#if 0
636 printk("%s: i(1)=%ld\n", DEVICE_NAME, jif);
637#endif
638
639 /* if device is still busy - then just time out */
640 if (inb(ESDI_STATUS) & STATUS_BUSY) {
641 printk("%s: ps2esdi_out_cmd timed out (1)\n", DEVICE_NAME);
642 return ERROR;
643 } /* timeout ??? */
644 /* Set up the attention register in the controller */
645 outb(((*cmd_blk) & 0xE0) | 1, ESDI_ATTN);
646
647#if 0
648 printk("%s: sending %d words to controller\n", DEVICE_NAME, (((*cmd_blk) >> 14) + 1) << 1);
649#endif
650
651 /* one by one send each word out */
652 for (i = (((*cmd_blk) >> 14) + 1) << 1; i; i--) {
653 status = inb(ESDI_STATUS);
654 for (jif = jiffies + ESDI_STAT_TIMEOUT;
655 time_after(jif, jiffies) && (status & STATUS_BUSY) &&
656 (status & STATUS_CMD_INF); status = inb(ESDI_STATUS));
657 if ((status & (STATUS_BUSY | STATUS_CMD_INF)) == STATUS_BUSY) {
658#if 0
659 printk("%s: sending %04X\n", DEVICE_NAME, *cmd_blk);
660#endif
661 outw(*cmd_blk++, ESDI_CMD_INT);
662 } else {
663 printk("%s: ps2esdi_out_cmd timed out while sending command (status=%02X)\n",
664 DEVICE_NAME, status);
665 return ERROR;
666 }
667 } /* send all words out */
668 return OK;
669} /* send out the commands */
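/*
 * Illustrative note (not part of the driver): the loop above derives the
 * number of 16-bit words to send from bits 15-14 of the first command word,
 * i.e. words = (top two bits + 1) * 2.  A hypothetical example follows; the
 * concrete command values are assumptions for illustration only.
 */
#if 0
	/* cmd_blk[0] with bits 15-14 == 0 (e.g. GET DEVICE CONFIG) -> 2 words */
	/* a type-1 read/write block with bits 15-14 == 1 -> 4 words are sent  */
	int words = ((cmd_blk[0] >> 14) + 1) << 1;
#endif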
670
671
672/* prepare for dma - do all the necessary setup */
673static void ps2esdi_prep_dma(char *buffer, u_short length, u_char dma_xmode)
674{
675 unsigned long flags = claim_dma_lock();
676
677 mca_disable_dma(dma_arb_level);
678
679 mca_set_dma_addr(dma_arb_level, isa_virt_to_bus(buffer));
680
681 mca_set_dma_count(dma_arb_level, length * 512 / 2);
682
683 mca_set_dma_mode(dma_arb_level, dma_xmode);
684
685 mca_enable_dma(dma_arb_level);
686
687 release_dma_lock(flags);
688
689} /* prepare for dma */
690
691
692
693static irqreturn_t ps2esdi_interrupt_handler(int irq, void *dev_id,
694 struct pt_regs *regs)
695{
696 u_int int_ret_code;
697
698 if (inb(ESDI_STATUS) & STATUS_INTR) {
699 int_ret_code = inb(ESDI_INTRPT);
700 if (current_int_handler) {
701 /* Disable adapter interrupts till processing is finished */
702 outb(CTRL_DISABLE_INTR, ESDI_CONTROL);
703 current_int_handler(int_ret_code);
704 } else
705 printk("%s: help ! No interrupt handler.\n", DEVICE_NAME);
706 } else {
707 return IRQ_NONE;
708 }
709 return IRQ_HANDLED;
710}
711
712static void ps2esdi_initial_reset_int_handler(u_int int_ret_code)
713{
714
715 switch (int_ret_code & 0xf) {
716 case INT_RESET:
717 /*BA */
718 printk("%s: initial reset completed.\n", DEVICE_NAME);
719 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
720 wake_up(&ps2esdi_int);
721 break;
722 case INT_ATTN_ERROR:
723 printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
724 int_ret_code);
725 printk("%s: status: %02x\n", DEVICE_NAME, inb(ESDI_STATUS));
726 break;
727 default:
728 printk("%s: initial reset handler received interrupt: %02X\n",
729 DEVICE_NAME, int_ret_code);
730 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
731 break;
732 }
733 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
734}
735
736
737static void ps2esdi_geometry_int_handler(u_int int_ret_code)
738{
739 u_int status, drive_num;
740 unsigned long rba;
741 int i;
742
743 drive_num = int_ret_code >> 5;
744 switch (int_ret_code & 0xf) {
745 case INT_CMD_COMPLETE:
746 for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
747 if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
748 printk("%s: timeout reading status word\n", DEVICE_NAME);
749 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
750 break;
751 }
752 status = inw(ESDI_STT_INT);
753 if ((status & 0x1F) == CMD_GET_DEV_CONFIG) {
754#define REPLY_WORDS 5 /* we already read word 0 */
755 u_short reply[REPLY_WORDS];
756
757 if (ps2esdi_read_status_words((status >> 8) - 1, REPLY_WORDS, reply)) {
758 /*BA */
759 printk("%s: Device Configuration Status for drive %u\n",
760 DEVICE_NAME, drive_num);
761
762 printk("%s: Spares/cyls: %u", DEVICE_NAME, reply[0] >> 8);
763
764 printk
765 ("Config bits: %s%s%s%s%s\n",
766 (reply[0] & CONFIG_IS) ? "Invalid Secondary, " : "",
767 ((reply[0] & CONFIG_ZD) && !(reply[0] & CONFIG_IS))
768 ? "Zero Defect, " : "Defects Present, ",
769 (reply[0] & CONFIG_SF) ? "Skewed Format, " : "",
770 (reply[0] & CONFIG_FR) ? "Removable, " : "Non-Removable, ",
771 (reply[0] & CONFIG_RT) ? "No Retries" : "Retries");
772
773 rba = reply[1] | ((unsigned long) reply[2] << 16);
774 printk("%s: Number of RBA's: %lu\n", DEVICE_NAME, rba);
775
776 printk("%s: Physical number of cylinders: %u, Sectors/Track: %u, Heads: %u\n",
777 DEVICE_NAME, reply[3], reply[4] >> 8, reply[4] & 0xff);
778
779 if (!ps2esdi_info[drive_num].head) {
780 ps2esdi_info[drive_num].head = 64;
781 ps2esdi_info[drive_num].sect = 32;
782 ps2esdi_info[drive_num].cyl = rba / (64 * 32);
783 ps2esdi_info[drive_num].wpcom = 0;
784 ps2esdi_info[drive_num].lzone = ps2esdi_info[drive_num].cyl;
785 ps2esdi_info[drive_num].ctl = 8;
786 if (tp720esdi) { /* store the retrieved parameters */
787 ps2esdi_info[0].head = reply[4] & 0Xff;
788 ps2esdi_info[0].sect = reply[4] >> 8;
789 ps2esdi_info[0].cyl = reply[3];
790 ps2esdi_info[0].wpcom = 0;
791 ps2esdi_info[0].lzone = reply[3];
792 } else {
793 if (!intg_esdi)
794 ps2esdi_drives++;
795 }
796 }
797#ifdef OBSOLETE
798 if (!ps2esdi_info[drive_num].head) {
799 ps2esdi_info[drive_num].head = reply[4] & 0Xff;
800 ps2esdi_info[drive_num].sect = reply[4] >> 8;
801 ps2esdi_info[drive_num].cyl = reply[3];
802 ps2esdi_info[drive_num].wpcom = 0;
803 ps2esdi_info[drive_num].lzone = reply[3];
804 if (tp720esdi) { /* store the retrieved parameters */
805 ps2esdi_info[0].head = reply[4] & 0Xff;
806 ps2esdi_info[0].sect = reply[4] >> 8;
807 ps2esdi_info[0].cyl = reply[3];
808 ps2esdi_info[0].wpcom = 0;
809 ps2esdi_info[0].lzone = reply[3];
810 } else {
811 ps2esdi_drives++;
812 }
813 }
814#endif
815
816 } else
817 printk("%s: failed while getting device config\n", DEVICE_NAME);
818#undef REPLY_WORDS
819 } else
820 printk("%s: command %02X unknown by geometry handler\n",
821 DEVICE_NAME, status & 0x1f);
822
823 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
824 break;
825
826 case INT_ATTN_ERROR:
827 printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
828 int_ret_code);
829 printk("%s: Device not available\n", DEVICE_NAME);
830 break;
831 case INT_CMD_ECC:
832 case INT_CMD_RETRY:
833 case INT_CMD_ECC_RETRY:
834 case INT_CMD_WARNING:
835 case INT_CMD_ABORT:
836 case INT_CMD_FAILED:
837 case INT_DMA_ERR:
838 case INT_CMD_BLK_ERR:
839 /*BA */ printk("%s: Whaa. Error occurred...\n", DEVICE_NAME);
840 dump_cmd_complete_status(int_ret_code);
841 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
842 break;
843 default:
844 printk("%s: Unknown interrupt reason: %02X\n",
845 DEVICE_NAME, int_ret_code & 0xf);
846 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
847 break;
848 }
849
850 wake_up(&ps2esdi_int);
851 no_int_yet = FALSE;
852 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
853
854}
855
856static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
857{
858 unsigned long flags;
859 u_int status;
860 u_int ending;
861 int i;
862
863 switch (int_ret_code & 0x0f) {
864 case INT_TRANSFER_REQ:
865 ps2esdi_prep_dma(current_req->buffer,
866 current_req->current_nr_sectors,
867 (rq_data_dir(current_req) == READ)
868 ? MCA_DMA_MODE_16 | MCA_DMA_MODE_WRITE | MCA_DMA_MODE_XFER
869 : MCA_DMA_MODE_16 | MCA_DMA_MODE_READ);
870 outb(CTRL_ENABLE_DMA | CTRL_ENABLE_INTR, ESDI_CONTROL);
871 ending = -1;
872 break;
873
874 case INT_ATTN_ERROR:
875 printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
876 int_ret_code);
877 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
878 ending = FAIL;
879 break;
880
881 case INT_CMD_COMPLETE:
882 for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
883 if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
884 printk("%s: timeout reading status word\n", DEVICE_NAME);
885 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
886 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
887 if ((++current_req->errors) >= MAX_RETRIES)
888 ending = FAIL;
889 else
890 ending = -1;
891 break;
892 }
893 status = inw(ESDI_STT_INT);
894 switch (status & 0x1F) {
895 case (CMD_READ & 0xff):
896 case (CMD_WRITE & 0xff):
897 LITE_OFF;
898 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
899 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
900 ending = SUCCES;
901 break;
902 default:
903 printk("%s: interrupt for unknown command %02X\n",
904 DEVICE_NAME, status & 0x1f);
905 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
906 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
907 ending = -1;
908 break;
909 }
910 break;
911 case INT_CMD_ECC:
912 case INT_CMD_RETRY:
913 case INT_CMD_ECC_RETRY:
914 LITE_OFF;
915 dump_cmd_complete_status(int_ret_code);
916 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
917 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
918 ending = SUCCES;
919 break;
920 case INT_CMD_WARNING:
921 case INT_CMD_ABORT:
922 case INT_CMD_FAILED:
923 case INT_DMA_ERR:
924 LITE_OFF;
925 dump_cmd_complete_status(int_ret_code);
926 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
927 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
928 if ((++current_req->errors) >= MAX_RETRIES)
929 ending = FAIL;
930 else
931 ending = -1;
932 break;
933
934 case INT_CMD_BLK_ERR:
935 dump_cmd_complete_status(int_ret_code);
936 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
937 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
938 ending = FAIL;
939 break;
940
941 case INT_CMD_FORMAT:
942 printk("%s: huh ? Who issued this format command ?\n"
943 ,DEVICE_NAME);
944 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
945 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
946 ending = -1;
947 break;
948
949 case INT_RESET:
950 /* BA printk("%s: reset completed.\n", DEVICE_NAME) */ ;
951 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
952 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
953 ending = -1;
954 break;
955
956 default:
957 printk("%s: Unknown interrupt reason: %02X\n",
958 DEVICE_NAME, int_ret_code & 0xf);
959 outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
960 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
961 ending = -1;
962 break;
963 }
964 if(ending != -1) {
965 spin_lock_irqsave(&ps2esdi_lock, flags);
966 end_request(current_req, ending);
967 current_req = NULL;
968 do_ps2esdi_request(ps2esdi_queue);
969 spin_unlock_irqrestore(&ps2esdi_lock, flags);
970 }
971} /* handle interrupts */
972
973
974
975static int ps2esdi_read_status_words(int num_words,
976 int max_words,
977 u_short * buffer)
978{
979 int i;
980
981 for (; max_words && num_words; max_words--, num_words--, buffer++) {
982 for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
983 if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
984 printk("%s: timeout reading status word\n", DEVICE_NAME);
985 return FAIL;
986 }
987 *buffer = inw(ESDI_STT_INT);
988 }
989 return SUCCES;
990}
991
992
993
994
995static void dump_cmd_complete_status(u_int int_ret_code)
996{
997#define WAIT_FOR_STATUS \
998 for(i=ESDI_TIMEOUT;i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL);i--); \
999 if(!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) { \
1000 printk("%s: timeout reading status word\n",DEVICE_NAME); \
1001 return; \
1002 }
1003
1004 int i, word_count;
1005 u_short stat_word;
1006 u_long rba;
1007
1008 printk("%s: Device: %u, interrupt ID: %02X\n",
1009 DEVICE_NAME, int_ret_code >> 5,
1010 int_ret_code & 0xf);
1011
1012 WAIT_FOR_STATUS;
1013 stat_word = inw(ESDI_STT_INT);
1014 word_count = (stat_word >> 8) - 1;
1015 printk("%s: %u status words, command: %02X\n", DEVICE_NAME, word_count,
1016 stat_word & 0xff);
1017
1018 if (word_count--) {
1019 WAIT_FOR_STATUS;
1020 stat_word = inw(ESDI_STT_INT);
1021 printk("%s: command status code: %02X, command error code: %02X\n",
1022 DEVICE_NAME, stat_word >> 8, stat_word & 0xff);
1023 }
1024 if (word_count--) {
1025 WAIT_FOR_STATUS;
1026 stat_word = inw(ESDI_STT_INT);
1027 printk("%s: device error code: %s%s%s%s%s,%02X\n", DEVICE_NAME,
1028 (stat_word & 0x1000) ? "Ready, " : "Not Ready, ",
1029 (stat_word & 0x0800) ? "Selected, " : "Not Selected, ",
1030 (stat_word & 0x0400) ? "Write Fault, " : "",
1031 (stat_word & 0x0200) ? "Track 0, " : "",
1032 (stat_word & 0x0100) ? "Seek or command complete, " : "",
1033 stat_word >> 8);
1034 }
1035 if (word_count--) {
1036 WAIT_FOR_STATUS;
1037 stat_word = inw(ESDI_STT_INT);
1038 printk("%s: Blocks to do: %u", DEVICE_NAME, stat_word);
1039 }
1040 if (word_count -= 2) {
1041 WAIT_FOR_STATUS;
1042 rba = inw(ESDI_STT_INT);
1043 WAIT_FOR_STATUS;
1044 rba |= inw(ESDI_STT_INT) << 16;
1045 printk(", Last Cyl: %u Head: %u Sector: %u\n",
1046 (u_short) ((rba & 0x1ff80000) >> 11),
1047 (u_short) ((rba & 0x7E0) >> 5), (u_short) (rba & 0x1f));
1048 } else
1049 printk("\n");
1050
1051 if (word_count--) {
1052 WAIT_FOR_STATUS;
1053 stat_word = inw(ESDI_STT_INT);
1054 printk("%s: Blocks required ECC: %u", DEVICE_NAME, stat_word);
1055 }
1056 printk("\n");
1057
1058#undef WAIT_FOR_STATUS
1059
1060}
1061
1062static int ps2esdi_ioctl(struct inode *inode,
1063 struct file *file, u_int cmd, u_long arg)
1064{
1065 struct ps2esdi_i_struct *p = inode->i_bdev->bd_disk->private_data;
1066 struct ps2esdi_geometry geom;
1067
1068 if (cmd != HDIO_GETGEO)
1069 return -EINVAL;
1070 memset(&geom, 0, sizeof(geom));
1071 geom.heads = p->head;
1072 geom.sectors = p->sect;
1073 geom.cylinders = p->cyl;
1074 geom.start = get_start_sect(inode->i_bdev);
1075 if (copy_to_user((void __user *)arg, &geom, sizeof(geom)))
1076 return -EFAULT;
1077 return 0;
1078}
1079
1080static void ps2esdi_reset_timer(unsigned long unused)
1081{
1082
1083 int status;
1084
1085 status = inb(ESDI_INTRPT);
1086 if ((status & 0xf) == INT_RESET) {
1087 outb((status & 0xe0) | ATT_EOI, ESDI_ATTN);
1088 outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
1089 reset_status = 1;
1090 }
1091 wake_up(&ps2esdi_int);
1092}
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
new file mode 100644
index 000000000000..145c1fbffe01
--- /dev/null
+++ b/drivers/block/rd.c
@@ -0,0 +1,515 @@
1/*
2 * ramdisk.c - Multiple RAM disk driver - gzip-loading version - v. 0.8 beta.
3 *
4 * (C) Chad Page, Theodore Ts'o, et al., 1995.
5 *
6 * This RAM disk is designed to have filesystems created on it and mounted
7 * just like a regular floppy disk.
8 *
9 * It also does something suggested by Linus: use the buffer cache as the
10 * RAM disk data. This makes it possible to dynamically allocate the RAM disk
11 * buffer - with some consequences I have to deal with as I write this.
12 *
13 * This code is based on the original ramdisk.c, written mostly by
14 * Theodore Ts'o (TYT) in 1991. The code was largely rewritten by
15 * Chad Page to use the buffer cache to store the RAM disk data in
16 * 1995; Theodore then took over the driver again, and cleaned it up
17 * for inclusion in the mainline kernel.
18 *
19 * The original CRAMDISK code was written by Richard Lyons, and
20 * adapted by Chad Page to use the new RAM disk interface. Theodore
21 * Ts'o rewrote it so that both the compressed RAM disk loader and the
22 * kernel decompressor use the same inflate.c codebase. The RAM disk
23 * loader now also loads into a dynamic (buffer cache based) RAM disk,
24 * not the old static RAM disk. Support for the old static RAM disk has
25 * been completely removed.
26 *
27 * Loadable module support added by Tom Dyas.
28 *
29 * Further cleanups by Chad Page (page0588@sundance.sjsu.edu):
30 * Cosmetic changes in #ifdef MODULE, code movement, etc.
31 * When the RAM disk module is removed, free the protected buffers
32 * Default RAM disk size changed to 2.88 MB
33 *
34 * Added initrd: Werner Almesberger & Hans Lermen, Feb '96
35 *
36 * 4/25/96 : Made RAM disk size a parameter (default is now 4 MB)
37 * - Chad Page
38 *
39 * Add support for fs images split across >1 disk, Paul Gortmaker, Mar '98
40 *
41 * Make block size and block size shift for RAM disks a global macro
42 * and set blk_size for -ENOSPC, Werner Fink <werner@suse.de>, Apr '99
43 */
44
45#include <linux/config.h>
46#include <linux/string.h>
47#include <linux/slab.h>
48#include <asm/atomic.h>
49#include <linux/bio.h>
50#include <linux/module.h>
51#include <linux/moduleparam.h>
52#include <linux/init.h>
53#include <linux/devfs_fs_kernel.h>
54#include <linux/pagemap.h>
55#include <linux/blkdev.h>
56#include <linux/genhd.h>
57#include <linux/buffer_head.h> /* for invalidate_bdev() */
58#include <linux/backing-dev.h>
59#include <linux/blkpg.h>
60#include <linux/writeback.h>
61
62#include <asm/uaccess.h>
63
64/* Various static variables go here. Most are used only in the RAM disk code.
65 */
66
67static struct gendisk *rd_disks[CONFIG_BLK_DEV_RAM_COUNT];
68static struct block_device *rd_bdev[CONFIG_BLK_DEV_RAM_COUNT];/* Protected device data */
69static struct request_queue *rd_queue[CONFIG_BLK_DEV_RAM_COUNT];
70
71/*
72 * Parameters for the boot-loading of the RAM disk. These are set by
73 * init/main.c (from arguments to the kernel command line) or from the
74 * architecture-specific setup routine (from the stored boot sector
75 * information).
76 */
77int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */
78/*
79 * It would be very desirable to have a soft-blocksize (that in the case
80 * of the ramdisk driver is also the hardblocksize ;) of PAGE_SIZE because
81 * doing that we'll achieve a far better MM footprint. Using a rd_blocksize of
82 * BLOCK_SIZE in the worst case we'll make PAGE_SIZE/BLOCK_SIZE buffer-pages
83 * unfreeable. With a rd_blocksize of PAGE_SIZE instead we are sure that only
84 * 1 page will be protected. Depending on the size of the ramdisk you
85 * may want to change the ramdisk blocksize to achieve a better or worse MM
86 * behaviour. The default is still BLOCK_SIZE (needed by rd_load_image that
87 * supposes the filesystem in the image uses a BLOCK_SIZE blocksize).
88 */
89static int rd_blocksize = BLOCK_SIZE; /* blocksize of the RAM disks */
90
91/*
92 * Copyright (C) 2000 Linus Torvalds.
93 * 2000 Transmeta Corp.
94 * aops copied from ramfs.
95 */
96
97/*
98 * If a ramdisk page has buffers, some may be uptodate and some may be not.
99 * To bring the page uptodate we zero out the non-uptodate buffers. The
100 * page must be locked.
101 */
102static void make_page_uptodate(struct page *page)
103{
104 if (page_has_buffers(page)) {
105 struct buffer_head *bh = page_buffers(page);
106 struct buffer_head *head = bh;
107
108 do {
109 if (!buffer_uptodate(bh)) {
110 memset(bh->b_data, 0, bh->b_size);
111 /*
112 * akpm: I'm totally undecided about this. The
113 * buffer has just been magically brought "up to
114 * date", but nobody should want to be reading
115 * it anyway, because it hasn't been used for
116 * anything yet. It is still in a "not read
117 * from disk yet" state.
118 *
119 * But non-uptodate buffers against an uptodate
120 * page are against the rules. So do it anyway.
121 */
122 set_buffer_uptodate(bh);
123 }
124 } while ((bh = bh->b_this_page) != head);
125 } else {
126 memset(page_address(page), 0, PAGE_CACHE_SIZE);
127 }
128 flush_dcache_page(page);
129 SetPageUptodate(page);
130}
131
132static int ramdisk_readpage(struct file *file, struct page *page)
133{
134 if (!PageUptodate(page))
135 make_page_uptodate(page);
136 unlock_page(page);
137 return 0;
138}
139
140static int ramdisk_prepare_write(struct file *file, struct page *page,
141 unsigned offset, unsigned to)
142{
143 if (!PageUptodate(page))
144 make_page_uptodate(page);
145 return 0;
146}
147
148static int ramdisk_commit_write(struct file *file, struct page *page,
149 unsigned offset, unsigned to)
150{
151 set_page_dirty(page);
152 return 0;
153}
154
155/*
156 * ->writepage to the blockdev's mapping has to redirty the page so that the
157 * VM doesn't go and steal it. We return WRITEPAGE_ACTIVATE so that the VM
158 * won't try to (pointlessly) write the page again for a while.
159 *
160 * Really, these pages should not be on the LRU at all.
161 */
162static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
163{
164 if (!PageUptodate(page))
165 make_page_uptodate(page);
166 SetPageDirty(page);
167 if (wbc->for_reclaim)
168 return WRITEPAGE_ACTIVATE;
169 unlock_page(page);
170 return 0;
171}
172
173/*
174 * This is a little speedup thing: short-circuit attempts to write back the
175 * ramdisk blockdev inode to its non-existent backing store.
176 */
177static int ramdisk_writepages(struct address_space *mapping,
178 struct writeback_control *wbc)
179{
180 return 0;
181}
182
183/*
184 * ramdisk blockdev pages have their own ->set_page_dirty() because we don't
185 * want them to contribute to dirty memory accounting.
186 */
187static int ramdisk_set_page_dirty(struct page *page)
188{
189 SetPageDirty(page);
190 return 0;
191}
192
193static struct address_space_operations ramdisk_aops = {
194 .readpage = ramdisk_readpage,
195 .prepare_write = ramdisk_prepare_write,
196 .commit_write = ramdisk_commit_write,
197 .writepage = ramdisk_writepage,
198 .set_page_dirty = ramdisk_set_page_dirty,
199 .writepages = ramdisk_writepages,
200};
201
202static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
203 struct address_space *mapping)
204{
205 pgoff_t index = sector >> (PAGE_CACHE_SHIFT - 9);
206 unsigned int vec_offset = vec->bv_offset;
207 int offset = (sector << 9) & ~PAGE_CACHE_MASK;
208 int size = vec->bv_len;
209 int err = 0;
210
211 do {
212 int count;
213 struct page *page;
214 char *src;
215 char *dst;
216
217 count = PAGE_CACHE_SIZE - offset;
218 if (count > size)
219 count = size;
220 size -= count;
221
222 page = grab_cache_page(mapping, index);
223 if (!page) {
224 err = -ENOMEM;
225 goto out;
226 }
227
228 if (!PageUptodate(page))
229 make_page_uptodate(page);
230
231 index++;
232
233 if (rw == READ) {
234 src = kmap_atomic(page, KM_USER0) + offset;
235 dst = kmap_atomic(vec->bv_page, KM_USER1) + vec_offset;
236 } else {
237 src = kmap_atomic(vec->bv_page, KM_USER0) + vec_offset;
238 dst = kmap_atomic(page, KM_USER1) + offset;
239 }
240 offset = 0;
241 vec_offset += count;
242
243 memcpy(dst, src, count);
244
245 kunmap_atomic(src, KM_USER0);
246 kunmap_atomic(dst, KM_USER1);
247
248 if (rw == READ)
249 flush_dcache_page(vec->bv_page);
250 else
251 set_page_dirty(page);
252 unlock_page(page);
253 put_page(page);
254 } while (size);
255
256 out:
257 return err;
258}
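A quick worked example of the index/offset arithmetic in rd_blkdev_pagecache_IO(), assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12, so the shift used is 12 - 9 = 3): a transfer starting at sector 9 maps to pagecache page 9 >> 3 = 1, at byte offset (9 << 9) & 4095 = 512 within that page. When a bio_vec crosses a page boundary, the loop simply resets offset to 0 for the next page and advances vec_offset by the bytes already copied.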
259
260/*
261 * Basically, my strategy here is to set up a buffer-head which can't be
262 * deleted, and make that my Ramdisk. If the request is outside of the
263 * allocated size, we must get rid of it...
264 *
265 * 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support
266 *
267 */
268static int rd_make_request(request_queue_t *q, struct bio *bio)
269{
270 struct block_device *bdev = bio->bi_bdev;
271 struct address_space * mapping = bdev->bd_inode->i_mapping;
272 sector_t sector = bio->bi_sector;
273 unsigned long len = bio->bi_size >> 9;
274 int rw = bio_data_dir(bio);
275 struct bio_vec *bvec;
276 int ret = 0, i;
277
278 if (sector + len > get_capacity(bdev->bd_disk))
279 goto fail;
280
281 if (rw==READA)
282 rw=READ;
283
284 bio_for_each_segment(bvec, bio, i) {
285 ret |= rd_blkdev_pagecache_IO(rw, bvec, sector, mapping);
286 sector += bvec->bv_len >> 9;
287 }
288 if (ret)
289 goto fail;
290
291 bio_endio(bio, bio->bi_size, 0);
292 return 0;
293fail:
294 bio_io_error(bio, bio->bi_size);
295 return 0;
296}
297
298static int rd_ioctl(struct inode *inode, struct file *file,
299 unsigned int cmd, unsigned long arg)
300{
301 int error;
302 struct block_device *bdev = inode->i_bdev;
303
304 if (cmd != BLKFLSBUF)
305 return -ENOTTY;
306
307 /*
308 * special: we want to release the ramdisk memory; it's not like with
309 * the other blockdevices where this ioctl only flushes away the buffer
310 * cache
311 */
312 error = -EBUSY;
313 down(&bdev->bd_sem);
314 if (bdev->bd_openers <= 2) {
315 truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
316 error = 0;
317 }
318 up(&bdev->bd_sem);
319 return error;
320}
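To make the BLKFLSBUF comment above concrete, here is a minimal userspace sketch (not part of this driver) that exercises the special path and drops a RAM disk's pages; the /dev/ram0 node and the error handling are illustrative assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>                    /* BLKFLSBUF */

int main(void)
{
        int fd = open("/dev/ram0", O_RDWR);     /* assumed device node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* For rd.c this does more than flush buffers: it truncates the
         * backing pagecache, freeing the RAM disk's memory. */
        if (ioctl(fd, BLKFLSBUF, 0) < 0)
                perror("BLKFLSBUF");
        close(fd);
        return 0;
}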
321
322/*
323 * This is the backing_dev_info for the blockdev inode itself. It doesn't need
324 * writeback and it does not contribute to dirty memory accounting.
325 */
326static struct backing_dev_info rd_backing_dev_info = {
327 .ra_pages = 0, /* No readahead */
328 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK | BDI_CAP_MAP_COPY,
329 .unplug_io_fn = default_unplug_io_fn,
330};
331
332/*
333 * This is the backing_dev_info for the files which live atop the ramdisk
334 * "device". These files do need writeback and they do contribute to dirty
335 * memory accounting.
336 */
337static struct backing_dev_info rd_file_backing_dev_info = {
338 .ra_pages = 0, /* No readahead */
339 .capabilities = BDI_CAP_MAP_COPY, /* Does contribute to dirty memory */
340 .unplug_io_fn = default_unplug_io_fn,
341};
342
343static int rd_open(struct inode *inode, struct file *filp)
344{
345 unsigned unit = iminor(inode);
346
347 if (rd_bdev[unit] == NULL) {
348 struct block_device *bdev = inode->i_bdev;
349 struct address_space *mapping;
350 unsigned bsize;
351 int gfp_mask;
352
353 inode = igrab(bdev->bd_inode);
354 rd_bdev[unit] = bdev;
355 bdev->bd_openers++;
356 bsize = bdev_hardsect_size(bdev);
357 bdev->bd_block_size = bsize;
358 inode->i_blkbits = blksize_bits(bsize);
359 inode->i_size = get_capacity(bdev->bd_disk)<<9;
360
361 mapping = inode->i_mapping;
362 mapping->a_ops = &ramdisk_aops;
363 mapping->backing_dev_info = &rd_backing_dev_info;
364 bdev->bd_inode_backing_dev_info = &rd_file_backing_dev_info;
365
366 /*
367 * Deep badness. rd_blkdev_pagecache_IO() needs to allocate
368 * pagecache pages within a request_fn. We cannot recur back
369 * into the filesytem which is mounted atop the ramdisk, because
370 * that would deadlock on fs locks. And we really don't want
371 * to reenter rd_blkdev_pagecache_IO when we're already within
372 * that function.
373 *
374 * So we turn off __GFP_FS and __GFP_IO.
375 *
376 * And to give this thing a hope of working, turn on __GFP_HIGH.
377 * Hopefully, there's enough regular memory allocation going on
378 * for the page allocator emergency pools to keep the ramdisk
379 * driver happy.
380 */
381 gfp_mask = mapping_gfp_mask(mapping);
382 gfp_mask &= ~(__GFP_FS|__GFP_IO);
383 gfp_mask |= __GFP_HIGH;
384 mapping_set_gfp_mask(mapping, gfp_mask);
385 }
386
387 return 0;
388}
389
390static struct block_device_operations rd_bd_op = {
391 .owner = THIS_MODULE,
392 .open = rd_open,
393 .ioctl = rd_ioctl,
394};
395
396/*
397 * Before freeing the module, invalidate all of the protected buffers!
398 */
399static void __exit rd_cleanup(void)
400{
401 int i;
402
403 for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
404 struct block_device *bdev = rd_bdev[i];
405 rd_bdev[i] = NULL;
406 if (bdev) {
407 invalidate_bdev(bdev, 1);
408 blkdev_put(bdev);
409 }
410 del_gendisk(rd_disks[i]);
411 put_disk(rd_disks[i]);
412 blk_cleanup_queue(rd_queue[i]);
413 }
414 devfs_remove("rd");
415 unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
416}
417
418/*
419 * This is the registration and initialization section of the RAM disk driver
420 */
421static int __init rd_init(void)
422{
423 int i;
424 int err = -ENOMEM;
425
426 if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
427 (rd_blocksize & (rd_blocksize-1))) {
428 printk("RAMDISK: wrong blocksize %d, reverting to defaults\n",
429 rd_blocksize);
430 rd_blocksize = BLOCK_SIZE;
431 }
432
433 for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
434 rd_disks[i] = alloc_disk(1);
435 if (!rd_disks[i])
436 goto out;
437 }
438
439 if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) {
440 err = -EIO;
441 goto out;
442 }
443
444 devfs_mk_dir("rd");
445
446 for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
447 struct gendisk *disk = rd_disks[i];
448
449 rd_queue[i] = blk_alloc_queue(GFP_KERNEL);
450 if (!rd_queue[i])
451 goto out_queue;
452
453 blk_queue_make_request(rd_queue[i], &rd_make_request);
454 blk_queue_hardsect_size(rd_queue[i], rd_blocksize);
455
456 /* rd_size is given in kB */
457 disk->major = RAMDISK_MAJOR;
458 disk->first_minor = i;
459 disk->fops = &rd_bd_op;
460 disk->queue = rd_queue[i];
461 disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
462 sprintf(disk->disk_name, "ram%d", i);
463 sprintf(disk->devfs_name, "rd/%d", i);
464 set_capacity(disk, rd_size * 2);
465 add_disk(rd_disks[i]);
466 }
467
468 /* rd_size is given in kB */
469 printk("RAMDISK driver initialized: "
470 "%d RAM disks of %dK size %d blocksize\n",
471 CONFIG_BLK_DEV_RAM_COUNT, rd_size, rd_blocksize);
472
473 return 0;
474out_queue:
475 unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
476out:
477 while (i--) {
478 put_disk(rd_disks[i]);
479 blk_cleanup_queue(rd_queue[i]);
480 }
481 return err;
482}
483
484module_init(rd_init);
485module_exit(rd_cleanup);
486
487/* options - nonmodular */
488#ifndef MODULE
489static int __init ramdisk_size(char *str)
490{
491 rd_size = simple_strtol(str,NULL,0);
492 return 1;
493}
494static int __init ramdisk_size2(char *str) /* kludge */
495{
496 return ramdisk_size(str);
497}
498static int __init ramdisk_blocksize(char *str)
499{
500 rd_blocksize = simple_strtol(str,NULL,0);
501 return 1;
502}
503__setup("ramdisk=", ramdisk_size);
504__setup("ramdisk_size=", ramdisk_size2);
505__setup("ramdisk_blocksize=", ramdisk_blocksize);
506#endif
507
508/* options - modular */
509module_param(rd_size, int, 0);
510MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
511module_param(rd_blocksize, int, 0);
512MODULE_PARM_DESC(rd_blocksize, "Blocksize of each RAM disk in bytes.");
513MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
514
515MODULE_LICENSE("GPL");
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
new file mode 100644
index 000000000000..689527a89de7
--- /dev/null
+++ b/drivers/block/scsi_ioctl.c
@@ -0,0 +1,580 @@
1/*
2 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
17 *
18 */
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/string.h>
22#include <linux/module.h>
23#include <linux/blkdev.h>
24#include <linux/completion.h>
25#include <linux/cdrom.h>
26#include <linux/slab.h>
27#include <linux/times.h>
28#include <asm/uaccess.h>
29
30#include <scsi/scsi.h>
31#include <scsi/scsi_ioctl.h>
32#include <scsi/scsi_cmnd.h>
33
34/* Command group 3 is reserved and should never be used. */
35const unsigned char scsi_command_size[8] =
36{
37 6, 10, 10, 12,
38 16, 12, 10, 10
39};
40
41EXPORT_SYMBOL(scsi_command_size);
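A minimal sketch of how this table is consumed (this is the indexing the COMMAND_SIZE() macro used in sg_scsi_ioctl() below performs): the top three bits of a SCSI opcode select the command group, and the table supplies the CDB length for that group.

static inline int example_cdb_length(unsigned char opcode)
{
        /* group = top 3 bits of the opcode; group 3 is the reserved one */
        return scsi_command_size[(opcode >> 5) & 7];
}

/* e.g. READ_6 (0x08) is group 0 -> 6 bytes, READ_10 (0x28) is group 1 -> 10 */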
42
43#define BLK_DEFAULT_TIMEOUT (60 * HZ)
44
45#include <scsi/sg.h>
46
47static int sg_get_version(int __user *p)
48{
49 static int sg_version_num = 30527;
50 return put_user(sg_version_num, p);
51}
52
53static int scsi_get_idlun(request_queue_t *q, int __user *p)
54{
55 return put_user(0, p);
56}
57
58static int scsi_get_bus(request_queue_t *q, int __user *p)
59{
60 return put_user(0, p);
61}
62
63static int sg_get_timeout(request_queue_t *q)
64{
65 return q->sg_timeout / (HZ / USER_HZ);
66}
67
68static int sg_set_timeout(request_queue_t *q, int __user *p)
69{
70 int timeout, err = get_user(timeout, p);
71
72 if (!err)
73 q->sg_timeout = timeout * (HZ / USER_HZ);
74
75 return err;
76}
77
78static int sg_get_reserved_size(request_queue_t *q, int __user *p)
79{
80 return put_user(q->sg_reserved_size, p);
81}
82
83static int sg_set_reserved_size(request_queue_t *q, int __user *p)
84{
85 int size, err = get_user(size, p);
86
87 if (err)
88 return err;
89
90 if (size < 0)
91 return -EINVAL;
92 if (size > (q->max_sectors << 9))
93 size = q->max_sectors << 9;
94
95 q->sg_reserved_size = size;
96 return 0;
97}
98
99/*
100 * will always return that we are ATAPI even for a real SCSI drive; I'm not
101 * so sure this is worth doing anything about (why would you care??)
102 */
103static int sg_emulated_host(request_queue_t *q, int __user *p)
104{
105 return put_user(1, p);
106}
107
108#define CMD_READ_SAFE 0x01
109#define CMD_WRITE_SAFE 0x02
110#define CMD_WARNED 0x04
111#define safe_for_read(cmd) [cmd] = CMD_READ_SAFE
112#define safe_for_write(cmd) [cmd] = CMD_WRITE_SAFE
113
114static int verify_command(struct file *file, unsigned char *cmd)
115{
116 static unsigned char cmd_type[256] = {
117
118 /* Basic read-only commands */
119 safe_for_read(TEST_UNIT_READY),
120 safe_for_read(REQUEST_SENSE),
121 safe_for_read(READ_6),
122 safe_for_read(READ_10),
123 safe_for_read(READ_12),
124 safe_for_read(READ_16),
125 safe_for_read(READ_BUFFER),
126 safe_for_read(READ_LONG),
127 safe_for_read(INQUIRY),
128 safe_for_read(MODE_SENSE),
129 safe_for_read(MODE_SENSE_10),
130 safe_for_read(LOG_SENSE),
131 safe_for_read(START_STOP),
132 safe_for_read(GPCMD_VERIFY_10),
133 safe_for_read(VERIFY_16),
134
135 /* Audio CD commands */
136 safe_for_read(GPCMD_PLAY_CD),
137 safe_for_read(GPCMD_PLAY_AUDIO_10),
138 safe_for_read(GPCMD_PLAY_AUDIO_MSF),
139 safe_for_read(GPCMD_PLAY_AUDIO_TI),
140 safe_for_read(GPCMD_PAUSE_RESUME),
141
142 /* CD/DVD data reading */
143 safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
144 safe_for_read(GPCMD_READ_CD),
145 safe_for_read(GPCMD_READ_CD_MSF),
146 safe_for_read(GPCMD_READ_DISC_INFO),
147 safe_for_read(GPCMD_READ_CDVD_CAPACITY),
148 safe_for_read(GPCMD_READ_DVD_STRUCTURE),
149 safe_for_read(GPCMD_READ_HEADER),
150 safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
151 safe_for_read(GPCMD_READ_SUBCHANNEL),
152 safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
153 safe_for_read(GPCMD_REPORT_KEY),
154 safe_for_read(GPCMD_SCAN),
155 safe_for_read(GPCMD_GET_CONFIGURATION),
156 safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
157 safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
158 safe_for_read(GPCMD_GET_PERFORMANCE),
159 safe_for_read(GPCMD_SEEK),
160 safe_for_read(GPCMD_STOP_PLAY_SCAN),
161
162 /* Basic writing commands */
163 safe_for_write(WRITE_6),
164 safe_for_write(WRITE_10),
165 safe_for_write(WRITE_VERIFY),
166 safe_for_write(WRITE_12),
167 safe_for_write(WRITE_VERIFY_12),
168 safe_for_write(WRITE_16),
169 safe_for_write(WRITE_LONG),
170 safe_for_write(ERASE),
171 safe_for_write(GPCMD_MODE_SELECT_10),
172 safe_for_write(MODE_SELECT),
173 safe_for_write(LOG_SELECT),
174 safe_for_write(GPCMD_BLANK),
175 safe_for_write(GPCMD_CLOSE_TRACK),
176 safe_for_write(GPCMD_FLUSH_CACHE),
177 safe_for_write(GPCMD_FORMAT_UNIT),
178 safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
179 safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
180 safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
181 safe_for_write(GPCMD_SEND_EVENT),
182 safe_for_write(GPCMD_SEND_KEY),
183 safe_for_write(GPCMD_SEND_OPC),
184 safe_for_write(GPCMD_SEND_CUE_SHEET),
185 safe_for_write(GPCMD_SET_SPEED),
186 safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
187 safe_for_write(GPCMD_LOAD_UNLOAD),
188 safe_for_write(GPCMD_SET_STREAMING),
189 };
190 unsigned char type = cmd_type[cmd[0]];
191
192 /* Anybody who can open the device can do a read-safe command */
193 if (type & CMD_READ_SAFE)
194 return 0;
195
196 /* Write-safe commands just require a writable open.. */
197 if (type & CMD_WRITE_SAFE) {
198 if (file->f_mode & FMODE_WRITE)
199 return 0;
200 }
201
202 if (!type) {
203 cmd_type[cmd[0]] = CMD_WARNED;
204 printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
205 }
206
207 /* And root can do any command.. */
208 if (capable(CAP_SYS_RAWIO))
209 return 0;
210
211 /* Otherwise fail it with an "Operation not permitted" */
212 return -EPERM;
213}
214
215static int sg_io(struct file *file, request_queue_t *q,
216 struct gendisk *bd_disk, struct sg_io_hdr *hdr)
217{
218 unsigned long start_time;
219 int reading, writing;
220 struct request *rq;
221 struct bio *bio;
222 char sense[SCSI_SENSE_BUFFERSIZE];
223 unsigned char cmd[BLK_MAX_CDB];
224
225 if (hdr->interface_id != 'S')
226 return -EINVAL;
227 if (hdr->cmd_len > BLK_MAX_CDB)
228 return -EINVAL;
229 if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
230 return -EFAULT;
231 if (verify_command(file, cmd))
232 return -EPERM;
233
234 /*
235 * we'll do that later
236 */
237 if (hdr->iovec_count)
238 return -EOPNOTSUPP;
239
240 if (hdr->dxfer_len > (q->max_sectors << 9))
241 return -EIO;
242
243 reading = writing = 0;
244 if (hdr->dxfer_len) {
245 switch (hdr->dxfer_direction) {
246 default:
247 return -EINVAL;
248 case SG_DXFER_TO_FROM_DEV:
249 reading = 1;
250 /* fall through */
251 case SG_DXFER_TO_DEV:
252 writing = 1;
253 break;
254 case SG_DXFER_FROM_DEV:
255 reading = 1;
256 break;
257 }
258
259 rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
260 hdr->dxfer_len);
261
262 if (IS_ERR(rq))
263 return PTR_ERR(rq);
264 } else
265 rq = blk_get_request(q, READ, __GFP_WAIT);
266
267 /*
268 * fill in request structure
269 */
270 rq->cmd_len = hdr->cmd_len;
271 memcpy(rq->cmd, cmd, hdr->cmd_len);
272 if (sizeof(rq->cmd) != hdr->cmd_len)
273 memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
274
275 memset(sense, 0, sizeof(sense));
276 rq->sense = sense;
277 rq->sense_len = 0;
278
279 rq->flags |= REQ_BLOCK_PC;
280 bio = rq->bio;
281
282 /*
283 * bounce this after holding a reference to the original bio, it's
284 * needed for proper unmapping
285 */
286 if (rq->bio)
287 blk_queue_bounce(q, &rq->bio);
288
289 rq->timeout = (hdr->timeout * HZ) / 1000;
290 if (!rq->timeout)
291 rq->timeout = q->sg_timeout;
292 if (!rq->timeout)
293 rq->timeout = BLK_DEFAULT_TIMEOUT;
294
295 start_time = jiffies;
296
297 /* ignore return value. All information is passed back to caller
298 * (if he doesn't check, that is his problem).
299 * N.B. a non-zero SCSI status is _not_ necessarily an error.
300 */
301 blk_execute_rq(q, bd_disk, rq);
302
303 /* write to all output members */
304 hdr->status = 0xff & rq->errors;
305 hdr->masked_status = status_byte(rq->errors);
306 hdr->msg_status = msg_byte(rq->errors);
307 hdr->host_status = host_byte(rq->errors);
308 hdr->driver_status = driver_byte(rq->errors);
309 hdr->info = 0;
310 if (hdr->masked_status || hdr->host_status || hdr->driver_status)
311 hdr->info |= SG_INFO_CHECK;
312 hdr->resid = rq->data_len;
313 hdr->duration = ((jiffies - start_time) * 1000) / HZ;
314 hdr->sb_len_wr = 0;
315
316 if (rq->sense_len && hdr->sbp) {
317 int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
318
319 if (!copy_to_user(hdr->sbp, rq->sense, len))
320 hdr->sb_len_wr = len;
321 }
322
323 if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
324 return -EFAULT;
325
326 /* may not have succeeded, but output values written to control
327 * structure (struct sg_io_hdr). */
328 return 0;
329}
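A hedged userspace sketch of driving the sg_io() path above through the SG_IO ioctl, here issuing a 6-byte INQUIRY; the device node, buffer sizes and timeout are illustrative assumptions, not requirements of this code.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
        unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };   /* INQUIRY */
        unsigned char buf[96], sense[32];
        struct sg_io_hdr hdr;
        int fd = open("/dev/sr0", O_RDONLY);               /* assumed node */

        if (fd < 0) {
                perror("open");
                return 1;
        }
        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';            /* checked by sg_io() above */
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.dxfer_len = sizeof(buf);
        hdr.dxferp = buf;
        hdr.sbp = sense;
        hdr.mx_sb_len = sizeof(sense);
        hdr.timeout = 5000;                /* ms, converted to jiffies above */

        if (ioctl(fd, SG_IO, &hdr) < 0)
                perror("SG_IO");
        else
                printf("status 0x%x, %d sense bytes\n",
                       hdr.status, hdr.sb_len_wr);
        close(fd);
        return 0;
}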
330
331#define FORMAT_UNIT_TIMEOUT (2 * 60 * 60 * HZ)
332#define START_STOP_TIMEOUT (60 * HZ)
333#define MOVE_MEDIUM_TIMEOUT (5 * 60 * HZ)
334#define READ_ELEMENT_STATUS_TIMEOUT (5 * 60 * HZ)
335#define READ_DEFECT_DATA_TIMEOUT (60 * HZ )
336#define OMAX_SB_LEN 16 /* For backward compatibility */
337
338static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
339 struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
340{
341 struct request *rq;
342 int err;
343 unsigned int in_len, out_len, bytes, opcode, cmdlen;
344 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
345
346 /*
347 * get in and out lengths, and verify they don't exceed a page worth of data
348 */
349 if (get_user(in_len, &sic->inlen))
350 return -EFAULT;
351 if (get_user(out_len, &sic->outlen))
352 return -EFAULT;
353 if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
354 return -EINVAL;
355 if (get_user(opcode, sic->data))
356 return -EFAULT;
357
358 bytes = max(in_len, out_len);
359 if (bytes) {
360 buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
361 if (!buffer)
362 return -ENOMEM;
363
364 memset(buffer, 0, bytes);
365 }
366
367 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
368
369 cmdlen = COMMAND_SIZE(opcode);
370
371 /*
372 * get command and data to send to device, if any
373 */
374 err = -EFAULT;
375 rq->cmd_len = cmdlen;
376 if (copy_from_user(rq->cmd, sic->data, cmdlen))
377 goto error;
378
379 if (copy_from_user(buffer, sic->data + cmdlen, in_len))
380 goto error;
381
382 err = verify_command(file, rq->cmd);
383 if (err)
384 goto error;
385
386 switch (opcode) {
387 case SEND_DIAGNOSTIC:
388 case FORMAT_UNIT:
389 rq->timeout = FORMAT_UNIT_TIMEOUT;
390 break;
391 case START_STOP:
392 rq->timeout = START_STOP_TIMEOUT;
393 break;
394 case MOVE_MEDIUM:
395 rq->timeout = MOVE_MEDIUM_TIMEOUT;
396 break;
397 case READ_ELEMENT_STATUS:
398 rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
399 break;
400 case READ_DEFECT_DATA:
401 rq->timeout = READ_DEFECT_DATA_TIMEOUT;
402 break;
403 default:
404 rq->timeout = BLK_DEFAULT_TIMEOUT;
405 break;
406 }
407
408 memset(sense, 0, sizeof(sense));
409 rq->sense = sense;
410 rq->sense_len = 0;
411
412 rq->data = buffer;
413 rq->data_len = bytes;
414 rq->flags |= REQ_BLOCK_PC;
415
416 blk_execute_rq(q, bd_disk, rq);
417 err = rq->errors & 0xff; /* only 8 bit SCSI status */
418 if (err) {
419 if (rq->sense_len && rq->sense) {
420 bytes = (OMAX_SB_LEN > rq->sense_len) ?
421 rq->sense_len : OMAX_SB_LEN;
422 if (copy_to_user(sic->data, rq->sense, bytes))
423 err = -EFAULT;
424 }
425 } else {
426 if (copy_to_user(sic->data, buffer, out_len))
427 err = -EFAULT;
428 }
429
430error:
431 kfree(buffer);
432 blk_put_request(rq);
433 return err;
434}
435
436int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
437{
438 request_queue_t *q;
439 struct request *rq;
440 int close = 0, err;
441
442 q = bd_disk->queue;
443 if (!q)
444 return -ENXIO;
445
446 if (blk_get_queue(q))
447 return -ENXIO;
448
449 switch (cmd) {
450 /*
451 * new sgv3 interface
452 */
453 case SG_GET_VERSION_NUM:
454 err = sg_get_version(arg);
455 break;
456 case SCSI_IOCTL_GET_IDLUN:
457 err = scsi_get_idlun(q, arg);
458 break;
459 case SCSI_IOCTL_GET_BUS_NUMBER:
460 err = scsi_get_bus(q, arg);
461 break;
462 case SG_SET_TIMEOUT:
463 err = sg_set_timeout(q, arg);
464 break;
465 case SG_GET_TIMEOUT:
466 err = sg_get_timeout(q);
467 break;
468 case SG_GET_RESERVED_SIZE:
469 err = sg_get_reserved_size(q, arg);
470 break;
471 case SG_SET_RESERVED_SIZE:
472 err = sg_set_reserved_size(q, arg);
473 break;
474 case SG_EMULATED_HOST:
475 err = sg_emulated_host(q, arg);
476 break;
477 case SG_IO: {
478 struct sg_io_hdr hdr;
479
480 err = -EFAULT;
481 if (copy_from_user(&hdr, arg, sizeof(hdr)))
482 break;
483 err = sg_io(file, q, bd_disk, &hdr);
484 if (err == -EFAULT)
485 break;
486
487 if (copy_to_user(arg, &hdr, sizeof(hdr)))
488 err = -EFAULT;
489 break;
490 }
491 case CDROM_SEND_PACKET: {
492 struct cdrom_generic_command cgc;
493 struct sg_io_hdr hdr;
494
495 err = -EFAULT;
496 if (copy_from_user(&cgc, arg, sizeof(cgc)))
497 break;
498 cgc.timeout = clock_t_to_jiffies(cgc.timeout);
499 memset(&hdr, 0, sizeof(hdr));
500 hdr.interface_id = 'S';
501 hdr.cmd_len = sizeof(cgc.cmd);
502 hdr.dxfer_len = cgc.buflen;
503 err = 0;
504 switch (cgc.data_direction) {
505 case CGC_DATA_UNKNOWN:
506 hdr.dxfer_direction = SG_DXFER_UNKNOWN;
507 break;
508 case CGC_DATA_WRITE:
509 hdr.dxfer_direction = SG_DXFER_TO_DEV;
510 break;
511 case CGC_DATA_READ:
512 hdr.dxfer_direction = SG_DXFER_FROM_DEV;
513 break;
514 case CGC_DATA_NONE:
515 hdr.dxfer_direction = SG_DXFER_NONE;
516 break;
517 default:
518 err = -EINVAL;
519 }
520 if (err)
521 break;
522
523 hdr.dxferp = cgc.buffer;
524 hdr.sbp = cgc.sense;
525 if (hdr.sbp)
526 hdr.mx_sb_len = sizeof(struct request_sense);
527 hdr.timeout = cgc.timeout;
528 hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
529 hdr.cmd_len = sizeof(cgc.cmd);
530
531 err = sg_io(file, q, bd_disk, &hdr);
532 if (err == -EFAULT)
533 break;
534
535 if (hdr.status)
536 err = -EIO;
537
538 cgc.stat = err;
539 cgc.buflen = hdr.resid;
540 if (copy_to_user(arg, &cgc, sizeof(cgc)))
541 err = -EFAULT;
542
543 break;
544 }
545
546 /*
547 * old junk scsi send command ioctl
548 */
549 case SCSI_IOCTL_SEND_COMMAND:
550 printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
551 err = -EINVAL;
552 if (!arg)
553 break;
554
555 err = sg_scsi_ioctl(file, q, bd_disk, arg);
556 break;
557 case CDROMCLOSETRAY:
558 close = 1;
559 case CDROMEJECT:
560 rq = blk_get_request(q, WRITE, __GFP_WAIT);
561 rq->flags |= REQ_BLOCK_PC;
562 rq->data = NULL;
563 rq->data_len = 0;
564 rq->timeout = BLK_DEFAULT_TIMEOUT;
565 memset(rq->cmd, 0, sizeof(rq->cmd));
566 rq->cmd[0] = GPCMD_START_STOP_UNIT;
567 rq->cmd[4] = 0x02 + (close != 0);
568 rq->cmd_len = 6;
569 err = blk_execute_rq(q, bd_disk, rq);
570 blk_put_request(rq);
571 break;
572 default:
573 err = -ENOTTY;
574 }
575
576 blk_put_queue(q);
577 return err;
578}
579
580EXPORT_SYMBOL(scsi_cmd_ioctl);
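For context, a block driver typically hands its SG/CDROM-style ioctls to this exported helper from its block_device_operations ->ioctl method. A minimal sketch under that assumption (mydisk_ioctl is hypothetical, not a function in this tree):

static int mydisk_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = inode->i_bdev->bd_disk;

        /* anything scsi_cmd_ioctl() does not recognise comes back as
         * -ENOTTY, so the driver could still handle its own commands
         * afterwards if it wanted to */
        return scsi_cmd_ioctl(file, disk, cmd, (void __user *)arg);
}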
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
new file mode 100644
index 000000000000..a0b403a6b4ed
--- /dev/null
+++ b/drivers/block/smart1,2.h
@@ -0,0 +1,278 @@
1/*
2 * Disk Array driver for Compaq SMART2 Controllers
3 * Copyright 1998 Compaq Computer Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
20 *
21 * If you want to make changes, improve or add functionality to this
22 * driver, you'll probably need the Compaq Array Controller Interface
23 * Specification (Document number ECG086/1198)
24 */
25
26/*
27 * This file contains the controller communication implementation for
28 * Compaq SMART-1 and SMART-2 controllers. To the best of my knowledge,
29 * this should support:
30 *
31 * PCI:
32 * SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
33 * Integrated SMART Array Controller, SMART-4200, SMART-4250ES
34 *
35 * EISA:
36 * SMART-2/E, SMART, IAES, IDA-2, IDA
37 */
38
39/*
40 * Memory mapped FIFO interface (SMART 42xx cards)
41 */
42static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
43{
44 writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
45}
46
47/*
48 * This card is the opposite of the other cards.
49 * 0 turns interrupts on...
50 * 0x08 turns them off...
51 */
52static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
53{
54 if (val)
55 { /* Turn interrupts on */
56 writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
57 } else /* Turn them off */
58 {
59 writel( S42XX_INTR_OFF,
60 h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
61 }
62}
63
64/*
65 * For older cards FIFO Full = 0.
66 * On this card 0 means there is room, anything else FIFO Full.
67 *
68 */
69static unsigned long smart4_fifo_full(ctlr_info_t *h)
70{
71
72 return (!readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET));
73}
74
75/* This type of controller returns -1 if the fifo is empty,
76 * not 0 like the others, and we need to let it know that
77 * we have read a value out.
78 */
79static unsigned long smart4_completed(ctlr_info_t *h)
80{
81 long register_value
82 = readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
83
84 /* Fifo is empty */
85 if( register_value == 0xffffffff)
86 return 0;
87
88 /* Need to let it know we got the reply */
89 /* We do this by writing a 0 to the port we just read from */
90 writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
91
92 return ((unsigned long) register_value);
93}
94
95 /*
96 * This hardware returns interrupt pending at a different place and
97 * it does not tell us if the fifo is empty; we will have to check
98 * that by getting a 0 back from the command_completed call.
99 */
100static unsigned long smart4_intr_pending(ctlr_info_t *h)
101{
102 unsigned long register_value =
103 readl(h->vaddr + S42XX_INTR_STATUS);
104
105 if( register_value & S42XX_INTR_PENDING)
106 return FIFO_NOT_EMPTY;
107 return 0 ;
108}
109
110static struct access_method smart4_access = {
111 smart4_submit_command,
112 smart4_intr_mask,
113 smart4_fifo_full,
114 smart4_intr_pending,
115 smart4_completed,
116};
117
118/*
119 * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
120 */
121static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
122{
123 writel(c->busaddr, h->vaddr + COMMAND_FIFO);
124}
125
126static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
127{
128 writel(val, h->vaddr + INTR_MASK);
129}
130
131static unsigned long smart2_fifo_full(ctlr_info_t *h)
132{
133 return readl(h->vaddr + COMMAND_FIFO);
134}
135
136static unsigned long smart2_completed(ctlr_info_t *h)
137{
138 return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
139}
140
141static unsigned long smart2_intr_pending(ctlr_info_t *h)
142{
143 return readl(h->vaddr + INTR_PENDING);
144}
145
146static struct access_method smart2_access = {
147 smart2_submit_command,
148 smart2_intr_mask,
149 smart2_fifo_full,
150 smart2_intr_pending,
151 smart2_completed,
152};
153
154/*
155 * IO access for SMART-2/E cards
156 */
157static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
158{
159 outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
160}
161
162static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
163{
164 outl(val, h->io_mem_addr + INTR_MASK);
165}
166
167static unsigned long smart2e_fifo_full(ctlr_info_t *h)
168{
169 return inl(h->io_mem_addr + COMMAND_FIFO);
170}
171
172static unsigned long smart2e_completed(ctlr_info_t *h)
173{
174 return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
175}
176
177static unsigned long smart2e_intr_pending(ctlr_info_t *h)
178{
179 return inl(h->io_mem_addr + INTR_PENDING);
180}
181
182static struct access_method smart2e_access = {
183 smart2e_submit_command,
184 smart2e_intr_mask,
185 smart2e_fifo_full,
186 smart2e_intr_pending,
187 smart2e_completed,
188};
189
190/*
191 * IO access for older SMART-1 type cards
192 */
193#define SMART1_SYSTEM_MASK 0xC8E
194#define SMART1_SYSTEM_DOORBELL 0xC8F
195#define SMART1_LOCAL_MASK 0xC8C
196#define SMART1_LOCAL_DOORBELL 0xC8D
197#define SMART1_INTR_MASK 0xC89
198#define SMART1_LISTADDR 0xC90
199#define SMART1_LISTLEN 0xC94
200#define SMART1_TAG 0xC97
201#define SMART1_COMPLETE_ADDR 0xC98
202#define SMART1_LISTSTATUS 0xC9E
203
204#define CHANNEL_BUSY 0x01
205#define CHANNEL_CLEAR 0x02
206
207static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
208{
209 /*
210 * This __u16 is actually a bunch of control flags on SMART
211 * and below. We want them all to be zero.
212 */
213 c->hdr.size = 0;
214
215 outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
216
217 outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
218 outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
219
220 outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
221}
222
223static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
224{
225 if (val == 1) {
226 outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
227 outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
228 outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
229 outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
230 } else {
231 outb(0, h->io_mem_addr + 0xC8E);
232 }
233}
234
235static unsigned long smart1_fifo_full(ctlr_info_t *h)
236{
237 unsigned char chan;
238 chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
239 return chan;
240}
241
242static unsigned long smart1_completed(ctlr_info_t *h)
243{
244 unsigned char status;
245 unsigned long cmd;
246
247 if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
248 outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
249
250 cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
251 status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
252
253 outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
254
255 /*
256 * this is x86 (actually compaq x86) only, so it's ok
257 */
258 if (cmd) ((cmdlist_t*)bus_to_virt(cmd))->req.hdr.rcode = status;
259 } else {
260 cmd = 0;
261 }
262 return cmd;
263}
264
265static unsigned long smart1_intr_pending(ctlr_info_t *h)
266{
267 unsigned char chan;
268 chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
269 return chan;
270}
271
272static struct access_method smart1_access = {
273 smart1_submit_command,
274 smart1_intr_mask,
275 smart1_fifo_full,
276 smart1_intr_pending,
277 smart1_completed,
278};
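All four tables share the access_method shape, so the owning driver (cpqarray) can pick one at probe time and run every board flavour through the same command and interrupt paths. A minimal sketch of that selection; the board_id values are made up purely for illustration:

static struct access_method *example_pick_access(int board_id)
{
        /* hypothetical ids; the real driver matches PCI/EISA identifiers
         * when it fills in its controller structure */
        switch (board_id) {
        case 0x42:
                return &smart4_access;          /* SMART-42xx, memory mapped */
        case 0x20:
                return &smart2_access;          /* PCI SMART-2 / SMART-3xxx */
        case 0x2e:
                return &smart2e_access;         /* EISA SMART-2/E, port I/O */
        default:
                return &smart1_access;          /* original SMART/IDA boards */
        }
}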
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
new file mode 100644
index 000000000000..5b09cf154ac7
--- /dev/null
+++ b/drivers/block/swim3.c
@@ -0,0 +1,1154 @@
1/*
2 * Driver for the SWIM3 (Super Woz Integrated Machine 3)
3 * floppy controller found on Power Macintoshes.
4 *
5 * Copyright (C) 1996 Paul Mackerras.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13/*
14 * TODO:
15 * handle 2 drives
16 * handle GCR disks
17 */
18
19#include <linux/config.h>
20#include <linux/stddef.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/timer.h>
24#include <linux/delay.h>
25#include <linux/fd.h>
26#include <linux/ioctl.h>
27#include <linux/blkdev.h>
28#include <linux/devfs_fs_kernel.h>
29#include <linux/interrupt.h>
30#include <linux/module.h>
31#include <asm/io.h>
32#include <asm/dbdma.h>
33#include <asm/prom.h>
34#include <asm/uaccess.h>
35#include <asm/mediabay.h>
36#include <asm/machdep.h>
37#include <asm/pmac_feature.h>
38
39static struct request_queue *swim3_queue;
40static struct gendisk *disks[2];
41static struct request *fd_req;
42
43#define MAX_FLOPPIES 2
44
45enum swim_state {
46 idle,
47 locating,
48 seeking,
49 settling,
50 do_transfer,
51 jogging,
52 available,
53 revalidating,
54 ejecting
55};
56
57#define REG(x) unsigned char x; char x ## _pad[15];
58
59/*
60 * The names for these registers mostly represent speculation on my part.
61 * It will be interesting to see how close they are to the names Apple uses.
62 */
63struct swim3 {
64 REG(data);
65 REG(timer); /* counts down at 1MHz */
66 REG(error);
67 REG(mode);
68 REG(select); /* controls CA0, CA1, CA2 and LSTRB signals */
69 REG(setup);
70 REG(control); /* writing bits clears them */
71 REG(status); /* writing bits sets them in control */
72 REG(intr);
73 REG(nseek); /* # tracks to seek */
74 REG(ctrack); /* current track number */
75 REG(csect); /* current sector number */
76 REG(gap3); /* size of gap 3 in track format */
77 REG(sector); /* sector # to read or write */
78 REG(nsect); /* # sectors to read or write */
79 REG(intr_enable);
80};
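The REG() macro above pads every register to 16 bytes, so consecutive struct members land at 16-byte intervals, matching how the SWIM3 decodes its register addresses. A compile-time sanity check one could add (purely illustrative, not in the driver; assumes <linux/stddef.h> for offsetof()):

static inline void swim3_layout_check(void)
{
        /* REG(timer) expands to: unsigned char timer; char timer_pad[15]; */
        BUILD_BUG_ON(offsetof(struct swim3, timer) != 0x10);
        BUILD_BUG_ON(offsetof(struct swim3, error) != 0x20);
        BUILD_BUG_ON(offsetof(struct swim3, intr_enable) != 0xf0);
}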
81
82#define control_bic control
83#define control_bis status
84
85/* Bits in select register */
86#define CA_MASK 7
87#define LSTRB 8
88
89/* Bits in control register */
90#define DO_SEEK 0x80
91#define FORMAT 0x40
92#define SELECT 0x20
93#define WRITE_SECTORS 0x10
94#define DO_ACTION 0x08
95#define DRIVE2_ENABLE 0x04
96#define DRIVE_ENABLE 0x02
97#define INTR_ENABLE 0x01
98
99/* Bits in status register */
100#define FIFO_1BYTE 0x80
101#define FIFO_2BYTE 0x40
102#define ERROR 0x20
103#define DATA 0x08
104#define RDDATA 0x04
105#define INTR_PENDING 0x02
106#define MARK_BYTE 0x01
107
108/* Bits in intr and intr_enable registers */
109#define ERROR_INTR 0x20
110#define DATA_CHANGED 0x10
111#define TRANSFER_DONE 0x08
112#define SEEN_SECTOR 0x04
113#define SEEK_DONE 0x02
114#define TIMER_DONE 0x01
115
116/* Bits in error register */
117#define ERR_DATA_CRC 0x80
118#define ERR_ADDR_CRC 0x40
119#define ERR_OVERRUN 0x04
120#define ERR_UNDERRUN 0x01
121
122/* Bits in setup register */
123#define S_SW_RESET 0x80
124#define S_GCR_WRITE 0x40
125#define S_IBM_DRIVE 0x20
126#define S_TEST_MODE 0x10
127#define S_FCLK_DIV2 0x08
128#define S_GCR 0x04
129#define S_COPY_PROT 0x02
130#define S_INV_WDATA 0x01
131
132/* Select values for swim3_action */
133#define SEEK_POSITIVE 0
134#define SEEK_NEGATIVE 4
135#define STEP 1
136#define MOTOR_ON 2
137#define MOTOR_OFF 6
138#define INDEX 3
139#define EJECT 7
140#define SETMFM 9
141#define SETGCR 13
142
143/* Select values for swim3_select and swim3_readbit */
144#define STEP_DIR 0
145#define STEPPING 1
146#define MOTOR_ON 2
147#define RELAX 3 /* also eject in progress */
148#define READ_DATA_0 4
149#define TWOMEG_DRIVE 5
150#define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */
151#define DRIVE_PRESENT 7
152#define DISK_IN 8
153#define WRITE_PROT 9
154#define TRACK_ZERO 10
155#define TACHO 11
156#define READ_DATA_1 12
157#define MFM_MODE 13
158#define SEEK_COMPLETE 14
159#define ONEMEG_MEDIA 15
160
161/* Definitions of values used in writing and formatting */
162#define DATA_ESCAPE 0x99
163#define GCR_SYNC_EXC 0x3f
164#define GCR_SYNC_CONV 0x80
165#define GCR_FIRST_MARK 0xd5
166#define GCR_SECOND_MARK 0xaa
167#define GCR_ADDR_MARK "\xd5\xaa\x00"
168#define GCR_DATA_MARK "\xd5\xaa\x0b"
169#define GCR_SLIP_BYTE "\x27\xaa"
170#define GCR_SELF_SYNC "\x3f\xbf\x1e\x34\x3c\x3f"
171
172#define DATA_99 "\x99\x99"
173#define MFM_ADDR_MARK "\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
174#define MFM_INDEX_MARK "\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
175#define MFM_GAP_LEN 12
176
177struct floppy_state {
178 enum swim_state state;
179 struct swim3 __iomem *swim3; /* hardware registers */
180 struct dbdma_regs __iomem *dma; /* DMA controller registers */
181 int swim3_intr; /* interrupt number for SWIM3 */
182 int dma_intr; /* interrupt number for DMA channel */
183 int cur_cyl; /* cylinder head is on, or -1 */
184 int cur_sector; /* last sector we saw go past */
185 int req_cyl; /* the cylinder for the current r/w request */
186 int head; /* head number ditto */
187 int req_sector; /* sector number ditto */
188 int scount; /* # sectors we're transferring at present */
189 int retries;
190 int settle_time;
191 int secpercyl; /* disk geometry information */
192 int secpertrack;
193 int total_secs;
194 int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
195 struct dbdma_cmd *dma_cmd;
196 int ref_count;
197 int expect_cyl;
198 struct timer_list timeout;
199 int timeout_pending;
200 int ejected;
201 wait_queue_head_t wait;
202 int wanted;
203 struct device_node* media_bay; /* NULL when not in bay */
204 char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
205};
206
207static struct floppy_state floppy_states[MAX_FLOPPIES];
208static int floppy_count = 0;
209static DEFINE_SPINLOCK(swim3_lock);
210
211static unsigned short write_preamble[] = {
212 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
213 0, 0, 0, 0, 0, 0, /* sync field */
214 0x99a1, 0x99a1, 0x99a1, 0x99fb, /* data address mark */
215 0x990f /* no escape for 512 bytes */
216};
217
218static unsigned short write_postamble[] = {
219 0x9904, /* insert CRC */
220 0x4e4e, 0x4e4e,
221 0x9908, /* stop writing */
222 0, 0, 0, 0, 0, 0
223};
224
225static void swim3_select(struct floppy_state *fs, int sel);
226static void swim3_action(struct floppy_state *fs, int action);
227static int swim3_readbit(struct floppy_state *fs, int bit);
228static void do_fd_request(request_queue_t * q);
229static void start_request(struct floppy_state *fs);
230static void set_timeout(struct floppy_state *fs, int nticks,
231 void (*proc)(unsigned long));
232static void scan_track(struct floppy_state *fs);
233static void seek_track(struct floppy_state *fs, int n);
234static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
235static void setup_transfer(struct floppy_state *fs);
236static void act(struct floppy_state *fs);
237static void scan_timeout(unsigned long data);
238static void seek_timeout(unsigned long data);
239static void settle_timeout(unsigned long data);
240static void xfer_timeout(unsigned long data);
241static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
242/*static void fd_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs);*/
243static int grab_drive(struct floppy_state *fs, enum swim_state state,
244 int interruptible);
245static void release_drive(struct floppy_state *fs);
246static int fd_eject(struct floppy_state *fs);
247static int floppy_ioctl(struct inode *inode, struct file *filp,
248 unsigned int cmd, unsigned long param);
249static int floppy_open(struct inode *inode, struct file *filp);
250static int floppy_release(struct inode *inode, struct file *filp);
251static int floppy_check_change(struct gendisk *disk);
252static int floppy_revalidate(struct gendisk *disk);
253static int swim3_add_device(struct device_node *swims);
254int swim3_init(void);
255
256#ifndef CONFIG_PMAC_PBOOK
257#define check_media_bay(which, what) 1
258#endif
259
260static void swim3_select(struct floppy_state *fs, int sel)
261{
262 struct swim3 __iomem *sw = fs->swim3;
263
264 out_8(&sw->select, RELAX);
265 if (sel & 8)
266 out_8(&sw->control_bis, SELECT);
267 else
268 out_8(&sw->control_bic, SELECT);
269 out_8(&sw->select, sel & CA_MASK);
270}
271
272static void swim3_action(struct floppy_state *fs, int action)
273{
274 struct swim3 __iomem *sw = fs->swim3;
275
276 swim3_select(fs, action);
277 udelay(1);
278 out_8(&sw->select, sw->select | LSTRB);
279 udelay(2);
280 out_8(&sw->select, sw->select & ~LSTRB);
281 udelay(1);
282}
283
284static int swim3_readbit(struct floppy_state *fs, int bit)
285{
286 struct swim3 __iomem *sw = fs->swim3;
287 int stat;
288
289 swim3_select(fs, bit);
290 udelay(1);
291 stat = in_8(&sw->status);
292 return (stat & DATA) == 0;
293}
294
295static void do_fd_request(request_queue_t * q)
296{
297 int i;
298 for(i=0;i<floppy_count;i++)
299 {
300 if (floppy_states[i].media_bay &&
301 check_media_bay(floppy_states[i].media_bay, MB_FD))
302 continue;
303 start_request(&floppy_states[i]);
304 }
305 sti();
306}
307
308static void start_request(struct floppy_state *fs)
309{
310 struct request *req;
311 unsigned long x;
312
313 if (fs->state == idle && fs->wanted) {
314 fs->state = available;
315 wake_up(&fs->wait);
316 return;
317 }
318 while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
319#if 0
320 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
321 req->rq_disk->disk_name, req->cmd,
322 (long)req->sector, req->nr_sectors, req->buffer);
323 printk(" rq_status=%d errors=%d current_nr_sectors=%ld\n",
324 req->rq_status, req->errors, req->current_nr_sectors);
325#endif
326
327 if (req->sector < 0 || req->sector >= fs->total_secs) {
328 end_request(req, 0);
329 continue;
330 }
331 if (req->current_nr_sectors == 0) {
332 end_request(req, 1);
333 continue;
334 }
335 if (fs->ejected) {
336 end_request(req, 0);
337 continue;
338 }
339
340 if (rq_data_dir(req) == WRITE) {
341 if (fs->write_prot < 0)
342 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
343 if (fs->write_prot) {
344 end_request(req, 0);
345 continue;
346 }
347 }
348
349 /* Do not remove the cast. req->sector is now a sector_t and
350 * can be 64 bits, but it will never go past 32 bits for this
351 * driver anyway, so we can safely cast it down and not have
352 * to do a 64/32 division
353 */
354 fs->req_cyl = ((long)req->sector) / fs->secpercyl;
355 x = ((long)req->sector) % fs->secpercyl;
356 fs->head = x / fs->secpertrack;
357 fs->req_sector = x % fs->secpertrack + 1;
358 fd_req = req;
359 fs->state = do_transfer;
360 fs->retries = 0;
361
362 act(fs);
363 }
364}
365
366static void set_timeout(struct floppy_state *fs, int nticks,
367 void (*proc)(unsigned long))
368{
369 unsigned long flags;
370
371 save_flags(flags); cli();
372 if (fs->timeout_pending)
373 del_timer(&fs->timeout);
374 fs->timeout.expires = jiffies + nticks;
375 fs->timeout.function = proc;
376 fs->timeout.data = (unsigned long) fs;
377 add_timer(&fs->timeout);
378 fs->timeout_pending = 1;
379 restore_flags(flags);
380}
381
382static inline void scan_track(struct floppy_state *fs)
383{
384 struct swim3 __iomem *sw = fs->swim3;
385
386 swim3_select(fs, READ_DATA_0);
387 in_8(&sw->intr); /* clear SEEN_SECTOR bit */
388 in_8(&sw->error);
389 out_8(&sw->intr_enable, SEEN_SECTOR);
390 out_8(&sw->control_bis, DO_ACTION);
391 /* enable intr when track found */
392 set_timeout(fs, HZ, scan_timeout); /* enable timeout */
393}
394
395static inline void seek_track(struct floppy_state *fs, int n)
396{
397 struct swim3 __iomem *sw = fs->swim3;
398
399 if (n >= 0) {
400 swim3_action(fs, SEEK_POSITIVE);
401 sw->nseek = n;
402 } else {
403 swim3_action(fs, SEEK_NEGATIVE);
404 sw->nseek = -n;
405 }
406 fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
407 swim3_select(fs, STEP);
408 in_8(&sw->error);
409 /* enable intr when seek finished */
410 out_8(&sw->intr_enable, SEEK_DONE);
411 out_8(&sw->control_bis, DO_SEEK);
412 set_timeout(fs, 3*HZ, seek_timeout); /* enable timeout */
413 fs->settle_time = 0;
414}
415
416static inline void init_dma(struct dbdma_cmd *cp, int cmd,
417 void *buf, int count)
418{
419 st_le16(&cp->req_count, count);
420 st_le16(&cp->command, cmd);
421 st_le32(&cp->phy_addr, virt_to_bus(buf));
422 cp->xfer_status = 0;
423}
424
425static inline void setup_transfer(struct floppy_state *fs)
426{
427 int n;
428 struct swim3 __iomem *sw = fs->swim3;
429 struct dbdma_cmd *cp = fs->dma_cmd;
430 struct dbdma_regs __iomem *dr = fs->dma;
431
432 if (fd_req->current_nr_sectors <= 0) {
433 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
434 return;
435 }
436 if (rq_data_dir(fd_req) == WRITE)
437 n = 1;
438 else {
439 n = fs->secpertrack - fs->req_sector + 1;
440 if (n > fd_req->current_nr_sectors)
441 n = fd_req->current_nr_sectors;
442 }
443 fs->scount = n;
444 swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
445 out_8(&sw->sector, fs->req_sector);
446 out_8(&sw->nsect, n);
447 out_8(&sw->gap3, 0);
448 out_le32(&dr->cmdptr, virt_to_bus(cp));
449 if (rq_data_dir(fd_req) == WRITE) {
450 /* Set up 3 dma commands: write preamble, data, postamble */
451 init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
452 ++cp;
453 init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
454 ++cp;
455 init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
456 } else {
457 init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
458 }
459 ++cp;
460 out_le16(&cp->command, DBDMA_STOP);
461 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
462 in_8(&sw->error);
463 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
464 if (rq_data_dir(fd_req) == WRITE)
465 out_8(&sw->control_bis, WRITE_SECTORS);
466 in_8(&sw->intr);
467 out_le32(&dr->control, (RUN << 16) | RUN);
468 /* enable intr when transfer complete */
469 out_8(&sw->intr_enable, TRANSFER_DONE);
470 out_8(&sw->control_bis, DO_ACTION);
471 set_timeout(fs, 2*HZ, xfer_timeout); /* enable timeout */
472}
473
474static void act(struct floppy_state *fs)
475{
476 for (;;) {
477 switch (fs->state) {
478 case idle:
479 return; /* XXX shouldn't get here */
480
481 case locating:
482 if (swim3_readbit(fs, TRACK_ZERO)) {
483 fs->cur_cyl = 0;
484 if (fs->req_cyl == 0)
485 fs->state = do_transfer;
486 else
487 fs->state = seeking;
488 break;
489 }
490 scan_track(fs);
491 return;
492
493 case seeking:
494 if (fs->cur_cyl < 0) {
495 fs->expect_cyl = -1;
496 fs->state = locating;
497 break;
498 }
499 if (fs->req_cyl == fs->cur_cyl) {
500 printk("whoops, seeking 0\n");
501 fs->state = do_transfer;
502 break;
503 }
504 seek_track(fs, fs->req_cyl - fs->cur_cyl);
505 return;
506
507 case settling:
508 /* check for SEEK_COMPLETE after 30ms */
509 fs->settle_time = (HZ + 32) / 33;
510 set_timeout(fs, fs->settle_time, settle_timeout);
511 return;
512
513 case do_transfer:
514 if (fs->cur_cyl != fs->req_cyl) {
515 if (fs->retries > 5) {
516 end_request(fd_req, 0);
517 fs->state = idle;
518 return;
519 }
520 fs->state = seeking;
521 break;
522 }
523 setup_transfer(fs);
524 return;
525
526 case jogging:
527 seek_track(fs, -5);
528 return;
529
530 default:
531 printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
532 return;
533 }
534 }
535}
536
537static void scan_timeout(unsigned long data)
538{
539 struct floppy_state *fs = (struct floppy_state *) data;
540 struct swim3 __iomem *sw = fs->swim3;
541
542 fs->timeout_pending = 0;
543 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
544 out_8(&sw->select, RELAX);
545 out_8(&sw->intr_enable, 0);
546 fs->cur_cyl = -1;
547 if (fs->retries > 5) {
548 end_request(fd_req, 0);
549 fs->state = idle;
550 start_request(fs);
551 } else {
552 fs->state = jogging;
553 act(fs);
554 }
555}
556
557static void seek_timeout(unsigned long data)
558{
559 struct floppy_state *fs = (struct floppy_state *) data;
560 struct swim3 __iomem *sw = fs->swim3;
561
562 fs->timeout_pending = 0;
563 out_8(&sw->control_bic, DO_SEEK);
564 out_8(&sw->select, RELAX);
565 out_8(&sw->intr_enable, 0);
566 printk(KERN_ERR "swim3: seek timeout\n");
567 end_request(fd_req, 0);
568 fs->state = idle;
569 start_request(fs);
570}
571
572static void settle_timeout(unsigned long data)
573{
574 struct floppy_state *fs = (struct floppy_state *) data;
575 struct swim3 __iomem *sw = fs->swim3;
576
577 fs->timeout_pending = 0;
578 if (swim3_readbit(fs, SEEK_COMPLETE)) {
579 out_8(&sw->select, RELAX);
580 fs->state = locating;
581 act(fs);
582 return;
583 }
584 out_8(&sw->select, RELAX);
585 if (fs->settle_time < 2*HZ) {
586 ++fs->settle_time;
587 set_timeout(fs, 1, settle_timeout);
588 return;
589 }
590 printk(KERN_ERR "swim3: seek settle timeout\n");
591 end_request(fd_req, 0);
592 fs->state = idle;
593 start_request(fs);
594}
595
596static void xfer_timeout(unsigned long data)
597{
598 struct floppy_state *fs = (struct floppy_state *) data;
599 struct swim3 __iomem *sw = fs->swim3;
600 struct dbdma_regs __iomem *dr = fs->dma;
601 struct dbdma_cmd *cp = fs->dma_cmd;
602 unsigned long s;
603 int n;
604
605 fs->timeout_pending = 0;
606 out_le32(&dr->control, RUN << 16);
607 /* We must wait a bit for dbdma to stop */
608 for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
609 udelay(1);
610 out_8(&sw->intr_enable, 0);
611 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
612 out_8(&sw->select, RELAX);
613 if (rq_data_dir(fd_req) == WRITE)
614 ++cp;
615 if (ld_le16(&cp->xfer_status) != 0)
616 s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
617 else
618 s = 0;
619 fd_req->sector += s;
620 fd_req->current_nr_sectors -= s;
621 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
622 (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
623 end_request(fd_req, 0);
624 fs->state = idle;
625 start_request(fs);
626}
627
628static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
629{
630 struct floppy_state *fs = (struct floppy_state *) dev_id;
631 struct swim3 __iomem *sw = fs->swim3;
632 int intr, err, n;
633 int stat, resid;
634 struct dbdma_regs __iomem *dr;
635 struct dbdma_cmd *cp;
636
637 intr = in_8(&sw->intr);
638 err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
639 if ((intr & ERROR_INTR) && fs->state != do_transfer)
640 printk(KERN_ERR "swim3_interrupt, state=%d, dir=%lx, intr=%x, err=%x\n",
641 fs->state, rq_data_dir(fd_req), intr, err);
642 switch (fs->state) {
643 case locating:
644 if (intr & SEEN_SECTOR) {
645 out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
646 out_8(&sw->select, RELAX);
647 out_8(&sw->intr_enable, 0);
648 del_timer(&fs->timeout);
649 fs->timeout_pending = 0;
650 if (sw->ctrack == 0xff) {
651 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
652 fs->cur_cyl = -1;
653 if (fs->retries > 5) {
654 end_request(fd_req, 0);
655 fs->state = idle;
656 start_request(fs);
657 } else {
658 fs->state = jogging;
659 act(fs);
660 }
661 break;
662 }
663 fs->cur_cyl = sw->ctrack;
664 fs->cur_sector = sw->csect;
665 if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
666 printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
667 fs->expect_cyl, fs->cur_cyl);
668 fs->state = do_transfer;
669 act(fs);
670 }
671 break;
672 case seeking:
673 case jogging:
674 if (sw->nseek == 0) {
675 out_8(&sw->control_bic, DO_SEEK);
676 out_8(&sw->select, RELAX);
677 out_8(&sw->intr_enable, 0);
678 del_timer(&fs->timeout);
679 fs->timeout_pending = 0;
680 if (fs->state == seeking)
681 ++fs->retries;
682 fs->state = settling;
683 act(fs);
684 }
685 break;
686 case settling:
687 out_8(&sw->intr_enable, 0);
688 del_timer(&fs->timeout);
689 fs->timeout_pending = 0;
690 act(fs);
691 break;
692 case do_transfer:
693 if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
694 break;
695 out_8(&sw->intr_enable, 0);
696 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
697 out_8(&sw->select, RELAX);
698 del_timer(&fs->timeout);
699 fs->timeout_pending = 0;
700 dr = fs->dma;
701 cp = fs->dma_cmd;
702 if (rq_data_dir(fd_req) == WRITE)
703 ++cp;
704 /*
705 * Check that the main data transfer has finished.
706 * On writing, the swim3 sometimes doesn't use
707 * up all the bytes of the postamble, so we can still
708 * see DMA active here. That doesn't matter as long
709 * as all the sector data has been transferred.
710 */
711 if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
712 /* wait a little while for DMA to complete */
713 for (n = 0; n < 100; ++n) {
714 if (cp->xfer_status != 0)
715 break;
716 udelay(1);
717 barrier();
718 }
719 }
720 /* turn off DMA */
721 out_le32(&dr->control, (RUN | PAUSE) << 16);
722 stat = ld_le16(&cp->xfer_status);
723 resid = ld_le16(&cp->res_count);
724 if (intr & ERROR_INTR) {
725 n = fs->scount - 1 - resid / 512;
726 if (n > 0) {
727 fd_req->sector += n;
728 fd_req->current_nr_sectors -= n;
729 fd_req->buffer += n * 512;
730 fs->req_sector += n;
731 }
732 if (fs->retries < 5) {
733 ++fs->retries;
734 act(fs);
735 } else {
736 printk("swim3: error %sing block %ld (err=%x)\n",
737 rq_data_dir(fd_req) == WRITE? "writ": "read",
738 (long)fd_req->sector, err);
739 end_request(fd_req, 0);
740 fs->state = idle;
741 }
742 } else {
743 if ((stat & ACTIVE) == 0 || resid != 0) {
744 /* musta been an error */
745 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
746 printk(KERN_ERR " state=%d, dir=%lx, intr=%x, err=%x\n",
747 fs->state, rq_data_dir(fd_req), intr, err);
748 end_request(fd_req, 0);
749 fs->state = idle;
750 start_request(fs);
751 break;
752 }
753 fd_req->sector += fs->scount;
754 fd_req->current_nr_sectors -= fs->scount;
755 fd_req->buffer += fs->scount * 512;
756 if (fd_req->current_nr_sectors <= 0) {
757 end_request(fd_req, 1);
758 fs->state = idle;
759 } else {
760 fs->req_sector += fs->scount;
761 if (fs->req_sector > fs->secpertrack) {
762 fs->req_sector -= fs->secpertrack;
763 if (++fs->head > 1) {
764 fs->head = 0;
765 ++fs->req_cyl;
766 }
767 }
768 act(fs);
769 }
770 }
771 if (fs->state == idle)
772 start_request(fs);
773 break;
774 default:
775 printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
776 }
777 return IRQ_HANDLED;
778}
779
780/*
781static void fd_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
782{
783}
784*/
785
786static int grab_drive(struct floppy_state *fs, enum swim_state state,
787 int interruptible)
788{
789 unsigned long flags;
790
791 save_flags(flags);
792 cli();
793 if (fs->state != idle) {
794 ++fs->wanted;
795 while (fs->state != available) {
796 if (interruptible && signal_pending(current)) {
797 --fs->wanted;
798 restore_flags(flags);
799 return -EINTR;
800 }
801 interruptible_sleep_on(&fs->wait);
802 }
803 --fs->wanted;
804 }
805 fs->state = state;
806 restore_flags(flags);
807 return 0;
808}
809
810static void release_drive(struct floppy_state *fs)
811{
812 unsigned long flags;
813
814 save_flags(flags);
815 cli();
816 fs->state = idle;
817 start_request(fs);
818 restore_flags(flags);
819}
820
821static int fd_eject(struct floppy_state *fs)
822{
823 int err, n;
824
825 err = grab_drive(fs, ejecting, 1);
826 if (err)
827 return err;
828 swim3_action(fs, EJECT);
829 for (n = 20; n > 0; --n) {
830 if (signal_pending(current)) {
831 err = -EINTR;
832 break;
833 }
834 swim3_select(fs, RELAX);
835 current->state = TASK_INTERRUPTIBLE;
836 schedule_timeout(1);
837 if (swim3_readbit(fs, DISK_IN) == 0)
838 break;
839 }
840 swim3_select(fs, RELAX);
841 udelay(150);
842 fs->ejected = 1;
843 release_drive(fs);
844 return err;
845}
846
847static struct floppy_struct floppy_type =
848 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
849
850static int floppy_ioctl(struct inode *inode, struct file *filp,
851 unsigned int cmd, unsigned long param)
852{
853 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
854 int err;
855
856 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
857 return -EPERM;
858
859 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
860 return -ENXIO;
861
862 switch (cmd) {
863 case FDEJECT:
864 if (fs->ref_count != 1)
865 return -EBUSY;
866 err = fd_eject(fs);
867 return err;
868 case FDGETPRM:
869 if (copy_to_user((void __user *) param, &floppy_type,
870 sizeof(struct floppy_struct)))
871 return -EFAULT;
872 return 0;
873 }
874 return -ENOTTY;
875}
876
877static int floppy_open(struct inode *inode, struct file *filp)
878{
879 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
880 struct swim3 __iomem *sw = fs->swim3;
881 int n, err = 0;
882
883 if (fs->ref_count == 0) {
884 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
885 return -ENXIO;
886 out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
887 out_8(&sw->control_bic, 0xff);
888 out_8(&sw->mode, 0x95);
889 udelay(10);
890 out_8(&sw->intr_enable, 0);
891 out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
892 swim3_action(fs, MOTOR_ON);
893 fs->write_prot = -1;
894 fs->cur_cyl = -1;
895 for (n = 0; n < 2 * HZ; ++n) {
896 if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
897 break;
898 if (signal_pending(current)) {
899 err = -EINTR;
900 break;
901 }
902 swim3_select(fs, RELAX);
903 current->state = TASK_INTERRUPTIBLE;
904 schedule_timeout(1);
905 }
906 if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
907 || swim3_readbit(fs, DISK_IN) == 0))
908 err = -ENXIO;
909 swim3_action(fs, SETMFM);
910 swim3_select(fs, RELAX);
911
912 } else if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
913 return -EBUSY;
914
915 if (err == 0 && (filp->f_flags & O_NDELAY) == 0
916 && (filp->f_mode & 3)) {
917 check_disk_change(inode->i_bdev);
918 if (fs->ejected)
919 err = -ENXIO;
920 }
921
922 if (err == 0 && (filp->f_mode & 2)) {
923 if (fs->write_prot < 0)
924 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
925 if (fs->write_prot)
926 err = -EROFS;
927 }
928
929 if (err) {
930 if (fs->ref_count == 0) {
931 swim3_action(fs, MOTOR_OFF);
932 out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
933 swim3_select(fs, RELAX);
934 }
935 return err;
936 }
937
938 if (filp->f_flags & O_EXCL)
939 fs->ref_count = -1;
940 else
941 ++fs->ref_count;
942
943 return 0;
944}
945
946static int floppy_release(struct inode *inode, struct file *filp)
947{
948 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
949 struct swim3 __iomem *sw = fs->swim3;
950 if (fs->ref_count > 0 && --fs->ref_count == 0) {
951 swim3_action(fs, MOTOR_OFF);
952 out_8(&sw->control_bic, 0xff);
953 swim3_select(fs, RELAX);
954 }
955 return 0;
956}
957
958static int floppy_check_change(struct gendisk *disk)
959{
960 struct floppy_state *fs = disk->private_data;
961 return fs->ejected;
962}
963
964static int floppy_revalidate(struct gendisk *disk)
965{
966 struct floppy_state *fs = disk->private_data;
967 struct swim3 __iomem *sw;
968 int ret, n;
969
970 if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
971 return -ENXIO;
972
973 sw = fs->swim3;
974 grab_drive(fs, revalidating, 0);
975 out_8(&sw->intr_enable, 0);
976 out_8(&sw->control_bis, DRIVE_ENABLE);
977 swim3_action(fs, MOTOR_ON); /* necessary? */
978 fs->write_prot = -1;
979 fs->cur_cyl = -1;
980 mdelay(1);
981 for (n = HZ; n > 0; --n) {
982 if (swim3_readbit(fs, SEEK_COMPLETE))
983 break;
984 if (signal_pending(current))
985 break;
986 swim3_select(fs, RELAX);
987 current->state = TASK_INTERRUPTIBLE;
988 schedule_timeout(1);
989 }
990 ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
991 || swim3_readbit(fs, DISK_IN) == 0;
992 if (ret)
993 swim3_action(fs, MOTOR_OFF);
994 else {
995 fs->ejected = 0;
996 swim3_action(fs, SETMFM);
997 }
998 swim3_select(fs, RELAX);
999
1000 release_drive(fs);
1001 return ret;
1002}
1003
1004static struct block_device_operations floppy_fops = {
1005 .open = floppy_open,
1006 .release = floppy_release,
1007 .ioctl = floppy_ioctl,
1008 .media_changed = floppy_check_change,
1009 .revalidate_disk= floppy_revalidate,
1010};
1011
1012int swim3_init(void)
1013{
1014 struct device_node *swim;
1015 int err = -ENOMEM;
1016 int i;
1017
1018 devfs_mk_dir("floppy");
1019
1020 swim = find_devices("floppy");
1021 while (swim && (floppy_count < MAX_FLOPPIES))
1022 {
1023 swim3_add_device(swim);
1024 swim = swim->next;
1025 }
1026
1027 swim = find_devices("swim3");
1028 while (swim && (floppy_count < MAX_FLOPPIES))
1029 {
1030 swim3_add_device(swim);
1031 swim = swim->next;
1032 }
1033
1034 if (!floppy_count)
1035 return -ENODEV;
1036
1037 for (i = 0; i < floppy_count; i++) {
1038 disks[i] = alloc_disk(1);
1039 if (!disks[i])
1040 goto out;
1041 }
1042
1043 if (register_blkdev(FLOPPY_MAJOR, "fd")) {
1044 err = -EBUSY;
1045 goto out;
1046 }
1047
1048 swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
1049 if (!swim3_queue) {
1050 err = -ENOMEM;
1051 goto out_queue;
1052 }
1053
1054 for (i = 0; i < floppy_count; i++) {
1055 struct gendisk *disk = disks[i];
1056 disk->major = FLOPPY_MAJOR;
1057 disk->first_minor = i;
1058 disk->fops = &floppy_fops;
1059 disk->private_data = &floppy_states[i];
1060 disk->queue = swim3_queue;
1061 disk->flags |= GENHD_FL_REMOVABLE;
1062 sprintf(disk->disk_name, "fd%d", i);
1063 sprintf(disk->devfs_name, "floppy/%d", i);
1064 set_capacity(disk, 2880);
1065 add_disk(disk);
1066 }
1067 return 0;
1068
1069out_queue:
1070 unregister_blkdev(FLOPPY_MAJOR, "fd");
1071out:
1072 while (i--)
1073 put_disk(disks[i]);
1074 /* shouldn't we do something with results of swim_add_device()? */
1075 return err;
1076}
1077
1078static int swim3_add_device(struct device_node *swim)
1079{
1080 struct device_node *mediabay;
1081 struct floppy_state *fs = &floppy_states[floppy_count];
1082
1083 if (swim->n_addrs < 2)
1084 {
1085 printk(KERN_INFO "swim3: expecting 2 addrs (n_addrs:%d, n_intrs:%d)\n",
1086 swim->n_addrs, swim->n_intrs);
1087 return -EINVAL;
1088 }
1089
1090 if (swim->n_intrs < 2)
1091 {
1092 printk(KERN_INFO "swim3: expecting 2 intrs (n_addrs:%d, n_intrs:%d)\n",
1093 swim->n_addrs, swim->n_intrs);
1094 return -EINVAL;
1095 }
1096
1097 if (!request_OF_resource(swim, 0, NULL)) {
1098 printk(KERN_INFO "swim3: can't request IO resource !\n");
1099 return -EINVAL;
1100 }
1101
1102 mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? swim->parent : NULL;
1103 if (mediabay == NULL)
1104 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
1105
1106 memset(fs, 0, sizeof(*fs));
1107 fs->state = idle;
1108 fs->swim3 = (struct swim3 __iomem *)
1109 ioremap(swim->addrs[0].address, 0x200);
1110 fs->dma = (struct dbdma_regs __iomem *)
1111 ioremap(swim->addrs[1].address, 0x200);
1112 fs->swim3_intr = swim->intrs[0].line;
1113 fs->dma_intr = swim->intrs[1].line;
1114 fs->cur_cyl = -1;
1115 fs->cur_sector = -1;
1116 fs->secpercyl = 36;
1117 fs->secpertrack = 18;
1118 fs->total_secs = 2880;
1119 fs->media_bay = mediabay;
1120 init_waitqueue_head(&fs->wait);
1121
1122 fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
1123 memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
1124 st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
1125
1126 if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
1127 printk(KERN_ERR "Couldn't get irq %d for SWIM3\n", fs->swim3_intr);
1128 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1129 return -EBUSY;
1130 }
1131/*
1132 if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
1133 printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
1134 fs->dma_intr);
1135 pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
1136 return -EBUSY;
1137 }
1138*/
1139
1140 init_timer(&fs->timeout);
1141
1142 printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
1143 mediabay ? "in media bay" : "");
1144
1145 floppy_count++;
1146
1147 return 0;
1148}
1149
1150module_init(swim3_init)
1151
1152MODULE_LICENSE("GPL");
1153MODULE_AUTHOR("Paul Mackerras");
1154MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
new file mode 100644
index 000000000000..a1283f6dc018
--- /dev/null
+++ b/drivers/block/swim_iop.c
@@ -0,0 +1,579 @@
1/*
2 * Driver for the SWIM (Super Woz Integrated Machine) IOP
3 * floppy controller on the Macintosh IIfx and Quadra 900/950
4 *
5 * Written by Joshua M. Thompson (funaho@jurai.org)
6 * based on the SWIM3 driver (c) 1996 by Paul Mackerras.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * 1999-06-12 (jmt) - Initial implementation.
14 */
15
16/*
17 * -------------------
18 * Theory of Operation
19 * -------------------
20 *
21 * Since the SWIM IOP is message-driven we implement a simple request queue
22 * system. One outstanding request may be queued at any given time (this is
23 * an IOP limitation); only when that request has completed can a new request
24 * be sent.
25 */
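/*
 * For illustration, a rough sketch of the resulting send-and-wait
 * pattern (the same pattern appears in swimiop_init() and
 * swimiop_eject() below; the structures are defined later in this
 * file, and the request is declared volatile there so the busy-wait
 * is not optimized away):
 *
 *	volatile struct swim_iop_req req;
 *	struct swimcmd_status *cmd = (struct swimcmd_status *) &req.command[0];
 *
 *	swimiop_init_request(&req);
 *	cmd->code = CMD_STATUS;
 *	cmd->drive_num = i + 1;			(probe drive i, 1-based)
 *	if (swimiop_send_request(&req) == 0)	(queues the one outstanding request)
 *		while (!req.complete)		(completion is set by swimiop_receive())
 *			;
 */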
26
27#include <linux/stddef.h>
28#include <linux/kernel.h>
29#include <linux/sched.h>
30#include <linux/timer.h>
31#include <linux/delay.h>
32#include <linux/fd.h>
33#include <linux/ioctl.h>
34#include <linux/blkdev.h>
35#include <asm/io.h>
36#include <asm/uaccess.h>
37#include <asm/mac_iop.h>
38#include <asm/swim_iop.h>
39
40#define DRIVER_VERSION "Version 0.1 (1999-06-12)"
41
42#define MAX_FLOPPIES 4
43
44enum swim_state {
45 idle,
46 available,
47 revalidating,
48 transferring,
49 ejecting
50};
51
52struct floppy_state {
53 enum swim_state state;
54 int drive_num; /* device number */
55 int secpercyl; /* disk geometry information */
56 int secpertrack;
57 int total_secs;
58 int write_prot; /* 1 if write-protected, 0 if not, -1 dunno */
59 int ref_count;
60 struct timer_list timeout;
61 int ejected;
62 struct wait_queue *wait;
63 int wanted;
64 int timeout_pending;
65};
66
67struct swim_iop_req {
68 int sent;
69 int complete;
70 __u8 command[32];
71 struct floppy_state *fs;
72 void (*done)(struct swim_iop_req *);
73};
74
75static struct swim_iop_req *current_req;
76static int floppy_count;
77
78static struct floppy_state floppy_states[MAX_FLOPPIES];
79static DEFINE_SPINLOCK(swim_iop_lock);
80
81#define CURRENT elv_next_request(swim_queue)
82
83static char *drive_names[8] = {
84 "not installed", /* DRV_NONE */
85 "unknown (1)", /* DRV_UNKNOWN */
86 "a 400K drive", /* DRV_400K */
87 "an 800K drive", /* DRV_800K */
88 "unknown (4)", /* ???? */
89 "an FDHD", /* DRV_FDHD */
90 "unknown (6)", /* ???? */
91 "an Apple HD20" /* DRV_HD20 */
92};
93
94int swimiop_init(void);
95static void swimiop_init_request(struct swim_iop_req *);
96static int swimiop_send_request(struct swim_iop_req *);
97static void swimiop_receive(struct iop_msg *, struct pt_regs *);
98static void swimiop_status_update(int, struct swim_drvstatus *);
99static int swimiop_eject(struct floppy_state *fs);
100
101static int floppy_ioctl(struct inode *inode, struct file *filp,
102 unsigned int cmd, unsigned long param);
103static int floppy_open(struct inode *inode, struct file *filp);
104static int floppy_release(struct inode *inode, struct file *filp);
105static int floppy_check_change(struct gendisk *disk);
106static int floppy_revalidate(struct gendisk *disk);
107static int grab_drive(struct floppy_state *fs, enum swim_state state,
108 int interruptible);
109static void release_drive(struct floppy_state *fs);
110static void set_timeout(struct floppy_state *fs, int nticks,
111 void (*proc)(unsigned long));
112static void fd_request_timeout(unsigned long);
113static void do_fd_request(request_queue_t * q);
114static void start_request(struct floppy_state *fs);
115
116static struct block_device_operations floppy_fops = {
117 .open = floppy_open,
118 .release = floppy_release,
119 .ioctl = floppy_ioctl,
120 .media_changed = floppy_check_change,
121 .revalidate_disk= floppy_revalidate,
122};
123
124static struct request_queue *swim_queue;
125/*
126 * SWIM IOP initialization
127 */
128
129int swimiop_init(void)
130{
131 volatile struct swim_iop_req req;
132 struct swimcmd_status *cmd = (struct swimcmd_status *) &req.command[0];
133 struct swim_drvstatus *ds = &cmd->status;
134 struct floppy_state *fs;
135 int i;
136
137 current_req = NULL;
138 floppy_count = 0;
139
140 if (!iop_ism_present)
141 return -ENODEV;
142
143 if (register_blkdev(FLOPPY_MAJOR, "fd"))
144 return -EBUSY;
145
146 swim_queue = blk_init_queue(do_fd_request, &swim_iop_lock);
147 if (!swim_queue) {
148 unregister_blkdev(FLOPPY_MAJOR, "fd");
149 return -ENOMEM;
150 }
151
152 printk("SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
153 DRIVER_VERSION);
154
155 if (iop_listen(SWIM_IOP, SWIM_CHAN, swimiop_receive, "SWIM") != 0) {
156 printk(KERN_ERR "SWIM-IOP: IOP channel already in use; can't initialize.\n");
157 unregister_blkdev(FLOPPY_MAJOR, "fd");
158 blk_cleanup_queue(swim_queue);
159 return -EBUSY;
160 }
161
162 printk(KERN_ERR "SWIM-IOP: probing for installed drives.\n");
163
164 for (i = 0 ; i < MAX_FLOPPIES ; i++) {
165 memset(&floppy_states[i], 0, sizeof(struct floppy_state));
166 fs = &floppy_states[floppy_count];
167
168 swimiop_init_request(&req);
169 cmd->code = CMD_STATUS;
170 cmd->drive_num = i + 1;
171 if (swimiop_send_request(&req) != 0) continue;
172 while (!req.complete);
173 if (cmd->error != 0) {
174 printk(KERN_ERR "SWIM-IOP: probe on drive %d returned error %d\n", i, (uint) cmd->error);
175 continue;
176 }
177 if (ds->installed != 0x01) continue;
178 printk("SWIM-IOP: drive %d is %s (%s, %s, %s, %s)\n", i,
179 drive_names[ds->info.type],
180 ds->info.external? "ext" : "int",
181 ds->info.scsi? "scsi" : "floppy",
182 ds->info.fixed? "fixed" : "removable",
183 ds->info.secondary? "secondary" : "primary");
184 swimiop_status_update(floppy_count, ds);
185 fs->state = idle;
186
187 init_timer(&fs->timeout);
188 floppy_count++;
189 }
190 printk("SWIM-IOP: detected %d installed drives.\n", floppy_count);
191
192 for (i = 0; i < floppy_count; i++) {
193 struct gendisk *disk = alloc_disk(1);
194 if (!disk)
195 continue;
196 disk->major = FLOPPY_MAJOR;
197 disk->first_minor = i;
198 disk->fops = &floppy_fops;
199 sprintf(disk->disk_name, "fd%d", i);
200 disk->private_data = &floppy_states[i];
201 disk->queue = swim_queue;
202 set_capacity(disk, 2880 * 2);
203 add_disk(disk);
204 }
205
206 return 0;
207}
208
209static void swimiop_init_request(struct swim_iop_req *req)
210{
211 req->sent = 0;
212 req->complete = 0;
213 req->done = NULL;
214}
215
216static int swimiop_send_request(struct swim_iop_req *req)
217{
218 unsigned long flags;
219 int err;
220
221 /* It's doubtful an interrupt routine would try to send */
222 /* a SWIM request, but I'd rather play it safe here. */
223
224 local_irq_save(flags);
225
226 if (current_req != NULL) {
227 local_irq_restore(flags);
228 return -ENOMEM;
229 }
230
231 current_req = req;
232
233 /* Interrupts should be back on for iop_send_message() */
234
235 local_irq_restore(flags);
236
237 err = iop_send_message(SWIM_IOP, SWIM_CHAN, (void *) req,
238 sizeof(req->command), (__u8 *) &req->command[0],
239 swimiop_receive);
240
241 /* No race condition here; we own current_req at this point */
242
243 if (err) {
244 current_req = NULL;
245 } else {
246 req->sent = 1;
247 }
248 return err;
249}
250
251/*
252 * Receive a SWIM message from the IOP.
253 *
254 * This will be called in two cases:
255 *
256 * 1. A message has been successfully sent to the IOP.
257 * 2. An unsolicited message was received from the IOP.
258 */
259
260void swimiop_receive(struct iop_msg *msg, struct pt_regs *regs)
261{
262 struct swim_iop_req *req;
263 struct swimmsg_status *sm;
264 struct swim_drvstatus *ds;
265
266 req = current_req;
267
268 switch(msg->status) {
269 case IOP_MSGSTATUS_COMPLETE:
270 memcpy(&req->command[0], &msg->reply[0], sizeof(req->command));
271 req->complete = 1;
272 if (req->done) (*req->done)(req);
273 current_req = NULL;
274 break;
275 case IOP_MSGSTATUS_UNSOL:
276 sm = (struct swimmsg_status *) &msg->message[0];
277 ds = &sm->status;
278 swimiop_status_update(sm->drive_num, ds);
279 iop_complete_message(msg);
280 break;
281 }
282}
283
284static void swimiop_status_update(int drive_num, struct swim_drvstatus *ds)
285{
286 struct floppy_state *fs = &floppy_states[drive_num];
287
288 fs->write_prot = (ds->write_prot == 0x80);
289 if ((ds->disk_in_drive != 0x01) && (ds->disk_in_drive != 0x02)) {
290 fs->ejected = 1;
291 } else {
292 fs->ejected = 0;
293 }
294 switch(ds->info.type) {
295 case DRV_400K:
296 fs->secpercyl = 10;
297 fs->secpertrack = 10;
298 fs->total_secs = 800;
299 break;
300 case DRV_800K:
301 fs->secpercyl = 20;
302 fs->secpertrack = 10;
303 fs->total_secs = 1600;
304 break;
305 case DRV_FDHD:
306 fs->secpercyl = 36;
307 fs->secpertrack = 18;
308 fs->total_secs = 2880;
309 break;
310 default:
311 fs->secpercyl = 0;
312 fs->secpertrack = 0;
313 fs->total_secs = 0;
314 break;
315 }
316}
317
318static int swimiop_eject(struct floppy_state *fs)
319{
320 int err, n;
321 struct swim_iop_req req;
322 struct swimcmd_eject *cmd = (struct swimcmd_eject *) &req.command[0];
323
324 err = grab_drive(fs, ejecting, 1);
325 if (err) return err;
326
327 swimiop_init_request(&req);
328 cmd->code = CMD_EJECT;
329 cmd->drive_num = fs->drive_num;
330 err = swimiop_send_request(&req);
331 if (err) {
332 release_drive(fs);
333 return err;
334 }
335 for (n = 2*HZ; n > 0; --n) {
336 if (req.complete) break;
337 if (signal_pending(current)) {
338 err = -EINTR;
339 break;
340 }
341 current->state = TASK_INTERRUPTIBLE;
342 schedule_timeout(1);
343 }
344 release_drive(fs);
345 return cmd->error;
346}
347
348static struct floppy_struct floppy_type =
349 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
350
351static int floppy_ioctl(struct inode *inode, struct file *filp,
352 unsigned int cmd, unsigned long param)
353{
354 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
355 int err;
356
357 if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
358 return -EPERM;
359
360 switch (cmd) {
361 case FDEJECT:
362 if (fs->ref_count != 1)
363 return -EBUSY;
364 err = swimiop_eject(fs);
365 return err;
366 case FDGETPRM:
367 if (copy_to_user((void *) param, (void *) &floppy_type,
368 sizeof(struct floppy_struct)))
369 return -EFAULT;
370 return 0;
371 }
372 return -ENOTTY;
373}
374
375static int floppy_open(struct inode *inode, struct file *filp)
376{
377 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
378
379 if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
380 return -EBUSY;
381
382 if ((filp->f_flags & O_NDELAY) == 0 && (filp->f_mode & 3)) {
383 check_disk_change(inode->i_bdev);
384 if (fs->ejected)
385 return -ENXIO;
386 }
387
388 if ((filp->f_mode & 2) && fs->write_prot)
389 return -EROFS;
390
391 if (filp->f_flags & O_EXCL)
392 fs->ref_count = -1;
393 else
394 ++fs->ref_count;
395
396 return 0;
397}
398
399static int floppy_release(struct inode *inode, struct file *filp)
400{
401 struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
402 if (fs->ref_count > 0)
403 fs->ref_count--;
404 return 0;
405}
406
407static int floppy_check_change(struct gendisk *disk)
408{
409 struct floppy_state *fs = disk->private_data;
410 return fs->ejected;
411}
412
413static int floppy_revalidate(struct gendisk *disk)
414{
415 struct floppy_state *fs = disk->private_data;
416 grab_drive(fs, revalidating, 0);
417 /* yadda, yadda */
418 release_drive(fs);
419 return 0;
420}
421
422static void floppy_off(unsigned int nr)
423{
424}
425
426static int grab_drive(struct floppy_state *fs, enum swim_state state,
427 int interruptible)
428{
429 unsigned long flags;
430
431 local_irq_save(flags);
432 if (fs->state != idle) {
433 ++fs->wanted;
434 while (fs->state != available) {
435 if (interruptible && signal_pending(current)) {
436 --fs->wanted;
437 local_irq_restore(flags);
438 return -EINTR;
439 }
440 interruptible_sleep_on(&fs->wait);
441 }
442 --fs->wanted;
443 }
444 fs->state = state;
445 local_irq_restore(flags);
446 return 0;
447}
448
449static void release_drive(struct floppy_state *fs)
450{
451 unsigned long flags;
452
453 local_irq_save(flags);
454 fs->state = idle;
455 start_request(fs);
456 local_irq_restore(flags);
457}
458
459static void set_timeout(struct floppy_state *fs, int nticks,
460 void (*proc)(unsigned long))
461{
462 unsigned long flags;
463
464 local_irq_save(flags);
465 if (fs->timeout_pending)
466 del_timer(&fs->timeout);
467 init_timer(&fs->timeout);
468 fs->timeout.expires = jiffies + nticks;
469 fs->timeout.function = proc;
470 fs->timeout.data = (unsigned long) fs;
471 add_timer(&fs->timeout);
472 fs->timeout_pending = 1;
473 local_irq_restore(flags);
474}
475
476static void do_fd_request(request_queue_t * q)
477{
478 int i;
479
480 for (i = 0 ; i < floppy_count ; i++) {
481 start_request(&floppy_states[i]);
482 }
483}
484
485static void fd_request_complete(struct swim_iop_req *req)
486{
487 struct floppy_state *fs = req->fs;
488 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req->command[0];
489
490 del_timer(&fs->timeout);
491 fs->timeout_pending = 0;
492 fs->state = idle;
493 if (cmd->error) {
494 printk(KERN_ERR "SWIM-IOP: error %d on read/write request.\n", cmd->error);
495 end_request(CURRENT, 0);
496 } else {
497 CURRENT->sector += cmd->num_blocks;
498 CURRENT->current_nr_sectors -= cmd->num_blocks;
499 if (CURRENT->current_nr_sectors <= 0) {
500 end_request(CURRENT, 1);
501 return;
502 }
503 }
504 start_request(fs);
505}
506
507static void fd_request_timeout(unsigned long data)
508{
509 struct floppy_state *fs = (struct floppy_state *) data;
510
511 fs->timeout_pending = 0;
512 end_request(CURRENT, 0);
513 fs->state = idle;
514}
515
516static void start_request(struct floppy_state *fs)
517{
518 volatile struct swim_iop_req req;
519 struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req.command[0];
520
521 if (fs->state == idle && fs->wanted) {
522 fs->state = available;
523 wake_up(&fs->wait);
524 return;
525 }
526 while (CURRENT && fs->state == idle) {
527 if (CURRENT->bh && !buffer_locked(CURRENT->bh))
528 panic("floppy: block not locked");
529#if 0
530 printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
531 CURRENT->rq_disk->disk_name, CURRENT->cmd,
532 CURRENT->sector, CURRENT->nr_sectors, CURRENT->buffer);
533 printk(" rq_status=%d errors=%d current_nr_sectors=%ld\n",
534 CURRENT->rq_status, CURRENT->errors, CURRENT->current_nr_sectors);
535#endif
536
537 if (CURRENT->sector < 0 || CURRENT->sector >= fs->total_secs) {
538 end_request(CURRENT, 0);
539 continue;
540 }
541 if (CURRENT->current_nr_sectors == 0) {
542 end_request(CURRENT, 1);
543 continue;
544 }
545 if (fs->ejected) {
546 end_request(CURRENT, 0);
547 continue;
548 }
549
550 swimiop_init_request(&req);
551 req.fs = fs;
552 req.done = fd_request_complete;
553
554 if (CURRENT->cmd == WRITE) {
555 if (fs->write_prot) {
556 end_request(CURRENT, 0);
557 continue;
558 }
559 cmd->code = CMD_WRITE;
560 } else {
561 cmd->code = CMD_READ;
562
563 }
564 cmd->drive_num = fs->drive_num;
565 cmd->buffer = CURRENT->buffer;
566 cmd->first_block = CURRENT->sector;
567 cmd->num_blocks = CURRENT->current_nr_sectors;
568
569 if (swimiop_send_request(&req)) {
570 end_request(CURRENT, 0);
571 continue;
572 }
573
574 set_timeout(fs, HZ*CURRENT->current_nr_sectors,
575 fd_request_timeout);
576
577 fs->state = transferring;
578 }
579}
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
new file mode 100644
index 000000000000..797f5988c2b5
--- /dev/null
+++ b/drivers/block/sx8.c
@@ -0,0 +1,1764 @@
1/*
2 * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
3 *
4 * Copyright 2004 Red Hat, Inc.
5 *
6 * Author/maintainer: Jeff Garzik <jgarzik@pobox.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/blkdev.h>
20#include <linux/sched.h>
21#include <linux/devfs_fs_kernel.h>
22#include <linux/interrupt.h>
23#include <linux/compiler.h>
24#include <linux/workqueue.h>
25#include <linux/bitops.h>
26#include <linux/delay.h>
27#include <linux/time.h>
28#include <linux/hdreg.h>
29#include <asm/io.h>
30#include <asm/semaphore.h>
31#include <asm/uaccess.h>
32
33MODULE_AUTHOR("Jeff Garzik");
34MODULE_LICENSE("GPL");
35MODULE_DESCRIPTION("Promise SATA SX8 block driver");
36
37#if 0
38#define CARM_DEBUG
39#define CARM_VERBOSE_DEBUG
40#else
41#undef CARM_DEBUG
42#undef CARM_VERBOSE_DEBUG
43#endif
44#undef CARM_NDEBUG
45
46#define DRV_NAME "sx8"
47#define DRV_VERSION "0.8"
48#define PFX DRV_NAME ": "
49
50#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN)
51
52/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
53#define TAG_ENCODE(tag) (((tag) << 16) | 0xf)
54#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f)
55#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
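/*
 * For example, with tag 3: TAG_ENCODE(3) == (3 << 16) | 0xf == 0x0003000f,
 * TAG_DECODE(0x0003000f) == 3, and TAG_VALID(0x0003000f) holds because the
 * low nibble is 0xf and the decoded tag (3) is below 32.  A stale or zeroed
 * handle fails the low-nibble check and is rejected.
 */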
56
57/* note: prints function name for you */
58#ifdef CARM_DEBUG
59#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
60#ifdef CARM_VERBOSE_DEBUG
61#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
62#else
63#define VPRINTK(fmt, args...)
64#endif /* CARM_VERBOSE_DEBUG */
65#else
66#define DPRINTK(fmt, args...)
67#define VPRINTK(fmt, args...)
68#endif /* CARM_DEBUG */
69
70#ifdef CARM_NDEBUG
71#define assert(expr)
72#else
73#define assert(expr) \
74 if(unlikely(!(expr))) { \
75 printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
76 #expr,__FILE__,__FUNCTION__,__LINE__); \
77 }
78#endif
79
80/* defines only for the constants which don't work well as enums */
81struct carm_host;
82
83enum {
84 /* adapter-wide limits */
85 CARM_MAX_PORTS = 8,
86 CARM_SHM_SIZE = (4096 << 7),
87 CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS,
88 CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1,
89
90 /* command message queue limits */
91 CARM_MAX_REQ = 64, /* max command msgs per host */
92 CARM_MAX_Q = 1, /* one command at a time */
93 CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */
94
95 /* S/G limits, host-wide and per-request */
96 CARM_MAX_REQ_SG = 32, /* max s/g entries per request */
97 CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */
98 CARM_MAX_HOST_SG = 600, /* max s/g entries per host */
99 CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */
100
101 /* hardware registers */
102 CARM_IHQP = 0x1c,
103 CARM_INT_STAT = 0x10, /* interrupt status */
104 CARM_INT_MASK = 0x14, /* interrupt mask */
105 CARM_HMUC = 0x18, /* host message unit control */
106 RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */
107 RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */
108 RBUF_BYTE_SZ = 0x28,
109 CARM_RESP_IDX = 0x2c,
110 CARM_CMS0 = 0x30, /* command message size reg 0 */
111 CARM_LMUC = 0x48,
112 CARM_HMPHA = 0x6c,
113 CARM_INITC = 0xb5,
114
115 /* bits in CARM_INT_{STAT,MASK} */
116 INT_RESERVED = 0xfffffff0,
117 INT_WATCHDOG = (1 << 3), /* watchdog timer */
118 INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */
119 INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */
120 INT_RESPONSE = (1 << 0), /* response msg available */
121 INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW,
122 INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW |
123 INT_RESPONSE,
124
125 /* command messages, and related register bits */
126 CARM_HAVE_RESP = 0x01,
127 CARM_MSG_READ = 1,
128 CARM_MSG_WRITE = 2,
129 CARM_MSG_VERIFY = 3,
130 CARM_MSG_GET_CAPACITY = 4,
131 CARM_MSG_FLUSH = 5,
132 CARM_MSG_IOCTL = 6,
133 CARM_MSG_ARRAY = 8,
134 CARM_MSG_MISC = 9,
135 CARM_CME = (1 << 2),
136 CARM_RME = (1 << 1),
137 CARM_WZBC = (1 << 0),
138 CARM_RMI = (1 << 0),
139 CARM_Q_FULL = (1 << 3),
140 CARM_MSG_SIZE = 288,
141 CARM_Q_LEN = 48,
142
143 /* CARM_MSG_IOCTL messages */
144 CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */
145 CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */
146 CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */
147
148 IOC_SCAN_CHAN_NODEV = 0x1f,
149 IOC_SCAN_CHAN_OFFSET = 0x40,
150
151 /* CARM_MSG_ARRAY messages */
152 CARM_ARRAY_INFO = 0,
153
154 ARRAY_NO_EXIST = (1 << 31),
155
156 /* response messages */
157 RMSG_SZ = 8, /* sizeof(struct carm_response) */
158 RMSG_Q_LEN = 48, /* resp. msg list length */
159 RMSG_OK = 1, /* bit indicating msg was successful */
160 /* length of entire resp. msg buffer */
161 RBUF_LEN = RMSG_SZ * RMSG_Q_LEN,
162
163 PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */
164
165 /* CARM_MSG_MISC messages */
166 MISC_GET_FW_VER = 2,
167 MISC_ALLOC_MEM = 3,
168 MISC_SET_TIME = 5,
169
170 /* MISC_GET_FW_VER feature bits */
171 FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */
172 FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */
173 FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */
174
175 /* carm_host flags */
176 FL_NON_RAID = FW_VER_NON_RAID,
177 FL_4PORT = FW_VER_4PORT,
178 FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
179 FL_DAC = (1 << 16),
180 FL_DYN_MAJOR = (1 << 17),
181};
182
183enum scatter_gather_types {
184 SGT_32BIT = 0,
185 SGT_64BIT = 1,
186};
187
188enum host_states {
189 HST_INVALID, /* invalid state; never used */
190 HST_ALLOC_BUF, /* setting up master SHM area */
191 HST_ERROR, /* we never leave here */
192 HST_PORT_SCAN, /* start dev scan */
193 HST_DEV_SCAN_START, /* start per-device probe */
194 HST_DEV_SCAN, /* continue per-device probe */
195 HST_DEV_ACTIVATE, /* activate devices we found */
196 HST_PROBE_FINISHED, /* probe is complete */
197 HST_PROBE_START, /* initiate probe */
198 HST_SYNC_TIME, /* tell firmware what time it is */
199 HST_GET_FW_VER, /* get firmware version, adapter port cnt */
200};
201
202#ifdef CARM_DEBUG
203static const char *state_name[] = {
204 "HST_INVALID",
205 "HST_ALLOC_BUF",
206 "HST_ERROR",
207 "HST_PORT_SCAN",
208 "HST_DEV_SCAN_START",
209 "HST_DEV_SCAN",
210 "HST_DEV_ACTIVATE",
211 "HST_PROBE_FINISHED",
212 "HST_PROBE_START",
213 "HST_SYNC_TIME",
214 "HST_GET_FW_VER",
215};
216#endif
217
218struct carm_port {
219 unsigned int port_no;
220 unsigned int n_queued;
221 struct gendisk *disk;
222 struct carm_host *host;
223
224 /* attached device characteristics */
225 u64 capacity;
226 char name[41];
227 u16 dev_geom_head;
228 u16 dev_geom_sect;
229 u16 dev_geom_cyl;
230};
231
232struct carm_request {
233 unsigned int tag;
234 int n_elem;
235 unsigned int msg_type;
236 unsigned int msg_subtype;
237 unsigned int msg_bucket;
238 struct request *rq;
239 struct carm_port *port;
240 struct scatterlist sg[CARM_MAX_REQ_SG];
241};
242
243struct carm_host {
244 unsigned long flags;
245 void __iomem *mmio;
246 void *shm;
247 dma_addr_t shm_dma;
248
249 int major;
250 int id;
251 char name[32];
252
253 spinlock_t lock;
254 struct pci_dev *pdev;
255 unsigned int state;
256 u32 fw_ver;
257
258 request_queue_t *oob_q;
259 unsigned int n_oob;
260
261 unsigned int hw_sg_used;
262
263 unsigned int resp_idx;
264
265 unsigned int wait_q_prod;
266 unsigned int wait_q_cons;
267 request_queue_t *wait_q[CARM_MAX_WAIT_Q];
268
269 unsigned int n_msgs;
270 u64 msg_alloc;
271 struct carm_request req[CARM_MAX_REQ];
272 void *msg_base;
273 dma_addr_t msg_dma;
274
275 int cur_scan_dev;
276 unsigned long dev_active;
277 unsigned long dev_present;
278 struct carm_port port[CARM_MAX_PORTS];
279
280 struct work_struct fsm_task;
281
282 struct semaphore probe_sem;
283};
284
285struct carm_response {
286 __le32 ret_handle;
287 __le32 status;
288} __attribute__((packed));
289
290struct carm_msg_sg {
291 __le32 start;
292 __le32 len;
293} __attribute__((packed));
294
295struct carm_msg_rw {
296 u8 type;
297 u8 id;
298 u8 sg_count;
299 u8 sg_type;
300 __le32 handle;
301 __le32 lba;
302 __le16 lba_count;
303 __le16 lba_high;
304 struct carm_msg_sg sg[32];
305} __attribute__((packed));
306
307struct carm_msg_allocbuf {
308 u8 type;
309 u8 subtype;
310 u8 n_sg;
311 u8 sg_type;
312 __le32 handle;
313 __le32 addr;
314 __le32 len;
315 __le32 evt_pool;
316 __le32 n_evt;
317 __le32 rbuf_pool;
318 __le32 n_rbuf;
319 __le32 msg_pool;
320 __le32 n_msg;
321 struct carm_msg_sg sg[8];
322} __attribute__((packed));
323
324struct carm_msg_ioctl {
325 u8 type;
326 u8 subtype;
327 u8 array_id;
328 u8 reserved1;
329 __le32 handle;
330 __le32 data_addr;
331 u32 reserved2;
332} __attribute__((packed));
333
334struct carm_msg_sync_time {
335 u8 type;
336 u8 subtype;
337 u16 reserved1;
338 __le32 handle;
339 u32 reserved2;
340 __le32 timestamp;
341} __attribute__((packed));
342
343struct carm_msg_get_fw_ver {
344 u8 type;
345 u8 subtype;
346 u16 reserved1;
347 __le32 handle;
348 __le32 data_addr;
349 u32 reserved2;
350} __attribute__((packed));
351
352struct carm_fw_ver {
353 __le32 version;
354 u8 features;
355 u8 reserved1;
356 u16 reserved2;
357} __attribute__((packed));
358
359struct carm_array_info {
360 __le32 size;
361
362 __le16 size_hi;
363 __le16 stripe_size;
364
365 __le32 mode;
366
367 __le16 stripe_blk_sz;
368 __le16 reserved1;
369
370 __le16 cyl;
371 __le16 head;
372
373 __le16 sect;
374 u8 array_id;
375 u8 reserved2;
376
377 char name[40];
378
379 __le32 array_status;
380
381 /* device list continues beyond this point? */
382} __attribute__((packed));
383
384static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
385static void carm_remove_one (struct pci_dev *pdev);
386static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
387 unsigned int cmd, unsigned long arg);
388
389static struct pci_device_id carm_pci_tbl[] = {
390 { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
391 { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
392 { } /* terminate list */
393};
394MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
395
396static struct pci_driver carm_driver = {
397 .name = DRV_NAME,
398 .id_table = carm_pci_tbl,
399 .probe = carm_init_one,
400 .remove = carm_remove_one,
401};
402
403static struct block_device_operations carm_bd_ops = {
404 .owner = THIS_MODULE,
405 .ioctl = carm_bdev_ioctl,
406};
407
408static unsigned int carm_host_id;
409static unsigned long carm_major_alloc;
410
411
412
413static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
414 unsigned int cmd, unsigned long arg)
415{
416 void __user *usermem = (void __user *) arg;
417 struct carm_port *port = ino->i_bdev->bd_disk->private_data;
418 struct hd_geometry geom;
419
420 switch (cmd) {
421 case HDIO_GETGEO:
422 if (!usermem)
423 return -EINVAL;
424
425 geom.heads = (u8) port->dev_geom_head;
426 geom.sectors = (u8) port->dev_geom_sect;
427 geom.cylinders = port->dev_geom_cyl;
428 geom.start = get_start_sect(ino->i_bdev);
429
430 if (copy_to_user(usermem, &geom, sizeof(geom)))
431 return -EFAULT;
432 return 0;
433
434 default:
435 break;
436 }
437
438 return -EOPNOTSUPP;
439}
440
441static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
442
443static inline int carm_lookup_bucket(u32 msg_size)
444{
445 int i;
446
447 for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
448 if (msg_size <= msg_sizes[i])
449 return i;
450
451 return -ENOENT;
452}
453
454static void carm_init_buckets(void __iomem *mmio)
455{
456 unsigned int i;
457
458 for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
459 writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
460}
461
462static inline void *carm_ref_msg(struct carm_host *host,
463 unsigned int msg_idx)
464{
465 return host->msg_base + (msg_idx * CARM_MSG_SIZE);
466}
467
468static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
469 unsigned int msg_idx)
470{
471 return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
472}
473
474static int carm_send_msg(struct carm_host *host,
475 struct carm_request *crq)
476{
477 void __iomem *mmio = host->mmio;
478 u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
479 u32 cm_bucket = crq->msg_bucket;
480 u32 tmp;
481 int rc = 0;
482
483 VPRINTK("ENTER\n");
484
485 tmp = readl(mmio + CARM_HMUC);
486 if (tmp & CARM_Q_FULL) {
487#if 0
488 tmp = readl(mmio + CARM_INT_MASK);
489 tmp |= INT_Q_AVAILABLE;
490 writel(tmp, mmio + CARM_INT_MASK);
491 readl(mmio + CARM_INT_MASK); /* flush */
492#endif
493 DPRINTK("host msg queue full\n");
494 rc = -EBUSY;
495 } else {
496 writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
497 readl(mmio + CARM_IHQP); /* flush */
498 }
499
500 return rc;
501}
502
503static struct carm_request *carm_get_request(struct carm_host *host)
504{
505 unsigned int i;
506
507 /* obey global hardware limit on S/G entries */
508 if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
509 return NULL;
510
511 for (i = 0; i < CARM_MAX_Q; i++)
512 if ((host->msg_alloc & (1ULL << i)) == 0) {
513 struct carm_request *crq = &host->req[i];
514 crq->port = NULL;
515 crq->n_elem = 0;
516
517 host->msg_alloc |= (1ULL << i);
518 host->n_msgs++;
519
520 assert(host->n_msgs <= CARM_MAX_REQ);
521 return crq;
522 }
523
524 DPRINTK("no request available, returning NULL\n");
525 return NULL;
526}
527
528static int carm_put_request(struct carm_host *host, struct carm_request *crq)
529{
530 assert(crq->tag < CARM_MAX_Q);
531
532 if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
533 return -EINVAL; /* tried to clear a tag that was not active */
534
535 assert(host->hw_sg_used >= crq->n_elem);
536
537 host->msg_alloc &= ~(1ULL << crq->tag);
538 host->hw_sg_used -= crq->n_elem;
539 host->n_msgs--;
540
541 return 0;
542}
543
544static struct carm_request *carm_get_special(struct carm_host *host)
545{
546 unsigned long flags;
547 struct carm_request *crq = NULL;
548 struct request *rq;
549 int tries = 5000;
550
551 while (tries-- > 0) {
552 spin_lock_irqsave(&host->lock, flags);
553 crq = carm_get_request(host);
554 spin_unlock_irqrestore(&host->lock, flags);
555
556 if (crq)
557 break;
558 msleep(10);
559 }
560
561 if (!crq)
562 return NULL;
563
564 rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
565 if (!rq) {
566 spin_lock_irqsave(&host->lock, flags);
567 carm_put_request(host, crq);
568 spin_unlock_irqrestore(&host->lock, flags);
569 return NULL;
570 }
571
572 crq->rq = rq;
573 return crq;
574}
575
576static int carm_array_info (struct carm_host *host, unsigned int array_idx)
577{
578 struct carm_msg_ioctl *ioc;
579 unsigned int idx;
580 u32 msg_data;
581 dma_addr_t msg_dma;
582 struct carm_request *crq;
583 int rc;
584
585 crq = carm_get_special(host);
586 if (!crq) {
587 rc = -ENOMEM;
588 goto err_out;
589 }
590
591 idx = crq->tag;
592
593 ioc = carm_ref_msg(host, idx);
594 msg_dma = carm_ref_msg_dma(host, idx);
595 msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
596
597 crq->msg_type = CARM_MSG_ARRAY;
598 crq->msg_subtype = CARM_ARRAY_INFO;
599 rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
600 sizeof(struct carm_array_info));
601 BUG_ON(rc < 0);
602 crq->msg_bucket = (u32) rc;
603
604 memset(ioc, 0, sizeof(*ioc));
605 ioc->type = CARM_MSG_ARRAY;
606 ioc->subtype = CARM_ARRAY_INFO;
607 ioc->array_id = (u8) array_idx;
608 ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
609 ioc->data_addr = cpu_to_le32(msg_data);
610
611 spin_lock_irq(&host->lock);
612 assert(host->state == HST_DEV_SCAN_START ||
613 host->state == HST_DEV_SCAN);
614 spin_unlock_irq(&host->lock);
615
616 DPRINTK("blk_insert_request, tag == %u\n", idx);
617 blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
618
619 return 0;
620
621err_out:
622 spin_lock_irq(&host->lock);
623 host->state = HST_ERROR;
624 spin_unlock_irq(&host->lock);
625 return rc;
626}
627
628typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
629
630static int carm_send_special (struct carm_host *host, carm_sspc_t func)
631{
632 struct carm_request *crq;
633 struct carm_msg_ioctl *ioc;
634 void *mem;
635 unsigned int idx, msg_size;
636 int rc;
637
638 crq = carm_get_special(host);
639 if (!crq)
640 return -ENOMEM;
641
642 idx = crq->tag;
643
644 mem = carm_ref_msg(host, idx);
645
646 msg_size = func(host, idx, mem);
647
648 ioc = mem;
649 crq->msg_type = ioc->type;
650 crq->msg_subtype = ioc->subtype;
651 rc = carm_lookup_bucket(msg_size);
652 BUG_ON(rc < 0);
653 crq->msg_bucket = (u32) rc;
654
655 DPRINTK("blk_insert_request, tag == %u\n", idx);
656 blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
657
658 return 0;
659}
660
661static unsigned int carm_fill_sync_time(struct carm_host *host,
662 unsigned int idx, void *mem)
663{
664 struct timeval tv;
665 struct carm_msg_sync_time *st = mem;
666
667 do_gettimeofday(&tv);
668
669 memset(st, 0, sizeof(*st));
670 st->type = CARM_MSG_MISC;
671 st->subtype = MISC_SET_TIME;
672 st->handle = cpu_to_le32(TAG_ENCODE(idx));
673 st->timestamp = cpu_to_le32(tv.tv_sec);
674
675 return sizeof(struct carm_msg_sync_time);
676}
677
678static unsigned int carm_fill_alloc_buf(struct carm_host *host,
679 unsigned int idx, void *mem)
680{
681 struct carm_msg_allocbuf *ab = mem;
682
683 memset(ab, 0, sizeof(*ab));
684 ab->type = CARM_MSG_MISC;
685 ab->subtype = MISC_ALLOC_MEM;
686 ab->handle = cpu_to_le32(TAG_ENCODE(idx));
687 ab->n_sg = 1;
688 ab->sg_type = SGT_32BIT;
689 ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
690 ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1);
691 ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024));
692 ab->n_evt = cpu_to_le32(1024);
693 ab->rbuf_pool = cpu_to_le32(host->shm_dma);
694 ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN);
695 ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN);
696 ab->n_msg = cpu_to_le32(CARM_Q_LEN);
697 ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
698 ab->sg[0].len = cpu_to_le32(65536);
699
700 return sizeof(struct carm_msg_allocbuf);
701}
702
703static unsigned int carm_fill_scan_channels(struct carm_host *host,
704 unsigned int idx, void *mem)
705{
706 struct carm_msg_ioctl *ioc = mem;
707 u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
708 IOC_SCAN_CHAN_OFFSET);
709
710 memset(ioc, 0, sizeof(*ioc));
711 ioc->type = CARM_MSG_IOCTL;
712 ioc->subtype = CARM_IOC_SCAN_CHAN;
713 ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
714 ioc->data_addr = cpu_to_le32(msg_data);
715
716 /* fill output data area with "no device" default values */
717 mem += IOC_SCAN_CHAN_OFFSET;
718 memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
719
720 return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
721}
722
723static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
724 unsigned int idx, void *mem)
725{
726 struct carm_msg_get_fw_ver *ioc = mem;
727 u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
728
729 memset(ioc, 0, sizeof(*ioc));
730 ioc->type = CARM_MSG_MISC;
731 ioc->subtype = MISC_GET_FW_VER;
732 ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
733 ioc->data_addr = cpu_to_le32(msg_data);
734
735 return sizeof(struct carm_msg_get_fw_ver) +
736 sizeof(struct carm_fw_ver);
737}
738
739static inline void carm_end_request_queued(struct carm_host *host,
740 struct carm_request *crq,
741 int uptodate)
742{
743 struct request *req = crq->rq;
744 int rc;
745
746 rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
747 assert(rc == 0);
748
749 end_that_request_last(req);
750
751 rc = carm_put_request(host, crq);
752 assert(rc == 0);
753}
754
755static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
756{
757 unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
758
759 blk_stop_queue(q);
760 VPRINTK("STOPPED QUEUE %p\n", q);
761
762 host->wait_q[idx] = q;
763 host->wait_q_prod++;
764 BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
765}
766
767static inline request_queue_t *carm_pop_q(struct carm_host *host)
768{
769 unsigned int idx;
770
771 if (host->wait_q_prod == host->wait_q_cons)
772 return NULL;
773
774 idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
775 host->wait_q_cons++;
776
777 return host->wait_q[idx];
778}
779
780static inline void carm_round_robin(struct carm_host *host)
781{
782 request_queue_t *q = carm_pop_q(host);
783 if (q) {
784 blk_start_queue(q);
785 VPRINTK("STARTED QUEUE %p\n", q);
786 }
787}
788
789static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
790 int is_ok)
791{
792 carm_end_request_queued(host, crq, is_ok);
793 if (CARM_MAX_Q == 1)
794 carm_round_robin(host);
795 else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
796 (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
797 carm_round_robin(host);
798 }
799}
800
801static void carm_oob_rq_fn(request_queue_t *q)
802{
803 struct carm_host *host = q->queuedata;
804 struct carm_request *crq;
805 struct request *rq;
806 int rc;
807
808 while (1) {
809 DPRINTK("get req\n");
810 rq = elv_next_request(q);
811 if (!rq)
812 break;
813
814 blkdev_dequeue_request(rq);
815
816 crq = rq->special;
817 assert(crq != NULL);
818 assert(crq->rq == rq);
819
820 crq->n_elem = 0;
821
822 DPRINTK("send req\n");
823 rc = carm_send_msg(host, crq);
824 if (rc) {
825 blk_requeue_request(q, rq);
826 carm_push_q(host, q);
827 return; /* call us again later, eventually */
828 }
829 }
830}
831
832static void carm_rq_fn(request_queue_t *q)
833{
834 struct carm_port *port = q->queuedata;
835 struct carm_host *host = port->host;
836 struct carm_msg_rw *msg;
837 struct carm_request *crq;
838 struct request *rq;
839 struct scatterlist *sg;
840 int writing = 0, pci_dir, i, n_elem, rc;
841 u32 tmp;
842 unsigned int msg_size;
843
844queue_one_request:
845 VPRINTK("get req\n");
846 rq = elv_next_request(q);
847 if (!rq)
848 return;
849
850 crq = carm_get_request(host);
851 if (!crq) {
852 carm_push_q(host, q);
853 return; /* call us again later, eventually */
854 }
855 crq->rq = rq;
856
857 blkdev_dequeue_request(rq);
858
859 if (rq_data_dir(rq) == WRITE) {
860 writing = 1;
861 pci_dir = PCI_DMA_TODEVICE;
862 } else {
863 pci_dir = PCI_DMA_FROMDEVICE;
864 }
865
866 /* get scatterlist from block layer */
867 sg = &crq->sg[0];
868 n_elem = blk_rq_map_sg(q, rq, sg);
869 if (n_elem <= 0) {
870 carm_end_rq(host, crq, 0);
871 return; /* request with no s/g entries? */
872 }
873
874 /* map scatterlist to PCI bus addresses */
875 n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
876 if (n_elem <= 0) {
877 carm_end_rq(host, crq, 0);
878 return; /* request with no s/g entries? */
879 }
880 crq->n_elem = n_elem;
881 crq->port = port;
882 host->hw_sg_used += n_elem;
883
884 /*
885 * build read/write message
886 */
887
888 VPRINTK("build msg\n");
889 msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag);
890
891 if (writing) {
892 msg->type = CARM_MSG_WRITE;
893 crq->msg_type = CARM_MSG_WRITE;
894 } else {
895 msg->type = CARM_MSG_READ;
896 crq->msg_type = CARM_MSG_READ;
897 }
898
899 msg->id = port->port_no;
900 msg->sg_count = n_elem;
901 msg->sg_type = SGT_32BIT;
902 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
903 msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
904 tmp = (rq->sector >> 16) >> 16;
905 msg->lba_high = cpu_to_le16( (u16) tmp );
906 msg->lba_count = cpu_to_le16(rq->nr_sectors);
907
908 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
909 for (i = 0; i < n_elem; i++) {
910 struct carm_msg_sg *carm_sg = &msg->sg[i];
911 carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
912 carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
913 msg_size += sizeof(struct carm_msg_sg);
914 }
915
916 rc = carm_lookup_bucket(msg_size);
917 BUG_ON(rc < 0);
918 crq->msg_bucket = (u32) rc;
919
920 /*
921 * queue read/write message to hardware
922 */
923
924 VPRINTK("send msg, tag == %u\n", crq->tag);
925 rc = carm_send_msg(host, crq);
926 if (rc) {
927 carm_put_request(host, crq);
928 blk_requeue_request(q, rq);
929 carm_push_q(host, q);
930 return; /* call us again later, eventually */
931 }
932
933 goto queue_one_request;
934}
935
936static void carm_handle_array_info(struct carm_host *host,
937 struct carm_request *crq, u8 *mem,
938 int is_ok)
939{
940 struct carm_port *port;
941 u8 *msg_data = mem + sizeof(struct carm_array_info);
942 struct carm_array_info *desc = (struct carm_array_info *) msg_data;
943 u64 lo, hi;
944 int cur_port;
945 size_t slen;
946
947 DPRINTK("ENTER\n");
948
949 carm_end_rq(host, crq, is_ok);
950
951 if (!is_ok)
952 goto out;
953 if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
954 goto out;
955
956 cur_port = host->cur_scan_dev;
957
958 /* should never occur */
959 if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
960 printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
961 cur_port, (int) desc->array_id);
962 goto out;
963 }
964
965 port = &host->port[cur_port];
966
967 lo = (u64) le32_to_cpu(desc->size);
968 hi = (u64) le16_to_cpu(desc->size_hi);
969
970 port->capacity = lo | (hi << 32);
971 port->dev_geom_head = le16_to_cpu(desc->head);
972 port->dev_geom_sect = le16_to_cpu(desc->sect);
973 port->dev_geom_cyl = le16_to_cpu(desc->cyl);
974
975 host->dev_active |= (1 << cur_port);
976
977 strncpy(port->name, desc->name, sizeof(port->name));
978 port->name[sizeof(port->name) - 1] = 0;
979 slen = strlen(port->name);
980 while (slen && (port->name[slen - 1] == ' ')) {
981 port->name[slen - 1] = 0;
982 slen--;
983 }
984
985 printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
986 pci_name(host->pdev), port->port_no,
987 (unsigned long long) port->capacity);
988 printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
989 pci_name(host->pdev), port->port_no, port->name);
990
991out:
992 assert(host->state == HST_DEV_SCAN);
993 schedule_work(&host->fsm_task);
994}
995
996static void carm_handle_scan_chan(struct carm_host *host,
997 struct carm_request *crq, u8 *mem,
998 int is_ok)
999{
1000 u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
1001 unsigned int i, dev_count = 0;
1002 int new_state = HST_DEV_SCAN_START;
1003
1004 DPRINTK("ENTER\n");
1005
1006 carm_end_rq(host, crq, is_ok);
1007
1008 if (!is_ok) {
1009 new_state = HST_ERROR;
1010 goto out;
1011 }
1012
1013 /* TODO: scan and support non-disk devices */
1014 for (i = 0; i < 8; i++)
1015 if (msg_data[i] == 0) { /* direct-access device (disk) */
1016 host->dev_present |= (1 << i);
1017 dev_count++;
1018 }
1019
1020 printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
1021 pci_name(host->pdev), dev_count);
1022
1023out:
1024 assert(host->state == HST_PORT_SCAN);
1025 host->state = new_state;
1026 schedule_work(&host->fsm_task);
1027}
1028
1029static void carm_handle_generic(struct carm_host *host,
1030 struct carm_request *crq, int is_ok,
1031 int cur_state, int next_state)
1032{
1033 DPRINTK("ENTER\n");
1034
1035 carm_end_rq(host, crq, is_ok);
1036
1037 assert(host->state == cur_state);
1038 if (is_ok)
1039 host->state = next_state;
1040 else
1041 host->state = HST_ERROR;
1042 schedule_work(&host->fsm_task);
1043}
1044
1045static inline void carm_handle_rw(struct carm_host *host,
1046 struct carm_request *crq, int is_ok)
1047{
1048 int pci_dir;
1049
1050 VPRINTK("ENTER\n");
1051
1052 if (rq_data_dir(crq->rq) == WRITE)
1053 pci_dir = PCI_DMA_TODEVICE;
1054 else
1055 pci_dir = PCI_DMA_FROMDEVICE;
1056
1057 pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
1058
1059 carm_end_rq(host, crq, is_ok);
1060}
1061
1062static inline void carm_handle_resp(struct carm_host *host,
1063 __le32 ret_handle_le, u32 status)
1064{
1065 u32 handle = le32_to_cpu(ret_handle_le);
1066 unsigned int msg_idx;
1067 struct carm_request *crq;
1068 int is_ok = (status == RMSG_OK);
1069 u8 *mem;
1070
1071 VPRINTK("ENTER, handle == 0x%x\n", handle);
1072
1073 if (unlikely(!TAG_VALID(handle))) {
1074 printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
1075 pci_name(host->pdev), handle);
1076 return;
1077 }
1078
1079 msg_idx = TAG_DECODE(handle);
1080 VPRINTK("tag == %u\n", msg_idx);
1081
1082 crq = &host->req[msg_idx];
1083
1084 /* fast path */
1085 if (likely(crq->msg_type == CARM_MSG_READ ||
1086 crq->msg_type == CARM_MSG_WRITE)) {
1087 carm_handle_rw(host, crq, is_ok);
1088 return;
1089 }
1090
1091 mem = carm_ref_msg(host, msg_idx);
1092
1093 switch (crq->msg_type) {
1094 case CARM_MSG_IOCTL: {
1095 switch (crq->msg_subtype) {
1096 case CARM_IOC_SCAN_CHAN:
1097 carm_handle_scan_chan(host, crq, mem, is_ok);
1098 break;
1099 default:
1100 /* unknown / invalid response */
1101 goto err_out;
1102 }
1103 break;
1104 }
1105
1106 case CARM_MSG_MISC: {
1107 switch (crq->msg_subtype) {
1108 case MISC_ALLOC_MEM:
1109 carm_handle_generic(host, crq, is_ok,
1110 HST_ALLOC_BUF, HST_SYNC_TIME);
1111 break;
1112 case MISC_SET_TIME:
1113 carm_handle_generic(host, crq, is_ok,
1114 HST_SYNC_TIME, HST_GET_FW_VER);
1115 break;
1116 case MISC_GET_FW_VER: {
1117 struct carm_fw_ver *ver = (struct carm_fw_ver *)
1118 (mem + sizeof(struct carm_msg_get_fw_ver));
1119 if (is_ok) {
1120 host->fw_ver = le32_to_cpu(ver->version);
1121 host->flags |= (ver->features & FL_FW_VER_MASK);
1122 }
1123 carm_handle_generic(host, crq, is_ok,
1124 HST_GET_FW_VER, HST_PORT_SCAN);
1125 break;
1126 }
1127 default:
1128 /* unknown / invalid response */
1129 goto err_out;
1130 }
1131 break;
1132 }
1133
1134 case CARM_MSG_ARRAY: {
1135 switch (crq->msg_subtype) {
1136 case CARM_ARRAY_INFO:
1137 carm_handle_array_info(host, crq, mem, is_ok);
1138 break;
1139 default:
1140 /* unknown / invalid response */
1141 goto err_out;
1142 }
1143 break;
1144 }
1145
1146 default:
1147 /* unknown / invalid response */
1148 goto err_out;
1149 }
1150
1151 return;
1152
1153err_out:
1154 printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
1155 pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
1156 carm_end_rq(host, crq, 0);
1157}
1158
1159static inline void carm_handle_responses(struct carm_host *host)
1160{
1161 void __iomem *mmio = host->mmio;
1162 struct carm_response *resp = (struct carm_response *) host->shm;
1163 unsigned int work = 0;
1164 unsigned int idx = host->resp_idx % RMSG_Q_LEN;
1165
1166 while (1) {
1167 u32 status = le32_to_cpu(resp[idx].status);
1168
1169 if (status == 0xffffffff) {
1170 VPRINTK("ending response on index %u\n", idx);
1171 writel(idx << 3, mmio + CARM_RESP_IDX);
1172 break;
1173 }
1174
1175 /* response to a message we sent */
1176 else if ((status & (1 << 31)) == 0) {
1177 VPRINTK("handling msg response on index %u\n", idx);
1178 carm_handle_resp(host, resp[idx].ret_handle, status);
1179 resp[idx].status = cpu_to_le32(0xffffffff);
1180 }
1181
1182 /* asynchronous events the hardware throws our way */
1183 else if ((status & 0xff000000) == (1 << 31)) {
1184 u8 *evt_type_ptr = (u8 *) &resp[idx];
1185 u8 evt_type = *evt_type_ptr;
1186 printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
1187 pci_name(host->pdev), (int) evt_type);
1188 resp[idx].status = cpu_to_le32(0xffffffff);
1189 }
1190
1191 idx = NEXT_RESP(idx);
1192 work++;
1193 }
1194
1195 VPRINTK("EXIT, work==%u\n", work);
1196 host->resp_idx += work;
1197}
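
/*
 * Response-ring status decoding used above (summary; the slot-size figure
 * is an assumption from the visible struct fields):
 *
 *	0xffffffff           - empty slot, stop scanning the ring
 *	bit 31 clear         - reply to a message the host queued
 *	top byte == 0x80     - asynchronous event raised by the hardware
 *
 * Each consumed slot is re-marked 0xffffffff, and CARM_RESP_IDX is written
 * with (idx << 3), presumably because a struct carm_response (ret_handle
 * plus status, two __le32s) occupies eight bytes.
 */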
1198
1199static irqreturn_t carm_interrupt(int irq, void *__host, struct pt_regs *regs)
1200{
1201 struct carm_host *host = __host;
1202 void __iomem *mmio;
1203 u32 mask;
1204 int handled = 0;
1205 unsigned long flags;
1206
1207 if (!host) {
1208 VPRINTK("no host\n");
1209 return IRQ_NONE;
1210 }
1211
1212 spin_lock_irqsave(&host->lock, flags);
1213
1214 mmio = host->mmio;
1215
1216 /* reading should also clear interrupts */
1217 mask = readl(mmio + CARM_INT_STAT);
1218
1219 if (mask == 0 || mask == 0xffffffff) {
1220 VPRINTK("no work, mask == 0x%x\n", mask);
1221 goto out;
1222 }
1223
1224 if (mask & INT_ACK_MASK)
1225 writel(mask, mmio + CARM_INT_STAT);
1226
1227 if (unlikely(host->state == HST_INVALID)) {
1228 VPRINTK("not initialized yet, mask = 0x%x\n", mask);
1229 goto out;
1230 }
1231
1232 if (mask & CARM_HAVE_RESP) {
1233 handled = 1;
1234 carm_handle_responses(host);
1235 }
1236
1237out:
1238 spin_unlock_irqrestore(&host->lock, flags);
1239 VPRINTK("EXIT\n");
1240 return IRQ_RETVAL(handled);
1241}
1242
1243static void carm_fsm_task (void *_data)
1244{
1245 struct carm_host *host = _data;
1246 unsigned long flags;
1247 unsigned int state;
1248 int rc, i, next_dev;
1249 int reschedule = 0;
1250 int new_state = HST_INVALID;
1251
1252 spin_lock_irqsave(&host->lock, flags);
1253 state = host->state;
1254 spin_unlock_irqrestore(&host->lock, flags);
1255
1256 DPRINTK("ENTER, state == %s\n", state_name[state]);
1257
1258 switch (state) {
1259 case HST_PROBE_START:
1260 new_state = HST_ALLOC_BUF;
1261 reschedule = 1;
1262 break;
1263
1264 case HST_ALLOC_BUF:
1265 rc = carm_send_special(host, carm_fill_alloc_buf);
1266 if (rc) {
1267 new_state = HST_ERROR;
1268 reschedule = 1;
1269 }
1270 break;
1271
1272 case HST_SYNC_TIME:
1273 rc = carm_send_special(host, carm_fill_sync_time);
1274 if (rc) {
1275 new_state = HST_ERROR;
1276 reschedule = 1;
1277 }
1278 break;
1279
1280 case HST_GET_FW_VER:
1281 rc = carm_send_special(host, carm_fill_get_fw_ver);
1282 if (rc) {
1283 new_state = HST_ERROR;
1284 reschedule = 1;
1285 }
1286 break;
1287
1288 case HST_PORT_SCAN:
1289 rc = carm_send_special(host, carm_fill_scan_channels);
1290 if (rc) {
1291 new_state = HST_ERROR;
1292 reschedule = 1;
1293 }
1294 break;
1295
1296 case HST_DEV_SCAN_START:
1297 host->cur_scan_dev = -1;
1298 new_state = HST_DEV_SCAN;
1299 reschedule = 1;
1300 break;
1301
1302 case HST_DEV_SCAN:
1303 next_dev = -1;
1304 for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
1305 if (host->dev_present & (1 << i)) {
1306 next_dev = i;
1307 break;
1308 }
1309
1310 if (next_dev >= 0) {
1311 host->cur_scan_dev = next_dev;
1312 rc = carm_array_info(host, next_dev);
1313 if (rc) {
1314 new_state = HST_ERROR;
1315 reschedule = 1;
1316 }
1317 } else {
1318 new_state = HST_DEV_ACTIVATE;
1319 reschedule = 1;
1320 }
1321 break;
1322
1323 case HST_DEV_ACTIVATE: {
1324 int activated = 0;
1325 for (i = 0; i < CARM_MAX_PORTS; i++)
1326 if (host->dev_active & (1 << i)) {
1327 struct carm_port *port = &host->port[i];
1328 struct gendisk *disk = port->disk;
1329
1330 set_capacity(disk, port->capacity);
1331 add_disk(disk);
1332 activated++;
1333 }
1334
1335 printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
1336 pci_name(host->pdev), activated);
1337
1338 new_state = HST_PROBE_FINISHED;
1339 reschedule = 1;
1340 break;
1341 }
1342
1343 case HST_PROBE_FINISHED:
1344 up(&host->probe_sem);
1345 break;
1346
1347 case HST_ERROR:
1348 /* FIXME: TODO */
1349 break;
1350
1351 default:
1352 /* should never occur */
1353 printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
1354 assert(0);
1355 break;
1356 }
1357
1358 if (new_state != HST_INVALID) {
1359 spin_lock_irqsave(&host->lock, flags);
1360 host->state = new_state;
1361 spin_unlock_irqrestore(&host->lock, flags);
1362 }
1363 if (reschedule)
1364 schedule_work(&host->fsm_task);
1365}
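
/*
 * Probe-time state progression as implied by the handlers above (the
 * transition out of HST_PORT_SCAN is driven by the scan-channel completion
 * path and is assumed here to lead to HST_DEV_SCAN_START):
 *
 *	HST_PROBE_START -> HST_ALLOC_BUF -> HST_SYNC_TIME -> HST_GET_FW_VER
 *	  -> HST_PORT_SCAN -> HST_DEV_SCAN_START -> HST_DEV_SCAN (per device)
 *	  -> HST_DEV_ACTIVATE -> HST_PROBE_FINISHED
 *
 * Any failure along the way drops the machine into HST_ERROR instead.
 */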
1366
1367static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
1368{
1369 unsigned int i;
1370
1371 for (i = 0; i < 50000; i++) {
1372 u32 tmp = readl(mmio + CARM_LMUC);
1373 udelay(100);
1374
1375 if (test_bit) {
1376 if ((tmp & bits) == bits)
1377 return 0;
1378 } else {
1379 if ((tmp & bits) == 0)
1380 return 0;
1381 }
1382
1383 cond_resched();
1384 }
1385
1386 printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
1387 bits, test_bit ? "yes" : "no");
1388 return -EBUSY;
1389}
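
/*
 * Rough polling budget for the loop above: 50000 iterations of udelay(100)
 * bound the busy-wait at about 5 seconds (plus whatever cond_resched()
 * adds). Typical callers below look like:
 *
 *	rc = carm_init_wait(mmio, CARM_CME, 1);            // wait for CME set
 *	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0); // wait for both clear
 */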
1390
1391static void carm_init_responses(struct carm_host *host)
1392{
1393 void __iomem *mmio = host->mmio;
1394 unsigned int i;
1395 struct carm_response *resp = (struct carm_response *) host->shm;
1396
1397 for (i = 0; i < RMSG_Q_LEN; i++)
1398 resp[i].status = cpu_to_le32(0xffffffff);
1399
1400 writel(0, mmio + CARM_RESP_IDX);
1401}
1402
1403static int carm_init_host(struct carm_host *host)
1404{
1405 void __iomem *mmio = host->mmio;
1406 u32 tmp;
1407 u8 tmp8;
1408 int rc;
1409
1410 DPRINTK("ENTER\n");
1411
1412 writel(0, mmio + CARM_INT_MASK);
1413
1414 tmp8 = readb(mmio + CARM_INITC);
1415 if (tmp8 & 0x01) {
1416 tmp8 &= ~0x01;
1417 writeb(tmp8, mmio + CARM_INITC);
1418 readb(mmio + CARM_INITC); /* flush */
1419
1420 DPRINTK("snooze...\n");
1421 msleep(5000);
1422 }
1423
1424 tmp = readl(mmio + CARM_HMUC);
1425 if (tmp & CARM_CME) {
1426 DPRINTK("CME bit present, waiting\n");
1427 rc = carm_init_wait(mmio, CARM_CME, 1);
1428 if (rc) {
1429 DPRINTK("EXIT, carm_init_wait 1 failed\n");
1430 return rc;
1431 }
1432 }
1433 if (tmp & CARM_RME) {
1434 DPRINTK("RME bit present, waiting\n");
1435 rc = carm_init_wait(mmio, CARM_RME, 1);
1436 if (rc) {
1437 DPRINTK("EXIT, carm_init_wait 2 failed\n");
1438 return rc;
1439 }
1440 }
1441
1442 tmp &= ~(CARM_RME | CARM_CME);
1443 writel(tmp, mmio + CARM_HMUC);
1444 readl(mmio + CARM_HMUC); /* flush */
1445
1446 rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
1447 if (rc) {
1448 DPRINTK("EXIT, carm_init_wait 3 failed\n");
1449 return rc;
1450 }
1451
1452 carm_init_buckets(mmio);
1453
1454 writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
1455 writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
1456 writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
1457
1458 tmp = readl(mmio + CARM_HMUC);
1459 tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
1460 writel(tmp, mmio + CARM_HMUC);
1461 readl(mmio + CARM_HMUC); /* flush */
1462
1463 rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
1464 if (rc) {
1465 DPRINTK("EXIT, carm_init_wait 4 failed\n");
1466 return rc;
1467 }
1468
1469 writel(0, mmio + CARM_HMPHA);
1470 writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
1471
1472 carm_init_responses(host);
1473
1474 /* start initialization, probing state machine */
1475 spin_lock_irq(&host->lock);
1476 assert(host->state == HST_INVALID);
1477 host->state = HST_PROBE_START;
1478 spin_unlock_irq(&host->lock);
1479 schedule_work(&host->fsm_task);
1480
1481 DPRINTK("EXIT\n");
1482 return 0;
1483}
1484
1485static int carm_init_disks(struct carm_host *host)
1486{
1487 unsigned int i;
1488 int rc = 0;
1489
1490 for (i = 0; i < CARM_MAX_PORTS; i++) {
1491 struct gendisk *disk;
1492 request_queue_t *q;
1493 struct carm_port *port;
1494
1495 port = &host->port[i];
1496 port->host = host;
1497 port->port_no = i;
1498
1499 disk = alloc_disk(CARM_MINORS_PER_MAJOR);
1500 if (!disk) {
1501 rc = -ENOMEM;
1502 break;
1503 }
1504
1505 port->disk = disk;
1506 sprintf(disk->disk_name, DRV_NAME "/%u",
1507 (unsigned int) (host->id * CARM_MAX_PORTS) + i);
1508 sprintf(disk->devfs_name, DRV_NAME "/%u_%u", host->id, i);
1509 disk->major = host->major;
1510 disk->first_minor = i * CARM_MINORS_PER_MAJOR;
1511 disk->fops = &carm_bd_ops;
1512 disk->private_data = port;
1513
1514 q = blk_init_queue(carm_rq_fn, &host->lock);
1515 if (!q) {
1516 rc = -ENOMEM;
1517 break;
1518 }
1519 disk->queue = q;
1520 blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
1521 blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
1522 blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
1523
1524 q->queuedata = port;
1525 }
1526
1527 return rc;
1528}
1529
1530static void carm_free_disks(struct carm_host *host)
1531{
1532 unsigned int i;
1533
1534 for (i = 0; i < CARM_MAX_PORTS; i++) {
1535 struct gendisk *disk = host->port[i].disk;
1536 if (disk) {
1537 request_queue_t *q = disk->queue;
1538
1539 if (disk->flags & GENHD_FL_UP)
1540 del_gendisk(disk);
1541 if (q)
1542 blk_cleanup_queue(q);
1543 put_disk(disk);
1544 }
1545 }
1546}
1547
1548static int carm_init_shm(struct carm_host *host)
1549{
1550 host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
1551 &host->shm_dma);
1552 if (!host->shm)
1553 return -ENOMEM;
1554
1555 host->msg_base = host->shm + RBUF_LEN;
1556 host->msg_dma = host->shm_dma + RBUF_LEN;
1557
1558 memset(host->shm, 0xff, RBUF_LEN);
1559 memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
1560
1561 return 0;
1562}
1563
1564static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1565{
1566 static unsigned int printed_version;
1567 struct carm_host *host;
1568 unsigned int pci_dac;
1569 int rc;
1570 request_queue_t *q;
1571 unsigned int i;
1572
1573 if (!printed_version++)
1574 printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
1575
1576 rc = pci_enable_device(pdev);
1577 if (rc)
1578 return rc;
1579
1580 rc = pci_request_regions(pdev, DRV_NAME);
1581 if (rc)
1582 goto err_out;
1583
1584#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
1585 rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
1586 if (!rc) {
1587 rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
1588 if (rc) {
1589 printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
1590 pci_name(pdev));
1591 goto err_out_regions;
1592 }
1593 pci_dac = 1;
1594 } else {
1595#endif
1596 rc = pci_set_dma_mask(pdev, 0xffffffffULL);
1597 if (rc) {
1598 printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
1599 pci_name(pdev));
1600 goto err_out_regions;
1601 }
1602 pci_dac = 0;
1603#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
1604 }
1605#endif
1606
1607 host = kmalloc(sizeof(*host), GFP_KERNEL);
1608 if (!host) {
1609 printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
1610 pci_name(pdev));
1611 rc = -ENOMEM;
1612 goto err_out_regions;
1613 }
1614
1615 memset(host, 0, sizeof(*host));
1616 host->pdev = pdev;
1617 host->flags = pci_dac ? FL_DAC : 0;
1618 spin_lock_init(&host->lock);
1619 INIT_WORK(&host->fsm_task, carm_fsm_task, host);
1620 init_MUTEX_LOCKED(&host->probe_sem);
1621
1622 for (i = 0; i < ARRAY_SIZE(host->req); i++)
1623 host->req[i].tag = i;
1624
1625 host->mmio = ioremap(pci_resource_start(pdev, 0),
1626 pci_resource_len(pdev, 0));
1627 if (!host->mmio) {
1628 printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
1629 pci_name(pdev));
1630 rc = -ENOMEM;
1631 goto err_out_kfree;
1632 }
1633
1634 rc = carm_init_shm(host);
1635 if (rc) {
1636 printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
1637 pci_name(pdev));
1638 goto err_out_iounmap;
1639 }
1640
1641 q = blk_init_queue(carm_oob_rq_fn, &host->lock);
1642 if (!q) {
1643 printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
1644 pci_name(pdev));
1645 rc = -ENOMEM;
1646 goto err_out_pci_free;
1647 }
1648 host->oob_q = q;
1649 q->queuedata = host;
1650
1651 /*
1652 * Figure out which major to use: 160, 161, or dynamic
1653 */
1654 if (!test_and_set_bit(0, &carm_major_alloc))
1655 host->major = 160;
1656 else if (!test_and_set_bit(1, &carm_major_alloc))
1657 host->major = 161;
1658 else
1659 host->flags |= FL_DYN_MAJOR;
1660
1661 host->id = carm_host_id;
1662 sprintf(host->name, DRV_NAME "%d", carm_host_id);
1663
1664 rc = register_blkdev(host->major, host->name);
1665 if (rc < 0)
1666 goto err_out_free_majors;
1667 if (host->flags & FL_DYN_MAJOR)
1668 host->major = rc;
1669
1670 devfs_mk_dir(DRV_NAME);
1671
1672 rc = carm_init_disks(host);
1673 if (rc)
1674 goto err_out_blkdev_disks;
1675
1676 pci_set_master(pdev);
1677
1678 rc = request_irq(pdev->irq, carm_interrupt, SA_SHIRQ, DRV_NAME, host);
1679 if (rc) {
1680 printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
1681 pci_name(pdev));
1682 goto err_out_blkdev_disks;
1683 }
1684
1685 rc = carm_init_host(host);
1686 if (rc)
1687 goto err_out_free_irq;
1688
1689 DPRINTK("waiting for probe_sem\n");
1690 down(&host->probe_sem);
1691
1692 printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
1693 host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
1694 pci_resource_start(pdev, 0), pdev->irq, host->major);
1695
1696 carm_host_id++;
1697 pci_set_drvdata(pdev, host);
1698 return 0;
1699
1700err_out_free_irq:
1701 free_irq(pdev->irq, host);
1702err_out_blkdev_disks:
1703 carm_free_disks(host);
1704 unregister_blkdev(host->major, host->name);
1705err_out_free_majors:
1706 if (host->major == 160)
1707 clear_bit(0, &carm_major_alloc);
1708 else if (host->major == 161)
1709 clear_bit(1, &carm_major_alloc);
1710 blk_cleanup_queue(host->oob_q);
1711err_out_pci_free:
1712 pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
1713err_out_iounmap:
1714 iounmap(host->mmio);
1715err_out_kfree:
1716 kfree(host);
1717err_out_regions:
1718 pci_release_regions(pdev);
1719err_out:
1720 pci_disable_device(pdev);
1721 return rc;
1722}
1723
1724static void carm_remove_one (struct pci_dev *pdev)
1725{
1726 struct carm_host *host = pci_get_drvdata(pdev);
1727
1728 if (!host) {
1729 printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
1730 pci_name(pdev));
1731 return;
1732 }
1733
1734 free_irq(pdev->irq, host);
1735 carm_free_disks(host);
1736 devfs_remove(DRV_NAME);
1737 unregister_blkdev(host->major, host->name);
1738 if (host->major == 160)
1739 clear_bit(0, &carm_major_alloc);
1740 else if (host->major == 161)
1741 clear_bit(1, &carm_major_alloc);
1742 blk_cleanup_queue(host->oob_q);
1743 pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
1744 iounmap(host->mmio);
1745 kfree(host);
1746 pci_release_regions(pdev);
1747 pci_disable_device(pdev);
1748 pci_set_drvdata(pdev, NULL);
1749}
1750
1751static int __init carm_init(void)
1752{
1753 return pci_module_init(&carm_driver);
1754}
1755
1756static void __exit carm_exit(void)
1757{
1758 pci_unregister_driver(&carm_driver);
1759}
1760
1761module_init(carm_init);
1762module_exit(carm_exit);
1763
1764
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
new file mode 100644
index 000000000000..ce42889f98fb
--- /dev/null
+++ b/drivers/block/ub.c
@@ -0,0 +1,2215 @@
1/*
2 * The low performance USB storage driver (ub).
3 *
4 * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
5 * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
6 *
7 * This work is a part of Linux kernel, is derived from it,
8 * and is not licensed separately. See file COPYING for details.
9 *
10 * TODO (sorted by decreasing priority)
11 * -- Do resets with usb_device_reset (needs a thread context, use khubd)
12 * -- set readonly flag for CDs, set removable flag for CF readers
13 * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
14 * -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...)
15 * -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
16 * -- verify the 13 conditions and do bulk resets
17 * -- normal pool of commands instead of cmdv[]?
18 * -- kill last_pipe and simply do two-state clearing on both pipes
19 * -- verify protocol (bulk) from USB descriptors (maybe...)
20 * -- highmem and sg
21 * -- move top_sense and work_bcs into separate allocations (if they survive)
22 * for cache purists and esoteric architectures.
23 * -- prune comments, they are too voluminous
24 * -- Exterminate P3 printks
25 * -- Resolve XXX's
26 * -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
27 */
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/usb.h>
31#include <linux/blkdev.h>
32#include <linux/devfs_fs_kernel.h>
33#include <linux/timer.h>
34#include <scsi/scsi.h>
35
36#define DRV_NAME "ub"
37#define DEVFS_NAME DRV_NAME
38
39#define UB_MAJOR 180
40
41/*
42 * Definitions which have to be scattered once we understand the layout better.
43 */
44
45/* Transport (despite PR in the name) */
46#define US_PR_BULK 0x50 /* bulk only */
47
48/* Protocol */
49#define US_SC_SCSI 0x06 /* Transparent */
50
51/*
52 */
53#define UB_MINORS_PER_MAJOR 8
54
55#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
56
57#define UB_SENSE_SIZE 18
58
59/*
60 */
61
62/* command block wrapper */
63struct bulk_cb_wrap {
64 __le32 Signature; /* contains 'USBC' */
65 u32 Tag; /* unique per command id */
66 __le32 DataTransferLength; /* size of data */
67 u8 Flags; /* direction in bit 0 */
68 u8 Lun; /* LUN normally 0 */
69 u8 Length; /* of the CDB */
70 u8 CDB[UB_MAX_CDB_SIZE]; /* max command */
71};
72
73#define US_BULK_CB_WRAP_LEN 31
74#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
75#define US_BULK_FLAG_IN 1
76#define US_BULK_FLAG_OUT 0
77
78/* command status wrapper */
79struct bulk_cs_wrap {
80 __le32 Signature; /* should = 'USBS' */
81 u32 Tag; /* same as original command */
82 __le32 Residue; /* amount not transferred */
83 u8 Status; /* see below */
84};
85
86#define US_BULK_CS_WRAP_LEN 13
87#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
88/* This is for Olympus Camedia digital cameras */
89#define US_BULK_CS_OLYMPUS_SIGN 0x55425355 /* spells out 'USBU' */
90#define US_BULK_STAT_OK 0
91#define US_BULK_STAT_FAIL 1
92#define US_BULK_STAT_PHASE 2
93
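/*
 * Size check for the wrappers above, assuming the packed on-the-wire
 * layout with no padding:
 *
 *	CBW: 4 (Signature) + 4 (Tag) + 4 (DataTransferLength) + 1 (Flags)
 *	     + 1 (Lun) + 1 (Length) + 16 (CDB) = 31 = US_BULK_CB_WRAP_LEN
 *	CSW: 4 (Signature) + 4 (Tag) + 4 (Residue) + 1 (Status)
 *	     = 13 = US_BULK_CS_WRAP_LEN
 */
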
94/* bulk-only class specific requests */
95#define US_BULK_RESET_REQUEST 0xff
96#define US_BULK_GET_MAX_LUN 0xfe
97
98/*
99 */
100struct ub_dev;
101
102#define UB_MAX_REQ_SG 1
103#define UB_MAX_SECTORS 64
104
105/*
106 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
107 * even if a webcam hogs the bus, but some devices need time to spin up.
108 */
109#define UB_URB_TIMEOUT (HZ*2)
110#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
111#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
112#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
113
114/*
115 * An instance of a SCSI command in transit.
116 */
117#define UB_DIR_NONE 0
118#define UB_DIR_READ 1
119#define UB_DIR_ILLEGAL2 2
120#define UB_DIR_WRITE 3
121
122#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
123 (((c)==UB_DIR_READ)? 'r': 'n'))
124
125enum ub_scsi_cmd_state {
126 UB_CMDST_INIT, /* Initial state */
127 UB_CMDST_CMD, /* Command submitted */
128 UB_CMDST_DATA, /* Data phase */
129 UB_CMDST_CLR2STS, /* Clearing before requesting status */
130 UB_CMDST_STAT, /* Status phase */
131 UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
132 UB_CMDST_SENSE, /* Sending Request Sense */
133 UB_CMDST_DONE /* Final state */
134};
135
136static char *ub_scsi_cmd_stname[] = {
137 ". ",
138 "Cmd",
139 "dat",
140 "c2s",
141 "sts",
142 "clr",
143 "Sen",
144 "fin"
145};
146
147struct ub_scsi_cmd {
148 unsigned char cdb[UB_MAX_CDB_SIZE];
149 unsigned char cdb_len;
150
151 unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
152 unsigned char trace_index;
153 enum ub_scsi_cmd_state state;
154 unsigned int tag;
155 struct ub_scsi_cmd *next;
156
157 int error; /* Return code - valid upon done */
158 unsigned int act_len; /* Return size */
159 unsigned char key, asc, ascq; /* May be valid if error==-EIO */
160
161 int stat_count; /* Retries getting status. */
162
163 /*
164 * We do not support transfers from highmem pages
165 * because the underlying USB framework does not do what we need.
166 */
167 char *data; /* Requested buffer */
168 unsigned int len; /* Requested length */
169 // struct scatterlist sgv[UB_MAX_REQ_SG];
170
171 void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
172 void *back;
173};
174
175/*
176 */
177struct ub_capacity {
178 unsigned long nsec; /* Linux size - 512 byte sectors */
179 unsigned int bsize; /* Linux hardsect_size */
180 unsigned int bshift; /* Shift between 512 and hard sects */
181};
182
183/*
184 * The SCSI command tracing structure.
185 */
186
187#define SCMD_ST_HIST_SZ 8
188#define SCMD_TRACE_SZ 63 /* Less than 4KB of 61-byte lines */
189
190struct ub_scsi_cmd_trace {
191 int hcur;
192 unsigned int tag;
193 unsigned int req_size, act_size;
194 unsigned char op;
195 unsigned char dir;
196 unsigned char key, asc, ascq;
197 char st_hst[SCMD_ST_HIST_SZ];
198};
199
200struct ub_scsi_trace {
201 int cur;
202 struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
203};
204
205/*
206 * This is a direct take-off from linux/include/completion.h
207 * The difference is that I do not wait on this thing, just poll.
208 * When I want to wait (ub_probe), I just use the stock completion.
209 *
210 * Note that INIT_COMPLETION takes no lock. It is correct. But why
211 * in the bloody hell that thing takes struct instead of pointer to struct
212 * is quite beyond me. I just copied it from the stock completion.
213 */
214struct ub_completion {
215 unsigned int done;
216 spinlock_t lock;
217};
218
219static inline void ub_init_completion(struct ub_completion *x)
220{
221 x->done = 0;
222 spin_lock_init(&x->lock);
223}
224
225#define UB_INIT_COMPLETION(x) ((x).done = 0)
226
227static void ub_complete(struct ub_completion *x)
228{
229 unsigned long flags;
230
231 spin_lock_irqsave(&x->lock, flags);
232 x->done++;
233 spin_unlock_irqrestore(&x->lock, flags);
234}
235
236static int ub_is_completed(struct ub_completion *x)
237{
238 unsigned long flags;
239 int ret;
240
241 spin_lock_irqsave(&x->lock, flags);
242 ret = x->done;
243 spin_unlock_irqrestore(&x->lock, flags);
244 return ret;
245}
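
/*
 * A minimal sketch of the poll pattern this enables (the submit paths
 * further down follow it using the real struct ub_dev fields):
 *
 *	UB_INIT_COMPLETION(sc->work_done);
 *	usb_submit_urb(&sc->work_urb, GFP_ATOMIC);  // handler calls ub_complete()
 *	...
 *	if (ub_is_completed(&sc->work_done))
 *		;	// work_urb.status / actual_length are now stable
 */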
246
247/*
248 */
249struct ub_scsi_cmd_queue {
250 int qlen, qmax;
251 struct ub_scsi_cmd *head, *tail;
252};
253
254/*
255 * The UB device instance.
256 */
257struct ub_dev {
258 spinlock_t lock;
259 int id; /* Number among ub's */
260 atomic_t poison; /* The USB device is disconnected */
261 int openc; /* protected by ub_lock! */
262 /* kref is too implicit for our taste */
263 unsigned int tagcnt;
264 int changed; /* Media was changed */
265 int removable;
266 int readonly;
267 int first_open; /* Kludge. See ub_bd_open. */
268 char name[8];
269 struct usb_device *dev;
270 struct usb_interface *intf;
271
272 struct ub_capacity capacity;
273 struct gendisk *disk;
274
275 unsigned int send_bulk_pipe; /* cached pipe values */
276 unsigned int recv_bulk_pipe;
277 unsigned int send_ctrl_pipe;
278 unsigned int recv_ctrl_pipe;
279
280 struct tasklet_struct tasklet;
281
282 /* XXX Use Ingo's mempool (once we have more than one) */
283 int cmda[1];
284 struct ub_scsi_cmd cmdv[1];
285
286 struct ub_scsi_cmd_queue cmd_queue;
287 struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
288 unsigned char top_sense[UB_SENSE_SIZE];
289
290 struct ub_completion work_done;
291 struct urb work_urb;
292 struct timer_list work_timer;
293 int last_pipe; /* What might need clearing */
294 struct bulk_cb_wrap work_bcb;
295 struct bulk_cs_wrap work_bcs;
296 struct usb_ctrlrequest work_cr;
297
298 struct ub_scsi_trace tr;
299};
300
301/*
302 */
303static void ub_cleanup(struct ub_dev *sc);
304static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq);
305static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
306 struct request *rq);
307static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
308 struct request *rq);
309static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
310static void ub_end_rq(struct request *rq, int uptodate);
311static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
312static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
313static void ub_scsi_action(unsigned long _dev);
314static void ub_scsi_dispatch(struct ub_dev *sc);
315static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
316static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
317static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
318static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
319static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
320static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
321 int stalled_pipe);
322static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
323static int ub_sync_tur(struct ub_dev *sc);
324static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret);
325
326/*
327 */
328static struct usb_device_id ub_usb_ids[] = {
329 // { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) }, /* SDDR-31 */
330 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
331 { }
332};
333
334MODULE_DEVICE_TABLE(usb, ub_usb_ids);
335
336/*
337 * Find me a way to identify "next free minor" for add_disk(),
338 * and the array disappears the next day. However, the number of
339 * hosts has something to do with the naming and /proc/partitions.
340 * This has to be thought out in detail before changing.
341 * If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
342 */
343#define UB_MAX_HOSTS 26
344static char ub_hostv[UB_MAX_HOSTS];
345static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
346
347/*
348 * The SCSI command tracing procedures.
349 */
350
351static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
352{
353 int n;
354 struct ub_scsi_cmd_trace *t;
355
356 if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
357 t = &sc->tr.vec[n];
358
359 memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
360 t->tag = cmd->tag;
361 t->op = cmd->cdb[0];
362 t->dir = cmd->dir;
363 t->req_size = cmd->len;
364 t->st_hst[0] = cmd->state;
365
366 sc->tr.cur = n;
367 cmd->trace_index = n;
368}
369
370static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
371{
372 int n;
373 struct ub_scsi_cmd_trace *t;
374
375 t = &sc->tr.vec[cmd->trace_index];
376 if (t->tag == cmd->tag) {
377 if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
378 t->st_hst[n] = cmd->state;
379 t->hcur = n;
380 }
381}
382
383static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
384{
385 struct ub_scsi_cmd_trace *t;
386
387 t = &sc->tr.vec[cmd->trace_index];
388 if (t->tag == cmd->tag)
389 t->act_size = cmd->act_len;
390}
391
392static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
393 unsigned char *sense)
394{
395 struct ub_scsi_cmd_trace *t;
396
397 t = &sc->tr.vec[cmd->trace_index];
398 if (t->tag == cmd->tag) {
399 t->key = sense[2] & 0x0F;
400 t->asc = sense[12];
401 t->ascq = sense[13];
402 }
403}
404
405static ssize_t ub_diag_show(struct device *dev, char *page)
406{
407 struct usb_interface *intf;
408 struct ub_dev *sc;
409 int cnt;
410 unsigned long flags;
411 int nc, nh;
412 int i, j;
413 struct ub_scsi_cmd_trace *t;
414
415 intf = to_usb_interface(dev);
416 sc = usb_get_intfdata(intf);
417 if (sc == NULL)
418 return 0;
419
420 cnt = 0;
421 spin_lock_irqsave(&sc->lock, flags);
422
423 cnt += sprintf(page + cnt,
424 "qlen %d qmax %d changed %d removable %d readonly %d\n",
425 sc->cmd_queue.qlen, sc->cmd_queue.qmax,
426 sc->changed, sc->removable, sc->readonly);
427
428 if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
429 for (j = 0; j < SCMD_TRACE_SZ; j++) {
430 t = &sc->tr.vec[nc];
431
432 cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
433 if (t->op == REQUEST_SENSE) {
434 cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
435 t->key, t->asc, t->ascq);
436 } else {
437 cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
438 cnt += sprintf(page + cnt, " [%5d %5d]",
439 t->req_size, t->act_size);
440 }
441 if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
442 for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
443 cnt += sprintf(page + cnt, " %s",
444 ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
445 if (++nh == SCMD_ST_HIST_SZ) nh = 0;
446 }
447 cnt += sprintf(page + cnt, "\n");
448
449 if (++nc == SCMD_TRACE_SZ) nc = 0;
450 }
451
452 spin_unlock_irqrestore(&sc->lock, flags);
453 return cnt;
454}
455
456static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
457
458/*
459 * The id allocator.
460 *
461 * This also stores the host for indexing by minor, which is somewhat dirty.
462 */
463static int ub_id_get(void)
464{
465 unsigned long flags;
466 int i;
467
468 spin_lock_irqsave(&ub_lock, flags);
469 for (i = 0; i < UB_MAX_HOSTS; i++) {
470 if (ub_hostv[i] == 0) {
471 ub_hostv[i] = 1;
472 spin_unlock_irqrestore(&ub_lock, flags);
473 return i;
474 }
475 }
476 spin_unlock_irqrestore(&ub_lock, flags);
477 return -1;
478}
479
480static void ub_id_put(int id)
481{
482 unsigned long flags;
483
484 if (id < 0 || id >= UB_MAX_HOSTS) {
485 printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
486 return;
487 }
488
489 spin_lock_irqsave(&ub_lock, flags);
490 if (ub_hostv[id] == 0) {
491 spin_unlock_irqrestore(&ub_lock, flags);
492 printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
493 return;
494 }
495 ub_hostv[id] = 0;
496 spin_unlock_irqrestore(&ub_lock, flags);
497}
498
499/*
500 * Downcount for deallocation. This rides on two assumptions:
501 * - once something is poisoned, its refcount cannot grow
502 * - opens cannot happen at this time (del_gendisk was done)
503 * If the above is true, we can drop the lock, which we need for
504 * blk_cleanup_queue(): the silly thing may attempt to sleep.
505 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
506 */
507static void ub_put(struct ub_dev *sc)
508{
509 unsigned long flags;
510
511 spin_lock_irqsave(&ub_lock, flags);
512 --sc->openc;
513 if (sc->openc == 0 && atomic_read(&sc->poison)) {
514 spin_unlock_irqrestore(&ub_lock, flags);
515 ub_cleanup(sc);
516 } else {
517 spin_unlock_irqrestore(&ub_lock, flags);
518 }
519}
520
521/*
522 * Final cleanup and deallocation.
523 */
524static void ub_cleanup(struct ub_dev *sc)
525{
526 request_queue_t *q;
527
528 /* I don't think queue can be NULL. But... Stolen from sx8.c */
529 if ((q = sc->disk->queue) != NULL)
530 blk_cleanup_queue(q);
531
532 /*
533 * If we zero disk->private_data BEFORE put_disk, we have to check
534 * for NULL all over the place in open, release, check_media and
535 * revalidate, because the block level semaphore is well inside the
536 * put_disk. But we cannot zero after the call, because *disk is gone.
537 * The sd.c is blatantly racy in this area.
538 */
539 /* disk->private_data = NULL; */
540 put_disk(sc->disk);
541 sc->disk = NULL;
542
543 ub_id_put(sc->id);
544 kfree(sc);
545}
546
547/*
548 * The "command allocator".
549 */
550static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc)
551{
552 struct ub_scsi_cmd *ret;
553
554 if (sc->cmda[0])
555 return NULL;
556 ret = &sc->cmdv[0];
557 sc->cmda[0] = 1;
558 return ret;
559}
560
561static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
562{
563 if (cmd != &sc->cmdv[0]) {
564 printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
565 sc->name, cmd);
566 return;
567 }
568 if (!sc->cmda[0]) {
569 printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name);
570 return;
571 }
572 sc->cmda[0] = 0;
573}
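
/*
 * Note: cmda[0]/cmdv[0] amount to a one-entry allocator, so at most one
 * regular SCSI command is in flight per device (REQUEST SENSE uses the
 * separate, dedicated top_rqs_cmd). When ub_get_cmd() fails, ub_bd_rq_fn()
 * below stops the block queue; it is restarted once a command completes.
 */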
574
575/*
576 * The command queue.
577 */
578static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
579{
580 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
581
582 if (t->qlen++ == 0) {
583 t->head = cmd;
584 t->tail = cmd;
585 } else {
586 t->tail->next = cmd;
587 t->tail = cmd;
588 }
589
590 if (t->qlen > t->qmax)
591 t->qmax = t->qlen;
592}
593
594static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
595{
596 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
597
598 if (t->qlen++ == 0) {
599 t->head = cmd;
600 t->tail = cmd;
601 } else {
602 cmd->next = t->head;
603 t->head = cmd;
604 }
605
606 if (t->qlen > t->qmax)
607 t->qmax = t->qlen;
608}
609
610static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
611{
612 struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
613 struct ub_scsi_cmd *cmd;
614
615 if (t->qlen == 0)
616 return NULL;
617 if (--t->qlen == 0)
618 t->tail = NULL;
619 cmd = t->head;
620 t->head = cmd->next;
621 cmd->next = NULL;
622 return cmd;
623}
624
625#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
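
/*
 * Queue semantics in brief: ub_cmdq_add() appends at the tail (normal FIFO
 * order), while ub_cmdq_insert() pushes at the head -- used by
 * ub_state_sense() below so the auto-issued REQUEST SENSE runs ahead of
 * the command whose failure triggered it.
 */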
626
627/*
628 * The request function is our main entry point
629 */
630
631static void ub_bd_rq_fn(request_queue_t *q)
632{
633 struct ub_dev *sc = q->queuedata;
634 struct request *rq;
635
636 while ((rq = elv_next_request(q)) != NULL) {
637 if (ub_bd_rq_fn_1(sc, rq) != 0) {
638 blk_stop_queue(q);
639 break;
640 }
641 }
642}
643
644static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq)
645{
646 struct ub_scsi_cmd *cmd;
647 int rc;
648
649 if (atomic_read(&sc->poison) || sc->changed) {
650 blkdev_dequeue_request(rq);
651 ub_end_rq(rq, 0);
652 return 0;
653 }
654
655 if ((cmd = ub_get_cmd(sc)) == NULL)
656 return -1;
657 memset(cmd, 0, sizeof(struct ub_scsi_cmd));
658
659 blkdev_dequeue_request(rq);
660
661 if (blk_pc_request(rq)) {
662 rc = ub_cmd_build_packet(sc, cmd, rq);
663 } else {
664 rc = ub_cmd_build_block(sc, cmd, rq);
665 }
666 if (rc != 0) {
667 ub_put_cmd(sc, cmd);
668 ub_end_rq(rq, 0);
669 blk_start_queue(sc->disk->queue);
670 return 0;
671 }
672
673 cmd->state = UB_CMDST_INIT;
674 cmd->done = ub_rw_cmd_done;
675 cmd->back = rq;
676
677 cmd->tag = sc->tagcnt++;
678 if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
679 ub_put_cmd(sc, cmd);
680 ub_end_rq(rq, 0);
681 blk_start_queue(sc->disk->queue);
682 return 0;
683 }
684
685 return 0;
686}
687
688static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
689 struct request *rq)
690{
691 int ub_dir;
692#if 0 /* We use rq->buffer for now */
693 struct scatterlist *sg;
694 int n_elem;
695#endif
696 unsigned int block, nblks;
697
698 if (rq_data_dir(rq) == WRITE)
699 ub_dir = UB_DIR_WRITE;
700 else
701 ub_dir = UB_DIR_READ;
702
703 /*
704 * get scatterlist from block layer
705 */
706#if 0 /* We use rq->buffer for now */
707 sg = &cmd->sgv[0];
708 n_elem = blk_rq_map_sg(q, rq, sg);
709 if (n_elem <= 0) {
710 ub_put_cmd(sc, cmd);
711 ub_end_rq(rq, 0);
712 blk_start_queue(q);
713 return 0; /* request with no s/g entries? */
714 }
715
716 if (n_elem != 1) { /* Paranoia */
717 printk(KERN_WARNING "%s: request with %d segments\n",
718 sc->name, n_elem);
719 ub_put_cmd(sc, cmd);
720 ub_end_rq(rq, 0);
721 blk_start_queue(q);
722 return 0;
723 }
724#endif
725
726 /*
727 * XXX Unfortunately, this check does not work. It is quite possible
728 * to get bogus non-null rq->buffer if you allow sg by mistake.
729 */
730 if (rq->buffer == NULL) {
731 /*
732 * This must not happen if we set the queue right.
733 * The block level must create bounce buffers for us.
734 */
735 static int do_print = 1;
736 if (do_print) {
737 printk(KERN_WARNING "%s: unmapped block request"
738 " flags 0x%lx sectors %lu\n",
739 sc->name, rq->flags, rq->nr_sectors);
740 do_print = 0;
741 }
742 return -1;
743 }
744
745 /*
746 * build the command
747 *
748 * The call to blk_queue_hardsect_size() guarantees that request
749 * is aligned, but it is given in terms of 512 byte units, always.
750 */
751 block = rq->sector >> sc->capacity.bshift;
752 nblks = rq->nr_sectors >> sc->capacity.bshift;
753
754 cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
755 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
756 cmd->cdb[2] = block >> 24;
757 cmd->cdb[3] = block >> 16;
758 cmd->cdb[4] = block >> 8;
759 cmd->cdb[5] = block;
760 cmd->cdb[7] = nblks >> 8;
761 cmd->cdb[8] = nblks;
762 cmd->cdb_len = 10;
763
764 cmd->dir = ub_dir;
765 cmd->data = rq->buffer;
766 cmd->len = rq->nr_sectors * 512;
767
768 return 0;
769}
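
/*
 * Worked example of the bshift conversion above (hypothetical device):
 * with a 2048-byte hard sector, capacity.bshift == 2, so a request at
 * 512-byte sector 8 covering 16 sectors becomes block = 8 >> 2 = 2 and
 * nblks = 16 >> 2 = 4 in the READ(10)/WRITE(10) CDB, while cmd->len
 * stays 16 * 512 = 8192 bytes.
 */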
770
771static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
772 struct request *rq)
773{
774
775 if (rq->data_len != 0 && rq->data == NULL) {
776 static int do_print = 1;
777 if (do_print) {
778 printk(KERN_WARNING "%s: unmapped packet request"
779 " flags 0x%lx length %d\n",
780 sc->name, rq->flags, rq->data_len);
781 do_print = 0;
782 }
783 return -1;
784 }
785
786 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
787 cmd->cdb_len = rq->cmd_len;
788
789 if (rq->data_len == 0) {
790 cmd->dir = UB_DIR_NONE;
791 } else {
792 if (rq_data_dir(rq) == WRITE)
793 cmd->dir = UB_DIR_WRITE;
794 else
795 cmd->dir = UB_DIR_READ;
796 }
797 cmd->data = rq->data;
798 cmd->len = rq->data_len;
799
800 return 0;
801}
802
803static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
804{
805 struct request *rq = cmd->back;
806 struct gendisk *disk = sc->disk;
807 request_queue_t *q = disk->queue;
808 int uptodate;
809
810 if (blk_pc_request(rq)) {
811 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
812 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
813 rq->sense_len = UB_SENSE_SIZE;
814 }
815
816 if (cmd->error == 0)
817 uptodate = 1;
818 else
819 uptodate = 0;
820
821 ub_put_cmd(sc, cmd);
822 ub_end_rq(rq, uptodate);
823 blk_start_queue(q);
824}
825
826static void ub_end_rq(struct request *rq, int uptodate)
827{
828 int rc;
829
830 rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
831 // assert(rc == 0);
832 end_that_request_last(rq);
833}
834
835/*
836 * Submit a regular SCSI operation (not an auto-sense).
837 *
838 * The Iron Law of Good Submit Routine is:
839 * Zero return - callback is done, Nonzero return - callback is not done.
840 * No exceptions.
841 *
842 * Host is assumed locked.
843 *
844 * XXX We only support Bulk for the moment.
845 */
846static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
847{
848
849 if (cmd->state != UB_CMDST_INIT ||
850 (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
851 return -EINVAL;
852 }
853
854 ub_cmdq_add(sc, cmd);
855 /*
856 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
857 * safer to jump to a tasklet, in case upper layers do something silly.
858 */
859 tasklet_schedule(&sc->tasklet);
860 return 0;
861}
862
863/*
864 * Submit the first URB for the queued command.
865 * This function does not deal with queueing in any way.
866 */
867static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
868{
869 struct bulk_cb_wrap *bcb;
870 int rc;
871
872 bcb = &sc->work_bcb;
873
874 /*
875 * ``If the allocation length is eighteen or greater, and a device
876 * server returns less than eighteen bytes of data, the application
877 * client should assume that the bytes not transferred would have been
878 * zeroes had the device server returned those bytes.''
879 *
880 * We zero sense for all commands so that when a packet request
881 * fails it does not return a stale sense.
882 */
883 memset(&sc->top_sense, 0, UB_SENSE_SIZE);
884
885 /* set up the command wrapper */
886 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
887 bcb->Tag = cmd->tag; /* Endianness is not important */
888 bcb->DataTransferLength = cpu_to_le32(cmd->len);
889 bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
890 bcb->Lun = 0; /* No multi-LUN yet */
891 bcb->Length = cmd->cdb_len;
892
893 /* copy the command payload */
894 memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
895
896 UB_INIT_COMPLETION(sc->work_done);
897
898 sc->last_pipe = sc->send_bulk_pipe;
899 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
900 bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
901 sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
902
903 /* Fill what we shouldn't be filling, because usb-storage did so. */
904 sc->work_urb.actual_length = 0;
905 sc->work_urb.error_count = 0;
906 sc->work_urb.status = 0;
907
908 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
909 /* XXX Clear stalls */
910 printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
911 ub_complete(&sc->work_done);
912 return rc;
913 }
914
915 sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
916 add_timer(&sc->work_timer);
917
918 cmd->state = UB_CMDST_CMD;
919 ub_cmdtr_state(sc, cmd);
920 return 0;
921}
922
923/*
924 * Timeout handler.
925 */
926static void ub_urb_timeout(unsigned long arg)
927{
928 struct ub_dev *sc = (struct ub_dev *) arg;
929 unsigned long flags;
930
931 spin_lock_irqsave(&sc->lock, flags);
932 usb_unlink_urb(&sc->work_urb);
933 spin_unlock_irqrestore(&sc->lock, flags);
934}
935
936/*
937 * Completion routine for the work URB.
938 *
939 * This can be called directly from usb_submit_urb (while we have
940 * the sc->lock taken) and from an interrupt (while we do NOT have
941 * the sc->lock taken). Therefore, bounce this off to a tasklet.
942 */
943static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
944{
945 struct ub_dev *sc = urb->context;
946
947 ub_complete(&sc->work_done);
948 tasklet_schedule(&sc->tasklet);
949}
950
951static void ub_scsi_action(unsigned long _dev)
952{
953 struct ub_dev *sc = (struct ub_dev *) _dev;
954 unsigned long flags;
955
956 spin_lock_irqsave(&sc->lock, flags);
957 del_timer(&sc->work_timer);
958 ub_scsi_dispatch(sc);
959 spin_unlock_irqrestore(&sc->lock, flags);
960}
961
962static void ub_scsi_dispatch(struct ub_dev *sc)
963{
964 struct ub_scsi_cmd *cmd;
965 int rc;
966
967 while ((cmd = ub_cmdq_peek(sc)) != NULL) {
968 if (cmd->state == UB_CMDST_DONE) {
969 ub_cmdq_pop(sc);
970 (*cmd->done)(sc, cmd);
971 } else if (cmd->state == UB_CMDST_INIT) {
972 ub_cmdtr_new(sc, cmd);
973 if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
974 break;
975 cmd->error = rc;
976 cmd->state = UB_CMDST_DONE;
977 ub_cmdtr_state(sc, cmd);
978 } else {
979 if (!ub_is_completed(&sc->work_done))
980 break;
981 ub_scsi_urb_compl(sc, cmd);
982 }
983 }
984}
985
986static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
987{
988 struct urb *urb = &sc->work_urb;
989 struct bulk_cs_wrap *bcs;
990 int pipe;
991 int rc;
992
993 if (atomic_read(&sc->poison)) {
994 /* A little too simplistic, I feel... */
995 goto Bad_End;
996 }
997
998 if (cmd->state == UB_CMDST_CLEAR) {
999 if (urb->status == -EPIPE) {
1000 /*
1001 * STALL while clearing a STALL.
1002 * The control pipe clears itself - nothing to do.
1003 * XXX Might try to reset the device here and retry.
1004 */
1005 printk(KERN_NOTICE "%s: "
1006 "stall on control pipe for device %u\n",
1007 sc->name, sc->dev->devnum);
1008 goto Bad_End;
1009 }
1010
1011 /*
1012 * We ignore the result for the halt clear.
1013 */
1014
1015 /* reset the endpoint toggle */
1016 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1017 usb_pipeout(sc->last_pipe), 0);
1018
1019 ub_state_sense(sc, cmd);
1020
1021 } else if (cmd->state == UB_CMDST_CLR2STS) {
1022 if (urb->status == -EPIPE) {
1023 /*
1024 * STALL while clearing a STALL.
1025 * The control pipe clears itself - nothing to do.
1026 * XXX Might try to reset the device here and retry.
1027 */
1028 printk(KERN_NOTICE "%s: "
1029 "stall on control pipe for device %u\n",
1030 sc->name, sc->dev->devnum);
1031 goto Bad_End;
1032 }
1033
1034 /*
1035 * We ignore the result for the halt clear.
1036 */
1037
1038 /* reset the endpoint toggle */
1039 usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
1040 usb_pipeout(sc->last_pipe), 0);
1041
1042 ub_state_stat(sc, cmd);
1043
1044 } else if (cmd->state == UB_CMDST_CMD) {
1045 if (urb->status == -EPIPE) {
1046 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1047 if (rc != 0) {
1048 printk(KERN_NOTICE "%s: "
1049 "unable to submit clear for device %u"
1050 " (code %d)\n",
1051 sc->name, sc->dev->devnum, rc);
1052 /*
1053 * This is typically ENOMEM or some other such shit.
1054 * Retrying is pointless. Just do Bad End on it...
1055 */
1056 goto Bad_End;
1057 }
1058 cmd->state = UB_CMDST_CLEAR;
1059 ub_cmdtr_state(sc, cmd);
1060 return;
1061 }
1062 if (urb->status != 0) {
1063 printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
1064 goto Bad_End;
1065 }
1066 if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
1067 printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
1068 /* XXX Must do reset here to unconfuse the device */
1069 goto Bad_End;
1070 }
1071
1072 if (cmd->dir == UB_DIR_NONE) {
1073 ub_state_stat(sc, cmd);
1074 return;
1075 }
1076
1077 UB_INIT_COMPLETION(sc->work_done);
1078
1079 if (cmd->dir == UB_DIR_READ)
1080 pipe = sc->recv_bulk_pipe;
1081 else
1082 pipe = sc->send_bulk_pipe;
1083 sc->last_pipe = pipe;
1084 usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
1085 cmd->data, cmd->len, ub_urb_complete, sc);
1086 sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
1087 sc->work_urb.actual_length = 0;
1088 sc->work_urb.error_count = 0;
1089 sc->work_urb.status = 0;
1090
1091 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1092 /* XXX Clear stalls */
1093 printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
1094 ub_complete(&sc->work_done);
1095 ub_state_done(sc, cmd, rc);
1096 return;
1097 }
1098
1099 sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
1100 add_timer(&sc->work_timer);
1101
1102 cmd->state = UB_CMDST_DATA;
1103 ub_cmdtr_state(sc, cmd);
1104
1105 } else if (cmd->state == UB_CMDST_DATA) {
1106 if (urb->status == -EPIPE) {
1107 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1108 if (rc != 0) {
1109 printk(KERN_NOTICE "%s: "
1110 "unable to submit clear for device %u"
1111 " (code %d)\n",
1112 sc->name, sc->dev->devnum, rc);
1113 /*
1114 * This is typically ENOMEM or some other such shit.
1115 * Retrying is pointless. Just do Bad End on it...
1116 */
1117 goto Bad_End;
1118 }
1119 cmd->state = UB_CMDST_CLR2STS;
1120 ub_cmdtr_state(sc, cmd);
1121 return;
1122 }
1123 if (urb->status == -EOVERFLOW) {
1124 /*
1125 * A babble? Failure, but we must transfer CSW now.
1126 */
1127 cmd->error = -EOVERFLOW; /* A cheap trick... */
1128 } else {
1129 if (urb->status != 0)
1130 goto Bad_End;
1131 }
1132
1133 cmd->act_len = urb->actual_length;
1134 ub_cmdtr_act_len(sc, cmd);
1135
1136 ub_state_stat(sc, cmd);
1137
1138 } else if (cmd->state == UB_CMDST_STAT) {
1139 if (urb->status == -EPIPE) {
1140 rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
1141 if (rc != 0) {
1142 printk(KERN_NOTICE "%s: "
1143 "unable to submit clear for device %u"
1144 " (code %d)\n",
1145 sc->name, sc->dev->devnum, rc);
1146 /*
1147 * This is typically ENOMEM or some other such shit.
1148 * Retrying is pointless. Just do Bad End on it...
1149 */
1150 goto Bad_End;
1151 }
1152 cmd->state = UB_CMDST_CLEAR;
1153 ub_cmdtr_state(sc, cmd);
1154 return;
1155 }
1156 if (urb->status != 0)
1157 goto Bad_End;
1158
1159 if (urb->actual_length == 0) {
1160 /*
1161 * Some broken devices add unnecessary zero-length
1162 * packets to the end of their data transfers.
1163 * Such packets show up as 0-length CSWs. If we
1164 * encounter such a thing, try to read the CSW again.
1165 */
1166 if (++cmd->stat_count >= 4) {
1167 printk(KERN_NOTICE "%s: "
1168 "unable to get CSW on device %u\n",
1169 sc->name, sc->dev->devnum);
1170 goto Bad_End;
1171 }
1172 __ub_state_stat(sc, cmd);
1173 return;
1174 }
1175
1176 /*
1177 * Check the returned Bulk protocol status.
1178 */
1179
1180 bcs = &sc->work_bcs;
1181 rc = le32_to_cpu(bcs->Residue);
1182 if (rc != cmd->len - cmd->act_len) {
1183 /*
1184 * It is all right to transfer less, the caller has
1185 * to check. But it's not all right if the device
1186 * counts disagree with our counts.
1187 */
1188 /* P3 */ printk("%s: resid %d len %d act %d\n",
1189 sc->name, rc, cmd->len, cmd->act_len);
1190 goto Bad_End;
1191 }
1192
1193#if 0
1194 if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
1195 bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
1196 /* Windows ignores signatures, so do we. */
1197 }
1198#endif
1199
1200 if (bcs->Tag != cmd->tag) {
1201 /*
1202 * This usually happens when we disagree with the
1203 * device's microcode about something. For instance,
1204 * a few of them throw this after timeouts. They buffer
1205 * commands and reply at commands we timed out before.
1206 * Without flushing these replies we loop forever.
1207 */
1208 if (++cmd->stat_count >= 4) {
1209 printk(KERN_NOTICE "%s: "
1210 "tag mismatch orig 0x%x reply 0x%x "
1211 "on device %u\n",
1212 sc->name, cmd->tag, bcs->Tag,
1213 sc->dev->devnum);
1214 goto Bad_End;
1215 }
1216 __ub_state_stat(sc, cmd);
1217 return;
1218 }
1219
1220 switch (bcs->Status) {
1221 case US_BULK_STAT_OK:
1222 break;
1223 case US_BULK_STAT_FAIL:
1224 ub_state_sense(sc, cmd);
1225 return;
1226 case US_BULK_STAT_PHASE:
1227 /* XXX We must reset the transport here */
1228 /* P3 */ printk("%s: status PHASE\n", sc->name);
1229 goto Bad_End;
1230 default:
1231 printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
1232 sc->name, bcs->Status);
1233 goto Bad_End;
1234 }
1235
1236 /* Not zeroing error to preserve a babble indicator */
1237 cmd->state = UB_CMDST_DONE;
1238 ub_cmdtr_state(sc, cmd);
1239 ub_cmdq_pop(sc);
1240 (*cmd->done)(sc, cmd);
1241
1242 } else if (cmd->state == UB_CMDST_SENSE) {
1243 ub_state_done(sc, cmd, -EIO);
1244
1245 } else {
1246 printk(KERN_WARNING "%s: "
1247 "wrong command state %d on device %u\n",
1248 sc->name, cmd->state, sc->dev->devnum);
1249 goto Bad_End;
1250 }
1251 return;
1252
1253Bad_End: /* Little Excel is dead */
1254 ub_state_done(sc, cmd, -EIO);
1255}
1256
1257/*
1258 * Factorization helper for the command state machine:
1259 * Finish the command.
1260 */
1261static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
1262{
1263
1264 cmd->error = rc;
1265 cmd->state = UB_CMDST_DONE;
1266 ub_cmdtr_state(sc, cmd);
1267 ub_cmdq_pop(sc);
1268 (*cmd->done)(sc, cmd);
1269}
1270
1271/*
1272 * Factorization helper for the command state machine:
1273 * Submit a CSW read.
1274 */
1275static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1276{
1277 int rc;
1278
1279 UB_INIT_COMPLETION(sc->work_done);
1280
1281 sc->last_pipe = sc->recv_bulk_pipe;
1282 usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
1283 &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
1284 sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
1285 sc->work_urb.actual_length = 0;
1286 sc->work_urb.error_count = 0;
1287 sc->work_urb.status = 0;
1288
1289 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1290 /* XXX Clear stalls */
1291 printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */
1292 ub_complete(&sc->work_done);
1293 ub_state_done(sc, cmd, rc);
1294 return;
1295 }
1296
1297 sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
1298 add_timer(&sc->work_timer);
1299}
1300
1301/*
1302 * Factorization helper for the command state machine:
1303 * Submit a CSW read and go to STAT state.
1304 */
1305static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1306{
1307 __ub_state_stat(sc, cmd);
1308
1309 cmd->stat_count = 0;
1310 cmd->state = UB_CMDST_STAT;
1311 ub_cmdtr_state(sc, cmd);
1312}
1313
1314/*
1315 * Factorization helper for the command state machine:
1316 * Submit a REQUEST SENSE and go to SENSE state.
1317 */
1318static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1319{
1320 struct ub_scsi_cmd *scmd;
1321 int rc;
1322
1323 if (cmd->cdb[0] == REQUEST_SENSE) {
1324 rc = -EPIPE;
1325 goto error;
1326 }
1327
1328 scmd = &sc->top_rqs_cmd;
1329 scmd->cdb[0] = REQUEST_SENSE;
1330 scmd->cdb[4] = UB_SENSE_SIZE;
1331 scmd->cdb_len = 6;
1332 scmd->dir = UB_DIR_READ;
1333 scmd->state = UB_CMDST_INIT;
1334 scmd->data = sc->top_sense;
1335 scmd->len = UB_SENSE_SIZE;
1336 scmd->done = ub_top_sense_done;
1337 scmd->back = cmd;
1338
1339 scmd->tag = sc->tagcnt++;
1340
1341 cmd->state = UB_CMDST_SENSE;
1342 ub_cmdtr_state(sc, cmd);
1343
1344 ub_cmdq_insert(sc, scmd);
1345 return;
1346
1347error:
1348 ub_state_done(sc, cmd, rc);
1349}
1350
1351/*
1352 * A helper for the command's state machine:
1353 * Submit a stall clear.
1354 */
1355static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
1356 int stalled_pipe)
1357{
1358 int endp;
1359 struct usb_ctrlrequest *cr;
1360 int rc;
1361
1362 endp = usb_pipeendpoint(stalled_pipe);
1363 if (usb_pipein (stalled_pipe))
1364 endp |= USB_DIR_IN;
1365
1366 cr = &sc->work_cr;
1367 cr->bRequestType = USB_RECIP_ENDPOINT;
1368 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1369 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1370 cr->wIndex = cpu_to_le16(endp);
1371 cr->wLength = cpu_to_le16(0);
1372
1373 UB_INIT_COMPLETION(sc->work_done);
1374
1375 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1376 (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
1377 sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
1378 sc->work_urb.actual_length = 0;
1379 sc->work_urb.error_count = 0;
1380 sc->work_urb.status = 0;
1381
1382 if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
1383 ub_complete(&sc->work_done);
1384 return rc;
1385 }
1386
1387 sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
1388 add_timer(&sc->work_timer);
1389 return 0;
1390}
1391
1392/*
1393 */
1394static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
1395{
1396 unsigned char *sense = scmd->data;
1397 struct ub_scsi_cmd *cmd;
1398
1399 /*
1400 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
1401 */
1402 ub_cmdtr_sense(sc, scmd, sense);
1403
1404 /*
1405 * Find the command which triggered the unit attention or a check,
1406 * save the sense into it, and advance its state machine.
1407 */
1408 if ((cmd = ub_cmdq_peek(sc)) == NULL) {
1409 printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
1410 return;
1411 }
1412 if (cmd != scmd->back) {
1413 printk(KERN_WARNING "%s: "
1414 "sense done for wrong command 0x%x on device %u\n",
1415 sc->name, cmd->tag, sc->dev->devnum);
1416 return;
1417 }
1418 if (cmd->state != UB_CMDST_SENSE) {
1419 printk(KERN_WARNING "%s: "
1420 "sense done with bad cmd state %d on device %u\n",
1421 sc->name, cmd->state, sc->dev->devnum);
1422 return;
1423 }
1424
1425 cmd->key = sense[2] & 0x0F;
1426 cmd->asc = sense[12];
1427 cmd->ascq = sense[13];
1428
1429 ub_scsi_urb_compl(sc, cmd);
1430}
1431
1432#if 0
1433/* Determine what the maximum LUN supported is */
1434int usb_stor_Bulk_max_lun(struct us_data *us)
1435{
1436 int result;
1437
1438 /* issue the command */
1439 result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
1440 US_BULK_GET_MAX_LUN,
1441 USB_DIR_IN | USB_TYPE_CLASS |
1442 USB_RECIP_INTERFACE,
1443 0, us->ifnum, us->iobuf, 1, HZ);
1444
1445 /*
1446 * Some devices (i.e. Iomega Zip100) need this -- apparently
1447 * the bulk pipes get STALLed when the GetMaxLUN request is
1448 * processed. This is, in theory, harmless to all other devices
1449 * (regardless of if they stall or not).
1450 */
1451 if (result < 0) {
1452 usb_stor_clear_halt(us, us->recv_bulk_pipe);
1453 usb_stor_clear_halt(us, us->send_bulk_pipe);
1454 }
1455
1456 US_DEBUGP("GetMaxLUN command result is %d, data is %d\n",
1457 result, us->iobuf[0]);
1458
1459 /* if we have a successful request, return the result */
1460 if (result == 1)
1461 return us->iobuf[0];
1462
1463 /* return the default -- no LUNs */
1464 return 0;
1465}
1466#endif
1467
1468/*
1469 * This is called from a process context.
1470 */
1471static void ub_revalidate(struct ub_dev *sc)
1472{
1473
1474 sc->readonly = 0; /* XXX Query this from the device */
1475
1476 sc->capacity.nsec = 0;
1477 sc->capacity.bsize = 512;
1478 sc->capacity.bshift = 0;
1479
1480 if (ub_sync_tur(sc) != 0)
1481 return; /* Not ready */
1482 sc->changed = 0;
1483
1484 if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
1485 /*
1486 * The retry here means something is wrong, either with the
1487 * device, with the transport, or with our code.
1488 * We keep this because sd.c has retries for capacity.
1489 */
1490 if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
1491 sc->capacity.nsec = 0;
1492 sc->capacity.bsize = 512;
1493 sc->capacity.bshift = 0;
1494 }
1495 }
1496}
1497
1498/*
1499 * The open function.
1500 * This is mostly needed to keep refcounting, but also to support
1501 * media checks on removable media drives.
1502 */
1503static int ub_bd_open(struct inode *inode, struct file *filp)
1504{
1505 struct gendisk *disk = inode->i_bdev->bd_disk;
1506 struct ub_dev *sc;
1507 unsigned long flags;
1508 int rc;
1509
1510 if ((sc = disk->private_data) == NULL)
1511 return -ENXIO;
1512 spin_lock_irqsave(&ub_lock, flags);
1513 if (atomic_read(&sc->poison)) {
1514 spin_unlock_irqrestore(&ub_lock, flags);
1515 return -ENXIO;
1516 }
1517 sc->openc++;
1518 spin_unlock_irqrestore(&ub_lock, flags);
1519
1520 /*
1521 * This is a workaround for a specific problem in our block layer.
1522 * In 2.6.9, register_disk duplicates the code from rescan_partitions.
1523 * However, if we do add_disk with a device which persistently reports
1524 * a changed media, add_disk calls register_disk, which does do_open,
1525	 * which will call rescan_partitions for changed media. After that,
1526	 * register_disk attempts to do it all again and causes double kobject
1527	 * registration and eventually an oops on module removal.
1528 *
1529 * The bottom line is, Al Viro says that we should not allow
1530 * bdev->bd_invalidated to be set when doing add_disk no matter what.
1531 */
1532 if (sc->first_open) {
1533 if (sc->changed) {
1534 sc->first_open = 0;
1535 rc = -ENOMEDIUM;
1536 goto err_open;
1537 }
1538 }
1539
1540 if (sc->removable || sc->readonly)
1541 check_disk_change(inode->i_bdev);
1542
1543 /*
1544	 * sd.c treats ->media_present and ->changed as distinct things,
1545	 * under some pretty murky conditions (a failure of READ CAPACITY).
1546	 * We may need that distinction one day.
1547 */
1548 if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) {
1549 rc = -ENOMEDIUM;
1550 goto err_open;
1551 }
1552
1553 if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
1554 rc = -EROFS;
1555 goto err_open;
1556 }
1557
1558 return 0;
1559
1560err_open:
1561 ub_put(sc);
1562 return rc;
1563}
1564
1565/*
1566 */
1567static int ub_bd_release(struct inode *inode, struct file *filp)
1568{
1569 struct gendisk *disk = inode->i_bdev->bd_disk;
1570 struct ub_dev *sc = disk->private_data;
1571
1572 ub_put(sc);
1573 return 0;
1574}
1575
1576/*
1577 * The ioctl interface.
1578 */
1579static int ub_bd_ioctl(struct inode *inode, struct file *filp,
1580 unsigned int cmd, unsigned long arg)
1581{
1582 struct gendisk *disk = inode->i_bdev->bd_disk;
1583 void __user *usermem = (void __user *) arg;
1584
1585 return scsi_cmd_ioctl(filp, disk, cmd, usermem);
1586}
1587
1588/*
1589 * This is called whenever a new disk is seen by the block layer or by ub_probe().
1590 * The main objective here is to discover the features of the media such as
1591 * the capacity, read-only status, etc. USB storage generally does not
1592 * need to be spun up, but if we needed it, this would be the place.
1593 *
1594 * This call can sleep.
1595 *
1596 * The return code is not used.
1597 */
1598static int ub_bd_revalidate(struct gendisk *disk)
1599{
1600 struct ub_dev *sc = disk->private_data;
1601
1602 ub_revalidate(sc);
1603 /* This is pretty much a long term P3 */
1604 if (!atomic_read(&sc->poison)) { /* Cover sc->dev */
1605 printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
1606 sc->name, sc->dev->devnum,
1607 sc->capacity.nsec, sc->capacity.bsize);
1608 }
1609
1610 /* XXX Support sector size switching like in sr.c */
1611 blk_queue_hardsect_size(disk->queue, sc->capacity.bsize);
1612 set_capacity(disk, sc->capacity.nsec);
1613 // set_disk_ro(sdkp->disk, sc->readonly);
1614
1615 return 0;
1616}
1617
1618/*
1619 * The check is called by the block layer to verify whether the media
1620 * is still available. It is supposed to be harmless, lightweight and
1621 * non-intrusive in case the media was not changed.
1622 *
1623 * This call can sleep.
1624 *
1625 * The return code is bool!
1626 */
1627static int ub_bd_media_changed(struct gendisk *disk)
1628{
1629 struct ub_dev *sc = disk->private_data;
1630
1631 if (!sc->removable)
1632 return 0;
1633
1634 /*
1635	 * We always clear checks after every command, so this is not
1636	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
1637	 * the device really is not ready, and operator or software
1638	 * intervention is required. One dangerous case is a drive which
1639	 * spins itself down: come the time to write dirty pages, the write
1640	 * fails and the block layer discards the data. Since we never
1641	 * spin drives up, such devices simply cannot be used with ub anyway.
1642 */
1643 if (ub_sync_tur(sc) != 0) {
1644 sc->changed = 1;
1645 return 1;
1646 }
1647
1648 return sc->changed;
1649}
1650
1651static struct block_device_operations ub_bd_fops = {
1652 .owner = THIS_MODULE,
1653 .open = ub_bd_open,
1654 .release = ub_bd_release,
1655 .ioctl = ub_bd_ioctl,
1656 .media_changed = ub_bd_media_changed,
1657 .revalidate_disk = ub_bd_revalidate,
1658};
1659
1660/*
1661 * Common ->done routine for commands executed synchronously.
1662 */
1663static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
1664{
1665 struct completion *cop = cmd->back;
1666 complete(cop);
1667}
1668
1669/*
1670 * Test if the device has a check condition on it, synchronously.
1671 */
1672static int ub_sync_tur(struct ub_dev *sc)
1673{
1674 struct ub_scsi_cmd *cmd;
1675 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
1676 unsigned long flags;
1677 struct completion compl;
1678 int rc;
1679
1680 init_completion(&compl);
1681
1682 rc = -ENOMEM;
1683 if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1684 goto err_alloc;
1685 memset(cmd, 0, ALLOC_SIZE);
1686
1687 cmd->cdb[0] = TEST_UNIT_READY;
1688 cmd->cdb_len = 6;
1689 cmd->dir = UB_DIR_NONE;
1690 cmd->state = UB_CMDST_INIT;
1691 cmd->done = ub_probe_done;
1692 cmd->back = &compl;
1693
1694 spin_lock_irqsave(&sc->lock, flags);
1695 cmd->tag = sc->tagcnt++;
1696
1697 rc = ub_submit_scsi(sc, cmd);
1698 spin_unlock_irqrestore(&sc->lock, flags);
1699
1700 if (rc != 0) {
1701 printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
1702 goto err_submit;
1703 }
1704
1705 wait_for_completion(&compl);
1706
1707 rc = cmd->error;
1708
1709 if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
1710 rc = cmd->key;
1711
1712err_submit:
1713 kfree(cmd);
1714err_alloc:
1715 return rc;
1716}
1717
1718/*
1719 * Read the SCSI capacity synchronously (for probing).
1720 */
1721static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
1722{
1723 struct ub_scsi_cmd *cmd;
1724 char *p;
1725 enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
1726 unsigned long flags;
1727 unsigned int bsize, shift;
1728 unsigned long nsec;
1729 struct completion compl;
1730 int rc;
1731
1732 init_completion(&compl);
1733
1734 rc = -ENOMEM;
1735 if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
1736 goto err_alloc;
1737 memset(cmd, 0, ALLOC_SIZE);
1738 p = (char *)cmd + sizeof(struct ub_scsi_cmd);
1739
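	/*
	 * 0x25 is READ CAPACITY(10): the device returns 8 bytes, the last
	 * LBA and the block length, both as big-endian 32-bit values.
	 */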
1740 cmd->cdb[0] = 0x25;
1741 cmd->cdb_len = 10;
1742 cmd->dir = UB_DIR_READ;
1743 cmd->state = UB_CMDST_INIT;
1744 cmd->data = p;
1745 cmd->len = 8;
1746 cmd->done = ub_probe_done;
1747 cmd->back = &compl;
1748
1749 spin_lock_irqsave(&sc->lock, flags);
1750 cmd->tag = sc->tagcnt++;
1751
1752 rc = ub_submit_scsi(sc, cmd);
1753 spin_unlock_irqrestore(&sc->lock, flags);
1754
1755 if (rc != 0) {
1756 printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
1757 goto err_submit;
1758 }
1759
1760 wait_for_completion(&compl);
1761
1762 if (cmd->error != 0) {
1763 printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */
1764 rc = -EIO;
1765 goto err_read;
1766 }
1767 if (cmd->act_len != 8) {
1768 printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */
1769 rc = -EIO;
1770 goto err_read;
1771 }
1772
1773 /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
1774 nsec = be32_to_cpu(*(__be32 *)p) + 1;
1775 bsize = be32_to_cpu(*(__be32 *)(p + 4));
1776 switch (bsize) {
1777 case 512: shift = 0; break;
1778 case 1024: shift = 1; break;
1779 case 2048: shift = 2; break;
1780 case 4096: shift = 3; break;
1781 default:
1782 printk("ub: Bad sector size %u\n", bsize); /* P3 */
1783 rc = -EDOM;
1784 goto err_inv_bsize;
1785 }
1786
1787 ret->bsize = bsize;
1788 ret->bshift = shift;
1789 ret->nsec = nsec << shift;
1790 rc = 0;
1791
1792err_inv_bsize:
1793err_read:
1794err_submit:
1795 kfree(cmd);
1796err_alloc:
1797 return rc;
1798}
1799
1800/*
1801 */
1802static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
1803{
1804 struct completion *cop = urb->context;
1805 complete(cop);
1806}
1807
1808static void ub_probe_timeout(unsigned long arg)
1809{
1810 struct completion *cop = (struct completion *) arg;
1811 complete(cop);
1812}
1813
1814/*
1815 * Clear initial stalls.
1816 */
1817static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
1818{
1819 int endp;
1820 struct usb_ctrlrequest *cr;
1821 struct completion compl;
1822 struct timer_list timer;
1823 int rc;
1824
1825 init_completion(&compl);
1826
1827 endp = usb_pipeendpoint(stalled_pipe);
1828 if (usb_pipein (stalled_pipe))
1829 endp |= USB_DIR_IN;
1830
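	/*
	 * Build a standard CLEAR_FEATURE(ENDPOINT_HALT) control request
	 * aimed at the stalled endpoint.
	 */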
1831 cr = &sc->work_cr;
1832 cr->bRequestType = USB_RECIP_ENDPOINT;
1833 cr->bRequest = USB_REQ_CLEAR_FEATURE;
1834 cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
1835 cr->wIndex = cpu_to_le16(endp);
1836 cr->wLength = cpu_to_le16(0);
1837
1838 usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
1839 (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
1840 sc->work_urb.transfer_flags = 0;
1841 sc->work_urb.actual_length = 0;
1842 sc->work_urb.error_count = 0;
1843 sc->work_urb.status = 0;
1844
1845 if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
1846 printk(KERN_WARNING
1847 "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
1848 return rc;
1849 }
1850
1851 init_timer(&timer);
1852 timer.function = ub_probe_timeout;
1853 timer.data = (unsigned long) &compl;
1854 timer.expires = jiffies + UB_CTRL_TIMEOUT;
1855 add_timer(&timer);
1856
1857 wait_for_completion(&compl);
1858
1859 del_timer_sync(&timer);
1860 usb_kill_urb(&sc->work_urb);
1861
1862 /* reset the endpoint toggle */
1863 usb_settoggle(sc->dev, endp, usb_pipeout(sc->last_pipe), 0);
1864
1865 return 0;
1866}
1867
1868/*
1869 * Get the pipe settings.
1870 */
1871static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
1872 struct usb_interface *intf)
1873{
1874 struct usb_host_interface *altsetting = intf->cur_altsetting;
1875 struct usb_endpoint_descriptor *ep_in = NULL;
1876 struct usb_endpoint_descriptor *ep_out = NULL;
1877 struct usb_endpoint_descriptor *ep;
1878 int i;
1879
1880 /*
1881 * Find the endpoints we need.
1882 * We are expecting a minimum of 2 endpoints - in and out (bulk).
1883 * We will ignore any others.
1884 */
1885 for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
1886 ep = &altsetting->endpoint[i].desc;
1887
1888 /* Is it a BULK endpoint? */
1889 if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1890 == USB_ENDPOINT_XFER_BULK) {
1891 /* BULK in or out? */
1892 if (ep->bEndpointAddress & USB_DIR_IN)
1893 ep_in = ep;
1894 else
1895 ep_out = ep;
1896 }
1897 }
1898
1899 if (ep_in == NULL || ep_out == NULL) {
1900 printk(KERN_NOTICE "%s: device %u failed endpoint check\n",
1901 sc->name, sc->dev->devnum);
1902 return -EIO;
1903 }
1904
1905 /* Calculate and store the pipe values */
1906 sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
1907 sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
1908 sc->send_bulk_pipe = usb_sndbulkpipe(dev,
1909 ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
1910 sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
1911 ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
1912
1913 return 0;
1914}
1915
1916/*
1917 * Probing is done in the process context, which allows us to cheat
1918 * and not to build a state machine for the discovery.
1919 */
1920static int ub_probe(struct usb_interface *intf,
1921 const struct usb_device_id *dev_id)
1922{
1923 struct ub_dev *sc;
1924 request_queue_t *q;
1925 struct gendisk *disk;
1926 int rc;
1927 int i;
1928
1929 rc = -ENOMEM;
1930 if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
1931 goto err_core;
1932 memset(sc, 0, sizeof(struct ub_dev));
1933 spin_lock_init(&sc->lock);
1934 usb_init_urb(&sc->work_urb);
1935 tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
1936 atomic_set(&sc->poison, 0);
1937
1938 init_timer(&sc->work_timer);
1939 sc->work_timer.data = (unsigned long) sc;
1940 sc->work_timer.function = ub_urb_timeout;
1941
1942 ub_init_completion(&sc->work_done);
1943 sc->work_done.done = 1; /* A little yuk, but oh well... */
1944
1945 rc = -ENOSR;
1946 if ((sc->id = ub_id_get()) == -1)
1947 goto err_id;
1948 snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a');
1949
1950 sc->dev = interface_to_usbdev(intf);
1951 sc->intf = intf;
1952 // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
1953
1954 usb_set_intfdata(intf, sc);
1955 usb_get_dev(sc->dev);
1956 // usb_get_intf(sc->intf); /* Do we need this? */
1957
1958 /* XXX Verify that we can handle the device (from descriptors) */
1959
1960 ub_get_pipes(sc, sc->dev, intf);
1961
1962 if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
1963 goto err_diag;
1964
1965 /*
1966 * At this point, all USB initialization is done, do upper layer.
1967 * We really hate halfway initialized structures, so from the
1968 * invariants perspective, this ub_dev is fully constructed at
1969 * this point.
1970 */
1971
1972 /*
1973 * This is needed to clear toggles. It is a problem only if we do
1974 * `rmmod ub && modprobe ub` without disconnects, but we like that.
1975 */
1976 ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
1977 ub_probe_clear_stall(sc, sc->send_bulk_pipe);
1978
1979 /*
1980 * The way this is used by the startup code is a little specific.
1981 * A SCSI check causes a USB stall. Our common case code sees it
1982 * and clears the check, after which the device is ready for use.
1983 * But if a check was not present, any command other than
1984 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
1985 *
1986 * If we neglect to clear the SCSI check, the first real command fails
1987	 * (which is the capacity readout). We clear that and retry, but why
1988	 * cause spurious retries for no reason?
1989 *
1990 * Revalidation may start with its own TEST_UNIT_READY, but that one
1991 * has to succeed, so we clear checks with an additional one here.
1992	 * In any case it's not our business how revalidation is implemented.
1993 */
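	/* Sense key 0x6 is UNIT ATTENTION; retry a few times to let it clear. */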
1994 for (i = 0; i < 3; i++) { /* Retries for benh's key */
1995 if ((rc = ub_sync_tur(sc)) <= 0) break;
1996 if (rc != 0x6) break;
1997 msleep(10);
1998 }
1999
2000 sc->removable = 1; /* XXX Query this from the device */
2001 sc->changed = 1; /* ub_revalidate clears only */
2002 sc->first_open = 1;
2003
2004 ub_revalidate(sc);
2005 /* This is pretty much a long term P3 */
2006 printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
2007 sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
2008
2009 /*
2010 * Just one disk per sc currently, but maybe more.
2011 */
2012 rc = -ENOMEM;
2013 if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
2014 goto err_diskalloc;
2015
2016 sc->disk = disk;
2017 sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a');
2018 sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a');
2019 disk->major = UB_MAJOR;
2020 disk->first_minor = sc->id * UB_MINORS_PER_MAJOR;
2021 disk->fops = &ub_bd_fops;
2022 disk->private_data = sc;
2023 disk->driverfs_dev = &intf->dev;
2024
2025 rc = -ENOMEM;
2026 if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
2027 goto err_blkqinit;
2028
2029 disk->queue = q;
2030
2031 // blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
2032 blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
2033 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
2034 // blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
2035 blk_queue_max_sectors(q, UB_MAX_SECTORS);
2036 blk_queue_hardsect_size(q, sc->capacity.bsize);
2037
2038 /*
2039 * This is a serious infraction, caused by a deficiency in the
2040 * USB sg interface (usb_sg_wait()). We plan to remove this once
2041 * we get mileage on the driver and can justify a change to USB API.
2042 * See blk_queue_bounce_limit() to understand this part.
2043 *
2044 * XXX And I still need to be aware of the DMA mask in the HC.
2045 */
2046 q->bounce_pfn = blk_max_low_pfn;
2047 q->bounce_gfp = GFP_NOIO;
2048
2049 q->queuedata = sc;
2050
2051 set_capacity(disk, sc->capacity.nsec);
2052 if (sc->removable)
2053 disk->flags |= GENHD_FL_REMOVABLE;
2054
2055 add_disk(disk);
2056
2057 return 0;
2058
2059err_blkqinit:
2060 put_disk(disk);
2061err_diskalloc:
2062 device_remove_file(&sc->intf->dev, &dev_attr_diag);
2063err_diag:
2064 usb_set_intfdata(intf, NULL);
2065 // usb_put_intf(sc->intf);
2066 usb_put_dev(sc->dev);
2067 ub_id_put(sc->id);
2068err_id:
2069 kfree(sc);
2070err_core:
2071 return rc;
2072}
2073
2074static void ub_disconnect(struct usb_interface *intf)
2075{
2076 struct ub_dev *sc = usb_get_intfdata(intf);
2077 struct gendisk *disk = sc->disk;
2078 unsigned long flags;
2079
2080 /*
2081 * Prevent ub_bd_release from pulling the rug from under us.
2082 * XXX This is starting to look like a kref.
2083 * XXX Why not to take this ref at probe time?
2084 */
2085 spin_lock_irqsave(&ub_lock, flags);
2086 sc->openc++;
2087 spin_unlock_irqrestore(&ub_lock, flags);
2088
2089 /*
2090	 * Fence off stall clearings, operations triggered by unlinkings and so on.
2091 * We do not attempt to unlink any URBs, because we do not trust the
2092 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
2093 */
2094 atomic_set(&sc->poison, 1);
2095
2096 /*
2097 * Blow away queued commands.
2098 *
2099 * Actually, this never works, because before we get here
2100 * the HCD terminates outstanding URB(s). It causes our
2101 * SCSI command queue to advance, commands fail to submit,
2102 * and the whole queue drains. So, we just use this code to
2103 * print warnings.
2104 */
2105 spin_lock_irqsave(&sc->lock, flags);
2106 {
2107 struct ub_scsi_cmd *cmd;
2108 int cnt = 0;
2109 while ((cmd = ub_cmdq_pop(sc)) != NULL) {
2110 cmd->error = -ENOTCONN;
2111 cmd->state = UB_CMDST_DONE;
2112 ub_cmdtr_state(sc, cmd);
2113 ub_cmdq_pop(sc);
2114 (*cmd->done)(sc, cmd);
2115 cnt++;
2116 }
2117 if (cnt != 0) {
2118 printk(KERN_WARNING "%s: "
2119 "%d was queued after shutdown\n", sc->name, cnt);
2120 }
2121 }
2122 spin_unlock_irqrestore(&sc->lock, flags);
2123
2124 /*
2125 * Unregister the upper layer.
2126 */
2127 if (disk->flags & GENHD_FL_UP)
2128 del_gendisk(disk);
2129 /*
2130 * I wish I could do:
2131 * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
2132 * As it is, we rely on our internal poisoning and let
2133	 * the upper levels spin furiously, failing all the I/O.
2134 */
2135
2136 /*
2137 * Taking a lock on a structure which is about to be freed
2138	 * is nonsensical. Here it is largely a way to do a debug freeze,
2139	 * and a bracket which shows where the nonsensical code segment ends.
2140 *
2141 * Testing for -EINPROGRESS is always a bug, so we are bending
2142 * the rules a little.
2143 */
2144 spin_lock_irqsave(&sc->lock, flags);
2145 if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
2146 printk(KERN_WARNING "%s: "
2147 "URB is active after disconnect\n", sc->name);
2148 }
2149 spin_unlock_irqrestore(&sc->lock, flags);
2150
2151 /*
2152	 * There is virtually no chance that another CPU is still running this
2153	 * long after ub_urb_complete should have called del_timer, but that
2154	 * holds only if the HCD didn't forget to deliver a callback on unlink.
2155 */
2156 del_timer_sync(&sc->work_timer);
2157
2158 /*
2159 * At this point there must be no commands coming from anyone
2160 * and no URBs left in transit.
2161 */
2162
2163 device_remove_file(&sc->intf->dev, &dev_attr_diag);
2164 usb_set_intfdata(intf, NULL);
2165 // usb_put_intf(sc->intf);
2166 sc->intf = NULL;
2167 usb_put_dev(sc->dev);
2168 sc->dev = NULL;
2169
2170 ub_put(sc);
2171}
2172
2173static struct usb_driver ub_driver = {
2174 .owner = THIS_MODULE,
2175 .name = "ub",
2176 .probe = ub_probe,
2177 .disconnect = ub_disconnect,
2178 .id_table = ub_usb_ids,
2179};
2180
2181static int __init ub_init(void)
2182{
2183 int rc;
2184
2185 /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n",
2186 sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev));
2187
2188 if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
2189 goto err_regblkdev;
2190 devfs_mk_dir(DEVFS_NAME);
2191
2192 if ((rc = usb_register(&ub_driver)) != 0)
2193 goto err_register;
2194
2195 return 0;
2196
2197err_register:
2198 devfs_remove(DEVFS_NAME);
2199 unregister_blkdev(UB_MAJOR, DRV_NAME);
2200err_regblkdev:
2201 return rc;
2202}
2203
2204static void __exit ub_exit(void)
2205{
2206 usb_deregister(&ub_driver);
2207
2208 devfs_remove(DEVFS_NAME);
2209 unregister_blkdev(UB_MAJOR, DRV_NAME);
2210}
2211
2212module_init(ub_init);
2213module_exit(ub_exit);
2214
2215MODULE_LICENSE("GPL");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
new file mode 100644
index 000000000000..0c4c121d2e79
--- /dev/null
+++ b/drivers/block/umem.c
@@ -0,0 +1,1256 @@
1/*
2 * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
3 *
4 * (C) 2001 San Mehat <nettwerk@valinux.com>
5 * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
6 * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
7 *
8 * This driver for the Micro Memory PCI Memory Module with Battery Backup
9 * is Copyright Micro Memory Inc 2001-2002. All rights reserved.
10 *
11 * This driver is released to the public under the terms of the
12 * GNU GENERAL PUBLIC LICENSE version 2
13 * See the file COPYING for details.
14 *
15 * This driver provides a standard block device interface for Micro Memory(tm)
16 * PCI based RAM boards.
17 * 10/05/01: Phap Nguyen - Rebuilt the driver
18 * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning
19 * 29oct2001:NeilBrown - Use make_request_fn instead of request_fn
 20 *			- use standard disk partitioning (so fdisk works).
21 * 08nov2001:NeilBrown - change driver name from "mm" to "umem"
22 * - incorporate into main kernel
23 * 08apr2002:NeilBrown - Move some of interrupt handle to tasklet
24 * - use spin_lock_bh instead of _irq
25 * - Never block on make_request. queue
26 * bh's instead.
27 * - unregister umem from devfs at mod unload
28 * - Change version to 2.3
29 * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal)
30 * 07Jan2002: P. Nguyen - Used PCI Memory Write & Invalidate for DMA
31 * 15May2002:NeilBrown - convert to bio for 2.5
32 * 17May2002:NeilBrown - remove init_mem initialisation. Instead detect
33 * - a sequence of writes that cover the card, and
34 * - set initialised bit then.
35 */
36
37#include <linux/config.h>
38#include <linux/sched.h>
39#include <linux/fs.h>
40#include <linux/bio.h>
41#include <linux/kernel.h>
42#include <linux/mm.h>
43#include <linux/mman.h>
44#include <linux/ioctl.h>
45#include <linux/module.h>
46#include <linux/init.h>
47#include <linux/interrupt.h>
48#include <linux/smp_lock.h>
49#include <linux/timer.h>
50#include <linux/pci.h>
51#include <linux/slab.h>
52
53#include <linux/fcntl.h> /* O_ACCMODE */
54#include <linux/hdreg.h> /* HDIO_GETGEO */
55
56#include <linux/umem.h>
57
58#include <asm/uaccess.h>
59#include <asm/io.h>
60
61#define PRINTK(x...) do {} while (0)
62#define dprintk(x...) do {} while (0)
63/*#define dprintk(x...) printk(x) */
64
65#define MM_MAXCARDS 4
66#define MM_RAHEAD 2 /* two sectors */
67#define MM_BLKSIZE 1024 /* 1k blocks */
68#define MM_HARDSECT 512 /* 512-byte hardware sectors */
69#define MM_SHIFT 6 /* max 64 partitions on 4 cards */
70
71/*
72 * Version Information
73 */
74
75#define DRIVER_VERSION "v2.3"
76#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown"
77#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver"
78
79static int debug;
80/* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
81#define HW_TRACE(x)
82
83#define DEBUG_LED_ON_TRANSFER 0x01
84#define DEBUG_BATTERY_POLLING 0x02
85
86module_param(debug, int, 0644);
87MODULE_PARM_DESC(debug, "Debug bitmask");
88
89static int pci_read_cmd = 0x0C; /* Read Multiple */
90module_param(pci_read_cmd, int, 0);
91MODULE_PARM_DESC(pci_read_cmd, "PCI read command");
92
93static int pci_write_cmd = 0x0F; /* Write and Invalidate */
94module_param(pci_write_cmd, int, 0);
95MODULE_PARM_DESC(pci_write_cmd, "PCI write command");
96
97static int pci_cmds;
98
99static int major_nr;
100
101#include <linux/blkdev.h>
102#include <linux/blkpg.h>
103
104struct cardinfo {
105 int card_number;
106 struct pci_dev *dev;
107
108 int irq;
109
110 unsigned long csr_base;
111 unsigned char __iomem *csr_remap;
112 unsigned long csr_len;
113#ifdef CONFIG_MM_MAP_MEMORY
114 unsigned long mem_base;
115 unsigned char __iomem *mem_remap;
116 unsigned long mem_len;
117#endif
118
119 unsigned int win_size; /* PCI window size */
120 unsigned int mm_size; /* size in kbytes */
121
122 unsigned int init_size; /* initial segment, in sectors,
123 * that we know to
124 * have been written
125 */
126 struct bio *bio, *currentbio, **biotail;
127
128 request_queue_t *queue;
129
130 struct mm_page {
131 dma_addr_t page_dma;
132 struct mm_dma_desc *desc;
133 int cnt, headcnt;
134 struct bio *bio, **biotail;
135 } mm_pages[2];
136#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
137
138 int Active, Ready;
139
140 struct tasklet_struct tasklet;
141 unsigned int dma_status;
142
143 struct {
144 int good;
145 int warned;
146 unsigned long last_change;
147 } battery[2];
148
149 spinlock_t lock;
150 int check_batteries;
151
152 int flags;
153};
154
155static struct cardinfo cards[MM_MAXCARDS];
156static struct block_device_operations mm_fops;
157static struct timer_list battery_timer;
158
159static int num_cards = 0;
160
161static struct gendisk *mm_gendisk[MM_MAXCARDS];
162
163static void check_batteries(struct cardinfo *card);
164
165/*
166-----------------------------------------------------------------------------------
167-- get_userbit
168-----------------------------------------------------------------------------------
169*/
170static int get_userbit(struct cardinfo *card, int bit)
171{
172 unsigned char led;
173
174 led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
175 return led & bit;
176}
177/*
178-----------------------------------------------------------------------------------
179-- set_userbit
180-----------------------------------------------------------------------------------
181*/
182static int set_userbit(struct cardinfo *card, int bit, unsigned char state)
183{
184 unsigned char led;
185
186 led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
187 if (state)
188 led |= bit;
189 else
190 led &= ~bit;
191 writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
192
193 return 0;
194}
195/*
196-----------------------------------------------------------------------------------
197-- set_led
198-----------------------------------------------------------------------------------
199*/
200/*
201 * NOTE: For the power LED, use the LED_POWER_* macros since they differ
202 */
203static void set_led(struct cardinfo *card, int shift, unsigned char state)
204{
205 unsigned char led;
206
207 led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
208 if (state == LED_FLIP)
209 led ^= (1<<shift);
210 else {
211 led &= ~(0x03 << shift);
212 led |= (state << shift);
213 }
214 writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
215
216}
217
218#ifdef MM_DIAG
219/*
220-----------------------------------------------------------------------------------
221-- dump_regs
222-----------------------------------------------------------------------------------
223*/
224static void dump_regs(struct cardinfo *card)
225{
226 unsigned char *p;
227 int i, i1;
228
229 p = card->csr_remap;
230 for (i = 0; i < 8; i++) {
231 printk(KERN_DEBUG "%p ", p);
232
233 for (i1 = 0; i1 < 16; i1++)
234 printk("%02x ", *p++);
235
236 printk("\n");
237 }
238}
239#endif
240/*
241-----------------------------------------------------------------------------------
242-- dump_dmastat
243-----------------------------------------------------------------------------------
244*/
245static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
246{
247 printk(KERN_DEBUG "MM%d*: DMAstat - ", card->card_number);
248 if (dmastat & DMASCR_ANY_ERR)
249 printk("ANY_ERR ");
250 if (dmastat & DMASCR_MBE_ERR)
251 printk("MBE_ERR ");
252 if (dmastat & DMASCR_PARITY_ERR_REP)
253 printk("PARITY_ERR_REP ");
254 if (dmastat & DMASCR_PARITY_ERR_DET)
255 printk("PARITY_ERR_DET ");
256 if (dmastat & DMASCR_SYSTEM_ERR_SIG)
257 printk("SYSTEM_ERR_SIG ");
258 if (dmastat & DMASCR_TARGET_ABT)
259 printk("TARGET_ABT ");
260 if (dmastat & DMASCR_MASTER_ABT)
261 printk("MASTER_ABT ");
262 if (dmastat & DMASCR_CHAIN_COMPLETE)
263 printk("CHAIN_COMPLETE ");
264 if (dmastat & DMASCR_DMA_COMPLETE)
265 printk("DMA_COMPLETE ");
266 printk("\n");
267}
268
269/*
270 * Theory of request handling
271 *
272 * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
273 * We have two pages of mm_dma_desc, holding about 64 descriptors
274 * each. These are allocated at init time.
 275 * One page is "Ready" and is either full, or can have requests added.
 276 * The other page might be "Active", on which DMA is happening.
 277 *
 278 * Whenever IO on the active page completes, the Ready page is activated
 279 * and the ex-Active page is cleaned out and made Ready.
 280 * Otherwise the Ready page is only activated when it becomes full, or
 281 * when mm_unplug_device is called via the unplug_io_fn.
 282 *
 283 * If a request arrives while both pages are full, it is queued, and b_rdev is
 284 * overloaded to record whether it was a read or a write.
285 *
286 * The interrupt handler only polls the device to clear the interrupt.
287 * The processing of the result is done in a tasklet.
288 */
289
290static void mm_start_io(struct cardinfo *card)
291{
292 /* we have the lock, we know there is
293 * no IO active, and we know that card->Active
294 * is set
295 */
296 struct mm_dma_desc *desc;
297 struct mm_page *page;
298 int offset;
299
300 /* make the last descriptor end the chain */
301 page = &card->mm_pages[card->Active];
302 PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
303 desc = &page->desc[page->cnt-1];
304
305 desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
306 desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
307 desc->sem_control_bits = desc->control_bits;
308
309
310 if (debug & DEBUG_LED_ON_TRANSFER)
311 set_led(card, LED_REMOVE, LED_ON);
312
313 desc = &page->desc[page->headcnt];
314 writel(0, card->csr_remap + DMA_PCI_ADDR);
315 writel(0, card->csr_remap + DMA_PCI_ADDR + 4);
316
317 writel(0, card->csr_remap + DMA_LOCAL_ADDR);
318 writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4);
319
320 writel(0, card->csr_remap + DMA_TRANSFER_SIZE);
321 writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4);
322
323 writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR);
324 writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4);
325
326 offset = ((char*)desc) - ((char*)page->desc);
327 writel(cpu_to_le32((page->page_dma+offset)&0xffffffff),
328 card->csr_remap + DMA_DESCRIPTOR_ADDR);
 329	/* Force the value to u64 before shifting, otherwise >> 32 is undefined in C
330 * and on some ports will do nothing ! */
331 writel(cpu_to_le32(((u64)page->page_dma)>>32),
332 card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);
333
334 /* Go, go, go */
335 writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds),
336 card->csr_remap + DMA_STATUS_CTRL);
337}
338
339static int add_bio(struct cardinfo *card);
340
341static void activate(struct cardinfo *card)
342{
 343	/* If no page is Active, and Ready is
 344	 * not empty, then switch the Ready page
 345	 * to Active and start IO.
 346	 * Then add any bios that are available to Ready.
347 */
348
349 do {
350 while (add_bio(card))
351 ;
352
353 if (card->Active == -1 &&
354 card->mm_pages[card->Ready].cnt > 0) {
355 card->Active = card->Ready;
356 card->Ready = 1-card->Ready;
357 mm_start_io(card);
358 }
359
360 } while (card->Active == -1 && add_bio(card));
361}
362
363static inline void reset_page(struct mm_page *page)
364{
365 page->cnt = 0;
366 page->headcnt = 0;
367 page->bio = NULL;
368 page->biotail = & page->bio;
369}
370
371static void mm_unplug_device(request_queue_t *q)
372{
373 struct cardinfo *card = q->queuedata;
374 unsigned long flags;
375
376 spin_lock_irqsave(&card->lock, flags);
377 if (blk_remove_plug(q))
378 activate(card);
379 spin_unlock_irqrestore(&card->lock, flags);
380}
381
382/*
 383 * If there is room on the Ready page, take
 384 * one bio off the list and add it.
 385 * Return 1 if there was room, else 0.
386 */
387static int add_bio(struct cardinfo *card)
388{
389 struct mm_page *p;
390 struct mm_dma_desc *desc;
391 dma_addr_t dma_handle;
392 int offset;
393 struct bio *bio;
394 int rw;
395 int len;
396
397 bio = card->currentbio;
398 if (!bio && card->bio) {
399 card->currentbio = card->bio;
400 card->bio = card->bio->bi_next;
401 if (card->bio == NULL)
402 card->biotail = &card->bio;
403 card->currentbio->bi_next = NULL;
404 return 1;
405 }
406 if (!bio)
407 return 0;
408
409 rw = bio_rw(bio);
410 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
411 return 0;
412
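	/*
	 * Map only the current bio_vec segment; each descriptor on the
	 * Ready page carries exactly one segment.
	 */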
413 len = bio_iovec(bio)->bv_len;
414 dma_handle = pci_map_page(card->dev,
415 bio_page(bio),
416 bio_offset(bio),
417 len,
418 (rw==READ) ?
419 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
420
421 p = &card->mm_pages[card->Ready];
422 desc = &p->desc[p->cnt];
423 p->cnt++;
424 if ((p->biotail) != &bio->bi_next) {
425 *(p->biotail) = bio;
426 p->biotail = &(bio->bi_next);
427 bio->bi_next = NULL;
428 }
429
430 desc->data_dma_handle = dma_handle;
431
432 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
433 desc->local_addr= cpu_to_le64(bio->bi_sector << 9);
434 desc->transfer_size = cpu_to_le32(len);
435 offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc));
436 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
437 desc->zero1 = desc->zero2 = 0;
438 offset = ( ((char*)(desc+1)) - ((char*)p->desc));
439 desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
440 desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN|
441 DMASCR_PARITY_INT_EN|
442 DMASCR_CHAIN_EN |
443 DMASCR_SEM_EN |
444 pci_cmds);
445 if (rw == WRITE)
446 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
447 desc->sem_control_bits = desc->control_bits;
448
449 bio->bi_sector += (len>>9);
450 bio->bi_size -= len;
451 bio->bi_idx++;
452 if (bio->bi_idx >= bio->bi_vcnt)
453 card->currentbio = NULL;
454
455 return 1;
456}
457
458static void process_page(unsigned long data)
459{
460 /* check if any of the requests in the page are DMA_COMPLETE,
461 * and deal with them appropriately.
462 * If we find a descriptor without DMA_COMPLETE in the semaphore, then
463 * dma must have hit an error on that descriptor, so use dma_status instead
464 * and assume that all following descriptors must be re-tried.
465 */
466 struct mm_page *page;
467 struct bio *return_bio=NULL;
468 struct cardinfo *card = (struct cardinfo *)data;
469 unsigned int dma_status = card->dma_status;
470
471 spin_lock_bh(&card->lock);
472 if (card->Active < 0)
473 goto out_unlock;
474 page = &card->mm_pages[card->Active];
475
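	/*
	 * Walk the Active page's descriptors from headcnt up to cnt; each
	 * one covers a single bio segment that has either completed or hit
	 * the error that stopped the chain.
	 */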
476 while (page->headcnt < page->cnt) {
477 struct bio *bio = page->bio;
478 struct mm_dma_desc *desc = &page->desc[page->headcnt];
479 int control = le32_to_cpu(desc->sem_control_bits);
480 int last=0;
481 int idx;
482
483 if (!(control & DMASCR_DMA_COMPLETE)) {
484 control = dma_status;
485 last=1;
486 }
487 page->headcnt++;
488 idx = bio->bi_phys_segments;
489 bio->bi_phys_segments++;
490 if (bio->bi_phys_segments >= bio->bi_vcnt)
491 page->bio = bio->bi_next;
492
493 pci_unmap_page(card->dev, desc->data_dma_handle,
494 bio_iovec_idx(bio,idx)->bv_len,
495 (control& DMASCR_TRANSFER_READ) ?
496 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
497 if (control & DMASCR_HARD_ERROR) {
498 /* error */
499 clear_bit(BIO_UPTODATE, &bio->bi_flags);
500 printk(KERN_WARNING "MM%d: I/O error on sector %d/%d\n",
501 card->card_number,
502 le32_to_cpu(desc->local_addr)>>9,
503 le32_to_cpu(desc->transfer_size));
504 dump_dmastat(card, control);
505 } else if (test_bit(BIO_RW, &bio->bi_rw) &&
506 le32_to_cpu(desc->local_addr)>>9 == card->init_size) {
507 card->init_size += le32_to_cpu(desc->transfer_size)>>9;
508 if (card->init_size>>1 >= card->mm_size) {
509 printk(KERN_INFO "MM%d: memory now initialised\n",
510 card->card_number);
511 set_userbit(card, MEMORY_INITIALIZED, 1);
512 }
513 }
514 if (bio != page->bio) {
515 bio->bi_next = return_bio;
516 return_bio = bio;
517 }
518
519 if (last) break;
520 }
521
522 if (debug & DEBUG_LED_ON_TRANSFER)
523 set_led(card, LED_REMOVE, LED_OFF);
524
525 if (card->check_batteries) {
526 card->check_batteries = 0;
527 check_batteries(card);
528 }
529 if (page->headcnt >= page->cnt) {
530 reset_page(page);
531 card->Active = -1;
532 activate(card);
533 } else {
534 /* haven't finished with this one yet */
535 PRINTK("do some more\n");
536 mm_start_io(card);
537 }
538 out_unlock:
539 spin_unlock_bh(&card->lock);
540
541 while(return_bio) {
542 struct bio *bio = return_bio;
543
544 return_bio = bio->bi_next;
545 bio->bi_next = NULL;
546 bio_endio(bio, bio->bi_size, 0);
547 }
548}
549
550/*
551-----------------------------------------------------------------------------------
552-- mm_make_request
553-----------------------------------------------------------------------------------
554*/
555static int mm_make_request(request_queue_t *q, struct bio *bio)
556{
557 struct cardinfo *card = q->queuedata;
 558	PRINTK("mm_make_request %llu %u\n", (unsigned long long)bio->bi_sector, bio->bi_size);
559
560 bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
561 spin_lock_irq(&card->lock);
562 *card->biotail = bio;
563 bio->bi_next = NULL;
564 card->biotail = &bio->bi_next;
565 blk_plug_device(q);
566 spin_unlock_irq(&card->lock);
567
568 return 0;
569}
570
571/*
572-----------------------------------------------------------------------------------
573-- mm_interrupt
574-----------------------------------------------------------------------------------
575*/
576static irqreturn_t mm_interrupt(int irq, void *__card, struct pt_regs *regs)
577{
578 struct cardinfo *card = (struct cardinfo *) __card;
579 unsigned int dma_status;
580 unsigned short cfg_status;
581
582HW_TRACE(0x30);
583
584 dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL));
585
586 if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) {
587 /* interrupt wasn't for me ... */
588 return IRQ_NONE;
589 }
590
591 /* clear COMPLETION interrupts */
592 if (card->flags & UM_FLAG_NO_BYTE_STATUS)
593 writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE),
594 card->csr_remap+ DMA_STATUS_CTRL);
595 else
596 writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
597 card->csr_remap+ DMA_STATUS_CTRL + 2);
598
599 /* log errors and clear interrupt status */
600 if (dma_status & DMASCR_ANY_ERR) {
601 unsigned int data_log1, data_log2;
602 unsigned int addr_log1, addr_log2;
603 unsigned char stat, count, syndrome, check;
604
605 stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS);
606
607 data_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG));
608 data_log2 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG + 4));
609 addr_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_ADDR_LOG));
610 addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4);
611
612 count = readb(card->csr_remap + ERROR_COUNT);
613 syndrome = readb(card->csr_remap + ERROR_SYNDROME);
614 check = readb(card->csr_remap + ERROR_CHECK);
615
616 dump_dmastat(card, dma_status);
617
618 if (stat & 0x01)
619 printk(KERN_ERR "MM%d*: Memory access error detected (err count %d)\n",
620 card->card_number, count);
621 if (stat & 0x02)
622 printk(KERN_ERR "MM%d*: Multi-bit EDC error\n",
623 card->card_number);
624
625 printk(KERN_ERR "MM%d*: Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
626 card->card_number, addr_log2, addr_log1, data_log2, data_log1);
627 printk(KERN_ERR "MM%d*: Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
628 card->card_number, check, syndrome);
629
630 writeb(0, card->csr_remap + ERROR_COUNT);
631 }
632
633 if (dma_status & DMASCR_PARITY_ERR_REP) {
634 printk(KERN_ERR "MM%d*: PARITY ERROR REPORTED\n", card->card_number);
635 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
636 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
637 }
638
639 if (dma_status & DMASCR_PARITY_ERR_DET) {
640 printk(KERN_ERR "MM%d*: PARITY ERROR DETECTED\n", card->card_number);
641 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
642 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
643 }
644
645 if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
646 printk(KERN_ERR "MM%d*: SYSTEM ERROR\n", card->card_number);
647 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
648 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
649 }
650
651 if (dma_status & DMASCR_TARGET_ABT) {
652 printk(KERN_ERR "MM%d*: TARGET ABORT\n", card->card_number);
653 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
654 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
655 }
656
657 if (dma_status & DMASCR_MASTER_ABT) {
658 printk(KERN_ERR "MM%d*: MASTER ABORT\n", card->card_number);
659 pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
660 pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
661 }
662
663 /* and process the DMA descriptors */
664 card->dma_status = dma_status;
665 tasklet_schedule(&card->tasklet);
666
667HW_TRACE(0x36);
668
669 return IRQ_HANDLED;
670}
671/*
672-----------------------------------------------------------------------------------
673-- set_fault_to_battery_status
674-----------------------------------------------------------------------------------
675*/
676/*
677 * If both batteries are good, no LED
678 * If either battery has been warned, solid LED
679 * If both batteries are bad, flash the LED quickly
680 * If either battery is bad, flash the LED semi quickly
681 */
682static void set_fault_to_battery_status(struct cardinfo *card)
683{
684 if (card->battery[0].good && card->battery[1].good)
685 set_led(card, LED_FAULT, LED_OFF);
686 else if (card->battery[0].warned || card->battery[1].warned)
687 set_led(card, LED_FAULT, LED_ON);
688 else if (!card->battery[0].good && !card->battery[1].good)
689 set_led(card, LED_FAULT, LED_FLASH_7_0);
690 else
691 set_led(card, LED_FAULT, LED_FLASH_3_5);
692}
693
694static void init_battery_timer(void);
695
696
697/*
698-----------------------------------------------------------------------------------
699-- check_battery
700-----------------------------------------------------------------------------------
701*/
702static int check_battery(struct cardinfo *card, int battery, int status)
703{
704 if (status != card->battery[battery].good) {
705 card->battery[battery].good = !card->battery[battery].good;
706 card->battery[battery].last_change = jiffies;
707
708 if (card->battery[battery].good) {
709 printk(KERN_ERR "MM%d: Battery %d now good\n",
710 card->card_number, battery + 1);
711 card->battery[battery].warned = 0;
712 } else
713 printk(KERN_ERR "MM%d: Battery %d now FAILED\n",
714 card->card_number, battery + 1);
715
716 return 1;
717 } else if (!card->battery[battery].good &&
718 !card->battery[battery].warned &&
719 time_after_eq(jiffies, card->battery[battery].last_change +
720 (HZ * 60 * 60 * 5))) {
721 printk(KERN_ERR "MM%d: Battery %d still FAILED after 5 hours\n",
722 card->card_number, battery + 1);
723 card->battery[battery].warned = 1;
724
725 return 1;
726 }
727
728 return 0;
729}
730/*
731-----------------------------------------------------------------------------------
732-- check_batteries
733-----------------------------------------------------------------------------------
734*/
735static void check_batteries(struct cardinfo *card)
736{
737 /* NOTE: this must *never* be called while the card
738 * is doing (bus-to-card) DMA, or you will need the
739 * reset switch
740 */
741 unsigned char status;
742 int ret1, ret2;
743
744 status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
745 if (debug & DEBUG_BATTERY_POLLING)
746 printk(KERN_DEBUG "MM%d: checking battery status, 1 = %s, 2 = %s\n",
747 card->card_number,
748 (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
749 (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
750
751 ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
752 ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));
753
754 if (ret1 || ret2)
755 set_fault_to_battery_status(card);
756}
757
758static void check_all_batteries(unsigned long ptr)
759{
760 int i;
761
762 for (i = 0; i < num_cards; i++)
763 if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
764 struct cardinfo *card = &cards[i];
765 spin_lock_bh(&card->lock);
766 if (card->Active >= 0)
767 card->check_batteries = 1;
768 else
769 check_batteries(card);
770 spin_unlock_bh(&card->lock);
771 }
772
773 init_battery_timer();
774}
775/*
776-----------------------------------------------------------------------------------
777-- init_battery_timer
778-----------------------------------------------------------------------------------
779*/
780static void init_battery_timer(void)
781{
782 init_timer(&battery_timer);
783 battery_timer.function = check_all_batteries;
784 battery_timer.expires = jiffies + (HZ * 60);
785 add_timer(&battery_timer);
786}
787/*
788-----------------------------------------------------------------------------------
789-- del_battery_timer
790-----------------------------------------------------------------------------------
791*/
792static void del_battery_timer(void)
793{
794 del_timer(&battery_timer);
795}
796/*
797-----------------------------------------------------------------------------------
798-- mm_revalidate
799-----------------------------------------------------------------------------------
800*/
801/*
802 * Note no locks taken out here. In a worst case scenario, we could drop
803 * a chunk of system memory. But that should never happen, since validation
804 * happens at open or mount time, when locks are held.
805 *
806 * That's crap, since doing that while some partitions are opened
807 * or mounted will give you really nasty results.
808 */
809static int mm_revalidate(struct gendisk *disk)
810{
811 struct cardinfo *card = disk->private_data;
812 set_capacity(disk, card->mm_size << 1);
813 return 0;
814}
815/*
816-----------------------------------------------------------------------------------
817-- mm_ioctl
818-----------------------------------------------------------------------------------
819*/
820static int mm_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
821{
822 if (cmd == HDIO_GETGEO) {
823 struct cardinfo *card = i->i_bdev->bd_disk->private_data;
824 int size = card->mm_size * (1024 / MM_HARDSECT);
825 struct hd_geometry geo;
826 /*
827 * get geometry: we have to fake one... trim the size to a
 828		 * multiple of 2048 (1M): say we have 32 sectors and 64 heads,
 829		 * and however many cylinders that works out to.
830 */
831 geo.heads = 64;
832 geo.sectors = 32;
833 geo.start = get_start_sect(i->i_bdev);
834 geo.cylinders = size / (geo.heads * geo.sectors);
835
836 if (copy_to_user((void __user *) arg, &geo, sizeof(geo)))
837 return -EFAULT;
838 return 0;
839 }
840
841 return -EINVAL;
842}
843/*
844-----------------------------------------------------------------------------------
845-- mm_check_change
846-----------------------------------------------------------------------------------
847 Future support for removable devices
848*/
849static int mm_check_change(struct gendisk *disk)
850{
851/* struct cardinfo *dev = disk->private_data; */
852 return 0;
853}
854/*
855-----------------------------------------------------------------------------------
856-- mm_fops
857-----------------------------------------------------------------------------------
858*/
859static struct block_device_operations mm_fops = {
860 .owner = THIS_MODULE,
861 .ioctl = mm_ioctl,
862 .revalidate_disk= mm_revalidate,
863 .media_changed = mm_check_change,
864};
865/*
866-----------------------------------------------------------------------------------
867-- mm_pci_probe
868-----------------------------------------------------------------------------------
869*/
870static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
871{
872 int ret = -ENODEV;
873 struct cardinfo *card = &cards[num_cards];
874 unsigned char mem_present;
875 unsigned char batt_status;
876 unsigned int saved_bar, data;
877 int magic_number;
878
879 if (pci_enable_device(dev) < 0)
880 return -ENODEV;
881
882 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
883 pci_set_master(dev);
884
885 card->dev = dev;
886 card->card_number = num_cards;
887
888 card->csr_base = pci_resource_start(dev, 0);
889 card->csr_len = pci_resource_len(dev, 0);
890#ifdef CONFIG_MM_MAP_MEMORY
891 card->mem_base = pci_resource_start(dev, 1);
892 card->mem_len = pci_resource_len(dev, 1);
893#endif
894
895 printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
896 card->card_number, dev->bus->number, dev->devfn);
897
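	/*
	 * Prefer a 64-bit DMA mask; fall back to 32 bits, and give up only
	 * if both attempts fail.
	 */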
898 if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) &&
 899	    pci_set_dma_mask(dev, 0xffffffffLL)) {
900 printk(KERN_WARNING "MM%d: NO suitable DMA found\n",num_cards);
901 return -ENOMEM;
902 }
903 if (!request_mem_region(card->csr_base, card->csr_len, "Micro Memory")) {
904 printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
905 ret = -ENOMEM;
906
907 goto failed_req_csr;
908 }
909
910 card->csr_remap = ioremap_nocache(card->csr_base, card->csr_len);
911 if (!card->csr_remap) {
912 printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
913 ret = -ENOMEM;
914
915 goto failed_remap_csr;
916 }
917
918 printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
919 card->csr_base, card->csr_remap, card->csr_len);
920
921#ifdef CONFIG_MM_MAP_MEMORY
922 if (!request_mem_region(card->mem_base, card->mem_len, "Micro Memory")) {
923 printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
924 ret = -ENOMEM;
925
926 goto failed_req_mem;
927 }
928
929 if (!(card->mem_remap = ioremap(card->mem_base, cards->mem_len))) {
930 printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
931 ret = -ENOMEM;
932
933 goto failed_remap_mem;
934 }
935
936 printk(KERN_INFO "MM%d: MEM 0x%8lx -> 0x%8lx (0x%lx)\n", card->card_number,
937 card->mem_base, card->mem_remap, card->mem_len);
938#else
939 printk(KERN_INFO "MM%d: MEM area not remapped (CONFIG_MM_MAP_MEMORY not set)\n",
940 card->card_number);
941#endif
942 switch(card->dev->device) {
943 case 0x5415:
944 card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
945 magic_number = 0x59;
946 break;
947
948 case 0x5425:
949 card->flags |= UM_FLAG_NO_BYTE_STATUS;
950 magic_number = 0x5C;
951 break;
952
953 case 0x6155:
954 card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT;
955 magic_number = 0x99;
956 break;
957
958 default:
959 magic_number = 0x100;
960 break;
961 }
962
963 if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
964 printk(KERN_ERR "MM%d: Magic number invalid\n", card->card_number);
965 ret = -ENOMEM;
966 goto failed_magic;
967 }
968
969 card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
970 PAGE_SIZE*2,
971 &card->mm_pages[0].page_dma);
972 card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
973 PAGE_SIZE*2,
974 &card->mm_pages[1].page_dma);
975 if (card->mm_pages[0].desc == NULL ||
976 card->mm_pages[1].desc == NULL) {
977 printk(KERN_ERR "MM%d: alloc failed\n", card->card_number);
978 goto failed_alloc;
979 }
980 reset_page(&card->mm_pages[0]);
981 reset_page(&card->mm_pages[1]);
982 card->Ready = 0; /* page 0 is ready */
983 card->Active = -1; /* no page is active */
984 card->bio = NULL;
985 card->biotail = &card->bio;
986
987 card->queue = blk_alloc_queue(GFP_KERNEL);
988 if (!card->queue)
989 goto failed_alloc;
990
991 blk_queue_make_request(card->queue, mm_make_request);
992 card->queue->queuedata = card;
993 card->queue->unplug_fn = mm_unplug_device;
994
995 tasklet_init(&card->tasklet, process_page, (unsigned long)card);
996
997 card->check_batteries = 0;
998
999 mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
1000 switch (mem_present) {
1001 case MEM_128_MB:
1002 card->mm_size = 1024 * 128;
1003 break;
1004 case MEM_256_MB:
1005 card->mm_size = 1024 * 256;
1006 break;
1007 case MEM_512_MB:
1008 card->mm_size = 1024 * 512;
1009 break;
1010 case MEM_1_GB:
1011 card->mm_size = 1024 * 1024;
1012 break;
1013 case MEM_2_GB:
1014 card->mm_size = 1024 * 2048;
1015 break;
1016 default:
1017 card->mm_size = 0;
1018 break;
1019 }
1020
1021 /* Clear the LED's we control */
1022 set_led(card, LED_REMOVE, LED_OFF);
1023 set_led(card, LED_FAULT, LED_OFF);
1024
1025 batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
1026
1027 card->battery[0].good = !(batt_status & BATTERY_1_FAILURE);
1028 card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
1029 card->battery[0].last_change = card->battery[1].last_change = jiffies;
1030
1031 if (card->flags & UM_FLAG_NO_BATT)
1032 printk(KERN_INFO "MM%d: Size %d KB\n",
1033 card->card_number, card->mm_size);
1034 else {
1035 printk(KERN_INFO "MM%d: Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
1036 card->card_number, card->mm_size,
1037 (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"),
1038 card->battery[0].good ? "OK" : "FAILURE",
1039 (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"),
1040 card->battery[1].good ? "OK" : "FAILURE");
1041
1042 set_fault_to_battery_status(card);
1043 }
1044
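	/*
	 * Size BAR1 the classic way: save the BAR, write all ones, read back
	 * the address mask, restore the BAR, then invert the mask and add one
	 * to get the window size.
	 */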
1045 pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar);
1046 data = 0xffffffff;
1047 pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data);
1048 pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data);
1049 pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar);
1050 data &= 0xfffffff0;
1051 data = ~data;
1052 data += 1;
1053
1054 card->win_size = data;
1055
1056
1057 if (request_irq(dev->irq, mm_interrupt, SA_SHIRQ, "pci-umem", card)) {
1058 printk(KERN_ERR "MM%d: Unable to allocate IRQ\n", card->card_number);
1059 ret = -ENODEV;
1060
1061 goto failed_req_irq;
1062 }
1063
1064 card->irq = dev->irq;
1065 printk(KERN_INFO "MM%d: Window size %d bytes, IRQ %d\n", card->card_number,
1066 card->win_size, card->irq);
1067
1068 spin_lock_init(&card->lock);
1069
1070 pci_set_drvdata(dev, card);
1071
1072 if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */
1073 pci_write_cmd = 0x07; /* then Memory Write command */
1074
1075 if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */
1076 unsigned short cfg_command;
1077 pci_read_config_word(dev, PCI_COMMAND, &cfg_command);
1078 cfg_command |= 0x10; /* Memory Write & Invalidate Enable */
1079 pci_write_config_word(dev, PCI_COMMAND, cfg_command);
1080 }
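	/*
	 * Pack the chosen PCI read and write commands into the DMA control
	 * word: read command in bits 31:28, write command in bits 27:24.
	 */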
1081 pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24);
1082
1083 num_cards++;
1084
1085 if (!get_userbit(card, MEMORY_INITIALIZED)) {
1086 printk(KERN_INFO "MM%d: memory NOT initialized. Consider over-writing whole device.\n", card->card_number);
1087 card->init_size = 0;
1088 } else {
1089 printk(KERN_INFO "MM%d: memory already initialized\n", card->card_number);
1090 card->init_size = card->mm_size;
1091 }
1092
1093 /* Enable ECC */
1094 writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL);
1095
1096 return 0;
1097
1098 failed_req_irq:
1099 failed_alloc:
1100 if (card->mm_pages[0].desc)
1101 pci_free_consistent(card->dev, PAGE_SIZE*2,
1102 card->mm_pages[0].desc,
1103 card->mm_pages[0].page_dma);
1104 if (card->mm_pages[1].desc)
1105 pci_free_consistent(card->dev, PAGE_SIZE*2,
1106 card->mm_pages[1].desc,
1107 card->mm_pages[1].page_dma);
1108 failed_magic:
1109#ifdef CONFIG_MM_MAP_MEMORY
1110 iounmap(card->mem_remap);
1111 failed_remap_mem:
1112 release_mem_region(card->mem_base, card->mem_len);
1113 failed_req_mem:
1114#endif
1115 iounmap(card->csr_remap);
1116 failed_remap_csr:
1117 release_mem_region(card->csr_base, card->csr_len);
1118 failed_req_csr:
1119
1120 return ret;
1121}
1122/*
1123-----------------------------------------------------------------------------------
1124-- mm_pci_remove
1125-----------------------------------------------------------------------------------
1126*/
1127static void mm_pci_remove(struct pci_dev *dev)
1128{
1129 struct cardinfo *card = pci_get_drvdata(dev);
1130
1131 tasklet_kill(&card->tasklet);
1132 iounmap(card->csr_remap);
1133 release_mem_region(card->csr_base, card->csr_len);
1134#ifdef CONFIG_MM_MAP_MEMORY
1135 iounmap(card->mem_remap);
1136 release_mem_region(card->mem_base, card->mem_len);
1137#endif
1138 free_irq(card->irq, card);
1139
1140 if (card->mm_pages[0].desc)
1141 pci_free_consistent(card->dev, PAGE_SIZE*2,
1142 card->mm_pages[0].desc,
1143 card->mm_pages[0].page_dma);
1144 if (card->mm_pages[1].desc)
1145 pci_free_consistent(card->dev, PAGE_SIZE*2,
1146 card->mm_pages[1].desc,
1147 card->mm_pages[1].page_dma);
1148 blk_put_queue(card->queue);
1149}
1150
1151static const struct pci_device_id mm_pci_ids[] = { {
1152 .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
1153 .device = PCI_DEVICE_ID_MICRO_MEMORY_5415CN,
1154 }, {
1155 .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
1156 .device = PCI_DEVICE_ID_MICRO_MEMORY_5425CN,
1157 }, {
1158 .vendor = PCI_VENDOR_ID_MICRO_MEMORY,
1159 .device = PCI_DEVICE_ID_MICRO_MEMORY_6155,
1160 }, {
1161 .vendor = 0x8086,
1162 .device = 0xB555,
1163 .subvendor= 0x1332,
1164 .subdevice= 0x5460,
1165 .class = 0x050000,
1166 .class_mask= 0,
1167 }, { /* end: all zeroes */ }
1168};
1169
1170MODULE_DEVICE_TABLE(pci, mm_pci_ids);
1171
1172static struct pci_driver mm_pci_driver = {
1173 .name = "umem",
1174 .id_table = mm_pci_ids,
1175 .probe = mm_pci_probe,
1176 .remove = mm_pci_remove,
1177};
1178/*
1179-----------------------------------------------------------------------------------
1180-- mm_init
1181-----------------------------------------------------------------------------------
1182*/
1183
1184static int __init mm_init(void)
1185{
1186 int retval, i;
1187 int err;
1188
1189 printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
1190
1191 retval = pci_module_init(&mm_pci_driver);
1192 if (retval)
1193 return -ENOMEM;
1194
1195 err = major_nr = register_blkdev(0, "umem");
1196 if (err < 0)
1197 return -EIO;
1198
1199 for (i = 0; i < num_cards; i++) {
1200 mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
1201 if (!mm_gendisk[i])
1202 goto out;
1203 }
1204
1205 for (i = 0; i < num_cards; i++) {
1206 struct gendisk *disk = mm_gendisk[i];
1207 sprintf(disk->disk_name, "umem%c", 'a'+i);
1208 sprintf(disk->devfs_name, "umem/card%d", i);
1209 spin_lock_init(&cards[i].lock);
1210 disk->major = major_nr;
1211 disk->first_minor = i << MM_SHIFT;
1212 disk->fops = &mm_fops;
1213 disk->private_data = &cards[i];
1214 disk->queue = cards[i].queue;
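		/* set_capacity() wants 512-byte sectors; the shift by one below
		 * suggests mm_size is kept in kilobytes (an assumption - the
		 * unit is not visible in this excerpt) */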
1215 set_capacity(disk, cards[i].mm_size << 1);
1216 add_disk(disk);
1217 }
1218
1219 init_battery_timer();
1220 printk("MM: desc_per_page = %ld\n", DESC_PER_PAGE);
1221/* printk("mm_init: Done. 10-19-01 9:00\n"); */
1222 return 0;
1223
1224out:
1225 unregister_blkdev(major_nr, "umem");
1226 while (i--)
1227 put_disk(mm_gendisk[i]);
1228 return -ENOMEM;
1229}
1230/*
1231-----------------------------------------------------------------------------------
1232-- mm_cleanup
1233-----------------------------------------------------------------------------------
1234*/
1235static void __exit mm_cleanup(void)
1236{
1237 int i;
1238
1239 del_battery_timer();
1240
1241 for (i=0; i < num_cards ; i++) {
1242 del_gendisk(mm_gendisk[i]);
1243 put_disk(mm_gendisk[i]);
1244 }
1245
1246 pci_unregister_driver(&mm_pci_driver);
1247
1248 unregister_blkdev(major_nr, "umem");
1249}
1250
1251module_init(mm_init);
1252module_exit(mm_cleanup);
1253
1254MODULE_AUTHOR(DRIVER_AUTHOR);
1255MODULE_DESCRIPTION(DRIVER_DESC);
1256MODULE_LICENSE("GPL");
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
new file mode 100644
index 000000000000..46e56a25d2c8
--- /dev/null
+++ b/drivers/block/viodasd.c
@@ -0,0 +1,846 @@
1/* -*- linux-c -*-
2 * viodasd.c
3 * Authors: Dave Boutcher <boutcher@us.ibm.com>
4 * Ryan Arnold <ryanarn@us.ibm.com>
5 * Colin Devilbiss <devilbis@us.ibm.com>
6 * Stephen Rothwell <sfr@au1.ibm.com>
7 *
8 * (C) Copyright 2000-2004 IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * This routine provides access to disk space (termed "DASD" in historical
25 * IBM terms) owned and managed by an OS/400 partition running on the
26 * same box as this Linux partition.
27 *
28 * All disk operations are performed by sending messages back and forth to
29 * the OS/400 partition.
30 */
31#include <linux/major.h>
32#include <linux/fs.h>
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/blkdev.h>
36#include <linux/genhd.h>
37#include <linux/hdreg.h>
38#include <linux/errno.h>
39#include <linux/init.h>
40#include <linux/string.h>
41#include <linux/dma-mapping.h>
42#include <linux/completion.h>
43#include <linux/device.h>
44#include <linux/kernel.h>
45
46#include <asm/uaccess.h>
47#include <asm/vio.h>
48#include <asm/iSeries/HvTypes.h>
49#include <asm/iSeries/HvLpEvent.h>
50#include <asm/iSeries/HvLpConfig.h>
51#include <asm/iSeries/vio.h>
52
53MODULE_DESCRIPTION("iSeries Virtual DASD");
54MODULE_AUTHOR("Dave Boutcher");
55MODULE_LICENSE("GPL");
56
57/*
58 * We only support 7 partitions per physical disk....so with minor
59 * numbers 0-255 we get a maximum of 32 disks.
60 */
61#define VIOD_GENHD_NAME "iseries/vd"
62#define VIOD_GENHD_DEVFS_NAME "iseries/disc"
63
64#define VIOD_VERS "1.64"
65
66#define VIOD_KERN_WARNING KERN_WARNING "viod: "
67#define VIOD_KERN_INFO KERN_INFO "viod: "
68
69enum {
70 PARTITION_SHIFT = 3,
71 MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
72 MAX_DISK_NAME = sizeof(((struct gendisk *)0)->disk_name)
73};
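To spell out the arithmetic behind the comment above: PARTITION_SHIFT = 3 gives each disk 1 << 3 = 8 minor numbers (the whole device plus 7 partitions), so the 256 available minors (0-255) cover at most 256 / 8 = 32 disks; this is also why first_minor is computed as dev_no << PARTITION_SHIFT further down in this file.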
74
75static DEFINE_SPINLOCK(viodasd_spinlock);
76
77#define VIOMAXREQ 16
78#define VIOMAXBLOCKDMA 12
79
80#define DEVICE_NO(cell) ((struct viodasd_device *)(cell) - &viodasd_devices[0])
81
82struct open_data {
83 u64 disk_size;
84 u16 max_disk;
85 u16 cylinders;
86 u16 tracks;
87 u16 sectors;
88 u16 bytes_per_sector;
89};
90
91struct rw_data {
92 u64 offset;
93 struct {
94 u32 token;
95 u32 reserved;
96 u64 len;
97 } dma_info[VIOMAXBLOCKDMA];
98};
99
100struct vioblocklpevent {
101 struct HvLpEvent event;
102 u32 reserved;
103 u16 version;
104 u16 sub_result;
105 u16 disk;
106 u16 flags;
107 union {
108 struct open_data open_data;
109 struct rw_data rw_data;
110 u64 changed;
111 } u;
112};
113
114#define vioblockflags_ro 0x0001
115
116enum vioblocksubtype {
117 vioblockopen = 0x0001,
118 vioblockclose = 0x0002,
119 vioblockread = 0x0003,
120 vioblockwrite = 0x0004,
121 vioblockflush = 0x0005,
122 vioblockcheck = 0x0007
123};
124
125struct viodasd_waitevent {
126 struct completion com;
127 int rc;
128 u16 sub_result;
129 int max_disk; /* open */
130};
131
132static const struct vio_error_entry viodasd_err_table[] = {
133 { 0x0201, EINVAL, "Invalid Range" },
134 { 0x0202, EINVAL, "Invalid Token" },
135 { 0x0203, EIO, "DMA Error" },
136 { 0x0204, EIO, "Use Error" },
137 { 0x0205, EIO, "Release Error" },
138 { 0x0206, EINVAL, "Invalid Disk" },
139 { 0x0207, EBUSY, "Cant Lock" },
140 { 0x0208, EIO, "Already Locked" },
141 { 0x0209, EIO, "Already Unlocked" },
142 { 0x020A, EIO, "Invalid Arg" },
143 { 0x020B, EIO, "Bad IFS File" },
144 { 0x020C, EROFS, "Read Only Device" },
145 { 0x02FF, EIO, "Internal Error" },
146 { 0x0000, 0, NULL },
147};
148
149/*
150 * Figure out the biggest I/O request (in sectors) we can accept
151 */
152#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
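In other words, each of the VIOMAXBLOCKDMA = 12 DMA descriptors is assumed to cover at most 4096 bytes (8 sectors of 512 bytes), so the largest request accepted is 8 * 12 = 96 sectors, i.e. 48 KiB.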
153
154/*
155 * Number of disk I/O requests we've sent to OS/400
156 */
157static int num_req_outstanding;
158
159/*
160 * This is our internal structure for keeping track of disk devices
161 */
162struct viodasd_device {
163 u16 cylinders;
164 u16 tracks;
165 u16 sectors;
166 u16 bytes_per_sector;
167 u64 size;
168 int read_only;
169 spinlock_t q_lock;
170 struct gendisk *disk;
171 struct device *dev;
172} viodasd_devices[MAX_DISKNO];
173
174/*
175 * External open entry point.
176 */
177static int viodasd_open(struct inode *ino, struct file *fil)
178{
179 struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
180 HvLpEvent_Rc hvrc;
181 struct viodasd_waitevent we;
182 u16 flags = 0;
183
184 if (d->read_only) {
185 if ((fil != NULL) && (fil->f_mode & FMODE_WRITE))
186 return -EROFS;
187 flags = vioblockflags_ro;
188 }
189
190 init_completion(&we.com);
191
192 /* Send the open event to OS/400 */
193 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
194 HvLpEvent_Type_VirtualIo,
195 viomajorsubtype_blockio | vioblockopen,
196 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
197 viopath_sourceinst(viopath_hostLp),
198 viopath_targetinst(viopath_hostLp),
199 (u64)(unsigned long)&we, VIOVERSION << 16,
200 ((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
201 0, 0, 0);
202 if (hvrc != 0) {
203 printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
204 return -EIO;
205 }
206
207 wait_for_completion(&we.com);
208
209 /* Check the return code */
210 if (we.rc != 0) {
211 const struct vio_error_entry *err =
212 vio_lookup_rc(viodasd_err_table, we.sub_result);
213
214 printk(VIOD_KERN_WARNING
215 "bad rc opening disk: %d:0x%04x (%s)\n",
216 (int)we.rc, we.sub_result, err->msg);
217 return -EIO;
218 }
219
220 return 0;
221}
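The open path above shows the request/response pattern this driver relies on throughout: the sender parks a pointer to an on-stack wait structure (holding a struct completion) in the event's correlation token, sleeps in wait_for_completion(), and the event handler later casts the token back and calls complete(). A minimal sketch of that pattern, detached from the HvCallEvent API (all demo_* names below are illustrative, not part of this driver):

#include <linux/types.h>
#include <linux/completion.h>

struct demo_waiter {
	struct completion com;	/* sender sleeps on this */
	int rc;			/* filled in by the responder */
};

/* sender side: the pointer to the on-stack waiter travels as the token */
static int demo_send_and_wait(void (*send_event)(u64 token))
{
	struct demo_waiter w;

	init_completion(&w.com);
	send_event((u64)(unsigned long)&w);	/* hand the request off */
	wait_for_completion(&w.com);		/* block until the ack arrives */
	return w.rc;
}

/* responder/interrupt side: recover the waiter from the token and wake it */
static void demo_complete(u64 token, int rc)
{
	struct demo_waiter *w = (struct demo_waiter *)(unsigned long)token;

	w->rc = rc;
	complete(&w->com);
}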
222
223/*
224 * External release entry point.
225 */
226static int viodasd_release(struct inode *ino, struct file *fil)
227{
228 struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
229 HvLpEvent_Rc hvrc;
230
231 /* Send the event to OS/400. We DON'T expect a response */
232 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
233 HvLpEvent_Type_VirtualIo,
234 viomajorsubtype_blockio | vioblockclose,
235 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
236 viopath_sourceinst(viopath_hostLp),
237 viopath_targetinst(viopath_hostLp),
238 0, VIOVERSION << 16,
239 ((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
240 0, 0, 0);
241 if (hvrc != 0)
242 printk(VIOD_KERN_WARNING "HV close call failed %d\n",
243 (int)hvrc);
244 return 0;
245}
246
247
248/* External ioctl entry point.
249 */
250static int viodasd_ioctl(struct inode *ino, struct file *fil,
251 unsigned int cmd, unsigned long arg)
252{
253 unsigned char sectors;
254 unsigned char heads;
255 unsigned short cylinders;
256 struct hd_geometry *geo;
257 struct gendisk *gendisk;
258 struct viodasd_device *d;
259
260 switch (cmd) {
261 case HDIO_GETGEO:
262 geo = (struct hd_geometry *)arg;
263 if (geo == NULL)
264 return -EINVAL;
265 if (!access_ok(VERIFY_WRITE, geo, sizeof(*geo)))
266 return -EFAULT;
267 gendisk = ino->i_bdev->bd_disk;
268 d = gendisk->private_data;
269 sectors = d->sectors;
270 if (sectors == 0)
271 sectors = 32;
272 heads = d->tracks;
273 if (heads == 0)
274 heads = 64;
275 cylinders = d->cylinders;
276 if (cylinders == 0)
277 cylinders = get_capacity(gendisk) / (sectors * heads);
278 if (__put_user(sectors, &geo->sectors) ||
279 __put_user(heads, &geo->heads) ||
280 __put_user(cylinders, &geo->cylinders) ||
281 __put_user(get_start_sect(ino->i_bdev), &geo->start))
282 return -EFAULT;
283 return 0;
284 }
285
286 return -EINVAL;
287}
288
289/*
290 * Our file operations table
291 */
292static struct block_device_operations viodasd_fops = {
293 .owner = THIS_MODULE,
294 .open = viodasd_open,
295 .release = viodasd_release,
296 .ioctl = viodasd_ioctl,
297};
298
299/*
300 * End a request
301 */
302static void viodasd_end_request(struct request *req, int uptodate,
303 int num_sectors)
304{
305 if (end_that_request_first(req, uptodate, num_sectors))
306 return;
307 add_disk_randomness(req->rq_disk);
308 end_that_request_last(req);
309}
310
311/*
312 * Send an actual I/O request to OS/400
313 */
314static int send_request(struct request *req)
315{
316 u64 start;
317 int direction;
318 int nsg;
319 u16 viocmd;
320 HvLpEvent_Rc hvrc;
321 struct vioblocklpevent *bevent;
322 struct scatterlist sg[VIOMAXBLOCKDMA];
323 int sgindex;
324 int statindex;
325 struct viodasd_device *d;
326 unsigned long flags;
327
328 start = (u64)req->sector << 9;
329
330 if (rq_data_dir(req) == READ) {
331 direction = DMA_FROM_DEVICE;
332 viocmd = viomajorsubtype_blockio | vioblockread;
333 statindex = 0;
334 } else {
335 direction = DMA_TO_DEVICE;
336 viocmd = viomajorsubtype_blockio | vioblockwrite;
337 statindex = 1;
338 }
339
340 d = req->rq_disk->private_data;
341
342 /* Now build the scatter-gather list */
343 nsg = blk_rq_map_sg(req->q, req, sg);
344 nsg = dma_map_sg(d->dev, sg, nsg, direction);
345
346 spin_lock_irqsave(&viodasd_spinlock, flags);
347 num_req_outstanding++;
348
349 /* This optimization handles a single DMA block */
350 if (nsg == 1)
351 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
352 HvLpEvent_Type_VirtualIo, viocmd,
353 HvLpEvent_AckInd_DoAck,
354 HvLpEvent_AckType_ImmediateAck,
355 viopath_sourceinst(viopath_hostLp),
356 viopath_targetinst(viopath_hostLp),
357 (u64)(unsigned long)req, VIOVERSION << 16,
358 ((u64)DEVICE_NO(d) << 48), start,
359 ((u64)sg_dma_address(&sg[0])) << 32,
360 sg_dma_len(&sg[0]));
361 else {
362 bevent = (struct vioblocklpevent *)
363 vio_get_event_buffer(viomajorsubtype_blockio);
364 if (bevent == NULL) {
365 printk(VIOD_KERN_WARNING
366 "error allocating disk event buffer\n");
367 goto error_ret;
368 }
369
370 /*
371 * Now build up the actual request. Note that we store
372 * the pointer to the request in the correlation
373 * token so we can match the response up later
374 */
375 memset(bevent, 0, sizeof(struct vioblocklpevent));
376 bevent->event.xFlags.xValid = 1;
377 bevent->event.xFlags.xFunction = HvLpEvent_Function_Int;
378 bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
379 bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
380 bevent->event.xType = HvLpEvent_Type_VirtualIo;
381 bevent->event.xSubtype = viocmd;
382 bevent->event.xSourceLp = HvLpConfig_getLpIndex();
383 bevent->event.xTargetLp = viopath_hostLp;
384 bevent->event.xSizeMinus1 =
385 offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
386 (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
387 bevent->event.xSourceInstanceId =
388 viopath_sourceinst(viopath_hostLp);
389 bevent->event.xTargetInstanceId =
390 viopath_targetinst(viopath_hostLp);
391 bevent->event.xCorrelationToken = (u64)req;
392 bevent->version = VIOVERSION;
393 bevent->disk = DEVICE_NO(d);
394 bevent->u.rw_data.offset = start;
395
396 /*
397 * Copy just the dma information from the sg list
398 * into the request
399 */
400 for (sgindex = 0; sgindex < nsg; sgindex++) {
401 bevent->u.rw_data.dma_info[sgindex].token =
402 sg_dma_address(&sg[sgindex]);
403 bevent->u.rw_data.dma_info[sgindex].len =
404 sg_dma_len(&sg[sgindex]);
405 }
406
407 /* Send the request */
408 hvrc = HvCallEvent_signalLpEvent(&bevent->event);
409 vio_free_event_buffer(viomajorsubtype_blockio, bevent);
410 }
411
412 if (hvrc != HvLpEvent_Rc_Good) {
413 printk(VIOD_KERN_WARNING
414 "error sending disk event to OS/400 (rc %d)\n",
415 (int)hvrc);
416 goto error_ret;
417 }
418 spin_unlock_irqrestore(&viodasd_spinlock, flags);
419 return 0;
420
421error_ret:
422 num_req_outstanding--;
423 spin_unlock_irqrestore(&viodasd_spinlock, flags);
424 dma_unmap_sg(d->dev, sg, nsg, direction);
425 return -1;
426}
427
428/*
429 * This is the external request processing routine
430 */
431static void do_viodasd_request(request_queue_t *q)
432{
433 struct request *req;
434
435 /*
436 * If we already have the maximum number of requests
437 * outstanding to OS/400 just bail out. We'll come
438 * back later.
439 */
440 while (num_req_outstanding < VIOMAXREQ) {
441 req = elv_next_request(q);
442 if (req == NULL)
443 return;
444 /* dequeue the current request from the queue */
445 blkdev_dequeue_request(req);
446 /* check that request contains a valid command */
447 if (!blk_fs_request(req)) {
448 viodasd_end_request(req, 0, req->hard_nr_sectors);
449 continue;
450 }
451 /* Try sending the request */
452 if (send_request(req) != 0)
453 viodasd_end_request(req, 0, req->hard_nr_sectors);
454 }
455}
456
457/*
458 * Probe a single disk and fill in the viodasd_device structure
459 * for it.
460 */
461static void probe_disk(struct viodasd_device *d)
462{
463 HvLpEvent_Rc hvrc;
464 struct viodasd_waitevent we;
465 int dev_no = DEVICE_NO(d);
466 struct gendisk *g;
467 struct request_queue *q;
468 u16 flags = 0;
469
470retry:
471 init_completion(&we.com);
472
473 /* Send the open event to OS/400 */
474 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
475 HvLpEvent_Type_VirtualIo,
476 viomajorsubtype_blockio | vioblockopen,
477 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
478 viopath_sourceinst(viopath_hostLp),
479 viopath_targetinst(viopath_hostLp),
480 (u64)(unsigned long)&we, VIOVERSION << 16,
481 ((u64)dev_no << 48) | ((u64)flags<< 32),
482 0, 0, 0);
483 if (hvrc != 0) {
484 printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
485 return;
486 }
487
488 wait_for_completion(&we.com);
489
490 if (we.rc != 0) {
491 if (flags != 0)
492 return;
493 /* try again with read only flag set */
494 flags = vioblockflags_ro;
495 goto retry;
496 }
497 if (we.max_disk > (MAX_DISKNO - 1)) {
498 static int warned;
499
500 if (warned == 0) {
501 warned++;
502 printk(VIOD_KERN_INFO
503 "Only examining the first %d "
504 "of %d disks connected\n",
505 MAX_DISKNO, we.max_disk + 1);
506 }
507 }
508
509 /* Send the close event to OS/400. We DON'T expect a response */
510 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
511 HvLpEvent_Type_VirtualIo,
512 viomajorsubtype_blockio | vioblockclose,
513 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
514 viopath_sourceinst(viopath_hostLp),
515 viopath_targetinst(viopath_hostLp),
516 0, VIOVERSION << 16,
517 ((u64)dev_no << 48) | ((u64)flags << 32),
518 0, 0, 0);
519 if (hvrc != 0) {
520 printk(VIOD_KERN_WARNING
521 "bad rc sending event to OS/400 %d\n", (int)hvrc);
522 return;
523 }
524 /* create the request queue for the disk */
525 spin_lock_init(&d->q_lock);
526 q = blk_init_queue(do_viodasd_request, &d->q_lock);
527 if (q == NULL) {
528 printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
529 dev_no);
530 return;
531 }
532 g = alloc_disk(1 << PARTITION_SHIFT);
533 if (g == NULL) {
534 printk(VIOD_KERN_WARNING
535 "cannot allocate disk structure for disk %d\n",
536 dev_no);
537 blk_cleanup_queue(q);
538 return;
539 }
540
541 d->disk = g;
542 blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
543 blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
544 blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
545 g->major = VIODASD_MAJOR;
546 g->first_minor = dev_no << PARTITION_SHIFT;
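	/*
	 * Disk naming: dev_no 0-25 become "iseries/vda".."iseries/vdz";
	 * with the 32-disk maximum noted at the top of this file, dev_no
	 * 26-31 fall into the two-letter form "iseries/vdaa".."iseries/vdaf".
	 */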
547 if (dev_no >= 26)
548 snprintf(g->disk_name, sizeof(g->disk_name),
549 VIOD_GENHD_NAME "%c%c",
550 'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
551 else
552 snprintf(g->disk_name, sizeof(g->disk_name),
553 VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
554 snprintf(g->devfs_name, sizeof(g->devfs_name),
555 "%s%d", VIOD_GENHD_DEVFS_NAME, dev_no);
556 g->fops = &viodasd_fops;
557 g->queue = q;
558 g->private_data = d;
559 g->driverfs_dev = d->dev;
560 set_capacity(g, d->size >> 9);
561
562 printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
563 "CHS=%d/%d/%d sector size %d%s\n",
564 dev_no, (unsigned long)(d->size >> 9),
565 (unsigned long)(d->size >> 20),
566 (int)d->cylinders, (int)d->tracks,
567 (int)d->sectors, (int)d->bytes_per_sector,
568 d->read_only ? " (RO)" : "");
569
570 /* register us in the global list */
571 add_disk(g);
572}
573
574/* returns the total number of scatterlist elements converted */
575static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
576 struct scatterlist *sg, int *total_len)
577{
578 int i, numsg;
579 const struct rw_data *rw_data = &bevent->u.rw_data;
580 static const int offset =
581 offsetof(struct vioblocklpevent, u.rw_data.dma_info);
582 static const int element_size = sizeof(rw_data->dma_info[0]);
583
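	/*
	 * The event length (xSizeMinus1 + 1) covers the fixed header plus
	 * however many dma_info entries the sender filled in; subtracting
	 * the header offset and dividing by the entry size recovers the
	 * number of scatterlist elements.
	 */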
584 numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
585 if (numsg > VIOMAXBLOCKDMA)
586 numsg = VIOMAXBLOCKDMA;
587
588 *total_len = 0;
589 memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA);
590
591 for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
592 sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
593 sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
594 *total_len += rw_data->dma_info[i].len;
595 }
596 return i;
597}
598
599/*
600 * Restart all queues, starting with the one _after_ the disk given,
601 * thus reducing the chance of starvation of higher numbered disks.
602 */
603static void viodasd_restart_all_queues_starting_from(int first_index)
604{
605 int i;
606
607 for (i = first_index + 1; i < MAX_DISKNO; ++i)
608 if (viodasd_devices[i].disk)
609 blk_run_queue(viodasd_devices[i].disk->queue);
610 for (i = 0; i <= first_index; ++i)
611 if (viodasd_devices[i].disk)
612 blk_run_queue(viodasd_devices[i].disk->queue);
613}
614
615/*
 616 * For read and write requests, decrement the number of outstanding requests
 617 * and free the DMA buffers we allocated.
618 */
619static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
620{
621 int num_sg, num_sect, pci_direction, total_len;
622 struct request *req;
623 struct scatterlist sg[VIOMAXBLOCKDMA];
624 struct HvLpEvent *event = &bevent->event;
625 unsigned long irq_flags;
626 struct viodasd_device *d;
627 int error;
628 spinlock_t *qlock;
629
630 num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
631 num_sect = total_len >> 9;
632 if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
633 pci_direction = DMA_FROM_DEVICE;
634 else
635 pci_direction = DMA_TO_DEVICE;
636 req = (struct request *)bevent->event.xCorrelationToken;
637 d = req->rq_disk->private_data;
638
639 dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
640
641 /*
642 * Since this is running in interrupt mode, we need to make sure
643 * we're not stepping on any global I/O operations
644 */
645 spin_lock_irqsave(&viodasd_spinlock, irq_flags);
646 num_req_outstanding--;
647 spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
648
649 error = event->xRc != HvLpEvent_Rc_Good;
650 if (error) {
651 const struct vio_error_entry *err;
652 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
653 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
654 event->xRc, bevent->sub_result, err->msg);
655 num_sect = req->hard_nr_sectors;
656 }
657 qlock = req->q->queue_lock;
658 spin_lock_irqsave(qlock, irq_flags);
659 viodasd_end_request(req, !error, num_sect);
660 spin_unlock_irqrestore(qlock, irq_flags);
661
662 /* Finally, try to get more requests off of this device's queue */
663 viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
664
665 return 0;
666}
667
668/* This routine handles incoming block LP events */
669static void handle_block_event(struct HvLpEvent *event)
670{
671 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
672 struct viodasd_waitevent *pwe;
673
674 if (event == NULL)
675 /* Notification that a partition went away! */
676 return;
677 /* First, we should NEVER get an int here...only acks */
678 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
679 printk(VIOD_KERN_WARNING
680 "Yikes! got an int in viodasd event handler!\n");
681 if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
682 event->xRc = HvLpEvent_Rc_InvalidSubtype;
683 HvCallEvent_ackLpEvent(event);
684 }
685 }
686
687 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
688 case vioblockopen:
689 /*
690 * Handle a response to an open request. We get all the
691 * disk information in the response, so update it. The
692 * correlation token contains a pointer to a waitevent
 693 * structure that has a completion in it. Update the
 694 * return code in the waitevent structure and post the
 695 * completion to wake up the task that sent the request.
696 */
697 pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
698 pwe->rc = event->xRc;
699 pwe->sub_result = bevent->sub_result;
700 if (event->xRc == HvLpEvent_Rc_Good) {
701 const struct open_data *data = &bevent->u.open_data;
702 struct viodasd_device *device =
703 &viodasd_devices[bevent->disk];
704 device->read_only =
705 bevent->flags & vioblockflags_ro;
706 device->size = data->disk_size;
707 device->cylinders = data->cylinders;
708 device->tracks = data->tracks;
709 device->sectors = data->sectors;
710 device->bytes_per_sector = data->bytes_per_sector;
711 pwe->max_disk = data->max_disk;
712 }
713 complete(&pwe->com);
714 break;
715 case vioblockclose:
716 break;
717 case vioblockread:
718 case vioblockwrite:
719 viodasd_handle_read_write(bevent);
720 break;
721
722 default:
723 printk(VIOD_KERN_WARNING "invalid subtype!");
724 if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
725 event->xRc = HvLpEvent_Rc_InvalidSubtype;
726 HvCallEvent_ackLpEvent(event);
727 }
728 }
729}
730
731/*
732 * Get the driver to reprobe for more disks.
733 */
734static ssize_t probe_disks(struct device_driver *drv, const char *buf,
735 size_t count)
736{
737 struct viodasd_device *d;
738
739 for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
740 if (d->disk == NULL)
741 probe_disk(d);
742 }
743 return count;
744}
745static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
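This registers a write-only driver attribute, so once the module is loaded a rescan for new virtual disks can be triggered from user space by writing anything to the driver's probe file in sysfs (presumably /sys/bus/vio/drivers/viodasd/probe on this kernel, though the exact path depends on how the vio bus registers its drivers).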
746
747static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
748{
749 struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
750
751 d->dev = &vdev->dev;
752 probe_disk(d);
753 if (d->disk == NULL)
754 return -ENODEV;
755 return 0;
756}
757
758static int viodasd_remove(struct vio_dev *vdev)
759{
760 struct viodasd_device *d;
761
762 d = &viodasd_devices[vdev->unit_address];
763 if (d->disk) {
764 del_gendisk(d->disk);
765 blk_cleanup_queue(d->disk->queue);
766 put_disk(d->disk);
767 d->disk = NULL;
768 }
769 d->dev = NULL;
770 return 0;
771}
772
773/**
774 * viodasd_device_table: Used by vio.c to match devices that we
775 * support.
776 */
777static struct vio_device_id viodasd_device_table[] __devinitdata = {
778 { "viodasd", "" },
779 { 0, }
780};
781
782MODULE_DEVICE_TABLE(vio, viodasd_device_table);
783static struct vio_driver viodasd_driver = {
784 .name = "viodasd",
785 .id_table = viodasd_device_table,
786 .probe = viodasd_probe,
787 .remove = viodasd_remove
788};
789
790/*
791 * Initialize the whole device driver. Handle module and non-module
792 * versions
793 */
794static int __init viodasd_init(void)
795{
796 int rc;
797
798 /* Try to open to our host lp */
799 if (viopath_hostLp == HvLpIndexInvalid)
800 vio_set_hostlp();
801
802 if (viopath_hostLp == HvLpIndexInvalid) {
803 printk(VIOD_KERN_WARNING "invalid hosting partition\n");
804 return -EIO;
805 }
806
807 printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
808 viopath_hostLp);
809
810 /* register the block device */
811 if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) {
812 printk(VIOD_KERN_WARNING
813 "Unable to get major number %d for %s\n",
814 VIODASD_MAJOR, VIOD_GENHD_NAME);
815 return -EIO;
816 }
817 /* Actually open the path to the hosting partition */
818 if (viopath_open(viopath_hostLp, viomajorsubtype_blockio,
819 VIOMAXREQ + 2)) {
820 printk(VIOD_KERN_WARNING
821 "error opening path to host partition %d\n",
822 viopath_hostLp);
823 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
824 return -EIO;
825 }
826
827 /* Initialize our request handler */
828 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
829
830 rc = vio_register_driver(&viodasd_driver);
831 if (rc == 0)
832 driver_create_file(&viodasd_driver.driver, &driver_attr_probe);
833 return rc;
834}
835module_init(viodasd_init);
836
837void viodasd_exit(void)
838{
839 driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
840 vio_unregister_driver(&viodasd_driver);
841 vio_clearHandler(viomajorsubtype_blockio);
842 unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
843 viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
844}
845
846module_exit(viodasd_exit);
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
new file mode 100644
index 000000000000..1676033da6c6
--- /dev/null
+++ b/drivers/block/xd.c
@@ -0,0 +1,1112 @@
1/*
2 * This file contains the driver for an XT hard disk controller
3 * (at least the DTC 5150X) for Linux.
4 *
5 * Author: Pat Mackinlay, pat@it.com.au
6 * Date: 29/09/92
7 *
8 * Revised: 01/01/93, ...
9 *
10 * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler,
11 * kevinf@agora.rain.com)
12 * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and
13 * Wim Van Dorst.
14 *
15 * Revised: 04/04/94 by Risto Kankkunen
16 * Moved the detection code from xd_init() to xd_geninit() as it needed
17 * interrupts enabled and Linus didn't want to enable them in that first
18 * phase. xd_geninit() is the place to do these kinds of things anyway,
19 * he says.
20 *
21 * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
22 *
23 * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
24 * Fixed some problems with disk initialization and module initiation.
25 * Added support for manual geometry setting (except Seagate controllers)
26 * in form:
27 * xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
28 * Recovered DMA access. Abridged messages. Added support for DTC5051CX,
 29 * WD1002-27X & XEBEC controllers. The driver now uses some jumper settings.
30 * Extended ioctl() support.
31 *
32 * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
33 *
34 */
35
36#include <linux/module.h>
37#include <linux/errno.h>
38#include <linux/interrupt.h>
39#include <linux/mm.h>
40#include <linux/fs.h>
41#include <linux/kernel.h>
42#include <linux/timer.h>
43#include <linux/genhd.h>
44#include <linux/hdreg.h>
45#include <linux/ioport.h>
46#include <linux/init.h>
47#include <linux/wait.h>
48#include <linux/blkdev.h>
49#include <linux/blkpg.h>
50
51#include <asm/system.h>
52#include <asm/io.h>
53#include <asm/uaccess.h>
54#include <asm/dma.h>
55
56#include "xd.h"
57
58static void __init do_xd_setup (int *integers);
59#ifdef MODULE
60static int xd[5] = { -1,-1,-1,-1, };
61#endif
62
 63#define XD_DONT_USE_DMA 0 /* Initial value. May be overridden using
64 "nodma" module option */
65#define XD_INIT_DISK_DELAY (30*HZ/1000) /* 30 ms delay during disk initialization */
66
 67/* The above delay may need to be increased if problems appear with detection of
 68 the second drive (ST11M controller) or with resetting a (WD) controller */
69
70static XD_INFO xd_info[XD_MAXDRIVES];
71
72/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
73 signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
74 few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
75 command. Run DEBUG, and then you can examine your BIOS signature with:
76
77 d xxxx:0000
78
79 where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
80 be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
81 in the table are, in order:
82
83 offset ; this is the offset (in bytes) from the start of your ROM where the signature starts
84 signature ; this is the actual text of the signature
85 xd_?_init_controller ; this is the controller init routine used by your controller
86 xd_?_init_drive ; this is the drive init routine used by your controller
87
88 The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
89 made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
90 best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
91 may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
92
93 NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
94 should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
95
96#include <asm/page.h>
97#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
98#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
99static char *xd_dma_buffer;
100
101static XD_SIGNATURE xd_sigs[] __initdata = {
102 { 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
103 { 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
104 { 0x000B,"CRD18A Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
105 { 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
106 { 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
107 { 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
108 { 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
109 { 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
110 { 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
111 { 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
112 { 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
113 { 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
114 { 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
115};
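Following the recipe in the comment above, supporting a new card means appending one more entry to xd_sigs[]: the ROM offset of the signature, the signature text, the controller and drive init routines to reuse, and the name that completes the "Detected a%s controller" message printed from xd_init() (hence the leading "n " on names that start with a vowel). A purely hypothetical entry reusing the override routines might look like this (the offset and signature string below are made up):

	{ 0x0010,"ACME (C) 1987 XT BIOS",NULL,xd_override_init_drive,"n ACME (override)" },	/* hypothetical example only */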
116
117static unsigned int xd_bases[] __initdata =
118{
119 0xC8000, 0xCA000, 0xCC000,
120 0xCE000, 0xD0000, 0xD2000,
121 0xD4000, 0xD6000, 0xD8000,
122 0xDA000, 0xDC000, 0xDE000,
123 0xE0000
124};
125
126static DEFINE_SPINLOCK(xd_lock);
127
128static struct gendisk *xd_gendisk[2];
129
130static struct block_device_operations xd_fops = {
131 .owner = THIS_MODULE,
132 .ioctl = xd_ioctl,
133};
134static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
135static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
136static u_char xd_override __initdata = 0, xd_type __initdata = 0;
137static u_short xd_iobase = 0x320;
138static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };
139
140static volatile int xdc_busy;
141static struct timer_list xd_watchdog_int;
142
143static volatile u_char xd_error;
144static int nodma = XD_DONT_USE_DMA;
145
146static struct request_queue *xd_queue;
147
148/* xd_init: register the block device number and set up pointer tables */
149static int __init xd_init(void)
150{
151 u_char i,controller;
152 unsigned int address;
153 int err;
154
155#ifdef MODULE
156 {
157 u_char count = 0;
158 for (i = 4; i > 0; i--)
159 if (((xd[i] = xd[i-1]) >= 0) && !count)
160 count = i;
161 if ((xd[0] = count))
162 do_xd_setup(xd);
163 }
164#endif
165
166 init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;
167
168 if (!xd_dma_buffer)
169 xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
170 if (!xd_dma_buffer) {
171 printk(KERN_ERR "xd: Out of memory.\n");
172 return -ENOMEM;
173 }
174
175 err = -EBUSY;
176 if (register_blkdev(XT_DISK_MAJOR, "xd"))
177 goto out1;
178
179 err = -ENOMEM;
180 xd_queue = blk_init_queue(do_xd_request, &xd_lock);
181 if (!xd_queue)
182 goto out1a;
183
184 if (xd_detect(&controller,&address)) {
185
186 printk("Detected a%s controller (type %d) at address %06x\n",
187 xd_sigs[controller].name,controller,address);
188 if (!request_region(xd_iobase,4,"xd")) {
189 printk("xd: Ports at 0x%x are not available\n",
190 xd_iobase);
191 goto out2;
192 }
193 if (controller)
194 xd_sigs[controller].init_controller(address);
195 xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
196
197 printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
198 xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
199 }
200
201 err = -ENODEV;
202 if (!xd_drives)
203 goto out3;
204
205 for (i = 0; i < xd_drives; i++) {
206 XD_INFO *p = &xd_info[i];
207 struct gendisk *disk = alloc_disk(64);
208 if (!disk)
209 goto Enomem;
210 p->unit = i;
211 disk->major = XT_DISK_MAJOR;
212 disk->first_minor = i<<6;
213 sprintf(disk->disk_name, "xd%c", i+'a');
214 sprintf(disk->devfs_name, "xd/target%d", i);
215 disk->fops = &xd_fops;
216 disk->private_data = p;
217 disk->queue = xd_queue;
218 set_capacity(disk, p->heads * p->cylinders * p->sectors);
219 printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
220 p->cylinders, p->heads, p->sectors);
221 xd_gendisk[i] = disk;
222 }
223
224 err = -EBUSY;
225 if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
226 printk("xd: unable to get IRQ%d\n",xd_irq);
227 goto out4;
228 }
229
230 if (request_dma(xd_dma,"xd")) {
231 printk("xd: unable to get DMA%d\n",xd_dma);
232 goto out5;
233 }
234
235 /* xd_maxsectors depends on controller - so set after detection */
236 blk_queue_max_sectors(xd_queue, xd_maxsectors);
237
238 for (i = 0; i < xd_drives; i++)
239 add_disk(xd_gendisk[i]);
240
241 return 0;
242
243out5:
244 free_irq(xd_irq, NULL);
245out4:
246 for (i = 0; i < xd_drives; i++)
247 put_disk(xd_gendisk[i]);
248out3:
249 release_region(xd_iobase,4);
250out2:
251 blk_cleanup_queue(xd_queue);
252out1a:
253 unregister_blkdev(XT_DISK_MAJOR, "xd");
254out1:
255 if (xd_dma_buffer)
256 xd_dma_mem_free((unsigned long)xd_dma_buffer,
257 xd_maxsectors * 0x200);
258 return err;
259Enomem:
260 err = -ENOMEM;
261 while (i--)
262 put_disk(xd_gendisk[i]);
263 goto out3;
264}
265
266/* xd_detect: scan the possible BIOS ROM locations for the signature strings */
267static u_char __init xd_detect (u_char *controller, unsigned int *address)
268{
269 int i, j;
270
271 if (xd_override)
272 {
273 *controller = xd_type;
274 *address = 0;
275 return(1);
276 }
277
278 for (i = 0; i < (sizeof(xd_bases) / sizeof(xd_bases[0])); i++) {
279 void __iomem *p = ioremap(xd_bases[i], 0x2000);
280 if (!p)
281 continue;
282 for (j = 1; j < (sizeof(xd_sigs) / sizeof(xd_sigs[0])); j++) {
283 const char *s = xd_sigs[j].string;
284 if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
285 *controller = j;
286 xd_type = j;
287 *address = xd_bases[i];
288 iounmap(p);
289 return 1;
290 }
291 }
292 iounmap(p);
293 }
294 return 0;
295}
296
297/* do_xd_request: handle an incoming request */
298static void do_xd_request (request_queue_t * q)
299{
300 struct request *req;
301
302 if (xdc_busy)
303 return;
304
305 while ((req = elv_next_request(q)) != NULL) {
306 unsigned block = req->sector;
307 unsigned count = req->nr_sectors;
308 int rw = rq_data_dir(req);
309 XD_INFO *disk = req->rq_disk->private_data;
310 int res = 0;
311 int retry;
312
313 if (!(req->flags & REQ_CMD)) {
314 end_request(req, 0);
315 continue;
316 }
317 if (block + count > get_capacity(req->rq_disk)) {
318 end_request(req, 0);
319 continue;
320 }
321 if (rw != READ && rw != WRITE) {
322 printk("do_xd_request: unknown request\n");
323 end_request(req, 0);
324 continue;
325 }
326 for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
327 res = xd_readwrite(rw, disk, req->buffer, block, count);
328 end_request(req, res); /* wrap up, 0 = fail, 1 = success */
329 }
330}
331
332/* xd_ioctl: handle device ioctl's */
333static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
334{
335 XD_INFO *p = inode->i_bdev->bd_disk->private_data;
336
337 switch (cmd) {
338 case HDIO_GETGEO:
339 {
340 struct hd_geometry g;
341 struct hd_geometry __user *geom= (void __user *)arg;
342 g.heads = p->heads;
343 g.sectors = p->sectors;
344 g.cylinders = p->cylinders;
345 g.start = get_start_sect(inode->i_bdev);
346 return copy_to_user(geom, &g, sizeof(g)) ? -EFAULT : 0;
347 }
348 case HDIO_SET_DMA:
349 if (!capable(CAP_SYS_ADMIN)) return -EACCES;
350 if (xdc_busy) return -EBUSY;
351 nodma = !arg;
352 if (nodma && xd_dma_buffer) {
353 xd_dma_mem_free((unsigned long)xd_dma_buffer,
354 xd_maxsectors * 0x200);
355 xd_dma_buffer = NULL;
356 } else if (!nodma && !xd_dma_buffer) {
357 xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
358 if (!xd_dma_buffer) {
359 nodma = XD_DONT_USE_DMA;
360 return -ENOMEM;
361 }
362 }
363 return 0;
364 case HDIO_GET_DMA:
365 return put_user(!nodma, (long __user *) arg);
366 case HDIO_GET_MULTCOUNT:
367 return put_user(xd_maxsectors, (long __user *) arg);
368 default:
369 return -EINVAL;
370 }
371}
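HDIO_GETGEO above is the standard way for user space to ask a block driver for its (possibly synthetic) CHS geometry. A small stand-alone caller, for illustration (the device node is just an example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry g;
	int fd = open("/dev/xda", O_RDONLY);	/* example device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_GETGEO, &g) < 0) {
		perror("HDIO_GETGEO");
		close(fd);
		return 1;
	}
	printf("CHS=%d/%d/%d start=%lu\n",
	       g.cylinders, g.heads, g.sectors, g.start);
	close(fd);
	return 0;
}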
372
373/* xd_readwrite: handle a read/write request */
374static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
375{
376 int drive = p->unit;
377 u_char cmdblk[6],sense[4];
378 u_short track,cylinder;
379 u_char head,sector,control,mode = PIO_MODE,temp;
380 char **real_buffer;
381 register int i;
382
383#ifdef DEBUG_READWRITE
384 printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
385#endif /* DEBUG_READWRITE */
386
387 spin_unlock_irq(&xd_lock);
388
389 control = p->control;
390 if (!xd_dma_buffer)
391 xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
392 while (count) {
393 temp = count < xd_maxsectors ? count : xd_maxsectors;
394
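		/*
		 * Linear block -> CHS translation.  Example: with 4 heads and
		 * 17 sectors per track, block 1000 gives track 58, hence head
		 * 58 % 4 = 2, cylinder 58 / 4 = 14 and sector 1000 % 17 = 14.
		 */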
395 track = block / p->sectors;
396 head = track % p->heads;
397 cylinder = track / p->heads;
398 sector = block % p->sectors;
399
400#ifdef DEBUG_READWRITE
401 printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
402#endif /* DEBUG_READWRITE */
403
404 if (xd_dma_buffer) {
405 mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
406 real_buffer = &xd_dma_buffer;
407 for (i=0; i < (temp * 0x200); i++)
408 xd_dma_buffer[i] = buffer[i];
409 }
410 else
411 real_buffer = &buffer;
412
413 xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
414
415 switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
416 case 1:
417 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
418 xd_recalibrate(drive);
419 spin_lock_irq(&xd_lock);
420 return (0);
421 case 2:
422 if (sense[0] & 0x30) {
423 printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
424 switch ((sense[0] & 0x30) >> 4) {
425 case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F);
426 break;
427 case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F);
428 break;
429 case 2: printk("command error, code = 0x%X",sense[0] & 0x0F);
430 break;
431 case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F);
432 break;
433 }
434 }
435 if (sense[0] & 0x80)
436 printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F);
437 /* reported drive number = (sense[1] & 0xE0) >> 5 */
438 else
439 printk(" - no valid disk address\n");
440 spin_lock_irq(&xd_lock);
441 return (0);
442 }
443 if (xd_dma_buffer)
444 for (i=0; i < (temp * 0x200); i++)
445 buffer[i] = xd_dma_buffer[i];
446
447 count -= temp, buffer += temp * 0x200, block += temp;
448 }
449 spin_lock_irq(&xd_lock);
450 return (1);
451}
452
453/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
454static void xd_recalibrate (u_char drive)
455{
456 u_char cmdblk[6];
457
458 xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0);
459 if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8))
460 printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive);
461}
462
463/* xd_interrupt_handler: interrupt service routine */
464static irqreturn_t xd_interrupt_handler(int irq, void *dev_id,
465 struct pt_regs *regs)
466{
467 if (inb(XD_STATUS) & STAT_INTERRUPT) { /* check if it was our device */
468#ifdef DEBUG_OTHER
469 printk("xd_interrupt_handler: interrupt detected\n");
470#endif /* DEBUG_OTHER */
471 outb(0,XD_CONTROL); /* acknowledge interrupt */
472 wake_up(&xd_wait_int); /* and wake up sleeping processes */
473 return IRQ_HANDLED;
474 }
475 else
476 printk("xd: unexpected interrupt\n");
477 return IRQ_NONE;
478}
479
480/* xd_setup_dma: set up the DMA controller for a data transfer */
481static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
482{
483 unsigned long f;
484
485 if (nodma)
486 return (PIO_MODE);
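	/*
	 * The ISA DMA controller's address counter is only 16 bits wide (the
	 * page register supplies the upper bits), so a single transfer must
	 * not cross a 64 KiB physical boundary; fall back to PIO if it would.
	 */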
487 if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) {
488#ifdef DEBUG_OTHER
489 printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
490#endif /* DEBUG_OTHER */
491 return (PIO_MODE);
492 }
493
494 f=claim_dma_lock();
495 disable_dma(xd_dma);
496 clear_dma_ff(xd_dma);
497 set_dma_mode(xd_dma,mode);
498 set_dma_addr(xd_dma, (unsigned long) buffer);
499 set_dma_count(xd_dma,count);
500
501 release_dma_lock(f);
502
503 return (DMA_MODE); /* use DMA and INT */
504}
505
506/* xd_build: put stuff into an array in a format suitable for the controller */
507static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control)
508{
509 cmdblk[0] = command;
510 cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F);
511 cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F);
512 cmdblk[3] = cylinder & 0xFF;
513 cmdblk[4] = count;
514 cmdblk[5] = control;
515
516 return (cmdblk);
517}
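To make the bit packing above concrete: for drive 1, head 3, cylinder 0x234 and sector 9, cmdblk[1] = (1 << 5) | 3 = 0x23, cmdblk[2] = ((0x234 & 0x300) >> 2) | 9 = 0x89 and cmdblk[3] = 0x34 - the two high cylinder bits ride in the top of the sector byte, leaving room for cylinders up to 1023 in the 6-byte command block.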
518
519static void xd_watchdog (unsigned long unused)
520{
521 xd_error = 1;
522 wake_up(&xd_wait_int);
523}
524
525/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
526static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
527{
528 u_long expiry = jiffies + timeout;
529 int success;
530
531 xdc_busy = 1;
532 while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
533 set_current_state(TASK_UNINTERRUPTIBLE);
534 schedule_timeout(1);
535 }
536 xdc_busy = 0;
537 return (success);
538}
539
540static inline u_int xd_wait_for_IRQ (void)
541{
542 unsigned long flags;
543 xd_watchdog_int.expires = jiffies + 8 * HZ;
544 add_timer(&xd_watchdog_int);
545
546 flags=claim_dma_lock();
547 enable_dma(xd_dma);
548 release_dma_lock(flags);
549
550 sleep_on(&xd_wait_int);
551 del_timer(&xd_watchdog_int);
552 xdc_busy = 0;
553
554 flags=claim_dma_lock();
555 disable_dma(xd_dma);
556 release_dma_lock(flags);
557
558 if (xd_error) {
559 printk("xd: missed IRQ - command aborted\n");
560 xd_error = 0;
561 return (1);
562 }
563 return (0);
564}
565
566/* xd_command: handle all data transfers necessary for a single command */
567static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
568{
569 u_char cmdblk[6],csb,complete = 0;
570
571#ifdef DEBUG_COMMAND
572 printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
573#endif /* DEBUG_COMMAND */
574
575 outb(0,XD_SELECT);
576 outb(mode,XD_CONTROL);
577
578 if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
579 return (1);
580
581 while (!complete) {
582 if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
583 return (1);
584
585 switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
586 case 0:
587 if (mode == DMA_MODE) {
588 if (xd_wait_for_IRQ())
589 return (1);
590 } else
591 outb(outdata ? *outdata++ : 0,XD_DATA);
592 break;
593 case STAT_INPUT:
594 if (mode == DMA_MODE) {
595 if (xd_wait_for_IRQ())
596 return (1);
597 } else
598 if (indata)
599 *indata++ = inb(XD_DATA);
600 else
601 inb(XD_DATA);
602 break;
603 case STAT_COMMAND:
604 outb(command ? *command++ : 0,XD_DATA);
605 break;
606 case STAT_COMMAND | STAT_INPUT:
607 complete = 1;
608 break;
609 }
610 }
611 csb = inb(XD_DATA);
612
613 if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout)) /* wait until deselected */
614 return (1);
615
616 if (csb & CSB_ERROR) { /* read sense data if error */
617 xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
618 if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
619 printk("xd: warning! sense command failed!\n");
620 }
621
622#ifdef DEBUG_COMMAND
623 printk("xd_command: completed with csb = 0x%X\n",csb);
624#endif /* DEBUG_COMMAND */
625
626 return (csb & CSB_ERROR);
627}
628
629static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
630{
631 u_char cmdblk[6],i,count = 0;
632
633 for (i = 0; i < XD_MAXDRIVES; i++) {
634 xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
635 if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
636 set_current_state(TASK_INTERRUPTIBLE);
637 schedule_timeout(XD_INIT_DISK_DELAY);
638
639 init_drive(count);
640 count++;
641
642 set_current_state(TASK_INTERRUPTIBLE);
643 schedule_timeout(XD_INIT_DISK_DELAY);
644 }
645 }
646 return (count);
647}
648
649static void __init xd_manual_geo_set (u_char drive)
650{
651 xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
652 xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
653 xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
654}
655
656static void __init xd_dtc_init_controller (unsigned int address)
657{
658 switch (address) {
659 case 0x00000:
660 case 0xC8000: break; /*initial: 0x320 */
661 case 0xCA000: xd_iobase = 0x324;
662 case 0xD0000: /*5150CX*/
663 case 0xD8000: break; /*5150CX & 5150XL*/
664 default: printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
665 break;
666 }
667 xd_maxsectors = 0x01; /* my card seems to have trouble doing multi-block transfers? */
668
669 outb(0,XD_RESET); /* reset the controller */
670}
671
672
673static void __init xd_dtc5150cx_init_drive (u_char drive)
674{
675 /* values from controller's BIOS - BIOS chip may be removed */
676 static u_short geometry_table[][4] = {
677 {0x200,8,0x200,0x100},
678 {0x267,2,0x267,0x267},
679 {0x264,4,0x264,0x80},
680 {0x132,4,0x132,0x0},
681 {0x132,2,0x80, 0x132},
682 {0x177,8,0x177,0x0},
683 {0x132,8,0x84, 0x0},
684 {}, /* not used */
685 {0x132,6,0x80, 0x100},
686 {0x200,6,0x100,0x100},
687 {0x264,2,0x264,0x80},
688 {0x280,4,0x280,0x100},
689 {0x2B9,3,0x2B9,0x2B9},
690 {0x2B9,5,0x2B9,0x2B9},
691 {0x280,6,0x280,0x100},
692 {0x132,4,0x132,0x0}};
693 u_char n;
694
695 n = inb(XD_JUMPER);
696 n = (drive ? n : (n >> 2)) & 0x33;
697 n = (n | (n >> 2)) & 0x0F;
698 if (xd_geo[3*drive])
699 xd_manual_geo_set(drive);
700 else
701 if (n != 7) {
702 xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
703 xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
704 xd_info[drive].sectors = 17; /* sectors */
705#if 0
706 xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
707 xd_info[drive].precomp = geometry_table[n][3] /* write precomp */
708 xd_info[drive].ecc = 0x0B; /* ecc length */
709#endif /* 0 */
710 }
711 else {
712 printk("xd%c: undetermined drive geometry\n",'a'+drive);
713 return;
714 }
715 xd_info[drive].control = 5; /* control byte */
716 xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
717 xd_recalibrate(drive);
718}
719
720static void __init xd_dtc_init_drive (u_char drive)
721{
722 u_char cmdblk[6],buf[64];
723
724 xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
725 if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
726 xd_info[drive].heads = buf[0x0A]; /* heads */
727 xd_info[drive].cylinders = ((u_short *) (buf))[0x04]; /* cylinders */
728 xd_info[drive].sectors = 17; /* sectors */
729 if (xd_geo[3*drive])
730 xd_manual_geo_set(drive);
731#if 0
732 xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05]; /* reduced write */
733 xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06]; /* write precomp */
734 xd_info[drive].ecc = buf[0x0F]; /* ecc length */
735#endif /* 0 */
736 xd_info[drive].control = 0; /* control byte */
737
738 xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
739 xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
740 if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
741 printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
742 }
743 else
744 printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
745}
746
747static void __init xd_wd_init_controller (unsigned int address)
748{
749 switch (address) {
750 case 0x00000:
751 case 0xC8000: break; /*initial: 0x320 */
752 case 0xCA000: xd_iobase = 0x324; break;
753 case 0xCC000: xd_iobase = 0x328; break;
754 case 0xCE000: xd_iobase = 0x32C; break;
755 case 0xD0000: xd_iobase = 0x328; break; /* ? */
756 case 0xD8000: xd_iobase = 0x32C; break; /* ? */
757 default: printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
758 break;
759 }
760 xd_maxsectors = 0x01; /* this one doesn't wrap properly either... */
761
762 outb(0,XD_RESET); /* reset the controller */
763
764 set_current_state(TASK_UNINTERRUPTIBLE);
765 schedule_timeout(XD_INIT_DISK_DELAY);
766}
767
768static void __init xd_wd_init_drive (u_char drive)
769{
770 /* values from controller's BIOS - BIOS may be disabled */
771 static u_short geometry_table[][4] = {
772 {0x264,4,0x1C2,0x1C2}, /* common part */
773 {0x132,4,0x099,0x0},
774 {0x267,2,0x1C2,0x1C2},
775 {0x267,4,0x1C2,0x1C2},
776
777 {0x334,6,0x335,0x335}, /* 1004 series RLL */
778 {0x30E,4,0x30F,0x3DC},
779 {0x30E,2,0x30F,0x30F},
780 {0x267,4,0x268,0x268},
781
782 {0x3D5,5,0x3D6,0x3D6}, /* 1002 series RLL */
783 {0x3DB,7,0x3DC,0x3DC},
784 {0x264,4,0x265,0x265},
785 {0x267,4,0x268,0x268}};
786
787 u_char cmdblk[6],buf[0x200];
788 u_char n = 0,rll,jumper_state,use_jumper_geo;
789 u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
790
791 jumper_state = ~(inb(0x322));
792 if (jumper_state & 0x40)
793 xd_irq = 9;
794 rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
795 xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
796 if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
797 xd_info[drive].heads = buf[0x1AF]; /* heads */
798 xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6]; /* cylinders */
799 xd_info[drive].sectors = 17; /* sectors */
800 if (xd_geo[3*drive])
801 xd_manual_geo_set(drive);
802#if 0
803 xd_info[drive].rwrite = ((u_short *) (buf))[0xD8]; /* reduced write */
804 xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA]; /* write precomp */
805 xd_info[drive].ecc = buf[0x1B4]; /* ecc length */
806#endif /* 0 */
807 xd_info[drive].control = buf[0x1B5]; /* control byte */
808 use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
809 if (xd_geo[3*drive]) {
810 xd_manual_geo_set(drive);
811 xd_info[drive].control = rll ? 7 : 5;
812 }
813 else if (use_jumper_geo) {
814 n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
815 xd_info[drive].cylinders = geometry_table[n][0];
816 xd_info[drive].heads = (u_char)(geometry_table[n][1]);
817 xd_info[drive].control = rll ? 7 : 5;
818#if 0
819 xd_info[drive].rwrite = geometry_table[n][2];
820 xd_info[drive].wprecomp = geometry_table[n][3];
821 xd_info[drive].ecc = 0x0B;
822#endif /* 0 */
823 }
824 if (!wd_1002) {
825 if (use_jumper_geo)
826 xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
827 geometry_table[n][2],geometry_table[n][3],0x0B);
828 else
829 xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
830 ((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
831 }
832 /* 1002 based RLL controller requests converted addressing, but reports physical
833 (physical 26 sec., logical 17 sec.)
834 1004 based ???? */
835 if (rll & wd_1002) {
836 if ((xd_info[drive].cylinders *= 26,
837 xd_info[drive].cylinders /= 17) > 1023)
838 xd_info[drive].cylinders = 1023; /* 1024 ? */
839#if 0
840 xd_info[drive].rwrite *= 26;
841 xd_info[drive].rwrite /= 17;
842 xd_info[drive].wprecomp *= 26
843 xd_info[drive].wprecomp /= 17;
844#endif /* 0 */
845 }
846 }
847 else
848 printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);
849
850}
851
852static void __init xd_seagate_init_controller (unsigned int address)
853{
854 switch (address) {
855 case 0x00000:
856 case 0xC8000: break; /*initial: 0x320 */
857 case 0xD0000: xd_iobase = 0x324; break;
858 case 0xD8000: xd_iobase = 0x328; break;
859 case 0xE0000: xd_iobase = 0x32C; break;
860 default: printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
861 break;
862 }
863 xd_maxsectors = 0x40;
864
865 outb(0,XD_RESET); /* reset the controller */
866}
867
868static void __init xd_seagate_init_drive (u_char drive)
869{
870 u_char cmdblk[6],buf[0x200];
871
872 xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
873 if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
874 xd_info[drive].heads = buf[0x04]; /* heads */
875 xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03]; /* cylinders */
876 xd_info[drive].sectors = buf[0x05]; /* sectors */
877 xd_info[drive].control = 0; /* control byte */
878 }
879 else
880 printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
881}
882
883/* Omti support courtesy Dirk Melchers */
884static void __init xd_omti_init_controller (unsigned int address)
885{
886 switch (address) {
887 case 0x00000:
888 case 0xC8000: break; /*initial: 0x320 */
889 case 0xD0000: xd_iobase = 0x324; break;
890 case 0xD8000: xd_iobase = 0x328; break;
891 case 0xE0000: xd_iobase = 0x32C; break;
892 default: printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
893 break;
894 }
895
896 xd_maxsectors = 0x40;
897
898 outb(0,XD_RESET); /* reset the controller */
899}
900
901static void __init xd_omti_init_drive (u_char drive)
902{
903	/* get geometry info from the drive */
904 xd_override_init_drive(drive);
905
906	/* set the other parameters; hardcoded, not that nice :-) */
907 xd_info[drive].control = 2;
908}
909
910/* Xebec support (AK) */
911static void __init xd_xebec_init_controller (unsigned int address)
912{
913/* iobase may be set manually in range 0x300 - 0x33C
914 irq may be set manually to 2(9),3,4,5,6,7
915 dma may be set manually to 1,2,3
916 (How to detect them ???)
917BIOS address may be set manually in range 0x0 - 0xF8000
918If you need non-standard settings use the xd=... command */
919
920 switch (address) {
921 case 0x00000:
922 case 0xC8000: /* initially: xd_iobase==0x320 */
923 case 0xD0000:
924 case 0xD2000:
925 case 0xD4000:
926 case 0xD6000:
927 case 0xD8000:
928 case 0xDA000:
929 case 0xDC000:
930 case 0xDE000:
931 case 0xE0000: break;
932 default: printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
933 break;
934 }
935
936 xd_maxsectors = 0x01;
937 outb(0,XD_RESET); /* reset the controller */
938
939 set_current_state(TASK_UNINTERRUPTIBLE);
940 schedule_timeout(XD_INIT_DISK_DELAY);
941}
942
943static void __init xd_xebec_init_drive (u_char drive)
944{
945 /* values from controller's BIOS - BIOS chip may be removed */
946 static u_short geometry_table[][5] = {
947 {0x132,4,0x080,0x080,0x7},
948 {0x132,4,0x080,0x080,0x17},
949 {0x264,2,0x100,0x100,0x7},
950 {0x264,2,0x100,0x100,0x17},
951 {0x132,8,0x080,0x080,0x7},
952 {0x132,8,0x080,0x080,0x17},
953 {0x264,4,0x100,0x100,0x6},
954 {0x264,4,0x100,0x100,0x17},
955 {0x2BC,5,0x2BC,0x12C,0x6},
956 {0x3A5,4,0x3A5,0x3A5,0x7},
957 {0x26C,6,0x26C,0x26C,0x7},
958 {0x200,8,0x200,0x100,0x17},
959 {0x400,5,0x400,0x400,0x7},
960 {0x400,6,0x400,0x400,0x7},
961 {0x264,8,0x264,0x200,0x17},
962 {0x33E,7,0x33E,0x200,0x7}};
963 u_char n;
964
965 n = inb(XD_JUMPER) & 0x0F; /* BIOS's drive number: same geometry
966 is assumed for BOTH drives */
967 if (xd_geo[3*drive])
968 xd_manual_geo_set(drive);
969 else {
970 xd_info[drive].heads = (u_char)(geometry_table[n][1]); /* heads */
971 xd_info[drive].cylinders = geometry_table[n][0]; /* cylinders */
972 xd_info[drive].sectors = 17; /* sectors */
973#if 0
974 xd_info[drive].rwrite = geometry_table[n][2]; /* reduced write */
975		xd_info[drive].wprecomp = geometry_table[n][3];	/* write precomp */
976 xd_info[drive].ecc = 0x0B; /* ecc length */
977#endif /* 0 */
978 }
979 xd_info[drive].control = geometry_table[n][4]; /* control byte */
980 xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
981 xd_recalibrate(drive);
982}
983
984/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
985 etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
986static void __init xd_override_init_drive (u_char drive)
987{
988 u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
989 u_char cmdblk[6],i;
990
991 if (xd_geo[3*drive])
992 xd_manual_geo_set(drive);
993 else {
994 for (i = 0; i < 3; i++) {
995 while (min[i] != max[i] - 1) {
996 test[i] = (min[i] + max[i]) / 2;
997 xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
998 if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
999 min[i] = test[i];
1000 else
1001 max[i] = test[i];
1002 }
1003 test[i] = min[i];
1004 }
1005 xd_info[drive].heads = (u_char) min[0] + 1;
1006 xd_info[drive].cylinders = (u_short) min[1] + 1;
1007 xd_info[drive].sectors = (u_char) min[2] + 1;
1008 }
1009 xd_info[drive].control = 0;
1010}
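/*
 * Sketch of the idea above (illustrative only): each of heads, cylinders and
 * sectors is probed independently, keeping the largest value for which a
 * CMD_SEEK still succeeds.  For a single axis, with a hypothetical helper
 * seek_ok() wrapping xd_build()/xd_command(), the search looks like:
 *
 *	u_short lo = 0, hi = 16;               (heads: valid range 0..15)
 *	while (lo != hi - 1) {
 *		u_short mid = (lo + hi) / 2;
 *		if (seek_ok(drive, mid))       (seek to head "mid" succeeded)
 *			lo = mid;
 *		else
 *			hi = mid;
 *	}
 *	heads = lo + 1;                        (highest good head, 1-based)
 */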
1011
1012/* do_xd_setup: initialise controller from command line parameters */
1013static void __init do_xd_setup (int *integers)
1014{
1015 switch (integers[0]) {
1016 case 4: if (integers[4] < 0)
1017 nodma = 1;
1018 else if (integers[4] < 8)
1019 xd_dma = integers[4];
1020 case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
1021 xd_iobase = integers[3];
1022 case 2: if ((integers[2] > 0) && (integers[2] < 16))
1023 xd_irq = integers[2];
1024 case 1: xd_override = 1;
1025 if ((integers[1] >= 0) && (integers[1] < (sizeof(xd_sigs) / sizeof(xd_sigs[0]))))
1026 xd_type = integers[1];
1027 case 0: break;
1028 default:printk("xd: too many parameters for xd\n");
1029 }
1030 xd_maxsectors = 0x01;
1031}
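/*
 * Example (illustrative): given the fall-through cases above, booting with
 * "xd=1,5,0x320,3" selects xd_sigs[] entry 1, IRQ 5, I/O base 0x320 and DMA
 * channel 3, while a negative fourth value such as "xd=1,5,0x320,-1" sets
 * the nodma flag instead of a DMA channel.
 */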
1032
1033/* xd_setparam: set the drive characteristics */
1034static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
1035{
1036 u_char cmdblk[14];
1037
1038 xd_build(cmdblk,command,drive,0,0,0,0,0);
1039 cmdblk[6] = (u_char) (cylinders >> 8) & 0x03;
1040 cmdblk[7] = (u_char) (cylinders & 0xFF);
1041 cmdblk[8] = heads & 0x1F;
1042 cmdblk[9] = (u_char) (rwrite >> 8) & 0x03;
1043 cmdblk[10] = (u_char) (rwrite & 0xFF);
1044 cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03;
1045 cmdblk[12] = (u_char) (wprecomp & 0xFF);
1046 cmdblk[13] = ecc;
1047
1048 /* Some controllers require geometry info as data, not command */
1049
1050 if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2))
1051 printk("xd: error setting characteristics for xd%c\n", 'a'+drive);
1052}
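/*
 * Worked example (illustrative): with cylinders = 0x267, heads = 4 and
 * ecc = 0x0B, the packing above yields cmdblk[6] = 0x02 (top two cylinder
 * bits), cmdblk[7] = 0x67, cmdblk[8] = 0x04 and cmdblk[13] = 0x0B; the
 * rwrite and wprecomp cylinder values are split into high/low bytes 9..12
 * in the same way.
 */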
1053
1054
1055#ifdef MODULE
1056
1057module_param_array(xd, int, NULL, 0);
1058module_param_array(xd_geo, int, NULL, 0);
1059module_param(nodma, bool, 0);
1060
1061MODULE_LICENSE("GPL");
1062
1063void cleanup_module(void)
1064{
1065 int i;
1066 unregister_blkdev(XT_DISK_MAJOR, "xd");
1067 for (i = 0; i < xd_drives; i++) {
1068 del_gendisk(xd_gendisk[i]);
1069 put_disk(xd_gendisk[i]);
1070 }
1071 blk_cleanup_queue(xd_queue);
1072 release_region(xd_iobase,4);
1073 if (xd_drives) {
1074 free_irq(xd_irq, NULL);
1075 free_dma(xd_dma);
1076 if (xd_dma_buffer)
1077 xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
1078 }
1079}
1080#else
1081
1082static int __init xd_setup (char *str)
1083{
1084 int ints[5];
1085 get_options (str, ARRAY_SIZE (ints), ints);
1086 do_xd_setup (ints);
1087 return 1;
1088}
1089
1090/* xd_manual_geo_init: initialise drive geometry from command line parameters
1091   (used by the WD, Xebec and override drive setup paths) */
1092static int __init xd_manual_geo_init (char *str)
1093{
1094 int i, integers[1 + 3*XD_MAXDRIVES];
1095
1096 get_options (str, ARRAY_SIZE (integers), integers);
1097 if (integers[0]%3 != 0) {
1098 printk("xd: incorrect number of parameters for xd_geo\n");
1099 return 1;
1100 }
1101 for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
1102 xd_geo[i] = integers[i+1];
1103 return 1;
1104}
1105
1106__setup ("xd=", xd_setup);
1107__setup ("xd_geo=", xd_manual_geo_init);
1108
1109#endif /* MODULE */
1110
1111module_init(xd_init);
1112MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
new file mode 100644
index 000000000000..71ac2e3dffc8
--- /dev/null
+++ b/drivers/block/xd.h
@@ -0,0 +1,135 @@
1#ifndef _LINUX_XD_H
2#define _LINUX_XD_H
3
4/*
5 * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X).
6 *
7 * Author: Pat Mackinlay, pat@it.com.au
8 * Date: 29/09/92
9 *
10 * Revised: 01/01/93, ...
11 *
12 * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com)
13 * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst.
14 */
15
16#include <linux/interrupt.h>
17
18/* XT hard disk controller registers */
19#define XD_DATA (xd_iobase + 0x00) /* data RW register */
20#define XD_RESET (xd_iobase + 0x01) /* reset WO register */
21#define XD_STATUS (xd_iobase + 0x01) /* status RO register */
22#define XD_SELECT (xd_iobase + 0x02) /* select WO register */
23#define XD_JUMPER (xd_iobase + 0x02) /* jumper RO register */
24#define XD_CONTROL (xd_iobase + 0x03) /* DMAE/INTE WO register */
25#define XD_RESERVED (xd_iobase + 0x03) /* reserved */
26
27/* XT hard disk controller commands (incomplete list) */
28#define CMD_TESTREADY 0x00 /* test drive ready */
29#define CMD_RECALIBRATE 0x01 /* recalibrate drive */
30#define CMD_SENSE 0x03 /* request sense */
31#define CMD_FORMATDRV 0x04 /* format drive */
32#define CMD_VERIFY 0x05 /* read verify */
33#define CMD_FORMATTRK 0x06 /* format track */
34#define CMD_FORMATBAD 0x07 /* format bad track */
35#define CMD_READ 0x08 /* read */
36#define CMD_WRITE 0x0A /* write */
37#define CMD_SEEK 0x0B /* seek */
38
39/* Controller specific commands */
40#define CMD_DTCSETPARAM 0x0C /* set drive parameters (DTC 5150X & CX only?) */
41#define CMD_DTCGETECC 0x0D /* get ecc error length (DTC 5150X only?) */
42#define CMD_DTCREADBUF 0x0E /* read sector buffer (DTC 5150X only?) */
43#define CMD_DTCWRITEBUF 0x0F /* write sector buffer (DTC 5150X only?) */
44#define CMD_DTCREMAPTRK 0x11 /* assign alternate track (DTC 5150X only?) */
45#define CMD_DTCGETPARAM 0xFB /* get drive parameters (DTC 5150X only?) */
46#define CMD_DTCSETSTEP 0xFC /* set step rate (DTC 5150X only?) */
47#define CMD_DTCSETGEOM 0xFE /* set geometry data (DTC 5150X only?) */
48#define CMD_DTCGETGEOM 0xFF /* get geometry data (DTC 5150X only?) */
49#define CMD_ST11GETGEOM 0xF8 /* get geometry data (Seagate ST11R/M only?) */
50#define CMD_WDSETPARAM 0x0C /* set drive parameters (WD 1004A27X only?) */
51#define CMD_XBSETPARAM 0x0C /* set drive parameters (XEBEC only?) */
52
53/* Bits for command status byte */
54#define CSB_ERROR 0x02 /* error */
55#define CSB_LUN 0x20 /* logical Unit Number */
56
57/* XT hard disk controller status bits */
58#define STAT_READY 0x01 /* controller is ready */
59#define STAT_INPUT 0x02 /* data flowing from controller to host */
60#define STAT_COMMAND 0x04 /* controller in command phase */
61#define STAT_SELECT 0x08 /* controller is selected */
62#define STAT_REQUEST 0x10 /* controller requesting data */
63#define STAT_INTERRUPT 0x20 /* controller requesting interrupt */
64
65/* XT hard disk controller control bits */
66#define PIO_MODE 0x00 /* control bits to set for PIO */
67#define DMA_MODE 0x03 /* control bits to set for DMA & interrupt */
68
69#define XD_MAXDRIVES 2 /* maximum 2 drives */
70#define XD_TIMEOUT HZ /* 1 second timeout */
71#define XD_RETRIES 4 /* maximum 4 retries */
72
73#undef DEBUG /* define for debugging output */
74
75#ifdef DEBUG
76 #define DEBUG_STARTUP /* debug driver initialisation */
77 #define DEBUG_OVERRIDE /* debug override geometry detection */
78 #define DEBUG_READWRITE /* debug each read/write command */
79 #define DEBUG_OTHER /* debug misc. interrupt/DMA stuff */
80 #define DEBUG_COMMAND /* debug each controller command */
81#endif /* DEBUG */
82
83/* this structure defines the XT drives and their types */
84typedef struct {
85 u_char heads;
86 u_short cylinders;
87 u_char sectors;
88 u_char control;
89 int unit;
90} XD_INFO;
91
92/* this structure defines a ROM BIOS signature */
93typedef struct {
94 unsigned int offset;
95 const char *string;
96 void (*init_controller)(unsigned int address);
97 void (*init_drive)(u_char drive);
98 const char *name;
99} XD_SIGNATURE;
100
101#ifndef MODULE
102static int xd_manual_geo_init (char *command);
103#endif /* MODULE */
104static u_char xd_detect (u_char *controller, unsigned int *address);
105static u_char xd_initdrives (void (*init_drive)(u_char drive));
106
107static void do_xd_request (request_queue_t * q);
108static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
109static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
110static void xd_recalibrate (u_char drive);
111
112static irqreturn_t xd_interrupt_handler(int irq, void *dev_id,
113 struct pt_regs *regs);
114static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
115static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
116static void xd_watchdog (unsigned long unused);
117static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
118static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
119
120/* card specific setup and geometry gathering code */
121static void xd_dtc_init_controller (unsigned int address);
122static void xd_dtc5150cx_init_drive (u_char drive);
123static void xd_dtc_init_drive (u_char drive);
124static void xd_wd_init_controller (unsigned int address);
125static void xd_wd_init_drive (u_char drive);
126static void xd_seagate_init_controller (unsigned int address);
127static void xd_seagate_init_drive (u_char drive);
128static void xd_omti_init_controller (unsigned int address);
129static void xd_omti_init_drive (u_char drive);
130static void xd_xebec_init_controller (unsigned int address);
131static void xd_xebec_init_drive (u_char drive);
132static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
133static void xd_override_init_drive (u_char drive);
134
135#endif /* _LINUX_XD_H */
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
new file mode 100644
index 000000000000..007f6a662439
--- /dev/null
+++ b/drivers/block/z2ram.c
@@ -0,0 +1,429 @@
1/*
2** z2ram - Amiga pseudo-driver to access 16bit-RAM in ZorroII space
3** as a block device, to be used as a RAM disk or swap space
4**
5** Copyright (C) 1994 by Ingo Wilken (Ingo.Wilken@informatik.uni-oldenburg.de)
6**
7** ++Geert: support for zorro_unused_z2ram, better range checking
8** ++roman: translate accesses via an array
9** ++Milan: support for ChipRAM usage
10** ++yambo: converted to 2.0 kernel
11** ++yambo: modularized and support added for 3 minor devices including:
12** MAJOR MINOR DESCRIPTION
13** ----- ----- ----------------------------------------------
14** 37 0 Use Zorro II and Chip ram
15** 37 1 Use only Zorro II ram
16** 37 2 Use only Chip ram
17** 37 4-7 Use memory list entry 1-4 (first is 0)
18** ++jskov: support for 1-4th memory list entry.
19**
20** Permission to use, copy, modify, and distribute this software and its
21** documentation for any purpose and without fee is hereby granted, provided
22** that the above copyright notice appear in all copies and that both that
23** copyright notice and this permission notice appear in supporting
24** documentation. This software is provided "as is" without express or
25** implied warranty.
26*/
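/*
** Illustrative usage (not part of this file): with the minor numbering
** above, "mknod /dev/z2ram1 b 37 1" creates a node that uses only the
** unused Zorro II RAM, which can then serve e.g. as swap space via
** "mkswap /dev/z2ram1 && swapon /dev/z2ram1".
*/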
27
28#define DEVICE_NAME "Z2RAM"
29
30#include <linux/major.h>
31#include <linux/vmalloc.h>
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/blkdev.h>
35#include <linux/bitops.h>
36
37#include <asm/setup.h>
38#include <asm/amigahw.h>
39#include <asm/pgtable.h>
40
41#include <linux/zorro.h>
42
43
44extern int m68k_realnum_memory;
45extern struct mem_info m68k_memory[NUM_MEMINFO];
46
47#define TRUE (1)
48#define FALSE (0)
49
50#define Z2MINOR_COMBINED (0)
51#define Z2MINOR_Z2ONLY (1)
52#define Z2MINOR_CHIPONLY (2)
53#define Z2MINOR_MEMLIST1 (4)
54#define Z2MINOR_MEMLIST2 (5)
55#define Z2MINOR_MEMLIST3 (6)
56#define Z2MINOR_MEMLIST4 (7)
57#define Z2MINOR_COUNT (8) /* Move this down when adding a new minor */
58
59#define Z2RAM_CHUNK1024 ( Z2RAM_CHUNKSIZE >> 10 )
60
61static u_long *z2ram_map = NULL;
62static u_long z2ram_size = 0;
63static int z2_count = 0;
64static int chip_count = 0;
65static int list_count = 0;
66static int current_device = -1;
67
68static DEFINE_SPINLOCK(z2ram_lock);
69
70static struct block_device_operations z2_fops;
71static struct gendisk *z2ram_gendisk;
72
73static void do_z2_request(request_queue_t *q)
74{
75 struct request *req;
76 while ((req = elv_next_request(q)) != NULL) {
77 unsigned long start = req->sector << 9;
78 unsigned long len = req->current_nr_sectors << 9;
79
80 if (start + len > z2ram_size) {
81 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
82 req->sector, req->current_nr_sectors);
83 end_request(req, 0);
84 continue;
85 }
86 while (len) {
87 unsigned long addr = start & Z2RAM_CHUNKMASK;
88 unsigned long size = Z2RAM_CHUNKSIZE - addr;
89 if (len < size)
90 size = len;
91 addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
92 if (rq_data_dir(req) == READ)
93 memcpy(req->buffer, (char *)addr, size);
94 else
95 memcpy((char *)addr, req->buffer, size);
96 start += size;
97 len -= size;
98 }
99 end_request(req, 1);
100 }
101}
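/*
 * Illustrative example: assuming the usual 64 KiB chunks
 * (Z2RAM_CHUNKSHIFT == 16, Z2RAM_CHUNKMASK == 0xFFFF), a request starting at
 * sector 130 gives start = 130 << 9 = 0x10400, so the loop above copies
 * from/to z2ram_map[1] + 0x400; a transfer crossing a 64 KiB boundary is
 * split across consecutive map entries.
 */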
102
103static void
104get_z2ram( void )
105{
106 int i;
107
108 for ( i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++ )
109 {
110 if ( test_bit( i, zorro_unused_z2ram ) )
111 {
112 z2_count++;
113 z2ram_map[ z2ram_size++ ] =
114 ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
115 clear_bit( i, zorro_unused_z2ram );
116 }
117 }
118
119 return;
120}
121
122static void
123get_chipram( void )
124{
125
126 while ( amiga_chip_avail() > ( Z2RAM_CHUNKSIZE * 4 ) )
127 {
128 chip_count++;
129 z2ram_map[ z2ram_size ] =
130 (u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE, "z2ram" );
131
132 if ( z2ram_map[ z2ram_size ] == 0 )
133 {
134 break;
135 }
136
137 z2ram_size++;
138 }
139
140 return;
141}
142
143static int
144z2_open( struct inode *inode, struct file *filp )
145{
146 int device;
147 int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
148 sizeof( z2ram_map[0] );
149 int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
150 sizeof( z2ram_map[0] );
151 int rc = -ENOMEM;
152
153 device = iminor(inode);
154
155 if ( current_device != -1 && current_device != device )
156 {
157 rc = -EBUSY;
158 goto err_out;
159 }
160
161 if ( current_device == -1 )
162 {
163 z2_count = 0;
164 chip_count = 0;
165 list_count = 0;
166 z2ram_size = 0;
167
168 /* Use a specific list entry. */
169 if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
170 int index = device - Z2MINOR_MEMLIST1 + 1;
171 unsigned long size, paddr, vaddr;
172
173 if (index >= m68k_realnum_memory) {
174 printk( KERN_ERR DEVICE_NAME
175 ": no such entry in z2ram_map\n" );
176 goto err_out;
177 }
178
179 paddr = m68k_memory[index].addr;
180 size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);
181
182#ifdef __powerpc__
183 /* FIXME: ioremap doesn't build correct memory tables. */
184 {
185 vfree(vmalloc (size));
186 }
187
188 vaddr = (unsigned long) __ioremap (paddr, size,
189 _PAGE_WRITETHRU);
190
191#else
192 vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
193#endif
194 z2ram_map =
195 kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
196 GFP_KERNEL);
197 if ( z2ram_map == NULL )
198 {
199 printk( KERN_ERR DEVICE_NAME
200 ": cannot get mem for z2ram_map\n" );
201 goto err_out;
202 }
203
204 while (size) {
205 z2ram_map[ z2ram_size++ ] = vaddr;
206 size -= Z2RAM_CHUNKSIZE;
207 vaddr += Z2RAM_CHUNKSIZE;
208 list_count++;
209 }
210
211 if ( z2ram_size != 0 )
212 printk( KERN_INFO DEVICE_NAME
213 ": using %iK List Entry %d Memory\n",
214 list_count * Z2RAM_CHUNK1024, index );
215 } else
216
217 switch ( device )
218 {
219 case Z2MINOR_COMBINED:
220
221 z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL );
222 if ( z2ram_map == NULL )
223 {
224 printk( KERN_ERR DEVICE_NAME
225 ": cannot get mem for z2ram_map\n" );
226 goto err_out;
227 }
228
229 get_z2ram();
230 get_chipram();
231
232 if ( z2ram_size != 0 )
233 printk( KERN_INFO DEVICE_NAME
234 ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
235 z2_count * Z2RAM_CHUNK1024,
236 chip_count * Z2RAM_CHUNK1024,
237 ( z2_count + chip_count ) * Z2RAM_CHUNK1024 );
238
239 break;
240
241 case Z2MINOR_Z2ONLY:
242 z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
243 if ( z2ram_map == NULL )
244 {
245 printk( KERN_ERR DEVICE_NAME
246 ": cannot get mem for z2ram_map\n" );
247 goto err_out;
248 }
249
250 get_z2ram();
251
252 if ( z2ram_size != 0 )
253 printk( KERN_INFO DEVICE_NAME
254 ": using %iK of Zorro II RAM\n",
255 z2_count * Z2RAM_CHUNK1024 );
256
257 break;
258
259 case Z2MINOR_CHIPONLY:
260 z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
261 if ( z2ram_map == NULL )
262 {
263 printk( KERN_ERR DEVICE_NAME
264 ": cannot get mem for z2ram_map\n" );
265 goto err_out;
266 }
267
268 get_chipram();
269
270 if ( z2ram_size != 0 )
271 printk( KERN_INFO DEVICE_NAME
272 ": using %iK Chip RAM\n",
273 chip_count * Z2RAM_CHUNK1024 );
274
275 break;
276
277 default:
278 rc = -ENODEV;
279 goto err_out;
280
281 break;
282 }
283
284 if ( z2ram_size == 0 )
285 {
286 printk( KERN_NOTICE DEVICE_NAME
287 ": no unused ZII/Chip RAM found\n" );
288 goto err_out_kfree;
289 }
290
291 current_device = device;
292 z2ram_size <<= Z2RAM_CHUNKSHIFT;
293 set_capacity(z2ram_gendisk, z2ram_size >> 9);
294 }
295
296 return 0;
297
298err_out_kfree:
299 kfree( z2ram_map );
300err_out:
301 return rc;
302}
303
304static int
305z2_release( struct inode *inode, struct file *filp )
306{
307 if ( current_device == -1 )
308 return 0;
309
310 /*
311 * FIXME: unmap memory
312 */
313
314 return 0;
315}
316
317static struct block_device_operations z2_fops =
318{
319 .owner = THIS_MODULE,
320 .open = z2_open,
321 .release = z2_release,
322};
323
324static struct kobject *z2_find(dev_t dev, int *part, void *data)
325{
326 *part = 0;
327 return get_disk(z2ram_gendisk);
328}
329
330static struct request_queue *z2_queue;
331
332int __init
333z2_init(void)
334{
335 int ret;
336
337 if (!MACH_IS_AMIGA)
338 return -ENXIO;
339
340 ret = -EBUSY;
341 if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
342 goto err;
343
344 ret = -ENOMEM;
345 z2ram_gendisk = alloc_disk(1);
346 if (!z2ram_gendisk)
347 goto out_disk;
348
349 z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
350 if (!z2_queue)
351 goto out_queue;
352
353 z2ram_gendisk->major = Z2RAM_MAJOR;
354 z2ram_gendisk->first_minor = 0;
355 z2ram_gendisk->fops = &z2_fops;
356 sprintf(z2ram_gendisk->disk_name, "z2ram");
357 strcpy(z2ram_gendisk->devfs_name, z2ram_gendisk->disk_name);
358
359 z2ram_gendisk->queue = z2_queue;
360 add_disk(z2ram_gendisk);
361 blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE,
362 z2_find, NULL, NULL);
363
364 return 0;
365
366out_queue:
367 put_disk(z2ram_gendisk);
368out_disk:
369 unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
370err:
371 return ret;
372}
373
374#if defined(MODULE)
375
376MODULE_LICENSE("GPL");
377
378int
379init_module( void )
380{
381 int error;
382
383 error = z2_init();
384 if ( error == 0 )
385 {
386 printk( KERN_INFO DEVICE_NAME ": loaded as module\n" );
387 }
388
389 return error;
390}
391
392void
393cleanup_module( void )
394{
395 int i, j;
396    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
397 if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
398 printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
399
400 del_gendisk(z2ram_gendisk);
401 put_disk(z2ram_gendisk);
402 blk_cleanup_queue(z2_queue);
403
404 if ( current_device != -1 )
405 {
406 i = 0;
407
408 for ( j = 0 ; j < z2_count; j++ )
409 {
410 set_bit( i++, zorro_unused_z2ram );
411 }
412
413 for ( j = 0 ; j < chip_count; j++ )
414 {
415 if ( z2ram_map[ i ] )
416 {
417 amiga_chip_free( (void *) z2ram_map[ i++ ] );
418 }
419 }
420
421 if ( z2ram_map != NULL )
422 {
423 kfree( z2ram_map );
424 }
425 }
426
427 return;
428}
429#endif