Diffstat (limited to 'drivers/message/i2o')
 drivers/message/i2o/Kconfig      |   75 +
 drivers/message/i2o/Makefile     |   13 +
 drivers/message/i2o/README       |   98 +
 drivers/message/i2o/README.ioctl |  394 +
 drivers/message/i2o/debug.c      |  481 +
 drivers/message/i2o/device.c     |  634 +
 drivers/message/i2o/driver.c     |  374 +
 drivers/message/i2o/exec-osm.c   |  507 +
 drivers/message/i2o/i2o_block.c  | 1247 +
 drivers/message/i2o/i2o_block.h  |   99 +
 drivers/message/i2o/i2o_config.c | 1160 +
 drivers/message/i2o/i2o_lan.h    |  159 +
 drivers/message/i2o/i2o_proc.c   | 2112 +
 drivers/message/i2o/i2o_scsi.c   |  830 +
 drivers/message/i2o/iop.c        | 1327 +
 drivers/message/i2o/pci.c        |  528 +
 16 files changed, 10038 insertions(+), 0 deletions(-)
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
new file mode 100644
index 000000000000..8d132b0d6b12
--- /dev/null
+++ b/drivers/message/i2o/Kconfig
@@ -0,0 +1,75 @@
1 | |||
2 | menu "I2O device support" | ||
3 | |||
4 | config I2O | ||
5 | tristate "I2O support" | ||
6 | depends on PCI | ||
7 | ---help--- | ||
8 | The Intelligent Input/Output (I2O) architecture allows hardware | ||
9 | drivers to be split into two parts: an operating-system-specific | ||
10 | module called the OSM and a hardware-specific module called the | ||
11 | HDM. The OSM can talk to a whole range of HDMs, and ideally the | ||
12 | HDMs are not OS dependent. This allows for the same HDM driver to | ||
13 | be used under different operating systems if the relevant OSM is in | ||
14 | place. In order for this to work, you need to have an I2O interface | ||
15 | adapter card in your computer. This card contains a special I/O | ||
16 | processor (IOP), thus allowing high speeds since the CPU does not | ||
17 | have to deal with I/O. | ||
18 | |||
19 | If you say Y here, you will get a choice of interface adapter | ||
20 | drivers and OSM's with the following questions. | ||
21 | |||
22 | To compile this support as a module, choose M here: the | ||
23 | module will be called i2o_core. | ||
24 | |||
25 | If unsure, say N. | ||
26 | |||
27 | config I2O_CONFIG | ||
28 | tristate "I2O Configuration support" | ||
29 | depends on PCI && I2O | ||
30 | help | ||
31 | Say Y for support of the configuration interface for the I2O adapters. | ||
32 | If you have a RAID controller from Adaptec and you want to use the | ||
33 | raidutils to manage your RAID array, you have to say Y here. | ||
34 | |||
35 | To compile this support as a module, choose M here: the | ||
36 | module will be called i2o_config. | ||
37 | |||
38 | config I2O_BLOCK | ||
39 | tristate "I2O Block OSM" | ||
40 | depends on I2O | ||
41 | help | ||
42 | Include support for the I2O Block OSM. The Block OSM presents disk | ||
43 | and other structured block devices to the operating system. If you | ||
44 | are using a RAID controller, you can access the array only through | ||
45 | the Block OSM driver. The individual disks can still be accessed | ||
46 | through the SCSI OSM driver, for example to monitor them. | ||
47 | |||
48 | To compile this support as a module, choose M here: the | ||
49 | module will be called i2o_block. | ||
50 | |||
51 | config I2O_SCSI | ||
52 | tristate "I2O SCSI OSM" | ||
53 | depends on I2O && SCSI | ||
54 | help | ||
55 | Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel | ||
56 | I2O controller. You can use both the SCSI and Block OSM together if | ||
57 | you wish. To access a RAID array, you must use the Block OSM driver. | ||
58 | But you can use the SCSI OSM driver to monitor the individual disks. | ||
59 | |||
60 | To compile this support as a module, choose M here: the | ||
61 | module will be called i2o_scsi. | ||
62 | |||
63 | config I2O_PROC | ||
64 | tristate "I2O /proc support" | ||
65 | depends on I2O | ||
66 | help | ||
67 | If you say Y here and to "/proc file system support", you will be | ||
68 | able to read I2O related information from the virtual directory | ||
69 | /proc/i2o. | ||
70 | |||
71 | To compile this support as a module, choose M here: the | ||
72 | module will be called i2o_proc. | ||
73 | |||
74 | endmenu | ||
75 | |||
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
new file mode 100644
index 000000000000..aabc6cdc3fce
--- /dev/null
+++ b/drivers/message/i2o/Makefile
@@ -0,0 +1,13 @@
1 | # | ||
2 | # Makefile for the kernel I2O OSM. | ||
3 | # | ||
4 | # Note : at this point, these files are compiled on all systems. | ||
5 | # In the future, some of these should be built conditionally. | ||
6 | # | ||
7 | |||
8 | i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o | ||
9 | obj-$(CONFIG_I2O) += i2o_core.o | ||
10 | obj-$(CONFIG_I2O_CONFIG) += i2o_config.o | ||
11 | obj-$(CONFIG_I2O_BLOCK) += i2o_block.o | ||
12 | obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o | ||
13 | obj-$(CONFIG_I2O_PROC) += i2o_proc.o | ||
diff --git a/drivers/message/i2o/README b/drivers/message/i2o/README
new file mode 100644
index 000000000000..a81f851f7b5d
--- /dev/null
+++ b/drivers/message/i2o/README
@@ -0,0 +1,98 @@
1 | |||
2 | Linux I2O Support (c) Copyright 1999 Red Hat Software | ||
3 | and others. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or | ||
6 | modify it under the terms of the GNU General Public License | ||
7 | as published by the Free Software Foundation; either version | ||
8 | 2 of the License, or (at your option) any later version. | ||
9 | |||
10 | AUTHORS (so far) | ||
11 | |||
12 | Alan Cox, Building Number Three Ltd. | ||
13 | Core code, SCSI and Block OSMs | ||
14 | |||
15 | Steve Ralston, LSI Logic Corp. | ||
16 | Debugging SCSI and Block OSM | ||
17 | |||
18 | Deepak Saxena, Intel Corp. | ||
19 | Various core/block extensions | ||
20 | /proc interface, bug fixes | ||
21 | Ioctl interfaces for control | ||
22 | Debugging LAN OSM | ||
23 | |||
24 | Philip Rumpf | ||
25 | Fixed assorted dumb SMP locking bugs | ||
26 | |||
27 | Juha Sievanen, University of Helsinki Finland | ||
28 | LAN OSM code | ||
29 | /proc interface to LAN class | ||
30 | Bug fixes | ||
31 | Core code extensions | ||
32 | |||
33 | Auvo Häkkinen, University of Helsinki Finland | ||
34 | LAN OSM code | ||
35 | /proc interface to LAN class | ||
36 | Bug fixes | ||
37 | Core code extensions | ||
38 | |||
39 | Taneli Vähäkangas, University of Helsinki Finland | ||
40 | Fixes to i2o_config | ||
41 | |||
42 | CREDITS | ||
43 | |||
44 | This work was made possible by | ||
45 | |||
46 | Red Hat Software | ||
47 | Funding for the Building #3 part of the project | ||
48 | |||
49 | Symbios Logic (Now LSI) | ||
50 | Host adapters, hints, and known-working platforms when I hit | ||
51 | compatibility problems | ||
52 | |||
53 | BoxHill Corporation | ||
54 | Loan of initial FibreChannel disk array used for development work. | ||
55 | |||
56 | European Commission | ||
57 | Funding the work done by the University of Helsinki | ||
58 | |||
59 | SysKonnect | ||
60 | Loan of FDDI and Gigabit Ethernet cards | ||
61 | |||
62 | ASUSTeK | ||
63 | Loan of I2O motherboard | ||
64 | |||
65 | STATUS: | ||
66 | |||
67 | o The core setup works within limits. | ||
68 | o The scsi layer seems to almost work. | ||
69 | I'm still chasing down the hang bug. | ||
70 | o The block OSM is mostly functional | ||
71 | o LAN OSM works with FDDI and Ethernet cards. | ||
72 | |||
73 | TO DO: | ||
74 | |||
75 | General: | ||
76 | o Provide hidden address space if asked | ||
77 | o Long term message flow control | ||
78 | o PCI IOPs without interrupts are not supported yet | ||
79 | o Push FAIL handling into the core | ||
80 | o DDM control interfaces for module load etc | ||
81 | o Add I2O 2.0 support (Deferred to 2.5 kernel) | ||
82 | |||
83 | Block: | ||
84 | o Multiple major numbers | ||
85 | o Read ahead and cache handling stuff. Talk to Ingo and people | ||
86 | o Power management | ||
87 | o Finish Media changers | ||
88 | |||
89 | SCSI: | ||
90 | o Find the right way to associate drives/luns/busses | ||
91 | |||
92 | Lan: | ||
93 | o Performance tuning | ||
94 | o Test Fibre Channel code | ||
95 | |||
96 | Tape: | ||
97 | o Anyone seen anything implementing this? | ||
98 | (D.S: Will attempt to do so if spare cycles permit) | ||
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
new file mode 100644
index 000000000000..73dd084c0e98
--- /dev/null
+++ b/drivers/message/i2o/README.ioctl
@@ -0,0 +1,394 @@
1 | |||
2 | Linux I2O User Space Interface | ||
3 | rev 0.3 - 04/20/99 | ||
4 | |||
5 | ============================================================================= | ||
6 | Originally written by Deepak Saxena(deepak@plexity.net) | ||
7 | Currently maintained by Deepak Saxena(deepak@plexity.net) | ||
8 | ============================================================================= | ||
9 | |||
10 | I. Introduction | ||
11 | |||
12 | The Linux I2O subsystem provides a set of ioctl() commands that can be | ||
13 | utilized by user space applications to communicate with IOPs and devices | ||
14 | on individual IOPs. This document defines the specific ioctl() commands | ||
15 | that are available to the user and provides examples of their uses. | ||
16 | |||
17 | This document assumes the reader is familiar with or has access to the | ||
18 | I2O specification as no I2O message parameters are outlined. For information | ||
19 | on the specification, see http://www.i2osig.org | ||
20 | |||
21 | This document and the I2O user space interface are currently maintained | ||
22 | by Deepak Saxena. Please send all comments, errata, and bug fixes to | ||
23 | deepak@csociety.purdue.edu | ||
24 | |||
25 | II. IOP Access | ||
26 | |||
27 | Access to the I2O subsystem is provided through the device file named | ||
28 | /dev/i2o/ctl. This file is a character file with major number 10 and minor | ||
29 | number 166. It can be created through the following command: | ||
30 | |||
31 | mknod /dev/i2o/ctl c 10 166 | ||
32 | |||
33 | III. Determining the IOP Count | ||
34 | |||
35 | SYNOPSIS | ||
36 | |||
37 | ioctl(fd, I2OGETIOPS, int *count); | ||
38 | |||
39 | u8 count[MAX_I2O_CONTROLLERS]; | ||
40 | |||
41 | DESCRIPTION | ||
42 | |||
43 | This function returns the system's active IOP table. count should | ||
44 | point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon | ||
45 | returning, each entry will contain a non-zero value if the given | ||
46 | IOP unit is active, and zero if it is inactive or non-existent. | ||
47 | |||
48 | RETURNS | ||
49 | |||
50 | Returns 0 if no errors occur, and -1 otherwise. If an error occurs, | ||
51 | errno is set appropriately: | ||
52 | |||
53 | EFAULT Invalid user space pointer was passed | ||
54 | |||
55 | IV. Getting Hardware Resource Table | ||
56 | |||
57 | SYNOPSIS | ||
58 | |||
59 | ioctl(fd, I2OHRTGET, struct i2o_cmd_hrt *hrt); | ||
60 | |||
61 | struct i2o_cmd_hrtlct | ||
62 | { | ||
63 | u32 iop; /* IOP unit number */ | ||
64 | void *resbuf; /* Buffer for result */ | ||
65 | u32 *reslen; /* Buffer length in bytes */ | ||
66 | }; | ||
67 | |||
68 | DESCRIPTION | ||
69 | |||
70 | This function returns the Hardware Resource Table of the IOP specified | ||
71 | by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of | ||
72 | the data is written into *(hrt->reslen). | ||
73 | |||
74 | RETURNS | ||
75 | |||
76 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
77 | is returned and errno is set appropriately: | ||
78 | |||
79 | EFAULT Invalid user space pointer was passed | ||
80 | ENXIO Invalid IOP number | ||
81 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
82 | buffer length is written into *(hrt->reslen) | ||
83 | |||
84 | V. Getting Logical Configuration Table | ||
85 | |||
86 | SYNOPSIS | ||
87 | |||
88 | ioctl(fd, I2OLCTGET, struct i2o_cmd_lct *lct); | ||
89 | |||
90 | struct i2o_cmd_hrtlct | ||
91 | { | ||
92 | u32 iop; /* IOP unit number */ | ||
93 | void *resbuf; /* Buffer for result */ | ||
94 | u32 *reslen; /* Buffer length in bytes */ | ||
95 | }; | ||
96 | |||
97 | DESCRIPTION | ||
98 | |||
99 | This function returns the Logical Configuration Table of the IOP specified | ||
100 | by lct->iop in the buffer pointed to by lct->resbuf. The actual size of | ||
101 | the data is written into *(lct->reslen). | ||
102 | |||
103 | RETURNS | ||
104 | |||
105 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
106 | is returned and errno is set appropriately: | ||
107 | |||
108 | EFAULT Invalid user space pointer was passed | ||
109 | ENXIO Invalid IOP number | ||
110 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
111 | buffer length is written into *(lct->reslen) | ||
112 | |||
113 | VI. Setting Parameters | ||
114 | |||
115 | SYNOPSIS | ||
116 | |||
117 | ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops); | ||
118 | |||
119 | struct i2o_cmd_psetget | ||
120 | { | ||
121 | u32 iop; /* IOP unit number */ | ||
122 | u32 tid; /* Target device TID */ | ||
123 | void *opbuf; /* Operation List buffer */ | ||
124 | u32 oplen; /* Operation List buffer length in bytes */ | ||
125 | void *resbuf; /* Result List buffer */ | ||
126 | u32 *reslen; /* Result List buffer length in bytes */ | ||
127 | }; | ||
128 | |||
129 | DESCRIPTION | ||
130 | |||
131 | This function posts a UtilParamsSet message to the device identified | ||
132 | by ops->iop and ops->tid. The operation list for the message is | ||
133 | sent through the ops->opbuf buffer, and the result list is written | ||
134 | into the buffer pointed to by ops->resbuf. The number of bytes | ||
135 | written is placed into *(ops->reslen). | ||
136 | |||
137 | RETURNS | ||
138 | |||
139 | The return value is the size in bytes of the data written into | ||
140 | ops->resbuf if no errors occur. If an error occurs, -1 is returned | ||
141 | and errno is set appropriately: | ||
142 | |||
143 | EFAULT Invalid user space pointer was passed | ||
144 | ENXIO Invalid IOP number | ||
145 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
146 | buffer length is written into *(ops->reslen) | ||
147 | ETIMEDOUT Timeout waiting for reply message | ||
148 | ENOMEM Kernel memory allocation error | ||
149 | |||
150 | A return value of 0 does not mean that the value was actually | ||
151 | changed properly on the IOP. The user should check the result | ||
152 | list to determine the specific status of the transaction. | ||
153 | |||
154 | VII. Getting Parameters | ||
155 | |||
156 | SYNOPSIS | ||
157 | |||
158 | ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops); | ||
159 | |||
160 | struct i2o_parm_setget | ||
161 | { | ||
162 | u32 iop; /* IOP unit number */ | ||
163 | u32 tid; /* Target device TID */ | ||
164 | void *opbuf; /* Operation List buffer */ | ||
165 | u32 oplen; /* Operation List buffer length in bytes */ | ||
166 | void *resbuf; /* Result List buffer */ | ||
167 | u32 *reslen; /* Result List buffer length in bytes */ | ||
168 | }; | ||
169 | |||
170 | DESCRIPTION | ||
171 | |||
172 | This function posts a UtilParamsGet message to the device identified | ||
173 | by ops->iop and ops->tid. The operation list for the message is | ||
174 | sent through the ops->opbuf buffer, and the result list is written | ||
175 | into the buffer pointed to by ops->resbuf. The actual size of data | ||
176 | written is placed into *(ops->reslen). | ||
177 | |||
178 | RETURNS | ||
179 | |||
180 | EFAULT Invalid user space pointer was passed | ||
181 | ENXIO Invalid IOP number | ||
182 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
183 | buffer length is written into *(ops->reslen) | ||
184 | ETIMEDOUT Timeout waiting for reply message | ||
185 | ENOMEM Kernel memory allocation error | ||
186 | |||
187 | A return value of 0 does not mean that the value was actually | ||
188 | properly retrieved. The user should check the result list | ||
189 | to determine the specific status of the transaction. | ||
190 | |||
191 | VIII. Downloading Software | ||
192 | |||
193 | SYNOPSIS | ||
194 | |||
195 | ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw); | ||
196 | |||
197 | struct i2o_sw_xfer | ||
198 | { | ||
199 | u32 iop; /* IOP unit number */ | ||
200 | u8 flags; /* DownloadFlags field */ | ||
201 | u8 sw_type; /* Software type */ | ||
202 | u32 sw_id; /* Software ID */ | ||
203 | void *buf; /* Pointer to software buffer */ | ||
204 | u32 *swlen; /* Length of software buffer */ | ||
205 | u32 *maxfrag; /* Number of fragments */ | ||
206 | u32 *curfrag; /* Current fragment number */ | ||
207 | }; | ||
208 | |||
209 | DESCRIPTION | ||
210 | |||
211 | This function downloads a software fragment pointed to by sw->buf | ||
212 | to the IOP identified by sw->iop. The DownloadFlags, SwID, SwType | ||
213 | and SwSize fields of the ExecSwDownload message are filled in with | ||
214 | the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen). | ||
215 | |||
216 | The fragments _must_ be sent in order and be 8K in size. The last | ||
217 | fragment _may_ be shorter, however. The kernel will compute its | ||
218 | size based on information in the sw->swlen field. | ||
219 | |||
220 | Please note that SW transfers can take a long time. | ||
221 | |||
222 | RETURNS | ||
223 | |||
224 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
225 | is returned and errno is set appropriately: | ||
226 | |||
227 | EFAULT Invalid user space pointer was passed | ||
228 | ENXIO Invalid IOP number | ||
229 | ETIMEDOUT Timeout waiting for reply message | ||
230 | ENOMEM Kernel memory allocation error | ||
231 | |||
232 | IX. Uploading Software | ||
233 | |||
234 | SYNOPSIS | ||
235 | |||
236 | ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw); | ||
237 | |||
238 | struct i2o_sw_xfer | ||
239 | { | ||
240 | u32 iop; /* IOP unit number */ | ||
241 | u8 flags; /* UploadFlags */ | ||
242 | u8 sw_type; /* Software type */ | ||
243 | u32 sw_id; /* Software ID */ | ||
244 | void *buf; /* Pointer to software buffer */ | ||
245 | u32 *swlen; /* Length of software buffer */ | ||
246 | u32 *maxfrag; /* Number of fragments */ | ||
247 | u32 *curfrag; /* Current fragment number */ | ||
248 | }; | ||
249 | |||
250 | DESCRIPTION | ||
251 | |||
252 | This function uploads a software fragment from the IOP identified | ||
253 | by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields. | ||
254 | The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload | ||
255 | message are filled in with the values of sw->flags, sw->sw_id, | ||
256 | sw->sw_type and *(sw->swlen). | ||
257 | |||
258 | The fragments _must_ be requested in order and be 8K in size. The | ||
259 | user is responsible for allocating the memory pointed to by sw->buf. The | ||
260 | last fragment _may_ be shorter. | ||
261 | |||
262 | Please note that SW transfers can take a long time. | ||
263 | |||
264 | RETURNS | ||
265 | |||
266 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
267 | is returned and errno is set appropriately: | ||
268 | |||
269 | EFAULT Invalid user space pointer was passed | ||
270 | ENXIO Invalid IOP number | ||
271 | ETIMEDOUT Timeout waiting for reply message | ||
272 | ENOMEM Kernel memory allocation error | ||
273 | |||
274 | X. Removing Software | ||
275 | |||
276 | SYNOPSIS | ||
277 | |||
278 | ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw); | ||
279 | |||
280 | struct i2o_sw_xfer | ||
281 | { | ||
282 | u32 iop; /* IOP unit number */ | ||
283 | u8 flags; /* RemoveFlags */ | ||
284 | u8 sw_type; /* Software type */ | ||
285 | u32 sw_id; /* Software ID */ | ||
286 | void *buf; /* Unused */ | ||
287 | u32 *swlen; /* Length of the software data */ | ||
288 | u32 *maxfrag; /* Unused */ | ||
289 | u32 *curfrag; /* Unused */ | ||
290 | }; | ||
291 | |||
292 | DESCRIPTION | ||
293 | |||
294 | This function removes software from the IOP identified by sw->iop. | ||
295 | The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message | ||
296 | are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and | ||
297 | *(sw->swlen). Give zero in *(sw->swlen) if the value is unknown. The IOP uses | ||
298 | the *(sw->swlen) value to verify correct identification of the module to remove. | ||
299 | The actual size of the module is written into *(sw->swlen). | ||
300 | |||
301 | RETURNS | ||
302 | |||
303 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
304 | is returned and errno is set appropriately: | ||
305 | |||
306 | EFAULT Invalid user space pointer was passed | ||
307 | ENXIO Invalid IOP number | ||
308 | ETIMEDOUT Timeout waiting for reply message | ||
309 | ENOMEM Kernel memory allocation error | ||
310 | |||
311 | XI. Validating Configuration | ||
312 | |||
313 | SYNOPSIS | ||
314 | |||
315 | ioctl(fd, I2OVALIDATE, int *iop); | ||
316 | u32 iop; | ||
317 | |||
318 | DESCRIPTION | ||
319 | |||
320 | This function posts an ExecConfigValidate message to the controller | ||
321 | identified by iop. This message indicates that the current | ||
322 | configuration is accepted. The IOP changes the status of suspect drivers | ||
323 | to valid and may delete old drivers from its store. | ||
324 | |||
325 | RETURNS | ||
326 | |||
327 | This function returns 0 if no errors occur. If an error occurs, -1 is | ||
328 | returned and errno is set appropriately: | ||
329 | |||
330 | ETIMEDOUT Timeout waiting for reply message | ||
331 | ENXIO Invalid IOP number | ||
332 | |||
333 | XII. Configuration Dialog | ||
334 | |||
335 | SYNOPSIS | ||
336 | |||
337 | ioctl(fd, I2OHTML, struct i2o_html *htquery); | ||
338 | struct i2o_html | ||
339 | { | ||
340 | u32 iop; /* IOP unit number */ | ||
341 | u32 tid; /* Target device ID */ | ||
342 | u32 page; /* HTML page */ | ||
343 | void *resbuf; /* Buffer for reply HTML page */ | ||
344 | u32 *reslen; /* Length in bytes of reply buffer */ | ||
345 | void *qbuf; /* Pointer to HTTP query string */ | ||
346 | u32 qlen; /* Length in bytes of query string buffer */ | ||
347 | }; | ||
348 | |||
349 | DESCRIPTION | ||
350 | |||
351 | This function posts a UtilConfigDialog message to the device identified | ||
352 | by htquery->iop and htquery->tid. The requested HTML page number is | ||
353 | provided by the htquery->page field, and the resultant data is stored | ||
354 | in the buffer pointed to by htquery->resbuf. If there is an HTTP query | ||
355 | string that is to be sent to the device, it should be sent in the buffer | ||
356 | pointed to by htquery->qbuf. If there is no query string, this field | ||
357 | should be set to NULL. The actual size of the reply received is written | ||
358 | into *(htquery->reslen). | ||
359 | |||
360 | RETURNS | ||
361 | |||
362 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
363 | is returned and errno is set appropriately: | ||
364 | |||
365 | EFAULT Invalid user space pointer was passed | ||
366 | ENXIO Invalid IOP number | ||
367 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
368 | buffer length is written into *(htquery->reslen) | ||
369 | ETIMEDOUT Timeout waiting for reply message | ||
370 | ENOMEM Kernel memory allocation error | ||
371 | |||
372 | XIII. Events | ||
373 | |||
374 | This interface is still being determined. The current idea is to use | ||
375 | the select() interface to allow user applications to periodically poll | ||
376 | the /dev/i2o/ctl device for events. When select() notifies the user | ||
377 | that an event is available, the user would call read() to retrieve | ||
378 | a list of all the events that are pending for the specific device. | ||
379 | |||
380 | ============================================================================= | ||
381 | Revision History | ||
382 | ============================================================================= | ||
383 | |||
384 | Rev 0.1 - 04/01/99 | ||
385 | - Initial revision | ||
386 | |||
387 | Rev 0.2 - 04/06/99 | ||
388 | - Changed return values to match UNIX ioctl() standard. Only return values | ||
389 | are 0 and -1. All errors are reported through errno. | ||
390 | - Added summary of proposed possible event interfaces | ||
391 | |||
392 | Rev 0.3 - 04/20/99 | ||
393 | - Changed all ioctls() to use pointers to user data instead of actual data | ||
394 | - Updated error values to match the code | ||
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
new file mode 100644
index 000000000000..2a5d478fc60e
--- /dev/null
+++ b/drivers/message/i2o/debug.c
@@ -0,0 +1,481 @@
1 | #include <linux/config.h> | ||
2 | #include <linux/module.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/pci.h> | ||
5 | #include <linux/i2o.h> | ||
6 | |||
7 | extern struct i2o_driver **i2o_drivers; | ||
8 | extern unsigned int i2o_max_drivers; | ||
9 | static void i2o_report_util_cmd(u8 cmd); | ||
10 | static void i2o_report_exec_cmd(u8 cmd); | ||
11 | static void i2o_report_fail_status(u8 req_status, u32 * msg); | ||
12 | static void i2o_report_common_status(u8 req_status); | ||
13 | static void i2o_report_common_dsc(u16 detailed_status); | ||
14 | |||
15 | /* | ||
16 | * Used for error reporting/debugging purposes. | ||
17 | * Report Cmd name, Request status, Detailed Status. | ||
18 | */ | ||
19 | void i2o_report_status(const char *severity, const char *str, | ||
20 | struct i2o_message *m) | ||
21 | { | ||
22 | u32 *msg = (u32 *) m; | ||
23 | u8 cmd = (msg[1] >> 24) & 0xFF; | ||
24 | u8 req_status = (msg[4] >> 24) & 0xFF; | ||
25 | u16 detailed_status = msg[4] & 0xFFFF; | ||
26 | //struct i2o_driver *h = i2o_drivers[msg[2] & (i2o_max_drivers-1)]; | ||
27 | |||
28 | if (cmd == I2O_CMD_UTIL_EVT_REGISTER) | ||
29 | return; // No status in this reply | ||
30 | |||
31 | printk(KERN_DEBUG "%s%s: ", severity, str); | ||
32 | |||
33 | if (cmd < 0x1F) // Utility cmd | ||
34 | i2o_report_util_cmd(cmd); | ||
35 | |||
36 | else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd | ||
37 | i2o_report_exec_cmd(cmd); | ||
38 | else | ||
39 | printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds | ||
40 | |||
41 | if (msg[0] & MSG_FAIL) { | ||
42 | i2o_report_fail_status(req_status, msg); | ||
43 | return; | ||
44 | } | ||
45 | |||
46 | i2o_report_common_status(req_status); | ||
47 | |||
48 | if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) | ||
49 | i2o_report_common_dsc(detailed_status); | ||
50 | else | ||
51 | printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", | ||
52 | detailed_status); | ||
53 | } | ||
54 | |||
55 | /* Used to dump a message to syslog during debugging */ | ||
56 | void i2o_dump_message(struct i2o_message *m) | ||
57 | { | ||
58 | #ifdef DEBUG | ||
59 | u32 *msg = (u32 *) m; | ||
60 | int i; | ||
61 | printk(KERN_INFO "Dumping I2O message size %d @ %p\n", | ||
62 | msg[0] >> 16 & 0xffff, msg); | ||
63 | for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++) | ||
64 | printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); | ||
65 | #endif | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Used for error reporting/debugging purposes. | ||
70 | * The following fail statuses are common to all classes. | ||
71 | * The preserved message must be handled in the reply handler. | ||
72 | */ | ||
73 | static void i2o_report_fail_status(u8 req_status, u32 * msg) | ||
74 | { | ||
75 | static char *FAIL_STATUS[] = { | ||
76 | "0x80", /* not used */ | ||
77 | "SERVICE_SUSPENDED", /* 0x81 */ | ||
78 | "SERVICE_TERMINATED", /* 0x82 */ | ||
79 | "CONGESTION", | ||
80 | "FAILURE", | ||
81 | "STATE_ERROR", | ||
82 | "TIME_OUT", | ||
83 | "ROUTING_FAILURE", | ||
84 | "INVALID_VERSION", | ||
85 | "INVALID_OFFSET", | ||
86 | "INVALID_MSG_FLAGS", | ||
87 | "FRAME_TOO_SMALL", | ||
88 | "FRAME_TOO_LARGE", | ||
89 | "INVALID_TARGET_ID", | ||
90 | "INVALID_INITIATOR_ID", | ||
91 | "INVALID_INITIATOR_CONTEX", /* 0x8F */ | ||
92 | "UNKNOWN_FAILURE" /* 0xFF */ | ||
93 | }; | ||
94 | |||
95 | if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) | ||
96 | printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", | ||
97 | req_status); | ||
98 | else | ||
99 | printk(KERN_DEBUG "TRANSPORT_%s.\n", | ||
100 | FAIL_STATUS[req_status & 0x0F]); | ||
101 | |||
102 | /* Dump some details */ | ||
103 | |||
104 | printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n", | ||
105 | (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); | ||
106 | printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n", | ||
107 | (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); | ||
108 | printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", | ||
109 | msg[5] >> 16, msg[5] & 0xFFF); | ||
110 | |||
111 | printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF); | ||
112 | if (msg[4] & (1 << 16)) | ||
113 | printk(KERN_DEBUG "(FormatError), " | ||
114 | "this msg can never be delivered/processed.\n"); | ||
115 | if (msg[4] & (1 << 17)) | ||
116 | printk(KERN_DEBUG "(PathError), " | ||
117 | "this msg can no longer be delivered/processed.\n"); | ||
118 | if (msg[4] & (1 << 18)) | ||
119 | printk(KERN_DEBUG "(PathState), " | ||
120 | "the system state does not allow delivery.\n"); | ||
121 | if (msg[4] & (1 << 19)) | ||
122 | printk(KERN_DEBUG | ||
123 | "(Congestion), resources temporarily not available;" | ||
124 | "do not retry immediately.\n"); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Used for error reporting/debugging purposes. | ||
129 | * The following reply statuses are common to all classes. | ||
130 | */ | ||
131 | static void i2o_report_common_status(u8 req_status) | ||
132 | { | ||
133 | static char *REPLY_STATUS[] = { | ||
134 | "SUCCESS", | ||
135 | "ABORT_DIRTY", | ||
136 | "ABORT_NO_DATA_TRANSFER", | ||
137 | "ABORT_PARTIAL_TRANSFER", | ||
138 | "ERROR_DIRTY", | ||
139 | "ERROR_NO_DATA_TRANSFER", | ||
140 | "ERROR_PARTIAL_TRANSFER", | ||
141 | "PROCESS_ABORT_DIRTY", | ||
142 | "PROCESS_ABORT_NO_DATA_TRANSFER", | ||
143 | "PROCESS_ABORT_PARTIAL_TRANSFER", | ||
144 | "TRANSACTION_ERROR", | ||
145 | "PROGRESS_REPORT" | ||
146 | }; | ||
147 | |||
148 | if (req_status >= ARRAY_SIZE(REPLY_STATUS)) | ||
149 | printk(KERN_DEBUG "RequestStatus = %0#2x", req_status); | ||
150 | else | ||
151 | printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]); | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Used for error reporting/debugging purposes. | ||
156 | * The following detailed statuses are valid for the executive class, | ||
157 | * utility class, DDM class and for transaction error replies. | ||
158 | */ | ||
159 | static void i2o_report_common_dsc(u16 detailed_status) | ||
160 | { | ||
161 | static char *COMMON_DSC[] = { | ||
162 | "SUCCESS", | ||
163 | "0x01", // not used | ||
164 | "BAD_KEY", | ||
165 | "TCL_ERROR", | ||
166 | "REPLY_BUFFER_FULL", | ||
167 | "NO_SUCH_PAGE", | ||
168 | "INSUFFICIENT_RESOURCE_SOFT", | ||
169 | "INSUFFICIENT_RESOURCE_HARD", | ||
170 | "0x08", // not used | ||
171 | "CHAIN_BUFFER_TOO_LARGE", | ||
172 | "UNSUPPORTED_FUNCTION", | ||
173 | "DEVICE_LOCKED", | ||
174 | "DEVICE_RESET", | ||
175 | "INAPPROPRIATE_FUNCTION", | ||
176 | "INVALID_INITIATOR_ADDRESS", | ||
177 | "INVALID_MESSAGE_FLAGS", | ||
178 | "INVALID_OFFSET", | ||
179 | "INVALID_PARAMETER", | ||
180 | "INVALID_REQUEST", | ||
181 | "INVALID_TARGET_ADDRESS", | ||
182 | "MESSAGE_TOO_LARGE", | ||
183 | "MESSAGE_TOO_SMALL", | ||
184 | "MISSING_PARAMETER", | ||
185 | "TIMEOUT", | ||
186 | "UNKNOWN_ERROR", | ||
187 | "UNKNOWN_FUNCTION", | ||
188 | "UNSUPPORTED_VERSION", | ||
189 | "DEVICE_BUSY", | ||
190 | "DEVICE_NOT_AVAILABLE" | ||
191 | }; | ||
192 | |||
193 | if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) | ||
194 | printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", | ||
195 | detailed_status); | ||
196 | else | ||
197 | printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * Used for error reporting/debugging purposes | ||
202 | */ | ||
203 | static void i2o_report_util_cmd(u8 cmd) | ||
204 | { | ||
205 | switch (cmd) { | ||
206 | case I2O_CMD_UTIL_NOP: | ||
207 | printk(KERN_DEBUG "UTIL_NOP, "); | ||
208 | break; | ||
209 | case I2O_CMD_UTIL_ABORT: | ||
210 | printk(KERN_DEBUG "UTIL_ABORT, "); | ||
211 | break; | ||
212 | case I2O_CMD_UTIL_CLAIM: | ||
213 | printk(KERN_DEBUG "UTIL_CLAIM, "); | ||
214 | break; | ||
215 | case I2O_CMD_UTIL_RELEASE: | ||
216 | printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, "); | ||
217 | break; | ||
218 | case I2O_CMD_UTIL_CONFIG_DIALOG: | ||
219 | printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, "); | ||
220 | break; | ||
221 | case I2O_CMD_UTIL_DEVICE_RESERVE: | ||
222 | printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, "); | ||
223 | break; | ||
224 | case I2O_CMD_UTIL_DEVICE_RELEASE: | ||
225 | printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, "); | ||
226 | break; | ||
227 | case I2O_CMD_UTIL_EVT_ACK: | ||
228 | printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, "); | ||
229 | break; | ||
230 | case I2O_CMD_UTIL_EVT_REGISTER: | ||
231 | printk(KERN_DEBUG "UTIL_EVENT_REGISTER, "); | ||
232 | break; | ||
233 | case I2O_CMD_UTIL_LOCK: | ||
234 | printk(KERN_DEBUG "UTIL_LOCK, "); | ||
235 | break; | ||
236 | case I2O_CMD_UTIL_LOCK_RELEASE: | ||
237 | printk(KERN_DEBUG "UTIL_LOCK_RELEASE, "); | ||
238 | break; | ||
239 | case I2O_CMD_UTIL_PARAMS_GET: | ||
240 | printk(KERN_DEBUG "UTIL_PARAMS_GET, "); | ||
241 | break; | ||
242 | case I2O_CMD_UTIL_PARAMS_SET: | ||
243 | printk(KERN_DEBUG "UTIL_PARAMS_SET, "); | ||
244 | break; | ||
245 | case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: | ||
246 | printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, "); | ||
247 | break; | ||
248 | default: | ||
249 | printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); | ||
250 | } | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * Used for error reporting/debugging purposes | ||
255 | */ | ||
256 | static void i2o_report_exec_cmd(u8 cmd) | ||
257 | { | ||
258 | switch (cmd) { | ||
259 | case I2O_CMD_ADAPTER_ASSIGN: | ||
260 | printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, "); | ||
261 | break; | ||
262 | case I2O_CMD_ADAPTER_READ: | ||
263 | printk(KERN_DEBUG "EXEC_ADAPTER_READ, "); | ||
264 | break; | ||
265 | case I2O_CMD_ADAPTER_RELEASE: | ||
266 | printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, "); | ||
267 | break; | ||
268 | case I2O_CMD_BIOS_INFO_SET: | ||
269 | printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, "); | ||
270 | break; | ||
271 | case I2O_CMD_BOOT_DEVICE_SET: | ||
272 | printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, "); | ||
273 | break; | ||
274 | case I2O_CMD_CONFIG_VALIDATE: | ||
275 | printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, "); | ||
276 | break; | ||
277 | case I2O_CMD_CONN_SETUP: | ||
278 | printk(KERN_DEBUG "EXEC_CONN_SETUP, "); | ||
279 | break; | ||
280 | case I2O_CMD_DDM_DESTROY: | ||
281 | printk(KERN_DEBUG "EXEC_DDM_DESTROY, "); | ||
282 | break; | ||
283 | case I2O_CMD_DDM_ENABLE: | ||
284 | printk(KERN_DEBUG "EXEC_DDM_ENABLE, "); | ||
285 | break; | ||
286 | case I2O_CMD_DDM_QUIESCE: | ||
287 | printk(KERN_DEBUG "EXEC_DDM_QUIESCE, "); | ||
288 | break; | ||
289 | case I2O_CMD_DDM_RESET: | ||
290 | printk(KERN_DEBUG "EXEC_DDM_RESET, "); | ||
291 | break; | ||
292 | case I2O_CMD_DDM_SUSPEND: | ||
293 | printk(KERN_DEBUG "EXEC_DDM_SUSPEND, "); | ||
294 | break; | ||
295 | case I2O_CMD_DEVICE_ASSIGN: | ||
296 | printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, "); | ||
297 | break; | ||
298 | case I2O_CMD_DEVICE_RELEASE: | ||
299 | printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, "); | ||
300 | break; | ||
301 | case I2O_CMD_HRT_GET: | ||
302 | printk(KERN_DEBUG "EXEC_HRT_GET, "); | ||
303 | break; | ||
304 | case I2O_CMD_ADAPTER_CLEAR: | ||
305 | printk(KERN_DEBUG "EXEC_IOP_CLEAR, "); | ||
306 | break; | ||
307 | case I2O_CMD_ADAPTER_CONNECT: | ||
308 | printk(KERN_DEBUG "EXEC_IOP_CONNECT, "); | ||
309 | break; | ||
310 | case I2O_CMD_ADAPTER_RESET: | ||
311 | printk(KERN_DEBUG "EXEC_IOP_RESET, "); | ||
312 | break; | ||
313 | case I2O_CMD_LCT_NOTIFY: | ||
314 | printk(KERN_DEBUG "EXEC_LCT_NOTIFY, "); | ||
315 | break; | ||
316 | case I2O_CMD_OUTBOUND_INIT: | ||
317 | printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, "); | ||
318 | break; | ||
319 | case I2O_CMD_PATH_ENABLE: | ||
320 | printk(KERN_DEBUG "EXEC_PATH_ENABLE, "); | ||
321 | break; | ||
322 | case I2O_CMD_PATH_QUIESCE: | ||
323 | printk(KERN_DEBUG "EXEC_PATH_QUIESCE, "); | ||
324 | break; | ||
325 | case I2O_CMD_PATH_RESET: | ||
326 | printk(KERN_DEBUG "EXEC_PATH_RESET, "); | ||
327 | break; | ||
328 | case I2O_CMD_STATIC_MF_CREATE: | ||
329 | printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, "); | ||
330 | break; | ||
331 | case I2O_CMD_STATIC_MF_RELEASE: | ||
332 | printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, "); | ||
333 | break; | ||
334 | case I2O_CMD_STATUS_GET: | ||
335 | printk(KERN_DEBUG "EXEC_STATUS_GET, "); | ||
336 | break; | ||
337 | case I2O_CMD_SW_DOWNLOAD: | ||
338 | printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, "); | ||
339 | break; | ||
340 | case I2O_CMD_SW_UPLOAD: | ||
341 | printk(KERN_DEBUG "EXEC_SW_UPLOAD, "); | ||
342 | break; | ||
343 | case I2O_CMD_SW_REMOVE: | ||
344 | printk(KERN_DEBUG "EXEC_SW_REMOVE, "); | ||
345 | break; | ||
346 | case I2O_CMD_SYS_ENABLE: | ||
347 | printk(KERN_DEBUG "EXEC_SYS_ENABLE, "); | ||
348 | break; | ||
349 | case I2O_CMD_SYS_MODIFY: | ||
350 | printk(KERN_DEBUG "EXEC_SYS_MODIFY, "); | ||
351 | break; | ||
352 | case I2O_CMD_SYS_QUIESCE: | ||
353 | printk(KERN_DEBUG "EXEC_SYS_QUIESCE, "); | ||
354 | break; | ||
355 | case I2O_CMD_SYS_TAB_SET: | ||
356 | printk(KERN_DEBUG "EXEC_SYS_TAB_SET, "); | ||
357 | break; | ||
358 | default: | ||
359 | printk(KERN_DEBUG "Cmd = %#02x, ", cmd); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | void i2o_debug_state(struct i2o_controller *c) | ||
364 | { | ||
365 | printk(KERN_INFO "%s: State = ", c->name); | ||
366 | switch (((i2o_status_block *) c->status_block.virt)->iop_state) { | ||
367 | case 0x01: | ||
368 | printk(KERN_DEBUG "INIT\n"); | ||
369 | break; | ||
370 | case 0x02: | ||
371 | printk(KERN_DEBUG "RESET\n"); | ||
372 | break; | ||
373 | case 0x04: | ||
374 | printk(KERN_DEBUG "HOLD\n"); | ||
375 | break; | ||
376 | case 0x05: | ||
377 | printk(KERN_DEBUG "READY\n"); | ||
378 | break; | ||
379 | case 0x08: | ||
380 | printk(KERN_DEBUG "OPERATIONAL\n"); | ||
381 | break; | ||
382 | case 0x10: | ||
383 | printk(KERN_DEBUG "FAILED\n"); | ||
384 | break; | ||
385 | case 0x11: | ||
386 | printk(KERN_DEBUG "FAULTED\n"); | ||
387 | break; | ||
388 | default: | ||
389 | printk(KERN_DEBUG "%x (unknown !!)\n", | ||
390 | ((i2o_status_block *) c->status_block.virt)->iop_state); | ||
391 | } | ||
392 | }; | ||
393 | |||
394 | void i2o_dump_hrt(struct i2o_controller *c) | ||
395 | { | ||
396 | u32 *rows = (u32 *) c->hrt.virt; | ||
397 | u8 *p = (u8 *) c->hrt.virt; | ||
398 | u8 *d; | ||
399 | int count; | ||
400 | int length; | ||
401 | int i; | ||
402 | int state; | ||
403 | |||
404 | if (p[3] != 0) { | ||
405 | printk(KERN_ERR | ||
406 | "%s: HRT table for controller is too new a version.\n", | ||
407 | c->name); | ||
408 | return; | ||
409 | } | ||
410 | |||
411 | count = p[0] | (p[1] << 8); | ||
412 | length = p[2]; | ||
413 | |||
414 | printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n", | ||
415 | c->name, count, length << 2); | ||
416 | |||
417 | rows += 2; | ||
418 | |||
419 | for (i = 0; i < count; i++) { | ||
420 | printk(KERN_INFO "Adapter %08X: ", rows[0]); | ||
421 | p = (u8 *) (rows + 1); | ||
422 | d = (u8 *) (rows + 2); | ||
423 | state = p[1] << 8 | p[0]; | ||
424 | |||
425 | printk(KERN_DEBUG "TID %04X:[", state & 0xFFF); | ||
426 | state >>= 12; | ||
427 | if (state & (1 << 0)) | ||
428 | printk(KERN_DEBUG "H"); /* Hidden */ | ||
429 | if (state & (1 << 2)) { | ||
430 | printk(KERN_DEBUG "P"); /* Present */ | ||
431 | if (state & (1 << 1)) | ||
432 | printk(KERN_DEBUG "C"); /* Controlled */ | ||
433 | } | ||
434 | if (state > 9) | ||
435 | printk(KERN_DEBUG "*"); /* Hard */ | ||
436 | |||
437 | printk(KERN_DEBUG "]:"); | ||
438 | |||
439 | switch (p[3] & 0xFFFF) { | ||
440 | case 0: | ||
441 | /* Adapter private bus - easy */ | ||
442 | printk(KERN_DEBUG | ||
443 | "Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2], | ||
444 | d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
445 | break; | ||
446 | case 1: | ||
447 | /* ISA bus */ | ||
448 | printk(KERN_DEBUG | ||
449 | "ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2], | ||
450 | d[2], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
451 | break; | ||
452 | |||
453 | case 2: /* EISA bus */ | ||
454 | printk(KERN_DEBUG | ||
455 | "EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X", | ||
456 | p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
457 | break; | ||
458 | |||
459 | case 3: /* MCA bus */ | ||
460 | printk(KERN_DEBUG | ||
461 | "MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2], | ||
462 | d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
463 | break; | ||
464 | |||
465 | case 4: /* PCI bus */ | ||
466 | printk(KERN_DEBUG | ||
467 | "PCI %d: Bus %d Device %d Function %d", p[2], | ||
468 | d[2], d[1], d[0]); | ||
469 | break; | ||
470 | |||
471 | case 0x80: /* Other */ | ||
472 | default: | ||
473 | printk(KERN_DEBUG "Unsupported bus type."); | ||
474 | break; | ||
475 | } | ||
476 | printk(KERN_DEBUG "\n"); | ||
477 | rows += length; | ||
478 | } | ||
479 | } | ||
480 | |||
481 | EXPORT_SYMBOL(i2o_dump_message); | ||
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
new file mode 100644
index 000000000000..eb907e87bc7b
--- /dev/null
+++ b/drivers/message/i2o/device.c
@@ -0,0 +1,634 @@
1 | /* | ||
2 | * Functions to handle I2O devices | ||
3 | * | ||
4 | * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | #include <linux/delay.h> | ||
19 | |||
20 | /* Exec OSM functions */ | ||
21 | extern struct bus_type i2o_bus_type; | ||
22 | |||
23 | /** | ||
24 | * i2o_device_issue_claim - claim or release a device | ||
25 | * @dev: I2O device to claim or release | ||
26 | * @cmd: claim or release command | ||
27 | * @type: type of claim | ||
28 | * | ||
29 | * Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent | ||
30 | * is set by cmd. dev is the I2O device which should be claimed or | ||
31 | * released and the type is the claim type (see the I2O spec). | ||
32 | * | ||
33 | * Returns 0 on success or negative error code on failure. | ||
34 | */ | ||
35 | static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, | ||
36 | u32 type) | ||
37 | { | ||
38 | struct i2o_message __iomem *msg; | ||
39 | u32 m; | ||
40 | |||
41 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
42 | if (m == I2O_QUEUE_EMPTY) | ||
43 | return -ETIMEDOUT; | ||
44 | |||
45 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
46 | writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]); | ||
47 | writel(type, &msg->body[0]); | ||
48 | |||
49 | return i2o_msg_post_wait(dev->iop, m, 60); | ||
50 | }; | ||
51 | |||
52 | /** | ||
53 | * i2o_device_claim - claim a device for use by an OSM | ||
54 | * @dev: I2O device to claim | ||
55 | * @drv: I2O driver which wants to claim the device | ||
56 | * | ||
57 | * Do the leg work to assign a device to a given OSM. If the claim succeeds, | ||
58 | * the OSM becomes the primary user of the device. If the attempt fails a | ||
59 | * negative errno code is returned. On success zero is returned. | ||
60 | */ | ||
61 | int i2o_device_claim(struct i2o_device *dev) | ||
62 | { | ||
63 | int rc = 0; | ||
64 | |||
65 | down(&dev->lock); | ||
66 | |||
67 | rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY); | ||
68 | if (!rc) | ||
69 | pr_debug("i2o: claim of device %d succeded\n", | ||
70 | dev->lct_data.tid); | ||
71 | else | ||
72 | pr_debug("i2o: claim of device %d failed %d\n", | ||
73 | dev->lct_data.tid, rc); | ||
74 | |||
75 | up(&dev->lock); | ||
76 | |||
77 | return rc; | ||
78 | }; | ||
79 | |||
80 | /** | ||
81 | * i2o_device_claim_release - release a device that the OSM is using | ||
82 | * @dev: device to release | ||
83 | * @drv: driver which claimed the device | ||
84 | * | ||
85 | * Drop a claim by an OSM on a given I2O device. | ||
86 | * | ||
87 | * AC - some devices seem to want to refuse an unclaim until they have | ||
88 | * finished internal processing. It makes sense since you don't want a | ||
89 | * new device to go reconfiguring the entire system until you are done. | ||
90 | * Thus we are prepared to wait briefly. | ||
91 | * | ||
92 | * Returns 0 on success or negative error code on failure. | ||
93 | */ | ||
94 | int i2o_device_claim_release(struct i2o_device *dev) | ||
95 | { | ||
96 | int tries; | ||
97 | int rc = 0; | ||
98 | |||
99 | down(&dev->lock); | ||
100 | |||
101 | /* | ||
102 | * If the controller takes a nonblocking approach to | ||
103 | * releases we have to sleep/poll for a few times. | ||
104 | */ | ||
105 | for (tries = 0; tries < 10; tries++) { | ||
106 | rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE, | ||
107 | I2O_CLAIM_PRIMARY); | ||
108 | if (!rc) | ||
109 | break; | ||
110 | |||
111 | ssleep(1); | ||
112 | } | ||
113 | |||
114 | if (!rc) | ||
115 | pr_debug("i2o: claim release of device %d succeded\n", | ||
116 | dev->lct_data.tid); | ||
117 | else | ||
118 | pr_debug("i2o: claim release of device %d failed %d\n", | ||
119 | dev->lct_data.tid, rc); | ||
120 | |||
121 | up(&dev->lock); | ||
122 | |||
123 | return rc; | ||
124 | }; | ||
125 | |||
126 | /** | ||
127 | * i2o_device_release - release the memory for a I2O device | ||
128 | * @dev: I2O device which should be released | ||
129 | * | ||
130 | * Release the allocated memory. This function is called if refcount of | ||
131 | * device reaches 0 automatically. | ||
132 | */ | ||
133 | static void i2o_device_release(struct device *dev) | ||
134 | { | ||
135 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
136 | |||
137 | pr_debug("i2o: device %s released\n", dev->bus_id); | ||
138 | |||
139 | kfree(i2o_dev); | ||
140 | }; | ||
141 | |||
142 | /** | ||
143 | * i2o_device_class_release - Remove I2O device attributes | ||
144 | * @cd: I2O class device which is added to the I2O device class | ||
145 | * | ||
146 | * Removes the attributes from the I2O device again. Also searches each device | ||
147 | * on the controller for I2O devices which refer to this device as parent | ||
148 | * or user and removes these links as well. | ||
149 | */ | ||
150 | static void i2o_device_class_release(struct class_device *cd) | ||
151 | { | ||
152 | struct i2o_device *i2o_dev, *tmp; | ||
153 | struct i2o_controller *c; | ||
154 | |||
155 | i2o_dev = to_i2o_device(cd->dev); | ||
156 | c = i2o_dev->iop; | ||
157 | |||
158 | sysfs_remove_link(&i2o_dev->device.kobj, "parent"); | ||
159 | sysfs_remove_link(&i2o_dev->device.kobj, "user"); | ||
160 | |||
161 | list_for_each_entry(tmp, &c->devices, list) { | ||
162 | if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | ||
163 | sysfs_remove_link(&tmp->device.kobj, "parent"); | ||
164 | if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | ||
165 | sysfs_remove_link(&tmp->device.kobj, "user"); | ||
166 | } | ||
167 | }; | ||
168 | |||
169 | /* I2O device class */ | ||
170 | static struct class i2o_device_class = { | ||
171 | .name = "i2o_device", | ||
172 | .release = i2o_device_class_release | ||
173 | }; | ||
174 | |||
175 | /** | ||
176 | * i2o_device_alloc - Allocate an I2O device and initialize it | ||
177 | * | ||
178 | * Allocate the memory for an I2O device and initialize locks and lists | ||
179 | * | ||
180 | * Returns the allocated I2O device or a negative error code if the device | ||
181 | * could not be allocated. | ||
182 | */ | ||
183 | static struct i2o_device *i2o_device_alloc(void) | ||
184 | { | ||
185 | struct i2o_device *dev; | ||
186 | |||
187 | dev = kmalloc(sizeof(*dev), GFP_KERNEL); | ||
188 | if (!dev) | ||
189 | return ERR_PTR(-ENOMEM); | ||
190 | |||
191 | memset(dev, 0, sizeof(*dev)); | ||
192 | |||
193 | INIT_LIST_HEAD(&dev->list); | ||
194 | init_MUTEX(&dev->lock); | ||
195 | |||
196 | dev->device.bus = &i2o_bus_type; | ||
197 | dev->device.release = &i2o_device_release; | ||
198 | dev->classdev.class = &i2o_device_class; | ||
199 | dev->classdev.dev = &dev->device; | ||
200 | |||
201 | return dev; | ||
202 | }; | ||
203 | |||
204 | /** | ||
205 | * i2o_device_add - allocate a new I2O device and add it to the IOP | ||
206 | * @iop: I2O controller where the device is on | ||
207 | * @entry: LCT entry of the I2O device | ||
208 | * | ||
209 | * Allocate a new I2O device and initialize it with the LCT entry. The | ||
210 | * device is appended to the device list of the controller. | ||
211 | * | ||
212 | * Returns a pointer to the I2O device on success or negative error code | ||
213 | * on failure. | ||
214 | */ | ||
215 | static struct i2o_device *i2o_device_add(struct i2o_controller *c, | ||
216 | i2o_lct_entry * entry) | ||
217 | { | ||
218 | struct i2o_device *dev; | ||
219 | |||
220 | dev = i2o_device_alloc(); | ||
221 | if (IS_ERR(dev)) { | ||
222 | printk(KERN_ERR "i2o: unable to allocate i2o device\n"); | ||
223 | return dev; | ||
224 | } | ||
225 | |||
226 | dev->lct_data = *entry; | ||
227 | |||
228 | snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit, | ||
229 | dev->lct_data.tid); | ||
230 | |||
231 | snprintf(dev->classdev.class_id, BUS_ID_SIZE, "%d:%03x", c->unit, | ||
232 | dev->lct_data.tid); | ||
233 | |||
234 | dev->iop = c; | ||
235 | dev->device.parent = &c->device; | ||
236 | |||
237 | device_register(&dev->device); | ||
238 | |||
239 | list_add_tail(&dev->list, &c->devices); | ||
240 | |||
241 | class_device_register(&dev->classdev); | ||
242 | |||
243 | i2o_driver_notify_device_add_all(dev); | ||
244 | |||
245 | pr_debug("i2o: device %s added\n", dev->device.bus_id); | ||
246 | |||
247 | return dev; | ||
248 | }; | ||
249 | |||
250 | /** | ||
251 | * i2o_device_remove - remove an I2O device from the I2O core | ||
252 | * @dev: I2O device which should be released | ||
253 | * | ||
254 | * It is used on I2O controller removal or LCT modification, when the device | ||
255 | * is removed from the system. Note that the device could still hang | ||
256 | * around until the refcount reaches 0. | ||
257 | */ | ||
258 | void i2o_device_remove(struct i2o_device *i2o_dev) | ||
259 | { | ||
260 | i2o_driver_notify_device_remove_all(i2o_dev); | ||
261 | class_device_unregister(&i2o_dev->classdev); | ||
262 | list_del(&i2o_dev->list); | ||
263 | device_unregister(&i2o_dev->device); | ||
264 | }; | ||
265 | |||
266 | /** | ||
267 | * i2o_device_parse_lct - Parse a previously fetched LCT and create devices | ||
268 | * @c: I2O controller from which the LCT should be parsed. | ||
269 | * | ||
270 | * The Logical Configuration Table tells us what we can talk to on the | ||
271 | * board. For every entry we create an I2O device, which is registered in | ||
272 | * the I2O core. | ||
273 | * | ||
274 | * Returns 0 on success or negative error code on failure. | ||
275 | */ | ||
276 | int i2o_device_parse_lct(struct i2o_controller *c) | ||
277 | { | ||
278 | struct i2o_device *dev, *tmp; | ||
279 | i2o_lct *lct; | ||
280 | int i; | ||
281 | int max; | ||
282 | |||
283 | down(&c->lct_lock); | ||
284 | |||
285 | if (c->lct) | ||
286 | kfree(c->lct); | ||
287 | |||
288 | lct = c->dlct.virt; | ||
289 | |||
290 | c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL); | ||
291 | if (!c->lct) { | ||
292 | up(&c->lct_lock); | ||
293 | return -ENOMEM; | ||
294 | } | ||
295 | |||
296 | if (lct->table_size * 4 > c->dlct.len) { | ||
297 | memcpy_fromio(c->lct, c->dlct.virt, c->dlct.len); | ||
298 | up(&c->lct_lock); | ||
299 | return -EAGAIN; | ||
300 | } | ||
301 | |||
302 | memcpy_fromio(c->lct, c->dlct.virt, lct->table_size * 4); | ||
303 | |||
304 | lct = c->lct; | ||
305 | |||
306 | max = (lct->table_size - 3) / 9; | ||
307 | |||
308 | pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, | ||
309 | lct->table_size); | ||
310 | |||
311 | /* remove devices, which are not in the LCT anymore */ | ||
312 | list_for_each_entry_safe(dev, tmp, &c->devices, list) { | ||
313 | int found = 0; | ||
314 | |||
315 | for (i = 0; i < max; i++) { | ||
316 | if (lct->lct_entry[i].tid == dev->lct_data.tid) { | ||
317 | found = 1; | ||
318 | break; | ||
319 | } | ||
320 | } | ||
321 | |||
322 | if (!found) | ||
323 | i2o_device_remove(dev); | ||
324 | } | ||
325 | |||
326 | /* add new devices, which are new in the LCT */ | ||
327 | for (i = 0; i < max; i++) { | ||
328 | int found = 0; | ||
329 | |||
330 | list_for_each_entry_safe(dev, tmp, &c->devices, list) { | ||
331 | if (lct->lct_entry[i].tid == dev->lct_data.tid) { | ||
332 | found = 1; | ||
333 | break; | ||
334 | } | ||
335 | } | ||
336 | |||
337 | if (!found) | ||
338 | i2o_device_add(c, &lct->lct_entry[i]); | ||
339 | } | ||
340 | up(&c->lct_lock); | ||
341 | |||
342 | return 0; | ||
343 | }; | ||
344 | |||
345 | /** | ||
346 | * i2o_device_class_show_class_id - Displays class id of I2O device | ||
347 | * @cd: class device of which the class id should be displayed | ||
348 | * @buf: buffer into which the class id should be printed | ||
349 | * | ||
350 | * Returns the number of bytes which are printed into the buffer. | ||
351 | */ | ||
352 | static ssize_t i2o_device_class_show_class_id(struct class_device *cd, | ||
353 | char *buf) | ||
354 | { | ||
355 | struct i2o_device *dev = to_i2o_device(cd->dev); | ||
356 | |||
357 | sprintf(buf, "%03x\n", dev->lct_data.class_id); | ||
358 | return strlen(buf) + 1; | ||
359 | }; | ||
360 | |||
361 | /** | ||
362 | * i2o_device_class_show_tid - Displays TID of I2O device | ||
363 | * @cd: class device of which the TID should be displayed | ||
364 | * @buf: buffer into which the class id should be printed | ||
365 | * | ||
366 | * Returns the number of bytes which are printed into the buffer. | ||
367 | */ | ||
368 | static ssize_t i2o_device_class_show_tid(struct class_device *cd, char *buf) | ||
369 | { | ||
370 | struct i2o_device *dev = to_i2o_device(cd->dev); | ||
371 | |||
372 | sprintf(buf, "%03x\n", dev->lct_data.tid); | ||
373 | return strlen(buf) + 1; | ||
374 | }; | ||
375 | |||
376 | /* I2O device class attributes */ | ||
377 | static CLASS_DEVICE_ATTR(class_id, S_IRUGO, i2o_device_class_show_class_id, | ||
378 | NULL); | ||
379 | static CLASS_DEVICE_ATTR(tid, S_IRUGO, i2o_device_class_show_tid, NULL); | ||
380 | |||
381 | /** | ||
382 | * i2o_device_class_add - Adds attributes to the I2O device | ||
383 | * @cd: I2O class device which is added to the I2O device class | ||
384 | * | ||
385 | * This function gets called when an I2O device is added to the class. It | ||
386 | * creates the attributes for each device and creates user/parent symlink | ||
387 | * if necessary. | ||
388 | * | ||
389 | * Returns 0 on success or negative error code on failure. | ||
390 | */ | ||
391 | static int i2o_device_class_add(struct class_device *cd) | ||
392 | { | ||
393 | struct i2o_device *i2o_dev, *tmp; | ||
394 | struct i2o_controller *c; | ||
395 | |||
396 | i2o_dev = to_i2o_device(cd->dev); | ||
397 | c = i2o_dev->iop; | ||
398 | |||
399 | class_device_create_file(cd, &class_device_attr_class_id); | ||
400 | class_device_create_file(cd, &class_device_attr_tid); | ||
401 | |||
402 | /* create user entries for this device */ | ||
403 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); | ||
404 | if (tmp) | ||
405 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, | ||
406 | "user"); | ||
407 | |||
408 | /* create user entries referring to this device */ | ||
409 | list_for_each_entry(tmp, &c->devices, list) | ||
410 | if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | ||
411 | sysfs_create_link(&tmp->device.kobj, | ||
412 | &i2o_dev->device.kobj, "user"); | ||
413 | |||
414 | /* create parent entries for this device */ | ||
415 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); | ||
416 | if (tmp) | ||
417 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, | ||
418 | "parent"); | ||
419 | |||
420 | /* create parent entries referring to this device */ | ||
421 | list_for_each_entry(tmp, &c->devices, list) | ||
422 | if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | ||
423 | sysfs_create_link(&tmp->device.kobj, | ||
424 | &i2o_dev->device.kobj, "parent"); | ||
425 | |||
426 | return 0; | ||
427 | }; | ||
428 | |||
429 | /* I2O device class interface */ | ||
430 | static struct class_interface i2o_device_class_interface = { | ||
431 | .class = &i2o_device_class, | ||
432 | .add = i2o_device_class_add | ||
433 | }; | ||
434 | |||
435 | /* | ||
436 | * Run time support routines | ||
437 | */ | ||
438 | |||
439 | /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET | ||
440 | * | ||
441 | * This function can be used for all UtilParamsGet/Set operations. | ||
442 | * The OperationList is given in oplist-buffer, | ||
443 | * and results are returned in reslist-buffer. | ||
444 | * Note that the minimum sized reslist is 8 bytes and contains | ||
445 | * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. | ||
446 | */ | ||
447 | |||
448 | int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, | ||
449 | int oplen, void *reslist, int reslen) | ||
450 | { | ||
451 | struct i2o_message __iomem *msg; | ||
452 | u32 m; | ||
453 | u32 *res32 = (u32 *) reslist; | ||
454 | u32 *restmp = (u32 *) reslist; | ||
455 | int len = 0; | ||
456 | int i = 0; | ||
457 | int rc; | ||
458 | struct i2o_dma res; | ||
459 | struct i2o_controller *c = i2o_dev->iop; | ||
460 | struct device *dev = &c->pdev->dev; | ||
461 | |||
462 | res.virt = NULL; | ||
463 | |||
464 | if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL)) | ||
465 | return -ENOMEM; | ||
466 | |||
467 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
468 | if (m == I2O_QUEUE_EMPTY) { | ||
469 | i2o_dma_free(dev, &res); | ||
470 | return -ETIMEDOUT; | ||
471 | } | ||
472 | |||
473 | i = 0; | ||
474 | writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid, | ||
475 | &msg->u.head[1]); | ||
476 | writel(0, &msg->body[i++]); | ||
477 | writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */ | ||
478 | memcpy_toio(&msg->body[i], oplist, oplen); | ||
479 | i += (oplen / 4 + (oplen % 4 ? 1 : 0)); | ||
480 | writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */ | ||
481 | writel(res.phys, &msg->body[i++]); | ||
482 | |||
483 | writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | | ||
484 | SGL_OFFSET_5, &msg->u.head[0]); | ||
485 | |||
486 | rc = i2o_msg_post_wait_mem(c, m, 10, &res); | ||
487 | |||
488 | /* This only looks like a memory leak - don't "fix" it. */ | ||
489 | if (rc == -ETIMEDOUT) | ||
490 | return rc; | ||
491 | |||
492 | memcpy_fromio(reslist, res.virt, res.len); | ||
493 | i2o_dma_free(dev, &res); | ||
494 | |||
495 | /* Query failed */ | ||
496 | if (rc) | ||
497 | return rc; | ||
498 | /* | ||
499 | * Calculate number of bytes of Result LIST | ||
500 | * We need to loop through each Result BLOCK and grab the length | ||
501 | */ | ||
502 | restmp = res32 + 1; | ||
503 | len = 1; | ||
504 | for (i = 0; i < (res32[0] & 0X0000FFFF); i++) { | ||
505 | if (restmp[0] & 0x00FF0000) { /* BlockStatus != SUCCESS */ | ||
506 | printk(KERN_WARNING | ||
507 | "%s - Error:\n ErrorInfoSize = 0x%02x, " | ||
508 | "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", | ||
509 | (cmd == | ||
510 | I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" : | ||
511 | "PARAMS_GET", res32[1] >> 24, | ||
512 | (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF); | ||
513 | |||
514 | /* | ||
515 | * If this is the only request, then we return an error | ||
516 | */ | ||
517 | if ((res32[0] & 0x0000FFFF) == 1) { | ||
518 | return -((res32[1] >> 16) & 0xFF); /* -BlockStatus */ | ||
519 | } | ||
520 | } | ||
521 | len += restmp[0] & 0x0000FFFF; /* Length of res BLOCK */ | ||
522 | restmp += restmp[0] & 0x0000FFFF; /* Skip to next BLOCK */ | ||
523 | } | ||
524 | return (len << 2); /* bytes used by result list */ | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Query one field group value or a whole scalar group. | ||
529 | */ | ||
530 | int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, | ||
531 | void *buf, int buflen) | ||
532 | { | ||
533 | u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; | ||
534 | u8 resblk[8 + buflen]; /* 8 bytes for header */ | ||
535 | int size; | ||
536 | |||
537 | if (field == -1) /* whole group */ | ||
538 | opblk[4] = -1; | ||
539 | |||
540 | size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, | ||
541 | sizeof(opblk), resblk, sizeof(resblk)); | ||
542 | |||
543 | memcpy(buf, resblk + 8, buflen); /* cut off header */ | ||
544 | |||
545 | if (size > buflen) | ||
546 | return buflen; | ||
547 | |||
548 | return size; | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * if oper == I2O_PARAMS_TABLE_GET, get from all rows | ||
553 | * if fieldcount == -1 return all fields | ||
554 | * ibuf and ibuflen are unused (use NULL, 0) | ||
555 | * else return specific fields | ||
556 | * ibuf contains fieldindexes | ||
557 | * | ||
558 | * if oper == I2O_PARAMS_LIST_GET, get from specific rows | ||
559 | * if fieldcount == -1 return all fields | ||
560 | * ibuf contains rowcount, keyvalues | ||
561 | * else return specific fields | ||
562 | * fieldcount is # of fieldindexes | ||
563 | * ibuf contains fieldindexes, rowcount, keyvalues | ||
564 | * | ||
565 | * You could also use the i2o_parm_issue() function directly. | ||
566 | */ | ||
567 | int i2o_parm_table_get(struct i2o_device *dev, int oper, int group, | ||
568 | int fieldcount, void *ibuf, int ibuflen, void *resblk, | ||
569 | int reslen) | ||
570 | { | ||
571 | u16 *opblk; | ||
572 | int size; | ||
573 | |||
574 | size = 10 + ibuflen; | ||
575 | if (size % 4) | ||
576 | size += 4 - size % 4; | ||
577 | |||
578 | opblk = kmalloc(size, GFP_KERNEL); | ||
579 | if (opblk == NULL) { | ||
580 | printk(KERN_ERR "i2o: no memory for query buffer.\n"); | ||
581 | return -ENOMEM; | ||
582 | } | ||
583 | |||
584 | opblk[0] = 1; /* operation count */ | ||
585 | opblk[1] = 0; /* pad */ | ||
586 | opblk[2] = oper; | ||
587 | opblk[3] = group; | ||
588 | opblk[4] = fieldcount; | ||
589 | memcpy(opblk + 5, ibuf, ibuflen); /* other params */ | ||
590 | |||
591 | size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk, | ||
592 | size, resblk, reslen); | ||
593 | |||
594 | kfree(opblk); | ||
595 | if (size > reslen) | ||
596 | return reslen; | ||
597 | |||
598 | return size; | ||
599 | } | ||
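
For reference, a caller-side sketch of the two wrappers above. This is illustrative only and not part of the diff; the group and field numbers are placeholders, not taken from the I2O spec.

/* Hypothetical OSM code: read one scalar field and one whole table group. */
static int example_query_device(struct i2o_device *i2o_dev)
{
	u64 capacity;
	u8 table[256];
	int rc;

	/* field 4 of a made-up scalar group 0x0000 */
	rc = i2o_parm_field_get(i2o_dev, 0x0000, 4, &capacity,
				sizeof(capacity));
	if (rc < 0)
		return rc;

	/* all fields of all rows of a made-up table group 0x0001 */
	rc = i2o_parm_table_get(i2o_dev, I2O_PARAMS_TABLE_GET, 0x0001, -1,
				NULL, 0, table, sizeof(table));
	if (rc < 0)
		return rc;

	return 0;
}
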
600 | |||
601 | /** | ||
602 | * i2o_device_init - Initialize I2O devices | ||
603 | * | ||
604 | * Registers the I2O device class. | ||
605 | * | ||
606 | * Returns 0 on success or negative error code on failure. | ||
607 | */ | ||
608 | int i2o_device_init(void) | ||
609 | { | ||
610 | int rc; | ||
611 | |||
612 | rc = class_register(&i2o_device_class); | ||
613 | if (rc) | ||
614 | return rc; | ||
615 | |||
616 | return class_interface_register(&i2o_device_class_interface); | ||
617 | }; | ||
618 | |||
619 | /** | ||
620 | * i2o_device_exit - I2O devices exit function | ||
621 | * | ||
622 | * Unregisters the I2O device class. | ||
623 | */ | ||
624 | void i2o_device_exit(void) | ||
625 | { | ||
626 | class_interface_unregister(&i2o_device_class_interface); | ||
627 | class_unregister(&i2o_device_class); | ||
628 | }; | ||
629 | |||
630 | EXPORT_SYMBOL(i2o_device_claim); | ||
631 | EXPORT_SYMBOL(i2o_device_claim_release); | ||
632 | EXPORT_SYMBOL(i2o_parm_field_get); | ||
633 | EXPORT_SYMBOL(i2o_parm_table_get); | ||
634 | EXPORT_SYMBOL(i2o_parm_issue); | ||
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c new file mode 100644 index 000000000000..91f4edbb2a27 --- /dev/null +++ b/drivers/message/i2o/driver.c | |||
@@ -0,0 +1,374 @@ | |||
1 | /* | ||
2 | * Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs | ||
3 | * | ||
4 | * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | #include <linux/i2o.h> | ||
20 | |||
21 | /* max_drivers - Maximum I2O drivers (OSMs) which could be registered */ | ||
22 | unsigned int i2o_max_drivers = I2O_MAX_DRIVERS; | ||
23 | module_param_named(max_drivers, i2o_max_drivers, uint, 0); | ||
24 | MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support"); | ||
25 | |||
26 | /* I2O drivers lock and array */ | ||
27 | static spinlock_t i2o_drivers_lock; | ||
28 | static struct i2o_driver **i2o_drivers; | ||
29 | |||
30 | /** | ||
31 | * i2o_bus_match - Tell if an I2O device class id matches the class ids | ||
32 | * of the I2O driver (OSM) | ||
33 | * | ||
34 | * @dev: device which should be verified | ||
35 | * @drv: the driver to match against | ||
36 | * | ||
37 | * Used by the bus to check if the driver wants to handle the device. | ||
38 | * | ||
39 | * Returns 1 if the class ids of the driver match the class id of the | ||
40 | * device, otherwise 0. | ||
41 | */ | ||
42 | static int i2o_bus_match(struct device *dev, struct device_driver *drv) | ||
43 | { | ||
44 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
45 | struct i2o_driver *i2o_drv = to_i2o_driver(drv); | ||
46 | struct i2o_class_id *ids = i2o_drv->classes; | ||
47 | |||
48 | if (ids) | ||
49 | while (ids->class_id != I2O_CLASS_END) { | ||
50 | if (ids->class_id == i2o_dev->lct_data.class_id) | ||
51 | return 1; | ||
52 | ids++; | ||
53 | } | ||
54 | return 0; | ||
55 | }; | ||
56 | |||
57 | /* I2O bus type */ | ||
58 | struct bus_type i2o_bus_type = { | ||
59 | .name = "i2o", | ||
60 | .match = i2o_bus_match, | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * i2o_driver_register - Register an I2O driver (OSM) in the I2O core | ||
65 | * @drv: I2O driver which should be registered | ||
66 | * | ||
67 | * Registers the OSM drv in the I2O core and creates an event queue if | ||
68 | * necessary. | ||
69 | * | ||
70 | * Returns 0 on success or negative error code on failure. | ||
71 | */ | ||
72 | int i2o_driver_register(struct i2o_driver *drv) | ||
73 | { | ||
74 | struct i2o_controller *c; | ||
75 | int i; | ||
76 | int rc = 0; | ||
77 | unsigned long flags; | ||
78 | |||
79 | pr_debug("i2o: Register driver %s\n", drv->name); | ||
80 | |||
81 | if (drv->event) { | ||
82 | drv->event_queue = create_workqueue(drv->name); | ||
83 | if (!drv->event_queue) { | ||
84 | printk(KERN_ERR "i2o: Could not initialize event queue " | ||
85 | "for driver %s\n", drv->name); | ||
86 | return -EFAULT; | ||
87 | } | ||
88 | pr_debug("i2o: Event queue initialized for driver %s\n", | ||
89 | drv->name); | ||
90 | } else | ||
91 | drv->event_queue = NULL; | ||
92 | |||
93 | drv->driver.name = drv->name; | ||
94 | drv->driver.bus = &i2o_bus_type; | ||
95 | |||
96 | spin_lock_irqsave(&i2o_drivers_lock, flags); | ||
97 | |||
98 | for (i = 0; i < i2o_max_drivers && i2o_drivers[i]; i++); | ||
99 | if (i >= i2o_max_drivers) { | ||
100 | printk(KERN_ERR "i2o: too many drivers registered, " | ||
101 | "increase max_drivers\n"); | ||
102 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
103 | return -EFAULT; | ||
104 | } | ||
105 | |||
106 | drv->context = i; | ||
107 | i2o_drivers[i] = drv; | ||
108 | |||
109 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
110 | |||
111 | pr_debug("i2o: driver %s gets context id %d\n", drv->name, | ||
112 | drv->context); | ||
113 | |||
114 | list_for_each_entry(c, &i2o_controllers, list) { | ||
115 | struct i2o_device *i2o_dev; | ||
116 | |||
117 | i2o_driver_notify_controller_add(drv, c); | ||
118 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
119 | i2o_driver_notify_device_add(drv, i2o_dev); | ||
120 | } | ||
121 | |||
122 | |||
123 | rc = driver_register(&drv->driver); | ||
124 | if (rc && drv->event_queue) | ||
125 | destroy_workqueue(drv->event_queue); | ||
126 | |||
127 | return rc; | ||
128 | }; | ||
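
A minimal registration sketch for an OSM using the API above. Hedged: the class id, names and the absence of reply/event handlers are placeholder choices for illustration, not part of this diff.

static struct i2o_class_id example_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

static struct i2o_driver example_driver = {
	.name = "example-osm",
	.classes = example_class_id,
	/* no .event handler, so i2o_driver_register() creates no workqueue */
};

static int __init example_init(void)
{
	return i2o_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	i2o_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
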
129 | |||
130 | /** | ||
131 | * i2o_driver_unregister - Unregister an I2O driver (OSM) from the I2O core | ||
132 | * @drv: I2O driver which should be unregistered | ||
133 | * | ||
134 | * Unregisters the OSM drv from the I2O core and cleans up the event queue if | ||
135 | * necessary. | ||
136 | */ | ||
137 | void i2o_driver_unregister(struct i2o_driver *drv) | ||
138 | { | ||
139 | struct i2o_controller *c; | ||
140 | unsigned long flags; | ||
141 | |||
142 | pr_debug("i2o: unregister driver %s\n", drv->name); | ||
143 | |||
144 | driver_unregister(&drv->driver); | ||
145 | |||
146 | list_for_each_entry(c, &i2o_controllers, list) { | ||
147 | struct i2o_device *i2o_dev; | ||
148 | |||
149 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
150 | i2o_driver_notify_device_remove(drv, i2o_dev); | ||
151 | |||
152 | i2o_driver_notify_controller_remove(drv, c); | ||
153 | } | ||
154 | |||
155 | spin_lock_irqsave(&i2o_drivers_lock, flags); | ||
156 | i2o_drivers[drv->context] = NULL; | ||
157 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
158 | |||
159 | if (drv->event_queue) { | ||
160 | destroy_workqueue(drv->event_queue); | ||
161 | drv->event_queue = NULL; | ||
162 | pr_debug("i2o: event queue removed for %s\n", drv->name); | ||
163 | } | ||
164 | }; | ||
165 | |||
166 | /** | ||
167 | * i2o_driver_dispatch - dispatch an I2O reply message | ||
168 | * @c: I2O controller of the message | ||
169 | * @m: I2O message number | ||
170 | * @msg: I2O message to be delivered | ||
171 | * | ||
172 | * The reply is delivered to the driver from which the original message | ||
173 | * was sent. This function is only called from interrupt context. | ||
174 | * | ||
175 | * Returns 0 on success and the message should not be flushed. Returns > 0 | ||
176 | * on success and if the message should be flushed afterwards. Returns | ||
177 | * negative error code on failure (the message will be flushed too). | ||
178 | */ | ||
179 | int i2o_driver_dispatch(struct i2o_controller *c, u32 m, | ||
180 | struct i2o_message __iomem *msg) | ||
181 | { | ||
182 | struct i2o_driver *drv; | ||
183 | u32 context = readl(&msg->u.s.icntxt); | ||
184 | |||
185 | if (likely(context < i2o_max_drivers)) { | ||
186 | spin_lock(&i2o_drivers_lock); | ||
187 | drv = i2o_drivers[context]; | ||
188 | spin_unlock(&i2o_drivers_lock); | ||
189 | |||
190 | if (unlikely(!drv)) { | ||
191 | printk(KERN_WARNING "%s: Spurious reply to unknown " | ||
192 | "driver %d\n", c->name, context); | ||
193 | return -EIO; | ||
194 | } | ||
195 | |||
196 | if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) { | ||
197 | struct i2o_device *dev, *tmp; | ||
198 | struct i2o_event *evt; | ||
199 | u16 size; | ||
200 | u16 tid; | ||
201 | |||
202 | tid = readl(&msg->u.head[1]) & 0x1fff; | ||
203 | |||
204 | pr_debug("%s: event received from device %d\n", c->name, | ||
205 | tid); | ||
206 | |||
207 | /* cut off header from message size (in 32-bit words) */ | ||
208 | size = (readl(&msg->u.head[0]) >> 16) - 5; | ||
209 | |||
210 | evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); | ||
211 | if (!evt) | ||
212 | return -ENOMEM; | ||
213 | memset(evt, 0, size * 4 + sizeof(*evt)); | ||
214 | |||
215 | evt->size = size; | ||
216 | memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt, | ||
217 | (size + 2) * 4); | ||
218 | |||
219 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | ||
220 | if (dev->lct_data.tid == tid) { | ||
221 | evt->i2o_dev = dev; | ||
222 | break; | ||
223 | } | ||
224 | |||
225 | INIT_WORK(&evt->work, (void (*)(void *))drv->event, | ||
226 | evt); | ||
227 | queue_work(drv->event_queue, &evt->work); | ||
228 | return 1; | ||
229 | } | ||
230 | |||
231 | if (likely(drv->reply)) | ||
232 | return drv->reply(c, m, msg); | ||
233 | else | ||
234 | pr_debug("%s: Reply to driver %s, but no reply function" | ||
235 | " defined!\n", c->name, drv->name); | ||
236 | return -EIO; | ||
237 | } else | ||
238 | printk(KERN_WARNING "%s: Spurious reply to unknown driver " | ||
239 | "%d\n", c->name, readl(&msg->u.s.icntxt)); | ||
240 | return -EIO; | ||
241 | } | ||
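
As a short sketch of the return-value contract that the dispatcher above expects from an OSM's reply handler (the handler itself is hypothetical; treating the top byte of body[0] as a status code follows the convention used by the Block and Exec OSMs in this series):

/* 0: handled, do not flush; >0: handled, flush afterwards; <0: error, flush. */
static int example_reply(struct i2o_controller *c, u32 m,
			 struct i2o_message *msg)
{
	/* a non-zero status byte in the reply is treated as an error here */
	if (le32_to_cpu(msg->body[0]) >> 24)
		return -EIO;

	return 1;	/* let the core flush the reply message */
}
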
242 | |||
243 | /** | ||
244 | * i2o_driver_notify_controller_add_all - Send notify of added controller | ||
245 | * to all I2O drivers | ||
246 | * | ||
247 | * Send notifications to all registered drivers that a new controller was | ||
248 | * added. | ||
249 | */ | ||
250 | void i2o_driver_notify_controller_add_all(struct i2o_controller *c) | ||
251 | { | ||
252 | int i; | ||
253 | struct i2o_driver *drv; | ||
254 | |||
255 | for (i = 0; i < I2O_MAX_DRIVERS; i++) { | ||
256 | drv = i2o_drivers[i]; | ||
257 | |||
258 | if (drv) | ||
259 | i2o_driver_notify_controller_add(drv, c); | ||
260 | } | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * i2o_driver_notify_controller_remove_all - Send notify of removed | ||
265 | * controller to all I2O drivers | ||
266 | * | ||
267 | * Send notifications to all registered drivers that a controller was | ||
268 | * removed. | ||
269 | */ | ||
270 | void i2o_driver_notify_controller_remove_all(struct i2o_controller *c) | ||
271 | { | ||
272 | int i; | ||
273 | struct i2o_driver *drv; | ||
274 | |||
275 | for (i = 0; i < I2O_MAX_DRIVERS; i++) { | ||
276 | drv = i2o_drivers[i]; | ||
277 | |||
278 | if (drv) | ||
279 | i2o_driver_notify_controller_remove(drv, c); | ||
280 | } | ||
281 | } | ||
282 | |||
283 | /** | ||
284 | * i2o_driver_notify_device_add_all - Send notify of added device to all | ||
285 | * I2O drivers | ||
286 | * | ||
287 | * Send notifications to all registered drivers that a device was added. | ||
288 | */ | ||
289 | void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev) | ||
290 | { | ||
291 | int i; | ||
292 | struct i2o_driver *drv; | ||
293 | |||
294 | for (i = 0; i < I2O_MAX_DRIVERS; i++) { | ||
295 | drv = i2o_drivers[i]; | ||
296 | |||
297 | if (drv) | ||
298 | i2o_driver_notify_device_add(drv, i2o_dev); | ||
299 | } | ||
300 | } | ||
301 | |||
302 | /** | ||
303 | * i2o_driver_notify_device_remove_all - Send notify of removed device to | ||
304 | * all I2O drivers | ||
305 | * | ||
306 | * Send notifications to all registered drivers that a device was removed. | ||
307 | */ | ||
308 | void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev) | ||
309 | { | ||
310 | int i; | ||
311 | struct i2o_driver *drv; | ||
312 | |||
313 | for (i = 0; i < I2O_MAX_DRIVERS; i++) { | ||
314 | drv = i2o_drivers[i]; | ||
315 | |||
316 | if (drv) | ||
317 | i2o_driver_notify_device_remove(drv, i2o_dev); | ||
318 | } | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * i2o_driver_init - initialize I2O drivers (OSMs) | ||
323 | * | ||
324 | * Registers the I2O bus and allocates memory for the array of OSMs. | ||
325 | * | ||
326 | * Returns 0 on success or negative error code on failure. | ||
327 | */ | ||
328 | int __init i2o_driver_init(void) | ||
329 | { | ||
330 | int rc = 0; | ||
331 | |||
332 | spin_lock_init(&i2o_drivers_lock); | ||
333 | |||
334 | if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) || | ||
335 | ((i2o_max_drivers ^ (i2o_max_drivers - 1)) != | ||
336 | (2 * i2o_max_drivers - 1))) { | ||
337 | printk(KERN_WARNING "i2o: max_drivers set to %d, but must be " | ||
338 | ">=2 and <= 64 and a power of 2\n", i2o_max_drivers); | ||
339 | i2o_max_drivers = I2O_MAX_DRIVERS; | ||
340 | } | ||
341 | printk(KERN_INFO "i2o: max drivers = %d\n", i2o_max_drivers); | ||
342 | |||
343 | i2o_drivers = | ||
344 | kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); | ||
345 | if (!i2o_drivers) | ||
346 | return -ENOMEM; | ||
347 | |||
348 | memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers)); | ||
349 | |||
350 | rc = bus_register(&i2o_bus_type); | ||
351 | |||
352 | if (rc < 0) | ||
353 | kfree(i2o_drivers); | ||
354 | |||
355 | return rc; | ||
356 | }; | ||
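
The validity check above relies on the classic power-of-two identity: for a power of two n, n ^ (n - 1) sets every bit up to and including the highest set bit, i.e. equals 2 * n - 1. A standalone version of the same test, for illustration only:

static inline int example_is_power_of_two(unsigned int n)
{
	/* e.g. 8 ^ 7 = 15 = 2 * 8 - 1, but 12 ^ 11 = 7 != 23 */
	return n && ((n ^ (n - 1)) == 2 * n - 1);
}
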
357 | |||
358 | /** | ||
359 | * i2o_driver_exit - clean up I2O drivers (OSMs) | ||
360 | * | ||
361 | * Unregisters the I2O bus and frees the driver array. | ||
362 | */ | ||
363 | void __exit i2o_driver_exit(void) | ||
364 | { | ||
365 | bus_unregister(&i2o_bus_type); | ||
366 | kfree(i2o_drivers); | ||
367 | }; | ||
368 | |||
369 | EXPORT_SYMBOL(i2o_driver_register); | ||
370 | EXPORT_SYMBOL(i2o_driver_unregister); | ||
371 | EXPORT_SYMBOL(i2o_driver_notify_controller_add_all); | ||
372 | EXPORT_SYMBOL(i2o_driver_notify_controller_remove_all); | ||
373 | EXPORT_SYMBOL(i2o_driver_notify_device_add_all); | ||
374 | EXPORT_SYMBOL(i2o_driver_notify_device_remove_all); | ||
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c new file mode 100644 index 000000000000..79c1cbfb8f44 --- /dev/null +++ b/drivers/message/i2o/exec-osm.c | |||
@@ -0,0 +1,507 @@ | |||
1 | /* | ||
2 | * Executive OSM | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the Red | ||
14 | * Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@redhat.com>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
27 | * Support for sysfs included. | ||
28 | */ | ||
29 | |||
30 | #include <linux/module.h> | ||
31 | #include <linux/i2o.h> | ||
32 | #include <linux/delay.h> | ||
33 | |||
34 | #define OSM_NAME "exec-osm" | ||
35 | |||
36 | struct i2o_driver i2o_exec_driver; | ||
37 | |||
38 | static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind); | ||
39 | |||
40 | /* Module internal functions from other sources */ | ||
41 | extern int i2o_device_parse_lct(struct i2o_controller *); | ||
42 | |||
43 | /* global wait list for POST WAIT */ | ||
44 | static LIST_HEAD(i2o_exec_wait_list); | ||
45 | |||
46 | /* Wait struct needed for POST WAIT */ | ||
47 | struct i2o_exec_wait { | ||
48 | wait_queue_head_t *wq; /* Pointer to Wait queue */ | ||
49 | struct i2o_dma dma; /* DMA buffers to free on failure */ | ||
50 | u32 tcntxt; /* transaction context from reply */ | ||
51 | int complete; /* 1 if reply received otherwise 0 */ | ||
52 | u32 m; /* message id */ | ||
53 | struct i2o_message __iomem *msg; /* pointer to the reply message */ | ||
54 | struct list_head list; /* node in global wait list */ | ||
55 | }; | ||
56 | |||
57 | /* Exec OSM class handling definition */ | ||
58 | static struct i2o_class_id i2o_exec_class_id[] = { | ||
59 | {I2O_CLASS_EXECUTIVE}, | ||
60 | {I2O_CLASS_END} | ||
61 | }; | ||
62 | |||
63 | /** | ||
64 | * i2o_exec_wait_alloc - Allocate an i2o_exec_wait struct and initialize it | ||
65 | * | ||
66 | * Allocate the i2o_exec_wait struct and initialize the wait. | ||
67 | * | ||
68 | * Returns i2o_exec_wait pointer on success or negative error code on | ||
69 | * failure. | ||
70 | */ | ||
71 | static struct i2o_exec_wait *i2o_exec_wait_alloc(void) | ||
72 | { | ||
73 | struct i2o_exec_wait *wait; | ||
74 | |||
75 | wait = kmalloc(sizeof(*wait), GFP_KERNEL); | ||
76 | if (!wait) | ||
77 | return ERR_PTR(-ENOMEM); | ||
78 | |||
79 | memset(wait, 0, sizeof(*wait)); | ||
80 | |||
81 | INIT_LIST_HEAD(&wait->list); | ||
82 | |||
83 | return wait; | ||
84 | }; | ||
85 | |||
86 | /** | ||
87 | * i2o_exec_wait_free - Free an i2o_exec_wait struct | ||
88 | * @wait: I2O wait data which should be cleaned up | ||
89 | */ | ||
90 | static void i2o_exec_wait_free(struct i2o_exec_wait *wait) | ||
91 | { | ||
92 | kfree(wait); | ||
93 | }; | ||
94 | |||
95 | /** | ||
96 | * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers | ||
97 | * @c: controller | ||
98 | * @m: message to post | ||
99 | * @timeout: time in seconds to wait | ||
100 | * @dma: i2o_dma struct of the DMA buffer to free on failure | ||
101 | * | ||
102 | * This API allows an OSM to post a message and then be told whether or | ||
103 | * not the system received a successful reply. If the message times out | ||
104 | * then the value '-ETIMEDOUT' is returned. This is a special case. In | ||
105 | * this situation the message may (should) complete at an indefinite time | ||
106 | * in the future. When it completes it will use the memory buffer | ||
107 | * attached to the request. If -ETIMEDOUT is returned then the memory | ||
108 | * buffer must not be freed. Instead the event completion will free them | ||
109 | * for you. In all other cases the buffers are your problem. | ||
110 | * | ||
111 | * Returns 0 on success or negative error code on failure. | ||
112 | */ | ||
113 | int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | ||
114 | timeout, struct i2o_dma *dma) | ||
115 | { | ||
116 | DECLARE_WAIT_QUEUE_HEAD(wq); | ||
117 | struct i2o_exec_wait *wait; | ||
118 | static u32 tcntxt = 0x80000000; | ||
119 | struct i2o_message __iomem *msg = c->in_queue.virt + m; | ||
120 | int rc = 0; | ||
121 | |||
122 | wait = i2o_exec_wait_alloc(); | ||
123 | if (!wait) | ||
124 | return -ENOMEM; | ||
125 | |||
126 | if (tcntxt == 0xffffffff) | ||
127 | tcntxt = 0x80000000; | ||
128 | |||
129 | if (dma) | ||
130 | wait->dma = *dma; | ||
131 | |||
132 | /* | ||
133 | * Fill in the message initiator context and transaction context. | ||
134 | * We will only use transaction contexts >= 0x80000000 for POST WAIT, | ||
135 | * so we can find a POST WAIT reply more easily in the reply handler. | ||
136 | */ | ||
137 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
138 | wait->tcntxt = tcntxt++; | ||
139 | writel(wait->tcntxt, &msg->u.s.tcntxt); | ||
140 | |||
141 | /* | ||
142 | * Post the message to the controller. At some point later it will | ||
143 | * return. If we time out before it returns then complete will be zero. | ||
144 | */ | ||
145 | i2o_msg_post(c, m); | ||
146 | |||
147 | if (!wait->complete) { | ||
148 | wait->wq = &wq; | ||
149 | /* | ||
150 | * we add elements at the head, because if an entry in the list | ||
151 | * is never removed, we would have to iterate over it every time | ||
152 | */ | ||
153 | list_add(&wait->list, &i2o_exec_wait_list); | ||
154 | |||
155 | wait_event_interruptible_timeout(wq, wait->complete, | ||
156 | timeout * HZ); | ||
157 | |||
158 | wait->wq = NULL; | ||
159 | } | ||
160 | |||
161 | barrier(); | ||
162 | |||
163 | if (wait->complete) { | ||
164 | if (readl(&wait->msg->body[0]) >> 24) | ||
165 | rc = readl(&wait->msg->body[0]) & 0xff; | ||
166 | i2o_flush_reply(c, wait->m); | ||
167 | i2o_exec_wait_free(wait); | ||
168 | } else { | ||
169 | /* | ||
170 | * We cannot remove it now. This is important. When it does | ||
171 | * terminate (which it must do if the controller has not | ||
172 | * died...) then it will otherwise scribble on stuff. | ||
173 | * | ||
174 | * FIXME: try abort message | ||
175 | */ | ||
176 | if (dma) | ||
177 | dma->virt = NULL; | ||
178 | |||
179 | rc = -ETIMEDOUT; | ||
180 | } | ||
181 | |||
182 | return rc; | ||
183 | }; | ||
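
A caller-side sketch of the buffer-ownership rule documented above (assumption: i2o_dma_alloc()/i2o_dma_free() as used by i2o_parm_issue() earlier in this series, and a hypothetical helper name). On -ETIMEDOUT the DMA buffer now belongs to the core and must not be freed by the caller:

static int example_post_with_buffer(struct i2o_controller *c, u32 m,
				    struct i2o_dma *buf)
{
	int rc = i2o_msg_post_wait_mem(c, m, 60, buf);

	if (rc == -ETIMEDOUT)
		return rc;	/* buf is freed later by the reply handler */

	i2o_dma_free(&c->pdev->dev, buf);
	return rc;
}
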
184 | |||
185 | /** | ||
186 | * i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP | ||
187 | * @c: I2O controller which answers | ||
188 | * @m: message id | ||
189 | * @msg: pointer to the I2O reply message | ||
190 | * | ||
191 | * This function is called in interrupt context only. If the reply arrived | ||
192 | * before the timeout, the i2o_exec_wait struct is filled with the message | ||
193 | * and the task will be woken up. The task is now responsible for returning | ||
194 | * the message m back to the controller! If the message reaches us after | ||
195 | * the timeout clean up the i2o_exec_wait struct (including allocated | ||
196 | * DMA buffer). | ||
197 | * | ||
198 | * Return 0 on success and if the message m should not be given back to the | ||
199 | * I2O controller, or >0 on success and if the message should be given back | ||
200 | * afterwards. Returns negative error code on failure. In this case the | ||
201 | * message must also be given back to the controller. | ||
202 | */ | ||
203 | static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | ||
204 | struct i2o_message __iomem *msg) | ||
205 | { | ||
206 | struct i2o_exec_wait *wait, *tmp; | ||
207 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | ||
208 | int rc = 1; | ||
209 | u32 context; | ||
210 | |||
212 | |||
213 | context = readl(&msg->u.s.tcntxt); | ||
214 | |||
215 | /* | ||
216 | * We need to search through the i2o_exec_wait_list to see if the given | ||
217 | * message is still outstanding. If not, it means that the IOP took | ||
218 | * longer to respond to the message than we had allowed and timer has | ||
219 | * already expired. Not much we can do about that except log it for | ||
220 | * debug purposes, increase timeout, and recompile. | ||
221 | */ | ||
222 | spin_lock(&lock); | ||
223 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { | ||
224 | if (wait->tcntxt == context) { | ||
225 | list_del(&wait->list); | ||
226 | |||
227 | wait->m = m; | ||
228 | wait->msg = msg; | ||
229 | wait->complete = 1; | ||
230 | |||
231 | barrier(); | ||
232 | |||
233 | if (wait->wq) { | ||
234 | wake_up_interruptible(wait->wq); | ||
235 | rc = 0; | ||
236 | } else { | ||
237 | struct device *dev; | ||
238 | |||
239 | dev = &c->pdev->dev; | ||
240 | |||
241 | pr_debug("%s: timed-out reply received!\n", | ||
242 | c->name); | ||
243 | i2o_dma_free(dev, &wait->dma); | ||
244 | i2o_exec_wait_free(wait); | ||
245 | rc = -1; | ||
246 | } | ||
247 | |||
248 | spin_unlock(&lock); | ||
249 | |||
250 | return rc; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | spin_unlock(&lock); | ||
255 | |||
256 | pr_debug("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, | ||
257 | context); | ||
258 | |||
259 | return -1; | ||
260 | }; | ||
261 | |||
262 | /** | ||
263 | * i2o_exec_probe - Called if a new I2O device (executive class) appears | ||
264 | * @dev: I2O device which should be probed | ||
265 | * | ||
266 | * Registers event notification for every event from Executive device. The | ||
267 | * return is always 0, because we want all devices of class Executive. | ||
268 | * | ||
269 | * Returns 0 on success. | ||
270 | */ | ||
271 | static int i2o_exec_probe(struct device *dev) | ||
272 | { | ||
273 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
274 | |||
275 | i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); | ||
276 | |||
277 | i2o_dev->iop->exec = i2o_dev; | ||
278 | |||
279 | return 0; | ||
280 | }; | ||
281 | |||
282 | /** | ||
283 | * i2o_exec_remove - Called on I2O device removal | ||
284 | * @dev: I2O device which was removed | ||
285 | * | ||
286 | * Unregisters event notification from Executive I2O device. | ||
287 | * | ||
288 | * Returns 0 on success. | ||
289 | */ | ||
290 | static int i2o_exec_remove(struct device *dev) | ||
291 | { | ||
292 | i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); | ||
293 | |||
294 | return 0; | ||
295 | }; | ||
296 | |||
297 | /** | ||
298 | * i2o_exec_lct_modified - Called on LCT NOTIFY reply | ||
299 | * @c: I2O controller on which the LCT has modified | ||
300 | * | ||
301 | * This function handles asynchronous LCT NOTIFY replies. It parses the | ||
302 | * new LCT and, if the buffer for the LCT was too small, sends an LCT | ||
303 | * NOTIFY again. | ||
304 | */ | ||
305 | static void i2o_exec_lct_modified(struct i2o_controller *c) | ||
306 | { | ||
307 | if (i2o_device_parse_lct(c) == -EAGAIN) | ||
308 | i2o_exec_lct_notify(c, 0); | ||
309 | }; | ||
310 | |||
311 | /** | ||
312 | * i2o_exec_reply - I2O Executive reply handler | ||
313 | * @c: I2O controller from which the reply comes | ||
314 | * @m: message id | ||
315 | * @msg: pointer to the I2O reply message | ||
316 | * | ||
317 | * This function is always called from interrupt context. If a POST WAIT | ||
318 | * reply was received, pass it to the complete function. If a LCT NOTIFY | ||
319 | * reply was received, a new event is created to handle the update. | ||
320 | * | ||
321 | * Returns 0 on success and if the reply should not be flushed or > 0 | ||
322 | * on success and if the reply should be flushed. Returns negative error | ||
323 | * code on failure and if the reply should be flushed. | ||
324 | */ | ||
325 | static int i2o_exec_reply(struct i2o_controller *c, u32 m, | ||
326 | struct i2o_message *msg) | ||
327 | { | ||
328 | if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { // Fail bit is set | ||
329 | struct i2o_message __iomem *pmsg; /* preserved message */ | ||
330 | u32 pm; | ||
331 | |||
332 | pm = le32_to_cpu(msg->body[3]); | ||
333 | |||
334 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
335 | |||
336 | i2o_report_status(KERN_INFO, "i2o_core", msg); | ||
337 | |||
338 | /* Release the preserved msg by resubmitting it as a NOP */ | ||
339 | i2o_msg_nop(c, pm); | ||
340 | |||
341 | /* If reply to i2o_post_wait failed, return causes a timeout */ | ||
342 | return -1; | ||
343 | } | ||
344 | |||
345 | if (le32_to_cpu(msg->u.s.tcntxt) & 0x80000000) | ||
346 | return i2o_msg_post_wait_complete(c, m, msg); | ||
347 | |||
348 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) { | ||
349 | struct work_struct *work; | ||
350 | |||
351 | pr_debug("%s: LCT notify received\n", c->name); | ||
352 | |||
353 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | ||
354 | if (!work) | ||
355 | return -ENOMEM; | ||
356 | |||
357 | INIT_WORK(work, (void (*)(void *))i2o_exec_lct_modified, c); | ||
358 | queue_work(i2o_exec_driver.event_queue, work); | ||
359 | return 1; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * If this happens, we want to dump the message to the syslog so | ||
364 | * it can be sent back to the card manufacturer by the end user | ||
365 | * to aid in debugging. | ||
366 | * | ||
367 | */ | ||
368 | printk(KERN_WARNING "%s: Unsolicited message reply sent to core! " | ||
369 | "Message dumped to syslog\n", c->name); | ||
370 | i2o_dump_message(msg); | ||
371 | |||
372 | return -EFAULT; | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * i2o_exec_event - Event handling function | ||
377 | * @evt: Event which occurs | ||
378 | * | ||
379 | * Handles events sent by the Executive device. At the moment it does not do | ||
380 | * anything useful. | ||
381 | */ | ||
382 | static void i2o_exec_event(struct i2o_event *evt) | ||
383 | { | ||
384 | osm_info("Event received from device: %d\n", | ||
385 | evt->i2o_dev->lct_data.tid); | ||
386 | kfree(evt); | ||
387 | }; | ||
388 | |||
389 | /** | ||
390 | * i2o_exec_lct_get - Get the IOP's Logical Configuration Table | ||
391 | * @c: I2O controller from which the LCT should be fetched | ||
392 | * | ||
393 | * Sends an LCT NOTIFY request to the controller, and waits up to | ||
394 | * I2O_TIMEOUT_LCT_GET seconds for the response to arrive. If the LCT is | ||
395 | * too large, the request is retried. | ||
396 | * | ||
397 | * Returns 0 on success or negative error code on failure. | ||
398 | */ | ||
399 | int i2o_exec_lct_get(struct i2o_controller *c) | ||
400 | { | ||
401 | struct i2o_message __iomem *msg; | ||
402 | u32 m; | ||
403 | int i = 0; | ||
404 | int rc = -EAGAIN; | ||
405 | |||
406 | for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { | ||
407 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
408 | if (m == I2O_QUEUE_EMPTY) | ||
409 | return -ETIMEDOUT; | ||
410 | |||
411 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); | ||
412 | writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
413 | &msg->u.head[1]); | ||
414 | writel(0xffffffff, &msg->body[0]); | ||
415 | writel(0x00000000, &msg->body[1]); | ||
416 | writel(0xd0000000 | c->dlct.len, &msg->body[2]); | ||
417 | writel(c->dlct.phys, &msg->body[3]); | ||
418 | |||
419 | rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET); | ||
420 | if (rc < 0) | ||
421 | break; | ||
422 | |||
423 | rc = i2o_device_parse_lct(c); | ||
424 | if (rc != -EAGAIN) | ||
425 | break; | ||
426 | } | ||
427 | |||
428 | return rc; | ||
429 | } | ||
430 | |||
431 | /** | ||
432 | * i2o_exec_lct_notify - Send an asynchronous LCT NOTIFY request | ||
433 | * @c: I2O controller to which the request should be sent | ||
434 | * @change_ind: change indicator | ||
435 | * | ||
436 | * This function sends an LCT NOTIFY request to the I2O controller with | ||
437 | * the change indicator change_ind. If change_ind == 0 the controller | ||
438 | * replies immediately after the request. If change_ind > 0 the reply is | ||
439 | * sent once the change indicator of the LCT is > change_ind. | ||
440 | */ | ||
441 | static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) | ||
442 | { | ||
443 | i2o_status_block *sb = c->status_block.virt; | ||
444 | struct device *dev; | ||
445 | struct i2o_message __iomem *msg; | ||
446 | u32 m; | ||
447 | |||
448 | dev = &c->pdev->dev; | ||
449 | |||
450 | if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL)) | ||
451 | return -ENOMEM; | ||
452 | |||
453 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
454 | if (m == I2O_QUEUE_EMPTY) | ||
455 | return -ETIMEDOUT; | ||
456 | |||
457 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); | ||
458 | writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
459 | &msg->u.head[1]); | ||
460 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
461 | writel(0, &msg->u.s.tcntxt); /* FIXME */ | ||
462 | writel(0xffffffff, &msg->body[0]); | ||
463 | writel(change_ind, &msg->body[1]); | ||
464 | writel(0xd0000000 | c->dlct.len, &msg->body[2]); | ||
465 | writel(c->dlct.phys, &msg->body[3]); | ||
466 | |||
467 | i2o_msg_post(c, m); | ||
468 | |||
469 | return 0; | ||
470 | }; | ||
471 | |||
472 | /* Exec OSM driver struct */ | ||
473 | struct i2o_driver i2o_exec_driver = { | ||
474 | .name = OSM_NAME, | ||
475 | .reply = i2o_exec_reply, | ||
476 | .event = i2o_exec_event, | ||
477 | .classes = i2o_exec_class_id, | ||
478 | .driver = { | ||
479 | .probe = i2o_exec_probe, | ||
480 | .remove = i2o_exec_remove, | ||
481 | }, | ||
482 | }; | ||
483 | |||
484 | /** | ||
485 | * i2o_exec_init - Registers the Exec OSM | ||
486 | * | ||
487 | * Registers the Exec OSM in the I2O core. | ||
488 | * | ||
489 | * Returns 0 on success or negative error code on failure. | ||
490 | */ | ||
491 | int __init i2o_exec_init(void) | ||
492 | { | ||
493 | return i2o_driver_register(&i2o_exec_driver); | ||
494 | }; | ||
495 | |||
496 | /** | ||
497 | * i2o_exec_exit - Removes the Exec OSM | ||
498 | * | ||
499 | * Unregisters the Exec OSM from the I2O core. | ||
500 | */ | ||
501 | void __exit i2o_exec_exit(void) | ||
502 | { | ||
503 | i2o_driver_unregister(&i2o_exec_driver); | ||
504 | }; | ||
505 | |||
506 | EXPORT_SYMBOL(i2o_msg_post_wait_mem); | ||
507 | EXPORT_SYMBOL(i2o_exec_lct_get); | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c new file mode 100644 index 000000000000..7b74c87b569e --- /dev/null +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -0,0 +1,1247 @@ | |||
1 | /* | ||
2 | * Block OSM | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * For the purpose of avoiding doubt the preferred form of the work | ||
19 | * for making modifications shall be a standards compliant form such | ||
20 | * gzipped tar and not one requiring a proprietary or patent encumbered | ||
21 | * tool to unpack. | ||
22 | * | ||
23 | * Fixes/additions: | ||
24 | * Steve Ralston: | ||
25 | * Multiple device handling error fixes, | ||
26 | * Added a queue depth. | ||
27 | * Alan Cox: | ||
28 | * FC920 has an rmw bug. Don't OR in the end marker. | ||
29 | * Removed queue walk, fixed for 64bitness. | ||
30 | * Rewrote much of the code over time | ||
31 | * Added indirect block lists | ||
32 | * Handle 64K limits on many controllers | ||
33 | * Don't use indirects on the Promise (breaks) | ||
34 | * Heavily chop down the queue depths | ||
35 | * Deepak Saxena: | ||
36 | * Independent queues per IOP | ||
37 | * Support for dynamic device creation/deletion | ||
38 | * Code cleanup | ||
39 | * Support for larger I/Os through merge* functions | ||
40 | * (taken from DAC960 driver) | ||
41 | * Boji T Kannanthanam: | ||
42 | * Set the I2O Block devices to be detected in increasing | ||
43 | * order of TIDs during boot. | ||
44 | * Search and set the I2O block device that we boot off | ||
45 | * from as the first device to be claimed (as /dev/i2o/hda) | ||
46 | * Properly attach/detach I2O gendisk structure from the | ||
47 | * system gendisk list. The I2O block devices now appear in | ||
48 | * /proc/partitions. | ||
49 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
50 | * Minor bugfixes for 2.6. | ||
51 | */ | ||
52 | |||
53 | #include <linux/module.h> | ||
54 | #include <linux/i2o.h> | ||
55 | |||
56 | #include <linux/mempool.h> | ||
57 | |||
58 | #include <linux/genhd.h> | ||
59 | #include <linux/blkdev.h> | ||
60 | #include <linux/hdreg.h> | ||
61 | |||
62 | #include "i2o_block.h" | ||
63 | |||
64 | #define OSM_NAME "block-osm" | ||
65 | #define OSM_VERSION "$Rev$" | ||
66 | #define OSM_DESCRIPTION "I2O Block Device OSM" | ||
67 | |||
68 | static struct i2o_driver i2o_block_driver; | ||
69 | |||
70 | /* global Block OSM request mempool */ | ||
71 | static struct i2o_block_mempool i2o_blk_req_pool; | ||
72 | |||
73 | /* Block OSM class handling definition */ | ||
74 | static struct i2o_class_id i2o_block_class_id[] = { | ||
75 | {I2O_CLASS_RANDOM_BLOCK_STORAGE}, | ||
76 | {I2O_CLASS_END} | ||
77 | }; | ||
78 | |||
79 | /** | ||
80 | * i2o_block_device_free - free the memory of the I2O Block device | ||
81 | * @dev: I2O Block device, which should be cleaned up | ||
82 | * | ||
83 | * Frees the request queue, gendisk and the i2o_block_device structure. | ||
84 | */ | ||
85 | static void i2o_block_device_free(struct i2o_block_device *dev) | ||
86 | { | ||
87 | blk_cleanup_queue(dev->gd->queue); | ||
88 | |||
89 | put_disk(dev->gd); | ||
90 | |||
91 | kfree(dev); | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * i2o_block_remove - remove the I2O Block device from the system again | ||
96 | * @dev: I2O Block device which should be removed | ||
97 | * | ||
98 | * Remove gendisk from system and free all allocated memory. | ||
99 | * | ||
100 | * Always returns 0. | ||
101 | */ | ||
102 | static int i2o_block_remove(struct device *dev) | ||
103 | { | ||
104 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
105 | struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); | ||
106 | |||
107 | osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name); | ||
108 | |||
109 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); | ||
110 | |||
111 | del_gendisk(i2o_blk_dev->gd); | ||
112 | |||
113 | dev_set_drvdata(dev, NULL); | ||
114 | |||
115 | i2o_device_claim_release(i2o_dev); | ||
116 | |||
117 | i2o_block_device_free(i2o_blk_dev); | ||
118 | |||
119 | return 0; | ||
120 | }; | ||
121 | |||
122 | /** | ||
123 | * i2o_block_device_flush - Flush all dirty data of I2O device dev | ||
124 | * @dev: I2O device which should be flushed | ||
125 | * | ||
126 | * Flushes all dirty data on device dev. | ||
127 | * | ||
128 | * Returns 0 on success or negative error code on failure. | ||
129 | */ | ||
130 | static int i2o_block_device_flush(struct i2o_device *dev) | ||
131 | { | ||
132 | struct i2o_message __iomem *msg; | ||
133 | u32 m; | ||
134 | |||
135 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
136 | if (m == I2O_QUEUE_EMPTY) | ||
137 | return -ETIMEDOUT; | ||
138 | |||
139 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
140 | writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid, | ||
141 | &msg->u.head[1]); | ||
142 | writel(60 << 16, &msg->body[0]); | ||
143 | osm_debug("Flushing...\n"); | ||
144 | |||
145 | return i2o_msg_post_wait(dev->iop, m, 60); | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * i2o_block_device_mount - Mount (load) the media of device dev | ||
150 | * @dev: I2O device which should receive the mount request | ||
151 | * @media_id: Media Identifier | ||
152 | * | ||
153 | * Load media into the drive. The identifier should be set to -1, because | ||
154 | * the spec does not support any other value. | ||
155 | * | ||
156 | * Returns 0 on success or negative error code on failure. | ||
157 | */ | ||
158 | static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) | ||
159 | { | ||
160 | struct i2o_message __iomem *msg; | ||
161 | u32 m; | ||
162 | |||
163 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
164 | if (m == I2O_QUEUE_EMPTY) | ||
165 | return -ETIMEDOUT; | ||
166 | |||
167 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
168 | writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid, | ||
169 | &msg->u.head[1]); | ||
170 | writel(-1, &msg->body[0]); | ||
171 | writel(0, &msg->body[1]); | ||
172 | osm_debug("Mounting...\n"); | ||
173 | |||
174 | return i2o_msg_post_wait(dev->iop, m, 2); | ||
175 | }; | ||
176 | |||
177 | /** | ||
178 | * i2o_block_device_lock - Locks the media of device dev | ||
179 | * @dev: I2O device which should receive the lock request | ||
180 | * @media_id: Media Identifier | ||
181 | * | ||
182 | * Lock media of device dev to prevent removal. The media identifier | ||
183 | * should be set to -1, because the spec does not support any other value. | ||
184 | * | ||
185 | * Returns 0 on success or negative error code on failure. | ||
186 | */ | ||
187 | static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) | ||
188 | { | ||
189 | struct i2o_message __iomem *msg; | ||
190 | u32 m; | ||
191 | |||
192 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
193 | if (m == I2O_QUEUE_EMPTY) | ||
194 | return -ETIMEDOUT; | ||
195 | |||
196 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
197 | writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, | ||
198 | &msg->u.head[1]); | ||
199 | writel(-1, &msg->body[0]); | ||
200 | osm_debug("Locking...\n"); | ||
201 | |||
202 | return i2o_msg_post_wait(dev->iop, m, 2); | ||
203 | }; | ||
204 | |||
205 | /** | ||
206 | * i2o_block_device_unlock - Unlocks the media of device dev | ||
207 | * @dev: I2O device which should receive the unlock request | ||
208 | * @media_id: Media Identifier | ||
209 | * | ||
210 | * Unlocks the media in device dev. The media identifier should be set to | ||
211 | * -1, because the spec does not support any other value. | ||
212 | * | ||
213 | * Returns 0 on success or negative error code on failure. | ||
214 | */ | ||
215 | static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) | ||
216 | { | ||
217 | struct i2o_message __iomem *msg; | ||
218 | u32 m; | ||
219 | |||
220 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
221 | if (m == I2O_QUEUE_EMPTY) | ||
222 | return -ETIMEDOUT; | ||
223 | |||
224 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
225 | writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid, | ||
226 | &msg->u.head[1]); | ||
227 | writel(media_id, &msg->body[0]); | ||
228 | osm_debug("Unlocking...\n"); | ||
229 | |||
230 | return i2o_msg_post_wait(dev->iop, m, 2); | ||
231 | }; | ||
232 | |||
233 | /** | ||
234 | * i2o_block_device_power - Power management for device dev | ||
235 | * @dev: I2O device which should receive the power management request | ||
236 | * @op: Operation which should be sent | ||
237 | * | ||
238 | * Send a power management request to the device dev. | ||
239 | * | ||
240 | * Returns 0 on success or negative error code on failure. | ||
241 | */ | ||
242 | static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) | ||
243 | { | ||
244 | struct i2o_device *i2o_dev = dev->i2o_dev; | ||
245 | struct i2o_controller *c = i2o_dev->iop; | ||
246 | struct i2o_message __iomem *msg; | ||
247 | u32 m; | ||
248 | int rc; | ||
249 | |||
250 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
251 | if (m == I2O_QUEUE_EMPTY) | ||
252 | return -ETIMEDOUT; | ||
253 | |||
254 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
255 | writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data. | ||
256 | tid, &msg->u.head[1]); | ||
257 | writel(op << 24, &msg->body[0]); | ||
258 | osm_debug("Power...\n"); | ||
259 | |||
260 | rc = i2o_msg_post_wait(c, m, 60); | ||
261 | if (!rc) | ||
262 | dev->power = op; | ||
263 | |||
264 | return rc; | ||
265 | }; | ||
266 | |||
267 | /** | ||
268 | * i2o_block_request_alloc - Allocate an I2O block request struct | ||
269 | * | ||
270 | * Allocates an I2O block request struct and initializes the list. | ||
271 | * | ||
272 | * Returns a i2o_block_request pointer on success or negative error code | ||
273 | * on failure. | ||
274 | */ | ||
275 | static inline struct i2o_block_request *i2o_block_request_alloc(void) | ||
276 | { | ||
277 | struct i2o_block_request *ireq; | ||
278 | |||
279 | ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC); | ||
280 | if (!ireq) | ||
281 | return ERR_PTR(-ENOMEM); | ||
282 | |||
283 | INIT_LIST_HEAD(&ireq->queue); | ||
284 | |||
285 | return ireq; | ||
286 | }; | ||
287 | |||
288 | /** | ||
289 | * i2o_block_request_free - Frees an I2O block request | ||
290 | * @ireq: I2O block request which should be freed | ||
291 | * | ||
292 | * Frees the allocated memory (gives it back to the request mempool). | ||
293 | */ | ||
294 | static inline void i2o_block_request_free(struct i2o_block_request *ireq) | ||
295 | { | ||
296 | mempool_free(ireq, i2o_blk_req_pool.pool); | ||
297 | }; | ||
298 | |||
299 | /** | ||
300 | * i2o_block_sglist_alloc - Allocate the SG list and map it | ||
301 | * @ireq: I2O block request | ||
302 | * | ||
303 | * Builds the SG list and maps it so it is accessible by the controller. | ||
304 | * | ||
305 | * Returns the number of elements in the SG list or 0 on failure. | ||
306 | */ | ||
307 | static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) | ||
308 | { | ||
309 | struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; | ||
310 | int nents; | ||
311 | |||
312 | nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); | ||
313 | |||
314 | if (rq_data_dir(ireq->req) == READ) | ||
315 | ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; | ||
316 | else | ||
317 | ireq->sg_dma_direction = PCI_DMA_TODEVICE; | ||
318 | |||
319 | ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents, | ||
320 | ireq->sg_dma_direction); | ||
321 | |||
322 | return ireq->sg_nents; | ||
323 | }; | ||
324 | |||
325 | /** | ||
326 | * i2o_block_sglist_free - Frees the SG list | ||
327 | * @ireq: I2O block request from which the SG should be freed | ||
328 | * | ||
329 | * Frees the SG list from the I2O block request. | ||
330 | */ | ||
331 | static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) | ||
332 | { | ||
333 | struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; | ||
334 | |||
335 | dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents, | ||
336 | ireq->sg_dma_direction); | ||
337 | }; | ||
338 | |||
339 | /** | ||
340 | * i2o_block_prep_req_fn - Allocates I2O block device specific struct | ||
341 | * @q: request queue for the request | ||
342 | * @req: the request to prepare | ||
343 | * | ||
344 | * Allocate the necessary i2o_block_request struct and connect it to | ||
345 | * the request. This is needed so that we do not lose the SG list later on. | ||
346 | * | ||
347 | * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. | ||
348 | */ | ||
349 | static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | ||
350 | { | ||
351 | struct i2o_block_device *i2o_blk_dev = q->queuedata; | ||
352 | struct i2o_block_request *ireq; | ||
353 | |||
354 | /* request is already processed by us, so return */ | ||
355 | if (req->flags & REQ_SPECIAL) { | ||
356 | osm_debug("REQ_SPECIAL already set!\n"); | ||
357 | req->flags |= REQ_DONTPREP; | ||
358 | return BLKPREP_OK; | ||
359 | } | ||
360 | |||
361 | /* connect the i2o_block_request to the request */ | ||
362 | if (!req->special) { | ||
363 | ireq = i2o_block_request_alloc(); | ||
364 | if (unlikely(IS_ERR(ireq))) { | ||
365 | osm_debug("unable to allocate i2o_block_request!\n"); | ||
366 | return BLKPREP_DEFER; | ||
367 | } | ||
368 | |||
369 | ireq->i2o_blk_dev = i2o_blk_dev; | ||
370 | req->special = ireq; | ||
371 | ireq->req = req; | ||
372 | } else | ||
373 | ireq = req->special; | ||
374 | |||
375 | /* do not come back here */ | ||
376 | req->flags |= REQ_DONTPREP | REQ_SPECIAL; | ||
377 | |||
378 | return BLKPREP_OK; | ||
379 | }; | ||
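
For context, a sketch of how the prepare function above would typically be wired into a request queue. This assumes the real hook-up happens later in this file during device probing; the helper name and request_fn argument are placeholders.

static struct request_queue *example_queue_setup(request_fn_proc *request_fn,
						 spinlock_t *lock,
						 struct i2o_block_device *dev)
{
	struct request_queue *q = blk_init_queue(request_fn, lock);

	if (!q)
		return NULL;

	q->queuedata = dev;	/* read back by i2o_block_prep_req_fn() */
	blk_queue_prep_rq(q, i2o_block_prep_req_fn);

	return q;
}
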
380 | |||
381 | /** | ||
382 | * i2o_block_delayed_request_fn - delayed request queue function | ||
383 | * @delayed_request: the delayed request with the queue to start | ||
384 | * | ||
385 | * If the request queue is stopped for a disk, and there is no open | ||
386 | * request, a new event is created, which calls this function to start | ||
387 | * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never | ||
388 | * be started again. | ||
389 | */ | ||
390 | static void i2o_block_delayed_request_fn(void *delayed_request) | ||
391 | { | ||
392 | struct i2o_block_delayed_request *dreq = delayed_request; | ||
393 | struct request_queue *q = dreq->queue; | ||
394 | unsigned long flags; | ||
395 | |||
396 | spin_lock_irqsave(q->queue_lock, flags); | ||
397 | blk_start_queue(q); | ||
398 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
399 | kfree(dreq); | ||
400 | }; | ||
401 | |||
402 | /** | ||
403 | * i2o_block_reply - Block OSM reply handler. | ||
404 | * @c: I2O controller from which the message arrives | ||
405 | * @m: message id of reply | ||
406 | * @msg: the actual I2O message reply | ||
407 | * | ||
408 | * This function gets all the message replies. | ||
409 | * | ||
410 | */ | ||
411 | static int i2o_block_reply(struct i2o_controller *c, u32 m, | ||
412 | struct i2o_message *msg) | ||
413 | { | ||
414 | struct i2o_block_request *ireq; | ||
415 | struct request *req; | ||
416 | struct i2o_block_device *dev; | ||
417 | struct request_queue *q; | ||
418 | u8 st; | ||
419 | unsigned long flags; | ||
420 | |||
421 | /* FAILed message */ | ||
422 | if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) { | ||
423 | struct i2o_message *pmsg; | ||
424 | u32 pm; | ||
425 | |||
426 | /* | ||
427 | * FAILed message from controller | ||
428 | * We increment the error count and abort it | ||
429 | * | ||
430 | * In theory this will never happen. The I2O block class | ||
431 | * specification states that block devices never return | ||
432 | * FAILs but instead use the REQ status field...but | ||
433 | * better be on the safe side since no one really follows | ||
434 | * the spec to the book :) | ||
435 | */ | ||
436 | pm = le32_to_cpu(msg->body[3]); | ||
437 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
438 | |||
439 | req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt)); | ||
440 | if (unlikely(!req)) { | ||
441 | osm_err("NULL reply received!\n"); | ||
442 | return -1; | ||
443 | } | ||
444 | |||
445 | ireq = req->special; | ||
446 | dev = ireq->i2o_blk_dev; | ||
447 | q = dev->gd->queue; | ||
448 | |||
449 | req->errors++; | ||
450 | |||
451 | spin_lock_irqsave(q->queue_lock, flags); | ||
452 | |||
453 | while (end_that_request_chunk(req, !req->errors, | ||
454 | le32_to_cpu(pmsg->body[1]))) ; | ||
455 | end_that_request_last(req); | ||
456 | |||
457 | dev->open_queue_depth--; | ||
458 | list_del(&ireq->queue); | ||
459 | blk_start_queue(q); | ||
460 | |||
461 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
462 | |||
463 | /* Now flush the message by making it a NOP */ | ||
464 | i2o_msg_nop(c, pm); | ||
465 | |||
466 | return -1; | ||
467 | } | ||
468 | |||
469 | req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | ||
470 | if (unlikely(!req)) { | ||
471 | osm_err("NULL reply received!\n"); | ||
472 | return -1; | ||
473 | } | ||
474 | |||
475 | ireq = req->special; | ||
476 | dev = ireq->i2o_blk_dev; | ||
477 | q = dev->gd->queue; | ||
478 | |||
479 | if (unlikely(!dev->i2o_dev)) { | ||
480 | /* | ||
481 | * This is a HACK, but Intel Integrated RAID allows the user | ||
482 | * to delete a volume that is claimed, locked, and in use | ||
483 | * by the OS. We have to check for a reply from a | ||
484 | * non-existent device and flag it as an error or the system | ||
485 | * goes kaput... | ||
486 | */ | ||
487 | req->errors++; | ||
488 | osm_warn("Data transfer to deleted device!\n"); | ||
489 | spin_lock_irqsave(q->queue_lock, flags); | ||
490 | while (end_that_request_chunk | ||
491 | (req, !req->errors, le32_to_cpu(msg->body[1]))) ; | ||
492 | end_that_request_last(req); | ||
493 | |||
494 | dev->open_queue_depth--; | ||
495 | list_del(&ireq->queue); | ||
496 | blk_start_queue(q); | ||
497 | |||
498 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
499 | return -1; | ||
500 | } | ||
501 | |||
502 | /* | ||
503 | * Let's see what is cooking. We stuffed the | ||
504 | * request into the transaction context. | ||
505 | */ | ||
506 | |||
507 | st = le32_to_cpu(msg->body[0]) >> 24; | ||
508 | |||
509 | if (st != 0) { | ||
510 | int err; | ||
511 | char *bsa_errors[] = { | ||
512 | "Success", | ||
513 | "Media Error", | ||
514 | "Failure communicating to device", | ||
515 | "Device Failure", | ||
516 | "Device is not ready", | ||
517 | "Media not present", | ||
518 | "Media is locked by another user", | ||
519 | "Media has failed", | ||
520 | "Failure communicating to device", | ||
521 | "Device bus failure", | ||
522 | "Device is locked by another user", | ||
523 | "Device is write protected", | ||
524 | "Device has reset", | ||
525 | "Volume has changed, waiting for acknowledgement" | ||
526 | }; | ||
527 | |||
528 | err = le32_to_cpu(msg->body[0]) & 0xffff; | ||
529 | |||
530 | /* | ||
531 | * Device not ready means one of two things. One is that the | ||
532 | * device went offline (but is not removable media). | ||
533 | * | ||
534 | * The second is that you have a SuperTrak 100 and the | ||
535 | * firmware got constipated. Unlike standard i2o card | ||
536 | * setups the supertrak returns an error rather than | ||
537 | * blocking for the timeout in these cases. | ||
538 | * | ||
539 | * Don't stick a supertrak100 into cache aggressive modes | ||
540 | */ | ||
541 | |||
542 | osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name, | ||
543 | bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]); | ||
544 | if (le32_to_cpu(msg->body[0]) & 0x00ff0000) | ||
545 | printk(KERN_ERR " - DDM attempted %d retries", | ||
546 | (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff); | ||
547 | printk(KERN_ERR ".\n"); | ||
548 | req->errors++; | ||
549 | } else | ||
550 | req->errors = 0; | ||
551 | |||
552 | if (!end_that_request_chunk | ||
553 | (req, !req->errors, le32_to_cpu(msg->body[1]))) { | ||
554 | add_disk_randomness(req->rq_disk); | ||
555 | spin_lock_irqsave(q->queue_lock, flags); | ||
556 | |||
557 | end_that_request_last(req); | ||
558 | |||
559 | dev->open_queue_depth--; | ||
560 | list_del(&ireq->queue); | ||
561 | blk_start_queue(q); | ||
562 | |||
563 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
564 | |||
565 | i2o_block_sglist_free(ireq); | ||
566 | i2o_block_request_free(ireq); | ||
567 | } else | ||
568 | osm_err("still remaining chunks\n"); | ||
569 | |||
570 | return 1; | ||
571 | }; | ||
572 | |||
573 | static void i2o_block_event(struct i2o_event *evt) | ||
574 | { | ||
575 | osm_info("block-osm: event received\n"); | ||
576 | }; | ||
577 | |||
578 | /* | ||
579 | * SCSI-CAM for ioctl geometry mapping | ||
580 | * Duplicated with SCSI - this should be moved into somewhere common | ||
581 | * perhaps genhd ? | ||
582 | * | ||
583 | * LBA -> CHS mapping table taken from: | ||
584 | * | ||
585 | * "Incorporating the I2O Architecture into BIOS for Intel Architecture | ||
586 | * Platforms" | ||
587 | * | ||
588 | * This is an I2O document that is only available to I2O members, | ||
589 | * not developers. | ||
590 | * | ||
591 | * From my understanding, this is how all the I2O cards do this | ||
592 | * | ||
593 | * Disk Size | Sectors | Heads | Cylinders | ||
594 | * ---------------+---------+-------+------------------- | ||
595 | * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) | ||
596 | * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) | ||
597 | * 1G < X | 63 | 64-255 | X/(63 * heads * 512) | ||
598 | * (heads become 64/128/255 above the BLOCK_SIZE_1G/21G/42G limits below) | ||
599 | * | ||
600 | */ | ||
601 | #define BLOCK_SIZE_528M 1081344 | ||
602 | #define BLOCK_SIZE_1G 2097152 | ||
603 | #define BLOCK_SIZE_21G 4403200 | ||
604 | #define BLOCK_SIZE_42G 8806400 | ||
605 | #define BLOCK_SIZE_84G 17612800 | ||
606 | |||
607 | static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, | ||
608 | unsigned char *hds, unsigned char *secs) | ||
609 | { | ||
610 | unsigned long heads, sectors, cylinders; | ||
611 | |||
612 | sectors = 63L; /* Maximize sectors per track */ | ||
613 | if (capacity <= BLOCK_SIZE_528M) | ||
614 | heads = 16; | ||
615 | else if (capacity <= BLOCK_SIZE_1G) | ||
616 | heads = 32; | ||
617 | else if (capacity <= BLOCK_SIZE_21G) | ||
618 | heads = 64; | ||
619 | else if (capacity <= BLOCK_SIZE_42G) | ||
620 | heads = 128; | ||
621 | else | ||
622 | heads = 255; | ||
623 | |||
624 | cylinders = (unsigned long)capacity / (heads * sectors); | ||
625 | |||
626 | *cyls = (unsigned short)cylinders; /* Stuff return values */ | ||
627 | *secs = (unsigned char)sectors; | ||
628 | *hds = (unsigned char)heads; | ||
629 | } | ||
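
For illustration only (not part of the driver): the same threshold logic can be sanity-checked in user space. The sketch below assumes the capacity is given in 512-byte sectors, which is what get_capacity() returns; the helper name and sample value are made up for the example.

/* Stand-alone sketch of the LBA -> CHS mapping above (illustrative). */
#include <stdio.h>

static void chs(unsigned long capacity, unsigned short *cyls,
                unsigned char *hds, unsigned char *secs)
{
        unsigned long heads, sectors = 63;      /* always 63 sectors/track */

        if (capacity <= 1081344)                /* BLOCK_SIZE_528M */
                heads = 16;
        else if (capacity <= 2097152)           /* BLOCK_SIZE_1G */
                heads = 32;
        else if (capacity <= 4403200)           /* BLOCK_SIZE_21G */
                heads = 64;
        else if (capacity <= 8806400)           /* BLOCK_SIZE_42G */
                heads = 128;
        else
                heads = 255;

        *cyls = (unsigned short)(capacity / (heads * sectors));
        *hds = (unsigned char)heads;
        *secs = (unsigned char)sectors;
}

int main(void)
{
        unsigned short c;
        unsigned char h, s;

        chs(2097152, &c, &h, &s);               /* a 1 GB volume */
        printf("C/H/S = %d/%d/%d\n", c, h, s);  /* prints C/H/S = 1040/32/63 */
        return 0;
}
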
630 | |||
631 | /** | ||
632 | * i2o_block_open - Open the block device | ||
633 | * | ||
634 | * Power up the device, mount and lock the media. This function is called | ||
635 | * when the block device is opened for access. | ||
636 | * | ||
637 | * Returns 0 on success or negative error code on failure. | ||
638 | */ | ||
639 | static int i2o_block_open(struct inode *inode, struct file *file) | ||
640 | { | ||
641 | struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data; | ||
642 | |||
643 | if (!dev->i2o_dev) | ||
644 | return -ENODEV; | ||
645 | |||
646 | if (dev->power > 0x1f) | ||
647 | i2o_block_device_power(dev, 0x02); | ||
648 | |||
649 | i2o_block_device_mount(dev->i2o_dev, -1); | ||
650 | |||
651 | i2o_block_device_lock(dev->i2o_dev, -1); | ||
652 | |||
653 | osm_debug("Ready.\n"); | ||
654 | |||
655 | return 0; | ||
656 | }; | ||
657 | |||
658 | /** | ||
659 | * i2o_block_release - Release the I2O block device | ||
660 | * | ||
661 | * Unlock and unmount the media, and power down the device. Gets called when | ||
662 | * the block device is closed. | ||
663 | * | ||
664 | * Returns 0 on success or negative error code on failure. | ||
665 | */ | ||
666 | static int i2o_block_release(struct inode *inode, struct file *file) | ||
667 | { | ||
668 | struct gendisk *disk = inode->i_bdev->bd_disk; | ||
669 | struct i2o_block_device *dev = disk->private_data; | ||
670 | u8 operation; | ||
671 | |||
672 | /* | ||
673 | * This is to deal with the case of an application | ||
674 | * opening a device and then the device disappears while | ||
675 | * it's in use, and then the application tries to release | ||
676 | * it. ex: Unmounting a deleted RAID volume at reboot. | ||
677 | * If we send messages, it will just cause FAILs since | ||
678 | * the TID no longer exists. | ||
679 | */ | ||
680 | if (!dev->i2o_dev) | ||
681 | return 0; | ||
682 | |||
683 | i2o_block_device_flush(dev->i2o_dev); | ||
684 | |||
685 | i2o_block_device_unlock(dev->i2o_dev, -1); | ||
686 | |||
687 | if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */ | ||
688 | operation = 0x21; | ||
689 | else | ||
690 | operation = 0x24; | ||
691 | |||
692 | i2o_block_device_power(dev, operation); | ||
693 | |||
694 | return 0; | ||
695 | } | ||
696 | |||
697 | /** | ||
698 | * i2o_block_ioctl - Issue device specific ioctl calls. | ||
699 | * @cmd: ioctl command | ||
700 | * @arg: ioctl argument | ||
701 | * | ||
702 | * Handles ioctl requests for the block device. | ||
703 | * | ||
704 | * Returns 0 on success or a negative error code on failure. | ||
705 | */ | ||
706 | static int i2o_block_ioctl(struct inode *inode, struct file *file, | ||
707 | unsigned int cmd, unsigned long arg) | ||
708 | { | ||
709 | struct gendisk *disk = inode->i_bdev->bd_disk; | ||
710 | struct i2o_block_device *dev = disk->private_data; | ||
711 | void __user *argp = (void __user *)arg; | ||
712 | |||
713 | /* Anyone capable of this syscall can do *real bad* things */ | ||
714 | |||
715 | if (!capable(CAP_SYS_ADMIN)) | ||
716 | return -EPERM; | ||
717 | |||
718 | switch (cmd) { | ||
719 | case HDIO_GETGEO: | ||
720 | { | ||
721 | struct hd_geometry g; | ||
722 | i2o_block_biosparam(get_capacity(disk), | ||
723 | &g.cylinders, &g.heads, &g.sectors); | ||
724 | g.start = get_start_sect(inode->i_bdev); | ||
725 | return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0; | ||
726 | } | ||
727 | |||
728 | case BLKI2OGRSTRAT: | ||
729 | return put_user(dev->rcache, (int __user *)arg); | ||
730 | case BLKI2OGWSTRAT: | ||
731 | return put_user(dev->wcache, (int __user *)arg); | ||
732 | case BLKI2OSRSTRAT: | ||
733 | if (arg < 0 || arg > CACHE_SMARTFETCH) | ||
734 | return -EINVAL; | ||
735 | dev->rcache = arg; | ||
736 | return 0; | ||
737 | case BLKI2OSWSTRAT: | ||
738 | if (arg != 0 | ||
739 | && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) | ||
740 | return -EINVAL; | ||
741 | dev->wcache = arg; | ||
742 | return 0; | ||
743 | } | ||
744 | return -ENOTTY; | ||
745 | }; | ||
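
As a rough user-space illustration of the cache-strategy ioctls handled above (a sketch only: it assumes the BLKI2OGRSTRAT/BLKI2OSRSTRAT numbers are exported through <linux/i2o-dev.h> and that the volume appears as /dev/i2o/hda, matching the disk_name set in i2o_block_probe()):

/* Hypothetical user-space caller; needs CAP_SYS_ADMIN, as enforced above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2o-dev.h>      /* assumed to provide the BLKI2O* ioctl numbers */

int main(void)
{
        int strategy = 0;
        int fd = open("/dev/i2o/hda", O_RDONLY);

        if (fd < 0)
                return 1;

        /* read strategies are returned through a pointer (put_user above) */
        if (ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
                printf("current read cache strategy: %d\n", strategy);

        /* set strategies take the value directly in arg (dev->rcache = arg);
         * writing back the value just read is always within the valid range */
        if (ioctl(fd, BLKI2OSRSTRAT, strategy) < 0)
                perror("BLKI2OSRSTRAT");

        close(fd);
        return 0;
}
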
746 | |||
747 | /** | ||
748 | * i2o_block_media_changed - Have we seen a media change? | ||
749 | * @disk: gendisk which should be verified | ||
750 | * | ||
751 | * Verifies if the media has changed. | ||
752 | * | ||
753 | * Returns 1 if the media was changed or 0 otherwise. | ||
754 | */ | ||
755 | static int i2o_block_media_changed(struct gendisk *disk) | ||
756 | { | ||
757 | struct i2o_block_device *p = disk->private_data; | ||
758 | |||
759 | if (p->media_change_flag) { | ||
760 | p->media_change_flag = 0; | ||
761 | return 1; | ||
762 | } | ||
763 | return 0; | ||
764 | } | ||
765 | |||
766 | /** | ||
767 | * i2o_block_transfer - Transfer a request to/from the I2O controller | ||
768 | * @req: the request which should be transferred | ||
769 | * | ||
770 | * This function converts the request into an I2O message. The necessary | ||
771 | * DMA buffers are allocated and, once everything is set up, the message is | ||
772 | * posted to the I2O controller. No cleanup is done by this function; that | ||
773 | * happens on the interrupt side when the reply arrives. | ||
774 | * | ||
775 | * Returns 0 on success or a negative error code on failure. | ||
776 | */ | ||
777 | static int i2o_block_transfer(struct request *req) | ||
778 | { | ||
779 | struct i2o_block_device *dev = req->rq_disk->private_data; | ||
780 | struct i2o_controller *c = dev->i2o_dev->iop; | ||
781 | int tid = dev->i2o_dev->lct_data.tid; | ||
782 | struct i2o_message __iomem *msg; | ||
783 | void __iomem *mptr; | ||
784 | struct i2o_block_request *ireq = req->special; | ||
785 | struct scatterlist *sg; | ||
786 | int sgnum; | ||
787 | int i; | ||
788 | u32 m; | ||
789 | u32 tcntxt; | ||
790 | u32 sg_flags; | ||
791 | int rc; | ||
792 | |||
793 | m = i2o_msg_get(c, &msg); | ||
794 | if (m == I2O_QUEUE_EMPTY) { | ||
795 | rc = -EBUSY; | ||
796 | goto exit; | ||
797 | } | ||
798 | |||
799 | tcntxt = i2o_cntxt_list_add(c, req); | ||
800 | if (!tcntxt) { | ||
801 | rc = -ENOMEM; | ||
802 | goto nop_msg; | ||
803 | } | ||
804 | |||
805 | if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) { | ||
806 | rc = -ENOMEM; | ||
807 | goto context_remove; | ||
808 | } | ||
809 | |||
810 | /* Build the message based on the request. */ | ||
811 | writel(i2o_block_driver.context, &msg->u.s.icntxt); | ||
812 | writel(tcntxt, &msg->u.s.tcntxt); | ||
813 | writel(req->nr_sectors << 9, &msg->body[1]); | ||
814 | |||
815 | writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]); | ||
816 | writel(req->sector >> 23, &msg->body[3]); | ||
817 | |||
818 | mptr = &msg->body[4]; | ||
819 | |||
820 | sg = ireq->sg_table; | ||
821 | |||
822 | if (rq_data_dir(req) == READ) { | ||
823 | writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid, | ||
824 | &msg->u.head[1]); | ||
825 | sg_flags = 0x10000000; | ||
826 | switch (dev->rcache) { | ||
827 | case CACHE_NULL: | ||
828 | writel(0, &msg->body[0]); | ||
829 | break; | ||
830 | case CACHE_PREFETCH: | ||
831 | writel(0x201F0008, &msg->body[0]); | ||
832 | break; | ||
833 | case CACHE_SMARTFETCH: | ||
834 | if (req->nr_sectors > 16) | ||
835 | writel(0x201F0008, &msg->body[0]); | ||
836 | else | ||
837 | writel(0x001F0000, &msg->body[0]); | ||
838 | break; | ||
839 | } | ||
840 | } else { | ||
841 | writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid, | ||
842 | &msg->u.head[1]); | ||
843 | sg_flags = 0x14000000; | ||
844 | switch (dev->wcache) { | ||
845 | case CACHE_NULL: | ||
846 | writel(0, &msg->body[0]); | ||
847 | break; | ||
848 | case CACHE_WRITETHROUGH: | ||
849 | writel(0x001F0008, &msg->body[0]); | ||
850 | break; | ||
851 | case CACHE_WRITEBACK: | ||
852 | writel(0x001F0010, &msg->body[0]); | ||
853 | break; | ||
854 | case CACHE_SMARTBACK: | ||
855 | if (req->nr_sectors > 16) | ||
856 | writel(0x001F0004, &msg->body[0]); | ||
857 | else | ||
858 | writel(0x001F0010, &msg->body[0]); | ||
859 | break; | ||
860 | case CACHE_SMARTTHROUGH: | ||
861 | if (req->nr_sectors > 16) | ||
862 | writel(0x001F0004, &msg->body[0]); | ||
863 | else | ||
864 | writel(0x001F0010, &msg->body[0]); | ||
865 | } | ||
866 | } | ||
867 | |||
868 | for (i = sgnum; i > 0; i--) { | ||
869 | if (i == 1) | ||
870 | sg_flags |= 0x80000000; | ||
871 | writel(sg_flags | sg_dma_len(sg), mptr); | ||
872 | writel(sg_dma_address(sg), mptr + 4); | ||
873 | mptr += 8; | ||
874 | sg++; | ||
875 | } | ||
876 | |||
877 | writel(I2O_MESSAGE_SIZE | ||
878 | (((unsigned long)mptr - | ||
879 | (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8, | ||
880 | &msg->u.head[0]); | ||
881 | |||
882 | list_add_tail(&ireq->queue, &dev->open_queue); | ||
883 | dev->open_queue_depth++; | ||
884 | |||
885 | i2o_msg_post(c, m); | ||
886 | |||
887 | return 0; | ||
888 | |||
889 | context_remove: | ||
890 | i2o_cntxt_list_remove(c, req); | ||
891 | |||
892 | nop_msg: | ||
893 | i2o_msg_nop(c, m); | ||
894 | |||
895 | exit: | ||
896 | return rc; | ||
897 | }; | ||
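
A quick aside on the two writel() calls that carry the block offset: the shift by 23 into msg->body[3] is just ((u64)sector << 9) >> 32, i.e. the upper half of the 64-bit byte offset whose lower half goes into msg->body[2]. A tiny stand-alone check (illustrative, with an arbitrary sector number):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sector = 0x123456789ULL;       /* arbitrary 512-byte sector */
        uint64_t offset = sector << 9;          /* byte offset on the volume */
        uint32_t lo = (uint32_t)(offset & 0xffffffff);  /* -> msg->body[2] */
        uint32_t hi = (uint32_t)(sector >> 23); /* == offset >> 32, -> body[3] */

        /* recombining both words yields the original byte offset again */
        printf("%s\n", (((uint64_t)hi << 32) | lo) == offset ? "ok" : "mismatch");
        return 0;
}
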
898 | |||
899 | /** | ||
900 | * i2o_block_request_fn - request queue handling function | ||
901 | * @q: request queue from which requests are fetched | ||
902 | * | ||
903 | * Takes the next request from the queue, transfers it and, if no error | ||
904 | * occurs, dequeues it from the queue. On arrival of the reply the message | ||
905 | * will be processed further. If an error occurs the request is requeued. | ||
906 | */ | ||
907 | static void i2o_block_request_fn(struct request_queue *q) | ||
908 | { | ||
909 | struct request *req; | ||
910 | |||
911 | while (!blk_queue_plugged(q)) { | ||
912 | req = elv_next_request(q); | ||
913 | if (!req) | ||
914 | break; | ||
915 | |||
916 | if (blk_fs_request(req)) { | ||
917 | struct i2o_block_delayed_request *dreq; | ||
918 | struct i2o_block_request *ireq = req->special; | ||
919 | unsigned int queue_depth; | ||
920 | |||
921 | queue_depth = ireq->i2o_blk_dev->open_queue_depth; | ||
922 | |||
923 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) | ||
924 | if (!i2o_block_transfer(req)) { | ||
925 | blkdev_dequeue_request(req); | ||
926 | continue; | ||
927 | } | ||
928 | |||
929 | if (queue_depth) | ||
930 | break; | ||
931 | |||
932 | /* stop the queue and retry later */ | ||
933 | dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC); | ||
934 | if (!dreq) | ||
935 | continue; | ||
936 | |||
937 | dreq->queue = q; | ||
938 | INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, | ||
939 | dreq); | ||
940 | |||
941 | osm_info("transfer error\n"); | ||
942 | if (!queue_delayed_work(i2o_block_driver.event_queue, | ||
943 | &dreq->work, | ||
944 | I2O_BLOCK_RETRY_TIME)) | ||
945 | kfree(dreq); | ||
946 | else { | ||
947 | blk_stop_queue(q); | ||
948 | break; | ||
949 | } | ||
950 | } else | ||
951 | end_request(req, 0); | ||
952 | } | ||
953 | }; | ||
954 | |||
955 | /* I2O Block device operations definition */ | ||
956 | static struct block_device_operations i2o_block_fops = { | ||
957 | .owner = THIS_MODULE, | ||
958 | .open = i2o_block_open, | ||
959 | .release = i2o_block_release, | ||
960 | .ioctl = i2o_block_ioctl, | ||
961 | .media_changed = i2o_block_media_changed | ||
962 | }; | ||
963 | |||
964 | /** | ||
965 | * i2o_block_device_alloc - Allocate memory for an I2O Block device | ||
966 | * | ||
967 | * Allocate memory for the i2o_block_device struct, gendisk and request | ||
968 | * queue and initialize them as far as possible without extra device data. | ||
969 | * | ||
970 | * Returns a pointer to the allocated I2O Block device on success or an | ||
971 | * ERR_PTR-encoded negative error code on failure. | ||
972 | */ | ||
973 | static struct i2o_block_device *i2o_block_device_alloc(void) | ||
974 | { | ||
975 | struct i2o_block_device *dev; | ||
976 | struct gendisk *gd; | ||
977 | struct request_queue *queue; | ||
978 | int rc; | ||
979 | |||
980 | dev = kmalloc(sizeof(*dev), GFP_KERNEL); | ||
981 | if (!dev) { | ||
982 | osm_err("Insufficient memory to allocate I2O Block disk.\n"); | ||
983 | rc = -ENOMEM; | ||
984 | goto exit; | ||
985 | } | ||
986 | memset(dev, 0, sizeof(*dev)); | ||
987 | |||
988 | INIT_LIST_HEAD(&dev->open_queue); | ||
989 | spin_lock_init(&dev->lock); | ||
990 | dev->rcache = CACHE_PREFETCH; | ||
991 | dev->wcache = CACHE_WRITEBACK; | ||
992 | |||
993 | /* allocate a gendisk with 16 minors (whole disk + 15 partitions) */ | ||
994 | gd = alloc_disk(16); | ||
995 | if (!gd) { | ||
996 | osm_err("Insufficient memory to allocate gendisk.\n"); | ||
997 | rc = -ENOMEM; | ||
998 | goto cleanup_dev; | ||
999 | } | ||
1000 | |||
1001 | /* initialize the request queue */ | ||
1002 | queue = blk_init_queue(i2o_block_request_fn, &dev->lock); | ||
1003 | if (!queue) { | ||
1004 | osm_err("Insufficient memory to allocate request queue.\n"); | ||
1005 | rc = -ENOMEM; | ||
1006 | goto cleanup_queue; | ||
1007 | } | ||
1008 | |||
1009 | blk_queue_prep_rq(queue, i2o_block_prep_req_fn); | ||
1010 | |||
1011 | gd->major = I2O_MAJOR; | ||
1012 | gd->queue = queue; | ||
1013 | gd->fops = &i2o_block_fops; | ||
1014 | gd->private_data = dev; | ||
1015 | |||
1016 | dev->gd = gd; | ||
1017 | |||
1018 | return dev; | ||
1019 | |||
1020 | cleanup_queue: | ||
1021 | put_disk(gd); | ||
1022 | |||
1023 | cleanup_dev: | ||
1024 | kfree(dev); | ||
1025 | |||
1026 | exit: | ||
1027 | return ERR_PTR(rc); | ||
1028 | }; | ||
1029 | |||
1030 | /** | ||
1031 | * i2o_block_probe - verify if dev is an I2O Block device and install it | ||
1032 | * @dev: device to verify if it is an I2O Block device | ||
1033 | * | ||
1034 | * We only check whether the user_tid of the device is 0xfff and then install | ||
1035 | * the device. Otherwise it is used by some other device (e.g. RAID). | ||
1036 | * | ||
1037 | * Returns 0 on success or negative error code on failure. | ||
1038 | */ | ||
1039 | static int i2o_block_probe(struct device *dev) | ||
1040 | { | ||
1041 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
1042 | struct i2o_block_device *i2o_blk_dev; | ||
1043 | struct i2o_controller *c = i2o_dev->iop; | ||
1044 | struct gendisk *gd; | ||
1045 | struct request_queue *queue; | ||
1046 | static int unit = 0; | ||
1047 | int rc; | ||
1048 | u64 size; | ||
1049 | u32 blocksize; | ||
1050 | u16 power; | ||
1051 | u32 flags, status; | ||
1052 | int segments; | ||
1053 | |||
1054 | /* skip devices which are used by IOP */ | ||
1055 | if (i2o_dev->lct_data.user_tid != 0xfff) { | ||
1056 | osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid); | ||
1057 | return -ENODEV; | ||
1058 | } | ||
1059 | |||
1060 | osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
1061 | |||
1062 | if (i2o_device_claim(i2o_dev)) { | ||
1063 | osm_warn("Unable to claim device. Installation aborted\n"); | ||
1064 | rc = -EFAULT; | ||
1065 | goto exit; | ||
1066 | } | ||
1067 | |||
1068 | i2o_blk_dev = i2o_block_device_alloc(); | ||
1069 | if (IS_ERR(i2o_blk_dev)) { | ||
1070 | osm_err("could not alloc a new I2O block device"); | ||
1071 | rc = PTR_ERR(i2o_blk_dev); | ||
1072 | goto claim_release; | ||
1073 | } | ||
1074 | |||
1075 | i2o_blk_dev->i2o_dev = i2o_dev; | ||
1076 | dev_set_drvdata(dev, i2o_blk_dev); | ||
1077 | |||
1078 | /* setup gendisk */ | ||
1079 | gd = i2o_blk_dev->gd; | ||
1080 | gd->first_minor = unit << 4; | ||
1081 | sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit); | ||
1082 | sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit); | ||
1083 | gd->driverfs_dev = &i2o_dev->device; | ||
1084 | |||
1085 | /* setup request queue */ | ||
1086 | queue = gd->queue; | ||
1087 | queue->queuedata = i2o_blk_dev; | ||
1088 | |||
1089 | blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS); | ||
1090 | blk_queue_max_sectors(queue, I2O_MAX_SECTORS); | ||
1091 | |||
1092 | if (c->short_req) | ||
1093 | segments = 8; | ||
1094 | else { | ||
1095 | i2o_status_block *sb; | ||
1096 | |||
1097 | sb = c->status_block.virt; | ||
1098 | |||
1099 | segments = (sb->inbound_frame_size - | ||
1100 | sizeof(struct i2o_message) / 4 - 4) / 2; | ||
1101 | } | ||
1102 | |||
1103 | blk_queue_max_hw_segments(queue, segments); | ||
1104 | |||
1105 | osm_debug("max sectors = %d\n", I2O_MAX_SECTORS); | ||
1106 | osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS); | ||
1107 | osm_debug("hw segments = %d\n", segments); | ||
1108 | |||
1109 | /* | ||
1110 | * Ask for the current media data. If that isn't supported | ||
1111 | * then we ask for the device capacity data | ||
1112 | */ | ||
1113 | if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0 | ||
1114 | || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) { | ||
1115 | i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4); | ||
1116 | i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8); | ||
1117 | } | ||
1118 | osm_debug("blocksize = %d\n", blocksize); | ||
1119 | |||
1120 | if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) | ||
1121 | power = 0; | ||
1122 | i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); | ||
1123 | i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); | ||
1124 | |||
1125 | set_capacity(gd, size >> 9); | ||
1126 | |||
1127 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); | ||
1128 | |||
1129 | add_disk(gd); | ||
1130 | |||
1131 | unit++; | ||
1132 | |||
1133 | return 0; | ||
1134 | |||
1135 | claim_release: | ||
1136 | i2o_device_claim_release(i2o_dev); | ||
1137 | |||
1138 | exit: | ||
1139 | return rc; | ||
1140 | }; | ||
1141 | |||
1142 | /* Block OSM driver struct */ | ||
1143 | static struct i2o_driver i2o_block_driver = { | ||
1144 | .name = OSM_NAME, | ||
1145 | .event = i2o_block_event, | ||
1146 | .reply = i2o_block_reply, | ||
1147 | .classes = i2o_block_class_id, | ||
1148 | .driver = { | ||
1149 | .probe = i2o_block_probe, | ||
1150 | .remove = i2o_block_remove, | ||
1151 | }, | ||
1152 | }; | ||
1153 | |||
1154 | /** | ||
1155 | * i2o_block_init - Block OSM initialization function | ||
1156 | * | ||
1157 | * Allocates the slab and mempool for request structs, registers the | ||
1158 | * i2o_block block device and finally registers the Block OSM with the I2O core. | ||
1159 | * | ||
1160 | * Returns 0 on success or negative error code on failure. | ||
1161 | */ | ||
1162 | static int __init i2o_block_init(void) | ||
1163 | { | ||
1164 | int rc; | ||
1165 | int size; | ||
1166 | |||
1167 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1168 | |||
1169 | /* Allocate request mempool and slab */ | ||
1170 | size = sizeof(struct i2o_block_request); | ||
1171 | i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, | ||
1172 | SLAB_HWCACHE_ALIGN, NULL, | ||
1173 | NULL); | ||
1174 | if (!i2o_blk_req_pool.slab) { | ||
1175 | osm_err("can't init request slab\n"); | ||
1176 | rc = -ENOMEM; | ||
1177 | goto exit; | ||
1178 | } | ||
1179 | |||
1180 | i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE, | ||
1181 | mempool_alloc_slab, | ||
1182 | mempool_free_slab, | ||
1183 | i2o_blk_req_pool.slab); | ||
1184 | if (!i2o_blk_req_pool.pool) { | ||
1185 | osm_err("can't init request mempool\n"); | ||
1186 | rc = -ENOMEM; | ||
1187 | goto free_slab; | ||
1188 | } | ||
1189 | |||
1190 | /* Register the block device interfaces */ | ||
1191 | rc = register_blkdev(I2O_MAJOR, "i2o_block"); | ||
1192 | if (rc) { | ||
1193 | osm_err("unable to register block device\n"); | ||
1194 | goto free_mempool; | ||
1195 | } | ||
1196 | #ifdef MODULE | ||
1197 | osm_info("registered device at major %d\n", I2O_MAJOR); | ||
1198 | #endif | ||
1199 | |||
1200 | /* Register Block OSM into I2O core */ | ||
1201 | rc = i2o_driver_register(&i2o_block_driver); | ||
1202 | if (rc) { | ||
1203 | osm_err("Could not register Block driver\n"); | ||
1204 | goto unregister_blkdev; | ||
1205 | } | ||
1206 | |||
1207 | return 0; | ||
1208 | |||
1209 | unregister_blkdev: | ||
1210 | unregister_blkdev(I2O_MAJOR, "i2o_block"); | ||
1211 | |||
1212 | free_mempool: | ||
1213 | mempool_destroy(i2o_blk_req_pool.pool); | ||
1214 | |||
1215 | free_slab: | ||
1216 | kmem_cache_destroy(i2o_blk_req_pool.slab); | ||
1217 | |||
1218 | exit: | ||
1219 | return rc; | ||
1220 | }; | ||
1221 | |||
1222 | /** | ||
1223 | * i2o_block_exit - Block OSM exit function | ||
1224 | * | ||
1225 | * Unregisters Block OSM from I2O core, unregisters i2o_block block device | ||
1226 | * and frees the mempool and slab. | ||
1227 | */ | ||
1228 | static void __exit i2o_block_exit(void) | ||
1229 | { | ||
1230 | /* Unregister I2O Block OSM from I2O core */ | ||
1231 | i2o_driver_unregister(&i2o_block_driver); | ||
1232 | |||
1233 | /* Unregister block device */ | ||
1234 | unregister_blkdev(I2O_MAJOR, "i2o_block"); | ||
1235 | |||
1236 | /* Free request mempool and slab */ | ||
1237 | mempool_destroy(i2o_blk_req_pool.pool); | ||
1238 | kmem_cache_destroy(i2o_blk_req_pool.slab); | ||
1239 | }; | ||
1240 | |||
1241 | MODULE_AUTHOR("Red Hat"); | ||
1242 | MODULE_LICENSE("GPL"); | ||
1243 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1244 | MODULE_VERSION(OSM_VERSION); | ||
1245 | |||
1246 | module_init(i2o_block_init); | ||
1247 | module_exit(i2o_block_exit); | ||
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h new file mode 100644 index 000000000000..ddd9a15679c0 --- /dev/null +++ b/drivers/message/i2o/i2o_block.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Block OSM structures/API | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * For the purpose of avoiding doubt the preferred form of the work | ||
19 | * for making modifications shall be a standards compliant form such | ||
20 | * gzipped tar and not one requiring a proprietary or patent encumbered | ||
21 | * tool to unpack. | ||
22 | * | ||
23 | * Fixes/additions: | ||
24 | * Steve Ralston: | ||
25 | * Multiple device handling error fixes, | ||
26 | * Added a queue depth. | ||
27 | * Alan Cox: | ||
28 | * FC920 has an rmw bug. Don't OR in the end marker. | ||
29 | * Removed queue walk, fixed for 64bitness. | ||
30 | * Rewrote much of the code over time | ||
31 | * Added indirect block lists | ||
32 | * Handle 64K limits on many controllers | ||
33 | * Don't use indirects on the Promise (breaks) | ||
34 | * Heavily chop down the queue depths | ||
35 | * Deepak Saxena: | ||
36 | * Independent queues per IOP | ||
37 | * Support for dynamic device creation/deletion | ||
38 | * Code cleanup | ||
39 | * Support for larger I/Os through merge* functions | ||
40 | * (taken from DAC960 driver) | ||
41 | * Boji T Kannanthanam: | ||
42 | * Set the I2O Block devices to be detected in increasing | ||
43 | * order of TIDs during boot. | ||
44 | * Search and set the I2O block device that we boot off | ||
45 | * from as the first device to be claimed (as /dev/i2o/hda) | ||
46 | * Properly attach/detach I2O gendisk structure from the | ||
47 | * system gendisk list. The I2O block devices now appear in | ||
48 | * /proc/partitions. | ||
49 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
50 | * Minor bugfixes for 2.6. | ||
51 | */ | ||
52 | |||
53 | #ifndef I2O_BLOCK_OSM_H | ||
54 | #define I2O_BLOCK_OSM_H | ||
55 | |||
56 | #define I2O_BLOCK_RETRY_TIME HZ/4 | ||
57 | #define I2O_BLOCK_MAX_OPEN_REQUESTS 50 | ||
58 | |||
59 | /* I2O Block OSM mempool struct */ | ||
60 | struct i2o_block_mempool { | ||
61 | kmem_cache_t *slab; | ||
62 | mempool_t *pool; | ||
63 | }; | ||
64 | |||
65 | /* I2O Block device descriptor */ | ||
66 | struct i2o_block_device { | ||
67 | struct i2o_device *i2o_dev; /* pointer to I2O device */ | ||
68 | struct gendisk *gd; | ||
69 | spinlock_t lock; /* queue lock */ | ||
70 | struct list_head open_queue; /* list of transferred, but unfinished | ||
71 | requests */ | ||
72 | unsigned int open_queue_depth; /* number of requests in the queue */ | ||
73 | |||
74 | int rcache; /* read cache flags */ | ||
75 | int wcache; /* write cache flags */ | ||
76 | int flags; | ||
77 | int power; /* power state */ | ||
78 | int media_change_flag; /* media changed flag */ | ||
79 | }; | ||
80 | |||
81 | /* I2O Block device request */ | ||
82 | struct i2o_block_request | ||
83 | { | ||
84 | struct list_head queue; | ||
85 | struct request *req; /* corresponding request */ | ||
86 | struct i2o_block_device *i2o_blk_dev; /* I2O block device */ | ||
87 | int sg_dma_direction; /* direction of DMA buffer read/write */ | ||
88 | int sg_nents; /* number of SG elements */ | ||
89 | struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */ | ||
90 | }; | ||
91 | |||
92 | /* I2O Block device delayed request */ | ||
93 | struct i2o_block_delayed_request | ||
94 | { | ||
95 | struct work_struct work; | ||
96 | struct request_queue *queue; | ||
97 | }; | ||
98 | |||
99 | #endif | ||
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c new file mode 100644 index 000000000000..5fc5004ea07a --- /dev/null +++ b/drivers/message/i2o/i2o_config.c | |||
@@ -0,0 +1,1160 @@ | |||
1 | /* | ||
2 | * I2O Configuration Interface Driver | ||
3 | * | ||
4 | * (C) Copyright 1999-2002 Red Hat | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * Fixes/additions: | ||
9 | * Deepak Saxena (04/20/1999): | ||
10 | * Added basic ioctl() support | ||
11 | * Deepak Saxena (06/07/1999): | ||
12 | * Added software download ioctl (still testing) | ||
13 | * Auvo Häkkinen (09/10/1999): | ||
14 | * Changes to i2o_cfg_reply(), ioctl_parms() | ||
15 | * Added ioctl_validate() | ||
16 | * Taneli Vähäkangas (09/30/1999): | ||
17 | * Fixed ioctl_swdl() | ||
18 | * Taneli Vähäkangas (10/04/1999): | ||
19 | * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel() | ||
20 | * Deepak Saxena (11/18/1999): | ||
21 | * Added event management support | ||
22 | * Alan Cox <alan@redhat.com>: | ||
23 | * 2.4 rewrite ported to 2.5 | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Added pass-thru support for Adaptec's raidutils | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or | ||
28 | * modify it under the terms of the GNU General Public License | ||
29 | * as published by the Free Software Foundation; either version | ||
30 | * 2 of the License, or (at your option) any later version. | ||
31 | */ | ||
32 | |||
33 | #include <linux/module.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/i2o.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/miscdevice.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/smp_lock.h> | ||
44 | #include <linux/ioctl32.h> | ||
45 | #include <linux/compat.h> | ||
46 | #include <linux/syscalls.h> | ||
47 | |||
48 | #include <asm/uaccess.h> | ||
49 | #include <asm/io.h> | ||
50 | |||
51 | #define OSM_NAME "config-osm" | ||
52 | #define OSM_VERSION "$Rev$" | ||
53 | #define OSM_DESCRIPTION "I2O Configuration OSM" | ||
54 | |||
55 | extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); | ||
56 | |||
57 | static spinlock_t i2o_config_lock; | ||
58 | |||
59 | #define MODINC(x,y) ((x) = ((x) + 1) % (y)) | ||
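
MODINC is the usual circular-buffer index increment used for the per-file event queue below; a trivial stand-alone illustration of the wrap-around (values chosen arbitrarily):

#include <stdio.h>

#define MODINC(x,y) ((x) = ((x) + 1) % (y))

int main(void)
{
        unsigned short idx = 0;
        int i;

        /* with a queue length of 4 the index cycles 1 2 3 0 1 */
        for (i = 0; i < 5; i++)
                printf("%d ", MODINC(idx, 4));
        printf("\n");
        return 0;
}
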
60 | |||
61 | struct sg_simple_element { | ||
62 | u32 flag_count; | ||
63 | u32 addr_bus; | ||
64 | }; | ||
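
The flag_count word of a simple SG element packs the SGL flags into the top byte and the byte count into the low 24 bits, which is what the 0x10000000 / 0x04000000 / 0xffffff masks in the passthru handlers below rely on. A small decode example (the value is chosen purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* simple element (0x10...), direction bit (0x04...), 4096 bytes */
        uint32_t flag_count = 0x10000000 | 0x04000000 | 4096;

        printf("simple: %s\n", (flag_count & 0x10000000) ? "yes" : "no");
        printf("dir   : %s\n", (flag_count & 0x04000000) ? "yes" : "no");
        printf("bytes : %u\n", flag_count & 0xffffff);
        return 0;
}
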
65 | |||
66 | struct i2o_cfg_info { | ||
67 | struct file *fp; | ||
68 | struct fasync_struct *fasync; | ||
69 | struct i2o_evt_info event_q[I2O_EVT_Q_LEN]; | ||
70 | u16 q_in; // Queue head index | ||
71 | u16 q_out; // Queue tail index | ||
72 | u16 q_len; // Queue length | ||
73 | u16 q_lost; // Number of lost events | ||
74 | ulong q_id; // Event queue ID...used as tx_context | ||
75 | struct i2o_cfg_info *next; | ||
76 | }; | ||
77 | static struct i2o_cfg_info *open_files = NULL; | ||
78 | static ulong i2o_cfg_info_id = 0; | ||
79 | |||
80 | /* | ||
81 | * Each of these describes an i2o message handler. They are | ||
82 | * multiplexed by the i2o_core code | ||
83 | */ | ||
84 | |||
85 | static struct i2o_driver i2o_config_driver = { | ||
86 | .name = OSM_NAME | ||
87 | }; | ||
88 | |||
89 | static int i2o_cfg_getiops(unsigned long arg) | ||
90 | { | ||
91 | struct i2o_controller *c; | ||
92 | u8 __user *user_iop_table = (void __user *)arg; | ||
93 | u8 tmp[MAX_I2O_CONTROLLERS]; | ||
94 | int ret = 0; | ||
95 | |||
96 | memset(tmp, 0, MAX_I2O_CONTROLLERS); | ||
97 | |||
98 | list_for_each_entry(c, &i2o_controllers, list) | ||
99 | tmp[c->unit] = 1; | ||
100 | |||
101 | if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS)) | ||
102 | ret = -EFAULT; | ||
103 | |||
104 | return ret; | ||
105 | }; | ||
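
A user-space sketch of how this is typically consumed (illustrative only: it assumes the I2OGETIOPS ioctl number and MAX_I2O_CONTROLLERS come from <linux/i2o-dev.h> and that the configuration OSM's character device is reachable as /dev/i2o/ctl; both are assumptions, not taken from this file):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2o-dev.h>      /* assumed: I2OGETIOPS, MAX_I2O_CONTROLLERS */

int main(void)
{
        unsigned char iops[MAX_I2O_CONTROLLERS];
        int i, fd = open("/dev/i2o/ctl", O_RDONLY);

        if (fd < 0)
                return 1;

        /* the driver fills one byte per unit: 1 = IOP present, 0 = absent */
        if (ioctl(fd, I2OGETIOPS, iops) == 0)
                for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
                        if (iops[i])
                                printf("IOP %d present\n", i);

        close(fd);
        return 0;
}
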
106 | |||
107 | static int i2o_cfg_gethrt(unsigned long arg) | ||
108 | { | ||
109 | struct i2o_controller *c; | ||
110 | struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; | ||
111 | struct i2o_cmd_hrtlct kcmd; | ||
112 | i2o_hrt *hrt; | ||
113 | int len; | ||
114 | u32 reslen; | ||
115 | int ret = 0; | ||
116 | |||
117 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) | ||
118 | return -EFAULT; | ||
119 | |||
120 | if (get_user(reslen, kcmd.reslen) < 0) | ||
121 | return -EFAULT; | ||
122 | |||
123 | if (kcmd.resbuf == NULL) | ||
124 | return -EFAULT; | ||
125 | |||
126 | c = i2o_find_iop(kcmd.iop); | ||
127 | if (!c) | ||
128 | return -ENXIO; | ||
129 | |||
130 | hrt = (i2o_hrt *) c->hrt.virt; | ||
131 | |||
132 | len = 8 + ((hrt->entry_len * hrt->num_entries) << 2); | ||
133 | |||
134 | /* We did a get user...so assuming mem is ok...is this bad? */ | ||
135 | put_user(len, kcmd.reslen); | ||
136 | if (len > reslen) | ||
137 | ret = -ENOBUFS; | ||
138 | if (copy_to_user(kcmd.resbuf, (void *)hrt, len)) | ||
139 | ret = -EFAULT; | ||
140 | |||
141 | return ret; | ||
142 | }; | ||
143 | |||
144 | static int i2o_cfg_getlct(unsigned long arg) | ||
145 | { | ||
146 | struct i2o_controller *c; | ||
147 | struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; | ||
148 | struct i2o_cmd_hrtlct kcmd; | ||
149 | i2o_lct *lct; | ||
150 | int len; | ||
151 | int ret = 0; | ||
152 | u32 reslen; | ||
153 | |||
154 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) | ||
155 | return -EFAULT; | ||
156 | |||
157 | if (get_user(reslen, kcmd.reslen) < 0) | ||
158 | return -EFAULT; | ||
159 | |||
160 | if (kcmd.resbuf == NULL) | ||
161 | return -EFAULT; | ||
162 | |||
163 | c = i2o_find_iop(kcmd.iop); | ||
164 | if (!c) | ||
165 | return -ENXIO; | ||
166 | |||
167 | lct = (i2o_lct *) c->lct; | ||
168 | |||
169 | len = (unsigned int)lct->table_size << 2; | ||
170 | put_user(len, kcmd.reslen); | ||
171 | if (len > reslen) | ||
172 | ret = -ENOBUFS; | ||
173 | else if (copy_to_user(kcmd.resbuf, lct, len)) | ||
174 | ret = -EFAULT; | ||
175 | |||
176 | return ret; | ||
177 | }; | ||
178 | |||
179 | static int i2o_cfg_parms(unsigned long arg, unsigned int type) | ||
180 | { | ||
181 | int ret = 0; | ||
182 | struct i2o_controller *c; | ||
183 | struct i2o_device *dev; | ||
184 | struct i2o_cmd_psetget __user *cmd = | ||
185 | (struct i2o_cmd_psetget __user *)arg; | ||
186 | struct i2o_cmd_psetget kcmd; | ||
187 | u32 reslen; | ||
188 | u8 *ops; | ||
189 | u8 *res; | ||
190 | int len = 0; | ||
191 | |||
192 | u32 i2o_cmd = (type == I2OPARMGET ? | ||
193 | I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET); | ||
194 | |||
195 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) | ||
196 | return -EFAULT; | ||
197 | |||
198 | if (get_user(reslen, kcmd.reslen)) | ||
199 | return -EFAULT; | ||
200 | |||
201 | c = i2o_find_iop(kcmd.iop); | ||
202 | if (!c) | ||
203 | return -ENXIO; | ||
204 | |||
205 | dev = i2o_iop_find_device(c, kcmd.tid); | ||
206 | if (!dev) | ||
207 | return -ENXIO; | ||
208 | |||
209 | ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL); | ||
210 | if (!ops) | ||
211 | return -ENOMEM; | ||
212 | |||
213 | if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) { | ||
214 | kfree(ops); | ||
215 | return -EFAULT; | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * It's possible to have a _very_ large table | ||
220 | * and that the user asks for all of it at once... | ||
221 | */ | ||
222 | res = (u8 *) kmalloc(65536, GFP_KERNEL); | ||
223 | if (!res) { | ||
224 | kfree(ops); | ||
225 | return -ENOMEM; | ||
226 | } | ||
227 | |||
228 | len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536); | ||
229 | kfree(ops); | ||
230 | |||
231 | if (len < 0) { | ||
232 | kfree(res); | ||
233 | return -EAGAIN; | ||
234 | } | ||
235 | |||
236 | put_user(len, kcmd.reslen); | ||
237 | if (len > reslen) | ||
238 | ret = -ENOBUFS; | ||
239 | else if (copy_to_user(kcmd.resbuf, res, len)) | ||
240 | ret = -EFAULT; | ||
241 | |||
242 | kfree(res); | ||
243 | |||
244 | return ret; | ||
245 | }; | ||
246 | |||
247 | static int i2o_cfg_swdl(unsigned long arg) | ||
248 | { | ||
249 | struct i2o_sw_xfer kxfer; | ||
250 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
251 | unsigned char maxfrag = 0, curfrag = 1; | ||
252 | struct i2o_dma buffer; | ||
253 | struct i2o_message __iomem *msg; | ||
254 | u32 m; | ||
255 | unsigned int status = 0, swlen = 0, fragsize = 8192; | ||
256 | struct i2o_controller *c; | ||
257 | |||
258 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
259 | return -EFAULT; | ||
260 | |||
261 | if (get_user(swlen, kxfer.swlen) < 0) | ||
262 | return -EFAULT; | ||
263 | |||
264 | if (get_user(maxfrag, kxfer.maxfrag) < 0) | ||
265 | return -EFAULT; | ||
266 | |||
267 | if (get_user(curfrag, kxfer.curfrag) < 0) | ||
268 | return -EFAULT; | ||
269 | |||
270 | if (curfrag == maxfrag) | ||
271 | fragsize = swlen - (maxfrag - 1) * 8192; | ||
272 | |||
273 | if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) | ||
274 | return -EFAULT; | ||
275 | |||
276 | c = i2o_find_iop(kxfer.iop); | ||
277 | if (!c) | ||
278 | return -ENXIO; | ||
279 | |||
280 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
281 | if (m == I2O_QUEUE_EMPTY) | ||
282 | return -EBUSY; | ||
283 | |||
284 | if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { | ||
285 | i2o_msg_nop(c, m); | ||
286 | return -ENOMEM; | ||
287 | } | ||
288 | |||
289 | __copy_from_user(buffer.virt, kxfer.buf, fragsize); | ||
290 | |||
291 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); | ||
292 | writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
293 | &msg->u.head[1]); | ||
294 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
295 | writel(0, &msg->u.head[3]); | ||
296 | writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) | | ||
297 | (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]); | ||
298 | writel(swlen, &msg->body[1]); | ||
299 | writel(kxfer.sw_id, &msg->body[2]); | ||
300 | writel(0xD0000000 | fragsize, &msg->body[3]); | ||
301 | writel(buffer.phys, &msg->body[4]); | ||
302 | |||
303 | osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); | ||
304 | status = i2o_msg_post_wait_mem(c, m, 60, &buffer); | ||
305 | |||
306 | if (status != -ETIMEDOUT) | ||
307 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
308 | |||
309 | if (status != I2O_POST_WAIT_OK) { | ||
310 | // it fails if you try and send frags out of order | ||
311 | // and for some yet unknown reasons too | ||
312 | osm_info("swdl failed, DetailedStatus = %d\n", status); | ||
313 | return status; | ||
314 | } | ||
315 | |||
316 | return 0; | ||
317 | }; | ||
318 | |||
319 | static int i2o_cfg_swul(unsigned long arg) | ||
320 | { | ||
321 | struct i2o_sw_xfer kxfer; | ||
322 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
323 | unsigned char maxfrag = 0, curfrag = 1; | ||
324 | struct i2o_dma buffer; | ||
325 | struct i2o_message __iomem *msg; | ||
326 | u32 m; | ||
327 | unsigned int status = 0, swlen = 0, fragsize = 8192; | ||
328 | struct i2o_controller *c; | ||
329 | int ret = 0; | ||
330 | |||
331 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
332 | goto return_fault; | ||
333 | |||
334 | if (get_user(swlen, kxfer.swlen) < 0) | ||
335 | goto return_fault; | ||
336 | |||
337 | if (get_user(maxfrag, kxfer.maxfrag) < 0) | ||
338 | goto return_fault; | ||
339 | |||
340 | if (get_user(curfrag, kxfer.curfrag) < 0) | ||
341 | goto return_fault; | ||
342 | |||
343 | if (curfrag == maxfrag) | ||
344 | fragsize = swlen - (maxfrag - 1) * 8192; | ||
345 | |||
346 | if (!kxfer.buf) | ||
347 | goto return_fault; | ||
348 | |||
349 | c = i2o_find_iop(kxfer.iop); | ||
350 | if (!c) | ||
351 | return -ENXIO; | ||
352 | |||
353 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
354 | if (m == I2O_QUEUE_EMPTY) | ||
355 | return -EBUSY; | ||
356 | |||
357 | if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) { | ||
358 | i2o_msg_nop(c, m); | ||
359 | return -ENOMEM; | ||
360 | } | ||
361 | |||
362 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); | ||
363 | writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
364 | &msg->u.head[1]); | ||
365 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
366 | writel(0, &msg->u.head[3]); | ||
367 | writel((u32) kxfer.flags << 24 | (u32) kxfer. | ||
368 | sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag, | ||
369 | &msg->body[0]); | ||
370 | writel(swlen, &msg->body[1]); | ||
371 | writel(kxfer.sw_id, &msg->body[2]); | ||
372 | writel(0xD0000000 | fragsize, &msg->body[3]); | ||
373 | writel(buffer.phys, &msg->body[4]); | ||
374 | |||
375 | osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); | ||
376 | status = i2o_msg_post_wait_mem(c, m, 60, &buffer); | ||
377 | |||
378 | if (status != I2O_POST_WAIT_OK) { | ||
379 | if (status != -ETIMEDOUT) | ||
380 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
381 | |||
382 | osm_info("swul failed, DetailedStatus = %d\n", status); | ||
383 | return status; | ||
384 | } | ||
385 | |||
386 | if (copy_to_user(kxfer.buf, buffer.virt, fragsize)) | ||
387 | ret = -EFAULT; | ||
388 | |||
389 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
390 | |||
391 | return_ret: | ||
392 | return ret; | ||
393 | return_fault: | ||
394 | ret = -EFAULT; | ||
395 | goto return_ret; | ||
396 | }; | ||
397 | |||
398 | static int i2o_cfg_swdel(unsigned long arg) | ||
399 | { | ||
400 | struct i2o_controller *c; | ||
401 | struct i2o_sw_xfer kxfer; | ||
402 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
403 | struct i2o_message __iomem *msg; | ||
404 | u32 m; | ||
405 | unsigned int swlen; | ||
406 | int token; | ||
407 | |||
408 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
409 | return -EFAULT; | ||
410 | |||
411 | if (get_user(swlen, kxfer.swlen) < 0) | ||
412 | return -EFAULT; | ||
413 | |||
414 | c = i2o_find_iop(kxfer.iop); | ||
415 | if (!c) | ||
416 | return -ENXIO; | ||
417 | |||
418 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
419 | if (m == I2O_QUEUE_EMPTY) | ||
420 | return -EBUSY; | ||
421 | |||
422 | writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
423 | writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
424 | &msg->u.head[1]); | ||
425 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
426 | writel(0, &msg->u.head[3]); | ||
427 | writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16, | ||
428 | &msg->body[0]); | ||
429 | writel(swlen, &msg->body[1]); | ||
430 | writel(kxfer.sw_id, &msg->body[2]); | ||
431 | |||
432 | token = i2o_msg_post_wait(c, m, 10); | ||
433 | |||
434 | if (token != I2O_POST_WAIT_OK) { | ||
435 | osm_info("swdel failed, DetailedStatus = %d\n", token); | ||
436 | return -ETIMEDOUT; | ||
437 | } | ||
438 | |||
439 | return 0; | ||
440 | }; | ||
441 | |||
442 | static int i2o_cfg_validate(unsigned long arg) | ||
443 | { | ||
444 | int token; | ||
445 | int iop = (int)arg; | ||
446 | struct i2o_message __iomem *msg; | ||
447 | u32 m; | ||
448 | struct i2o_controller *c; | ||
449 | |||
450 | c = i2o_find_iop(iop); | ||
451 | if (!c) | ||
452 | return -ENXIO; | ||
453 | |||
454 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
455 | if (m == I2O_QUEUE_EMPTY) | ||
456 | return -EBUSY; | ||
457 | |||
458 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
459 | writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop, | ||
460 | &msg->u.head[1]); | ||
461 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
462 | writel(0, &msg->u.head[3]); | ||
463 | |||
464 | token = i2o_msg_post_wait(c, m, 10); | ||
465 | |||
466 | if (token != I2O_POST_WAIT_OK) { | ||
467 | osm_info("Can't validate configuration, ErrorStatus = %d\n", | ||
468 | token); | ||
469 | return -ETIMEDOUT; | ||
470 | } | ||
471 | |||
472 | return 0; | ||
473 | }; | ||
474 | |||
475 | static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) | ||
476 | { | ||
477 | struct i2o_message __iomem *msg; | ||
478 | u32 m; | ||
479 | struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; | ||
480 | struct i2o_evt_id kdesc; | ||
481 | struct i2o_controller *c; | ||
482 | struct i2o_device *d; | ||
483 | |||
484 | if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id))) | ||
485 | return -EFAULT; | ||
486 | |||
487 | /* IOP exists? */ | ||
488 | c = i2o_find_iop(kdesc.iop); | ||
489 | if (!c) | ||
490 | return -ENXIO; | ||
491 | |||
492 | /* Device exists? */ | ||
493 | d = i2o_iop_find_device(c, kdesc.tid); | ||
494 | if (!d) | ||
495 | return -ENODEV; | ||
496 | |||
497 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
498 | if (m == I2O_QUEUE_EMPTY) | ||
499 | return -EBUSY; | ||
500 | |||
501 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
502 | writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid, | ||
503 | &msg->u.head[1]); | ||
504 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
505 | writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]); | ||
506 | writel(kdesc.evt_mask, &msg->body[0]); | ||
507 | |||
508 | i2o_msg_post(c, m); | ||
509 | |||
510 | return 0; | ||
511 | } | ||
512 | |||
513 | static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) | ||
514 | { | ||
515 | struct i2o_cfg_info *p = NULL; | ||
516 | struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg; | ||
517 | struct i2o_evt_get kget; | ||
518 | unsigned long flags; | ||
519 | |||
520 | for (p = open_files; p; p = p->next) | ||
521 | if (p->q_id == (ulong) fp->private_data) | ||
522 | break; | ||
523 | |||
524 | if (!p || !p->q_len) | ||
525 | return -ENOENT; | ||
526 | |||
527 | memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); | ||
528 | MODINC(p->q_out, I2O_EVT_Q_LEN); | ||
529 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
530 | p->q_len--; | ||
531 | kget.pending = p->q_len; | ||
532 | kget.lost = p->q_lost; | ||
533 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
534 | |||
535 | if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) | ||
536 | return -EFAULT; | ||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | #ifdef CONFIG_COMPAT | ||
541 | static int i2o_cfg_passthru32(unsigned fd, unsigned cmnd, unsigned long arg, | ||
542 | struct file *file) | ||
543 | { | ||
544 | struct i2o_cmd_passthru32 __user *cmd; | ||
545 | struct i2o_controller *c; | ||
546 | u32 __user *user_msg; | ||
547 | u32 *reply = NULL; | ||
548 | u32 __user *user_reply = NULL; | ||
549 | u32 size = 0; | ||
550 | u32 reply_size = 0; | ||
551 | u32 rcode = 0; | ||
552 | struct i2o_dma sg_list[SG_TABLESIZE]; | ||
553 | u32 sg_offset = 0; | ||
554 | u32 sg_count = 0; | ||
555 | u32 i = 0; | ||
556 | i2o_status_block *sb; | ||
557 | struct i2o_message *msg; | ||
558 | u32 m; | ||
559 | unsigned int iop; | ||
560 | |||
561 | cmd = (struct i2o_cmd_passthru32 __user *)arg; | ||
562 | |||
563 | if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg)) | ||
564 | return -EFAULT; | ||
565 | |||
566 | user_msg = compat_ptr(i); | ||
567 | |||
568 | c = i2o_find_iop(iop); | ||
569 | if (!c) { | ||
570 | osm_debug("controller %d not found\n", iop); | ||
571 | return -ENXIO; | ||
572 | } | ||
573 | |||
574 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
575 | |||
576 | sb = c->status_block.virt; | ||
577 | |||
578 | if (get_user(size, &user_msg[0])) { | ||
579 | osm_warn("unable to get size!\n"); | ||
580 | return -EFAULT; | ||
581 | } | ||
582 | size = size >> 16; | ||
583 | |||
584 | if (size > sb->inbound_frame_size) { | ||
585 | osm_warn("size of message > inbound_frame_size"); | ||
586 | return -EFAULT; | ||
587 | } | ||
588 | |||
589 | user_reply = &user_msg[size]; | ||
590 | |||
591 | size <<= 2; // Convert to bytes | ||
592 | |||
593 | /* Copy in the user's I2O command */ | ||
594 | if (copy_from_user(msg, user_msg, size)) { | ||
595 | osm_warn("unable to copy user message\n"); | ||
596 | return -EFAULT; | ||
597 | } | ||
598 | i2o_dump_message(msg); | ||
599 | |||
600 | if (get_user(reply_size, &user_reply[0]) < 0) | ||
601 | return -EFAULT; | ||
602 | |||
603 | reply_size >>= 16; | ||
604 | reply_size <<= 2; | ||
605 | |||
606 | reply = kmalloc(reply_size, GFP_KERNEL); | ||
607 | if (!reply) { | ||
608 | printk(KERN_WARNING "%s: Could not allocate reply buffer\n", | ||
609 | c->name); | ||
610 | return -ENOMEM; | ||
611 | } | ||
612 | memset(reply, 0, reply_size); | ||
613 | |||
614 | sg_offset = (msg->u.head[0] >> 4) & 0x0f; | ||
615 | |||
616 | writel(i2o_config_driver.context, &msg->u.s.icntxt); | ||
617 | writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); | ||
618 | |||
619 | memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); | ||
620 | if (sg_offset) { | ||
621 | struct sg_simple_element *sg; | ||
622 | |||
623 | if (sg_offset * 4 >= size) { | ||
624 | rcode = -EFAULT; | ||
625 | goto cleanup; | ||
626 | } | ||
627 | // TODO 64bit fix | ||
628 | sg = (struct sg_simple_element *)((&msg->u.head[0]) + | ||
629 | sg_offset); | ||
630 | sg_count = | ||
631 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
632 | if (sg_count > SG_TABLESIZE) { | ||
633 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", | ||
634 | c->name, sg_count); | ||
635 | kfree(reply); | ||
636 | return -EINVAL; | ||
637 | } | ||
638 | |||
639 | for (i = 0; i < sg_count; i++) { | ||
640 | int sg_size; | ||
641 | struct i2o_dma *p; | ||
642 | |||
643 | if (!(sg[i].flag_count & 0x10000000 | ||
644 | /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { | ||
645 | printk(KERN_DEBUG | ||
646 | "%s:Bad SG element %d - not simple (%x)\n", | ||
647 | c->name, i, sg[i].flag_count); | ||
648 | rcode = -EINVAL; | ||
649 | goto cleanup; | ||
650 | } | ||
651 | sg_size = sg[i].flag_count & 0xffffff; | ||
652 | p = &(sg_list[i]); | ||
653 | /* Allocate memory for the transfer */ | ||
654 | if (i2o_dma_alloc | ||
655 | (&c->pdev->dev, p, sg_size, | ||
656 | PCI_DMA_BIDIRECTIONAL)) { | ||
657 | printk(KERN_DEBUG | ||
658 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
659 | c->name, sg_size, i, sg_count); | ||
660 | rcode = -ENOMEM; | ||
661 | goto cleanup; | ||
662 | } | ||
663 | /* Copy in the user's SG buffer if necessary */ | ||
664 | if (sg[i]. | ||
665 | flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { | ||
666 | // TODO 64bit fix | ||
667 | if (copy_from_user | ||
668 | (p->virt, (void __user *)(unsigned long)sg[i].addr_bus, | ||
669 | sg_size)) { | ||
670 | printk(KERN_DEBUG | ||
671 | "%s: Could not copy SG buf %d FROM user\n", | ||
672 | c->name, i); | ||
673 | rcode = -EFAULT; | ||
674 | goto cleanup; | ||
675 | } | ||
676 | } | ||
677 | //TODO 64bit fix | ||
678 | sg[i].addr_bus = (u32) p->phys; | ||
679 | } | ||
680 | } | ||
681 | |||
682 | rcode = i2o_msg_post_wait(c, m, 60); | ||
683 | if (rcode) | ||
684 | goto cleanup; | ||
685 | |||
686 | if (sg_offset) { | ||
687 | u32 msg[128]; | ||
688 | /* Copy back the Scatter Gather buffers back to user space */ | ||
689 | u32 j; | ||
690 | // TODO 64bit fix | ||
691 | struct sg_simple_element *sg; | ||
692 | int sg_size; | ||
693 | |||
694 | // re-acquire the original message to handle correctly the sg copy operation | ||
695 | memset(&msg, 0, MSG_FRAME_SIZE * 4); | ||
696 | // get user msg size in u32s | ||
697 | if (get_user(size, &user_msg[0])) { | ||
698 | rcode = -EFAULT; | ||
699 | goto cleanup; | ||
700 | } | ||
701 | size = size >> 16; | ||
702 | size *= 4; | ||
703 | /* Copy in the user's I2O command */ | ||
704 | if (copy_from_user(msg, user_msg, size)) { | ||
705 | rcode = -EFAULT; | ||
706 | goto cleanup; | ||
707 | } | ||
708 | sg_count = | ||
709 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
710 | |||
711 | // TODO 64bit fix | ||
712 | sg = (struct sg_simple_element *)(msg + sg_offset); | ||
713 | for (j = 0; j < sg_count; j++) { | ||
714 | /* Copy out the SG list to user's buffer if necessary */ | ||
715 | if (! | ||
716 | (sg[j]. | ||
717 | flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { | ||
718 | sg_size = sg[j].flag_count & 0xffffff; | ||
719 | // TODO 64bit fix | ||
720 | if (copy_to_user | ||
721 | ((void __user *)(u64) sg[j].addr_bus, | ||
722 | sg_list[j].virt, sg_size)) { | ||
723 | printk(KERN_WARNING | ||
724 | "%s: Could not copy %p TO user %x\n", | ||
725 | c->name, sg_list[j].virt, | ||
726 | sg[j].addr_bus); | ||
727 | rcode = -EFAULT; | ||
728 | goto cleanup; | ||
729 | } | ||
730 | } | ||
731 | } | ||
732 | } | ||
733 | |||
734 | /* Copy back the reply to user space */ | ||
735 | if (reply_size) { | ||
736 | // we wrote our own values for context - now restore the user supplied ones | ||
737 | if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { | ||
738 | printk(KERN_WARNING | ||
739 | "%s: Could not copy message context FROM user\n", | ||
740 | c->name); | ||
741 | rcode = -EFAULT; | ||
742 | } | ||
743 | if (copy_to_user(user_reply, reply, reply_size)) { | ||
744 | printk(KERN_WARNING | ||
745 | "%s: Could not copy reply TO user\n", c->name); | ||
746 | rcode = -EFAULT; | ||
747 | } | ||
748 | } | ||
749 | |||
750 | cleanup: | ||
751 | kfree(reply); | ||
752 | return rcode; | ||
753 | } | ||
754 | |||
755 | #else | ||
756 | |||
757 | static int i2o_cfg_passthru(unsigned long arg) | ||
758 | { | ||
759 | struct i2o_cmd_passthru __user *cmd = | ||
760 | (struct i2o_cmd_passthru __user *)arg; | ||
761 | struct i2o_controller *c; | ||
762 | u32 __user *user_msg; | ||
763 | u32 *reply = NULL; | ||
764 | u32 __user *user_reply = NULL; | ||
765 | u32 size = 0; | ||
766 | u32 reply_size = 0; | ||
767 | u32 rcode = 0; | ||
768 | void *sg_list[SG_TABLESIZE]; | ||
769 | u32 sg_offset = 0; | ||
770 | u32 sg_count = 0; | ||
771 | int sg_index = 0; | ||
772 | u32 i = 0; | ||
773 | void *p = NULL; | ||
774 | i2o_status_block *sb; | ||
775 | struct i2o_message __iomem *msg; | ||
776 | u32 m; | ||
777 | unsigned int iop; | ||
778 | |||
779 | if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) | ||
780 | return -EFAULT; | ||
781 | |||
782 | c = i2o_find_iop(iop); | ||
783 | if (!c) { | ||
784 | osm_warn("controller %d not found\n", iop); | ||
785 | return -ENXIO; | ||
786 | } | ||
787 | |||
788 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
789 | |||
790 | sb = c->status_block.virt; | ||
791 | |||
792 | if (get_user(size, &user_msg[0])) | ||
793 | return -EFAULT; | ||
794 | size = size >> 16; | ||
795 | |||
796 | if (size > sb->inbound_frame_size) { | ||
797 | 		osm_warn("size of message > inbound_frame_size\n"); | ||
798 | return -EFAULT; | ||
799 | } | ||
800 | |||
801 | user_reply = &user_msg[size]; | ||
802 | |||
803 | size <<= 2; // Convert to bytes | ||
804 | |||
805 | /* Copy in the user's I2O command */ | ||
806 | if (copy_from_user(msg, user_msg, size)) | ||
807 | return -EFAULT; | ||
808 | |||
809 | if (get_user(reply_size, &user_reply[0]) < 0) | ||
810 | return -EFAULT; | ||
811 | |||
812 | reply_size >>= 16; | ||
813 | reply_size <<= 2; | ||
814 | |||
815 | reply = kmalloc(reply_size, GFP_KERNEL); | ||
816 | if (!reply) { | ||
817 | printk(KERN_WARNING "%s: Could not allocate reply buffer\n", | ||
818 | c->name); | ||
819 | return -ENOMEM; | ||
820 | } | ||
821 | memset(reply, 0, reply_size); | ||
822 | |||
823 | sg_offset = (msg->u.head[0] >> 4) & 0x0f; | ||
824 | |||
825 | writel(i2o_config_driver.context, &msg->u.s.icntxt); | ||
826 | writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt); | ||
827 | |||
828 | memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); | ||
829 | if (sg_offset) { | ||
830 | struct sg_simple_element *sg; | ||
831 | |||
832 | if (sg_offset * 4 >= size) { | ||
833 | rcode = -EFAULT; | ||
834 | goto cleanup; | ||
835 | } | ||
836 | // TODO 64bit fix | ||
837 | sg = (struct sg_simple_element *)((&msg->u.head[0]) + | ||
838 | sg_offset); | ||
839 | sg_count = | ||
840 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
841 | if (sg_count > SG_TABLESIZE) { | ||
842 | 			printk(KERN_DEBUG "%s: IOCTL SG List too large (%u)\n", | ||
843 | c->name, sg_count); | ||
844 | kfree(reply); | ||
845 | return -EINVAL; | ||
846 | } | ||
847 | |||
848 | for (i = 0; i < sg_count; i++) { | ||
849 | int sg_size; | ||
850 | |||
851 | if (!(sg[i].flag_count & 0x10000000 | ||
852 | /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { | ||
853 | printk(KERN_DEBUG | ||
854 | 				       "%s: Bad SG element %d - not simple (%x)\n", | ||
855 | c->name, i, sg[i].flag_count); | ||
856 | rcode = -EINVAL; | ||
857 | goto cleanup; | ||
858 | } | ||
859 | sg_size = sg[i].flag_count & 0xffffff; | ||
860 | /* Allocate memory for the transfer */ | ||
861 | p = kmalloc(sg_size, GFP_KERNEL); | ||
862 | if (!p) { | ||
863 | printk(KERN_DEBUG | ||
864 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
865 | c->name, sg_size, i, sg_count); | ||
866 | rcode = -ENOMEM; | ||
867 | goto cleanup; | ||
868 | } | ||
869 | sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. | ||
870 | /* Copy in the user's SG buffer if necessary */ | ||
871 | if (sg[i]. | ||
872 | flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { | ||
873 | // TODO 64bit fix | ||
874 | if (copy_from_user | ||
875 | (p, (void __user *)sg[i].addr_bus, | ||
876 | sg_size)) { | ||
877 | printk(KERN_DEBUG | ||
878 | "%s: Could not copy SG buf %d FROM user\n", | ||
879 | c->name, i); | ||
880 | rcode = -EFAULT; | ||
881 | goto cleanup; | ||
882 | } | ||
883 | } | ||
884 | //TODO 64bit fix | ||
885 | sg[i].addr_bus = virt_to_bus(p); | ||
886 | } | ||
887 | } | ||
888 | |||
889 | rcode = i2o_msg_post_wait(c, m, 60); | ||
890 | if (rcode) | ||
891 | goto cleanup; | ||
892 | |||
893 | if (sg_offset) { | ||
894 | u32 msg[128]; | ||
895 | 		/* Copy the Scatter-Gather buffers back to user space */ | ||
896 | u32 j; | ||
897 | // TODO 64bit fix | ||
898 | struct sg_simple_element *sg; | ||
899 | int sg_size; | ||
900 | |||
901 | 		// re-read the user's original message so the copy-back can locate the SG elements | ||
902 | memset(&msg, 0, MSG_FRAME_SIZE * 4); | ||
903 | // get user msg size in u32s | ||
904 | if (get_user(size, &user_msg[0])) { | ||
905 | rcode = -EFAULT; | ||
906 | goto cleanup; | ||
907 | } | ||
908 | size = size >> 16; | ||
909 | size *= 4; | ||
910 | /* Copy in the user's I2O command */ | ||
911 | if (copy_from_user(msg, user_msg, size)) { | ||
912 | rcode = -EFAULT; | ||
913 | goto cleanup; | ||
914 | } | ||
915 | sg_count = | ||
916 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
917 | |||
918 | // TODO 64bit fix | ||
919 | sg = (struct sg_simple_element *)(msg + sg_offset); | ||
920 | for (j = 0; j < sg_count; j++) { | ||
921 | 			/* Copy the SG buffer out to the user's buffer if necessary */ | ||
922 | if (! | ||
923 | (sg[j]. | ||
924 | flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { | ||
925 | sg_size = sg[j].flag_count & 0xffffff; | ||
926 | // TODO 64bit fix | ||
927 | if (copy_to_user | ||
928 | ((void __user *)sg[j].addr_bus, sg_list[j], | ||
929 | sg_size)) { | ||
930 | printk(KERN_WARNING | ||
931 | "%s: Could not copy %p TO user %x\n", | ||
932 | c->name, sg_list[j], | ||
933 | sg[j].addr_bus); | ||
934 | rcode = -EFAULT; | ||
935 | goto cleanup; | ||
936 | } | ||
937 | } | ||
938 | } | ||
939 | } | ||
940 | |||
941 | /* Copy back the reply to user space */ | ||
942 | if (reply_size) { | ||
943 | // we wrote our own values for context - now restore the user supplied ones | ||
944 | if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { | ||
945 | printk(KERN_WARNING | ||
946 | "%s: Could not copy message context FROM user\n", | ||
947 | c->name); | ||
948 | rcode = -EFAULT; | ||
949 | } | ||
950 | if (copy_to_user(user_reply, reply, reply_size)) { | ||
951 | printk(KERN_WARNING | ||
952 | "%s: Could not copy reply TO user\n", c->name); | ||
953 | rcode = -EFAULT; | ||
954 | } | ||
955 | } | ||
956 | |||
957 | cleanup: | ||
958 | kfree(reply); | ||
959 | return rcode; | ||
960 | } | ||
961 | #endif | ||
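
Both passthru variants above key off the bit layout of a simple SG element's flag_count word. A minimal sketch of that decoding, using a hypothetical element value (the flag names follow the in-line comments in the code above):

	/* Hypothetical simple SG element: direction bit set, 512-byte buffer. */
	u32 flag_count = 0x10000000 | 0x04000000 | 0x200;
	u32 sg_size    = flag_count & 0x00ffffff;      /* low 24 bits: buffer length (512) */
	int is_simple  = !!(flag_count & 0x10000000);  /* I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */
	int to_iop     = !!(flag_count & 0x04000000);  /* I2O_SGL_FLAGS_DIR: copied in before posting */

When the direction bit is clear, the driver instead copies its bounce buffer back to user space after i2o_msg_post_wait() completes.
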
962 | |||
963 | /* | ||
964 | * IOCTL Handler | ||
965 | */ | ||
966 | static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, | ||
967 | unsigned long arg) | ||
968 | { | ||
969 | int ret; | ||
970 | |||
971 | switch (cmd) { | ||
972 | case I2OGETIOPS: | ||
973 | ret = i2o_cfg_getiops(arg); | ||
974 | break; | ||
975 | |||
976 | case I2OHRTGET: | ||
977 | ret = i2o_cfg_gethrt(arg); | ||
978 | break; | ||
979 | |||
980 | case I2OLCTGET: | ||
981 | ret = i2o_cfg_getlct(arg); | ||
982 | break; | ||
983 | |||
984 | case I2OPARMSET: | ||
985 | ret = i2o_cfg_parms(arg, I2OPARMSET); | ||
986 | break; | ||
987 | |||
988 | case I2OPARMGET: | ||
989 | ret = i2o_cfg_parms(arg, I2OPARMGET); | ||
990 | break; | ||
991 | |||
992 | case I2OSWDL: | ||
993 | ret = i2o_cfg_swdl(arg); | ||
994 | break; | ||
995 | |||
996 | case I2OSWUL: | ||
997 | ret = i2o_cfg_swul(arg); | ||
998 | break; | ||
999 | |||
1000 | case I2OSWDEL: | ||
1001 | ret = i2o_cfg_swdel(arg); | ||
1002 | break; | ||
1003 | |||
1004 | case I2OVALIDATE: | ||
1005 | ret = i2o_cfg_validate(arg); | ||
1006 | break; | ||
1007 | |||
1008 | case I2OEVTREG: | ||
1009 | ret = i2o_cfg_evt_reg(arg, fp); | ||
1010 | break; | ||
1011 | |||
1012 | case I2OEVTGET: | ||
1013 | ret = i2o_cfg_evt_get(arg, fp); | ||
1014 | break; | ||
1015 | |||
1016 | #ifndef CONFIG_COMPAT | ||
1017 | case I2OPASSTHRU: | ||
1018 | ret = i2o_cfg_passthru(arg); | ||
1019 | break; | ||
1020 | #endif | ||
1021 | |||
1022 | default: | ||
1023 | osm_debug("unknown ioctl called!\n"); | ||
1024 | ret = -EINVAL; | ||
1025 | } | ||
1026 | |||
1027 | return ret; | ||
1028 | } | ||
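
A hedged user-space sketch of driving this handler, for the simplest case (I2OGETIOPS). It assumes that <linux/i2o-dev.h> exports I2OGETIOPS and MAX_I2O_CONTROLLERS, and that the misc device registered below appears as /dev/i2octl; the actual node name depends on the udev/devfs setup.

	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/i2o-dev.h>

	int main(void)
	{
		unsigned char iops[MAX_I2O_CONTROLLERS] = { 0 };	/* one flag per possible IOP */
		int i, fd = open("/dev/i2octl", O_RDONLY);		/* assumed device node */

		if (fd < 0 || ioctl(fd, I2OGETIOPS, iops) < 0)
			return 1;

		for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
			if (iops[i])
				printf("IOP %d is present\n", i);
		return 0;
	}
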
1029 | |||
1030 | static int cfg_open(struct inode *inode, struct file *file) | ||
1031 | { | ||
1032 | struct i2o_cfg_info *tmp = | ||
1033 | (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), | ||
1034 | GFP_KERNEL); | ||
1035 | unsigned long flags; | ||
1036 | |||
1037 | if (!tmp) | ||
1038 | return -ENOMEM; | ||
1039 | |||
1040 | file->private_data = (void *)(i2o_cfg_info_id++); | ||
1041 | tmp->fp = file; | ||
1042 | tmp->fasync = NULL; | ||
1043 | tmp->q_id = (ulong) file->private_data; | ||
1044 | tmp->q_len = 0; | ||
1045 | tmp->q_in = 0; | ||
1046 | tmp->q_out = 0; | ||
1047 | tmp->q_lost = 0; | ||
1048 | tmp->next = open_files; | ||
1049 | |||
1050 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
1051 | open_files = tmp; | ||
1052 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
1053 | |||
1054 | return 0; | ||
1055 | } | ||
1056 | |||
1057 | static int cfg_fasync(int fd, struct file *fp, int on) | ||
1058 | { | ||
1059 | ulong id = (ulong) fp->private_data; | ||
1060 | struct i2o_cfg_info *p; | ||
1061 | |||
1062 | for (p = open_files; p; p = p->next) | ||
1063 | if (p->q_id == id) | ||
1064 | break; | ||
1065 | |||
1066 | if (!p) | ||
1067 | return -EBADF; | ||
1068 | |||
1069 | return fasync_helper(fd, fp, on, &p->fasync); | ||
1070 | } | ||
1071 | |||
1072 | static int cfg_release(struct inode *inode, struct file *file) | ||
1073 | { | ||
1074 | ulong id = (ulong) file->private_data; | ||
1075 | struct i2o_cfg_info *p1, *p2; | ||
1076 | unsigned long flags; | ||
1077 | |||
1078 | lock_kernel(); | ||
1079 | p1 = p2 = NULL; | ||
1080 | |||
1081 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
1082 | for (p1 = open_files; p1;) { | ||
1083 | if (p1->q_id == id) { | ||
1084 | |||
1085 | if (p1->fasync) | ||
1086 | cfg_fasync(-1, file, 0); | ||
1087 | if (p2) | ||
1088 | p2->next = p1->next; | ||
1089 | else | ||
1090 | open_files = p1->next; | ||
1091 | |||
1092 | kfree(p1); | ||
1093 | break; | ||
1094 | } | ||
1095 | p2 = p1; | ||
1096 | p1 = p1->next; | ||
1097 | } | ||
1098 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
1099 | unlock_kernel(); | ||
1100 | |||
1101 | return 0; | ||
1102 | } | ||
1103 | |||
1104 | static struct file_operations config_fops = { | ||
1105 | .owner = THIS_MODULE, | ||
1106 | .llseek = no_llseek, | ||
1107 | .ioctl = i2o_cfg_ioctl, | ||
1108 | .open = cfg_open, | ||
1109 | .release = cfg_release, | ||
1110 | .fasync = cfg_fasync, | ||
1111 | }; | ||
1112 | |||
1113 | static struct miscdevice i2o_miscdev = { | ||
1114 | I2O_MINOR, | ||
1115 | "i2octl", | ||
1116 | &config_fops | ||
1117 | }; | ||
1118 | |||
1119 | static int __init i2o_config_init(void) | ||
1120 | { | ||
1121 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1122 | |||
1123 | spin_lock_init(&i2o_config_lock); | ||
1124 | |||
1125 | if (misc_register(&i2o_miscdev) < 0) { | ||
1126 | osm_err("can't register device.\n"); | ||
1127 | return -EBUSY; | ||
1128 | } | ||
1129 | /* | ||
1130 | * Install our handler | ||
1131 | */ | ||
1132 | if (i2o_driver_register(&i2o_config_driver)) { | ||
1133 | osm_err("handler register failed.\n"); | ||
1134 | misc_deregister(&i2o_miscdev); | ||
1135 | return -EBUSY; | ||
1136 | } | ||
1137 | #ifdef CONFIG_COMPAT | ||
1138 | register_ioctl32_conversion(I2OPASSTHRU32, i2o_cfg_passthru32); | ||
1139 | register_ioctl32_conversion(I2OGETIOPS, (void *)sys_ioctl); | ||
1140 | #endif | ||
1141 | return 0; | ||
1142 | } | ||
1143 | |||
1144 | static void i2o_config_exit(void) | ||
1145 | { | ||
1146 | #ifdef CONFIG_COMPAT | ||
1147 | unregister_ioctl32_conversion(I2OPASSTHRU32); | ||
1148 | unregister_ioctl32_conversion(I2OGETIOPS); | ||
1149 | #endif | ||
1150 | misc_deregister(&i2o_miscdev); | ||
1151 | i2o_driver_unregister(&i2o_config_driver); | ||
1152 | } | ||
1153 | |||
1154 | MODULE_AUTHOR("Red Hat Software"); | ||
1155 | MODULE_LICENSE("GPL"); | ||
1156 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1157 | MODULE_VERSION(OSM_VERSION); | ||
1158 | |||
1159 | module_init(i2o_config_init); | ||
1160 | module_exit(i2o_config_exit); | ||
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h new file mode 100644 index 000000000000..561d63304d7e --- /dev/null +++ b/drivers/message/i2o/i2o_lan.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * i2o_lan.h I2O LAN Class definitions | ||
3 | * | ||
4 | * I2O LAN CLASS OSM May 26th 2000 | ||
5 | * | ||
6 | * (C) Copyright 1999, 2000 University of Helsinki, | ||
7 | * Department of Computer Science | ||
8 | * | ||
9 | * This code is still under development / test. | ||
10 | * | ||
11 | * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
12 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
13 | * Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI> | ||
14 | */ | ||
15 | |||
16 | #ifndef _I2O_LAN_H | ||
17 | #define _I2O_LAN_H | ||
18 | |||
19 | /* Default values for tunable parameters first */ | ||
20 | |||
21 | #define I2O_LAN_MAX_BUCKETS_OUT 96 | ||
22 | #define I2O_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */ | ||
23 | #define I2O_LAN_RX_COPYBREAK 200 | ||
24 | #define I2O_LAN_TX_TIMEOUT (1*HZ) | ||
25 | #define I2O_LAN_TX_BATCH_MODE 2 /* 2=automatic, 1=on, 0=off */ | ||
26 | #define I2O_LAN_EVENT_MASK 0 /* 0=None, 0xFFC00002=All */ | ||
27 | |||
28 | /* LAN types */ | ||
29 | #define I2O_LAN_ETHERNET 0x0030 | ||
30 | #define I2O_LAN_100VG 0x0040 | ||
31 | #define I2O_LAN_TR 0x0050 | ||
32 | #define I2O_LAN_FDDI 0x0060 | ||
33 | #define I2O_LAN_FIBRE_CHANNEL 0x0070 | ||
34 | #define I2O_LAN_UNKNOWN 0x00000000 | ||
35 | |||
36 | /* Connector types */ | ||
37 | |||
38 | /* Ethernet */ | ||
39 | #define I2O_LAN_AUI (I2O_LAN_ETHERNET << 4) + 0x00000001 | ||
40 | #define I2O_LAN_10BASE5 (I2O_LAN_ETHERNET << 4) + 0x00000002 | ||
41 | #define I2O_LAN_FIORL (I2O_LAN_ETHERNET << 4) + 0x00000003 | ||
42 | #define I2O_LAN_10BASE2 (I2O_LAN_ETHERNET << 4) + 0x00000004 | ||
43 | #define I2O_LAN_10BROAD36 (I2O_LAN_ETHERNET << 4) + 0x00000005 | ||
44 | #define I2O_LAN_10BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000006 | ||
45 | #define I2O_LAN_10BASE_FP (I2O_LAN_ETHERNET << 4) + 0x00000007 | ||
46 | #define I2O_LAN_10BASE_FB (I2O_LAN_ETHERNET << 4) + 0x00000008 | ||
47 | #define I2O_LAN_10BASE_FL (I2O_LAN_ETHERNET << 4) + 0x00000009 | ||
48 | #define I2O_LAN_100BASE_TX (I2O_LAN_ETHERNET << 4) + 0x0000000A | ||
49 | #define I2O_LAN_100BASE_FX (I2O_LAN_ETHERNET << 4) + 0x0000000B | ||
50 | #define I2O_LAN_100BASE_T4 (I2O_LAN_ETHERNET << 4) + 0x0000000C | ||
51 | #define I2O_LAN_1000BASE_SX (I2O_LAN_ETHERNET << 4) + 0x0000000D | ||
52 | #define I2O_LAN_1000BASE_LX (I2O_LAN_ETHERNET << 4) + 0x0000000E | ||
53 | #define I2O_LAN_1000BASE_CX (I2O_LAN_ETHERNET << 4) + 0x0000000F | ||
54 | #define I2O_LAN_1000BASE_T (I2O_LAN_ETHERNET << 4) + 0x00000010 | ||
55 | |||
56 | /* AnyLAN */ | ||
57 | #define I2O_LAN_100VG_ETHERNET (I2O_LAN_100VG << 4) + 0x00000001 | ||
58 | #define I2O_LAN_100VG_TR (I2O_LAN_100VG << 4) + 0x00000002 | ||
59 | |||
60 | /* Token Ring */ | ||
61 | #define I2O_LAN_4MBIT (I2O_LAN_TR << 4) + 0x00000001 | ||
62 | #define I2O_LAN_16MBIT (I2O_LAN_TR << 4) + 0x00000002 | ||
63 | |||
64 | /* FDDI */ | ||
65 | #define I2O_LAN_125MBAUD (I2O_LAN_FDDI << 4) + 0x00000001 | ||
66 | |||
67 | /* Fibre Channel */ | ||
68 | #define I2O_LAN_POINT_POINT (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001 | ||
69 | #define I2O_LAN_ARB_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002 | ||
70 | #define I2O_LAN_PUBLIC_LOOP (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003 | ||
71 | #define I2O_LAN_FABRIC (I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004 | ||
72 | |||
73 | #define I2O_LAN_EMULATION 0x00000F00 | ||
74 | #define I2O_LAN_OTHER 0x00000F01 | ||
75 | #define I2O_LAN_DEFAULT 0xFFFFFFFF | ||
76 | |||
77 | /* LAN class functions */ | ||
78 | |||
79 | #define LAN_PACKET_SEND 0x3B | ||
80 | #define LAN_SDU_SEND 0x3D | ||
81 | #define LAN_RECEIVE_POST 0x3E | ||
82 | #define LAN_RESET 0x35 | ||
83 | #define LAN_SUSPEND 0x37 | ||
84 | |||
85 | /* LAN DetailedStatusCode defines */ | ||
86 | #define I2O_LAN_DSC_SUCCESS 0x00 | ||
87 | #define I2O_LAN_DSC_DEVICE_FAILURE 0x01 | ||
88 | #define I2O_LAN_DSC_DESTINATION_NOT_FOUND 0x02 | ||
89 | #define I2O_LAN_DSC_TRANSMIT_ERROR 0x03 | ||
90 | #define I2O_LAN_DSC_TRANSMIT_ABORTED 0x04 | ||
91 | #define I2O_LAN_DSC_RECEIVE_ERROR 0x05 | ||
92 | #define I2O_LAN_DSC_RECEIVE_ABORTED 0x06 | ||
93 | #define I2O_LAN_DSC_DMA_ERROR 0x07 | ||
94 | #define I2O_LAN_DSC_BAD_PACKET_DETECTED 0x08 | ||
95 | #define I2O_LAN_DSC_OUT_OF_MEMORY 0x09 | ||
96 | #define I2O_LAN_DSC_BUCKET_OVERRUN 0x0A | ||
97 | #define I2O_LAN_DSC_IOP_INTERNAL_ERROR 0x0B | ||
98 | #define I2O_LAN_DSC_CANCELED 0x0C | ||
99 | #define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT 0x0D | ||
100 | #define I2O_LAN_DSC_DEST_ADDRESS_DETECTED 0x0E | ||
101 | #define I2O_LAN_DSC_DEST_ADDRESS_OMITTED 0x0F | ||
102 | #define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED 0x10 | ||
103 | #define I2O_LAN_DSC_SUSPENDED 0x11 | ||
104 | |||
105 | struct i2o_packet_info { | ||
106 | u32 offset : 24; | ||
107 | u32 flags : 8; | ||
108 | u32 len : 24; | ||
109 | u32 status : 8; | ||
110 | }; | ||
111 | |||
112 | struct i2o_bucket_descriptor { | ||
113 | u32 context; /* FIXME: 64bit support */ | ||
114 | struct i2o_packet_info packet_info[1]; | ||
115 | }; | ||
116 | |||
117 | /* Event Indicator Mask Flags for LAN OSM */ | ||
118 | |||
119 | #define I2O_LAN_EVT_LINK_DOWN 0x01 | ||
120 | #define I2O_LAN_EVT_LINK_UP 0x02 | ||
121 | #define I2O_LAN_EVT_MEDIA_CHANGE 0x04 | ||
122 | |||
123 | #include <linux/netdevice.h> | ||
124 | #include <linux/fddidevice.h> | ||
125 | |||
126 | struct i2o_lan_local { | ||
127 | u8 unit; | ||
128 | struct i2o_device *i2o_dev; | ||
129 | |||
130 | struct fddi_statistics stats; /* see also struct net_device_stats */ | ||
131 | unsigned short (*type_trans)(struct sk_buff *, struct net_device *); | ||
132 | atomic_t buckets_out; /* nbr of unused buckets on DDM */ | ||
133 | atomic_t tx_out; /* outstanding TXes */ | ||
134 | u8 tx_count; /* packets in one TX message frame */ | ||
135 | u16 tx_max_out; /* DDM's Tx queue len */ | ||
136 | u8 sgl_max; /* max SGLs in one message frame */ | ||
137 | u32 m; /* IOP address of the batch msg frame */ | ||
138 | |||
139 | struct work_struct i2o_batch_send_task; | ||
140 | int send_active; | ||
141 | struct sk_buff **i2o_fbl; /* Free bucket list (to reuse skbs) */ | ||
142 | int i2o_fbl_tail; | ||
143 | spinlock_t fbl_lock; | ||
144 | |||
145 | spinlock_t tx_lock; | ||
146 | |||
147 | u32 max_size_mc_table; /* max number of multicast addresses */ | ||
148 | |||
149 | /* LAN OSM configurable parameters are here: */ | ||
150 | |||
151 | u16 max_buckets_out; /* max nbr of buckets to send to DDM */ | ||
152 | u16 bucket_thresh; /* send more when this many used */ | ||
153 | u16 rx_copybreak; | ||
154 | |||
155 | u8 tx_batch_mode; /* Set when using batch mode sends */ | ||
156 | u32 i2o_event_mask; /* To turn on interesting event flags */ | ||
157 | }; | ||
158 | |||
159 | #endif /* _I2O_LAN_H */ | ||
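
The single-element packet_info[] array in struct i2o_bucket_descriptor is the usual C idiom for a variable-length trailing array, one entry per packet placed in the bucket. A hedged sketch (not part of this patch) of reading those bitfields from a descriptor, assuming linux/kernel.h is available for printk:

	/* Hypothetical helper: dump the first packet of one bucket descriptor. */
	static inline void i2o_lan_dump_bucket(const struct i2o_bucket_descriptor *bd)
	{
		printk(KERN_DEBUG "bucket ctx %08x: offset %u, len %u, status 0x%02x\n",
		       bd->context, bd->packet_info[0].offset,
		       bd->packet_info[0].len, bd->packet_info[0].status);
	}
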
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c new file mode 100644 index 000000000000..b176d0eeff7f --- /dev/null +++ b/drivers/message/i2o/i2o_proc.c | |||
@@ -0,0 +1,2112 @@ | |||
1 | /* | ||
2 | * procfs handler for Linux I2O subsystem | ||
3 | * | ||
4 | * (c) Copyright 1999 Deepak Saxena | ||
5 | * | ||
6 | * Originally written by Deepak Saxena(deepak@plexity.net) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This is an initial test release. The code is based on the design of the | ||
14 | * ide procfs system (drivers/block/ide-proc.c). Some code taken from | ||
15 | * i2o-core module by Alan Cox. | ||
16 | * | ||
17 | * DISCLAIMER: This code is still under development/test and may cause | ||
18 | * your system to behave unpredictably. Use at your own discretion. | ||
19 | * | ||
20 | * | ||
21 | * Fixes/additions: | ||
22 | * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), | ||
23 | * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) | ||
24 | * University of Helsinki, Department of Computer Science | ||
25 | * LAN entries | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
27 | * Changes for new I2O API | ||
28 | */ | ||
29 | |||
30 | #define OSM_NAME "proc-osm" | ||
31 | #define OSM_VERSION "$Rev$" | ||
32 | #define OSM_DESCRIPTION "I2O ProcFS OSM" | ||
33 | |||
34 | #define I2O_MAX_MODULES 4 | ||
35 | // FIXME! | ||
36 | #define FMT_U64_HEX "0x%08x%08x" | ||
37 | #define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) | ||
38 | |||
39 | #include <linux/types.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/pci.h> | ||
42 | #include <linux/i2o.h> | ||
43 | #include <linux/proc_fs.h> | ||
44 | #include <linux/seq_file.h> | ||
45 | #include <linux/init.h> | ||
46 | #include <linux/module.h> | ||
47 | #include <linux/errno.h> | ||
48 | #include <linux/spinlock.h> | ||
49 | #include <linux/workqueue.h> | ||
50 | |||
51 | #include <asm/io.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/byteorder.h> | ||
54 | |||
55 | /* Structure used to define /proc entries */ | ||
56 | typedef struct _i2o_proc_entry_t { | ||
57 | char *name; /* entry name */ | ||
58 | mode_t mode; /* mode */ | ||
59 | struct file_operations *fops; /* open function */ | ||
60 | } i2o_proc_entry; | ||
61 | |||
62 | /* global I2O /proc/i2o entry */ | ||
63 | static struct proc_dir_entry *i2o_proc_dir_root; | ||
64 | |||
65 | /* proc OSM driver struct */ | ||
66 | static struct i2o_driver i2o_proc_driver = { | ||
67 | .name = OSM_NAME, | ||
68 | }; | ||
69 | |||
70 | static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len) | ||
71 | { | ||
72 | int i; | ||
73 | |||
74 | /* 19990419 -sralston | ||
75 | * The I2O v1.5 (and v2.0 so far) "official specification" | ||
76 | * got serial numbers WRONG! | ||
77 | * Apparently, and despite what Section 3.4.4 says and | ||
78 | * Figure 3-35 shows (pg 3-39 in the pdf doc), | ||
79 | * the convention / consensus seems to be: | ||
80 | * + First byte is SNFormat | ||
81 | * + Second byte is SNLen (but only if SNFormat==7 (?)) | ||
82 | * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format | ||
83 | */ | ||
84 | switch (serialno[0]) { | ||
85 | case I2O_SNFORMAT_BINARY: /* Binary */ | ||
86 | seq_printf(seq, "0x"); | ||
87 | for (i = 0; i < serialno[1]; i++) { | ||
88 | seq_printf(seq, "%02X", serialno[2 + i]); | ||
89 | } | ||
90 | break; | ||
91 | |||
92 | case I2O_SNFORMAT_ASCII: /* ASCII */ | ||
93 | if (serialno[1] < ' ') { /* printable or SNLen? */ | ||
94 | /* sanity */ | ||
95 | max_len = | ||
96 | (max_len < serialno[1]) ? max_len : serialno[1]; | ||
97 | serialno[1 + max_len] = '\0'; | ||
98 | |||
99 | /* just print it */ | ||
100 | seq_printf(seq, "%s", &serialno[2]); | ||
101 | } else { | ||
102 | /* print chars for specified length */ | ||
103 | for (i = 0; i < serialno[1]; i++) { | ||
104 | seq_printf(seq, "%c", serialno[2 + i]); | ||
105 | } | ||
106 | } | ||
107 | break; | ||
108 | |||
109 | case I2O_SNFORMAT_UNICODE: /* UNICODE */ | ||
110 | seq_printf(seq, "UNICODE Format. Can't Display\n"); | ||
111 | break; | ||
112 | |||
113 | case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ | ||
114 | seq_printf(seq, | ||
115 | "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X", | ||
116 | serialno[2], serialno[3], | ||
117 | serialno[4], serialno[5], serialno[6], serialno[7]); | ||
118 | break; | ||
119 | |||
120 | case I2O_SNFORMAT_WAN: /* WAN MAC Address */ | ||
121 | /* FIXME: Figure out what a WAN access address looks like?? */ | ||
122 | seq_printf(seq, "WAN Access Address"); | ||
123 | break; | ||
124 | |||
125 | /* plus new in v2.0 */ | ||
126 | case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ | ||
127 | /* FIXME: Figure out what a LAN-64 address really looks like?? */ | ||
128 | seq_printf(seq, | ||
129 | "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X", | ||
130 | serialno[8], serialno[9], | ||
131 | serialno[2], serialno[3], | ||
132 | serialno[4], serialno[5], serialno[6], serialno[7]); | ||
133 | break; | ||
134 | |||
135 | case I2O_SNFORMAT_DDM: /* I2O DDM */ | ||
136 | seq_printf(seq, | ||
137 | "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", | ||
138 | *(u16 *) & serialno[2], | ||
139 | *(u16 *) & serialno[4], *(u16 *) & serialno[6]); | ||
140 | break; | ||
141 | |||
142 | case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ | ||
143 | case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ | ||
144 | /* FIXME: Figure if this is even close?? */ | ||
145 | seq_printf(seq, | ||
146 | "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", | ||
147 | *(u32 *) & serialno[2], | ||
148 | *(u32 *) & serialno[6], | ||
149 | *(u32 *) & serialno[10], *(u32 *) & serialno[14]); | ||
150 | break; | ||
151 | |||
152 | case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ | ||
153 | case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ | ||
154 | default: | ||
155 | seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]); | ||
156 | break; | ||
157 | } | ||
158 | |||
159 | return 0; | ||
160 | } | ||
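
As a hedged illustration of the convention described in the comment above (format byte, then length byte, then the serial-number data), consider a hypothetical buffer and what the binary branch of the switch prints for it:

	/* Hypothetical buffer: SNFormat = binary, SNLen = 4, data = DE AD BE EF */
	u8 sn[] = { I2O_SNFORMAT_BINARY, 0x04, 0xDE, 0xAD, 0xBE, 0xEF };

	/* print_serial_number(seq, sn, sizeof(sn) - 2) emits "0xDEADBEEF" */
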
161 | |||
162 | /** | ||
163 | * i2o_get_class_name - do i2o class name lookup | ||
164 | * @class: class number | ||
165 | * | ||
166 | * Return a descriptive string for an i2o class | ||
167 | */ | ||
168 | static const char *i2o_get_class_name(int class) | ||
169 | { | ||
170 | int idx = 16; | ||
171 | static char *i2o_class_name[] = { | ||
172 | "Executive", | ||
173 | "Device Driver Module", | ||
174 | "Block Device", | ||
175 | "Tape Device", | ||
176 | "LAN Interface", | ||
177 | "WAN Interface", | ||
178 | "Fibre Channel Port", | ||
179 | "Fibre Channel Device", | ||
180 | "SCSI Device", | ||
181 | "ATE Port", | ||
182 | "ATE Device", | ||
183 | "Floppy Controller", | ||
184 | "Floppy Device", | ||
185 | "Secondary Bus Port", | ||
186 | "Peer Transport Agent", | ||
187 | "Peer Transport", | ||
188 | "Unknown" | ||
189 | }; | ||
190 | |||
191 | switch (class & 0xfff) { | ||
192 | case I2O_CLASS_EXECUTIVE: | ||
193 | idx = 0; | ||
194 | break; | ||
195 | case I2O_CLASS_DDM: | ||
196 | idx = 1; | ||
197 | break; | ||
198 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
199 | idx = 2; | ||
200 | break; | ||
201 | case I2O_CLASS_SEQUENTIAL_STORAGE: | ||
202 | idx = 3; | ||
203 | break; | ||
204 | case I2O_CLASS_LAN: | ||
205 | idx = 4; | ||
206 | break; | ||
207 | case I2O_CLASS_WAN: | ||
208 | idx = 5; | ||
209 | break; | ||
210 | case I2O_CLASS_FIBRE_CHANNEL_PORT: | ||
211 | idx = 6; | ||
212 | break; | ||
213 | case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: | ||
214 | idx = 7; | ||
215 | break; | ||
216 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
217 | idx = 8; | ||
218 | break; | ||
219 | case I2O_CLASS_ATE_PORT: | ||
220 | idx = 9; | ||
221 | break; | ||
222 | case I2O_CLASS_ATE_PERIPHERAL: | ||
223 | idx = 10; | ||
224 | break; | ||
225 | case I2O_CLASS_FLOPPY_CONTROLLER: | ||
226 | idx = 11; | ||
227 | break; | ||
228 | case I2O_CLASS_FLOPPY_DEVICE: | ||
229 | idx = 12; | ||
230 | break; | ||
231 | case I2O_CLASS_BUS_ADAPTER_PORT: | ||
232 | idx = 13; | ||
233 | break; | ||
234 | case I2O_CLASS_PEER_TRANSPORT_AGENT: | ||
235 | idx = 14; | ||
236 | break; | ||
237 | case I2O_CLASS_PEER_TRANSPORT: | ||
238 | idx = 15; | ||
239 | break; | ||
240 | } | ||
241 | |||
242 | return i2o_class_name[idx]; | ||
243 | } | ||
244 | |||
245 | #define SCSI_TABLE_SIZE 13 | ||
246 | static char *scsi_devices[] = { | ||
247 | "Direct-Access Read/Write", | ||
248 | "Sequential-Access Storage", | ||
249 | "Printer", | ||
250 | "Processor", | ||
251 | "WORM Device", | ||
252 | "CD-ROM Device", | ||
253 | "Scanner Device", | ||
254 | "Optical Memory Device", | ||
255 | "Medium Changer Device", | ||
256 | "Communications Device", | ||
257 | "Graphics Art Pre-Press Device", | ||
258 | "Graphics Art Pre-Press Device", | ||
259 | "Array Controller Device" | ||
260 | }; | ||
261 | |||
262 | static char *chtostr(u8 * chars, int n) | ||
263 | { | ||
264 | 	static char tmp[256];	/* static: the returned pointer must stay valid after return */ | ||
265 | 	tmp[0] = 0; | ||
266 | 	return strncat(tmp, (char *)chars, n); | ||
267 | } | ||
268 | |||
269 | static int i2o_report_query_status(struct seq_file *seq, int block_status, | ||
270 | char *group) | ||
271 | { | ||
272 | switch (block_status) { | ||
273 | case -ETIMEDOUT: | ||
274 | return seq_printf(seq, "Timeout reading group %s.\n", group); | ||
275 | case -ENOMEM: | ||
276 | return seq_printf(seq, "No free memory to read the table.\n"); | ||
277 | case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: | ||
278 | return seq_printf(seq, "Group %s not supported.\n", group); | ||
279 | default: | ||
280 | return seq_printf(seq, | ||
281 | "Error reading group %s. BlockStatus 0x%02X\n", | ||
282 | group, -block_status); | ||
283 | } | ||
284 | } | ||
285 | |||
286 | static char *bus_strings[] = { | ||
287 | "Local Bus", | ||
288 | "ISA", | ||
289 | "EISA", | ||
290 | "MCA", | ||
291 | "PCI", | ||
292 | "PCMCIA", | ||
293 | "NUBUS", | ||
294 | "CARDBUS" | ||
295 | }; | ||
296 | |||
297 | static int i2o_seq_show_hrt(struct seq_file *seq, void *v) | ||
298 | { | ||
299 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
300 | i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt; | ||
301 | u32 bus; | ||
302 | int i; | ||
303 | |||
304 | if (hrt->hrt_version) { | ||
305 | seq_printf(seq, | ||
306 | "HRT table for controller is too new a version.\n"); | ||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | seq_printf(seq, "HRT has %d entries of %d bytes each.\n", | ||
311 | hrt->num_entries, hrt->entry_len << 2); | ||
312 | |||
313 | for (i = 0; i < hrt->num_entries; i++) { | ||
314 | seq_printf(seq, "Entry %d:\n", i); | ||
315 | seq_printf(seq, " Adapter ID: %0#10x\n", | ||
316 | hrt->hrt_entry[i].adapter_id); | ||
317 | seq_printf(seq, " Controlling tid: %0#6x\n", | ||
318 | hrt->hrt_entry[i].parent_tid); | ||
319 | |||
320 | if (hrt->hrt_entry[i].bus_type != 0x80) { | ||
321 | bus = hrt->hrt_entry[i].bus_type; | ||
322 | seq_printf(seq, " %s Information\n", | ||
323 | bus_strings[bus]); | ||
324 | |||
325 | switch (bus) { | ||
326 | case I2O_BUS_LOCAL: | ||
327 | seq_printf(seq, " IOBase: %0#6x,", | ||
328 | hrt->hrt_entry[i].bus.local_bus. | ||
329 | LbBaseIOPort); | ||
330 | seq_printf(seq, " MemoryBase: %0#10x\n", | ||
331 | hrt->hrt_entry[i].bus.local_bus. | ||
332 | LbBaseMemoryAddress); | ||
333 | break; | ||
334 | |||
335 | case I2O_BUS_ISA: | ||
336 | seq_printf(seq, " IOBase: %0#6x,", | ||
337 | hrt->hrt_entry[i].bus.isa_bus. | ||
338 | IsaBaseIOPort); | ||
339 | seq_printf(seq, " MemoryBase: %0#10x,", | ||
340 | hrt->hrt_entry[i].bus.isa_bus. | ||
341 | IsaBaseMemoryAddress); | ||
342 | seq_printf(seq, " CSN: %0#4x,", | ||
343 | hrt->hrt_entry[i].bus.isa_bus.CSN); | ||
344 | break; | ||
345 | |||
346 | case I2O_BUS_EISA: | ||
347 | seq_printf(seq, " IOBase: %0#6x,", | ||
348 | hrt->hrt_entry[i].bus.eisa_bus. | ||
349 | EisaBaseIOPort); | ||
350 | seq_printf(seq, " MemoryBase: %0#10x,", | ||
351 | hrt->hrt_entry[i].bus.eisa_bus. | ||
352 | EisaBaseMemoryAddress); | ||
353 | seq_printf(seq, " Slot: %0#4x,", | ||
354 | hrt->hrt_entry[i].bus.eisa_bus. | ||
355 | EisaSlotNumber); | ||
356 | break; | ||
357 | |||
358 | case I2O_BUS_MCA: | ||
359 | seq_printf(seq, " IOBase: %0#6x,", | ||
360 | hrt->hrt_entry[i].bus.mca_bus. | ||
361 | McaBaseIOPort); | ||
362 | seq_printf(seq, " MemoryBase: %0#10x,", | ||
363 | hrt->hrt_entry[i].bus.mca_bus. | ||
364 | McaBaseMemoryAddress); | ||
365 | seq_printf(seq, " Slot: %0#4x,", | ||
366 | hrt->hrt_entry[i].bus.mca_bus. | ||
367 | McaSlotNumber); | ||
368 | break; | ||
369 | |||
370 | case I2O_BUS_PCI: | ||
371 | seq_printf(seq, " Bus: %0#4x", | ||
372 | hrt->hrt_entry[i].bus.pci_bus. | ||
373 | PciBusNumber); | ||
374 | seq_printf(seq, " Dev: %0#4x", | ||
375 | hrt->hrt_entry[i].bus.pci_bus. | ||
376 | PciDeviceNumber); | ||
377 | seq_printf(seq, " Func: %0#4x", | ||
378 | hrt->hrt_entry[i].bus.pci_bus. | ||
379 | PciFunctionNumber); | ||
380 | seq_printf(seq, " Vendor: %0#6x", | ||
381 | hrt->hrt_entry[i].bus.pci_bus. | ||
382 | PciVendorID); | ||
383 | seq_printf(seq, " Device: %0#6x\n", | ||
384 | hrt->hrt_entry[i].bus.pci_bus. | ||
385 | PciDeviceID); | ||
386 | break; | ||
387 | |||
388 | default: | ||
389 | seq_printf(seq, " Unsupported Bus Type\n"); | ||
390 | } | ||
391 | } else | ||
392 | seq_printf(seq, " Unknown Bus Type\n"); | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int i2o_seq_show_lct(struct seq_file *seq, void *v) | ||
399 | { | ||
400 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
401 | i2o_lct *lct = (i2o_lct *) c->lct; | ||
402 | int entries; | ||
403 | int i; | ||
404 | |||
405 | #define BUS_TABLE_SIZE 3 | ||
406 | static char *bus_ports[] = { | ||
407 | "Generic Bus", | ||
408 | "SCSI Bus", | ||
409 | "Fibre Channel Bus" | ||
410 | }; | ||
411 | |||
412 | entries = (lct->table_size - 3) / 9; | ||
413 | |||
414 | seq_printf(seq, "LCT contains %d %s\n", entries, | ||
415 | entries == 1 ? "entry" : "entries"); | ||
416 | if (lct->boot_tid) | ||
417 | seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); | ||
418 | |||
419 | seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); | ||
420 | |||
421 | for (i = 0; i < entries; i++) { | ||
422 | seq_printf(seq, "Entry %d\n", i); | ||
423 | seq_printf(seq, " Class, SubClass : %s", | ||
424 | i2o_get_class_name(lct->lct_entry[i].class_id)); | ||
425 | |||
426 | /* | ||
427 | * Classes which we'll print subclass info for | ||
428 | */ | ||
429 | switch (lct->lct_entry[i].class_id & 0xFFF) { | ||
430 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
431 | switch (lct->lct_entry[i].sub_class) { | ||
432 | case 0x00: | ||
433 | seq_printf(seq, ", Direct-Access Read/Write"); | ||
434 | break; | ||
435 | |||
436 | case 0x04: | ||
437 | seq_printf(seq, ", WORM Drive"); | ||
438 | break; | ||
439 | |||
440 | case 0x05: | ||
441 | seq_printf(seq, ", CD-ROM Drive"); | ||
442 | break; | ||
443 | |||
444 | case 0x07: | ||
445 | seq_printf(seq, ", Optical Memory Device"); | ||
446 | break; | ||
447 | |||
448 | default: | ||
449 | seq_printf(seq, ", Unknown (0x%02x)", | ||
450 | lct->lct_entry[i].sub_class); | ||
451 | break; | ||
452 | } | ||
453 | break; | ||
454 | |||
455 | case I2O_CLASS_LAN: | ||
456 | switch (lct->lct_entry[i].sub_class & 0xFF) { | ||
457 | case 0x30: | ||
458 | seq_printf(seq, ", Ethernet"); | ||
459 | break; | ||
460 | |||
461 | case 0x40: | ||
462 | seq_printf(seq, ", 100base VG"); | ||
463 | break; | ||
464 | |||
465 | case 0x50: | ||
466 | seq_printf(seq, ", IEEE 802.5/Token-Ring"); | ||
467 | break; | ||
468 | |||
469 | case 0x60: | ||
470 | seq_printf(seq, ", ANSI X3T9.5 FDDI"); | ||
471 | break; | ||
472 | |||
473 | case 0x70: | ||
474 | seq_printf(seq, ", Fibre Channel"); | ||
475 | break; | ||
476 | |||
477 | default: | ||
478 | seq_printf(seq, ", Unknown Sub-Class (0x%02x)", | ||
479 | lct->lct_entry[i].sub_class & 0xFF); | ||
480 | break; | ||
481 | } | ||
482 | break; | ||
483 | |||
484 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
485 | if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) | ||
486 | seq_printf(seq, ", %s", | ||
487 | scsi_devices[lct->lct_entry[i]. | ||
488 | sub_class]); | ||
489 | else | ||
490 | seq_printf(seq, ", Unknown Device Type"); | ||
491 | break; | ||
492 | |||
493 | case I2O_CLASS_BUS_ADAPTER_PORT: | ||
494 | if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) | ||
495 | seq_printf(seq, ", %s", | ||
496 | bus_ports[lct->lct_entry[i]. | ||
497 | sub_class]); | ||
498 | else | ||
499 | seq_printf(seq, ", Unknown Bus Type"); | ||
500 | break; | ||
501 | } | ||
502 | seq_printf(seq, "\n"); | ||
503 | |||
504 | seq_printf(seq, " Local TID : 0x%03x\n", | ||
505 | lct->lct_entry[i].tid); | ||
506 | seq_printf(seq, " User TID : 0x%03x\n", | ||
507 | lct->lct_entry[i].user_tid); | ||
508 | seq_printf(seq, " Parent TID : 0x%03x\n", | ||
509 | lct->lct_entry[i].parent_tid); | ||
510 | seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", | ||
511 | lct->lct_entry[i].identity_tag[0], | ||
512 | lct->lct_entry[i].identity_tag[1], | ||
513 | lct->lct_entry[i].identity_tag[2], | ||
514 | lct->lct_entry[i].identity_tag[3], | ||
515 | lct->lct_entry[i].identity_tag[4], | ||
516 | lct->lct_entry[i].identity_tag[5], | ||
517 | lct->lct_entry[i].identity_tag[6], | ||
518 | lct->lct_entry[i].identity_tag[7]); | ||
519 | seq_printf(seq, " Change Indicator : %0#10x\n", | ||
520 | lct->lct_entry[i].change_ind); | ||
521 | seq_printf(seq, " Event Capab Mask : %0#10x\n", | ||
522 | lct->lct_entry[i].device_flags); | ||
523 | } | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | static int i2o_seq_show_status(struct seq_file *seq, void *v) | ||
529 | { | ||
530 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
531 | char prodstr[25]; | ||
532 | int version; | ||
533 | i2o_status_block *sb = c->status_block.virt; | ||
534 | |||
535 | i2o_status_get(c); // reread the status block | ||
536 | |||
537 | seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id); | ||
538 | |||
539 | version = sb->i2o_version; | ||
540 | |||
541 | /* FIXME for Spec 2.0 | ||
542 | if (version == 0x02) { | ||
543 | seq_printf(seq, "Lowest I2O version supported: "); | ||
544 | switch(workspace[2]) { | ||
545 | case 0x00: | ||
546 | seq_printf(seq, "1.0\n"); | ||
547 | break; | ||
548 | case 0x01: | ||
549 | seq_printf(seq, "1.5\n"); | ||
550 | break; | ||
551 | case 0x02: | ||
552 | seq_printf(seq, "2.0\n"); | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | seq_printf(seq, "Highest I2O version supported: "); | ||
557 | switch(workspace[3]) { | ||
558 | case 0x00: | ||
559 | seq_printf(seq, "1.0\n"); | ||
560 | break; | ||
561 | case 0x01: | ||
562 | seq_printf(seq, "1.5\n"); | ||
563 | break; | ||
564 | case 0x02: | ||
565 | seq_printf(seq, "2.0\n"); | ||
566 | break; | ||
567 | } | ||
568 | } | ||
569 | */ | ||
570 | seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id); | ||
571 | seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id); | ||
572 | seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number); | ||
573 | |||
574 | seq_printf(seq, "I2O version : "); | ||
575 | switch (version) { | ||
576 | case 0x00: | ||
577 | seq_printf(seq, "1.0\n"); | ||
578 | break; | ||
579 | case 0x01: | ||
580 | seq_printf(seq, "1.5\n"); | ||
581 | break; | ||
582 | case 0x02: | ||
583 | seq_printf(seq, "2.0\n"); | ||
584 | break; | ||
585 | default: | ||
586 | seq_printf(seq, "Unknown version\n"); | ||
587 | } | ||
588 | |||
589 | seq_printf(seq, "IOP State : "); | ||
590 | switch (sb->iop_state) { | ||
591 | case 0x01: | ||
592 | seq_printf(seq, "INIT\n"); | ||
593 | break; | ||
594 | |||
595 | case 0x02: | ||
596 | seq_printf(seq, "RESET\n"); | ||
597 | break; | ||
598 | |||
599 | case 0x04: | ||
600 | seq_printf(seq, "HOLD\n"); | ||
601 | break; | ||
602 | |||
603 | case 0x05: | ||
604 | seq_printf(seq, "READY\n"); | ||
605 | break; | ||
606 | |||
607 | case 0x08: | ||
608 | seq_printf(seq, "OPERATIONAL\n"); | ||
609 | break; | ||
610 | |||
611 | case 0x10: | ||
612 | seq_printf(seq, "FAILED\n"); | ||
613 | break; | ||
614 | |||
615 | case 0x11: | ||
616 | seq_printf(seq, "FAULTED\n"); | ||
617 | break; | ||
618 | |||
619 | default: | ||
620 | seq_printf(seq, "Unknown\n"); | ||
621 | break; | ||
622 | } | ||
623 | |||
624 | seq_printf(seq, "Messenger Type : "); | ||
625 | switch (sb->msg_type) { | ||
626 | case 0x00: | ||
627 | seq_printf(seq, "Memory mapped\n"); | ||
628 | break; | ||
629 | case 0x01: | ||
630 | seq_printf(seq, "Memory mapped only\n"); | ||
631 | break; | ||
632 | case 0x02: | ||
633 | seq_printf(seq, "Remote only\n"); | ||
634 | break; | ||
635 | case 0x03: | ||
636 | seq_printf(seq, "Memory mapped and remote\n"); | ||
637 | break; | ||
638 | default: | ||
639 | seq_printf(seq, "Unknown\n"); | ||
640 | } | ||
641 | |||
642 | seq_printf(seq, "Inbound Frame Size : %d bytes\n", | ||
643 | sb->inbound_frame_size << 2); | ||
644 | seq_printf(seq, "Max Inbound Frames : %d\n", | ||
645 | sb->max_inbound_frames); | ||
646 | seq_printf(seq, "Current Inbound Frames : %d\n", | ||
647 | sb->cur_inbound_frames); | ||
648 | seq_printf(seq, "Max Outbound Frames : %d\n", | ||
649 | sb->max_outbound_frames); | ||
650 | |||
651 | /* Spec doesn't say if NULL terminated or not... */ | ||
652 | memcpy(prodstr, sb->product_id, 24); | ||
653 | prodstr[24] = '\0'; | ||
654 | seq_printf(seq, "Product ID : %s\n", prodstr); | ||
655 | seq_printf(seq, "Expected LCT Size : %d bytes\n", | ||
656 | sb->expected_lct_size); | ||
657 | |||
658 | seq_printf(seq, "IOP Capabilities\n"); | ||
659 | seq_printf(seq, " Context Field Size Support : "); | ||
660 | switch (sb->iop_capabilities & 0x0000003) { | ||
661 | case 0: | ||
662 | seq_printf(seq, "Supports only 32-bit context fields\n"); | ||
663 | break; | ||
664 | case 1: | ||
665 | seq_printf(seq, "Supports only 64-bit context fields\n"); | ||
666 | break; | ||
667 | case 2: | ||
668 | seq_printf(seq, "Supports 32-bit and 64-bit context fields, " | ||
669 | "but not concurrently\n"); | ||
670 | break; | ||
671 | case 3: | ||
672 | seq_printf(seq, "Supports 32-bit and 64-bit context fields " | ||
673 | "concurrently\n"); | ||
674 | break; | ||
675 | default: | ||
676 | seq_printf(seq, "0x%08x\n", sb->iop_capabilities); | ||
677 | } | ||
678 | seq_printf(seq, " Current Context Field Size : "); | ||
679 | switch (sb->iop_capabilities & 0x0000000C) { | ||
680 | case 0: | ||
681 | seq_printf(seq, "not configured\n"); | ||
682 | break; | ||
683 | case 4: | ||
684 | seq_printf(seq, "Supports only 32-bit context fields\n"); | ||
685 | break; | ||
686 | case 8: | ||
687 | seq_printf(seq, "Supports only 64-bit context fields\n"); | ||
688 | break; | ||
689 | case 12: | ||
690 | 		seq_printf(seq, "Supports both 32-bit and 64-bit context fields " | ||
691 | "concurrently\n"); | ||
692 | break; | ||
693 | default: | ||
694 | seq_printf(seq, "\n"); | ||
695 | } | ||
696 | seq_printf(seq, " Inbound Peer Support : %s\n", | ||
697 | (sb-> | ||
698 | iop_capabilities & 0x00000010) ? "Supported" : | ||
699 | "Not supported"); | ||
700 | seq_printf(seq, " Outbound Peer Support : %s\n", | ||
701 | (sb-> | ||
702 | iop_capabilities & 0x00000020) ? "Supported" : | ||
703 | "Not supported"); | ||
704 | seq_printf(seq, " Peer to Peer Support : %s\n", | ||
705 | (sb-> | ||
706 | iop_capabilities & 0x00000040) ? "Supported" : | ||
707 | "Not supported"); | ||
708 | |||
709 | seq_printf(seq, "Desired private memory size : %d kB\n", | ||
710 | sb->desired_mem_size >> 10); | ||
711 | seq_printf(seq, "Allocated private memory size : %d kB\n", | ||
712 | sb->current_mem_size >> 10); | ||
713 | seq_printf(seq, "Private memory base address : %0#10x\n", | ||
714 | sb->current_mem_base); | ||
715 | seq_printf(seq, "Desired private I/O size : %d kB\n", | ||
716 | sb->desired_io_size >> 10); | ||
717 | seq_printf(seq, "Allocated private I/O size : %d kB\n", | ||
718 | sb->current_io_size >> 10); | ||
719 | seq_printf(seq, "Private I/O base address : %0#10x\n", | ||
720 | sb->current_io_base); | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static int i2o_seq_show_hw(struct seq_file *seq, void *v) | ||
726 | { | ||
727 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
728 | static u32 work32[5]; | ||
729 | static u8 *work8 = (u8 *) work32; | ||
730 | static u16 *work16 = (u16 *) work32; | ||
731 | int token; | ||
732 | u32 hwcap; | ||
733 | |||
734 | static char *cpu_table[] = { | ||
735 | "Intel 80960 series", | ||
736 | "AMD2900 series", | ||
737 | "Motorola 68000 series", | ||
738 | "ARM series", | ||
739 | "MIPS series", | ||
740 | "Sparc series", | ||
741 | "PowerPC series", | ||
742 | "Intel x86 series" | ||
743 | }; | ||
744 | |||
745 | token = | ||
746 | i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32)); | ||
747 | |||
748 | if (token < 0) { | ||
749 | i2o_report_query_status(seq, token, "0x0000 IOP Hardware"); | ||
750 | return 0; | ||
751 | } | ||
752 | |||
753 | seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]); | ||
754 | seq_printf(seq, "Product ID : %0#6x\n", work16[1]); | ||
755 | seq_printf(seq, "CPU : "); | ||
756 | 	if (work8[16] >= ARRAY_SIZE(cpu_table)) | ||
757 | seq_printf(seq, "Unknown\n"); | ||
758 | else | ||
759 | seq_printf(seq, "%s\n", cpu_table[work8[16]]); | ||
760 | /* Anyone using ProcessorVersion? */ | ||
761 | |||
762 | seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10); | ||
763 | seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10); | ||
764 | |||
765 | hwcap = work32[3]; | ||
766 | seq_printf(seq, "Capabilities : 0x%08x\n", hwcap); | ||
767 | seq_printf(seq, " [%s] Self booting\n", | ||
768 | (hwcap & 0x00000001) ? "+" : "-"); | ||
769 | seq_printf(seq, " [%s] Upgradable IRTOS\n", | ||
770 | (hwcap & 0x00000002) ? "+" : "-"); | ||
771 | seq_printf(seq, " [%s] Supports downloading DDMs\n", | ||
772 | (hwcap & 0x00000004) ? "+" : "-"); | ||
773 | seq_printf(seq, " [%s] Supports installing DDMs\n", | ||
774 | (hwcap & 0x00000008) ? "+" : "-"); | ||
775 | seq_printf(seq, " [%s] Battery-backed RAM\n", | ||
776 | (hwcap & 0x00000010) ? "+" : "-"); | ||
777 | |||
778 | return 0; | ||
779 | } | ||
780 | |||
781 | /* Executive group 0003h - Executing DDM List (table) */ | ||
782 | static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) | ||
783 | { | ||
784 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
785 | int token; | ||
786 | int i; | ||
787 | |||
788 | typedef struct _i2o_exec_execute_ddm_table { | ||
789 | u16 ddm_tid; | ||
790 | u8 module_type; | ||
791 | u8 reserved; | ||
792 | u16 i2o_vendor_id; | ||
793 | u16 module_id; | ||
794 | u8 module_name_version[28]; | ||
795 | u32 data_size; | ||
796 | u32 code_size; | ||
797 | } i2o_exec_execute_ddm_table; | ||
798 | |||
799 | struct { | ||
800 | u16 result_count; | ||
801 | u16 pad; | ||
802 | u16 block_size; | ||
803 | u8 block_status; | ||
804 | u8 error_info_size; | ||
805 | u16 row_count; | ||
806 | u16 more_flag; | ||
807 | i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES]; | ||
808 | } *result; | ||
809 | |||
810 | i2o_exec_execute_ddm_table ddm_table; | ||
811 | |||
812 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
813 | if (!result) | ||
814 | return -ENOMEM; | ||
815 | |||
816 | token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1, | ||
817 | NULL, 0, result, sizeof(*result)); | ||
818 | |||
819 | if (token < 0) { | ||
820 | i2o_report_query_status(seq, token, | ||
821 | "0x0003 Executing DDM List"); | ||
822 | goto out; | ||
823 | } | ||
824 | |||
825 | seq_printf(seq, | ||
826 | "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); | ||
827 | ddm_table = result->ddm_table[0]; | ||
828 | |||
829 | for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) { | ||
830 | seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF); | ||
831 | |||
832 | switch (ddm_table.module_type) { | ||
833 | case 0x01: | ||
834 | seq_printf(seq, "Downloaded DDM "); | ||
835 | break; | ||
836 | case 0x22: | ||
837 | seq_printf(seq, "Embedded DDM "); | ||
838 | break; | ||
839 | default: | ||
840 | seq_printf(seq, " "); | ||
841 | } | ||
842 | |||
843 | seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); | ||
844 | seq_printf(seq, "%-#8x", ddm_table.module_id); | ||
845 | seq_printf(seq, "%-29s", | ||
846 | chtostr(ddm_table.module_name_version, 28)); | ||
847 | seq_printf(seq, "%9d ", ddm_table.data_size); | ||
848 | seq_printf(seq, "%8d", ddm_table.code_size); | ||
849 | |||
850 | seq_printf(seq, "\n"); | ||
851 | } | ||
852 | out: | ||
853 | kfree(result); | ||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | /* Executive group 0004h - Driver Store (scalar) */ | ||
858 | static int i2o_seq_show_driver_store(struct seq_file *seq, void *v) | ||
859 | { | ||
860 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
861 | u32 work32[8]; | ||
862 | int token; | ||
863 | |||
864 | token = | ||
865 | i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32)); | ||
866 | if (token < 0) { | ||
867 | i2o_report_query_status(seq, token, "0x0004 Driver Store"); | ||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | seq_printf(seq, "Module limit : %d\n" | ||
872 | "Module count : %d\n" | ||
873 | "Current space : %d kB\n" | ||
874 | "Free space : %d kB\n", | ||
875 | work32[0], work32[1], work32[2] >> 10, work32[3] >> 10); | ||
876 | |||
877 | return 0; | ||
878 | } | ||
879 | |||
880 | /* Executive group 0005h - Driver Store Table (table) */ | ||
881 | static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) | ||
882 | { | ||
883 | typedef struct _i2o_driver_store { | ||
884 | u16 stored_ddm_index; | ||
885 | u8 module_type; | ||
886 | u8 reserved; | ||
887 | u16 i2o_vendor_id; | ||
888 | u16 module_id; | ||
889 | u8 module_name_version[28]; | ||
890 | u8 date[8]; | ||
891 | u32 module_size; | ||
892 | u32 mpb_size; | ||
893 | u32 module_flags; | ||
894 | } i2o_driver_store_table; | ||
895 | |||
896 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
897 | int token; | ||
898 | int i; | ||
899 | |||
900 | typedef struct { | ||
901 | u16 result_count; | ||
902 | u16 pad; | ||
903 | u16 block_size; | ||
904 | u8 block_status; | ||
905 | u8 error_info_size; | ||
906 | u16 row_count; | ||
907 | u16 more_flag; | ||
908 | i2o_driver_store_table dst[I2O_MAX_MODULES]; | ||
909 | } i2o_driver_result_table; | ||
910 | |||
911 | i2o_driver_result_table *result; | ||
912 | i2o_driver_store_table *dst; | ||
913 | |||
914 | result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); | ||
915 | if (result == NULL) | ||
916 | return -ENOMEM; | ||
917 | |||
918 | token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1, | ||
919 | NULL, 0, result, sizeof(*result)); | ||
920 | |||
921 | if (token < 0) { | ||
922 | i2o_report_query_status(seq, token, | ||
923 | "0x0005 DRIVER STORE TABLE"); | ||
924 | kfree(result); | ||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | seq_printf(seq, | ||
929 | "# Module_type Vendor Mod_id Module_name Vrs" | ||
930 | 		   " Date     Mod_size Par_size Flags\n"); | ||
931 | for (i = 0, dst = &result->dst[0]; i < result->row_count; | ||
932 | dst = &result->dst[++i]) { | ||
933 | seq_printf(seq, "%-3d", dst->stored_ddm_index); | ||
934 | switch (dst->module_type) { | ||
935 | case 0x01: | ||
936 | seq_printf(seq, "Downloaded DDM "); | ||
937 | break; | ||
938 | case 0x22: | ||
939 | seq_printf(seq, "Embedded DDM "); | ||
940 | break; | ||
941 | default: | ||
942 | seq_printf(seq, " "); | ||
943 | } | ||
944 | |||
945 | seq_printf(seq, "%-#7x", dst->i2o_vendor_id); | ||
946 | seq_printf(seq, "%-#8x", dst->module_id); | ||
947 | seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); | ||
948 | seq_printf(seq, "%-9s", chtostr(dst->date, 8)); | ||
949 | seq_printf(seq, "%8d ", dst->module_size); | ||
950 | seq_printf(seq, "%8d ", dst->mpb_size); | ||
951 | seq_printf(seq, "0x%04x", dst->module_flags); | ||
952 | seq_printf(seq, "\n"); | ||
953 | } | ||
954 | |||
955 | kfree(result); | ||
956 | return 0; | ||
957 | } | ||
958 | |||
959 | /* Generic group F000h - Params Descriptor (table) */ | ||
960 | static int i2o_seq_show_groups(struct seq_file *seq, void *v) | ||
961 | { | ||
962 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
963 | int token; | ||
964 | int i; | ||
965 | u8 properties; | ||
966 | |||
967 | typedef struct _i2o_group_info { | ||
968 | u16 group_number; | ||
969 | u16 field_count; | ||
970 | u16 row_count; | ||
971 | u8 properties; | ||
972 | u8 reserved; | ||
973 | } i2o_group_info; | ||
974 | |||
975 | struct { | ||
976 | u16 result_count; | ||
977 | u16 pad; | ||
978 | u16 block_size; | ||
979 | u8 block_status; | ||
980 | u8 error_info_size; | ||
981 | u16 row_count; | ||
982 | u16 more_flag; | ||
983 | i2o_group_info group[256]; | ||
984 | } *result; | ||
985 | |||
986 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
987 | if (!result) | ||
988 | return -ENOMEM; | ||
989 | |||
990 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, | ||
991 | result, sizeof(*result)); | ||
992 | |||
993 | if (token < 0) { | ||
994 | i2o_report_query_status(seq, token, "0xF000 Params Descriptor"); | ||
995 | goto out; | ||
996 | } | ||
997 | |||
998 | seq_printf(seq, | ||
999 | "# Group FieldCount RowCount Type Add Del Clear\n"); | ||
1000 | |||
1001 | for (i = 0; i < result->row_count; i++) { | ||
1002 | seq_printf(seq, "%-3d", i); | ||
1003 | seq_printf(seq, "0x%04X ", result->group[i].group_number); | ||
1004 | seq_printf(seq, "%10d ", result->group[i].field_count); | ||
1005 | seq_printf(seq, "%8d ", result->group[i].row_count); | ||
1006 | |||
1007 | properties = result->group[i].properties; | ||
1008 | if (properties & 0x1) | ||
1009 | seq_printf(seq, "Table "); | ||
1010 | else | ||
1011 | seq_printf(seq, "Scalar "); | ||
1012 | if (properties & 0x2) | ||
1013 | seq_printf(seq, " + "); | ||
1014 | else | ||
1015 | seq_printf(seq, " - "); | ||
1016 | if (properties & 0x4) | ||
1017 | seq_printf(seq, " + "); | ||
1018 | else | ||
1019 | seq_printf(seq, " - "); | ||
1020 | if (properties & 0x8) | ||
1021 | seq_printf(seq, " + "); | ||
1022 | else | ||
1023 | seq_printf(seq, " - "); | ||
1024 | |||
1025 | seq_printf(seq, "\n"); | ||
1026 | } | ||
1027 | |||
1028 | if (result->more_flag) | ||
1029 | seq_printf(seq, "There is more...\n"); | ||
1030 | out: | ||
1031 | kfree(result); | ||
1032 | return 0; | ||
1033 | } | ||
1034 | |||
1035 | /* Generic group F001h - Physical Device Table (table) */ | ||
1036 | static int i2o_seq_show_phys_device(struct seq_file *seq, void *v) | ||
1037 | { | ||
1038 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1039 | int token; | ||
1040 | int i; | ||
1041 | |||
1042 | struct { | ||
1043 | u16 result_count; | ||
1044 | u16 pad; | ||
1045 | u16 block_size; | ||
1046 | u8 block_status; | ||
1047 | u8 error_info_size; | ||
1048 | u16 row_count; | ||
1049 | u16 more_flag; | ||
1050 | u32 adapter_id[64]; | ||
1051 | } result; | ||
1052 | |||
1053 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0, | ||
1054 | &result, sizeof(result)); | ||
1055 | |||
1056 | if (token < 0) { | ||
1057 | i2o_report_query_status(seq, token, | ||
1058 | "0xF001 Physical Device Table"); | ||
1059 | return 0; | ||
1060 | } | ||
1061 | |||
1062 | if (result.row_count) | ||
1063 | seq_printf(seq, "# AdapterId\n"); | ||
1064 | |||
1065 | for (i = 0; i < result.row_count; i++) { | ||
1066 | seq_printf(seq, "%-2d", i); | ||
1067 | seq_printf(seq, "%#7x\n", result.adapter_id[i]); | ||
1068 | } | ||
1069 | |||
1070 | if (result.more_flag) | ||
1071 | seq_printf(seq, "There is more...\n"); | ||
1072 | |||
1073 | return 0; | ||
1074 | } | ||
1075 | |||
1076 | /* Generic group F002h - Claimed Table (table) */ | ||
1077 | static int i2o_seq_show_claimed(struct seq_file *seq, void *v) | ||
1078 | { | ||
1079 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1080 | int token; | ||
1081 | int i; | ||
1082 | |||
1083 | struct { | ||
1084 | u16 result_count; | ||
1085 | u16 pad; | ||
1086 | u16 block_size; | ||
1087 | u8 block_status; | ||
1088 | u8 error_info_size; | ||
1089 | u16 row_count; | ||
1090 | u16 more_flag; | ||
1091 | u16 claimed_tid[64]; | ||
1092 | } result; | ||
1093 | |||
1094 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0, | ||
1095 | &result, sizeof(result)); | ||
1096 | |||
1097 | if (token < 0) { | ||
1098 | i2o_report_query_status(seq, token, "0xF002 Claimed Table"); | ||
1099 | return 0; | ||
1100 | } | ||
1101 | |||
1102 | if (result.row_count) | ||
1103 | seq_printf(seq, "# ClaimedTid\n"); | ||
1104 | |||
1105 | for (i = 0; i < result.row_count; i++) { | ||
1106 | seq_printf(seq, "%-2d", i); | ||
1107 | seq_printf(seq, "%#7x\n", result.claimed_tid[i]); | ||
1108 | } | ||
1109 | |||
1110 | if (result.more_flag) | ||
1111 | seq_printf(seq, "There is more...\n"); | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1116 | /* Generic group F003h - User Table (table) */ | ||
1117 | static int i2o_seq_show_users(struct seq_file *seq, void *v) | ||
1118 | { | ||
1119 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1120 | int token; | ||
1121 | int i; | ||
1122 | |||
1123 | typedef struct _i2o_user_table { | ||
1124 | u16 instance; | ||
1125 | u16 user_tid; | ||
1126 | u8 claim_type; | ||
1127 | u8 reserved1; | ||
1128 | u16 reserved2; | ||
1129 | } i2o_user_table; | ||
1130 | |||
1131 | struct { | ||
1132 | u16 result_count; | ||
1133 | u16 pad; | ||
1134 | u16 block_size; | ||
1135 | u8 block_status; | ||
1136 | u8 error_info_size; | ||
1137 | u16 row_count; | ||
1138 | u16 more_flag; | ||
1139 | i2o_user_table user[64]; | ||
1140 | } *result; | ||
1141 | |||
1142 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
1143 | if (!result) | ||
1144 | return -ENOMEM; | ||
1145 | |||
1146 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0, | ||
1147 | result, sizeof(*result)); | ||
1148 | |||
1149 | if (token < 0) { | ||
1150 | i2o_report_query_status(seq, token, "0xF003 User Table"); | ||
1151 | goto out; | ||
1152 | } | ||
1153 | |||
1154 | seq_printf(seq, "# Instance UserTid ClaimType\n"); | ||
1155 | |||
1156 | for (i = 0; i < result->row_count; i++) { | ||
1157 | seq_printf(seq, "%-3d", i); | ||
1158 | seq_printf(seq, "%#8x ", result->user[i].instance); | ||
1159 | seq_printf(seq, "%#7x ", result->user[i].user_tid); | ||
1160 | seq_printf(seq, "%#9x\n", result->user[i].claim_type); | ||
1161 | } | ||
1162 | |||
1163 | if (result->more_flag) | ||
1164 | seq_printf(seq, "There is more...\n"); | ||
1165 | out: | ||
1166 | kfree(result); | ||
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | /* Generic group F005h - Private message extensions (table) (optional) */ | ||
1171 | static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v) | ||
1172 | { | ||
1173 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1174 | int token; | ||
1175 | int i; | ||
1176 | |||
1177 | typedef struct _i2o_private { | ||
1178 | u16 ext_instance; | ||
1179 | u16 organization_id; | ||
1180 | u16 x_function_code; | ||
1181 | } i2o_private; | ||
1182 | |||
1183 | struct { | ||
1184 | u16 result_count; | ||
1185 | u16 pad; | ||
1186 | u16 block_size; | ||
1187 | u8 block_status; | ||
1188 | u8 error_info_size; | ||
1189 | u16 row_count; | ||
1190 | u16 more_flag; | ||
1191 | i2o_private extension[64]; | ||
1192 | } result; | ||
1193 | |||
1194 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0, | ||
1195 | &result, sizeof(result)); | ||
1196 | |||
1197 | if (token < 0) { | ||
1198 | i2o_report_query_status(seq, token, | ||
1199 | "0xF005 Private Message Extensions (optional)"); | ||
1200 | return 0; | ||
1201 | } | ||
1202 | |||
1203 | seq_printf(seq, "Instance# OrgId FunctionCode\n"); | ||
1204 | |||
1205 | for (i = 0; i < result.row_count; i++) { | ||
1206 | seq_printf(seq, "%0#9x ", result.extension[i].ext_instance); | ||
1207 | seq_printf(seq, "%0#6x ", result.extension[i].organization_id); | ||
1208 | seq_printf(seq, "%0#6x", result.extension[i].x_function_code); | ||
1209 | |||
1210 | seq_printf(seq, "\n"); | ||
1211 | } | ||
1212 | |||
1213 | if (result.more_flag) | ||
1214 | seq_printf(seq, "There is more...\n"); | ||
1215 | |||
1216 | return 0; | ||
1217 | } | ||
1218 | |||
1219 | /* Generic group F006h - Authorized User Table (table) */ | ||
1220 | static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) | ||
1221 | { | ||
1222 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1223 | int token; | ||
1224 | int i; | ||
1225 | |||
1226 | struct { | ||
1227 | u16 result_count; | ||
1228 | u16 pad; | ||
1229 | u16 block_size; | ||
1230 | u8 block_status; | ||
1231 | u8 error_info_size; | ||
1232 | u16 row_count; | ||
1233 | u16 more_flag; | ||
1234 | u32 alternate_tid[64]; | ||
1235 | } result; | ||
1236 | |||
1237 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0, | ||
1238 | &result, sizeof(result)); | ||
1239 | |||
1240 | if (token < 0) { | ||
1241 | i2o_report_query_status(seq, token, | ||
1242 | "0xF006 Autohorized User Table"); | ||
1243 | return 0; | ||
1244 | } | ||
1245 | |||
1246 | if (result.row_count) | ||
1247 | seq_printf(seq, "# AlternateTid\n"); | ||
1248 | |||
1249 | for (i = 0; i < result.row_count; i++) { | ||
1250 | seq_printf(seq, "%-2d", i); | ||
1251 | seq_printf(seq, "%#7x\n", result.alternate_tid[i]); | ||
1252 | } | ||
1253 | |||
1254 | if (result.more_flag) | ||
1255 | seq_printf(seq, "There is more...\n"); | ||
1256 | |||
1257 | return 0; | ||
1258 | } | ||
1259 | |||
1260 | /* Generic group F100h - Device Identity (scalar) */ | ||
1261 | static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) | ||
1262 | { | ||
1263 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1264 | static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number | ||
1265 | // == (allow) 512d bytes (max) | ||
1266 | static u16 *work16 = (u16 *) work32; | ||
1267 | int token; | ||
1268 | |||
1269 | token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); | ||
1270 | |||
1271 | if (token < 0) { | ||
1272 | i2o_report_query_status(seq, token, "0xF100 Device Identity"); | ||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); | ||
1277 | seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); | ||
1278 | seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); | ||
1279 | seq_printf(seq, "Vendor info : %s\n", | ||
1280 | chtostr((u8 *) (work32 + 2), 16)); | ||
1281 | seq_printf(seq, "Product info : %s\n", | ||
1282 | chtostr((u8 *) (work32 + 6), 16)); | ||
1283 | seq_printf(seq, "Description : %s\n", | ||
1284 | chtostr((u8 *) (work32 + 10), 16)); | ||
1285 | seq_printf(seq, "Product rev. : %s\n", | ||
1286 | chtostr((u8 *) (work32 + 14), 8)); | ||
1287 | |||
1288 | seq_printf(seq, "Serial number : "); | ||
1289 | print_serial_number(seq, (u8 *) (work32 + 16), | ||
1290 | /* allow for SNLen plus | ||
1291 | * possible trailing '\0' | ||
1292 | */ | ||
1293 | sizeof(work32) - (16 * sizeof(u32)) - 2); | ||
1294 | seq_printf(seq, "\n"); | ||
1295 | |||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | static int i2o_seq_show_dev_name(struct seq_file *seq, void *v) | ||
1300 | { | ||
1301 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1302 | |||
1303 | seq_printf(seq, "%s\n", d->device.bus_id); | ||
1304 | |||
1305 | return 0; | ||
1306 | } | ||
1307 | |||
1308 | /* Generic group F101h - DDM Identity (scalar) */ | ||
1309 | static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) | ||
1310 | { | ||
1311 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1312 | int token; | ||
1313 | |||
1314 | struct { | ||
1315 | u16 ddm_tid; | ||
1316 | u8 module_name[24]; | ||
1317 | u8 module_rev[8]; | ||
1318 | u8 sn_format; | ||
1319 | u8 serial_number[12]; | ||
1320 | u8 pad[256]; // allow up to 256 byte (max) serial number | ||
1321 | } result; | ||
1322 | |||
1323 | token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); | ||
1324 | |||
1325 | if (token < 0) { | ||
1326 | i2o_report_query_status(seq, token, "0xF101 DDM Identity"); | ||
1327 | return 0; | ||
1328 | } | ||
1329 | |||
1330 | seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); | ||
1331 | seq_printf(seq, "Module name : %s\n", | ||
1332 | chtostr(result.module_name, 24)); | ||
1333 | seq_printf(seq, "Module revision : %s\n", | ||
1334 | chtostr(result.module_rev, 8)); | ||
1335 | |||
1336 | seq_printf(seq, "Serial number : "); | ||
1337 | print_serial_number(seq, result.serial_number, sizeof(result) - 36); | ||
1338 | /* allow for SNLen plus possible trailing '\0' */ | ||
1339 | |||
1340 | seq_printf(seq, "\n"); | ||
1341 | |||
1342 | return 0; | ||
1343 | } | ||
1344 | |||
1345 | /* Generic group F102h - User Information (scalar) */ | ||
1346 | static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) | ||
1347 | { | ||
1348 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1349 | int token; | ||
1350 | |||
1351 | struct { | ||
1352 | u8 device_name[64]; | ||
1353 | u8 service_name[64]; | ||
1354 | u8 physical_location[64]; | ||
1355 | u8 instance_number[4]; | ||
1356 | } result; | ||
1357 | |||
1358 | token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); | ||
1359 | |||
1360 | if (token < 0) { | ||
1361 | i2o_report_query_status(seq, token, "0xF102 User Information"); | ||
1362 | return 0; | ||
1363 | } | ||
1364 | |||
1365 | seq_printf(seq, "Device name : %s\n", | ||
1366 | chtostr(result.device_name, 64)); | ||
1367 | seq_printf(seq, "Service name : %s\n", | ||
1368 | chtostr(result.service_name, 64)); | ||
1369 | seq_printf(seq, "Physical name : %s\n", | ||
1370 | chtostr(result.physical_location, 64)); | ||
1371 | seq_printf(seq, "Instance number : %s\n", | ||
1372 | chtostr(result.instance_number, 4)); | ||
1373 | |||
1374 | return 0; | ||
1375 | } | ||
1376 | |||
1377 | /* Generic group F103h - SGL Operating Limits (scalar) */ | ||
1378 | static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) | ||
1379 | { | ||
1380 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1381 | static u32 work32[12]; | ||
1382 | static u16 *work16 = (u16 *) work32; | ||
1383 | static u8 *work8 = (u8 *) work32; | ||
1384 | int token; | ||
1385 | |||
1386 | token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); | ||
1387 | |||
1388 | if (token < 0) { | ||
1389 | i2o_report_query_status(seq, token, | ||
1390 | "0xF103 SGL Operating Limits"); | ||
1391 | return 0; | ||
1392 | } | ||
1393 | |||
1394 | seq_printf(seq, "SGL chain size : %d\n", work32[0]); | ||
1395 | seq_printf(seq, "Max SGL chain size : %d\n", work32[1]); | ||
1396 | seq_printf(seq, "SGL chain size target : %d\n", work32[2]); | ||
1397 | seq_printf(seq, "SGL frag count : %d\n", work16[6]); | ||
1398 | seq_printf(seq, "Max SGL frag count : %d\n", work16[7]); | ||
1399 | seq_printf(seq, "SGL frag count target : %d\n", work16[8]); | ||
1400 | |||
1401 | /* FIXME | ||
1402 | if (d->i2oversion == 0x02) | ||
1403 | { | ||
1404 | */ | ||
1405 | seq_printf(seq, "SGL data alignment : %d\n", work16[8]); | ||
1406 | seq_printf(seq, "SGL addr limit : %d\n", work8[20]); | ||
1407 | seq_printf(seq, "SGL addr sizes supported : "); | ||
1408 | if (work8[21] & 0x01) | ||
1409 | seq_printf(seq, "32 bit "); | ||
1410 | if (work8[21] & 0x02) | ||
1411 | seq_printf(seq, "64 bit "); | ||
1412 | if (work8[21] & 0x04) | ||
1413 | seq_printf(seq, "96 bit "); | ||
1414 | if (work8[21] & 0x08) | ||
1415 | seq_printf(seq, "128 bit "); | ||
1416 | seq_printf(seq, "\n"); | ||
1417 | /* | ||
1418 | } | ||
1419 | */ | ||
1420 | |||
1421 | return 0; | ||
1422 | } | ||
1423 | |||
1424 | /* Generic group F200h - Sensors (scalar) */ | ||
1425 | static int i2o_seq_show_sensors(struct seq_file *seq, void *v) | ||
1426 | { | ||
1427 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1428 | int token; | ||
1429 | |||
1430 | struct { | ||
1431 | u16 sensor_instance; | ||
1432 | u8 component; | ||
1433 | u16 component_instance; | ||
1434 | u8 sensor_class; | ||
1435 | u8 sensor_type; | ||
1436 | u8 scaling_exponent; | ||
1437 | u32 actual_reading; | ||
1438 | u32 minimum_reading; | ||
1439 | u32 low2lowcat_treshold; | ||
1440 | u32 lowcat2low_treshold; | ||
1441 | u32 lowwarn2low_treshold; | ||
1442 | u32 low2lowwarn_treshold; | ||
1443 | u32 norm2lowwarn_treshold; | ||
1444 | u32 lowwarn2norm_treshold; | ||
1445 | u32 nominal_reading; | ||
1446 | u32 hiwarn2norm_treshold; | ||
1447 | u32 norm2hiwarn_treshold; | ||
1448 | u32 high2hiwarn_treshold; | ||
1449 | u32 hiwarn2high_treshold; | ||
1450 | u32 hicat2high_treshold; | ||
1451 | u32 hi2hicat_treshold; | ||
1452 | u32 maximum_reading; | ||
1453 | u8 sensor_state; | ||
1454 | u16 event_enable; | ||
1455 | } result; | ||
1456 | |||
1457 | token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result)); | ||
1458 | |||
1459 | if (token < 0) { | ||
1460 | i2o_report_query_status(seq, token, | ||
1461 | "0xF200 Sensors (optional)"); | ||
1462 | return 0; | ||
1463 | } | ||
1464 | |||
1465 | seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance); | ||
1466 | |||
1467 | seq_printf(seq, "Component : %d = ", result.component); | ||
1468 | switch (result.component) { | ||
1469 | case 0: | ||
1470 | seq_printf(seq, "Other"); | ||
1471 | break; | ||
1472 | case 1: | ||
1473 | seq_printf(seq, "Planar logic Board"); | ||
1474 | break; | ||
1475 | case 2: | ||
1476 | seq_printf(seq, "CPU"); | ||
1477 | break; | ||
1478 | case 3: | ||
1479 | seq_printf(seq, "Chassis"); | ||
1480 | break; | ||
1481 | case 4: | ||
1482 | seq_printf(seq, "Power Supply"); | ||
1483 | break; | ||
1484 | case 5: | ||
1485 | seq_printf(seq, "Storage"); | ||
1486 | break; | ||
1487 | case 6: | ||
1488 | seq_printf(seq, "External"); | ||
1489 | break; | ||
1490 | } | ||
1491 | seq_printf(seq, "\n"); | ||
1492 | |||
1493 | seq_printf(seq, "Component instance : %d\n", | ||
1494 | result.component_instance); | ||
1495 | seq_printf(seq, "Sensor class : %s\n", | ||
1496 | result.sensor_class ? "Analog" : "Digital"); | ||
1497 | |||
1498 | seq_printf(seq, "Sensor type : %d = ", result.sensor_type); | ||
1499 | switch (result.sensor_type) { | ||
1500 | case 0: | ||
1501 | seq_printf(seq, "Other\n"); | ||
1502 | break; | ||
1503 | case 1: | ||
1504 | seq_printf(seq, "Thermal\n"); | ||
1505 | break; | ||
1506 | case 2: | ||
1507 | seq_printf(seq, "DC voltage (DC volts)\n"); | ||
1508 | break; | ||
1509 | case 3: | ||
1510 | seq_printf(seq, "AC voltage (AC volts)\n"); | ||
1511 | break; | ||
1512 | case 4: | ||
1513 | seq_printf(seq, "DC current (DC amps)\n"); | ||
1514 | break; | ||
1515 | case 5: | ||
1516 | seq_printf(seq, "AC current (AC amps)\n"); | ||
1517 | break; | ||
1518 | case 6: | ||
1519 | seq_printf(seq, "Door open\n"); | ||
1520 | break; | ||
1521 | case 7: | ||
1522 | seq_printf(seq, "Fan operational\n"); | ||
1523 | break; | ||
1524 | } | ||
1525 | |||
1526 | seq_printf(seq, "Scaling exponent : %d\n", | ||
1527 | result.scaling_exponent); | ||
1528 | seq_printf(seq, "Actual reading : %d\n", result.actual_reading); | ||
1529 | seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading); | ||
1530 | seq_printf(seq, "Low2LowCat threshold : %d\n", | ||
1531 | result.low2lowcat_treshold); | ||
1532 | seq_printf(seq, "LowCat2Low threshold : %d\n", | ||
1533 | result.lowcat2low_treshold); | ||
1534 | seq_printf(seq, "LowWarn2Low threshold : %d\n", | ||
1535 | result.lowwarn2low_treshold); | ||
1536 | seq_printf(seq, "Low2LowWarn threshold : %d\n", | ||
1537 | result.low2lowwarn_treshold); | ||
1538 | seq_printf(seq, "Norm2LowWarn threshold : %d\n", | ||
1539 | result.norm2lowwarn_treshold); | ||
1540 | seq_printf(seq, "LowWarn2Norm threshold : %d\n", | ||
1541 | result.lowwarn2norm_treshold); | ||
1542 | seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading); | ||
1543 | seq_printf(seq, "HiWarn2Norm threshold : %d\n", | ||
1544 | result.hiwarn2norm_treshold); | ||
1545 | seq_printf(seq, "Norm2HiWarn threshold : %d\n", | ||
1546 | result.norm2hiwarn_treshold); | ||
1547 | seq_printf(seq, "High2HiWarn threshold : %d\n", | ||
1548 | result.high2hiwarn_treshold); | ||
1549 | seq_printf(seq, "HiWarn2High threshold : %d\n", | ||
1550 | result.hiwarn2high_treshold); | ||
1551 | seq_printf(seq, "HiCat2High threshold : %d\n", | ||
1552 | result.hicat2high_treshold); | ||
1553 | seq_printf(seq, "High2HiCat threshold : %d\n", | ||
1554 | result.hi2hicat_treshold); | ||
1555 | seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading); | ||
1556 | |||
1557 | seq_printf(seq, "Sensor state : %d = ", result.sensor_state); | ||
1558 | switch (result.sensor_state) { | ||
1559 | case 0: | ||
1560 | seq_printf(seq, "Normal\n"); | ||
1561 | break; | ||
1562 | case 1: | ||
1563 | seq_printf(seq, "Abnormal\n"); | ||
1564 | break; | ||
1565 | case 2: | ||
1566 | seq_printf(seq, "Unknown\n"); | ||
1567 | break; | ||
1568 | case 3: | ||
1569 | seq_printf(seq, "Low Catastrophic (LoCat)\n"); | ||
1570 | break; | ||
1571 | case 4: | ||
1572 | seq_printf(seq, "Low (Low)\n"); | ||
1573 | break; | ||
1574 | case 5: | ||
1575 | seq_printf(seq, "Low Warning (LoWarn)\n"); | ||
1576 | break; | ||
1577 | case 6: | ||
1578 | seq_printf(seq, "High Warning (HiWarn)\n"); | ||
1579 | break; | ||
1580 | case 7: | ||
1581 | seq_printf(seq, "High (High)\n"); | ||
1582 | break; | ||
1583 | case 8: | ||
1584 | seq_printf(seq, "High Catastrophic (HiCat)\n"); | ||
1585 | break; | ||
1586 | } | ||
1587 | |||
1588 | seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable); | ||
1589 | seq_printf(seq, " [%s] Operational state change. \n", | ||
1590 | (result.event_enable & 0x01) ? "+" : "-"); | ||
1591 | seq_printf(seq, " [%s] Low catastrophic. \n", | ||
1592 | (result.event_enable & 0x02) ? "+" : "-"); | ||
1593 | seq_printf(seq, " [%s] Low reading. \n", | ||
1594 | (result.event_enable & 0x04) ? "+" : "-"); | ||
1595 | seq_printf(seq, " [%s] Low warning. \n", | ||
1596 | (result.event_enable & 0x08) ? "+" : "-"); | ||
1597 | seq_printf(seq, | ||
1598 | " [%s] Change back to normal from out of range state. \n", | ||
1599 | (result.event_enable & 0x10) ? "+" : "-"); | ||
1600 | seq_printf(seq, " [%s] High warning. \n", | ||
1601 | (result.event_enable & 0x20) ? "+" : "-"); | ||
1602 | seq_printf(seq, " [%s] High reading. \n", | ||
1603 | (result.event_enable & 0x40) ? "+" : "-"); | ||
1604 | seq_printf(seq, " [%s] High catastrophic. \n", | ||
1605 | (result.event_enable & 0x80) ? "+" : "-"); | ||
1606 | |||
1607 | return 0; | ||
1608 | } | ||
1609 | |||
1610 | static int i2o_seq_open_hrt(struct inode *inode, struct file *file) | ||
1611 | { | ||
1612 | return single_open(file, i2o_seq_show_hrt, PDE(inode)->data); | ||
1613 | }; | ||
1614 | |||
1615 | static int i2o_seq_open_lct(struct inode *inode, struct file *file) | ||
1616 | { | ||
1617 | return single_open(file, i2o_seq_show_lct, PDE(inode)->data); | ||
1618 | }; | ||
1619 | |||
1620 | static int i2o_seq_open_status(struct inode *inode, struct file *file) | ||
1621 | { | ||
1622 | return single_open(file, i2o_seq_show_status, PDE(inode)->data); | ||
1623 | }; | ||
1624 | |||
1625 | static int i2o_seq_open_hw(struct inode *inode, struct file *file) | ||
1626 | { | ||
1627 | return single_open(file, i2o_seq_show_hw, PDE(inode)->data); | ||
1628 | }; | ||
1629 | |||
1630 | static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file) | ||
1631 | { | ||
1632 | return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data); | ||
1633 | }; | ||
1634 | |||
1635 | static int i2o_seq_open_driver_store(struct inode *inode, struct file *file) | ||
1636 | { | ||
1637 | return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data); | ||
1638 | }; | ||
1639 | |||
1640 | static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file) | ||
1641 | { | ||
1642 | return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data); | ||
1643 | }; | ||
1644 | |||
1645 | static int i2o_seq_open_groups(struct inode *inode, struct file *file) | ||
1646 | { | ||
1647 | return single_open(file, i2o_seq_show_groups, PDE(inode)->data); | ||
1648 | }; | ||
1649 | |||
1650 | static int i2o_seq_open_phys_device(struct inode *inode, struct file *file) | ||
1651 | { | ||
1652 | return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data); | ||
1653 | }; | ||
1654 | |||
1655 | static int i2o_seq_open_claimed(struct inode *inode, struct file *file) | ||
1656 | { | ||
1657 | return single_open(file, i2o_seq_show_claimed, PDE(inode)->data); | ||
1658 | }; | ||
1659 | |||
1660 | static int i2o_seq_open_users(struct inode *inode, struct file *file) | ||
1661 | { | ||
1662 | return single_open(file, i2o_seq_show_users, PDE(inode)->data); | ||
1663 | }; | ||
1664 | |||
1665 | static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file) | ||
1666 | { | ||
1667 | return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data); | ||
1668 | }; | ||
1669 | |||
1670 | static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file) | ||
1671 | { | ||
1672 | return single_open(file, i2o_seq_show_authorized_users, | ||
1673 | PDE(inode)->data); | ||
1674 | }; | ||
1675 | |||
1676 | static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file) | ||
1677 | { | ||
1678 | return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data); | ||
1679 | }; | ||
1680 | |||
1681 | static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file) | ||
1682 | { | ||
1683 | return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data); | ||
1684 | }; | ||
1685 | |||
1686 | static int i2o_seq_open_uinfo(struct inode *inode, struct file *file) | ||
1687 | { | ||
1688 | return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data); | ||
1689 | }; | ||
1690 | |||
1691 | static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file) | ||
1692 | { | ||
1693 | return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data); | ||
1694 | }; | ||
1695 | |||
1696 | static int i2o_seq_open_sensors(struct inode *inode, struct file *file) | ||
1697 | { | ||
1698 | return single_open(file, i2o_seq_show_sensors, PDE(inode)->data); | ||
1699 | }; | ||
1700 | |||
1701 | static int i2o_seq_open_dev_name(struct inode *inode, struct file *file) | ||
1702 | { | ||
1703 | return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data); | ||
1704 | }; | ||
1705 | |||
1706 | static struct file_operations i2o_seq_fops_lct = { | ||
1707 | .open = i2o_seq_open_lct, | ||
1708 | .read = seq_read, | ||
1709 | .llseek = seq_lseek, | ||
1710 | .release = single_release, | ||
1711 | }; | ||
1712 | |||
1713 | static struct file_operations i2o_seq_fops_hrt = { | ||
1714 | .open = i2o_seq_open_hrt, | ||
1715 | .read = seq_read, | ||
1716 | .llseek = seq_lseek, | ||
1717 | .release = single_release, | ||
1718 | }; | ||
1719 | |||
1720 | static struct file_operations i2o_seq_fops_status = { | ||
1721 | .open = i2o_seq_open_status, | ||
1722 | .read = seq_read, | ||
1723 | .llseek = seq_lseek, | ||
1724 | .release = single_release, | ||
1725 | }; | ||
1726 | |||
1727 | static struct file_operations i2o_seq_fops_hw = { | ||
1728 | .open = i2o_seq_open_hw, | ||
1729 | .read = seq_read, | ||
1730 | .llseek = seq_lseek, | ||
1731 | .release = single_release, | ||
1732 | }; | ||
1733 | |||
1734 | static struct file_operations i2o_seq_fops_ddm_table = { | ||
1735 | .open = i2o_seq_open_ddm_table, | ||
1736 | .read = seq_read, | ||
1737 | .llseek = seq_lseek, | ||
1738 | .release = single_release, | ||
1739 | }; | ||
1740 | |||
1741 | static struct file_operations i2o_seq_fops_driver_store = { | ||
1742 | .open = i2o_seq_open_driver_store, | ||
1743 | .read = seq_read, | ||
1744 | .llseek = seq_lseek, | ||
1745 | .release = single_release, | ||
1746 | }; | ||
1747 | |||
1748 | static struct file_operations i2o_seq_fops_drivers_stored = { | ||
1749 | .open = i2o_seq_open_drivers_stored, | ||
1750 | .read = seq_read, | ||
1751 | .llseek = seq_lseek, | ||
1752 | .release = single_release, | ||
1753 | }; | ||
1754 | |||
1755 | static struct file_operations i2o_seq_fops_groups = { | ||
1756 | .open = i2o_seq_open_groups, | ||
1757 | .read = seq_read, | ||
1758 | .llseek = seq_lseek, | ||
1759 | .release = single_release, | ||
1760 | }; | ||
1761 | |||
1762 | static struct file_operations i2o_seq_fops_phys_device = { | ||
1763 | .open = i2o_seq_open_phys_device, | ||
1764 | .read = seq_read, | ||
1765 | .llseek = seq_lseek, | ||
1766 | .release = single_release, | ||
1767 | }; | ||
1768 | |||
1769 | static struct file_operations i2o_seq_fops_claimed = { | ||
1770 | .open = i2o_seq_open_claimed, | ||
1771 | .read = seq_read, | ||
1772 | .llseek = seq_lseek, | ||
1773 | .release = single_release, | ||
1774 | }; | ||
1775 | |||
1776 | static struct file_operations i2o_seq_fops_users = { | ||
1777 | .open = i2o_seq_open_users, | ||
1778 | .read = seq_read, | ||
1779 | .llseek = seq_lseek, | ||
1780 | .release = single_release, | ||
1781 | }; | ||
1782 | |||
1783 | static struct file_operations i2o_seq_fops_priv_msgs = { | ||
1784 | .open = i2o_seq_open_priv_msgs, | ||
1785 | .read = seq_read, | ||
1786 | .llseek = seq_lseek, | ||
1787 | .release = single_release, | ||
1788 | }; | ||
1789 | |||
1790 | static struct file_operations i2o_seq_fops_authorized_users = { | ||
1791 | .open = i2o_seq_open_authorized_users, | ||
1792 | .read = seq_read, | ||
1793 | .llseek = seq_lseek, | ||
1794 | .release = single_release, | ||
1795 | }; | ||
1796 | |||
1797 | static struct file_operations i2o_seq_fops_dev_name = { | ||
1798 | .open = i2o_seq_open_dev_name, | ||
1799 | .read = seq_read, | ||
1800 | .llseek = seq_lseek, | ||
1801 | .release = single_release, | ||
1802 | }; | ||
1803 | |||
1804 | static struct file_operations i2o_seq_fops_dev_identity = { | ||
1805 | .open = i2o_seq_open_dev_identity, | ||
1806 | .read = seq_read, | ||
1807 | .llseek = seq_lseek, | ||
1808 | .release = single_release, | ||
1809 | }; | ||
1810 | |||
1811 | static struct file_operations i2o_seq_fops_ddm_identity = { | ||
1812 | .open = i2o_seq_open_ddm_identity, | ||
1813 | .read = seq_read, | ||
1814 | .llseek = seq_lseek, | ||
1815 | .release = single_release, | ||
1816 | }; | ||
1817 | |||
1818 | static struct file_operations i2o_seq_fops_uinfo = { | ||
1819 | .open = i2o_seq_open_uinfo, | ||
1820 | .read = seq_read, | ||
1821 | .llseek = seq_lseek, | ||
1822 | .release = single_release, | ||
1823 | }; | ||
1824 | |||
1825 | static struct file_operations i2o_seq_fops_sgl_limits = { | ||
1826 | .open = i2o_seq_open_sgl_limits, | ||
1827 | .read = seq_read, | ||
1828 | .llseek = seq_lseek, | ||
1829 | .release = single_release, | ||
1830 | }; | ||
1831 | |||
1832 | static struct file_operations i2o_seq_fops_sensors = { | ||
1833 | .open = i2o_seq_open_sensors, | ||
1834 | .read = seq_read, | ||
1835 | .llseek = seq_lseek, | ||
1836 | .release = single_release, | ||
1837 | }; | ||
1838 | |||
1839 | /* | ||
1840 | * IOP specific entries...write field just in case someone | ||
1841 | * ever wants one. | ||
1842 | */ | ||
1843 | static i2o_proc_entry i2o_proc_generic_iop_entries[] = { | ||
1844 | {"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt}, | ||
1845 | {"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct}, | ||
1846 | {"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status}, | ||
1847 | {"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw}, | ||
1848 | {"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table}, | ||
1849 | {"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store}, | ||
1850 | {"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored}, | ||
1851 | {NULL, 0, NULL} | ||
1852 | }; | ||
1853 | |||
1854 | /* | ||
1855 | * Device specific entries | ||
1856 | */ | ||
1857 | static i2o_proc_entry generic_dev_entries[] = { | ||
1858 | {"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups}, | ||
1859 | {"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device}, | ||
1860 | {"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed}, | ||
1861 | {"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users}, | ||
1862 | {"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs}, | ||
1863 | {"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users}, | ||
1864 | {"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity}, | ||
1865 | {"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity}, | ||
1866 | {"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo}, | ||
1867 | {"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits}, | ||
1868 | {"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors}, | ||
1869 | {NULL, 0, NULL} | ||
1870 | }; | ||
1871 | |||
1872 | /* | ||
1873 | * Storage unit specific entries (SCSI Periph, BS) with device names | ||
1874 | */ | ||
1875 | static i2o_proc_entry rbs_dev_entries[] = { | ||
1876 | {"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name}, | ||
1877 | {NULL, 0, NULL} | ||
1878 | }; | ||
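| /* | ||
| * Taken together, the tables above yield a proc tree of roughly the | ||
| * following shape (controller and device names are generated at runtime): | ||
| * | ||
| *   /proc/i2o/<controller>/{hrt,lct,status,hw,ddm_table,...} | ||
| *   /proc/i2o/<controller>/<device tid>/{groups,claimed,users,...} | ||
| *   /proc/i2o/<controller>/<device tid>/dev_name  (block/SCSI class only) | ||
| */ | ||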
1879 | |||
1880 | /** | ||
1881 | * i2o_proc_create_entries - Creates proc dir entries | ||
1882 | * @dir: proc dir entry under which the entries should be placed | ||
1883 | * @i2o_pe: pointer to the entries which should be added | ||
1884 | * @data: pointer to I2O controller or device | ||
1885 | * | ||
1886 | * Create proc dir entries for an I2O controller or I2O device. | ||
1887 | * | ||
1888 | * Returns 0 on success or negative error code on failure. | ||
1889 | */ | ||
1890 | static int i2o_proc_create_entries(struct proc_dir_entry *dir, | ||
1891 | i2o_proc_entry * i2o_pe, void *data) | ||
1892 | { | ||
1893 | struct proc_dir_entry *tmp; | ||
1894 | |||
1895 | while (i2o_pe->name) { | ||
1896 | tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir); | ||
1897 | if (!tmp) | ||
1898 | return -1; | ||
1899 | |||
1900 | tmp->data = data; | ||
1901 | tmp->proc_fops = i2o_pe->fops; | ||
1902 | |||
1903 | i2o_pe++; | ||
1904 | } | ||
1905 | |||
1906 | return 0; | ||
1907 | } | ||
1908 | |||
1909 | /** | ||
1910 | * i2o_proc_subdir_remove - Remove child entries from a proc entry | ||
1911 | * @dir: proc dir entry from which the children should be removed | ||
1912 | * | ||
1913 | * Iterate over each i2o proc entry under dir and remove it. If the child | ||
1914 | * also has entries, remove them too. | ||
1915 | */ | ||
1916 | static void i2o_proc_subdir_remove(struct proc_dir_entry *dir) | ||
1917 | { | ||
1918 | struct proc_dir_entry *pe, *tmp; | ||
1919 | pe = dir->subdir; | ||
1920 | while (pe) { | ||
1921 | tmp = pe->next; | ||
1922 | i2o_proc_subdir_remove(pe); | ||
1923 | remove_proc_entry(pe->name, dir); | ||
1924 | pe = tmp; | ||
1925 | } | ||
1926 | }; | ||
1927 | |||
1928 | /** | ||
1929 | * i2o_proc_device_add - Add an I2O device to the proc dir | ||
1930 | * @dir: proc dir entry to which the device should be added | ||
1931 | * @dev: I2O device which should be added | ||
1932 | * | ||
1933 | * Add an I2O device to the proc dir entry dir and create the entries for | ||
1934 | * the device depending on the class of the I2O device. | ||
1935 | */ | ||
1936 | static void i2o_proc_device_add(struct proc_dir_entry *dir, | ||
1937 | struct i2o_device *dev) | ||
1938 | { | ||
1939 | char buff[10]; | ||
1940 | struct proc_dir_entry *devdir; | ||
1941 | i2o_proc_entry *i2o_pe = NULL; | ||
1942 | |||
1943 | sprintf(buff, "%03x", dev->lct_data.tid); | ||
1944 | |||
1945 | osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff); | ||
1946 | |||
1947 | devdir = proc_mkdir(buff, dir); | ||
1948 | if (!devdir) { | ||
1949 | osm_warn("Could not allocate procdir!\n"); | ||
1950 | return; | ||
1951 | } | ||
1952 | |||
1953 | devdir->data = dev; | ||
1954 | |||
1955 | i2o_proc_create_entries(devdir, generic_dev_entries, dev); | ||
1956 | |||
1957 | /* Add class specific proc entries for this device */ | ||
1958 | switch (dev->lct_data.class_id) { | ||
1959 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
1960 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
1961 | i2o_pe = rbs_dev_entries; | ||
1962 | break; | ||
1963 | default: | ||
1964 | break; | ||
1965 | } | ||
1966 | if (i2o_pe) | ||
1967 | i2o_proc_create_entries(devdir, i2o_pe, dev); | ||
1968 | } | ||
1969 | |||
1970 | /** | ||
1971 | * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree | ||
1972 | * @dir: parent proc dir entry | ||
1973 | * @c: I2O controller which should be added | ||
1974 | * | ||
1975 | * Add the entries to the parent proc dir entry. Also each device is added | ||
1976 | * to the controller's proc dir entry. | ||
1977 | * | ||
1978 | * Returns 0 on success or negative error code on failure. | ||
1979 | */ | ||
1980 | static int i2o_proc_iop_add(struct proc_dir_entry *dir, | ||
1981 | struct i2o_controller *c) | ||
1982 | { | ||
1983 | struct proc_dir_entry *iopdir; | ||
1984 | struct i2o_device *dev; | ||
1985 | |||
1986 | osm_debug("adding IOP /proc/i2o/%s\n", c->name); | ||
1987 | |||
1988 | iopdir = proc_mkdir(c->name, dir); | ||
1989 | if (!iopdir) | ||
1990 | return -1; | ||
1991 | |||
1992 | iopdir->data = c; | ||
1993 | |||
1994 | i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c); | ||
1995 | |||
1996 | list_for_each_entry(dev, &c->devices, list) | ||
1997 | i2o_proc_device_add(iopdir, dev); | ||
1998 | |||
1999 | return 0; | ||
2000 | } | ||
2001 | |||
2002 | /** | ||
2003 | * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree | ||
2004 | * @dir: parent proc dir entry | ||
2005 | * @c: I2O controller which should be removed | ||
2006 | * | ||
2007 | * Iterate over each i2o proc entry and search for controller c. If it is | ||
2008 | * found, remove it from the tree. | ||
2009 | */ | ||
2010 | static void i2o_proc_iop_remove(struct proc_dir_entry *dir, | ||
2011 | struct i2o_controller *c) | ||
2012 | { | ||
2013 | struct proc_dir_entry *pe, *tmp; | ||
2014 | |||
2015 | pe = dir->subdir; | ||
2016 | while (pe) { | ||
2017 | tmp = pe->next; | ||
2018 | if (pe->data == c) { | ||
2019 | i2o_proc_subdir_remove(pe); | ||
2020 | remove_proc_entry(pe->name, dir); | ||
2021 | } | ||
2022 | osm_debug("removing IOP /proc/i2o/%s\n", c->name); | ||
2023 | pe = tmp; | ||
2024 | } | ||
2025 | } | ||
2026 | |||
2027 | /** | ||
2028 | * i2o_proc_fs_create - Create the i2o proc fs. | ||
2029 | * | ||
2030 | * Iterate over each I2O controller and create the entries for it. | ||
2031 | * | ||
2032 | * Returns 0 on success or negative error code on failure. | ||
2033 | */ | ||
2034 | static int __init i2o_proc_fs_create(void) | ||
2035 | { | ||
2036 | struct i2o_controller *c; | ||
2037 | |||
2038 | i2o_proc_dir_root = proc_mkdir("i2o", NULL); | ||
2039 | if (!i2o_proc_dir_root) | ||
2040 | return -1; | ||
2041 | |||
2042 | i2o_proc_dir_root->owner = THIS_MODULE; | ||
2043 | |||
2044 | list_for_each_entry(c, &i2o_controllers, list) | ||
2045 | i2o_proc_iop_add(i2o_proc_dir_root, c); | ||
2046 | |||
2047 | return 0; | ||
2048 | }; | ||
2049 | |||
2050 | /** | ||
2051 | * i2o_proc_fs_destroy - Clean up all i2o proc entries | ||
2052 | * | ||
2053 | * Iterate over each I2O controller and remove the entries for it. | ||
2054 | * | ||
2055 | * Returns 0 on success or negative error code on failure. | ||
2056 | */ | ||
2057 | static int __exit i2o_proc_fs_destroy(void) | ||
2058 | { | ||
2059 | struct i2o_controller *c; | ||
2060 | |||
2061 | list_for_each_entry(c, &i2o_controllers, list) | ||
2062 | i2o_proc_iop_remove(i2o_proc_dir_root, c); | ||
2063 | |||
2064 | remove_proc_entry("i2o", NULL); | ||
2065 | |||
2066 | return 0; | ||
2067 | }; | ||
2068 | |||
2069 | /** | ||
2070 | * i2o_proc_init - Init function for procfs | ||
2071 | * | ||
2072 | * Registers Proc OSM and creates procfs entries. | ||
2073 | * | ||
2074 | * Returns 0 on success or negative error code on failure. | ||
2075 | */ | ||
2076 | static int __init i2o_proc_init(void) | ||
2077 | { | ||
2078 | int rc; | ||
2079 | |||
2080 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
2081 | |||
2082 | rc = i2o_driver_register(&i2o_proc_driver); | ||
2083 | if (rc) | ||
2084 | return rc; | ||
2085 | |||
2086 | rc = i2o_proc_fs_create(); | ||
2087 | if (rc) { | ||
2088 | i2o_driver_unregister(&i2o_proc_driver); | ||
2089 | return rc; | ||
2090 | } | ||
2091 | |||
2092 | return 0; | ||
2093 | }; | ||
2094 | |||
2095 | /** | ||
2096 | * i2o_proc_exit - Exit function for procfs | ||
2097 | * | ||
2098 | * Unregisters Proc OSM and removes procfs entries. | ||
2099 | */ | ||
2100 | static void __exit i2o_proc_exit(void) | ||
2101 | { | ||
2102 | i2o_driver_unregister(&i2o_proc_driver); | ||
2103 | i2o_proc_fs_destroy(); | ||
2104 | }; | ||
2105 | |||
2106 | MODULE_AUTHOR("Deepak Saxena"); | ||
2107 | MODULE_LICENSE("GPL"); | ||
2108 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
2109 | MODULE_VERSION(OSM_VERSION); | ||
2110 | |||
2111 | module_init(i2o_proc_init); | ||
2112 | module_exit(i2o_proc_exit); | ||
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c new file mode 100644 index 000000000000..43f5875e0be5 --- /dev/null +++ b/drivers/message/i2o/i2o_scsi.c | |||
@@ -0,0 +1,830 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License as published by the | ||
4 | * Free Software Foundation; either version 2, or (at your option) any | ||
5 | * later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, but | ||
8 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
10 | * General Public License for more details. | ||
11 | * | ||
12 | * For the avoidance of doubt the "preferred form" of this code is one which | ||
13 | * is in an open non patent encumbered format. Where cryptographic key signing | ||
14 | * forms part of the process of creating an executable the information | ||
15 | * including keys needed to generate an equivalently functional executable | ||
16 | * are deemed to be part of the source code. | ||
17 | * | ||
18 | * Complications for I2O scsi | ||
19 | * | ||
20 | * o Each (bus,lun) is a logical device in I2O. We keep a map | ||
21 | * table. We spoof failed selection for unmapped units | ||
22 | * o Request sense buffers can come back for free. | ||
23 | * o Scatter gather is a bit dynamic. We have to investigate at | ||
24 | * setup time. | ||
25 | * o Some of our resources are dynamically shared. The i2o core | ||
26 | * needs a message reservation protocol to avoid swap v net | ||
27 | * deadlocking. We need to back off queue requests. | ||
28 | * | ||
29 | * In general the firmware wants to help. Where its help isn't useful for | ||
30 | * performance we just ignore the aid. It's not worth the code, in truth. | ||
31 | * | ||
32 | * Fixes/additions: | ||
33 | * Steve Ralston: | ||
34 | * Scatter gather now works | ||
35 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
36 | * Minor fixes for 2.6. | ||
37 | * | ||
38 | * To Do: | ||
39 | * 64bit cleanups | ||
40 | * Fix the resource management problems. | ||
41 | */ | ||
42 | |||
43 | #include <linux/module.h> | ||
44 | #include <linux/kernel.h> | ||
45 | #include <linux/types.h> | ||
46 | #include <linux/string.h> | ||
47 | #include <linux/ioport.h> | ||
48 | #include <linux/jiffies.h> | ||
49 | #include <linux/interrupt.h> | ||
50 | #include <linux/timer.h> | ||
51 | #include <linux/delay.h> | ||
52 | #include <linux/proc_fs.h> | ||
53 | #include <linux/prefetch.h> | ||
54 | #include <linux/pci.h> | ||
55 | #include <linux/blkdev.h> | ||
56 | #include <linux/i2o.h> | ||
57 | |||
58 | #include <asm/dma.h> | ||
59 | #include <asm/system.h> | ||
60 | #include <asm/io.h> | ||
61 | #include <asm/atomic.h> | ||
62 | |||
63 | #include <scsi/scsi.h> | ||
64 | #include <scsi/scsi_host.h> | ||
65 | #include <scsi/scsi_device.h> | ||
66 | #include <scsi/scsi_cmnd.h> | ||
67 | |||
68 | #define OSM_NAME "scsi-osm" | ||
69 | #define OSM_VERSION "$Rev$" | ||
70 | #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" | ||
71 | |||
72 | static struct i2o_driver i2o_scsi_driver; | ||
73 | |||
74 | static int i2o_scsi_max_id = 16; | ||
75 | static int i2o_scsi_max_lun = 8; | ||
76 | |||
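| /* | ||
| * Per-controller private data, kept in the Scsi_Host's hostdata. The | ||
| * channel[] array is a zero-length trailing member; space for one pointer | ||
| * per SCSI bus adapter port is reserved when the host is allocated in | ||
| * i2o_scsi_host_alloc() below. | ||
| */ | ||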
77 | struct i2o_scsi_host { | ||
78 | struct Scsi_Host *scsi_host; /* pointer to the SCSI host */ | ||
79 | struct i2o_controller *iop; /* pointer to the I2O controller */ | ||
80 | struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */ | ||
81 | }; | ||
82 | |||
83 | static struct scsi_host_template i2o_scsi_host_template; | ||
84 | |||
85 | #define I2O_SCSI_CAN_QUEUE 4 | ||
86 | |||
87 | /* SCSI OSM class handling definition */ | ||
88 | static struct i2o_class_id i2o_scsi_class_id[] = { | ||
89 | {I2O_CLASS_SCSI_PERIPHERAL}, | ||
90 | {I2O_CLASS_END} | ||
91 | }; | ||
92 | |||
93 | static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) | ||
94 | { | ||
95 | struct i2o_scsi_host *i2o_shost; | ||
96 | struct i2o_device *i2o_dev; | ||
97 | struct Scsi_Host *scsi_host; | ||
98 | int max_channel = 0; | ||
99 | u8 type; | ||
100 | int i; | ||
101 | size_t size; | ||
102 | i2o_status_block *sb; | ||
103 | |||
104 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
105 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { | ||
106 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* SCSI bus */ | ||
107 | max_channel++; | ||
108 | } | ||
109 | |||
110 | if (!max_channel) { | ||
111 | osm_warn("no channels found on %s\n", c->name); | ||
112 | return ERR_PTR(-EFAULT); | ||
113 | } | ||
114 | |||
115 | size = max_channel * sizeof(struct i2o_device *) | ||
116 | + sizeof(struct i2o_scsi_host); | ||
117 | |||
118 | scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size); | ||
119 | if (!scsi_host) { | ||
120 | osm_warn("Could not allocate SCSI host\n"); | ||
121 | return ERR_PTR(-ENOMEM); | ||
122 | } | ||
123 | |||
124 | scsi_host->max_channel = max_channel - 1; | ||
125 | scsi_host->max_id = i2o_scsi_max_id; | ||
126 | scsi_host->max_lun = i2o_scsi_max_lun; | ||
127 | scsi_host->this_id = c->unit; | ||
128 | |||
129 | sb = c->status_block.virt; | ||
130 | |||
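| /* | ||
| * inbound_frame_size and the subtracted terms are in 32-bit words: the | ||
| * message header (sizeof/4) plus six SCSI-private words (flags, CDB, | ||
| * length); each simple SG element then takes two words. | ||
| */ | ||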
131 | scsi_host->sg_tablesize = (sb->inbound_frame_size - | ||
132 | sizeof(struct i2o_message) / 4 - 6) / 2; | ||
133 | |||
134 | i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata; | ||
135 | i2o_shost->scsi_host = scsi_host; | ||
136 | i2o_shost->iop = c; | ||
137 | |||
138 | i = 0; | ||
139 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
140 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { | ||
141 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */ | ||
142 | i2o_shost->channel[i++] = i2o_dev; | ||
143 | |||
144 | if (i >= max_channel) | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | return i2o_shost; | ||
149 | }; | ||
150 | |||
151 | /** | ||
152 | * i2o_scsi_get_host - Get an I2O SCSI host | ||
153 | * @c: I2O controller for which to get the SCSI host | ||
154 | * | ||
155 | * Return the SCSI host which was set up for the I2O controller when | ||
156 | * the controller was added, or NULL if no SCSI host has been | ||
157 | * registered for it. | ||
158 | * | ||
159 | * Returns pointer to the I2O SCSI host on success or NULL on failure. | ||
160 | */ | ||
161 | static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c) | ||
162 | { | ||
163 | return c->driver_data[i2o_scsi_driver.context]; | ||
164 | }; | ||
165 | |||
166 | /** | ||
167 | * i2o_scsi_remove - Remove I2O device from SCSI core | ||
168 | * @dev: device which should be removed | ||
169 | * | ||
170 | * Removes the I2O device from the SCSI core again. | ||
171 | * | ||
172 | * Returns 0 on success. | ||
173 | */ | ||
174 | static int i2o_scsi_remove(struct device *dev) | ||
175 | { | ||
176 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
177 | struct i2o_controller *c = i2o_dev->iop; | ||
178 | struct i2o_scsi_host *i2o_shost; | ||
179 | struct scsi_device *scsi_dev; | ||
180 | |||
181 | i2o_shost = i2o_scsi_get_host(c); | ||
182 | |||
183 | shost_for_each_device(scsi_dev, i2o_shost->scsi_host) | ||
184 | if (scsi_dev->hostdata == i2o_dev) { | ||
185 | scsi_remove_device(scsi_dev); | ||
186 | scsi_device_put(scsi_dev); | ||
187 | break; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | }; | ||
192 | |||
193 | /** | ||
194 | * i2o_scsi_probe - verify if dev is an I2O SCSI device and install it | ||
195 | * @dev: device to verify if it is an I2O SCSI device | ||
196 | * | ||
197 | * Retrieve channel, id and lun for the I2O device. If everything goes well, | ||
198 | * register the I2O device as SCSI device on the I2O SCSI controller. | ||
199 | * | ||
200 | * Returns 0 on success or negative error code on failure. | ||
201 | */ | ||
202 | static int i2o_scsi_probe(struct device *dev) | ||
203 | { | ||
204 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
205 | struct i2o_controller *c = i2o_dev->iop; | ||
206 | struct i2o_scsi_host *i2o_shost; | ||
207 | struct Scsi_Host *scsi_host; | ||
208 | struct i2o_device *parent; | ||
209 | struct scsi_device *scsi_dev; | ||
210 | u32 id; | ||
211 | u64 lun; | ||
212 | int channel = -1; | ||
213 | int i; | ||
214 | |||
215 | i2o_shost = i2o_scsi_get_host(c); | ||
216 | if (!i2o_shost) | ||
217 | return -EFAULT; | ||
218 | |||
219 | scsi_host = i2o_shost->scsi_host; | ||
220 | |||
221 | if (i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0) | ||
222 | return -EFAULT; | ||
223 | |||
224 | if (id >= scsi_host->max_id) { | ||
225 | osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, | ||
226 | scsi_host->max_id); | ||
227 | return -EFAULT; | ||
228 | } | ||
229 | |||
230 | if (i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0) | ||
231 | return -EFAULT; | ||
232 | if (lun >= scsi_host->max_lun) { | ||
233 | osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", | ||
234 | (unsigned int)lun, scsi_host->max_lun); | ||
235 | return -EFAULT; | ||
236 | } | ||
237 | |||
238 | parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); | ||
239 | if (!parent) { | ||
240 | osm_warn("can not find parent of device %03x\n", | ||
241 | i2o_dev->lct_data.tid); | ||
242 | return -EFAULT; | ||
243 | } | ||
244 | |||
245 | for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++) | ||
246 | if (i2o_shost->channel[i] == parent) | ||
247 | channel = i; | ||
248 | |||
249 | if (channel == -1) { | ||
250 | osm_warn("can not find channel of device %03x\n", | ||
251 | i2o_dev->lct_data.tid); | ||
252 | return -EFAULT; | ||
253 | } | ||
254 | |||
255 | scsi_dev = | ||
256 | __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); | ||
257 | |||
258 | if (!scsi_dev) { | ||
259 | osm_warn("can not add SCSI device %03x\n", | ||
260 | i2o_dev->lct_data.tid); | ||
261 | return -EFAULT; | ||
262 | } | ||
263 | |||
264 | osm_debug("added new SCSI device %03x (cannel: %d, id: %d, lun: %d)\n", | ||
265 | i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); | ||
266 | |||
267 | return 0; | ||
268 | }; | ||
269 | |||
270 | static const char *i2o_scsi_info(struct Scsi_Host *SChost) | ||
271 | { | ||
272 | struct i2o_scsi_host *hostdata; | ||
273 | hostdata = (struct i2o_scsi_host *)SChost->hostdata; | ||
274 | return hostdata->iop->name; | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * i2o_scsi_reply - SCSI OSM message reply handler | ||
279 | * @c: controller issuing the reply | ||
280 | * @m: message id for flushing | ||
281 | * @msg: the message from the controller | ||
282 | * | ||
283 | * Process reply messages (interrupts, in normal SCSI controller terms). | ||
284 | * We can get a variety of messages to process. The normal path is | ||
285 | * scsi command completions. We must also deal with IOP failures, | ||
286 | * the reply to a bus reset and the reply to a LUN query. | ||
287 | * | ||
288 | * Returns 0 on success if the reply should not be flushed, or > 0 on | ||
289 | * success if the reply should be flushed. Returns a negative error | ||
290 | * code on failure, in which case the reply is also flushed. | ||
291 | */ | ||
292 | static int i2o_scsi_reply(struct i2o_controller *c, u32 m, | ||
293 | struct i2o_message *msg) | ||
294 | { | ||
295 | struct scsi_cmnd *cmd; | ||
296 | struct device *dev; | ||
297 | u8 as, ds, st; | ||
298 | |||
299 | cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | ||
300 | |||
301 | if (msg->u.head[0] & (1 << 13)) { | ||
302 | struct i2o_message __iomem *pmsg; /* preserved message */ | ||
303 | u32 pm; | ||
304 | int err = DID_ERROR; | ||
305 | |||
306 | pm = le32_to_cpu(msg->body[3]); | ||
307 | |||
308 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
309 | |||
310 | osm_err("IOP fail.\n"); | ||
311 | osm_err("From %d To %d Cmd %d.\n", | ||
312 | (msg->u.head[1] >> 12) & 0xFFF, | ||
313 | msg->u.head[1] & 0xFFF, msg->u.head[1] >> 24); | ||
314 | osm_err("Failure Code %d.\n", msg->body[0] >> 24); | ||
315 | if (msg->body[0] & (1 << 16)) | ||
316 | osm_err("Format error.\n"); | ||
317 | if (msg->body[0] & (1 << 17)) | ||
318 | osm_err("Path error.\n"); | ||
319 | if (msg->body[0] & (1 << 18)) | ||
320 | osm_err("Path State.\n"); | ||
321 | /* FIXME: tests the same bit as "Path State" above */ | ||
322 | if (msg->body[0] & (1 << 18)) { | ||
323 | osm_err("Congestion.\n"); | ||
324 | err = DID_BUS_BUSY; | ||
325 | } | ||
326 | |||
327 | osm_debug("Failing message is %p.\n", pmsg); | ||
328 | |||
329 | cmd = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt)); | ||
330 | if (!cmd) | ||
331 | return 1; | ||
332 | |||
333 | cmd->result = err << 16; | ||
334 | cmd->scsi_done(cmd); | ||
335 | |||
336 | /* Now flush the message by making it a NOP */ | ||
337 | i2o_msg_nop(c, pm); | ||
338 | |||
339 | return 1; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * Low byte is device status, next is adapter status, | ||
344 | * (then one byte reserved), then request status. | ||
345 | */ | ||
346 | ds = (u8) le32_to_cpu(msg->body[0]); | ||
347 | as = (u8) (le32_to_cpu(msg->body[0]) >> 8); | ||
348 | st = (u8) (le32_to_cpu(msg->body[0]) >> 24); | ||
349 | |||
350 | /* | ||
351 | * Is this a control request coming back - eg an abort ? | ||
352 | */ | ||
353 | |||
354 | if (!cmd) { | ||
355 | if (st) | ||
356 | osm_warn("SCSI abort: %08X", le32_to_cpu(msg->body[0])); | ||
357 | osm_info("SCSI abort completed.\n"); | ||
358 | return -EFAULT; | ||
359 | } | ||
360 | |||
361 | osm_debug("Completed %ld\n", cmd->serial_number); | ||
362 | |||
363 | if (st) { | ||
364 | u32 count, error; | ||
365 | /* An error has occurred */ | ||
366 | |||
367 | switch (st) { | ||
368 | case 0x06: | ||
369 | count = le32_to_cpu(msg->body[1]); | ||
370 | if (count < cmd->underflow) { | ||
371 | int i; | ||
372 | |||
373 | osm_err("SCSI underflow 0x%08X 0x%08X\n", count, | ||
374 | cmd->underflow); | ||
375 | osm_debug("Cmd: "); | ||
376 | for (i = 0; i < 15; i++) | ||
377 | pr_debug("%02X ", cmd->cmnd[i]); | ||
378 | pr_debug(".\n"); | ||
379 | cmd->result = (DID_ERROR << 16); | ||
380 | } | ||
381 | break; | ||
382 | |||
383 | default: | ||
384 | error = le32_to_cpu(msg->body[0]); | ||
385 | |||
386 | osm_err("SCSI error %08x\n", error); | ||
387 | |||
388 | if ((error & 0xff) == 0x02 /*CHECK_CONDITION */ ) { | ||
389 | int i; | ||
390 | u32 len = sizeof(cmd->sense_buffer); | ||
391 | len = (len > 40) ? 40 : len; | ||
392 | // Copy over the sense data | ||
393 | memcpy(cmd->sense_buffer, (void *)&msg->body[3], | ||
394 | len); | ||
395 | for (i = 0; i < len; i++) | ||
396 | osm_info("%02x\n", | ||
397 | cmd->sense_buffer[i]); | ||
398 | if (cmd->sense_buffer[0] == 0x70 | ||
399 | && cmd->sense_buffer[2] == DATA_PROTECT) { | ||
400 | /* This is to handle a failed array */ | ||
401 | cmd->result = (DID_TIME_OUT << 16); | ||
402 | printk(KERN_WARNING "%s: SCSI Data " | ||
403 | "Protect-Device (%d,%d,%d) " | ||
404 | "hba_status=0x%x, dev_status=" | ||
405 | "0x%x, cmd=0x%x\n", c->name, | ||
406 | (u32) cmd->device->channel, | ||
407 | (u32) cmd->device->id, | ||
408 | (u32) cmd->device->lun, | ||
409 | (error >> 8) & 0xff, | ||
410 | error & 0xff, cmd->cmnd[0]); | ||
411 | } else | ||
412 | cmd->result = (DID_ERROR << 16); | ||
413 | |||
414 | break; | ||
415 | } | ||
416 | |||
417 | switch (as) { | ||
418 | case 0x0E: | ||
419 | /* SCSI Reset */ | ||
420 | cmd->result = DID_RESET << 16; | ||
421 | break; | ||
422 | |||
423 | case 0x0F: | ||
424 | cmd->result = DID_PARITY << 16; | ||
425 | break; | ||
426 | |||
427 | default: | ||
428 | cmd->result = DID_ERROR << 16; | ||
429 | break; | ||
430 | } | ||
431 | |||
432 | break; | ||
433 | } | ||
434 | |||
435 | cmd->scsi_done(cmd); | ||
436 | return 1; | ||
437 | } | ||
438 | |||
439 | cmd->result = DID_OK << 16 | ds; | ||
440 | |||
441 | cmd->scsi_done(cmd); | ||
442 | |||
443 | dev = &c->pdev->dev; | ||
444 | if (cmd->use_sg) | ||
445 | dma_unmap_sg(dev, (struct scatterlist *)cmd->buffer, | ||
446 | cmd->use_sg, cmd->sc_data_direction); | ||
447 | else if (cmd->request_bufflen) | ||
448 | dma_unmap_single(dev, (dma_addr_t) ((long)cmd->SCp.ptr), | ||
449 | cmd->request_bufflen, cmd->sc_data_direction); | ||
450 | |||
451 | return 1; | ||
452 | }; | ||
453 | |||
454 | /** | ||
455 | * i2o_scsi_notify_controller_add - Retrieve notifications of added | ||
456 | * controllers | ||
457 | * @c: the controller which was added | ||
458 | * | ||
459 | * If an I2O controller is added, we catch the notification to add a | ||
460 | * corresponding Scsi_Host. | ||
461 | */ | ||
462 | static void i2o_scsi_notify_controller_add(struct i2o_controller *c) | ||
463 | { | ||
464 | struct i2o_scsi_host *i2o_shost; | ||
465 | int rc; | ||
466 | |||
467 | i2o_shost = i2o_scsi_host_alloc(c); | ||
468 | if (IS_ERR(i2o_shost)) { | ||
469 | osm_err("Could not initialize SCSI host\n"); | ||
470 | return; | ||
471 | } | ||
472 | |||
473 | rc = scsi_add_host(i2o_shost->scsi_host, &c->device); | ||
474 | if (rc) { | ||
475 | osm_err("Could not add SCSI host\n"); | ||
476 | scsi_host_put(i2o_shost->scsi_host); | ||
477 | return; | ||
478 | } | ||
479 | |||
480 | c->driver_data[i2o_scsi_driver.context] = i2o_shost; | ||
481 | |||
482 | osm_debug("new I2O SCSI host added\n"); | ||
483 | }; | ||
484 | |||
485 | /** | ||
486 | * i2o_scsi_notify_controller_remove - Retrieve notifications of removed | ||
487 | * controllers | ||
488 | * @c: the controller which was removed | ||
489 | * | ||
490 | * If an I2O controller is removed, we catch the notification to remove the | ||
491 | * corresponding Scsi_Host. | ||
492 | */ | ||
493 | static void i2o_scsi_notify_controller_remove(struct i2o_controller *c) | ||
494 | { | ||
495 | struct i2o_scsi_host *i2o_shost; | ||
496 | i2o_shost = i2o_scsi_get_host(c); | ||
497 | if (!i2o_shost) | ||
498 | return; | ||
499 | |||
500 | c->driver_data[i2o_scsi_driver.context] = NULL; | ||
501 | |||
502 | scsi_remove_host(i2o_shost->scsi_host); | ||
503 | scsi_host_put(i2o_shost->scsi_host); | ||
504 | pr_info("I2O SCSI host removed\n"); | ||
505 | }; | ||
506 | |||
507 | /* SCSI OSM driver struct */ | ||
508 | static struct i2o_driver i2o_scsi_driver = { | ||
509 | .name = OSM_NAME, | ||
510 | .reply = i2o_scsi_reply, | ||
511 | .classes = i2o_scsi_class_id, | ||
512 | .notify_controller_add = i2o_scsi_notify_controller_add, | ||
513 | .notify_controller_remove = i2o_scsi_notify_controller_remove, | ||
514 | .driver = { | ||
515 | .probe = i2o_scsi_probe, | ||
516 | .remove = i2o_scsi_remove, | ||
517 | }, | ||
518 | }; | ||
519 | |||
520 | /** | ||
521 | * i2o_scsi_queuecommand - queue a SCSI command | ||
522 | * @SCpnt: scsi command pointer | ||
523 | * @done: callback for completion | ||
524 | * | ||
525 | * Issue a scsi command asynchronously. Returns 0 on success or | ||
526 | * SCSI_MLQUEUE_HOST_BUSY if we hit an error (normally message queue | ||
527 | * congestion). The only minor complication here is that I2O deals with | ||
528 | * the device addressing so we have to map the bus/dev/lun back to an | ||
529 | * I2O handle as well as faking absent devices ourselves. | ||
530 | * | ||
531 | * Locks: takes the controller lock on error path only | ||
532 | */ | ||
533 | |||
534 | static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | ||
535 | void (*done) (struct scsi_cmnd *)) | ||
536 | { | ||
537 | struct i2o_controller *c; | ||
538 | struct Scsi_Host *host; | ||
539 | struct i2o_device *i2o_dev; | ||
540 | struct device *dev; | ||
541 | int tid; | ||
542 | struct i2o_message __iomem *msg; | ||
543 | u32 m; | ||
544 | u32 scsi_flags, sg_flags; | ||
545 | u32 __iomem *mptr; | ||
546 | u32 __iomem *lenptr; | ||
547 | u32 len, reqlen; | ||
548 | int i; | ||
549 | |||
550 | /* | ||
551 | * Do the incoming paperwork | ||
552 | */ | ||
553 | |||
554 | i2o_dev = SCpnt->device->hostdata; | ||
555 | SCpnt->scsi_done = done; | ||
556 | |||
557 | if (unlikely(!i2o_dev)) { | ||
558 | osm_warn("no I2O device in request\n"); | ||
559 | SCpnt->result = DID_NO_CONNECT << 16; | ||
560 | done(SCpnt); | ||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | host = SCpnt->device->host; | ||
565 | c = i2o_dev->iop; | ||
566 | dev = &c->pdev->dev; | ||
567 | |||
568 | tid = i2o_dev->lct_data.tid; | ||
569 | |||
570 | osm_debug("qcmd: Tid = %03x\n", tid); | ||
571 | osm_debug("Real scsi messages.\n"); | ||
572 | |||
573 | /* | ||
574 | * Obtain an I2O message. If there are none free then | ||
575 | * throw it back to the scsi layer | ||
576 | */ | ||
577 | |||
578 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
579 | if (m == I2O_QUEUE_EMPTY) | ||
580 | return SCSI_MLQUEUE_HOST_BUSY; | ||
581 | |||
582 | /* | ||
583 | * Put together a scsi execscb message | ||
584 | */ | ||
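| /* | ||
| * The private payload built below is: one flags word (direction, | ||
| * disconnect, CDB length), the 16-byte CDB block, a transfer length | ||
| * word and finally the scatter-gather elements. | ||
| */ | ||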
585 | |||
586 | len = SCpnt->request_bufflen; | ||
587 | |||
588 | switch (SCpnt->sc_data_direction) { | ||
589 | case PCI_DMA_NONE: | ||
590 | scsi_flags = 0x00000000; // DATA NO XFER | ||
591 | sg_flags = 0x00000000; | ||
592 | break; | ||
593 | |||
594 | case PCI_DMA_TODEVICE: | ||
595 | scsi_flags = 0x80000000; // DATA OUT (iop-->dev) | ||
596 | sg_flags = 0x14000000; | ||
597 | break; | ||
598 | |||
599 | case PCI_DMA_FROMDEVICE: | ||
600 | scsi_flags = 0x40000000; // DATA IN (iop<--dev) | ||
601 | sg_flags = 0x10000000; | ||
602 | break; | ||
603 | |||
604 | default: | ||
605 | /* Unknown - kill the command */ | ||
606 | SCpnt->result = DID_NO_CONNECT << 16; | ||
607 | done(SCpnt); | ||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | writel(I2O_CMD_SCSI_EXEC << 24 | HOST_TID << 12 | tid, &msg->u.head[1]); | ||
612 | writel(i2o_scsi_driver.context, &msg->u.s.icntxt); | ||
613 | |||
614 | /* We want the SCSI control block back */ | ||
615 | writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt); | ||
616 | |||
617 | /* LSI_920_PCI_QUIRK | ||
618 | * | ||
619 | * Intermittent msg frame word data corruption was observed on | ||
620 | * msg[4] after: | ||
621 | * WRITE, READ-MODIFY-WRITE | ||
622 | * operations. 19990606 -sralston | ||
623 | * | ||
624 | * (Hence we build this word via tag. It's good practice anyway; | ||
625 | * we don't want fetches over PCI needlessly.) | ||
626 | */ | ||
627 | |||
628 | /* Attach tags to the devices */ | ||
629 | /* | ||
630 | if(SCpnt->device->tagged_supported) { | ||
631 | if(SCpnt->tag == HEAD_OF_QUEUE_TAG) | ||
632 | scsi_flags |= 0x01000000; | ||
633 | else if(SCpnt->tag == ORDERED_QUEUE_TAG) | ||
634 | scsi_flags |= 0x01800000; | ||
635 | } | ||
636 | */ | ||
637 | |||
638 | /* Direction, disconnect ok, tag, CDBLen */ | ||
639 | writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, &msg->body[0]); | ||
640 | |||
641 | mptr = &msg->body[1]; | ||
642 | |||
643 | /* Write SCSI command into the message - always 16 byte block */ | ||
644 | memcpy_toio(mptr, SCpnt->cmnd, 16); | ||
645 | mptr += 4; | ||
646 | lenptr = mptr++; /* Remember me - fill in when we know */ | ||
647 | |||
648 | reqlen = 12; // SINGLE SGE | ||
649 | |||
650 | /* Now fill in the SGList and command */ | ||
651 | if (SCpnt->use_sg) { | ||
652 | struct scatterlist *sg; | ||
653 | int sg_count; | ||
654 | |||
655 | sg = SCpnt->request_buffer; | ||
656 | len = 0; | ||
657 | |||
658 | sg_count = dma_map_sg(dev, sg, SCpnt->use_sg, | ||
659 | SCpnt->sc_data_direction); | ||
660 | |||
661 | if (unlikely(sg_count <= 0)) | ||
662 | return -ENOMEM; | ||
663 | |||
664 | for (i = SCpnt->use_sg; i > 0; i--) { | ||
665 | if (i == 1) | ||
666 | sg_flags |= 0xC0000000; | ||
667 | writel(sg_flags | sg_dma_len(sg), mptr++); | ||
668 | writel(sg_dma_address(sg), mptr++); | ||
669 | len += sg_dma_len(sg); | ||
670 | sg++; | ||
671 | } | ||
672 | |||
673 | reqlen = mptr - &msg->u.head[0]; | ||
674 | writel(len, lenptr); | ||
675 | } else { | ||
676 | len = SCpnt->request_bufflen; | ||
677 | |||
678 | writel(len, lenptr); | ||
679 | |||
680 | if (len > 0) { | ||
681 | dma_addr_t dma_addr; | ||
682 | |||
683 | dma_addr = dma_map_single(dev, SCpnt->request_buffer, | ||
684 | SCpnt->request_bufflen, | ||
685 | SCpnt->sc_data_direction); | ||
686 | if (!dma_addr) | ||
687 | return -ENOMEM; | ||
688 | |||
689 | SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr; | ||
690 | sg_flags |= 0xC0000000; | ||
691 | writel(sg_flags | SCpnt->request_bufflen, mptr++); | ||
692 | writel(dma_addr, mptr++); | ||
693 | } else | ||
694 | reqlen = 9; | ||
695 | } | ||
696 | |||
697 | /* Stick the headers on */ | ||
698 | writel(reqlen << 16 | SGL_OFFSET_10, &msg->u.head[0]); | ||
699 | |||
700 | /* Queue the message */ | ||
701 | i2o_msg_post(c, m); | ||
702 | |||
703 | osm_debug("Issued %ld\n", SCpnt->serial_number); | ||
704 | |||
705 | return 0; | ||
706 | }; | ||
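The scatter-gather list built above uses I2O "simple" SGL elements: a flags/length word followed by a 32-bit bus address, with 0xC0000000 ORed into the final element to mark the end of the list. A minimal hedged sketch, not part of the driver (the helper name is made up), using the same flag values as the code above:

	/* Illustrative only: emit one final data-in SGL element into a message. */
	static u32 __iomem *example_put_last_sge(u32 __iomem *mptr,
						 dma_addr_t addr, u32 len)
	{
		/* 0x10000000 = data in (device -> IOP), 0xC0000000 = last element */
		writel(0x10000000 | 0xC0000000 | len, mptr++);
		writel(addr, mptr++);
		return mptr;
	}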
707 | |||
708 | /** | ||
709 | * i2o_scsi_abort - abort a running command | ||
710 | * @SCpnt: command to abort | ||
711 | * | ||
712 | * Ask the I2O controller to abort a command. This is an asynchronous | ||
713 | * process and our callback handler will see the command complete with an | ||
714 | * aborted message if it succeeds. | ||
715 | * | ||
716 | * Returns SUCCESS if the abort completed within the timeout or FAILED | ||
717 | * otherwise. | ||
718 | */ | ||
719 | static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) | ||
720 | { | ||
721 | struct i2o_device *i2o_dev; | ||
722 | struct i2o_controller *c; | ||
723 | struct i2o_message __iomem *msg; | ||
724 | u32 m; | ||
725 | int tid; | ||
726 | int status = FAILED; | ||
727 | |||
728 | osm_warn("Aborting command block.\n"); | ||
729 | |||
730 | i2o_dev = SCpnt->device->hostdata; | ||
731 | c = i2o_dev->iop; | ||
732 | tid = i2o_dev->lct_data.tid; | ||
733 | |||
734 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
735 | if (m == I2O_QUEUE_EMPTY) | ||
736 | return SCSI_MLQUEUE_HOST_BUSY; | ||
737 | |||
738 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
739 | writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid, | ||
740 | &msg->u.head[1]); | ||
741 | writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]); | ||
742 | |||
743 | if (!i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT)) | ||
744 | status = SUCCESS; | ||
745 | |||
746 | return status; | ||
747 | } | ||
748 | |||
749 | /** | ||
750 | * i2o_scsi_bios_param - Invent disk geometry | ||
751 | * @sdev: scsi device | ||
752 | * @dev: block layer device | ||
753 | * @capacity: size in sectors | ||
754 | * @ip: geometry array | ||
755 | * | ||
756 | * This is anyone's guess, quite frankly. We use the same rules | ||
757 | * everyone else appears to, and hope. It seems to work. | ||
758 | */ | ||
759 | |||
760 | static int i2o_scsi_bios_param(struct scsi_device *sdev, | ||
761 | struct block_device *dev, sector_t capacity, | ||
762 | int *ip) | ||
763 | { | ||
764 | int size; | ||
765 | |||
766 | size = capacity; | ||
767 | ip[0] = 64; /* heads */ | ||
768 | ip[1] = 32; /* sectors */ | ||
769 | if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */ | ||
770 | ip[0] = 255; /* heads */ | ||
771 | ip[1] = 63; /* sectors */ | ||
772 | ip[2] = size / (255 * 63); /* cylinders */ | ||
773 | } | ||
774 | return 0; | ||
775 | } | ||
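A hedged worked example of the mapping above (the capacity value is illustrative only):

	/*
	 * capacity = 16777216 sectors  ->  size >> 11 = 8192 > 1024, so the
	 * reported geometry becomes 255 heads, 63 sectors and
	 * 16777216 / (255 * 63) = 1044 cylinders.
	 */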
776 | |||
777 | static struct scsi_host_template i2o_scsi_host_template = { | ||
778 | .proc_name = OSM_NAME, | ||
779 | .name = OSM_DESCRIPTION, | ||
780 | .info = i2o_scsi_info, | ||
781 | .queuecommand = i2o_scsi_queuecommand, | ||
782 | .eh_abort_handler = i2o_scsi_abort, | ||
783 | .bios_param = i2o_scsi_bios_param, | ||
784 | .can_queue = I2O_SCSI_CAN_QUEUE, | ||
785 | .sg_tablesize = 8, | ||
786 | .cmd_per_lun = 6, | ||
787 | .use_clustering = ENABLE_CLUSTERING, | ||
788 | }; | ||
789 | |||
790 | /** | ||
791 | * i2o_scsi_init - SCSI OSM initialization function | ||
792 | * | ||
793 | * Register SCSI OSM into I2O core. | ||
794 | * | ||
795 | * Returns 0 on success or negative error code on failure. | ||
796 | */ | ||
797 | static int __init i2o_scsi_init(void) | ||
798 | { | ||
799 | int rc; | ||
800 | |||
801 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
802 | |||
803 | /* Register SCSI OSM into I2O core */ | ||
804 | rc = i2o_driver_register(&i2o_scsi_driver); | ||
805 | if (rc) { | ||
806 | osm_err("Could not register SCSI driver\n"); | ||
807 | return rc; | ||
808 | } | ||
809 | |||
810 | return 0; | ||
811 | }; | ||
812 | |||
813 | /** | ||
814 | * i2o_scsi_exit - SCSI OSM exit function | ||
815 | * | ||
816 | * Unregisters SCSI OSM from I2O core. | ||
817 | */ | ||
818 | static void __exit i2o_scsi_exit(void) | ||
819 | { | ||
820 | /* Unregister I2O SCSI OSM from I2O core */ | ||
821 | i2o_driver_unregister(&i2o_scsi_driver); | ||
822 | }; | ||
823 | |||
824 | MODULE_AUTHOR("Red Hat Software"); | ||
825 | MODULE_LICENSE("GPL"); | ||
826 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
827 | MODULE_VERSION(OSM_VERSION); | ||
828 | |||
829 | module_init(i2o_scsi_init); | ||
830 | module_exit(i2o_scsi_exit); | ||
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c new file mode 100644 index 000000000000..50c8cedf7a2d --- /dev/null +++ b/drivers/message/i2o/iop.c | |||
@@ -0,0 +1,1327 @@ | |||
1 | /* | ||
2 | * Functions to handle I2O controllers and I2O message handling | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the | ||
14 | * Red Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@redhat.com>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | */ | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/i2o.h> | ||
30 | #include <linux/delay.h> | ||
31 | |||
32 | #define OSM_VERSION "$Rev$" | ||
33 | #define OSM_DESCRIPTION "I2O subsystem" | ||
34 | |||
35 | /* global I2O controller list */ | ||
36 | LIST_HEAD(i2o_controllers); | ||
37 | |||
38 | /* | ||
39 | * global I2O System Table. Contains information about all the IOPs in the | ||
40 | * system. Used to inform IOPs about each other's existence. | ||
41 | */ | ||
42 | static struct i2o_dma i2o_systab; | ||
43 | |||
44 | static int i2o_hrt_get(struct i2o_controller *c); | ||
45 | |||
46 | /* Module internal functions from other sources */ | ||
47 | extern struct i2o_driver i2o_exec_driver; | ||
48 | extern int i2o_exec_lct_get(struct i2o_controller *); | ||
49 | extern void i2o_device_remove(struct i2o_device *); | ||
50 | |||
51 | extern int __init i2o_driver_init(void); | ||
52 | extern void __exit i2o_driver_exit(void); | ||
53 | extern int __init i2o_exec_init(void); | ||
54 | extern void __exit i2o_exec_exit(void); | ||
55 | extern int __init i2o_pci_init(void); | ||
56 | extern void __exit i2o_pci_exit(void); | ||
57 | extern int i2o_device_init(void); | ||
58 | extern void i2o_device_exit(void); | ||
59 | |||
60 | /** | ||
61 | * i2o_msg_nop - Return an unused message back to the IOP | ||
62 | * @c: I2O controller from which the message was created | ||
63 | * @m: message which should be returned | ||
64 | * | ||
65 | * If you fetch a message via i2o_msg_get, and can't use it, you must | ||
66 | * return the message with this function. Otherwise the message frame | ||
67 | * is lost. | ||
68 | */ | ||
69 | void i2o_msg_nop(struct i2o_controller *c, u32 m) | ||
70 | { | ||
71 | struct i2o_message __iomem *msg = c->in_queue.virt + m; | ||
72 | |||
73 | writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
74 | writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
75 | &msg->u.head[1]); | ||
76 | writel(0, &msg->u.head[2]); | ||
77 | writel(0, &msg->u.head[3]); | ||
78 | i2o_msg_post(c, m); | ||
79 | }; | ||
80 | |||
81 | /** | ||
82 | * i2o_msg_get_wait - obtain an I2O message from the IOP | ||
83 | * @c: I2O controller | ||
84 | * @msg: pointer to a I2O message pointer | ||
85 | * @wait: how long to wait (in seconds) until timeout | ||
86 | * | ||
87 | * This function waits up to wait seconds for a message slot to be | ||
88 | * available. | ||
89 | * | ||
90 | * On success the message is returned and the pointer to the message is | ||
91 | * set in msg. The returned message is the physical page frame offset | ||
92 | * address from the read port (see the I2O spec). If no message is | ||
93 | * available, I2O_QUEUE_EMPTY is returned and msg is left untouched. | ||
94 | */ | ||
95 | u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg, | ||
96 | int wait) | ||
97 | { | ||
98 | unsigned long timeout = jiffies + wait * HZ; | ||
99 | u32 m; | ||
100 | |||
101 | while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { | ||
102 | if (time_after(jiffies, timeout)) { | ||
103 | pr_debug("%s: Timeout waiting for message frame.\n", | ||
104 | c->name); | ||
105 | return I2O_QUEUE_EMPTY; | ||
106 | } | ||
107 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
108 | schedule_timeout(1); | ||
109 | } | ||
110 | |||
111 | return m; | ||
112 | }; | ||
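For illustration only (the helper below is not part of the driver): a caller fetches a frame with i2o_msg_get_wait(), fills the header with writel() and posts it, or hands the frame back with i2o_msg_nop() if it decides not to send it.

	/* Hedged sketch: fetch a frame and either post a UtilNOP or return it. */
	static int example_get_and_post(struct i2o_controller *c, int really_send)
	{
		struct i2o_message __iomem *msg;
		u32 m;

		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
		if (m == I2O_QUEUE_EMPTY)
			return -ETIMEDOUT;

		if (!really_send) {
			i2o_msg_nop(c, m);	/* frame must not simply be dropped */
			return 0;
		}

		writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
		writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
		       &msg->u.head[1]);
		writel(0, &msg->u.head[2]);
		writel(0, &msg->u.head[3]);
		i2o_msg_post(c, m);

		return 0;
	}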
113 | |||
114 | #if BITS_PER_LONG == 64 | ||
115 | /** | ||
116 | * i2o_cntxt_list_add - Append a pointer to the context list and return an id | ||
117 | * @c: controller to which the context list belongs | ||
118 | * @ptr: pointer to add to the context list | ||
119 | * | ||
120 | * Because the context field in I2O is only 32 bits wide, on 64-bit | ||
121 | * systems a pointer is too large to fit in it. The i2o_cntxt_list | ||
122 | * functions therefore map pointers to context fields. | ||
123 | * | ||
124 | * Returns context id > 0 on success or 0 on failure. | ||
125 | */ | ||
126 | u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) | ||
127 | { | ||
128 | struct i2o_context_list_element *entry; | ||
129 | unsigned long flags; | ||
130 | |||
131 | if (!ptr) | ||
132 | printk(KERN_ERR "%s: couldn't add NULL pointer to context list!" | ||
133 | "\n", c->name); | ||
134 | |||
135 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); | ||
136 | if (!entry) { | ||
137 | printk(KERN_ERR "%s: Could not allocate memory for context " | ||
138 | "list element\n", c->name); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | entry->ptr = ptr; | ||
143 | entry->timestamp = jiffies; | ||
144 | INIT_LIST_HEAD(&entry->list); | ||
145 | |||
146 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
147 | |||
148 | if (unlikely(atomic_inc_and_test(&c->context_list_counter))) | ||
149 | atomic_inc(&c->context_list_counter); | ||
150 | |||
151 | entry->context = atomic_read(&c->context_list_counter); | ||
152 | |||
153 | list_add(&entry->list, &c->context_list); | ||
154 | |||
155 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
156 | |||
157 | pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr, entry->context); | ||
158 | |||
159 | return entry->context; | ||
160 | }; | ||
161 | |||
162 | /** | ||
163 | * i2o_cntxt_list_remove - Remove a pointer from the context list | ||
164 | * @c: controller to which the context list belongs | ||
165 | * @ptr: pointer which should be removed from the context list | ||
166 | * | ||
167 | * Removes a previously added pointer from the context list and returns | ||
168 | * the matching context id. | ||
169 | * | ||
170 | * Returns context id on success or 0 on failure. | ||
171 | */ | ||
172 | u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr) | ||
173 | { | ||
174 | struct i2o_context_list_element *entry; | ||
175 | u32 context = 0; | ||
176 | unsigned long flags; | ||
177 | |||
178 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
179 | list_for_each_entry(entry, &c->context_list, list) | ||
180 | if (entry->ptr == ptr) { | ||
181 | list_del(&entry->list); | ||
182 | context = entry->context; | ||
183 | kfree(entry); | ||
184 | break; | ||
185 | } | ||
186 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
187 | |||
188 | if (!context) | ||
189 | printk(KERN_WARNING "%s: Could not remove ptr %p: not found\n", | ||
190 | c->name, ptr); | ||
191 | |||
192 | pr_debug("%s: remove ptr from context list %d -> %p\n", c->name, | ||
193 | context, ptr); | ||
194 | |||
195 | return context; | ||
196 | }; | ||
197 | |||
198 | /** | ||
199 | * i2o_cntxt_list_get - Get a pointer from the context list and remove it | ||
200 | * @c: controller to which the context list belongs | ||
201 | * @context: context id to which the pointer belongs | ||
202 | * | ||
203 | * Returns pointer to the matching context id on success or NULL on | ||
204 | * failure. | ||
205 | */ | ||
206 | void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) | ||
207 | { | ||
208 | struct i2o_context_list_element *entry; | ||
209 | unsigned long flags; | ||
210 | void *ptr = NULL; | ||
211 | |||
212 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
213 | list_for_each_entry(entry, &c->context_list, list) | ||
214 | if (entry->context == context) { | ||
215 | list_del(&entry->list); | ||
216 | ptr = entry->ptr; | ||
217 | kfree(entry); | ||
218 | break; | ||
219 | } | ||
220 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
221 | |||
222 | if (!ptr) | ||
223 | printk(KERN_WARNING "%s: context id %d not found\n", c->name, | ||
224 | context); | ||
225 | |||
226 | pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context, | ||
227 | ptr); | ||
228 | |||
229 | return ptr; | ||
230 | }; | ||
231 | |||
232 | /** | ||
233 | * i2o_cntxt_list_get_ptr - Get a context id from the context list | ||
234 | * @c: controller to which the context list belongs | ||
235 | * @ptr: pointer for which the context id should be fetched | ||
236 | * | ||
237 | * Returns the context id which matches the pointer on success or 0 on | ||
238 | * failure. | ||
239 | */ | ||
240 | u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr) | ||
241 | { | ||
242 | struct i2o_context_list_element *entry; | ||
243 | u32 context = 0; | ||
244 | unsigned long flags; | ||
245 | |||
246 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
247 | list_for_each_entry(entry, &c->context_list, list) | ||
248 | if (entry->ptr == ptr) { | ||
249 | context = entry->context; | ||
250 | break; | ||
251 | } | ||
252 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
253 | |||
254 | if (!context) | ||
255 | printk(KERN_WARNING "%s: Could not find context for ptr %p\n", | ||
256 | c->name, ptr); | ||
257 | |||
258 | pr_debug("%s: get context id from context list %p -> %d\n", c->name, | ||
259 | ptr, context); | ||
260 | |||
261 | return context; | ||
262 | }; | ||
263 | #endif | ||
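Illustrative only: on 64-bit builds an OSM maps its per-request pointer to a 32-bit transaction context before posting a message and recovers it in the reply path. A minimal sketch under that assumption (the helper names are made up):

	/* Hedged sketch of the 64-bit context-list round trip. */
	static u32 example_stash_request(struct i2o_controller *c, void *my_request)
	{
		/* Returns an id > 0 suitable for the 32-bit tcntxt field, 0 on error */
		return i2o_cntxt_list_add(c, my_request);
	}

	static void *example_recover_request(struct i2o_controller *c, u32 tcntxt)
	{
		/* Looks up the pointer and removes the list entry in one go */
		return i2o_cntxt_list_get(c, tcntxt);
	}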
264 | |||
265 | /** | ||
266 | * i2o_find_iop - Find an I2O controller by id | ||
267 | * @unit: unit number of the I2O controller to search for | ||
268 | * | ||
269 | * Lookup the I2O controller on the controller list. | ||
270 | * | ||
271 | * Returns pointer to the I2O controller on success or NULL if not found. | ||
272 | */ | ||
273 | struct i2o_controller *i2o_find_iop(int unit) | ||
274 | { | ||
275 | struct i2o_controller *c; | ||
276 | |||
277 | list_for_each_entry(c, &i2o_controllers, list) { | ||
278 | if (c->unit == unit) | ||
279 | return c; | ||
280 | } | ||
281 | |||
282 | return NULL; | ||
283 | }; | ||
284 | |||
285 | /** | ||
286 | * i2o_iop_find_device - Find an I2O device on an I2O controller | ||
287 | * @c: I2O controller where the I2O device hangs on | ||
288 | * @tid: TID of the I2O device to search for | ||
289 | * | ||
290 | * Searches the devices of the I2O controller for a device with TID tid and | ||
291 | * returns it. | ||
292 | * | ||
293 | * Returns a pointer to the I2O device if found, otherwise NULL. | ||
294 | */ | ||
295 | struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) | ||
296 | { | ||
297 | struct i2o_device *dev; | ||
298 | |||
299 | list_for_each_entry(dev, &c->devices, list) | ||
300 | if (dev->lct_data.tid == tid) | ||
301 | return dev; | ||
302 | |||
303 | return NULL; | ||
304 | }; | ||
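A small usage sketch (not part of the driver) combining the two lookups above; the unit and TID values would come from a caller such as the configuration interface:

	/* Hedged sketch: resolve a (unit, tid) pair to an i2o_device, or NULL. */
	static struct i2o_device *example_lookup(int unit, u16 tid)
	{
		struct i2o_controller *c = i2o_find_iop(unit);

		if (!c)
			return NULL;

		return i2o_iop_find_device(c, tid);
	}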
305 | |||
306 | /** | ||
307 | * i2o_iop_quiesce - quiesce controller | ||
308 | * @c: controller | ||
309 | * | ||
310 | * Quiesce an IOP. Causes IOP to make external operation quiescent | ||
311 | * (i2o 'READY' state). Internal operation of the IOP continues normally. | ||
312 | * | ||
313 | * Returns 0 on success or negative error code on failure. | ||
314 | */ | ||
315 | static int i2o_iop_quiesce(struct i2o_controller *c) | ||
316 | { | ||
317 | struct i2o_message __iomem *msg; | ||
318 | u32 m; | ||
319 | i2o_status_block *sb = c->status_block.virt; | ||
320 | int rc; | ||
321 | |||
322 | i2o_status_get(c); | ||
323 | |||
324 | /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ | ||
325 | if ((sb->iop_state != ADAPTER_STATE_READY) && | ||
326 | (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) | ||
327 | return 0; | ||
328 | |||
329 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
330 | if (m == I2O_QUEUE_EMPTY) | ||
331 | return -ETIMEDOUT; | ||
332 | |||
333 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
334 | writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
335 | &msg->u.head[1]); | ||
336 | |||
337 | /* Long timeout needed for quiesce if lots of devices */ | ||
338 | if ((rc = i2o_msg_post_wait(c, m, 240))) | ||
339 | printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n", | ||
340 | c->name, -rc); | ||
341 | else | ||
342 | pr_debug("%s: Quiesced.\n", c->name); | ||
343 | |||
344 | i2o_status_get(c); // Entered READY state | ||
345 | |||
346 | return rc; | ||
347 | }; | ||
348 | |||
349 | /** | ||
350 | * i2o_iop_enable - move controller from ready to OPERATIONAL | ||
351 | * @c: I2O controller | ||
352 | * | ||
353 | * Enable IOP. This allows the IOP to resume external operations and | ||
354 | * reverses the effect of a quiesce. Returns zero or an error code if | ||
355 | * an error occurs. | ||
356 | */ | ||
357 | static int i2o_iop_enable(struct i2o_controller *c) | ||
358 | { | ||
359 | struct i2o_message __iomem *msg; | ||
360 | u32 m; | ||
361 | i2o_status_block *sb = c->status_block.virt; | ||
362 | int rc; | ||
363 | |||
364 | i2o_status_get(c); | ||
365 | |||
366 | /* Enable only allowed on READY state */ | ||
367 | if (sb->iop_state != ADAPTER_STATE_READY) | ||
368 | return -EINVAL; | ||
369 | |||
370 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
371 | if (m == I2O_QUEUE_EMPTY) | ||
372 | return -ETIMEDOUT; | ||
373 | |||
374 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
375 | writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
376 | &msg->u.head[1]); | ||
377 | |||
378 | /* How long of a timeout do we need? */ | ||
379 | if ((rc = i2o_msg_post_wait(c, m, 240))) | ||
380 | printk(KERN_ERR "%s: Could not enable (status=%#x).\n", | ||
381 | c->name, -rc); | ||
382 | else | ||
383 | pr_debug("%s: Enabled.\n", c->name); | ||
384 | |||
385 | i2o_status_get(c); // entered OPERATIONAL state | ||
386 | |||
387 | return rc; | ||
388 | }; | ||
389 | |||
390 | /** | ||
391 | * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system | ||
392 | * | ||
393 | * Quiesce all I2O controllers which are connected to the system. | ||
394 | */ | ||
395 | static inline void i2o_iop_quiesce_all(void) | ||
396 | { | ||
397 | struct i2o_controller *c, *tmp; | ||
398 | |||
399 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { | ||
400 | if (!c->no_quiesce) | ||
401 | i2o_iop_quiesce(c); | ||
402 | } | ||
403 | }; | ||
404 | |||
405 | /** | ||
406 | * i2o_iop_enable_all - Enables all controllers on the system | ||
407 | * | ||
408 | * Enables all I2O controllers which are connected to the system. | ||
409 | */ | ||
410 | static inline void i2o_iop_enable_all(void) | ||
411 | { | ||
412 | struct i2o_controller *c, *tmp; | ||
413 | |||
414 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) | ||
415 | i2o_iop_enable(c); | ||
416 | }; | ||
417 | |||
418 | /** | ||
419 | * i2o_iop_clear - Bring I2O controller into HOLD state | ||
420 | * @c: controller | ||
421 | * | ||
422 | * Clear an IOP to HOLD state, i.e. terminate external operations, clear all | ||
423 | * input queues and prepare for a system restart. IOP's internal operation | ||
424 | * continues normally and the outbound queue is alive. The IOP is not | ||
425 | * expected to rebuild its LCT. | ||
426 | * | ||
427 | * Returns 0 on success or negative error code on failure. | ||
428 | */ | ||
429 | static int i2o_iop_clear(struct i2o_controller *c) | ||
430 | { | ||
431 | struct i2o_message __iomem *msg; | ||
432 | u32 m; | ||
433 | int rc; | ||
434 | |||
435 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
436 | if (m == I2O_QUEUE_EMPTY) | ||
437 | return -ETIMEDOUT; | ||
438 | |||
439 | /* Quiesce all IOPs first */ | ||
440 | i2o_iop_quiesce_all(); | ||
441 | |||
442 | writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
443 | writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
444 | &msg->u.head[1]); | ||
445 | |||
446 | if ((rc = i2o_msg_post_wait(c, m, 30))) | ||
447 | printk(KERN_INFO "%s: Unable to clear (status=%#x).\n", | ||
448 | c->name, -rc); | ||
449 | else | ||
450 | pr_debug("%s: Cleared.\n", c->name); | ||
451 | |||
452 | /* Enable all IOPs */ | ||
453 | i2o_iop_enable_all(); | ||
454 | |||
455 | i2o_status_get(c); | ||
456 | |||
457 | return rc; | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * i2o_iop_reset - reset an I2O controller | ||
462 | * @c: controller to reset | ||
463 | * | ||
464 | * Reset the IOP into INIT state and wait until IOP gets into RESET state. | ||
465 | * Terminate all external operations, clear IOP's inbound and outbound | ||
466 | * queues, terminate all DDMs, and reload the IOP's operating environment | ||
467 | * and all local DDMs. The IOP rebuilds its LCT. | ||
468 | */ | ||
469 | static int i2o_iop_reset(struct i2o_controller *c) | ||
470 | { | ||
471 | u8 *status = c->status.virt; | ||
472 | struct i2o_message __iomem *msg; | ||
473 | u32 m; | ||
474 | unsigned long timeout; | ||
475 | i2o_status_block *sb = c->status_block.virt; | ||
476 | int rc = 0; | ||
477 | |||
478 | pr_debug("%s: Resetting controller\n", c->name); | ||
479 | |||
480 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
481 | if (m == I2O_QUEUE_EMPTY) | ||
482 | return -ETIMEDOUT; | ||
483 | |||
484 | memset(status, 0, 8); | ||
485 | |||
486 | /* Quiesce all IOPs first */ | ||
487 | i2o_iop_quiesce_all(); | ||
488 | |||
489 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
490 | writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
491 | &msg->u.head[1]); | ||
492 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
493 | writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context | ||
494 | writel(0, &msg->body[0]); | ||
495 | writel(0, &msg->body[1]); | ||
496 | writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]); | ||
497 | writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]); | ||
498 | |||
499 | i2o_msg_post(c, m); | ||
500 | |||
501 | /* Wait for a reply */ | ||
502 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; | ||
503 | while (!*status) { | ||
504 | if (time_after(jiffies, timeout)) { | ||
505 | printk(KERN_ERR "%s: IOP reset timeout.\n", c->name); | ||
506 | rc = -ETIMEDOUT; | ||
507 | goto exit; | ||
508 | } | ||
509 | |||
510 | /* Promise bug */ | ||
511 | if (status[1] || status[4]) { | ||
512 | *status = 0; | ||
513 | break; | ||
514 | } | ||
515 | |||
516 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
517 | schedule_timeout(1); | ||
518 | |||
519 | rmb(); | ||
520 | } | ||
521 | |||
522 | if (*status == I2O_CMD_IN_PROGRESS) { | ||
523 | /* | ||
524 | * Once the reset is sent, the IOP goes into the INIT state | ||
525 | * which is indeterminate. We need to wait until the IOP | ||
526 | * has rebooted before we can let the system talk to | ||
527 | * it. We read the inbound Free_List until a message is | ||
528 | * available. If we can't read one in the given amount of | ||
529 | * time, we assume the IOP could not reboot properly. | ||
530 | */ | ||
531 | pr_debug("%s: Reset in progress, waiting for reboot...\n", | ||
532 | c->name); | ||
533 | |||
534 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | ||
535 | while (m == I2O_QUEUE_EMPTY) { | ||
536 | if (time_after(jiffies, timeout)) { | ||
537 | printk(KERN_ERR "%s: IOP reset timeout.\n", | ||
538 | c->name); | ||
539 | rc = -ETIMEDOUT; | ||
540 | goto exit; | ||
541 | } | ||
542 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
543 | schedule_timeout(1); | ||
544 | |||
545 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | ||
546 | } | ||
547 | i2o_msg_nop(c, m); | ||
548 | } | ||
549 | |||
550 | /* from here all quiesce commands are safe */ | ||
551 | c->no_quiesce = 0; | ||
552 | |||
553 | /* If IopReset was rejected or didn't perform reset, try IopClear */ | ||
554 | i2o_status_get(c); | ||
555 | if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) { | ||
556 | printk(KERN_WARNING "%s: Reset rejected, trying to clear\n", | ||
557 | c->name); | ||
558 | i2o_iop_clear(c); | ||
559 | } else | ||
560 | pr_debug("%s: Reset completed.\n", c->name); | ||
561 | |||
562 | exit: | ||
563 | /* Enable all IOPs */ | ||
564 | i2o_iop_enable_all(); | ||
565 | |||
566 | return rc; | ||
567 | }; | ||
568 | |||
569 | /** | ||
570 | * i2o_iop_init_outbound_queue - setup the outbound message queue | ||
571 | * @c: I2O controller | ||
572 | * | ||
573 | * Clear and (re)initialize IOP's outbound queue and post the message | ||
574 | * frames to the IOP. | ||
575 | * | ||
576 | * Returns 0 on success or a negative errno code on failure. | ||
577 | */ | ||
578 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | ||
579 | { | ||
580 | u8 *status = c->status.virt; | ||
581 | u32 m; | ||
582 | struct i2o_message __iomem *msg; | ||
583 | ulong timeout; | ||
584 | int i; | ||
585 | |||
586 | pr_debug("%s: Initializing Outbound Queue...\n", c->name); | ||
587 | |||
588 | memset(status, 0, 4); | ||
589 | |||
590 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
591 | if (m == I2O_QUEUE_EMPTY) | ||
592 | return -ETIMEDOUT; | ||
593 | |||
594 | writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]); | ||
595 | writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
596 | &msg->u.head[1]); | ||
597 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
598 | writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in | ||
599 | Spec? */ | ||
600 | writel(PAGE_SIZE, &msg->body[0]); | ||
601 | writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame | ||
602 | size in words and Initcode */ | ||
603 | writel(0xd0000004, &msg->body[2]); | ||
604 | writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]); | ||
605 | writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]); | ||
606 | |||
607 | i2o_msg_post(c, m); | ||
608 | |||
609 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; | ||
610 | while (*status <= I2O_CMD_IN_PROGRESS) { | ||
611 | if (time_after(jiffies, timeout)) { | ||
612 | printk(KERN_WARNING "%s: Timeout Initializing\n", | ||
613 | c->name); | ||
614 | return -ETIMEDOUT; | ||
615 | } | ||
616 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
617 | schedule_timeout(1); | ||
618 | |||
619 | rmb(); | ||
620 | } | ||
621 | |||
622 | m = c->out_queue.phys; | ||
623 | |||
624 | /* Post frames */ | ||
625 | for (i = 0; i < NMBR_MSG_FRAMES; i++) { | ||
626 | i2o_flush_reply(c, m); | ||
627 | udelay(1); /* Promise */ | ||
628 | m += MSG_FRAME_SIZE * 4; | ||
629 | } | ||
630 | |||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | /** | ||
635 | * i2o_iop_send_nop - send a core NOP message | ||
636 | * @c: controller | ||
637 | * | ||
638 | * Send a no-operation message with a reply set to cause no | ||
639 | * action either. Needed for bringing up promise controllers. | ||
640 | */ | ||
641 | static int i2o_iop_send_nop(struct i2o_controller *c) | ||
642 | { | ||
643 | struct i2o_message __iomem *msg; | ||
644 | u32 m = i2o_msg_get_wait(c, &msg, HZ); | ||
645 | if (m == I2O_QUEUE_EMPTY) | ||
646 | return -ETIMEDOUT; | ||
647 | i2o_msg_nop(c, m); | ||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | /** | ||
652 | * i2o_iop_activate - Bring controller up to HOLD | ||
653 | * @c: controller | ||
654 | * | ||
655 | * This function brings an I2O controller into HOLD state. The adapter | ||
656 | * is reset if necessary and then the queues and resource table are read. | ||
657 | * | ||
658 | * Returns 0 on success or negative error code on failure. | ||
659 | */ | ||
660 | static int i2o_iop_activate(struct i2o_controller *c) | ||
661 | { | ||
662 | struct pci_dev *i960 = NULL; | ||
663 | i2o_status_block *sb = c->status_block.virt; | ||
664 | int rc; | ||
665 | |||
666 | if (c->promise) { | ||
667 | /* Beat up the hardware first of all */ | ||
668 | i960 = | ||
669 | pci_find_slot(c->pdev->bus->number, | ||
670 | PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0)); | ||
671 | if (i960) | ||
672 | pci_write_config_word(i960, 0x42, 0); | ||
673 | |||
674 | /* Follow this sequence precisely or the controller | ||
675 | ceases to perform useful functions until reboot */ | ||
676 | if ((rc = i2o_iop_send_nop(c))) | ||
677 | return rc; | ||
678 | |||
679 | if ((rc = i2o_iop_reset(c))) | ||
680 | return rc; | ||
681 | } | ||
682 | |||
683 | /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ | ||
684 | /* In READY state, Get status */ | ||
685 | |||
686 | rc = i2o_status_get(c); | ||
687 | if (rc) { | ||
688 | printk(KERN_INFO "%s: Unable to obtain status, " | ||
689 | "attempting a reset.\n", c->name); | ||
690 | if (i2o_iop_reset(c)) | ||
691 | return rc; | ||
692 | } | ||
693 | |||
694 | if (sb->i2o_version > I2OVER15) { | ||
695 | printk(KERN_ERR "%s: Not running version 1.5 of the I2O " | ||
696 | "Specification.\n", c->name); | ||
697 | return -ENODEV; | ||
698 | } | ||
699 | |||
700 | switch (sb->iop_state) { | ||
701 | case ADAPTER_STATE_FAULTED: | ||
702 | printk(KERN_CRIT "%s: hardware fault\n", c->name); | ||
703 | return -ENODEV; | ||
704 | |||
705 | case ADAPTER_STATE_READY: | ||
706 | case ADAPTER_STATE_OPERATIONAL: | ||
707 | case ADAPTER_STATE_HOLD: | ||
708 | case ADAPTER_STATE_FAILED: | ||
709 | pr_debug("%s: already running, trying to reset...\n", c->name); | ||
710 | if (i2o_iop_reset(c)) | ||
711 | return -ENODEV; | ||
712 | } | ||
713 | |||
714 | rc = i2o_iop_init_outbound_queue(c); | ||
715 | if (rc) | ||
716 | return rc; | ||
717 | |||
718 | if (c->promise) { | ||
719 | if ((rc = i2o_iop_send_nop(c))) | ||
720 | return rc; | ||
721 | |||
722 | if ((rc = i2o_status_get(c))) | ||
723 | return rc; | ||
724 | |||
725 | if (i960) | ||
726 | pci_write_config_word(i960, 0x42, 0x3FF); | ||
727 | } | ||
728 | |||
729 | /* In HOLD state */ | ||
730 | |||
731 | rc = i2o_hrt_get(c); | ||
732 | |||
733 | return rc; | ||
734 | }; | ||
735 | |||
736 | /** | ||
737 | * i2o_iop_systab_set - Set the I2O System Table of the specified IOP | ||
738 | * @c: I2O controller to which the system table should be sent | ||
739 | * | ||
740 | * Before the systab can be set, i2o_systab_build() must be called. | ||
741 | * | ||
742 | * Returns 0 on success or negative error code on failure. | ||
743 | */ | ||
744 | static int i2o_iop_systab_set(struct i2o_controller *c) | ||
745 | { | ||
746 | struct i2o_message __iomem *msg; | ||
747 | u32 m; | ||
748 | i2o_status_block *sb = c->status_block.virt; | ||
749 | struct device *dev = &c->pdev->dev; | ||
750 | struct resource *root; | ||
751 | int rc; | ||
752 | |||
753 | if (sb->current_mem_size < sb->desired_mem_size) { | ||
754 | struct resource *res = &c->mem_resource; | ||
755 | res->name = c->pdev->bus->name; | ||
756 | res->flags = IORESOURCE_MEM; | ||
757 | res->start = 0; | ||
758 | res->end = 0; | ||
759 | printk(KERN_INFO "%s: requires private memory resources.\n", | ||
760 | c->name); | ||
761 | root = pci_find_parent_resource(c->pdev, res); | ||
762 | if (root == NULL) | ||
763 | printk(KERN_WARNING "%s: Can't find parent resource!\n", | ||
764 | c->name); | ||
765 | if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ | ||
766 | NULL, NULL) >= 0) { | ||
767 | c->mem_alloc = 1; | ||
768 | sb->current_mem_size = 1 + res->end - res->start; | ||
769 | sb->current_mem_base = res->start; | ||
770 | printk(KERN_INFO "%s: allocated %ld bytes of PCI memory" | ||
771 | " at 0x%08lX.\n", c->name, | ||
772 | 1 + res->end - res->start, res->start); | ||
773 | } | ||
774 | } | ||
775 | |||
776 | if (sb->current_io_size < sb->desired_io_size) { | ||
777 | struct resource *res = &c->io_resource; | ||
778 | res->name = c->pdev->bus->name; | ||
779 | res->flags = IORESOURCE_IO; | ||
780 | res->start = 0; | ||
781 | res->end = 0; | ||
782 | printk(KERN_INFO "%s: requires private I/O resources.\n", | ||
783 | c->name); | ||
784 | root = pci_find_parent_resource(c->pdev, res); | ||
785 | if (root == NULL) | ||
786 | printk(KERN_WARNING "%s: Can't find parent resource!\n", | ||
787 | c->name); | ||
788 | if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ | ||
789 | NULL, NULL) >= 0) { | ||
790 | c->io_alloc = 1; | ||
791 | sb->current_io_size = 1 + res->end - res->start; | ||
792 | sb->current_io_base = res->start; | ||
793 | printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at" | ||
794 | " 0x%08lX.\n", c->name, | ||
795 | 1 + res->end - res->start, res->start); | ||
796 | } | ||
797 | } | ||
798 | |||
799 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
800 | if (m == I2O_QUEUE_EMPTY) | ||
801 | return -ETIMEDOUT; | ||
802 | |||
803 | i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, | ||
804 | PCI_DMA_TODEVICE); | ||
805 | if (!i2o_systab.phys) { | ||
806 | i2o_msg_nop(c, m); | ||
807 | return -ENOMEM; | ||
808 | } | ||
809 | |||
810 | writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]); | ||
811 | writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
812 | &msg->u.head[1]); | ||
813 | |||
814 | /* | ||
815 | * Provide three SGL-elements: | ||
816 | * System table (SysTab), Private memory space declaration and | ||
817 | * Private i/o space declaration | ||
818 | * | ||
819 | * FIXME: is this still true? | ||
820 | * Nasty one here. We can't use dma_alloc_coherent to send the | ||
821 | * same table to everyone. We have to go remap it for them all | ||
822 | */ | ||
823 | |||
824 | writel(c->unit + 2, &msg->body[0]); | ||
825 | writel(0, &msg->body[1]); | ||
826 | writel(0x54000000 | i2o_systab.len, &msg->body[2]); | ||
827 | writel(i2o_systab.phys, &msg->body[3]); | ||
828 | writel(0x54000000 | sb->current_mem_size, &msg->body[4]); | ||
829 | writel(sb->current_mem_base, &msg->body[5]); | ||
830 | writel(0xd4000000 | sb->current_io_size, &msg->body[6]); | ||
831 | writel(sb->current_io_base, &msg->body[7]); | ||
832 | |||
833 | rc = i2o_msg_post_wait(c, m, 120); | ||
834 | |||
835 | dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, | ||
836 | PCI_DMA_TODEVICE); | ||
837 | |||
838 | if (rc < 0) | ||
839 | printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n", | ||
840 | c->name, -rc); | ||
841 | else | ||
842 | pr_debug("%s: SysTab set.\n", c->name); | ||
843 | |||
844 | i2o_status_get(c); // Entered READY state | ||
845 | |||
846 | return rc; | ||
847 | } | ||
848 | |||
849 | /** | ||
850 | * i2o_iop_online - Bring a controller online into OPERATIONAL state. | ||
851 | * @c: I2O controller | ||
852 | * | ||
853 | * Send the system table and enable the I2O controller. | ||
854 | * | ||
855 | * Returns 0 on success or negative error code on failure. | ||
856 | */ | ||
857 | static int i2o_iop_online(struct i2o_controller *c) | ||
858 | { | ||
859 | int rc; | ||
860 | |||
861 | rc = i2o_iop_systab_set(c); | ||
862 | if (rc) | ||
863 | return rc; | ||
864 | |||
865 | /* In READY state */ | ||
866 | pr_debug("%s: Attempting to enable...\n", c->name); | ||
867 | rc = i2o_iop_enable(c); | ||
868 | if (rc) | ||
869 | return rc; | ||
870 | |||
871 | return 0; | ||
872 | }; | ||
873 | |||
874 | /** | ||
875 | * i2o_iop_remove - Remove the I2O controller from the I2O core | ||
876 | * @c: I2O controller | ||
877 | * | ||
878 | * Remove the I2O controller from the I2O core. If devices are attached | ||
879 | * to the controller, remove them as well, then reset the controller. | ||
880 | */ | ||
881 | void i2o_iop_remove(struct i2o_controller *c) | ||
882 | { | ||
883 | struct i2o_device *dev, *tmp; | ||
884 | |||
885 | pr_debug("%s: deleting controller\n", c->name); | ||
886 | |||
887 | i2o_driver_notify_controller_remove_all(c); | ||
888 | |||
889 | list_del(&c->list); | ||
890 | |||
891 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | ||
892 | i2o_device_remove(dev); | ||
893 | |||
894 | /* Ask the IOP to switch to RESET state */ | ||
895 | i2o_iop_reset(c); | ||
896 | } | ||
897 | |||
898 | /** | ||
899 | * i2o_systab_build - Build system table | ||
900 | * | ||
901 | * The system table contains information about all the IOPs in the system | ||
902 | * (duh) and is used by the Executives on the IOPs to establish peer2peer | ||
903 | * connections. We're not supporting peer2peer at the moment, but this | ||
904 | * will be needed down the road for things like lan2lan forwarding. | ||
905 | * | ||
906 | * Returns 0 on success or negative error code on failure. | ||
907 | */ | ||
908 | static int i2o_systab_build(void) | ||
909 | { | ||
910 | struct i2o_controller *c, *tmp; | ||
911 | int num_controllers = 0; | ||
912 | u32 change_ind = 0; | ||
913 | int count = 0; | ||
914 | struct i2o_sys_tbl *systab = i2o_systab.virt; | ||
915 | |||
916 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) | ||
917 | num_controllers++; | ||
918 | |||
919 | if (systab) { | ||
920 | change_ind = systab->change_ind; | ||
921 | kfree(i2o_systab.virt); | ||
922 | } | ||
923 | |||
924 | /* Header + IOPs */ | ||
925 | i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * | ||
926 | sizeof(struct i2o_sys_tbl_entry); | ||
927 | |||
928 | systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); | ||
929 | if (!systab) { | ||
930 | printk(KERN_ERR "i2o: unable to allocate memory for System " | ||
931 | "Table\n"); | ||
932 | return -ENOMEM; | ||
933 | } | ||
934 | memset(systab, 0, i2o_systab.len); | ||
935 | |||
936 | systab->version = I2OVERSION; | ||
937 | systab->change_ind = change_ind + 1; | ||
938 | |||
939 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { | ||
940 | i2o_status_block *sb; | ||
941 | |||
942 | if (count >= num_controllers) { | ||
943 | printk(KERN_ERR "i2o: controller added while building " | ||
944 | "system table\n"); | ||
945 | break; | ||
946 | } | ||
947 | |||
948 | sb = c->status_block.virt; | ||
949 | |||
950 | /* | ||
951 | * Get updated IOP state so we have the latest information | ||
952 | * | ||
953 | * We should delete the controller at this point if it | ||
954 | * doesn't respond since if it's not on the system table | ||
955 | * it is technically not part of the I2O subsystem... | ||
956 | */ | ||
957 | if (unlikely(i2o_status_get(c))) { | ||
958 | printk(KERN_ERR "%s: Deleting b/c could not get status" | ||
959 | " while attempting to build system table\n", | ||
960 | c->name); | ||
961 | i2o_iop_remove(c); | ||
962 | continue; // try the next one | ||
963 | } | ||
964 | |||
965 | systab->iops[count].org_id = sb->org_id; | ||
966 | systab->iops[count].iop_id = c->unit + 2; | ||
967 | systab->iops[count].seg_num = 0; | ||
968 | systab->iops[count].i2o_version = sb->i2o_version; | ||
969 | systab->iops[count].iop_state = sb->iop_state; | ||
970 | systab->iops[count].msg_type = sb->msg_type; | ||
971 | systab->iops[count].frame_size = sb->inbound_frame_size; | ||
972 | systab->iops[count].last_changed = change_ind; | ||
973 | systab->iops[count].iop_capabilities = sb->iop_capabilities; | ||
974 | systab->iops[count].inbound_low = i2o_ptr_low(c->post_port); | ||
975 | systab->iops[count].inbound_high = i2o_ptr_high(c->post_port); | ||
976 | |||
977 | count++; | ||
978 | } | ||
979 | |||
980 | systab->num_entries = count; | ||
981 | |||
982 | return 0; | ||
983 | }; | ||
984 | |||
985 | /** | ||
986 | * i2o_parse_hrt - Parse the hardware resource table. | ||
987 | * @c: I2O controller | ||
988 | * | ||
989 | * We don't do anything with it except dumping it (in debug mode). | ||
990 | * | ||
991 | * Returns 0. | ||
992 | */ | ||
993 | static int i2o_parse_hrt(struct i2o_controller *c) | ||
994 | { | ||
995 | i2o_dump_hrt(c); | ||
996 | return 0; | ||
997 | }; | ||
998 | |||
999 | /** | ||
1000 | * i2o_status_get - Get the status block from the I2O controller | ||
1001 | * @c: I2O controller | ||
1002 | * | ||
1003 | * Issue a status query on the controller. This updates the attached | ||
1004 | * status block. The status block can then be accessed through | ||
1005 | * c->status_block. | ||
1006 | * | ||
1007 | * Returns 0 on success or negative error code on failure. | ||
1008 | */ | ||
1009 | int i2o_status_get(struct i2o_controller *c) | ||
1010 | { | ||
1011 | struct i2o_message __iomem *msg; | ||
1012 | u32 m; | ||
1013 | u8 *status_block; | ||
1014 | unsigned long timeout; | ||
1015 | |||
1016 | status_block = (u8 *) c->status_block.virt; | ||
1017 | memset(status_block, 0, sizeof(i2o_status_block)); | ||
1018 | |||
1019 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
1020 | if (m == I2O_QUEUE_EMPTY) | ||
1021 | return -ETIMEDOUT; | ||
1022 | |||
1023 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
1024 | writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
1025 | &msg->u.head[1]); | ||
1026 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
1027 | writel(0, &msg->u.s.tcntxt); // FIXME: use reasonable transaction context | ||
1028 | writel(0, &msg->body[0]); | ||
1029 | writel(0, &msg->body[1]); | ||
1030 | writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]); | ||
1031 | writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]); | ||
1032 | writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ | ||
1033 | |||
1034 | i2o_msg_post(c, m); | ||
1035 | |||
1036 | /* Wait for a reply */ | ||
1037 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; | ||
1038 | while (status_block[87] != 0xFF) { | ||
1039 | if (time_after(jiffies, timeout)) { | ||
1040 | printk(KERN_ERR "%s: Get status timeout.\n", c->name); | ||
1041 | return -ETIMEDOUT; | ||
1042 | } | ||
1043 | |||
1044 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1045 | schedule_timeout(1); | ||
1046 | |||
1047 | rmb(); | ||
1048 | } | ||
1049 | |||
1050 | #ifdef DEBUG | ||
1051 | i2o_debug_state(c); | ||
1052 | #endif | ||
1053 | |||
1054 | return 0; | ||
1055 | } | ||
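As an illustration (the helper is hypothetical), the refreshed state can be read straight from the attached status block once i2o_status_get() returns:

	/* Hedged sketch: refresh the status block and report whether the IOP
	 * is in OPERATIONAL state. */
	static int example_iop_operational(struct i2o_controller *c)
	{
		i2o_status_block *sb = c->status_block.virt;
		int rc = i2o_status_get(c);

		if (rc)
			return rc;

		return sb->iop_state == ADAPTER_STATE_OPERATIONAL;
	}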
1056 | |||
1057 | /** | ||
1058 | * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller | ||
1059 | * @c: I2O controller from which the HRT should be fetched | ||
1060 | * | ||
1061 | * The HRT contains information about possible hidden devices but is | ||
1062 | * mostly useless to us. | ||
1063 | * | ||
1064 | * Returns 0 on success or negative error code on failure. | ||
1065 | */ | ||
1066 | static int i2o_hrt_get(struct i2o_controller *c) | ||
1067 | { | ||
1068 | int rc; | ||
1069 | int i; | ||
1070 | i2o_hrt *hrt = c->hrt.virt; | ||
1071 | u32 size = sizeof(i2o_hrt); | ||
1072 | struct device *dev = &c->pdev->dev; | ||
1073 | |||
1074 | for (i = 0; i < I2O_HRT_GET_TRIES; i++) { | ||
1075 | struct i2o_message __iomem *msg; | ||
1076 | u32 m; | ||
1077 | |||
1078 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
1079 | if (m == I2O_QUEUE_EMPTY) | ||
1080 | return -ETIMEDOUT; | ||
1081 | |||
1082 | writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]); | ||
1083 | writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
1084 | &msg->u.head[1]); | ||
1085 | writel(0xd0000000 | c->hrt.len, &msg->body[0]); | ||
1086 | writel(c->hrt.phys, &msg->body[1]); | ||
1087 | |||
1088 | rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); | ||
1089 | |||
1090 | if (rc < 0) { | ||
1091 | printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n", | ||
1092 | c->name, -rc); | ||
1093 | return rc; | ||
1094 | } | ||
1095 | |||
1096 | size = hrt->num_entries * hrt->entry_len << 2; | ||
1097 | if (size > c->hrt.len) { | ||
1098 | if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL)) | ||
1099 | return -ENOMEM; | ||
1100 | else | ||
1101 | hrt = c->hrt.virt; | ||
1102 | } else | ||
1103 | return i2o_parse_hrt(c); | ||
1104 | } | ||
1105 | |||
1106 | printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n", | ||
1107 | c->name, I2O_HRT_GET_TRIES); | ||
1108 | |||
1109 | return -EBUSY; | ||
1110 | } | ||
1111 | |||
1112 | /** | ||
1113 | * i2o_iop_alloc - Allocate and initialize an i2o_controller struct | ||
1114 | * | ||
1115 | * Allocate the necessary memory for an i2o_controller struct and | ||
1116 | * initialize the lists. | ||
1117 | * | ||
1118 | * Returns a pointer to the I2O controller or an ERR_PTR-encoded error | ||
1119 | * code on failure. | ||
1120 | */ | ||
1121 | struct i2o_controller *i2o_iop_alloc(void) | ||
1122 | { | ||
1123 | static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ | ||
1124 | struct i2o_controller *c; | ||
1125 | |||
1126 | c = kmalloc(sizeof(*c), GFP_KERNEL); | ||
1127 | if (!c) { | ||
1128 | printk(KERN_ERR "i2o: Insufficient memory to allocate an I2O " | ||
1129 | "controller.\n"); | ||
1130 | return ERR_PTR(-ENOMEM); | ||
1131 | } | ||
1132 | memset(c, 0, sizeof(*c)); | ||
1133 | |||
1134 | INIT_LIST_HEAD(&c->devices); | ||
1135 | spin_lock_init(&c->lock); | ||
1136 | init_MUTEX(&c->lct_lock); | ||
1137 | c->unit = unit++; | ||
1138 | sprintf(c->name, "iop%d", c->unit); | ||
1139 | |||
1140 | #if BITS_PER_LONG == 64 | ||
1141 | spin_lock_init(&c->context_list_lock); | ||
1142 | atomic_set(&c->context_list_counter, 0); | ||
1143 | INIT_LIST_HEAD(&c->context_list); | ||
1144 | #endif | ||
1145 | |||
1146 | return c; | ||
1147 | }; | ||
1148 | |||
1149 | /** | ||
1150 | * i2o_iop_free - Free the i2o_controller struct | ||
1151 | * @c: I2O controller to free | ||
1152 | */ | ||
1153 | void i2o_iop_free(struct i2o_controller *c) | ||
1154 | { | ||
1155 | kfree(c); | ||
1156 | }; | ||
1157 | |||
1158 | /** | ||
1159 | * i2o_iop_add - Initialize the I2O controller and add it to the I2O core | ||
1160 | * @c: controller | ||
1161 | * | ||
1162 | * Initialize the I2O controller and, if no error occurs, add it to the | ||
1163 | * I2O core. | ||
1164 | * | ||
1165 | * Returns 0 on success or negative error code on failure. | ||
1166 | */ | ||
1167 | int i2o_iop_add(struct i2o_controller *c) | ||
1168 | { | ||
1169 | int rc; | ||
1170 | |||
1171 | printk(KERN_INFO "%s: Activating I2O controller...\n", c->name); | ||
1172 | printk(KERN_INFO "%s: This may take a few minutes if there are many " | ||
1173 | "devices\n", c->name); | ||
1174 | |||
1175 | if ((rc = i2o_iop_activate(c))) { | ||
1176 | printk(KERN_ERR "%s: could not activate controller\n", | ||
1177 | c->name); | ||
1178 | i2o_iop_reset(c); | ||
1179 | return rc; | ||
1180 | } | ||
1181 | |||
1182 | pr_debug("%s: building sys table...\n", c->name); | ||
1183 | |||
1184 | if ((rc = i2o_systab_build())) { | ||
1185 | i2o_iop_reset(c); | ||
1186 | return rc; | ||
1187 | } | ||
1188 | |||
1189 | pr_debug("%s: online controller...\n", c->name); | ||
1190 | |||
1191 | if ((rc = i2o_iop_online(c))) { | ||
1192 | i2o_iop_reset(c); | ||
1193 | return rc; | ||
1194 | } | ||
1195 | |||
1196 | pr_debug("%s: getting LCT...\n", c->name); | ||
1197 | |||
1198 | if ((rc = i2o_exec_lct_get(c))) { | ||
1199 | i2o_iop_reset(c); | ||
1200 | return rc; | ||
1201 | } | ||
1202 | |||
1203 | list_add(&c->list, &i2o_controllers); | ||
1204 | |||
1205 | i2o_driver_notify_controller_add_all(c); | ||
1206 | |||
1207 | printk(KERN_INFO "%s: Controller added\n", c->name); | ||
1208 | |||
1209 | return 0; | ||
1210 | }; | ||
1211 | |||
1212 | /** | ||
1213 | * i2o_event_register - Turn on/off event notification for an I2O device | ||
1214 | * @dev: I2O device which should receive the event registration request | ||
1215 | * @drv: driver which wants to be notified | ||
1216 | * @tcntxt: transaction context to use with this notifier | ||
1217 | * @evt_mask: mask of events | ||
1218 | * | ||
1219 | * Creates and posts an event registration message to the device. No | ||
1220 | * reply is waited for, or expected. If you do not want further | ||
1221 | * notifications, call i2o_event_register again with an evt_mask of 0. | ||
1222 | * | ||
1223 | * Returns 0 on success or -ETIMEDOUT if no message could be fetched for | ||
1224 | * sending the request. | ||
1225 | */ | ||
1226 | int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, | ||
1227 | int tcntxt, u32 evt_mask) | ||
1228 | { | ||
1229 | struct i2o_controller *c = dev->iop; | ||
1230 | struct i2o_message __iomem *msg; | ||
1231 | u32 m; | ||
1232 | |||
1233 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
1234 | if (m == I2O_QUEUE_EMPTY) | ||
1235 | return -ETIMEDOUT; | ||
1236 | |||
1237 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
1238 | writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data. | ||
1239 | tid, &msg->u.head[1]); | ||
1240 | writel(drv->context, &msg->u.s.icntxt); | ||
1241 | writel(tcntxt, &msg->u.s.tcntxt); | ||
1242 | writel(evt_mask, &msg->body[0]); | ||
1243 | |||
1244 | i2o_msg_post(c, m); | ||
1245 | |||
1246 | return 0; | ||
1247 | }; | ||
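A usage sketch only; the transaction context below is an arbitrary placeholder an OSM would pick so it can match event replies in its reply handler:

	/* Hedged sketch: switch event notifications on, and later off again. */
	static int example_events_on(struct i2o_device *dev, struct i2o_driver *drv)
	{
		return i2o_event_register(dev, drv, 0x5a5a, 0xffffffff);
	}

	static int example_events_off(struct i2o_device *dev, struct i2o_driver *drv)
	{
		/* An evt_mask of 0 stops further notifications */
		return i2o_event_register(dev, drv, 0x5a5a, 0);
	}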
1248 | |||
1249 | /** | ||
1250 | * i2o_iop_init - I2O main initialization function | ||
1251 | * | ||
1252 | * Initialize the I2O drivers (OSM) functions, register the Executive OSM, | ||
1253 | * initialize the I2O PCI part and finally initialize I2O device stuff. | ||
1254 | * | ||
1255 | * Returns 0 on success or negative error code on failure. | ||
1256 | */ | ||
1257 | static int __init i2o_iop_init(void) | ||
1258 | { | ||
1259 | int rc = 0; | ||
1260 | |||
1261 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1262 | |||
1263 | rc = i2o_device_init(); | ||
1264 | if (rc) | ||
1265 | goto exit; | ||
1266 | |||
1267 | rc = i2o_driver_init(); | ||
1268 | if (rc) | ||
1269 | goto device_exit; | ||
1270 | |||
1271 | rc = i2o_exec_init(); | ||
1272 | if (rc) | ||
1273 | goto driver_exit; | ||
1274 | |||
1275 | rc = i2o_pci_init(); | ||
1276 | if (rc < 0) | ||
1277 | goto exec_exit; | ||
1278 | |||
1279 | return 0; | ||
1280 | |||
1281 | exec_exit: | ||
1282 | i2o_exec_exit(); | ||
1283 | |||
1284 | driver_exit: | ||
1285 | i2o_driver_exit(); | ||
1286 | |||
1287 | device_exit: | ||
1288 | i2o_device_exit(); | ||
1289 | |||
1290 | exit: | ||
1291 | return rc; | ||
1292 | } | ||
1293 | |||
1294 | /** | ||
1295 | * i2o_iop_exit - I2O main exit function | ||
1296 | * | ||
1297 | * Removes I2O controllers from the PCI subsystem and shuts down the OSMs. | ||
1298 | */ | ||
1299 | static void __exit i2o_iop_exit(void) | ||
1300 | { | ||
1301 | i2o_pci_exit(); | ||
1302 | i2o_exec_exit(); | ||
1303 | i2o_driver_exit(); | ||
1304 | i2o_device_exit(); | ||
1305 | }; | ||
1306 | |||
1307 | module_init(i2o_iop_init); | ||
1308 | module_exit(i2o_iop_exit); | ||
1309 | |||
1310 | MODULE_AUTHOR("Red Hat Software"); | ||
1311 | MODULE_LICENSE("GPL"); | ||
1312 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1313 | MODULE_VERSION(OSM_VERSION); | ||
1314 | |||
1315 | #if BITS_PER_LONG == 64 | ||
1316 | EXPORT_SYMBOL(i2o_cntxt_list_add); | ||
1317 | EXPORT_SYMBOL(i2o_cntxt_list_get); | ||
1318 | EXPORT_SYMBOL(i2o_cntxt_list_remove); | ||
1319 | EXPORT_SYMBOL(i2o_cntxt_list_get_ptr); | ||
1320 | #endif | ||
1321 | EXPORT_SYMBOL(i2o_msg_get_wait); | ||
1322 | EXPORT_SYMBOL(i2o_msg_nop); | ||
1323 | EXPORT_SYMBOL(i2o_find_iop); | ||
1324 | EXPORT_SYMBOL(i2o_iop_find_device); | ||
1325 | EXPORT_SYMBOL(i2o_event_register); | ||
1326 | EXPORT_SYMBOL(i2o_status_get); | ||
1327 | EXPORT_SYMBOL(i2o_controllers); | ||
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c new file mode 100644 index 000000000000..e772752f056d --- /dev/null +++ b/drivers/message/i2o/pci.c | |||
@@ -0,0 +1,528 @@ | |||
1 | /* | ||
2 | * PCI handling of I2O controller | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the Red | ||
14 | * Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@redhat.com>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
27 | * Support for sysfs included. | ||
28 | */ | ||
29 | |||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/i2o.h> | ||
33 | |||
34 | #ifdef CONFIG_MTRR | ||
35 | #include <asm/mtrr.h> | ||
36 | #endif /* CONFIG_MTRR */ | ||
37 | |||
38 | /* Module internal functions from other sources */ | ||
39 | extern struct i2o_controller *i2o_iop_alloc(void); | ||
40 | extern void i2o_iop_free(struct i2o_controller *); | ||
41 | |||
42 | extern int i2o_iop_add(struct i2o_controller *); | ||
43 | extern void i2o_iop_remove(struct i2o_controller *); | ||
44 | |||
45 | extern int i2o_driver_dispatch(struct i2o_controller *, u32, | ||
46 | struct i2o_message *); | ||
47 | |||
48 | /* PCI device id table for all I2O controllers */ | ||
49 | static struct pci_device_id i2o_pci_ids[] = { | ||
50 | {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)}, | ||
51 | {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)}, | ||
52 | {0} | ||
53 | }; | ||
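A note on these match entries: PCI_DEVICE_CLASS() only fills in the .class/.class_mask fields, so the first entry matches any device advertising the Intelligent I/O base class regardless of its programming interface (interfaces above 1 are rejected later in i2o_pci_probe()), while the second entry picks up the DPT/Adaptec 0xa511 board that the probe routine flags as a "raptor". A sketch of what the class entry is assumed to expand to (hedged, not the authoritative macro expansion):

/* Hedged sketch of the first table entry after PCI_DEVICE_CLASS() expansion. */
static const struct pci_device_id example_i2o_class_entry = {
	.vendor		= PCI_ANY_ID,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	.class		= PCI_CLASS_INTELLIGENT_I2O << 8,	/* base class/subclass */
	.class_mask	= 0xffff00,				/* ignore the prog-if byte */
};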
54 | |||
55 | /** | ||
56 | * i2o_dma_realloc - Realloc DMA memory | ||
57 | * @dev: struct device pointer to the PCI device of the I2O controller | ||
58 | * @addr: pointer to a i2o_dma struct DMA buffer | ||
59 | * @len: new length of memory | ||
60 | * @gfp_mask: GFP mask | ||
61 | * | ||
62 | * If something was already allocated in @addr, free it first. If @len > 0, | ||
63 | * then try to allocate a buffer of that size and write the new addresses | ||
64 | * back into @addr. If @len == 0, only free and set the virtual address to NULL. | ||
65 | * | ||
66 | * Returns 0 on success or a negative error code on failure. | ||
67 | */ | ||
68 | int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len, | ||
69 | unsigned int gfp_mask) | ||
70 | { | ||
71 | i2o_dma_free(dev, addr); | ||
72 | |||
73 | if (len) | ||
74 | return i2o_dma_alloc(dev, addr, len, gfp_mask); | ||
75 | |||
76 | return 0; | ||
77 | } | ||
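Unlike krealloc(), i2o_dma_realloc() does not preserve the old contents: the buffer is freed before the new one is allocated, so any data in it has to be refetched afterwards. A minimal caller sketch under that assumption (the helper name and resize trigger are made up for illustration):

/* Hedged sketch: grow a controller's private LCT buffer to new_len bytes. */
static int example_resize_dlct(struct i2o_controller *c, size_t new_len)
{
	int rc;

	rc = i2o_dma_realloc(&c->pdev->dev, &c->dlct, new_len, GFP_KERNEL);
	if (rc)
		return rc;	/* old buffer is already gone at this point */

	/* the previous LCT contents were discarded, so they must be read
	 * back from the IOP before c->dlct is used again */
	return 0;
}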
78 | |||
79 | /** | ||
80 | * i2o_pci_free - Frees the DMA memory for the I2O controller | ||
81 | * @c: I2O controller to free | ||
82 | * | ||
83 | * Free all allocated DMA memory and unmap the memory-mapped IO regions. If | ||
84 | * MTRR support is enabled, remove the controller's MTRR regions as well. | ||
85 | */ | ||
86 | static void i2o_pci_free(struct i2o_controller *c) | ||
87 | { | ||
88 | struct device *dev; | ||
89 | |||
90 | dev = &c->pdev->dev; | ||
91 | |||
92 | i2o_dma_free(dev, &c->out_queue); | ||
93 | i2o_dma_free(dev, &c->status_block); | ||
94 | if (c->lct) | ||
95 | kfree(c->lct); | ||
96 | i2o_dma_free(dev, &c->dlct); | ||
97 | i2o_dma_free(dev, &c->hrt); | ||
98 | i2o_dma_free(dev, &c->status); | ||
99 | |||
100 | #ifdef CONFIG_MTRR | ||
101 | if (c->mtrr_reg0 >= 0) | ||
102 | mtrr_del(c->mtrr_reg0, 0, 0); | ||
103 | if (c->mtrr_reg1 >= 0) | ||
104 | mtrr_del(c->mtrr_reg1, 0, 0); | ||
105 | #endif | ||
106 | |||
107 | if (c->raptor && c->in_queue.virt) | ||
108 | iounmap(c->in_queue.virt); | ||
109 | |||
110 | if (c->base.virt) | ||
111 | iounmap(c->base.virt); | ||
112 | } | ||
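One detail worth calling out: in_queue is only unmapped on "raptor" controllers because i2o_pci_alloc() otherwise aliases it to the base mapping (c->in_queue = c->base), which is already unmapped here, so unmapping it again would amount to a double iounmap().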
113 | |||
114 | /** | ||
115 | * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller | ||
116 | * @c: I2O controller | ||
117 | * | ||
118 | * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All | ||
119 | * IO mappings are also done here. If MTRR support is enabled, the write | ||
120 | * combining MTRR regions are added here as well. | ||
121 | * | ||
122 | * Returns 0 on success or negative error code on failure. | ||
123 | */ | ||
124 | static int __devinit i2o_pci_alloc(struct i2o_controller *c) | ||
125 | { | ||
126 | struct pci_dev *pdev = c->pdev; | ||
127 | struct device *dev = &pdev->dev; | ||
128 | int i; | ||
129 | |||
130 | for (i = 0; i < 6; i++) { | ||
131 | /* Skip I/O spaces */ | ||
132 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { | ||
133 | if (!c->base.phys) { | ||
134 | c->base.phys = pci_resource_start(pdev, i); | ||
135 | c->base.len = pci_resource_len(pdev, i); | ||
136 | |||
137 | /* | ||
138 | * If we know what card it is, set the size | ||
139 | * correctly. Code is taken from dpt_i2o.c | ||
140 | */ | ||
141 | if (pdev->device == 0xa501) { | ||
142 | if (pdev->subsystem_device >= 0xc032 && | ||
143 | pdev->subsystem_device <= 0xc03b) { | ||
144 | if (c->base.len > 0x400000) | ||
145 | c->base.len = 0x400000; | ||
146 | } else { | ||
147 | if (c->base.len > 0x100000) | ||
148 | c->base.len = 0x100000; | ||
149 | } | ||
150 | } | ||
151 | if (!c->raptor) | ||
152 | break; | ||
153 | } else { | ||
154 | c->in_queue.phys = pci_resource_start(pdev, i); | ||
155 | c->in_queue.len = pci_resource_len(pdev, i); | ||
156 | break; | ||
157 | } | ||
158 | } | ||
159 | } | ||
160 | |||
161 | if (i == 6) { | ||
162 | printk(KERN_ERR "%s: I2O controller has no memory regions" | ||
163 | " defined.\n", c->name); | ||
164 | i2o_pci_free(c); | ||
165 | return -EINVAL; | ||
166 | } | ||
167 | |||
168 | /* Map the I2O controller */ | ||
169 | if (c->raptor) { | ||
170 | printk(KERN_INFO "%s: PCI I2O controller\n", c->name); | ||
171 | printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n", | ||
172 | (unsigned long)c->base.phys, (unsigned long)c->base.len); | ||
173 | printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n", | ||
174 | (unsigned long)c->in_queue.phys, | ||
175 | (unsigned long)c->in_queue.len); | ||
176 | } else | ||
177 | printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n", | ||
178 | c->name, (unsigned long)c->base.phys, | ||
179 | (unsigned long)c->base.len); | ||
180 | |||
181 | c->base.virt = ioremap(c->base.phys, c->base.len); | ||
182 | if (!c->base.virt) { | ||
183 | printk(KERN_ERR "%s: Unable to map controller.\n", c->name); | ||
184 | return -ENOMEM; | ||
185 | } | ||
186 | |||
187 | if (c->raptor) { | ||
188 | c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len); | ||
189 | if (!c->in_queue.virt) { | ||
190 | printk(KERN_ERR "%s: Unable to map controller.\n", | ||
191 | c->name); | ||
192 | i2o_pci_free(c); | ||
193 | return -ENOMEM; | ||
194 | } | ||
195 | } else | ||
196 | c->in_queue = c->base; | ||
197 | |||
198 | c->irq_mask = c->base.virt + 0x34; | ||
199 | c->post_port = c->base.virt + 0x40; | ||
200 | c->reply_port = c->base.virt + 0x44; | ||
201 | |||
202 | #ifdef CONFIG_MTRR | ||
203 | /* Enable Write Combining MTRR for IOP's memory region */ | ||
204 | c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len, | ||
205 | MTRR_TYPE_WRCOMB, 1); | ||
206 | c->mtrr_reg1 = -1; | ||
207 | |||
208 | if (c->mtrr_reg0 < 0) | ||
209 | printk(KERN_WARNING "%s: could not enable write combining " | ||
210 | "MTRR\n", c->name); | ||
211 | else | ||
212 | printk(KERN_INFO "%s: using write combining MTRR\n", c->name); | ||
213 | |||
214 | /* | ||
215 | * If it is an INTEL i960 I/O processor then set the first 64K to | ||
216 | * Uncacheable since the region contains the messaging unit which | ||
217 | * shouldn't be cached. | ||
218 | */ | ||
219 | if ((pdev->vendor == PCI_VENDOR_ID_INTEL || | ||
220 | pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) { | ||
221 | printk(KERN_INFO "%s: MTRR workaround for Intel i960 processor" | ||
222 | "\n", c->name); | ||
223 | c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000, | ||
224 | MTRR_TYPE_UNCACHABLE, 1); | ||
225 | |||
226 | if (c->mtrr_reg1 < 0) { | ||
227 | printk(KERN_WARNING "%s: Error in setting " | ||
228 | "MTRR_TYPE_UNCACHABLE\n", c->name); | ||
229 | mtrr_del(c->mtrr_reg0, c->in_queue.phys, | ||
230 | c->in_queue.len); | ||
231 | c->mtrr_reg0 = -1; | ||
232 | } | ||
233 | } | ||
234 | #endif | ||
235 | |||
236 | if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { | ||
237 | i2o_pci_free(c); | ||
238 | return -ENOMEM; | ||
239 | } | ||
240 | |||
241 | if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) { | ||
242 | i2o_pci_free(c); | ||
243 | return -ENOMEM; | ||
244 | } | ||
245 | |||
246 | if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) { | ||
247 | i2o_pci_free(c); | ||
248 | return -ENOMEM; | ||
249 | } | ||
250 | |||
251 | if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block), | ||
252 | GFP_KERNEL)) { | ||
253 | i2o_pci_free(c); | ||
254 | return -ENOMEM; | ||
255 | } | ||
256 | |||
257 | if (i2o_dma_alloc(dev, &c->out_queue, MSG_POOL_SIZE, GFP_KERNEL)) { | ||
258 | i2o_pci_free(c); | ||
259 | return -ENOMEM; | ||
260 | } | ||
261 | |||
262 | pci_set_drvdata(pdev, c); | ||
263 | |||
264 | return 0; | ||
265 | } | ||
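The I2O_*_READ32/WRITE32 accessors used elsewhere in this file are not defined here; they are assumed to be thin MMIO wrappers around the three pointers set up above (interrupt mask at base + 0x34, inbound post port at 0x40, outbound reply port at 0x44). A sketch of the assumed definitions, not the authoritative ones from include/linux/i2o.h:

#include <asm/io.h>

/* Hedged sketch only; the real macros live in include/linux/i2o.h. */
#define I2O_IRQ_WRITE32(c, val)		writel(val, (c)->irq_mask)
#define I2O_POST_WRITE32(c, val)	writel(val, (c)->post_port)
#define I2O_REPLY_READ32(c)		readl((c)->reply_port)
#define I2O_REPLY_WRITE32(c, val)	writel(val, (c)->reply_port)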
266 | |||
267 | /** | ||
268 | * i2o_pci_interrupt - Interrupt handler for I2O controller | ||
269 | * @irq: interrupt line | ||
270 | * @dev_id: pointer to the I2O controller | ||
271 | * @r: pointer to registers | ||
272 | * | ||
273 | * Handle an interrupt from a PCI based I2O controller. This turns out | ||
274 | * to be rather simple. We keep the controller pointer in the cookie. | ||
275 | */ | ||
276 | static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) | ||
277 | { | ||
278 | struct i2o_controller *c = dev_id; | ||
279 | struct device *dev = &c->pdev->dev; | ||
280 | struct i2o_message *m; | ||
281 | u32 mv; | ||
282 | |||
283 | /* | ||
284 | * Old 960 steppings had a bug in the I2O unit that caused | ||
285 | * the queue to appear empty when it wasn't. | ||
286 | */ | ||
287 | mv = I2O_REPLY_READ32(c); | ||
288 | if (mv == I2O_QUEUE_EMPTY) { | ||
289 | mv = I2O_REPLY_READ32(c); | ||
290 | if (unlikely(mv == I2O_QUEUE_EMPTY)) { | ||
291 | return IRQ_NONE; | ||
292 | } else | ||
293 | pr_debug("%s: 960 bug detected\n", c->name); | ||
294 | } | ||
295 | |||
296 | while (mv != I2O_QUEUE_EMPTY) { | ||
297 | /* | ||
298 | * Map the reply MFA from its bus address to a kernel virtual | ||
299 | * address. Because bus_to_virt() is deprecated, we have to | ||
300 | * calculate the location ourselves from the outbound queue mapping. | ||
301 | */ | ||
302 | m = i2o_msg_out_to_virt(c, mv); | ||
303 | |||
304 | /* | ||
305 | * Ensure this message is seen coherently but cacheably by | ||
306 | * the processor. | ||
307 | */ | ||
308 | dma_sync_single_for_cpu(dev, mv, MSG_FRAME_SIZE * 4, | ||
309 | DMA_FROM_DEVICE); | ||
310 | |||
311 | /* dispatch it */ | ||
312 | if (i2o_driver_dispatch(c, mv, m)) | ||
313 | /* flush it if result != 0 */ | ||
314 | i2o_flush_reply(c, mv); | ||
315 | |||
316 | /* | ||
317 | * That 960 bug again... | ||
318 | */ | ||
319 | mv = I2O_REPLY_READ32(c); | ||
320 | if (mv == I2O_QUEUE_EMPTY) | ||
321 | mv = I2O_REPLY_READ32(c); | ||
322 | } | ||
323 | return IRQ_HANDLED; | ||
324 | } | ||
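The bus_to_virt() comment above refers to translating the reply MFA, a bus address inside the outbound frame pool, back to a kernel virtual address using the out_queue mapping allocated in i2o_pci_alloc(). A sketch of the assumed shape of that helper; the real i2o_msg_out_to_virt(), with range checking, lives in include/linux/i2o.h:

/* Hedged sketch of the MFA-to-virtual translation. */
static inline struct i2o_message *example_msg_out_to_virt(
	struct i2o_controller *c, u32 mfa)
{
	/* mfa lies inside the outbound DMA pool, so offset from its base */
	return (struct i2o_message *)((u8 *) c->out_queue.virt +
				      (mfa - c->out_queue.phys));
}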
325 | |||
326 | /** | ||
327 | * i2o_pci_irq_enable - Allocate interrupt for I2O controller | ||
328 | * @c: i2o_controller that the interrupt should be enabled for | ||
329 | * Allocate an interrupt for the I2O controller, and activate interrupts | ||
330 | * on the I2O controller. | ||
331 | * | ||
332 | * Returns 0 on success or negative error code on failure. | ||
333 | */ | ||
334 | static int i2o_pci_irq_enable(struct i2o_controller *c) | ||
335 | { | ||
336 | struct pci_dev *pdev = c->pdev; | ||
337 | int rc; | ||
338 | |||
339 | I2O_IRQ_WRITE32(c, 0xffffffff); | ||
340 | |||
341 | if (pdev->irq) { | ||
342 | rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ, | ||
343 | c->name, c); | ||
344 | if (rc < 0) { | ||
345 | printk(KERN_ERR "%s: unable to allocate interrupt %d." | ||
346 | "\n", c->name, pdev->irq); | ||
347 | return rc; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | I2O_IRQ_WRITE32(c, 0x00000000); | ||
352 | |||
353 | printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq); | ||
354 | |||
355 | return 0; | ||
356 | } | ||
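The two I2O_IRQ_WRITE32() calls above bracket request_irq(): the assumption, consistent with i2o_pci_irq_disable() below, is that writing 0xffffffff to the mask register masks all controller interrupts while the handler is being installed, and writing 0 unmasks them once the handler is in place.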
357 | |||
358 | /** | ||
359 | * i2o_pci_irq_disable - Free interrupt for I2O controller | ||
360 | * @c: I2O controller | ||
361 | * | ||
362 | * Disable interrupts in I2O controller and then free interrupt. | ||
363 | */ | ||
364 | static void i2o_pci_irq_disable(struct i2o_controller *c) | ||
365 | { | ||
366 | I2O_IRQ_WRITE32(c, 0xffffffff); | ||
367 | |||
368 | if (c->pdev->irq > 0) | ||
369 | free_irq(c->pdev->irq, c); | ||
370 | } | ||
371 | |||
372 | /** | ||
373 | * i2o_pci_probe - Probe the PCI device for an I2O controller | ||
374 | * @dev: PCI device to test | ||
375 | * @id: id which matched with the PCI device id table | ||
376 | * | ||
377 | * Probe the PCI device for any device which is a member of the | ||
378 | * Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We | ||
379 | * attempt to set up each such device and register it with the core. | ||
380 | * | ||
381 | * Returns 0 on success or negative error code on failure. | ||
382 | */ | ||
383 | static int __devinit i2o_pci_probe(struct pci_dev *pdev, | ||
384 | const struct pci_device_id *id) | ||
385 | { | ||
386 | struct i2o_controller *c; | ||
387 | int rc; | ||
388 | |||
389 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); | ||
390 | |||
391 | if ((pdev->class & 0xff) > 1) { | ||
392 | printk(KERN_WARNING "i2o: I2O controller found but does not " | ||
393 | "support I2O 1.5 (skipping).\n"); | ||
394 | return -ENODEV; | ||
395 | } | ||
396 | |||
397 | if ((rc = pci_enable_device(pdev))) { | ||
398 | printk(KERN_WARNING "i2o: I2O controller found but could not be" | ||
399 | " enabled.\n"); | ||
400 | return rc; | ||
401 | } | ||
402 | |||
403 | printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n", | ||
404 | pdev->bus->number, pdev->devfn); | ||
405 | |||
406 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | ||
407 | printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No " | ||
408 | "suitable DMA available!\n", pdev->bus->number, | ||
409 | pdev->devfn); | ||
410 | rc = -ENODEV; | ||
411 | goto disable; | ||
412 | } | ||
413 | |||
414 | pci_set_master(pdev); | ||
415 | |||
416 | c = i2o_iop_alloc(); | ||
417 | if (IS_ERR(c)) { | ||
418 | printk(KERN_ERR "i2o: memory for I2O controller could not be " | ||
419 | "allocated\n"); | ||
420 | rc = PTR_ERR(c); | ||
421 | goto disable; | ||
422 | } | ||
423 | |||
424 | c->pdev = pdev; | ||
425 | c->device = pdev->dev; | ||
426 | |||
427 | /* Cards that fall apart if you hit them with large I/O loads... */ | ||
428 | if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { | ||
429 | c->short_req = 1; | ||
430 | printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n", | ||
431 | c->name); | ||
432 | } | ||
433 | |||
434 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) { | ||
435 | c->promise = 1; | ||
436 | printk(KERN_INFO "%s: Promise workarounds activated.\n", | ||
437 | c->name); | ||
438 | } | ||
439 | |||
440 | /* Cards that go bananas if you quiesce them before you reset them. */ | ||
441 | if (pdev->vendor == PCI_VENDOR_ID_DPT) { | ||
442 | c->no_quiesce = 1; | ||
443 | if (pdev->device == 0xa511) | ||
444 | c->raptor = 1; | ||
445 | } | ||
446 | |||
447 | if ((rc = i2o_pci_alloc(c))) { | ||
448 | printk(KERN_ERR "%s: DMA / IO allocation for I2O controller " | ||
449 | " failed\n", c->name); | ||
450 | goto free_controller; | ||
451 | } | ||
452 | |||
453 | if ((rc = i2o_pci_irq_enable(c))) { | ||
454 | printk(KERN_ERR "%s: unable to enable interrupts for I2O " | ||
455 | "controller\n", c->name); | ||
456 | goto free_pci; | ||
457 | } | ||
458 | |||
459 | if ((rc = i2o_iop_add(c))) | ||
460 | goto uninstall; | ||
461 | |||
462 | return 0; | ||
463 | |||
464 | uninstall: | ||
465 | i2o_pci_irq_disable(c); | ||
466 | |||
467 | free_pci: | ||
468 | i2o_pci_free(c); | ||
469 | |||
470 | free_controller: | ||
471 | i2o_iop_free(c); | ||
472 | |||
473 | disable: | ||
474 | pci_disable_device(pdev); | ||
475 | |||
476 | return rc; | ||
477 | } | ||
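Note that the error labels above unwind strictly in reverse order of setup: interrupt, then PCI mappings and DMA buffers, then the controller structure, and finally pci_disable_device(), mirroring the goto-based cleanup used in i2o_iop_init().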
478 | |||
479 | /** | ||
480 | * i2o_pci_remove - Removes an I2O controller from the system | ||
481 | * @pdev: PCI device of the I2O controller which should be removed | ||
482 | * | ||
483 | * Reset the I2O controller, disable interrupts and remove all allocated | ||
484 | * resources. | ||
485 | */ | ||
486 | static void __devexit i2o_pci_remove(struct pci_dev *pdev) | ||
487 | { | ||
488 | struct i2o_controller *c; | ||
489 | c = pci_get_drvdata(pdev); | ||
490 | |||
491 | i2o_iop_remove(c); | ||
492 | i2o_pci_irq_disable(c); | ||
493 | i2o_pci_free(c); | ||
494 | |||
495 | printk(KERN_INFO "%s: Controller removed.\n", c->name); | ||
496 | |||
497 | i2o_iop_free(c); | ||
498 | pci_disable_device(pdev); | ||
499 | } | ||
500 | |||
501 | /* PCI driver for I2O controller */ | ||
502 | static struct pci_driver i2o_pci_driver = { | ||
503 | .name = "I2O controller", | ||
504 | .id_table = i2o_pci_ids, | ||
505 | .probe = i2o_pci_probe, | ||
506 | .remove = __devexit_p(i2o_pci_remove), | ||
507 | }; | ||
508 | |||
509 | /** | ||
510 | * i2o_pci_init - registers I2O PCI driver in PCI subsystem | ||
511 | * | ||
512 | * Returns a non-negative value on success or a negative error code on failure. | ||
513 | */ | ||
514 | int __init i2o_pci_init(void) | ||
515 | { | ||
516 | return pci_register_driver(&i2o_pci_driver); | ||
517 | } | ||
518 | |||
519 | /** | ||
520 | * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem | ||
521 | */ | ||
522 | void __exit i2o_pci_exit(void) | ||
523 | { | ||
524 | pci_unregister_driver(&i2o_pci_driver); | ||
525 | } | ||
526 | |||
527 | EXPORT_SYMBOL(i2o_dma_realloc); | ||
528 | MODULE_DEVICE_TABLE(pci, i2o_pci_ids); | ||