author     Alan Cox <alan@linux.intel.com>                    2015-02-03 08:18:55 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2015-02-03 18:58:39 -0500
commit     2cbf7fe2d5d32a4747c1f8ad163e886dccad930c (patch)
tree       9d7c6d230459353a94d074271e57ba4f7488a4c9 /drivers/message
parent     178cf7de6f1d3b95407f5a76af249fc924d42576 (diff)
i2o: move to staging
The I2O layer deals with a technology that, to say the least, didn't catch on
in the market.

The only relevant products are some of the AMI MegaRAID controllers - which
supported both I2O and their native mode (the native mode is faster and runs
on Linux), an obscure crypto ethernet card that's now so many years out of
date nobody would use it, and the old DPT controllers, which speak their own
dialect and have their own driver - and ermm... that's about it.

We also know the code isn't in good shape: a patch was recently proposed and
queried as buggy, which in turn showed that the existing code was already
broken by a prior "clean up" and nobody had noticed that either.

It's coding-style robot code, nothing more. Like some forgotten corridor
cleaned relentlessly by a lost Roomba, but where no user has trodden in years.

Move it to staging and then to /dev/null.

The headers remain, as they are shared with dpt_i2o.
Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/message')

-rw-r--r--  drivers/message/Makefile              1
-rw-r--r--  drivers/message/i2o/Kconfig         121
-rw-r--r--  drivers/message/i2o/Makefile         16
-rw-r--r--  drivers/message/i2o/README           98
-rw-r--r--  drivers/message/i2o/README.ioctl    394
-rw-r--r--  drivers/message/i2o/bus-osm.c       176
-rw-r--r--  drivers/message/i2o/config-osm.c     90
-rw-r--r--  drivers/message/i2o/core.h           69
-rw-r--r--  drivers/message/i2o/debug.c         472
-rw-r--r--  drivers/message/i2o/device.c        594
-rw-r--r--  drivers/message/i2o/driver.c        382
-rw-r--r--  drivers/message/i2o/exec-osm.c      612
-rw-r--r--  drivers/message/i2o/i2o_block.c    1228
-rw-r--r--  drivers/message/i2o/i2o_block.h     103
-rw-r--r--  drivers/message/i2o/i2o_config.c   1163
-rw-r--r--  drivers/message/i2o/i2o_proc.c     2045
-rw-r--r--  drivers/message/i2o/i2o_scsi.c      814
-rw-r--r--  drivers/message/i2o/iop.c          1247
-rw-r--r--  drivers/message/i2o/memory.c        313
-rw-r--r--  drivers/message/i2o/pci.c           497
20 files changed, 0 insertions, 10435 deletions
diff --git a/drivers/message/Makefile b/drivers/message/Makefile
index 97ef5a01ad11..755676ded67c 100644
--- a/drivers/message/Makefile
+++ b/drivers/message/Makefile
@@ -2,5 +2,4 @@
 # Makefile for MPT based block devices
 #
 
-obj-$(CONFIG_I2O)		+= i2o/
 obj-$(CONFIG_FUSION)		+= fusion/
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
deleted file mode 100644
index 5afa0e393ecf..000000000000
--- a/drivers/message/i2o/Kconfig
+++ /dev/null
@@ -1,121 +0,0 @@
1 | |||
2 | menuconfig I2O | ||
3 | tristate "I2O device support" | ||
4 | depends on PCI | ||
5 | ---help--- | ||
6 | The Intelligent Input/Output (I2O) architecture allows hardware | ||
7 | drivers to be split into two parts: an operating system specific | ||
8 | module called the OSM and a hardware-specific module called the | ||
9 | HDM. The OSM can talk to a whole range of HDMs, and ideally the | ||
10 | HDMs are not OS dependent. This allows for the same HDM driver to | ||
11 | be used under different operating systems if the relevant OSM is in | ||
12 | place. In order for this to work, you need to have an I2O interface | ||
13 | adapter card in your computer. This card contains a special I/O | ||
14 | processor (IOP), thus allowing high speeds since the CPU does not | ||
15 | have to deal with I/O. | ||
16 | |||
17 | If you say Y here, you will get a choice of interface adapter | ||
18 | drivers and OSM's with the following questions. | ||
19 | |||
20 | To compile this support as a module, choose M here: the | ||
21 | module will be called i2o_core. | ||
22 | |||
23 | If unsure, say N. | ||
24 | |||
25 | if I2O | ||
26 | |||
27 | config I2O_LCT_NOTIFY_ON_CHANGES | ||
28 | bool "Enable LCT notification" | ||
29 | default y | ||
30 | ---help--- | ||
31 | Only say N here if you have an I2O controller from SUN. The SUN | ||
32 | firmware doesn't support LCT notification on changes. If this option | ||
33 | is enabled on such a controller the driver will hang up in an endless | ||
34 | loop. On all other controllers say Y. | ||
35 | |||
36 | If unsure, say Y. | ||
37 | |||
38 | config I2O_EXT_ADAPTEC | ||
39 | bool "Enable Adaptec extensions" | ||
40 | default y | ||
41 | ---help--- | ||
42 | Say Y for support of raidutils for Adaptec I2O controllers. You also | ||
43 | have to say Y to "I2O Configuration support", "I2O SCSI OSM" below | ||
44 | and to "SCSI generic support" under "SCSI device configuration". | ||
45 | |||
46 | config I2O_EXT_ADAPTEC_DMA64 | ||
47 | bool "Enable 64-bit DMA" | ||
48 | depends on I2O_EXT_ADAPTEC && ( 64BIT || HIGHMEM64G ) | ||
49 | default y | ||
50 | ---help--- | ||
51 | Say Y for support of 64-bit DMA transfer mode on Adaptec I2O | ||
52 | controllers. | ||
53 | Note: You need at least firmware version 3709. | ||
54 | |||
55 | config I2O_CONFIG | ||
56 | tristate "I2O Configuration support" | ||
57 | depends on VIRT_TO_BUS | ||
58 | ---help--- | ||
59 | Say Y for support of the configuration interface for the I2O adapters. | ||
60 | If you have a RAID controller from Adaptec and you want to use the | ||
61 | raidutils to manage your RAID array, you have to say Y here. | ||
62 | |||
63 | To compile this support as a module, choose M here: the | ||
64 | module will be called i2o_config. | ||
65 | |||
66 | Note: If you want to use the new API you have to download the | ||
67 | i2o_config patch from http://i2o.shadowconnect.com/ | ||
68 | |||
69 | config I2O_CONFIG_OLD_IOCTL | ||
70 | bool "Enable ioctls (OBSOLETE)" | ||
71 | depends on I2O_CONFIG | ||
72 | default y | ||
73 | ---help--- | ||
74 | Enables old ioctls. | ||
75 | |||
76 | config I2O_BUS | ||
77 | tristate "I2O Bus Adapter OSM" | ||
78 | ---help--- | ||
79 | Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM | ||
80 | provides access to the busses on the I2O controller. The main purpose | ||
81 | is to rescan the bus to find new devices. | ||
82 | |||
83 | To compile this support as a module, choose M here: the | ||
84 | module will be called i2o_bus. | ||
85 | |||
86 | config I2O_BLOCK | ||
87 | tristate "I2O Block OSM" | ||
88 | depends on BLOCK | ||
89 | ---help--- | ||
90 | Include support for the I2O Block OSM. The Block OSM presents disk | ||
91 | and other structured block devices to the operating system. If you | ||
92 | are using a RAID controller, you can access the array only through | ||
93 | the Block OSM driver. But it is possible to access the single disks | ||
94 | by the SCSI OSM driver, for example to monitor the disks. | ||
95 | |||
96 | To compile this support as a module, choose M here: the | ||
97 | module will be called i2o_block. | ||
98 | |||
99 | config I2O_SCSI | ||
100 | tristate "I2O SCSI OSM" | ||
101 | depends on SCSI | ||
102 | ---help--- | ||
103 | Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel | ||
104 | I2O controller. You can use both the SCSI and Block OSM together if | ||
105 | you wish. To access a RAID array, you must use the Block OSM driver. | ||
106 | But you could use the SCSI OSM driver to monitor the single disks. | ||
107 | |||
108 | To compile this support as a module, choose M here: the | ||
109 | module will be called i2o_scsi. | ||
110 | |||
111 | config I2O_PROC | ||
112 | tristate "I2O /proc support" | ||
113 | ---help--- | ||
114 | If you say Y here and to "/proc file system support", you will be | ||
115 | able to read I2O related information from the virtual directory | ||
116 | /proc/i2o. | ||
117 | |||
118 | To compile this support as a module, choose M here: the | ||
119 | module will be called i2o_proc. | ||
120 | |||
121 | endif # I2O | ||
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
deleted file mode 100644
index b0982dacfd0a..000000000000
--- a/drivers/message/i2o/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
1 | # | ||
2 | # Makefile for the kernel I2O OSM. | ||
3 | # | ||
4 | # Note : at this point, these files are compiled on all systems. | ||
5 | # In the future, some of these should be built conditionally. | ||
6 | # | ||
7 | |||
8 | i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o memory.o | ||
9 | i2o_bus-y += bus-osm.o | ||
10 | i2o_config-y += config-osm.o | ||
11 | obj-$(CONFIG_I2O) += i2o_core.o | ||
12 | obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o | ||
13 | obj-$(CONFIG_I2O_BUS) += i2o_bus.o | ||
14 | obj-$(CONFIG_I2O_BLOCK) += i2o_block.o | ||
15 | obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o | ||
16 | obj-$(CONFIG_I2O_PROC) += i2o_proc.o | ||
diff --git a/drivers/message/i2o/README b/drivers/message/i2o/README
deleted file mode 100644
index f072a8eb3041..000000000000
--- a/drivers/message/i2o/README
+++ /dev/null
@@ -1,98 +0,0 @@
1 | |||
2 | Linux I2O Support (c) Copyright 1999 Red Hat Software | ||
3 | and others. | ||
4 | |||
5 | This program is free software; you can redistribute it and/or | ||
6 | modify it under the terms of the GNU General Public License | ||
7 | as published by the Free Software Foundation; either version | ||
8 | 2 of the License, or (at your option) any later version. | ||
9 | |||
10 | AUTHORS (so far) | ||
11 | |||
12 | Alan Cox, Building Number Three Ltd. | ||
13 | Core code, SCSI and Block OSMs | ||
14 | |||
15 | Steve Ralston, LSI Logic Corp. | ||
16 | Debugging SCSI and Block OSM | ||
17 | |||
18 | Deepak Saxena, Intel Corp. | ||
19 | Various core/block extensions | ||
20 | /proc interface, bug fixes | ||
21 | Ioctl interfaces for control | ||
22 | Debugging LAN OSM | ||
23 | |||
24 | Philip Rumpf | ||
25 | Fixed assorted dumb SMP locking bugs | ||
26 | |||
27 | Juha Sievanen, University of Helsinki Finland | ||
28 | LAN OSM code | ||
29 | /proc interface to LAN class | ||
30 | Bug fixes | ||
31 | Core code extensions | ||
32 | |||
33 | Auvo Häkkinen, University of Helsinki Finland | ||
34 | LAN OSM code | ||
35 | /Proc interface to LAN class | ||
36 | Bug fixes | ||
37 | Core code extensions | ||
38 | |||
39 | Taneli Vähäkangas, University of Helsinki Finland | ||
40 | Fixes to i2o_config | ||
41 | |||
42 | CREDITS | ||
43 | |||
44 | This work was made possible by | ||
45 | |||
46 | Red Hat Software | ||
47 | Funding for the Building #3 part of the project | ||
48 | |||
49 | Symbios Logic (Now LSI) | ||
50 | Host adapters, hints, known to work platforms when I hit | ||
51 | compatibility problems | ||
52 | |||
53 | BoxHill Corporation | ||
54 | Loan of initial FibreChannel disk array used for development work. | ||
55 | |||
56 | European Commission | ||
57 | Funding the work done by the University of Helsinki | ||
58 | |||
59 | SysKonnect | ||
60 | Loan of FDDI and Gigabit Ethernet cards | ||
61 | |||
62 | ASUSTeK | ||
63 | Loan of I2O motherboard | ||
64 | |||
65 | STATUS: | ||
66 | |||
67 | o The core setup works within limits. | ||
68 | o The scsi layer seems to almost work. | ||
69 | I'm still chasing down the hang bug. | ||
70 | o The block OSM is mostly functional | ||
71 | o LAN OSM works with FDDI and Ethernet cards. | ||
72 | |||
73 | TO DO: | ||
74 | |||
75 | General: | ||
76 | o Provide hidden address space if asked | ||
77 | o Long term message flow control | ||
78 | o PCI IOP's without interrupts are not supported yet | ||
79 | o Push FAIL handling into the core | ||
80 | o DDM control interfaces for module load etc | ||
81 | o Add I2O 2.0 support (Deferred to 2.5 kernel) | ||
82 | |||
83 | Block: | ||
84 | o Multiple major numbers | ||
85 | o Read ahead and cache handling stuff. Talk to Ingo and people | ||
86 | o Power management | ||
87 | o Finish Media changers | ||
88 | |||
89 | SCSI: | ||
90 | o Find the right way to associate drives/luns/busses | ||
91 | |||
92 | Lan: | ||
93 | o Performance tuning | ||
94 | o Test Fibre Channel code | ||
95 | |||
96 | Tape: | ||
97 | o Anyone seen anything implementing this? | ||
98 | (D.S: Will attempt to do so if spare cycles permit) | ||
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
deleted file mode 100644
index 4a7d2ebdfc97..000000000000
--- a/drivers/message/i2o/README.ioctl
+++ /dev/null
@@ -1,394 +0,0 @@
1 | |||
2 | Linux I2O User Space Interface | ||
3 | rev 0.3 - 04/20/99 | ||
4 | |||
5 | ============================================================================= | ||
6 | Originally written by Deepak Saxena(deepak@plexity.net) | ||
7 | Currently maintained by Deepak Saxena(deepak@plexity.net) | ||
8 | ============================================================================= | ||
9 | |||
10 | I. Introduction | ||
11 | |||
12 | The Linux I2O subsystem provides a set of ioctl() commands that can be | ||
13 | utilized by user space applications to communicate with IOPs and devices | ||
14 | on individual IOPs. This document defines the specific ioctl() commands | ||
15 | that are available to the user and provides examples of their uses. | ||
16 | |||
17 | This document assumes the reader is familiar with or has access to the | ||
18 | I2O specification as no I2O message parameters are outlined. For information | ||
19 | on the specification, see http://www.i2osig.org | ||
20 | |||
21 | This document and the I2O user space interface are currently maintained | ||
22 | by Deepak Saxena. Please send all comments, errata, and bug fixes to | ||
23 | deepak@csociety.purdue.edu | ||
24 | |||
25 | II. IOP Access | ||
26 | |||
27 | Access to the I2O subsystem is provided through the device file named | ||
28 | /dev/i2o/ctl. This file is a character file with major number 10 and minor | ||
29 | number 166. It can be created through the following command: | ||
30 | |||
31 | mknod /dev/i2o/ctl c 10 166 | ||
32 | |||
33 | III. Determining the IOP Count | ||
34 | |||
35 | SYNOPSIS | ||
36 | |||
37 | ioctl(fd, I2OGETIOPS, int *count); | ||
38 | |||
39 | u8 count[MAX_I2O_CONTROLLERS]; | ||
40 | |||
41 | DESCRIPTION | ||
42 | |||
43 | This function returns the system's active IOP table. count should | ||
44 | point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon | ||
45 | returning, each entry will contain a non-zero value if the given | ||
46 | IOP unit is active, and zero if it is inactive or non-existent. | ||
47 | |||
48 | RETURN VALUE | ||
49 | |||
50 | Returns 0 if no errors occur, and -1 otherwise. If an error occurs, | ||
51 | errno is set appropriately: | ||
52 | |||
53 | EFAULT Invalid user space pointer was passed | ||
54 | |||
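
    As a rough illustration of section III (not part of the original README.ioctl),
    a minimal user-space sketch might look like the code below. It assumes the i2o
    user header (guessed here as <linux/i2o-dev.h>, or a local copy of it) provides
    the I2OGETIOPS macro and MAX_I2O_CONTROLLERS; the header path and error handling
    are assumptions, not something this document specifies.

	/*
	 * Rough sketch of section III (I2OGETIOPS); assumes the i2o user header
	 * (path guessed below) defines I2OGETIOPS and MAX_I2O_CONTROLLERS.
	 */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2o-dev.h>		/* assumed header location */

	int main(void)
	{
		unsigned char count[MAX_I2O_CONTROLLERS];
		int fd, i;

		fd = open("/dev/i2o/ctl", O_RDONLY);	/* node created with mknod above */
		if (fd < 0) {
			perror("open /dev/i2o/ctl");
			return 1;
		}

		if (ioctl(fd, I2OGETIOPS, count) < 0) {	/* one byte per possible IOP unit */
			perror("I2OGETIOPS");
			close(fd);
			return 1;
		}

		for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
			if (count[i])
				printf("IOP %d is active\n", i);

		close(fd);
		return 0;
	}
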
55 | IV. Getting Hardware Resource Table | ||
56 | |||
57 | SYNOPSIS | ||
58 | |||
59 | ioctl(fd, I2OHRTGET, struct i2o_cmd_hrtlct *hrt); | ||
60 | |||
61 | struct i2o_cmd_hrtlct | ||
62 | { | ||
63 | u32 iop; /* IOP unit number */ | ||
64 | void *resbuf; /* Buffer for result */ | ||
65 | u32 *reslen; /* Buffer length in bytes */ | ||
66 | }; | ||
67 | |||
68 | DESCRIPTION | ||
69 | |||
70 | This function returns the Hardware Resource Table of the IOP specified | ||
71 | by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of | ||
72 | the data is written into *(hrt->reslen). | ||
73 | |||
74 | RETURNS | ||
75 | |||
76 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
77 | is returned and errno is set appropriately: | ||
78 | |||
79 | EFAULT Invalid user space pointer was passed | ||
80 | ENXIO Invalid IOP number | ||
81 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
82 | buffer length is written into *(hrt->reslen) | ||
83 | |||
84 | V. Getting Logical Configuration Table | ||
85 | |||
86 | SYNOPSIS | ||
87 | |||
88 | ioctl(fd, I2OLCTGET, struct i2o_cmd_hrtlct *lct); | ||
89 | |||
90 | struct i2o_cmd_hrtlct | ||
91 | { | ||
92 | u32 iop; /* IOP unit number */ | ||
93 | void *resbuf; /* Buffer for result */ | ||
94 | u32 *reslen; /* Buffer length in bytes */ | ||
95 | }; | ||
96 | |||
97 | DESCRIPTION | ||
98 | |||
99 | This function returns the Logical Configuration Table of the IOP specified | ||
100 | by lct->iop in the buffer pointed to by lct->resbuf. The actual size of | ||
101 | the data is written into *(lct->reslen). | ||
102 | |||
103 | RETURNS | ||
104 | |||
105 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
106 | is returned and errno is set appropriately: | ||
107 | |||
108 | EFAULT Invalid user space pointer was passed | ||
109 | ENXIO Invalid IOP number | ||
110 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
111 | buffer length is written into *(lct->reslen) | ||
112 | |||
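
    A similar sketch for section V (again not part of the original document), fetching
    the LCT and retrying once when the kernel reports ENOBUFS, might look roughly like
    the function below; struct i2o_cmd_hrtlct and I2OLCTGET are taken from the text
    above, while the header path and the initial buffer size are assumptions. The fd
    argument is the descriptor for /dev/i2o/ctl opened as in the earlier sketch.

	/*
	 * Rough sketch of section V (I2OLCTGET) with a single retry on ENOBUFS;
	 * assumes the i2o user header defines struct i2o_cmd_hrtlct and
	 * I2OLCTGET as described above.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/i2o-dev.h>		/* assumed header location */

	static int dump_lct_size(int fd, unsigned int iop)
	{
		struct i2o_cmd_hrtlct lct;
		unsigned int len = 8192;	/* arbitrary first guess at the buffer size */
		void *buf = malloc(len);
		int rc;

		if (!buf)
			return -1;

		lct.iop = iop;
		lct.resbuf = buf;
		lct.reslen = &len;

		rc = ioctl(fd, I2OLCTGET, &lct);
		if (rc < 0 && errno == ENOBUFS) {
			/* required length was written into *(lct.reslen); retry once */
			void *bigger = realloc(buf, len);
			if (!bigger) {
				free(buf);
				return -1;
			}
			buf = bigger;
			lct.resbuf = buf;
			rc = ioctl(fd, I2OLCTGET, &lct);
		}

		if (rc < 0) {
			perror("I2OLCTGET");
			free(buf);
			return -1;
		}

		printf("LCT of IOP %u is %u bytes\n", iop, len);
		free(buf);
		return 0;
	}
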
113 | VI. Setting Parameters | ||
114 | |||
115 | SYNOPSIS | ||
116 | |||
117 | ioctl(fd, I2OPARMSET, struct i2o_cmd_psetget *ops); | ||
118 | |||
119 | struct i2o_cmd_psetget | ||
120 | { | ||
121 | u32 iop; /* IOP unit number */ | ||
122 | u32 tid; /* Target device TID */ | ||
123 | void *opbuf; /* Operation List buffer */ | ||
124 | u32 oplen; /* Operation List buffer length in bytes */ | ||
125 | void *resbuf; /* Result List buffer */ | ||
126 | u32 *reslen; /* Result List buffer length in bytes */ | ||
127 | }; | ||
128 | |||
129 | DESCRIPTION | ||
130 | |||
131 | This function posts a UtilParamsSet message to the device identified | ||
132 | by ops->iop and ops->tid. The operation list for the message is | ||
133 | sent through the ops->opbuf buffer, and the result list is written | ||
134 | into the buffer pointed to by ops->resbuf. The number of bytes | ||
135 | written is placed into *(ops->reslen). | ||
136 | |||
137 | RETURNS | ||
138 | |||
139 | The return value is the size in bytes of the data written into | ||
140 | ops->resbuf if no errors occur. If an error occurs, -1 is returned | ||
141 | and errno is set appropriately: | ||
142 | |||
143 | EFAULT Invalid user space pointer was passed | ||
144 | ENXIO Invalid IOP number | ||
145 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
146 | buffer length is written into *(ops->reslen) | ||
147 | ETIMEDOUT Timeout waiting for reply message | ||
148 | ENOMEM Kernel memory allocation error | ||
149 | |||
150 | A return value of 0 does not mean that the value was actually | ||
151 | changed properly on the IOP. The user should check the result | ||
152 | list to determine the specific status of the transaction. | ||
153 | |||
154 | VII. Getting Parameters | ||
155 | |||
156 | SYNOPSIS | ||
157 | |||
158 | ioctl(fd, I2OPARMGET, struct i2o_cmd_psetget *ops); | ||
159 | |||
160 | struct i2o_cmd_psetget | ||
161 | { | ||
162 | u32 iop; /* IOP unit number */ | ||
163 | u32 tid; /* Target device TID */ | ||
164 | void *opbuf; /* Operation List buffer */ | ||
165 | u32 oplen; /* Operation List buffer length in bytes */ | ||
166 | void *resbuf; /* Result List buffer */ | ||
167 | u32 *reslen; /* Result List buffer length in bytes */ | ||
168 | }; | ||
169 | |||
170 | DESCRIPTION | ||
171 | |||
172 | This function posts a UtilParamsGet message to the device identified | ||
173 | by ops->iop and ops->tid. The operation list for the message is | ||
174 | sent through the ops->opbuf buffer, and the result list is written | ||
175 | into the buffer pointed to by ops->resbuf. The actual size of data | ||
176 | written is placed into *(ops->reslen). | ||
177 | |||
178 | RETURNS | ||
179 | |||
180 | EFAULT Invalid user space pointer was passed | ||
181 | ENXIO Invalid IOP number | ||
182 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
183 | buffer length is written into *(ops->reslen) | ||
184 | ETIMEDOUT Timeout waiting for reply message | ||
185 | ENOMEM Kernel memory allocation error | ||
186 | |||
187 | A return value of 0 does not mean that the value was actually | ||
188 | properly retrieved. The user should check the result list | ||
189 | to determine the specific status of the transaction. | ||
190 | |||
191 | VIII. Downloading Software | ||
192 | |||
193 | SYNOPSIS | ||
194 | |||
195 | ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw); | ||
196 | |||
197 | struct i2o_sw_xfer | ||
198 | { | ||
199 | u32 iop; /* IOP unit number */ | ||
200 | u8 flags; /* DownloadFlags field */ | ||
201 | u8 sw_type; /* Software type */ | ||
202 | u32 sw_id; /* Software ID */ | ||
203 | void *buf; /* Pointer to software buffer */ | ||
204 | u32 *swlen; /* Length of software buffer */ | ||
205 | u32 *maxfrag; /* Number of fragments */ | ||
206 | u32 *curfrag; /* Current fragment number */ | ||
207 | }; | ||
208 | |||
209 | DESCRIPTION | ||
210 | |||
211 | This function downloads a software fragment pointed by sw->buf | ||
212 | to the iop identified by sw->iop. The DownloadFlags, SwID, SwType | ||
213 | and SwSize fields of the ExecSwDownload message are filled in with | ||
214 | the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen). | ||
215 | |||
216 | The fragments _must_ be sent in order and be 8K in size. The last | ||
217 | fragment _may_ be shorter, however. The kernel will compute its | ||
218 | size based on information in the sw->swlen field. | ||
219 | |||
220 | Please note that SW transfers can take a long time. | ||
221 | |||
222 | RETURNS | ||
223 | |||
224 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
225 | is returned and errno is set appropriately: | ||
226 | |||
227 | EFAULT Invalid user space pointer was passed | ||
228 | ENXIO Invalid IOP number | ||
229 | ETIMEDOUT Timeout waiting for reply message | ||
230 | ENOMEM Kernel memory allocation error | ||
231 | |||
232 | IX. Uploading Software | ||
233 | |||
234 | SYNOPSIS | ||
235 | |||
236 | ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw); | ||
237 | |||
238 | struct i2o_sw_xfer | ||
239 | { | ||
240 | u32 iop; /* IOP unit number */ | ||
241 | u8 flags; /* UploadFlags */ | ||
242 | u8 sw_type; /* Software type */ | ||
243 | u32 sw_id; /* Software ID */ | ||
244 | void *buf; /* Pointer to software buffer */ | ||
245 | u32 *swlen; /* Length of software buffer */ | ||
246 | u32 *maxfrag; /* Number of fragments */ | ||
247 | u32 *curfrag; /* Current fragment number */ | ||
248 | }; | ||
249 | |||
250 | DESCRIPTION | ||
251 | |||
252 | This function uploads a software fragment from the IOP identified | ||
253 | by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields. | ||
254 | The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload | ||
255 | message are filled in with the values of sw->flags, sw->sw_id, | ||
256 | sw->sw_type and *(sw->swlen). | ||
257 | |||
258 | The fragments _must_ be requested in order and be 8K in size. The | ||
259 | user is responsible for allocating memory pointed by sw->buf. The | ||
260 | last fragment _may_ be shorter. | ||
261 | |||
262 | Please note that SW transfers can take a long time. | ||
263 | |||
264 | RETURNS | ||
265 | |||
266 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
267 | is returned and errno is set appropriately: | ||
268 | |||
269 | EFAULT Invalid user space pointer was passed | ||
270 | ENXIO Invalid IOP number | ||
271 | ETIMEDOUT Timeout waiting for reply message | ||
272 | ENOMEM Kernel memory allocation error | ||
273 | |||
274 | X. Removing Software | ||
275 | |||
276 | SYNOPSIS | ||
277 | |||
278 | ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw); | ||
279 | |||
280 | struct i2o_sw_xfer | ||
281 | { | ||
282 | u32 iop; /* IOP unit number */ | ||
283 | u8 flags; /* RemoveFlags */ | ||
284 | u8 sw_type; /* Software type */ | ||
285 | u32 sw_id; /* Software ID */ | ||
286 | void *buf; /* Unused */ | ||
287 | u32 *swlen; /* Length of the software data */ | ||
288 | u32 *maxfrag; /* Unused */ | ||
289 | u32 *curfrag; /* Unused */ | ||
290 | }; | ||
291 | |||
292 | DESCRIPTION | ||
293 | |||
294 | This function removes software from the IOP identified by sw->iop. | ||
295 | The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message | ||
296 | are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and | ||
297 | *(sw->swlen). Give zero in *(sw->swlen) if the value is unknown. The IOP uses | ||
298 | the *(sw->swlen) value to verify correct identification of the module to remove. | ||
299 | The actual size of the module is written into *(sw->swlen). | ||
300 | |||
301 | RETURNS | ||
302 | |||
303 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
304 | is returned and errno is set appropriately: | ||
305 | |||
306 | EFAULT Invalid user space pointer was passed | ||
307 | ENXIO Invalid IOP number | ||
308 | ETIMEDOUT Timeout waiting for reply message | ||
309 | ENOMEM Kernel memory allocation error | ||
310 | |||
311 | XI. Validating Configuration | ||
312 | |||
313 | SYNOPSIS | ||
314 | |||
315 | ioctl(fd, I2OVALIDATE, int *iop); | ||
316 | u32 iop; | ||
317 | |||
318 | DESCRIPTION | ||
319 | |||
320 | This function posts an ExecConfigValidate message to the controller | ||
321 | identified by iop. This message indicates that the current | ||
322 | configuration is accepted. The iop changes the status of suspect drivers | ||
323 | to valid and may delete old drivers from its store. | ||
324 | |||
325 | RETURNS | ||
326 | |||
327 | This function returns 0 if no errors occur. If an error occurs, -1 is | ||
328 | returned and errno is set appropriately: | ||
329 | |||
330 | ETIMEDOUT Timeout waiting for reply message | ||
331 | ENXIO Invalid IOP number | ||
332 | |||
333 | XII. Configuration Dialog | ||
334 | |||
335 | SYNOPSIS | ||
336 | |||
337 | ioctl(fd, I2OHTML, struct i2o_html *htquery); | ||
338 | struct i2o_html | ||
339 | { | ||
340 | u32 iop; /* IOP unit number */ | ||
341 | u32 tid; /* Target device ID */ | ||
342 | u32 page; /* HTML page */ | ||
343 | void *resbuf; /* Buffer for reply HTML page */ | ||
344 | u32 *reslen; /* Length in bytes of reply buffer */ | ||
345 | void *qbuf; /* Pointer to HTTP query string */ | ||
346 | u32 qlen; /* Length in bytes of query string buffer */ | ||
347 | }; | ||
348 | |||
349 | DESCRIPTION | ||
350 | |||
351 | This function posts an UtilConfigDialog message to the device identified | ||
352 | by htquery->iop and htquery->tid. The requested HTML page number is | ||
353 | provided by the htquery->page field, and the resultant data is stored | ||
354 | in the buffer pointed to by htquery->resbuf. If there is an HTTP query | ||
355 | string that is to be sent to the device, it should be sent in the buffer | ||
356 | pointed to by htquery->qbuf. If there is no query string, this field | ||
357 | should be set to NULL. The actual size of the reply received is written | ||
358 | into *(htquery->reslen). | ||
359 | |||
360 | RETURNS | ||
361 | |||
362 | This function returns 0 if no errors occur. If an error occurs, -1 | ||
363 | is returned and errno is set appropriately: | ||
364 | |||
365 | EFAULT Invalid user space pointer was passed | ||
366 | ENXIO Invalid IOP number | ||
367 | ENOBUFS Buffer not large enough. If this occurs, the required | ||
368 | buffer length is written into *(htquery->reslen) | ||
369 | ETIMEDOUT Timeout waiting for reply message | ||
370 | ENOMEM Kernel memory allocation error | ||
371 | |||
372 | XIII. Events | ||
373 | |||
374 | This is still being determined. The current idea is to use | ||
375 | the select() interface to allow user apps to periodically poll | ||
376 | the /dev/i2o/ctl device for events. When select() notifies the user | ||
377 | that an event is available, the user would call read() to retrieve | ||
378 | a list of all the events that are pending for the specific device. | ||
379 | |||
380 | ============================================================================= | ||
381 | Revision History | ||
382 | ============================================================================= | ||
383 | |||
384 | Rev 0.1 - 04/01/99 | ||
385 | - Initial revision | ||
386 | |||
387 | Rev 0.2 - 04/06/99 | ||
388 | - Changed return values to match UNIX ioctl() standard. Only return values | ||
389 | are 0 and -1. All errors are reported through errno. | ||
390 | - Added summary of proposed possible event interfaces | ||
391 | |||
392 | Rev 0.3 - 04/20/99 | ||
393 | - Changed all ioctls() to use pointers to user data instead of actual data | ||
394 | - Updated error values to match the code | ||
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
deleted file mode 100644
index c463dc2efc09..000000000000
--- a/drivers/message/i2o/bus-osm.c
+++ /dev/null
@@ -1,176 +0,0 @@
1 | /* | ||
2 | * Bus Adapter OSM | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | |||
19 | #define OSM_NAME "bus-osm" | ||
20 | #define OSM_VERSION "1.317" | ||
21 | #define OSM_DESCRIPTION "I2O Bus Adapter OSM" | ||
22 | |||
23 | static struct i2o_driver i2o_bus_driver; | ||
24 | |||
25 | /* Bus OSM class handling definition */ | ||
26 | static struct i2o_class_id i2o_bus_class_id[] = { | ||
27 | {I2O_CLASS_BUS_ADAPTER}, | ||
28 | {I2O_CLASS_END} | ||
29 | }; | ||
30 | |||
31 | /** | ||
32 | * i2o_bus_scan - Scan the bus for new devices | ||
33 | * @dev: I2O device of the bus, which should be scanned | ||
34 | * | ||
35 | * Scans the bus dev for new / removed devices. After the scan a new LCT | ||
36 | * will be fetched automatically. | ||
37 | * | ||
38 | * Returns 0 on success or negative error code on failure. | ||
39 | */ | ||
40 | static int i2o_bus_scan(struct i2o_device *dev) | ||
41 | { | ||
42 | struct i2o_message *msg; | ||
43 | |||
44 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
45 | if (IS_ERR(msg)) | ||
46 | return -ETIMEDOUT; | ||
47 | |||
48 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
49 | msg->u.head[1] = | ||
50 | cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data. | ||
51 | tid); | ||
52 | |||
53 | return i2o_msg_post_wait(dev->iop, msg, 60); | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * i2o_bus_store_scan - Scan the I2O Bus Adapter | ||
58 | * @d: device which should be scanned | ||
59 | * @attr: device_attribute | ||
60 | * @buf: output buffer | ||
61 | * @count: buffer size | ||
62 | * | ||
63 | * Returns count. | ||
64 | */ | ||
65 | static ssize_t i2o_bus_store_scan(struct device *d, | ||
66 | struct device_attribute *attr, | ||
67 | const char *buf, size_t count) | ||
68 | { | ||
69 | struct i2o_device *i2o_dev = to_i2o_device(d); | ||
70 | int rc; | ||
71 | |||
72 | if ((rc = i2o_bus_scan(i2o_dev))) | ||
73 | osm_warn("bus scan failed %d\n", rc); | ||
74 | |||
75 | return count; | ||
76 | } | ||
77 | |||
78 | /* Bus Adapter OSM device attributes */ | ||
79 | static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan); | ||
80 | |||
81 | /** | ||
82 | * i2o_bus_probe - verify if dev is an I2O Bus Adapter device and install it | ||
83 | * @dev: device to verify if it is an I2O Bus Adapter device | ||
84 | * | ||
85 | * Because we want all Bus Adapters always return 0. | ||
86 | * Except when we fail. Then we are sad. | ||
87 | * | ||
88 | * Returns 0, except when we fail to excel. | ||
89 | */ | ||
90 | static int i2o_bus_probe(struct device *dev) | ||
91 | { | ||
92 | struct i2o_device *i2o_dev = to_i2o_device(get_device(dev)); | ||
93 | int rc; | ||
94 | |||
95 | rc = device_create_file(dev, &dev_attr_scan); | ||
96 | if (rc) | ||
97 | goto err_out; | ||
98 | |||
99 | osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
100 | |||
101 | return 0; | ||
102 | |||
103 | err_out: | ||
104 | put_device(dev); | ||
105 | return rc; | ||
106 | }; | ||
107 | |||
108 | /** | ||
109 | * i2o_bus_remove - remove the I2O Bus Adapter device from the system again | ||
110 | * @dev: I2O Bus Adapter device which should be removed | ||
111 | * | ||
112 | * Always returns 0. | ||
113 | */ | ||
114 | static int i2o_bus_remove(struct device *dev) | ||
115 | { | ||
116 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
117 | |||
118 | device_remove_file(dev, &dev_attr_scan); | ||
119 | |||
120 | put_device(dev); | ||
121 | |||
122 | osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
123 | |||
124 | return 0; | ||
125 | }; | ||
126 | |||
127 | /* Bus Adapter OSM driver struct */ | ||
128 | static struct i2o_driver i2o_bus_driver = { | ||
129 | .name = OSM_NAME, | ||
130 | .classes = i2o_bus_class_id, | ||
131 | .driver = { | ||
132 | .probe = i2o_bus_probe, | ||
133 | .remove = i2o_bus_remove, | ||
134 | }, | ||
135 | }; | ||
136 | |||
137 | /** | ||
138 | * i2o_bus_init - Bus Adapter OSM initialization function | ||
139 | * | ||
140 | * Only register the Bus Adapter OSM in the I2O core. | ||
141 | * | ||
142 | * Returns 0 on success or negative error code on failure. | ||
143 | */ | ||
144 | static int __init i2o_bus_init(void) | ||
145 | { | ||
146 | int rc; | ||
147 | |||
148 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
149 | |||
150 | /* Register Bus Adapter OSM into I2O core */ | ||
151 | rc = i2o_driver_register(&i2o_bus_driver); | ||
152 | if (rc) { | ||
153 | osm_err("Could not register Bus Adapter OSM\n"); | ||
154 | return rc; | ||
155 | } | ||
156 | |||
157 | return 0; | ||
158 | }; | ||
159 | |||
160 | /** | ||
161 | * i2o_bus_exit - Bus Adapter OSM exit function | ||
162 | * | ||
163 | * Unregisters Bus Adapter OSM from I2O core. | ||
164 | */ | ||
165 | static void __exit i2o_bus_exit(void) | ||
166 | { | ||
167 | i2o_driver_unregister(&i2o_bus_driver); | ||
168 | }; | ||
169 | |||
170 | MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); | ||
171 | MODULE_LICENSE("GPL"); | ||
172 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
173 | MODULE_VERSION(OSM_VERSION); | ||
174 | |||
175 | module_init(i2o_bus_init); | ||
176 | module_exit(i2o_bus_exit); | ||
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
deleted file mode 100644
index 3bba7aa82e58..000000000000
--- a/drivers/message/i2o/config-osm.c
+++ /dev/null
@@ -1,90 +0,0 @@
1 | /* | ||
2 | * Configuration OSM | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | #include <linux/dcache.h> | ||
19 | #include <linux/namei.h> | ||
20 | #include <linux/fs.h> | ||
21 | |||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #define OSM_NAME "config-osm" | ||
25 | #define OSM_VERSION "1.323" | ||
26 | #define OSM_DESCRIPTION "I2O Configuration OSM" | ||
27 | |||
28 | /* access mode user rw */ | ||
29 | #define S_IWRSR (S_IRUSR | S_IWUSR) | ||
30 | |||
31 | static struct i2o_driver i2o_config_driver; | ||
32 | |||
33 | /* Config OSM driver struct */ | ||
34 | static struct i2o_driver i2o_config_driver = { | ||
35 | .name = OSM_NAME, | ||
36 | }; | ||
37 | |||
38 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
39 | #include "i2o_config.c" | ||
40 | #endif | ||
41 | |||
42 | /** | ||
43 | * i2o_config_init - Configuration OSM initialization function | ||
44 | * | ||
45 | * Registers the Configuration OSM in the I2O core and, if the old ioctls | ||
46 | * are compiled in, initializes them. | ||
47 | * | ||
48 | * Returns 0 on success or negative error code on failure. | ||
49 | */ | ||
50 | static int __init i2o_config_init(void) | ||
51 | { | ||
52 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
53 | |||
54 | if (i2o_driver_register(&i2o_config_driver)) { | ||
55 | osm_err("handler register failed.\n"); | ||
56 | return -EBUSY; | ||
57 | } | ||
58 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
59 | if (i2o_config_old_init()) { | ||
60 | osm_err("old config handler initialization failed\n"); | ||
61 | i2o_driver_unregister(&i2o_config_driver); | ||
62 | return -EBUSY; | ||
63 | } | ||
64 | #endif | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * i2o_config_exit - Configuration OSM exit function | ||
71 | * | ||
72 | * If the old ioctls are compiled in, removes them, and unregisters the | ||
73 | * Configuration OSM from the I2O core. | ||
74 | */ | ||
75 | static void i2o_config_exit(void) | ||
76 | { | ||
77 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
78 | i2o_config_old_exit(); | ||
79 | #endif | ||
80 | |||
81 | i2o_driver_unregister(&i2o_config_driver); | ||
82 | } | ||
83 | |||
84 | MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); | ||
85 | MODULE_LICENSE("GPL"); | ||
86 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
87 | MODULE_VERSION(OSM_VERSION); | ||
88 | |||
89 | module_init(i2o_config_init); | ||
90 | module_exit(i2o_config_exit); | ||
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
deleted file mode 100644
index 91614f11f89a..000000000000
--- a/drivers/message/i2o/core.h
+++ /dev/null
@@ -1,69 +0,0 @@
1 | /* | ||
2 | * I2O core internal declarations | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | /* Exec-OSM */ | ||
17 | extern struct i2o_driver i2o_exec_driver; | ||
18 | extern int i2o_exec_lct_get(struct i2o_controller *); | ||
19 | |||
20 | extern int __init i2o_exec_init(void); | ||
21 | extern void i2o_exec_exit(void); | ||
22 | |||
23 | /* driver */ | ||
24 | extern struct bus_type i2o_bus_type; | ||
25 | |||
26 | extern int i2o_driver_dispatch(struct i2o_controller *, u32); | ||
27 | |||
28 | extern int __init i2o_driver_init(void); | ||
29 | extern void i2o_driver_exit(void); | ||
30 | |||
31 | /* PCI */ | ||
32 | extern int __init i2o_pci_init(void); | ||
33 | extern void __exit i2o_pci_exit(void); | ||
34 | |||
35 | /* device */ | ||
36 | extern const struct attribute_group *i2o_device_groups[]; | ||
37 | |||
38 | extern void i2o_device_remove(struct i2o_device *); | ||
39 | extern int i2o_device_parse_lct(struct i2o_controller *); | ||
40 | |||
41 | int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, | ||
42 | int oplen, void *reslist, int reslen); | ||
43 | |||
44 | /* IOP */ | ||
45 | extern struct i2o_controller *i2o_iop_alloc(void); | ||
46 | |||
47 | /** | ||
48 | * i2o_iop_free - Free the i2o_controller struct | ||
49 | * @c: I2O controller to free | ||
50 | */ | ||
51 | static inline void i2o_iop_free(struct i2o_controller *c) | ||
52 | { | ||
53 | i2o_pool_free(&c->in_msg); | ||
54 | kfree(c); | ||
55 | } | ||
56 | |||
57 | extern int i2o_iop_add(struct i2o_controller *); | ||
58 | extern void i2o_iop_remove(struct i2o_controller *); | ||
59 | |||
60 | /* control registers relative to c->base */ | ||
61 | #define I2O_IRQ_STATUS 0x30 | ||
62 | #define I2O_IRQ_MASK 0x34 | ||
63 | #define I2O_IN_PORT 0x40 | ||
64 | #define I2O_OUT_PORT 0x44 | ||
65 | |||
66 | /* Motorola/Freescale specific register offset */ | ||
67 | #define I2O_MOTOROLA_PORT_OFFSET 0x10400 | ||
68 | |||
69 | #define I2O_IRQ_OUTBOUND_POST 0x00000008 | ||
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
deleted file mode 100644
index ce62d8bfe1c8..000000000000
--- a/drivers/message/i2o/debug.c
+++ /dev/null
@@ -1,472 +0,0 @@
1 | #include <linux/module.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/pci.h> | ||
4 | #include <linux/i2o.h> | ||
5 | |||
6 | static void i2o_report_util_cmd(u8 cmd); | ||
7 | static void i2o_report_exec_cmd(u8 cmd); | ||
8 | static void i2o_report_fail_status(u8 req_status, u32 * msg); | ||
9 | static void i2o_report_common_status(u8 req_status); | ||
10 | static void i2o_report_common_dsc(u16 detailed_status); | ||
11 | |||
12 | /* | ||
13 | * Used for error reporting/debugging purposes. | ||
14 | * Report Cmd name, Request status, Detailed Status. | ||
15 | */ | ||
16 | void i2o_report_status(const char *severity, const char *str, | ||
17 | struct i2o_message *m) | ||
18 | { | ||
19 | u32 *msg = (u32 *) m; | ||
20 | u8 cmd = (msg[1] >> 24) & 0xFF; | ||
21 | u8 req_status = (msg[4] >> 24) & 0xFF; | ||
22 | u16 detailed_status = msg[4] & 0xFFFF; | ||
23 | |||
24 | if (cmd == I2O_CMD_UTIL_EVT_REGISTER) | ||
25 | return; // No status in this reply | ||
26 | |||
27 | printk("%s%s: ", severity, str); | ||
28 | |||
29 | if (cmd < 0x1F) // Utility cmd | ||
30 | i2o_report_util_cmd(cmd); | ||
31 | |||
32 | else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd | ||
33 | i2o_report_exec_cmd(cmd); | ||
34 | else | ||
35 | printk("Cmd = %0#2x, ", cmd); // Other cmds | ||
36 | |||
37 | if (msg[0] & MSG_FAIL) { | ||
38 | i2o_report_fail_status(req_status, msg); | ||
39 | return; | ||
40 | } | ||
41 | |||
42 | i2o_report_common_status(req_status); | ||
43 | |||
44 | if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) | ||
45 | i2o_report_common_dsc(detailed_status); | ||
46 | else | ||
47 | printk(" / DetailedStatus = %0#4x.\n", | ||
48 | detailed_status); | ||
49 | } | ||
50 | |||
51 | /* Used to dump a message to syslog during debugging */ | ||
52 | void i2o_dump_message(struct i2o_message *m) | ||
53 | { | ||
54 | #ifdef DEBUG | ||
55 | u32 *msg = (u32 *) m; | ||
56 | int i; | ||
57 | printk(KERN_INFO "Dumping I2O message size %d @ %p\n", | ||
58 | msg[0] >> 16 & 0xffff, msg); | ||
59 | for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++) | ||
60 | printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]); | ||
61 | #endif | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Used for error reporting/debugging purposes. | ||
66 | * Following fail status are common to all classes. | ||
67 | * The preserved message must be handled in the reply handler. | ||
68 | */ | ||
69 | static void i2o_report_fail_status(u8 req_status, u32 * msg) | ||
70 | { | ||
71 | static char *FAIL_STATUS[] = { | ||
72 | "0x80", /* not used */ | ||
73 | "SERVICE_SUSPENDED", /* 0x81 */ | ||
74 | "SERVICE_TERMINATED", /* 0x82 */ | ||
75 | "CONGESTION", | ||
76 | "FAILURE", | ||
77 | "STATE_ERROR", | ||
78 | "TIME_OUT", | ||
79 | "ROUTING_FAILURE", | ||
80 | "INVALID_VERSION", | ||
81 | "INVALID_OFFSET", | ||
82 | "INVALID_MSG_FLAGS", | ||
83 | "FRAME_TOO_SMALL", | ||
84 | "FRAME_TOO_LARGE", | ||
85 | "INVALID_TARGET_ID", | ||
86 | "INVALID_INITIATOR_ID", | ||
87 | "INVALID_INITIATOR_CONTEX", /* 0x8F */ | ||
88 | "UNKNOWN_FAILURE" /* 0xFF */ | ||
89 | }; | ||
90 | |||
91 | if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) | ||
92 | printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", | ||
93 | req_status); | ||
94 | else | ||
95 | printk("TRANSPORT_%s.\n", | ||
96 | FAIL_STATUS[req_status & 0x0F]); | ||
97 | |||
98 | /* Dump some details */ | ||
99 | |||
100 | printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n", | ||
101 | (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF); | ||
102 | printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n", | ||
103 | (msg[4] >> 8) & 0xFF, msg[4] & 0xFF); | ||
104 | printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", | ||
105 | msg[5] >> 16, msg[5] & 0xFFF); | ||
106 | |||
107 | printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF); | ||
108 | if (msg[4] & (1 << 16)) | ||
109 | printk(KERN_DEBUG "(FormatError), " | ||
110 | "this msg can never be delivered/processed.\n"); | ||
111 | if (msg[4] & (1 << 17)) | ||
112 | printk(KERN_DEBUG "(PathError), " | ||
113 | "this msg can no longer be delivered/processed.\n"); | ||
114 | if (msg[4] & (1 << 18)) | ||
115 | printk(KERN_DEBUG "(PathState), " | ||
116 | "the system state does not allow delivery.\n"); | ||
117 | if (msg[4] & (1 << 19)) | ||
118 | printk(KERN_DEBUG | ||
119 | "(Congestion), resources temporarily not available;" | ||
120 | "do not retry immediately.\n"); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Used for error reporting/debugging purposes. | ||
125 | * Following reply status are common to all classes. | ||
126 | */ | ||
127 | static void i2o_report_common_status(u8 req_status) | ||
128 | { | ||
129 | static char *REPLY_STATUS[] = { | ||
130 | "SUCCESS", | ||
131 | "ABORT_DIRTY", | ||
132 | "ABORT_NO_DATA_TRANSFER", | ||
133 | "ABORT_PARTIAL_TRANSFER", | ||
134 | "ERROR_DIRTY", | ||
135 | "ERROR_NO_DATA_TRANSFER", | ||
136 | "ERROR_PARTIAL_TRANSFER", | ||
137 | "PROCESS_ABORT_DIRTY", | ||
138 | "PROCESS_ABORT_NO_DATA_TRANSFER", | ||
139 | "PROCESS_ABORT_PARTIAL_TRANSFER", | ||
140 | "TRANSACTION_ERROR", | ||
141 | "PROGRESS_REPORT" | ||
142 | }; | ||
143 | |||
144 | if (req_status >= ARRAY_SIZE(REPLY_STATUS)) | ||
145 | printk("RequestStatus = %0#2x", req_status); | ||
146 | else | ||
147 | printk("%s", REPLY_STATUS[req_status]); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Used for error reporting/debugging purposes. | ||
152 | * Following detailed status are valid for executive class, | ||
153 | * utility class, DDM class and for transaction error replies. | ||
154 | */ | ||
155 | static void i2o_report_common_dsc(u16 detailed_status) | ||
156 | { | ||
157 | static char *COMMON_DSC[] = { | ||
158 | "SUCCESS", | ||
159 | "0x01", // not used | ||
160 | "BAD_KEY", | ||
161 | "TCL_ERROR", | ||
162 | "REPLY_BUFFER_FULL", | ||
163 | "NO_SUCH_PAGE", | ||
164 | "INSUFFICIENT_RESOURCE_SOFT", | ||
165 | "INSUFFICIENT_RESOURCE_HARD", | ||
166 | "0x08", // not used | ||
167 | "CHAIN_BUFFER_TOO_LARGE", | ||
168 | "UNSUPPORTED_FUNCTION", | ||
169 | "DEVICE_LOCKED", | ||
170 | "DEVICE_RESET", | ||
171 | "INAPPROPRIATE_FUNCTION", | ||
172 | "INVALID_INITIATOR_ADDRESS", | ||
173 | "INVALID_MESSAGE_FLAGS", | ||
174 | "INVALID_OFFSET", | ||
175 | "INVALID_PARAMETER", | ||
176 | "INVALID_REQUEST", | ||
177 | "INVALID_TARGET_ADDRESS", | ||
178 | "MESSAGE_TOO_LARGE", | ||
179 | "MESSAGE_TOO_SMALL", | ||
180 | "MISSING_PARAMETER", | ||
181 | "TIMEOUT", | ||
182 | "UNKNOWN_ERROR", | ||
183 | "UNKNOWN_FUNCTION", | ||
184 | "UNSUPPORTED_VERSION", | ||
185 | "DEVICE_BUSY", | ||
186 | "DEVICE_NOT_AVAILABLE" | ||
187 | }; | ||
188 | |||
189 | if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) | ||
190 | printk(" / DetailedStatus = %0#4x.\n", | ||
191 | detailed_status); | ||
192 | else | ||
193 | printk(" / %s.\n", COMMON_DSC[detailed_status]); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Used for error reporting/debugging purposes | ||
198 | */ | ||
199 | static void i2o_report_util_cmd(u8 cmd) | ||
200 | { | ||
201 | switch (cmd) { | ||
202 | case I2O_CMD_UTIL_NOP: | ||
203 | printk("UTIL_NOP, "); | ||
204 | break; | ||
205 | case I2O_CMD_UTIL_ABORT: | ||
206 | printk("UTIL_ABORT, "); | ||
207 | break; | ||
208 | case I2O_CMD_UTIL_CLAIM: | ||
209 | printk("UTIL_CLAIM, "); | ||
210 | break; | ||
211 | case I2O_CMD_UTIL_RELEASE: | ||
212 | printk("UTIL_CLAIM_RELEASE, "); | ||
213 | break; | ||
214 | case I2O_CMD_UTIL_CONFIG_DIALOG: | ||
215 | printk("UTIL_CONFIG_DIALOG, "); | ||
216 | break; | ||
217 | case I2O_CMD_UTIL_DEVICE_RESERVE: | ||
218 | printk("UTIL_DEVICE_RESERVE, "); | ||
219 | break; | ||
220 | case I2O_CMD_UTIL_DEVICE_RELEASE: | ||
221 | printk("UTIL_DEVICE_RELEASE, "); | ||
222 | break; | ||
223 | case I2O_CMD_UTIL_EVT_ACK: | ||
224 | printk("UTIL_EVENT_ACKNOWLEDGE, "); | ||
225 | break; | ||
226 | case I2O_CMD_UTIL_EVT_REGISTER: | ||
227 | printk("UTIL_EVENT_REGISTER, "); | ||
228 | break; | ||
229 | case I2O_CMD_UTIL_LOCK: | ||
230 | printk("UTIL_LOCK, "); | ||
231 | break; | ||
232 | case I2O_CMD_UTIL_LOCK_RELEASE: | ||
233 | printk("UTIL_LOCK_RELEASE, "); | ||
234 | break; | ||
235 | case I2O_CMD_UTIL_PARAMS_GET: | ||
236 | printk("UTIL_PARAMS_GET, "); | ||
237 | break; | ||
238 | case I2O_CMD_UTIL_PARAMS_SET: | ||
239 | printk("UTIL_PARAMS_SET, "); | ||
240 | break; | ||
241 | case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: | ||
242 | printk("UTIL_REPLY_FAULT_NOTIFY, "); | ||
243 | break; | ||
244 | default: | ||
245 | printk("Cmd = %0#2x, ", cmd); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Used for error reporting/debugging purposes | ||
251 | */ | ||
252 | static void i2o_report_exec_cmd(u8 cmd) | ||
253 | { | ||
254 | switch (cmd) { | ||
255 | case I2O_CMD_ADAPTER_ASSIGN: | ||
256 | printk("EXEC_ADAPTER_ASSIGN, "); | ||
257 | break; | ||
258 | case I2O_CMD_ADAPTER_READ: | ||
259 | printk("EXEC_ADAPTER_READ, "); | ||
260 | break; | ||
261 | case I2O_CMD_ADAPTER_RELEASE: | ||
262 | printk("EXEC_ADAPTER_RELEASE, "); | ||
263 | break; | ||
264 | case I2O_CMD_BIOS_INFO_SET: | ||
265 | printk("EXEC_BIOS_INFO_SET, "); | ||
266 | break; | ||
267 | case I2O_CMD_BOOT_DEVICE_SET: | ||
268 | printk("EXEC_BOOT_DEVICE_SET, "); | ||
269 | break; | ||
270 | case I2O_CMD_CONFIG_VALIDATE: | ||
271 | printk("EXEC_CONFIG_VALIDATE, "); | ||
272 | break; | ||
273 | case I2O_CMD_CONN_SETUP: | ||
274 | printk("EXEC_CONN_SETUP, "); | ||
275 | break; | ||
276 | case I2O_CMD_DDM_DESTROY: | ||
277 | printk("EXEC_DDM_DESTROY, "); | ||
278 | break; | ||
279 | case I2O_CMD_DDM_ENABLE: | ||
280 | printk("EXEC_DDM_ENABLE, "); | ||
281 | break; | ||
282 | case I2O_CMD_DDM_QUIESCE: | ||
283 | printk("EXEC_DDM_QUIESCE, "); | ||
284 | break; | ||
285 | case I2O_CMD_DDM_RESET: | ||
286 | printk("EXEC_DDM_RESET, "); | ||
287 | break; | ||
288 | case I2O_CMD_DDM_SUSPEND: | ||
289 | printk("EXEC_DDM_SUSPEND, "); | ||
290 | break; | ||
291 | case I2O_CMD_DEVICE_ASSIGN: | ||
292 | printk("EXEC_DEVICE_ASSIGN, "); | ||
293 | break; | ||
294 | case I2O_CMD_DEVICE_RELEASE: | ||
295 | printk("EXEC_DEVICE_RELEASE, "); | ||
296 | break; | ||
297 | case I2O_CMD_HRT_GET: | ||
298 | printk("EXEC_HRT_GET, "); | ||
299 | break; | ||
300 | case I2O_CMD_ADAPTER_CLEAR: | ||
301 | printk("EXEC_IOP_CLEAR, "); | ||
302 | break; | ||
303 | case I2O_CMD_ADAPTER_CONNECT: | ||
304 | printk("EXEC_IOP_CONNECT, "); | ||
305 | break; | ||
306 | case I2O_CMD_ADAPTER_RESET: | ||
307 | printk("EXEC_IOP_RESET, "); | ||
308 | break; | ||
309 | case I2O_CMD_LCT_NOTIFY: | ||
310 | printk("EXEC_LCT_NOTIFY, "); | ||
311 | break; | ||
312 | case I2O_CMD_OUTBOUND_INIT: | ||
313 | printk("EXEC_OUTBOUND_INIT, "); | ||
314 | break; | ||
315 | case I2O_CMD_PATH_ENABLE: | ||
316 | printk("EXEC_PATH_ENABLE, "); | ||
317 | break; | ||
318 | case I2O_CMD_PATH_QUIESCE: | ||
319 | printk("EXEC_PATH_QUIESCE, "); | ||
320 | break; | ||
321 | case I2O_CMD_PATH_RESET: | ||
322 | printk("EXEC_PATH_RESET, "); | ||
323 | break; | ||
324 | case I2O_CMD_STATIC_MF_CREATE: | ||
325 | printk("EXEC_STATIC_MF_CREATE, "); | ||
326 | break; | ||
327 | case I2O_CMD_STATIC_MF_RELEASE: | ||
328 | printk("EXEC_STATIC_MF_RELEASE, "); | ||
329 | break; | ||
330 | case I2O_CMD_STATUS_GET: | ||
331 | printk("EXEC_STATUS_GET, "); | ||
332 | break; | ||
333 | case I2O_CMD_SW_DOWNLOAD: | ||
334 | printk("EXEC_SW_DOWNLOAD, "); | ||
335 | break; | ||
336 | case I2O_CMD_SW_UPLOAD: | ||
337 | printk("EXEC_SW_UPLOAD, "); | ||
338 | break; | ||
339 | case I2O_CMD_SW_REMOVE: | ||
340 | printk("EXEC_SW_REMOVE, "); | ||
341 | break; | ||
342 | case I2O_CMD_SYS_ENABLE: | ||
343 | printk("EXEC_SYS_ENABLE, "); | ||
344 | break; | ||
345 | case I2O_CMD_SYS_MODIFY: | ||
346 | printk("EXEC_SYS_MODIFY, "); | ||
347 | break; | ||
348 | case I2O_CMD_SYS_QUIESCE: | ||
349 | printk("EXEC_SYS_QUIESCE, "); | ||
350 | break; | ||
351 | case I2O_CMD_SYS_TAB_SET: | ||
352 | printk("EXEC_SYS_TAB_SET, "); | ||
353 | break; | ||
354 | default: | ||
355 | printk("Cmd = %#02x, ", cmd); | ||
356 | } | ||
357 | } | ||
358 | |||
359 | void i2o_debug_state(struct i2o_controller *c) | ||
360 | { | ||
361 | printk(KERN_INFO "%s: State = ", c->name); | ||
362 | switch (((i2o_status_block *) c->status_block.virt)->iop_state) { | ||
363 | case 0x01: | ||
364 | printk("INIT\n"); | ||
365 | break; | ||
366 | case 0x02: | ||
367 | printk("RESET\n"); | ||
368 | break; | ||
369 | case 0x04: | ||
370 | printk("HOLD\n"); | ||
371 | break; | ||
372 | case 0x05: | ||
373 | printk("READY\n"); | ||
374 | break; | ||
375 | case 0x08: | ||
376 | printk("OPERATIONAL\n"); | ||
377 | break; | ||
378 | case 0x10: | ||
379 | printk("FAILED\n"); | ||
380 | break; | ||
381 | case 0x11: | ||
382 | printk("FAULTED\n"); | ||
383 | break; | ||
384 | default: | ||
385 | printk("%x (unknown !!)\n", | ||
386 | ((i2o_status_block *) c->status_block.virt)->iop_state); | ||
387 | } | ||
388 | }; | ||
389 | |||
390 | void i2o_dump_hrt(struct i2o_controller *c) | ||
391 | { | ||
392 | u32 *rows = (u32 *) c->hrt.virt; | ||
393 | u8 *p = (u8 *) c->hrt.virt; | ||
394 | u8 *d; | ||
395 | int count; | ||
396 | int length; | ||
397 | int i; | ||
398 | int state; | ||
399 | |||
400 | if (p[3] != 0) { | ||
401 | printk(KERN_ERR | ||
402 | "%s: HRT table for controller is too new a version.\n", | ||
403 | c->name); | ||
404 | return; | ||
405 | } | ||
406 | |||
407 | count = p[0] | (p[1] << 8); | ||
408 | length = p[2]; | ||
409 | |||
410 | printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n", | ||
411 | c->name, count, length << 2); | ||
412 | |||
413 | rows += 2; | ||
414 | |||
415 | for (i = 0; i < count; i++) { | ||
416 | printk(KERN_INFO "Adapter %08X: ", rows[0]); | ||
417 | p = (u8 *) (rows + 1); | ||
418 | d = (u8 *) (rows + 2); | ||
419 | state = p[1] << 8 | p[0]; | ||
420 | |||
421 | printk("TID %04X:[", state & 0xFFF); | ||
422 | state >>= 12; | ||
423 | if (state & (1 << 0)) | ||
424 | printk("H"); /* Hidden */ | ||
425 | if (state & (1 << 2)) { | ||
426 | printk("P"); /* Present */ | ||
427 | if (state & (1 << 1)) | ||
428 | printk("C"); /* Controlled */ | ||
429 | } | ||
430 | if (state > 9) | ||
431 | printk("*"); /* Hard */ | ||
432 | |||
433 | printk("]:"); | ||
434 | |||
435 | switch (p[3] & 0xFFFF) { | ||
436 | case 0: | ||
437 | /* Adapter private bus - easy */ | ||
438 | printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2], | ||
439 | d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
440 | break; | ||
441 | case 1: | ||
442 | /* ISA bus */ | ||
443 | printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2], | ||
444 | d[2], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
445 | break; | ||
446 | |||
447 | case 2: /* EISA bus */ | ||
448 | printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X", | ||
449 | p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
450 | break; | ||
451 | |||
452 | case 3: /* MCA bus */ | ||
453 | printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2], | ||
454 | d[3], d[1] << 8 | d[0], *(u32 *) (d + 4)); | ||
455 | break; | ||
456 | |||
457 | case 4: /* PCI bus */ | ||
458 | printk("PCI %d: Bus %d Device %d Function %d", p[2], | ||
459 | d[2], d[1], d[0]); | ||
460 | break; | ||
461 | |||
462 | case 0x80: /* Other */ | ||
463 | default: | ||
464 | printk("Unsupported bus type."); | ||
465 | break; | ||
466 | } | ||
467 | printk("\n"); | ||
468 | rows += length; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | EXPORT_SYMBOL(i2o_dump_message); | ||
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
deleted file mode 100644
index 98348f420b52..000000000000
--- a/drivers/message/i2o/device.c
+++ /dev/null
@@ -1,594 +0,0 @@
1 | /* | ||
2 | * Functions to handle I2O devices | ||
3 | * | ||
4 | * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include "core.h" | ||
22 | |||
23 | /** | ||
24 | * i2o_device_issue_claim - claim or release a device | ||
25 | * @dev: I2O device to claim or release | ||
26 | * @cmd: claim or release command | ||
27 | * @type: type of claim | ||
28 | * | ||
29 | * Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent | ||
30 | * is set by cmd. dev is the I2O device which should be claimed or | ||
31 | * released and the type is the claim type (see the I2O spec). | ||
32 | * | ||
33 | * Returns 0 on success or negative error code on failure. | ||
34 | */ | ||
35 | static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd, | ||
36 | u32 type) | ||
37 | { | ||
38 | struct i2o_message *msg; | ||
39 | |||
40 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
41 | if (IS_ERR(msg)) | ||
42 | return PTR_ERR(msg); | ||
43 | |||
44 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
45 | msg->u.head[1] = | ||
46 | cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid); | ||
47 | msg->body[0] = cpu_to_le32(type); | ||
48 | |||
49 | return i2o_msg_post_wait(dev->iop, msg, 60); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * i2o_device_claim - claim a device for use by an OSM | ||
54 | * @dev: I2O device to claim | ||
55 | * | ||
56 | * Do the leg work to assign a device to a given OSM. If the claim succeeds, | ||
57 | * the owner is the primary. If the attempt fails a negative errno code | ||
58 | * is returned. On success zero is returned. | ||
59 | */ | ||
60 | int i2o_device_claim(struct i2o_device *dev) | ||
61 | { | ||
62 | int rc = 0; | ||
63 | |||
64 | mutex_lock(&dev->lock); | ||
65 | |||
66 | rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY); | ||
67 | if (!rc) | ||
68 | pr_debug("i2o: claim of device %d succeeded\n", | ||
69 | dev->lct_data.tid); | ||
70 | else | ||
71 | pr_debug("i2o: claim of device %d failed %d\n", | ||
72 | dev->lct_data.tid, rc); | ||
73 | |||
74 | mutex_unlock(&dev->lock); | ||
75 | |||
76 | return rc; | ||
77 | } | ||
78 | |||
79 | /** | ||
80 | * i2o_device_claim_release - release a device that the OSM is using | ||
81 | * @dev: device to release | ||
82 | * | ||
83 | * Drop a claim by an OSM on a given I2O device. | ||
84 | * | ||
85 | * AC - some devices seem to want to refuse an unclaim until they have | ||
86 | * finished internal processing. It makes sense since you don't want a | ||
87 | * new device to go reconfiguring the entire system until you are done. | ||
88 | * Thus we are prepared to wait briefly. | ||
89 | * | ||
90 | * Returns 0 on success or negative error code on failure. | ||
91 | */ | ||
92 | int i2o_device_claim_release(struct i2o_device *dev) | ||
93 | { | ||
94 | int tries; | ||
95 | int rc = 0; | ||
96 | |||
97 | mutex_lock(&dev->lock); | ||
98 | |||
99 | /* | ||
100 | * If the controller takes a nonblocking approach to | ||
101 | * releases we have to sleep/poll for a few times. | ||
102 | */ | ||
103 | for (tries = 0; tries < 10; tries++) { | ||
104 | rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE, | ||
105 | I2O_CLAIM_PRIMARY); | ||
106 | if (!rc) | ||
107 | break; | ||
108 | |||
109 | ssleep(1); | ||
110 | } | ||
111 | |||
112 | if (!rc) | ||
113 | pr_debug("i2o: claim release of device %d succeeded\n", | ||
114 | dev->lct_data.tid); | ||
115 | else | ||
116 | pr_debug("i2o: claim release of device %d failed %d\n", | ||
117 | dev->lct_data.tid, rc); | ||
118 | |||
119 | mutex_unlock(&dev->lock); | ||
120 | |||
121 | return rc; | ||
122 | } | ||
123 | |||
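A hypothetical OSM would normally take the claim in its probe routine and drop it again in remove. A minimal sketch, assuming the usual #include <linux/i2o.h>; the my_osm_* names are illustrative and not part of the I2O core:

static int my_osm_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	int rc;

	rc = i2o_device_claim(i2o_dev);		/* become the primary user */
	if (rc)
		return rc;			/* device already owned */

	/* ... set up OSM-private state here ... */
	return 0;
}

static int my_osm_remove(struct device *dev)
{
	/* drop the primary claim taken in probe */
	return i2o_device_claim_release(to_i2o_device(dev));
}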
124 | /** | ||
125 | * i2o_device_release - release the memory for a I2O device | ||
126 | * @dev: I2O device which should be released | ||
127 | * | ||
128 | * Release the allocated memory. This function is called automatically | ||
129 | * when the refcount of the device reaches 0. | ||
130 | */ | ||
131 | static void i2o_device_release(struct device *dev) | ||
132 | { | ||
133 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
134 | |||
135 | pr_debug("i2o: device %s released\n", dev_name(dev)); | ||
136 | |||
137 | kfree(i2o_dev); | ||
138 | } | ||
139 | |||
140 | /** | ||
141 | * class_id_show - Displays class id of I2O device | ||
142 | * @dev: device of which the class id should be displayed | ||
143 | * @attr: pointer to device attribute | ||
144 | * @buf: buffer into which the class id should be printed | ||
145 | * | ||
146 | * Returns the number of bytes which are printed into the buffer. | ||
147 | */ | ||
148 | static ssize_t class_id_show(struct device *dev, struct device_attribute *attr, | ||
149 | char *buf) | ||
150 | { | ||
151 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
152 | |||
153 | sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id); | ||
154 | return strlen(buf) + 1; | ||
155 | } | ||
156 | static DEVICE_ATTR_RO(class_id); | ||
157 | |||
158 | /** | ||
159 | * tid_show - Displays TID of I2O device | ||
160 | * @dev: device of which the TID should be displayed | ||
161 | * @attr: pointer to device attribute | ||
162 | * @buf: buffer into which the TID should be printed | ||
163 | * | ||
164 | * Returns the number of bytes which are printed into the buffer. | ||
165 | */ | ||
166 | static ssize_t tid_show(struct device *dev, struct device_attribute *attr, | ||
167 | char *buf) | ||
168 | { | ||
169 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
170 | |||
171 | sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid); | ||
172 | return strlen(buf) + 1; | ||
173 | } | ||
174 | static DEVICE_ATTR_RO(tid); | ||
175 | |||
176 | /* I2O device attributes */ | ||
177 | static struct attribute *i2o_device_attrs[] = { | ||
178 | &dev_attr_class_id.attr, | ||
179 | &dev_attr_tid.attr, | ||
180 | NULL, | ||
181 | }; | ||
182 | |||
183 | static const struct attribute_group i2o_device_group = { | ||
184 | .attrs = i2o_device_attrs, | ||
185 | }; | ||
186 | |||
187 | const struct attribute_group *i2o_device_groups[] = { | ||
188 | &i2o_device_group, | ||
189 | NULL, | ||
190 | }; | ||
191 | |||
192 | /** | ||
193 | * i2o_device_alloc - Allocate an I2O device and initialize it | ||
194 | * | ||
195 | * Allocate the memory for an I2O device and initialize locks and lists | ||
196 | * | ||
197 | * Returns the allocated I2O device or a negative error code if the device | ||
198 | * could not be allocated. | ||
199 | */ | ||
200 | static struct i2o_device *i2o_device_alloc(void) | ||
201 | { | ||
202 | struct i2o_device *dev; | ||
203 | |||
204 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
205 | if (!dev) | ||
206 | return ERR_PTR(-ENOMEM); | ||
207 | |||
208 | INIT_LIST_HEAD(&dev->list); | ||
209 | mutex_init(&dev->lock); | ||
210 | |||
211 | dev->device.bus = &i2o_bus_type; | ||
212 | dev->device.release = &i2o_device_release; | ||
213 | |||
214 | return dev; | ||
215 | } | ||
216 | |||
217 | /** | ||
218 | * i2o_device_add - allocate a new I2O device and add it to the IOP | ||
219 | * @c: I2O controller that the device is on | ||
220 | * @entry: LCT entry of the I2O device | ||
221 | * | ||
222 | * Allocate a new I2O device and initialize it with the LCT entry. The | ||
223 | * device is appended to the device list of the controller. | ||
224 | * | ||
225 | * Returns zero on success, or a -ve errno. | ||
226 | */ | ||
227 | static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry) | ||
228 | { | ||
229 | struct i2o_device *i2o_dev, *tmp; | ||
230 | int rc; | ||
231 | |||
232 | i2o_dev = i2o_device_alloc(); | ||
233 | if (IS_ERR(i2o_dev)) { | ||
234 | printk(KERN_ERR "i2o: unable to allocate i2o device\n"); | ||
235 | return PTR_ERR(i2o_dev); | ||
236 | } | ||
237 | |||
238 | i2o_dev->lct_data = *entry; | ||
239 | |||
240 | dev_set_name(&i2o_dev->device, "%d:%03x", c->unit, | ||
241 | i2o_dev->lct_data.tid); | ||
242 | |||
243 | i2o_dev->iop = c; | ||
244 | i2o_dev->device.parent = &c->device; | ||
245 | |||
246 | rc = device_register(&i2o_dev->device); | ||
247 | if (rc) | ||
248 | goto err; | ||
249 | |||
250 | list_add_tail(&i2o_dev->list, &c->devices); | ||
251 | |||
252 | /* create user entries for this device */ | ||
253 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); | ||
254 | if (tmp && (tmp != i2o_dev)) { | ||
255 | rc = sysfs_create_link(&i2o_dev->device.kobj, | ||
256 | &tmp->device.kobj, "user"); | ||
257 | if (rc) | ||
258 | goto unreg_dev; | ||
259 | } | ||
260 | |||
261 | /* create user entries referring to this device */ | ||
262 | list_for_each_entry(tmp, &c->devices, list) | ||
263 | if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | ||
264 | && (tmp != i2o_dev)) { | ||
265 | rc = sysfs_create_link(&tmp->device.kobj, | ||
266 | &i2o_dev->device.kobj, "user"); | ||
267 | if (rc) | ||
268 | goto rmlink1; | ||
269 | } | ||
270 | |||
271 | /* create parent entries for this device */ | ||
272 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); | ||
273 | if (tmp && (tmp != i2o_dev)) { | ||
274 | rc = sysfs_create_link(&i2o_dev->device.kobj, | ||
275 | &tmp->device.kobj, "parent"); | ||
276 | if (rc) | ||
277 | goto rmlink1; | ||
278 | } | ||
279 | |||
280 | /* create parent entries referring to this device */ | ||
281 | list_for_each_entry(tmp, &c->devices, list) | ||
282 | if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | ||
283 | && (tmp != i2o_dev)) { | ||
284 | rc = sysfs_create_link(&tmp->device.kobj, | ||
285 | &i2o_dev->device.kobj, "parent"); | ||
286 | if (rc) | ||
287 | goto rmlink2; | ||
288 | } | ||
289 | |||
290 | i2o_driver_notify_device_add_all(i2o_dev); | ||
291 | |||
292 | pr_debug("i2o: device %s added\n", dev_name(&i2o_dev->device)); | ||
293 | |||
294 | return 0; | ||
295 | |||
296 | rmlink2: | ||
297 | /* If link creation failed halfway, walk the whole list to clean up. | ||
298 | * Removing a link that was never created is harmless, because | ||
299 | * sysfs_remove_link copes with that. | ||
300 | */ | ||
301 | list_for_each_entry(tmp, &c->devices, list) { | ||
302 | if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | ||
303 | sysfs_remove_link(&tmp->device.kobj, "parent"); | ||
304 | } | ||
305 | sysfs_remove_link(&i2o_dev->device.kobj, "parent"); | ||
306 | rmlink1: | ||
307 | list_for_each_entry(tmp, &c->devices, list) | ||
308 | if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | ||
309 | sysfs_remove_link(&tmp->device.kobj, "user"); | ||
310 | sysfs_remove_link(&i2o_dev->device.kobj, "user"); | ||
311 | unreg_dev: | ||
312 | list_del(&i2o_dev->list); | ||
313 | device_unregister(&i2o_dev->device); | ||
314 | err: | ||
315 | kfree(i2o_dev); | ||
316 | return rc; | ||
317 | } | ||
318 | |||
319 | /** | ||
320 | * i2o_device_remove - remove an I2O device from the I2O core | ||
321 | * @i2o_dev: I2O device which should be released | ||
322 | * | ||
323 | * Used on I2O controller removal or LCT modification, when the device | ||
324 | * is removed from the system. Note that the device could still hang | ||
325 | * around until the refcount reaches 0. | ||
326 | */ | ||
327 | void i2o_device_remove(struct i2o_device *i2o_dev) | ||
328 | { | ||
329 | struct i2o_device *tmp; | ||
330 | struct i2o_controller *c = i2o_dev->iop; | ||
331 | |||
332 | i2o_driver_notify_device_remove_all(i2o_dev); | ||
333 | |||
334 | sysfs_remove_link(&i2o_dev->device.kobj, "parent"); | ||
335 | sysfs_remove_link(&i2o_dev->device.kobj, "user"); | ||
336 | |||
337 | list_for_each_entry(tmp, &c->devices, list) { | ||
338 | if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | ||
339 | sysfs_remove_link(&tmp->device.kobj, "parent"); | ||
340 | if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | ||
341 | sysfs_remove_link(&tmp->device.kobj, "user"); | ||
342 | } | ||
343 | list_del(&i2o_dev->list); | ||
344 | |||
345 | device_unregister(&i2o_dev->device); | ||
346 | } | ||
347 | |||
348 | /** | ||
349 | * i2o_device_parse_lct - Parse a previously fetched LCT and create devices | ||
350 | * @c: I2O controller from which the LCT should be parsed. | ||
351 | * | ||
352 | * The Logical Configuration Table tells us what we can talk to on the | ||
353 | * board. For every entry we create an I2O device, which is registered in | ||
354 | * the I2O core. | ||
355 | * | ||
356 | * Returns 0 on success or negative error code on failure. | ||
357 | */ | ||
358 | int i2o_device_parse_lct(struct i2o_controller *c) | ||
359 | { | ||
360 | struct i2o_device *dev, *tmp; | ||
361 | i2o_lct *lct; | ||
362 | u32 *dlct = c->dlct.virt; | ||
363 | int max = 0, i = 0; | ||
364 | u16 table_size; | ||
365 | u32 buf; | ||
366 | |||
367 | mutex_lock(&c->lct_lock); | ||
368 | |||
369 | kfree(c->lct); | ||
370 | |||
371 | buf = le32_to_cpu(*dlct++); | ||
372 | table_size = buf & 0xffff; | ||
373 | |||
374 | lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL); | ||
375 | if (!lct) { | ||
376 | mutex_unlock(&c->lct_lock); | ||
377 | return -ENOMEM; | ||
378 | } | ||
379 | |||
380 | lct->lct_ver = buf >> 28; | ||
381 | lct->boot_tid = buf >> 16 & 0xfff; | ||
382 | lct->table_size = table_size; | ||
383 | lct->change_ind = le32_to_cpu(*dlct++); | ||
384 | lct->iop_flags = le32_to_cpu(*dlct++); | ||
385 | |||
386 | table_size -= 3; | ||
387 | |||
388 | pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max, | ||
389 | lct->table_size); | ||
390 | |||
391 | while (table_size > 0) { | ||
392 | i2o_lct_entry *entry = &lct->lct_entry[max]; | ||
393 | int found = 0; | ||
394 | |||
395 | buf = le32_to_cpu(*dlct++); | ||
396 | entry->entry_size = buf & 0xffff; | ||
397 | entry->tid = buf >> 16 & 0xfff; | ||
398 | |||
399 | entry->change_ind = le32_to_cpu(*dlct++); | ||
400 | entry->device_flags = le32_to_cpu(*dlct++); | ||
401 | |||
402 | buf = le32_to_cpu(*dlct++); | ||
403 | entry->class_id = buf & 0xfff; | ||
404 | entry->version = buf >> 12 & 0xf; | ||
405 | entry->vendor_id = buf >> 16; | ||
406 | |||
407 | entry->sub_class = le32_to_cpu(*dlct++); | ||
408 | |||
409 | buf = le32_to_cpu(*dlct++); | ||
410 | entry->user_tid = buf & 0xfff; | ||
411 | entry->parent_tid = buf >> 12 & 0xfff; | ||
412 | entry->bios_info = buf >> 24; | ||
413 | |||
414 | memcpy(&entry->identity_tag, dlct, 8); | ||
415 | dlct += 2; | ||
416 | |||
417 | entry->event_capabilities = le32_to_cpu(*dlct++); | ||
418 | |||
419 | /* add new devices, which are new in the LCT */ | ||
420 | list_for_each_entry_safe(dev, tmp, &c->devices, list) { | ||
421 | if (entry->tid == dev->lct_data.tid) { | ||
422 | found = 1; | ||
423 | break; | ||
424 | } | ||
425 | } | ||
426 | |||
427 | if (!found) | ||
428 | i2o_device_add(c, entry); | ||
429 | |||
430 | table_size -= 9; | ||
431 | max++; | ||
432 | } | ||
433 | |||
434 | /* remove devices, which are not in the LCT anymore */ | ||
435 | list_for_each_entry_safe(dev, tmp, &c->devices, list) { | ||
436 | int found = 0; | ||
437 | |||
438 | for (i = 0; i < max; i++) { | ||
439 | if (lct->lct_entry[i].tid == dev->lct_data.tid) { | ||
440 | found = 1; | ||
441 | break; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | if (!found) | ||
446 | i2o_device_remove(dev); | ||
447 | } | ||
448 | |||
449 | mutex_unlock(&c->lct_lock); | ||
450 | |||
451 | return 0; | ||
452 | } | ||
453 | |||
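The parser above consumes exactly nine 32-bit words per LCT entry. Read back from its shifts and masks, the raw entry looks roughly like the sketch below; the field widths come from the code, not from the I2O specification text.

struct i2o_lct_entry_sketch {
	__le32	size_tid;	/* 15:0 entry_size, 27:16 tid (12 bits)         */
	__le32	change_ind;
	__le32	device_flags;
	__le32	class_info;	/* 11:0 class_id, 15:12 version, 31:16 vendor   */
	__le32	sub_class;
	__le32	usr_par_bios;	/* 11:0 user_tid, 23:12 parent_tid, 31:24 bios  */
	u8	identity_tag[8];
	__le32	event_capabilities;
};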
454 | /* | ||
455 | * Run time support routines | ||
456 | */ | ||
457 | |||
458 | /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET | ||
459 | * | ||
460 | * This function can be used for all UtilParamsGet/Set operations. | ||
461 | * The OperationList is given in oplist-buffer, | ||
462 | * and results are returned in reslist-buffer. | ||
463 | * Note that the minimum sized reslist is 8 bytes and contains | ||
464 | * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. | ||
465 | */ | ||
466 | int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, | ||
467 | int oplen, void *reslist, int reslen) | ||
468 | { | ||
469 | struct i2o_message *msg; | ||
470 | int i = 0; | ||
471 | int rc; | ||
472 | struct i2o_dma res; | ||
473 | struct i2o_controller *c = i2o_dev->iop; | ||
474 | struct device *dev = &c->pdev->dev; | ||
475 | |||
476 | res.virt = NULL; | ||
477 | |||
478 | if (i2o_dma_alloc(dev, &res, reslen)) | ||
479 | return -ENOMEM; | ||
480 | |||
481 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
482 | if (IS_ERR(msg)) { | ||
483 | i2o_dma_free(dev, &res); | ||
484 | return PTR_ERR(msg); | ||
485 | } | ||
486 | |||
487 | i = 0; | ||
488 | msg->u.head[1] = | ||
489 | cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid); | ||
490 | msg->body[i++] = cpu_to_le32(0x00000000); | ||
491 | msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */ | ||
492 | memcpy(&msg->body[i], oplist, oplen); | ||
493 | i += (oplen / 4 + (oplen % 4 ? 1 : 0)); | ||
494 | msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */ | ||
495 | msg->body[i++] = cpu_to_le32(res.phys); | ||
496 | |||
497 | msg->u.head[0] = | ||
498 | cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) | | ||
499 | SGL_OFFSET_5); | ||
500 | |||
501 | rc = i2o_msg_post_wait_mem(c, msg, 10, &res); | ||
502 | |||
503 | /* This only looks like a memory leak - don't "fix" it. */ | ||
504 | if (rc == -ETIMEDOUT) | ||
505 | return rc; | ||
506 | |||
507 | memcpy(reslist, res.virt, res.len); | ||
508 | i2o_dma_free(dev, &res); | ||
509 | |||
510 | return rc; | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * Query one field group value or a whole scalar group. | ||
515 | */ | ||
516 | int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, | ||
517 | void *buf, int buflen) | ||
518 | { | ||
519 | u32 opblk[] = { cpu_to_le32(0x00000001), | ||
520 | cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET), | ||
521 | cpu_to_le32((s16) field << 16 | 0x00000001) | ||
522 | }; | ||
523 | u8 *resblk; /* 8 bytes for header */ | ||
524 | int rc; | ||
525 | |||
526 | resblk = kmalloc(buflen + 8, GFP_KERNEL); | ||
527 | if (!resblk) | ||
528 | return -ENOMEM; | ||
529 | |||
530 | rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, | ||
531 | sizeof(opblk), resblk, buflen + 8); | ||
532 | |||
533 | memcpy(buf, resblk + 8, buflen); /* cut off header */ | ||
534 | |||
535 | kfree(resblk); | ||
536 | |||
537 | return rc; | ||
538 | } | ||
539 | |||
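Fetching a single scalar field is a one-liner, as the Exec-OSM vendor_id attribute further down does. A sketch (group 0x0000, field 0 is the vendor ID and is returned little-endian):

u16 vendor;

if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &vendor, sizeof(vendor)))
	pr_info("i2o: vendor id 0x%04x\n", le16_to_cpu(vendor));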
540 | /* | ||
541 | * if oper == I2O_PARAMS_TABLE_GET, get from all rows | ||
542 | * if fieldcount == -1 return all fields | ||
543 | * ibuf and ibuflen are unused (use NULL, 0) | ||
544 | * else return specific fields | ||
545 | * ibuf contains fieldindexes | ||
546 | * | ||
547 | * if oper == I2O_PARAMS_LIST_GET, get from specific rows | ||
548 | * if fieldcount == -1 return all fields | ||
549 | * ibuf contains rowcount, keyvalues | ||
550 | * else return specific fields | ||
551 | * fieldcount is # of fieldindexes | ||
552 | * ibuf contains fieldindexes, rowcount, keyvalues | ||
553 | * | ||
554 | * You could also use directly function i2o_issue_params(). | ||
555 | */ | ||
556 | int i2o_parm_table_get(struct i2o_device *dev, int oper, int group, | ||
557 | int fieldcount, void *ibuf, int ibuflen, void *resblk, | ||
558 | int reslen) | ||
559 | { | ||
560 | u16 *opblk; | ||
561 | int size; | ||
562 | |||
563 | size = 10 + ibuflen; | ||
564 | if (size % 4) | ||
565 | size += 4 - size % 4; | ||
566 | |||
567 | opblk = kmalloc(size, GFP_KERNEL); | ||
568 | if (opblk == NULL) { | ||
569 | printk(KERN_ERR "i2o: no memory for query buffer.\n"); | ||
570 | return -ENOMEM; | ||
571 | } | ||
572 | |||
573 | opblk[0] = 1; /* operation count */ | ||
574 | opblk[1] = 0; /* pad */ | ||
575 | opblk[2] = oper; | ||
576 | opblk[3] = group; | ||
577 | opblk[4] = fieldcount; | ||
578 | memcpy(opblk + 5, ibuf, ibuflen); /* other params */ | ||
579 | |||
580 | size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk, | ||
581 | size, resblk, reslen); | ||
582 | |||
583 | kfree(opblk); | ||
584 | if (size > reslen) | ||
585 | return reslen; | ||
586 | |||
587 | return size; | ||
588 | } | ||
589 | |||
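A sketch of the simplest mode described above: oper == I2O_PARAMS_TABLE_GET with fieldcount == -1 returns every field of every row. The group number 0xF000 and the helper name are placeholders; the result block still begins with the 8-byte header documented before i2o_parm_issue():

static int my_dump_table(struct i2o_device *i2o_dev)
{
	u8 resblk[256];		/* header + rows */
	int len;

	len = i2o_parm_table_get(i2o_dev, I2O_PARAMS_TABLE_GET, 0xF000,
				 -1, NULL, 0, resblk, sizeof(resblk));
	if (len < 0)
		return len;	/* errno from the message layer */

	/* rows start after the 8-byte ResultCount/BlockStatus header */
	return 0;
}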
590 | EXPORT_SYMBOL(i2o_device_claim); | ||
591 | EXPORT_SYMBOL(i2o_device_claim_release); | ||
592 | EXPORT_SYMBOL(i2o_parm_field_get); | ||
593 | EXPORT_SYMBOL(i2o_parm_table_get); | ||
594 | EXPORT_SYMBOL(i2o_parm_issue); | ||
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c deleted file mode 100644 index 1b18a0d1d05b..000000000000 --- a/drivers/message/i2o/driver.c +++ /dev/null | |||
@@ -1,382 +0,0 @@ | |||
1 | /* | ||
2 | * Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs | ||
3 | * | ||
4 | * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/device.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | #include <linux/i2o.h> | ||
20 | #include <linux/workqueue.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include "core.h" | ||
24 | |||
25 | #define OSM_NAME "i2o" | ||
26 | |||
27 | /* max_drivers - Maximum I2O drivers (OSMs) which could be registered */ | ||
28 | static unsigned int i2o_max_drivers = I2O_MAX_DRIVERS; | ||
29 | module_param_named(max_drivers, i2o_max_drivers, uint, 0); | ||
30 | MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support"); | ||
31 | |||
32 | /* I2O drivers lock and array */ | ||
33 | static spinlock_t i2o_drivers_lock; | ||
34 | static struct i2o_driver **i2o_drivers; | ||
35 | |||
36 | /** | ||
37 | * i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM) | ||
38 | * @dev: device which should be verified | ||
39 | * @drv: the driver to match against | ||
40 | * | ||
41 | * Used by the bus to check if the driver wants to handle the device. | ||
42 | * | ||
43 | * Returns 1 if the class ids of the driver match the class id of the | ||
44 | * device, otherwise 0. | ||
45 | */ | ||
46 | static int i2o_bus_match(struct device *dev, struct device_driver *drv) | ||
47 | { | ||
48 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
49 | struct i2o_driver *i2o_drv = to_i2o_driver(drv); | ||
50 | struct i2o_class_id *ids = i2o_drv->classes; | ||
51 | |||
52 | if (ids) | ||
53 | while (ids->class_id != I2O_CLASS_END) { | ||
54 | if (ids->class_id == i2o_dev->lct_data.class_id) | ||
55 | return 1; | ||
56 | ids++; | ||
57 | } | ||
58 | return 0; | ||
59 | }; | ||
60 | |||
61 | /* I2O bus type */ | ||
62 | struct bus_type i2o_bus_type = { | ||
63 | .name = "i2o", | ||
64 | .match = i2o_bus_match, | ||
65 | .dev_groups = i2o_device_groups, | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * i2o_driver_register - Register an I2O driver (OSM) in the I2O core | ||
70 | * @drv: I2O driver which should be registered | ||
71 | * | ||
72 | * Registers the OSM drv in the I2O core and creates an event queue if | ||
73 | * necessary. | ||
74 | * | ||
75 | * Returns 0 on success or negative error code on failure. | ||
76 | */ | ||
77 | int i2o_driver_register(struct i2o_driver *drv) | ||
78 | { | ||
79 | struct i2o_controller *c; | ||
80 | int i; | ||
81 | int rc = 0; | ||
82 | unsigned long flags; | ||
83 | |||
84 | osm_debug("Register driver %s\n", drv->name); | ||
85 | |||
86 | if (drv->event) { | ||
87 | drv->event_queue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, | ||
88 | drv->name); | ||
89 | if (!drv->event_queue) { | ||
90 | osm_err("Could not initialize event queue for driver " | ||
91 | "%s\n", drv->name); | ||
92 | return -EFAULT; | ||
93 | } | ||
94 | osm_debug("Event queue initialized for driver %s\n", drv->name); | ||
95 | } else | ||
96 | drv->event_queue = NULL; | ||
97 | |||
98 | drv->driver.name = drv->name; | ||
99 | drv->driver.bus = &i2o_bus_type; | ||
100 | |||
101 | spin_lock_irqsave(&i2o_drivers_lock, flags); | ||
102 | |||
103 | for (i = 0; i2o_drivers[i]; i++) | ||
104 | if (i >= i2o_max_drivers) { | ||
105 | osm_err("too many drivers registered, increase " | ||
106 | "max_drivers\n"); | ||
107 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
108 | rc = -EFAULT; | ||
109 | goto out; | ||
110 | } | ||
111 | |||
112 | drv->context = i; | ||
113 | i2o_drivers[i] = drv; | ||
114 | |||
115 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
116 | |||
117 | osm_debug("driver %s gets context id %d\n", drv->name, drv->context); | ||
118 | |||
119 | list_for_each_entry(c, &i2o_controllers, list) { | ||
120 | struct i2o_device *i2o_dev; | ||
121 | |||
122 | i2o_driver_notify_controller_add(drv, c); | ||
123 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
124 | i2o_driver_notify_device_add(drv, i2o_dev); | ||
125 | } | ||
126 | |||
127 | rc = driver_register(&drv->driver); | ||
128 | if (rc) | ||
129 | goto out; | ||
130 | |||
131 | return 0; | ||
132 | out: | ||
133 | if (drv->event_queue) { | ||
134 | destroy_workqueue(drv->event_queue); | ||
135 | drv->event_queue = NULL; | ||
136 | } | ||
137 | |||
138 | return rc; | ||
139 | }; | ||
140 | |||
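Putting the pieces together, a minimal OSM fills in an i2o_driver naming the classes it wants and registers it. This is a sketch only; the my_osm_* names are illustrative, compare i2o_exec_driver and the Block OSM later in this patch:

static struct i2o_class_id my_osm_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

static struct i2o_driver my_osm_driver = {
	.name	 = "my-osm",
	.classes = my_osm_class_id,
	/* .reply and .event are optional; .driver.probe/.remove hook the bus */
};

static int __init my_osm_init(void)
{
	return i2o_driver_register(&my_osm_driver);
}

static void __exit my_osm_exit(void)
{
	i2o_driver_unregister(&my_osm_driver);
}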
141 | /** | ||
142 | * i2o_driver_unregister - Unregister an I2O driver (OSM) from the I2O core | ||
143 | * @drv: I2O driver which should be unregistered | ||
144 | * | ||
145 | * Unregisters the OSM drv from the I2O core and cleans up event queues if | ||
146 | * necessary. | ||
147 | */ | ||
148 | void i2o_driver_unregister(struct i2o_driver *drv) | ||
149 | { | ||
150 | struct i2o_controller *c; | ||
151 | unsigned long flags; | ||
152 | |||
153 | osm_debug("unregister driver %s\n", drv->name); | ||
154 | |||
155 | driver_unregister(&drv->driver); | ||
156 | |||
157 | list_for_each_entry(c, &i2o_controllers, list) { | ||
158 | struct i2o_device *i2o_dev; | ||
159 | |||
160 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
161 | i2o_driver_notify_device_remove(drv, i2o_dev); | ||
162 | |||
163 | i2o_driver_notify_controller_remove(drv, c); | ||
164 | } | ||
165 | |||
166 | spin_lock_irqsave(&i2o_drivers_lock, flags); | ||
167 | i2o_drivers[drv->context] = NULL; | ||
168 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
169 | |||
170 | if (drv->event_queue) { | ||
171 | destroy_workqueue(drv->event_queue); | ||
172 | drv->event_queue = NULL; | ||
173 | osm_debug("event queue removed for %s\n", drv->name); | ||
174 | } | ||
175 | }; | ||
176 | |||
177 | /** | ||
178 | * i2o_driver_dispatch - dispatch an I2O reply message | ||
179 | * @c: I2O controller of the message | ||
180 | * @m: I2O message number | ||
181 | * | ||
182 | * The reply is delivered to the driver that sent the original message. | ||
183 | * This function is only called from interrupt context. | ||
184 | * | ||
185 | * Returns 0 on success and the message should not be flushed. Returns > 0 | ||
186 | * on success and if the message should be flushed afterwards. Returns | ||
187 | * negative error code on failure (the message will be flushed too). | ||
188 | */ | ||
189 | int i2o_driver_dispatch(struct i2o_controller *c, u32 m) | ||
190 | { | ||
191 | struct i2o_driver *drv; | ||
192 | struct i2o_message *msg = i2o_msg_out_to_virt(c, m); | ||
193 | u32 context = le32_to_cpu(msg->u.s.icntxt); | ||
194 | unsigned long flags; | ||
195 | |||
196 | if (unlikely(context >= i2o_max_drivers)) { | ||
197 | osm_warn("%s: Spurious reply to unknown driver %d\n", c->name, | ||
198 | context); | ||
199 | return -EIO; | ||
200 | } | ||
201 | |||
202 | spin_lock_irqsave(&i2o_drivers_lock, flags); | ||
203 | drv = i2o_drivers[context]; | ||
204 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | ||
205 | |||
206 | if (unlikely(!drv)) { | ||
207 | osm_warn("%s: Spurious reply to unknown driver %d\n", c->name, | ||
208 | context); | ||
209 | return -EIO; | ||
210 | } | ||
211 | |||
212 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) { | ||
213 | struct i2o_device *dev, *tmp; | ||
214 | struct i2o_event *evt; | ||
215 | u16 size; | ||
216 | u16 tid = le32_to_cpu(msg->u.head[1]) & 0xfff; | ||
217 | |||
218 | osm_debug("event received from device %d\n", tid); | ||
219 | |||
220 | if (!drv->event) | ||
221 | return -EIO; | ||
222 | |||
223 | /* cut off header from message size (in 32-bit words) */ | ||
224 | size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; | ||
225 | |||
226 | evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); | ||
227 | if (!evt) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | evt->size = size; | ||
231 | evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); | ||
232 | evt->event_indicator = le32_to_cpu(msg->body[0]); | ||
233 | memcpy(&evt->data, &msg->body[1], size * 4); | ||
234 | |||
235 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | ||
236 | if (dev->lct_data.tid == tid) { | ||
237 | evt->i2o_dev = dev; | ||
238 | break; | ||
239 | } | ||
240 | |||
241 | INIT_WORK(&evt->work, drv->event); | ||
242 | queue_work(drv->event_queue, &evt->work); | ||
243 | return 1; | ||
244 | } | ||
245 | |||
246 | if (unlikely(!drv->reply)) { | ||
247 | osm_debug("%s: Reply to driver %s, but no reply function" | ||
248 | " defined!\n", c->name, drv->name); | ||
249 | return -EIO; | ||
250 | } | ||
251 | |||
252 | return drv->reply(c, m, msg); | ||
253 | } | ||
254 | |||
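On the OSM side, the work item queued above is embedded in the freshly allocated i2o_event, so the handler recovers it with container_of() and owns the allocation. A sketch with an illustrative name; compare i2o_exec_event() in exec-osm.c further down:

static void my_osm_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);

	/* evt->event_indicator, evt->data and evt->i2o_dev describe the event */
	kfree(evt);			/* the handler must free it */
}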
255 | /** | ||
256 | * i2o_driver_notify_controller_add_all - Send notify of added controller | ||
257 | * @c: newly added controller | ||
258 | * | ||
259 | * Send notifications to all registered drivers that a new controller was | ||
260 | * added. | ||
261 | */ | ||
262 | void i2o_driver_notify_controller_add_all(struct i2o_controller *c) | ||
263 | { | ||
264 | int i; | ||
265 | struct i2o_driver *drv; | ||
266 | |||
267 | for (i = 0; i < i2o_max_drivers; i++) { | ||
268 | drv = i2o_drivers[i]; | ||
269 | |||
270 | if (drv) | ||
271 | i2o_driver_notify_controller_add(drv, c); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | /** | ||
276 | * i2o_driver_notify_controller_remove_all - Send notify of removed controller | ||
277 | * @c: controller that is being removed | ||
278 | * | ||
279 | * Send notifications to all registered drivers that a controller was | ||
280 | * removed. | ||
281 | */ | ||
282 | void i2o_driver_notify_controller_remove_all(struct i2o_controller *c) | ||
283 | { | ||
284 | int i; | ||
285 | struct i2o_driver *drv; | ||
286 | |||
287 | for (i = 0; i < i2o_max_drivers; i++) { | ||
288 | drv = i2o_drivers[i]; | ||
289 | |||
290 | if (drv) | ||
291 | i2o_driver_notify_controller_remove(drv, c); | ||
292 | } | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * i2o_driver_notify_device_add_all - Send notify of added device | ||
297 | * @i2o_dev: newly added I2O device | ||
298 | * | ||
299 | * Send notifications to all registered drivers that a device was added. | ||
300 | */ | ||
301 | void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev) | ||
302 | { | ||
303 | int i; | ||
304 | struct i2o_driver *drv; | ||
305 | |||
306 | for (i = 0; i < i2o_max_drivers; i++) { | ||
307 | drv = i2o_drivers[i]; | ||
308 | |||
309 | if (drv) | ||
310 | i2o_driver_notify_device_add(drv, i2o_dev); | ||
311 | } | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * i2o_driver_notify_device_remove_all - Send notify of removed device | ||
316 | * @i2o_dev: device that is being removed | ||
317 | * | ||
318 | * Send notifications to all registered drivers that a device was removed. | ||
319 | */ | ||
320 | void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev) | ||
321 | { | ||
322 | int i; | ||
323 | struct i2o_driver *drv; | ||
324 | |||
325 | for (i = 0; i < i2o_max_drivers; i++) { | ||
326 | drv = i2o_drivers[i]; | ||
327 | |||
328 | if (drv) | ||
329 | i2o_driver_notify_device_remove(drv, i2o_dev); | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * i2o_driver_init - initialize I2O drivers (OSMs) | ||
335 | * | ||
336 | * Registers the I2O bus and allocates memory for the array of OSMs. | ||
337 | * | ||
338 | * Returns 0 on success or negative error code on failure. | ||
339 | */ | ||
340 | int __init i2o_driver_init(void) | ||
341 | { | ||
342 | int rc = 0; | ||
343 | |||
344 | spin_lock_init(&i2o_drivers_lock); | ||
345 | |||
346 | if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64)) { | ||
347 | osm_warn("max_drivers set to %d, but must be >=2 and <= 64\n", | ||
348 | i2o_max_drivers); | ||
349 | i2o_max_drivers = I2O_MAX_DRIVERS; | ||
350 | } | ||
351 | osm_info("max drivers = %d\n", i2o_max_drivers); | ||
352 | |||
353 | i2o_drivers = | ||
354 | kcalloc(i2o_max_drivers, sizeof(*i2o_drivers), GFP_KERNEL); | ||
355 | if (!i2o_drivers) | ||
356 | return -ENOMEM; | ||
357 | |||
358 | rc = bus_register(&i2o_bus_type); | ||
359 | |||
360 | if (rc < 0) | ||
361 | kfree(i2o_drivers); | ||
362 | |||
363 | return rc; | ||
364 | }; | ||
365 | |||
366 | /** | ||
367 | * i2o_driver_exit - clean up I2O drivers (OSMs) | ||
368 | * | ||
369 | * Unregisters the I2O bus and frees driver array. | ||
370 | */ | ||
371 | void i2o_driver_exit(void) | ||
372 | { | ||
373 | bus_unregister(&i2o_bus_type); | ||
374 | kfree(i2o_drivers); | ||
375 | }; | ||
376 | |||
377 | EXPORT_SYMBOL(i2o_driver_register); | ||
378 | EXPORT_SYMBOL(i2o_driver_unregister); | ||
379 | EXPORT_SYMBOL(i2o_driver_notify_controller_add_all); | ||
380 | EXPORT_SYMBOL(i2o_driver_notify_controller_remove_all); | ||
381 | EXPORT_SYMBOL(i2o_driver_notify_device_add_all); | ||
382 | EXPORT_SYMBOL(i2o_driver_notify_device_remove_all); | ||
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c deleted file mode 100644 index a3970e56ae53..000000000000 --- a/drivers/message/i2o/exec-osm.c +++ /dev/null | |||
@@ -1,612 +0,0 @@ | |||
1 | /* | ||
2 | * Executive OSM | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the Red | ||
14 | * Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@lxorguk.ukuu.org.uk>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
27 | * Support for sysfs included. | ||
28 | */ | ||
29 | |||
30 | #include <linux/module.h> | ||
31 | #include <linux/i2o.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/workqueue.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */ | ||
37 | #include <asm/param.h> /* HZ */ | ||
38 | #include "core.h" | ||
39 | |||
40 | #define OSM_NAME "exec-osm" | ||
41 | |||
42 | struct i2o_driver i2o_exec_driver; | ||
43 | |||
44 | /* global wait list for POST WAIT */ | ||
45 | static LIST_HEAD(i2o_exec_wait_list); | ||
46 | |||
47 | /* Wait struct needed for POST WAIT */ | ||
48 | struct i2o_exec_wait { | ||
49 | wait_queue_head_t *wq; /* Pointer to Wait queue */ | ||
50 | struct i2o_dma dma; /* DMA buffers to free on failure */ | ||
51 | u32 tcntxt; /* transaction context from reply */ | ||
52 | int complete; /* 1 if reply received otherwise 0 */ | ||
53 | u32 m; /* message id */ | ||
54 | struct i2o_message *msg; /* pointer to the reply message */ | ||
55 | struct list_head list; /* node in global wait list */ | ||
56 | spinlock_t lock; /* lock before modifying */ | ||
57 | }; | ||
58 | |||
59 | /* Work struct needed to handle LCT NOTIFY replies */ | ||
60 | struct i2o_exec_lct_notify_work { | ||
61 | struct work_struct work; /* work struct */ | ||
62 | struct i2o_controller *c; /* controller on which the LCT NOTIFY | ||
63 | was received */ | ||
64 | }; | ||
65 | |||
66 | /* Exec OSM class handling definition */ | ||
67 | static struct i2o_class_id i2o_exec_class_id[] = { | ||
68 | {I2O_CLASS_EXECUTIVE}, | ||
69 | {I2O_CLASS_END} | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * i2o_exec_wait_alloc - Allocate an i2o_exec_wait struct and initialize it | ||
74 | * | ||
75 | * Allocate the i2o_exec_wait struct and initialize the wait. | ||
76 | * | ||
77 | * Returns i2o_exec_wait pointer on success or negative error code on | ||
78 | * failure. | ||
79 | */ | ||
80 | static struct i2o_exec_wait *i2o_exec_wait_alloc(void) | ||
81 | { | ||
82 | struct i2o_exec_wait *wait; | ||
83 | |||
84 | wait = kzalloc(sizeof(*wait), GFP_KERNEL); | ||
85 | if (!wait) | ||
86 | return NULL; | ||
87 | |||
88 | INIT_LIST_HEAD(&wait->list); | ||
89 | spin_lock_init(&wait->lock); | ||
90 | |||
91 | return wait; | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * i2o_exec_wait_free - Free an i2o_exec_wait struct | ||
96 | * @wait: I2O wait data which should be cleaned up | ||
97 | */ | ||
98 | static void i2o_exec_wait_free(struct i2o_exec_wait *wait) | ||
99 | { | ||
100 | kfree(wait); | ||
101 | }; | ||
102 | |||
103 | /** | ||
104 | * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers | ||
105 | * @c: controller | ||
106 | * @msg: message to post | ||
107 | * @timeout: time in seconds to wait | ||
108 | * @dma: i2o_dma struct of the DMA buffer to free on failure | ||
109 | * | ||
110 | * This API allows an OSM to post a message and then be told whether or | ||
111 | * not the system received a successful reply. If the message times out | ||
112 | * then the value '-ETIMEDOUT' is returned. This is a special case. In | ||
113 | * this situation the message may (should) complete at an indefinite time | ||
114 | * in the future. When it completes it will use the memory buffer | ||
115 | * attached to the request. If -ETIMEDOUT is returned then the memory | ||
116 | * buffer must not be freed. Instead the event completion will free them | ||
117 | * for you. In all other cases the buffers are your problem. | ||
118 | * | ||
119 | * Returns 0 on success, negative error code on timeout or positive error | ||
120 | * code from reply. | ||
121 | */ | ||
122 | int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg, | ||
123 | unsigned long timeout, struct i2o_dma *dma) | ||
124 | { | ||
125 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); | ||
126 | struct i2o_exec_wait *wait; | ||
127 | static u32 tcntxt = 0x80000000; | ||
128 | unsigned long flags; | ||
129 | int rc = 0; | ||
130 | |||
131 | wait = i2o_exec_wait_alloc(); | ||
132 | if (!wait) { | ||
133 | i2o_msg_nop(c, msg); | ||
134 | return -ENOMEM; | ||
135 | } | ||
136 | |||
137 | if (tcntxt == 0xffffffff) | ||
138 | tcntxt = 0x80000000; | ||
139 | |||
140 | if (dma) | ||
141 | wait->dma = *dma; | ||
142 | |||
143 | /* | ||
144 | * Fill in the message initiator context and transaction context. | ||
145 | * We will only use transaction contexts >= 0x80000000 for POST WAIT, | ||
146 | * so we could find a POST WAIT reply easier in the reply handler. | ||
147 | */ | ||
148 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); | ||
149 | wait->tcntxt = tcntxt++; | ||
150 | msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt); | ||
151 | |||
152 | wait->wq = &wq; | ||
153 | /* | ||
154 | * we add elements at the head, because if an entry in the list is | ||
155 | * never removed we would otherwise have to iterate over it every time | ||
156 | */ | ||
157 | list_add(&wait->list, &i2o_exec_wait_list); | ||
158 | |||
159 | /* | ||
160 | * Post the message to the controller. At some point later it will | ||
161 | * return. If we time out before it returns then complete will be zero. | ||
162 | */ | ||
163 | i2o_msg_post(c, msg); | ||
164 | |||
165 | wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ); | ||
166 | |||
167 | spin_lock_irqsave(&wait->lock, flags); | ||
168 | |||
169 | wait->wq = NULL; | ||
170 | |||
171 | if (wait->complete) | ||
172 | rc = le32_to_cpu(wait->msg->body[0]) >> 24; | ||
173 | else { | ||
174 | /* | ||
175 | * We cannot remove it now. This is important. When it does | ||
176 | * terminate (which it must do if the controller has not | ||
177 | * died...) then it will otherwise scribble on stuff. | ||
178 | * | ||
179 | * FIXME: try abort message | ||
180 | */ | ||
181 | if (dma) | ||
182 | dma->virt = NULL; | ||
183 | |||
184 | rc = -ETIMEDOUT; | ||
185 | } | ||
186 | |||
187 | spin_unlock_irqrestore(&wait->lock, flags); | ||
188 | |||
189 | if (rc != -ETIMEDOUT) { | ||
190 | i2o_flush_reply(c, wait->m); | ||
191 | i2o_exec_wait_free(wait); | ||
192 | } | ||
193 | |||
194 | return rc; | ||
195 | }; | ||
196 | |||
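The ownership rule in the comment above is easy to get wrong, so here is the calling pattern as a sketch; i2o_parm_issue() in device.c, earlier in this patch, is the in-tree example of the same rule. The 4096-byte length and the helper name are illustrative only:

static int my_post_with_buffer(struct i2o_controller *c, struct i2o_message *msg)
{
	struct i2o_dma buf;
	int rc;

	if (i2o_dma_alloc(&c->pdev->dev, &buf, 4096)) {
		i2o_msg_nop(c, msg);	/* give the message frame back */
		return -ENOMEM;
	}

	/* ... point the message SGL at buf.phys before posting ... */

	rc = i2o_msg_post_wait_mem(c, msg, 60, &buf);
	if (rc == -ETIMEDOUT)
		return rc;	/* the buffer now belongs to the reply handler */

	i2o_dma_free(&c->pdev->dev, &buf);	/* all other cases: caller frees */
	return rc;
}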
197 | /** | ||
198 | * i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP | ||
199 | * @c: I2O controller which answers | ||
200 | * @m: message id | ||
201 | * @msg: pointer to the I2O reply message | ||
202 | * @context: transaction context of request | ||
203 | * | ||
204 | * This function is called in interrupt context only. If the reply arrives | ||
205 | * before the timeout, the i2o_exec_wait struct is filled with the message | ||
206 | * and the task will be woken up. The task is now responsible for returning | ||
207 | * the message m back to the controller! If the message reaches us after | ||
208 | * the timeout, clean up the i2o_exec_wait struct (including the allocated | ||
209 | * DMA buffer). | ||
210 | * | ||
211 | * Return 0 on success and if the message m should not be given back to the | ||
212 | * I2O controller, or >0 on success and if the message should be given back | ||
213 | * afterwards. Returns negative error code on failure. In this case the | ||
214 | * message must also be given back to the controller. | ||
215 | */ | ||
216 | static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | ||
217 | struct i2o_message *msg, u32 context) | ||
218 | { | ||
219 | struct i2o_exec_wait *wait, *tmp; | ||
220 | unsigned long flags; | ||
221 | int rc = 1; | ||
222 | |||
223 | /* | ||
224 | * We need to search through the i2o_exec_wait_list to see if the given | ||
225 | * message is still outstanding. If not, it means that the IOP took | ||
226 | * longer to respond to the message than we had allowed and the timer has | ||
227 | * already expired. Not much we can do about that except log it for | ||
228 | * debug purposes, increase timeout, and recompile. | ||
229 | */ | ||
230 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { | ||
231 | if (wait->tcntxt == context) { | ||
232 | spin_lock_irqsave(&wait->lock, flags); | ||
233 | |||
234 | list_del(&wait->list); | ||
235 | |||
236 | wait->m = m; | ||
237 | wait->msg = msg; | ||
238 | wait->complete = 1; | ||
239 | |||
240 | if (wait->wq) | ||
241 | rc = 0; | ||
242 | else | ||
243 | rc = -1; | ||
244 | |||
245 | spin_unlock_irqrestore(&wait->lock, flags); | ||
246 | |||
247 | if (rc) { | ||
248 | struct device *dev; | ||
249 | |||
250 | dev = &c->pdev->dev; | ||
251 | |||
252 | pr_debug("%s: timedout reply received!\n", | ||
253 | c->name); | ||
254 | i2o_dma_free(dev, &wait->dma); | ||
255 | i2o_exec_wait_free(wait); | ||
256 | } else | ||
257 | wake_up_interruptible(wait->wq); | ||
258 | |||
259 | return rc; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, | ||
264 | context); | ||
265 | |||
266 | return -1; | ||
267 | }; | ||
268 | |||
269 | /** | ||
270 | * i2o_exec_show_vendor_id - Displays Vendor ID of controller | ||
271 | * @d: device of which the Vendor ID should be displayed | ||
272 | * @attr: device_attribute to display | ||
273 | * @buf: buffer into which the Vendor ID should be printed | ||
274 | * | ||
275 | * Returns number of bytes printed into buffer. | ||
276 | */ | ||
277 | static ssize_t i2o_exec_show_vendor_id(struct device *d, | ||
278 | struct device_attribute *attr, char *buf) | ||
279 | { | ||
280 | struct i2o_device *dev = to_i2o_device(d); | ||
281 | u16 id; | ||
282 | |||
283 | if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { | ||
284 | sprintf(buf, "0x%04x", le16_to_cpu(id)); | ||
285 | return strlen(buf) + 1; | ||
286 | } | ||
287 | |||
288 | return 0; | ||
289 | }; | ||
290 | |||
291 | /** | ||
292 | * i2o_exec_show_product_id - Displays Product ID of controller | ||
293 | * @d: device of which the Product ID should be displayed | ||
294 | * @attr: device_attribute to display | ||
295 | * @buf: buffer into which the Product ID should be printed | ||
296 | * | ||
297 | * Returns number of bytes printed into buffer. | ||
298 | */ | ||
299 | static ssize_t i2o_exec_show_product_id(struct device *d, | ||
300 | struct device_attribute *attr, | ||
301 | char *buf) | ||
302 | { | ||
303 | struct i2o_device *dev = to_i2o_device(d); | ||
304 | u16 id; | ||
305 | |||
306 | if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { | ||
307 | sprintf(buf, "0x%04x", le16_to_cpu(id)); | ||
308 | return strlen(buf) + 1; | ||
309 | } | ||
310 | |||
311 | return 0; | ||
312 | }; | ||
313 | |||
314 | /* Exec-OSM device attributes */ | ||
315 | static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL); | ||
316 | static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL); | ||
317 | |||
318 | /** | ||
319 | * i2o_exec_probe - Called if a new I2O device (executive class) appears | ||
320 | * @dev: I2O device which should be probed | ||
321 | * | ||
322 | * Registers event notification for every event from Executive device. The | ||
323 | * return is always 0, because we want all devices of class Executive. | ||
324 | * | ||
325 | * Returns 0 on success. | ||
326 | */ | ||
327 | static int i2o_exec_probe(struct device *dev) | ||
328 | { | ||
329 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
330 | int rc; | ||
331 | |||
332 | rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); | ||
333 | if (rc) goto err_out; | ||
334 | |||
335 | rc = device_create_file(dev, &dev_attr_vendor_id); | ||
336 | if (rc) goto err_evtreg; | ||
337 | rc = device_create_file(dev, &dev_attr_product_id); | ||
338 | if (rc) goto err_vid; | ||
339 | |||
340 | i2o_dev->iop->exec = i2o_dev; | ||
341 | |||
342 | return 0; | ||
343 | |||
344 | err_vid: | ||
345 | device_remove_file(dev, &dev_attr_vendor_id); | ||
346 | err_evtreg: | ||
347 | i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); | ||
348 | err_out: | ||
349 | return rc; | ||
350 | }; | ||
351 | |||
352 | /** | ||
353 | * i2o_exec_remove - Called on I2O device removal | ||
354 | * @dev: I2O device which was removed | ||
355 | * | ||
356 | * Unregisters event notification from Executive I2O device. | ||
357 | * | ||
358 | * Returns 0 on success. | ||
359 | */ | ||
360 | static int i2o_exec_remove(struct device *dev) | ||
361 | { | ||
362 | device_remove_file(dev, &dev_attr_product_id); | ||
363 | device_remove_file(dev, &dev_attr_vendor_id); | ||
364 | |||
365 | i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); | ||
366 | |||
367 | return 0; | ||
368 | }; | ||
369 | |||
370 | #ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES | ||
371 | /** | ||
372 | * i2o_exec_lct_notify - Send an asynchronous LCT NOTIFY request | ||
373 | * @c: I2O controller to which the request should be send | ||
374 | * @change_ind: change indicator | ||
375 | * | ||
376 | * This function sends a LCT NOTIFY request to the I2O controller with | ||
377 | * the change indicator change_ind. If the change_ind == 0 the controller | ||
378 | * replies immediately after the request. If change_ind > 0 the reply is | ||
379 | * sent once the change indicator of the LCT exceeds change_ind. | ||
380 | */ | ||
381 | static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind) | ||
382 | { | ||
383 | i2o_status_block *sb = c->status_block.virt; | ||
384 | struct device *dev; | ||
385 | struct i2o_message *msg; | ||
386 | |||
387 | mutex_lock(&c->lct_lock); | ||
388 | |||
389 | dev = &c->pdev->dev; | ||
390 | |||
391 | if (i2o_dma_realloc(dev, &c->dlct, | ||
392 | le32_to_cpu(sb->expected_lct_size))) { | ||
393 | mutex_unlock(&c->lct_lock); | ||
394 | return -ENOMEM; | ||
395 | } | ||
396 | |||
397 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
398 | if (IS_ERR(msg)) { | ||
399 | mutex_unlock(&c->lct_lock); | ||
400 | return PTR_ERR(msg); | ||
401 | } | ||
402 | |||
403 | msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); | ||
404 | msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | | ||
405 | ADAPTER_TID); | ||
406 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); | ||
407 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); | ||
408 | msg->body[0] = cpu_to_le32(0xffffffff); | ||
409 | msg->body[1] = cpu_to_le32(change_ind); | ||
410 | msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); | ||
411 | msg->body[3] = cpu_to_le32(c->dlct.phys); | ||
412 | |||
413 | i2o_msg_post(c, msg); | ||
414 | |||
415 | mutex_unlock(&c->lct_lock); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
419 | #endif | ||
420 | |||
421 | /** | ||
422 | * i2o_exec_lct_modified - Called on LCT NOTIFY reply | ||
423 | * @_work: work struct for a specific controller | ||
424 | * | ||
425 | * This function handles asynchronous LCT NOTIFY replies. It parses the | ||
426 | * new LCT and, if the buffer for the LCT was too small, sends an LCT NOTIFY | ||
427 | * again; otherwise it sends an LCT NOTIFY to be informed of the next change. | ||
428 | */ | ||
429 | static void i2o_exec_lct_modified(struct work_struct *_work) | ||
430 | { | ||
431 | struct i2o_exec_lct_notify_work *work = | ||
432 | container_of(_work, struct i2o_exec_lct_notify_work, work); | ||
433 | u32 change_ind = 0; | ||
434 | struct i2o_controller *c = work->c; | ||
435 | |||
436 | kfree(work); | ||
437 | |||
438 | if (i2o_device_parse_lct(c) != -EAGAIN) | ||
439 | change_ind = c->lct->change_ind + 1; | ||
440 | |||
441 | #ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES | ||
442 | i2o_exec_lct_notify(c, change_ind); | ||
443 | #endif | ||
444 | }; | ||
445 | |||
446 | /** | ||
447 | * i2o_exec_reply - I2O Executive reply handler | ||
448 | * @c: I2O controller from which the reply comes | ||
449 | * @m: message id | ||
450 | * @msg: pointer to the I2O reply message | ||
451 | * | ||
452 | * This function is always called from interrupt context. If a POST WAIT | ||
453 | * reply was received, pass it to the complete function. If a LCT NOTIFY | ||
454 | * reply was received, a new event is created to handle the update. | ||
455 | * | ||
456 | * Returns 0 on success and if the reply should not be flushed or > 0 | ||
457 | * on success and if the reply should be flushed. Returns negative error | ||
458 | * code on failure and if the reply should be flushed. | ||
459 | */ | ||
460 | static int i2o_exec_reply(struct i2o_controller *c, u32 m, | ||
461 | struct i2o_message *msg) | ||
462 | { | ||
463 | u32 context; | ||
464 | |||
465 | if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { | ||
466 | struct i2o_message __iomem *pmsg; | ||
467 | u32 pm; | ||
468 | |||
469 | /* | ||
470 | * If Fail bit is set we must take the transaction context of | ||
471 | * the preserved message to find the right request again. | ||
472 | */ | ||
473 | |||
474 | pm = le32_to_cpu(msg->body[3]); | ||
475 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
476 | context = readl(&pmsg->u.s.tcntxt); | ||
477 | |||
478 | i2o_report_status(KERN_INFO, "i2o_core", msg); | ||
479 | |||
480 | /* Release the preserved msg */ | ||
481 | i2o_msg_nop_mfa(c, pm); | ||
482 | } else | ||
483 | context = le32_to_cpu(msg->u.s.tcntxt); | ||
484 | |||
485 | if (context & 0x80000000) | ||
486 | return i2o_msg_post_wait_complete(c, m, msg, context); | ||
487 | |||
488 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) { | ||
489 | struct i2o_exec_lct_notify_work *work; | ||
490 | |||
491 | pr_debug("%s: LCT notify received\n", c->name); | ||
492 | |||
493 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | ||
494 | if (!work) | ||
495 | return -ENOMEM; | ||
496 | |||
497 | work->c = c; | ||
498 | |||
499 | INIT_WORK(&work->work, i2o_exec_lct_modified); | ||
500 | queue_work(i2o_exec_driver.event_queue, &work->work); | ||
501 | return 1; | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * If this happens, we want to dump the message to the syslog so | ||
506 | * it can be sent back to the card manufacturer by the end user | ||
507 | * to aid in debugging. | ||
508 | * | ||
509 | */ | ||
510 | printk(KERN_WARNING "%s: Unsolicited message reply sent to core! " | ||
511 | "Message dumped to syslog\n", c->name); | ||
512 | i2o_dump_message(msg); | ||
513 | |||
514 | return -EFAULT; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * i2o_exec_event - Event handling function | ||
519 | * @work: Work item of the occurring event | ||
520 | * | ||
521 | * Handles events sent by the Executive device. At the moment it does not do | ||
522 | * anything useful. | ||
523 | */ | ||
524 | static void i2o_exec_event(struct work_struct *work) | ||
525 | { | ||
526 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
527 | |||
528 | if (likely(evt->i2o_dev)) | ||
529 | osm_debug("Event received from device: %d\n", | ||
530 | evt->i2o_dev->lct_data.tid); | ||
531 | kfree(evt); | ||
532 | }; | ||
533 | |||
534 | /** | ||
535 | * i2o_exec_lct_get - Get the IOP's Logical Configuration Table | ||
536 | * @c: I2O controller from which the LCT should be fetched | ||
537 | * | ||
538 | * Send an LCT NOTIFY request to the controller, and wait up to | ||
539 | * I2O_TIMEOUT_LCT_GET seconds for the response to arrive. If the LCT is | ||
540 | * too large, retry it. | ||
541 | * | ||
542 | * Returns 0 on success or negative error code on failure. | ||
543 | */ | ||
544 | int i2o_exec_lct_get(struct i2o_controller *c) | ||
545 | { | ||
546 | struct i2o_message *msg; | ||
547 | int i = 0; | ||
548 | int rc = -EAGAIN; | ||
549 | |||
550 | for (i = 1; i <= I2O_LCT_GET_TRIES; i++) { | ||
551 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
552 | if (IS_ERR(msg)) | ||
553 | return PTR_ERR(msg); | ||
554 | |||
555 | msg->u.head[0] = | ||
556 | cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); | ||
557 | msg->u.head[1] = | ||
558 | cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | | ||
559 | ADAPTER_TID); | ||
560 | msg->body[0] = cpu_to_le32(0xffffffff); | ||
561 | msg->body[1] = cpu_to_le32(0x00000000); | ||
562 | msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len); | ||
563 | msg->body[3] = cpu_to_le32(c->dlct.phys); | ||
564 | |||
565 | rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET); | ||
566 | if (rc < 0) | ||
567 | break; | ||
568 | |||
569 | rc = i2o_device_parse_lct(c); | ||
570 | if (rc != -EAGAIN) | ||
571 | break; | ||
572 | } | ||
573 | |||
574 | return rc; | ||
575 | } | ||
576 | |||
577 | /* Exec OSM driver struct */ | ||
578 | struct i2o_driver i2o_exec_driver = { | ||
579 | .name = OSM_NAME, | ||
580 | .reply = i2o_exec_reply, | ||
581 | .event = i2o_exec_event, | ||
582 | .classes = i2o_exec_class_id, | ||
583 | .driver = { | ||
584 | .probe = i2o_exec_probe, | ||
585 | .remove = i2o_exec_remove, | ||
586 | }, | ||
587 | }; | ||
588 | |||
589 | /** | ||
590 | * i2o_exec_init - Registers the Exec OSM | ||
591 | * | ||
592 | * Registers the Exec OSM in the I2O core. | ||
593 | * | ||
594 | * Returns 0 on success or negative error code on failure. | ||
595 | */ | ||
596 | int __init i2o_exec_init(void) | ||
597 | { | ||
598 | return i2o_driver_register(&i2o_exec_driver); | ||
599 | }; | ||
600 | |||
601 | /** | ||
602 | * i2o_exec_exit - Removes the Exec OSM | ||
603 | * | ||
604 | * Unregisters the Exec OSM from the I2O core. | ||
605 | */ | ||
606 | void i2o_exec_exit(void) | ||
607 | { | ||
608 | i2o_driver_unregister(&i2o_exec_driver); | ||
609 | }; | ||
610 | |||
611 | EXPORT_SYMBOL(i2o_msg_post_wait_mem); | ||
612 | EXPORT_SYMBOL(i2o_exec_lct_get); | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c deleted file mode 100644 index 6fc3866965df..000000000000 --- a/drivers/message/i2o/i2o_block.c +++ /dev/null | |||
@@ -1,1228 +0,0 @@ | |||
1 | /* | ||
2 | * Block OSM | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * For the purpose of avoiding doubt the preferred form of the work | ||
19 | * for making modifications shall be a standards compliant form such as a | ||
20 | * gzipped tar and not one requiring a proprietary or patent encumbered | ||
21 | * tool to unpack. | ||
22 | * | ||
23 | * Fixes/additions: | ||
24 | * Steve Ralston: | ||
25 | * Multiple device handling error fixes, | ||
26 | * Added a queue depth. | ||
27 | * Alan Cox: | ||
28 | * FC920 has an rmw bug. Don't OR in the end marker. | ||
29 | * Removed queue walk, fixed for 64bitness. | ||
30 | * Rewrote much of the code over time | ||
31 | * Added indirect block lists | ||
32 | * Handle 64K limits on many controllers | ||
33 | * Don't use indirects on the Promise (breaks) | ||
34 | * Heavily chop down the queue depths | ||
35 | * Deepak Saxena: | ||
36 | * Independent queues per IOP | ||
37 | * Support for dynamic device creation/deletion | ||
38 | * Code cleanup | ||
39 | * Support for larger I/Os through merge* functions | ||
40 | * (taken from DAC960 driver) | ||
41 | * Boji T Kannanthanam: | ||
42 | * Set the I2O Block devices to be detected in increasing | ||
43 | * order of TIDs during boot. | ||
44 | * Search and set the I2O block device that we boot off | ||
45 | * from as the first device to be claimed (as /dev/i2o/hda) | ||
46 | * Properly attach/detach I2O gendisk structure from the | ||
47 | * system gendisk list. The I2O block devices now appear in | ||
48 | * /proc/partitions. | ||
49 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
50 | * Minor bugfixes for 2.6. | ||
51 | */ | ||
52 | |||
53 | #include <linux/module.h> | ||
54 | #include <linux/slab.h> | ||
55 | #include <linux/i2o.h> | ||
56 | #include <linux/mutex.h> | ||
57 | |||
58 | #include <linux/mempool.h> | ||
59 | |||
60 | #include <linux/genhd.h> | ||
61 | #include <linux/blkdev.h> | ||
62 | #include <linux/hdreg.h> | ||
63 | |||
64 | #include <scsi/scsi.h> | ||
65 | |||
66 | #include "i2o_block.h" | ||
67 | |||
68 | #define OSM_NAME "block-osm" | ||
69 | #define OSM_VERSION "1.325" | ||
70 | #define OSM_DESCRIPTION "I2O Block Device OSM" | ||
71 | |||
72 | static DEFINE_MUTEX(i2o_block_mutex); | ||
73 | static struct i2o_driver i2o_block_driver; | ||
74 | |||
75 | /* global Block OSM request mempool */ | ||
76 | static struct i2o_block_mempool i2o_blk_req_pool; | ||
77 | |||
78 | /* Block OSM class handling definition */ | ||
79 | static struct i2o_class_id i2o_block_class_id[] = { | ||
80 | {I2O_CLASS_RANDOM_BLOCK_STORAGE}, | ||
81 | {I2O_CLASS_END} | ||
82 | }; | ||
83 | |||
84 | /** | ||
85 | * i2o_block_device_free - free the memory of the I2O Block device | ||
86 | * @dev: I2O Block device, which should be cleaned up | ||
87 | * | ||
88 | * Frees the request queue, gendisk and the i2o_block_device structure. | ||
89 | */ | ||
90 | static void i2o_block_device_free(struct i2o_block_device *dev) | ||
91 | { | ||
92 | blk_cleanup_queue(dev->gd->queue); | ||
93 | |||
94 | put_disk(dev->gd); | ||
95 | |||
96 | kfree(dev); | ||
97 | }; | ||
98 | |||
99 | /** | ||
100 | * i2o_block_remove - remove the I2O Block device from the system again | ||
101 | * @dev: I2O Block device which should be removed | ||
102 | * | ||
103 | * Remove gendisk from system and free all allocated memory. | ||
104 | * | ||
105 | * Always returns 0. | ||
106 | */ | ||
107 | static int i2o_block_remove(struct device *dev) | ||
108 | { | ||
109 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
110 | struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); | ||
111 | |||
112 | osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid, | ||
113 | i2o_blk_dev->gd->disk_name); | ||
114 | |||
115 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); | ||
116 | |||
117 | del_gendisk(i2o_blk_dev->gd); | ||
118 | |||
119 | dev_set_drvdata(dev, NULL); | ||
120 | |||
121 | i2o_device_claim_release(i2o_dev); | ||
122 | |||
123 | i2o_block_device_free(i2o_blk_dev); | ||
124 | |||
125 | return 0; | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * i2o_block_device_flush - Flush all dirty data of I2O device dev | ||
130 | * @dev: I2O device which should be flushed | ||
131 | * | ||
132 | * Flushes all dirty data on device dev. | ||
133 | * | ||
134 | * Returns 0 on success or negative error code on failure. | ||
135 | */ | ||
136 | static int i2o_block_device_flush(struct i2o_device *dev) | ||
137 | { | ||
138 | struct i2o_message *msg; | ||
139 | |||
140 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
141 | if (IS_ERR(msg)) | ||
142 | return PTR_ERR(msg); | ||
143 | |||
144 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
145 | msg->u.head[1] = | ||
146 | cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev-> | ||
147 | lct_data.tid); | ||
148 | msg->body[0] = cpu_to_le32(60 << 16); | ||
149 | osm_debug("Flushing...\n"); | ||
150 | |||
151 | return i2o_msg_post_wait(dev->iop, msg, 60); | ||
152 | }; | ||
153 | |||
154 | /** | ||
155 | * i2o_block_device_mount - Mount (load) the media of device dev | ||
156 | * @dev: I2O device which should receive the mount request | ||
157 | * @media_id: Media Identifier | ||
158 | * | ||
159 | * Load media into the drive. The identifier should be set to -1, because the | ||
160 | * spec does not support any other value. | ||
161 | * | ||
162 | * Returns 0 on success or negative error code on failure. | ||
163 | */ | ||
164 | static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id) | ||
165 | { | ||
166 | struct i2o_message *msg; | ||
167 | |||
168 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
169 | if (IS_ERR(msg)) | ||
170 | return PTR_ERR(msg); | ||
171 | |||
172 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
173 | msg->u.head[1] = | ||
174 | cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev-> | ||
175 | lct_data.tid); | ||
176 | msg->body[0] = cpu_to_le32(-1); | ||
177 | msg->body[1] = cpu_to_le32(0x00000000); | ||
178 | osm_debug("Mounting...\n"); | ||
179 | |||
180 | return i2o_msg_post_wait(dev->iop, msg, 2); | ||
181 | }; | ||
182 | |||
183 | /** | ||
184 | * i2o_block_device_lock - Locks the media of device dev | ||
185 | * @dev: I2O device which should receive the lock request | ||
186 | * @media_id: Media Identifier | ||
187 | * | ||
188 | * Lock media of device dev to prevent removal. The media identifier | ||
189 | * should be set to -1, because the spec does not support any other value. | ||
190 | * | ||
191 | * Returns 0 on success or negative error code on failure. | ||
192 | */ | ||
193 | static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id) | ||
194 | { | ||
195 | struct i2o_message *msg; | ||
196 | |||
197 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
198 | if (IS_ERR(msg)) | ||
199 | return PTR_ERR(msg); | ||
200 | |||
201 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
202 | msg->u.head[1] = | ||
203 | cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev-> | ||
204 | lct_data.tid); | ||
205 | msg->body[0] = cpu_to_le32(-1); | ||
206 | osm_debug("Locking...\n"); | ||
207 | |||
208 | return i2o_msg_post_wait(dev->iop, msg, 2); | ||
209 | }; | ||
210 | |||
211 | /** | ||
212 | * i2o_block_device_unlock - Unlocks the media of device dev | ||
213 | * @dev: I2O device which should receive the unlock request | ||
214 | * @media_id: Media Identifier | ||
215 | * | ||
216 | * Unlocks the media in device dev. The media identifier should be set to | ||
217 | * -1, because the spec does not support any other value. | ||
218 | * | ||
219 | * Returns 0 on success or negative error code on failure. | ||
220 | */ | ||
221 | static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id) | ||
222 | { | ||
223 | struct i2o_message *msg; | ||
224 | |||
225 | msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); | ||
226 | if (IS_ERR(msg)) | ||
227 | return PTR_ERR(msg); | ||
228 | |||
229 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
230 | msg->u.head[1] = | ||
231 | cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev-> | ||
232 | lct_data.tid); | ||
233 | msg->body[0] = cpu_to_le32(media_id); | ||
234 | osm_debug("Unlocking...\n"); | ||
235 | |||
236 | return i2o_msg_post_wait(dev->iop, msg, 2); | ||
237 | }; | ||
238 | |||
239 | /** | ||
240 | * i2o_block_device_power - Power management for device dev | ||
241 | * @dev: I2O device which should receive the power management request | ||
242 | * @op: Operation to send | ||
243 | * | ||
244 | * Send a power management request to the device dev. | ||
245 | * | ||
246 | * Returns 0 on success or negative error code on failure. | ||
247 | */ | ||
248 | static int i2o_block_device_power(struct i2o_block_device *dev, u8 op) | ||
249 | { | ||
250 | struct i2o_device *i2o_dev = dev->i2o_dev; | ||
251 | struct i2o_controller *c = i2o_dev->iop; | ||
252 | struct i2o_message *msg; | ||
253 | int rc; | ||
254 | |||
255 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
256 | if (IS_ERR(msg)) | ||
257 | return PTR_ERR(msg); | ||
258 | |||
259 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
260 | msg->u.head[1] = | ||
261 | cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev-> | ||
262 | lct_data.tid); | ||
263 | msg->body[0] = cpu_to_le32(op << 24); | ||
264 | osm_debug("Power...\n"); | ||
265 | |||
266 | rc = i2o_msg_post_wait(c, msg, 60); | ||
267 | if (!rc) | ||
268 | dev->power = op; | ||
269 | |||
270 | return rc; | ||
271 | }; | ||
272 | |||
273 | /** | ||
274 | * i2o_block_request_alloc - Allocate an I2O block request struct | ||
275 | * | ||
276 | * Allocates an I2O block request struct and initializes the list. | ||
277 | * | ||
278 | * Returns an i2o_block_request pointer on success or negative error code | ||
279 | * on failure. | ||
280 | */ | ||
281 | static inline struct i2o_block_request *i2o_block_request_alloc(void) | ||
282 | { | ||
283 | struct i2o_block_request *ireq; | ||
284 | |||
285 | ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC); | ||
286 | if (!ireq) | ||
287 | return ERR_PTR(-ENOMEM); | ||
288 | |||
289 | INIT_LIST_HEAD(&ireq->queue); | ||
290 | sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS); | ||
291 | |||
292 | return ireq; | ||
293 | }; | ||
294 | |||
295 | /** | ||
296 | * i2o_block_request_free - Frees an I2O block request | ||
297 | * @ireq: I2O block request which should be freed | ||
298 | * | ||
299 | * Frees the allocated memory (give it back to the request mempool). | ||
300 | */ | ||
301 | static inline void i2o_block_request_free(struct i2o_block_request *ireq) | ||
302 | { | ||
303 | mempool_free(ireq, i2o_blk_req_pool.pool); | ||
304 | }; | ||
305 | |||
306 | /** | ||
307 | * i2o_block_sglist_alloc - Allocate the SG list and map it | ||
308 | * @c: I2O controller to which the request belongs | ||
309 | * @ireq: I2O block request | ||
310 | * @mptr: message body pointer | ||
311 | * | ||
312 | * Builds the SG list and maps it so it is accessible by the controller. | ||
313 | * | ||
314 | * Returns 0 on failure or 1 on success. | ||
315 | */ | ||
316 | static inline int i2o_block_sglist_alloc(struct i2o_controller *c, | ||
317 | struct i2o_block_request *ireq, | ||
318 | u32 ** mptr) | ||
319 | { | ||
320 | int nents; | ||
321 | enum dma_data_direction direction; | ||
322 | |||
323 | ireq->dev = &c->pdev->dev; | ||
324 | nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); | ||
325 | |||
326 | if (rq_data_dir(ireq->req) == READ) | ||
327 | direction = PCI_DMA_FROMDEVICE; | ||
328 | else | ||
329 | direction = PCI_DMA_TODEVICE; | ||
330 | |||
331 | ireq->sg_nents = nents; | ||
332 | |||
333 | return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr); | ||
334 | }; | ||
335 | |||
336 | /** | ||
337 | * i2o_block_sglist_free - Frees the SG list | ||
338 | * @ireq: I2O block request from which the SG should be freed | ||
339 | * | ||
340 | * Frees the SG list from the I2O block request. | ||
341 | */ | ||
342 | static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) | ||
343 | { | ||
344 | enum dma_data_direction direction; | ||
345 | |||
346 | if (rq_data_dir(ireq->req) == READ) | ||
347 | direction = PCI_DMA_FROMDEVICE; | ||
348 | else | ||
349 | direction = PCI_DMA_TODEVICE; | ||
350 | |||
351 | dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction); | ||
352 | }; | ||
353 | |||
354 | /** | ||
355 | * i2o_block_prep_req_fn - Allocates I2O block device specific struct | ||
356 | * @q: request queue for the request | ||
357 | * @req: the request to prepare | ||
358 | * | ||
359 | * Allocate the necessary i2o_block_request struct and connect it to | ||
360 | * the request. This is needed so that we do not lose the SG list later on. | ||
361 | * | ||
362 | * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure. | ||
363 | */ | ||
364 | static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | ||
365 | { | ||
366 | struct i2o_block_device *i2o_blk_dev = q->queuedata; | ||
367 | struct i2o_block_request *ireq; | ||
368 | |||
369 | if (unlikely(!i2o_blk_dev)) { | ||
370 | osm_err("block device already removed\n"); | ||
371 | return BLKPREP_KILL; | ||
372 | } | ||
373 | |||
374 | /* connect the i2o_block_request to the request */ | ||
375 | if (!req->special) { | ||
376 | ireq = i2o_block_request_alloc(); | ||
377 | if (IS_ERR(ireq)) { | ||
378 | osm_debug("unable to allocate i2o_block_request!\n"); | ||
379 | return BLKPREP_DEFER; | ||
380 | } | ||
381 | |||
382 | ireq->i2o_blk_dev = i2o_blk_dev; | ||
383 | req->special = ireq; | ||
384 | ireq->req = req; | ||
385 | } | ||
386 | /* do not come back here */ | ||
387 | req->cmd_flags |= REQ_DONTPREP; | ||
388 | |||
389 | return BLKPREP_OK; | ||
390 | }; | ||
391 | |||
392 | /** | ||
393 | * i2o_block_delayed_request_fn - delayed request queue function | ||
394 | * @work: the delayed request with the queue to start | ||
395 | * | ||
396 | * If the request queue is stopped for a disk and there is no open | ||
397 | * request, a delayed work item is queued which calls this function to | ||
398 | * restart the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue | ||
399 | * would never be started again. | ||
400 | */ | ||
401 | static void i2o_block_delayed_request_fn(struct work_struct *work) | ||
402 | { | ||
403 | struct i2o_block_delayed_request *dreq = | ||
404 | container_of(work, struct i2o_block_delayed_request, | ||
405 | work.work); | ||
406 | struct request_queue *q = dreq->queue; | ||
407 | unsigned long flags; | ||
408 | |||
409 | spin_lock_irqsave(q->queue_lock, flags); | ||
410 | blk_start_queue(q); | ||
411 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
412 | kfree(dreq); | ||
413 | }; | ||
414 | |||
415 | /** | ||
416 | * i2o_block_end_request - Post-processing of completed commands | ||
417 | * @req: request which should be completed | ||
418 | * @error: 0 for success, < 0 for error | ||
419 | * @nr_bytes: number of bytes to complete | ||
420 | * | ||
421 | * Mark the request as complete. The lock must not be held when entering. | ||
422 | * | ||
423 | */ | ||
424 | static void i2o_block_end_request(struct request *req, int error, | ||
425 | int nr_bytes) | ||
426 | { | ||
427 | struct i2o_block_request *ireq = req->special; | ||
428 | struct i2o_block_device *dev = ireq->i2o_blk_dev; | ||
429 | struct request_queue *q = req->q; | ||
430 | unsigned long flags; | ||
431 | |||
432 | if (blk_end_request(req, error, nr_bytes)) | ||
433 | if (error) | ||
434 | blk_end_request_all(req, -EIO); | ||
435 | |||
436 | spin_lock_irqsave(q->queue_lock, flags); | ||
437 | |||
438 | if (likely(dev)) { | ||
439 | dev->open_queue_depth--; | ||
440 | list_del(&ireq->queue); | ||
441 | } | ||
442 | |||
443 | blk_start_queue(q); | ||
444 | |||
445 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
446 | |||
447 | i2o_block_sglist_free(ireq); | ||
448 | i2o_block_request_free(ireq); | ||
449 | }; | ||
450 | |||
451 | /** | ||
452 | * i2o_block_reply - Block OSM reply handler. | ||
453 | * @c: I2O controller from which the message arrives | ||
454 | * @m: message id of reply | ||
455 | * @msg: the actual I2O message reply | ||
456 | * | ||
457 | * This function gets all the message replies. | ||
458 | * | ||
459 | */ | ||
460 | static int i2o_block_reply(struct i2o_controller *c, u32 m, | ||
461 | struct i2o_message *msg) | ||
462 | { | ||
463 | struct request *req; | ||
464 | int error = 0; | ||
465 | |||
466 | req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | ||
467 | if (unlikely(!req)) { | ||
468 | osm_err("NULL reply received!\n"); | ||
469 | return -1; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * Let's see what is cooking. We stuffed the | ||
474 | * request in the context. | ||
475 | */ | ||
476 | |||
477 | if ((le32_to_cpu(msg->body[0]) >> 24) != 0) { | ||
478 | u32 status = le32_to_cpu(msg->body[0]); | ||
479 | /* | ||
480 | * Device not ready means two things. One is that the | ||
481 | * device went offline (but not removable media) | ||
482 | * | ||
483 | * The second is that you have a SuperTrak 100 and the | ||
484 | * firmware got constipated. Unlike standard i2o card | ||
485 | * setups the supertrak returns an error rather than | ||
486 | * blocking for the timeout in these cases. | ||
487 | * | ||
488 | * Don't stick a supertrak100 into cache aggressive modes | ||
489 | */ | ||
490 | |||
491 | osm_err("TID %03x error status: 0x%02x, detailed status: " | ||
492 | "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), | ||
493 | status >> 24, status & 0xffff); | ||
494 | |||
495 | req->errors++; | ||
496 | |||
497 | error = -EIO; | ||
498 | } | ||
499 | |||
500 | i2o_block_end_request(req, error, le32_to_cpu(msg->body[1])); | ||
501 | |||
502 | return 1; | ||
503 | }; | ||
504 | |||
505 | static void i2o_block_event(struct work_struct *work) | ||
506 | { | ||
507 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
508 | osm_debug("event received\n"); | ||
509 | kfree(evt); | ||
510 | }; | ||
511 | |||
512 | /* | ||
513 | * SCSI-CAM for ioctl geometry mapping | ||
514 | * Duplicated with SCSI - this should be moved somewhere common, | ||
515 | * perhaps genhd? | ||
516 | * | ||
517 | * LBA -> CHS mapping table taken from: | ||
518 | * | ||
519 | * "Incorporating the I2O Architecture into BIOS for Intel Architecture | ||
520 | * Platforms" | ||
521 | * | ||
522 | * This is an I2O document that is only available to I2O members, | ||
523 | * not developers. | ||
524 | * | ||
525 | * From my understanding, this is how all the I2O cards do this | ||
526 | * | ||
527 | * Disk Size | Sectors | Heads | Cylinders | ||
528 | * ---------------+---------+-------+------------------- | ||
529 | * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) | ||
530 | * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) | ||
531 | * 1G < X <= 21G | 63 | 64 | X/(63 * 64 * 512) | ||
532 | * (larger sizes use 128 and 255 heads; see the BLOCK_SIZE_* constants below) | ||
533 | * | ||
534 | */ | ||
535 | #define BLOCK_SIZE_528M 1081344 | ||
536 | #define BLOCK_SIZE_1G 2097152 | ||
537 | #define BLOCK_SIZE_21G 4403200 | ||
538 | #define BLOCK_SIZE_42G 8806400 | ||
539 | #define BLOCK_SIZE_84G 17612800 | ||
540 | |||
541 | static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, | ||
542 | unsigned char *hds, unsigned char *secs) | ||
543 | { | ||
544 | unsigned long heads, sectors, cylinders; | ||
545 | |||
546 | sectors = 63L; /* Maximize sectors per track */ | ||
547 | if (capacity <= BLOCK_SIZE_528M) | ||
548 | heads = 16; | ||
549 | else if (capacity <= BLOCK_SIZE_1G) | ||
550 | heads = 32; | ||
551 | else if (capacity <= BLOCK_SIZE_21G) | ||
552 | heads = 64; | ||
553 | else if (capacity <= BLOCK_SIZE_42G) | ||
554 | heads = 128; | ||
555 | else | ||
556 | heads = 255; | ||
557 | |||
558 | cylinders = (unsigned long)capacity / (heads * sectors); | ||
559 | |||
560 | *cyls = (unsigned short)cylinders; /* Stuff return values */ | ||
561 | *secs = (unsigned char)sectors; | ||
562 | *hds = (unsigned char)heads; | ||
563 | } | ||
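A worked example of the mapping above: a 2 GiB disk (4194304 sectors) falls in the 64-head bracket, giving 4194304 / (64 * 63) = 1040 cylinders. The same logic, restated as a standalone program that simply mirrors i2o_block_biosparam() and the BLOCK_SIZE_* thresholds:

    #include <stdio.h>

    /* Standalone restatement of i2o_block_biosparam(), for illustration only. */
    static void chs_from_sectors(unsigned long capacity, unsigned long *cyls,
                                 unsigned long *heads, unsigned long *secs)
    {
            *secs = 63;
            if (capacity <= 1081344)        /* BLOCK_SIZE_528M */
                    *heads = 16;
            else if (capacity <= 2097152)   /* BLOCK_SIZE_1G */
                    *heads = 32;
            else if (capacity <= 4403200)   /* BLOCK_SIZE_21G */
                    *heads = 64;
            else if (capacity <= 8806400)   /* BLOCK_SIZE_42G */
                    *heads = 128;
            else
                    *heads = 255;
            *cyls = capacity / (*heads * *secs);
    }

    int main(void)
    {
            unsigned long c, h, s;

            chs_from_sectors(4194304, &c, &h, &s);    /* 2 GiB disk */
            printf("C/H/S = %lu/%lu/%lu\n", c, h, s); /* prints 1040/64/63 */
            return 0;
    }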
564 | |||
565 | /** | ||
566 | * i2o_block_open - Open the block device | ||
567 | * @bdev: block device being opened | ||
568 | * @mode: file open mode | ||
569 | * | ||
570 | * Power up the device, mount and lock the media. This function is | ||
571 | * called when the block device is opened for access. | ||
572 | * | ||
573 | * Returns 0 on success or negative error code on failure. | ||
574 | */ | ||
575 | static int i2o_block_open(struct block_device *bdev, fmode_t mode) | ||
576 | { | ||
577 | struct i2o_block_device *dev = bdev->bd_disk->private_data; | ||
578 | |||
579 | if (!dev->i2o_dev) | ||
580 | return -ENODEV; | ||
581 | |||
582 | mutex_lock(&i2o_block_mutex); | ||
583 | if (dev->power > 0x1f) | ||
584 | i2o_block_device_power(dev, 0x02); | ||
585 | |||
586 | i2o_block_device_mount(dev->i2o_dev, -1); | ||
587 | |||
588 | i2o_block_device_lock(dev->i2o_dev, -1); | ||
589 | |||
590 | osm_debug("Ready.\n"); | ||
591 | mutex_unlock(&i2o_block_mutex); | ||
592 | |||
593 | return 0; | ||
594 | }; | ||
595 | |||
596 | /** | ||
597 | * i2o_block_release - Release the I2O block device | ||
598 | * @disk: gendisk device being released | ||
599 | * @mode: file open mode | ||
600 | * | ||
601 | * Unlock and unmount the media, and power down the device. Gets called when | ||
602 | * the block device is closed. | ||
603 | */ | ||
604 | static void i2o_block_release(struct gendisk *disk, fmode_t mode) | ||
605 | { | ||
606 | struct i2o_block_device *dev = disk->private_data; | ||
607 | u8 operation; | ||
608 | |||
609 | /* | ||
610 | * This is to deal with the case of an application | ||
611 | * opening a device and then the device disappears while | ||
612 | * it's in use, and then the application tries to release | ||
613 | * it. ex: Unmounting a deleted RAID volume at reboot. | ||
614 | * If we send messages, it will just cause FAILs since | ||
615 | * the TID no longer exists. | ||
616 | */ | ||
617 | if (!dev->i2o_dev) | ||
618 | return; | ||
619 | |||
620 | mutex_lock(&i2o_block_mutex); | ||
621 | i2o_block_device_flush(dev->i2o_dev); | ||
622 | |||
623 | i2o_block_device_unlock(dev->i2o_dev, -1); | ||
624 | |||
625 | if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */ | ||
626 | operation = 0x21; | ||
627 | else | ||
628 | operation = 0x24; | ||
629 | |||
630 | i2o_block_device_power(dev, operation); | ||
631 | mutex_unlock(&i2o_block_mutex); | ||
632 | } | ||
633 | |||
634 | static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) | ||
635 | { | ||
636 | i2o_block_biosparam(get_capacity(bdev->bd_disk), | ||
637 | &geo->cylinders, &geo->heads, &geo->sectors); | ||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | /** | ||
642 | * i2o_block_ioctl - Issue device specific ioctl calls. | ||
643 | * @bdev: block device being opened | ||
644 | * @mode: file open mode | ||
645 | * @cmd: ioctl command | ||
646 | * @arg: arg | ||
647 | * | ||
648 | * Handles ioctl request for the block device. | ||
649 | * | ||
650 | * Return 0 on success or negative error on failure. | ||
651 | */ | ||
652 | static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, | ||
653 | unsigned int cmd, unsigned long arg) | ||
654 | { | ||
655 | struct gendisk *disk = bdev->bd_disk; | ||
656 | struct i2o_block_device *dev = disk->private_data; | ||
657 | int ret = -ENOTTY; | ||
658 | |||
659 | /* Anyone capable of this syscall can do *real bad* things */ | ||
660 | |||
661 | if (!capable(CAP_SYS_ADMIN)) | ||
662 | return -EPERM; | ||
663 | |||
664 | mutex_lock(&i2o_block_mutex); | ||
665 | switch (cmd) { | ||
666 | case BLKI2OGRSTRAT: | ||
667 | ret = put_user(dev->rcache, (int __user *)arg); | ||
668 | break; | ||
669 | case BLKI2OGWSTRAT: | ||
670 | ret = put_user(dev->wcache, (int __user *)arg); | ||
671 | break; | ||
672 | case BLKI2OSRSTRAT: | ||
673 | ret = -EINVAL; | ||
674 | if (arg < 0 || arg > CACHE_SMARTFETCH) | ||
675 | break; | ||
676 | dev->rcache = arg; | ||
677 | ret = 0; | ||
678 | break; | ||
679 | case BLKI2OSWSTRAT: | ||
680 | ret = -EINVAL; | ||
681 | if (arg != 0 | ||
682 | && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) | ||
683 | break; | ||
684 | dev->wcache = arg; | ||
685 | ret = 0; | ||
686 | break; | ||
687 | } | ||
688 | mutex_unlock(&i2o_block_mutex); | ||
689 | |||
690 | return ret; | ||
691 | }; | ||
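From userspace the current cache strategies can be read back through the two GET ioctls above; the SET variants take the new strategy as the ioctl argument value rather than through a pointer, as the switch statement shows. A hedged sketch, assuming the ioctl numbers live in the usual linux/i2o-dev.h userspace header and that a Block OSM disk exists as /dev/i2o/hda:

    /* Hypothetical userspace sketch; header path and device node are assumptions. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2o-dev.h>          /* BLKI2OGRSTRAT, BLKI2OGWSTRAT */

    int main(void)
    {
            int rcache, wcache;
            int fd = open("/dev/i2o/hda", O_RDONLY);

            if (fd < 0)
                    return 1;

            /* CAP_SYS_ADMIN is required, as enforced in i2o_block_ioctl() */
            if (ioctl(fd, BLKI2OGRSTRAT, &rcache) == 0 &&
                ioctl(fd, BLKI2OGWSTRAT, &wcache) == 0)
                    printf("read strategy %d, write strategy %d\n", rcache, wcache);

            close(fd);
            return 0;
    }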
692 | |||
693 | /** | ||
694 | * i2o_block_check_events - Have we seen a media change? | ||
695 | * @disk: gendisk which should be verified | ||
696 | * @clearing: events being cleared | ||
697 | * | ||
698 | * Verifies if the media has changed. | ||
699 | * | ||
700 | * Returns DISK_EVENT_MEDIA_CHANGE if the media was changed or 0 otherwise. | ||
701 | */ | ||
702 | static unsigned int i2o_block_check_events(struct gendisk *disk, | ||
703 | unsigned int clearing) | ||
704 | { | ||
705 | struct i2o_block_device *p = disk->private_data; | ||
706 | |||
707 | if (p->media_change_flag) { | ||
708 | p->media_change_flag = 0; | ||
709 | return DISK_EVENT_MEDIA_CHANGE; | ||
710 | } | ||
711 | return 0; | ||
712 | } | ||
713 | |||
714 | /** | ||
715 | * i2o_block_transfer - Transfer a request to/from the I2O controller | ||
716 | * @req: the request which should be transferred | ||
717 | * | ||
718 | * This function converts the request into an I2O message. The necessary | ||
719 | * DMA buffers are allocated and, once everything is set up, the message is posted | ||
720 | * to the I2O controller. No cleanup is done by this function. It is done | ||
721 | * on the interrupt side when the reply arrives. | ||
722 | * | ||
723 | * Return 0 on success or negative error code on failure. | ||
724 | */ | ||
725 | static int i2o_block_transfer(struct request *req) | ||
726 | { | ||
727 | struct i2o_block_device *dev = req->rq_disk->private_data; | ||
728 | struct i2o_controller *c; | ||
729 | u32 tid; | ||
730 | struct i2o_message *msg; | ||
731 | u32 *mptr; | ||
732 | struct i2o_block_request *ireq = req->special; | ||
733 | u32 tcntxt; | ||
734 | u32 sgl_offset = SGL_OFFSET_8; | ||
735 | u32 ctl_flags = 0x00000000; | ||
736 | int rc; | ||
737 | u32 cmd; | ||
738 | |||
739 | if (unlikely(!dev->i2o_dev)) { | ||
740 | osm_err("transfer to removed drive\n"); | ||
741 | rc = -ENODEV; | ||
742 | goto exit; | ||
743 | } | ||
744 | |||
745 | tid = dev->i2o_dev->lct_data.tid; | ||
746 | c = dev->i2o_dev->iop; | ||
747 | |||
748 | msg = i2o_msg_get(c); | ||
749 | if (IS_ERR(msg)) { | ||
750 | rc = PTR_ERR(msg); | ||
751 | goto exit; | ||
752 | } | ||
753 | |||
754 | tcntxt = i2o_cntxt_list_add(c, req); | ||
755 | if (!tcntxt) { | ||
756 | rc = -ENOMEM; | ||
757 | goto nop_msg; | ||
758 | } | ||
759 | |||
760 | msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context); | ||
761 | msg->u.s.tcntxt = cpu_to_le32(tcntxt); | ||
762 | |||
763 | mptr = &msg->body[0]; | ||
764 | |||
765 | if (rq_data_dir(req) == READ) { | ||
766 | cmd = I2O_CMD_BLOCK_READ << 24; | ||
767 | |||
768 | switch (dev->rcache) { | ||
769 | case CACHE_PREFETCH: | ||
770 | ctl_flags = 0x201F0008; | ||
771 | break; | ||
772 | |||
773 | case CACHE_SMARTFETCH: | ||
774 | if (blk_rq_sectors(req) > 16) | ||
775 | ctl_flags = 0x201F0008; | ||
776 | else | ||
777 | ctl_flags = 0x001F0000; | ||
778 | break; | ||
779 | |||
780 | default: | ||
781 | break; | ||
782 | } | ||
783 | } else { | ||
784 | cmd = I2O_CMD_BLOCK_WRITE << 24; | ||
785 | |||
786 | switch (dev->wcache) { | ||
787 | case CACHE_WRITETHROUGH: | ||
788 | ctl_flags = 0x001F0008; | ||
789 | break; | ||
790 | case CACHE_WRITEBACK: | ||
791 | ctl_flags = 0x001F0010; | ||
792 | break; | ||
793 | case CACHE_SMARTBACK: | ||
794 | if (blk_rq_sectors(req) > 16) | ||
795 | ctl_flags = 0x001F0004; | ||
796 | else | ||
797 | ctl_flags = 0x001F0010; | ||
798 | break; | ||
799 | case CACHE_SMARTTHROUGH: | ||
800 | if (blk_rq_sectors(req) > 16) | ||
801 | ctl_flags = 0x001F0004; | ||
802 | else | ||
803 | ctl_flags = 0x001F0010; | ||
804 | default: | ||
805 | break; | ||
806 | } | ||
807 | } | ||
808 | |||
809 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
810 | if (c->adaptec) { | ||
811 | u8 cmd[10]; | ||
812 | u32 scsi_flags; | ||
813 | u16 hwsec; | ||
814 | |||
815 | hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; | ||
816 | memset(cmd, 0, 10); | ||
817 | |||
818 | sgl_offset = SGL_OFFSET_12; | ||
819 | |||
820 | msg->u.head[1] = | ||
821 | cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid); | ||
822 | |||
823 | *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); | ||
824 | *mptr++ = cpu_to_le32(tid); | ||
825 | |||
826 | /* | ||
827 | * ENABLE_DISCONNECT | ||
828 | * SIMPLE_TAG | ||
829 | * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME | ||
830 | */ | ||
831 | if (rq_data_dir(req) == READ) { | ||
832 | cmd[0] = READ_10; | ||
833 | scsi_flags = 0x60a0000a; | ||
834 | } else { | ||
835 | cmd[0] = WRITE_10; | ||
836 | scsi_flags = 0xa0a0000a; | ||
837 | } | ||
838 | |||
839 | *mptr++ = cpu_to_le32(scsi_flags); | ||
840 | |||
841 | *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec); | ||
842 | *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec); | ||
843 | |||
844 | memcpy(mptr, cmd, 10); | ||
845 | mptr += 4; | ||
846 | *mptr++ = cpu_to_le32(blk_rq_bytes(req)); | ||
847 | } else | ||
848 | #endif | ||
849 | { | ||
850 | msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); | ||
851 | *mptr++ = cpu_to_le32(ctl_flags); | ||
852 | *mptr++ = cpu_to_le32(blk_rq_bytes(req)); | ||
853 | *mptr++ = | ||
854 | cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT)); | ||
855 | *mptr++ = | ||
856 | cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT)); | ||
857 | } | ||
858 | |||
859 | if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { | ||
860 | rc = -ENOMEM; | ||
861 | goto context_remove; | ||
862 | } | ||
863 | |||
864 | msg->u.head[0] = | ||
865 | cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); | ||
866 | |||
867 | list_add_tail(&ireq->queue, &dev->open_queue); | ||
868 | dev->open_queue_depth++; | ||
869 | |||
870 | i2o_msg_post(c, msg); | ||
871 | |||
872 | return 0; | ||
873 | |||
874 | context_remove: | ||
875 | i2o_cntxt_list_remove(c, req); | ||
876 | |||
877 | nop_msg: | ||
878 | i2o_msg_nop(c, msg); | ||
879 | |||
880 | exit: | ||
881 | return rc; | ||
882 | }; | ||
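In the generic (non-Adaptec) branch above, the request's starting byte offset is a 64-bit quantity split across two 32-bit message words: the sector number shifted left by KERNEL_SECTOR_SHIFT gives the low word, and the same value shifted right by (32 - KERNEL_SECTOR_SHIFT) gives the high word. A small standalone check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEL_SECTOR_SHIFT 9

    int main(void)
    {
            uint64_t pos = 0x123456789ULL;  /* request start, in 512-byte sectors */
            uint32_t lo = (uint32_t)(pos << KERNEL_SECTOR_SHIFT);
            uint32_t hi = (uint32_t)(pos >> (32 - KERNEL_SECTOR_SHIFT));

            /* lo and hi are what the driver feeds to cpu_to_le32() */
            printf("byte offset = 0x%08x%08x\n", hi, lo);
            return 0;
    }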
883 | |||
884 | /** | ||
885 | * i2o_block_request_fn - request queue handling function | ||
886 | * @q: request queue from which the request could be fetched | ||
887 | * | ||
888 | * Takes the next request from the queue, transfers it and, if no error | ||
889 | * occurs, dequeues it from the queue. When the reply arrives the request | ||
890 | * will be processed further. If an error occurs the request is requeued. | ||
891 | */ | ||
892 | static void i2o_block_request_fn(struct request_queue *q) | ||
893 | { | ||
894 | struct request *req; | ||
895 | |||
896 | while ((req = blk_peek_request(q)) != NULL) { | ||
897 | if (req->cmd_type == REQ_TYPE_FS) { | ||
898 | struct i2o_block_delayed_request *dreq; | ||
899 | struct i2o_block_request *ireq = req->special; | ||
900 | unsigned int queue_depth; | ||
901 | |||
902 | queue_depth = ireq->i2o_blk_dev->open_queue_depth; | ||
903 | |||
904 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { | ||
905 | if (!i2o_block_transfer(req)) { | ||
906 | blk_start_request(req); | ||
907 | continue; | ||
908 | } else | ||
909 | osm_info("transfer error\n"); | ||
910 | } | ||
911 | |||
912 | if (queue_depth) | ||
913 | break; | ||
914 | |||
915 | /* stop the queue and retry later */ | ||
916 | dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC); | ||
917 | if (!dreq) | ||
918 | continue; | ||
919 | |||
920 | dreq->queue = q; | ||
921 | INIT_DELAYED_WORK(&dreq->work, | ||
922 | i2o_block_delayed_request_fn); | ||
923 | |||
924 | if (!queue_delayed_work(i2o_block_driver.event_queue, | ||
925 | &dreq->work, | ||
926 | I2O_BLOCK_RETRY_TIME)) | ||
927 | kfree(dreq); | ||
928 | else { | ||
929 | blk_stop_queue(q); | ||
930 | break; | ||
931 | } | ||
932 | } else { | ||
933 | blk_start_request(req); | ||
934 | __blk_end_request_all(req, -EIO); | ||
935 | } | ||
936 | } | ||
937 | }; | ||
938 | |||
939 | /* I2O Block device operations definition */ | ||
940 | static const struct block_device_operations i2o_block_fops = { | ||
941 | .owner = THIS_MODULE, | ||
942 | .open = i2o_block_open, | ||
943 | .release = i2o_block_release, | ||
944 | .ioctl = i2o_block_ioctl, | ||
945 | .compat_ioctl = i2o_block_ioctl, | ||
946 | .getgeo = i2o_block_getgeo, | ||
947 | .check_events = i2o_block_check_events, | ||
948 | }; | ||
949 | |||
950 | /** | ||
951 | * i2o_block_device_alloc - Allocate memory for a I2O Block device | ||
952 | * | ||
953 | * Allocate memory for the i2o_block_device struct, gendisk and request | ||
954 | * queue, and initialize them as far as possible without additional device information. | ||
955 | * | ||
956 | * Returns a pointer to the allocated I2O Block device on success or a | ||
957 | * negative error code on failure. | ||
958 | */ | ||
959 | static struct i2o_block_device *i2o_block_device_alloc(void) | ||
960 | { | ||
961 | struct i2o_block_device *dev; | ||
962 | struct gendisk *gd; | ||
963 | struct request_queue *queue; | ||
964 | int rc; | ||
965 | |||
966 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
967 | if (!dev) { | ||
968 | osm_err("Insufficient memory to allocate I2O Block disk.\n"); | ||
969 | rc = -ENOMEM; | ||
970 | goto exit; | ||
971 | } | ||
972 | |||
973 | INIT_LIST_HEAD(&dev->open_queue); | ||
974 | spin_lock_init(&dev->lock); | ||
975 | dev->rcache = CACHE_PREFETCH; | ||
976 | dev->wcache = CACHE_WRITEBACK; | ||
977 | |||
978 | /* allocate a gendisk with 16 partitions */ | ||
979 | gd = alloc_disk(16); | ||
980 | if (!gd) { | ||
981 | osm_err("Insufficient memory to allocate gendisk.\n"); | ||
982 | rc = -ENOMEM; | ||
983 | goto cleanup_dev; | ||
984 | } | ||
985 | |||
986 | /* initialize the request queue */ | ||
987 | queue = blk_init_queue(i2o_block_request_fn, &dev->lock); | ||
988 | if (!queue) { | ||
989 | osm_err("Insufficient memory to allocate request queue.\n"); | ||
990 | rc = -ENOMEM; | ||
991 | goto cleanup_queue; | ||
992 | } | ||
993 | |||
994 | blk_queue_prep_rq(queue, i2o_block_prep_req_fn); | ||
995 | |||
996 | gd->major = I2O_MAJOR; | ||
997 | gd->queue = queue; | ||
998 | gd->fops = &i2o_block_fops; | ||
999 | gd->private_data = dev; | ||
1000 | |||
1001 | dev->gd = gd; | ||
1002 | |||
1003 | return dev; | ||
1004 | |||
1005 | cleanup_queue: | ||
1006 | put_disk(gd); | ||
1007 | |||
1008 | cleanup_dev: | ||
1009 | kfree(dev); | ||
1010 | |||
1011 | exit: | ||
1012 | return ERR_PTR(rc); | ||
1013 | }; | ||
1014 | |||
1015 | /** | ||
1016 | * i2o_block_probe - verify if dev is an I2O Block device and install it | ||
1017 | * @dev: device to verify if it is a I2O Block device | ||
1018 | * | ||
1019 | * We only verify if the user_tid of the device is 0xfff and then install | ||
1020 | * the device. Otherwise it is used by some other device (e.g. RAID). | ||
1021 | * | ||
1022 | * Returns 0 on success or negative error code on failure. | ||
1023 | */ | ||
1024 | static int i2o_block_probe(struct device *dev) | ||
1025 | { | ||
1026 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
1027 | struct i2o_controller *c = i2o_dev->iop; | ||
1028 | struct i2o_block_device *i2o_blk_dev; | ||
1029 | struct gendisk *gd; | ||
1030 | struct request_queue *queue; | ||
1031 | static int unit = 0; | ||
1032 | int rc; | ||
1033 | u64 size; | ||
1034 | u32 blocksize; | ||
1035 | u16 body_size = 4; | ||
1036 | u16 power; | ||
1037 | unsigned short max_sectors; | ||
1038 | |||
1039 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
1040 | if (c->adaptec) | ||
1041 | body_size = 8; | ||
1042 | #endif | ||
1043 | |||
1044 | if (c->limit_sectors) | ||
1045 | max_sectors = I2O_MAX_SECTORS_LIMITED; | ||
1046 | else | ||
1047 | max_sectors = I2O_MAX_SECTORS; | ||
1048 | |||
1049 | /* skip devices which are used by IOP */ | ||
1050 | if (i2o_dev->lct_data.user_tid != 0xfff) { | ||
1051 | osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid); | ||
1052 | return -ENODEV; | ||
1053 | } | ||
1054 | |||
1055 | if (i2o_device_claim(i2o_dev)) { | ||
1056 | osm_warn("Unable to claim device. Installation aborted\n"); | ||
1057 | rc = -EFAULT; | ||
1058 | goto exit; | ||
1059 | } | ||
1060 | |||
1061 | i2o_blk_dev = i2o_block_device_alloc(); | ||
1062 | if (IS_ERR(i2o_blk_dev)) { | ||
1063 | osm_err("could not alloc a new I2O block device\n"); | ||
1064 | rc = PTR_ERR(i2o_blk_dev); | ||
1065 | goto claim_release; | ||
1066 | } | ||
1067 | |||
1068 | i2o_blk_dev->i2o_dev = i2o_dev; | ||
1069 | dev_set_drvdata(dev, i2o_blk_dev); | ||
1070 | |||
1071 | /* setup gendisk */ | ||
1072 | gd = i2o_blk_dev->gd; | ||
1073 | gd->first_minor = unit << 4; | ||
1074 | sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit); | ||
1075 | gd->driverfs_dev = &i2o_dev->device; | ||
1076 | |||
1077 | /* setup request queue */ | ||
1078 | queue = gd->queue; | ||
1079 | queue->queuedata = i2o_blk_dev; | ||
1080 | |||
1081 | blk_queue_max_hw_sectors(queue, max_sectors); | ||
1082 | blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size)); | ||
1083 | |||
1084 | osm_debug("max sectors = %d\n", queue->max_sectors); | ||
1085 | osm_debug("phys segments = %d\n", queue->max_phys_segments); | ||
1086 | osm_debug("max hw segments = %d\n", queue->max_hw_segments); | ||
1087 | |||
1088 | /* | ||
1089 | * Ask for the current media data. If that isn't supported | ||
1090 | * then we ask for the device capacity data | ||
1091 | */ | ||
1092 | if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || | ||
1093 | !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { | ||
1094 | blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); | ||
1095 | } else | ||
1096 | osm_warn("unable to get blocksize of %s\n", gd->disk_name); | ||
1097 | |||
1098 | if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || | ||
1099 | !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { | ||
1100 | set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT); | ||
1101 | } else | ||
1102 | osm_warn("could not get size of %s\n", gd->disk_name); | ||
1103 | |||
1104 | if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) | ||
1105 | i2o_blk_dev->power = power; | ||
1106 | |||
1107 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); | ||
1108 | |||
1109 | add_disk(gd); | ||
1110 | |||
1111 | unit++; | ||
1112 | |||
1113 | osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid, | ||
1114 | i2o_blk_dev->gd->disk_name); | ||
1115 | |||
1116 | return 0; | ||
1117 | |||
1118 | claim_release: | ||
1119 | i2o_device_claim_release(i2o_dev); | ||
1120 | |||
1121 | exit: | ||
1122 | return rc; | ||
1123 | }; | ||
1124 | |||
1125 | /* Block OSM driver struct */ | ||
1126 | static struct i2o_driver i2o_block_driver = { | ||
1127 | .name = OSM_NAME, | ||
1128 | .event = i2o_block_event, | ||
1129 | .reply = i2o_block_reply, | ||
1130 | .classes = i2o_block_class_id, | ||
1131 | .driver = { | ||
1132 | .probe = i2o_block_probe, | ||
1133 | .remove = i2o_block_remove, | ||
1134 | }, | ||
1135 | }; | ||
1136 | |||
1137 | /** | ||
1138 | * i2o_block_init - Block OSM initialization function | ||
1139 | * | ||
1140 | * Allocate the slab and mempool for request structs, registers i2o_block | ||
1141 | * block device and finally register the Block OSM in the I2O core. | ||
1142 | * | ||
1143 | * Returns 0 on success or negative error code on failure. | ||
1144 | */ | ||
1145 | static int __init i2o_block_init(void) | ||
1146 | { | ||
1147 | int rc; | ||
1148 | int size; | ||
1149 | |||
1150 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1151 | |||
1152 | /* Allocate request mempool and slab */ | ||
1153 | size = sizeof(struct i2o_block_request); | ||
1154 | i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, | ||
1155 | SLAB_HWCACHE_ALIGN, NULL); | ||
1156 | if (!i2o_blk_req_pool.slab) { | ||
1157 | osm_err("can't init request slab\n"); | ||
1158 | rc = -ENOMEM; | ||
1159 | goto exit; | ||
1160 | } | ||
1161 | |||
1162 | i2o_blk_req_pool.pool = | ||
1163 | mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE, | ||
1164 | i2o_blk_req_pool.slab); | ||
1165 | if (!i2o_blk_req_pool.pool) { | ||
1166 | osm_err("can't init request mempool\n"); | ||
1167 | rc = -ENOMEM; | ||
1168 | goto free_slab; | ||
1169 | } | ||
1170 | |||
1171 | /* Register the block device interfaces */ | ||
1172 | rc = register_blkdev(I2O_MAJOR, "i2o_block"); | ||
1173 | if (rc) { | ||
1174 | osm_err("unable to register block device\n"); | ||
1175 | goto free_mempool; | ||
1176 | } | ||
1177 | #ifdef MODULE | ||
1178 | osm_info("registered device at major %d\n", I2O_MAJOR); | ||
1179 | #endif | ||
1180 | |||
1181 | /* Register Block OSM into I2O core */ | ||
1182 | rc = i2o_driver_register(&i2o_block_driver); | ||
1183 | if (rc) { | ||
1184 | osm_err("Could not register Block driver\n"); | ||
1185 | goto unregister_blkdev; | ||
1186 | } | ||
1187 | |||
1188 | return 0; | ||
1189 | |||
1190 | unregister_blkdev: | ||
1191 | unregister_blkdev(I2O_MAJOR, "i2o_block"); | ||
1192 | |||
1193 | free_mempool: | ||
1194 | mempool_destroy(i2o_blk_req_pool.pool); | ||
1195 | |||
1196 | free_slab: | ||
1197 | kmem_cache_destroy(i2o_blk_req_pool.slab); | ||
1198 | |||
1199 | exit: | ||
1200 | return rc; | ||
1201 | }; | ||
1202 | |||
1203 | /** | ||
1204 | * i2o_block_exit - Block OSM exit function | ||
1205 | * | ||
1206 | * Unregisters Block OSM from I2O core, unregisters i2o_block block device | ||
1207 | * and frees the mempool and slab. | ||
1208 | */ | ||
1209 | static void __exit i2o_block_exit(void) | ||
1210 | { | ||
1211 | /* Unregister I2O Block OSM from I2O core */ | ||
1212 | i2o_driver_unregister(&i2o_block_driver); | ||
1213 | |||
1214 | /* Unregister block device */ | ||
1215 | unregister_blkdev(I2O_MAJOR, "i2o_block"); | ||
1216 | |||
1217 | /* Free request mempool and slab */ | ||
1218 | mempool_destroy(i2o_blk_req_pool.pool); | ||
1219 | kmem_cache_destroy(i2o_blk_req_pool.slab); | ||
1220 | }; | ||
1221 | |||
1222 | MODULE_AUTHOR("Red Hat"); | ||
1223 | MODULE_LICENSE("GPL"); | ||
1224 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1225 | MODULE_VERSION(OSM_VERSION); | ||
1226 | |||
1227 | module_init(i2o_block_init); | ||
1228 | module_exit(i2o_block_exit); | ||
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h deleted file mode 100644 index cf8873cbca3f..000000000000 --- a/drivers/message/i2o/i2o_block.h +++ /dev/null | |||
@@ -1,103 +0,0 @@ | |||
1 | /* | ||
2 | * Block OSM structures/API | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * For the purpose of avoiding doubt the preferred form of the work | ||
19 | * for making modifications shall be a standards compliant form such as a | ||
20 | * gzipped tar and not one requiring a proprietary or patent encumbered | ||
21 | * tool to unpack. | ||
22 | * | ||
23 | * Fixes/additions: | ||
24 | * Steve Ralston: | ||
25 | * Multiple device handling error fixes, | ||
26 | * Added a queue depth. | ||
27 | * Alan Cox: | ||
28 | * FC920 has an rmw bug. Don't OR in the end marker. | ||
29 | * Removed queue walk, fixed for 64bitness. | ||
30 | * Rewrote much of the code over time | ||
31 | * Added indirect block lists | ||
32 | * Handle 64K limits on many controllers | ||
33 | * Don't use indirects on the Promise (breaks) | ||
34 | * Heavily chop down the queue depths | ||
35 | * Deepak Saxena: | ||
36 | * Independent queues per IOP | ||
37 | * Support for dynamic device creation/deletion | ||
38 | * Code cleanup | ||
39 | * Support for larger I/Os through merge* functions | ||
40 | * (taken from DAC960 driver) | ||
41 | * Boji T Kannanthanam: | ||
42 | * Set the I2O Block devices to be detected in increasing | ||
43 | * order of TIDs during boot. | ||
44 | * Search and set the I2O block device that we boot off | ||
45 | * from as the first device to be claimed (as /dev/i2o/hda) | ||
46 | * Properly attach/detach I2O gendisk structure from the | ||
47 | * system gendisk list. The I2O block devices now appear in | ||
48 | * /proc/partitions. | ||
49 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
50 | * Minor bugfixes for 2.6. | ||
51 | */ | ||
52 | |||
53 | #ifndef I2O_BLOCK_OSM_H | ||
54 | #define I2O_BLOCK_OSM_H | ||
55 | |||
56 | #define I2O_BLOCK_RETRY_TIME HZ/4 | ||
57 | #define I2O_BLOCK_MAX_OPEN_REQUESTS 50 | ||
58 | |||
59 | /* request queue sizes */ | ||
60 | #define I2O_BLOCK_REQ_MEMPOOL_SIZE 32 | ||
61 | |||
62 | #define KERNEL_SECTOR_SHIFT 9 | ||
63 | #define KERNEL_SECTOR_SIZE (1 << KERNEL_SECTOR_SHIFT) | ||
64 | |||
65 | /* I2O Block OSM mempool struct */ | ||
66 | struct i2o_block_mempool { | ||
67 | struct kmem_cache *slab; | ||
68 | mempool_t *pool; | ||
69 | }; | ||
70 | |||
71 | /* I2O Block device descriptor */ | ||
72 | struct i2o_block_device { | ||
73 | struct i2o_device *i2o_dev; /* pointer to I2O device */ | ||
74 | struct gendisk *gd; | ||
75 | spinlock_t lock; /* queue lock */ | ||
76 | struct list_head open_queue; /* list of transferred, but unfinished | ||
77 | requests */ | ||
78 | unsigned int open_queue_depth; /* number of requests in the queue */ | ||
79 | |||
80 | int rcache; /* read cache flags */ | ||
81 | int wcache; /* write cache flags */ | ||
82 | int flags; | ||
83 | u16 power; /* power state */ | ||
84 | int media_change_flag; /* media changed flag */ | ||
85 | }; | ||
86 | |||
87 | /* I2O Block device request */ | ||
88 | struct i2o_block_request { | ||
89 | struct list_head queue; | ||
90 | struct request *req; /* corresponding request */ | ||
91 | struct i2o_block_device *i2o_blk_dev; /* I2O block device */ | ||
92 | struct device *dev; /* device used for DMA */ | ||
93 | int sg_nents; /* number of SG elements */ | ||
94 | struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS]; /* SG table */ | ||
95 | }; | ||
96 | |||
97 | /* I2O Block device delayed request */ | ||
98 | struct i2o_block_delayed_request { | ||
99 | struct delayed_work work; | ||
100 | struct request_queue *queue; | ||
101 | }; | ||
102 | |||
103 | #endif | ||
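I2O_BLOCK_RETRY_TIME (HZ/4, roughly 250 ms) is the delay i2o_block_request_fn() waits before restarting a stopped queue, using ordinary delayed-work plumbing. A minimal sketch of that pattern; apart from the delay, all names here are illustrative stand-ins rather than the driver's own:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    #define I2O_BLOCK_RETRY_TIME (HZ / 4)

    /* Illustrative stand-in for struct i2o_block_delayed_request. */
    struct blk_retry {
            struct delayed_work work;
            void *cookie;                   /* e.g. the request queue to restart */
    };

    static void blk_retry_fn(struct work_struct *work)
    {
            struct blk_retry *r = container_of(work, struct blk_retry, work.work);

            /* restart processing of r->cookie here, then drop the bookkeeping */
            kfree(r);
    }

    static bool blk_retry_schedule(struct workqueue_struct *wq, struct blk_retry *r)
    {
            INIT_DELAYED_WORK(&r->work, blk_retry_fn);
            if (!queue_delayed_work(wq, &r->work, I2O_BLOCK_RETRY_TIME)) {
                    kfree(r);               /* could not queue it; mirror the driver's cleanup */
                    return false;
            }
            return true;
    }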
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c deleted file mode 100644 index 04bd3b6de401..000000000000 --- a/drivers/message/i2o/i2o_config.c +++ /dev/null | |||
@@ -1,1163 +0,0 @@ | |||
1 | /* | ||
2 | * I2O Configuration Interface Driver | ||
3 | * | ||
4 | * (C) Copyright 1999-2002 Red Hat | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * Fixes/additions: | ||
9 | * Deepak Saxena (04/20/1999): | ||
10 | * Added basic ioctl() support | ||
11 | * Deepak Saxena (06/07/1999): | ||
12 | * Added software download ioctl (still testing) | ||
13 | * Auvo Häkkinen (09/10/1999): | ||
14 | * Changes to i2o_cfg_reply(), ioctl_parms() | ||
15 | * Added ioct_validate() | ||
16 | * Taneli Vähäkangas (09/30/1999): | ||
17 | * Fixed ioctl_swdl() | ||
18 | * Taneli Vähäkangas (10/04/1999): | ||
19 | * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel() | ||
20 | * Deepak Saxena (11/18/1999): | ||
21 | * Added event management support | ||
22 | * Alan Cox <alan@lxorguk.ukuu.org.uk>: | ||
23 | * 2.4 rewrite ported to 2.5 | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Added pass-thru support for Adaptec's raidutils | ||
26 | * | ||
27 | * This program is free software; you can redistribute it and/or | ||
28 | * modify it under the terms of the GNU General Public License | ||
29 | * as published by the Free Software Foundation; either version | ||
30 | * 2 of the License, or (at your option) any later version. | ||
31 | */ | ||
32 | |||
33 | #include <linux/miscdevice.h> | ||
34 | #include <linux/mutex.h> | ||
35 | #include <linux/compat.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #include <asm/uaccess.h> | ||
39 | |||
40 | #include "core.h" | ||
41 | |||
42 | #define SG_TABLESIZE 30 | ||
43 | |||
44 | static DEFINE_MUTEX(i2o_cfg_mutex); | ||
45 | static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long); | ||
46 | |||
47 | static spinlock_t i2o_config_lock; | ||
48 | |||
49 | #define MODINC(x,y) ((x) = ((x) + 1) % (y)) | ||
50 | |||
51 | struct sg_simple_element { | ||
52 | u32 flag_count; | ||
53 | u32 addr_bus; | ||
54 | }; | ||
55 | |||
56 | struct i2o_cfg_info { | ||
57 | struct file *fp; | ||
58 | struct fasync_struct *fasync; | ||
59 | struct i2o_evt_info event_q[I2O_EVT_Q_LEN]; | ||
60 | u16 q_in; // Queue head index | ||
61 | u16 q_out; // Queue tail index | ||
62 | u16 q_len; // Queue length | ||
63 | u16 q_lost; // Number of lost events | ||
64 | ulong q_id; // Event queue ID...used as tx_context | ||
65 | struct i2o_cfg_info *next; | ||
66 | }; | ||
67 | static struct i2o_cfg_info *open_files = NULL; | ||
68 | static ulong i2o_cfg_info_id = 0; | ||
69 | |||
70 | static int i2o_cfg_getiops(unsigned long arg) | ||
71 | { | ||
72 | struct i2o_controller *c; | ||
73 | u8 __user *user_iop_table = (void __user *)arg; | ||
74 | u8 tmp[MAX_I2O_CONTROLLERS]; | ||
75 | int ret = 0; | ||
76 | |||
77 | memset(tmp, 0, MAX_I2O_CONTROLLERS); | ||
78 | |||
79 | list_for_each_entry(c, &i2o_controllers, list) | ||
80 | tmp[c->unit] = 1; | ||
81 | |||
82 | if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS)) | ||
83 | ret = -EFAULT; | ||
84 | |||
85 | return ret; | ||
86 | }; | ||
87 | |||
88 | static int i2o_cfg_gethrt(unsigned long arg) | ||
89 | { | ||
90 | struct i2o_controller *c; | ||
91 | struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; | ||
92 | struct i2o_cmd_hrtlct kcmd; | ||
93 | i2o_hrt *hrt; | ||
94 | int len; | ||
95 | u32 reslen; | ||
96 | int ret = 0; | ||
97 | |||
98 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) | ||
99 | return -EFAULT; | ||
100 | |||
101 | if (get_user(reslen, kcmd.reslen) < 0) | ||
102 | return -EFAULT; | ||
103 | |||
104 | if (kcmd.resbuf == NULL) | ||
105 | return -EFAULT; | ||
106 | |||
107 | c = i2o_find_iop(kcmd.iop); | ||
108 | if (!c) | ||
109 | return -ENXIO; | ||
110 | |||
111 | hrt = (i2o_hrt *) c->hrt.virt; | ||
112 | |||
113 | len = 8 + ((hrt->entry_len * hrt->num_entries) << 2); | ||
114 | |||
115 | if (put_user(len, kcmd.reslen)) | ||
116 | ret = -EFAULT; | ||
117 | else if (len > reslen) | ||
118 | ret = -ENOBUFS; | ||
119 | else if (copy_to_user(kcmd.resbuf, (void *)hrt, len)) | ||
120 | ret = -EFAULT; | ||
121 | |||
122 | return ret; | ||
123 | }; | ||
124 | |||
125 | static int i2o_cfg_getlct(unsigned long arg) | ||
126 | { | ||
127 | struct i2o_controller *c; | ||
128 | struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg; | ||
129 | struct i2o_cmd_hrtlct kcmd; | ||
130 | i2o_lct *lct; | ||
131 | int len; | ||
132 | int ret = 0; | ||
133 | u32 reslen; | ||
134 | |||
135 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct))) | ||
136 | return -EFAULT; | ||
137 | |||
138 | if (get_user(reslen, kcmd.reslen) < 0) | ||
139 | return -EFAULT; | ||
140 | |||
141 | if (kcmd.resbuf == NULL) | ||
142 | return -EFAULT; | ||
143 | |||
144 | c = i2o_find_iop(kcmd.iop); | ||
145 | if (!c) | ||
146 | return -ENXIO; | ||
147 | |||
148 | lct = (i2o_lct *) c->lct; | ||
149 | |||
150 | len = (unsigned int)lct->table_size << 2; | ||
151 | if (put_user(len, kcmd.reslen)) | ||
152 | ret = -EFAULT; | ||
153 | else if (len > reslen) | ||
154 | ret = -ENOBUFS; | ||
155 | else if (copy_to_user(kcmd.resbuf, lct, len)) | ||
156 | ret = -EFAULT; | ||
157 | |||
158 | return ret; | ||
159 | }; | ||
160 | |||
161 | static int i2o_cfg_parms(unsigned long arg, unsigned int type) | ||
162 | { | ||
163 | int ret = 0; | ||
164 | struct i2o_controller *c; | ||
165 | struct i2o_device *dev; | ||
166 | struct i2o_cmd_psetget __user *cmd = | ||
167 | (struct i2o_cmd_psetget __user *)arg; | ||
168 | struct i2o_cmd_psetget kcmd; | ||
169 | u32 reslen; | ||
170 | u8 *ops; | ||
171 | u8 *res; | ||
172 | int len = 0; | ||
173 | |||
174 | u32 i2o_cmd = (type == I2OPARMGET ? | ||
175 | I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET); | ||
176 | |||
177 | if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget))) | ||
178 | return -EFAULT; | ||
179 | |||
180 | if (get_user(reslen, kcmd.reslen)) | ||
181 | return -EFAULT; | ||
182 | |||
183 | c = i2o_find_iop(kcmd.iop); | ||
184 | if (!c) | ||
185 | return -ENXIO; | ||
186 | |||
187 | dev = i2o_iop_find_device(c, kcmd.tid); | ||
188 | if (!dev) | ||
189 | return -ENXIO; | ||
190 | |||
191 | /* | ||
192 | * Stop users being able to try and allocate arbitrary amounts | ||
193 | * of DMA space. 64K is way more than sufficient for this. | ||
194 | */ | ||
195 | if (kcmd.oplen > 65536) | ||
196 | return -EMSGSIZE; | ||
197 | |||
198 | ops = memdup_user(kcmd.opbuf, kcmd.oplen); | ||
199 | if (IS_ERR(ops)) | ||
200 | return PTR_ERR(ops); | ||
201 | |||
202 | /* | ||
203 | * It's possible to have a _very_ large table | ||
204 | * and that the user asks for all of it at once... | ||
205 | */ | ||
206 | res = kmalloc(65536, GFP_KERNEL); | ||
207 | if (!res) { | ||
208 | kfree(ops); | ||
209 | return -ENOMEM; | ||
210 | } | ||
211 | |||
212 | len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536); | ||
213 | kfree(ops); | ||
214 | |||
215 | if (len < 0) { | ||
216 | kfree(res); | ||
217 | return -EAGAIN; | ||
218 | } | ||
219 | |||
220 | if (put_user(len, kcmd.reslen)) | ||
221 | ret = -EFAULT; | ||
222 | else if (len > reslen) | ||
223 | ret = -ENOBUFS; | ||
224 | else if (copy_to_user(kcmd.resbuf, res, len)) | ||
225 | ret = -EFAULT; | ||
226 | |||
227 | kfree(res); | ||
228 | |||
229 | return ret; | ||
230 | }; | ||
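The parameter ioctls above follow a simple size-negotiation convention: userspace passes the size of its result buffer through *reslen, the kernel writes the number of bytes actually produced back through the same pointer, and returns -ENOBUFS when the supplied buffer was too small, so the caller can retry with a larger one. A hedged userspace sketch of that flow; the device node, header path and struct layout are assumptions, and the operation-list contents are a placeholder (only I2OPARMGET and the field names come from the code above):

    /* Hypothetical userspace sketch; header, device node and op list are assumptions. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2o-dev.h>      /* I2OPARMGET, struct i2o_cmd_psetget */

    int main(void)
    {
            struct i2o_cmd_psetget cmd;
            unsigned int reslen = 4096;
            unsigned char oplist[16] = { 0 };   /* placeholder; real op lists follow the I2O spec */
            unsigned char *res = malloc(reslen);
            int fd = open("/dev/i2o/ctl", O_RDWR);

            if (fd < 0 || !res)
                    return 1;

            memset(&cmd, 0, sizeof(cmd));
            cmd.iop = 0;                    /* first controller */
            cmd.tid = 0x001;                /* target device TID */
            cmd.opbuf = oplist;
            cmd.oplen = sizeof(oplist);
            cmd.resbuf = res;
            cmd.reslen = &reslen;           /* in: buffer size, out: bytes produced */

            if (ioctl(fd, I2OPARMGET, &cmd) == -1)
                    perror("I2OPARMGET");   /* ENOBUFS means retry with reslen bytes */
            else
                    printf("got %u bytes of parameter data\n", reslen);

            free(res);
            close(fd);
            return 0;
    }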
231 | |||
232 | static int i2o_cfg_swdl(unsigned long arg) | ||
233 | { | ||
234 | struct i2o_sw_xfer kxfer; | ||
235 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
236 | unsigned char maxfrag = 0, curfrag = 1; | ||
237 | struct i2o_dma buffer; | ||
238 | struct i2o_message *msg; | ||
239 | unsigned int status = 0, swlen = 0, fragsize = 8192; | ||
240 | struct i2o_controller *c; | ||
241 | |||
242 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
243 | return -EFAULT; | ||
244 | |||
245 | if (get_user(swlen, kxfer.swlen) < 0) | ||
246 | return -EFAULT; | ||
247 | |||
248 | if (get_user(maxfrag, kxfer.maxfrag) < 0) | ||
249 | return -EFAULT; | ||
250 | |||
251 | if (get_user(curfrag, kxfer.curfrag) < 0) | ||
252 | return -EFAULT; | ||
253 | |||
254 | if (curfrag == maxfrag) | ||
255 | fragsize = swlen - (maxfrag - 1) * 8192; | ||
256 | |||
257 | if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize)) | ||
258 | return -EFAULT; | ||
259 | |||
260 | c = i2o_find_iop(kxfer.iop); | ||
261 | if (!c) | ||
262 | return -ENXIO; | ||
263 | |||
264 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
265 | if (IS_ERR(msg)) | ||
266 | return PTR_ERR(msg); | ||
267 | |||
268 | if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { | ||
269 | i2o_msg_nop(c, msg); | ||
270 | return -ENOMEM; | ||
271 | } | ||
272 | |||
273 | if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) { | ||
274 | i2o_msg_nop(c, msg); | ||
275 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
276 | return -EFAULT; | ||
277 | } | ||
278 | |||
279 | msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); | ||
280 | msg->u.head[1] = | ||
281 | cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | | ||
282 | ADAPTER_TID); | ||
283 | msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); | ||
284 | msg->u.head[3] = cpu_to_le32(0); | ||
285 | msg->body[0] = | ||
286 | cpu_to_le32((((u32) kxfer.flags) << 24) | | ||
287 | (((u32) kxfer.sw_type) << 16) | | ||
288 | (((u32) maxfrag) << 8) | (((u32) curfrag))); | ||
289 | msg->body[1] = cpu_to_le32(swlen); | ||
290 | msg->body[2] = cpu_to_le32(kxfer.sw_id); | ||
291 | msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); | ||
292 | msg->body[4] = cpu_to_le32(buffer.phys); | ||
293 | |||
294 | osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); | ||
295 | status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); | ||
296 | |||
297 | if (status != -ETIMEDOUT) | ||
298 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
299 | |||
300 | if (status != I2O_POST_WAIT_OK) { | ||
301 | // it fails if you try to send frags out of order | ||
302 | // and for some as yet unknown reasons too | ||
303 | osm_info("swdl failed, DetailedStatus = %d\n", status); | ||
304 | return status; | ||
305 | } | ||
306 | |||
307 | return 0; | ||
308 | }; | ||
309 | |||
310 | static int i2o_cfg_swul(unsigned long arg) | ||
311 | { | ||
312 | struct i2o_sw_xfer kxfer; | ||
313 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
314 | unsigned char maxfrag = 0, curfrag = 1; | ||
315 | struct i2o_dma buffer; | ||
316 | struct i2o_message *msg; | ||
317 | unsigned int status = 0, swlen = 0, fragsize = 8192; | ||
318 | struct i2o_controller *c; | ||
319 | int ret = 0; | ||
320 | |||
321 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
322 | return -EFAULT; | ||
323 | |||
324 | if (get_user(swlen, kxfer.swlen) < 0) | ||
325 | return -EFAULT; | ||
326 | |||
327 | if (get_user(maxfrag, kxfer.maxfrag) < 0) | ||
328 | return -EFAULT; | ||
329 | |||
330 | if (get_user(curfrag, kxfer.curfrag) < 0) | ||
331 | return -EFAULT; | ||
332 | |||
333 | if (curfrag == maxfrag) | ||
334 | fragsize = swlen - (maxfrag - 1) * 8192; | ||
335 | |||
336 | if (!kxfer.buf) | ||
337 | return -EFAULT; | ||
338 | |||
339 | c = i2o_find_iop(kxfer.iop); | ||
340 | if (!c) | ||
341 | return -ENXIO; | ||
342 | |||
343 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
344 | if (IS_ERR(msg)) | ||
345 | return PTR_ERR(msg); | ||
346 | |||
347 | if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) { | ||
348 | i2o_msg_nop(c, msg); | ||
349 | return -ENOMEM; | ||
350 | } | ||
351 | |||
352 | msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7); | ||
353 | msg->u.head[1] = | ||
354 | cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID); | ||
355 | msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); | ||
356 | msg->u.head[3] = cpu_to_le32(0); | ||
357 | msg->body[0] = | ||
358 | cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16 | | ||
359 | (u32) maxfrag << 8 | (u32) curfrag); | ||
360 | msg->body[1] = cpu_to_le32(swlen); | ||
361 | msg->body[2] = cpu_to_le32(kxfer.sw_id); | ||
362 | msg->body[3] = cpu_to_le32(0xD0000000 | fragsize); | ||
363 | msg->body[4] = cpu_to_le32(buffer.phys); | ||
364 | |||
365 | osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize); | ||
366 | status = i2o_msg_post_wait_mem(c, msg, 60, &buffer); | ||
367 | |||
368 | if (status != I2O_POST_WAIT_OK) { | ||
369 | if (status != -ETIMEDOUT) | ||
370 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
371 | |||
372 | osm_info("swul failed, DetailedStatus = %d\n", status); | ||
373 | return status; | ||
374 | } | ||
375 | |||
376 | if (copy_to_user(kxfer.buf, buffer.virt, fragsize)) | ||
377 | ret = -EFAULT; | ||
378 | |||
379 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
380 | |||
381 | return ret; | ||
382 | } | ||
383 | |||
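Both the download and upload paths above bounce each firmware fragment through an i2o_dma buffer, which pairs a kernel virtual address with the bus address handed to the IOP. A rough sketch of what such a helper does with the generic DMA API, assuming a struct device from the controller's PCI device; struct frag_buf and the function names are illustrative:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    struct frag_buf {
    	void *virt;		/* kernel virtual address */
    	dma_addr_t phys;	/* bus address handed to the IOP */
    	size_t len;
    };

    static int frag_buf_alloc(struct device *dev, struct frag_buf *b, size_t len)
    {
    	b->virt = dma_alloc_coherent(dev, len, &b->phys, GFP_KERNEL);
    	if (!b->virt)
    		return -ENOMEM;
    	b->len = len;
    	return 0;
    }

    static void frag_buf_free(struct device *dev, struct frag_buf *b)
    {
    	dma_free_coherent(dev, b->len, b->virt, b->phys);
    }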
384 | static int i2o_cfg_swdel(unsigned long arg) | ||
385 | { | ||
386 | struct i2o_controller *c; | ||
387 | struct i2o_sw_xfer kxfer; | ||
388 | struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg; | ||
389 | struct i2o_message *msg; | ||
390 | unsigned int swlen; | ||
391 | int token; | ||
392 | |||
393 | if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer))) | ||
394 | return -EFAULT; | ||
395 | |||
396 | if (get_user(swlen, kxfer.swlen) < 0) | ||
397 | return -EFAULT; | ||
398 | |||
399 | c = i2o_find_iop(kxfer.iop); | ||
400 | if (!c) | ||
401 | return -ENXIO; | ||
402 | |||
403 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
404 | if (IS_ERR(msg)) | ||
405 | return PTR_ERR(msg); | ||
406 | |||
407 | msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
408 | msg->u.head[1] = | ||
409 | cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID); | ||
410 | msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); | ||
411 | msg->u.head[3] = cpu_to_le32(0); | ||
412 | msg->body[0] = | ||
413 | cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16); | ||
414 | msg->body[1] = cpu_to_le32(swlen); | ||
415 | msg->body[2] = cpu_to_le32(kxfer.sw_id); | ||
416 | |||
417 | token = i2o_msg_post_wait(c, msg, 10); | ||
418 | |||
419 | if (token != I2O_POST_WAIT_OK) { | ||
420 | osm_info("swdel failed, DetailedStatus = %d\n", token); | ||
421 | return -ETIMEDOUT; | ||
422 | } | ||
423 | |||
424 | return 0; | ||
425 | }; | ||
426 | |||
427 | static int i2o_cfg_validate(unsigned long arg) | ||
428 | { | ||
429 | int token; | ||
430 | int iop = (int)arg; | ||
431 | struct i2o_message *msg; | ||
432 | struct i2o_controller *c; | ||
433 | |||
434 | c = i2o_find_iop(iop); | ||
435 | if (!c) | ||
436 | return -ENXIO; | ||
437 | |||
438 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
439 | if (IS_ERR(msg)) | ||
440 | return PTR_ERR(msg); | ||
441 | |||
442 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
443 | msg->u.head[1] = | ||
444 | cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop); | ||
445 | msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); | ||
446 | msg->u.head[3] = cpu_to_le32(0); | ||
447 | |||
448 | token = i2o_msg_post_wait(c, msg, 10); | ||
449 | |||
450 | if (token != I2O_POST_WAIT_OK) { | ||
451 | osm_info("Can't validate configuration, ErrorStatus = %d\n", | ||
452 | token); | ||
453 | return -ETIMEDOUT; | ||
454 | } | ||
455 | |||
456 | return 0; | ||
457 | }; | ||
458 | |||
459 | static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp) | ||
460 | { | ||
461 | struct i2o_message *msg; | ||
462 | struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg; | ||
463 | struct i2o_evt_id kdesc; | ||
464 | struct i2o_controller *c; | ||
465 | struct i2o_device *d; | ||
466 | |||
467 | if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id))) | ||
468 | return -EFAULT; | ||
469 | |||
470 | /* IOP exists? */ | ||
471 | c = i2o_find_iop(kdesc.iop); | ||
472 | if (!c) | ||
473 | return -ENXIO; | ||
474 | |||
475 | /* Device exists? */ | ||
476 | d = i2o_iop_find_device(c, kdesc.tid); | ||
477 | if (!d) | ||
478 | return -ENODEV; | ||
479 | |||
480 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
481 | if (IS_ERR(msg)) | ||
482 | return PTR_ERR(msg); | ||
483 | |||
484 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
485 | msg->u.head[1] = | ||
486 | cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | | ||
487 | kdesc.tid); | ||
488 | msg->u.head[2] = cpu_to_le32(i2o_config_driver.context); | ||
489 | msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data)); | ||
490 | msg->body[0] = cpu_to_le32(kdesc.evt_mask); | ||
491 | |||
492 | i2o_msg_post(c, msg); | ||
493 | |||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) | ||
498 | { | ||
499 | struct i2o_cfg_info *p = NULL; | ||
500 | struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg; | ||
501 | struct i2o_evt_get kget; | ||
502 | unsigned long flags; | ||
503 | |||
504 | for (p = open_files; p; p = p->next) | ||
505 | if (p->q_id == (ulong) fp->private_data) | ||
506 | break; | ||
507 | |||
508 | if (!p || !p->q_len) | ||
509 | return -ENOENT; | ||
510 | |||
511 | memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info)); | ||
512 | MODINC(p->q_out, I2O_EVT_Q_LEN); | ||
513 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
514 | p->q_len--; | ||
515 | kget.pending = p->q_len; | ||
516 | kget.lost = p->q_lost; | ||
517 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
518 | |||
519 | if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get))) | ||
520 | return -EFAULT; | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | #ifdef CONFIG_COMPAT | ||
525 | static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, | ||
526 | unsigned long arg) | ||
527 | { | ||
528 | struct i2o_cmd_passthru32 __user *cmd; | ||
529 | struct i2o_controller *c; | ||
530 | u32 __user *user_msg; | ||
531 | u32 *reply = NULL; | ||
532 | u32 __user *user_reply = NULL; | ||
533 | u32 size = 0; | ||
534 | u32 reply_size = 0; | ||
535 | u32 rcode = 0; | ||
536 | struct i2o_dma sg_list[SG_TABLESIZE]; | ||
537 | u32 sg_offset = 0; | ||
538 | u32 sg_count = 0; | ||
539 | u32 i = 0; | ||
540 | u32 sg_index = 0; | ||
541 | i2o_status_block *sb; | ||
542 | struct i2o_message *msg; | ||
543 | unsigned int iop; | ||
544 | |||
545 | cmd = (struct i2o_cmd_passthru32 __user *)arg; | ||
546 | |||
547 | if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg)) | ||
548 | return -EFAULT; | ||
549 | |||
550 | user_msg = compat_ptr(i); | ||
551 | |||
552 | c = i2o_find_iop(iop); | ||
553 | if (!c) { | ||
554 | osm_debug("controller %d not found\n", iop); | ||
555 | return -ENXIO; | ||
556 | } | ||
557 | |||
558 | sb = c->status_block.virt; | ||
559 | |||
560 | if (get_user(size, &user_msg[0])) { | ||
561 | osm_warn("unable to get size!\n"); | ||
562 | return -EFAULT; | ||
563 | } | ||
564 | size = size >> 16; | ||
565 | |||
566 | if (size > sb->inbound_frame_size) { | ||
567 | osm_warn("size of message > inbound_frame_size\n"); | ||
568 | return -EFAULT; | ||
569 | } | ||
570 | |||
571 | user_reply = &user_msg[size]; | ||
572 | |||
573 | size <<= 2; // Convert to bytes | ||
574 | |||
575 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
576 | if (IS_ERR(msg)) | ||
577 | return PTR_ERR(msg); | ||
578 | |||
579 | rcode = -EFAULT; | ||
580 | /* Copy in the user's I2O command */ | ||
581 | if (copy_from_user(msg, user_msg, size)) { | ||
582 | osm_warn("unable to copy user message\n"); | ||
583 | goto out; | ||
584 | } | ||
585 | i2o_dump_message(msg); | ||
586 | |||
587 | if (get_user(reply_size, &user_reply[0]) < 0) | ||
588 | goto out; | ||
589 | |||
590 | reply_size >>= 16; | ||
591 | reply_size <<= 2; | ||
592 | |||
593 | rcode = -ENOMEM; | ||
594 | reply = kzalloc(reply_size, GFP_KERNEL); | ||
595 | if (!reply) { | ||
596 | printk(KERN_WARNING "%s: Could not allocate reply buffer\n", | ||
597 | c->name); | ||
598 | goto out; | ||
599 | } | ||
600 | |||
601 | sg_offset = (msg->u.head[0] >> 4) & 0x0f; | ||
602 | |||
603 | memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); | ||
604 | if (sg_offset) { | ||
605 | struct sg_simple_element *sg; | ||
606 | |||
607 | if (sg_offset * 4 >= size) { | ||
608 | rcode = -EFAULT; | ||
609 | goto cleanup; | ||
610 | } | ||
611 | // TODO 64bit fix | ||
612 | sg = (struct sg_simple_element *)((&msg->u.head[0]) + | ||
613 | sg_offset); | ||
614 | sg_count = | ||
615 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
616 | if (sg_count > SG_TABLESIZE) { | ||
617 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", | ||
618 | c->name, sg_count); | ||
619 | rcode = -EINVAL; | ||
620 | goto cleanup; | ||
621 | } | ||
622 | |||
623 | for (i = 0; i < sg_count; i++) { | ||
624 | int sg_size; | ||
625 | struct i2o_dma *p; | ||
626 | |||
627 | if (!(sg[i].flag_count & 0x10000000 | ||
628 | /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { | ||
629 | printk(KERN_DEBUG | ||
630 | "%s:Bad SG element %d - not simple (%x)\n", | ||
631 | c->name, i, sg[i].flag_count); | ||
632 | rcode = -EINVAL; | ||
633 | goto cleanup; | ||
634 | } | ||
635 | sg_size = sg[i].flag_count & 0xffffff; | ||
636 | p = &(sg_list[sg_index]); | ||
637 | /* Allocate memory for the transfer */ | ||
638 | if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { | ||
639 | printk(KERN_DEBUG | ||
640 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
641 | c->name, sg_size, i, sg_count); | ||
642 | rcode = -ENOMEM; | ||
643 | goto sg_list_cleanup; | ||
644 | } | ||
645 | sg_index++; | ||
646 | /* Copy in the user's SG buffer if necessary */ | ||
647 | if (sg[i].flag_count & | ||
648 | 0x04000000 /* I2O_SGL_FLAGS_DIR */ ) { | ||
649 | // TODO 64bit fix | ||
650 | if (copy_from_user(p->virt, | ||
651 | (void __user *)(unsigned long) | ||
652 | sg[i].addr_bus, | ||
653 | sg_size)) { | ||
654 | printk(KERN_DEBUG | ||
655 | "%s: Could not copy SG buf %d FROM user\n", | ||
656 | c->name, i); | ||
657 | rcode = -EFAULT; | ||
658 | goto sg_list_cleanup; | ||
659 | } | ||
660 | } | ||
661 | //TODO 64bit fix | ||
662 | sg[i].addr_bus = (u32) p->phys; | ||
663 | } | ||
664 | } | ||
665 | |||
666 | rcode = i2o_msg_post_wait(c, msg, 60); | ||
667 | msg = NULL; | ||
668 | if (rcode) { | ||
669 | reply[4] = ((u32) rcode) << 24; | ||
670 | goto sg_list_cleanup; | ||
671 | } | ||
672 | |||
673 | if (sg_offset) { | ||
674 | u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE]; | ||
675 | /* Copy back the Scatter Gather buffers back to user space */ | ||
676 | u32 j; | ||
677 | // TODO 64bit fix | ||
678 | struct sg_simple_element *sg; | ||
679 | int sg_size; | ||
680 | |||
681 | // re-read the original user message so the SG copy-out can be handled correctly | ||
682 | memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); | ||
683 | // get user msg size in u32s | ||
684 | if (get_user(size, &user_msg[0])) { | ||
685 | rcode = -EFAULT; | ||
686 | goto sg_list_cleanup; | ||
687 | } | ||
688 | size = size >> 16; | ||
689 | size *= 4; | ||
690 | if (size > sizeof(rmsg)) { | ||
691 | rcode = -EINVAL; | ||
692 | goto sg_list_cleanup; | ||
693 | } | ||
694 | |||
695 | /* Copy in the user's I2O command */ | ||
696 | if (copy_from_user(rmsg, user_msg, size)) { | ||
697 | rcode = -EFAULT; | ||
698 | goto sg_list_cleanup; | ||
699 | } | ||
700 | sg_count = | ||
701 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
702 | |||
703 | // TODO 64bit fix | ||
704 | sg = (struct sg_simple_element *)(rmsg + sg_offset); | ||
705 | for (j = 0; j < sg_count; j++) { | ||
706 | /* Copy out the SG list to user's buffer if necessary */ | ||
707 | if (! | ||
708 | (sg[j]. | ||
709 | flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { | ||
710 | sg_size = sg[j].flag_count & 0xffffff; | ||
711 | // TODO 64bit fix | ||
712 | if (copy_to_user | ||
713 | ((void __user *)(u64) sg[j].addr_bus, | ||
714 | sg_list[j].virt, sg_size)) { | ||
715 | printk(KERN_WARNING | ||
716 | "%s: Could not copy %p TO user %x\n", | ||
717 | c->name, sg_list[j].virt, | ||
718 | sg[j].addr_bus); | ||
719 | rcode = -EFAULT; | ||
720 | goto sg_list_cleanup; | ||
721 | } | ||
722 | } | ||
723 | } | ||
724 | } | ||
725 | |||
726 | sg_list_cleanup: | ||
727 | /* Copy back the reply to user space */ | ||
728 | if (reply_size) { | ||
729 | // we wrote our own values for context - now restore the user supplied ones | ||
730 | if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { | ||
731 | printk(KERN_WARNING | ||
732 | "%s: Could not copy message context FROM user\n", | ||
733 | c->name); | ||
734 | rcode = -EFAULT; | ||
735 | } | ||
736 | if (copy_to_user(user_reply, reply, reply_size)) { | ||
737 | printk(KERN_WARNING | ||
738 | "%s: Could not copy reply TO user\n", c->name); | ||
739 | rcode = -EFAULT; | ||
740 | } | ||
741 | } | ||
742 | for (i = 0; i < sg_index; i++) | ||
743 | i2o_dma_free(&c->pdev->dev, &sg_list[i]); | ||
744 | |||
745 | cleanup: | ||
746 | kfree(reply); | ||
747 | out: | ||
748 | if (msg) | ||
749 | i2o_msg_nop(c, msg); | ||
750 | return rcode; | ||
751 | } | ||
752 | |||
753 | static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, | ||
754 | unsigned long arg) | ||
755 | { | ||
756 | int ret; | ||
757 | switch (cmd) { | ||
758 | case I2OGETIOPS: | ||
759 | ret = i2o_cfg_ioctl(file, cmd, arg); | ||
760 | break; | ||
761 | case I2OPASSTHRU32: | ||
762 | mutex_lock(&i2o_cfg_mutex); | ||
763 | ret = i2o_cfg_passthru32(file, cmd, arg); | ||
764 | mutex_unlock(&i2o_cfg_mutex); | ||
765 | break; | ||
766 | default: | ||
767 | ret = -ENOIOCTLCMD; | ||
768 | break; | ||
769 | } | ||
770 | return ret; | ||
771 | } | ||
772 | |||
773 | #endif | ||
774 | |||
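The 32-bit passthru path above reads a compat_uptr_t out of the user's command block and widens it with compat_ptr() before treating it as a native user pointer. A minimal sketch of that conversion; struct my_cmd32 stands in for i2o_cmd_passthru32 and all names here are illustrative:

    #include <linux/compat.h>
    #include <linux/uaccess.h>

    struct my_cmd32 {
    	unsigned int iop;
    	compat_uptr_t msg;	/* 32-bit user-space pointer */
    };

    static int read_msg_pointer(unsigned long arg, u32 __user **msg_out)
    {
    	struct my_cmd32 __user *cmd = compat_ptr(arg);
    	compat_uptr_t p;

    	if (get_user(p, &cmd->msg))
    		return -EFAULT;

    	*msg_out = compat_ptr(p);	/* widen to a native __user pointer */
    	return 0;
    }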
775 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
776 | static int i2o_cfg_passthru(unsigned long arg) | ||
777 | { | ||
778 | struct i2o_cmd_passthru __user *cmd = | ||
779 | (struct i2o_cmd_passthru __user *)arg; | ||
780 | struct i2o_controller *c; | ||
781 | u32 __user *user_msg; | ||
782 | u32 *reply = NULL; | ||
783 | u32 __user *user_reply = NULL; | ||
784 | u32 size = 0; | ||
785 | u32 reply_size = 0; | ||
786 | u32 rcode = 0; | ||
787 | struct i2o_dma sg_list[SG_TABLESIZE]; | ||
788 | u32 sg_offset = 0; | ||
789 | u32 sg_count = 0; | ||
790 | int sg_index = 0; | ||
791 | u32 i = 0; | ||
792 | i2o_status_block *sb; | ||
793 | struct i2o_message *msg; | ||
794 | unsigned int iop; | ||
795 | |||
796 | if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg)) | ||
797 | return -EFAULT; | ||
798 | |||
799 | c = i2o_find_iop(iop); | ||
800 | if (!c) { | ||
801 | osm_warn("controller %d not found\n", iop); | ||
802 | return -ENXIO; | ||
803 | } | ||
804 | |||
805 | sb = c->status_block.virt; | ||
806 | |||
807 | if (get_user(size, &user_msg[0])) | ||
808 | return -EFAULT; | ||
809 | size = size >> 16; | ||
810 | |||
811 | if (size > sb->inbound_frame_size) { | ||
812 | osm_warn("size of message > inbound_frame_size\n"); | ||
813 | return -EFAULT; | ||
814 | } | ||
815 | |||
816 | user_reply = &user_msg[size]; | ||
817 | |||
818 | size <<= 2; // Convert to bytes | ||
819 | |||
820 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
821 | if (IS_ERR(msg)) | ||
822 | return PTR_ERR(msg); | ||
823 | |||
824 | rcode = -EFAULT; | ||
825 | /* Copy in the user's I2O command */ | ||
826 | if (copy_from_user(msg, user_msg, size)) | ||
827 | goto out; | ||
828 | |||
829 | if (get_user(reply_size, &user_reply[0]) < 0) | ||
830 | goto out; | ||
831 | |||
832 | reply_size >>= 16; | ||
833 | reply_size <<= 2; | ||
834 | |||
835 | reply = kzalloc(reply_size, GFP_KERNEL); | ||
836 | if (!reply) { | ||
837 | printk(KERN_WARNING "%s: Could not allocate reply buffer\n", | ||
838 | c->name); | ||
839 | rcode = -ENOMEM; | ||
840 | goto out; | ||
841 | } | ||
842 | |||
843 | sg_offset = (msg->u.head[0] >> 4) & 0x0f; | ||
844 | |||
845 | memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE); | ||
846 | if (sg_offset) { | ||
847 | struct sg_simple_element *sg; | ||
848 | struct i2o_dma *p; | ||
849 | |||
850 | if (sg_offset * 4 >= size) { | ||
851 | rcode = -EFAULT; | ||
852 | goto cleanup; | ||
853 | } | ||
854 | // TODO 64bit fix | ||
855 | sg = (struct sg_simple_element *)((&msg->u.head[0]) + | ||
856 | sg_offset); | ||
857 | sg_count = | ||
858 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
859 | if (sg_count > SG_TABLESIZE) { | ||
860 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", | ||
861 | c->name, sg_count); | ||
862 | rcode = -EINVAL; | ||
863 | goto cleanup; | ||
864 | } | ||
865 | |||
866 | for (i = 0; i < sg_count; i++) { | ||
867 | int sg_size; | ||
868 | |||
869 | if (!(sg[i].flag_count & 0x10000000 | ||
870 | /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) { | ||
871 | printk(KERN_DEBUG | ||
872 | "%s:Bad SG element %d - not simple (%x)\n", | ||
873 | c->name, i, sg[i].flag_count); | ||
874 | rcode = -EINVAL; | ||
875 | goto sg_list_cleanup; | ||
876 | } | ||
877 | sg_size = sg[i].flag_count & 0xffffff; | ||
878 | p = &(sg_list[sg_index]); | ||
879 | if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) { | ||
880 | /* Allocate memory for the transfer */ | ||
881 | printk(KERN_DEBUG | ||
882 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | ||
883 | c->name, sg_size, i, sg_count); | ||
884 | rcode = -ENOMEM; | ||
885 | goto sg_list_cleanup; | ||
886 | } | ||
887 | sg_index++; | ||
888 | /* Copy in the user's SG buffer if necessary */ | ||
889 | if (sg[i].flag_count & | ||
890 | 0x04000000 /* I2O_SGL_FLAGS_DIR */ ) { | ||
891 | // TODO 64bit fix | ||
892 | if (copy_from_user(p->virt, | ||
893 | (void __user *)sg[i].addr_bus, | ||
894 | sg_size)) { | ||
895 | printk(KERN_DEBUG | ||
896 | "%s: Could not copy SG buf %d FROM user\n", | ||
897 | c->name, i); | ||
898 | rcode = -EFAULT; | ||
899 | goto sg_list_cleanup; | ||
900 | } | ||
901 | } | ||
902 | sg[i].addr_bus = p->phys; | ||
903 | } | ||
904 | } | ||
905 | |||
906 | rcode = i2o_msg_post_wait(c, msg, 60); | ||
907 | msg = NULL; | ||
908 | if (rcode) { | ||
909 | reply[4] = ((u32) rcode) << 24; | ||
910 | goto sg_list_cleanup; | ||
911 | } | ||
912 | |||
913 | if (sg_offset) { | ||
914 | u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE]; | ||
915 | /* Copy back the Scatter Gather buffers back to user space */ | ||
916 | u32 j; | ||
917 | // TODO 64bit fix | ||
918 | struct sg_simple_element *sg; | ||
919 | int sg_size; | ||
920 | |||
921 | // re-read the original user message so the SG copy-out can be handled correctly | ||
922 | memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); | ||
923 | // get user msg size in u32s | ||
924 | if (get_user(size, &user_msg[0])) { | ||
925 | rcode = -EFAULT; | ||
926 | goto sg_list_cleanup; | ||
927 | } | ||
928 | size = size >> 16; | ||
929 | size *= 4; | ||
930 | if (size > sizeof(rmsg)) { | ||
931 | rcode = -EFAULT; | ||
932 | goto sg_list_cleanup; | ||
933 | } | ||
934 | |||
935 | /* Copy in the user's I2O command */ | ||
936 | if (copy_from_user(rmsg, user_msg, size)) { | ||
937 | rcode = -EFAULT; | ||
938 | goto sg_list_cleanup; | ||
939 | } | ||
940 | sg_count = | ||
941 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | ||
942 | |||
943 | // TODO 64bit fix | ||
944 | sg = (struct sg_simple_element *)(rmsg + sg_offset); | ||
945 | for (j = 0; j < sg_count; j++) { | ||
946 | /* Copy out the SG list to user's buffer if necessary */ | ||
947 | if (! | ||
948 | (sg[j]. | ||
949 | flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) { | ||
950 | sg_size = sg[j].flag_count & 0xffffff; | ||
951 | // TODO 64bit fix | ||
952 | if (copy_to_user | ||
953 | ((void __user *)sg[j].addr_bus, sg_list[j].virt, | ||
954 | sg_size)) { | ||
955 | printk(KERN_WARNING | ||
956 | "%s: Could not copy %p TO user %x\n", | ||
957 | c->name, sg_list[j].virt, | ||
958 | sg[j].addr_bus); | ||
959 | rcode = -EFAULT; | ||
960 | goto sg_list_cleanup; | ||
961 | } | ||
962 | } | ||
963 | } | ||
964 | } | ||
965 | |||
966 | sg_list_cleanup: | ||
967 | /* Copy back the reply to user space */ | ||
968 | if (reply_size) { | ||
969 | // we wrote our own values for context - now restore the user supplied ones | ||
970 | if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) { | ||
971 | printk(KERN_WARNING | ||
972 | "%s: Could not copy message context FROM user\n", | ||
973 | c->name); | ||
974 | rcode = -EFAULT; | ||
975 | } | ||
976 | if (copy_to_user(user_reply, reply, reply_size)) { | ||
977 | printk(KERN_WARNING | ||
978 | "%s: Could not copy reply TO user\n", c->name); | ||
979 | rcode = -EFAULT; | ||
980 | } | ||
981 | } | ||
982 | |||
983 | for (i = 0; i < sg_index; i++) | ||
984 | i2o_dma_free(&c->pdev->dev, &sg_list[i]); | ||
985 | |||
986 | cleanup: | ||
987 | kfree(reply); | ||
988 | out: | ||
989 | if (msg) | ||
990 | i2o_msg_nop(c, msg); | ||
991 | return rcode; | ||
992 | } | ||
993 | #endif | ||
994 | |||
995 | /* | ||
996 | * IOCTL Handler | ||
997 | */ | ||
998 | static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) | ||
999 | { | ||
1000 | int ret; | ||
1001 | |||
1002 | mutex_lock(&i2o_cfg_mutex); | ||
1003 | switch (cmd) { | ||
1004 | case I2OGETIOPS: | ||
1005 | ret = i2o_cfg_getiops(arg); | ||
1006 | break; | ||
1007 | |||
1008 | case I2OHRTGET: | ||
1009 | ret = i2o_cfg_gethrt(arg); | ||
1010 | break; | ||
1011 | |||
1012 | case I2OLCTGET: | ||
1013 | ret = i2o_cfg_getlct(arg); | ||
1014 | break; | ||
1015 | |||
1016 | case I2OPARMSET: | ||
1017 | ret = i2o_cfg_parms(arg, I2OPARMSET); | ||
1018 | break; | ||
1019 | |||
1020 | case I2OPARMGET: | ||
1021 | ret = i2o_cfg_parms(arg, I2OPARMGET); | ||
1022 | break; | ||
1023 | |||
1024 | case I2OSWDL: | ||
1025 | ret = i2o_cfg_swdl(arg); | ||
1026 | break; | ||
1027 | |||
1028 | case I2OSWUL: | ||
1029 | ret = i2o_cfg_swul(arg); | ||
1030 | break; | ||
1031 | |||
1032 | case I2OSWDEL: | ||
1033 | ret = i2o_cfg_swdel(arg); | ||
1034 | break; | ||
1035 | |||
1036 | case I2OVALIDATE: | ||
1037 | ret = i2o_cfg_validate(arg); | ||
1038 | break; | ||
1039 | |||
1040 | case I2OEVTREG: | ||
1041 | ret = i2o_cfg_evt_reg(arg, fp); | ||
1042 | break; | ||
1043 | |||
1044 | case I2OEVTGET: | ||
1045 | ret = i2o_cfg_evt_get(arg, fp); | ||
1046 | break; | ||
1047 | |||
1048 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
1049 | case I2OPASSTHRU: | ||
1050 | ret = i2o_cfg_passthru(arg); | ||
1051 | break; | ||
1052 | #endif | ||
1053 | |||
1054 | default: | ||
1055 | osm_debug("unknown ioctl called!\n"); | ||
1056 | ret = -EINVAL; | ||
1057 | } | ||
1058 | mutex_unlock(&i2o_cfg_mutex); | ||
1059 | return ret; | ||
1060 | } | ||
1061 | |||
1062 | static int cfg_open(struct inode *inode, struct file *file) | ||
1063 | { | ||
1064 | struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info), | ||
1065 | GFP_KERNEL); | ||
1066 | unsigned long flags; | ||
1067 | |||
1068 | if (!tmp) | ||
1069 | return -ENOMEM; | ||
1070 | |||
1071 | mutex_lock(&i2o_cfg_mutex); | ||
1072 | file->private_data = (void *)(i2o_cfg_info_id++); | ||
1073 | tmp->fp = file; | ||
1074 | tmp->fasync = NULL; | ||
1075 | tmp->q_id = (ulong) file->private_data; | ||
1076 | tmp->q_len = 0; | ||
1077 | tmp->q_in = 0; | ||
1078 | tmp->q_out = 0; | ||
1079 | tmp->q_lost = 0; | ||
1080 | tmp->next = open_files; | ||
1081 | |||
1082 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
1083 | open_files = tmp; | ||
1084 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
1085 | mutex_unlock(&i2o_cfg_mutex); | ||
1086 | |||
1087 | return 0; | ||
1088 | } | ||
1089 | |||
1090 | static int cfg_fasync(int fd, struct file *fp, int on) | ||
1091 | { | ||
1092 | ulong id = (ulong) fp->private_data; | ||
1093 | struct i2o_cfg_info *p; | ||
1094 | int ret = -EBADF; | ||
1095 | |||
1096 | mutex_lock(&i2o_cfg_mutex); | ||
1097 | for (p = open_files; p; p = p->next) | ||
1098 | if (p->q_id == id) | ||
1099 | break; | ||
1100 | |||
1101 | if (p) | ||
1102 | ret = fasync_helper(fd, fp, on, &p->fasync); | ||
1103 | mutex_unlock(&i2o_cfg_mutex); | ||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1107 | static int cfg_release(struct inode *inode, struct file *file) | ||
1108 | { | ||
1109 | ulong id = (ulong) file->private_data; | ||
1110 | struct i2o_cfg_info *p, **q; | ||
1111 | unsigned long flags; | ||
1112 | |||
1113 | mutex_lock(&i2o_cfg_mutex); | ||
1114 | spin_lock_irqsave(&i2o_config_lock, flags); | ||
1115 | for (q = &open_files; (p = *q) != NULL; q = &p->next) { | ||
1116 | if (p->q_id == id) { | ||
1117 | *q = p->next; | ||
1118 | kfree(p); | ||
1119 | break; | ||
1120 | } | ||
1121 | } | ||
1122 | spin_unlock_irqrestore(&i2o_config_lock, flags); | ||
1123 | mutex_unlock(&i2o_cfg_mutex); | ||
1124 | |||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | static const struct file_operations config_fops = { | ||
1129 | .owner = THIS_MODULE, | ||
1130 | .llseek = no_llseek, | ||
1131 | .unlocked_ioctl = i2o_cfg_ioctl, | ||
1132 | #ifdef CONFIG_COMPAT | ||
1133 | .compat_ioctl = i2o_cfg_compat_ioctl, | ||
1134 | #endif | ||
1135 | .open = cfg_open, | ||
1136 | .release = cfg_release, | ||
1137 | .fasync = cfg_fasync, | ||
1138 | }; | ||
1139 | |||
1140 | static struct miscdevice i2o_miscdev = { | ||
1141 | I2O_MINOR, | ||
1142 | "i2octl", | ||
1143 | &config_fops | ||
1144 | }; | ||
1145 | |||
1146 | static int __init i2o_config_old_init(void) | ||
1147 | { | ||
1148 | spin_lock_init(&i2o_config_lock); | ||
1149 | |||
1150 | if (misc_register(&i2o_miscdev) < 0) { | ||
1151 | osm_err("can't register device.\n"); | ||
1152 | return -EBUSY; | ||
1153 | } | ||
1154 | |||
1155 | return 0; | ||
1156 | } | ||
1157 | |||
1158 | static void i2o_config_old_exit(void) | ||
1159 | { | ||
1160 | misc_deregister(&i2o_miscdev); | ||
1161 | } | ||
1162 | |||
1163 | MODULE_AUTHOR("Red Hat Software"); | ||
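The config interface above is exposed as a miscdevice named "i2octl" on the fixed I2O_MINOR, bound to the file_operations table shown earlier. The general registration pattern, sketched with illustrative names and a dynamic minor rather than the driver's fixed one:

    #include <linux/fs.h>
    #include <linux/miscdevice.h>
    #include <linux/module.h>

    static long my_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
    {
    	return -ENOTTY;			/* no commands in this sketch */
    }

    static const struct file_operations my_fops = {
    	.owner		= THIS_MODULE,
    	.llseek		= no_llseek,
    	.unlocked_ioctl	= my_ioctl,
    };

    static struct miscdevice my_miscdev = {
    	.minor	= MISC_DYNAMIC_MINOR,	/* the driver above uses a fixed I2O_MINOR */
    	.name	= "myctl",
    	.fops	= &my_fops,
    };

    static int __init my_init(void)
    {
    	return misc_register(&my_miscdev);	/* 0 on success, -errno on failure */
    }

    static void __exit my_exit(void)
    {
    	misc_deregister(&my_miscdev);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");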
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c deleted file mode 100644 index b7d87cd227a9..000000000000 --- a/drivers/message/i2o/i2o_proc.c +++ /dev/null | |||
@@ -1,2045 +0,0 @@ | |||
1 | /* | ||
2 | * procfs handler for Linux I2O subsystem | ||
3 | * | ||
4 | * (c) Copyright 1999 Deepak Saxena | ||
5 | * | ||
6 | * Originally written by Deepak Saxena(deepak@plexity.net) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This is an initial test release. The code is based on the design of the | ||
14 | * ide procfs system (drivers/block/ide-proc.c). Some code taken from | ||
15 | * i2o-core module by Alan Cox. | ||
16 | * | ||
17 | * DISCLAIMER: This code is still under development/test and may cause | ||
18 | * your system to behave unpredictably. Use at your own discretion. | ||
19 | * | ||
20 | * | ||
21 | * Fixes/additions: | ||
22 | * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI), | ||
23 | * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI) | ||
24 | * University of Helsinki, Department of Computer Science | ||
25 | * LAN entries | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
27 | * Changes for new I2O API | ||
28 | */ | ||
29 | |||
30 | #define OSM_NAME "proc-osm" | ||
31 | #define OSM_VERSION "1.316" | ||
32 | #define OSM_DESCRIPTION "I2O ProcFS OSM" | ||
33 | |||
34 | #define I2O_MAX_MODULES 4 | ||
35 | // FIXME! | ||
36 | #define FMT_U64_HEX "0x%08x%08x" | ||
37 | #define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64)) | ||
38 | |||
39 | #include <linux/types.h> | ||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/pci.h> | ||
42 | #include <linux/i2o.h> | ||
43 | #include <linux/slab.h> | ||
44 | #include <linux/proc_fs.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/init.h> | ||
47 | #include <linux/module.h> | ||
48 | #include <linux/errno.h> | ||
49 | #include <linux/spinlock.h> | ||
50 | #include <linux/workqueue.h> | ||
51 | |||
52 | #include <asm/io.h> | ||
53 | #include <asm/uaccess.h> | ||
54 | #include <asm/byteorder.h> | ||
55 | |||
56 | /* Structure used to define /proc entries */ | ||
57 | typedef struct _i2o_proc_entry_t { | ||
58 | char *name; /* entry name */ | ||
59 | umode_t mode; /* mode */ | ||
60 | const struct file_operations *fops; /* open function */ | ||
61 | } i2o_proc_entry; | ||
62 | |||
63 | /* global I2O /proc/i2o entry */ | ||
64 | static struct proc_dir_entry *i2o_proc_dir_root; | ||
65 | |||
66 | /* proc OSM driver struct */ | ||
67 | static struct i2o_driver i2o_proc_driver = { | ||
68 | .name = OSM_NAME, | ||
69 | }; | ||
70 | |||
71 | static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len) | ||
72 | { | ||
73 | int i; | ||
74 | |||
75 | /* 19990419 -sralston | ||
76 | * The I2O v1.5 (and v2.0 so far) "official specification" | ||
77 | * got serial numbers WRONG! | ||
78 | * Apparently, and despite what Section 3.4.4 says and | ||
79 | * Figure 3-35 shows (pg 3-39 in the pdf doc), | ||
80 | * the convention / consensus seems to be: | ||
81 | * + First byte is SNFormat | ||
82 | * + Second byte is SNLen (but only if SNFormat==7 (?)) | ||
83 | * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format | ||
84 | */ | ||
85 | switch (serialno[0]) { | ||
86 | case I2O_SNFORMAT_BINARY: /* Binary */ | ||
87 | seq_printf(seq, "0x"); | ||
88 | for (i = 0; i < serialno[1]; i++) { | ||
89 | seq_printf(seq, "%02X", serialno[2 + i]); | ||
90 | } | ||
91 | break; | ||
92 | |||
93 | case I2O_SNFORMAT_ASCII: /* ASCII */ | ||
94 | if (serialno[1] < ' ') { /* printable or SNLen? */ | ||
95 | /* sanity */ | ||
96 | max_len = | ||
97 | (max_len < serialno[1]) ? max_len : serialno[1]; | ||
98 | serialno[1 + max_len] = '\0'; | ||
99 | |||
100 | /* just print it */ | ||
101 | seq_printf(seq, "%s", &serialno[2]); | ||
102 | } else { | ||
103 | /* print chars for specified length */ | ||
104 | for (i = 0; i < serialno[1]; i++) { | ||
105 | seq_printf(seq, "%c", serialno[2 + i]); | ||
106 | } | ||
107 | } | ||
108 | break; | ||
109 | |||
110 | case I2O_SNFORMAT_UNICODE: /* UNICODE */ | ||
111 | seq_printf(seq, "UNICODE Format. Can't Display\n"); | ||
112 | break; | ||
113 | |||
114 | case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */ | ||
115 | seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]); | ||
116 | break; | ||
117 | |||
118 | case I2O_SNFORMAT_WAN: /* WAN MAC Address */ | ||
119 | /* FIXME: Figure out what a WAN access address looks like?? */ | ||
120 | seq_printf(seq, "WAN Access Address"); | ||
121 | break; | ||
122 | |||
123 | /* plus new in v2.0 */ | ||
124 | case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */ | ||
125 | /* FIXME: Figure out what a LAN-64 address really looks like?? */ | ||
126 | seq_printf(seq, | ||
127 | "LAN-64 MAC address @ [?:%02X:%02X:?] %pM", | ||
128 | serialno[8], serialno[9], &serialno[2]); | ||
129 | break; | ||
130 | |||
131 | case I2O_SNFORMAT_DDM: /* I2O DDM */ | ||
132 | seq_printf(seq, | ||
133 | "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh", | ||
134 | *(u16 *) & serialno[2], | ||
135 | *(u16 *) & serialno[4], *(u16 *) & serialno[6]); | ||
136 | break; | ||
137 | |||
138 | case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */ | ||
139 | case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */ | ||
140 | /* FIXME: Figure if this is even close?? */ | ||
141 | seq_printf(seq, | ||
142 | "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n", | ||
143 | *(u32 *) & serialno[2], | ||
144 | *(u32 *) & serialno[6], | ||
145 | *(u32 *) & serialno[10], *(u32 *) & serialno[14]); | ||
146 | break; | ||
147 | |||
148 | case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */ | ||
149 | case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */ | ||
150 | default: | ||
151 | seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]); | ||
152 | break; | ||
153 | } | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
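Each /proc node defined in this file boils down to a seq_file show() routine wired up through an open() wrapper; the i2o_proc_entry table above simply pairs a name with such a file_operations. A minimal sketch of that pattern with illustrative names, where PDE_DATA() carries the per-entry private pointer through single_open():

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int my_show(struct seq_file *seq, void *v)
    {
    	/* seq->private holds whatever was passed to single_open() */
    	seq_printf(seq, "example entry, private=%p\n", seq->private);
    	return 0;
    }

    static int my_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, my_show, PDE_DATA(inode));
    }

    static const struct file_operations my_proc_fops = {
    	.owner	 = THIS_MODULE,
    	.open	 = my_open,
    	.read	 = seq_read,
    	.llseek	 = seq_lseek,
    	.release = single_release,
    };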
158 | /** | ||
159 | * i2o_get_class_name - do i2o class name lookup | ||
160 | * @class: class number | ||
161 | * | ||
162 | * Return a descriptive string for an i2o class. | ||
163 | */ | ||
164 | static const char *i2o_get_class_name(int class) | ||
165 | { | ||
166 | int idx = 16; | ||
167 | static char *i2o_class_name[] = { | ||
168 | "Executive", | ||
169 | "Device Driver Module", | ||
170 | "Block Device", | ||
171 | "Tape Device", | ||
172 | "LAN Interface", | ||
173 | "WAN Interface", | ||
174 | "Fibre Channel Port", | ||
175 | "Fibre Channel Device", | ||
176 | "SCSI Device", | ||
177 | "ATE Port", | ||
178 | "ATE Device", | ||
179 | "Floppy Controller", | ||
180 | "Floppy Device", | ||
181 | "Secondary Bus Port", | ||
182 | "Peer Transport Agent", | ||
183 | "Peer Transport", | ||
184 | "Unknown" | ||
185 | }; | ||
186 | |||
187 | switch (class & 0xfff) { | ||
188 | case I2O_CLASS_EXECUTIVE: | ||
189 | idx = 0; | ||
190 | break; | ||
191 | case I2O_CLASS_DDM: | ||
192 | idx = 1; | ||
193 | break; | ||
194 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
195 | idx = 2; | ||
196 | break; | ||
197 | case I2O_CLASS_SEQUENTIAL_STORAGE: | ||
198 | idx = 3; | ||
199 | break; | ||
200 | case I2O_CLASS_LAN: | ||
201 | idx = 4; | ||
202 | break; | ||
203 | case I2O_CLASS_WAN: | ||
204 | idx = 5; | ||
205 | break; | ||
206 | case I2O_CLASS_FIBRE_CHANNEL_PORT: | ||
207 | idx = 6; | ||
208 | break; | ||
209 | case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: | ||
210 | idx = 7; | ||
211 | break; | ||
212 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
213 | idx = 8; | ||
214 | break; | ||
215 | case I2O_CLASS_ATE_PORT: | ||
216 | idx = 9; | ||
217 | break; | ||
218 | case I2O_CLASS_ATE_PERIPHERAL: | ||
219 | idx = 10; | ||
220 | break; | ||
221 | case I2O_CLASS_FLOPPY_CONTROLLER: | ||
222 | idx = 11; | ||
223 | break; | ||
224 | case I2O_CLASS_FLOPPY_DEVICE: | ||
225 | idx = 12; | ||
226 | break; | ||
227 | case I2O_CLASS_BUS_ADAPTER: | ||
228 | idx = 13; | ||
229 | break; | ||
230 | case I2O_CLASS_PEER_TRANSPORT_AGENT: | ||
231 | idx = 14; | ||
232 | break; | ||
233 | case I2O_CLASS_PEER_TRANSPORT: | ||
234 | idx = 15; | ||
235 | break; | ||
236 | } | ||
237 | |||
238 | return i2o_class_name[idx]; | ||
239 | } | ||
240 | |||
241 | #define SCSI_TABLE_SIZE 13 | ||
242 | static char *scsi_devices[] = { | ||
243 | "Direct-Access Read/Write", | ||
244 | "Sequential-Access Storage", | ||
245 | "Printer", | ||
246 | "Processor", | ||
247 | "WORM Device", | ||
248 | "CD-ROM Device", | ||
249 | "Scanner Device", | ||
250 | "Optical Memory Device", | ||
251 | "Medium Changer Device", | ||
252 | "Communications Device", | ||
253 | "Graphics Art Pre-Press Device", | ||
254 | "Graphics Art Pre-Press Device", | ||
255 | "Array Controller Device" | ||
256 | }; | ||
257 | |||
258 | static char *chtostr(char *tmp, u8 *chars, int n) | ||
259 | { | ||
260 | tmp[0] = 0; | ||
261 | return strncat(tmp, (char *)chars, n); | ||
262 | } | ||
263 | |||
264 | static int i2o_report_query_status(struct seq_file *seq, int block_status, | ||
265 | char *group) | ||
266 | { | ||
267 | switch (block_status) { | ||
268 | case -ETIMEDOUT: | ||
269 | return seq_printf(seq, "Timeout reading group %s.\n", group); | ||
270 | case -ENOMEM: | ||
271 | return seq_printf(seq, "No free memory to read the table.\n"); | ||
272 | case -I2O_PARAMS_STATUS_INVALID_GROUP_ID: | ||
273 | return seq_printf(seq, "Group %s not supported.\n", group); | ||
274 | default: | ||
275 | return seq_printf(seq, | ||
276 | "Error reading group %s. BlockStatus 0x%02X\n", | ||
277 | group, -block_status); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | static char *bus_strings[] = { | ||
282 | "Local Bus", | ||
283 | "ISA", | ||
284 | "EISA", | ||
285 | "PCI", | ||
286 | "PCMCIA", | ||
287 | "NUBUS", | ||
288 | "CARDBUS" | ||
289 | }; | ||
290 | |||
291 | static int i2o_seq_show_hrt(struct seq_file *seq, void *v) | ||
292 | { | ||
293 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
294 | i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt; | ||
295 | u32 bus; | ||
296 | int i; | ||
297 | |||
298 | if (hrt->hrt_version) { | ||
299 | seq_printf(seq, | ||
300 | "HRT table for controller is too new a version.\n"); | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | seq_printf(seq, "HRT has %d entries of %d bytes each.\n", | ||
305 | hrt->num_entries, hrt->entry_len << 2); | ||
306 | |||
307 | for (i = 0; i < hrt->num_entries; i++) { | ||
308 | seq_printf(seq, "Entry %d:\n", i); | ||
309 | seq_printf(seq, " Adapter ID: %0#10x\n", | ||
310 | hrt->hrt_entry[i].adapter_id); | ||
311 | seq_printf(seq, " Controlling tid: %0#6x\n", | ||
312 | hrt->hrt_entry[i].parent_tid); | ||
313 | |||
314 | if (hrt->hrt_entry[i].bus_type != 0x80) { | ||
315 | bus = hrt->hrt_entry[i].bus_type; | ||
316 | seq_printf(seq, " %s Information\n", | ||
317 | bus_strings[bus]); | ||
318 | |||
319 | switch (bus) { | ||
320 | case I2O_BUS_LOCAL: | ||
321 | seq_printf(seq, " IOBase: %0#6x,", | ||
322 | hrt->hrt_entry[i].bus.local_bus. | ||
323 | LbBaseIOPort); | ||
324 | seq_printf(seq, " MemoryBase: %0#10x\n", | ||
325 | hrt->hrt_entry[i].bus.local_bus. | ||
326 | LbBaseMemoryAddress); | ||
327 | break; | ||
328 | |||
329 | case I2O_BUS_ISA: | ||
330 | seq_printf(seq, " IOBase: %0#6x,", | ||
331 | hrt->hrt_entry[i].bus.isa_bus. | ||
332 | IsaBaseIOPort); | ||
333 | seq_printf(seq, " MemoryBase: %0#10x,", | ||
334 | hrt->hrt_entry[i].bus.isa_bus. | ||
335 | IsaBaseMemoryAddress); | ||
336 | seq_printf(seq, " CSN: %0#4x,", | ||
337 | hrt->hrt_entry[i].bus.isa_bus.CSN); | ||
338 | break; | ||
339 | |||
340 | case I2O_BUS_EISA: | ||
341 | seq_printf(seq, " IOBase: %0#6x,", | ||
342 | hrt->hrt_entry[i].bus.eisa_bus. | ||
343 | EisaBaseIOPort); | ||
344 | seq_printf(seq, " MemoryBase: %0#10x,", | ||
345 | hrt->hrt_entry[i].bus.eisa_bus. | ||
346 | EisaBaseMemoryAddress); | ||
347 | seq_printf(seq, " Slot: %0#4x,", | ||
348 | hrt->hrt_entry[i].bus.eisa_bus. | ||
349 | EisaSlotNumber); | ||
350 | break; | ||
351 | |||
352 | case I2O_BUS_PCI: | ||
353 | seq_printf(seq, " Bus: %0#4x", | ||
354 | hrt->hrt_entry[i].bus.pci_bus. | ||
355 | PciBusNumber); | ||
356 | seq_printf(seq, " Dev: %0#4x", | ||
357 | hrt->hrt_entry[i].bus.pci_bus. | ||
358 | PciDeviceNumber); | ||
359 | seq_printf(seq, " Func: %0#4x", | ||
360 | hrt->hrt_entry[i].bus.pci_bus. | ||
361 | PciFunctionNumber); | ||
362 | seq_printf(seq, " Vendor: %0#6x", | ||
363 | hrt->hrt_entry[i].bus.pci_bus. | ||
364 | PciVendorID); | ||
365 | seq_printf(seq, " Device: %0#6x\n", | ||
366 | hrt->hrt_entry[i].bus.pci_bus. | ||
367 | PciDeviceID); | ||
368 | break; | ||
369 | |||
370 | default: | ||
371 | seq_printf(seq, " Unsupported Bus Type\n"); | ||
372 | } | ||
373 | } else | ||
374 | seq_printf(seq, " Unknown Bus Type\n"); | ||
375 | } | ||
376 | |||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static int i2o_seq_show_lct(struct seq_file *seq, void *v) | ||
381 | { | ||
382 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
383 | i2o_lct *lct = (i2o_lct *) c->lct; | ||
384 | int entries; | ||
385 | int i; | ||
386 | |||
387 | #define BUS_TABLE_SIZE 3 | ||
388 | static char *bus_ports[] = { | ||
389 | "Generic Bus", | ||
390 | "SCSI Bus", | ||
391 | "Fibre Channel Bus" | ||
392 | }; | ||
393 | |||
394 | entries = (lct->table_size - 3) / 9; | ||
395 | |||
396 | seq_printf(seq, "LCT contains %d %s\n", entries, | ||
397 | entries == 1 ? "entry" : "entries"); | ||
398 | if (lct->boot_tid) | ||
399 | seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid); | ||
400 | |||
401 | seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind); | ||
402 | |||
403 | for (i = 0; i < entries; i++) { | ||
404 | seq_printf(seq, "Entry %d\n", i); | ||
405 | seq_printf(seq, " Class, SubClass : %s", | ||
406 | i2o_get_class_name(lct->lct_entry[i].class_id)); | ||
407 | |||
408 | /* | ||
409 | * Classes which we'll print subclass info for | ||
410 | */ | ||
411 | switch (lct->lct_entry[i].class_id & 0xFFF) { | ||
412 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
413 | switch (lct->lct_entry[i].sub_class) { | ||
414 | case 0x00: | ||
415 | seq_printf(seq, ", Direct-Access Read/Write"); | ||
416 | break; | ||
417 | |||
418 | case 0x04: | ||
419 | seq_printf(seq, ", WORM Drive"); | ||
420 | break; | ||
421 | |||
422 | case 0x05: | ||
423 | seq_printf(seq, ", CD-ROM Drive"); | ||
424 | break; | ||
425 | |||
426 | case 0x07: | ||
427 | seq_printf(seq, ", Optical Memory Device"); | ||
428 | break; | ||
429 | |||
430 | default: | ||
431 | seq_printf(seq, ", Unknown (0x%02x)", | ||
432 | lct->lct_entry[i].sub_class); | ||
433 | break; | ||
434 | } | ||
435 | break; | ||
436 | |||
437 | case I2O_CLASS_LAN: | ||
438 | switch (lct->lct_entry[i].sub_class & 0xFF) { | ||
439 | case 0x30: | ||
440 | seq_printf(seq, ", Ethernet"); | ||
441 | break; | ||
442 | |||
443 | case 0x40: | ||
444 | seq_printf(seq, ", 100base VG"); | ||
445 | break; | ||
446 | |||
447 | case 0x50: | ||
448 | seq_printf(seq, ", IEEE 802.5/Token-Ring"); | ||
449 | break; | ||
450 | |||
451 | case 0x60: | ||
452 | seq_printf(seq, ", ANSI X3T9.5 FDDI"); | ||
453 | break; | ||
454 | |||
455 | case 0x70: | ||
456 | seq_printf(seq, ", Fibre Channel"); | ||
457 | break; | ||
458 | |||
459 | default: | ||
460 | seq_printf(seq, ", Unknown Sub-Class (0x%02x)", | ||
461 | lct->lct_entry[i].sub_class & 0xFF); | ||
462 | break; | ||
463 | } | ||
464 | break; | ||
465 | |||
466 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
467 | if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE) | ||
468 | seq_printf(seq, ", %s", | ||
469 | scsi_devices[lct->lct_entry[i]. | ||
470 | sub_class]); | ||
471 | else | ||
472 | seq_printf(seq, ", Unknown Device Type"); | ||
473 | break; | ||
474 | |||
475 | case I2O_CLASS_BUS_ADAPTER: | ||
476 | if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) | ||
477 | seq_printf(seq, ", %s", | ||
478 | bus_ports[lct->lct_entry[i]. | ||
479 | sub_class]); | ||
480 | else | ||
481 | seq_printf(seq, ", Unknown Bus Type"); | ||
482 | break; | ||
483 | } | ||
484 | seq_printf(seq, "\n"); | ||
485 | |||
486 | seq_printf(seq, " Local TID : 0x%03x\n", | ||
487 | lct->lct_entry[i].tid); | ||
488 | seq_printf(seq, " User TID : 0x%03x\n", | ||
489 | lct->lct_entry[i].user_tid); | ||
490 | seq_printf(seq, " Parent TID : 0x%03x\n", | ||
491 | lct->lct_entry[i].parent_tid); | ||
492 | seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n", | ||
493 | lct->lct_entry[i].identity_tag[0], | ||
494 | lct->lct_entry[i].identity_tag[1], | ||
495 | lct->lct_entry[i].identity_tag[2], | ||
496 | lct->lct_entry[i].identity_tag[3], | ||
497 | lct->lct_entry[i].identity_tag[4], | ||
498 | lct->lct_entry[i].identity_tag[5], | ||
499 | lct->lct_entry[i].identity_tag[6], | ||
500 | lct->lct_entry[i].identity_tag[7]); | ||
501 | seq_printf(seq, " Change Indicator : %0#10x\n", | ||
502 | lct->lct_entry[i].change_ind); | ||
503 | seq_printf(seq, " Event Capab Mask : %0#10x\n", | ||
504 | lct->lct_entry[i].device_flags); | ||
505 | } | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static int i2o_seq_show_status(struct seq_file *seq, void *v) | ||
511 | { | ||
512 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
513 | char prodstr[25]; | ||
514 | int version; | ||
515 | i2o_status_block *sb = c->status_block.virt; | ||
516 | |||
517 | i2o_status_get(c); // reread the status block | ||
518 | |||
519 | seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id); | ||
520 | |||
521 | version = sb->i2o_version; | ||
522 | |||
523 | /* FIXME for Spec 2.0 | ||
524 | if (version == 0x02) { | ||
525 | seq_printf(seq, "Lowest I2O version supported: "); | ||
526 | switch(workspace[2]) { | ||
527 | case 0x00: | ||
528 | seq_printf(seq, "1.0\n"); | ||
529 | break; | ||
530 | case 0x01: | ||
531 | seq_printf(seq, "1.5\n"); | ||
532 | break; | ||
533 | case 0x02: | ||
534 | seq_printf(seq, "2.0\n"); | ||
535 | break; | ||
536 | } | ||
537 | |||
538 | seq_printf(seq, "Highest I2O version supported: "); | ||
539 | switch(workspace[3]) { | ||
540 | case 0x00: | ||
541 | seq_printf(seq, "1.0\n"); | ||
542 | break; | ||
543 | case 0x01: | ||
544 | seq_printf(seq, "1.5\n"); | ||
545 | break; | ||
546 | case 0x02: | ||
547 | seq_printf(seq, "2.0\n"); | ||
548 | break; | ||
549 | } | ||
550 | } | ||
551 | */ | ||
552 | seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id); | ||
553 | seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id); | ||
554 | seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number); | ||
555 | |||
556 | seq_printf(seq, "I2O version : "); | ||
557 | switch (version) { | ||
558 | case 0x00: | ||
559 | seq_printf(seq, "1.0\n"); | ||
560 | break; | ||
561 | case 0x01: | ||
562 | seq_printf(seq, "1.5\n"); | ||
563 | break; | ||
564 | case 0x02: | ||
565 | seq_printf(seq, "2.0\n"); | ||
566 | break; | ||
567 | default: | ||
568 | seq_printf(seq, "Unknown version\n"); | ||
569 | } | ||
570 | |||
571 | seq_printf(seq, "IOP State : "); | ||
572 | switch (sb->iop_state) { | ||
573 | case 0x01: | ||
574 | seq_printf(seq, "INIT\n"); | ||
575 | break; | ||
576 | |||
577 | case 0x02: | ||
578 | seq_printf(seq, "RESET\n"); | ||
579 | break; | ||
580 | |||
581 | case 0x04: | ||
582 | seq_printf(seq, "HOLD\n"); | ||
583 | break; | ||
584 | |||
585 | case 0x05: | ||
586 | seq_printf(seq, "READY\n"); | ||
587 | break; | ||
588 | |||
589 | case 0x08: | ||
590 | seq_printf(seq, "OPERATIONAL\n"); | ||
591 | break; | ||
592 | |||
593 | case 0x10: | ||
594 | seq_printf(seq, "FAILED\n"); | ||
595 | break; | ||
596 | |||
597 | case 0x11: | ||
598 | seq_printf(seq, "FAULTED\n"); | ||
599 | break; | ||
600 | |||
601 | default: | ||
602 | seq_printf(seq, "Unknown\n"); | ||
603 | break; | ||
604 | } | ||
605 | |||
606 | seq_printf(seq, "Messenger Type : "); | ||
607 | switch (sb->msg_type) { | ||
608 | case 0x00: | ||
609 | seq_printf(seq, "Memory mapped\n"); | ||
610 | break; | ||
611 | case 0x01: | ||
612 | seq_printf(seq, "Memory mapped only\n"); | ||
613 | break; | ||
614 | case 0x02: | ||
615 | seq_printf(seq, "Remote only\n"); | ||
616 | break; | ||
617 | case 0x03: | ||
618 | seq_printf(seq, "Memory mapped and remote\n"); | ||
619 | break; | ||
620 | default: | ||
621 | seq_printf(seq, "Unknown\n"); | ||
622 | } | ||
623 | |||
624 | seq_printf(seq, "Inbound Frame Size : %d bytes\n", | ||
625 | sb->inbound_frame_size << 2); | ||
626 | seq_printf(seq, "Max Inbound Frames : %d\n", | ||
627 | sb->max_inbound_frames); | ||
628 | seq_printf(seq, "Current Inbound Frames : %d\n", | ||
629 | sb->cur_inbound_frames); | ||
630 | seq_printf(seq, "Max Outbound Frames : %d\n", | ||
631 | sb->max_outbound_frames); | ||
632 | |||
633 | /* Spec doesn't say if NULL terminated or not... */ | ||
634 | memcpy(prodstr, sb->product_id, 24); | ||
635 | prodstr[24] = '\0'; | ||
636 | seq_printf(seq, "Product ID : %s\n", prodstr); | ||
637 | seq_printf(seq, "Expected LCT Size : %d bytes\n", | ||
638 | sb->expected_lct_size); | ||
639 | |||
640 | seq_printf(seq, "IOP Capabilities\n"); | ||
641 | seq_printf(seq, " Context Field Size Support : "); | ||
642 | switch (sb->iop_capabilities & 0x0000003) { | ||
643 | case 0: | ||
644 | seq_printf(seq, "Supports only 32-bit context fields\n"); | ||
645 | break; | ||
646 | case 1: | ||
647 | seq_printf(seq, "Supports only 64-bit context fields\n"); | ||
648 | break; | ||
649 | case 2: | ||
650 | seq_printf(seq, "Supports 32-bit and 64-bit context fields, " | ||
651 | "but not concurrently\n"); | ||
652 | break; | ||
653 | case 3: | ||
654 | seq_printf(seq, "Supports 32-bit and 64-bit context fields " | ||
655 | "concurrently\n"); | ||
656 | break; | ||
657 | default: | ||
658 | seq_printf(seq, "0x%08x\n", sb->iop_capabilities); | ||
659 | } | ||
660 | seq_printf(seq, " Current Context Field Size : "); | ||
661 | switch (sb->iop_capabilities & 0x0000000C) { | ||
662 | case 0: | ||
663 | seq_printf(seq, "not configured\n"); | ||
664 | break; | ||
665 | case 4: | ||
666 | seq_printf(seq, "Supports only 32-bit context fields\n"); | ||
667 | break; | ||
668 | case 8: | ||
669 | seq_printf(seq, "Supports only 64-bit context fields\n"); | ||
670 | break; | ||
671 | case 12: | ||
672 | seq_printf(seq, "Supports both 32-bit or 64-bit context fields " | ||
673 | "concurrently\n"); | ||
674 | break; | ||
675 | default: | ||
676 | seq_printf(seq, "\n"); | ||
677 | } | ||
678 | seq_printf(seq, " Inbound Peer Support : %s\n", | ||
679 | (sb->iop_capabilities & 0x00000010) ? | ||
680 | "Supported" : | ||
681 | "Not supported"); | ||
682 | seq_printf(seq, " Outbound Peer Support : %s\n", | ||
683 | (sb->iop_capabilities & 0x00000020) ? | ||
684 | "Supported" : | ||
685 | "Not supported"); | ||
686 | seq_printf(seq, " Peer to Peer Support : %s\n", | ||
687 | (sb->iop_capabilities & 0x00000040) ? | ||
688 | "Supported" : | ||
689 | "Not supported"); | ||
690 | |||
691 | seq_printf(seq, "Desired private memory size : %d kB\n", | ||
692 | sb->desired_mem_size >> 10); | ||
693 | seq_printf(seq, "Allocated private memory size : %d kB\n", | ||
694 | sb->current_mem_size >> 10); | ||
695 | seq_printf(seq, "Private memory base address : %0#10x\n", | ||
696 | sb->current_mem_base); | ||
697 | seq_printf(seq, "Desired private I/O size : %d kB\n", | ||
698 | sb->desired_io_size >> 10); | ||
699 | seq_printf(seq, "Allocated private I/O size : %d kB\n", | ||
700 | sb->current_io_size >> 10); | ||
701 | seq_printf(seq, "Private I/O base address : %0#10x\n", | ||
702 | sb->current_io_base); | ||
703 | |||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | static int i2o_seq_show_hw(struct seq_file *seq, void *v) | ||
708 | { | ||
709 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
710 | static u32 work32[5]; | ||
711 | static u8 *work8 = (u8 *) work32; | ||
712 | static u16 *work16 = (u16 *) work32; | ||
713 | int token; | ||
714 | u32 hwcap; | ||
715 | |||
716 | static char *cpu_table[] = { | ||
717 | "Intel 80960 series", | ||
718 | "AMD2900 series", | ||
719 | "Motorola 68000 series", | ||
720 | "ARM series", | ||
721 | "MIPS series", | ||
722 | "Sparc series", | ||
723 | "PowerPC series", | ||
724 | "Intel x86 series" | ||
725 | }; | ||
726 | |||
727 | token = | ||
728 | i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32)); | ||
729 | |||
730 | if (token < 0) { | ||
731 | i2o_report_query_status(seq, token, "0x0000 IOP Hardware"); | ||
732 | return 0; | ||
733 | } | ||
734 | |||
735 | seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]); | ||
736 | seq_printf(seq, "Product ID : %0#6x\n", work16[1]); | ||
737 | seq_printf(seq, "CPU : "); | ||
738 | if (work8[16] >= 8) | ||
739 | seq_printf(seq, "Unknown\n"); | ||
740 | else | ||
741 | seq_printf(seq, "%s\n", cpu_table[work8[16]]); | ||
742 | /* Anyone using ProcessorVersion? */ | ||
743 | |||
744 | seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10); | ||
745 | seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10); | ||
746 | |||
747 | hwcap = work32[3]; | ||
748 | seq_printf(seq, "Capabilities : 0x%08x\n", hwcap); | ||
749 | seq_printf(seq, " [%s] Self booting\n", | ||
750 | (hwcap & 0x00000001) ? "+" : "-"); | ||
751 | seq_printf(seq, " [%s] Upgradable IRTOS\n", | ||
752 | (hwcap & 0x00000002) ? "+" : "-"); | ||
753 | seq_printf(seq, " [%s] Supports downloading DDMs\n", | ||
754 | (hwcap & 0x00000004) ? "+" : "-"); | ||
755 | seq_printf(seq, " [%s] Supports installing DDMs\n", | ||
756 | (hwcap & 0x00000008) ? "+" : "-"); | ||
757 | seq_printf(seq, " [%s] Battery-backed RAM\n", | ||
758 | (hwcap & 0x00000010) ? "+" : "-"); | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | /* Executive group 0003h - Executing DDM List (table) */ | ||
764 | static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v) | ||
765 | { | ||
766 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
767 | int token; | ||
768 | int i; | ||
769 | |||
770 | typedef struct _i2o_exec_execute_ddm_table { | ||
771 | u16 ddm_tid; | ||
772 | u8 module_type; | ||
773 | u8 reserved; | ||
774 | u16 i2o_vendor_id; | ||
775 | u16 module_id; | ||
776 | u8 module_name_version[28]; | ||
777 | u32 data_size; | ||
778 | u32 code_size; | ||
779 | } i2o_exec_execute_ddm_table; | ||
780 | |||
781 | struct { | ||
782 | u16 result_count; | ||
783 | u16 pad; | ||
784 | u16 block_size; | ||
785 | u8 block_status; | ||
786 | u8 error_info_size; | ||
787 | u16 row_count; | ||
788 | u16 more_flag; | ||
789 | i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES]; | ||
790 | } *result; | ||
791 | |||
792 | i2o_exec_execute_ddm_table ddm_table; | ||
793 | char tmp[28 + 1]; | ||
794 | |||
795 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
796 | if (!result) | ||
797 | return -ENOMEM; | ||
798 | |||
799 | token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1, | ||
800 | NULL, 0, result, sizeof(*result)); | ||
801 | |||
802 | if (token < 0) { | ||
803 | i2o_report_query_status(seq, token, | ||
804 | "0x0003 Executing DDM List"); | ||
805 | goto out; | ||
806 | } | ||
807 | |||
808 | seq_printf(seq, | ||
809 | "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n"); | ||
810 | ddm_table = result->ddm_table[0]; | ||
811 | |||
812 | for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) { | ||
813 | seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF); | ||
814 | |||
815 | switch (ddm_table.module_type) { | ||
816 | case 0x01: | ||
817 | seq_printf(seq, "Downloaded DDM "); | ||
818 | break; | ||
819 | case 0x22: | ||
820 | seq_printf(seq, "Embedded DDM "); | ||
821 | break; | ||
822 | default: | ||
823 | seq_printf(seq, " "); | ||
824 | } | ||
825 | |||
826 | seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); | ||
827 | seq_printf(seq, "%-#8x", ddm_table.module_id); | ||
828 | seq_printf(seq, "%-29s", | ||
829 | chtostr(tmp, ddm_table.module_name_version, 28)); | ||
830 | seq_printf(seq, "%9d ", ddm_table.data_size); | ||
831 | seq_printf(seq, "%8d", ddm_table.code_size); | ||
832 | |||
833 | seq_printf(seq, "\n"); | ||
834 | } | ||
835 | out: | ||
836 | kfree(result); | ||
837 | return 0; | ||
838 | } | ||
839 | |||
840 | /* Executive group 0004h - Driver Store (scalar) */ | ||
841 | static int i2o_seq_show_driver_store(struct seq_file *seq, void *v) | ||
842 | { | ||
843 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
844 | u32 work32[8]; | ||
845 | int token; | ||
846 | |||
847 | token = | ||
848 | i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32)); | ||
849 | if (token < 0) { | ||
850 | i2o_report_query_status(seq, token, "0x0004 Driver Store"); | ||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | seq_printf(seq, "Module limit : %d\n" | ||
855 | "Module count : %d\n" | ||
856 | "Current space : %d kB\n" | ||
857 | "Free space : %d kB\n", | ||
858 | work32[0], work32[1], work32[2] >> 10, work32[3] >> 10); | ||
859 | |||
860 | return 0; | ||
861 | } | ||
862 | |||
863 | /* Executive group 0005h - Driver Store Table (table) */ | ||
864 | static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v) | ||
865 | { | ||
866 | typedef struct _i2o_driver_store { | ||
867 | u16 stored_ddm_index; | ||
868 | u8 module_type; | ||
869 | u8 reserved; | ||
870 | u16 i2o_vendor_id; | ||
871 | u16 module_id; | ||
872 | u8 module_name_version[28]; | ||
873 | u8 date[8]; | ||
874 | u32 module_size; | ||
875 | u32 mpb_size; | ||
876 | u32 module_flags; | ||
877 | } i2o_driver_store_table; | ||
878 | |||
879 | struct i2o_controller *c = (struct i2o_controller *)seq->private; | ||
880 | int token; | ||
881 | int i; | ||
882 | |||
883 | typedef struct { | ||
884 | u16 result_count; | ||
885 | u16 pad; | ||
886 | u16 block_size; | ||
887 | u8 block_status; | ||
888 | u8 error_info_size; | ||
889 | u16 row_count; | ||
890 | u16 more_flag; | ||
891 | i2o_driver_store_table dst[I2O_MAX_MODULES]; | ||
892 | } i2o_driver_result_table; | ||
893 | |||
894 | i2o_driver_result_table *result; | ||
895 | i2o_driver_store_table *dst; | ||
896 | char tmp[28 + 1]; | ||
897 | |||
898 | result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL); | ||
899 | if (result == NULL) | ||
900 | return -ENOMEM; | ||
901 | |||
902 | token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1, | ||
903 | NULL, 0, result, sizeof(*result)); | ||
904 | |||
905 | if (token < 0) { | ||
906 | i2o_report_query_status(seq, token, | ||
907 | "0x0005 DRIVER STORE TABLE"); | ||
908 | kfree(result); | ||
909 | return 0; | ||
910 | } | ||
911 | |||
912 | seq_printf(seq, | ||
913 | "# Module_type Vendor Mod_id Module_name Vrs" | ||
914 | "Date Mod_size Par_size Flags\n"); | ||
915 | for (i = 0, dst = &result->dst[0]; i < result->row_count; | ||
916 | dst = &result->dst[++i]) { | ||
917 | seq_printf(seq, "%-3d", dst->stored_ddm_index); | ||
918 | switch (dst->module_type) { | ||
919 | case 0x01: | ||
920 | seq_printf(seq, "Downloaded DDM "); | ||
921 | break; | ||
922 | case 0x22: | ||
923 | seq_printf(seq, "Embedded DDM "); | ||
924 | break; | ||
925 | default: | ||
926 | seq_printf(seq, " "); | ||
927 | } | ||
928 | |||
929 | seq_printf(seq, "%-#7x", dst->i2o_vendor_id); | ||
930 | seq_printf(seq, "%-#8x", dst->module_id); | ||
931 | seq_printf(seq, "%-29s", | ||
932 | chtostr(tmp, dst->module_name_version, 28)); | ||
933 | seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8)); | ||
934 | seq_printf(seq, "%8d ", dst->module_size); | ||
935 | seq_printf(seq, "%8d ", dst->mpb_size); | ||
936 | seq_printf(seq, "0x%04x", dst->module_flags); | ||
937 | seq_printf(seq, "\n"); | ||
938 | } | ||
939 | |||
940 | kfree(result); | ||
941 | return 0; | ||
942 | } | ||
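The chtostr() helper used above (defined earlier in i2o_proc.c) exists because the parameter fields are fixed-width and not necessarily NUL-terminated; a rough equivalent, under a hypothetical name, looks like this:

/* Sketch: copy an n-byte field into the caller's tmp buffer (which must
 * hold n + 1 bytes) and NUL-terminate it so it is safe to print with %s. */
static char *chtostr_sketch(char *tmp, u8 *chars, int n)
{
	tmp[0] = 0;
	return strncat(tmp, (char *)chars, n);
}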
943 | |||
944 | /* Generic group F000h - Params Descriptor (table) */ | ||
945 | static int i2o_seq_show_groups(struct seq_file *seq, void *v) | ||
946 | { | ||
947 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
948 | int token; | ||
949 | int i; | ||
950 | u8 properties; | ||
951 | |||
952 | typedef struct _i2o_group_info { | ||
953 | u16 group_number; | ||
954 | u16 field_count; | ||
955 | u16 row_count; | ||
956 | u8 properties; | ||
957 | u8 reserved; | ||
958 | } i2o_group_info; | ||
959 | |||
960 | struct { | ||
961 | u16 result_count; | ||
962 | u16 pad; | ||
963 | u16 block_size; | ||
964 | u8 block_status; | ||
965 | u8 error_info_size; | ||
966 | u16 row_count; | ||
967 | u16 more_flag; | ||
968 | i2o_group_info group[256]; | ||
969 | } *result; | ||
970 | |||
971 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
972 | if (!result) | ||
973 | return -ENOMEM; | ||
974 | |||
975 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0, | ||
976 | result, sizeof(*result)); | ||
977 | |||
978 | if (token < 0) { | ||
979 | i2o_report_query_status(seq, token, "0xF000 Params Descriptor"); | ||
980 | goto out; | ||
981 | } | ||
982 | |||
983 | seq_printf(seq, | ||
984 | "# Group FieldCount RowCount Type Add Del Clear\n"); | ||
985 | |||
986 | for (i = 0; i < result->row_count; i++) { | ||
987 | seq_printf(seq, "%-3d", i); | ||
988 | seq_printf(seq, "0x%04X ", result->group[i].group_number); | ||
989 | seq_printf(seq, "%10d ", result->group[i].field_count); | ||
990 | seq_printf(seq, "%8d ", result->group[i].row_count); | ||
991 | |||
992 | properties = result->group[i].properties; | ||
993 | if (properties & 0x1) | ||
994 | seq_printf(seq, "Table "); | ||
995 | else | ||
996 | seq_printf(seq, "Scalar "); | ||
997 | if (properties & 0x2) | ||
998 | seq_printf(seq, " + "); | ||
999 | else | ||
1000 | seq_printf(seq, " - "); | ||
1001 | if (properties & 0x4) | ||
1002 | seq_printf(seq, " + "); | ||
1003 | else | ||
1004 | seq_printf(seq, " - "); | ||
1005 | if (properties & 0x8) | ||
1006 | seq_printf(seq, " + "); | ||
1007 | else | ||
1008 | seq_printf(seq, " - "); | ||
1009 | |||
1010 | seq_printf(seq, "\n"); | ||
1011 | } | ||
1012 | |||
1013 | if (result->more_flag) | ||
1014 | seq_printf(seq, "There is more...\n"); | ||
1015 | out: | ||
1016 | kfree(result); | ||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | /* Generic group F001h - Physical Device Table (table) */ | ||
1021 | static int i2o_seq_show_phys_device(struct seq_file *seq, void *v) | ||
1022 | { | ||
1023 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1024 | int token; | ||
1025 | int i; | ||
1026 | |||
1027 | struct { | ||
1028 | u16 result_count; | ||
1029 | u16 pad; | ||
1030 | u16 block_size; | ||
1031 | u8 block_status; | ||
1032 | u8 error_info_size; | ||
1033 | u16 row_count; | ||
1034 | u16 more_flag; | ||
1035 | u32 adapter_id[64]; | ||
1036 | } result; | ||
1037 | |||
1038 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0, | ||
1039 | &result, sizeof(result)); | ||
1040 | |||
1041 | if (token < 0) { | ||
1042 | i2o_report_query_status(seq, token, | ||
1043 | "0xF001 Physical Device Table"); | ||
1044 | return 0; | ||
1045 | } | ||
1046 | |||
1047 | if (result.row_count) | ||
1048 | seq_printf(seq, "# AdapterId\n"); | ||
1049 | |||
1050 | for (i = 0; i < result.row_count; i++) { | ||
1051 | seq_printf(seq, "%-2d", i); | ||
1052 | seq_printf(seq, "%#7x\n", result.adapter_id[i]); | ||
1053 | } | ||
1054 | |||
1055 | if (result.more_flag) | ||
1056 | seq_printf(seq, "There is more...\n"); | ||
1057 | |||
1058 | return 0; | ||
1059 | } | ||
1060 | |||
1061 | /* Generic group F002h - Claimed Table (table) */ | ||
1062 | static int i2o_seq_show_claimed(struct seq_file *seq, void *v) | ||
1063 | { | ||
1064 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1065 | int token; | ||
1066 | int i; | ||
1067 | |||
1068 | struct { | ||
1069 | u16 result_count; | ||
1070 | u16 pad; | ||
1071 | u16 block_size; | ||
1072 | u8 block_status; | ||
1073 | u8 error_info_size; | ||
1074 | u16 row_count; | ||
1075 | u16 more_flag; | ||
1076 | u16 claimed_tid[64]; | ||
1077 | } result; | ||
1078 | |||
1079 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0, | ||
1080 | &result, sizeof(result)); | ||
1081 | |||
1082 | if (token < 0) { | ||
1083 | i2o_report_query_status(seq, token, "0xF002 Claimed Table"); | ||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | if (result.row_count) | ||
1088 | seq_printf(seq, "# ClaimedTid\n"); | ||
1089 | |||
1090 | for (i = 0; i < result.row_count; i++) { | ||
1091 | seq_printf(seq, "%-2d", i); | ||
1092 | seq_printf(seq, "%#7x\n", result.claimed_tid[i]); | ||
1093 | } | ||
1094 | |||
1095 | if (result.more_flag) | ||
1096 | seq_printf(seq, "There is more...\n"); | ||
1097 | |||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
1101 | /* Generic group F003h - User Table (table) */ | ||
1102 | static int i2o_seq_show_users(struct seq_file *seq, void *v) | ||
1103 | { | ||
1104 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1105 | int token; | ||
1106 | int i; | ||
1107 | |||
1108 | typedef struct _i2o_user_table { | ||
1109 | u16 instance; | ||
1110 | u16 user_tid; | ||
1111 | u8 claim_type; | ||
1112 | u8 reserved1; | ||
1113 | u16 reserved2; | ||
1114 | } i2o_user_table; | ||
1115 | |||
1116 | struct { | ||
1117 | u16 result_count; | ||
1118 | u16 pad; | ||
1119 | u16 block_size; | ||
1120 | u8 block_status; | ||
1121 | u8 error_info_size; | ||
1122 | u16 row_count; | ||
1123 | u16 more_flag; | ||
1124 | i2o_user_table user[64]; | ||
1125 | } *result; | ||
1126 | |||
1127 | result = kmalloc(sizeof(*result), GFP_KERNEL); | ||
1128 | if (!result) | ||
1129 | return -ENOMEM; | ||
1130 | |||
1131 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0, | ||
1132 | result, sizeof(*result)); | ||
1133 | |||
1134 | if (token < 0) { | ||
1135 | i2o_report_query_status(seq, token, "0xF003 User Table"); | ||
1136 | goto out; | ||
1137 | } | ||
1138 | |||
1139 | seq_printf(seq, "# Instance UserTid ClaimType\n"); | ||
1140 | |||
1141 | for (i = 0; i < result->row_count; i++) { | ||
1142 | seq_printf(seq, "%-3d", i); | ||
1143 | seq_printf(seq, "%#8x ", result->user[i].instance); | ||
1144 | seq_printf(seq, "%#7x ", result->user[i].user_tid); | ||
1145 | seq_printf(seq, "%#9x\n", result->user[i].claim_type); | ||
1146 | } | ||
1147 | |||
1148 | if (result->more_flag) | ||
1149 | seq_printf(seq, "There is more...\n"); | ||
1150 | out: | ||
1151 | kfree(result); | ||
1152 | return 0; | ||
1153 | } | ||
1154 | |||
1155 | /* Generic group F005h - Private message extensions (table) (optional) */ | ||
1156 | static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v) | ||
1157 | { | ||
1158 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1159 | int token; | ||
1160 | int i; | ||
1161 | |||
1162 | typedef struct _i2o_private { | ||
1163 | u16 ext_instance; | ||
1164 | u16 organization_id; | ||
1165 | u16 x_function_code; | ||
1166 | } i2o_private; | ||
1167 | |||
1168 | struct { | ||
1169 | u16 result_count; | ||
1170 | u16 pad; | ||
1171 | u16 block_size; | ||
1172 | u8 block_status; | ||
1173 | u8 error_info_size; | ||
1174 | u16 row_count; | ||
1175 | u16 more_flag; | ||
1176 | i2o_private extension[64]; | ||
1177 | } result; | ||
1178 | |||
1179 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0, | ||
1180 | &result, sizeof(result)); | ||
1181 | |||
1182 | if (token < 0) { | ||
1183 | i2o_report_query_status(seq, token, | ||
1184 | "0xF005 Private Message Extensions (optional)"); | ||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | seq_printf(seq, "Instance# OrgId FunctionCode\n"); | ||
1189 | |||
1190 | for (i = 0; i < result.row_count; i++) { | ||
1191 | seq_printf(seq, "%0#9x ", result.extension[i].ext_instance); | ||
1192 | seq_printf(seq, "%0#6x ", result.extension[i].organization_id); | ||
1193 | seq_printf(seq, "%0#6x", result.extension[i].x_function_code); | ||
1194 | |||
1195 | seq_printf(seq, "\n"); | ||
1196 | } | ||
1197 | |||
1198 | if (result.more_flag) | ||
1199 | seq_printf(seq, "There is more...\n"); | ||
1200 | |||
1201 | return 0; | ||
1202 | } | ||
1203 | |||
1204 | /* Generic group F006h - Authorized User Table (table) */ | ||
1205 | static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v) | ||
1206 | { | ||
1207 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1208 | int token; | ||
1209 | int i; | ||
1210 | |||
1211 | struct { | ||
1212 | u16 result_count; | ||
1213 | u16 pad; | ||
1214 | u16 block_size; | ||
1215 | u8 block_status; | ||
1216 | u8 error_info_size; | ||
1217 | u16 row_count; | ||
1218 | u16 more_flag; | ||
1219 | u32 alternate_tid[64]; | ||
1220 | } result; | ||
1221 | |||
1222 | token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0, | ||
1223 | &result, sizeof(result)); | ||
1224 | |||
1225 | if (token < 0) { | ||
1226 | i2o_report_query_status(seq, token, | ||
1227 | "0xF006 Autohorized User Table"); | ||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | if (result.row_count) | ||
1232 | seq_printf(seq, "# AlternateTid\n"); | ||
1233 | |||
1234 | for (i = 0; i < result.row_count; i++) { | ||
1235 | seq_printf(seq, "%-2d", i); | ||
1236 | seq_printf(seq, "%#7x ", result.alternate_tid[i]); | ||
1237 | } | ||
1238 | |||
1239 | if (result.more_flag) | ||
1240 | seq_printf(seq, "There is more...\n"); | ||
1241 | |||
1242 | return 0; | ||
1243 | } | ||
1244 | |||
1245 | /* Generic group F100h - Device Identity (scalar) */ | ||
1246 | static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v) | ||
1247 | { | ||
1248 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1249 | static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number | ||
1250 | // == (allow) 512d bytes (max) | ||
1251 | static u16 *work16 = (u16 *) work32; | ||
1252 | int token; | ||
1253 | char tmp[16 + 1]; | ||
1254 | |||
1255 | token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32)); | ||
1256 | |||
1257 | if (token < 0) { | ||
1258 | i2o_report_query_status(seq, token, "0xF100 Device Identity"); | ||
1259 | return 0; | ||
1260 | } | ||
1261 | |||
1262 | seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); | ||
1263 | seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); | ||
1264 | seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); | ||
1265 | seq_printf(seq, "Vendor info : %s\n", | ||
1266 | chtostr(tmp, (u8 *) (work32 + 2), 16)); | ||
1267 | seq_printf(seq, "Product info : %s\n", | ||
1268 | chtostr(tmp, (u8 *) (work32 + 6), 16)); | ||
1269 | seq_printf(seq, "Description : %s\n", | ||
1270 | chtostr(tmp, (u8 *) (work32 + 10), 16)); | ||
1271 | seq_printf(seq, "Product rev. : %s\n", | ||
1272 | chtostr(tmp, (u8 *) (work32 + 14), 8)); | ||
1273 | |||
1274 | seq_printf(seq, "Serial number : "); | ||
1275 | print_serial_number(seq, (u8 *) (work32 + 16), | ||
1276 | /* allow for SNLen plus | ||
1277 | * possible trailing '\0' | ||
1278 | */ | ||
1279 | sizeof(work32) - (16 * sizeof(u32)) - 2); | ||
1280 | seq_printf(seq, "\n"); | ||
1281 | |||
1282 | return 0; | ||
1283 | } | ||
1284 | |||
1285 | static int i2o_seq_show_dev_name(struct seq_file *seq, void *v) | ||
1286 | { | ||
1287 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1288 | |||
1289 | seq_printf(seq, "%s\n", dev_name(&d->device)); | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | /* Generic group F101h - DDM Identity (scalar) */ | ||
1295 | static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v) | ||
1296 | { | ||
1297 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1298 | int token; | ||
1299 | |||
1300 | struct { | ||
1301 | u16 ddm_tid; | ||
1302 | u8 module_name[24]; | ||
1303 | u8 module_rev[8]; | ||
1304 | u8 sn_format; | ||
1305 | u8 serial_number[12]; | ||
1306 | u8 pad[256]; // allow up to 256 byte (max) serial number | ||
1307 | } result; | ||
1308 | |||
1309 | char tmp[24 + 1]; | ||
1310 | |||
1311 | token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result)); | ||
1312 | |||
1313 | if (token < 0) { | ||
1314 | i2o_report_query_status(seq, token, "0xF101 DDM Identity"); | ||
1315 | return 0; | ||
1316 | } | ||
1317 | |||
1318 | seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); | ||
1319 | seq_printf(seq, "Module name : %s\n", | ||
1320 | chtostr(tmp, result.module_name, 24)); | ||
1321 | seq_printf(seq, "Module revision : %s\n", | ||
1322 | chtostr(tmp, result.module_rev, 8)); | ||
1323 | |||
1324 | seq_printf(seq, "Serial number : "); | ||
1325 | print_serial_number(seq, result.serial_number, sizeof(result) - 36); | ||
1326 | /* allow for SNLen plus possible trailing '\0' */ | ||
1327 | |||
1328 | seq_printf(seq, "\n"); | ||
1329 | |||
1330 | return 0; | ||
1331 | } | ||
1332 | |||
1333 | /* Generic group F102h - User Information (scalar) */ | ||
1334 | static int i2o_seq_show_uinfo(struct seq_file *seq, void *v) | ||
1335 | { | ||
1336 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1337 | int token; | ||
1338 | |||
1339 | struct { | ||
1340 | u8 device_name[64]; | ||
1341 | u8 service_name[64]; | ||
1342 | u8 physical_location[64]; | ||
1343 | u8 instance_number[4]; | ||
1344 | } result; | ||
1345 | |||
1346 | char tmp[64 + 1]; | ||
1347 | |||
1348 | token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result)); | ||
1349 | |||
1350 | if (token < 0) { | ||
1351 | i2o_report_query_status(seq, token, "0xF102 User Information"); | ||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
1355 | seq_printf(seq, "Device name : %s\n", | ||
1356 | chtostr(tmp, result.device_name, 64)); | ||
1357 | seq_printf(seq, "Service name : %s\n", | ||
1358 | chtostr(tmp, result.service_name, 64)); | ||
1359 | seq_printf(seq, "Physical name : %s\n", | ||
1360 | chtostr(tmp, result.physical_location, 64)); | ||
1361 | seq_printf(seq, "Instance number : %s\n", | ||
1362 | chtostr(tmp, result.instance_number, 4)); | ||
1363 | |||
1364 | return 0; | ||
1365 | } | ||
1366 | |||
1367 | /* Generic group F103h - SGL Operating Limits (scalar) */ | ||
1368 | static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v) | ||
1369 | { | ||
1370 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1371 | static u32 work32[12]; | ||
1372 | static u16 *work16 = (u16 *) work32; | ||
1373 | static u8 *work8 = (u8 *) work32; | ||
1374 | int token; | ||
1375 | |||
1376 | token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32)); | ||
1377 | |||
1378 | if (token < 0) { | ||
1379 | i2o_report_query_status(seq, token, | ||
1380 | "0xF103 SGL Operating Limits"); | ||
1381 | return 0; | ||
1382 | } | ||
1383 | |||
1384 | seq_printf(seq, "SGL chain size : %d\n", work32[0]); | ||
1385 | seq_printf(seq, "Max SGL chain size : %d\n", work32[1]); | ||
1386 | seq_printf(seq, "SGL chain size target : %d\n", work32[2]); | ||
1387 | seq_printf(seq, "SGL frag count : %d\n", work16[6]); | ||
1388 | seq_printf(seq, "Max SGL frag count : %d\n", work16[7]); | ||
1389 | seq_printf(seq, "SGL frag count target : %d\n", work16[8]); | ||
1390 | |||
1391 | /* FIXME | ||
1392 | if (d->i2oversion == 0x02) | ||
1393 | { | ||
1394 | */ | ||
1395 | seq_printf(seq, "SGL data alignment : %d\n", work16[8]); | ||
1396 | seq_printf(seq, "SGL addr limit : %d\n", work8[20]); | ||
1397 | seq_printf(seq, "SGL addr sizes supported : "); | ||
1398 | if (work8[21] & 0x01) | ||
1399 | seq_printf(seq, "32 bit "); | ||
1400 | if (work8[21] & 0x02) | ||
1401 | seq_printf(seq, "64 bit "); | ||
1402 | if (work8[21] & 0x04) | ||
1403 | seq_printf(seq, "96 bit "); | ||
1404 | if (work8[21] & 0x08) | ||
1405 | seq_printf(seq, "128 bit "); | ||
1406 | seq_printf(seq, "\n"); | ||
1407 | /* | ||
1408 | } | ||
1409 | */ | ||
1410 | |||
1411 | return 0; | ||
1412 | } | ||
1413 | |||
1414 | /* Generic group F200h - Sensors (scalar) */ | ||
1415 | static int i2o_seq_show_sensors(struct seq_file *seq, void *v) | ||
1416 | { | ||
1417 | struct i2o_device *d = (struct i2o_device *)seq->private; | ||
1418 | int token; | ||
1419 | |||
1420 | struct { | ||
1421 | u16 sensor_instance; | ||
1422 | u8 component; | ||
1423 | u16 component_instance; | ||
1424 | u8 sensor_class; | ||
1425 | u8 sensor_type; | ||
1426 | u8 scaling_exponent; | ||
1427 | u32 actual_reading; | ||
1428 | u32 minimum_reading; | ||
1429 | u32 low2lowcat_treshold; | ||
1430 | u32 lowcat2low_treshold; | ||
1431 | u32 lowwarn2low_treshold; | ||
1432 | u32 low2lowwarn_treshold; | ||
1433 | u32 norm2lowwarn_treshold; | ||
1434 | u32 lowwarn2norm_treshold; | ||
1435 | u32 nominal_reading; | ||
1436 | u32 hiwarn2norm_treshold; | ||
1437 | u32 norm2hiwarn_treshold; | ||
1438 | u32 high2hiwarn_treshold; | ||
1439 | u32 hiwarn2high_treshold; | ||
1440 | u32 hicat2high_treshold; | ||
1441 | u32 hi2hicat_treshold; | ||
1442 | u32 maximum_reading; | ||
1443 | u8 sensor_state; | ||
1444 | u16 event_enable; | ||
1445 | } result; | ||
1446 | |||
1447 | token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result)); | ||
1448 | |||
1449 | if (token < 0) { | ||
1450 | i2o_report_query_status(seq, token, | ||
1451 | "0xF200 Sensors (optional)"); | ||
1452 | return 0; | ||
1453 | } | ||
1454 | |||
1455 | seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance); | ||
1456 | |||
1457 | seq_printf(seq, "Component : %d = ", result.component); | ||
1458 | switch (result.component) { | ||
1459 | case 0: | ||
1460 | seq_printf(seq, "Other"); | ||
1461 | break; | ||
1462 | case 1: | ||
1463 | seq_printf(seq, "Planar logic Board"); | ||
1464 | break; | ||
1465 | case 2: | ||
1466 | seq_printf(seq, "CPU"); | ||
1467 | break; | ||
1468 | case 3: | ||
1469 | seq_printf(seq, "Chassis"); | ||
1470 | break; | ||
1471 | case 4: | ||
1472 | seq_printf(seq, "Power Supply"); | ||
1473 | break; | ||
1474 | case 5: | ||
1475 | seq_printf(seq, "Storage"); | ||
1476 | break; | ||
1477 | case 6: | ||
1478 | seq_printf(seq, "External"); | ||
1479 | break; | ||
1480 | } | ||
1481 | seq_printf(seq, "\n"); | ||
1482 | |||
1483 | seq_printf(seq, "Component instance : %d\n", | ||
1484 | result.component_instance); | ||
1485 | seq_printf(seq, "Sensor class : %s\n", | ||
1486 | result.sensor_class ? "Analog" : "Digital"); | ||
1487 | |||
1488 | seq_printf(seq, "Sensor type : %d = ", result.sensor_type); | ||
1489 | switch (result.sensor_type) { | ||
1490 | case 0: | ||
1491 | seq_printf(seq, "Other\n"); | ||
1492 | break; | ||
1493 | case 1: | ||
1494 | seq_printf(seq, "Thermal\n"); | ||
1495 | break; | ||
1496 | case 2: | ||
1497 | seq_printf(seq, "DC voltage (DC volts)\n"); | ||
1498 | break; | ||
1499 | case 3: | ||
1500 | seq_printf(seq, "AC voltage (AC volts)\n"); | ||
1501 | break; | ||
1502 | case 4: | ||
1503 | seq_printf(seq, "DC current (DC amps)\n"); | ||
1504 | break; | ||
1505 | case 5: | ||
1506 | seq_printf(seq, "AC current (AC volts)\n"); | ||
1507 | break; | ||
1508 | case 6: | ||
1509 | seq_printf(seq, "Door open\n"); | ||
1510 | break; | ||
1511 | case 7: | ||
1512 | seq_printf(seq, "Fan operational\n"); | ||
1513 | break; | ||
1514 | } | ||
1515 | |||
1516 | seq_printf(seq, "Scaling exponent : %d\n", | ||
1517 | result.scaling_exponent); | ||
1518 | seq_printf(seq, "Actual reading : %d\n", result.actual_reading); | ||
1519 | seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading); | ||
1520 | seq_printf(seq, "Low2LowCat treshold : %d\n", | ||
1521 | result.low2lowcat_treshold); | ||
1522 | seq_printf(seq, "LowCat2Low treshold : %d\n", | ||
1523 | result.lowcat2low_treshold); | ||
1524 | seq_printf(seq, "LowWarn2Low treshold : %d\n", | ||
1525 | result.lowwarn2low_treshold); | ||
1526 | seq_printf(seq, "Low2LowWarn treshold : %d\n", | ||
1527 | result.low2lowwarn_treshold); | ||
1528 | seq_printf(seq, "Norm2LowWarn treshold : %d\n", | ||
1529 | result.norm2lowwarn_treshold); | ||
1530 | seq_printf(seq, "LowWarn2Norm treshold : %d\n", | ||
1531 | result.lowwarn2norm_treshold); | ||
1532 | seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading); | ||
1533 | seq_printf(seq, "HiWarn2Norm treshold : %d\n", | ||
1534 | result.hiwarn2norm_treshold); | ||
1535 | seq_printf(seq, "Norm2HiWarn treshold : %d\n", | ||
1536 | result.norm2hiwarn_treshold); | ||
1537 | seq_printf(seq, "High2HiWarn treshold : %d\n", | ||
1538 | result.high2hiwarn_treshold); | ||
1539 | seq_printf(seq, "HiWarn2High treshold : %d\n", | ||
1540 | result.hiwarn2high_treshold); | ||
1541 | seq_printf(seq, "HiCat2High treshold : %d\n", | ||
1542 | result.hicat2high_treshold); | ||
1543 | seq_printf(seq, "High2HiCat treshold : %d\n", | ||
1544 | result.hi2hicat_treshold); | ||
1545 | seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading); | ||
1546 | |||
1547 | seq_printf(seq, "Sensor state : %d = ", result.sensor_state); | ||
1548 | switch (result.sensor_state) { | ||
1549 | case 0: | ||
1550 | seq_printf(seq, "Normal\n"); | ||
1551 | break; | ||
1552 | case 1: | ||
1553 | seq_printf(seq, "Abnormal\n"); | ||
1554 | break; | ||
1555 | case 2: | ||
1556 | seq_printf(seq, "Unknown\n"); | ||
1557 | break; | ||
1558 | case 3: | ||
1559 | seq_printf(seq, "Low Catastrophic (LoCat)\n"); | ||
1560 | break; | ||
1561 | case 4: | ||
1562 | seq_printf(seq, "Low (Low)\n"); | ||
1563 | break; | ||
1564 | case 5: | ||
1565 | seq_printf(seq, "Low Warning (LoWarn)\n"); | ||
1566 | break; | ||
1567 | case 6: | ||
1568 | seq_printf(seq, "High Warning (HiWarn)\n"); | ||
1569 | break; | ||
1570 | case 7: | ||
1571 | seq_printf(seq, "High (High)\n"); | ||
1572 | break; | ||
1573 | case 8: | ||
1574 | seq_printf(seq, "High Catastrophic (HiCat)\n"); | ||
1575 | break; | ||
1576 | } | ||
1577 | |||
1578 | seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable); | ||
1579 | seq_printf(seq, " [%s] Operational state change. \n", | ||
1580 | (result.event_enable & 0x01) ? "+" : "-"); | ||
1581 | seq_printf(seq, " [%s] Low catastrophic. \n", | ||
1582 | (result.event_enable & 0x02) ? "+" : "-"); | ||
1583 | seq_printf(seq, " [%s] Low reading. \n", | ||
1584 | (result.event_enable & 0x04) ? "+" : "-"); | ||
1585 | seq_printf(seq, " [%s] Low warning. \n", | ||
1586 | (result.event_enable & 0x08) ? "+" : "-"); | ||
1587 | seq_printf(seq, | ||
1588 | " [%s] Change back to normal from out of range state. \n", | ||
1589 | (result.event_enable & 0x10) ? "+" : "-"); | ||
1590 | seq_printf(seq, " [%s] High warning. \n", | ||
1591 | (result.event_enable & 0x20) ? "+" : "-"); | ||
1592 | seq_printf(seq, " [%s] High reading. \n", | ||
1593 | (result.event_enable & 0x40) ? "+" : "-"); | ||
1594 | seq_printf(seq, " [%s] High catastrophic. \n", | ||
1595 | (result.event_enable & 0x80) ? "+" : "-"); | ||
1596 | |||
1597 | return 0; | ||
1598 | } | ||
1599 | |||
1600 | static int i2o_seq_open_hrt(struct inode *inode, struct file *file) | ||
1601 | { | ||
1602 | return single_open(file, i2o_seq_show_hrt, PDE_DATA(inode)); | ||
1603 | }; | ||
1604 | |||
1605 | static int i2o_seq_open_lct(struct inode *inode, struct file *file) | ||
1606 | { | ||
1607 | return single_open(file, i2o_seq_show_lct, PDE_DATA(inode)); | ||
1608 | }; | ||
1609 | |||
1610 | static int i2o_seq_open_status(struct inode *inode, struct file *file) | ||
1611 | { | ||
1612 | return single_open(file, i2o_seq_show_status, PDE_DATA(inode)); | ||
1613 | }; | ||
1614 | |||
1615 | static int i2o_seq_open_hw(struct inode *inode, struct file *file) | ||
1616 | { | ||
1617 | return single_open(file, i2o_seq_show_hw, PDE_DATA(inode)); | ||
1618 | }; | ||
1619 | |||
1620 | static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file) | ||
1621 | { | ||
1622 | return single_open(file, i2o_seq_show_ddm_table, PDE_DATA(inode)); | ||
1623 | }; | ||
1624 | |||
1625 | static int i2o_seq_open_driver_store(struct inode *inode, struct file *file) | ||
1626 | { | ||
1627 | return single_open(file, i2o_seq_show_driver_store, PDE_DATA(inode)); | ||
1628 | }; | ||
1629 | |||
1630 | static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file) | ||
1631 | { | ||
1632 | return single_open(file, i2o_seq_show_drivers_stored, PDE_DATA(inode)); | ||
1633 | }; | ||
1634 | |||
1635 | static int i2o_seq_open_groups(struct inode *inode, struct file *file) | ||
1636 | { | ||
1637 | return single_open(file, i2o_seq_show_groups, PDE_DATA(inode)); | ||
1638 | }; | ||
1639 | |||
1640 | static int i2o_seq_open_phys_device(struct inode *inode, struct file *file) | ||
1641 | { | ||
1642 | return single_open(file, i2o_seq_show_phys_device, PDE_DATA(inode)); | ||
1643 | }; | ||
1644 | |||
1645 | static int i2o_seq_open_claimed(struct inode *inode, struct file *file) | ||
1646 | { | ||
1647 | return single_open(file, i2o_seq_show_claimed, PDE_DATA(inode)); | ||
1648 | }; | ||
1649 | |||
1650 | static int i2o_seq_open_users(struct inode *inode, struct file *file) | ||
1651 | { | ||
1652 | return single_open(file, i2o_seq_show_users, PDE_DATA(inode)); | ||
1653 | }; | ||
1654 | |||
1655 | static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file) | ||
1656 | { | ||
1657 | return single_open(file, i2o_seq_show_priv_msgs, PDE_DATA(inode)); | ||
1658 | }; | ||
1659 | |||
1660 | static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file) | ||
1661 | { | ||
1662 | return single_open(file, i2o_seq_show_authorized_users, | ||
1663 | PDE_DATA(inode)); | ||
1664 | }; | ||
1665 | |||
1666 | static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file) | ||
1667 | { | ||
1668 | return single_open(file, i2o_seq_show_dev_identity, PDE_DATA(inode)); | ||
1669 | }; | ||
1670 | |||
1671 | static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file) | ||
1672 | { | ||
1673 | return single_open(file, i2o_seq_show_ddm_identity, PDE_DATA(inode)); | ||
1674 | }; | ||
1675 | |||
1676 | static int i2o_seq_open_uinfo(struct inode *inode, struct file *file) | ||
1677 | { | ||
1678 | return single_open(file, i2o_seq_show_uinfo, PDE_DATA(inode)); | ||
1679 | }; | ||
1680 | |||
1681 | static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file) | ||
1682 | { | ||
1683 | return single_open(file, i2o_seq_show_sgl_limits, PDE_DATA(inode)); | ||
1684 | }; | ||
1685 | |||
1686 | static int i2o_seq_open_sensors(struct inode *inode, struct file *file) | ||
1687 | { | ||
1688 | return single_open(file, i2o_seq_show_sensors, PDE_DATA(inode)); | ||
1689 | }; | ||
1690 | |||
1691 | static int i2o_seq_open_dev_name(struct inode *inode, struct file *file) | ||
1692 | { | ||
1693 | return single_open(file, i2o_seq_show_dev_name, PDE_DATA(inode)); | ||
1694 | }; | ||
1695 | |||
1696 | static const struct file_operations i2o_seq_fops_lct = { | ||
1697 | .open = i2o_seq_open_lct, | ||
1698 | .read = seq_read, | ||
1699 | .llseek = seq_lseek, | ||
1700 | .release = single_release, | ||
1701 | }; | ||
1702 | |||
1703 | static const struct file_operations i2o_seq_fops_hrt = { | ||
1704 | .open = i2o_seq_open_hrt, | ||
1705 | .read = seq_read, | ||
1706 | .llseek = seq_lseek, | ||
1707 | .release = single_release, | ||
1708 | }; | ||
1709 | |||
1710 | static const struct file_operations i2o_seq_fops_status = { | ||
1711 | .open = i2o_seq_open_status, | ||
1712 | .read = seq_read, | ||
1713 | .llseek = seq_lseek, | ||
1714 | .release = single_release, | ||
1715 | }; | ||
1716 | |||
1717 | static const struct file_operations i2o_seq_fops_hw = { | ||
1718 | .open = i2o_seq_open_hw, | ||
1719 | .read = seq_read, | ||
1720 | .llseek = seq_lseek, | ||
1721 | .release = single_release, | ||
1722 | }; | ||
1723 | |||
1724 | static const struct file_operations i2o_seq_fops_ddm_table = { | ||
1725 | .open = i2o_seq_open_ddm_table, | ||
1726 | .read = seq_read, | ||
1727 | .llseek = seq_lseek, | ||
1728 | .release = single_release, | ||
1729 | }; | ||
1730 | |||
1731 | static const struct file_operations i2o_seq_fops_driver_store = { | ||
1732 | .open = i2o_seq_open_driver_store, | ||
1733 | .read = seq_read, | ||
1734 | .llseek = seq_lseek, | ||
1735 | .release = single_release, | ||
1736 | }; | ||
1737 | |||
1738 | static const struct file_operations i2o_seq_fops_drivers_stored = { | ||
1739 | .open = i2o_seq_open_drivers_stored, | ||
1740 | .read = seq_read, | ||
1741 | .llseek = seq_lseek, | ||
1742 | .release = single_release, | ||
1743 | }; | ||
1744 | |||
1745 | static const struct file_operations i2o_seq_fops_groups = { | ||
1746 | .open = i2o_seq_open_groups, | ||
1747 | .read = seq_read, | ||
1748 | .llseek = seq_lseek, | ||
1749 | .release = single_release, | ||
1750 | }; | ||
1751 | |||
1752 | static const struct file_operations i2o_seq_fops_phys_device = { | ||
1753 | .open = i2o_seq_open_phys_device, | ||
1754 | .read = seq_read, | ||
1755 | .llseek = seq_lseek, | ||
1756 | .release = single_release, | ||
1757 | }; | ||
1758 | |||
1759 | static const struct file_operations i2o_seq_fops_claimed = { | ||
1760 | .open = i2o_seq_open_claimed, | ||
1761 | .read = seq_read, | ||
1762 | .llseek = seq_lseek, | ||
1763 | .release = single_release, | ||
1764 | }; | ||
1765 | |||
1766 | static const struct file_operations i2o_seq_fops_users = { | ||
1767 | .open = i2o_seq_open_users, | ||
1768 | .read = seq_read, | ||
1769 | .llseek = seq_lseek, | ||
1770 | .release = single_release, | ||
1771 | }; | ||
1772 | |||
1773 | static const struct file_operations i2o_seq_fops_priv_msgs = { | ||
1774 | .open = i2o_seq_open_priv_msgs, | ||
1775 | .read = seq_read, | ||
1776 | .llseek = seq_lseek, | ||
1777 | .release = single_release, | ||
1778 | }; | ||
1779 | |||
1780 | static const struct file_operations i2o_seq_fops_authorized_users = { | ||
1781 | .open = i2o_seq_open_authorized_users, | ||
1782 | .read = seq_read, | ||
1783 | .llseek = seq_lseek, | ||
1784 | .release = single_release, | ||
1785 | }; | ||
1786 | |||
1787 | static const struct file_operations i2o_seq_fops_dev_name = { | ||
1788 | .open = i2o_seq_open_dev_name, | ||
1789 | .read = seq_read, | ||
1790 | .llseek = seq_lseek, | ||
1791 | .release = single_release, | ||
1792 | }; | ||
1793 | |||
1794 | static const struct file_operations i2o_seq_fops_dev_identity = { | ||
1795 | .open = i2o_seq_open_dev_identity, | ||
1796 | .read = seq_read, | ||
1797 | .llseek = seq_lseek, | ||
1798 | .release = single_release, | ||
1799 | }; | ||
1800 | |||
1801 | static const struct file_operations i2o_seq_fops_ddm_identity = { | ||
1802 | .open = i2o_seq_open_ddm_identity, | ||
1803 | .read = seq_read, | ||
1804 | .llseek = seq_lseek, | ||
1805 | .release = single_release, | ||
1806 | }; | ||
1807 | |||
1808 | static const struct file_operations i2o_seq_fops_uinfo = { | ||
1809 | .open = i2o_seq_open_uinfo, | ||
1810 | .read = seq_read, | ||
1811 | .llseek = seq_lseek, | ||
1812 | .release = single_release, | ||
1813 | }; | ||
1814 | |||
1815 | static const struct file_operations i2o_seq_fops_sgl_limits = { | ||
1816 | .open = i2o_seq_open_sgl_limits, | ||
1817 | .read = seq_read, | ||
1818 | .llseek = seq_lseek, | ||
1819 | .release = single_release, | ||
1820 | }; | ||
1821 | |||
1822 | static const struct file_operations i2o_seq_fops_sensors = { | ||
1823 | .open = i2o_seq_open_sensors, | ||
1824 | .read = seq_read, | ||
1825 | .llseek = seq_lseek, | ||
1826 | .release = single_release, | ||
1827 | }; | ||
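All of the open handlers and file_operations above are instances of the standard seq_file single_open() pattern; stripped down, the pattern looks roughly like this (generic sketch with hypothetical names, not code from this driver):

/* single_open(): one show() callback renders the whole file, and
 * PDE_DATA(inode) carries the object the /proc entry was created for
 * (here an i2o_controller or an i2o_device). */
static int example_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "private data at %p\n", seq->private);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, PDE_DATA(inode));
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};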
1828 | |||
1829 | /* | ||
1830 | * IOP specific entries...write field just in case someone | ||
1831 | * ever wants one. | ||
1832 | */ | ||
1833 | static i2o_proc_entry i2o_proc_generic_iop_entries[] = { | ||
1834 | {"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt}, | ||
1835 | {"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct}, | ||
1836 | {"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status}, | ||
1837 | {"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw}, | ||
1838 | {"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table}, | ||
1839 | {"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store}, | ||
1840 | {"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored}, | ||
1841 | {NULL, 0, NULL} | ||
1842 | }; | ||
1843 | |||
1844 | /* | ||
1845 | * Device specific entries | ||
1846 | */ | ||
1847 | static i2o_proc_entry generic_dev_entries[] = { | ||
1848 | {"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups}, | ||
1849 | {"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device}, | ||
1850 | {"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed}, | ||
1851 | {"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users}, | ||
1852 | {"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs}, | ||
1853 | {"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users}, | ||
1854 | {"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity}, | ||
1855 | {"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity}, | ||
1856 | {"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo}, | ||
1857 | {"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits}, | ||
1858 | {"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors}, | ||
1859 | {NULL, 0, NULL} | ||
1860 | }; | ||
1861 | |||
1862 | /* | ||
1863 | * Storage unit specific entries (SCSI Periph, BS) with device names | ||
1864 | */ | ||
1865 | static i2o_proc_entry rbs_dev_entries[] = { | ||
1866 | {"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name}, | ||
1867 | {NULL, 0, NULL} | ||
1868 | }; | ||
1869 | |||
1870 | /** | ||
1871 | * i2o_proc_create_entries - Creates proc dir entries | ||
1872 | * @dir: proc dir entry under which the entries should be placed | ||
1873 | * @i2o_pe: pointer to the entries which should be added | ||
1874 | * @data: pointer to I2O controller or device | ||
1875 | * | ||
1876 | * Create proc dir entries for an I2O controller or I2O device. | ||
1877 | * | ||
1878 | * Returns 0 on success or negative error code on failure. | ||
1879 | */ | ||
1880 | static int i2o_proc_create_entries(struct proc_dir_entry *dir, | ||
1881 | i2o_proc_entry * i2o_pe, void *data) | ||
1882 | { | ||
1883 | struct proc_dir_entry *tmp; | ||
1884 | |||
1885 | while (i2o_pe->name) { | ||
1886 | tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir, | ||
1887 | i2o_pe->fops, data); | ||
1888 | if (!tmp) | ||
1889 | return -1; | ||
1890 | |||
1891 | i2o_pe++; | ||
1892 | } | ||
1893 | |||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
1897 | /** | ||
1898 | * i2o_proc_device_add - Add an I2O device to the proc dir | ||
1899 | * @dir: proc dir entry to which the device should be added | ||
1900 | * @dev: I2O device which should be added | ||
1901 | * | ||
1902 | * Add an I2O device to the proc dir entry dir and create the entries for | ||
1903 | * the device depending on the class of the I2O device. | ||
1904 | */ | ||
1905 | static void i2o_proc_device_add(struct proc_dir_entry *dir, | ||
1906 | struct i2o_device *dev) | ||
1907 | { | ||
1908 | char buff[10]; | ||
1909 | struct proc_dir_entry *devdir; | ||
1910 | i2o_proc_entry *i2o_pe = NULL; | ||
1911 | |||
1912 | sprintf(buff, "%03x", dev->lct_data.tid); | ||
1913 | |||
1914 | osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff); | ||
1915 | |||
1916 | devdir = proc_mkdir_data(buff, 0, dir, dev); | ||
1917 | if (!devdir) { | ||
1918 | osm_warn("Could not allocate procdir!\n"); | ||
1919 | return; | ||
1920 | } | ||
1921 | |||
1922 | i2o_proc_create_entries(devdir, generic_dev_entries, dev); | ||
1923 | |||
1924 | /* Inform core that we want updates about this device's status */ | ||
1925 | switch (dev->lct_data.class_id) { | ||
1926 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
1927 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
1928 | i2o_pe = rbs_dev_entries; | ||
1929 | break; | ||
1930 | default: | ||
1931 | break; | ||
1932 | } | ||
1933 | if (i2o_pe) | ||
1934 | i2o_proc_create_entries(devdir, i2o_pe, dev); | ||
1935 | } | ||
1936 | |||
1937 | /** | ||
1938 | * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree | ||
1939 | * @dir: parent proc dir entry | ||
1940 | * @c: I2O controller which should be added | ||
1941 | * | ||
1942 | * Add the entries to the parent proc dir entry. Also each device is added | ||
1943 | * to the controller's proc dir entry. | ||
1944 | * | ||
1945 | * Returns 0 on success or negative error code on failure. | ||
1946 | */ | ||
1947 | static int i2o_proc_iop_add(struct proc_dir_entry *dir, | ||
1948 | struct i2o_controller *c) | ||
1949 | { | ||
1950 | struct proc_dir_entry *iopdir; | ||
1951 | struct i2o_device *dev; | ||
1952 | |||
1953 | osm_debug("adding IOP /proc/i2o/%s\n", c->name); | ||
1954 | |||
1955 | iopdir = proc_mkdir_data(c->name, 0, dir, c); | ||
1956 | if (!iopdir) | ||
1957 | return -1; | ||
1958 | |||
1959 | i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c); | ||
1960 | |||
1961 | list_for_each_entry(dev, &c->devices, list) | ||
1962 | i2o_proc_device_add(iopdir, dev); | ||
1963 | |||
1964 | return 0; | ||
1965 | } | ||
1966 | |||
1967 | /** | ||
1968 | * i2o_proc_fs_create - Create the i2o proc fs. | ||
1969 | * | ||
1970 | * Iterate over each I2O controller and create the entries for it. | ||
1971 | * | ||
1972 | * Returns 0 on success or negative error code on failure. | ||
1973 | */ | ||
1974 | static int __init i2o_proc_fs_create(void) | ||
1975 | { | ||
1976 | struct i2o_controller *c; | ||
1977 | |||
1978 | i2o_proc_dir_root = proc_mkdir("i2o", NULL); | ||
1979 | if (!i2o_proc_dir_root) | ||
1980 | return -1; | ||
1981 | |||
1982 | list_for_each_entry(c, &i2o_controllers, list) | ||
1983 | i2o_proc_iop_add(i2o_proc_dir_root, c); | ||
1984 | |||
1985 | return 0; | ||
1986 | }; | ||
1987 | |||
1988 | /** | ||
1989 | * i2o_proc_fs_destroy - Clean up all i2o proc entries | ||
1990 | * | ||
1991 | * Iterate over each I2O controller and remove the entries for it. | ||
1992 | * | ||
1993 | * Returns 0 on success or negative error code on failure. | ||
1994 | */ | ||
1995 | static int __exit i2o_proc_fs_destroy(void) | ||
1996 | { | ||
1997 | remove_proc_subtree("i2o", NULL); | ||
1998 | |||
1999 | return 0; | ||
2000 | }; | ||
2001 | |||
2002 | /** | ||
2003 | * i2o_proc_init - Init function for procfs | ||
2004 | * | ||
2005 | * Registers Proc OSM and creates procfs entries. | ||
2006 | * | ||
2007 | * Returns 0 on success or negative error code on failure. | ||
2008 | */ | ||
2009 | static int __init i2o_proc_init(void) | ||
2010 | { | ||
2011 | int rc; | ||
2012 | |||
2013 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
2014 | |||
2015 | rc = i2o_driver_register(&i2o_proc_driver); | ||
2016 | if (rc) | ||
2017 | return rc; | ||
2018 | |||
2019 | rc = i2o_proc_fs_create(); | ||
2020 | if (rc) { | ||
2021 | i2o_driver_unregister(&i2o_proc_driver); | ||
2022 | return rc; | ||
2023 | } | ||
2024 | |||
2025 | return 0; | ||
2026 | }; | ||
2027 | |||
2028 | /** | ||
2029 | * i2o_proc_exit - Exit function for procfs | ||
2030 | * | ||
2031 | * Unregisters Proc OSM and removes procfs entries. | ||
2032 | */ | ||
2033 | static void __exit i2o_proc_exit(void) | ||
2034 | { | ||
2035 | i2o_driver_unregister(&i2o_proc_driver); | ||
2036 | i2o_proc_fs_destroy(); | ||
2037 | }; | ||
2038 | |||
2039 | MODULE_AUTHOR("Deepak Saxena"); | ||
2040 | MODULE_LICENSE("GPL"); | ||
2041 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
2042 | MODULE_VERSION(OSM_VERSION); | ||
2043 | |||
2044 | module_init(i2o_proc_init); | ||
2045 | module_exit(i2o_proc_exit); | ||
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c deleted file mode 100644 index 8152e9fa9d95..000000000000 --- a/drivers/message/i2o/i2o_scsi.c +++ /dev/null | |||
@@ -1,814 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify it | ||
3 | * under the terms of the GNU General Public License as published by the | ||
4 | * Free Software Foundation; either version 2, or (at your option) any | ||
5 | * later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, but | ||
8 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
10 | * General Public License for more details. | ||
11 | * | ||
12 | * For the avoidance of doubt the "preferred form" of this code is one which | ||
13 | * is in an open non patent encumbered format. Where cryptographic key signing | ||
14 | * forms part of the process of creating an executable the information | ||
15 | * including keys needed to generate an equivalently functional executable | ||
16 | * are deemed to be part of the source code. | ||
17 | * | ||
18 | * Complications for I2O scsi | ||
19 | * | ||
20 | * o Each (bus,lun) is a logical device in I2O. We keep a map | ||
21 | * table. We spoof failed selection for unmapped units | ||
22 | * o Request sense buffers can come back for free. | ||
23 | * o Scatter gather is a bit dynamic. We have to investigate at | ||
24 | * setup time. | ||
25 | * o Some of our resources are dynamically shared. The i2o core | ||
26 | * needs a message reservation protocol to avoid swap v net | ||
27 | * deadlocking. We need to back off queue requests. | ||
28 | * | ||
29 | * In general the firmware wants to help. Where its help isn't useful for | ||
30 | * performance we just ignore the aid. It's not worth the code in truth. | ||
31 | * | ||
32 | * Fixes/additions: | ||
33 | * Steve Ralston: | ||
34 | * Scatter gather now works | ||
35 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
36 | * Minor fixes for 2.6. | ||
37 | * | ||
38 | * To Do: | ||
39 | * 64bit cleanups | ||
40 | * Fix the resource management problems. | ||
41 | */ | ||
42 | |||
43 | #include <linux/module.h> | ||
44 | #include <linux/kernel.h> | ||
45 | #include <linux/types.h> | ||
46 | #include <linux/string.h> | ||
47 | #include <linux/ioport.h> | ||
48 | #include <linux/jiffies.h> | ||
49 | #include <linux/interrupt.h> | ||
50 | #include <linux/timer.h> | ||
51 | #include <linux/delay.h> | ||
52 | #include <linux/proc_fs.h> | ||
53 | #include <linux/prefetch.h> | ||
54 | #include <linux/pci.h> | ||
55 | #include <linux/blkdev.h> | ||
56 | #include <linux/i2o.h> | ||
57 | #include <linux/scatterlist.h> | ||
58 | |||
59 | #include <asm/dma.h> | ||
60 | #include <asm/io.h> | ||
61 | #include <linux/atomic.h> | ||
62 | |||
63 | #include <scsi/scsi.h> | ||
64 | #include <scsi/scsi_host.h> | ||
65 | #include <scsi/scsi_device.h> | ||
66 | #include <scsi/scsi_cmnd.h> | ||
67 | #include <scsi/sg.h> | ||
68 | |||
69 | #define OSM_NAME "scsi-osm" | ||
70 | #define OSM_VERSION "1.316" | ||
71 | #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" | ||
72 | |||
73 | static struct i2o_driver i2o_scsi_driver; | ||
74 | |||
75 | static unsigned int i2o_scsi_max_id = 16; | ||
76 | static unsigned int i2o_scsi_max_lun = 255; | ||
77 | |||
78 | struct i2o_scsi_host { | ||
79 | struct Scsi_Host *scsi_host; /* pointer to the SCSI host */ | ||
80 | struct i2o_controller *iop; /* pointer to the I2O controller */ | ||
81 | u64 lun; /* lun's used for block devices */ | ||
82 | struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */ | ||
83 | }; | ||
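The channel[0] member is the old zero-length-array idiom for a trailing, variable-sized map table; i2o_scsi_host_alloc() below allocates sizeof(struct i2o_scsi_host) plus one pointer per SCSI channel found. Written with a C99 flexible array member, the same layout would look roughly like this (sketch only, not from the driver):

struct i2o_scsi_host_sketch {
	struct Scsi_Host *scsi_host;	/* the registered SCSI host */
	struct i2o_controller *iop;	/* owning I2O controller */
	u64 lun;			/* next LUN handed out to block devices */
	struct i2o_device *channel[];	/* channel -> i2o_dev mapping table */
};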
84 | |||
85 | static struct scsi_host_template i2o_scsi_host_template; | ||
86 | |||
87 | #define I2O_SCSI_CAN_QUEUE 4 | ||
88 | |||
89 | /* SCSI OSM class handling definition */ | ||
90 | static struct i2o_class_id i2o_scsi_class_id[] = { | ||
91 | {I2O_CLASS_SCSI_PERIPHERAL}, | ||
92 | {I2O_CLASS_END} | ||
93 | }; | ||
94 | |||
95 | static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) | ||
96 | { | ||
97 | struct i2o_scsi_host *i2o_shost; | ||
98 | struct i2o_device *i2o_dev; | ||
99 | struct Scsi_Host *scsi_host; | ||
100 | int max_channel = 0; | ||
101 | u8 type; | ||
102 | int i; | ||
103 | size_t size; | ||
104 | u16 body_size = 6; | ||
105 | |||
106 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
107 | if (c->adaptec) | ||
108 | body_size = 8; | ||
109 | #endif | ||
110 | |||
111 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
112 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { | ||
113 | if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) | ||
114 | && (type == 0x01)) /* SCSI bus */ | ||
115 | max_channel++; | ||
116 | } | ||
117 | |||
118 | if (!max_channel) { | ||
119 | osm_warn("no channels found on %s\n", c->name); | ||
120 | return ERR_PTR(-EFAULT); | ||
121 | } | ||
122 | |||
123 | size = max_channel * sizeof(struct i2o_device *) | ||
124 | + sizeof(struct i2o_scsi_host); | ||
125 | |||
126 | scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size); | ||
127 | if (!scsi_host) { | ||
128 | osm_warn("Could not allocate SCSI host\n"); | ||
129 | return ERR_PTR(-ENOMEM); | ||
130 | } | ||
131 | |||
132 | scsi_host->max_channel = max_channel - 1; | ||
133 | scsi_host->max_id = i2o_scsi_max_id; | ||
134 | scsi_host->max_lun = i2o_scsi_max_lun; | ||
135 | scsi_host->this_id = c->unit; | ||
136 | scsi_host->sg_tablesize = i2o_sg_tablesize(c, body_size); | ||
137 | |||
138 | i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata; | ||
139 | i2o_shost->scsi_host = scsi_host; | ||
140 | i2o_shost->iop = c; | ||
141 | i2o_shost->lun = 1; | ||
142 | |||
143 | i = 0; | ||
144 | list_for_each_entry(i2o_dev, &c->devices, list) | ||
145 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { | ||
146 | if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) | ||
147 | && (type == 0x01)) /* only SCSI bus */ | ||
148 | i2o_shost->channel[i++] = i2o_dev; | ||
149 | |||
150 | if (i >= max_channel) | ||
151 | break; | ||
152 | } | ||
153 | |||
154 | return i2o_shost; | ||
155 | }; | ||
156 | |||
157 | /** | ||
158 | * i2o_scsi_get_host - Get an I2O SCSI host | ||
159 | * @c: I2O controller for which to get the SCSI host | ||
160 | * | ||
161 | * If the I2O controller already exists as a SCSI host, the SCSI host | ||
162 | * is returned; otherwise the I2O controller is added to the SCSI | ||
163 | * core. | ||
164 | * | ||
165 | * Returns pointer to the I2O SCSI host on success or NULL on failure. | ||
166 | */ | ||
167 | static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c) | ||
168 | { | ||
169 | return c->driver_data[i2o_scsi_driver.context]; | ||
170 | }; | ||
171 | |||
172 | /** | ||
173 | * i2o_scsi_remove - Remove I2O device from SCSI core | ||
174 | * @dev: device which should be removed | ||
175 | * | ||
176 | * Removes the I2O device from the SCSI core again. | ||
177 | * | ||
178 | * Returns 0 on success. | ||
179 | */ | ||
180 | static int i2o_scsi_remove(struct device *dev) | ||
181 | { | ||
182 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
183 | struct i2o_controller *c = i2o_dev->iop; | ||
184 | struct i2o_scsi_host *i2o_shost; | ||
185 | struct scsi_device *scsi_dev; | ||
186 | |||
187 | osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
188 | |||
189 | i2o_shost = i2o_scsi_get_host(c); | ||
190 | |||
191 | shost_for_each_device(scsi_dev, i2o_shost->scsi_host) | ||
192 | if (scsi_dev->hostdata == i2o_dev) { | ||
193 | sysfs_remove_link(&i2o_dev->device.kobj, "scsi"); | ||
194 | scsi_remove_device(scsi_dev); | ||
195 | scsi_device_put(scsi_dev); | ||
196 | break; | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | }; | ||
201 | |||
202 | /** | ||
203 | * i2o_scsi_probe - verify if dev is an I2O SCSI device and install it | ||
204 | * @dev: device to verify if it is an I2O SCSI device | ||
205 | * | ||
206 | * Retrieve channel, id and lun for I2O device. If everything goes well | ||
207 | * register the I2O device as SCSI device on the I2O SCSI controller. | ||
208 | * | ||
209 | * Returns 0 on success or negative error code on failure. | ||
210 | */ | ||
211 | static int i2o_scsi_probe(struct device *dev) | ||
212 | { | ||
213 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
214 | struct i2o_controller *c = i2o_dev->iop; | ||
215 | struct i2o_scsi_host *i2o_shost; | ||
216 | struct Scsi_Host *scsi_host; | ||
217 | struct i2o_device *parent; | ||
218 | struct scsi_device *scsi_dev; | ||
219 | u32 id = -1; | ||
220 | u64 lun = -1; | ||
221 | int channel = -1; | ||
222 | int i, rc; | ||
223 | |||
224 | i2o_shost = i2o_scsi_get_host(c); | ||
225 | if (!i2o_shost) | ||
226 | return -EFAULT; | ||
227 | |||
228 | scsi_host = i2o_shost->scsi_host; | ||
229 | |||
230 | switch (i2o_dev->lct_data.class_id) { | ||
231 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
232 | case I2O_CLASS_EXECUTIVE: | ||
233 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
234 | if (c->adaptec) { | ||
235 | u8 type; | ||
236 | struct i2o_device *d = i2o_shost->channel[0]; | ||
237 | |||
238 | if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1) | ||
239 | && (type == 0x01)) /* SCSI bus */ | ||
240 | if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { | ||
241 | channel = 0; | ||
242 | if (i2o_dev->lct_data.class_id == | ||
243 | I2O_CLASS_RANDOM_BLOCK_STORAGE) | ||
244 | lun = | ||
245 | cpu_to_le64(i2o_shost-> | ||
246 | lun++); | ||
247 | else | ||
248 | lun = 0; | ||
249 | } | ||
250 | } | ||
251 | #endif | ||
252 | break; | ||
253 | |||
254 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
255 | if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4)) | ||
256 | return -EFAULT; | ||
257 | |||
258 | if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8)) | ||
259 | return -EFAULT; | ||
260 | |||
261 | parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); | ||
262 | if (!parent) { | ||
263 | osm_warn("can not find parent of device %03x\n", | ||
264 | i2o_dev->lct_data.tid); | ||
265 | return -EFAULT; | ||
266 | } | ||
267 | |||
268 | for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++) | ||
269 | if (i2o_shost->channel[i] == parent) | ||
270 | channel = i; | ||
271 | break; | ||
272 | |||
273 | default: | ||
274 | return -EFAULT; | ||
275 | } | ||
276 | |||
277 | if (channel == -1) { | ||
278 | osm_warn("can not find channel of device %03x\n", | ||
279 | i2o_dev->lct_data.tid); | ||
280 | return -EFAULT; | ||
281 | } | ||
282 | |||
283 | if (le32_to_cpu(id) >= scsi_host->max_id) { | ||
284 | osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", | ||
285 | le32_to_cpu(id), scsi_host->max_id); | ||
286 | return -EFAULT; | ||
287 | } | ||
288 | |||
289 | if (le64_to_cpu(lun) >= scsi_host->max_lun) { | ||
290 | osm_warn("SCSI device lun (%llu) >= max_lun of I2O host (%llu)", | ||
291 | le64_to_cpu(lun), scsi_host->max_lun); | ||
292 | return -EFAULT; | ||
293 | } | ||
294 | |||
295 | scsi_dev = | ||
296 | __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id), | ||
297 | le64_to_cpu(lun), i2o_dev); | ||
298 | |||
299 | if (IS_ERR(scsi_dev)) { | ||
300 | osm_warn("can not add SCSI device %03x\n", | ||
301 | i2o_dev->lct_data.tid); | ||
302 | return PTR_ERR(scsi_dev); | ||
303 | } | ||
304 | |||
305 | rc = sysfs_create_link(&i2o_dev->device.kobj, | ||
306 | &scsi_dev->sdev_gendev.kobj, "scsi"); | ||
307 | if (rc) | ||
308 | goto err; | ||
309 | |||
310 | osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %llu\n", | ||
311 | i2o_dev->lct_data.tid, channel, le32_to_cpu(id), | ||
312 | le64_to_cpu(lun)); | ||
313 | |||
314 | return 0; | ||
315 | |||
316 | err: | ||
317 | scsi_remove_device(scsi_dev); | ||
318 | return rc; | ||
319 | }; | ||
320 | |||
321 | static const char *i2o_scsi_info(struct Scsi_Host *SChost) | ||
322 | { | ||
323 | struct i2o_scsi_host *hostdata; | ||
324 | hostdata = (struct i2o_scsi_host *)SChost->hostdata; | ||
325 | return hostdata->iop->name; | ||
326 | } | ||
327 | |||
328 | /** | ||
329 | * i2o_scsi_reply - SCSI OSM message reply handler | ||
330 | * @c: controller issuing the reply | ||
331 | * @m: message id for flushing | ||
332 | * @msg: the message from the controller | ||
333 | * | ||
334 | * Process reply messages (interrupts in normal scsi controller think). | ||
335 | * We can get a variety of messages to process. The normal path is | ||
336 | * scsi command completions. We must also deal with IOP failures, | ||
337 | * the reply to a bus reset and the reply to a LUN query. | ||
338 | * | ||
339 | * Returns 0 on success and if the reply should not be flushed or > 0 | ||
340 | * on success and if the reply should be flushed. Returns negative error | ||
341 | * code on failure and if the reply should be flushed. | ||
342 | */ | ||
343 | static int i2o_scsi_reply(struct i2o_controller *c, u32 m, | ||
344 | struct i2o_message *msg) | ||
345 | { | ||
346 | struct scsi_cmnd *cmd; | ||
347 | u32 error; | ||
348 | struct device *dev; | ||
349 | |||
350 | cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | ||
351 | if (unlikely(!cmd)) { | ||
352 | osm_err("NULL reply received!\n"); | ||
353 | return -1; | ||
354 | } | ||
355 | |||
356 | /* | ||
357 | * Low byte is device status, next is adapter status, | ||
358 | * (then one byte reserved), then request status. | ||
359 | */ | ||
360 | error = le32_to_cpu(msg->body[0]); | ||
361 | |||
362 | osm_debug("Completed %0x%p\n", cmd); | ||
363 | |||
364 | cmd->result = error & 0xff; | ||
365 | /* | ||
366 | * if DeviceStatus is not SCSI_SUCCESS copy over the sense data and let | ||
367 | * the SCSI layer handle the error | ||
368 | */ | ||
369 | if (cmd->result) | ||
370 | memcpy(cmd->sense_buffer, &msg->body[3], | ||
371 | min(SCSI_SENSE_BUFFERSIZE, 40)); | ||
372 | |||
373 | /* only output error code if AdapterStatus is not HBA_SUCCESS */ | ||
374 | if ((error >> 8) & 0xff) | ||
375 | osm_err("SCSI error %08x\n", error); | ||
376 | |||
377 | dev = &c->pdev->dev; | ||
378 | |||
379 | scsi_dma_unmap(cmd); | ||
380 | |||
381 | cmd->scsi_done(cmd); | ||
382 | |||
383 | return 1; | ||
384 | }; | ||
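The byte layout of the reply status word described in the comment inside i2o_scsi_reply() can be spelled out explicitly; an illustrative helper (hypothetical name, not part of the original driver):

/* Sketch: byte 0 = device status, byte 1 = adapter (HBA) status,
 * byte 2 = reserved, byte 3 = request status. */
static inline void i2o_scsi_status_sketch(u32 error, u8 *dev_status,
					  u8 *hba_status, u8 *req_status)
{
	*dev_status = error & 0xff;
	*hba_status = (error >> 8) & 0xff;
	*req_status = (error >> 24) & 0xff;
}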
385 | |||
386 | /** | ||
387 | * i2o_scsi_notify_device_add - Retrieve notifications of added devices | ||
388 | * @i2o_dev: the I2O device which was added | ||
389 | * | ||
390 | * If an I2O device is added we catch the notification, because I2O classes | ||
391 | * other than SCSI peripheral will not be received through | ||
392 | * i2o_scsi_probe(). | ||
393 | */ | ||
394 | static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev) | ||
395 | { | ||
396 | switch (i2o_dev->lct_data.class_id) { | ||
397 | case I2O_CLASS_EXECUTIVE: | ||
398 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
399 | i2o_scsi_probe(&i2o_dev->device); | ||
400 | break; | ||
401 | |||
402 | default: | ||
403 | break; | ||
404 | } | ||
405 | }; | ||
406 | |||
407 | /** | ||
408 | * i2o_scsi_notify_device_remove - Retrieve notifications of removed devices | ||
409 | * @i2o_dev: the I2O device which was removed | ||
410 | * | ||
411 | * If an I2O device is removed, we catch the notification to remove the | ||
412 | * corresponding SCSI device. | ||
413 | */ | ||
414 | static void i2o_scsi_notify_device_remove(struct i2o_device *i2o_dev) | ||
415 | { | ||
416 | switch (i2o_dev->lct_data.class_id) { | ||
417 | case I2O_CLASS_EXECUTIVE: | ||
418 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
419 | i2o_scsi_remove(&i2o_dev->device); | ||
420 | break; | ||
421 | |||
422 | default: | ||
423 | break; | ||
424 | } | ||
425 | }; | ||
426 | |||
427 | /** | ||
428 | * i2o_scsi_notify_controller_add - Retrieve notifications of added controllers | ||
429 | * @c: the controller which was added | ||
430 | * | ||
431 | * If an I2O controller is added, we catch the notification to add a | ||
432 | * corresponding Scsi_Host. | ||
433 | */ | ||
434 | static void i2o_scsi_notify_controller_add(struct i2o_controller *c) | ||
435 | { | ||
436 | struct i2o_scsi_host *i2o_shost; | ||
437 | int rc; | ||
438 | |||
439 | i2o_shost = i2o_scsi_host_alloc(c); | ||
440 | if (IS_ERR(i2o_shost)) { | ||
441 | osm_err("Could not initialize SCSI host\n"); | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | rc = scsi_add_host(i2o_shost->scsi_host, &c->device); | ||
446 | if (rc) { | ||
447 | osm_err("Could not add SCSI host\n"); | ||
448 | scsi_host_put(i2o_shost->scsi_host); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | c->driver_data[i2o_scsi_driver.context] = i2o_shost; | ||
453 | |||
454 | osm_debug("new I2O SCSI host added\n"); | ||
455 | }; | ||
456 | |||
457 | /** | ||
458 | * i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers | ||
459 | * @c: the controller which was removed | ||
460 | * | ||
461 | * If an I2O controller is removed, we catch the notification to remove the | ||
462 | * corresponding Scsi_Host. | ||
463 | */ | ||
464 | static void i2o_scsi_notify_controller_remove(struct i2o_controller *c) | ||
465 | { | ||
466 | struct i2o_scsi_host *i2o_shost; | ||
467 | i2o_shost = i2o_scsi_get_host(c); | ||
468 | if (!i2o_shost) | ||
469 | return; | ||
470 | |||
471 | c->driver_data[i2o_scsi_driver.context] = NULL; | ||
472 | |||
473 | scsi_remove_host(i2o_shost->scsi_host); | ||
474 | scsi_host_put(i2o_shost->scsi_host); | ||
475 | osm_debug("I2O SCSI host removed\n"); | ||
476 | }; | ||
477 | |||
478 | /* SCSI OSM driver struct */ | ||
479 | static struct i2o_driver i2o_scsi_driver = { | ||
480 | .name = OSM_NAME, | ||
481 | .reply = i2o_scsi_reply, | ||
482 | .classes = i2o_scsi_class_id, | ||
483 | .notify_device_add = i2o_scsi_notify_device_add, | ||
484 | .notify_device_remove = i2o_scsi_notify_device_remove, | ||
485 | .notify_controller_add = i2o_scsi_notify_controller_add, | ||
486 | .notify_controller_remove = i2o_scsi_notify_controller_remove, | ||
487 | .driver = { | ||
488 | .probe = i2o_scsi_probe, | ||
489 | .remove = i2o_scsi_remove, | ||
490 | }, | ||
491 | }; | ||
492 | |||
493 | /** | ||
494 | * i2o_scsi_queuecommand - queue a SCSI command | ||
495 | * @SCpnt: scsi command pointer | ||
496 | * @done: callback for completion | ||
497 | * | ||
498 | * Issue a SCSI command asynchronously. Returns 0 on success or a | ||
499 | * non-zero value if we hit an error (normally message queue | ||
500 | * congestion). The only minor complication here is that I2O deals | ||
501 | * with the device addressing, so we have to map the bus/dev/lun back | ||
502 | * to an I2O handle as well as faking absent devices ourselves. | ||
503 | * | ||
504 | * Locks: takes the controller lock on error path only | ||
505 | */ | ||
506 | |||
507 | static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt, | ||
508 | void (*done) (struct scsi_cmnd *)) | ||
509 | { | ||
510 | struct i2o_controller *c; | ||
511 | struct i2o_device *i2o_dev; | ||
512 | int tid; | ||
513 | struct i2o_message *msg; | ||
514 | /* | ||
515 | * ENABLE_DISCONNECT | ||
516 | * SIMPLE_TAG | ||
517 | * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME | ||
518 | */ | ||
519 | u32 scsi_flags = 0x20a00000; | ||
520 | u32 sgl_offset; | ||
521 | u32 *mptr; | ||
522 | u32 cmd = I2O_CMD_SCSI_EXEC << 24; | ||
523 | int rc = 0; | ||
524 | |||
525 | /* | ||
526 | * Do the incoming paperwork | ||
527 | */ | ||
528 | i2o_dev = SCpnt->device->hostdata; | ||
529 | |||
530 | SCpnt->scsi_done = done; | ||
531 | |||
532 | if (unlikely(!i2o_dev)) { | ||
533 | osm_warn("no I2O device in request\n"); | ||
534 | SCpnt->result = DID_NO_CONNECT << 16; | ||
535 | done(SCpnt); | ||
536 | goto exit; | ||
537 | } | ||
538 | c = i2o_dev->iop; | ||
539 | tid = i2o_dev->lct_data.tid; | ||
540 | |||
541 | osm_debug("qcmd: Tid = %03x\n", tid); | ||
542 | osm_debug("Real scsi messages.\n"); | ||
543 | |||
544 | /* | ||
545 | * Put together a scsi execscb message | ||
546 | */ | ||
547 | switch (SCpnt->sc_data_direction) { | ||
548 | case PCI_DMA_NONE: | ||
549 | /* DATA NO XFER */ | ||
550 | sgl_offset = SGL_OFFSET_0; | ||
551 | break; | ||
552 | |||
553 | case PCI_DMA_TODEVICE: | ||
554 | /* DATA OUT (iop-->dev) */ | ||
555 | scsi_flags |= 0x80000000; | ||
556 | sgl_offset = SGL_OFFSET_10; | ||
557 | break; | ||
558 | |||
559 | case PCI_DMA_FROMDEVICE: | ||
560 | /* DATA IN (iop<--dev) */ | ||
561 | scsi_flags |= 0x40000000; | ||
562 | sgl_offset = SGL_OFFSET_10; | ||
563 | break; | ||
564 | |||
565 | default: | ||
566 | /* Unknown - kill the command */ | ||
567 | SCpnt->result = DID_NO_CONNECT << 16; | ||
568 | done(SCpnt); | ||
569 | goto exit; | ||
570 | } | ||
571 | |||
572 | /* | ||
573 | * Obtain an I2O message. If there are none free then | ||
574 | * throw it back to the scsi layer | ||
575 | */ | ||
576 | |||
577 | msg = i2o_msg_get(c); | ||
578 | if (IS_ERR(msg)) { | ||
579 | rc = SCSI_MLQUEUE_HOST_BUSY; | ||
580 | goto exit; | ||
581 | } | ||
582 | |||
583 | mptr = &msg->body[0]; | ||
584 | |||
585 | #if 0 /* this code can't work */ | ||
586 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
587 | if (c->adaptec) { | ||
588 | u32 adpt_flags = 0; | ||
589 | |||
590 | if (SCpnt->sc_request && SCpnt->sc_request->upper_private_data) { | ||
591 | i2o_sg_io_hdr_t __user *usr_ptr = | ||
592 | ((Sg_request *) (SCpnt->sc_request-> | ||
593 | upper_private_data))->header. | ||
594 | usr_ptr; | ||
595 | |||
596 | if (usr_ptr) | ||
597 | get_user(adpt_flags, &usr_ptr->flags); | ||
598 | } | ||
599 | |||
600 | switch (i2o_dev->lct_data.class_id) { | ||
601 | case I2O_CLASS_EXECUTIVE: | ||
602 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
603 | /* interpret flag has to be set for executive */ | ||
604 | adpt_flags ^= I2O_DPT_SG_FLAG_INTERPRET; | ||
605 | break; | ||
606 | |||
607 | default: | ||
608 | break; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * for Adaptec controllers we use the PRIVATE command, because | ||
613 | * the normal SCSI EXEC doesn't support all SCSI commands on | ||
614 | * all controllers (for example READ CAPACITY). | ||
615 | */ | ||
616 | if (sgl_offset == SGL_OFFSET_10) | ||
617 | sgl_offset = SGL_OFFSET_12; | ||
618 | cmd = I2O_CMD_PRIVATE << 24; | ||
619 | *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC); | ||
620 | *mptr++ = cpu_to_le32(adpt_flags | tid); | ||
621 | } | ||
622 | #endif | ||
623 | #endif | ||
624 | |||
625 | msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid); | ||
626 | msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context); | ||
627 | |||
628 | /* We want the SCSI control block back */ | ||
629 | msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt)); | ||
630 | |||
631 | /* LSI_920_PCI_QUIRK | ||
632 | * | ||
633 | * Intermittent corruption of msg frame word data observed | ||
634 | * on msg[4] after: | ||
635 | * WRITE, READ-MODIFY-WRITE | ||
636 | * operations. 19990606 -sralston | ||
637 | * | ||
638 | * (Hence we build this word via tag. It's good practice anyway; | ||
639 | * we don't want needless fetches over PCI.) | ||
640 | */ | ||
641 | |||
642 | /* Attach tags to the devices */ | ||
643 | /* FIXME: implement | ||
644 | if(SCpnt->device->tagged_supported) { | ||
645 | if(SCpnt->tag == HEAD_OF_QUEUE_TAG) | ||
646 | scsi_flags |= 0x01000000; | ||
647 | else if(SCpnt->tag == ORDERED_QUEUE_TAG) | ||
648 | scsi_flags |= 0x01800000; | ||
649 | } | ||
650 | */ | ||
651 | |||
652 | *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len); | ||
653 | |||
654 | /* Write SCSI command into the message - always 16 byte block */ | ||
655 | memcpy(mptr, SCpnt->cmnd, 16); | ||
656 | mptr += 4; | ||
657 | |||
658 | if (sgl_offset != SGL_OFFSET_0) { | ||
659 | /* write size of data addressed by SGL */ | ||
660 | *mptr++ = cpu_to_le32(scsi_bufflen(SCpnt)); | ||
661 | |||
662 | /* Now fill in the SGList and command */ | ||
663 | |||
664 | if (scsi_sg_count(SCpnt)) { | ||
665 | if (!i2o_dma_map_sg(c, scsi_sglist(SCpnt), | ||
666 | scsi_sg_count(SCpnt), | ||
667 | SCpnt->sc_data_direction, &mptr)) | ||
668 | goto nomem; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | /* Stick the headers on */ | ||
673 | msg->u.head[0] = | ||
674 | cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset); | ||
675 | |||
676 | /* Queue the message */ | ||
677 | i2o_msg_post(c, msg); | ||
678 | |||
679 | osm_debug("Issued %0x%p\n", SCpnt); | ||
680 | |||
681 | return 0; | ||
682 | |||
683 | nomem: | ||
684 | rc = -ENOMEM; | ||
685 | i2o_msg_nop(c, msg); | ||
686 | |||
687 | exit: | ||
688 | return rc; | ||
689 | } | ||
690 | |||
691 | static DEF_SCSI_QCMD(i2o_scsi_queuecommand) | ||
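/*
 * Editor's sketch, not part of the original file: how the scsi_flags word
 * assembled in i2o_scsi_queuecommand_lck() fits together.  The constants
 * are the ones used above; the helper name is hypothetical.
 */
static inline u32 i2o_scsi_build_flags(int dma_dir, u8 cmd_len)
{
	/* ENABLE_DISCONNECT | SIMPLE_TAG | sense data in reply frame */
	u32 flags = 0x20a00000;

	if (dma_dir == PCI_DMA_TODEVICE)
		flags |= 0x80000000;	/* DATA OUT (iop --> dev) */
	else if (dma_dir == PCI_DMA_FROMDEVICE)
		flags |= 0x40000000;	/* DATA IN  (iop <-- dev) */

	return flags | cmd_len;		/* CDB length goes in the low byte */
}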
692 | |||
693 | /** | ||
694 | * i2o_scsi_abort - abort a running command | ||
695 | * @SCpnt: command to abort | ||
696 | * | ||
697 | * Ask the I2O controller to abort a command. This is an asynchronous | ||
698 | * process and our callback handler will see the command complete with an | ||
699 | * aborted message if it succeeds. | ||
700 | * | ||
701 | * Returns SUCCESS if the command is successfully aborted, FAILED otherwise, | ||
702 | * or SCSI_MLQUEUE_HOST_BUSY if no message frame could be obtained. | ||
703 | */ | ||
704 | static int i2o_scsi_abort(struct scsi_cmnd *SCpnt) | ||
705 | { | ||
706 | struct i2o_device *i2o_dev; | ||
707 | struct i2o_controller *c; | ||
708 | struct i2o_message *msg; | ||
709 | int tid; | ||
710 | int status = FAILED; | ||
711 | |||
712 | osm_warn("Aborting command block.\n"); | ||
713 | |||
714 | i2o_dev = SCpnt->device->hostdata; | ||
715 | c = i2o_dev->iop; | ||
716 | tid = i2o_dev->lct_data.tid; | ||
717 | |||
718 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
719 | if (IS_ERR(msg)) | ||
720 | return SCSI_MLQUEUE_HOST_BUSY; | ||
721 | |||
722 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
723 | msg->u.head[1] = | ||
724 | cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid); | ||
725 | msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt)); | ||
726 | |||
727 | if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT)) | ||
728 | status = SUCCESS; | ||
729 | |||
730 | return status; | ||
731 | } | ||
732 | |||
733 | /** | ||
734 | * i2o_scsi_bios_param - Invent disk geometry | ||
735 | * @sdev: scsi device | ||
736 | * @dev: block layer device | ||
737 | * @capacity: size in sectors | ||
738 | * @ip: geometry array | ||
739 | * | ||
740 | * This is anyone's guess quite frankly. We use the same rules everyone | ||
741 | * else appears to and hope. It seems to work. | ||
742 | */ | ||
743 | |||
744 | static int i2o_scsi_bios_param(struct scsi_device *sdev, | ||
745 | struct block_device *dev, sector_t capacity, | ||
746 | int *ip) | ||
747 | { | ||
748 | int size; | ||
749 | |||
750 | size = capacity; | ||
751 | ip[0] = 64; /* heads */ | ||
752 | ip[1] = 32; /* sectors */ | ||
753 | if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */ | ||
754 | ip[0] = 255; /* heads */ | ||
755 | ip[1] = 63; /* sectors */ | ||
756 | ip[2] = size / (255 * 63); /* cylinders */ | ||
757 | } | ||
758 | return 0; | ||
759 | } | ||
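/*
 * Editor's note, not part of the original file: a worked example of the
 * heuristic above.  With 64 heads and 32 sectors a cylinder is 2048
 * sectors, so ip[2] = size >> 11.  For an 8388608-sector (4 GiB) disk
 * that gives 4096 cylinders, which is > 1024, so the large-disk fallback
 * applies instead: 255 heads, 63 sectors, 8388608 / (255 * 63) = 522
 * cylinders.
 */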
760 | |||
761 | static struct scsi_host_template i2o_scsi_host_template = { | ||
762 | .proc_name = OSM_NAME, | ||
763 | .name = OSM_DESCRIPTION, | ||
764 | .info = i2o_scsi_info, | ||
765 | .queuecommand = i2o_scsi_queuecommand, | ||
766 | .eh_abort_handler = i2o_scsi_abort, | ||
767 | .bios_param = i2o_scsi_bios_param, | ||
768 | .can_queue = I2O_SCSI_CAN_QUEUE, | ||
769 | .sg_tablesize = 8, | ||
770 | .cmd_per_lun = 6, | ||
771 | .use_clustering = ENABLE_CLUSTERING, | ||
772 | }; | ||
773 | |||
774 | /** | ||
775 | * i2o_scsi_init - SCSI OSM initialization function | ||
776 | * | ||
777 | * Register SCSI OSM into I2O core. | ||
778 | * | ||
779 | * Returns 0 on success or negative error code on failure. | ||
780 | */ | ||
781 | static int __init i2o_scsi_init(void) | ||
782 | { | ||
783 | int rc; | ||
784 | |||
785 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
786 | |||
787 | /* Register SCSI OSM into I2O core */ | ||
788 | rc = i2o_driver_register(&i2o_scsi_driver); | ||
789 | if (rc) { | ||
790 | osm_err("Could not register SCSI driver\n"); | ||
791 | return rc; | ||
792 | } | ||
793 | |||
794 | return 0; | ||
795 | }; | ||
796 | |||
797 | /** | ||
798 | * i2o_scsi_exit - SCSI OSM exit function | ||
799 | * | ||
800 | * Unregisters SCSI OSM from I2O core. | ||
801 | */ | ||
802 | static void __exit i2o_scsi_exit(void) | ||
803 | { | ||
804 | /* Unregister I2O SCSI OSM from I2O core */ | ||
805 | i2o_driver_unregister(&i2o_scsi_driver); | ||
806 | }; | ||
807 | |||
808 | MODULE_AUTHOR("Red Hat Software"); | ||
809 | MODULE_LICENSE("GPL"); | ||
810 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
811 | MODULE_VERSION(OSM_VERSION); | ||
812 | |||
813 | module_init(i2o_scsi_init); | ||
814 | module_exit(i2o_scsi_exit); | ||
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c deleted file mode 100644 index 92752fb5b2d3..000000000000 --- a/drivers/message/i2o/iop.c +++ /dev/null | |||
@@ -1,1247 +0,0 @@ | |||
1 | /* | ||
2 | * Functions to handle I2O controllers and I2O message handling | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the | ||
14 | * Red Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@lxorguk.ukuu.org.uk>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | */ | ||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/i2o.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include "core.h" | ||
34 | |||
35 | #define OSM_NAME "i2o" | ||
36 | #define OSM_VERSION "1.325" | ||
37 | #define OSM_DESCRIPTION "I2O subsystem" | ||
38 | |||
39 | /* global I2O controller list */ | ||
40 | LIST_HEAD(i2o_controllers); | ||
41 | |||
42 | /* | ||
43 | * global I2O System Table. Contains information about all the IOPs in the | ||
44 | * system. Used to inform IOPs about each other's existence. | ||
45 | */ | ||
46 | static struct i2o_dma i2o_systab; | ||
47 | |||
48 | static int i2o_hrt_get(struct i2o_controller *c); | ||
49 | |||
50 | /** | ||
51 | * i2o_msg_get_wait - obtain an I2O message from the IOP | ||
52 | * @c: I2O controller | ||
53 | * @wait: how long to wait until timeout | ||
54 | * | ||
55 | * This function waits up to wait seconds for a message slot to be | ||
56 | * available. | ||
57 | * | ||
58 | * On success the message is returned. The returned message is the | ||
59 | * physical page frame offset address from the read port (see the I2O | ||
60 | * spec). If no message becomes available before the timeout expires, | ||
61 | * ERR_PTR(-ETIMEDOUT) is returned instead. | ||
62 | */ | ||
63 | struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait) | ||
64 | { | ||
65 | unsigned long timeout = jiffies + wait * HZ; | ||
66 | struct i2o_message *msg; | ||
67 | |||
68 | while (IS_ERR(msg = i2o_msg_get(c))) { | ||
69 | if (time_after(jiffies, timeout)) { | ||
70 | osm_debug("%s: Timeout waiting for message frame.\n", | ||
71 | c->name); | ||
72 | return ERR_PTR(-ETIMEDOUT); | ||
73 | } | ||
74 | schedule_timeout_uninterruptible(1); | ||
75 | } | ||
76 | |||
77 | return msg; | ||
78 | }; | ||
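/*
 * Editor's sketch, not part of the original file: the calling convention
 * used throughout this file for i2o_msg_get_wait().  Callers must check
 * for an ERR_PTR() and either post the frame or hand it back with
 * i2o_msg_nop().  The function name is hypothetical.
 */
static int i2o_example_send_nop(struct i2o_controller *c)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* typically -ETIMEDOUT */

	i2o_msg_nop(c, msg);		/* return the frame unused */
	return 0;
}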
79 | |||
80 | #if BITS_PER_LONG == 64 | ||
81 | /** | ||
82 | * i2o_cntxt_list_add - Append a pointer to the context list and return an id | ||
83 | * @c: controller to which the context list belongs | ||
84 | * @ptr: pointer to add to the context list | ||
85 | * | ||
86 | * Because the context field in I2O is only 32 bits wide, on 64-bit a | ||
87 | * pointer is too large to fit in the context field. The i2o_cntxt_list | ||
88 | * functions therefore map pointers to context fields. | ||
89 | * | ||
90 | * Returns context id > 0 on success or 0 on failure. | ||
91 | */ | ||
92 | u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) | ||
93 | { | ||
94 | struct i2o_context_list_element *entry; | ||
95 | unsigned long flags; | ||
96 | |||
97 | if (!ptr) | ||
98 | osm_err("%s: couldn't add NULL pointer to context list!\n", | ||
99 | c->name); | ||
100 | |||
101 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); | ||
102 | if (!entry) { | ||
103 | osm_err("%s: Could not allocate memory for context list element" | ||
104 | "\n", c->name); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | entry->ptr = ptr; | ||
109 | entry->timestamp = jiffies; | ||
110 | INIT_LIST_HEAD(&entry->list); | ||
111 | |||
112 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
113 | |||
114 | if (unlikely(atomic_inc_and_test(&c->context_list_counter))) | ||
115 | atomic_inc(&c->context_list_counter); | ||
116 | |||
117 | entry->context = atomic_read(&c->context_list_counter); | ||
118 | |||
119 | list_add(&entry->list, &c->context_list); | ||
120 | |||
121 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
122 | |||
123 | osm_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context); | ||
124 | |||
125 | return entry->context; | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * i2o_cntxt_list_remove - Remove a pointer from the context list | ||
130 | * @c: controller to which the context list belongs | ||
131 | * @ptr: pointer which should be removed from the context list | ||
132 | * | ||
133 | * Removes a previously added pointer from the context list and returns | ||
134 | * the matching context id. | ||
135 | * | ||
136 | * Returns context id on success or 0 on failure. | ||
137 | */ | ||
138 | u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr) | ||
139 | { | ||
140 | struct i2o_context_list_element *entry; | ||
141 | u32 context = 0; | ||
142 | unsigned long flags; | ||
143 | |||
144 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
145 | list_for_each_entry(entry, &c->context_list, list) | ||
146 | if (entry->ptr == ptr) { | ||
147 | list_del(&entry->list); | ||
148 | context = entry->context; | ||
149 | kfree(entry); | ||
150 | break; | ||
151 | } | ||
152 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
153 | |||
154 | if (!context) | ||
155 | osm_warn("%s: Could not remove nonexistent ptr %p\n", c->name, | ||
156 | ptr); | ||
157 | |||
158 | osm_debug("%s: remove ptr from context list %d -> %p\n", c->name, | ||
159 | context, ptr); | ||
160 | |||
161 | return context; | ||
162 | }; | ||
163 | |||
164 | /** | ||
165 | * i2o_cntxt_list_get - Get a pointer from the context list and remove it | ||
166 | * @c: controller to which the context list belongs | ||
167 | * @context: context id to which the pointer belongs | ||
168 | * | ||
169 | * Returns pointer to the matching context id on success or NULL on | ||
170 | * failure. | ||
171 | */ | ||
172 | void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) | ||
173 | { | ||
174 | struct i2o_context_list_element *entry; | ||
175 | unsigned long flags; | ||
176 | void *ptr = NULL; | ||
177 | |||
178 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
179 | list_for_each_entry(entry, &c->context_list, list) | ||
180 | if (entry->context == context) { | ||
181 | list_del(&entry->list); | ||
182 | ptr = entry->ptr; | ||
183 | kfree(entry); | ||
184 | break; | ||
185 | } | ||
186 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
187 | |||
188 | if (!ptr) | ||
189 | osm_warn("%s: context id %d not found\n", c->name, context); | ||
190 | |||
191 | osm_debug("%s: get ptr from context list %d -> %p\n", c->name, context, | ||
192 | ptr); | ||
193 | |||
194 | return ptr; | ||
195 | }; | ||
196 | |||
197 | /** | ||
198 | * i2o_cntxt_list_get_ptr - Get a context id from the context list | ||
199 | * @c: controller to which the context list belongs | ||
200 | * @ptr: pointer for which the context id should be fetched | ||
201 | * | ||
202 | * Returns context id which matches to the pointer on success or 0 on | ||
203 | * failure. | ||
204 | */ | ||
205 | u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr) | ||
206 | { | ||
207 | struct i2o_context_list_element *entry; | ||
208 | u32 context = 0; | ||
209 | unsigned long flags; | ||
210 | |||
211 | spin_lock_irqsave(&c->context_list_lock, flags); | ||
212 | list_for_each_entry(entry, &c->context_list, list) | ||
213 | if (entry->ptr == ptr) { | ||
214 | context = entry->context; | ||
215 | break; | ||
216 | } | ||
217 | spin_unlock_irqrestore(&c->context_list_lock, flags); | ||
218 | |||
219 | if (!context) | ||
220 | osm_warn("%s: Could not find nonexistent ptr %p\n", c->name, | ||
221 | ptr); | ||
222 | |||
223 | osm_debug("%s: get context id from context list %p -> %d\n", c->name, | ||
224 | ptr, context); | ||
225 | |||
226 | return context; | ||
227 | }; | ||
228 | #endif | ||
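/*
 * Editor's sketch, not part of the original file: the round trip the
 * context-list helpers above provide on 64-bit builds.  The 32-bit id
 * travels in a message's tcntxt field and is translated back to the
 * original pointer in the reply handler (see i2o_scsi_reply()).  The
 * function name is hypothetical.
 */
#if BITS_PER_LONG == 64
static void i2o_cntxt_example(struct i2o_controller *c, void *ptr)
{
	u32 id = i2o_cntxt_list_add(c, ptr);	/* 0 means allocation failed */

	if (id)
		WARN_ON(i2o_cntxt_list_get(c, id) != ptr);
}
#endif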
229 | |||
230 | /** | ||
231 | * i2o_find_iop - Find an I2O controller by id | ||
232 | * @unit: unit number of the I2O controller to search for | ||
233 | * | ||
234 | * Lookup the I2O controller on the controller list. | ||
235 | * | ||
236 | * Returns pointer to the I2O controller on success or NULL if not found. | ||
237 | */ | ||
238 | struct i2o_controller *i2o_find_iop(int unit) | ||
239 | { | ||
240 | struct i2o_controller *c; | ||
241 | |||
242 | list_for_each_entry(c, &i2o_controllers, list) { | ||
243 | if (c->unit == unit) | ||
244 | return c; | ||
245 | } | ||
246 | |||
247 | return NULL; | ||
248 | }; | ||
249 | |||
250 | /** | ||
251 | * i2o_iop_find_device - Find an I2O device on an I2O controller | ||
252 | * @c: I2O controller on which the I2O device hangs | ||
253 | * @tid: TID of the I2O device to search for | ||
254 | * | ||
255 | * Searches the devices of the I2O controller for a device with TID tid and | ||
256 | * returns it. | ||
257 | * | ||
258 | * Returns a pointer to the I2O device if found, otherwise NULL. | ||
259 | */ | ||
260 | struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid) | ||
261 | { | ||
262 | struct i2o_device *dev; | ||
263 | |||
264 | list_for_each_entry(dev, &c->devices, list) | ||
265 | if (dev->lct_data.tid == tid) | ||
266 | return dev; | ||
267 | |||
268 | return NULL; | ||
269 | }; | ||
270 | |||
271 | /** | ||
272 | * i2o_iop_quiesce - quiesce controller | ||
273 | * @c: controller | ||
274 | * | ||
275 | * Quiesce an IOP. Causes IOP to make external operation quiescent | ||
276 | * (i2o 'READY' state). Internal operation of the IOP continues normally. | ||
277 | * | ||
278 | * Returns 0 on success or negative error code on failure. | ||
279 | */ | ||
280 | static int i2o_iop_quiesce(struct i2o_controller *c) | ||
281 | { | ||
282 | struct i2o_message *msg; | ||
283 | i2o_status_block *sb = c->status_block.virt; | ||
284 | int rc; | ||
285 | |||
286 | i2o_status_get(c); | ||
287 | |||
288 | /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ | ||
289 | if ((sb->iop_state != ADAPTER_STATE_READY) && | ||
290 | (sb->iop_state != ADAPTER_STATE_OPERATIONAL)) | ||
291 | return 0; | ||
292 | |||
293 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
294 | if (IS_ERR(msg)) | ||
295 | return PTR_ERR(msg); | ||
296 | |||
297 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
298 | msg->u.head[1] = | ||
299 | cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | | ||
300 | ADAPTER_TID); | ||
301 | |||
302 | /* Long timeout needed for quiesce if lots of devices */ | ||
303 | if ((rc = i2o_msg_post_wait(c, msg, 240))) | ||
304 | osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); | ||
305 | else | ||
306 | osm_debug("%s: Quiesced.\n", c->name); | ||
307 | |||
308 | i2o_status_get(c); // Entered READY state | ||
309 | |||
310 | return rc; | ||
311 | }; | ||
312 | |||
313 | /** | ||
314 | * i2o_iop_enable - move controller from ready to OPERATIONAL | ||
315 | * @c: I2O controller | ||
316 | * | ||
317 | * Enable IOP. This allows the IOP to resume external operations and | ||
318 | * reverses the effect of a quiesce. Returns zero or an error code if | ||
319 | * an error occurs. | ||
320 | */ | ||
321 | static int i2o_iop_enable(struct i2o_controller *c) | ||
322 | { | ||
323 | struct i2o_message *msg; | ||
324 | i2o_status_block *sb = c->status_block.virt; | ||
325 | int rc; | ||
326 | |||
327 | i2o_status_get(c); | ||
328 | |||
329 | /* Enable only allowed on READY state */ | ||
330 | if (sb->iop_state != ADAPTER_STATE_READY) | ||
331 | return -EINVAL; | ||
332 | |||
333 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
334 | if (IS_ERR(msg)) | ||
335 | return PTR_ERR(msg); | ||
336 | |||
337 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
338 | msg->u.head[1] = | ||
339 | cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | | ||
340 | ADAPTER_TID); | ||
341 | |||
342 | /* How long of a timeout do we need? */ | ||
343 | if ((rc = i2o_msg_post_wait(c, msg, 240))) | ||
344 | osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); | ||
345 | else | ||
346 | osm_debug("%s: Enabled.\n", c->name); | ||
347 | |||
348 | i2o_status_get(c); // entered OPERATIONAL state | ||
349 | |||
350 | return rc; | ||
351 | }; | ||
352 | |||
353 | /** | ||
354 | * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system | ||
355 | * | ||
356 | * Quiesce all I2O controllers which are connected to the system. | ||
357 | */ | ||
358 | static inline void i2o_iop_quiesce_all(void) | ||
359 | { | ||
360 | struct i2o_controller *c, *tmp; | ||
361 | |||
362 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { | ||
363 | if (!c->no_quiesce) | ||
364 | i2o_iop_quiesce(c); | ||
365 | } | ||
366 | }; | ||
367 | |||
368 | /** | ||
369 | * i2o_iop_enable_all - Enables all controllers on the system | ||
370 | * | ||
371 | * Enables all I2O controllers which are connected to the system. | ||
372 | */ | ||
373 | static inline void i2o_iop_enable_all(void) | ||
374 | { | ||
375 | struct i2o_controller *c, *tmp; | ||
376 | |||
377 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) | ||
378 | i2o_iop_enable(c); | ||
379 | }; | ||
380 | |||
381 | /** | ||
382 | * i2o_iop_clear - Bring I2O controller into HOLD state | ||
383 | * @c: controller | ||
384 | * | ||
385 | * Clear an IOP to HOLD state, ie. terminate external operations, clear all | ||
386 | * input queues and prepare for a system restart. IOP's internal operation | ||
387 | * continues normally and the outbound queue is alive. The IOP is not | ||
388 | * expected to rebuild its LCT. | ||
389 | * | ||
390 | * Returns 0 on success or negative error code on failure. | ||
391 | */ | ||
392 | static int i2o_iop_clear(struct i2o_controller *c) | ||
393 | { | ||
394 | struct i2o_message *msg; | ||
395 | int rc; | ||
396 | |||
397 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
398 | if (IS_ERR(msg)) | ||
399 | return PTR_ERR(msg); | ||
400 | |||
401 | /* Quiesce all IOPs first */ | ||
402 | i2o_iop_quiesce_all(); | ||
403 | |||
404 | msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
405 | msg->u.head[1] = | ||
406 | cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | | ||
407 | ADAPTER_TID); | ||
408 | |||
409 | if ((rc = i2o_msg_post_wait(c, msg, 30))) | ||
410 | osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); | ||
411 | else | ||
412 | osm_debug("%s: Cleared.\n", c->name); | ||
413 | |||
414 | /* Enable all IOPs */ | ||
415 | i2o_iop_enable_all(); | ||
416 | |||
417 | return rc; | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * i2o_iop_init_outbound_queue - setup the outbound message queue | ||
422 | * @c: I2O controller | ||
423 | * | ||
424 | * Clear and (re)initialize IOP's outbound queue and post the message | ||
425 | * frames to the IOP. | ||
426 | * | ||
427 | * Returns 0 on success or negative error code on failure. | ||
428 | */ | ||
429 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | ||
430 | { | ||
431 | u32 m; | ||
432 | volatile u8 *status = c->status.virt; | ||
433 | struct i2o_message *msg; | ||
434 | ulong timeout; | ||
435 | int i; | ||
436 | |||
437 | osm_debug("%s: Initializing Outbound Queue...\n", c->name); | ||
438 | |||
439 | memset(c->status.virt, 0, 4); | ||
440 | |||
441 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
442 | if (IS_ERR(msg)) | ||
443 | return PTR_ERR(msg); | ||
444 | |||
445 | msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6); | ||
446 | msg->u.head[1] = | ||
447 | cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | | ||
448 | ADAPTER_TID); | ||
449 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); | ||
450 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); | ||
451 | msg->body[0] = cpu_to_le32(PAGE_SIZE); | ||
452 | /* Outbound msg frame size in words and Initcode */ | ||
453 | msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80); | ||
454 | msg->body[2] = cpu_to_le32(0xd0000004); | ||
455 | msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys)); | ||
456 | msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys)); | ||
457 | |||
458 | i2o_msg_post(c, msg); | ||
459 | |||
460 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; | ||
461 | while (*status <= I2O_CMD_IN_PROGRESS) { | ||
462 | if (time_after(jiffies, timeout)) { | ||
463 | osm_warn("%s: Timeout Initializing\n", c->name); | ||
464 | return -ETIMEDOUT; | ||
465 | } | ||
466 | schedule_timeout_uninterruptible(1); | ||
467 | } | ||
468 | |||
469 | m = c->out_queue.phys; | ||
470 | |||
471 | /* Post frames */ | ||
472 | for (i = 0; i < I2O_MAX_OUTBOUND_MSG_FRAMES; i++) { | ||
473 | i2o_flush_reply(c, m); | ||
474 | udelay(1); /* Promise */ | ||
475 | m += I2O_OUTBOUND_MSG_FRAME_SIZE * sizeof(u32); | ||
476 | } | ||
477 | |||
478 | return 0; | ||
479 | } | ||
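/*
 * Editor's note, not part of the original file: the posting loop above
 * seeds the IOP's outbound FIFO with every frame in c->out_queue.
 * Frames are I2O_OUTBOUND_MSG_FRAME_SIZE 32-bit words apart, so the
 * physical address handed to i2o_flush_reply() advances by
 * I2O_OUTBOUND_MSG_FRAME_SIZE * sizeof(u32) bytes per iteration.
 */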
480 | |||
481 | /** | ||
482 | * i2o_iop_reset - reset an I2O controller | ||
483 | * @c: controller to reset | ||
484 | * | ||
485 | * Reset the IOP into INIT state and wait until IOP gets into RESET state. | ||
486 | * Terminate all external operations, clear IOP's inbound and outbound | ||
487 | * queues, terminate all DDMs, and reload the IOP's operating environment | ||
488 | * and all local DDMs. The IOP rebuilds its LCT. | ||
489 | */ | ||
490 | static int i2o_iop_reset(struct i2o_controller *c) | ||
491 | { | ||
492 | volatile u8 *status = c->status.virt; | ||
493 | struct i2o_message *msg; | ||
494 | unsigned long timeout; | ||
495 | i2o_status_block *sb = c->status_block.virt; | ||
496 | int rc = 0; | ||
497 | |||
498 | osm_debug("%s: Resetting controller\n", c->name); | ||
499 | |||
500 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
501 | if (IS_ERR(msg)) | ||
502 | return PTR_ERR(msg); | ||
503 | |||
504 | memset(c->status_block.virt, 0, 8); | ||
505 | |||
506 | /* Quiesce all IOPs first */ | ||
507 | i2o_iop_quiesce_all(); | ||
508 | |||
509 | msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
510 | msg->u.head[1] = | ||
511 | cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | | ||
512 | ADAPTER_TID); | ||
513 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); | ||
514 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); | ||
515 | msg->body[0] = cpu_to_le32(0x00000000); | ||
516 | msg->body[1] = cpu_to_le32(0x00000000); | ||
517 | msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys)); | ||
518 | msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys)); | ||
519 | |||
520 | i2o_msg_post(c, msg); | ||
521 | |||
522 | /* Wait for a reply */ | ||
523 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; | ||
524 | while (!*status) { | ||
525 | if (time_after(jiffies, timeout)) | ||
526 | break; | ||
527 | |||
528 | schedule_timeout_uninterruptible(1); | ||
529 | } | ||
530 | |||
531 | switch (*status) { | ||
532 | case I2O_CMD_REJECTED: | ||
533 | osm_warn("%s: IOP reset rejected\n", c->name); | ||
534 | rc = -EPERM; | ||
535 | break; | ||
536 | |||
537 | case I2O_CMD_IN_PROGRESS: | ||
538 | /* | ||
539 | * Once the reset is sent, the IOP goes into the INIT state | ||
540 | * which is indeterminate. We need to wait until the IOP has | ||
541 | * rebooted before we can let the system talk to it. We read | ||
542 | * the inbound Free_List until a message is available. If we | ||
543 | * can't read one in the given amount of time, we assume the | ||
544 | * IOP could not reboot properly. | ||
545 | */ | ||
546 | osm_debug("%s: Reset in progress, waiting for reboot...\n", | ||
547 | c->name); | ||
548 | |||
549 | while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) { | ||
550 | if (time_after(jiffies, timeout)) { | ||
551 | osm_err("%s: IOP reset timeout.\n", c->name); | ||
552 | rc = PTR_ERR(msg); | ||
553 | goto exit; | ||
554 | } | ||
555 | schedule_timeout_uninterruptible(1); | ||
556 | } | ||
557 | i2o_msg_nop(c, msg); | ||
558 | |||
559 | /* from here all quiesce commands are safe */ | ||
560 | c->no_quiesce = 0; | ||
561 | |||
562 | /* verify if controller is in state RESET */ | ||
563 | i2o_status_get(c); | ||
564 | |||
565 | if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET)) | ||
566 | osm_warn("%s: reset completed, but adapter not in RESET" | ||
567 | " state.\n", c->name); | ||
568 | else | ||
569 | osm_debug("%s: reset completed.\n", c->name); | ||
570 | |||
571 | break; | ||
572 | |||
573 | default: | ||
574 | osm_err("%s: IOP reset timeout.\n", c->name); | ||
575 | rc = -ETIMEDOUT; | ||
576 | break; | ||
577 | } | ||
578 | |||
579 | exit: | ||
580 | /* Enable all IOPs */ | ||
581 | i2o_iop_enable_all(); | ||
582 | |||
583 | return rc; | ||
584 | }; | ||
585 | |||
586 | /** | ||
587 | * i2o_iop_activate - Bring controller up to HOLD | ||
588 | * @c: controller | ||
589 | * | ||
590 | * This function brings an I2O controller into HOLD state. The adapter | ||
591 | * is reset if necessary and then the queues and resource table are read. | ||
592 | * | ||
593 | * Returns 0 on success or negative error code on failure. | ||
594 | */ | ||
595 | static int i2o_iop_activate(struct i2o_controller *c) | ||
596 | { | ||
597 | i2o_status_block *sb = c->status_block.virt; | ||
598 | int rc; | ||
599 | int state; | ||
600 | |||
601 | /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ | ||
602 | /* In READY state, Get status */ | ||
603 | |||
604 | rc = i2o_status_get(c); | ||
605 | if (rc) { | ||
606 | osm_info("%s: Unable to obtain status, attempting a reset.\n", | ||
607 | c->name); | ||
608 | rc = i2o_iop_reset(c); | ||
609 | if (rc) | ||
610 | return rc; | ||
611 | } | ||
612 | |||
613 | if (sb->i2o_version > I2OVER15) { | ||
614 | osm_err("%s: Not running version 1.5 of the I2O Specification." | ||
615 | "\n", c->name); | ||
616 | return -ENODEV; | ||
617 | } | ||
618 | |||
619 | switch (sb->iop_state) { | ||
620 | case ADAPTER_STATE_FAULTED: | ||
621 | osm_err("%s: hardware fault\n", c->name); | ||
622 | return -EFAULT; | ||
623 | |||
624 | case ADAPTER_STATE_READY: | ||
625 | case ADAPTER_STATE_OPERATIONAL: | ||
626 | case ADAPTER_STATE_HOLD: | ||
627 | case ADAPTER_STATE_FAILED: | ||
628 | osm_debug("%s: already running, trying to reset...\n", c->name); | ||
629 | rc = i2o_iop_reset(c); | ||
630 | if (rc) | ||
631 | return rc; | ||
632 | } | ||
633 | |||
634 | /* preserve state */ | ||
635 | state = sb->iop_state; | ||
636 | |||
637 | rc = i2o_iop_init_outbound_queue(c); | ||
638 | if (rc) | ||
639 | return rc; | ||
640 | |||
641 | /* if adapter was not in RESET state clear now */ | ||
642 | if (state != ADAPTER_STATE_RESET) | ||
643 | i2o_iop_clear(c); | ||
644 | |||
645 | i2o_status_get(c); | ||
646 | |||
647 | if (sb->iop_state != ADAPTER_STATE_HOLD) { | ||
648 | osm_err("%s: failed to bring IOP into HOLD state\n", c->name); | ||
649 | return -EIO; | ||
650 | } | ||
651 | |||
652 | return i2o_hrt_get(c); | ||
653 | }; | ||
654 | |||
655 | static void i2o_res_alloc(struct i2o_controller *c, unsigned long flags) | ||
656 | { | ||
657 | i2o_status_block *sb = c->status_block.virt; | ||
658 | struct resource *res = &c->mem_resource; | ||
659 | resource_size_t size, align; | ||
660 | int err; | ||
661 | |||
662 | res->name = c->pdev->bus->name; | ||
663 | res->flags = flags; | ||
664 | res->start = 0; | ||
665 | res->end = 0; | ||
666 | osm_info("%s: requires private memory resources.\n", c->name); | ||
667 | |||
668 | if (flags & IORESOURCE_MEM) { | ||
669 | size = sb->desired_mem_size; | ||
670 | align = 1 << 20; /* unspecified, use 1Mb and play safe */ | ||
671 | } else { | ||
672 | size = sb->desired_io_size; | ||
673 | align = 1 << 12; /* unspecified, use 4Kb and play safe */ | ||
674 | } | ||
675 | |||
676 | err = pci_bus_alloc_resource(c->pdev->bus, res, size, align, 0, 0, | ||
677 | NULL, NULL); | ||
678 | if (err < 0) | ||
679 | return; | ||
680 | |||
681 | if (flags & IORESOURCE_MEM) { | ||
682 | c->mem_alloc = 1; | ||
683 | sb->current_mem_size = resource_size(res); | ||
684 | sb->current_mem_base = res->start; | ||
685 | } else if (flags & IORESOURCE_IO) { | ||
686 | c->io_alloc = 1; | ||
687 | sb->current_io_size = resource_size(res); | ||
688 | sb->current_io_base = res->start; | ||
689 | } | ||
690 | osm_info("%s: allocated PCI space %pR\n", c->name, res); | ||
691 | } | ||
692 | |||
693 | /** | ||
694 | * i2o_iop_systab_set - Set the I2O System Table of the specified IOP | ||
695 | * @c: I2O controller to which the system table should be sent | ||
696 | * | ||
697 | * Before the systab can be set, i2o_systab_build() must be called. | ||
698 | * | ||
699 | * Returns 0 on success or negative error code on failure. | ||
700 | */ | ||
701 | static int i2o_iop_systab_set(struct i2o_controller *c) | ||
702 | { | ||
703 | struct i2o_message *msg; | ||
704 | i2o_status_block *sb = c->status_block.virt; | ||
705 | struct device *dev = &c->pdev->dev; | ||
706 | int rc; | ||
707 | |||
708 | if (sb->current_mem_size < sb->desired_mem_size) | ||
709 | i2o_res_alloc(c, IORESOURCE_MEM); | ||
710 | |||
711 | if (sb->current_io_size < sb->desired_io_size) | ||
712 | i2o_res_alloc(c, IORESOURCE_IO); | ||
713 | |||
714 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
715 | if (IS_ERR(msg)) | ||
716 | return PTR_ERR(msg); | ||
717 | |||
718 | i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len, | ||
719 | PCI_DMA_TODEVICE); | ||
720 | if (!i2o_systab.phys) { | ||
721 | i2o_msg_nop(c, msg); | ||
722 | return -ENOMEM; | ||
723 | } | ||
724 | |||
725 | msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6); | ||
726 | msg->u.head[1] = | ||
727 | cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | | ||
728 | ADAPTER_TID); | ||
729 | |||
730 | /* | ||
731 | * Provide three SGL-elements: | ||
732 | * System table (SysTab), Private memory space declaration and | ||
733 | * Private i/o space declaration | ||
734 | */ | ||
735 | |||
736 | msg->body[0] = cpu_to_le32(c->unit + 2); | ||
737 | msg->body[1] = cpu_to_le32(0x00000000); | ||
738 | msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len); | ||
739 | msg->body[3] = cpu_to_le32(i2o_systab.phys); | ||
740 | msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size); | ||
741 | msg->body[5] = cpu_to_le32(sb->current_mem_base); | ||
742 | msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size); | ||
743 | msg->body[7] = cpu_to_le32(sb->current_io_base); | ||
744 | |||
745 | rc = i2o_msg_post_wait(c, msg, 120); | ||
746 | |||
747 | dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len, | ||
748 | PCI_DMA_TODEVICE); | ||
749 | |||
750 | if (rc < 0) | ||
751 | osm_err("%s: Unable to set SysTab (status=%#x).\n", c->name, | ||
752 | -rc); | ||
753 | else | ||
754 | osm_debug("%s: SysTab set.\n", c->name); | ||
755 | |||
756 | return rc; | ||
757 | } | ||
758 | |||
759 | /** | ||
760 | * i2o_iop_online - Bring a controller online into OPERATIONAL state. | ||
761 | * @c: I2O controller | ||
762 | * | ||
763 | * Send the system table and enable the I2O controller. | ||
764 | * | ||
765 | * Returns 0 on success or negative error code on failure. | ||
766 | */ | ||
767 | static int i2o_iop_online(struct i2o_controller *c) | ||
768 | { | ||
769 | int rc; | ||
770 | |||
771 | rc = i2o_iop_systab_set(c); | ||
772 | if (rc) | ||
773 | return rc; | ||
774 | |||
775 | /* In READY state */ | ||
776 | osm_debug("%s: Attempting to enable...\n", c->name); | ||
777 | rc = i2o_iop_enable(c); | ||
778 | if (rc) | ||
779 | return rc; | ||
780 | |||
781 | return 0; | ||
782 | }; | ||
783 | |||
784 | /** | ||
785 | * i2o_iop_remove - Remove the I2O controller from the I2O core | ||
786 | * @c: I2O controller | ||
787 | * | ||
788 | * Remove the I2O controller from the I2O core. If devices are attached to | ||
789 | * the controller, remove them as well and finally reset the controller. | ||
790 | */ | ||
791 | void i2o_iop_remove(struct i2o_controller *c) | ||
792 | { | ||
793 | struct i2o_device *dev, *tmp; | ||
794 | |||
795 | osm_debug("%s: deleting controller\n", c->name); | ||
796 | |||
797 | i2o_driver_notify_controller_remove_all(c); | ||
798 | |||
799 | list_del(&c->list); | ||
800 | |||
801 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | ||
802 | i2o_device_remove(dev); | ||
803 | |||
804 | device_del(&c->device); | ||
805 | |||
806 | /* Ask the IOP to switch to RESET state */ | ||
807 | i2o_iop_reset(c); | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * i2o_systab_build - Build system table | ||
812 | * | ||
813 | * The system table contains information about all the IOPs in the system | ||
814 | * (duh) and is used by the Executives on the IOPs to establish peer2peer | ||
815 | * connections. We're not supporting peer2peer at the moment, but this | ||
816 | * will be needed down the road for things like lan2lan forwarding. | ||
817 | * | ||
818 | * Returns 0 on success or negative error code on failure. | ||
819 | */ | ||
820 | static int i2o_systab_build(void) | ||
821 | { | ||
822 | struct i2o_controller *c, *tmp; | ||
823 | int num_controllers = 0; | ||
824 | u32 change_ind = 0; | ||
825 | int count = 0; | ||
826 | struct i2o_sys_tbl *systab = i2o_systab.virt; | ||
827 | |||
828 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) | ||
829 | num_controllers++; | ||
830 | |||
831 | if (systab) { | ||
832 | change_ind = systab->change_ind; | ||
833 | kfree(i2o_systab.virt); | ||
834 | } | ||
835 | |||
836 | /* Header + IOPs */ | ||
837 | i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers * | ||
838 | sizeof(struct i2o_sys_tbl_entry); | ||
839 | |||
840 | systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL); | ||
841 | if (!systab) { | ||
842 | osm_err("unable to allocate memory for System Table\n"); | ||
843 | return -ENOMEM; | ||
844 | } | ||
845 | |||
846 | systab->version = I2OVERSION; | ||
847 | systab->change_ind = change_ind + 1; | ||
848 | |||
849 | list_for_each_entry_safe(c, tmp, &i2o_controllers, list) { | ||
850 | i2o_status_block *sb; | ||
851 | |||
852 | if (count >= num_controllers) { | ||
853 | osm_err("controller added while building system table" | ||
854 | "\n"); | ||
855 | break; | ||
856 | } | ||
857 | |||
858 | sb = c->status_block.virt; | ||
859 | |||
860 | /* | ||
861 | * Get updated IOP state so we have the latest information | ||
862 | * | ||
863 | * We should delete the controller at this point if it | ||
864 | * doesn't respond since if it's not on the system table | ||
865 | * it is technically not part of the I2O subsystem... | ||
866 | */ | ||
867 | if (unlikely(i2o_status_get(c))) { | ||
868 | osm_err("%s: Deleting b/c could not get status while " | ||
869 | "attempting to build system table\n", c->name); | ||
870 | i2o_iop_remove(c); | ||
871 | continue; // try the next one | ||
872 | } | ||
873 | |||
874 | systab->iops[count].org_id = sb->org_id; | ||
875 | systab->iops[count].iop_id = c->unit + 2; | ||
876 | systab->iops[count].seg_num = 0; | ||
877 | systab->iops[count].i2o_version = sb->i2o_version; | ||
878 | systab->iops[count].iop_state = sb->iop_state; | ||
879 | systab->iops[count].msg_type = sb->msg_type; | ||
880 | systab->iops[count].frame_size = sb->inbound_frame_size; | ||
881 | systab->iops[count].last_changed = change_ind; | ||
882 | systab->iops[count].iop_capabilities = sb->iop_capabilities; | ||
883 | systab->iops[count].inbound_low = | ||
884 | i2o_dma_low(c->base.phys + I2O_IN_PORT); | ||
885 | systab->iops[count].inbound_high = | ||
886 | i2o_dma_high(c->base.phys + I2O_IN_PORT); | ||
887 | |||
888 | count++; | ||
889 | } | ||
890 | |||
891 | systab->num_entries = count; | ||
892 | |||
893 | return 0; | ||
894 | }; | ||
895 | |||
896 | /** | ||
897 | * i2o_parse_hrt - Parse the hardware resource table. | ||
898 | * @c: I2O controller | ||
899 | * | ||
900 | * We don't do anything with it except dumping it (in debug mode). | ||
901 | * | ||
902 | * Returns 0. | ||
903 | */ | ||
904 | static int i2o_parse_hrt(struct i2o_controller *c) | ||
905 | { | ||
906 | i2o_dump_hrt(c); | ||
907 | return 0; | ||
908 | }; | ||
909 | |||
910 | /** | ||
911 | * i2o_status_get - Get the status block from the I2O controller | ||
912 | * @c: I2O controller | ||
913 | * | ||
914 | * Issue a status query on the controller. This updates the attached | ||
915 | * status block. The status block could then be accessed through | ||
916 | * c->status_block. | ||
917 | * | ||
918 | * Returns 0 on success or negative error code on failure. | ||
919 | */ | ||
920 | int i2o_status_get(struct i2o_controller *c) | ||
921 | { | ||
922 | struct i2o_message *msg; | ||
923 | volatile u8 *status_block; | ||
924 | unsigned long timeout; | ||
925 | |||
926 | status_block = (u8 *) c->status_block.virt; | ||
927 | memset(c->status_block.virt, 0, sizeof(i2o_status_block)); | ||
928 | |||
929 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
930 | if (IS_ERR(msg)) | ||
931 | return PTR_ERR(msg); | ||
932 | |||
933 | msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
934 | msg->u.head[1] = | ||
935 | cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | | ||
936 | ADAPTER_TID); | ||
937 | msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context); | ||
938 | msg->u.s.tcntxt = cpu_to_le32(0x00000000); | ||
939 | msg->body[0] = cpu_to_le32(0x00000000); | ||
940 | msg->body[1] = cpu_to_le32(0x00000000); | ||
941 | msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys)); | ||
942 | msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys)); | ||
943 | msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */ | ||
944 | |||
945 | i2o_msg_post(c, msg); | ||
946 | |||
947 | /* Wait for a reply */ | ||
948 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; | ||
949 | while (status_block[87] != 0xFF) { | ||
950 | if (time_after(jiffies, timeout)) { | ||
951 | osm_err("%s: Get status timeout.\n", c->name); | ||
952 | return -ETIMEDOUT; | ||
953 | } | ||
954 | |||
955 | schedule_timeout_uninterruptible(1); | ||
956 | } | ||
957 | |||
958 | #ifdef DEBUG | ||
959 | i2o_debug_state(c); | ||
960 | #endif | ||
961 | |||
962 | return 0; | ||
963 | } | ||
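/*
 * Editor's sketch, not part of the original file: how callers in this
 * file use i2o_status_get() to observe the IOP state machine before
 * issuing state-changing commands.  The function name is hypothetical.
 */
static bool i2o_example_iop_is_operational(struct i2o_controller *c)
{
	i2o_status_block *sb = c->status_block.virt;

	if (i2o_status_get(c))
		return false;		/* status query timed out */

	return sb->iop_state == ADAPTER_STATE_OPERATIONAL;
}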
964 | |||
965 | /** | ||
966 | * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller | ||
967 | * @c: I2O controller from which the HRT should be fetched | ||
968 | * | ||
969 | * The HRT contains information about possible hidden devices but is | ||
970 | * mostly useless to us. | ||
971 | * | ||
972 | * Returns 0 on success or negative error code on failure. | ||
973 | */ | ||
974 | static int i2o_hrt_get(struct i2o_controller *c) | ||
975 | { | ||
976 | int rc; | ||
977 | int i; | ||
978 | i2o_hrt *hrt = c->hrt.virt; | ||
979 | u32 size = sizeof(i2o_hrt); | ||
980 | struct device *dev = &c->pdev->dev; | ||
981 | |||
982 | for (i = 0; i < I2O_HRT_GET_TRIES; i++) { | ||
983 | struct i2o_message *msg; | ||
984 | |||
985 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
986 | if (IS_ERR(msg)) | ||
987 | return PTR_ERR(msg); | ||
988 | |||
989 | msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4); | ||
990 | msg->u.head[1] = | ||
991 | cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | | ||
992 | ADAPTER_TID); | ||
993 | msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len); | ||
994 | msg->body[1] = cpu_to_le32(c->hrt.phys); | ||
995 | |||
996 | rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt); | ||
997 | |||
998 | if (rc < 0) { | ||
999 | osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, | ||
1000 | -rc); | ||
1001 | return rc; | ||
1002 | } | ||
1003 | |||
1004 | size = hrt->num_entries * hrt->entry_len << 2; | ||
1005 | if (size > c->hrt.len) { | ||
1006 | if (i2o_dma_realloc(dev, &c->hrt, size)) | ||
1007 | return -ENOMEM; | ||
1008 | else | ||
1009 | hrt = c->hrt.virt; | ||
1010 | } else | ||
1011 | return i2o_parse_hrt(c); | ||
1012 | } | ||
1013 | |||
1014 | osm_err("%s: Unable to get HRT after %d tries, giving up\n", c->name, | ||
1015 | I2O_HRT_GET_TRIES); | ||
1016 | |||
1017 | return -EBUSY; | ||
1018 | } | ||
1019 | |||
1020 | /** | ||
1021 | * i2o_iop_release - release the memory for an I2O controller | ||
1022 | * @dev: I2O controller which should be released | ||
1023 | * | ||
1024 | * Release the allocated memory. This function is called automatically | ||
1025 | * when the refcount of the device reaches 0. | ||
1026 | */ | ||
1027 | static void i2o_iop_release(struct device *dev) | ||
1028 | { | ||
1029 | struct i2o_controller *c = to_i2o_controller(dev); | ||
1030 | |||
1031 | i2o_iop_free(c); | ||
1032 | }; | ||
1033 | |||
1034 | /** | ||
1035 | * i2o_iop_alloc - Allocate and initialize an i2o_controller struct | ||
1036 | * | ||
1037 | * Allocate the necessary memory for an i2o_controller struct and | ||
1038 | * initialize the lists and message mempool. | ||
1039 | * | ||
1040 | * Returns a pointer to the I2O controller or a negative error code on | ||
1041 | * failure. | ||
1042 | */ | ||
1043 | struct i2o_controller *i2o_iop_alloc(void) | ||
1044 | { | ||
1045 | static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */ | ||
1046 | struct i2o_controller *c; | ||
1047 | char poolname[32]; | ||
1048 | |||
1049 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
1050 | if (!c) { | ||
1051 | osm_err("i2o: Insufficient memory to allocate a I2O controller." | ||
1052 | "\n"); | ||
1053 | return ERR_PTR(-ENOMEM); | ||
1054 | } | ||
1055 | |||
1056 | c->unit = unit++; | ||
1057 | sprintf(c->name, "iop%d", c->unit); | ||
1058 | |||
1059 | snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name); | ||
1060 | if (i2o_pool_alloc | ||
1061 | (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32), | ||
1062 | I2O_MSG_INPOOL_MIN)) { | ||
1063 | kfree(c); | ||
1064 | return ERR_PTR(-ENOMEM); | ||
1065 | }; | ||
1066 | |||
1067 | INIT_LIST_HEAD(&c->devices); | ||
1068 | spin_lock_init(&c->lock); | ||
1069 | mutex_init(&c->lct_lock); | ||
1070 | |||
1071 | device_initialize(&c->device); | ||
1072 | |||
1073 | c->device.release = &i2o_iop_release; | ||
1074 | |||
1075 | dev_set_name(&c->device, "iop%d", c->unit); | ||
1076 | |||
1077 | #if BITS_PER_LONG == 64 | ||
1078 | spin_lock_init(&c->context_list_lock); | ||
1079 | atomic_set(&c->context_list_counter, 0); | ||
1080 | INIT_LIST_HEAD(&c->context_list); | ||
1081 | #endif | ||
1082 | |||
1083 | return c; | ||
1084 | }; | ||
1085 | |||
1086 | /** | ||
1087 | * i2o_iop_add - Initialize the I2O controller and add it to the I2O core | ||
1088 | * @c: controller | ||
1089 | * | ||
1090 | * Initialize the I2O controller and, if no error occurs, add it to the I2O | ||
1091 | * core. | ||
1092 | * | ||
1093 | * Returns 0 on success or negative error code on failure. | ||
1094 | */ | ||
1095 | int i2o_iop_add(struct i2o_controller *c) | ||
1096 | { | ||
1097 | int rc; | ||
1098 | |||
1099 | if ((rc = device_add(&c->device))) { | ||
1100 | osm_err("%s: could not add controller\n", c->name); | ||
1101 | goto iop_reset; | ||
1102 | } | ||
1103 | |||
1104 | osm_info("%s: Activating I2O controller...\n", c->name); | ||
1105 | osm_info("%s: This may take a few minutes if there are many devices\n", | ||
1106 | c->name); | ||
1107 | |||
1108 | if ((rc = i2o_iop_activate(c))) { | ||
1109 | osm_err("%s: could not activate controller\n", c->name); | ||
1110 | goto device_del; | ||
1111 | } | ||
1112 | |||
1113 | osm_debug("%s: building sys table...\n", c->name); | ||
1114 | |||
1115 | if ((rc = i2o_systab_build())) | ||
1116 | goto device_del; | ||
1117 | |||
1118 | osm_debug("%s: online controller...\n", c->name); | ||
1119 | |||
1120 | if ((rc = i2o_iop_online(c))) | ||
1121 | goto device_del; | ||
1122 | |||
1123 | osm_debug("%s: getting LCT...\n", c->name); | ||
1124 | |||
1125 | if ((rc = i2o_exec_lct_get(c))) | ||
1126 | goto device_del; | ||
1127 | |||
1128 | list_add(&c->list, &i2o_controllers); | ||
1129 | |||
1130 | i2o_driver_notify_controller_add_all(c); | ||
1131 | |||
1132 | osm_info("%s: Controller added\n", c->name); | ||
1133 | |||
1134 | return 0; | ||
1135 | |||
1136 | device_del: | ||
1137 | device_del(&c->device); | ||
1138 | |||
1139 | iop_reset: | ||
1140 | i2o_iop_reset(c); | ||
1141 | |||
1142 | return rc; | ||
1143 | }; | ||
1144 | |||
1145 | /** | ||
1146 | * i2o_event_register - Turn on/off event notification for an I2O device | ||
1147 | * @dev: I2O device which should receive the event registration request | ||
1148 | * @drv: driver which wants to get notified | ||
1149 | * @tcntxt: transaction context to use with this notifier | ||
1150 | * @evt_mask: mask of events | ||
1151 | * | ||
1152 | * Creates and posts an event registration message to the task. No reply | ||
1153 | * is waited for, or expected. If you do not want further notifications, | ||
1154 | * call i2o_event_register() again with an evt_mask of 0. | ||
1155 | * | ||
1156 | * Returns 0 on success or negative error code on failure. | ||
1157 | */ | ||
1158 | int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv, | ||
1159 | int tcntxt, u32 evt_mask) | ||
1160 | { | ||
1161 | struct i2o_controller *c = dev->iop; | ||
1162 | struct i2o_message *msg; | ||
1163 | |||
1164 | msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET); | ||
1165 | if (IS_ERR(msg)) | ||
1166 | return PTR_ERR(msg); | ||
1167 | |||
1168 | msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); | ||
1169 | msg->u.head[1] = | ||
1170 | cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev-> | ||
1171 | lct_data.tid); | ||
1172 | msg->u.s.icntxt = cpu_to_le32(drv->context); | ||
1173 | msg->u.s.tcntxt = cpu_to_le32(tcntxt); | ||
1174 | msg->body[0] = cpu_to_le32(evt_mask); | ||
1175 | |||
1176 | i2o_msg_post(c, msg); | ||
1177 | |||
1178 | return 0; | ||
1179 | }; | ||
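/*
 * Editor's sketch, not part of the original file: typical use of
 * i2o_event_register() from an OSM.  As the comment above notes, passing
 * an event mask of 0 turns notifications off again.  The function name
 * and mask value are hypothetical.
 */
static void i2o_example_events(struct i2o_device *dev, struct i2o_driver *drv)
{
	i2o_event_register(dev, drv, 0, 0xffffffff);	/* all events */
	/* ... later, when no longer interested ... */
	i2o_event_register(dev, drv, 0, 0);		/* stop notifications */
}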
1180 | |||
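For illustration, a minimal sketch of how an OSM might turn event notification on for a device it is bound to; the function name, the transaction context value and the event mask below are hypothetical, not taken from this diff:

static int example_enable_events(struct i2o_device *i2o_dev,
				 struct i2o_driver *my_osm)
{
	/* ask the IOP to post all events for this device to my_osm;
	 * 0x01 is an arbitrary transaction context for matching replies */
	return i2o_event_register(i2o_dev, my_osm, 0x01, 0xffffffff);
}

Passing an evt_mask of 0 in a later call switches the notifications off again.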
1181 | /** | ||
1182 | * i2o_iop_init - I2O main initialization function | ||
1183 | * | ||
1184 | * Initialize the I2O driver (OSM) functions, register the Executive OSM, | ||
1185 | * initialize the I2O PCI part and finally set up the I2O device handling. | ||
1186 | * | ||
1187 | * Returns 0 on success or negative error code on failure. | ||
1188 | */ | ||
1189 | static int __init i2o_iop_init(void) | ||
1190 | { | ||
1191 | int rc = 0; | ||
1192 | |||
1193 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1194 | |||
1195 | if ((rc = i2o_driver_init())) | ||
1196 | goto exit; | ||
1197 | |||
1198 | if ((rc = i2o_exec_init())) | ||
1199 | goto driver_exit; | ||
1200 | |||
1201 | if ((rc = i2o_pci_init())) | ||
1202 | goto exec_exit; | ||
1203 | |||
1204 | return 0; | ||
1205 | |||
1206 | exec_exit: | ||
1207 | i2o_exec_exit(); | ||
1208 | |||
1209 | driver_exit: | ||
1210 | i2o_driver_exit(); | ||
1211 | |||
1212 | exit: | ||
1213 | return rc; | ||
1214 | } | ||
1215 | |||
1216 | /** | ||
1217 | * i2o_iop_exit - I2O main exit function | ||
1218 | * | ||
1219 | * Removes the I2O controllers from the PCI subsystem and shuts down the OSMs. | ||
1220 | */ | ||
1221 | static void __exit i2o_iop_exit(void) | ||
1222 | { | ||
1223 | i2o_pci_exit(); | ||
1224 | i2o_exec_exit(); | ||
1225 | i2o_driver_exit(); | ||
1226 | }; | ||
1227 | |||
1228 | module_init(i2o_iop_init); | ||
1229 | module_exit(i2o_iop_exit); | ||
1230 | |||
1231 | MODULE_AUTHOR("Red Hat Software"); | ||
1232 | MODULE_LICENSE("GPL"); | ||
1233 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1234 | MODULE_VERSION(OSM_VERSION); | ||
1235 | |||
1236 | #if BITS_PER_LONG == 64 | ||
1237 | EXPORT_SYMBOL(i2o_cntxt_list_add); | ||
1238 | EXPORT_SYMBOL(i2o_cntxt_list_get); | ||
1239 | EXPORT_SYMBOL(i2o_cntxt_list_remove); | ||
1240 | EXPORT_SYMBOL(i2o_cntxt_list_get_ptr); | ||
1241 | #endif | ||
1242 | EXPORT_SYMBOL(i2o_msg_get_wait); | ||
1243 | EXPORT_SYMBOL(i2o_find_iop); | ||
1244 | EXPORT_SYMBOL(i2o_iop_find_device); | ||
1245 | EXPORT_SYMBOL(i2o_event_register); | ||
1246 | EXPORT_SYMBOL(i2o_status_get); | ||
1247 | EXPORT_SYMBOL(i2o_controllers); | ||
diff --git a/drivers/message/i2o/memory.c b/drivers/message/i2o/memory.c deleted file mode 100644 index 292b41e49fbd..000000000000 --- a/drivers/message/i2o/memory.c +++ /dev/null | |||
@@ -1,313 +0,0 @@ | |||
1 | /* | ||
2 | * Functions to handle I2O memory | ||
3 | * | ||
4 | * Pulled from the inlines in i2o headers and uninlined | ||
5 | * | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/i2o.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include "core.h" | ||
19 | |||
20 | /* Protects our 32/64bit mask switching */ | ||
21 | static DEFINE_MUTEX(mem_lock); | ||
22 | |||
23 | /** | ||
24 | * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL | ||
25 | * @c: I2O controller for which the calculation should be done | ||
26 | * @body_size: maximum body size used for message in 32-bit words. | ||
27 | * | ||
28 | * Return the maximum number of SG elements in a SG list. | ||
29 | */ | ||
30 | u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size) | ||
31 | { | ||
32 | i2o_status_block *sb = c->status_block.virt; | ||
33 | u16 sg_count = | ||
34 | (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) - | ||
35 | body_size; | ||
36 | |||
37 | if (c->pae_support) { | ||
38 | /* | ||
39 | * for 64-bit a SG attribute element must be added and each | ||
40 | * SG element needs 12 bytes instead of 8. | ||
41 | */ | ||
42 | sg_count -= 2; | ||
43 | sg_count /= 3; | ||
44 | } else | ||
45 | sg_count /= 2; | ||
46 | |||
47 | if (c->short_req && (sg_count > 8)) | ||
48 | sg_count = 8; | ||
49 | |||
50 | return sg_count; | ||
51 | } | ||
52 | EXPORT_SYMBOL_GPL(i2o_sg_tablesize); | ||
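To make the arithmetic above concrete, a worked example with assumed numbers (the four-word message header, i.e. sizeof(struct i2o_message) / 4 == 4, is an assumption, not a value from this file): with a 32-word inbound frame and a 6-word message body,

	32 - 4 - 6   = 22 words left for the SG list
	22 / 2       = 11 elements with 8-byte (32-bit) SG entries
	(22 - 2) / 3 =  6 elements with the 64-bit layout (pae_support)
	             and at most 8 elements either way if c->short_req is set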
53 | |||
54 | |||
55 | /** | ||
56 | * i2o_dma_map_single - Map pointer to controller and fill in I2O message. | ||
57 | * @c: I2O controller | ||
58 | * @ptr: pointer to the data which should be mapped | ||
59 | * @size: size of data in bytes | ||
60 | * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE | ||
61 | * @sg_ptr: pointer to the SG list inside the I2O message | ||
62 | * | ||
63 | * This function does all necessary DMA handling and also writes the I2O | ||
64 | * SGL elements into the I2O message. For details on DMA handling see also | ||
65 | * dma_map_single(). The pointer sg_ptr will only be set to the end of the | ||
66 | * SG list if the allocation was successful. | ||
67 | * | ||
68 | * Returns DMA address which must be checked for failures using | ||
69 | * dma_mapping_error(). | ||
70 | */ | ||
71 | dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr, | ||
72 | size_t size, | ||
73 | enum dma_data_direction direction, | ||
74 | u32 ** sg_ptr) | ||
75 | { | ||
76 | u32 sg_flags; | ||
77 | u32 *mptr = *sg_ptr; | ||
78 | dma_addr_t dma_addr; | ||
79 | |||
80 | switch (direction) { | ||
81 | case DMA_TO_DEVICE: | ||
82 | sg_flags = 0xd4000000; | ||
83 | break; | ||
84 | case DMA_FROM_DEVICE: | ||
85 | sg_flags = 0xd0000000; | ||
86 | break; | ||
87 | default: | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction); | ||
92 | if (!dma_mapping_error(&c->pdev->dev, dma_addr)) { | ||
93 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
94 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | ||
95 | *mptr++ = cpu_to_le32(0x7C020002); | ||
96 | *mptr++ = cpu_to_le32(PAGE_SIZE); | ||
97 | } | ||
98 | #endif | ||
99 | |||
100 | *mptr++ = cpu_to_le32(sg_flags | size); | ||
101 | *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr)); | ||
102 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
103 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) | ||
104 | *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr)); | ||
105 | #endif | ||
106 | *sg_ptr = mptr; | ||
107 | } | ||
108 | return dma_addr; | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(i2o_dma_map_single); | ||
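A hedged sketch of a caller; the choice of &msg->body[1] as the start of the SG list and the DMA direction are illustrative assumptions, not lifted from a real OSM in this diff:

static int example_map_buffer(struct i2o_controller *c,
			      struct i2o_message *msg,
			      void *buf, size_t len)
{
	u32 *mptr = (u32 *) &msg->body[1];	/* assumed SGL offset */
	dma_addr_t addr;

	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
	if (dma_mapping_error(&c->pdev->dev, addr))
		return -ENOMEM;

	/* mptr now points just past the SG elements that were written */
	return 0;
}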
111 | |||
112 | /** | ||
113 | * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message. | ||
114 | * @c: I2O controller | ||
115 | * @sg: SG list to be mapped | ||
116 | * @sg_count: number of elements in the SG list | ||
117 | * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE | ||
118 | * @sg_ptr: pointer to the SG list inside the I2O message | ||
119 | * | ||
120 | * This function does all necessary DMA handling and also writes the I2O | ||
121 | * SGL elements into the I2O message. For details on DMA handling see also | ||
122 | * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG | ||
123 | * list if the allocation was successful. | ||
124 | * | ||
125 | * Returns 0 on failure or 1 on success. | ||
126 | */ | ||
127 | int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg, | ||
128 | int sg_count, enum dma_data_direction direction, u32 ** sg_ptr) | ||
129 | { | ||
130 | u32 sg_flags; | ||
131 | u32 *mptr = *sg_ptr; | ||
132 | |||
133 | switch (direction) { | ||
134 | case DMA_TO_DEVICE: | ||
135 | sg_flags = 0x14000000; | ||
136 | break; | ||
137 | case DMA_FROM_DEVICE: | ||
138 | sg_flags = 0x10000000; | ||
139 | break; | ||
140 | default: | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); | ||
145 | if (!sg_count) | ||
146 | return 0; | ||
147 | |||
148 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
149 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) { | ||
150 | *mptr++ = cpu_to_le32(0x7C020002); | ||
151 | *mptr++ = cpu_to_le32(PAGE_SIZE); | ||
152 | } | ||
153 | #endif | ||
154 | |||
155 | while (sg_count-- > 0) { | ||
156 | if (!sg_count) | ||
157 | sg_flags |= 0xC0000000; | ||
158 | *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); | ||
159 | *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); | ||
160 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
161 | if ((sizeof(dma_addr_t) > 4) && c->pae_support) | ||
162 | *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); | ||
163 | #endif | ||
164 | sg = sg_next(sg); | ||
165 | } | ||
166 | *sg_ptr = mptr; | ||
167 | |||
168 | return 1; | ||
169 | } | ||
170 | EXPORT_SYMBOL_GPL(i2o_dma_map_sg); | ||
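Similarly, a sketch of mapping an already built scatterlist into a message; again the SGL offset is an assumption:

static int example_map_sg(struct i2o_controller *c, struct i2o_message *msg,
			  struct scatterlist *sg, int nents)
{
	u32 *mptr = (u32 *) &msg->body[1];	/* assumed SGL offset */

	if (!i2o_dma_map_sg(c, sg, nents, DMA_FROM_DEVICE, &mptr))
		return -ENOMEM;			/* nothing was mapped */

	return 0;
}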
171 | |||
172 | /** | ||
173 | * i2o_dma_alloc - Allocate DMA memory | ||
174 | * @dev: struct device pointer to the PCI device of the I2O controller | ||
175 | * @addr: i2o_dma struct which should get the DMA buffer | ||
176 | * @len: length of the new DMA memory | ||
177 | * | ||
178 | * Allocate a coherent DMA memory and write the pointers into addr. | ||
179 | * | ||
180 | * Returns 0 on success or -ENOMEM on failure. | ||
181 | */ | ||
182 | int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len) | ||
183 | { | ||
184 | struct pci_dev *pdev = to_pci_dev(dev); | ||
185 | int dma_64 = 0; | ||
186 | |||
187 | mutex_lock(&mem_lock); | ||
188 | if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) { | ||
189 | dma_64 = 1; | ||
190 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
191 | mutex_unlock(&mem_lock); | ||
192 | return -ENOMEM; | ||
193 | } | ||
194 | } | ||
195 | |||
196 | addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL); | ||
197 | |||
198 | if ((sizeof(dma_addr_t) > 4) && dma_64) | ||
199 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) | ||
200 | printk(KERN_WARNING "i2o: unable to set 64-bit DMA"); | ||
201 | mutex_unlock(&mem_lock); | ||
202 | |||
203 | if (!addr->virt) | ||
204 | return -ENOMEM; | ||
205 | |||
206 | memset(addr->virt, 0, len); | ||
207 | addr->len = len; | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | EXPORT_SYMBOL_GPL(i2o_dma_alloc); | ||
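A minimal usage sketch pairing i2o_dma_alloc() with i2o_dma_free(); the 8 KiB length is an arbitrary illustrative value:

static int example_dma_buffer(struct i2o_controller *c)
{
	struct i2o_dma tmp;
	int rc;

	rc = i2o_dma_alloc(&c->pdev->dev, &tmp, 8192);
	if (rc)
		return rc;			/* -ENOMEM */

	/* ... hand tmp.phys to the IOP, read the result from tmp.virt ... */

	i2o_dma_free(&c->pdev->dev, &tmp);
	return 0;
}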
212 | |||
213 | |||
214 | /** | ||
215 | * i2o_dma_free - Free DMA memory | ||
216 | * @dev: struct device pointer to the PCI device of the I2O controller | ||
217 | * @addr: i2o_dma struct which contains the DMA buffer | ||
218 | * | ||
219 | * Free a coherent DMA memory and set virtual address of addr to NULL. | ||
220 | */ | ||
221 | void i2o_dma_free(struct device *dev, struct i2o_dma *addr) | ||
222 | { | ||
223 | if (addr->virt) { | ||
224 | if (addr->phys) | ||
225 | dma_free_coherent(dev, addr->len, addr->virt, | ||
226 | addr->phys); | ||
227 | else | ||
228 | kfree(addr->virt); | ||
229 | addr->virt = NULL; | ||
230 | } | ||
231 | } | ||
232 | EXPORT_SYMBOL_GPL(i2o_dma_free); | ||
233 | |||
234 | |||
235 | /** | ||
236 | * i2o_dma_realloc - Realloc DMA memory | ||
237 | * @dev: struct device pointer to the PCI device of the I2O controller | ||
238 | * @addr: pointer to a i2o_dma struct DMA buffer | ||
239 | * @len: new length of memory | ||
240 | * | ||
241 | * If something was already allocated in addr, free it first. If len > 0, | ||
242 | * then try to allocate a new buffer and write the addresses back to the | ||
243 | * addr structure. If len == 0, set the virtual address to NULL. | ||
244 | * | ||
245 | * Returns 0 on success or negative error code on failure. | ||
246 | */ | ||
247 | int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len) | ||
248 | { | ||
249 | i2o_dma_free(dev, addr); | ||
250 | |||
251 | if (len) | ||
252 | return i2o_dma_alloc(dev, addr, len); | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | EXPORT_SYMBOL_GPL(i2o_dma_realloc); | ||
257 | |||
258 | /* | ||
259 | * i2o_pool_alloc - Allocate a slab cache and mempool | ||
260 | * @pool: pointer to struct i2o_pool to write data into. | ||
261 | * @name: name which is used to identify cache | ||
262 | * @size: size of each object | ||
263 | * @min_nr: minimum number of objects | ||
264 | * | ||
265 | * First allocates a slab cache with name and size. Then allocates a | ||
266 | * mempool which uses the slab cache for allocation and freeing. | ||
267 | * | ||
268 | * Returns 0 on success or negative error code on failure. | ||
269 | */ | ||
270 | int i2o_pool_alloc(struct i2o_pool *pool, const char *name, | ||
271 | size_t size, int min_nr) | ||
272 | { | ||
273 | pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL); | ||
274 | if (!pool->name) | ||
275 | goto exit; | ||
276 | strcpy(pool->name, name); | ||
277 | |||
278 | pool->slab = | ||
279 | kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL); | ||
280 | if (!pool->slab) | ||
281 | goto free_name; | ||
282 | |||
283 | pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); | ||
284 | if (!pool->mempool) | ||
285 | goto free_slab; | ||
286 | |||
287 | return 0; | ||
288 | |||
289 | free_slab: | ||
290 | kmem_cache_destroy(pool->slab); | ||
291 | |||
292 | free_name: | ||
293 | kfree(pool->name); | ||
294 | |||
295 | exit: | ||
296 | return -ENOMEM; | ||
297 | } | ||
298 | EXPORT_SYMBOL_GPL(i2o_pool_alloc); | ||
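A minimal sketch of the pool lifecycle; the pool name, object size and minimum count are illustrative values only:

static int example_pool_usage(struct i2o_pool *pool)
{
	void *obj;
	int rc;

	rc = i2o_pool_alloc(pool, "example_msg_pool", 128, 4);
	if (rc)
		return rc;

	obj = mempool_alloc(pool->mempool, GFP_KERNEL);
	if (obj)
		mempool_free(obj, pool->mempool);

	/* every object must be back in the pool before freeing it */
	i2o_pool_free(pool);
	return 0;
}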
299 | |||
300 | /* | ||
301 | * i2o_pool_free - Free slab cache and mempool again | ||
302 | * @pool: pointer to struct i2o_pool which should be freed | ||
303 | * | ||
304 | * Note that you have to return all objects to the mempool again before | ||
305 | * calling i2o_pool_free(). | ||
306 | */ | ||
307 | void i2o_pool_free(struct i2o_pool *pool) | ||
308 | { | ||
309 | mempool_destroy(pool->mempool); | ||
310 | kmem_cache_destroy(pool->slab); | ||
311 | kfree(pool->name); | ||
312 | }; | ||
313 | EXPORT_SYMBOL_GPL(i2o_pool_free); | ||
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c deleted file mode 100644 index 0f9f3e1a2b6b..000000000000 --- a/drivers/message/i2o/pci.c +++ /dev/null | |||
@@ -1,497 +0,0 @@ | |||
1 | /* | ||
2 | * PCI handling of I2O controller | ||
3 | * | ||
4 | * Copyright (C) 1999-2002 Red Hat Software | ||
5 | * | ||
6 | * Written by Alan Cox, Building Number Three Ltd | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * A lot of the I2O message side code from this is taken from the Red | ||
14 | * Creek RCPCI45 adapter driver by Red Creek Communications | ||
15 | * | ||
16 | * Fixes/additions: | ||
17 | * Philipp Rumpf | ||
18 | * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI> | ||
19 | * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI> | ||
20 | * Deepak Saxena <deepak@plexity.net> | ||
21 | * Boji T Kannanthanam <boji.t.kannanthanam@intel.com> | ||
22 | * Alan Cox <alan@lxorguk.ukuu.org.uk>: | ||
23 | * Ported to Linux 2.5. | ||
24 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
25 | * Minor fixes for 2.6. | ||
26 | * Markus Lidel <Markus.Lidel@shadowconnect.com>: | ||
27 | * Support for sysfs included. | ||
28 | */ | ||
29 | |||
30 | #include <linux/pci.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/i2o.h> | ||
34 | #include <linux/module.h> | ||
35 | #include "core.h" | ||
36 | |||
37 | #define OSM_DESCRIPTION "I2O-subsystem" | ||
38 | |||
39 | /* PCI device id table for all I2O controllers */ | ||
40 | static struct pci_device_id i2o_pci_ids[] = { | ||
41 | {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)}, | ||
42 | {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)}, | ||
43 | {.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962, | ||
44 | .subvendor = PCI_VENDOR_ID_PROMISE,.subdevice = PCI_ANY_ID}, | ||
45 | {0} | ||
46 | }; | ||
47 | |||
48 | /** | ||
49 | * i2o_pci_free - Frees the DMA memory for the I2O controller | ||
50 | * @c: I2O controller to free | ||
51 | * | ||
52 | * Remove all allocated DMA memory and unmap the memory IO regions. If | ||
53 | * MTRR support is enabled, the MTRR regions are removed as well. | ||
54 | */ | ||
55 | static void i2o_pci_free(struct i2o_controller *c) | ||
56 | { | ||
57 | struct device *dev; | ||
58 | |||
59 | dev = &c->pdev->dev; | ||
60 | |||
61 | i2o_dma_free(dev, &c->out_queue); | ||
62 | i2o_dma_free(dev, &c->status_block); | ||
63 | kfree(c->lct); | ||
64 | i2o_dma_free(dev, &c->dlct); | ||
65 | i2o_dma_free(dev, &c->hrt); | ||
66 | i2o_dma_free(dev, &c->status); | ||
67 | |||
68 | if (c->raptor && c->in_queue.virt) | ||
69 | iounmap(c->in_queue.virt); | ||
70 | |||
71 | if (c->base.virt) | ||
72 | iounmap(c->base.virt); | ||
73 | |||
74 | pci_release_regions(c->pdev); | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller | ||
79 | * @c: I2O controller | ||
80 | * | ||
81 | * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All | ||
82 | * IO mappings are also done here. If MTRR support is enabled, the MTRR | ||
83 | * regions are also added here. | ||
84 | * | ||
85 | * Returns 0 on success or negative error code on failure. | ||
86 | */ | ||
87 | static int i2o_pci_alloc(struct i2o_controller *c) | ||
88 | { | ||
89 | struct pci_dev *pdev = c->pdev; | ||
90 | struct device *dev = &pdev->dev; | ||
91 | int i; | ||
92 | |||
93 | if (pci_request_regions(pdev, OSM_DESCRIPTION)) { | ||
94 | printk(KERN_ERR "%s: device already claimed\n", c->name); | ||
95 | return -ENODEV; | ||
96 | } | ||
97 | |||
98 | for (i = 0; i < 6; i++) { | ||
99 | /* Skip I/O spaces */ | ||
100 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) { | ||
101 | if (!c->base.phys) { | ||
102 | c->base.phys = pci_resource_start(pdev, i); | ||
103 | c->base.len = pci_resource_len(pdev, i); | ||
104 | |||
105 | /* | ||
106 | * If we know what card it is, set the size | ||
107 | * correctly. Code is taken from dpt_i2o.c | ||
108 | */ | ||
109 | if (pdev->device == 0xa501) { | ||
110 | if (pdev->subsystem_device >= 0xc032 && | ||
111 | pdev->subsystem_device <= 0xc03b) { | ||
112 | if (c->base.len > 0x400000) | ||
113 | c->base.len = 0x400000; | ||
114 | } else { | ||
115 | if (c->base.len > 0x100000) | ||
116 | c->base.len = 0x100000; | ||
117 | } | ||
118 | } | ||
119 | if (!c->raptor) | ||
120 | break; | ||
121 | } else { | ||
122 | c->in_queue.phys = pci_resource_start(pdev, i); | ||
123 | c->in_queue.len = pci_resource_len(pdev, i); | ||
124 | break; | ||
125 | } | ||
126 | } | ||
127 | } | ||
128 | |||
129 | if (i == 6) { | ||
130 | printk(KERN_ERR "%s: I2O controller has no memory regions" | ||
131 | " defined.\n", c->name); | ||
132 | i2o_pci_free(c); | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | /* Map the I2O controller */ | ||
137 | if (c->raptor) { | ||
138 | printk(KERN_INFO "%s: PCI I2O controller\n", c->name); | ||
139 | printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n", | ||
140 | (unsigned long)c->base.phys, (unsigned long)c->base.len); | ||
141 | printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n", | ||
142 | (unsigned long)c->in_queue.phys, | ||
143 | (unsigned long)c->in_queue.len); | ||
144 | } else | ||
145 | printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n", | ||
146 | c->name, (unsigned long)c->base.phys, | ||
147 | (unsigned long)c->base.len); | ||
148 | |||
149 | c->base.virt = ioremap_nocache(c->base.phys, c->base.len); | ||
150 | if (!c->base.virt) { | ||
151 | printk(KERN_ERR "%s: Unable to map controller.\n", c->name); | ||
152 | i2o_pci_free(c); | ||
153 | return -ENOMEM; | ||
154 | } | ||
155 | |||
156 | if (c->raptor) { | ||
157 | c->in_queue.virt = | ||
158 | ioremap_nocache(c->in_queue.phys, c->in_queue.len); | ||
159 | if (!c->in_queue.virt) { | ||
160 | printk(KERN_ERR "%s: Unable to map controller.\n", | ||
161 | c->name); | ||
162 | i2o_pci_free(c); | ||
163 | return -ENOMEM; | ||
164 | } | ||
165 | } else | ||
166 | c->in_queue = c->base; | ||
167 | |||
168 | c->irq_status = c->base.virt + I2O_IRQ_STATUS; | ||
169 | c->irq_mask = c->base.virt + I2O_IRQ_MASK; | ||
170 | c->in_port = c->base.virt + I2O_IN_PORT; | ||
171 | c->out_port = c->base.virt + I2O_OUT_PORT; | ||
172 | |||
173 | /* Motorola/Freescale chip does not follow spec */ | ||
174 | if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) { | ||
175 | /* Check if CPU is enabled */ | ||
176 | if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) { | ||
177 | printk(KERN_INFO "%s: MPC82XX needs CPU running to " | ||
178 | "service I2O.\n", c->name); | ||
179 | i2o_pci_free(c); | ||
180 | return -ENODEV; | ||
181 | } else { | ||
182 | c->irq_status += I2O_MOTOROLA_PORT_OFFSET; | ||
183 | c->irq_mask += I2O_MOTOROLA_PORT_OFFSET; | ||
184 | c->in_port += I2O_MOTOROLA_PORT_OFFSET; | ||
185 | c->out_port += I2O_MOTOROLA_PORT_OFFSET; | ||
186 | printk(KERN_INFO "%s: MPC82XX workarounds activated.\n", | ||
187 | c->name); | ||
188 | } | ||
189 | } | ||
190 | |||
191 | if (i2o_dma_alloc(dev, &c->status, 8)) { | ||
192 | i2o_pci_free(c); | ||
193 | return -ENOMEM; | ||
194 | } | ||
195 | |||
196 | if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) { | ||
197 | i2o_pci_free(c); | ||
198 | return -ENOMEM; | ||
199 | } | ||
200 | |||
201 | if (i2o_dma_alloc(dev, &c->dlct, 8192)) { | ||
202 | i2o_pci_free(c); | ||
203 | return -ENOMEM; | ||
204 | } | ||
205 | |||
206 | if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) { | ||
207 | i2o_pci_free(c); | ||
208 | return -ENOMEM; | ||
209 | } | ||
210 | |||
211 | if (i2o_dma_alloc(dev, &c->out_queue, | ||
212 | I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE * | ||
213 | sizeof(u32))) { | ||
214 | i2o_pci_free(c); | ||
215 | return -ENOMEM; | ||
216 | } | ||
217 | |||
218 | pci_set_drvdata(pdev, c); | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | /** | ||
224 | * i2o_pci_interrupt - Interrupt handler for I2O controller | ||
225 | * @irq: interrupt line | ||
226 | * @dev_id: pointer to the I2O controller | ||
227 | * | ||
228 | * Handle an interrupt from a PCI based I2O controller. This turns out | ||
229 | * to be rather simple. We keep the controller pointer in the cookie. | ||
230 | */ | ||
231 | static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id) | ||
232 | { | ||
233 | struct i2o_controller *c = dev_id; | ||
234 | u32 m; | ||
235 | irqreturn_t rc = IRQ_NONE; | ||
236 | |||
237 | while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) { | ||
238 | m = readl(c->out_port); | ||
239 | if (m == I2O_QUEUE_EMPTY) { | ||
240 | /* | ||
241 | * Old 960 steppings had a bug in the I2O unit that | ||
242 | * caused the queue to appear empty when it wasn't. | ||
243 | */ | ||
244 | m = readl(c->out_port); | ||
245 | if (unlikely(m == I2O_QUEUE_EMPTY)) | ||
246 | break; | ||
247 | } | ||
248 | |||
249 | /* dispatch it */ | ||
250 | if (i2o_driver_dispatch(c, m)) | ||
251 | /* flush it if result != 0 */ | ||
252 | i2o_flush_reply(c, m); | ||
253 | |||
254 | rc = IRQ_HANDLED; | ||
255 | } | ||
256 | |||
257 | return rc; | ||
258 | } | ||
259 | |||
260 | /** | ||
261 | * i2o_pci_irq_enable - Allocate interrupt for I2O controller | ||
262 | * @c: i2o_controller that the request is for | ||
263 | * | ||
264 | * Allocate an interrupt for the I2O controller, and activate interrupts | ||
265 | * on the I2O controller. | ||
266 | * | ||
267 | * Returns 0 on success or negative error code on failure. | ||
268 | */ | ||
269 | static int i2o_pci_irq_enable(struct i2o_controller *c) | ||
270 | { | ||
271 | struct pci_dev *pdev = c->pdev; | ||
272 | int rc; | ||
273 | |||
274 | writel(0xffffffff, c->irq_mask); | ||
275 | |||
276 | if (pdev->irq) { | ||
277 | rc = request_irq(pdev->irq, i2o_pci_interrupt, IRQF_SHARED, | ||
278 | c->name, c); | ||
279 | if (rc < 0) { | ||
280 | printk(KERN_ERR "%s: unable to allocate interrupt %d." | ||
281 | "\n", c->name, pdev->irq); | ||
282 | return rc; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | writel(0x00000000, c->irq_mask); | ||
287 | |||
288 | printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * i2o_pci_irq_disable - Free interrupt for I2O controller | ||
295 | * @c: I2O controller | ||
296 | * | ||
297 | * Disable interrupts in I2O controller and then free interrupt. | ||
298 | */ | ||
299 | static void i2o_pci_irq_disable(struct i2o_controller *c) | ||
300 | { | ||
301 | writel(0xffffffff, c->irq_mask); | ||
302 | |||
303 | if (c->pdev->irq > 0) | ||
304 | free_irq(c->pdev->irq, c); | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * i2o_pci_probe - Probe the PCI device for an I2O controller | ||
309 | * @pdev: PCI device to test | ||
310 | * @id: id which matched with the PCI device id table | ||
311 | * | ||
312 | * Probe the PCI device for any device which is a member of the | ||
313 | * Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We | ||
314 | * attempt to set up each such device and register it with the core. | ||
315 | * | ||
316 | * Returns 0 on success or negative error code on failure. | ||
317 | */ | ||
318 | static int i2o_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
319 | { | ||
320 | struct i2o_controller *c; | ||
321 | int rc; | ||
322 | struct pci_dev *i960 = NULL; | ||
323 | |||
324 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); | ||
325 | |||
326 | if ((pdev->class & 0xff) > 1) { | ||
327 | printk(KERN_WARNING "i2o: %s does not support I2O 1.5 " | ||
328 | "(skipping).\n", pci_name(pdev)); | ||
329 | return -ENODEV; | ||
330 | } | ||
331 | |||
332 | if ((rc = pci_enable_device(pdev))) { | ||
333 | printk(KERN_WARNING "i2o: couldn't enable device %s\n", | ||
334 | pci_name(pdev)); | ||
335 | return rc; | ||
336 | } | ||
337 | |||
338 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
339 | printk(KERN_WARNING "i2o: no suitable DMA found for %s\n", | ||
340 | pci_name(pdev)); | ||
341 | rc = -ENODEV; | ||
342 | goto disable; | ||
343 | } | ||
344 | |||
345 | pci_set_master(pdev); | ||
346 | |||
347 | c = i2o_iop_alloc(); | ||
348 | if (IS_ERR(c)) { | ||
349 | printk(KERN_ERR "i2o: couldn't allocate memory for %s\n", | ||
350 | pci_name(pdev)); | ||
351 | rc = PTR_ERR(c); | ||
352 | goto disable; | ||
353 | } else | ||
354 | printk(KERN_INFO "%s: controller found (%s)\n", c->name, | ||
355 | pci_name(pdev)); | ||
356 | |||
357 | c->pdev = pdev; | ||
358 | c->device.parent = &pdev->dev; | ||
359 | |||
360 | /* Cards that fall apart if you hit them with large I/O loads... */ | ||
361 | if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { | ||
362 | c->short_req = 1; | ||
363 | printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n", | ||
364 | c->name); | ||
365 | } | ||
366 | |||
367 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) { | ||
368 | /* | ||
369 | * Expose the ship behind i960 for initialization, or it will | ||
370 | * failed | ||
371 | */ | ||
372 | i960 = pci_get_slot(c->pdev->bus, | ||
373 | PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0)); | ||
374 | |||
375 | if (i960) { | ||
376 | pci_write_config_word(i960, 0x42, 0); | ||
377 | pci_dev_put(i960); | ||
378 | } | ||
379 | |||
380 | c->promise = 1; | ||
381 | c->limit_sectors = 1; | ||
382 | } | ||
383 | |||
384 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT) | ||
385 | c->adaptec = 1; | ||
386 | |||
387 | /* Cards that go bananas if you quiesce them before you reset them. */ | ||
388 | if (pdev->vendor == PCI_VENDOR_ID_DPT) { | ||
389 | c->no_quiesce = 1; | ||
390 | if (pdev->device == 0xa511) | ||
391 | c->raptor = 1; | ||
392 | |||
393 | if (pdev->subsystem_device == 0xc05a) { | ||
394 | c->limit_sectors = 1; | ||
395 | printk(KERN_INFO | ||
396 | "%s: limit sectors per request to %d\n", c->name, | ||
397 | I2O_MAX_SECTORS_LIMITED); | ||
398 | } | ||
399 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
400 | if (sizeof(dma_addr_t) > 4) { | ||
401 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) | ||
402 | printk(KERN_INFO "%s: 64-bit DMA unavailable\n", | ||
403 | c->name); | ||
404 | else { | ||
405 | c->pae_support = 1; | ||
406 | printk(KERN_INFO "%s: using 64-bit DMA\n", | ||
407 | c->name); | ||
408 | } | ||
409 | } | ||
410 | #endif | ||
411 | } | ||
412 | |||
413 | if ((rc = i2o_pci_alloc(c))) { | ||
414 | printk(KERN_ERR "%s: DMA / IO allocation for I2O controller " | ||
415 | "failed\n", c->name); | ||
416 | goto free_controller; | ||
417 | } | ||
418 | |||
419 | if (i2o_pci_irq_enable(c)) { | ||
420 | printk(KERN_ERR "%s: unable to enable interrupts for I2O " | ||
421 | "controller\n", c->name); | ||
422 | goto free_pci; | ||
423 | } | ||
424 | |||
425 | if ((rc = i2o_iop_add(c))) | ||
426 | goto uninstall; | ||
427 | |||
428 | if (i960) | ||
429 | pci_write_config_word(i960, 0x42, 0x03ff); | ||
430 | |||
431 | return 0; | ||
432 | |||
433 | uninstall: | ||
434 | i2o_pci_irq_disable(c); | ||
435 | |||
436 | free_pci: | ||
437 | i2o_pci_free(c); | ||
438 | |||
439 | free_controller: | ||
440 | i2o_iop_free(c); | ||
441 | |||
442 | disable: | ||
443 | pci_disable_device(pdev); | ||
444 | |||
445 | return rc; | ||
446 | } | ||
447 | |||
448 | /** | ||
449 | * i2o_pci_remove - Removes a I2O controller from the system | ||
450 | * @pdev: I2O controller which should be removed | ||
451 | * | ||
452 | * Reset the I2O controller, disable interrupts and remove all allocated | ||
453 | * resources. | ||
454 | */ | ||
455 | static void i2o_pci_remove(struct pci_dev *pdev) | ||
456 | { | ||
457 | struct i2o_controller *c; | ||
458 | c = pci_get_drvdata(pdev); | ||
459 | |||
460 | i2o_iop_remove(c); | ||
461 | i2o_pci_irq_disable(c); | ||
462 | i2o_pci_free(c); | ||
463 | |||
464 | pci_disable_device(pdev); | ||
465 | |||
466 | printk(KERN_INFO "%s: Controller removed.\n", c->name); | ||
467 | |||
468 | put_device(&c->device); | ||
469 | }; | ||
470 | |||
471 | /* PCI driver for I2O controller */ | ||
472 | static struct pci_driver i2o_pci_driver = { | ||
473 | .name = "PCI_I2O", | ||
474 | .id_table = i2o_pci_ids, | ||
475 | .probe = i2o_pci_probe, | ||
476 | .remove = i2o_pci_remove, | ||
477 | }; | ||
478 | |||
479 | /** | ||
480 | * i2o_pci_init - registers I2O PCI driver in PCI subsystem | ||
481 | * | ||
482 | * Returns 0 on success or negative error code on failure. | ||
483 | */ | ||
484 | int __init i2o_pci_init(void) | ||
485 | { | ||
486 | return pci_register_driver(&i2o_pci_driver); | ||
487 | }; | ||
488 | |||
489 | /** | ||
490 | * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem | ||
491 | */ | ||
492 | void __exit i2o_pci_exit(void) | ||
493 | { | ||
494 | pci_unregister_driver(&i2o_pci_driver); | ||
495 | }; | ||
496 | |||
497 | MODULE_DEVICE_TABLE(pci, i2o_pci_ids); | ||