commit     5696c1944a33b4434a9a1ebb6383b906afd43a10
tree       16fbe6ba431bcf949ee8645510b0c2fd39b5810f /drivers/message/i2o
parent     66b04a80eea60cabf9d89fd34deb3234a740052f
parent     020f46a39eb7b99a575b9f4d105fce2b142acdf1
author     Jeff Garzik <jgarzik@pretzel.yyz.us>  2005-06-26 23:38:58 -0400
committer  Jeff Garzik <jgarzik@pobox.com>       2005-06-26 23:38:58 -0400

    Merge /spare/repo/linux-2.6/
Diffstat (limited to 'drivers/message/i2o'):

 drivers/message/i2o/Kconfig      |  46
 drivers/message/i2o/Makefile     |   3
 drivers/message/i2o/bus-osm.c    | 164
 drivers/message/i2o/config-osm.c | 579
 drivers/message/i2o/core.h       |  58
 drivers/message/i2o/debug.c      |   3
 drivers/message/i2o/device.c     |  40
 drivers/message/i2o/driver.c     | 130
 drivers/message/i2o/exec-osm.c   | 131
 drivers/message/i2o/i2o_block.c  | 454
 drivers/message/i2o/i2o_block.h  |  34
 drivers/message/i2o/i2o_config.c | 124
 drivers/message/i2o/i2o_proc.c   |   6
 drivers/message/i2o/i2o_scsi.c   | 505
 drivers/message/i2o/iop.c        | 566
 drivers/message/i2o/pci.c        | 246
 16 files changed, 1951 insertions(+), 1138 deletions(-)
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 8d132b0d6b12..06e8eb19a05c 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -24,10 +24,28 @@ config I2O | |||
24 | 24 | ||
25 | If unsure, say N. | 25 | If unsure, say N. |
26 | 26 | ||
27 | config I2O_EXT_ADAPTEC | ||
28 | bool "Enable Adaptec extensions" | ||
29 | depends on I2O | ||
30 | default y | ||
31 | ---help--- | ||
32 | Say Y for support of raidutils for Adaptec I2O controllers. You also | ||
33 | have to say Y to "I2O Configuration support", "I2O SCSI OSM" below | ||
34 | and to "SCSI generic support" under "SCSI device configuration". | ||
35 | |||
36 | config I2O_EXT_ADAPTEC_DMA64 | ||
37 | bool "Enable 64-bit DMA" | ||
38 | depends on I2O_EXT_ADAPTEC && ( 64BIT || HIGHMEM64G ) | ||
39 | default y | ||
40 | ---help--- | ||
41 | Say Y for support of 64-bit DMA transfer mode on Adaptec I2O | ||
42 | controllers. | ||
43 | Note: You need at least firmware version 3709. | ||
44 | |||
27 | config I2O_CONFIG | 45 | config I2O_CONFIG |
28 | tristate "I2O Configuration support" | 46 | tristate "I2O Configuration support" |
29 | depends on PCI && I2O | 47 | depends on I2O |
30 | help | 48 | ---help--- |
31 | Say Y for support of the configuration interface for the I2O adapters. | 49 | Say Y for support of the configuration interface for the I2O adapters. |
32 | If you have a RAID controller from Adaptec and you want to use the | 50 | If you have a RAID controller from Adaptec and you want to use the |
33 | raidutils to manage your RAID array, you have to say Y here. | 51 | raidutils to manage your RAID array, you have to say Y here. |
@@ -35,10 +53,28 @@ config I2O_CONFIG | |||
35 | To compile this support as a module, choose M here: the | 53 | To compile this support as a module, choose M here: the |
36 | module will be called i2o_config. | 54 | module will be called i2o_config. |
37 | 55 | ||
56 | config I2O_CONFIG_OLD_IOCTL | ||
57 | bool "Enable ioctls (OBSOLETE)" | ||
58 | depends on I2O_CONFIG | ||
59 | default y | ||
60 | ---help--- | ||
61 | Enables old ioctls. | ||
62 | |||
63 | config I2O_BUS | ||
64 | tristate "I2O Bus Adapter OSM" | ||
65 | depends on I2O | ||
66 | ---help--- | ||
67 | Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM | ||
68 | provides access to the busses on the I2O controller. The main purpose | ||
69 | is to rescan the bus to find new devices. | ||
70 | |||
71 | To compile this support as a module, choose M here: the | ||
72 | module will be called i2o_bus. | ||
73 | |||
38 | config I2O_BLOCK | 74 | config I2O_BLOCK |
39 | tristate "I2O Block OSM" | 75 | tristate "I2O Block OSM" |
40 | depends on I2O | 76 | depends on I2O |
41 | help | 77 | ---help--- |
42 | Include support for the I2O Block OSM. The Block OSM presents disk | 78 | Include support for the I2O Block OSM. The Block OSM presents disk |
43 | and other structured block devices to the operating system. If you | 79 | and other structured block devices to the operating system. If you |
44 | are using an RAID controller, you could access the array only by | 80 | are using an RAID controller, you could access the array only by |
@@ -51,7 +87,7 @@ config I2O_BLOCK | |||
51 | config I2O_SCSI | 87 | config I2O_SCSI |
52 | tristate "I2O SCSI OSM" | 88 | tristate "I2O SCSI OSM" |
53 | depends on I2O && SCSI | 89 | depends on I2O && SCSI |
54 | help | 90 | ---help--- |
55 | Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel | 91 | Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel |
56 | I2O controller. You can use both the SCSI and Block OSM together if | 92 | I2O controller. You can use both the SCSI and Block OSM together if |
57 | you wish. To access a RAID array, you must use the Block OSM driver. | 93 | you wish. To access a RAID array, you must use the Block OSM driver. |
@@ -63,7 +99,7 @@ config I2O_SCSI | |||
63 | config I2O_PROC | 99 | config I2O_PROC |
64 | tristate "I2O /proc support" | 100 | tristate "I2O /proc support" |
65 | depends on I2O | 101 | depends on I2O |
66 | help | 102 | ---help--- |
67 | If you say Y here and to "/proc file system support", you will be | 103 | If you say Y here and to "/proc file system support", you will be |
68 | able to read I2O related information from the virtual directory | 104 | able to read I2O related information from the virtual directory |
69 | /proc/i2o. | 105 | /proc/i2o. |
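The new CONFIG_I2O_EXT_ADAPTEC option is consumed as an ordinary preprocessor symbol: config-osm.c further down in this patch wraps its Adaptec/DPT-only paths in #ifdef CONFIG_I2O_EXT_ADAPTEC. A minimal sketch of that guard pattern, with a hypothetical helper name that is not taken from the patch:

#include <linux/i2o.h>

/* Hypothetical helper; only the #ifdef pattern is taken from this patch. */
#ifdef CONFIG_I2O_EXT_ADAPTEC
static void example_dpt_setup(struct i2o_controller *c)
{
	/* Adaptec/DPT-specific work, built only when I2O_EXT_ADAPTEC=y */
}
#else
static inline void example_dpt_setup(struct i2o_controller *c)
{
}
#endif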
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
index aabc6cdc3fce..2c2e39aa1efa 100644
--- a/drivers/message/i2o/Makefile
+++ b/drivers/message/i2o/Makefile
@@ -6,8 +6,11 @@ | |||
6 | # | 6 | # |
7 | 7 | ||
8 | i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o | 8 | i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o |
9 | i2o_bus-y += bus-osm.o | ||
10 | i2o_config-y += config-osm.o | ||
9 | obj-$(CONFIG_I2O) += i2o_core.o | 11 | obj-$(CONFIG_I2O) += i2o_core.o |
10 | obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o | 12 | obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o |
13 | obj-$(CONFIG_I2O_BUS) += i2o_bus.o | ||
11 | obj-$(CONFIG_I2O_BLOCK) += i2o_block.o | 14 | obj-$(CONFIG_I2O_BLOCK) += i2o_block.o |
12 | obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o | 15 | obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o |
13 | obj-$(CONFIG_I2O_PROC) += i2o_proc.o | 16 | obj-$(CONFIG_I2O_PROC) += i2o_proc.o |
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
new file mode 100644
index 000000000000..151b228e1cb3
--- /dev/null
+++ b/drivers/message/i2o/bus-osm.c
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Bus Adapter OSM | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | |||
19 | #define OSM_NAME "bus-osm" | ||
20 | #define OSM_VERSION "$Rev$" | ||
21 | #define OSM_DESCRIPTION "I2O Bus Adapter OSM" | ||
22 | |||
23 | static struct i2o_driver i2o_bus_driver; | ||
24 | |||
25 | /* Bus OSM class handling definition */ | ||
26 | static struct i2o_class_id i2o_bus_class_id[] = { | ||
27 | {I2O_CLASS_BUS_ADAPTER}, | ||
28 | {I2O_CLASS_END} | ||
29 | }; | ||
30 | |||
31 | /** | ||
32 | * i2o_bus_scan - Scan the bus for new devices | ||
33 | * @dev: I2O device of the bus, which should be scanned | ||
34 | * | ||
35 | * Scans the bus dev for new / removed devices. After the scan a new LCT | ||
36 | * will be fetched automatically. | ||
37 | * | ||
38 | * Returns 0 on success or negative error code on failure. | ||
39 | */ | ||
40 | static int i2o_bus_scan(struct i2o_device *dev) | ||
41 | { | ||
42 | struct i2o_message __iomem *msg; | ||
43 | u32 m; | ||
44 | |||
45 | m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
46 | if (m == I2O_QUEUE_EMPTY) | ||
47 | return -ETIMEDOUT; | ||
48 | |||
49 | writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | ||
50 | writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid, | ||
51 | &msg->u.head[1]); | ||
52 | |||
53 | return i2o_msg_post_wait(dev->iop, m, 60); | ||
54 | }; | ||
55 | |||
56 | /** | ||
57 | * i2o_bus_store_scan - Scan the I2O Bus Adapter | ||
58 | * @d: device which should be scanned | ||
59 | * | ||
60 | * Returns count. | ||
61 | */ | ||
62 | static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf, | ||
63 | size_t count) | ||
64 | { | ||
65 | struct i2o_device *i2o_dev = to_i2o_device(d); | ||
66 | int rc; | ||
67 | |||
68 | if ((rc = i2o_bus_scan(i2o_dev))) | ||
69 | osm_warn("bus scan failed %d\n", rc); | ||
70 | |||
71 | return count; | ||
72 | } | ||
73 | |||
74 | /* Bus Adapter OSM device attributes */ | ||
75 | static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan); | ||
76 | |||
77 | /** | ||
78 | * i2o_bus_probe - verify if dev is a I2O Bus Adapter device and install it | ||
79 | * @dev: device to verify if it is a I2O Bus Adapter device | ||
80 | * | ||
81 | * Because we want to catch all Bus Adapters, always return 0. | ||
82 | * | ||
83 | * Returns 0. | ||
84 | */ | ||
85 | static int i2o_bus_probe(struct device *dev) | ||
86 | { | ||
87 | struct i2o_device *i2o_dev = to_i2o_device(get_device(dev)); | ||
88 | |||
89 | device_create_file(dev, &dev_attr_scan); | ||
90 | |||
91 | osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
92 | |||
93 | return 0; | ||
94 | }; | ||
95 | |||
96 | /** | ||
97 | * i2o_bus_remove - remove the I2O Bus Adapter device from the system again | ||
98 | * @dev: I2O Bus Adapter device which should be removed | ||
99 | * | ||
100 | * Always returns 0. | ||
101 | */ | ||
102 | static int i2o_bus_remove(struct device *dev) | ||
103 | { | ||
104 | struct i2o_device *i2o_dev = to_i2o_device(dev); | ||
105 | |||
106 | device_remove_file(dev, &dev_attr_scan); | ||
107 | |||
108 | put_device(dev); | ||
109 | |||
110 | osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
111 | |||
112 | return 0; | ||
113 | }; | ||
114 | |||
115 | /* Bus Adapter OSM driver struct */ | ||
116 | static struct i2o_driver i2o_bus_driver = { | ||
117 | .name = OSM_NAME, | ||
118 | .classes = i2o_bus_class_id, | ||
119 | .driver = { | ||
120 | .probe = i2o_bus_probe, | ||
121 | .remove = i2o_bus_remove, | ||
122 | }, | ||
123 | }; | ||
124 | |||
125 | /** | ||
126 | * i2o_bus_init - Bus Adapter OSM initialization function | ||
127 | * | ||
128 | * Only register the Bus Adapter OSM in the I2O core. | ||
129 | * | ||
130 | * Returns 0 on success or negative error code on failure. | ||
131 | */ | ||
132 | static int __init i2o_bus_init(void) | ||
133 | { | ||
134 | int rc; | ||
135 | |||
136 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
137 | |||
138 | /* Register Bus Adapter OSM into I2O core */ | ||
139 | rc = i2o_driver_register(&i2o_bus_driver); | ||
140 | if (rc) { | ||
141 | osm_err("Could not register Bus Adapter OSM\n"); | ||
142 | return rc; | ||
143 | } | ||
144 | |||
145 | return 0; | ||
146 | }; | ||
147 | |||
148 | /** | ||
149 | * i2o_bus_exit - Bus Adapter OSM exit function | ||
150 | * | ||
151 | * Unregisters Bus Adapter OSM from I2O core. | ||
152 | */ | ||
153 | static void __exit i2o_bus_exit(void) | ||
154 | { | ||
155 | i2o_driver_unregister(&i2o_bus_driver); | ||
156 | }; | ||
157 | |||
158 | MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); | ||
159 | MODULE_LICENSE("GPL"); | ||
160 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
161 | MODULE_VERSION(OSM_VERSION); | ||
162 | |||
163 | module_init(i2o_bus_init); | ||
164 | module_exit(i2o_bus_exit); | ||
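i2o_bus_probe() above creates one write-only sysfs attribute, "scan", per Bus Adapter device; any write to it calls i2o_bus_scan(), and the written value is ignored. A small userspace sketch of triggering a rescan, assuming a hypothetical sysfs path (the actual path depends on how the I2O core names its devices):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path is an assumption, for illustration only. */
	const char *path = "/sys/bus/i2o/devices/0-0009/scan";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Any write triggers i2o_bus_scan(); the value itself is ignored. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}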
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
new file mode 100644
index 000000000000..d0267609a949
--- /dev/null
+++ b/drivers/message/i2o/config-osm.c
@@ -0,0 +1,579 @@ | |||
1 | /* | ||
2 | * Configuration OSM | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/i2o.h> | ||
18 | #include <linux/namei.h> | ||
19 | |||
20 | #include <asm/uaccess.h> | ||
21 | |||
22 | #define OSM_NAME "config-osm" | ||
23 | #define OSM_VERSION "1.248" | ||
24 | #define OSM_DESCRIPTION "I2O Configuration OSM" | ||
25 | |||
26 | /* access mode user rw */ | ||
27 | #define S_IWRSR (S_IRUSR | S_IWUSR) | ||
28 | |||
29 | static struct i2o_driver i2o_config_driver; | ||
30 | |||
31 | /* Special file operations for sysfs */ | ||
32 | struct fops_attribute { | ||
33 | struct bin_attribute bin; | ||
34 | struct file_operations fops; | ||
35 | }; | ||
36 | |||
37 | /** | ||
38 | * sysfs_read_dummy | ||
39 | */ | ||
40 | static ssize_t sysfs_read_dummy(struct kobject *kobj, char *buf, loff_t offset, | ||
41 | size_t count) | ||
42 | { | ||
43 | return 0; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * sysfs_write_dummy | ||
48 | */ | ||
49 | static ssize_t sysfs_write_dummy(struct kobject *kobj, char *buf, loff_t offset, | ||
50 | size_t count) | ||
51 | { | ||
52 | return 0; | ||
53 | }; | ||
54 | |||
55 | /** | ||
56 | * sysfs_create_fops_file - Creates attribute with special file operations | ||
57 | * @kobj: kobject which should contains the attribute | ||
58 | * @attr: attributes which should be used to create file | ||
59 | * | ||
60 | * First creates attribute @attr in kobject @kobj. If it is the first time | ||
61 | * this function is called, merge old fops from sysfs with new one and | ||
62 | * write it back. Afterwards the new fops will be set for the created | ||
63 | * attribute. | ||
64 | * | ||
65 | * Returns 0 on success or negative error code on failure. | ||
66 | */ | ||
67 | static int sysfs_create_fops_file(struct kobject *kobj, | ||
68 | struct fops_attribute *attr) | ||
69 | { | ||
70 | struct file_operations tmp, *fops; | ||
71 | struct dentry *d; | ||
72 | struct qstr qstr; | ||
73 | int rc; | ||
74 | |||
75 | fops = &attr->fops; | ||
76 | |||
77 | if (fops->read) | ||
78 | attr->bin.read = sysfs_read_dummy; | ||
79 | |||
80 | if (fops->write) | ||
81 | attr->bin.write = sysfs_write_dummy; | ||
82 | |||
83 | if ((rc = sysfs_create_bin_file(kobj, &attr->bin))) | ||
84 | return rc; | ||
85 | |||
86 | qstr.name = attr->bin.attr.name; | ||
87 | qstr.len = strlen(qstr.name); | ||
88 | qstr.hash = full_name_hash(qstr.name, qstr.len); | ||
89 | |||
90 | if ((d = lookup_hash(&qstr, kobj->dentry))) { | ||
91 | if (!fops->owner) { | ||
92 | memcpy(&tmp, d->d_inode->i_fop, sizeof(tmp)); | ||
93 | if (fops->read) | ||
94 | tmp.read = fops->read; | ||
95 | if (fops->write) | ||
96 | tmp.write = fops->write; | ||
97 | memcpy(fops, &tmp, sizeof(tmp)); | ||
98 | } | ||
99 | |||
100 | d->d_inode->i_fop = fops; | ||
101 | } else | ||
102 | sysfs_remove_bin_file(kobj, &attr->bin); | ||
103 | |||
104 | return -ENOENT; | ||
105 | }; | ||
106 | |||
107 | /** | ||
108 | * sysfs_remove_fops_file - Remove attribute with special file operations | ||
109 | * @kobj: kobject which contains the attribute | ||
110 | * @attr: attributes which are used to create file | ||
111 | * | ||
112 | * Only a wrapper around sysfs_remove_bin_file() | ||
113 | * | ||
114 | * Returns 0 on success or negative error code on failure. | ||
115 | */ | ||
116 | static inline int sysfs_remove_fops_file(struct kobject *kobj, | ||
117 | struct fops_attribute *attr) | ||
118 | { | ||
119 | return sysfs_remove_bin_file(kobj, &attr->bin); | ||
120 | }; | ||
121 | |||
122 | /** | ||
123 | * i2o_config_read_hrt - Returns the HRT of the controller | ||
124 | * @kob: kernel object handle | ||
125 | * @buf: buffer into which the HRT should be copied | ||
126 | * @off: file offset | ||
127 | * @count: number of bytes to read | ||
128 | * | ||
129 | * Put @count bytes starting at @off into @buf from the HRT of the I2O | ||
130 | * controller corresponding to @kobj. | ||
131 | * | ||
132 | * Returns number of bytes copied into buffer. | ||
133 | */ | ||
134 | static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf, | ||
135 | loff_t offset, size_t count) | ||
136 | { | ||
137 | struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; | ||
138 | i2o_hrt *hrt = c->hrt.virt; | ||
139 | |||
140 | u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4; | ||
141 | |||
142 | if (offset > size) | ||
143 | return 0; | ||
144 | |||
145 | if (offset + count > size) | ||
146 | count = size - offset; | ||
147 | |||
148 | memcpy(buf, (u8 *) hrt + offset, count); | ||
149 | |||
150 | return count; | ||
151 | }; | ||
152 | |||
153 | /** | ||
154 | * i2o_config_read_lct - Returns the LCT of the controller | ||
155 | * @kob: kernel object handle | ||
156 | * @buf: buffer into which the LCT should be copied | ||
157 | * @off: file offset | ||
158 | * @count: number of bytes to read | ||
159 | * | ||
160 | * Put @count bytes starting at @off into @buf from the LCT of the I2O | ||
161 | * controller corresponding to @kobj. | ||
162 | * | ||
163 | * Returns number of bytes copied into buffer. | ||
164 | */ | ||
165 | static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf, | ||
166 | loff_t offset, size_t count) | ||
167 | { | ||
168 | struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; | ||
169 | u32 size = c->lct->table_size * 4; | ||
170 | |||
171 | if (offset > size) | ||
172 | return 0; | ||
173 | |||
174 | if (offset + count > size) | ||
175 | count = size - offset; | ||
176 | |||
177 | memcpy(buf, (u8 *) c->lct + offset, count); | ||
178 | |||
179 | return count; | ||
180 | }; | ||
181 | |||
182 | #define I2O_CONFIG_SW_ATTR(_name,_mode,_type,_swid) \ | ||
183 | static ssize_t i2o_config_##_name##_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { \ | ||
184 | return i2o_config_sw_read(file, buf, count, offset, _type, _swid); \ | ||
185 | };\ | ||
186 | \ | ||
187 | static ssize_t i2o_config_##_name##_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { \ | ||
188 | return i2o_config_sw_write(file, buf, count, offset, _type, _swid); \ | ||
189 | }; \ | ||
190 | \ | ||
191 | static struct fops_attribute i2o_config_attr_##_name = { \ | ||
192 | .bin = { .attr = { .name = __stringify(_name), .mode = _mode, \ | ||
193 | .owner = THIS_MODULE }, \ | ||
194 | .size = 0, }, \ | ||
195 | .fops = { .write = i2o_config_##_name##_write, \ | ||
196 | .read = i2o_config_##_name##_read} \ | ||
197 | }; | ||
198 | |||
199 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
200 | |||
201 | /** | ||
202 | * i2o_config_dpt_region - Converts type and id to flash region | ||
203 | * @swtype: type of software module reading | ||
204 | * @swid: id of software which should be read | ||
205 | * | ||
206 | * Converts type and id from I2O spec to the matching region for DPT / | ||
207 | * Adaptec controllers. | ||
208 | * | ||
209 | * Returns region which match type and id or -1 on error. | ||
210 | */ | ||
211 | static u32 i2o_config_dpt_region(u8 swtype, u8 swid) | ||
212 | { | ||
213 | switch (swtype) { | ||
214 | case I2O_SOFTWARE_MODULE_IRTOS: | ||
215 | /* | ||
216 | * content: operation firmware | ||
217 | * region size: | ||
218 | * 0xbc000 for 2554, 3754, 2564, 3757 | ||
219 | * 0x170000 for 2865 | ||
220 | * 0x17c000 for 3966 | ||
221 | */ | ||
222 | if (!swid) | ||
223 | return 0; | ||
224 | |||
225 | break; | ||
226 | |||
227 | case I2O_SOFTWARE_MODULE_IOP_PRIVATE: | ||
228 | /* | ||
229 | * content: BIOS and SMOR | ||
230 | * BIOS size: first 0x8000 bytes | ||
231 | * region size: | ||
232 | * 0x40000 for 2554, 3754, 2564, 3757 | ||
233 | * 0x80000 for 2865, 3966 | ||
234 | */ | ||
235 | if (!swid) | ||
236 | return 1; | ||
237 | |||
238 | break; | ||
239 | |||
240 | case I2O_SOFTWARE_MODULE_IOP_CONFIG: | ||
241 | switch (swid) { | ||
242 | case 0: | ||
243 | /* | ||
244 | * content: NVRAM defaults | ||
245 | * region size: 0x2000 bytes | ||
246 | */ | ||
247 | return 2; | ||
248 | case 1: | ||
249 | /* | ||
250 | * content: serial number | ||
251 | * region size: 0x2000 bytes | ||
252 | */ | ||
253 | return 3; | ||
254 | } | ||
255 | break; | ||
256 | } | ||
257 | |||
258 | return -1; | ||
259 | }; | ||
260 | |||
261 | #endif | ||
262 | |||
263 | /** | ||
264 | * i2o_config_sw_read - Read a software module from controller | ||
265 | * @file: file pointer | ||
266 | * @buf: buffer into which the data should be copied | ||
267 | * @count: number of bytes to read | ||
268 | * @off: file offset | ||
269 | * @swtype: type of software module reading | ||
270 | * @swid: id of software which should be read | ||
271 | * | ||
272 | * Transfers @count bytes at offset @offset from IOP into buffer using | ||
273 | * type @swtype and id @swid as described in I2O spec. | ||
274 | * | ||
275 | * Returns number of bytes copied into buffer or error code on failure. | ||
276 | */ | ||
277 | static ssize_t i2o_config_sw_read(struct file *file, char __user * buf, | ||
278 | size_t count, loff_t * offset, u8 swtype, | ||
279 | u32 swid) | ||
280 | { | ||
281 | struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata; | ||
282 | struct kobject *kobj = sd->s_element; | ||
283 | struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; | ||
284 | u32 m, function = I2O_CMD_SW_UPLOAD; | ||
285 | struct i2o_dma buffer; | ||
286 | struct i2o_message __iomem *msg; | ||
287 | u32 __iomem *mptr; | ||
288 | int rc, status; | ||
289 | |||
290 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
291 | if (m == I2O_QUEUE_EMPTY) | ||
292 | return -EBUSY; | ||
293 | |||
294 | mptr = &msg->body[3]; | ||
295 | |||
296 | if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) { | ||
297 | i2o_msg_nop(c, m); | ||
298 | return rc; | ||
299 | } | ||
300 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
301 | if (c->adaptec) { | ||
302 | mptr = &msg->body[4]; | ||
303 | function = I2O_CMD_PRIVATE; | ||
304 | |||
305 | writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]); | ||
306 | |||
307 | writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_READ, | ||
308 | &msg->body[0]); | ||
309 | writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]); | ||
310 | writel(*offset, &msg->body[2]); | ||
311 | writel(count, &msg->body[3]); | ||
312 | } else | ||
313 | #endif | ||
314 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); | ||
315 | |||
316 | writel(0xD0000000 | count, mptr++); | ||
317 | writel(buffer.phys, mptr); | ||
318 | |||
319 | writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]); | ||
320 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
321 | writel(0, &msg->u.head[3]); | ||
322 | |||
323 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
324 | if (!c->adaptec) | ||
325 | #endif | ||
326 | { | ||
327 | writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]); | ||
328 | writel(0, &msg->body[1]); | ||
329 | writel(swid, &msg->body[2]); | ||
330 | } | ||
331 | |||
332 | status = i2o_msg_post_wait_mem(c, m, 60, &buffer); | ||
333 | |||
334 | if (status == I2O_POST_WAIT_OK) { | ||
335 | if (!(rc = copy_to_user(buf, buffer.virt, count))) { | ||
336 | rc = count; | ||
337 | *offset += count; | ||
338 | } | ||
339 | } else | ||
340 | rc = -EIO; | ||
341 | |||
342 | if (status != -ETIMEDOUT) | ||
343 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
344 | |||
345 | return rc; | ||
346 | }; | ||
347 | |||
348 | /** | ||
349 | * i2o_config_sw_write - Write a software module to controller | ||
350 | * @file: file pointer | ||
351 | * @buf: buffer into which the data should be copied | ||
352 | * @count: number of bytes to read | ||
353 | * @off: file offset | ||
354 | * @swtype: type of software module writing | ||
355 | * @swid: id of software which should be written | ||
356 | * | ||
357 | * Transfers @count bytes at offset @offset from buffer to IOP using | ||
358 | * type @swtype and id @swid as described in I2O spec. | ||
359 | * | ||
360 | * Returns number of bytes copied from buffer or error code on failure. | ||
361 | */ | ||
362 | static ssize_t i2o_config_sw_write(struct file *file, const char __user * buf, | ||
363 | size_t count, loff_t * offset, u8 swtype, | ||
364 | u32 swid) | ||
365 | { | ||
366 | struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata; | ||
367 | struct kobject *kobj = sd->s_element; | ||
368 | struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop; | ||
369 | u32 m, function = I2O_CMD_SW_DOWNLOAD; | ||
370 | struct i2o_dma buffer; | ||
371 | struct i2o_message __iomem *msg; | ||
372 | u32 __iomem *mptr; | ||
373 | int rc, status; | ||
374 | |||
375 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
376 | if (m == I2O_QUEUE_EMPTY) | ||
377 | return -EBUSY; | ||
378 | |||
379 | mptr = &msg->body[3]; | ||
380 | |||
381 | if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) | ||
382 | goto nop_msg; | ||
383 | |||
384 | if ((rc = copy_from_user(buffer.virt, buf, count))) | ||
385 | goto free_buffer; | ||
386 | |||
387 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
388 | if (c->adaptec) { | ||
389 | mptr = &msg->body[4]; | ||
390 | function = I2O_CMD_PRIVATE; | ||
391 | |||
392 | writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]); | ||
393 | |||
394 | writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_WRITE, | ||
395 | &msg->body[0]); | ||
396 | writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]); | ||
397 | writel(*offset, &msg->body[2]); | ||
398 | writel(count, &msg->body[3]); | ||
399 | } else | ||
400 | #endif | ||
401 | writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]); | ||
402 | |||
403 | writel(0xD4000000 | count, mptr++); | ||
404 | writel(buffer.phys, mptr); | ||
405 | |||
406 | writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]); | ||
407 | writel(i2o_config_driver.context, &msg->u.head[2]); | ||
408 | writel(0, &msg->u.head[3]); | ||
409 | |||
410 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
411 | if (!c->adaptec) | ||
412 | #endif | ||
413 | { | ||
414 | writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]); | ||
415 | writel(0, &msg->body[1]); | ||
416 | writel(swid, &msg->body[2]); | ||
417 | } | ||
418 | |||
419 | status = i2o_msg_post_wait_mem(c, m, 60, &buffer); | ||
420 | |||
421 | if (status != -ETIMEDOUT) | ||
422 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
423 | |||
424 | if (status != I2O_POST_WAIT_OK) | ||
425 | return -EIO; | ||
426 | |||
427 | *offset += count; | ||
428 | |||
429 | return count; | ||
430 | |||
431 | free_buffer: | ||
432 | i2o_dma_free(&c->pdev->dev, &buffer); | ||
433 | |||
434 | nop_msg: | ||
435 | i2o_msg_nop(c, m); | ||
436 | |||
437 | return rc; | ||
438 | }; | ||
439 | |||
440 | /* attribute for HRT in sysfs */ | ||
441 | static struct bin_attribute i2o_config_hrt_attr = { | ||
442 | .attr = { | ||
443 | .name = "hrt", | ||
444 | .mode = S_IRUGO, | ||
445 | .owner = THIS_MODULE}, | ||
446 | .size = 0, | ||
447 | .read = i2o_config_read_hrt | ||
448 | }; | ||
449 | |||
450 | /* attribute for LCT in sysfs */ | ||
451 | static struct bin_attribute i2o_config_lct_attr = { | ||
452 | .attr = { | ||
453 | .name = "lct", | ||
454 | .mode = S_IRUGO, | ||
455 | .owner = THIS_MODULE}, | ||
456 | .size = 0, | ||
457 | .read = i2o_config_read_lct | ||
458 | }; | ||
459 | |||
460 | /* IRTOS firmware access */ | ||
461 | I2O_CONFIG_SW_ATTR(irtos, S_IWRSR, I2O_SOFTWARE_MODULE_IRTOS, 0); | ||
462 | |||
463 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
464 | |||
465 | /* | ||
466 | * attribute for BIOS / SMOR, nvram and serial number access on DPT / Adaptec | ||
467 | * controllers | ||
468 | */ | ||
469 | I2O_CONFIG_SW_ATTR(bios, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_PRIVATE, 0); | ||
470 | I2O_CONFIG_SW_ATTR(nvram, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 0); | ||
471 | I2O_CONFIG_SW_ATTR(serial, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 1); | ||
472 | |||
473 | #endif | ||
474 | |||
475 | /** | ||
476 | * i2o_config_notify_controller_add - Notify of added controller | ||
477 | * @c: the controller which was added | ||
478 | * | ||
479 | * If a I2O controller is added, we catch the notification to add sysfs | ||
480 | * entries. | ||
481 | */ | ||
482 | static void i2o_config_notify_controller_add(struct i2o_controller *c) | ||
483 | { | ||
484 | struct kobject *kobj = &c->exec->device.kobj; | ||
485 | |||
486 | sysfs_create_bin_file(kobj, &i2o_config_hrt_attr); | ||
487 | sysfs_create_bin_file(kobj, &i2o_config_lct_attr); | ||
488 | |||
489 | sysfs_create_fops_file(kobj, &i2o_config_attr_irtos); | ||
490 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
491 | if (c->adaptec) { | ||
492 | sysfs_create_fops_file(kobj, &i2o_config_attr_bios); | ||
493 | sysfs_create_fops_file(kobj, &i2o_config_attr_nvram); | ||
494 | sysfs_create_fops_file(kobj, &i2o_config_attr_serial); | ||
495 | } | ||
496 | #endif | ||
497 | }; | ||
498 | |||
499 | /** | ||
500 | * i2o_config_notify_controller_remove - Notify of removed controller | ||
501 | * @c: the controller which was removed | ||
502 | * | ||
503 | * If a I2O controller is removed, we catch the notification to remove the | ||
504 | * sysfs entries. | ||
505 | */ | ||
506 | static void i2o_config_notify_controller_remove(struct i2o_controller *c) | ||
507 | { | ||
508 | struct kobject *kobj = &c->exec->device.kobj; | ||
509 | |||
510 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
511 | if (c->adaptec) { | ||
512 | sysfs_remove_fops_file(kobj, &i2o_config_attr_serial); | ||
513 | sysfs_remove_fops_file(kobj, &i2o_config_attr_nvram); | ||
514 | sysfs_remove_fops_file(kobj, &i2o_config_attr_bios); | ||
515 | } | ||
516 | #endif | ||
517 | sysfs_remove_fops_file(kobj, &i2o_config_attr_irtos); | ||
518 | |||
519 | sysfs_remove_bin_file(kobj, &i2o_config_lct_attr); | ||
520 | sysfs_remove_bin_file(kobj, &i2o_config_hrt_attr); | ||
521 | }; | ||
522 | |||
523 | /* Config OSM driver struct */ | ||
524 | static struct i2o_driver i2o_config_driver = { | ||
525 | .name = OSM_NAME, | ||
526 | .notify_controller_add = i2o_config_notify_controller_add, | ||
527 | .notify_controller_remove = i2o_config_notify_controller_remove | ||
528 | }; | ||
529 | |||
530 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
531 | #include "i2o_config.c" | ||
532 | #endif | ||
533 | |||
534 | /** | ||
535 | * i2o_config_init - Configuration OSM initialization function | ||
536 | * | ||
537 | * Registers Configuration OSM in the I2O core and if old ioctl's are | ||
538 | * compiled in initialize them. | ||
539 | * | ||
540 | * Returns 0 on success or negative error code on failure. | ||
541 | */ | ||
542 | static int __init i2o_config_init(void) | ||
543 | { | ||
544 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
545 | |||
546 | if (i2o_driver_register(&i2o_config_driver)) { | ||
547 | osm_err("handler register failed.\n"); | ||
548 | return -EBUSY; | ||
549 | } | ||
550 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
551 | if (i2o_config_old_init()) | ||
552 | i2o_driver_unregister(&i2o_config_driver); | ||
553 | #endif | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | /** | ||
559 | * i2o_config_exit - Configuration OSM exit function | ||
560 | * | ||
561 | * If old ioctl's are compiled in exit remove them and unregisters | ||
562 | * Configuration OSM from I2O core. | ||
563 | */ | ||
564 | static void i2o_config_exit(void) | ||
565 | { | ||
566 | #ifdef CONFIG_I2O_CONFIG_OLD_IOCTL | ||
567 | i2o_config_old_exit(); | ||
568 | #endif | ||
569 | |||
570 | i2o_driver_unregister(&i2o_config_driver); | ||
571 | } | ||
572 | |||
573 | MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>"); | ||
574 | MODULE_LICENSE("GPL"); | ||
575 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
576 | MODULE_VERSION(OSM_VERSION); | ||
577 | |||
578 | module_init(i2o_config_init); | ||
579 | module_exit(i2o_config_exit); | ||
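i2o_config_notify_controller_add() above exports the controller's HRT and LCT as read-only binary attributes ("hrt", "lct") on the Executive device's kobject, plus read/write firmware regions ("irtos", and on Adaptec hardware "bios", "nvram" and "serial"). A userspace sketch that dumps one of the read-only tables, again with an assumed sysfs path:

#include <stdio.h>

int main(void)
{
	/* Path is an assumption; the attribute sits in the Exec device's
	 * sysfs directory. */
	const char *path = "/sys/bus/i2o/devices/0-0000/lct";
	unsigned char buf[4096];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);	/* raw LCT bytes to stdout */
	fclose(f);
	return 0;
}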
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
new file mode 100644
index 000000000000..c5bcfd70f711
--- /dev/null
+++ b/drivers/message/i2o/core.h
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * I2O core internal declarations | ||
3 | * | ||
4 | * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | * Fixes/additions: | ||
12 | * Markus Lidel <Markus.Lidel@shadowconnect.com> | ||
13 | * initial version. | ||
14 | */ | ||
15 | |||
16 | /* Exec-OSM */ | ||
17 | extern struct bus_type i2o_bus_type; | ||
18 | |||
19 | extern struct i2o_driver i2o_exec_driver; | ||
20 | extern int i2o_exec_lct_get(struct i2o_controller *); | ||
21 | |||
22 | extern int __init i2o_exec_init(void); | ||
23 | extern void __exit i2o_exec_exit(void); | ||
24 | |||
25 | /* driver */ | ||
26 | extern int i2o_driver_dispatch(struct i2o_controller *, u32); | ||
27 | |||
28 | extern int __init i2o_driver_init(void); | ||
29 | extern void __exit i2o_driver_exit(void); | ||
30 | |||
31 | /* PCI */ | ||
32 | extern int __init i2o_pci_init(void); | ||
33 | extern void __exit i2o_pci_exit(void); | ||
34 | |||
35 | /* device */ | ||
36 | extern void i2o_device_remove(struct i2o_device *); | ||
37 | extern int i2o_device_parse_lct(struct i2o_controller *); | ||
38 | |||
39 | extern int i2o_device_init(void); | ||
40 | extern void i2o_device_exit(void); | ||
41 | |||
42 | /* IOP */ | ||
43 | extern struct i2o_controller *i2o_iop_alloc(void); | ||
44 | extern void i2o_iop_free(struct i2o_controller *); | ||
45 | |||
46 | extern int i2o_iop_add(struct i2o_controller *); | ||
47 | extern void i2o_iop_remove(struct i2o_controller *); | ||
48 | |||
49 | /* config */ | ||
50 | extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); | ||
51 | |||
52 | /* control registers relative to c->base */ | ||
53 | #define I2O_IRQ_STATUS 0x30 | ||
54 | #define I2O_IRQ_MASK 0x34 | ||
55 | #define I2O_IN_PORT 0x40 | ||
56 | #define I2O_OUT_PORT 0x44 | ||
57 | |||
58 | #define I2O_IRQ_OUTBOUND_POST 0x00000008 | ||
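core.h also centralizes the controller register offsets used by the PCI layer and the interrupt path. A sketch of how I2O_IRQ_STATUS and I2O_IRQ_OUTBOUND_POST fit together, assuming the mapped register window is reachable as c->base.virt (an assumption based on the "relative to c->base" comment; the real code lives in pci.c):

#include <linux/i2o.h>

/* Illustrative only: test whether the IOP has posted an outbound message. */
static int example_outbound_pending(struct i2o_controller *c)
{
	u32 status = readl(c->base.virt + I2O_IRQ_STATUS);	/* assumed field */

	return status & I2O_IRQ_OUTBOUND_POST;
}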
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 2a5d478fc60e..018ca887ca85 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -4,8 +4,6 @@ | |||
4 | #include <linux/pci.h> | 4 | #include <linux/pci.h> |
5 | #include <linux/i2o.h> | 5 | #include <linux/i2o.h> |
6 | 6 | ||
7 | extern struct i2o_driver **i2o_drivers; | ||
8 | extern unsigned int i2o_max_drivers; | ||
9 | static void i2o_report_util_cmd(u8 cmd); | 7 | static void i2o_report_util_cmd(u8 cmd); |
10 | static void i2o_report_exec_cmd(u8 cmd); | 8 | static void i2o_report_exec_cmd(u8 cmd); |
11 | static void i2o_report_fail_status(u8 req_status, u32 * msg); | 9 | static void i2o_report_fail_status(u8 req_status, u32 * msg); |
@@ -23,7 +21,6 @@ void i2o_report_status(const char *severity, const char *str, | |||
23 | u8 cmd = (msg[1] >> 24) & 0xFF; | 21 | u8 cmd = (msg[1] >> 24) & 0xFF; |
24 | u8 req_status = (msg[4] >> 24) & 0xFF; | 22 | u8 req_status = (msg[4] >> 24) & 0xFF; |
25 | u16 detailed_status = msg[4] & 0xFFFF; | 23 | u16 detailed_status = msg[4] & 0xFFFF; |
26 | //struct i2o_driver *h = i2o_drivers[msg[2] & (i2o_max_drivers-1)]; | ||
27 | 24 | ||
28 | if (cmd == I2O_CMD_UTIL_EVT_REGISTER) | 25 | if (cmd == I2O_CMD_UTIL_EVT_REGISTER) |
29 | return; // No status in this reply | 26 | return; // No status in this reply |
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index eb907e87bc7b..21f16ba3ac38 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -16,9 +16,7 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/i2o.h> | 17 | #include <linux/i2o.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | 19 | #include "core.h" | |
20 | /* Exec OSM functions */ | ||
21 | extern struct bus_type i2o_bus_type; | ||
22 | 20 | ||
23 | /** | 21 | /** |
24 | * i2o_device_issue_claim - claim or release a device | 22 | * i2o_device_issue_claim - claim or release a device |
@@ -282,8 +280,7 @@ int i2o_device_parse_lct(struct i2o_controller *c) | |||
282 | 280 | ||
283 | down(&c->lct_lock); | 281 | down(&c->lct_lock); |
284 | 282 | ||
285 | if (c->lct) | 283 | kfree(c->lct); |
286 | kfree(c->lct); | ||
287 | 284 | ||
288 | lct = c->dlct.virt; | 285 | lct = c->dlct.virt; |
289 | 286 | ||
@@ -294,12 +291,12 @@ int i2o_device_parse_lct(struct i2o_controller *c) | |||
294 | } | 291 | } |
295 | 292 | ||
296 | if (lct->table_size * 4 > c->dlct.len) { | 293 | if (lct->table_size * 4 > c->dlct.len) { |
297 | memcpy_fromio(c->lct, c->dlct.virt, c->dlct.len); | 294 | memcpy(c->lct, c->dlct.virt, c->dlct.len); |
298 | up(&c->lct_lock); | 295 | up(&c->lct_lock); |
299 | return -EAGAIN; | 296 | return -EAGAIN; |
300 | } | 297 | } |
301 | 298 | ||
302 | memcpy_fromio(c->lct, c->dlct.virt, lct->table_size * 4); | 299 | memcpy(c->lct, c->dlct.virt, lct->table_size * 4); |
303 | 300 | ||
304 | lct = c->lct; | 301 | lct = c->lct; |
305 | 302 | ||
@@ -354,7 +351,7 @@ static ssize_t i2o_device_class_show_class_id(struct class_device *cd, | |||
354 | { | 351 | { |
355 | struct i2o_device *dev = to_i2o_device(cd->dev); | 352 | struct i2o_device *dev = to_i2o_device(cd->dev); |
356 | 353 | ||
357 | sprintf(buf, "%03x\n", dev->lct_data.class_id); | 354 | sprintf(buf, "0x%03x\n", dev->lct_data.class_id); |
358 | return strlen(buf) + 1; | 355 | return strlen(buf) + 1; |
359 | }; | 356 | }; |
360 | 357 | ||
@@ -369,7 +366,7 @@ static ssize_t i2o_device_class_show_tid(struct class_device *cd, char *buf) | |||
369 | { | 366 | { |
370 | struct i2o_device *dev = to_i2o_device(cd->dev); | 367 | struct i2o_device *dev = to_i2o_device(cd->dev); |
371 | 368 | ||
372 | sprintf(buf, "%03x\n", dev->lct_data.tid); | 369 | sprintf(buf, "0x%03x\n", dev->lct_data.tid); |
373 | return strlen(buf) + 1; | 370 | return strlen(buf) + 1; |
374 | }; | 371 | }; |
375 | 372 | ||
@@ -401,25 +398,27 @@ static int i2o_device_class_add(struct class_device *cd) | |||
401 | 398 | ||
402 | /* create user entries for this device */ | 399 | /* create user entries for this device */ |
403 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); | 400 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid); |
404 | if (tmp) | 401 | if (tmp && (tmp != i2o_dev)) |
405 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, | 402 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, |
406 | "user"); | 403 | "user"); |
407 | 404 | ||
408 | /* create user entries refering to this device */ | 405 | /* create user entries refering to this device */ |
409 | list_for_each_entry(tmp, &c->devices, list) | 406 | list_for_each_entry(tmp, &c->devices, list) |
410 | if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid) | 407 | if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid) |
408 | && (tmp != i2o_dev)) | ||
411 | sysfs_create_link(&tmp->device.kobj, | 409 | sysfs_create_link(&tmp->device.kobj, |
412 | &i2o_dev->device.kobj, "user"); | 410 | &i2o_dev->device.kobj, "user"); |
413 | 411 | ||
414 | /* create parent entries for this device */ | 412 | /* create parent entries for this device */ |
415 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); | 413 | tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid); |
416 | if (tmp) | 414 | if (tmp && (tmp != i2o_dev)) |
417 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, | 415 | sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj, |
418 | "parent"); | 416 | "parent"); |
419 | 417 | ||
420 | /* create parent entries refering to this device */ | 418 | /* create parent entries refering to this device */ |
421 | list_for_each_entry(tmp, &c->devices, list) | 419 | list_for_each_entry(tmp, &c->devices, list) |
422 | if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) | 420 | if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid) |
421 | && (tmp != i2o_dev)) | ||
423 | sysfs_create_link(&tmp->device.kobj, | 422 | sysfs_create_link(&tmp->device.kobj, |
424 | &i2o_dev->device.kobj, "parent"); | 423 | &i2o_dev->device.kobj, "parent"); |
425 | 424 | ||
@@ -444,9 +443,8 @@ static struct class_interface i2o_device_class_interface = { | |||
444 | * Note that the minimum sized reslist is 8 bytes and contains | 443 | * Note that the minimum sized reslist is 8 bytes and contains |
445 | * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. | 444 | * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. |
446 | */ | 445 | */ |
447 | |||
448 | int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, | 446 | int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, |
449 | int oplen, void *reslist, int reslen) | 447 | int oplen, void *reslist, int reslen) |
450 | { | 448 | { |
451 | struct i2o_message __iomem *msg; | 449 | struct i2o_message __iomem *msg; |
452 | u32 m; | 450 | u32 m; |
@@ -489,7 +487,7 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist, | |||
489 | if (rc == -ETIMEDOUT) | 487 | if (rc == -ETIMEDOUT) |
490 | return rc; | 488 | return rc; |
491 | 489 | ||
492 | memcpy_fromio(reslist, res.virt, res.len); | 490 | memcpy(reslist, res.virt, res.len); |
493 | i2o_dma_free(dev, &res); | 491 | i2o_dma_free(dev, &res); |
494 | 492 | ||
495 | /* Query failed */ | 493 | /* Query failed */ |
@@ -531,17 +529,23 @@ int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field, | |||
531 | void *buf, int buflen) | 529 | void *buf, int buflen) |
532 | { | 530 | { |
533 | u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; | 531 | u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; |
534 | u8 resblk[8 + buflen]; /* 8 bytes for header */ | 532 | u8 *resblk; /* 8 bytes for header */ |
535 | int size; | 533 | int size; |
536 | 534 | ||
537 | if (field == -1) /* whole group */ | 535 | if (field == -1) /* whole group */ |
538 | opblk[4] = -1; | 536 | opblk[4] = -1; |
539 | 537 | ||
538 | resblk = kmalloc(buflen + 8, GFP_KERNEL | GFP_ATOMIC); | ||
539 | if (!resblk) | ||
540 | return -ENOMEM; | ||
541 | |||
540 | size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, | 542 | size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk, |
541 | sizeof(opblk), resblk, sizeof(resblk)); | 543 | sizeof(opblk), resblk, buflen + 8); |
542 | 544 | ||
543 | memcpy(buf, resblk + 8, buflen); /* cut off header */ | 545 | memcpy(buf, resblk + 8, buflen); /* cut off header */ |
544 | 546 | ||
547 | kfree(resblk); | ||
548 | |||
545 | if (size > buflen) | 549 | if (size > buflen) |
546 | return buflen; | 550 | return buflen; |
547 | 551 | ||
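The last hunk above moves i2o_parm_field_get()'s result block from a variable-length array on the stack to a kmalloc()ed buffer of buflen + 8 bytes, with the 8-byte reply header stripped before copying into the caller's buffer. A caller-side sketch based on the signature shown in the hunk; the group and field numbers are illustrative, not taken from the patch:

#include <linux/i2o.h>

/* Hypothetical caller: fetch one 32-bit parameter field from a device. */
static int example_read_field(struct i2o_device *i2o_dev)
{
	u32 value;
	int rc;

	/* Group 0x0000, field 3 are made-up values for illustration. */
	rc = i2o_parm_field_get(i2o_dev, 0x0000, 3, &value, sizeof(value));
	if (rc < 0)
		return rc;	/* -ENOMEM or a transport error */

	return 0;
}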
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index 91f4edbb2a27..739bfdef0c6d 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -17,9 +17,12 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/rwsem.h> | 18 | #include <linux/rwsem.h> |
19 | #include <linux/i2o.h> | 19 | #include <linux/i2o.h> |
20 | #include "core.h" | ||
21 | |||
22 | #define OSM_NAME "i2o" | ||
20 | 23 | ||
21 | /* max_drivers - Maximum I2O drivers (OSMs) which could be registered */ | 24 | /* max_drivers - Maximum I2O drivers (OSMs) which could be registered */ |
22 | unsigned int i2o_max_drivers = I2O_MAX_DRIVERS; | 25 | static unsigned int i2o_max_drivers = I2O_MAX_DRIVERS; |
23 | module_param_named(max_drivers, i2o_max_drivers, uint, 0); | 26 | module_param_named(max_drivers, i2o_max_drivers, uint, 0); |
24 | MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support"); | 27 | MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support"); |
25 | 28 | ||
@@ -76,17 +79,16 @@ int i2o_driver_register(struct i2o_driver *drv) | |||
76 | int rc = 0; | 79 | int rc = 0; |
77 | unsigned long flags; | 80 | unsigned long flags; |
78 | 81 | ||
79 | pr_debug("i2o: Register driver %s\n", drv->name); | 82 | osm_debug("Register driver %s\n", drv->name); |
80 | 83 | ||
81 | if (drv->event) { | 84 | if (drv->event) { |
82 | drv->event_queue = create_workqueue(drv->name); | 85 | drv->event_queue = create_workqueue(drv->name); |
83 | if (!drv->event_queue) { | 86 | if (!drv->event_queue) { |
84 | printk(KERN_ERR "i2o: Could not initialize event queue " | 87 | osm_err("Could not initialize event queue for driver " |
85 | "for driver %s\n", drv->name); | 88 | "%s\n", drv->name); |
86 | return -EFAULT; | 89 | return -EFAULT; |
87 | } | 90 | } |
88 | pr_debug("i2o: Event queue initialized for driver %s\n", | 91 | osm_debug("Event queue initialized for driver %s\n", drv->name); |
89 | drv->name); | ||
90 | } else | 92 | } else |
91 | drv->event_queue = NULL; | 93 | drv->event_queue = NULL; |
92 | 94 | ||
@@ -97,8 +99,8 @@ int i2o_driver_register(struct i2o_driver *drv) | |||
97 | 99 | ||
98 | for (i = 0; i2o_drivers[i]; i++) | 100 | for (i = 0; i2o_drivers[i]; i++) |
99 | if (i >= i2o_max_drivers) { | 101 | if (i >= i2o_max_drivers) { |
100 | printk(KERN_ERR "i2o: too many drivers registered, " | 102 | osm_err("too many drivers registered, increase " |
101 | "increase max_drivers\n"); | 103 | "max_drivers\n"); |
102 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | 104 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); |
103 | return -EFAULT; | 105 | return -EFAULT; |
104 | } | 106 | } |
@@ -108,18 +110,16 @@ int i2o_driver_register(struct i2o_driver *drv) | |||
108 | 110 | ||
109 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); | 111 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); |
110 | 112 | ||
111 | pr_debug("i2o: driver %s gets context id %d\n", drv->name, | 113 | osm_debug("driver %s gets context id %d\n", drv->name, drv->context); |
112 | drv->context); | ||
113 | 114 | ||
114 | list_for_each_entry(c, &i2o_controllers, list) { | 115 | list_for_each_entry(c, &i2o_controllers, list) { |
115 | struct i2o_device *i2o_dev; | 116 | struct i2o_device *i2o_dev; |
116 | 117 | ||
117 | i2o_driver_notify_controller_add(drv, c); | 118 | i2o_driver_notify_controller_add(drv, c); |
118 | list_for_each_entry(i2o_dev, &c->devices, list) | 119 | list_for_each_entry(i2o_dev, &c->devices, list) |
119 | i2o_driver_notify_device_add(drv, i2o_dev); | 120 | i2o_driver_notify_device_add(drv, i2o_dev); |
120 | } | 121 | } |
121 | 122 | ||
122 | |||
123 | rc = driver_register(&drv->driver); | 123 | rc = driver_register(&drv->driver); |
124 | if (rc) | 124 | if (rc) |
125 | destroy_workqueue(drv->event_queue); | 125 | destroy_workqueue(drv->event_queue); |
@@ -139,7 +139,7 @@ void i2o_driver_unregister(struct i2o_driver *drv) | |||
139 | struct i2o_controller *c; | 139 | struct i2o_controller *c; |
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | 141 | ||
142 | pr_debug("i2o: unregister driver %s\n", drv->name); | 142 | osm_debug("unregister driver %s\n", drv->name); |
143 | 143 | ||
144 | driver_unregister(&drv->driver); | 144 | driver_unregister(&drv->driver); |
145 | 145 | ||
@@ -159,7 +159,7 @@ void i2o_driver_unregister(struct i2o_driver *drv) | |||
159 | if (drv->event_queue) { | 159 | if (drv->event_queue) { |
160 | destroy_workqueue(drv->event_queue); | 160 | destroy_workqueue(drv->event_queue); |
161 | drv->event_queue = NULL; | 161 | drv->event_queue = NULL; |
162 | pr_debug("i2o: event queue removed for %s\n", drv->name); | 162 | osm_debug("event queue removed for %s\n", drv->name); |
163 | } | 163 | } |
164 | }; | 164 | }; |
165 | 165 | ||
@@ -176,68 +176,70 @@ void i2o_driver_unregister(struct i2o_driver *drv) | |||
176 | * on success and if the message should be flushed afterwords. Returns | 176 | * on success and if the message should be flushed afterwords. Returns |
177 | * negative error code on failure (the message will be flushed too). | 177 | * negative error code on failure (the message will be flushed too). |
178 | */ | 178 | */ |
179 | int i2o_driver_dispatch(struct i2o_controller *c, u32 m, | 179 | int i2o_driver_dispatch(struct i2o_controller *c, u32 m) |
180 | struct i2o_message __iomem *msg) | ||
181 | { | 180 | { |
182 | struct i2o_driver *drv; | 181 | struct i2o_driver *drv; |
183 | u32 context = readl(&msg->u.s.icntxt); | 182 | struct i2o_message *msg = i2o_msg_out_to_virt(c, m); |
183 | u32 context = le32_to_cpu(msg->u.s.icntxt); | ||
184 | unsigned long flags; | ||
184 | 185 | ||
185 | if (likely(context < i2o_max_drivers)) { | 186 | if (unlikely(context >= i2o_max_drivers)) { |
186 | spin_lock(&i2o_drivers_lock); | 187 | osm_warn("%s: Spurious reply to unknown driver %d\n", c->name, |
187 | drv = i2o_drivers[context]; | 188 | context); |
188 | spin_unlock(&i2o_drivers_lock); | 189 | return -EIO; |
190 | } | ||
189 | 191 | ||
190 | if (unlikely(!drv)) { | 192 | spin_lock_irqsave(&i2o_drivers_lock, flags); |
191 | printk(KERN_WARNING "%s: Spurious reply to unknown " | 193 | drv = i2o_drivers[context]; |
192 | "driver %d\n", c->name, context); | 194 | spin_unlock_irqrestore(&i2o_drivers_lock, flags); |
193 | return -EIO; | ||
194 | } | ||
195 | 195 | ||
196 | if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) { | 196 | if (unlikely(!drv)) { |
197 | struct i2o_device *dev, *tmp; | 197 | osm_warn("%s: Spurious reply to unknown driver %d\n", c->name, |
198 | struct i2o_event *evt; | 198 | context); |
199 | u16 size; | 199 | return -EIO; |
200 | u16 tid; | 200 | } |
201 | 201 | ||
202 | tid = readl(&msg->u.head[1]) & 0x1fff; | 202 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) { |
203 | struct i2o_device *dev, *tmp; | ||
204 | struct i2o_event *evt; | ||
205 | u16 size; | ||
206 | u16 tid = le32_to_cpu(msg->u.head[1]) & 0xfff; | ||
203 | 207 | ||
204 | pr_debug("%s: event received from device %d\n", c->name, | 208 | osm_debug("event received from device %d\n", tid); |
205 | tid); | ||
206 | 209 | ||
207 | /* cut of header from message size (in 32-bit words) */ | 210 | if (!drv->event) |
208 | size = (readl(&msg->u.head[0]) >> 16) - 5; | 211 | return -EIO; |
209 | 212 | ||
210 | evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC); | 213 | /* cut of header from message size (in 32-bit words) */ |
211 | if (!evt) | 214 | size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5; |
212 | return -ENOMEM; | ||
213 | memset(evt, 0, size * 4 + sizeof(*evt)); | ||
214 | 215 | ||
215 | evt->size = size; | 216 | evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC | __GFP_ZERO); |
216 | memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt, | 217 | if (!evt) |
217 | (size + 2) * 4); | 218 | return -ENOMEM; |
218 | 219 | ||
219 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | 220 | evt->size = size; |
220 | if (dev->lct_data.tid == tid) { | 221 | evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt); |
221 | evt->i2o_dev = dev; | 222 | evt->event_indicator = le32_to_cpu(msg->body[0]); |
222 | break; | 223 | memcpy(&evt->tcntxt, &msg->u.s.tcntxt, size * 4); |
223 | } | ||
224 | 224 | ||
225 | INIT_WORK(&evt->work, (void (*)(void *))drv->event, | 225 | list_for_each_entry_safe(dev, tmp, &c->devices, list) |
226 | evt); | 226 | if (dev->lct_data.tid == tid) { |
227 | queue_work(drv->event_queue, &evt->work); | 227 | evt->i2o_dev = dev; |
228 | return 1; | 228 | break; |
229 | } | 229 | } |
230 | 230 | ||
231 | if (likely(drv->reply)) | 231 | INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); |
232 | return drv->reply(c, m, msg); | 232 | queue_work(drv->event_queue, &evt->work); |
233 | else | 233 | return 1; |
234 | pr_debug("%s: Reply to driver %s, but no reply function" | 234 | } |
235 | " defined!\n", c->name, drv->name); | 235 | |
236 | if (unlikely(!drv->reply)) { | ||
237 | osm_debug("%s: Reply to driver %s, but no reply function" | ||
238 | " defined!\n", c->name, drv->name); | ||
236 | return -EIO; | 239 | return -EIO; |
237 | } else | 240 | } |
238 | printk(KERN_WARNING "%s: Spurious reply to unknown driver " | 241 | |
239 | "%d\n", c->name, readl(&msg->u.s.icntxt)); | 242 | return drv->reply(c, m, msg); |
240 | return -EIO; | ||
241 | } | 243 | } |
242 | 244 | ||
243 | /** | 245 | /** |
@@ -334,11 +336,11 @@ int __init i2o_driver_init(void) | |||
334 | if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) || | 336 | if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) || |
335 | ((i2o_max_drivers ^ (i2o_max_drivers - 1)) != | 337 | ((i2o_max_drivers ^ (i2o_max_drivers - 1)) != |
336 | (2 * i2o_max_drivers - 1))) { | 338 | (2 * i2o_max_drivers - 1))) { |
337 | printk(KERN_WARNING "i2o: max_drivers set to %d, but must be " | 339 | osm_warn("max_drivers set to %d, but must be >=2 and <= 64 and " |
338 | ">=2 and <= 64 and a power of 2\n", i2o_max_drivers); | 340 | "a power of 2\n", i2o_max_drivers); |
339 | i2o_max_drivers = I2O_MAX_DRIVERS; | 341 | i2o_max_drivers = I2O_MAX_DRIVERS; |
340 | } | 342 | } |
341 | printk(KERN_INFO "i2o: max drivers = %d\n", i2o_max_drivers); | 343 | osm_info("max drivers = %d\n", i2o_max_drivers); |
342 | 344 | ||
343 | i2o_drivers = | 345 | i2o_drivers = |
344 | kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); | 346 | kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL); |
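The rewritten i2o_driver_dispatch() above looks the OSM up by the initiator context stored in the reply, queues UTIL_EVT_REGISTER replies to drv->event(), and hands everything else to drv->reply(). A minimal, illustrative OSM skeleton that only registers a reply handler, modeled on the struct i2o_driver usage in bus-osm.c and config-osm.c earlier in this patch; the callback prototype follows how drv->reply() is invoked here and is otherwise an assumption:

#include <linux/module.h>
#include <linux/i2o.h>

#define OSM_NAME "example-osm"

/* Placeholder reply handler: a real OSM would decode msg here. */
static int example_reply(struct i2o_controller *c, u32 m,
			 struct i2o_message *msg)
{
	return 0;	/* tell the core it may flush the reply frame */
}

static struct i2o_driver example_driver = {
	.name  = OSM_NAME,
	.reply = example_reply,
};

static int __init example_init(void)
{
	return i2o_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	i2o_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);

MODULE_LICENSE("GPL");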
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 79c1cbfb8f44..bda2c62648ba 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/i2o.h> | 31 | #include <linux/i2o.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include "core.h" | ||
33 | 34 | ||
34 | #define OSM_NAME "exec-osm" | 35 | #define OSM_NAME "exec-osm" |
35 | 36 | ||
@@ -37,9 +38,6 @@ struct i2o_driver i2o_exec_driver; | |||
37 | 38 | ||
38 | static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind); | 39 | static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind); |
39 | 40 | ||
40 | /* Module internal functions from other sources */ | ||
41 | extern int i2o_device_parse_lct(struct i2o_controller *); | ||
42 | |||
43 | /* global wait list for POST WAIT */ | 41 | /* global wait list for POST WAIT */ |
44 | static LIST_HEAD(i2o_exec_wait_list); | 42 | static LIST_HEAD(i2o_exec_wait_list); |
45 | 43 | ||
@@ -50,7 +48,7 @@ struct i2o_exec_wait { | |||
50 | u32 tcntxt; /* transaction context from reply */ | 48 | u32 tcntxt; /* transaction context from reply */ |
51 | int complete; /* 1 if reply received otherwise 0 */ | 49 | int complete; /* 1 if reply received otherwise 0 */ |
52 | u32 m; /* message id */ | 50 | u32 m; /* message id */ |
53 | struct i2o_message __iomem *msg; /* pointer to the reply message */ | 51 | struct i2o_message *msg; /* pointer to the reply message */ |
54 | struct list_head list; /* node in global wait list */ | 52 | struct list_head list; /* node in global wait list */ |
55 | }; | 53 | }; |
56 | 54 | ||
@@ -108,7 +106,8 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait) | |||
108 | * buffer must not be freed. Instead the event completion will free them | 106 | * buffer must not be freed. Instead the event completion will free them |
109 | * for you. In all other cases the buffer are your problem. | 107 | * for you. In all other cases the buffer are your problem. |
110 | * | 108 | * |
111 | * Returns 0 on success or negative error code on failure. | 109 | * Returns 0 on success, negative error code on timeout or positive error |
110 | * code from reply. | ||
112 | */ | 111 | */ |
113 | int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | 112 | int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long |
114 | timeout, struct i2o_dma *dma) | 113 | timeout, struct i2o_dma *dma) |
@@ -116,7 +115,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | |||
116 | DECLARE_WAIT_QUEUE_HEAD(wq); | 115 | DECLARE_WAIT_QUEUE_HEAD(wq); |
117 | struct i2o_exec_wait *wait; | 116 | struct i2o_exec_wait *wait; |
118 | static u32 tcntxt = 0x80000000; | 117 | static u32 tcntxt = 0x80000000; |
119 | struct i2o_message __iomem *msg = c->in_queue.virt + m; | 118 | struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); |
120 | int rc = 0; | 119 | int rc = 0; |
121 | 120 | ||
122 | wait = i2o_exec_wait_alloc(); | 121 | wait = i2o_exec_wait_alloc(); |
@@ -153,7 +152,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | |||
153 | list_add(&wait->list, &i2o_exec_wait_list); | 152 | list_add(&wait->list, &i2o_exec_wait_list); |
154 | 153 | ||
155 | wait_event_interruptible_timeout(wq, wait->complete, | 154 | wait_event_interruptible_timeout(wq, wait->complete, |
156 | timeout * HZ); | 155 | timeout * HZ); |
157 | 156 | ||
158 | wait->wq = NULL; | 157 | wait->wq = NULL; |
159 | } | 158 | } |
@@ -161,8 +160,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | |||
161 | barrier(); | 160 | barrier(); |
162 | 161 | ||
163 | if (wait->complete) { | 162 | if (wait->complete) { |
164 | if (readl(&wait->msg->body[0]) >> 24) | 163 | rc = le32_to_cpu(wait->msg->body[0]) >> 24; |
165 | rc = readl(&wait->msg->body[0]) & 0xff; | ||
166 | i2o_flush_reply(c, wait->m); | 164 | i2o_flush_reply(c, wait->m); |
167 | i2o_exec_wait_free(wait); | 165 | i2o_exec_wait_free(wait); |
168 | } else { | 166 | } else { |
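The rewritten completion above takes the request status from the most significant byte of the first reply body word; the same word carries a 16-bit detailed status, as the block OSM error reporting later in this patch shows. A small host-endian sketch of that decoding; the function name and sample values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode an I2O reply status word as used in this patch: bits 31-24 hold
     * the request status, bits 15-0 the detailed status. */
    static void decode_reply_status(uint32_t body0)
    {
            unsigned int req_status = body0 >> 24;
            unsigned int detailed_status = body0 & 0xffff;

            printf("request status 0x%02x, detailed status 0x%04x\n",
                   req_status, detailed_status);
    }

    int main(void)
    {
            decode_reply_status(0x01000000);        /* non-zero status: error */
            decode_reply_status(0x00000000);        /* success */
            return 0;
    }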
@@ -187,6 +185,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | |||
187 | * @c: I2O controller which answers | 185 | * @c: I2O controller which answers |
188 | * @m: message id | 186 | * @m: message id |
189 | * @msg: pointer to the I2O reply message | 187 | * @msg: pointer to the I2O reply message |
188 | * @context: transaction context of request | ||
190 | * | 189 | * |
191 | * This function is called in interrupt context only. If the reply reached | 190 | * This function is called in interrupt context only. If the reply reached |
192 | * before the timeout, the i2o_exec_wait struct is filled with the message | 191 | * before the timeout, the i2o_exec_wait struct is filled with the message |
@@ -201,16 +200,12 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long | |||
201 | * message must also be given back to the controller. | 200 | * message must also be given back to the controller. |
202 | */ | 201 | */ |
203 | static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | 202 | static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, |
204 | struct i2o_message __iomem *msg) | 203 | struct i2o_message *msg, u32 context) |
205 | { | 204 | { |
206 | struct i2o_exec_wait *wait, *tmp; | 205 | struct i2o_exec_wait *wait, *tmp; |
207 | static spinlock_t lock; | 206 | unsigned long flags; |
207 | static spinlock_t lock = SPIN_LOCK_UNLOCKED; | ||
208 | int rc = 1; | 208 | int rc = 1; |
209 | u32 context; | ||
210 | |||
211 | spin_lock_init(&lock); | ||
212 | |||
213 | context = readl(&msg->u.s.tcntxt); | ||
214 | 209 | ||
215 | /* | 210 | /* |
216 | * We need to search through the i2o_exec_wait_list to see if the given | 211 | * We need to search through the i2o_exec_wait_list to see if the given |
@@ -219,11 +214,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
219 | * already expired. Not much we can do about that except log it for | 214 | * already expired. Not much we can do about that except log it for |
220 | * debug purposes, increase timeout, and recompile. | 215 | * debug purposes, increase timeout, and recompile. |
221 | */ | 216 | */ |
222 | spin_lock(&lock); | 217 | spin_lock_irqsave(&lock, flags); |
223 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { | 218 | list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) { |
224 | if (wait->tcntxt == context) { | 219 | if (wait->tcntxt == context) { |
225 | list_del(&wait->list); | 220 | list_del(&wait->list); |
226 | 221 | ||
222 | spin_unlock_irqrestore(&lock, flags); | ||
223 | |||
227 | wait->m = m; | 224 | wait->m = m; |
228 | wait->msg = msg; | 225 | wait->msg = msg; |
229 | wait->complete = 1; | 226 | wait->complete = 1; |
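The hunks above replace the per-call spin_lock_init()/spin_lock() pair with a statically initialised lock taken via spin_lock_irqsave(), and drop the lock as soon as the matching waiter is unlinked. A sketch of that pattern in isolation (kernel context, not compilable on its own; the helper name is illustrative):

    /* Sketch only: unlink the waiter matching a transaction context from a
     * list that is also reached from interrupt context.  The lock is taken
     * with interrupts disabled and released before the entry is used any
     * further, which is the scheme the hunks above switch to. */
    static LIST_HEAD(wait_list);
    static spinlock_t wait_lock = SPIN_LOCK_UNLOCKED;  /* 2.6.12-era initialiser, as in the patch */

    static struct i2o_exec_wait *wait_find_and_unlink(u32 context)
    {
            struct i2o_exec_wait *wait, *tmp;
            unsigned long flags;

            spin_lock_irqsave(&wait_lock, flags);
            list_for_each_entry_safe(wait, tmp, &wait_list, list) {
                    if (wait->tcntxt == context) {
                            list_del(&wait->list);
                            spin_unlock_irqrestore(&wait_lock, flags);
                            return wait;
                    }
            }
            spin_unlock_irqrestore(&wait_lock, flags);

            return NULL;
    }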
@@ -245,21 +242,63 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
245 | rc = -1; | 242 | rc = -1; |
246 | } | 243 | } |
247 | 244 | ||
248 | spin_unlock(&lock); | ||
249 | |||
250 | return rc; | 245 | return rc; |
251 | } | 246 | } |
252 | } | 247 | } |
253 | 248 | ||
254 | spin_unlock(&lock); | 249 | spin_unlock_irqrestore(&lock, flags); |
255 | 250 | ||
256 | pr_debug("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, | 251 | osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name, |
257 | context); | 252 | context); |
258 | 253 | ||
259 | return -1; | 254 | return -1; |
260 | }; | 255 | }; |
261 | 256 | ||
262 | /** | 257 | /** |
258 | * i2o_exec_show_vendor_id - Displays Vendor ID of controller | ||
259 | * @d: device of which the Vendor ID should be displayed | ||
260 | * @buf: buffer into which the Vendor ID should be printed | ||
261 | * | ||
262 | * Returns number of bytes printed into buffer. | ||
263 | */ | ||
264 | static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf) | ||
265 | { | ||
266 | struct i2o_device *dev = to_i2o_device(d); | ||
267 | u16 id; | ||
268 | |||
269 | if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) { | ||
270 | sprintf(buf, "0x%04x", id); | ||
271 | return strlen(buf) + 1; | ||
272 | } | ||
273 | |||
274 | return 0; | ||
275 | }; | ||
276 | |||
277 | /** | ||
278 | * i2o_exec_show_product_id - Displays Product ID of controller | ||
279 | * @d: device of which the Product ID should be displayed | ||
280 | * @buf: buffer into which the Product ID should be printed | ||
281 | * | ||
282 | * Returns number of bytes printed into buffer. | ||
283 | */ | ||
284 | static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf) | ||
285 | { | ||
286 | struct i2o_device *dev = to_i2o_device(d); | ||
287 | u16 id; | ||
288 | |||
289 | if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) { | ||
290 | sprintf(buf, "0x%04x", id); | ||
291 | return strlen(buf) + 1; | ||
292 | } | ||
293 | |||
294 | return 0; | ||
295 | }; | ||
296 | |||
297 | /* Exec-OSM device attributes */ | ||
298 | static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL); | ||
299 | static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL); | ||
300 | |||
301 | /** | ||
263 | * i2o_exec_probe - Called if a new I2O device (executive class) appears | 302 | * i2o_exec_probe - Called if a new I2O device (executive class) appears |
264 | * @dev: I2O device which should be probed | 303 | * @dev: I2O device which should be probed |
265 | * | 304 | * |
@@ -271,10 +310,16 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m, | |||
271 | static int i2o_exec_probe(struct device *dev) | 310 | static int i2o_exec_probe(struct device *dev) |
272 | { | 311 | { |
273 | struct i2o_device *i2o_dev = to_i2o_device(dev); | 312 | struct i2o_device *i2o_dev = to_i2o_device(dev); |
313 | struct i2o_controller *c = i2o_dev->iop; | ||
274 | 314 | ||
275 | i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); | 315 | i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff); |
276 | 316 | ||
277 | i2o_dev->iop->exec = i2o_dev; | 317 | c->exec = i2o_dev; |
318 | |||
319 | i2o_exec_lct_notify(c, c->lct->change_ind + 1); | ||
320 | |||
321 | device_create_file(dev, &dev_attr_vendor_id); | ||
322 | device_create_file(dev, &dev_attr_product_id); | ||
278 | 323 | ||
279 | return 0; | 324 | return 0; |
280 | }; | 325 | }; |
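The two attributes registered above show up as read-only files in the executive device's sysfs directory once the probe has run. A small userspace sketch that reads one of them; the sysfs path is only an example and depends on how the device is named on a particular system:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            /* Example path only; look up the real device name under /sys. */
            FILE *f = fopen("/sys/bus/i2o/devices/0-0000/vendor_id", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("vendor_id: %s\n", buf);
            fclose(f);
            return 0;
    }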
@@ -289,6 +334,9 @@ static int i2o_exec_probe(struct device *dev) | |||
289 | */ | 334 | */ |
290 | static int i2o_exec_remove(struct device *dev) | 335 | static int i2o_exec_remove(struct device *dev) |
291 | { | 336 | { |
337 | device_remove_file(dev, &dev_attr_product_id); | ||
338 | device_remove_file(dev, &dev_attr_vendor_id); | ||
339 | |||
292 | i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); | 340 | i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0); |
293 | 341 | ||
294 | return 0; | 342 | return 0; |
@@ -300,12 +348,16 @@ static int i2o_exec_remove(struct device *dev) | |||
300 | * | 348 | * |
301 | * This function handles asynchronous LCT NOTIFY replies. It parses the | 349 | * This function handles asynchronous LCT NOTIFY replies. It parses the |
302 | * new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY | 350 | * new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY |
303 | * again. | 351 | * again, otherwise sends a LCT NOTIFY to get informed on the next LCT change. |
304 | */ | 352 | */ |
305 | static void i2o_exec_lct_modified(struct i2o_controller *c) | 353 | static void i2o_exec_lct_modified(struct i2o_controller *c) |
306 | { | 354 | { |
307 | if (i2o_device_parse_lct(c) == -EAGAIN) | 355 | u32 change_ind = 0; |
308 | i2o_exec_lct_notify(c, 0); | 356 | |
357 | if (i2o_device_parse_lct(c) != -EAGAIN) | ||
358 | change_ind = c->lct->change_ind + 1; | ||
359 | |||
360 | i2o_exec_lct_notify(c, change_ind); | ||
309 | }; | 361 | }; |
310 | 362 | ||
311 | /** | 363 | /** |
@@ -325,8 +377,14 @@ static void i2o_exec_lct_modified(struct i2o_controller *c) | |||
325 | static int i2o_exec_reply(struct i2o_controller *c, u32 m, | 377 | static int i2o_exec_reply(struct i2o_controller *c, u32 m, |
326 | struct i2o_message *msg) | 378 | struct i2o_message *msg) |
327 | { | 379 | { |
328 | if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { // Fail bit is set | 380 | u32 context; |
329 | struct i2o_message __iomem *pmsg; /* preserved message */ | 381 | |
382 | if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) { | ||
383 | /* | ||
384 | * If Fail bit is set we must take the transaction context of | ||
385 | * the preserved message to find the right request again. | ||
386 | */ | ||
387 | struct i2o_message __iomem *pmsg; | ||
330 | u32 pm; | 388 | u32 pm; |
331 | 389 | ||
332 | pm = le32_to_cpu(msg->body[3]); | 390 | pm = le32_to_cpu(msg->body[3]); |
@@ -335,15 +393,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
335 | 393 | ||
336 | i2o_report_status(KERN_INFO, "i2o_core", msg); | 394 | i2o_report_status(KERN_INFO, "i2o_core", msg); |
337 | 395 | ||
338 | /* Release the preserved msg by resubmitting it as a NOP */ | 396 | context = readl(&pmsg->u.s.tcntxt); |
339 | i2o_msg_nop(c, pm); | ||
340 | 397 | ||
341 | /* If reply to i2o_post_wait failed, return causes a timeout */ | 398 | /* Release the preserved msg */ |
342 | return -1; | 399 | i2o_msg_nop(c, pm); |
343 | } | 400 | } else |
401 | context = le32_to_cpu(msg->u.s.tcntxt); | ||
344 | 402 | ||
345 | if (le32_to_cpu(msg->u.s.tcntxt) & 0x80000000) | 403 | if (context & 0x80000000) |
346 | return i2o_msg_post_wait_complete(c, m, msg); | 404 | return i2o_msg_post_wait_complete(c, m, msg, context); |
347 | 405 | ||
348 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) { | 406 | if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) { |
349 | struct work_struct *work; | 407 | struct work_struct *work; |
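In the reply path above the transaction context decides where a reply is routed: POST WAIT contexts are allocated starting at 0x80000000, so a set top bit hands the reply to i2o_msg_post_wait_complete(), while everything else continues to the LCT NOTIFY handling. A trivial standalone sketch of that dispatch test; the macro and function names are illustrative:

    #include <stdio.h>

    #define CTX_POST_WAIT_BIT 0x80000000u   /* illustrative name for the bit tested above */

    static const char *classify_context(unsigned int tcntxt)
    {
            return (tcntxt & CTX_POST_WAIT_BIT) ? "post-wait reply" : "other reply";
    }

    int main(void)
    {
            printf("%08x -> %s\n", 0x80000001u, classify_context(0x80000001u));
            printf("%08x -> %s\n", 0x00000042u, classify_context(0x00000042u));
            return 0;
    }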
@@ -381,8 +439,9 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
381 | */ | 439 | */ |
382 | static void i2o_exec_event(struct i2o_event *evt) | 440 | static void i2o_exec_event(struct i2o_event *evt) |
383 | { | 441 | { |
384 | osm_info("Event received from device: %d\n", | 442 | if (likely(evt->i2o_dev)) |
385 | evt->i2o_dev->lct_data.tid); | 443 | osm_debug("Event received from device: %d\n", |
444 | evt->i2o_dev->lct_data.tid); | ||
386 | kfree(evt); | 445 | kfree(evt); |
387 | }; | 446 | }; |
388 | 447 | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 7b74c87b569e..f283b5bafdd3 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -62,7 +62,7 @@ | |||
62 | #include "i2o_block.h" | 62 | #include "i2o_block.h" |
63 | 63 | ||
64 | #define OSM_NAME "block-osm" | 64 | #define OSM_NAME "block-osm" |
65 | #define OSM_VERSION "$Rev$" | 65 | #define OSM_VERSION "1.287" |
66 | #define OSM_DESCRIPTION "I2O Block Device OSM" | 66 | #define OSM_DESCRIPTION "I2O Block Device OSM" |
67 | 67 | ||
68 | static struct i2o_driver i2o_block_driver; | 68 | static struct i2o_driver i2o_block_driver; |
@@ -104,7 +104,8 @@ static int i2o_block_remove(struct device *dev) | |||
104 | struct i2o_device *i2o_dev = to_i2o_device(dev); | 104 | struct i2o_device *i2o_dev = to_i2o_device(dev); |
105 | struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); | 105 | struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev); |
106 | 106 | ||
107 | osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name); | 107 | osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid, |
108 | i2o_blk_dev->gd->disk_name); | ||
108 | 109 | ||
109 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); | 110 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0); |
110 | 111 | ||
@@ -146,6 +147,29 @@ static int i2o_block_device_flush(struct i2o_device *dev) | |||
146 | }; | 147 | }; |
147 | 148 | ||
148 | /** | 149 | /** |
150 | * i2o_block_issue_flush - device-flush interface for block-layer | ||
151 | * @queue: the request queue of the device which should be flushed | ||
152 | * @disk: gendisk | ||
153 | * @error_sector: error offset | ||
154 | * | ||
155 | * Helper function to provide flush functionality to block-layer. | ||
156 | * | ||
157 | * Returns 0 on success or negative error code on failure. | ||
158 | */ | ||
159 | |||
160 | static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk, | ||
161 | sector_t * error_sector) | ||
162 | { | ||
163 | struct i2o_block_device *i2o_blk_dev = queue->queuedata; | ||
164 | int rc = -ENODEV; | ||
165 | |||
166 | if (likely(i2o_blk_dev)) | ||
167 | rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev); | ||
168 | |||
169 | return rc; | ||
170 | } | ||
171 | |||
172 | /** | ||
149 | * i2o_block_device_mount - Mount (load) the media of device dev | 173 | * i2o_block_device_mount - Mount (load) the media of device dev |
150 | * @dev: I2O device which should receive the mount request | 174 | * @dev: I2O device which should receive the mount request |
151 | * @media_id: Media Identifier | 175 | * @media_id: Media Identifier |
@@ -298,28 +322,31 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq) | |||
298 | 322 | ||
299 | /** | 323 | /** |
300 | * i2o_block_sglist_alloc - Allocate the SG list and map it | 324 | * i2o_block_sglist_alloc - Allocate the SG list and map it |
325 | * @c: I2O controller to which the request belongs | ||
301 | * @ireq: I2O block request | 326 | * @ireq: I2O block request |
302 | * | 327 | * |
303 | * Builds the SG list and maps it to be accessible by the controller. | 328 | * Builds the SG list and maps it to be accessible by the controller. |
304 | * | 329 | * |
305 | * Returns the number of elements in the SG list or 0 on failure. | 330 | * Returns 0 on failure or 1 on success. |
306 | */ | 331 | */ |
307 | static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) | 332 | static inline int i2o_block_sglist_alloc(struct i2o_controller *c, |
333 | struct i2o_block_request *ireq, | ||
334 | u32 __iomem ** mptr) | ||
308 | { | 335 | { |
309 | struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; | ||
310 | int nents; | 336 | int nents; |
337 | enum dma_data_direction direction; | ||
311 | 338 | ||
339 | ireq->dev = &c->pdev->dev; | ||
312 | nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); | 340 | nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table); |
313 | 341 | ||
314 | if (rq_data_dir(ireq->req) == READ) | 342 | if (rq_data_dir(ireq->req) == READ) |
315 | ireq->sg_dma_direction = PCI_DMA_FROMDEVICE; | 343 | direction = PCI_DMA_FROMDEVICE; |
316 | else | 344 | else |
317 | ireq->sg_dma_direction = PCI_DMA_TODEVICE; | 345 | direction = PCI_DMA_TODEVICE; |
318 | 346 | ||
319 | ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents, | 347 | ireq->sg_nents = nents; |
320 | ireq->sg_dma_direction); | ||
321 | 348 | ||
322 | return ireq->sg_nents; | 349 | return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr); |
323 | }; | 350 | }; |
324 | 351 | ||
325 | /** | 352 | /** |
@@ -330,10 +357,14 @@ static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq) | |||
330 | */ | 357 | */ |
331 | static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) | 358 | static inline void i2o_block_sglist_free(struct i2o_block_request *ireq) |
332 | { | 359 | { |
333 | struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev; | 360 | enum dma_data_direction direction; |
334 | 361 | ||
335 | dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents, | 362 | if (rq_data_dir(ireq->req) == READ) |
336 | ireq->sg_dma_direction); | 363 | direction = PCI_DMA_FROMDEVICE; |
364 | else | ||
365 | direction = PCI_DMA_TODEVICE; | ||
366 | |||
367 | dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction); | ||
337 | }; | 368 | }; |
338 | 369 | ||
339 | /** | 370 | /** |
@@ -351,6 +382,11 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
351 | struct i2o_block_device *i2o_blk_dev = q->queuedata; | 382 | struct i2o_block_device *i2o_blk_dev = q->queuedata; |
352 | struct i2o_block_request *ireq; | 383 | struct i2o_block_request *ireq; |
353 | 384 | ||
385 | if (unlikely(!i2o_blk_dev)) { | ||
386 | osm_err("block device already removed\n"); | ||
387 | return BLKPREP_KILL; | ||
388 | } | ||
389 | |||
354 | /* request is already processed by us, so return */ | 390 | /* request is already processed by us, so return */ |
355 | if (req->flags & REQ_SPECIAL) { | 391 | if (req->flags & REQ_SPECIAL) { |
356 | osm_debug("REQ_SPECIAL already set!\n"); | 392 | osm_debug("REQ_SPECIAL already set!\n"); |
@@ -400,71 +436,65 @@ static void i2o_block_delayed_request_fn(void *delayed_request) | |||
400 | }; | 436 | }; |
401 | 437 | ||
402 | /** | 438 | /** |
403 | * i2o_block_reply - Block OSM reply handler. | 439 | * i2o_block_end_request - Post-processing of completed commands |
404 | * @c: I2O controller from which the message arrives | 440 | * @req: request which should be completed |
405 | * @m: message id of reply | 441 | * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error |
406 | * qmsg: the actuall I2O message reply | 442 | * @nr_bytes: number of bytes to complete |
407 | * | 443 | * |
408 | * This function gets all the message replies. | 444 | * Mark the request as complete. The lock must not be held when entering. |
409 | * | 445 | * |
410 | */ | 446 | */ |
411 | static int i2o_block_reply(struct i2o_controller *c, u32 m, | 447 | static void i2o_block_end_request(struct request *req, int uptodate, |
412 | struct i2o_message *msg) | 448 | int nr_bytes) |
413 | { | 449 | { |
414 | struct i2o_block_request *ireq; | 450 | struct i2o_block_request *ireq = req->special; |
415 | struct request *req; | 451 | struct i2o_block_device *dev = ireq->i2o_blk_dev; |
416 | struct i2o_block_device *dev; | 452 | request_queue_t *q = req->q; |
417 | struct request_queue *q; | ||
418 | u8 st; | ||
419 | unsigned long flags; | 453 | unsigned long flags; |
420 | 454 | ||
421 | /* FAILed message */ | 455 | if (end_that_request_chunk(req, uptodate, nr_bytes)) { |
422 | if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) { | 456 | int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); |
423 | struct i2o_message *pmsg; | ||
424 | u32 pm; | ||
425 | 457 | ||
426 | /* | 458 | if (blk_pc_request(req)) |
427 | * FAILed message from controller | 459 | leftover = req->data_len; |
428 | * We increment the error count and abort it | ||
429 | * | ||
430 | * In theory this will never happen. The I2O block class | ||
431 | * specification states that block devices never return | ||
432 | * FAILs but instead use the REQ status field...but | ||
433 | * better be on the safe side since no one really follows | ||
434 | * the spec to the book :) | ||
435 | */ | ||
436 | pm = le32_to_cpu(msg->body[3]); | ||
437 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
438 | 460 | ||
439 | req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt)); | 461 | if (end_io_error(uptodate)) |
440 | if (unlikely(!req)) { | 462 | end_that_request_chunk(req, 0, leftover); |
441 | osm_err("NULL reply received!\n"); | 463 | } |
442 | return -1; | ||
443 | } | ||
444 | 464 | ||
445 | ireq = req->special; | 465 | add_disk_randomness(req->rq_disk); |
446 | dev = ireq->i2o_blk_dev; | ||
447 | q = dev->gd->queue; | ||
448 | 466 | ||
449 | req->errors++; | 467 | spin_lock_irqsave(q->queue_lock, flags); |
450 | |||
451 | spin_lock_irqsave(q->queue_lock, flags); | ||
452 | 468 | ||
453 | while (end_that_request_chunk(req, !req->errors, | 469 | end_that_request_last(req); |
454 | le32_to_cpu(pmsg->body[1]))) ; | ||
455 | end_that_request_last(req); | ||
456 | 470 | ||
471 | if (likely(dev)) { | ||
457 | dev->open_queue_depth--; | 472 | dev->open_queue_depth--; |
458 | list_del(&ireq->queue); | 473 | list_del(&ireq->queue); |
459 | blk_start_queue(q); | 474 | } |
460 | 475 | ||
461 | spin_unlock_irqrestore(q->queue_lock, flags); | 476 | blk_start_queue(q); |
462 | 477 | ||
463 | /* Now flush the message by making it a NOP */ | 478 | spin_unlock_irqrestore(q->queue_lock, flags); |
464 | i2o_msg_nop(c, pm); | ||
465 | 479 | ||
466 | return -1; | 480 | i2o_block_sglist_free(ireq); |
467 | } | 481 | i2o_block_request_free(ireq); |
482 | }; | ||
483 | |||
484 | /** | ||
485 | * i2o_block_reply - Block OSM reply handler. | ||
486 | * @c: I2O controller from which the message arrives | ||
487 | * @m: message id of reply | ||
488 | * @msg: the actual I2O message reply | |
489 | * | ||
490 | * This function gets all the message replies. | ||
491 | * | ||
492 | */ | ||
493 | static int i2o_block_reply(struct i2o_controller *c, u32 m, | ||
494 | struct i2o_message *msg) | ||
495 | { | ||
496 | struct request *req; | ||
497 | int uptodate = 1; | ||
468 | 498 | ||
469 | req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | 499 | req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); |
470 | if (unlikely(!req)) { | 500 | if (unlikely(!req)) { |
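In i2o_block_end_request() above, when the chunk completion reports an error and bytes remain, the remainder is failed as well; the leftover is taken from the request's hard sector count (or from data_len for packet commands). A standalone sketch of just that byte-count arithmetic, using the sector shift defined in i2o_block.h below; the values are illustrative:

    #include <stdio.h>

    #define KERNEL_SECTOR_SHIFT 9   /* as added to i2o_block.h in this patch */

    int main(void)
    {
            unsigned long hard_nr_sectors = 128;    /* 512-byte sectors still owed */
            unsigned long leftover = hard_nr_sectors << KERNEL_SECTOR_SHIFT;

            printf("leftover bytes to fail: %lu\n", leftover);      /* 65536 */
            return 0;
    }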
@@ -472,61 +502,13 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m, | |||
472 | return -1; | 502 | return -1; |
473 | } | 503 | } |
474 | 504 | ||
475 | ireq = req->special; | ||
476 | dev = ireq->i2o_blk_dev; | ||
477 | q = dev->gd->queue; | ||
478 | |||
479 | if (unlikely(!dev->i2o_dev)) { | ||
480 | /* | ||
481 | * This is HACK, but Intel Integrated RAID allows user | ||
482 | * to delete a volume that is claimed, locked, and in use | ||
483 | * by the OS. We have to check for a reply from a | ||
484 | * non-existent device and flag it as an error or the system | ||
485 | * goes kaput... | ||
486 | */ | ||
487 | req->errors++; | ||
488 | osm_warn("Data transfer to deleted device!\n"); | ||
489 | spin_lock_irqsave(q->queue_lock, flags); | ||
490 | while (end_that_request_chunk | ||
491 | (req, !req->errors, le32_to_cpu(msg->body[1]))) ; | ||
492 | end_that_request_last(req); | ||
493 | |||
494 | dev->open_queue_depth--; | ||
495 | list_del(&ireq->queue); | ||
496 | blk_start_queue(q); | ||
497 | |||
498 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
499 | return -1; | ||
500 | } | ||
501 | |||
502 | /* | 505 | /* |
503 | * Lets see what is cooking. We stuffed the | 506 | * Lets see what is cooking. We stuffed the |
504 | * request in the context. | 507 | * request in the context. |
505 | */ | 508 | */ |
506 | 509 | ||
507 | st = le32_to_cpu(msg->body[0]) >> 24; | 510 | if ((le32_to_cpu(msg->body[0]) >> 24) != 0) { |
508 | 511 | u32 status = le32_to_cpu(msg->body[0]); | |
509 | if (st != 0) { | ||
510 | int err; | ||
511 | char *bsa_errors[] = { | ||
512 | "Success", | ||
513 | "Media Error", | ||
514 | "Failure communicating to device", | ||
515 | "Device Failure", | ||
516 | "Device is not ready", | ||
517 | "Media not present", | ||
518 | "Media is locked by another user", | ||
519 | "Media has failed", | ||
520 | "Failure communicating to device", | ||
521 | "Device bus failure", | ||
522 | "Device is locked by another user", | ||
523 | "Device is write protected", | ||
524 | "Device has reset", | ||
525 | "Volume has changed, waiting for acknowledgement" | ||
526 | }; | ||
527 | |||
528 | err = le32_to_cpu(msg->body[0]) & 0xffff; | ||
529 | |||
530 | /* | 512 | /* |
531 | * Device not ready means two things. One is that the | 513 | * Device not ready means two things. One is that the |
532 | * the thing went offline (but not a removal media) | 514 | * the thing went offline (but not a removal media) |
@@ -539,40 +521,24 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m, | |||
539 | * Don't stick a supertrak100 into cache aggressive modes | 521 | * Don't stick a supertrak100 into cache aggressive modes |
540 | */ | 522 | */ |
541 | 523 | ||
542 | osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name, | 524 | osm_err("TID %03x error status: 0x%02x, detailed status: " |
543 | bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]); | 525 | "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff), |
544 | if (le32_to_cpu(msg->body[0]) & 0x00ff0000) | 526 | status >> 24, status & 0xffff); |
545 | printk(KERN_ERR " - DDM attempted %d retries", | ||
546 | (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff); | ||
547 | printk(KERN_ERR ".\n"); | ||
548 | req->errors++; | ||
549 | } else | ||
550 | req->errors = 0; | ||
551 | 527 | ||
552 | if (!end_that_request_chunk | 528 | req->errors++; |
553 | (req, !req->errors, le32_to_cpu(msg->body[1]))) { | ||
554 | add_disk_randomness(req->rq_disk); | ||
555 | spin_lock_irqsave(q->queue_lock, flags); | ||
556 | |||
557 | end_that_request_last(req); | ||
558 | 529 | ||
559 | dev->open_queue_depth--; | 530 | uptodate = 0; |
560 | list_del(&ireq->queue); | 531 | } |
561 | blk_start_queue(q); | ||
562 | 532 | ||
563 | spin_unlock_irqrestore(q->queue_lock, flags); | 533 | i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1])); |
564 | |||
565 | i2o_block_sglist_free(ireq); | ||
566 | i2o_block_request_free(ireq); | ||
567 | } else | ||
568 | osm_err("still remaining chunks\n"); | ||
569 | 534 | ||
570 | return 1; | 535 | return 1; |
571 | }; | 536 | }; |
572 | 537 | ||
573 | static void i2o_block_event(struct i2o_event *evt) | 538 | static void i2o_block_event(struct i2o_event *evt) |
574 | { | 539 | { |
575 | osm_info("block-osm: event received\n"); | 540 | osm_debug("event received\n"); |
541 | kfree(evt); | ||
576 | }; | 542 | }; |
577 | 543 | ||
578 | /* | 544 | /* |
@@ -777,18 +743,25 @@ static int i2o_block_media_changed(struct gendisk *disk) | |||
777 | static int i2o_block_transfer(struct request *req) | 743 | static int i2o_block_transfer(struct request *req) |
778 | { | 744 | { |
779 | struct i2o_block_device *dev = req->rq_disk->private_data; | 745 | struct i2o_block_device *dev = req->rq_disk->private_data; |
780 | struct i2o_controller *c = dev->i2o_dev->iop; | 746 | struct i2o_controller *c; |
781 | int tid = dev->i2o_dev->lct_data.tid; | 747 | int tid = dev->i2o_dev->lct_data.tid; |
782 | struct i2o_message __iomem *msg; | 748 | struct i2o_message __iomem *msg; |
783 | void __iomem *mptr; | 749 | u32 __iomem *mptr; |
784 | struct i2o_block_request *ireq = req->special; | 750 | struct i2o_block_request *ireq = req->special; |
785 | struct scatterlist *sg; | ||
786 | int sgnum; | ||
787 | int i; | ||
788 | u32 m; | 751 | u32 m; |
789 | u32 tcntxt; | 752 | u32 tcntxt; |
790 | u32 sg_flags; | 753 | u32 sgl_offset = SGL_OFFSET_8; |
754 | u32 ctl_flags = 0x00000000; | ||
791 | int rc; | 755 | int rc; |
756 | u32 cmd; | ||
757 | |||
758 | if (unlikely(!dev->i2o_dev)) { | ||
759 | osm_err("transfer to removed drive\n"); | ||
760 | rc = -ENODEV; | ||
761 | goto exit; | ||
762 | } | ||
763 | |||
764 | c = dev->i2o_dev->iop; | ||
792 | 765 | ||
793 | m = i2o_msg_get(c, &msg); | 766 | m = i2o_msg_get(c, &msg); |
794 | if (m == I2O_QUEUE_EMPTY) { | 767 | if (m == I2O_QUEUE_EMPTY) { |
@@ -802,82 +775,109 @@ static int i2o_block_transfer(struct request *req) | |||
802 | goto nop_msg; | 775 | goto nop_msg; |
803 | } | 776 | } |
804 | 777 | ||
805 | if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) { | ||
806 | rc = -ENOMEM; | ||
807 | goto context_remove; | ||
808 | } | ||
809 | |||
810 | /* Build the message based on the request. */ | ||
811 | writel(i2o_block_driver.context, &msg->u.s.icntxt); | 778 | writel(i2o_block_driver.context, &msg->u.s.icntxt); |
812 | writel(tcntxt, &msg->u.s.tcntxt); | 779 | writel(tcntxt, &msg->u.s.tcntxt); |
813 | writel(req->nr_sectors << 9, &msg->body[1]); | ||
814 | |||
815 | writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]); | ||
816 | writel(req->sector >> 23, &msg->body[3]); | ||
817 | 780 | ||
818 | mptr = &msg->body[4]; | 781 | mptr = &msg->body[0]; |
819 | |||
820 | sg = ireq->sg_table; | ||
821 | 782 | ||
822 | if (rq_data_dir(req) == READ) { | 783 | if (rq_data_dir(req) == READ) { |
823 | writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid, | 784 | cmd = I2O_CMD_BLOCK_READ << 24; |
824 | &msg->u.head[1]); | 785 | |
825 | sg_flags = 0x10000000; | ||
826 | switch (dev->rcache) { | 786 | switch (dev->rcache) { |
827 | case CACHE_NULL: | ||
828 | writel(0, &msg->body[0]); | ||
829 | break; | ||
830 | case CACHE_PREFETCH: | 787 | case CACHE_PREFETCH: |
831 | writel(0x201F0008, &msg->body[0]); | 788 | ctl_flags = 0x201F0008; |
832 | break; | 789 | break; |
790 | |||
833 | case CACHE_SMARTFETCH: | 791 | case CACHE_SMARTFETCH: |
834 | if (req->nr_sectors > 16) | 792 | if (req->nr_sectors > 16) |
835 | writel(0x201F0008, &msg->body[0]); | 793 | ctl_flags = 0x201F0008; |
836 | else | 794 | else |
837 | writel(0x001F0000, &msg->body[0]); | 795 | ctl_flags = 0x001F0000; |
796 | break; | ||
797 | |||
798 | default: | ||
838 | break; | 799 | break; |
839 | } | 800 | } |
840 | } else { | 801 | } else { |
841 | writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid, | 802 | cmd = I2O_CMD_BLOCK_WRITE << 24; |
842 | &msg->u.head[1]); | 803 | |
843 | sg_flags = 0x14000000; | ||
844 | switch (dev->wcache) { | 804 | switch (dev->wcache) { |
845 | case CACHE_NULL: | ||
846 | writel(0, &msg->body[0]); | ||
847 | break; | ||
848 | case CACHE_WRITETHROUGH: | 805 | case CACHE_WRITETHROUGH: |
849 | writel(0x001F0008, &msg->body[0]); | 806 | ctl_flags = 0x001F0008; |
850 | break; | 807 | break; |
851 | case CACHE_WRITEBACK: | 808 | case CACHE_WRITEBACK: |
852 | writel(0x001F0010, &msg->body[0]); | 809 | ctl_flags = 0x001F0010; |
853 | break; | 810 | break; |
854 | case CACHE_SMARTBACK: | 811 | case CACHE_SMARTBACK: |
855 | if (req->nr_sectors > 16) | 812 | if (req->nr_sectors > 16) |
856 | writel(0x001F0004, &msg->body[0]); | 813 | ctl_flags = 0x001F0004; |
857 | else | 814 | else |
858 | writel(0x001F0010, &msg->body[0]); | 815 | ctl_flags = 0x001F0010; |
859 | break; | 816 | break; |
860 | case CACHE_SMARTTHROUGH: | 817 | case CACHE_SMARTTHROUGH: |
861 | if (req->nr_sectors > 16) | 818 | if (req->nr_sectors > 16) |
862 | writel(0x001F0004, &msg->body[0]); | 819 | ctl_flags = 0x001F0004; |
863 | else | 820 | else |
864 | writel(0x001F0010, &msg->body[0]); | 821 | ctl_flags = 0x001F0010; |
822 | default: | ||
823 | break; | ||
824 | } | ||
825 | } | ||
826 | |||
827 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
828 | if (c->adaptec) { | ||
829 | u8 cmd[10]; | ||
830 | u32 scsi_flags; | ||
831 | u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; | ||
832 | |||
833 | memset(cmd, 0, 10); | ||
834 | |||
835 | sgl_offset = SGL_OFFSET_12; | ||
836 | |||
837 | writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, | ||
838 | &msg->u.head[1]); | ||
839 | |||
840 | writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); | ||
841 | writel(tid, mptr++); | ||
842 | |||
843 | /* | ||
844 | * ENABLE_DISCONNECT | ||
845 | * SIMPLE_TAG | ||
846 | * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME | ||
847 | */ | ||
848 | if (rq_data_dir(req) == READ) { | ||
849 | cmd[0] = 0x28; | ||
850 | scsi_flags = 0x60a0000a; | ||
851 | } else { | ||
852 | cmd[0] = 0x2A; | ||
853 | scsi_flags = 0xa0a0000a; | ||
865 | } | 854 | } |
855 | |||
856 | writel(scsi_flags, mptr++); | ||
857 | |||
858 | *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); | ||
859 | *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); | ||
860 | |||
861 | memcpy_toio(mptr, cmd, 10); | ||
862 | mptr += 4; | ||
863 | writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); | ||
864 | } else | ||
865 | #endif | ||
866 | { | ||
867 | writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); | ||
868 | writel(ctl_flags, mptr++); | ||
869 | writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); | ||
870 | writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); | ||
871 | writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); | ||
866 | } | 872 | } |
867 | 873 | ||
868 | for (i = sgnum; i > 0; i--) { | 874 | if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { |
869 | if (i == 1) | 875 | rc = -ENOMEM; |
870 | sg_flags |= 0x80000000; | 876 | goto context_remove; |
871 | writel(sg_flags | sg_dma_len(sg), mptr); | ||
872 | writel(sg_dma_address(sg), mptr + 4); | ||
873 | mptr += 8; | ||
874 | sg++; | ||
875 | } | 877 | } |
876 | 878 | ||
877 | writel(I2O_MESSAGE_SIZE | 879 | writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | |
878 | (((unsigned long)mptr - | 880 | sgl_offset, &msg->u.head[0]); |
879 | (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8, | ||
880 | &msg->u.head[0]); | ||
881 | 881 | ||
882 | list_add_tail(&ireq->queue, &dev->open_queue); | 882 | list_add_tail(&ireq->queue, &dev->open_queue); |
883 | dev->open_queue_depth++; | 883 | dev->open_queue_depth++; |
@@ -920,11 +920,13 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
920 | 920 | ||
921 | queue_depth = ireq->i2o_blk_dev->open_queue_depth; | 921 | queue_depth = ireq->i2o_blk_dev->open_queue_depth; |
922 | 922 | ||
923 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) | 923 | if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { |
924 | if (!i2o_block_transfer(req)) { | 924 | if (!i2o_block_transfer(req)) { |
925 | blkdev_dequeue_request(req); | 925 | blkdev_dequeue_request(req); |
926 | continue; | 926 | continue; |
927 | } | 927 | } else |
928 | osm_info("transfer error\n"); | ||
929 | } | ||
928 | 930 | ||
929 | if (queue_depth) | 931 | if (queue_depth) |
930 | break; | 932 | break; |
@@ -938,7 +940,6 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
938 | INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, | 940 | INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, |
939 | dreq); | 941 | dreq); |
940 | 942 | ||
941 | osm_info("transfer error\n"); | ||
942 | if (!queue_delayed_work(i2o_block_driver.event_queue, | 943 | if (!queue_delayed_work(i2o_block_driver.event_queue, |
943 | &dreq->work, | 944 | &dreq->work, |
944 | I2O_BLOCK_RETRY_TIME)) | 945 | I2O_BLOCK_RETRY_TIME)) |
@@ -1007,6 +1008,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void) | |||
1007 | } | 1008 | } |
1008 | 1009 | ||
1009 | blk_queue_prep_rq(queue, i2o_block_prep_req_fn); | 1010 | blk_queue_prep_rq(queue, i2o_block_prep_req_fn); |
1011 | blk_queue_issue_flush_fn(queue, i2o_block_issue_flush); | ||
1010 | 1012 | ||
1011 | gd->major = I2O_MAJOR; | 1013 | gd->major = I2O_MAJOR; |
1012 | gd->queue = queue; | 1014 | gd->queue = queue; |
@@ -1039,17 +1041,27 @@ static struct i2o_block_device *i2o_block_device_alloc(void) | |||
1039 | static int i2o_block_probe(struct device *dev) | 1041 | static int i2o_block_probe(struct device *dev) |
1040 | { | 1042 | { |
1041 | struct i2o_device *i2o_dev = to_i2o_device(dev); | 1043 | struct i2o_device *i2o_dev = to_i2o_device(dev); |
1042 | struct i2o_block_device *i2o_blk_dev; | ||
1043 | struct i2o_controller *c = i2o_dev->iop; | 1044 | struct i2o_controller *c = i2o_dev->iop; |
1045 | struct i2o_block_device *i2o_blk_dev; | ||
1044 | struct gendisk *gd; | 1046 | struct gendisk *gd; |
1045 | struct request_queue *queue; | 1047 | struct request_queue *queue; |
1046 | static int unit = 0; | 1048 | static int unit = 0; |
1047 | int rc; | 1049 | int rc; |
1048 | u64 size; | 1050 | u64 size; |
1049 | u32 blocksize; | 1051 | u32 blocksize; |
1050 | u16 power; | ||
1051 | u32 flags, status; | 1052 | u32 flags, status; |
1052 | int segments; | 1053 | u16 body_size = 4; |
1054 | unsigned short max_sectors; | ||
1055 | |||
1056 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
1057 | if (c->adaptec) | ||
1058 | body_size = 8; | ||
1059 | #endif | ||
1060 | |||
1061 | if (c->limit_sectors) | ||
1062 | max_sectors = I2O_MAX_SECTORS_LIMITED; | ||
1063 | else | ||
1064 | max_sectors = I2O_MAX_SECTORS; | ||
1053 | 1065 | ||
1054 | /* skip devices which are used by IOP */ | 1066 | /* skip devices which are used by IOP */ |
1055 | if (i2o_dev->lct_data.user_tid != 0xfff) { | 1067 | if (i2o_dev->lct_data.user_tid != 0xfff) { |
@@ -1057,8 +1069,6 @@ static int i2o_block_probe(struct device *dev) | |||
1057 | return -ENODEV; | 1069 | return -ENODEV; |
1058 | } | 1070 | } |
1059 | 1071 | ||
1060 | osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
1061 | |||
1062 | if (i2o_device_claim(i2o_dev)) { | 1072 | if (i2o_device_claim(i2o_dev)) { |
1063 | osm_warn("Unable to claim device. Installation aborted\n"); | 1073 | osm_warn("Unable to claim device. Installation aborted\n"); |
1064 | rc = -EFAULT; | 1074 | rc = -EFAULT; |
@@ -1086,50 +1096,44 @@ static int i2o_block_probe(struct device *dev) | |||
1086 | queue = gd->queue; | 1096 | queue = gd->queue; |
1087 | queue->queuedata = i2o_blk_dev; | 1097 | queue->queuedata = i2o_blk_dev; |
1088 | 1098 | ||
1089 | blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS); | 1099 | blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS); |
1090 | blk_queue_max_sectors(queue, I2O_MAX_SECTORS); | 1100 | blk_queue_max_sectors(queue, max_sectors); |
1091 | 1101 | blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size)); | |
1092 | if (c->short_req) | ||
1093 | segments = 8; | ||
1094 | else { | ||
1095 | i2o_status_block *sb; | ||
1096 | 1102 | ||
1097 | sb = c->status_block.virt; | 1103 | osm_debug("max sectors = %d\n", queue->max_phys_segments); |
1098 | 1104 | osm_debug("phys segments = %d\n", queue->max_sectors); | |
1099 | segments = (sb->inbound_frame_size - | 1105 | osm_debug("max hw segments = %d\n", queue->max_hw_segments); |
1100 | sizeof(struct i2o_message) / 4 - 4) / 2; | ||
1101 | } | ||
1102 | |||
1103 | blk_queue_max_hw_segments(queue, segments); | ||
1104 | |||
1105 | osm_debug("max sectors = %d\n", I2O_MAX_SECTORS); | ||
1106 | osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS); | ||
1107 | osm_debug("hw segments = %d\n", segments); | ||
1108 | 1106 | ||
1109 | /* | 1107 | /* |
1110 | * Ask for the current media data. If that isn't supported | 1108 | * Ask for the current media data. If that isn't supported |
1111 | * then we ask for the device capacity data | 1109 | * then we ask for the device capacity data |
1112 | */ | 1110 | */ |
1113 | if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0 | 1111 | if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || |
1114 | || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) { | 1112 | i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { |
1115 | i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4); | 1113 | blk_queue_hardsect_size(queue, blocksize); |
1116 | i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8); | 1114 | } else |
1117 | } | 1115 | osm_warn("unable to get blocksize of %s\n", gd->disk_name); |
1118 | osm_debug("blocksize = %d\n", blocksize); | ||
1119 | 1116 | ||
1120 | if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2)) | 1117 | if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) || |
1121 | power = 0; | 1118 | i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) { |
1119 | set_capacity(gd, size >> KERNEL_SECTOR_SHIFT); | ||
1120 | } else | ||
1121 | osm_warn("could not get size of %s\n", gd->disk_name); | ||
1122 | |||
1123 | if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2)) | ||
1124 | i2o_blk_dev->power = 0; | ||
1122 | i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); | 1125 | i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4); |
1123 | i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); | 1126 | i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4); |
1124 | 1127 | ||
1125 | set_capacity(gd, size >> 9); | ||
1126 | |||
1127 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); | 1128 | i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff); |
1128 | 1129 | ||
1129 | add_disk(gd); | 1130 | add_disk(gd); |
1130 | 1131 | ||
1131 | unit++; | 1132 | unit++; |
1132 | 1133 | ||
1134 | osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid, | ||
1135 | i2o_blk_dev->gd->disk_name); | ||
1136 | |||
1133 | return 0; | 1137 | return 0; |
1134 | 1138 | ||
1135 | claim_release: | 1139 | claim_release: |
@@ -1177,7 +1181,7 @@ static int __init i2o_block_init(void) | |||
1177 | goto exit; | 1181 | goto exit; |
1178 | } | 1182 | } |
1179 | 1183 | ||
1180 | i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE, | 1184 | i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, |
1181 | mempool_alloc_slab, | 1185 | mempool_alloc_slab, |
1182 | mempool_free_slab, | 1186 | mempool_free_slab, |
1183 | i2o_blk_req_pool.slab); | 1187 | i2o_blk_req_pool.slab); |
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h index ddd9a15679c0..4fdaa5bda412 100644 --- a/drivers/message/i2o/i2o_block.h +++ b/drivers/message/i2o/i2o_block.h | |||
@@ -56,42 +56,46 @@ | |||
56 | #define I2O_BLOCK_RETRY_TIME HZ/4 | 56 | #define I2O_BLOCK_RETRY_TIME HZ/4 |
57 | #define I2O_BLOCK_MAX_OPEN_REQUESTS 50 | 57 | #define I2O_BLOCK_MAX_OPEN_REQUESTS 50 |
58 | 58 | ||
59 | /* request queue sizes */ | ||
60 | #define I2O_BLOCK_REQ_MEMPOOL_SIZE 32 | ||
61 | |||
62 | #define KERNEL_SECTOR_SHIFT 9 | ||
63 | #define KERNEL_SECTOR_SIZE (1 << KERNEL_SECTOR_SHIFT) | ||
64 | |||
59 | /* I2O Block OSM mempool struct */ | 65 | /* I2O Block OSM mempool struct */ |
60 | struct i2o_block_mempool { | 66 | struct i2o_block_mempool { |
61 | kmem_cache_t *slab; | 67 | kmem_cache_t *slab; |
62 | mempool_t *pool; | 68 | mempool_t *pool; |
63 | }; | 69 | }; |
64 | 70 | ||
65 | /* I2O Block device descriptor */ | 71 | /* I2O Block device descriptor */ |
66 | struct i2o_block_device { | 72 | struct i2o_block_device { |
67 | struct i2o_device *i2o_dev; /* pointer to I2O device */ | 73 | struct i2o_device *i2o_dev; /* pointer to I2O device */ |
68 | struct gendisk *gd; | 74 | struct gendisk *gd; |
69 | spinlock_t lock; /* queue lock */ | 75 | spinlock_t lock; /* queue lock */ |
70 | struct list_head open_queue; /* list of transferred, but unfinished | 76 | struct list_head open_queue; /* list of transferred, but unfinished |
71 | requests */ | 77 | requests */ |
72 | unsigned int open_queue_depth; /* number of requests in the queue */ | 78 | unsigned int open_queue_depth; /* number of requests in the queue */ |
73 | 79 | ||
74 | int rcache; /* read cache flags */ | 80 | int rcache; /* read cache flags */ |
75 | int wcache; /* write cache flags */ | 81 | int wcache; /* write cache flags */ |
76 | int flags; | 82 | int flags; |
77 | int power; /* power state */ | 83 | u16 power; /* power state */ |
78 | int media_change_flag; /* media changed flag */ | 84 | int media_change_flag; /* media changed flag */ |
79 | }; | 85 | }; |
80 | 86 | ||
81 | /* I2O Block device request */ | 87 | /* I2O Block device request */ |
82 | struct i2o_block_request | 88 | struct i2o_block_request { |
83 | { | ||
84 | struct list_head queue; | 89 | struct list_head queue; |
85 | struct request *req; /* corresponding request */ | 90 | struct request *req; /* corresponding request */ |
86 | struct i2o_block_device *i2o_blk_dev; /* I2O block device */ | 91 | struct i2o_block_device *i2o_blk_dev; /* I2O block device */ |
87 | int sg_dma_direction; /* direction of DMA buffer read/write */ | 92 | struct device *dev; /* device used for DMA */ |
88 | int sg_nents; /* number of SG elements */ | 93 | int sg_nents; /* number of SG elements */ |
89 | struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */ | 94 | struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS]; /* SG table */ |
90 | }; | 95 | }; |
91 | 96 | ||
92 | /* I2O Block device delayed request */ | 97 | /* I2O Block device delayed request */ |
93 | struct i2o_block_delayed_request | 98 | struct i2o_block_delayed_request { |
94 | { | ||
95 | struct work_struct work; | 99 | struct work_struct work; |
96 | struct request_queue *queue; | 100 | struct request_queue *queue; |
97 | }; | 101 | }; |
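The new KERNEL_SECTOR_SHIFT/KERNEL_SECTOR_SIZE constants replace the bare 9 and 512 used for byte/sector conversions elsewhere in the patch, for example when setting the disk capacity or sizing a transfer. A standalone sketch of those conversions with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    #define KERNEL_SECTOR_SHIFT 9
    #define KERNEL_SECTOR_SIZE  (1 << KERNEL_SECTOR_SHIFT)

    int main(void)
    {
            uint64_t size_bytes = 73407488ULL;      /* device size in bytes (example) */
            unsigned long nr_sectors = 32;          /* request length in sectors (example) */

            printf("capacity: %llu sectors of %d bytes\n",
                   (unsigned long long)(size_bytes >> KERNEL_SECTOR_SHIFT),
                   KERNEL_SECTOR_SIZE);
            printf("transfer: %lu bytes\n", nr_sectors << KERNEL_SECTOR_SHIFT);
            return 0;
    }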
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 1fb5cdf67f8f..3c3a7abebb1b 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c | |||
@@ -30,29 +30,15 @@ | |||
30 | * 2 of the License, or (at your option) any later version. | 30 | * 2 of the License, or (at your option) any later version. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/i2o.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <linux/init.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/miscdevice.h> | 33 | #include <linux/miscdevice.h> |
41 | #include <linux/mm.h> | ||
42 | #include <linux/spinlock.h> | ||
43 | #include <linux/smp_lock.h> | 34 | #include <linux/smp_lock.h> |
44 | #include <linux/ioctl32.h> | ||
45 | #include <linux/compat.h> | 35 | #include <linux/compat.h> |
46 | #include <linux/syscalls.h> | ||
47 | 36 | ||
48 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
49 | #include <asm/io.h> | ||
50 | 38 | ||
51 | #define OSM_NAME "config-osm" | 39 | #include "core.h" |
52 | #define OSM_VERSION "$Rev$" | ||
53 | #define OSM_DESCRIPTION "I2O Configuration OSM" | ||
54 | 40 | ||
55 | extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int); | 41 | #define SG_TABLESIZE 30 |
56 | 42 | ||
57 | static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, | 43 | static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, |
58 | unsigned long arg); | 44 | unsigned long arg); |
@@ -80,15 +66,6 @@ struct i2o_cfg_info { | |||
80 | static struct i2o_cfg_info *open_files = NULL; | 66 | static struct i2o_cfg_info *open_files = NULL; |
81 | static ulong i2o_cfg_info_id = 0; | 67 | static ulong i2o_cfg_info_id = 0; |
82 | 68 | ||
83 | /* | ||
84 | * Each of these describes an i2o message handler. They are | ||
85 | * multiplexed by the i2o_core code | ||
86 | */ | ||
87 | |||
88 | static struct i2o_driver i2o_config_driver = { | ||
89 | .name = OSM_NAME | ||
90 | }; | ||
91 | |||
92 | static int i2o_cfg_getiops(unsigned long arg) | 69 | static int i2o_cfg_getiops(unsigned long arg) |
93 | { | 70 | { |
94 | struct i2o_controller *c; | 71 | struct i2o_controller *c; |
@@ -391,9 +368,9 @@ static int i2o_cfg_swul(unsigned long arg) | |||
391 | 368 | ||
392 | i2o_dma_free(&c->pdev->dev, &buffer); | 369 | i2o_dma_free(&c->pdev->dev, &buffer); |
393 | 370 | ||
394 | return_ret: | 371 | return_ret: |
395 | return ret; | 372 | return ret; |
396 | return_fault: | 373 | return_fault: |
397 | ret = -EFAULT; | 374 | ret = -EFAULT; |
398 | goto return_ret; | 375 | goto return_ret; |
399 | }; | 376 | }; |
@@ -540,8 +517,10 @@ static int i2o_cfg_evt_get(unsigned long arg, struct file *fp) | |||
540 | return 0; | 517 | return 0; |
541 | } | 518 | } |
542 | 519 | ||
520 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
543 | #ifdef CONFIG_COMPAT | 521 | #ifdef CONFIG_COMPAT |
544 | static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long arg) | 522 | static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, |
523 | unsigned long arg) | ||
545 | { | 524 | { |
546 | struct i2o_cmd_passthru32 __user *cmd; | 525 | struct i2o_cmd_passthru32 __user *cmd; |
547 | struct i2o_controller *c; | 526 | struct i2o_controller *c; |
@@ -555,6 +534,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
555 | u32 sg_offset = 0; | 534 | u32 sg_offset = 0; |
556 | u32 sg_count = 0; | 535 | u32 sg_count = 0; |
557 | u32 i = 0; | 536 | u32 i = 0; |
537 | u32 sg_index = 0; | ||
558 | i2o_status_block *sb; | 538 | i2o_status_block *sb; |
559 | struct i2o_message *msg; | 539 | struct i2o_message *msg; |
560 | u32 m; | 540 | u32 m; |
@@ -634,8 +614,8 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
634 | if (sg_count > SG_TABLESIZE) { | 614 | if (sg_count > SG_TABLESIZE) { |
635 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", | 615 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", |
636 | c->name, sg_count); | 616 | c->name, sg_count); |
637 | kfree(reply); | 617 | rcode = -EINVAL; |
638 | return -EINVAL; | 618 | goto cleanup; |
639 | } | 619 | } |
640 | 620 | ||
641 | for (i = 0; i < sg_count; i++) { | 621 | for (i = 0; i < sg_count; i++) { |
@@ -651,7 +631,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
651 | goto cleanup; | 631 | goto cleanup; |
652 | } | 632 | } |
653 | sg_size = sg[i].flag_count & 0xffffff; | 633 | sg_size = sg[i].flag_count & 0xffffff; |
654 | p = &(sg_list[i]); | 634 | p = &(sg_list[sg_index++]); |
655 | /* Allocate memory for the transfer */ | 635 | /* Allocate memory for the transfer */ |
656 | if (i2o_dma_alloc | 636 | if (i2o_dma_alloc |
657 | (&c->pdev->dev, p, sg_size, | 637 | (&c->pdev->dev, p, sg_size, |
@@ -660,20 +640,21 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
660 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | 640 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", |
661 | c->name, sg_size, i, sg_count); | 641 | c->name, sg_size, i, sg_count); |
662 | rcode = -ENOMEM; | 642 | rcode = -ENOMEM; |
663 | goto cleanup; | 643 | goto sg_list_cleanup; |
664 | } | 644 | } |
665 | /* Copy in the user's SG buffer if necessary */ | 645 | /* Copy in the user's SG buffer if necessary */ |
666 | if (sg[i]. | 646 | if (sg[i]. |
667 | flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { | 647 | flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) { |
668 | // TODO 64bit fix | 648 | // TODO 64bit fix |
669 | if (copy_from_user | 649 | if (copy_from_user |
670 | (p->virt, (void __user *)(unsigned long)sg[i].addr_bus, | 650 | (p->virt, |
671 | sg_size)) { | 651 | (void __user *)(unsigned long)sg[i]. |
652 | addr_bus, sg_size)) { | ||
672 | printk(KERN_DEBUG | 653 | printk(KERN_DEBUG |
673 | "%s: Could not copy SG buf %d FROM user\n", | 654 | "%s: Could not copy SG buf %d FROM user\n", |
674 | c->name, i); | 655 | c->name, i); |
675 | rcode = -EFAULT; | 656 | rcode = -EFAULT; |
676 | goto cleanup; | 657 | goto sg_list_cleanup; |
677 | } | 658 | } |
678 | } | 659 | } |
679 | //TODO 64bit fix | 660 | //TODO 64bit fix |
@@ -683,10 +664,10 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
683 | 664 | ||
684 | rcode = i2o_msg_post_wait(c, m, 60); | 665 | rcode = i2o_msg_post_wait(c, m, 60); |
685 | if (rcode) | 666 | if (rcode) |
686 | goto cleanup; | 667 | goto sg_list_cleanup; |
687 | 668 | ||
688 | if (sg_offset) { | 669 | if (sg_offset) { |
689 | u32 msg[128]; | 670 | u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; |
690 | /* Copy back the Scatter Gather buffers back to user space */ | 671 | /* Copy back the Scatter Gather buffers back to user space */ |
691 | u32 j; | 672 | u32 j; |
692 | // TODO 64bit fix | 673 | // TODO 64bit fix |
@@ -694,18 +675,18 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
694 | int sg_size; | 675 | int sg_size; |
695 | 676 | ||
696 | // re-acquire the original message to handle correctly the sg copy operation | 677 | // re-acquire the original message to handle correctly the sg copy operation |
697 | memset(&msg, 0, MSG_FRAME_SIZE * 4); | 678 | memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); |
698 | // get user msg size in u32s | 679 | // get user msg size in u32s |
699 | if (get_user(size, &user_msg[0])) { | 680 | if (get_user(size, &user_msg[0])) { |
700 | rcode = -EFAULT; | 681 | rcode = -EFAULT; |
701 | goto cleanup; | 682 | goto sg_list_cleanup; |
702 | } | 683 | } |
703 | size = size >> 16; | 684 | size = size >> 16; |
704 | size *= 4; | 685 | size *= 4; |
705 | /* Copy in the user's I2O command */ | 686 | /* Copy in the user's I2O command */ |
706 | if (copy_from_user(msg, user_msg, size)) { | 687 | if (copy_from_user(msg, user_msg, size)) { |
707 | rcode = -EFAULT; | 688 | rcode = -EFAULT; |
708 | goto cleanup; | 689 | goto sg_list_cleanup; |
709 | } | 690 | } |
710 | sg_count = | 691 | sg_count = |
711 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | 692 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); |
@@ -727,7 +708,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
727 | c->name, sg_list[j].virt, | 708 | c->name, sg_list[j].virt, |
728 | sg[j].addr_bus); | 709 | sg[j].addr_bus); |
729 | rcode = -EFAULT; | 710 | rcode = -EFAULT; |
730 | goto cleanup; | 711 | goto sg_list_cleanup; |
731 | } | 712 | } |
732 | } | 713 | } |
733 | } | 714 | } |
@@ -741,6 +722,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
741 | "%s: Could not copy message context FROM user\n", | 722 | "%s: Could not copy message context FROM user\n", |
742 | c->name); | 723 | c->name); |
743 | rcode = -EFAULT; | 724 | rcode = -EFAULT; |
725 | goto sg_list_cleanup; | ||
744 | } | 726 | } |
745 | if (copy_to_user(user_reply, reply, reply_size)) { | 727 | if (copy_to_user(user_reply, reply, reply_size)) { |
746 | printk(KERN_WARNING | 728 | printk(KERN_WARNING |
@@ -749,16 +731,21 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd, unsigned long ar | |||
749 | } | 731 | } |
750 | } | 732 | } |
751 | 733 | ||
734 | sg_list_cleanup: | ||
735 | for (i = 0; i < sg_index; i++) | ||
736 | i2o_dma_free(&c->pdev->dev, &sg_list[i]); | ||
737 | |||
752 | cleanup: | 738 | cleanup: |
753 | kfree(reply); | 739 | kfree(reply); |
754 | return rcode; | 740 | return rcode; |
755 | } | 741 | } |
756 | 742 | ||
757 | static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) | 743 | static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, |
744 | unsigned long arg) | ||
758 | { | 745 | { |
759 | int ret; | 746 | int ret; |
760 | lock_kernel(); | 747 | lock_kernel(); |
761 | switch (cmd) { | 748 | switch (cmd) { |
762 | case I2OGETIOPS: | 749 | case I2OGETIOPS: |
763 | ret = i2o_cfg_ioctl(NULL, file, cmd, arg); | 750 | ret = i2o_cfg_ioctl(NULL, file, cmd, arg); |
764 | break; | 751 | break; |
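The sg_list_cleanup label added above gives the passthrough ioctls a single unwind point: buffers allocated before a failure are released there in a loop instead of being leaked or freed ad hoc at every error site, and the success path falls through the same label. A standalone sketch of that goto-based unwind pattern; the names and the simulated failure are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Allocate up to nbufs scratch buffers, optionally failing part way
     * through, and release whatever was allocated at a single label. */
    static int passthru_like(int nbufs, int fail_at)
    {
            void *bufs[8] = { NULL };
            int rc = 0, i, allocated = 0;

            for (i = 0; i < nbufs && i < 8; i++) {
                    if (i == fail_at) {             /* simulated allocation failure */
                            rc = -1;
                            goto buf_cleanup;
                    }
                    bufs[i] = malloc(64);
                    allocated++;
            }

            /* ... use the buffers ... */

    buf_cleanup:
            for (i = 0; i < allocated; i++)
                    free(bufs[i]);
            return rc;
    }

    int main(void)
    {
            printf("ok path:   %d\n", passthru_like(4, -1));
            printf("fail path: %d\n", passthru_like(4, 2));
            return 0;
    }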
@@ -862,8 +849,8 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
862 | if (sg_count > SG_TABLESIZE) { | 849 | if (sg_count > SG_TABLESIZE) { |
863 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", | 850 | printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n", |
864 | c->name, sg_count); | 851 | c->name, sg_count); |
865 | kfree(reply); | 852 | rcode = -EINVAL; |
866 | return -EINVAL; | 853 | goto cleanup; |
867 | } | 854 | } |
868 | 855 | ||
869 | for (i = 0; i < sg_count; i++) { | 856 | for (i = 0; i < sg_count; i++) { |
@@ -875,7 +862,7 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
875 | "%s:Bad SG element %d - not simple (%x)\n", | 862 | "%s:Bad SG element %d - not simple (%x)\n", |
876 | c->name, i, sg[i].flag_count); | 863 | c->name, i, sg[i].flag_count); |
877 | rcode = -EINVAL; | 864 | rcode = -EINVAL; |
878 | goto cleanup; | 865 | goto sg_list_cleanup; |
879 | } | 866 | } |
880 | sg_size = sg[i].flag_count & 0xffffff; | 867 | sg_size = sg[i].flag_count & 0xffffff; |
881 | /* Allocate memory for the transfer */ | 868 | /* Allocate memory for the transfer */ |
@@ -885,7 +872,7 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
885 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", | 872 | "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", |
886 | c->name, sg_size, i, sg_count); | 873 | c->name, sg_size, i, sg_count); |
887 | rcode = -ENOMEM; | 874 | rcode = -ENOMEM; |
888 | goto cleanup; | 875 | goto sg_list_cleanup; |
889 | } | 876 | } |
890 | sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. | 877 | sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. |
891 | /* Copy in the user's SG buffer if necessary */ | 878 | /* Copy in the user's SG buffer if necessary */ |
@@ -899,7 +886,7 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
899 | "%s: Could not copy SG buf %d FROM user\n", | 886 | "%s: Could not copy SG buf %d FROM user\n", |
900 | c->name, i); | 887 | c->name, i); |
901 | rcode = -EFAULT; | 888 | rcode = -EFAULT; |
902 | goto cleanup; | 889 | goto sg_list_cleanup; |
903 | } | 890 | } |
904 | } | 891 | } |
905 | //TODO 64bit fix | 892 | //TODO 64bit fix |
@@ -909,7 +896,7 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
909 | 896 | ||
910 | rcode = i2o_msg_post_wait(c, m, 60); | 897 | rcode = i2o_msg_post_wait(c, m, 60); |
911 | if (rcode) | 898 | if (rcode) |
912 | goto cleanup; | 899 | goto sg_list_cleanup; |
913 | 900 | ||
914 | if (sg_offset) { | 901 | if (sg_offset) { |
915 | u32 msg[128]; | 902 | u32 msg[128]; |
@@ -920,18 +907,18 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
920 | int sg_size; | 907 | int sg_size; |
921 | 908 | ||
922 | // re-acquire the original message to handle correctly the sg copy operation | 909 | // re-acquire the original message to handle correctly the sg copy operation |
923 | memset(&msg, 0, MSG_FRAME_SIZE * 4); | 910 | memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); |
924 | // get user msg size in u32s | 911 | // get user msg size in u32s |
925 | if (get_user(size, &user_msg[0])) { | 912 | if (get_user(size, &user_msg[0])) { |
926 | rcode = -EFAULT; | 913 | rcode = -EFAULT; |
927 | goto cleanup; | 914 | goto sg_list_cleanup; |
928 | } | 915 | } |
929 | size = size >> 16; | 916 | size = size >> 16; |
930 | size *= 4; | 917 | size *= 4; |
931 | /* Copy in the user's I2O command */ | 918 | /* Copy in the user's I2O command */ |
932 | if (copy_from_user(msg, user_msg, size)) { | 919 | if (copy_from_user(msg, user_msg, size)) { |
933 | rcode = -EFAULT; | 920 | rcode = -EFAULT; |
934 | goto cleanup; | 921 | goto sg_list_cleanup; |
935 | } | 922 | } |
936 | sg_count = | 923 | sg_count = |
937 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); | 924 | (size - sg_offset * 4) / sizeof(struct sg_simple_element); |
@@ -953,7 +940,7 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
953 | c->name, sg_list[j], | 940 | c->name, sg_list[j], |
954 | sg[j].addr_bus); | 941 | sg[j].addr_bus); |
955 | rcode = -EFAULT; | 942 | rcode = -EFAULT; |
956 | goto cleanup; | 943 | goto sg_list_cleanup; |
957 | } | 944 | } |
958 | } | 945 | } |
959 | } | 946 | } |
@@ -975,10 +962,15 @@ static int i2o_cfg_passthru(unsigned long arg) | |||
975 | } | 962 | } |
976 | } | 963 | } |
977 | 964 | ||
965 | sg_list_cleanup: | ||
966 | for (i = 0; i < sg_index; i++) | ||
967 | kfree(sg_list[i]); | ||
968 | |||
978 | cleanup: | 969 | cleanup: |
979 | kfree(reply); | 970 | kfree(reply); |
980 | return rcode; | 971 | return rcode; |
981 | } | 972 | } |
973 | #endif | ||
982 | 974 | ||
983 | /* | 975 | /* |
984 | * IOCTL Handler | 976 | * IOCTL Handler |
@@ -1033,9 +1025,11 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, | |||
1033 | ret = i2o_cfg_evt_get(arg, fp); | 1025 | ret = i2o_cfg_evt_get(arg, fp); |
1034 | break; | 1026 | break; |
1035 | 1027 | ||
1028 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
1036 | case I2OPASSTHRU: | 1029 | case I2OPASSTHRU: |
1037 | ret = i2o_cfg_passthru(arg); | 1030 | ret = i2o_cfg_passthru(arg); |
1038 | break; | 1031 | break; |
1032 | #endif | ||
1039 | 1033 | ||
1040 | default: | 1034 | default: |
1041 | osm_debug("unknown ioctl called!\n"); | 1035 | osm_debug("unknown ioctl called!\n"); |
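
This hunk compiles the I2OPASSTHRU case only when CONFIG_I2O_EXT_ADAPTEC is enabled. A tiny sketch of the same preprocessor-gated dispatch, with made-up names (CMD_GET, CMD_PASSTHRU, CONFIG_EXT_FEATURE), just to show the shape:

#include <stdio.h>

#define CMD_GET      1
#define CMD_PASSTHRU 2

/* #define CONFIG_EXT_FEATURE 1 */	/* build-time switch, normally set by Kconfig */

static int handle(int cmd)
{
	switch (cmd) {
	case CMD_GET:
		return 0;
#ifdef CONFIG_EXT_FEATURE
	case CMD_PASSTHRU:
		return 0;
#endif
	default:
		return -1;	/* unknown command when the feature is compiled out */
	}
}

int main(void)
{
	printf("CMD_PASSTHRU -> %d\n", handle(CMD_PASSTHRU));
	return 0;
}

With the option disabled the case simply does not exist, so the command falls into the default branch, which matches how the ioctl behaves without Adaptec support.
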
@@ -1137,37 +1131,21 @@ static struct miscdevice i2o_miscdev = { | |||
1137 | &config_fops | 1131 | &config_fops |
1138 | }; | 1132 | }; |
1139 | 1133 | ||
1140 | static int __init i2o_config_init(void) | 1134 | static int __init i2o_config_old_init(void) |
1141 | { | 1135 | { |
1142 | printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); | ||
1143 | |||
1144 | spin_lock_init(&i2o_config_lock); | 1136 | spin_lock_init(&i2o_config_lock); |
1145 | 1137 | ||
1146 | if (misc_register(&i2o_miscdev) < 0) { | 1138 | if (misc_register(&i2o_miscdev) < 0) { |
1147 | osm_err("can't register device.\n"); | 1139 | osm_err("can't register device.\n"); |
1148 | return -EBUSY; | 1140 | return -EBUSY; |
1149 | } | 1141 | } |
1150 | /* | 1142 | |
1151 | * Install our handler | ||
1152 | */ | ||
1153 | if (i2o_driver_register(&i2o_config_driver)) { | ||
1154 | osm_err("handler register failed.\n"); | ||
1155 | misc_deregister(&i2o_miscdev); | ||
1156 | return -EBUSY; | ||
1157 | } | ||
1158 | return 0; | 1143 | return 0; |
1159 | } | 1144 | } |
1160 | 1145 | ||
1161 | static void i2o_config_exit(void) | 1146 | static void i2o_config_old_exit(void) |
1162 | { | 1147 | { |
1163 | misc_deregister(&i2o_miscdev); | 1148 | misc_deregister(&i2o_miscdev); |
1164 | i2o_driver_unregister(&i2o_config_driver); | ||
1165 | } | 1149 | } |
1166 | 1150 | ||
1167 | MODULE_AUTHOR("Red Hat Software"); | 1151 | MODULE_AUTHOR("Red Hat Software"); |
1168 | MODULE_LICENSE("GPL"); | ||
1169 | MODULE_DESCRIPTION(OSM_DESCRIPTION); | ||
1170 | MODULE_VERSION(OSM_VERSION); | ||
1171 | |||
1172 | module_init(i2o_config_init); | ||
1173 | module_exit(i2o_config_exit); | ||
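
i2o_config_init() becomes i2o_config_old_init(), keeping only the misc-device registration; the banner printk, the i2o_driver_register() call with its misc_deregister() rollback, and the module_init/module_exit hooks leave this file (presumably taken over by the new config OSM core). A generic sketch of the register-then-roll-back pattern the removed block followed, in plain C with hypothetical names:

#include <stdio.h>

static int misc_ok, drv_ok;

static int register_misc(void)      { misc_ok = 1; return 0; }
static void unregister_misc(void)   { misc_ok = 0; }
static int register_driver(void)    { return -1; /* simulate a failure */ }

static int subsystem_init(void)
{
	if (register_misc())
		return -1;
	if (register_driver()) {
		unregister_misc();	/* roll back the step that already succeeded */
		return -1;
	}
	drv_ok = 1;
	return 0;
}

int main(void)
{
	printf("init: %d (misc=%d drv=%d)\n", subsystem_init(), misc_ok, drv_ok);
	return 0;
}
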
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index b176d0eeff7f..d559a1758363 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c | |||
@@ -28,7 +28,7 @@ | |||
28 | */ | 28 | */ |
29 | 29 | ||
30 | #define OSM_NAME "proc-osm" | 30 | #define OSM_NAME "proc-osm" |
31 | #define OSM_VERSION "$Rev$" | 31 | #define OSM_VERSION "1.145" |
32 | #define OSM_DESCRIPTION "I2O ProcFS OSM" | 32 | #define OSM_DESCRIPTION "I2O ProcFS OSM" |
33 | 33 | ||
34 | #define I2O_MAX_MODULES 4 | 34 | #define I2O_MAX_MODULES 4 |
@@ -228,7 +228,7 @@ static const char *i2o_get_class_name(int class) | |||
228 | case I2O_CLASS_FLOPPY_DEVICE: | 228 | case I2O_CLASS_FLOPPY_DEVICE: |
229 | idx = 12; | 229 | idx = 12; |
230 | break; | 230 | break; |
231 | case I2O_CLASS_BUS_ADAPTER_PORT: | 231 | case I2O_CLASS_BUS_ADAPTER: |
232 | idx = 13; | 232 | idx = 13; |
233 | break; | 233 | break; |
234 | case I2O_CLASS_PEER_TRANSPORT_AGENT: | 234 | case I2O_CLASS_PEER_TRANSPORT_AGENT: |
@@ -490,7 +490,7 @@ static int i2o_seq_show_lct(struct seq_file *seq, void *v) | |||
490 | seq_printf(seq, ", Unknown Device Type"); | 490 | seq_printf(seq, ", Unknown Device Type"); |
491 | break; | 491 | break; |
492 | 492 | ||
493 | case I2O_CLASS_BUS_ADAPTER_PORT: | 493 | case I2O_CLASS_BUS_ADAPTER: |
494 | if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) | 494 | if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE) |
495 | seq_printf(seq, ", %s", | 495 | seq_printf(seq, ", %s", |
496 | bus_ports[lct->lct_entry[i]. | 496 | bus_ports[lct->lct_entry[i]. |
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index 43f5875e0be5..9f1744c3933b 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <linux/pci.h> | 54 | #include <linux/pci.h> |
55 | #include <linux/blkdev.h> | 55 | #include <linux/blkdev.h> |
56 | #include <linux/i2o.h> | 56 | #include <linux/i2o.h> |
57 | #include <linux/scatterlist.h> | ||
57 | 58 | ||
58 | #include <asm/dma.h> | 59 | #include <asm/dma.h> |
59 | #include <asm/system.h> | 60 | #include <asm/system.h> |
@@ -64,19 +65,23 @@ | |||
64 | #include <scsi/scsi_host.h> | 65 | #include <scsi/scsi_host.h> |
65 | #include <scsi/scsi_device.h> | 66 | #include <scsi/scsi_device.h> |
66 | #include <scsi/scsi_cmnd.h> | 67 | #include <scsi/scsi_cmnd.h> |
68 | #include <scsi/scsi_request.h> | ||
69 | #include <scsi/sg.h> | ||
70 | #include <scsi/sg_request.h> | ||
67 | 71 | ||
68 | #define OSM_NAME "scsi-osm" | 72 | #define OSM_NAME "scsi-osm" |
69 | #define OSM_VERSION "$Rev$" | 73 | #define OSM_VERSION "1.282" |
70 | #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" | 74 | #define OSM_DESCRIPTION "I2O SCSI Peripheral OSM" |
71 | 75 | ||
72 | static struct i2o_driver i2o_scsi_driver; | 76 | static struct i2o_driver i2o_scsi_driver; |
73 | 77 | ||
74 | static int i2o_scsi_max_id = 16; | 78 | static unsigned int i2o_scsi_max_id = 16; |
75 | static int i2o_scsi_max_lun = 8; | 79 | static unsigned int i2o_scsi_max_lun = 255; |
76 | 80 | ||
77 | struct i2o_scsi_host { | 81 | struct i2o_scsi_host { |
78 | struct Scsi_Host *scsi_host; /* pointer to the SCSI host */ | 82 | struct Scsi_Host *scsi_host; /* pointer to the SCSI host */ |
79 | struct i2o_controller *iop; /* pointer to the I2O controller */ | 83 | struct i2o_controller *iop; /* pointer to the I2O controller */ |
84 | unsigned int lun; /* lun's used for block devices */ | ||
80 | struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */ | 85 | struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */ |
81 | }; | 86 | }; |
82 | 87 | ||
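
struct i2o_scsi_host ends in channel[0], the older zero-length-array idiom for a per-controller channel table whose size is only known when the host is allocated (the patch also adds a lun counter for block devices). A small userspace sketch of the same allocation trick using the C99 flexible array member, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct host {
	int nchannels;
	void *channel[];	/* C99 flexible array member; the driver uses the older [0] form */
};

static struct host *host_alloc(int nchannels)
{
	/* one allocation covers the header plus the variable-length table */
	struct host *h = malloc(sizeof(*h) + nchannels * sizeof(h->channel[0]));

	if (h)
		h->nchannels = nchannels;
	return h;
}

int main(void)
{
	struct host *h = host_alloc(3);

	if (!h)
		return 1;
	for (int i = 0; i < h->nchannels; i++)
		h->channel[i] = NULL;
	printf("allocated host with %d channels\n", h->nchannels);
	free(h);
	return 0;
}
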
@@ -99,11 +104,17 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) | |||
99 | u8 type; | 104 | u8 type; |
100 | int i; | 105 | int i; |
101 | size_t size; | 106 | size_t size; |
102 | i2o_status_block *sb; | 107 | u16 body_size = 6; |
108 | |||
109 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
110 | if (c->adaptec) | ||
111 | body_size = 8; | ||
112 | #endif | ||
103 | 113 | ||
104 | list_for_each_entry(i2o_dev, &c->devices, list) | 114 | list_for_each_entry(i2o_dev, &c->devices, list) |
105 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { | 115 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { |
106 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* SCSI bus */ | 116 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) |
117 | && (type == 0x01)) /* SCSI bus */ | ||
107 | max_channel++; | 118 | max_channel++; |
108 | } | 119 | } |
109 | 120 | ||
@@ -125,20 +136,18 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c) | |||
125 | scsi_host->max_id = i2o_scsi_max_id; | 136 | scsi_host->max_id = i2o_scsi_max_id; |
126 | scsi_host->max_lun = i2o_scsi_max_lun; | 137 | scsi_host->max_lun = i2o_scsi_max_lun; |
127 | scsi_host->this_id = c->unit; | 138 | scsi_host->this_id = c->unit; |
128 | 139 | scsi_host->sg_tablesize = i2o_sg_tablesize(c, body_size); | |
129 | sb = c->status_block.virt; | ||
130 | |||
131 | scsi_host->sg_tablesize = (sb->inbound_frame_size - | ||
132 | sizeof(struct i2o_message) / 4 - 6) / 2; | ||
133 | 140 | ||
134 | i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata; | 141 | i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata; |
135 | i2o_shost->scsi_host = scsi_host; | 142 | i2o_shost->scsi_host = scsi_host; |
136 | i2o_shost->iop = c; | 143 | i2o_shost->iop = c; |
144 | i2o_shost->lun = 1; | ||
137 | 145 | ||
138 | i = 0; | 146 | i = 0; |
139 | list_for_each_entry(i2o_dev, &c->devices, list) | 147 | list_for_each_entry(i2o_dev, &c->devices, list) |
140 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) { | 148 | if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) { |
141 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */ | 149 | if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) |
150 | && (type == 0x01)) /* only SCSI bus */ | ||
142 | i2o_shost->channel[i++] = i2o_dev; | 151 | i2o_shost->channel[i++] = i2o_dev; |
143 | 152 | ||
144 | if (i >= max_channel) | 153 | if (i >= max_channel) |
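
The removed open-coded formula derived sg_tablesize from the inbound frame size minus the header and body words, at two 32-bit words per simple SG element; the patch replaces it with i2o_sg_tablesize(c, body_size), where body_size is 6 (or 8 on Adaptec). A worked-arithmetic sketch under assumed values (32-word inbound frame, 4 header words); this is illustrative only, not the actual i2o_sg_tablesize() implementation:

#include <stdio.h>

#define FRAME_WORDS   32	/* assumed inbound message frame size in 32-bit words */
#define HEADER_WORDS   4	/* assumed message header size in words */
#define SG_ENTRY_WORDS 2	/* a simple SG element is flags+length word and address word */

static int sg_tablesize(int body_words)
{
	return (FRAME_WORDS - HEADER_WORDS - body_words) / SG_ENTRY_WORDS;
}

int main(void)
{
	printf("body 6 words -> %d SG entries\n", sg_tablesize(6));	/* 11 */
	printf("body 8 words -> %d SG entries\n", sg_tablesize(8));	/* 10 */
	return 0;
}
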
@@ -178,10 +187,13 @@ static int i2o_scsi_remove(struct device *dev) | |||
178 | struct i2o_scsi_host *i2o_shost; | 187 | struct i2o_scsi_host *i2o_shost; |
179 | struct scsi_device *scsi_dev; | 188 | struct scsi_device *scsi_dev; |
180 | 189 | ||
190 | osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid); | ||
191 | |||
181 | i2o_shost = i2o_scsi_get_host(c); | 192 | i2o_shost = i2o_scsi_get_host(c); |
182 | 193 | ||
183 | shost_for_each_device(scsi_dev, i2o_shost->scsi_host) | 194 | shost_for_each_device(scsi_dev, i2o_shost->scsi_host) |
184 | if (scsi_dev->hostdata == i2o_dev) { | 195 | if (scsi_dev->hostdata == i2o_dev) { |
196 | sysfs_remove_link(&i2o_dev->device.kobj, "scsi"); | ||
185 | scsi_remove_device(scsi_dev); | 197 | scsi_remove_device(scsi_dev); |
186 | scsi_device_put(scsi_dev); | 198 | scsi_device_put(scsi_dev); |
187 | break; | 199 | break; |
@@ -207,8 +219,8 @@ static int i2o_scsi_probe(struct device *dev) | |||
207 | struct Scsi_Host *scsi_host; | 219 | struct Scsi_Host *scsi_host; |
208 | struct i2o_device *parent; | 220 | struct i2o_device *parent; |
209 | struct scsi_device *scsi_dev; | 221 | struct scsi_device *scsi_dev; |
210 | u32 id; | 222 | u32 id = -1; |
211 | u64 lun; | 223 | u64 lun = -1; |
212 | int channel = -1; | 224 | int channel = -1; |
213 | int i; | 225 | int i; |
214 | 226 | ||
@@ -218,8 +230,56 @@ static int i2o_scsi_probe(struct device *dev) | |||
218 | 230 | ||
219 | scsi_host = i2o_shost->scsi_host; | 231 | scsi_host = i2o_shost->scsi_host; |
220 | 232 | ||
221 | if (i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0) | 233 | switch (i2o_dev->lct_data.class_id) { |
234 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
235 | case I2O_CLASS_EXECUTIVE: | ||
236 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
237 | if (c->adaptec) { | ||
238 | u8 type; | ||
239 | struct i2o_device *d = i2o_shost->channel[0]; | ||
240 | |||
241 | if (i2o_parm_field_get(d, 0x0000, 0, &type, 1) | ||
242 | && (type == 0x01)) /* SCSI bus */ | ||
243 | if (i2o_parm_field_get(d, 0x0200, 4, &id, 4)) { | ||
244 | channel = 0; | ||
245 | if (i2o_dev->lct_data.class_id == | ||
246 | I2O_CLASS_RANDOM_BLOCK_STORAGE) | ||
247 | lun = i2o_shost->lun++; | ||
248 | else | ||
249 | lun = 0; | ||
250 | } | ||
251 | } | ||
252 | #endif | ||
253 | break; | ||
254 | |||
255 | case I2O_CLASS_SCSI_PERIPHERAL: | ||
256 | if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4) < 0) | ||
257 | return -EFAULT; | ||
258 | |||
259 | if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8) < 0) | ||
260 | return -EFAULT; | ||
261 | |||
262 | parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); | ||
263 | if (!parent) { | ||
264 | osm_warn("can not find parent of device %03x\n", | ||
265 | i2o_dev->lct_data.tid); | ||
266 | return -EFAULT; | ||
267 | } | ||
268 | |||
269 | for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++) | ||
270 | if (i2o_shost->channel[i] == parent) | ||
271 | channel = i; | ||
272 | break; | ||
273 | |||
274 | default: | ||
222 | return -EFAULT; | 275 | return -EFAULT; |
276 | } | ||
277 | |||
278 | if (channel == -1) { | ||
279 | osm_warn("can not find channel of device %03x\n", | ||
280 | i2o_dev->lct_data.tid); | ||
281 | return -EFAULT; | ||
282 | } | ||
223 | 283 | ||
224 | if (id >= scsi_host->max_id) { | 284 | if (id >= scsi_host->max_id) { |
225 | osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, | 285 | osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id, |
@@ -227,42 +287,26 @@ static int i2o_scsi_probe(struct device *dev) | |||
227 | return -EFAULT; | 287 | return -EFAULT; |
228 | } | 288 | } |
229 | 289 | ||
230 | if (i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0) | ||
231 | return -EFAULT; | ||
232 | if (lun >= scsi_host->max_lun) { | 290 | if (lun >= scsi_host->max_lun) { |
233 | osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", | 291 | osm_warn("SCSI device id (%d) >= max_lun of I2O host (%d)", |
234 | (unsigned int)lun, scsi_host->max_lun); | 292 | (unsigned int)lun, scsi_host->max_lun); |
235 | return -EFAULT; | 293 | return -EFAULT; |
236 | } | 294 | } |
237 | 295 | ||
238 | parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid); | ||
239 | if (!parent) { | ||
240 | osm_warn("can not find parent of device %03x\n", | ||
241 | i2o_dev->lct_data.tid); | ||
242 | return -EFAULT; | ||
243 | } | ||
244 | |||
245 | for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++) | ||
246 | if (i2o_shost->channel[i] == parent) | ||
247 | channel = i; | ||
248 | |||
249 | if (channel == -1) { | ||
250 | osm_warn("can not find channel of device %03x\n", | ||
251 | i2o_dev->lct_data.tid); | ||
252 | return -EFAULT; | ||
253 | } | ||
254 | |||
255 | scsi_dev = | 296 | scsi_dev = |
256 | __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); | 297 | __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev); |
257 | 298 | ||
258 | if (!scsi_dev) { | 299 | if (IS_ERR(scsi_dev)) { |
259 | osm_warn("can not add SCSI device %03x\n", | 300 | osm_warn("can not add SCSI device %03x\n", |
260 | i2o_dev->lct_data.tid); | 301 | i2o_dev->lct_data.tid); |
261 | return -EFAULT; | 302 | return PTR_ERR(scsi_dev); |
262 | } | 303 | } |
263 | 304 | ||
264 | osm_debug("added new SCSI device %03x (cannel: %d, id: %d, lun: %d)\n", | 305 | sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, |
265 | i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); | 306 | "scsi"); |
307 | |||
308 | osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n", | ||
309 | i2o_dev->lct_data.tid, channel, id, (unsigned int)lun); | ||
266 | 310 | ||
267 | return 0; | 311 | return 0; |
268 | }; | 312 | }; |
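
__scsi_add_device() returns an error pointer rather than NULL on failure, so the probe now tests IS_ERR() and propagates PTR_ERR(). A userspace re-creation of the error-pointer encoding, relying on the usual assumption that errno values map into the top page of the address space (the same trick the kernel macros use):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)        { return (void *)err; }
static long PTR_ERR(const void *p)    { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* stand-in for a constructor that can fail: returns data or an encoded errno */
static void *add_device(int fail)
{
	if (fail)
		return ERR_PTR(-ENODEV);
	return malloc(16);
}

int main(void)
{
	void *dev = add_device(1);

	if (IS_ERR(dev)) {
		printf("add_device failed: %ld\n", PTR_ERR(dev));
		return 1;
	}
	free(dev);
	return 0;
}

The advantage over returning NULL is that the caller learns which error occurred, which is why the hunk switches the return value to PTR_ERR(scsi_dev).
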
@@ -293,162 +337,89 @@ static int i2o_scsi_reply(struct i2o_controller *c, u32 m, | |||
293 | struct i2o_message *msg) | 337 | struct i2o_message *msg) |
294 | { | 338 | { |
295 | struct scsi_cmnd *cmd; | 339 | struct scsi_cmnd *cmd; |
340 | u32 error; | ||
296 | struct device *dev; | 341 | struct device *dev; |
297 | u8 as, ds, st; | ||
298 | 342 | ||
299 | cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); | 343 | cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt)); |
300 | 344 | if (unlikely(!cmd)) { | |
301 | if (msg->u.head[0] & (1 << 13)) { | 345 | osm_err("NULL reply received!\n"); |
302 | struct i2o_message __iomem *pmsg; /* preserved message */ | 346 | return -1; |
303 | u32 pm; | ||
304 | int err = DID_ERROR; | ||
305 | |||
306 | pm = le32_to_cpu(msg->body[3]); | ||
307 | |||
308 | pmsg = i2o_msg_in_to_virt(c, pm); | ||
309 | |||
310 | osm_err("IOP fail.\n"); | ||
311 | osm_err("From %d To %d Cmd %d.\n", | ||
312 | (msg->u.head[1] >> 12) & 0xFFF, | ||
313 | msg->u.head[1] & 0xFFF, msg->u.head[1] >> 24); | ||
314 | osm_err("Failure Code %d.\n", msg->body[0] >> 24); | ||
315 | if (msg->body[0] & (1 << 16)) | ||
316 | osm_err("Format error.\n"); | ||
317 | if (msg->body[0] & (1 << 17)) | ||
318 | osm_err("Path error.\n"); | ||
319 | if (msg->body[0] & (1 << 18)) | ||
320 | osm_err("Path State.\n"); | ||
321 | if (msg->body[0] & (1 << 18)) | ||
322 | { | ||
323 | osm_err("Congestion.\n"); | ||
324 | err = DID_BUS_BUSY; | ||
325 | } | ||
326 | |||
327 | osm_debug("Failing message is %p.\n", pmsg); | ||
328 | |||
329 | cmd = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt)); | ||
330 | if (!cmd) | ||
331 | return 1; | ||
332 | |||
333 | cmd->result = err << 16; | ||
334 | cmd->scsi_done(cmd); | ||
335 | |||
336 | /* Now flush the message by making it a NOP */ | ||
337 | i2o_msg_nop(c, pm); | ||
338 | |||
339 | return 1; | ||
340 | } | 347 | } |
341 | 348 | ||
342 | /* | 349 | /* |
343 | * Low byte is device status, next is adapter status, | 350 | * Low byte is device status, next is adapter status, |
344 | * (then one byte reserved), then request status. | 351 | * (then one byte reserved), then request status. |
345 | */ | 352 | */ |
346 | ds = (u8) le32_to_cpu(msg->body[0]); | 353 | error = le32_to_cpu(msg->body[0]); |
347 | as = (u8) (le32_to_cpu(msg->body[0]) >> 8); | 354 | |
348 | st = (u8) (le32_to_cpu(msg->body[0]) >> 24); | 355 | osm_debug("Completed %ld\n", cmd->serial_number); |
349 | 356 | ||
357 | cmd->result = error & 0xff; | ||
350 | /* | 358 | /* |
351 | * Is this a control request coming back - eg an abort ? | 359 | * if DeviceStatus is not SCSI_SUCCESS copy over the sense data and let |
360 | * the SCSI layer handle the error | ||
352 | */ | 361 | */ |
362 | if (cmd->result) | ||
363 | memcpy(cmd->sense_buffer, &msg->body[3], | ||
364 | min(sizeof(cmd->sense_buffer), (size_t) 40)); | ||
353 | 365 | ||
354 | if (!cmd) { | 366 | /* only output error code if AdapterStatus is not HBA_SUCCESS */ |
355 | if (st) | 367 | if ((error >> 8) & 0xff) |
356 | osm_warn("SCSI abort: %08X", le32_to_cpu(msg->body[0])); | 368 | osm_err("SCSI error %08x\n", error); |
357 | osm_info("SCSI abort completed.\n"); | ||
358 | return -EFAULT; | ||
359 | } | ||
360 | 369 | ||
361 | osm_debug("Completed %ld\n", cmd->serial_number); | 370 | dev = &c->pdev->dev; |
371 | if (cmd->use_sg) | ||
372 | dma_unmap_sg(dev, cmd->request_buffer, cmd->use_sg, | ||
373 | cmd->sc_data_direction); | ||
374 | else if (cmd->SCp.dma_handle) | ||
375 | dma_unmap_single(dev, cmd->SCp.dma_handle, cmd->request_bufflen, | ||
376 | cmd->sc_data_direction); | ||
362 | 377 | ||
363 | if (st) { | 378 | cmd->scsi_done(cmd); |
364 | u32 count, error; | ||
365 | /* An error has occurred */ | ||
366 | |||
367 | switch (st) { | ||
368 | case 0x06: | ||
369 | count = le32_to_cpu(msg->body[1]); | ||
370 | if (count < cmd->underflow) { | ||
371 | int i; | ||
372 | |||
373 | osm_err("SCSI underflow 0x%08X 0x%08X\n", count, | ||
374 | cmd->underflow); | ||
375 | osm_debug("Cmd: "); | ||
376 | for (i = 0; i < 15; i++) | ||
377 | pr_debug("%02X ", cmd->cmnd[i]); | ||
378 | pr_debug(".\n"); | ||
379 | cmd->result = (DID_ERROR << 16); | ||
380 | } | ||
381 | break; | ||
382 | 379 | ||
383 | default: | 380 | return 1; |
384 | error = le32_to_cpu(msg->body[0]); | 381 | }; |
385 | |||
386 | osm_err("SCSI error %08x\n", error); | ||
387 | |||
388 | if ((error & 0xff) == 0x02 /*CHECK_CONDITION */ ) { | ||
389 | int i; | ||
390 | u32 len = sizeof(cmd->sense_buffer); | ||
391 | len = (len > 40) ? 40 : len; | ||
392 | // Copy over the sense data | ||
393 | memcpy(cmd->sense_buffer, (void *)&msg->body[3], | ||
394 | len); | ||
395 | for (i = 0; i <= len; i++) | ||
396 | osm_info("%02x\n", | ||
397 | cmd->sense_buffer[i]); | ||
398 | if (cmd->sense_buffer[0] == 0x70 | ||
399 | && cmd->sense_buffer[2] == DATA_PROTECT) { | ||
400 | /* This is to handle an array failed */ | ||
401 | cmd->result = (DID_TIME_OUT << 16); | ||
402 | printk(KERN_WARNING "%s: SCSI Data " | ||
403 | "Protect-Device (%d,%d,%d) " | ||
404 | "hba_status=0x%x, dev_status=" | ||
405 | "0x%x, cmd=0x%x\n", c->name, | ||
406 | (u32) cmd->device->channel, | ||
407 | (u32) cmd->device->id, | ||
408 | (u32) cmd->device->lun, | ||
409 | (error >> 8) & 0xff, | ||
410 | error & 0xff, cmd->cmnd[0]); | ||
411 | } else | ||
412 | cmd->result = (DID_ERROR << 16); | ||
413 | |||
414 | break; | ||
415 | } | ||
416 | |||
417 | switch (as) { | ||
418 | case 0x0E: | ||
419 | /* SCSI Reset */ | ||
420 | cmd->result = DID_RESET << 16; | ||
421 | break; | ||
422 | |||
423 | case 0x0F: | ||
424 | cmd->result = DID_PARITY << 16; | ||
425 | break; | ||
426 | |||
427 | default: | ||
428 | cmd->result = DID_ERROR << 16; | ||
429 | break; | ||
430 | } | ||
431 | 382 | ||
432 | break; | 383 | /** |
433 | } | 384 | * i2o_scsi_notify_device_add - Retrieve notifications of added devices |
385 | * @i2o_dev: the I2O device which was added | ||
386 | * | ||
387 | * If an I2O device is added we catch the notification, because I2O classes | ||
388 | * other than SCSI peripheral will not be received through | ||
389 | * i2o_scsi_probe(). | ||
390 | */ | ||
391 | static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev) | ||
392 | { | ||
393 | switch (i2o_dev->lct_data.class_id) { | ||
394 | case I2O_CLASS_EXECUTIVE: | ||
395 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
396 | i2o_scsi_probe(&i2o_dev->device); | ||
397 | break; | ||
434 | 398 | ||
435 | cmd->scsi_done(cmd); | 399 | default: |
436 | return 1; | 400 | break; |
437 | } | 401 | } |
402 | }; | ||
438 | 403 | ||
439 | cmd->result = DID_OK << 16 | ds; | 404 | /** |
440 | 405 | * i2o_scsi_notify_device_remove - Retrieve notifications of removed | |
441 | cmd->scsi_done(cmd); | 406 | * devices |
442 | 407 | * @i2o_dev: the I2O device which was removed | |
443 | dev = &c->pdev->dev; | 408 | * |
444 | if (cmd->use_sg) | 409 | * If a I2O device is removed, we catch the notification to remove the |
445 | dma_unmap_sg(dev, (struct scatterlist *)cmd->buffer, | 410 | * corresponding SCSI device. |
446 | cmd->use_sg, cmd->sc_data_direction); | 411 | */ |
447 | else if (cmd->request_bufflen) | 412 | static void i2o_scsi_notify_device_remove(struct i2o_device *i2o_dev) |
448 | dma_unmap_single(dev, (dma_addr_t) ((long)cmd->SCp.ptr), | 413 | { |
449 | cmd->request_bufflen, cmd->sc_data_direction); | 414 | switch (i2o_dev->lct_data.class_id) { |
415 | case I2O_CLASS_EXECUTIVE: | ||
416 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
417 | i2o_scsi_remove(&i2o_dev->device); | ||
418 | break; | ||
450 | 419 | ||
451 | return 1; | 420 | default: |
421 | break; | ||
422 | } | ||
452 | }; | 423 | }; |
453 | 424 | ||
454 | /** | 425 | /** |
@@ -501,7 +472,7 @@ static void i2o_scsi_notify_controller_remove(struct i2o_controller *c) | |||
501 | 472 | ||
502 | scsi_remove_host(i2o_shost->scsi_host); | 473 | scsi_remove_host(i2o_shost->scsi_host); |
503 | scsi_host_put(i2o_shost->scsi_host); | 474 | scsi_host_put(i2o_shost->scsi_host); |
504 | pr_info("I2O SCSI host removed\n"); | 475 | osm_debug("I2O SCSI host removed\n"); |
505 | }; | 476 | }; |
506 | 477 | ||
507 | /* SCSI OSM driver struct */ | 478 | /* SCSI OSM driver struct */ |
@@ -509,6 +480,8 @@ static struct i2o_driver i2o_scsi_driver = { | |||
509 | .name = OSM_NAME, | 480 | .name = OSM_NAME, |
510 | .reply = i2o_scsi_reply, | 481 | .reply = i2o_scsi_reply, |
511 | .classes = i2o_scsi_class_id, | 482 | .classes = i2o_scsi_class_id, |
483 | .notify_device_add = i2o_scsi_notify_device_add, | ||
484 | .notify_device_remove = i2o_scsi_notify_device_remove, | ||
512 | .notify_controller_add = i2o_scsi_notify_controller_add, | 485 | .notify_controller_add = i2o_scsi_notify_controller_add, |
513 | .notify_controller_remove = i2o_scsi_notify_controller_remove, | 486 | .notify_controller_remove = i2o_scsi_notify_controller_remove, |
514 | .driver = { | 487 | .driver = { |
@@ -535,26 +508,26 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
535 | void (*done) (struct scsi_cmnd *)) | 508 | void (*done) (struct scsi_cmnd *)) |
536 | { | 509 | { |
537 | struct i2o_controller *c; | 510 | struct i2o_controller *c; |
538 | struct Scsi_Host *host; | ||
539 | struct i2o_device *i2o_dev; | 511 | struct i2o_device *i2o_dev; |
540 | struct device *dev; | ||
541 | int tid; | 512 | int tid; |
542 | struct i2o_message __iomem *msg; | 513 | struct i2o_message __iomem *msg; |
543 | u32 m; | 514 | u32 m; |
544 | u32 scsi_flags, sg_flags; | 515 | /* |
516 | * ENABLE_DISCONNECT | ||
517 | * SIMPLE_TAG | ||
518 | * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME | ||
519 | */ | ||
520 | u32 scsi_flags = 0x20a00000; | ||
521 | u32 sgl_offset; | ||
545 | u32 __iomem *mptr; | 522 | u32 __iomem *mptr; |
546 | u32 __iomem *lenptr; | 523 | u32 cmd = I2O_CMD_SCSI_EXEC << 24; |
547 | u32 len, reqlen; | 524 | int rc = 0; |
548 | int i; | ||
549 | 525 | ||
550 | /* | 526 | /* |
551 | * Do the incoming paperwork | 527 | * Do the incoming paperwork |
552 | */ | 528 | */ |
553 | |||
554 | i2o_dev = SCpnt->device->hostdata; | 529 | i2o_dev = SCpnt->device->hostdata; |
555 | host = SCpnt->device->host; | ||
556 | c = i2o_dev->iop; | 530 | c = i2o_dev->iop; |
557 | dev = &c->pdev->dev; | ||
558 | 531 | ||
559 | SCpnt->scsi_done = done; | 532 | SCpnt->scsi_done = done; |
560 | 533 | ||
@@ -562,7 +535,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
562 | osm_warn("no I2O device in request\n"); | 535 | osm_warn("no I2O device in request\n"); |
563 | SCpnt->result = DID_NO_CONNECT << 16; | 536 | SCpnt->result = DID_NO_CONNECT << 16; |
564 | done(SCpnt); | 537 | done(SCpnt); |
565 | return 0; | 538 | goto exit; |
566 | } | 539 | } |
567 | 540 | ||
568 | tid = i2o_dev->lct_data.tid; | 541 | tid = i2o_dev->lct_data.tid; |
@@ -571,44 +544,85 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
571 | osm_debug("Real scsi messages.\n"); | 544 | osm_debug("Real scsi messages.\n"); |
572 | 545 | ||
573 | /* | 546 | /* |
574 | * Obtain an I2O message. If there are none free then | ||
575 | * throw it back to the scsi layer | ||
576 | */ | ||
577 | |||
578 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
579 | if (m == I2O_QUEUE_EMPTY) | ||
580 | return SCSI_MLQUEUE_HOST_BUSY; | ||
581 | |||
582 | /* | ||
583 | * Put together a scsi execscb message | 547 | * Put together a scsi execscb message |
584 | */ | 548 | */ |
585 | |||
586 | len = SCpnt->request_bufflen; | ||
587 | |||
588 | switch (SCpnt->sc_data_direction) { | 549 | switch (SCpnt->sc_data_direction) { |
589 | case PCI_DMA_NONE: | 550 | case PCI_DMA_NONE: |
590 | scsi_flags = 0x00000000; // DATA NO XFER | 551 | /* DATA NO XFER */ |
591 | sg_flags = 0x00000000; | 552 | sgl_offset = SGL_OFFSET_0; |
592 | break; | 553 | break; |
593 | 554 | ||
594 | case PCI_DMA_TODEVICE: | 555 | case PCI_DMA_TODEVICE: |
595 | scsi_flags = 0x80000000; // DATA OUT (iop-->dev) | 556 | /* DATA OUT (iop-->dev) */ |
596 | sg_flags = 0x14000000; | 557 | scsi_flags |= 0x80000000; |
558 | sgl_offset = SGL_OFFSET_10; | ||
597 | break; | 559 | break; |
598 | 560 | ||
599 | case PCI_DMA_FROMDEVICE: | 561 | case PCI_DMA_FROMDEVICE: |
600 | scsi_flags = 0x40000000; // DATA IN (iop<--dev) | 562 | /* DATA IN (iop<--dev) */ |
601 | sg_flags = 0x10000000; | 563 | scsi_flags |= 0x40000000; |
564 | sgl_offset = SGL_OFFSET_10; | ||
602 | break; | 565 | break; |
603 | 566 | ||
604 | default: | 567 | default: |
605 | /* Unknown - kill the command */ | 568 | /* Unknown - kill the command */ |
606 | SCpnt->result = DID_NO_CONNECT << 16; | 569 | SCpnt->result = DID_NO_CONNECT << 16; |
607 | done(SCpnt); | 570 | done(SCpnt); |
608 | return 0; | 571 | goto exit; |
609 | } | 572 | } |
610 | 573 | ||
611 | writel(I2O_CMD_SCSI_EXEC << 24 | HOST_TID << 12 | tid, &msg->u.head[1]); | 574 | /* |
575 | * Obtain an I2O message. If there are none free then | ||
576 | * throw it back to the scsi layer | ||
577 | */ | ||
578 | |||
579 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
580 | if (m == I2O_QUEUE_EMPTY) { | ||
581 | rc = SCSI_MLQUEUE_HOST_BUSY; | ||
582 | goto exit; | ||
583 | } | ||
584 | |||
585 | mptr = &msg->body[0]; | ||
586 | |||
587 | #ifdef CONFIG_I2O_EXT_ADAPTEC | ||
588 | if (c->adaptec) { | ||
589 | u32 adpt_flags = 0; | ||
590 | |||
591 | if (SCpnt->sc_request && SCpnt->sc_request->upper_private_data) { | ||
592 | i2o_sg_io_hdr_t __user *usr_ptr = | ||
593 | ((Sg_request *) (SCpnt->sc_request-> | ||
594 | upper_private_data))->header. | ||
595 | usr_ptr; | ||
596 | |||
597 | if (usr_ptr) | ||
598 | get_user(adpt_flags, &usr_ptr->flags); | ||
599 | } | ||
600 | |||
601 | switch (i2o_dev->lct_data.class_id) { | ||
602 | case I2O_CLASS_EXECUTIVE: | ||
603 | case I2O_CLASS_RANDOM_BLOCK_STORAGE: | ||
604 | /* interpret flag has to be set for executive */ | ||
605 | adpt_flags ^= I2O_DPT_SG_FLAG_INTERPRET; | ||
606 | break; | ||
607 | |||
608 | default: | ||
609 | break; | ||
610 | } | ||
611 | |||
612 | /* | ||
613 | * for Adaptec controllers we use the PRIVATE command, because | ||
614 | * the normal SCSI EXEC doesn't support all SCSI commands on | ||
615 | * all controllers (for example READ CAPACITY). | ||
616 | */ | ||
617 | if (sgl_offset == SGL_OFFSET_10) | ||
618 | sgl_offset = SGL_OFFSET_12; | ||
619 | cmd = I2O_CMD_PRIVATE << 24; | ||
620 | writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); | ||
621 | writel(adpt_flags | tid, mptr++); | ||
622 | } | ||
623 | #endif | ||
624 | |||
625 | writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); | ||
612 | writel(i2o_scsi_driver.context, &msg->u.s.icntxt); | 626 | writel(i2o_scsi_driver.context, &msg->u.s.icntxt); |
613 | 627 | ||
614 | /* We want the SCSI control block back */ | 628 | /* We want the SCSI control block back */ |
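
queuecommand now starts from a fixed flag word 0x20a00000 (enable disconnect, simple tag, return sense data in the reply frame, per the hunk's comment) and ORs in a direction bit plus the CDB length. An illustrative sketch of that flag composition, using the constants visible in the hunk; the enum and function names are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define FLAGS_BASE     0x20a00000u	/* disconnect | simple tag | sense in reply */
#define FLAGS_DATA_OUT 0x80000000u	/* iop --> device */
#define FLAGS_DATA_IN  0x40000000u	/* iop <-- device */

enum dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

static uint32_t scsi_flags(enum dir d, unsigned cmd_len)
{
	uint32_t flags = FLAGS_BASE | cmd_len;	/* CDB length lives in the low bits */

	if (d == DIR_TO_DEVICE)
		flags |= FLAGS_DATA_OUT;
	else if (d == DIR_FROM_DEVICE)
		flags |= FLAGS_DATA_IN;
	return flags;
}

int main(void)
{
	printf("READ(10): %#x\n", (unsigned)scsi_flags(DIR_FROM_DEVICE, 10));
	return 0;
}
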
@@ -626,7 +640,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
626 | */ | 640 | */ |
627 | 641 | ||
628 | /* Attach tags to the devices */ | 642 | /* Attach tags to the devices */ |
629 | /* | 643 | /* FIXME: implement |
630 | if(SCpnt->device->tagged_supported) { | 644 | if(SCpnt->device->tagged_supported) { |
631 | if(SCpnt->tag == HEAD_OF_QUEUE_TAG) | 645 | if(SCpnt->tag == HEAD_OF_QUEUE_TAG) |
632 | scsi_flags |= 0x01000000; | 646 | scsi_flags |= 0x01000000; |
@@ -635,67 +649,35 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
635 | } | 649 | } |
636 | */ | 650 | */ |
637 | 651 | ||
638 | /* Direction, disconnect ok, tag, CDBLen */ | 652 | writel(scsi_flags | SCpnt->cmd_len, mptr++); |
639 | writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, &msg->body[0]); | ||
640 | |||
641 | mptr = &msg->body[1]; | ||
642 | 653 | ||
643 | /* Write SCSI command into the message - always 16 byte block */ | 654 | /* Write SCSI command into the message - always 16 byte block */ |
644 | memcpy_toio(mptr, SCpnt->cmnd, 16); | 655 | memcpy_toio(mptr, SCpnt->cmnd, 16); |
645 | mptr += 4; | 656 | mptr += 4; |
646 | lenptr = mptr++; /* Remember me - fill in when we know */ | ||
647 | |||
648 | reqlen = 12; // SINGLE SGE | ||
649 | |||
650 | /* Now fill in the SGList and command */ | ||
651 | if (SCpnt->use_sg) { | ||
652 | struct scatterlist *sg; | ||
653 | int sg_count; | ||
654 | |||
655 | sg = SCpnt->request_buffer; | ||
656 | len = 0; | ||
657 | |||
658 | sg_count = dma_map_sg(dev, sg, SCpnt->use_sg, | ||
659 | SCpnt->sc_data_direction); | ||
660 | 657 | ||
661 | if (unlikely(sg_count <= 0)) | 658 | if (sgl_offset != SGL_OFFSET_0) { |
662 | return -ENOMEM; | 659 | /* write size of data addressed by SGL */ |
663 | 660 | writel(SCpnt->request_bufflen, mptr++); | |
664 | for (i = SCpnt->use_sg; i > 0; i--) { | 661 | |
665 | if (i == 1) | 662 | /* Now fill in the SGList and command */ |
666 | sg_flags |= 0xC0000000; | 663 | if (SCpnt->use_sg) { |
667 | writel(sg_flags | sg_dma_len(sg), mptr++); | 664 | if (!i2o_dma_map_sg(c, SCpnt->request_buffer, |
668 | writel(sg_dma_address(sg), mptr++); | 665 | SCpnt->use_sg, |
669 | len += sg_dma_len(sg); | 666 | SCpnt->sc_data_direction, &mptr)) |
670 | sg++; | 667 | goto nomem; |
668 | } else { | ||
669 | SCpnt->SCp.dma_handle = | ||
670 | i2o_dma_map_single(c, SCpnt->request_buffer, | ||
671 | SCpnt->request_bufflen, | ||
672 | SCpnt->sc_data_direction, &mptr); | ||
673 | if (dma_mapping_error(SCpnt->SCp.dma_handle)) | ||
674 | goto nomem; | ||
671 | } | 675 | } |
672 | |||
673 | reqlen = mptr - &msg->u.head[0]; | ||
674 | writel(len, lenptr); | ||
675 | } else { | ||
676 | len = SCpnt->request_bufflen; | ||
677 | |||
678 | writel(len, lenptr); | ||
679 | |||
680 | if (len > 0) { | ||
681 | dma_addr_t dma_addr; | ||
682 | |||
683 | dma_addr = dma_map_single(dev, SCpnt->request_buffer, | ||
684 | SCpnt->request_bufflen, | ||
685 | SCpnt->sc_data_direction); | ||
686 | if (!dma_addr) | ||
687 | return -ENOMEM; | ||
688 | |||
689 | SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr; | ||
690 | sg_flags |= 0xC0000000; | ||
691 | writel(sg_flags | SCpnt->request_bufflen, mptr++); | ||
692 | writel(dma_addr, mptr++); | ||
693 | } else | ||
694 | reqlen = 9; | ||
695 | } | 676 | } |
696 | 677 | ||
697 | /* Stick the headers on */ | 678 | /* Stick the headers on */ |
698 | writel(reqlen << 16 | SGL_OFFSET_10, &msg->u.head[0]); | 679 | writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset, |
680 | &msg->u.head[0]); | ||
699 | 681 | ||
700 | /* Queue the message */ | 682 | /* Queue the message */ |
701 | i2o_msg_post(c, m); | 683 | i2o_msg_post(c, m); |
@@ -703,6 +685,13 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, | |||
703 | osm_debug("Issued %ld\n", SCpnt->serial_number); | 685 | osm_debug("Issued %ld\n", SCpnt->serial_number); |
704 | 686 | ||
705 | return 0; | 687 | return 0; |
688 | |||
689 | nomem: | ||
690 | rc = -ENOMEM; | ||
691 | i2o_msg_nop(c, m); | ||
692 | |||
693 | exit: | ||
694 | return rc; | ||
706 | }; | 695 | }; |
707 | 696 | ||
708 | /** | 697 | /** |
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c index 50c8cedf7a2d..42f8b810d6e5 100644 --- a/drivers/message/i2o/iop.c +++ b/drivers/message/i2o/iop.c | |||
@@ -28,8 +28,10 @@ | |||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/i2o.h> | 29 | #include <linux/i2o.h> |
30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
31 | #include "core.h" | ||
31 | 32 | ||
32 | #define OSM_VERSION "$Rev$" | 33 | #define OSM_NAME "i2o" |
34 | #define OSM_VERSION "1.288" | ||
33 | #define OSM_DESCRIPTION "I2O subsystem" | 35 | #define OSM_DESCRIPTION "I2O subsystem" |
34 | 36 | ||
35 | /* global I2O controller list */ | 37 | /* global I2O controller list */ |
@@ -43,20 +45,6 @@ static struct i2o_dma i2o_systab; | |||
43 | 45 | ||
44 | static int i2o_hrt_get(struct i2o_controller *c); | 46 | static int i2o_hrt_get(struct i2o_controller *c); |
45 | 47 | ||
46 | /* Module internal functions from other sources */ | ||
47 | extern struct i2o_driver i2o_exec_driver; | ||
48 | extern int i2o_exec_lct_get(struct i2o_controller *); | ||
49 | extern void i2o_device_remove(struct i2o_device *); | ||
50 | |||
51 | extern int __init i2o_driver_init(void); | ||
52 | extern void __exit i2o_driver_exit(void); | ||
53 | extern int __init i2o_exec_init(void); | ||
54 | extern void __exit i2o_exec_exit(void); | ||
55 | extern int __init i2o_pci_init(void); | ||
56 | extern void __exit i2o_pci_exit(void); | ||
57 | extern int i2o_device_init(void); | ||
58 | extern void i2o_device_exit(void); | ||
59 | |||
60 | /** | 48 | /** |
61 | * i2o_msg_nop - Returns a message which is not used | 49 | * i2o_msg_nop - Returns a message which is not used |
62 | * @c: I2O controller from which the message was created | 50 | * @c: I2O controller from which the message was created |
@@ -68,7 +56,7 @@ extern void i2o_device_exit(void); | |||
68 | */ | 56 | */ |
69 | void i2o_msg_nop(struct i2o_controller *c, u32 m) | 57 | void i2o_msg_nop(struct i2o_controller *c, u32 m) |
70 | { | 58 | { |
71 | struct i2o_message __iomem *msg = c->in_queue.virt + m; | 59 | struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m); |
72 | 60 | ||
73 | writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); | 61 | writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]); |
74 | writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, | 62 | writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID, |
@@ -92,16 +80,16 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m) | |||
92 | * address from the read port (see the i2o spec). If no message is | 80 | * address from the read port (see the i2o spec). If no message is |
93 | * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. | 81 | * available returns I2O_QUEUE_EMPTY and msg is leaved untouched. |
94 | */ | 82 | */ |
95 | u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg, | 83 | u32 i2o_msg_get_wait(struct i2o_controller *c, |
96 | int wait) | 84 | struct i2o_message __iomem ** msg, int wait) |
97 | { | 85 | { |
98 | unsigned long timeout = jiffies + wait * HZ; | 86 | unsigned long timeout = jiffies + wait * HZ; |
99 | u32 m; | 87 | u32 m; |
100 | 88 | ||
101 | while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { | 89 | while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) { |
102 | if (time_after(jiffies, timeout)) { | 90 | if (time_after(jiffies, timeout)) { |
103 | pr_debug("%s: Timeout waiting for message frame.\n", | 91 | osm_debug("%s: Timeout waiting for message frame.\n", |
104 | c->name); | 92 | c->name); |
105 | return I2O_QUEUE_EMPTY; | 93 | return I2O_QUEUE_EMPTY; |
106 | } | 94 | } |
107 | set_current_state(TASK_UNINTERRUPTIBLE); | 95 | set_current_state(TASK_UNINTERRUPTIBLE); |
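
i2o_msg_get_wait() polls the inbound free list until a frame turns up or the deadline passes, sleeping briefly between attempts. A userspace analogue of that poll-until-deadline shape, using wall-clock time instead of jiffies and a stub resource getter (try_get and its countdown are invented for the example):

#include <stdio.h>
#include <time.h>

#define EMPTY (-1)

static int try_get(void)
{
	static int countdown = 3;

	return countdown-- > 0 ? EMPTY : 42;	/* succeeds on the fourth attempt */
}

static int get_wait(int wait_seconds)
{
	time_t deadline = time(NULL) + wait_seconds;
	struct timespec backoff = { 0, 10 * 1000 * 1000 };	/* 10 ms */
	int m;

	while ((m = try_get()) == EMPTY) {
		if (time(NULL) > deadline)
			return EMPTY;		/* give up; caller sees "queue empty" */
		nanosleep(&backoff, NULL);	/* yield before retrying */
	}
	return m;
}

int main(void)
{
	printf("got frame %d\n", get_wait(2));
	return 0;
}
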
@@ -129,13 +117,13 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) | |||
129 | unsigned long flags; | 117 | unsigned long flags; |
130 | 118 | ||
131 | if (!ptr) | 119 | if (!ptr) |
132 | printk(KERN_ERR "%s: couldn't add NULL pointer to context list!" | 120 | osm_err("%s: couldn't add NULL pointer to context list!\n", |
133 | "\n", c->name); | 121 | c->name); |
134 | 122 | ||
135 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); | 123 | entry = kmalloc(sizeof(*entry), GFP_ATOMIC); |
136 | if (!entry) { | 124 | if (!entry) { |
137 | printk(KERN_ERR "%s: Could not allocate memory for context " | 125 | osm_err("%s: Could not allocate memory for context list element" |
138 | "list element\n", c->name); | 126 | "\n", c->name); |
139 | return 0; | 127 | return 0; |
140 | } | 128 | } |
141 | 129 | ||
@@ -154,7 +142,7 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr) | |||
154 | 142 | ||
155 | spin_unlock_irqrestore(&c->context_list_lock, flags); | 143 | spin_unlock_irqrestore(&c->context_list_lock, flags); |
156 | 144 | ||
157 | pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context); | 145 | osm_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context); |
158 | 146 | ||
159 | return entry->context; | 147 | return entry->context; |
160 | }; | 148 | }; |
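
i2o_cntxt_list_add()/i2o_cntxt_list_get() hand out 32-bit context values for driver pointers so they fit in the message's transaction-context field, which a native pointer no longer does on 64-bit builds. A compact sketch of the idea with a fixed-size table and hypothetical names; the driver's real implementation uses a spinlock-protected list rather than this array:

#include <stdio.h>
#include <stdint.h>

#define SLOTS 64

static void *table[SLOTS];

/* store ptr and return a non-zero 32-bit context id (0 means the table is full) */
static uint32_t cntxt_add(void *ptr)
{
	for (uint32_t i = 0; i < SLOTS; i++)
		if (!table[i]) {
			table[i] = ptr;
			return i + 1;
		}
	return 0;
}

/* look a context id back up and release the slot */
static void *cntxt_get(uint32_t ctx)
{
	void *ptr;

	if (ctx == 0 || ctx > SLOTS)
		return NULL;
	ptr = table[ctx - 1];
	table[ctx - 1] = NULL;
	return ptr;
}

int main(void)
{
	int payload = 7;
	uint32_t ctx = cntxt_add(&payload);

	printf("ctx=%u -> %d\n", ctx, *(int *)cntxt_get(ctx));
	return 0;
}
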
@@ -186,11 +174,11 @@ u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr) | |||
186 | spin_unlock_irqrestore(&c->context_list_lock, flags); | 174 | spin_unlock_irqrestore(&c->context_list_lock, flags); |
187 | 175 | ||
188 | if (!context) | 176 | if (!context) |
189 | printk(KERN_WARNING "%s: Could not remove nonexistent ptr " | 177 | osm_warn("%s: Could not remove nonexistent ptr %p\n", c->name, |
190 | "%p\n", c->name, ptr); | 178 | ptr); |
191 | 179 | ||
192 | pr_debug("%s: remove ptr from context list %d -> %p\n", c->name, | 180 | osm_debug("%s: remove ptr from context list %d -> %p\n", c->name, |
193 | context, ptr); | 181 | context, ptr); |
194 | 182 | ||
195 | return context; | 183 | return context; |
196 | }; | 184 | }; |
@@ -220,11 +208,10 @@ void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context) | |||
220 | spin_unlock_irqrestore(&c->context_list_lock, flags); | 208 | spin_unlock_irqrestore(&c->context_list_lock, flags); |
221 | 209 | ||
222 | if (!ptr) | 210 | if (!ptr) |
223 | printk(KERN_WARNING "%s: context id %d not found\n", c->name, | 211 | osm_warn("%s: context id %d not found\n", c->name, context); |
224 | context); | ||
225 | 212 | ||
226 | pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context, | 213 | osm_debug("%s: get ptr from context list %d -> %p\n", c->name, context, |
227 | ptr); | 214 | ptr); |
228 | 215 | ||
229 | return ptr; | 216 | return ptr; |
230 | }; | 217 | }; |
@@ -252,11 +239,11 @@ u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr) | |||
252 | spin_unlock_irqrestore(&c->context_list_lock, flags); | 239 | spin_unlock_irqrestore(&c->context_list_lock, flags); |
253 | 240 | ||
254 | if (!context) | 241 | if (!context) |
255 | printk(KERN_WARNING "%s: Could not find nonexistent ptr " | 242 | osm_warn("%s: Could not find nonexistent ptr %p\n", c->name, |
256 | "%p\n", c->name, ptr); | 243 | ptr); |
257 | 244 | ||
258 | pr_debug("%s: get context id from context list %p -> %d\n", c->name, | 245 | osm_debug("%s: get context id from context list %p -> %d\n", c->name, |
259 | ptr, context); | 246 | ptr, context); |
260 | 247 | ||
261 | return context; | 248 | return context; |
262 | }; | 249 | }; |
@@ -336,10 +323,9 @@ static int i2o_iop_quiesce(struct i2o_controller *c) | |||
336 | 323 | ||
337 | /* Long timeout needed for quiesce if lots of devices */ | 324 | /* Long timeout needed for quiesce if lots of devices */ |
338 | if ((rc = i2o_msg_post_wait(c, m, 240))) | 325 | if ((rc = i2o_msg_post_wait(c, m, 240))) |
339 | printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n", | 326 | osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc); |
340 | c->name, -rc); | ||
341 | else | 327 | else |
342 | pr_debug("%s: Quiesced.\n", c->name); | 328 | osm_debug("%s: Quiesced.\n", c->name); |
343 | 329 | ||
344 | i2o_status_get(c); // Entered READY state | 330 | i2o_status_get(c); // Entered READY state |
345 | 331 | ||
@@ -377,10 +363,9 @@ static int i2o_iop_enable(struct i2o_controller *c) | |||
377 | 363 | ||
378 | /* How long of a timeout do we need? */ | 364 | /* How long of a timeout do we need? */ |
379 | if ((rc = i2o_msg_post_wait(c, m, 240))) | 365 | if ((rc = i2o_msg_post_wait(c, m, 240))) |
380 | printk(KERN_ERR "%s: Could not enable (status=%#x).\n", | 366 | osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc); |
381 | c->name, -rc); | ||
382 | else | 367 | else |
383 | pr_debug("%s: Enabled.\n", c->name); | 368 | osm_debug("%s: Enabled.\n", c->name); |
384 | 369 | ||
385 | i2o_status_get(c); // entered OPERATIONAL state | 370 | i2o_status_get(c); // entered OPERATIONAL state |
386 | 371 | ||
@@ -444,20 +429,78 @@ static int i2o_iop_clear(struct i2o_controller *c) | |||
444 | &msg->u.head[1]); | 429 | &msg->u.head[1]); |
445 | 430 | ||
446 | if ((rc = i2o_msg_post_wait(c, m, 30))) | 431 | if ((rc = i2o_msg_post_wait(c, m, 30))) |
447 | printk(KERN_INFO "%s: Unable to clear (status=%#x).\n", | 432 | osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc); |
448 | c->name, -rc); | ||
449 | else | 433 | else |
450 | pr_debug("%s: Cleared.\n", c->name); | 434 | osm_debug("%s: Cleared.\n", c->name); |
451 | 435 | ||
452 | /* Enable all IOPs */ | 436 | /* Enable all IOPs */ |
453 | i2o_iop_enable_all(); | 437 | i2o_iop_enable_all(); |
454 | 438 | ||
455 | i2o_status_get(c); | ||
456 | |||
457 | return rc; | 439 | return rc; |
458 | } | 440 | } |
459 | 441 | ||
460 | /** | 442 | /** |
443 | * i2o_iop_init_outbound_queue - setup the outbound message queue | ||
444 | * @c: I2O controller | ||
445 | * | ||
446 | * Clear and (re)initialize IOP's outbound queue and post the message | ||
447 | * frames to the IOP. | ||
448 | * | ||
449 | * Returns 0 on success or a negative errno code on failure. | ||
450 | */ | ||
451 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | ||
452 | { | ||
453 | volatile u8 *status = c->status.virt; | ||
454 | u32 m; | ||
455 | struct i2o_message __iomem *msg; | ||
456 | ulong timeout; | ||
457 | int i; | ||
458 | |||
459 | osm_debug("%s: Initializing Outbound Queue...\n", c->name); | ||
460 | |||
461 | memset(c->status.virt, 0, 4); | ||
462 | |||
463 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
464 | if (m == I2O_QUEUE_EMPTY) | ||
465 | return -ETIMEDOUT; | ||
466 | |||
467 | writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]); | ||
468 | writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
469 | &msg->u.head[1]); | ||
470 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
471 | writel(0x00000000, &msg->u.s.tcntxt); | ||
472 | writel(PAGE_SIZE, &msg->body[0]); | ||
473 | /* Outbound msg frame size in words and Initcode */ | ||
474 | writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); | ||
475 | writel(0xd0000004, &msg->body[2]); | ||
476 | writel(i2o_dma_low(c->status.phys), &msg->body[3]); | ||
477 | writel(i2o_dma_high(c->status.phys), &msg->body[4]); | ||
478 | |||
479 | i2o_msg_post(c, m); | ||
480 | |||
481 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; | ||
482 | while (*status <= I2O_CMD_IN_PROGRESS) { | ||
483 | if (time_after(jiffies, timeout)) { | ||
484 | osm_warn("%s: Timeout Initializing\n", c->name); | ||
485 | return -ETIMEDOUT; | ||
486 | } | ||
487 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
488 | schedule_timeout(1); | ||
489 | } | ||
490 | |||
491 | m = c->out_queue.phys; | ||
492 | |||
493 | /* Post frames */ | ||
494 | for (i = 0; i < I2O_MAX_OUTBOUND_MSG_FRAMES; i++) { | ||
495 | i2o_flush_reply(c, m); | ||
496 | udelay(1); /* Promise */ | ||
497 | m += I2O_OUTBOUND_MSG_FRAME_SIZE * sizeof(u32); | ||
498 | } | ||
499 | |||
500 | return 0; | ||
501 | } | ||
502 | |||
503 | /** | ||
461 | * i2o_iop_reset - reset an I2O controller | 504 | * i2o_iop_reset - reset an I2O controller |
462 | * @c: controller to reset | 505 | * @c: controller to reset |
463 | * | 506 | * |
@@ -468,20 +511,20 @@ static int i2o_iop_clear(struct i2o_controller *c) | |||
468 | */ | 511 | */ |
469 | static int i2o_iop_reset(struct i2o_controller *c) | 512 | static int i2o_iop_reset(struct i2o_controller *c) |
470 | { | 513 | { |
471 | u8 *status = c->status.virt; | 514 | volatile u8 *status = c->status.virt; |
472 | struct i2o_message __iomem *msg; | 515 | struct i2o_message __iomem *msg; |
473 | u32 m; | 516 | u32 m; |
474 | unsigned long timeout; | 517 | unsigned long timeout; |
475 | i2o_status_block *sb = c->status_block.virt; | 518 | i2o_status_block *sb = c->status_block.virt; |
476 | int rc = 0; | 519 | int rc = 0; |
477 | 520 | ||
478 | pr_debug("%s: Resetting controller\n", c->name); | 521 | osm_debug("%s: Resetting controller\n", c->name); |
479 | 522 | ||
480 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 523 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); |
481 | if (m == I2O_QUEUE_EMPTY) | 524 | if (m == I2O_QUEUE_EMPTY) |
482 | return -ETIMEDOUT; | 525 | return -ETIMEDOUT; |
483 | 526 | ||
484 | memset(status, 0, 8); | 527 | memset(c->status_block.virt, 0, 8); |
485 | 528 | ||
486 | /* Quiesce all IOPs first */ | 529 | /* Quiesce all IOPs first */ |
487 | i2o_iop_quiesce_all(); | 530 | i2o_iop_quiesce_all(); |
@@ -493,49 +536,43 @@ static int i2o_iop_reset(struct i2o_controller *c) | |||
493 | writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context | 536 | writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context |
494 | writel(0, &msg->body[0]); | 537 | writel(0, &msg->body[0]); |
495 | writel(0, &msg->body[1]); | 538 | writel(0, &msg->body[1]); |
496 | writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]); | 539 | writel(i2o_dma_low(c->status.phys), &msg->body[2]); |
497 | writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]); | 540 | writel(i2o_dma_high(c->status.phys), &msg->body[3]); |
498 | 541 | ||
499 | i2o_msg_post(c, m); | 542 | i2o_msg_post(c, m); |
500 | 543 | ||
501 | /* Wait for a reply */ | 544 | /* Wait for a reply */ |
502 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; | 545 | timeout = jiffies + I2O_TIMEOUT_RESET * HZ; |
503 | while (!*status) { | 546 | while (!*status) { |
504 | if (time_after(jiffies, timeout)) { | 547 | if (time_after(jiffies, timeout)) |
505 | printk(KERN_ERR "%s: IOP reset timeout.\n", c->name); | ||
506 | rc = -ETIMEDOUT; | ||
507 | goto exit; | ||
508 | } | ||
509 | |||
510 | /* Promise bug */ | ||
511 | if (status[1] || status[4]) { | ||
512 | *status = 0; | ||
513 | break; | 548 | break; |
514 | } | ||
515 | 549 | ||
516 | set_current_state(TASK_UNINTERRUPTIBLE); | 550 | set_current_state(TASK_UNINTERRUPTIBLE); |
517 | schedule_timeout(1); | 551 | schedule_timeout(1); |
518 | |||
519 | rmb(); | ||
520 | } | 552 | } |
521 | 553 | ||
522 | if (*status == I2O_CMD_IN_PROGRESS) { | 554 | switch (*status) { |
555 | case I2O_CMD_REJECTED: | ||
556 | osm_warn("%s: IOP reset rejected\n", c->name); | ||
557 | rc = -EPERM; | ||
558 | break; | ||
559 | |||
560 | case I2O_CMD_IN_PROGRESS: | ||
523 | /* | 561 | /* |
524 | * Once the reset is sent, the IOP goes into the INIT state | 562 | * Once the reset is sent, the IOP goes into the INIT state |
525 | * which is indeterminate. We need to wait until the IOP | 563 | * which is indeterminate. We need to wait until the IOP has |
526 | * has rebooted before we can let the system talk to | 564 | * rebooted before we can let the system talk to it. We read |
527 | * it. We read the inbound Free_List until a message is | 565 | * the inbound Free_List until a message is available. If we |
528 | * available. If we can't read one in the given ammount of | 566 | * can't read one in the given ammount of time, we assume the |
529 | * time, we assume the IOP could not reboot properly. | 567 | * IOP could not reboot properly. |
530 | */ | 568 | */ |
531 | pr_debug("%s: Reset in progress, waiting for reboot...\n", | 569 | osm_debug("%s: Reset in progress, waiting for reboot...\n", |
532 | c->name); | 570 | c->name); |
533 | 571 | ||
534 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | 572 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); |
535 | while (m == I2O_QUEUE_EMPTY) { | 573 | while (m == I2O_QUEUE_EMPTY) { |
536 | if (time_after(jiffies, timeout)) { | 574 | if (time_after(jiffies, timeout)) { |
537 | printk(KERN_ERR "%s: IOP reset timeout.\n", | 575 | osm_err("%s: IOP reset timeout.\n", c->name); |
538 | c->name); | ||
539 | rc = -ETIMEDOUT; | 576 | rc = -ETIMEDOUT; |
540 | goto exit; | 577 | goto exit; |
541 | } | 578 | } |
@@ -545,19 +582,26 @@ static int i2o_iop_reset(struct i2o_controller *c) | |||
545 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); | 582 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET); |
546 | } | 583 | } |
547 | i2o_msg_nop(c, m); | 584 | i2o_msg_nop(c, m); |
548 | } | ||
549 | 585 | ||
550 | /* from here all quiesce commands are safe */ | 586 | /* from here all quiesce commands are safe */ |
551 | c->no_quiesce = 0; | 587 | c->no_quiesce = 0; |
552 | 588 | ||
553 | /* If IopReset was rejected or didn't perform reset, try IopClear */ | 589 | /* verify if controller is in state RESET */ |
554 | i2o_status_get(c); | 590 | i2o_status_get(c); |
555 | if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) { | 591 | |
556 | printk(KERN_WARNING "%s: Reset rejected, trying to clear\n", | 592 | if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET)) |
557 | c->name); | 593 | osm_warn("%s: reset completed, but adapter not in RESET" |
558 | i2o_iop_clear(c); | 594 | " state.\n", c->name); |
559 | } else | 595 | else |
560 | pr_debug("%s: Reset completed.\n", c->name); | 596 | osm_debug("%s: reset completed.\n", c->name); |
597 | |||
598 | break; | ||
599 | |||
600 | default: | ||
601 | osm_err("%s: IOP reset timeout.\n", c->name); | ||
602 | rc = -ETIMEDOUT; | ||
603 | break; | ||
604 | } | ||
561 | 605 | ||
562 | exit: | 606 | exit: |
563 | /* Enable all IOPs */ | 607 | /* Enable all IOPs */ |
@@ -567,88 +611,6 @@ static int i2o_iop_reset(struct i2o_controller *c) | |||
567 | }; | 611 | }; |
568 | 612 | ||
569 | /** | 613 | /** |
570 | * i2o_iop_init_outbound_queue - setup the outbound message queue | ||
571 | * @c: I2O controller | ||
572 | * | ||
573 | * Clear and (re)initialize IOP's outbound queue and post the message | ||
574 | * frames to the IOP. | ||
575 | * | ||
576 | * Returns 0 on success or a negative errno code on failure. | ||
577 | */ | ||
578 | static int i2o_iop_init_outbound_queue(struct i2o_controller *c) | ||
579 | { | ||
580 | u8 *status = c->status.virt; | ||
581 | u32 m; | ||
582 | struct i2o_message __iomem *msg; | ||
583 | ulong timeout; | ||
584 | int i; | ||
585 | |||
586 | pr_debug("%s: Initializing Outbound Queue...\n", c->name); | ||
587 | |||
588 | memset(status, 0, 4); | ||
589 | |||
590 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | ||
591 | if (m == I2O_QUEUE_EMPTY) | ||
592 | return -ETIMEDOUT; | ||
593 | |||
594 | writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]); | ||
595 | writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, | ||
596 | &msg->u.head[1]); | ||
597 | writel(i2o_exec_driver.context, &msg->u.s.icntxt); | ||
598 | writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in | ||
599 | Spec? */ | ||
600 | writel(PAGE_SIZE, &msg->body[0]); | ||
601 | writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame | ||
602 | size in words and Initcode */ | ||
603 | writel(0xd0000004, &msg->body[2]); | ||
604 | writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]); | ||
605 | writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]); | ||
606 | |||
607 | i2o_msg_post(c, m); | ||
608 | |||
609 | timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ; | ||
610 | while (*status <= I2O_CMD_IN_PROGRESS) { | ||
611 | if (time_after(jiffies, timeout)) { | ||
612 | printk(KERN_WARNING "%s: Timeout Initializing\n", | ||
613 | c->name); | ||
614 | return -ETIMEDOUT; | ||
615 | } | ||
616 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
617 | schedule_timeout(1); | ||
618 | |||
619 | rmb(); | ||
620 | } | ||
621 | |||
622 | m = c->out_queue.phys; | ||
623 | |||
624 | /* Post frames */ | ||
625 | for (i = 0; i < NMBR_MSG_FRAMES; i++) { | ||
626 | i2o_flush_reply(c, m); | ||
627 | udelay(1); /* Promise */ | ||
628 | m += MSG_FRAME_SIZE * 4; | ||
629 | } | ||
630 | |||
631 | return 0; | ||
632 | } | ||
633 | |||
634 | /** | ||
635 | * i2o_iop_send_nop - send a core NOP message | ||
636 | * @c: controller | ||
637 | * | ||
638 | * Send a no-operation message with a reply set to cause no | ||
639 | * action either. Needed for bringing up promise controllers. | ||
640 | */ | ||
641 | static int i2o_iop_send_nop(struct i2o_controller *c) | ||
642 | { | ||
643 | struct i2o_message __iomem *msg; | ||
644 | u32 m = i2o_msg_get_wait(c, &msg, HZ); | ||
645 | if (m == I2O_QUEUE_EMPTY) | ||
646 | return -ETIMEDOUT; | ||
647 | i2o_msg_nop(c, m); | ||
648 | return 0; | ||
649 | } | ||
650 | |||
651 | /** | ||
652 | * i2o_iop_activate - Bring controller up to HOLD | 614 | * i2o_iop_activate - Bring controller up to HOLD |
653 | * @c: controller | 615 | * @c: controller |
654 | * | 616 | * |
@@ -659,78 +621,62 @@ static int i2o_iop_send_nop(struct i2o_controller *c) | |||
659 | */ | 621 | */ |
660 | static int i2o_iop_activate(struct i2o_controller *c) | 622 | static int i2o_iop_activate(struct i2o_controller *c) |
661 | { | 623 | { |
662 | struct pci_dev *i960 = NULL; | ||
663 | i2o_status_block *sb = c->status_block.virt; | 624 | i2o_status_block *sb = c->status_block.virt; |
664 | int rc; | 625 | int rc; |
665 | 626 | int state; | |
666 | if (c->promise) { | ||
667 | /* Beat up the hardware first of all */ | ||
668 | i960 = | ||
669 | pci_find_slot(c->pdev->bus->number, | ||
670 | PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0)); | ||
671 | if (i960) | ||
672 | pci_write_config_word(i960, 0x42, 0); | ||
673 | |||
674 | /* Follow this sequence precisely or the controller | ||
675 | ceases to perform useful functions until reboot */ | ||
676 | if ((rc = i2o_iop_send_nop(c))) | ||
677 | return rc; | ||
678 | |||
679 | if ((rc = i2o_iop_reset(c))) | ||
680 | return rc; | ||
681 | } | ||
682 | 627 | ||
683 | /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ | 628 | /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */ |
684 | /* In READY state, Get status */ | 629 | /* In READY state, Get status */ |
685 | 630 | ||
686 | rc = i2o_status_get(c); | 631 | rc = i2o_status_get(c); |
687 | if (rc) { | 632 | if (rc) { |
688 | printk(KERN_INFO "%s: Unable to obtain status, " | 633 | osm_info("%s: Unable to obtain status, attempting a reset.\n", |
689 | "attempting a reset.\n", c->name); | 634 | c->name); |
690 | if (i2o_iop_reset(c)) | 635 | rc = i2o_iop_reset(c); |
636 | if (rc) | ||
691 | return rc; | 637 | return rc; |
692 | } | 638 | } |
693 | 639 | ||
694 | if (sb->i2o_version > I2OVER15) { | 640 | if (sb->i2o_version > I2OVER15) { |
695 | printk(KERN_ERR "%s: Not running version 1.5 of the I2O " | 641 | osm_err("%s: Not running version 1.5 of the I2O Specification." |
696 | "Specification.\n", c->name); | 642 | "\n", c->name); |
697 | return -ENODEV; | 643 | return -ENODEV; |
698 | } | 644 | } |
699 | 645 | ||
700 | switch (sb->iop_state) { | 646 | switch (sb->iop_state) { |
701 | case ADAPTER_STATE_FAULTED: | 647 | case ADAPTER_STATE_FAULTED: |
702 | printk(KERN_CRIT "%s: hardware fault\n", c->name); | 648 | osm_err("%s: hardware fault\n", c->name); |
703 | return -ENODEV; | 649 | return -EFAULT; |
704 | 650 | ||
705 | case ADAPTER_STATE_READY: | 651 | case ADAPTER_STATE_READY: |
706 | case ADAPTER_STATE_OPERATIONAL: | 652 | case ADAPTER_STATE_OPERATIONAL: |
707 | case ADAPTER_STATE_HOLD: | 653 | case ADAPTER_STATE_HOLD: |
708 | case ADAPTER_STATE_FAILED: | 654 | case ADAPTER_STATE_FAILED: |
709 | pr_debug("%s: already running, trying to reset...\n", c->name); | 655 | osm_debug("%s: already running, trying to reset...\n", c->name); |
710 | if (i2o_iop_reset(c)) | 656 | rc = i2o_iop_reset(c); |
711 | return -ENODEV; | 657 | if (rc) |
658 | return rc; | ||
712 | } | 659 | } |
713 | 660 | ||
661 | /* preserve state */ | ||
662 | state = sb->iop_state; | ||
663 | |||
714 | rc = i2o_iop_init_outbound_queue(c); | 664 | rc = i2o_iop_init_outbound_queue(c); |
715 | if (rc) | 665 | if (rc) |
716 | return rc; | 666 | return rc; |
717 | 667 | ||
718 | if (c->promise) { | 668 | /* if the adapter was not in RESET state, clear it now */ |
719 | if ((rc = i2o_iop_send_nop(c))) | 669 | if (state != ADAPTER_STATE_RESET) |
720 | return rc; | 670 | i2o_iop_clear(c); |
721 | 671 | ||
722 | if ((rc = i2o_status_get(c))) | 672 | i2o_status_get(c); |
723 | return rc; | ||
724 | 673 | ||
725 | if (i960) | 674 | if (sb->iop_state != ADAPTER_STATE_HOLD) { |
726 | pci_write_config_word(i960, 0x42, 0x3FF); | 675 | osm_err("%s: failed to bring IOP into HOLD state\n", c->name); |
676 | return -EIO; | ||
727 | } | 677 | } |
728 | 678 | ||
729 | /* In HOLD state */ | 679 | return i2o_hrt_get(c); |
730 | |||
731 | rc = i2o_hrt_get(c); | ||
732 | |||
733 | return rc; | ||
734 | }; | 680 | }; |
735 | 681 | ||
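Seen as a whole, i2o_iop_activate() above is a small state machine: read the status block, give up on a FAULTED adapter, reset an adapter that is already running, and only then initialize the outbound queue and check for HOLD. A compressed sketch of that decision step follows; the enum values and classify_state() are illustrative names, not the driver's constants.

    #include <stdio.h>

    enum iop_state { ST_RESET, ST_INIT, ST_READY, ST_OPERATIONAL,
                     ST_HOLD, ST_FAILED, ST_FAULTED };

    /* <0: unusable, 1: needs a reset first, 0: continue with bring-up */
    static int classify_state(enum iop_state state)
    {
            switch (state) {
            case ST_FAULTED:
                    return -1;
            case ST_READY:
            case ST_OPERATIONAL:
            case ST_HOLD:
            case ST_FAILED:
                    return 1;
            default:                /* RESET or INIT */
                    return 0;
            }
    }

    int main(void)
    {
            printf("HOLD  -> %d\n", classify_state(ST_HOLD));
            printf("RESET -> %d\n", classify_state(ST_RESET));
            return 0;
    }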
736 | /** | 682 | /** |
@@ -756,20 +702,18 @@ static int i2o_iop_systab_set(struct i2o_controller *c) | |||
756 | res->flags = IORESOURCE_MEM; | 702 | res->flags = IORESOURCE_MEM; |
757 | res->start = 0; | 703 | res->start = 0; |
758 | res->end = 0; | 704 | res->end = 0; |
759 | printk(KERN_INFO "%s: requires private memory resources.\n", | 705 | osm_info("%s: requires private memory resources.\n", c->name); |
760 | c->name); | ||
761 | root = pci_find_parent_resource(c->pdev, res); | 706 | root = pci_find_parent_resource(c->pdev, res); |
762 | if (root == NULL) | 707 | if (root == NULL) |
763 | printk(KERN_WARNING "%s: Can't find parent resource!\n", | 708 | osm_warn("%s: Can't find parent resource!\n", c->name); |
764 | c->name); | ||
765 | if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ | 709 | if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ |
766 | NULL, NULL) >= 0) { | 710 | NULL, NULL) >= 0) { |
767 | c->mem_alloc = 1; | 711 | c->mem_alloc = 1; |
768 | sb->current_mem_size = 1 + res->end - res->start; | 712 | sb->current_mem_size = 1 + res->end - res->start; |
769 | sb->current_mem_base = res->start; | 713 | sb->current_mem_base = res->start; |
770 | printk(KERN_INFO "%s: allocated %ld bytes of PCI memory" | 714 | osm_info("%s: allocated %ld bytes of PCI memory at " |
771 | " at 0x%08lX.\n", c->name, | 715 | "0x%08lX.\n", c->name, |
772 | 1 + res->end - res->start, res->start); | 716 | 1 + res->end - res->start, res->start); |
773 | } | 717 | } |
774 | } | 718 | } |
775 | 719 | ||
@@ -779,20 +723,18 @@ static int i2o_iop_systab_set(struct i2o_controller *c) | |||
779 | res->flags = IORESOURCE_IO; | 723 | res->flags = IORESOURCE_IO; |
780 | res->start = 0; | 724 | res->start = 0; |
781 | res->end = 0; | 725 | res->end = 0; |
782 | printk(KERN_INFO "%s: requires private memory resources.\n", | 726 | osm_info("%s: requires private memory resources.\n", c->name); |
783 | c->name); | ||
784 | root = pci_find_parent_resource(c->pdev, res); | 727 | root = pci_find_parent_resource(c->pdev, res); |
785 | if (root == NULL) | 728 | if (root == NULL) |
786 | printk(KERN_WARNING "%s: Can't find parent resource!\n", | 729 | osm_warn("%s: Can't find parent resource!\n", c->name); |
787 | c->name); | ||
788 | if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ | 730 | if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */ |
789 | NULL, NULL) >= 0) { | 731 | NULL, NULL) >= 0) { |
790 | c->io_alloc = 1; | 732 | c->io_alloc = 1; |
791 | sb->current_io_size = 1 + res->end - res->start; | 733 | sb->current_io_size = 1 + res->end - res->start; |
792 | sb->current_mem_base = res->start; | 734 | sb->current_mem_base = res->start; |
793 | printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at" | 735 | osm_info("%s: allocated %ld bytes of PCI I/O at 0x%08lX" |
794 | " 0x%08lX.\n", c->name, | 736 | ".\n", c->name, 1 + res->end - res->start, |
795 | 1 + res->end - res->start, res->start); | 737 | res->start); |
796 | } | 738 | } |
797 | } | 739 | } |
798 | 740 | ||
@@ -836,10 +778,10 @@ static int i2o_iop_systab_set(struct i2o_controller *c) | |||
836 | PCI_DMA_TODEVICE); | 778 | PCI_DMA_TODEVICE); |
837 | 779 | ||
838 | if (rc < 0) | 780 | if (rc < 0) |
839 | printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n", | 781 | osm_err("%s: Unable to set SysTab (status=%#x).\n", c->name, |
840 | c->name, -rc); | 782 | -rc); |
841 | else | 783 | else |
842 | pr_debug("%s: SysTab set.\n", c->name); | 784 | osm_debug("%s: SysTab set.\n", c->name); |
843 | 785 | ||
844 | i2o_status_get(c); // Entered READY state | 786 | i2o_status_get(c); // Entered READY state |
845 | 787 | ||
@@ -863,7 +805,7 @@ static int i2o_iop_online(struct i2o_controller *c) | |||
863 | return rc; | 805 | return rc; |
864 | 806 | ||
865 | /* In READY state */ | 807 | /* In READY state */ |
866 | pr_debug("%s: Attempting to enable...\n", c->name); | 808 | osm_debug("%s: Attempting to enable...\n", c->name); |
867 | rc = i2o_iop_enable(c); | 809 | rc = i2o_iop_enable(c); |
868 | if (rc) | 810 | if (rc) |
869 | return rc; | 811 | return rc; |
@@ -882,7 +824,7 @@ void i2o_iop_remove(struct i2o_controller *c) | |||
882 | { | 824 | { |
883 | struct i2o_device *dev, *tmp; | 825 | struct i2o_device *dev, *tmp; |
884 | 826 | ||
885 | pr_debug("%s: deleting controller\n", c->name); | 827 | osm_debug("%s: deleting controller\n", c->name); |
886 | 828 | ||
887 | i2o_driver_notify_controller_remove_all(c); | 829 | i2o_driver_notify_controller_remove_all(c); |
888 | 830 | ||
@@ -891,8 +833,12 @@ void i2o_iop_remove(struct i2o_controller *c) | |||
891 | list_for_each_entry_safe(dev, tmp, &c->devices, list) | 833 | list_for_each_entry_safe(dev, tmp, &c->devices, list) |
892 | i2o_device_remove(dev); | 834 | i2o_device_remove(dev); |
893 | 835 | ||
836 | device_del(&c->device); | ||
837 | |||
894 | /* Ask the IOP to switch to RESET state */ | 838 | /* Ask the IOP to switch to RESET state */ |
895 | i2o_iop_reset(c); | 839 | i2o_iop_reset(c); |
840 | |||
841 | put_device(&c->device); | ||
896 | } | 842 | } |
897 | 843 | ||
898 | /** | 844 | /** |
@@ -927,8 +873,7 @@ static int i2o_systab_build(void) | |||
927 | 873 | ||
928 | systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); | 874 | systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL); |
929 | if (!systab) { | 875 | if (!systab) { |
930 | printk(KERN_ERR "i2o: unable to allocate memory for System " | 876 | osm_err("unable to allocate memory for System Table\n"); |
931 | "Table\n"); | ||
932 | return -ENOMEM; | 877 | return -ENOMEM; |
933 | } | 878 | } |
934 | memset(systab, 0, i2o_systab.len); | 879 | memset(systab, 0, i2o_systab.len); |
@@ -940,8 +885,8 @@ static int i2o_systab_build(void) | |||
940 | i2o_status_block *sb; | 885 | i2o_status_block *sb; |
941 | 886 | ||
942 | if (count >= num_controllers) { | 887 | if (count >= num_controllers) { |
943 | printk(KERN_ERR "i2o: controller added while building " | 888 | osm_err("controller added while building system table" |
944 | "system table\n"); | 889 | "\n"); |
945 | break; | 890 | break; |
946 | } | 891 | } |
947 | 892 | ||
@@ -955,9 +900,8 @@ static int i2o_systab_build(void) | |||
955 | * it is technically not part of the I2O subsystem... | 900 | * it is technically not part of the I2O subsystem... |
956 | */ | 901 | */ |
957 | if (unlikely(i2o_status_get(c))) { | 902 | if (unlikely(i2o_status_get(c))) { |
958 | printk(KERN_ERR "%s: Deleting b/c could not get status" | 903 | osm_err("%s: Deleting b/c could not get status while " |
959 | " while attempting to build system table\n", | 904 | "attempting to build system table\n", c->name); |
960 | c->name); | ||
961 | i2o_iop_remove(c); | 905 | i2o_iop_remove(c); |
962 | continue; // try the next one | 906 | continue; // try the next one |
963 | } | 907 | } |
@@ -971,8 +915,10 @@ static int i2o_systab_build(void) | |||
971 | systab->iops[count].frame_size = sb->inbound_frame_size; | 915 | systab->iops[count].frame_size = sb->inbound_frame_size; |
972 | systab->iops[count].last_changed = change_ind; | 916 | systab->iops[count].last_changed = change_ind; |
973 | systab->iops[count].iop_capabilities = sb->iop_capabilities; | 917 | systab->iops[count].iop_capabilities = sb->iop_capabilities; |
974 | systab->iops[count].inbound_low = i2o_ptr_low(c->post_port); | 918 | systab->iops[count].inbound_low = |
975 | systab->iops[count].inbound_high = i2o_ptr_high(c->post_port); | 919 | i2o_dma_low(c->base.phys + I2O_IN_PORT); |
920 | systab->iops[count].inbound_high = | ||
921 | i2o_dma_high(c->base.phys + I2O_IN_PORT); | ||
976 | 922 | ||
977 | count++; | 923 | count++; |
978 | } | 924 | } |
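The system-table entries above now publish the controller's inbound port address as two 32-bit halves through i2o_dma_low() and i2o_dma_high(). The split itself is simple; a sketch with illustrative helper names (lo32/hi32 are not the kernel's functions, and the address value is made up):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lo32(uint64_t addr) { return (uint32_t)(addr & 0xffffffffULL); }
    static uint32_t hi32(uint64_t addr) { return (uint32_t)(addr >> 32); }

    int main(void)
    {
            /* made-up physical address standing in for base.phys + I2O_IN_PORT */
            uint64_t in_port = 0x00000001f0000040ULL;

            printf("inbound_low=0x%08x inbound_high=0x%08x\n",
                   lo32(in_port), hi32(in_port));
            return 0;
    }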
@@ -1010,11 +956,11 @@ int i2o_status_get(struct i2o_controller *c) | |||
1010 | { | 956 | { |
1011 | struct i2o_message __iomem *msg; | 957 | struct i2o_message __iomem *msg; |
1012 | u32 m; | 958 | u32 m; |
1013 | u8 *status_block; | 959 | volatile u8 *status_block; |
1014 | unsigned long timeout; | 960 | unsigned long timeout; |
1015 | 961 | ||
1016 | status_block = (u8 *) c->status_block.virt; | 962 | status_block = (u8 *) c->status_block.virt; |
1017 | memset(status_block, 0, sizeof(i2o_status_block)); | 963 | memset(c->status_block.virt, 0, sizeof(i2o_status_block)); |
1018 | 964 | ||
1019 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); | 965 | m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET); |
1020 | if (m == I2O_QUEUE_EMPTY) | 966 | if (m == I2O_QUEUE_EMPTY) |
@@ -1027,8 +973,8 @@ int i2o_status_get(struct i2o_controller *c) | |||
1027 | writel(0, &msg->u.s.tcntxt); // FIXME: use reasonable transaction context | 973 | writel(0, &msg->u.s.tcntxt); // FIXME: use reasonable transaction context |
1028 | writel(0, &msg->body[0]); | 974 | writel(0, &msg->body[0]); |
1029 | writel(0, &msg->body[1]); | 975 | writel(0, &msg->body[1]); |
1030 | writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]); | 976 | writel(i2o_dma_low(c->status_block.phys), &msg->body[2]); |
1031 | writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]); | 977 | writel(i2o_dma_high(c->status_block.phys), &msg->body[3]); |
1032 | writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ | 978 | writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */ |
1033 | 979 | ||
1034 | i2o_msg_post(c, m); | 980 | i2o_msg_post(c, m); |
@@ -1037,14 +983,12 @@ int i2o_status_get(struct i2o_controller *c) | |||
1037 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; | 983 | timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ; |
1038 | while (status_block[87] != 0xFF) { | 984 | while (status_block[87] != 0xFF) { |
1039 | if (time_after(jiffies, timeout)) { | 985 | if (time_after(jiffies, timeout)) { |
1040 | printk(KERN_ERR "%s: Get status timeout.\n", c->name); | 986 | osm_err("%s: Get status timeout.\n", c->name); |
1041 | return -ETIMEDOUT; | 987 | return -ETIMEDOUT; |
1042 | } | 988 | } |
1043 | 989 | ||
1044 | set_current_state(TASK_UNINTERRUPTIBLE); | 990 | set_current_state(TASK_UNINTERRUPTIBLE); |
1045 | schedule_timeout(1); | 991 | schedule_timeout(1); |
1046 | |||
1047 | rmb(); | ||
1048 | } | 992 | } |
1049 | 993 | ||
1050 | #ifdef DEBUG | 994 | #ifdef DEBUG |
@@ -1088,8 +1032,8 @@ static int i2o_hrt_get(struct i2o_controller *c) | |||
1088 | rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); | 1032 | rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt); |
1089 | 1033 | ||
1090 | if (rc < 0) { | 1034 | if (rc < 0) { |
1091 | printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n", | 1035 | osm_err("%s: Unable to get HRT (status=%#x)\n", c->name, |
1092 | c->name, -rc); | 1036 | -rc); |
1093 | return rc; | 1037 | return rc; |
1094 | } | 1038 | } |
1095 | 1039 | ||
@@ -1103,13 +1047,41 @@ static int i2o_hrt_get(struct i2o_controller *c) | |||
1103 | return i2o_parse_hrt(c); | 1047 | return i2o_parse_hrt(c); |
1104 | } | 1048 | } |
1105 | 1049 | ||
1106 | printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n", | 1050 | osm_err("%s: Unable to get HRT after %d tries, giving up\n", c->name, |
1107 | c->name, I2O_HRT_GET_TRIES); | 1051 | I2O_HRT_GET_TRIES); |
1108 | 1052 | ||
1109 | return -EBUSY; | 1053 | return -EBUSY; |
1110 | } | 1054 | } |
1111 | 1055 | ||
1112 | /** | 1056 | /** |
1057 | * i2o_iop_free - Free the i2o_controller struct | ||
1058 | * @c: I2O controller to free | ||
1059 | */ | ||
1060 | void i2o_iop_free(struct i2o_controller *c) | ||
1061 | { | ||
1062 | kfree(c); | ||
1063 | }; | ||
1064 | |||
1065 | /** | ||
1066 | * i2o_iop_release - release the memory for an I2O controller | ||
1067 | * @dev: I2O controller which should be released | ||
1068 | * | ||
1069 | * Release the allocated memory. This function is called automatically | ||
1070 | * once the refcount of the device reaches 0. | ||
1071 | */ | ||
1072 | static void i2o_iop_release(struct device *dev) | ||
1073 | { | ||
1074 | struct i2o_controller *c = to_i2o_controller(dev); | ||
1075 | |||
1076 | i2o_iop_free(c); | ||
1077 | }; | ||
1078 | |||
1079 | /* I2O controller class */ | ||
1080 | static struct class i2o_controller_class = { | ||
1081 | .name = "i2o_controller", | ||
1082 | }; | ||
1083 | |||
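i2o_iop_release() and the i2o_controller class added above move the controller onto the driver model: the structure is no longer freed directly but only from the release callback, once the last reference is dropped. A userspace-style sketch of that get/put/release lifetime rule follows; obj_get()/obj_put() merely stand in for get_device()/put_device().

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refs;
            void (*release)(struct obj *);
    };

    static void obj_get(struct obj *o) { o->refs++; }

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    o->release(o);          /* mirrors i2o_iop_release() -> kfree() */
    }

    static void obj_release(struct obj *o)
    {
            printf("releasing\n");
            free(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            o->refs = 1;                    /* like device_initialize() */
            o->release = obj_release;
            obj_get(o);                     /* extra reference, like get_device() */
            obj_put(o);
            obj_put(o);                     /* last put frees the object */
            return 0;
    }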
1084 | /** | ||
1113 | * i2o_iop_alloc - Allocate and initialize a i2o_controller struct | 1085 | * i2o_iop_alloc - Allocate and initialize a i2o_controller struct |
1114 | * | 1086 | * |
1115 | * Allocate the necessary memory for a i2o_controller struct and | 1087 | * Allocate the necessary memory for a i2o_controller struct and |
@@ -1125,8 +1097,8 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
1125 | 1097 | ||
1126 | c = kmalloc(sizeof(*c), GFP_KERNEL); | 1098 | c = kmalloc(sizeof(*c), GFP_KERNEL); |
1127 | if (!c) { | 1099 | if (!c) { |
1128 | printk(KERN_ERR "i2o: Insufficient memory to allocate a I2O " | 1100 | osm_err("i2o: Insufficient memory to allocate a I2O controller." |
1129 | "controller.\n"); | 1101 | "\n"); |
1130 | return ERR_PTR(-ENOMEM); | 1102 | return ERR_PTR(-ENOMEM); |
1131 | } | 1103 | } |
1132 | memset(c, 0, sizeof(*c)); | 1104 | memset(c, 0, sizeof(*c)); |
@@ -1137,6 +1109,16 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
1137 | c->unit = unit++; | 1109 | c->unit = unit++; |
1138 | sprintf(c->name, "iop%d", c->unit); | 1110 | sprintf(c->name, "iop%d", c->unit); |
1139 | 1111 | ||
1112 | device_initialize(&c->device); | ||
1113 | class_device_initialize(&c->classdev); | ||
1114 | |||
1115 | c->device.release = &i2o_iop_release; | ||
1116 | c->classdev.class = &i2o_controller_class; | ||
1117 | c->classdev.dev = &c->device; | ||
1118 | |||
1119 | snprintf(c->device.bus_id, BUS_ID_SIZE, "iop%d", c->unit); | ||
1120 | snprintf(c->classdev.class_id, BUS_ID_SIZE, "iop%d", c->unit); | ||
1121 | |||
1140 | #if BITS_PER_LONG == 64 | 1122 | #if BITS_PER_LONG == 64 |
1141 | spin_lock_init(&c->context_list_lock); | 1123 | spin_lock_init(&c->context_list_lock); |
1142 | atomic_set(&c->context_list_counter, 0); | 1124 | atomic_set(&c->context_list_counter, 0); |
@@ -1147,15 +1129,6 @@ struct i2o_controller *i2o_iop_alloc(void) | |||
1147 | }; | 1129 | }; |
1148 | 1130 | ||
1149 | /** | 1131 | /** |
1150 | * i2o_iop_free - Free the i2o_controller struct | ||
1151 | * @c: I2O controller to free | ||
1152 | */ | ||
1153 | void i2o_iop_free(struct i2o_controller *c) | ||
1154 | { | ||
1155 | kfree(c); | ||
1156 | }; | ||
1157 | |||
1158 | /** | ||
1159 | * i2o_iop_add - Initialize the I2O controller and add it to the I2O core | 1132 | * i2o_iop_add - Initialize the I2O controller and add it to the I2O core |
1160 | * @c: controller | 1133 | * @c: controller |
1161 | * | 1134 | * |
@@ -1168,45 +1141,58 @@ int i2o_iop_add(struct i2o_controller *c) | |||
1168 | { | 1141 | { |
1169 | int rc; | 1142 | int rc; |
1170 | 1143 | ||
1171 | printk(KERN_INFO "%s: Activating I2O controller...\n", c->name); | 1144 | if ((rc = device_add(&c->device))) { |
1172 | printk(KERN_INFO "%s: This may take a few minutes if there are many " | 1145 | osm_err("%s: could not add controller\n", c->name); |
1173 | "devices\n", c->name); | 1146 | goto iop_reset; |
1147 | } | ||
1174 | 1148 | ||
1175 | if ((rc = i2o_iop_activate(c))) { | 1149 | if ((rc = class_device_add(&c->classdev))) { |
1176 | printk(KERN_ERR "%s: could not activate controller\n", | 1150 | osm_err("%s: could not add controller class\n", c->name); |
1177 | c->name); | 1151 | goto device_del; |
1178 | i2o_iop_reset(c); | ||
1179 | return rc; | ||
1180 | } | 1152 | } |
1181 | 1153 | ||
1182 | pr_debug("%s: building sys table...\n", c->name); | 1154 | osm_info("%s: Activating I2O controller...\n", c->name); |
1155 | osm_info("%s: This may take a few minutes if there are many devices\n", | ||
1156 | c->name); | ||
1183 | 1157 | ||
1184 | if ((rc = i2o_systab_build())) { | 1158 | if ((rc = i2o_iop_activate(c))) { |
1185 | i2o_iop_reset(c); | 1159 | osm_err("%s: could not activate controller\n", c->name); |
1186 | return rc; | 1160 | goto class_del; |
1187 | } | 1161 | } |
1188 | 1162 | ||
1189 | pr_debug("%s: online controller...\n", c->name); | 1163 | osm_debug("%s: building sys table...\n", c->name); |
1190 | 1164 | ||
1191 | if ((rc = i2o_iop_online(c))) { | 1165 | if ((rc = i2o_systab_build())) |
1192 | i2o_iop_reset(c); | 1166 | goto class_del; |
1193 | return rc; | ||
1194 | } | ||
1195 | 1167 | ||
1196 | pr_debug("%s: getting LCT...\n", c->name); | 1168 | osm_debug("%s: online controller...\n", c->name); |
1197 | 1169 | ||
1198 | if ((rc = i2o_exec_lct_get(c))) { | 1170 | if ((rc = i2o_iop_online(c))) |
1199 | i2o_iop_reset(c); | 1171 | goto class_del; |
1200 | return rc; | 1172 | |
1201 | } | 1173 | osm_debug("%s: getting LCT...\n", c->name); |
1174 | |||
1175 | if ((rc = i2o_exec_lct_get(c))) | ||
1176 | goto class_del; | ||
1202 | 1177 | ||
1203 | list_add(&c->list, &i2o_controllers); | 1178 | list_add(&c->list, &i2o_controllers); |
1204 | 1179 | ||
1205 | i2o_driver_notify_controller_add_all(c); | 1180 | i2o_driver_notify_controller_add_all(c); |
1206 | 1181 | ||
1207 | printk(KERN_INFO "%s: Controller added\n", c->name); | 1182 | osm_info("%s: Controller added\n", c->name); |
1208 | 1183 | ||
1209 | return 0; | 1184 | return 0; |
1185 | |||
1186 | class_del: | ||
1187 | class_device_del(&c->classdev); | ||
1188 | |||
1189 | device_del: | ||
1190 | device_del(&c->device); | ||
1191 | |||
1192 | iop_reset: | ||
1193 | i2o_iop_reset(c); | ||
1194 | |||
1195 | return rc; | ||
1210 | }; | 1196 | }; |
1211 | 1197 | ||
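The reworked i2o_iop_add() above switches to the kernel's usual goto-unwind error handling: each failing step jumps to a label that tears down everything set up so far, in reverse order, before falling through to the final reset. A minimal standalone sketch of the pattern (step/undo names are illustrative):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static void undo_a(void) { puts("undo a"); }
    static int step_b(void) { return -1; }  /* pretend this step fails */
    static void undo_b(void) { puts("undo b"); }
    static int step_c(void) { return 0; }

    static int bring_up(void)
    {
            int rc;

            if ((rc = step_a()))
                    goto out;
            if ((rc = step_b()))
                    goto err_a;
            if ((rc = step_c()))
                    goto err_b;

            return 0;

    err_b:
            undo_b();
    err_a:
            undo_a();
    out:
            return rc;
    }

    int main(void)
    {
            printf("bring_up() = %d\n", bring_up());
            return 0;
    }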
1212 | /** | 1198 | /** |
@@ -1264,16 +1250,18 @@ static int __init i2o_iop_init(void) | |||
1264 | if (rc) | 1250 | if (rc) |
1265 | goto exit; | 1251 | goto exit; |
1266 | 1252 | ||
1267 | rc = i2o_driver_init(); | 1253 | if ((rc = class_register(&i2o_controller_class))) { |
1268 | if (rc) | 1254 | osm_err("can't register class i2o_controller\n"); |
1269 | goto device_exit; | 1255 | goto device_exit; |
1256 | } | ||
1270 | 1257 | ||
1271 | rc = i2o_exec_init(); | 1258 | if ((rc = i2o_driver_init())) |
1272 | if (rc) | 1259 | goto class_exit; |
1260 | |||
1261 | if ((rc = i2o_exec_init())) | ||
1273 | goto driver_exit; | 1262 | goto driver_exit; |
1274 | 1263 | ||
1275 | rc = i2o_pci_init(); | 1264 | if ((rc = i2o_pci_init())) |
1276 | if (rc < 0) | ||
1277 | goto exec_exit; | 1265 | goto exec_exit; |
1278 | 1266 | ||
1279 | return 0; | 1267 | return 0; |
@@ -1284,6 +1272,9 @@ static int __init i2o_iop_init(void) | |||
1284 | driver_exit: | 1272 | driver_exit: |
1285 | i2o_driver_exit(); | 1273 | i2o_driver_exit(); |
1286 | 1274 | ||
1275 | class_exit: | ||
1276 | class_unregister(&i2o_controller_class); | ||
1277 | |||
1287 | device_exit: | 1278 | device_exit: |
1288 | i2o_device_exit(); | 1279 | i2o_device_exit(); |
1289 | 1280 | ||
@@ -1301,6 +1292,7 @@ static void __exit i2o_iop_exit(void) | |||
1301 | i2o_pci_exit(); | 1292 | i2o_pci_exit(); |
1302 | i2o_exec_exit(); | 1293 | i2o_exec_exit(); |
1303 | i2o_driver_exit(); | 1294 | i2o_driver_exit(); |
1295 | class_unregister(&i2o_controller_class); | ||
1304 | i2o_device_exit(); | 1296 | i2o_device_exit(); |
1305 | }; | 1297 | }; |
1306 | 1298 | ||
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c index e772752f056d..7a60fd7be8ad 100644 --- a/drivers/message/i2o/pci.c +++ b/drivers/message/i2o/pci.c | |||
@@ -30,53 +30,18 @@ | |||
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/i2o.h> | 32 | #include <linux/i2o.h> |
33 | 33 | #include "core.h" | |
34 | #ifdef CONFIG_MTRR | ||
35 | #include <asm/mtrr.h> | ||
36 | #endif // CONFIG_MTRR | ||
37 | |||
38 | /* Module internal functions from other sources */ | ||
39 | extern struct i2o_controller *i2o_iop_alloc(void); | ||
40 | extern void i2o_iop_free(struct i2o_controller *); | ||
41 | |||
42 | extern int i2o_iop_add(struct i2o_controller *); | ||
43 | extern void i2o_iop_remove(struct i2o_controller *); | ||
44 | |||
45 | extern int i2o_driver_dispatch(struct i2o_controller *, u32, | ||
46 | struct i2o_message *); | ||
47 | 34 | ||
48 | /* PCI device id table for all I2O controllers */ | 35 | /* PCI device id table for all I2O controllers */ |
49 | static struct pci_device_id __devinitdata i2o_pci_ids[] = { | 36 | static struct pci_device_id __devinitdata i2o_pci_ids[] = { |
50 | {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)}, | 37 | {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)}, |
51 | {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)}, | 38 | {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)}, |
39 | {.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962, | ||
40 | .subvendor = PCI_VENDOR_ID_PROMISE,.subdevice = PCI_ANY_ID}, | ||
52 | {0} | 41 | {0} |
53 | }; | 42 | }; |
54 | 43 | ||
55 | /** | 44 | /** |
56 | * i2o_dma_realloc - Realloc DMA memory | ||
57 | * @dev: struct device pointer to the PCI device of the I2O controller | ||
58 | * @addr: pointer to a i2o_dma struct DMA buffer | ||
59 | * @len: new length of memory | ||
60 | * @gfp_mask: GFP mask | ||
61 | * | ||
62 | * If there was something allocated in the addr, free it first. If len > 0 | ||
63 | * than try to allocate it and write the addresses back to the addr | ||
64 | * structure. If len == 0 set the virtual address to NULL. | ||
65 | * | ||
66 | * Returns the 0 on success or negative error code on failure. | ||
67 | */ | ||
68 | int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len, | ||
69 | unsigned int gfp_mask) | ||
70 | { | ||
71 | i2o_dma_free(dev, addr); | ||
72 | |||
73 | if (len) | ||
74 | return i2o_dma_alloc(dev, addr, len, gfp_mask); | ||
75 | |||
76 | return 0; | ||
77 | }; | ||
78 | |||
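The kernel-doc above spells out i2o_dma_realloc()'s contract: free whatever was allocated first, then allocate again only if the new length is non-zero. A plain-C sketch of the same semantics; struct dma_buf and dma_buf_realloc() are illustrative and backed by malloc/free rather than coherent DMA memory.

    #include <stdlib.h>

    struct dma_buf {
            void *virt;
            size_t len;
    };

    static int dma_buf_realloc(struct dma_buf *b, size_t len)
    {
            free(b->virt);                  /* free the old buffer, if any */
            b->virt = NULL;
            b->len = 0;

            if (!len)                       /* len == 0: leave it unallocated */
                    return 0;

            b->virt = malloc(len);          /* stands in for i2o_dma_alloc() */
            if (!b->virt)
                    return -1;              /* analogous to -ENOMEM */
            b->len = len;
            return 0;
    }

    int main(void)
    {
            struct dma_buf b = { NULL, 0 };

            dma_buf_realloc(&b, 128);       /* (re)allocate */
            dma_buf_realloc(&b, 0);         /* free and leave empty */
            return 0;
    }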
79 | /** | ||
80 | * i2o_pci_free - Frees the DMA memory for the I2O controller | 45 | * i2o_pci_free - Frees the DMA memory for the I2O controller |
81 | * @c: I2O controller to free | 46 | * @c: I2O controller to free |
82 | * | 47 | * |
@@ -91,19 +56,11 @@ static void i2o_pci_free(struct i2o_controller *c) | |||
91 | 56 | ||
92 | i2o_dma_free(dev, &c->out_queue); | 57 | i2o_dma_free(dev, &c->out_queue); |
93 | i2o_dma_free(dev, &c->status_block); | 58 | i2o_dma_free(dev, &c->status_block); |
94 | if (c->lct) | 59 | kfree(c->lct); |
95 | kfree(c->lct); | ||
96 | i2o_dma_free(dev, &c->dlct); | 60 | i2o_dma_free(dev, &c->dlct); |
97 | i2o_dma_free(dev, &c->hrt); | 61 | i2o_dma_free(dev, &c->hrt); |
98 | i2o_dma_free(dev, &c->status); | 62 | i2o_dma_free(dev, &c->status); |
99 | 63 | ||
100 | #ifdef CONFIG_MTRR | ||
101 | if (c->mtrr_reg0 >= 0) | ||
102 | mtrr_del(c->mtrr_reg0, 0, 0); | ||
103 | if (c->mtrr_reg1 >= 0) | ||
104 | mtrr_del(c->mtrr_reg1, 0, 0); | ||
105 | #endif | ||
106 | |||
107 | if (c->raptor && c->in_queue.virt) | 64 | if (c->raptor && c->in_queue.virt) |
108 | iounmap(c->in_queue.virt); | 65 | iounmap(c->in_queue.virt); |
109 | 66 | ||
@@ -178,14 +135,15 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
178 | c->name, (unsigned long)c->base.phys, | 135 | c->name, (unsigned long)c->base.phys, |
179 | (unsigned long)c->base.len); | 136 | (unsigned long)c->base.len); |
180 | 137 | ||
181 | c->base.virt = ioremap(c->base.phys, c->base.len); | 138 | c->base.virt = ioremap_nocache(c->base.phys, c->base.len); |
182 | if (!c->base.virt) { | 139 | if (!c->base.virt) { |
183 | printk(KERN_ERR "%s: Unable to map controller.\n", c->name); | 140 | printk(KERN_ERR "%s: Unable to map controller.\n", c->name); |
184 | return -ENOMEM; | 141 | return -ENOMEM; |
185 | } | 142 | } |
186 | 143 | ||
187 | if (c->raptor) { | 144 | if (c->raptor) { |
188 | c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len); | 145 | c->in_queue.virt = |
146 | ioremap_nocache(c->in_queue.phys, c->in_queue.len); | ||
189 | if (!c->in_queue.virt) { | 147 | if (!c->in_queue.virt) { |
190 | printk(KERN_ERR "%s: Unable to map controller.\n", | 148 | printk(KERN_ERR "%s: Unable to map controller.\n", |
191 | c->name); | 149 | c->name); |
@@ -195,43 +153,10 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
195 | } else | 153 | } else |
196 | c->in_queue = c->base; | 154 | c->in_queue = c->base; |
197 | 155 | ||
198 | c->irq_mask = c->base.virt + 0x34; | 156 | c->irq_status = c->base.virt + I2O_IRQ_STATUS; |
199 | c->post_port = c->base.virt + 0x40; | 157 | c->irq_mask = c->base.virt + I2O_IRQ_MASK; |
200 | c->reply_port = c->base.virt + 0x44; | 158 | c->in_port = c->base.virt + I2O_IN_PORT; |
201 | 159 | c->out_port = c->base.virt + I2O_OUT_PORT; | |
202 | #ifdef CONFIG_MTRR | ||
203 | /* Enable Write Combining MTRR for IOP's memory region */ | ||
204 | c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len, | ||
205 | MTRR_TYPE_WRCOMB, 1); | ||
206 | c->mtrr_reg1 = -1; | ||
207 | |||
208 | if (c->mtrr_reg0 < 0) | ||
209 | printk(KERN_WARNING "%s: could not enable write combining " | ||
210 | "MTRR\n", c->name); | ||
211 | else | ||
212 | printk(KERN_INFO "%s: using write combining MTRR\n", c->name); | ||
213 | |||
214 | /* | ||
215 | * If it is an INTEL i960 I/O processor then set the first 64K to | ||
216 | * Uncacheable since the region contains the messaging unit which | ||
217 | * shouldn't be cached. | ||
218 | */ | ||
219 | if ((pdev->vendor == PCI_VENDOR_ID_INTEL || | ||
220 | pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) { | ||
221 | printk(KERN_INFO "%s: MTRR workaround for Intel i960 processor" | ||
222 | "\n", c->name); | ||
223 | c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000, | ||
224 | MTRR_TYPE_UNCACHABLE, 1); | ||
225 | |||
226 | if (c->mtrr_reg1 < 0) { | ||
227 | printk(KERN_WARNING "%s: Error in setting " | ||
228 | "MTRR_TYPE_UNCACHABLE\n", c->name); | ||
229 | mtrr_del(c->mtrr_reg0, c->in_queue.phys, | ||
230 | c->in_queue.len); | ||
231 | c->mtrr_reg0 = -1; | ||
232 | } | ||
233 | } | ||
234 | #endif | ||
235 | 160 | ||
236 | if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { | 161 | if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) { |
237 | i2o_pci_free(c); | 162 | i2o_pci_free(c); |
@@ -254,7 +179,10 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
254 | return -ENOMEM; | 179 | return -ENOMEM; |
255 | } | 180 | } |
256 | 181 | ||
257 | if (i2o_dma_alloc(dev, &c->out_queue, MSG_POOL_SIZE, GFP_KERNEL)) { | 182 | if (i2o_dma_alloc |
183 | (dev, &c->out_queue, | ||
184 | I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE * | ||
185 | sizeof(u32), GFP_KERNEL)) { | ||
258 | i2o_pci_free(c); | 186 | i2o_pci_free(c); |
259 | return -ENOMEM; | 187 | return -ENOMEM; |
260 | } | 188 | } |
@@ -276,51 +204,30 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c) | |||
276 | static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) | 204 | static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r) |
277 | { | 205 | { |
278 | struct i2o_controller *c = dev_id; | 206 | struct i2o_controller *c = dev_id; |
279 | struct device *dev = &c->pdev->dev; | 207 | u32 m; |
280 | struct i2o_message *m; | 208 | irqreturn_t rc = IRQ_NONE; |
281 | u32 mv; | 209 | |
282 | 210 | while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) { | |
283 | /* | 211 | m = readl(c->out_port); |
284 | * Old 960 steppings had a bug in the I2O unit that caused | 212 | if (m == I2O_QUEUE_EMPTY) { |
285 | * the queue to appear empty when it wasn't. | 213 | /* |
286 | */ | 214 | * Old 960 steppings had a bug in the I2O unit that |
287 | mv = I2O_REPLY_READ32(c); | 215 | * caused the queue to appear empty when it wasn't. |
288 | if (mv == I2O_QUEUE_EMPTY) { | 216 | */ |
289 | mv = I2O_REPLY_READ32(c); | 217 | m = readl(c->out_port); |
290 | if (unlikely(mv == I2O_QUEUE_EMPTY)) { | 218 | if (unlikely(m == I2O_QUEUE_EMPTY)) |
291 | return IRQ_NONE; | 219 | break; |
292 | } else | 220 | } |
293 | pr_debug("%s: 960 bug detected\n", c->name); | ||
294 | } | ||
295 | |||
296 | while (mv != I2O_QUEUE_EMPTY) { | ||
297 | /* | ||
298 | * Map the message from the page frame map to kernel virtual. | ||
299 | * Because bus_to_virt is deprecated, we have calculate the | ||
300 | * location by ourself! | ||
301 | */ | ||
302 | m = i2o_msg_out_to_virt(c, mv); | ||
303 | |||
304 | /* | ||
305 | * Ensure this message is seen coherently but cachably by | ||
306 | * the processor | ||
307 | */ | ||
308 | dma_sync_single_for_cpu(dev, mv, MSG_FRAME_SIZE * 4, | ||
309 | PCI_DMA_FROMDEVICE); | ||
310 | 221 | ||
311 | /* dispatch it */ | 222 | /* dispatch it */ |
312 | if (i2o_driver_dispatch(c, mv, m)) | 223 | if (i2o_driver_dispatch(c, m)) |
313 | /* flush it if result != 0 */ | 224 | /* flush it if result != 0 */ |
314 | i2o_flush_reply(c, mv); | 225 | i2o_flush_reply(c, m); |
315 | 226 | ||
316 | /* | 227 | rc = IRQ_HANDLED; |
317 | * That 960 bug again... | ||
318 | */ | ||
319 | mv = I2O_REPLY_READ32(c); | ||
320 | if (mv == I2O_QUEUE_EMPTY) | ||
321 | mv = I2O_REPLY_READ32(c); | ||
322 | } | 228 | } |
323 | return IRQ_HANDLED; | 229 | |
230 | return rc; | ||
324 | } | 231 | } |
325 | 232 | ||
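The rewritten interrupt handler above drains the outbound FIFO in a loop and keeps the old i960-stepping workaround: when a read returns the EMPTY sentinel, it reads once more before trusting it, and IRQ_HANDLED is returned only if at least one frame was dispatched. A hedged, hardware-free sketch of that drain loop; read_fifo() and QUEUE_EMPTY are stand-ins for readl(c->out_port) and I2O_QUEUE_EMPTY, and the array-backed FIFO is fake.

    #include <stdio.h>

    #define QUEUE_EMPTY 0xffffffffu

    /* fake FIFO backed by an array, standing in for the MMIO out_port */
    static unsigned int fifo[] = { 0x100, 0x180, QUEUE_EMPTY, 0x200, QUEUE_EMPTY };
    static unsigned int pos;

    static unsigned int read_fifo(void)
    {
            return pos < sizeof(fifo) / sizeof(fifo[0]) ? fifo[pos++] : QUEUE_EMPTY;
    }

    int main(void)
    {
            int handled = 0;
            unsigned int m;

            /* the second read covers the i960 "queue looks empty" bug */
            while ((m = read_fifo()) != QUEUE_EMPTY ||
                   (m = read_fifo()) != QUEUE_EMPTY) {
                    printf("dispatch frame 0x%x\n", m);
                    handled = 1;
            }

            printf("%s\n", handled ? "IRQ_HANDLED" : "IRQ_NONE");
            return 0;
    }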
326 | /** | 233 | /** |
@@ -336,7 +243,7 @@ static int i2o_pci_irq_enable(struct i2o_controller *c) | |||
336 | struct pci_dev *pdev = c->pdev; | 243 | struct pci_dev *pdev = c->pdev; |
337 | int rc; | 244 | int rc; |
338 | 245 | ||
339 | I2O_IRQ_WRITE32(c, 0xffffffff); | 246 | writel(0xffffffff, c->irq_mask); |
340 | 247 | ||
341 | if (pdev->irq) { | 248 | if (pdev->irq) { |
342 | rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ, | 249 | rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ, |
@@ -348,7 +255,7 @@ static int i2o_pci_irq_enable(struct i2o_controller *c) | |||
348 | } | 255 | } |
349 | } | 256 | } |
350 | 257 | ||
351 | I2O_IRQ_WRITE32(c, 0x00000000); | 258 | writel(0x00000000, c->irq_mask); |
352 | 259 | ||
353 | printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq); | 260 | printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq); |
354 | 261 | ||
@@ -363,7 +270,7 @@ static int i2o_pci_irq_enable(struct i2o_controller *c) | |||
363 | */ | 270 | */ |
364 | static void i2o_pci_irq_disable(struct i2o_controller *c) | 271 | static void i2o_pci_irq_disable(struct i2o_controller *c) |
365 | { | 272 | { |
366 | I2O_IRQ_WRITE32(c, 0xffffffff); | 273 | writel(0xffffffff, c->irq_mask); |
367 | 274 | ||
368 | if (c->pdev->irq > 0) | 275 | if (c->pdev->irq > 0) |
369 | free_irq(c->pdev->irq, c); | 276 | free_irq(c->pdev->irq, c); |
@@ -385,28 +292,25 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
385 | { | 292 | { |
386 | struct i2o_controller *c; | 293 | struct i2o_controller *c; |
387 | int rc; | 294 | int rc; |
295 | struct pci_dev *i960 = NULL; | ||
388 | 296 | ||
389 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); | 297 | printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n"); |
390 | 298 | ||
391 | if ((pdev->class & 0xff) > 1) { | 299 | if ((pdev->class & 0xff) > 1) { |
392 | printk(KERN_WARNING "i2o: I2O controller found but does not " | 300 | printk(KERN_WARNING "i2o: %s does not support I2O 1.5 " |
393 | "support I2O 1.5 (skipping).\n"); | 301 | "(skipping).\n", pci_name(pdev)); |
394 | return -ENODEV; | 302 | return -ENODEV; |
395 | } | 303 | } |
396 | 304 | ||
397 | if ((rc = pci_enable_device(pdev))) { | 305 | if ((rc = pci_enable_device(pdev))) { |
398 | printk(KERN_WARNING "i2o: I2O controller found but could not be" | 306 | printk(KERN_WARNING "i2o: couldn't enable device %s\n", |
399 | " enabled.\n"); | 307 | pci_name(pdev)); |
400 | return rc; | 308 | return rc; |
401 | } | 309 | } |
402 | 310 | ||
403 | printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n", | ||
404 | pdev->bus->number, pdev->devfn); | ||
405 | |||
406 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { | 311 | if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) { |
407 | printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No " | 312 | printk(KERN_WARNING "i2o: no suitable DMA found for %s\n", |
408 | "suitable DMA available!\n", pdev->bus->number, | 313 | pci_name(pdev)); |
409 | pdev->devfn); | ||
410 | rc = -ENODEV; | 314 | rc = -ENODEV; |
411 | goto disable; | 315 | goto disable; |
412 | } | 316 | } |
@@ -415,14 +319,16 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
415 | 319 | ||
416 | c = i2o_iop_alloc(); | 320 | c = i2o_iop_alloc(); |
417 | if (IS_ERR(c)) { | 321 | if (IS_ERR(c)) { |
418 | printk(KERN_ERR "i2o: memory for I2O controller could not be " | 322 | printk(KERN_ERR "i2o: couldn't allocate memory for %s\n", |
419 | "allocated\n"); | 323 | pci_name(pdev)); |
420 | rc = PTR_ERR(c); | 324 | rc = PTR_ERR(c); |
421 | goto disable; | 325 | goto disable; |
422 | } | 326 | } else |
327 | printk(KERN_INFO "%s: controller found (%s)\n", c->name, | ||
328 | pci_name(pdev)); | ||
423 | 329 | ||
424 | c->pdev = pdev; | 330 | c->pdev = pdev; |
425 | c->device = pdev->dev; | 331 | c->device.parent = get_device(&pdev->dev); |
426 | 332 | ||
427 | /* Cards that fall apart if you hit them with large I/O loads... */ | 333 | /* Cards that fall apart if you hit them with large I/O loads... */ |
428 | if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { | 334 | if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) { |
@@ -432,16 +338,48 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
432 | } | 338 | } |
433 | 339 | ||
434 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) { | 340 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) { |
341 | /* | ||
342 | * Expose the chip behind the i960 for initialization, or it | ||
343 | * will fail. | ||
344 | */ | ||
345 | i960 = | ||
346 | pci_find_slot(c->pdev->bus->number, | ||
347 | PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0)); | ||
348 | |||
349 | if (i960) | ||
350 | pci_write_config_word(i960, 0x42, 0); | ||
351 | |||
435 | c->promise = 1; | 352 | c->promise = 1; |
436 | printk(KERN_INFO "%s: Promise workarounds activated.\n", | 353 | c->limit_sectors = 1; |
437 | c->name); | ||
438 | } | 354 | } |
439 | 355 | ||
356 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT) | ||
357 | c->adaptec = 1; | ||
358 | |||
440 | /* Cards that go bananas if you quiesce them before you reset them. */ | 359 | /* Cards that go bananas if you quiesce them before you reset them. */ |
441 | if (pdev->vendor == PCI_VENDOR_ID_DPT) { | 360 | if (pdev->vendor == PCI_VENDOR_ID_DPT) { |
442 | c->no_quiesce = 1; | 361 | c->no_quiesce = 1; |
443 | if (pdev->device == 0xa511) | 362 | if (pdev->device == 0xa511) |
444 | c->raptor = 1; | 363 | c->raptor = 1; |
364 | |||
365 | if (pdev->subsystem_device == 0xc05a) { | ||
366 | c->limit_sectors = 1; | ||
367 | printk(KERN_INFO | ||
368 | "%s: limit sectors per request to %d\n", c->name, | ||
369 | I2O_MAX_SECTORS_LIMITED); | ||
370 | } | ||
371 | #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64 | ||
372 | if (sizeof(dma_addr_t) > 4) { | ||
373 | if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)) | ||
374 | printk(KERN_INFO "%s: 64-bit DMA unavailable\n", | ||
375 | c->name); | ||
376 | else { | ||
377 | c->pae_support = 1; | ||
378 | printk(KERN_INFO "%s: using 64-bit DMA\n", | ||
379 | c->name); | ||
380 | } | ||
381 | } | ||
382 | #endif | ||
445 | } | 383 | } |
446 | 384 | ||
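The Adaptec branch above tries to enable 64-bit DMA and, if the wider mask cannot be set, logs that it is unavailable and stays with the 32-bit mask established earlier in the probe. A sketch of that "try the wide mask, fall back" pattern; set_dma_mask() here is only a stand-in for pci_set_dma_mask(), and the width check uses pointer size purely for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_64BIT_MASK 0xffffffffffffffffULL
    #define DMA_32BIT_MASK 0x00000000ffffffffULL

    /* pretend the platform only supports 32-bit DMA */
    static int set_dma_mask(uint64_t mask)
    {
            return mask > DMA_32BIT_MASK ? -1 : 0;
    }

    int main(void)
    {
            int pae_support = 0;

            if (sizeof(void *) > 4 && set_dma_mask(DMA_64BIT_MASK) == 0)
                    pae_support = 1;        /* wide addressing available */
            else if (set_dma_mask(DMA_32BIT_MASK) != 0)
                    return 1;               /* no usable DMA at all */

            printf("using %d-bit DMA\n", pae_support ? 64 : 32);
            return 0;
    }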
447 | if ((rc = i2o_pci_alloc(c))) { | 385 | if ((rc = i2o_pci_alloc(c))) { |
@@ -459,6 +397,11 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
459 | if ((rc = i2o_iop_add(c))) | 397 | if ((rc = i2o_iop_add(c))) |
460 | goto uninstall; | 398 | goto uninstall; |
461 | 399 | ||
400 | get_device(&c->device); | ||
401 | |||
402 | if (i960) | ||
403 | pci_write_config_word(i960, 0x42, 0x03ff); | ||
404 | |||
462 | return 0; | 405 | return 0; |
463 | 406 | ||
464 | uninstall: | 407 | uninstall: |
@@ -469,6 +412,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev, | |||
469 | 412 | ||
470 | free_controller: | 413 | free_controller: |
471 | i2o_iop_free(c); | 414 | i2o_iop_free(c); |
415 | put_device(c->device.parent); | ||
472 | 416 | ||
473 | disable: | 417 | disable: |
474 | pci_disable_device(pdev); | 418 | pci_disable_device(pdev); |
@@ -492,15 +436,17 @@ static void __devexit i2o_pci_remove(struct pci_dev *pdev) | |||
492 | i2o_pci_irq_disable(c); | 436 | i2o_pci_irq_disable(c); |
493 | i2o_pci_free(c); | 437 | i2o_pci_free(c); |
494 | 438 | ||
439 | pci_disable_device(pdev); | ||
440 | |||
495 | printk(KERN_INFO "%s: Controller removed.\n", c->name); | 441 | printk(KERN_INFO "%s: Controller removed.\n", c->name); |
496 | 442 | ||
497 | i2o_iop_free(c); | 443 | put_device(c->device.parent); |
498 | pci_disable_device(pdev); | 444 | put_device(&c->device); |
499 | }; | 445 | }; |
500 | 446 | ||
501 | /* PCI driver for I2O controller */ | 447 | /* PCI driver for I2O controller */ |
502 | static struct pci_driver i2o_pci_driver = { | 448 | static struct pci_driver i2o_pci_driver = { |
503 | .name = "I2O controller", | 449 | .name = "PCI_I2O", |
504 | .id_table = i2o_pci_ids, | 450 | .id_table = i2o_pci_ids, |
505 | .probe = i2o_pci_probe, | 451 | .probe = i2o_pci_probe, |
506 | .remove = __devexit_p(i2o_pci_remove), | 452 | .remove = __devexit_p(i2o_pci_remove), |
@@ -523,6 +469,4 @@ void __exit i2o_pci_exit(void) | |||
523 | { | 469 | { |
524 | pci_unregister_driver(&i2o_pci_driver); | 470 | pci_unregister_driver(&i2o_pci_driver); |
525 | }; | 471 | }; |
526 | |||
527 | EXPORT_SYMBOL(i2o_dma_realloc); | ||
528 | MODULE_DEVICE_TABLE(pci, i2o_pci_ids); | 472 | MODULE_DEVICE_TABLE(pci, i2o_pci_ids); |