aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/dispatcher/dsmethod.c12
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c3
-rw-r--r--drivers/acpi/dispatcher/dsutils.c7
-rw-r--r--drivers/acpi/dispatcher/dswstate.c9
-rw-r--r--drivers/acpi/ec.c39
-rw-r--r--drivers/acpi/events/evgpe.c5
-rw-r--r--drivers/acpi/events/evgpeblk.c3
-rw-r--r--drivers/acpi/events/evmisc.c20
-rw-r--r--drivers/acpi/events/evregion.c15
-rw-r--r--drivers/acpi/events/evrgnini.c3
-rw-r--r--drivers/acpi/events/evxface.c7
-rw-r--r--drivers/acpi/events/evxfevnt.c2
-rw-r--r--drivers/acpi/executer/exconvrt.c5
-rw-r--r--drivers/acpi/executer/excreate.c6
-rw-r--r--drivers/acpi/executer/exdump.c17
-rw-r--r--drivers/acpi/executer/exmutex.c37
-rw-r--r--drivers/acpi/executer/exnames.c3
-rw-r--r--drivers/acpi/executer/exprep.c2
-rw-r--r--drivers/acpi/executer/exresop.c3
-rw-r--r--drivers/acpi/executer/exsystem.c30
-rw-r--r--drivers/acpi/executer/exutils.c104
-rw-r--r--drivers/acpi/hardware/hwsleep.c1
-rw-r--r--drivers/acpi/namespace/nseval.c13
-rw-r--r--drivers/acpi/namespace/nsinit.c7
-rw-r--r--drivers/acpi/namespace/nswalk.c6
-rw-r--r--drivers/acpi/namespace/nsxfeval.c17
-rw-r--r--drivers/acpi/osl.c45
-rw-r--r--drivers/acpi/parser/psopcode.c618
-rw-r--r--drivers/acpi/resources/rscalc.c3
-rw-r--r--drivers/acpi/resources/rscreate.c13
-rw-r--r--drivers/acpi/resources/rsdump.c8
-rw-r--r--drivers/acpi/resources/rsinfo.c2
-rw-r--r--drivers/acpi/resources/rslist.c7
-rw-r--r--drivers/acpi/resources/rsmisc.c4
-rw-r--r--drivers/acpi/resources/rsutils.c6
-rw-r--r--drivers/acpi/resources/rsxface.c3
-rw-r--r--drivers/acpi/sleep/main.c3
-rw-r--r--drivers/acpi/sleep/proc.c11
-rw-r--r--drivers/acpi/tables/tbfadt.c6
-rw-r--r--drivers/acpi/tables/tbxface.c16
-rw-r--r--drivers/acpi/thermal.c104
-rw-r--r--drivers/acpi/utilities/utalloc.c1
-rw-r--r--drivers/acpi/utilities/utcache.c3
-rw-r--r--drivers/acpi/utilities/utcopy.c4
-rw-r--r--drivers/acpi/utilities/utdebug.c4
-rw-r--r--drivers/acpi/utilities/utdelete.c1
-rw-r--r--drivers/acpi/utilities/utglobal.c6
-rw-r--r--drivers/acpi/utilities/utmisc.c6
-rw-r--r--drivers/acpi/utilities/utmutex.c8
-rw-r--r--drivers/acpi/utilities/utresrc.c1
-rw-r--r--drivers/acpi/utilities/utxface.c2
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/auxdisplay/Kconfig1
-rw-r--r--drivers/block/Kconfig4
-rw-r--r--drivers/char/Kconfig11
-rw-r--r--drivers/char/ipmi/Kconfig2
-rw-r--r--drivers/char/tpm/Kconfig1
-rw-r--r--drivers/crypto/Kconfig22
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/edac/Kconfig1
-rw-r--r--drivers/firewire/Kconfig61
-rw-r--r--drivers/firewire/Makefile10
-rw-r--r--drivers/firewire/fw-card.c560
-rw-r--r--drivers/firewire/fw-cdev.c961
-rw-r--r--drivers/firewire/fw-device.c813
-rw-r--r--drivers/firewire/fw-device.h146
-rw-r--r--drivers/firewire/fw-iso.c163
-rw-r--r--drivers/firewire/fw-ohci.c1943
-rw-r--r--drivers/firewire/fw-ohci.h153
-rw-r--r--drivers/firewire/fw-sbp2.c1147
-rw-r--r--drivers/firewire/fw-topology.c537
-rw-r--r--drivers/firewire/fw-topology.h92
-rw-r--r--drivers/firewire/fw-transaction.c910
-rw-r--r--drivers/firewire/fw-transaction.h458
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/ams/ams-input.c2
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/hdaps.c2
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-at91.c7
-rw-r--r--drivers/i2c/busses/i2c-pxa.c2
-rw-r--r--drivers/ide/Kconfig1
-rw-r--r--drivers/ieee1394/Kconfig3
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/input/Kconfig1
-rw-r--r--drivers/isdn/Kconfig1
-rw-r--r--drivers/kvm/Kconfig1
-rw-r--r--drivers/leds/Kconfig1
-rw-r--r--drivers/md/raid1.c33
-rw-r--r--drivers/media/Kconfig1
-rw-r--r--drivers/message/fusion/Kconfig1
-rw-r--r--drivers/message/i2o/Kconfig1
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/asus-laptop.c66
-rw-r--r--drivers/misc/msi-laptop.c12
-rw-r--r--drivers/misc/sony-laptop.c8
-rw-r--r--drivers/mmc/Kconfig1
-rw-r--r--drivers/mtd/Kconfig1
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/parport/Kconfig1
-rw-r--r--drivers/pnp/Kconfig1
-rw-r--r--drivers/rtc/Kconfig1
-rw-r--r--drivers/s390/block/Kconfig11
-rw-r--r--drivers/s390/block/dasd.c8
-rw-r--r--drivers/s390/block/dasd_diag.c10
-rw-r--r--drivers/s390/block/dasd_eckd.c6
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/char/Kconfig (renamed from drivers/s390/Kconfig)111
-rw-r--r--drivers/s390/char/monreader.c14
-rw-r--r--drivers/s390/char/raw3270.c5
-rw-r--r--drivers/s390/char/sclp.h3
-rw-r--r--drivers/s390/char/sclp_sdias.c8
-rw-r--r--drivers/s390/char/zcore.c9
-rw-r--r--drivers/s390/cio/css.c3
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/device_ops.c11
-rw-r--r--drivers/s390/cio/qdio.c1
-rw-r--r--drivers/s390/net/Kconfig8
-rw-r--r--drivers/s390/net/qeth_mpc.c4
-rw-r--r--drivers/s390/scsi/zfcp_aux.c8
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/serial/Kconfig1
-rw-r--r--drivers/serial/sunzilog.c138
-rw-r--r--drivers/serial/sunzilog.h19
-rw-r--r--drivers/spi/Kconfig1
-rw-r--r--drivers/telephony/Kconfig1
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/video/Kconfig1
-rw-r--r--drivers/w1/Kconfig1
133 files changed, 8922 insertions, 891 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 26ca9031ea49..adad2f3d438a 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_FC4) += fc4/
36obj-$(CONFIG_SCSI) += scsi/ 36obj-$(CONFIG_SCSI) += scsi/
37obj-$(CONFIG_ATA) += ata/ 37obj-$(CONFIG_ATA) += ata/
38obj-$(CONFIG_FUSION) += message/ 38obj-$(CONFIG_FUSION) += message/
39obj-$(CONFIG_FIREWIRE) += firewire/
39obj-$(CONFIG_IEEE1394) += ieee1394/ 40obj-$(CONFIG_IEEE1394) += ieee1394/
40obj-y += cdrom/ 41obj-y += cdrom/
41obj-y += auxdisplay/ 42obj-y += auxdisplay/
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index 1683e5c5b94c..1cbe61905824 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
231 * Obtain the method mutex if necessary. Do not acquire mutex for a 231 * Obtain the method mutex if necessary. Do not acquire mutex for a
232 * recursive call. 232 * recursive call.
233 */ 233 */
234 if (acpi_os_get_thread_id() != 234 if (!walk_state ||
235 obj_desc->method.mutex->mutex.owner_thread_id) { 235 !obj_desc->method.mutex->mutex.owner_thread ||
236 (walk_state->thread !=
237 obj_desc->method.mutex->mutex.owner_thread)) {
236 /* 238 /*
237 * Acquire the method mutex. This releases the interpreter if we 239 * Acquire the method mutex. This releases the interpreter if we
238 * block (and reacquires it before it returns) 240 * block (and reacquires it before it returns)
@@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
246 } 248 }
247 249
248 /* Update the mutex and walk info and save the original sync_level */ 250 /* Update the mutex and walk info and save the original sync_level */
249 obj_desc->method.mutex->mutex.owner_thread_id =
250 acpi_os_get_thread_id();
251 251
252 if (walk_state) { 252 if (walk_state) {
253 obj_desc->method.mutex->mutex. 253 obj_desc->method.mutex->mutex.
254 original_sync_level = 254 original_sync_level =
255 walk_state->thread->current_sync_level; 255 walk_state->thread->current_sync_level;
256 256
257 obj_desc->method.mutex->mutex.owner_thread =
258 walk_state->thread;
257 walk_state->thread->current_sync_level = 259 walk_state->thread->current_sync_level =
258 obj_desc->method.sync_level; 260 obj_desc->method.sync_level;
259 } else { 261 } else {
@@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
567 569
568 acpi_os_release_mutex(method_desc->method.mutex->mutex. 570 acpi_os_release_mutex(method_desc->method.mutex->mutex.
569 os_mutex); 571 os_mutex);
570 method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; 572 method_desc->method.mutex->mutex.owner_thread = NULL;
571 } 573 }
572 } 574 }
573 575
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index 6c6104a7a247..fc9da4879cbf 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -866,8 +866,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
866 ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) && 866 ((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
867 (op->common.parent->common.aml_opcode != 867 (op->common.parent->common.aml_opcode !=
868 AML_VAR_PACKAGE_OP) 868 AML_VAR_PACKAGE_OP)
869 && (op->common.parent->common.aml_opcode != 869 && (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
870 AML_NAME_OP))) {
871 walk_state->result_obj = obj_desc; 870 walk_state->result_obj = obj_desc;
872 } 871 }
873 } 872 }
diff --git a/drivers/acpi/dispatcher/dsutils.c b/drivers/acpi/dispatcher/dsutils.c
index e4073e05a75c..71503c036f7c 100644
--- a/drivers/acpi/dispatcher/dsutils.c
+++ b/drivers/acpi/dispatcher/dsutils.c
@@ -556,10 +556,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
556 * indicate this to the interpreter, set the 556 * indicate this to the interpreter, set the
557 * object to the root 557 * object to the root
558 */ 558 */
559 obj_desc = 559 obj_desc = ACPI_CAST_PTR(union
560 ACPI_CAST_PTR(union 560 acpi_operand_object,
561 acpi_operand_object, 561 acpi_gbl_root_node);
562 acpi_gbl_root_node);
563 status = AE_OK; 562 status = AE_OK;
564 } else { 563 } else {
565 /* 564 /*
diff --git a/drivers/acpi/dispatcher/dswstate.c b/drivers/acpi/dispatcher/dswstate.c
index 16c8e38b51ef..5afcdd9c7449 100644
--- a/drivers/acpi/dispatcher/dswstate.c
+++ b/drivers/acpi/dispatcher/dswstate.c
@@ -630,12 +630,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
630 * 630 *
631 ******************************************************************************/ 631 ******************************************************************************/
632 632
633struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, 633struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
634 union acpi_parse_object 634 *origin, union acpi_operand_object
635 *origin, 635 *method_desc, struct acpi_thread_state
636 union acpi_operand_object
637 *method_desc,
638 struct acpi_thread_state
639 *thread) 636 *thread)
640{ 637{
641 struct acpi_walk_state *walk_state; 638 struct acpi_walk_state *walk_state;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index e08cf98f504f..82f496c07675 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -147,9 +147,10 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
147 return 0; 147 return 0;
148} 148}
149 149
150static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count) 150static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
151 unsigned count, int force_poll)
151{ 152{
152 if (acpi_ec_mode == EC_POLL) { 153 if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
153 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); 154 unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
154 while (time_before(jiffies, delay)) { 155 while (time_before(jiffies, delay)) {
155 if (acpi_ec_check_status(ec, event, 0)) 156 if (acpi_ec_check_status(ec, event, 0))
@@ -173,14 +174,15 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
173 174
174static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command, 175static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
175 const u8 * wdata, unsigned wdata_len, 176 const u8 * wdata, unsigned wdata_len,
176 u8 * rdata, unsigned rdata_len) 177 u8 * rdata, unsigned rdata_len,
178 int force_poll)
177{ 179{
178 int result = 0; 180 int result = 0;
179 unsigned count = atomic_read(&ec->event_count); 181 unsigned count = atomic_read(&ec->event_count);
180 acpi_ec_write_cmd(ec, command); 182 acpi_ec_write_cmd(ec, command);
181 183
182 for (; wdata_len > 0; --wdata_len) { 184 for (; wdata_len > 0; --wdata_len) {
183 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count); 185 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
184 if (result) { 186 if (result) {
185 printk(KERN_ERR PREFIX 187 printk(KERN_ERR PREFIX
186 "write_cmd timeout, command = %d\n", command); 188 "write_cmd timeout, command = %d\n", command);
@@ -191,7 +193,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
191 } 193 }
192 194
193 if (!rdata_len) { 195 if (!rdata_len) {
194 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count); 196 result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
195 if (result) { 197 if (result) {
196 printk(KERN_ERR PREFIX 198 printk(KERN_ERR PREFIX
197 "finish-write timeout, command = %d\n", command); 199 "finish-write timeout, command = %d\n", command);
@@ -202,7 +204,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
202 } 204 }
203 205
204 for (; rdata_len > 0; --rdata_len) { 206 for (; rdata_len > 0; --rdata_len) {
205 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count); 207 result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
206 if (result) { 208 if (result) {
207 printk(KERN_ERR PREFIX "read timeout, command = %d\n", 209 printk(KERN_ERR PREFIX "read timeout, command = %d\n",
208 command); 210 command);
@@ -217,7 +219,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
217 219
218static int acpi_ec_transaction(struct acpi_ec *ec, u8 command, 220static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
219 const u8 * wdata, unsigned wdata_len, 221 const u8 * wdata, unsigned wdata_len,
220 u8 * rdata, unsigned rdata_len) 222 u8 * rdata, unsigned rdata_len,
223 int force_poll)
221{ 224{
222 int status; 225 int status;
223 u32 glk; 226 u32 glk;
@@ -240,7 +243,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
240 /* Make sure GPE is enabled before doing transaction */ 243 /* Make sure GPE is enabled before doing transaction */
241 acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR); 244 acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
242 245
243 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0); 246 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
244 if (status) { 247 if (status) {
245 printk(KERN_DEBUG PREFIX 248 printk(KERN_DEBUG PREFIX
246 "input buffer is not empty, aborting transaction\n"); 249 "input buffer is not empty, aborting transaction\n");
@@ -249,7 +252,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
249 252
250 status = acpi_ec_transaction_unlocked(ec, command, 253 status = acpi_ec_transaction_unlocked(ec, command,
251 wdata, wdata_len, 254 wdata, wdata_len,
252 rdata, rdata_len); 255 rdata, rdata_len,
256 force_poll);
253 257
254 end: 258 end:
255 259
@@ -267,12 +271,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
267int acpi_ec_burst_enable(struct acpi_ec *ec) 271int acpi_ec_burst_enable(struct acpi_ec *ec)
268{ 272{
269 u8 d; 273 u8 d;
270 return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1); 274 return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
271} 275}
272 276
273int acpi_ec_burst_disable(struct acpi_ec *ec) 277int acpi_ec_burst_disable(struct acpi_ec *ec)
274{ 278{
275 return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0); 279 return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
276} 280}
277 281
278static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) 282static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
@@ -281,7 +285,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
281 u8 d; 285 u8 d;
282 286
283 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ, 287 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
284 &address, 1, &d, 1); 288 &address, 1, &d, 1, 0);
285 *data = d; 289 *data = d;
286 return result; 290 return result;
287} 291}
@@ -290,7 +294,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
290{ 294{
291 u8 wdata[2] = { address, data }; 295 u8 wdata[2] = { address, data };
292 return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE, 296 return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
293 wdata, 2, NULL, 0); 297 wdata, 2, NULL, 0, 0);
294} 298}
295 299
296/* 300/*
@@ -349,13 +353,15 @@ EXPORT_SYMBOL(ec_write);
349 353
350int ec_transaction(u8 command, 354int ec_transaction(u8 command,
351 const u8 * wdata, unsigned wdata_len, 355 const u8 * wdata, unsigned wdata_len,
352 u8 * rdata, unsigned rdata_len) 356 u8 * rdata, unsigned rdata_len,
357 int force_poll)
353{ 358{
354 if (!first_ec) 359 if (!first_ec)
355 return -ENODEV; 360 return -ENODEV;
356 361
357 return acpi_ec_transaction(first_ec, command, wdata, 362 return acpi_ec_transaction(first_ec, command, wdata,
358 wdata_len, rdata, rdata_len); 363 wdata_len, rdata, rdata_len,
364 force_poll);
359} 365}
360 366
361EXPORT_SYMBOL(ec_transaction); 367EXPORT_SYMBOL(ec_transaction);
@@ -374,7 +380,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
374 * bit to be cleared (and thus clearing the interrupt source). 380 * bit to be cleared (and thus clearing the interrupt source).
375 */ 381 */
376 382
377 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1); 383 result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
378 if (result) 384 if (result)
379 return result; 385 return result;
380 386
@@ -410,6 +416,7 @@ static u32 acpi_ec_gpe_handler(void *data)
410 acpi_status status = AE_OK; 416 acpi_status status = AE_OK;
411 u8 value; 417 u8 value;
412 struct acpi_ec *ec = data; 418 struct acpi_ec *ec = data;
419
413 atomic_inc(&ec->event_count); 420 atomic_inc(&ec->event_count);
414 421
415 if (acpi_ec_mode == EC_INTR) { 422 if (acpi_ec_mode == EC_INTR) {
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index 635ba449ebc2..e22f4a973c0f 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -341,9 +341,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
341 341
342 /* A Non-NULL gpe_device means this is a GPE Block Device */ 342 /* A Non-NULL gpe_device means this is a GPE Block Device */
343 343
344 obj_desc = 344 obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
345 acpi_ns_get_attached_object((struct acpi_namespace_node *) 345 gpe_device);
346 gpe_device);
347 if (!obj_desc || !obj_desc->device.gpe_block) { 346 if (!obj_desc || !obj_desc->device.gpe_block) {
348 return (NULL); 347 return (NULL);
349 } 348 }
diff --git a/drivers/acpi/events/evgpeblk.c b/drivers/acpi/events/evgpeblk.c
index ad5bc76edf46..902c287b3a4f 100644
--- a/drivers/acpi/events/evgpeblk.c
+++ b/drivers/acpi/events/evgpeblk.c
@@ -1033,8 +1033,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
1033 1033
1034 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 1034 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
1035 ACPI_GPE_DISPATCH_METHOD) 1035 ACPI_GPE_DISPATCH_METHOD)
1036 && (gpe_event_info-> 1036 && (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
1037 flags & ACPI_GPE_TYPE_RUNTIME)) {
1038 gpe_enabled_count++; 1037 gpe_enabled_count++;
1039 } 1038 }
1040 1039
diff --git a/drivers/acpi/events/evmisc.c b/drivers/acpi/events/evmisc.c
index cae786ca8600..21cb749d0c75 100644
--- a/drivers/acpi/events/evmisc.c
+++ b/drivers/acpi/events/evmisc.c
@@ -196,15 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
196 notify_info->notify.value = (u16) notify_value; 196 notify_info->notify.value = (u16) notify_value;
197 notify_info->notify.handler_obj = handler_obj; 197 notify_info->notify.handler_obj = handler_obj;
198 198
199 acpi_ex_exit_interpreter(); 199 status =
200 200 acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
201 acpi_ev_notify_dispatch(notify_info); 201 notify_info);
202
203 status = acpi_ex_enter_interpreter();
204 if (ACPI_FAILURE(status)) { 202 if (ACPI_FAILURE(status)) {
205 return_ACPI_STATUS(status); 203 acpi_ut_delete_generic_state(notify_info);
206 } 204 }
207
208 } 205 }
209 206
210 if (!handler_obj) { 207 if (!handler_obj) {
@@ -323,8 +320,9 @@ static u32 acpi_ev_global_lock_handler(void *context)
323 acpi_gbl_global_lock_acquired = TRUE; 320 acpi_gbl_global_lock_acquired = TRUE;
324 /* Send a unit to the semaphore */ 321 /* Send a unit to the semaphore */
325 322
326 if (ACPI_FAILURE(acpi_os_signal_semaphore( 323 if (ACPI_FAILURE
327 acpi_gbl_global_lock_semaphore, 1))) { 324 (acpi_os_signal_semaphore
325 (acpi_gbl_global_lock_semaphore, 1))) {
328 ACPI_ERROR((AE_INFO, 326 ACPI_ERROR((AE_INFO,
329 "Could not signal Global Lock semaphore")); 327 "Could not signal Global Lock semaphore"));
330 } 328 }
@@ -450,7 +448,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
450 } 448 }
451 449
452 if (ACPI_FAILURE(status)) { 450 if (ACPI_FAILURE(status)) {
453 status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout); 451 status =
452 acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex,
453 timeout);
454 } 454 }
455 if (ACPI_FAILURE(status)) { 455 if (ACPI_FAILURE(status)) {
456 return_ACPI_STATUS(status); 456 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evregion.c b/drivers/acpi/events/evregion.c
index 96b0e8431748..e99f0c435a47 100644
--- a/drivers/acpi/events/evregion.c
+++ b/drivers/acpi/events/evregion.c
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
291 u32 bit_width, acpi_integer * value) 291 u32 bit_width, acpi_integer * value)
292{ 292{
293 acpi_status status; 293 acpi_status status;
294 acpi_status status2;
295 acpi_adr_space_handler handler; 294 acpi_adr_space_handler handler;
296 acpi_adr_space_setup region_setup; 295 acpi_adr_space_setup region_setup;
297 union acpi_operand_object *handler_desc; 296 union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
345 * setup will potentially execute control methods 344 * setup will potentially execute control methods
346 * (e.g., _REG method for this region) 345 * (e.g., _REG method for this region)
347 */ 346 */
348 acpi_ex_exit_interpreter(); 347 acpi_ex_relinquish_interpreter();
349 348
350 status = region_setup(region_obj, ACPI_REGION_ACTIVATE, 349 status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
351 handler_desc->address_space.context, 350 handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
353 352
354 /* Re-enter the interpreter */ 353 /* Re-enter the interpreter */
355 354
356 status2 = acpi_ex_enter_interpreter(); 355 acpi_ex_reacquire_interpreter();
357 if (ACPI_FAILURE(status2)) {
358 return_ACPI_STATUS(status2);
359 }
360 356
361 /* Check for failure of the Region Setup */ 357 /* Check for failure of the Region Setup */
362 358
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
409 * exit the interpreter because the handler *might* block -- we don't 405 * exit the interpreter because the handler *might* block -- we don't
410 * know what it will do, so we can't hold the lock on the intepreter. 406 * know what it will do, so we can't hold the lock on the intepreter.
411 */ 407 */
412 acpi_ex_exit_interpreter(); 408 acpi_ex_relinquish_interpreter();
413 } 409 }
414 410
415 /* Call the handler */ 411 /* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
430 * We just returned from a non-default handler, we must re-enter the 426 * We just returned from a non-default handler, we must re-enter the
431 * interpreter 427 * interpreter
432 */ 428 */
433 status2 = acpi_ex_enter_interpreter(); 429 acpi_ex_reacquire_interpreter();
434 if (ACPI_FAILURE(status2)) {
435 return_ACPI_STATUS(status2);
436 }
437 } 430 }
438 431
439 return_ACPI_STATUS(status); 432 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/events/evrgnini.c b/drivers/acpi/events/evrgnini.c
index a4fa7e6822a3..400d90fca966 100644
--- a/drivers/acpi/events/evrgnini.c
+++ b/drivers/acpi/events/evrgnini.c
@@ -228,7 +228,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
228 228
229 /* Install a handler for this PCI root bridge */ 229 /* Install a handler for this PCI root bridge */
230 230
231 status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL); 231 status =
232 acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
232 if (ACPI_FAILURE(status)) { 233 if (ACPI_FAILURE(status)) {
233 if (status == AE_SAME_HANDLER) { 234 if (status == AE_SAME_HANDLER) {
234 /* 235 /*
diff --git a/drivers/acpi/events/evxface.c b/drivers/acpi/events/evxface.c
index a3379bafa676..6d866a01f5f4 100644
--- a/drivers/acpi/events/evxface.c
+++ b/drivers/acpi/events/evxface.c
@@ -91,7 +91,6 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
91 91
92ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) 92ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
93#endif /* ACPI_FUTURE_USAGE */ 93#endif /* ACPI_FUTURE_USAGE */
94
95/******************************************************************************* 94/*******************************************************************************
96 * 95 *
97 * FUNCTION: acpi_install_fixed_event_handler 96 * FUNCTION: acpi_install_fixed_event_handler
@@ -768,11 +767,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
768 return (AE_BAD_PARAMETER); 767 return (AE_BAD_PARAMETER);
769 } 768 }
770 769
771 status = acpi_ex_enter_interpreter(); 770 /* Must lock interpreter to prevent race conditions */
772 if (ACPI_FAILURE(status)) {
773 return (status);
774 }
775 771
772 acpi_ex_enter_interpreter();
776 status = acpi_ev_acquire_global_lock(timeout); 773 status = acpi_ev_acquire_global_lock(timeout);
777 acpi_ex_exit_interpreter(); 774 acpi_ex_exit_interpreter();
778 775
diff --git a/drivers/acpi/events/evxfevnt.c b/drivers/acpi/events/evxfevnt.c
index 17065e98807c..9cbd3414a574 100644
--- a/drivers/acpi/events/evxfevnt.c
+++ b/drivers/acpi/events/evxfevnt.c
@@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
472} 472}
473 473
474ACPI_EXPORT_SYMBOL(acpi_clear_gpe) 474ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
475
476#ifdef ACPI_FUTURE_USAGE 475#ifdef ACPI_FUTURE_USAGE
477/******************************************************************************* 476/*******************************************************************************
478 * 477 *
@@ -568,7 +567,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
568 567
569ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) 568ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
570#endif /* ACPI_FUTURE_USAGE */ 569#endif /* ACPI_FUTURE_USAGE */
571
572/******************************************************************************* 570/*******************************************************************************
573 * 571 *
574 * FUNCTION: acpi_install_gpe_block 572 * FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/executer/exconvrt.c b/drivers/acpi/executer/exconvrt.c
index d470e8b1f4ea..79f2c0d42c06 100644
--- a/drivers/acpi/executer/exconvrt.c
+++ b/drivers/acpi/executer/exconvrt.c
@@ -512,9 +512,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
512 * Create a new string object and string buffer 512 * Create a new string object and string buffer
513 * (-1 because of extra separator included in string_length from above) 513 * (-1 because of extra separator included in string_length from above)
514 */ 514 */
515 return_desc = 515 return_desc = acpi_ut_create_string_object((acpi_size)
516 acpi_ut_create_string_object((acpi_size) 516 (string_length - 1));
517 (string_length - 1));
518 if (!return_desc) { 517 if (!return_desc) {
519 return_ACPI_STATUS(AE_NO_MEMORY); 518 return_ACPI_STATUS(AE_NO_MEMORY);
520 } 519 }
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index ae97812681a3..6e9a23e47fef 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -50,7 +50,6 @@
50 50
51#define _COMPONENT ACPI_EXECUTER 51#define _COMPONENT ACPI_EXECUTER
52ACPI_MODULE_NAME("excreate") 52ACPI_MODULE_NAME("excreate")
53
54#ifndef ACPI_NO_METHOD_EXECUTION 53#ifndef ACPI_NO_METHOD_EXECUTION
55/******************************************************************************* 54/*******************************************************************************
56 * 55 *
@@ -583,10 +582,7 @@ acpi_ex_create_method(u8 * aml_start,
583 * Get the sync_level. If method is serialized, a mutex will be 582 * Get the sync_level. If method is serialized, a mutex will be
584 * created for this method when it is parsed. 583 * created for this method when it is parsed.
585 */ 584 */
586 if (acpi_gbl_all_methods_serialized) { 585 if (method_flags & AML_METHOD_SERIALIZED) {
587 obj_desc->method.sync_level = 0;
588 obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
589 } else if (method_flags & AML_METHOD_SERIALIZED) {
590 /* 586 /*
591 * ACPI 1.0: sync_level = 0 587 * ACPI 1.0: sync_level = 0
592 * ACPI 2.0: sync_level = sync_level in method declaration 588 * ACPI 2.0: sync_level = sync_level in method declaration
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 1a73c14df2c5..51c9c29987c3 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
134static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { 134static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
135 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL}, 135 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
136 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"}, 136 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
137 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"}, 137 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
138 {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), 138 {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
139 "Acquire Depth"}, 139 "Acquire Depth"},
140 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} 140 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
@@ -451,9 +451,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
451 451
452 ACPI_FUNCTION_NAME(ex_dump_operand) 452 ACPI_FUNCTION_NAME(ex_dump_operand)
453 453
454 if (! 454 if (!((ACPI_LV_EXEC & acpi_dbg_level)
455 ((ACPI_LV_EXEC & acpi_dbg_level) 455 && (_COMPONENT & acpi_dbg_layer))) {
456 && (_COMPONENT & acpi_dbg_layer))) {
457 return; 456 return;
458 } 457 }
459 458
@@ -844,9 +843,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
844 ACPI_FUNCTION_ENTRY(); 843 ACPI_FUNCTION_ENTRY();
845 844
846 if (!flags) { 845 if (!flags) {
847 if (! 846 if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
848 ((ACPI_LV_OBJECTS & acpi_dbg_level) 847 && (_COMPONENT & acpi_dbg_layer))) {
849 && (_COMPONENT & acpi_dbg_layer))) {
850 return; 848 return;
851 } 849 }
852 } 850 }
@@ -1011,9 +1009,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
1011 } 1009 }
1012 1010
1013 if (!flags) { 1011 if (!flags) {
1014 if (! 1012 if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
1015 ((ACPI_LV_OBJECTS & acpi_dbg_level) 1013 && (_COMPONENT & acpi_dbg_layer))) {
1016 && (_COMPONENT & acpi_dbg_layer))) {
1017 return_VOID; 1014 return_VOID;
1018 } 1015 }
1019 } 1016 }
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 4eb883bda6ae..6748e3ef0997 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
66 * 66 *
67 ******************************************************************************/ 67 ******************************************************************************/
68 68
69void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc, 69void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
70 struct acpi_thread_state *thread)
71{ 70{
71 struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
72
72 if (!thread) { 73 if (!thread) {
73 return; 74 return;
74 } 75 }
@@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
173 174
174 /* Support for multiple acquires by the owning thread */ 175 /* Support for multiple acquires by the owning thread */
175 176
176 if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) { 177 if (obj_desc->mutex.owner_thread) {
177 /* 178 if (obj_desc->mutex.owner_thread->thread_id ==
178 * The mutex is already owned by this thread, just increment the 179 walk_state->thread->thread_id) {
179 * acquisition depth 180 /*
180 */ 181 * The mutex is already owned by this thread, just increment the
181 obj_desc->mutex.acquisition_depth++; 182 * acquisition depth
182 return_ACPI_STATUS(AE_OK); 183 */
184 obj_desc->mutex.acquisition_depth++;
185 return_ACPI_STATUS(AE_OK);
186 }
183 } 187 }
184 188
185 /* Acquire the mutex, wait if necessary. Special case for Global Lock */ 189 /* Acquire the mutex, wait if necessary. Special case for Global Lock */
@@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
202 206
203 /* Have the mutex: update mutex and walk info and save the sync_level */ 207 /* Have the mutex: update mutex and walk info and save the sync_level */
204 208
205 obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id(); 209 obj_desc->mutex.owner_thread = walk_state->thread;
206 obj_desc->mutex.acquisition_depth = 1; 210 obj_desc->mutex.acquisition_depth = 1;
207 obj_desc->mutex.original_sync_level = 211 obj_desc->mutex.original_sync_level =
208 walk_state->thread->current_sync_level; 212 walk_state->thread->current_sync_level;
@@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
242 246
243 /* The mutex must have been previously acquired in order to release it */ 247 /* The mutex must have been previously acquired in order to release it */
244 248
245 if (!obj_desc->mutex.owner_thread_id) { 249 if (!obj_desc->mutex.owner_thread) {
246 ACPI_ERROR((AE_INFO, 250 ACPI_ERROR((AE_INFO,
247 "Cannot release Mutex [%4.4s], not acquired", 251 "Cannot release Mutex [%4.4s], not acquired",
248 acpi_ut_get_node_name(obj_desc->mutex.node))); 252 acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -262,14 +266,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
262 * The Mutex is owned, but this thread must be the owner. 266 * The Mutex is owned, but this thread must be the owner.
263 * Special case for Global Lock, any thread can release 267 * Special case for Global Lock, any thread can release
264 */ 268 */
265 if ((obj_desc->mutex.owner_thread_id != 269 if ((obj_desc->mutex.owner_thread->thread_id !=
266 walk_state->thread->thread_id) 270 walk_state->thread->thread_id)
267 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) { 271 && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
268 ACPI_ERROR((AE_INFO, 272 ACPI_ERROR((AE_INFO,
269 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", 273 "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
270 (unsigned long)walk_state->thread->thread_id, 274 (unsigned long)walk_state->thread->thread_id,
271 acpi_ut_get_node_name(obj_desc->mutex.node), 275 acpi_ut_get_node_name(obj_desc->mutex.node),
272 (unsigned long)obj_desc->mutex.owner_thread_id)); 276 (unsigned long)obj_desc->mutex.owner_thread->
277 thread_id));
273 return_ACPI_STATUS(AE_AML_NOT_OWNER); 278 return_ACPI_STATUS(AE_AML_NOT_OWNER);
274 } 279 }
275 280
@@ -296,7 +301,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
296 301
297 /* Unlink the mutex from the owner's list */ 302 /* Unlink the mutex from the owner's list */
298 303
299 acpi_ex_unlink_mutex(obj_desc, walk_state->thread); 304 acpi_ex_unlink_mutex(obj_desc);
300 305
301 /* Release the mutex, special case for Global Lock */ 306 /* Release the mutex, special case for Global Lock */
302 307
@@ -308,7 +313,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
308 313
309 /* Update the mutex and restore sync_level */ 314 /* Update the mutex and restore sync_level */
310 315
311 obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; 316 obj_desc->mutex.owner_thread = NULL;
312 walk_state->thread->current_sync_level = 317 walk_state->thread->current_sync_level =
313 obj_desc->mutex.original_sync_level; 318 obj_desc->mutex.original_sync_level;
314 319
@@ -363,7 +368,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
363 368
364 /* Mark mutex unowned */ 369 /* Mark mutex unowned */
365 370
366 obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED; 371 obj_desc->mutex.owner_thread = NULL;
367 372
368 /* Update Thread sync_level (Last mutex is the important one) */ 373 /* Update Thread sync_level (Last mutex is the important one) */
369 374
diff --git a/drivers/acpi/executer/exnames.c b/drivers/acpi/executer/exnames.c
index 1ee4fb1175c6..308eae52dc05 100644
--- a/drivers/acpi/executer/exnames.c
+++ b/drivers/acpi/executer/exnames.c
@@ -177,8 +177,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
177 177
178 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n")); 178 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
179 179
180 for (index = 0; 180 for (index = 0; (index < ACPI_NAME_SIZE)
181 (index < ACPI_NAME_SIZE)
182 && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) { 181 && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
183 char_buf[index] = *aml_address++; 182 char_buf[index] = *aml_address++;
184 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index])); 183 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
diff --git a/drivers/acpi/executer/exprep.c b/drivers/acpi/executer/exprep.c
index a6696621ff1b..efe5d4b461a4 100644
--- a/drivers/acpi/executer/exprep.c
+++ b/drivers/acpi/executer/exprep.c
@@ -242,7 +242,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
242 obj_desc->common_field.bit_length, 242 obj_desc->common_field.bit_length,
243 0xFFFFFFFF 243 0xFFFFFFFF
244 /* Temp until we pass region_length as parameter */ 244 /* Temp until we pass region_length as parameter */
245 ); 245 );
246 bit_length = byte_alignment * 8; 246 bit_length = byte_alignment * 8;
247#endif 247#endif
248 248
diff --git a/drivers/acpi/executer/exresop.c b/drivers/acpi/executer/exresop.c
index ba761862a599..09d897b3f6d5 100644
--- a/drivers/acpi/executer/exresop.c
+++ b/drivers/acpi/executer/exresop.c
@@ -354,8 +354,7 @@ acpi_ex_resolve_operands(u16 opcode,
354 if ((opcode == AML_STORE_OP) && 354 if ((opcode == AML_STORE_OP) &&
355 (ACPI_GET_OBJECT_TYPE(*stack_ptr) == 355 (ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
356 ACPI_TYPE_LOCAL_REFERENCE) 356 ACPI_TYPE_LOCAL_REFERENCE)
357 && ((*stack_ptr)->reference.opcode == 357 && ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) {
358 AML_INDEX_OP)) {
359 goto next_operand; 358 goto next_operand;
360 } 359 }
361 break; 360 break;
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index b2edf620ba89..9460baff3032 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) 66acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
67{ 67{
68 acpi_status status; 68 acpi_status status;
69 acpi_status status2;
70 69
71 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); 70 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
72 71
@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
79 78
80 /* We must wait, so unlock the interpreter */ 79 /* We must wait, so unlock the interpreter */
81 80
82 acpi_ex_exit_interpreter(); 81 acpi_ex_relinquish_interpreter();
83 82
84 status = acpi_os_wait_semaphore(semaphore, 1, timeout); 83 status = acpi_os_wait_semaphore(semaphore, 1, timeout);
85 84
@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
89 88
90 /* Reacquire the interpreter */ 89 /* Reacquire the interpreter */
91 90
92 status2 = acpi_ex_enter_interpreter(); 91 acpi_ex_reacquire_interpreter();
93 if (ACPI_FAILURE(status2)) {
94
95 /* Report fatal error, could not acquire interpreter */
96
97 return_ACPI_STATUS(status2);
98 }
99 } 92 }
100 93
101 return_ACPI_STATUS(status); 94 return_ACPI_STATUS(status);
@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
119acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) 112acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
120{ 113{
121 acpi_status status; 114 acpi_status status;
122 acpi_status status2;
123 115
124 ACPI_FUNCTION_TRACE(ex_system_wait_mutex); 116 ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
125 117
@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
132 124
133 /* We must wait, so unlock the interpreter */ 125 /* We must wait, so unlock the interpreter */
134 126
135 acpi_ex_exit_interpreter(); 127 acpi_ex_relinquish_interpreter();
136 128
137 status = acpi_os_acquire_mutex(mutex, timeout); 129 status = acpi_os_acquire_mutex(mutex, timeout);
138 130
@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
142 134
143 /* Reacquire the interpreter */ 135 /* Reacquire the interpreter */
144 136
145 status2 = acpi_ex_enter_interpreter(); 137 acpi_ex_reacquire_interpreter();
146 if (ACPI_FAILURE(status2)) {
147
148 /* Report fatal error, could not acquire interpreter */
149
150 return_ACPI_STATUS(status2);
151 }
152 } 138 }
153 139
154 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
@@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
209 195
210acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) 196acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
211{ 197{
212 acpi_status status;
213
214 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
215 199
216 /* Since this thread will sleep, we must release the interpreter */ 200 /* Since this thread will sleep, we must release the interpreter */
217 201
218 acpi_ex_exit_interpreter(); 202 acpi_ex_relinquish_interpreter();
219 203
220 acpi_os_sleep(how_long); 204 acpi_os_sleep(how_long);
221 205
222 /* And now we must get the interpreter again */ 206 /* And now we must get the interpreter again */
223 207
224 status = acpi_ex_enter_interpreter(); 208 acpi_ex_reacquire_interpreter();
225 return (status); 209 return (AE_OK);
226} 210}
227 211
228/******************************************************************************* 212/*******************************************************************************
diff --git a/drivers/acpi/executer/exutils.c b/drivers/acpi/executer/exutils.c
index aea461f3a48c..6b0aeccbb69b 100644
--- a/drivers/acpi/executer/exutils.c
+++ b/drivers/acpi/executer/exutils.c
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
76 * 76 *
77 * PARAMETERS: None 77 * PARAMETERS: None
78 * 78 *
79 * RETURN: Status 79 * RETURN: None
80 * 80 *
81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter 81 * DESCRIPTION: Enter the interpreter execution region. Failure to enter
82 * the interpreter region is a fatal system error 82 * the interpreter region is a fatal system error. Used in
83 * conjunction with exit_interpreter.
83 * 84 *
84 ******************************************************************************/ 85 ******************************************************************************/
85 86
86acpi_status acpi_ex_enter_interpreter(void) 87void acpi_ex_enter_interpreter(void)
87{ 88{
88 acpi_status status; 89 acpi_status status;
89 90
@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
91 92
92 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); 93 status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
93 if (ACPI_FAILURE(status)) { 94 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); 95 ACPI_ERROR((AE_INFO,
96 "Could not acquire AML Interpreter mutex"));
95 } 97 }
96 98
97 return_ACPI_STATUS(status); 99 return_VOID;
98} 100}
99 101
100/******************************************************************************* 102/*******************************************************************************
101 * 103 *
102 * FUNCTION: acpi_ex_exit_interpreter 104 * FUNCTION: acpi_ex_reacquire_interpreter
103 * 105 *
104 * PARAMETERS: None 106 * PARAMETERS: None
105 * 107 *
106 * RETURN: None 108 * RETURN: None
107 * 109 *
108 * DESCRIPTION: Exit the interpreter execution region 110 * DESCRIPTION: Reacquire the interpreter execution region from within the
111 * interpreter code. Failure to enter the interpreter region is a
112 * fatal system error. Used in conjuction with
113 * relinquish_interpreter
114 *
115 ******************************************************************************/
116
117void acpi_ex_reacquire_interpreter(void)
118{
119 ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
120
121 /*
122 * If the global serialized flag is set, do not release the interpreter,
123 * since it was not actually released by acpi_ex_relinquish_interpreter.
124 * This forces the interpreter to be single threaded.
125 */
126 if (!acpi_gbl_all_methods_serialized) {
127 acpi_ex_enter_interpreter();
128 }
129
130 return_VOID;
131}
132
133/*******************************************************************************
134 *
135 * FUNCTION: acpi_ex_exit_interpreter
136 *
137 * PARAMETERS: None
138 *
139 * RETURN: None
109 * 140 *
110 * Cases where the interpreter is unlocked: 141 * DESCRIPTION: Exit the interpreter execution region. This is the top level
111 * 1) Completion of the execution of a control method 142 * routine used to exit the interpreter when all processing has
112 * 2) Method blocked on a Sleep() AML opcode 143 * been completed.
113 * 3) Method blocked on an Acquire() AML opcode
114 * 4) Method blocked on a Wait() AML opcode
115 * 5) Method blocked to acquire the global lock
116 * 6) Method blocked to execute a serialized control method that is
117 * already executing
118 * 7) About to invoke a user-installed opregion handler
119 * 144 *
120 ******************************************************************************/ 145 ******************************************************************************/
121 146
@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
127 152
128 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); 153 status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
129 if (ACPI_FAILURE(status)) { 154 if (ACPI_FAILURE(status)) {
130 ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); 155 ACPI_ERROR((AE_INFO,
156 "Could not release AML Interpreter mutex"));
157 }
158
159 return_VOID;
160}
161
162/*******************************************************************************
163 *
164 * FUNCTION: acpi_ex_relinquish_interpreter
165 *
166 * PARAMETERS: None
167 *
168 * RETURN: None
169 *
170 * DESCRIPTION: Exit the interpreter execution region, from within the
171 * interpreter - before attempting an operation that will possibly
172 * block the running thread.
173 *
174 * Cases where the interpreter is unlocked internally
175 * 1) Method to be blocked on a Sleep() AML opcode
176 * 2) Method to be blocked on an Acquire() AML opcode
177 * 3) Method to be blocked on a Wait() AML opcode
178 * 4) Method to be blocked to acquire the global lock
179 * 5) Method to be blocked waiting to execute a serialized control method
180 * that is currently executing
181 * 6) About to invoke a user-installed opregion handler
182 *
183 ******************************************************************************/
184
185void acpi_ex_relinquish_interpreter(void)
186{
187 ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
188
189 /*
190 * If the global serialized flag is set, do not release the interpreter.
191 * This forces the interpreter to be single threaded.
192 */
193 if (!acpi_gbl_all_methods_serialized) {
194 acpi_ex_exit_interpreter();
131 } 195 }
132 196
133 return_VOID; 197 return_VOID;
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
141 * 205 *
142 * RETURN: none 206 * RETURN: none
143 * 207 *
144 * DESCRIPTION: Truncate a number to 32-bits if the currently executing method 208 * DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
145 * belongs to a 32-bit ACPI table. 209 * 32-bit, as determined by the revision of the DSDT.
146 * 210 *
147 ******************************************************************************/ 211 ******************************************************************************/
148 212
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index c84b1faba28c..76c525dc590b 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -152,7 +152,6 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
152 152
153ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector) 153ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
154#endif 154#endif
155
156/******************************************************************************* 155/*******************************************************************************
157 * 156 *
158 * FUNCTION: acpi_enter_sleep_state_prep 157 * FUNCTION: acpi_enter_sleep_state_prep
diff --git a/drivers/acpi/namespace/nseval.c b/drivers/acpi/namespace/nseval.c
index 26fd0dd6953d..97b2ac57c16b 100644
--- a/drivers/acpi/namespace/nseval.c
+++ b/drivers/acpi/namespace/nseval.c
@@ -75,7 +75,7 @@ ACPI_MODULE_NAME("nseval")
75 * MUTEX: Locks interpreter 75 * MUTEX: Locks interpreter
76 * 76 *
77 ******************************************************************************/ 77 ******************************************************************************/
78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info) 78acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
79{ 79{
80 acpi_status status; 80 acpi_status status;
81 81
@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
154 * Execute the method via the interpreter. The interpreter is locked 154 * Execute the method via the interpreter. The interpreter is locked
155 * here before calling into the AML parser 155 * here before calling into the AML parser
156 */ 156 */
157 status = acpi_ex_enter_interpreter(); 157 acpi_ex_enter_interpreter();
158 if (ACPI_FAILURE(status)) {
159 return_ACPI_STATUS(status);
160 }
161
162 status = acpi_ps_execute_method(info); 158 status = acpi_ps_execute_method(info);
163 acpi_ex_exit_interpreter(); 159 acpi_ex_exit_interpreter();
164 } else { 160 } else {
@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
182 * resolution, we must lock it because we could access an opregion. 178 * resolution, we must lock it because we could access an opregion.
183 * The opregion access code assumes that the interpreter is locked. 179 * The opregion access code assumes that the interpreter is locked.
184 */ 180 */
185 status = acpi_ex_enter_interpreter(); 181 acpi_ex_enter_interpreter();
186 if (ACPI_FAILURE(status)) {
187 return_ACPI_STATUS(status);
188 }
189 182
190 /* Function has a strange interface */ 183 /* Function has a strange interface */
191 184
diff --git a/drivers/acpi/namespace/nsinit.c b/drivers/acpi/namespace/nsinit.c
index c4ab615f77fe..33db2241044e 100644
--- a/drivers/acpi/namespace/nsinit.c
+++ b/drivers/acpi/namespace/nsinit.c
@@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
214 u32 level, void *context, void **return_value) 214 u32 level, void *context, void **return_value)
215{ 215{
216 acpi_object_type type; 216 acpi_object_type type;
217 acpi_status status; 217 acpi_status status = AE_OK;
218 struct acpi_init_walk_info *info = 218 struct acpi_init_walk_info *info =
219 (struct acpi_init_walk_info *)context; 219 (struct acpi_init_walk_info *)context;
220 struct acpi_namespace_node *node = 220 struct acpi_namespace_node *node =
@@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
268 /* 268 /*
269 * Must lock the interpreter before executing AML code 269 * Must lock the interpreter before executing AML code
270 */ 270 */
271 status = acpi_ex_enter_interpreter(); 271 acpi_ex_enter_interpreter();
272 if (ACPI_FAILURE(status)) {
273 return (status);
274 }
275 272
276 /* 273 /*
277 * Each of these types can contain executable AML code within the 274 * Each of these types can contain executable AML code within the
diff --git a/drivers/acpi/namespace/nswalk.c b/drivers/acpi/namespace/nswalk.c
index 94eb8f332d94..280b8357c46c 100644
--- a/drivers/acpi/namespace/nswalk.c
+++ b/drivers/acpi/namespace/nswalk.c
@@ -65,10 +65,8 @@ ACPI_MODULE_NAME("nswalk")
65 * within Scope is returned. 65 * within Scope is returned.
66 * 66 *
67 ******************************************************************************/ 67 ******************************************************************************/
68struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, 68struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
69 struct acpi_namespace_node 69 *parent_node, struct acpi_namespace_node
70 *parent_node,
71 struct acpi_namespace_node
72 *child_node) 70 *child_node)
73{ 71{
74 struct acpi_namespace_node *next_node = NULL; 72 struct acpi_namespace_node *next_node = NULL;
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index 8904d0fae6a2..be4f2899de74 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -48,7 +48,6 @@
48 48
49#define _COMPONENT ACPI_NAMESPACE 49#define _COMPONENT ACPI_NAMESPACE
50ACPI_MODULE_NAME("nsxfeval") 50ACPI_MODULE_NAME("nsxfeval")
51
52#ifdef ACPI_FUTURE_USAGE 51#ifdef ACPI_FUTURE_USAGE
53/******************************************************************************* 52/*******************************************************************************
54 * 53 *
@@ -73,8 +72,8 @@ ACPI_MODULE_NAME("nsxfeval")
73acpi_status 72acpi_status
74acpi_evaluate_object_typed(acpi_handle handle, 73acpi_evaluate_object_typed(acpi_handle handle,
75 acpi_string pathname, 74 acpi_string pathname,
76 struct acpi_object_list * external_params, 75 struct acpi_object_list *external_params,
77 struct acpi_buffer * return_buffer, 76 struct acpi_buffer *return_buffer,
78 acpi_object_type return_type) 77 acpi_object_type return_type)
79{ 78{
80 acpi_status status; 79 acpi_status status;
@@ -143,7 +142,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
143 142
144ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed) 143ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
145#endif /* ACPI_FUTURE_USAGE */ 144#endif /* ACPI_FUTURE_USAGE */
146
147/******************************************************************************* 145/*******************************************************************************
148 * 146 *
149 * FUNCTION: acpi_evaluate_object 147 * FUNCTION: acpi_evaluate_object
@@ -170,7 +168,6 @@ acpi_evaluate_object(acpi_handle handle,
170 struct acpi_buffer *return_buffer) 168 struct acpi_buffer *return_buffer)
171{ 169{
172 acpi_status status; 170 acpi_status status;
173 acpi_status status2;
174 struct acpi_evaluate_info *info; 171 struct acpi_evaluate_info *info;
175 acpi_size buffer_space_needed; 172 acpi_size buffer_space_needed;
176 u32 i; 173 u32 i;
@@ -329,14 +326,12 @@ acpi_evaluate_object(acpi_handle handle,
329 * Delete the internal return object. NOTE: Interpreter must be 326 * Delete the internal return object. NOTE: Interpreter must be
330 * locked to avoid race condition. 327 * locked to avoid race condition.
331 */ 328 */
332 status2 = acpi_ex_enter_interpreter(); 329 acpi_ex_enter_interpreter();
333 if (ACPI_SUCCESS(status2)) {
334 330
335 /* Remove one reference on the return object (should delete it) */ 331 /* Remove one reference on the return object (should delete it) */
336 332
337 acpi_ut_remove_reference(info->return_object); 333 acpi_ut_remove_reference(info->return_object);
338 acpi_ex_exit_interpreter(); 334 acpi_ex_exit_interpreter();
339 }
340 } 335 }
341 336
342 cleanup: 337 cleanup:
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c2bed56915e1..b998340e23d4 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
71static acpi_osd_handler acpi_irq_handler; 71static acpi_osd_handler acpi_irq_handler;
72static void *acpi_irq_context; 72static void *acpi_irq_context;
73static struct workqueue_struct *kacpid_wq; 73static struct workqueue_struct *kacpid_wq;
74static struct workqueue_struct *kacpi_notify_wq;
74 75
75static void __init acpi_request_region (struct acpi_generic_address *addr, 76static void __init acpi_request_region (struct acpi_generic_address *addr,
76 unsigned int length, char *desc) 77 unsigned int length, char *desc)
@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
137 return AE_NULL_ENTRY; 138 return AE_NULL_ENTRY;
138 } 139 }
139 kacpid_wq = create_singlethread_workqueue("kacpid"); 140 kacpid_wq = create_singlethread_workqueue("kacpid");
141 kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
140 BUG_ON(!kacpid_wq); 142 BUG_ON(!kacpid_wq);
141 143 BUG_ON(!kacpi_notify_wq);
142 return AE_OK; 144 return AE_OK;
143} 145}
144 146
@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
150 } 152 }
151 153
152 destroy_workqueue(kacpid_wq); 154 destroy_workqueue(kacpid_wq);
155 destroy_workqueue(kacpi_notify_wq);
153 156
154 return AE_OK; 157 return AE_OK;
155} 158}
@@ -603,6 +606,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
603static void acpi_os_execute_deferred(struct work_struct *work) 606static void acpi_os_execute_deferred(struct work_struct *work)
604{ 607{
605 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); 608 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
609 if (!dpc) {
610 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
611 return;
612 }
613
614 dpc->function(dpc->context);
615 kfree(dpc);
616
617 /* Yield cpu to notify thread */
618 cond_resched();
619
620 return;
621}
622
623static void acpi_os_execute_notify(struct work_struct *work)
624{
625 struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
606 626
607 if (!dpc) { 627 if (!dpc) {
608 printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); 628 printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
637 acpi_status status = AE_OK; 657 acpi_status status = AE_OK;
638 struct acpi_os_dpc *dpc; 658 struct acpi_os_dpc *dpc;
639 659
640 ACPI_FUNCTION_TRACE("os_queue_for_execution");
641
642 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 660 ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
643 "Scheduling function [%p(%p)] for deferred execution.\n", 661 "Scheduling function [%p(%p)] for deferred execution.\n",
644 function, context)); 662 function, context));
645 663
646 if (!function) 664 if (!function)
647 return_ACPI_STATUS(AE_BAD_PARAMETER); 665 return AE_BAD_PARAMETER;
648 666
649 /* 667 /*
650 * Allocate/initialize DPC structure. Note that this memory will be 668 * Allocate/initialize DPC structure. Note that this memory will be
@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
662 dpc->function = function; 680 dpc->function = function;
663 dpc->context = context; 681 dpc->context = context;
664 682
665 INIT_WORK(&dpc->work, acpi_os_execute_deferred); 683 if (type == OSL_NOTIFY_HANDLER) {
666 if (!queue_work(kacpid_wq, &dpc->work)) { 684 INIT_WORK(&dpc->work, acpi_os_execute_notify);
667 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 685 if (!queue_work(kacpi_notify_wq, &dpc->work)) {
686 status = AE_ERROR;
687 kfree(dpc);
688 }
689 } else {
690 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
691 if (!queue_work(kacpid_wq, &dpc->work)) {
692 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
668 "Call to queue_work() failed.\n")); 693 "Call to queue_work() failed.\n"));
669 kfree(dpc); 694 status = AE_ERROR;
670 status = AE_ERROR; 695 kfree(dpc);
696 }
671 } 697 }
672
673 return_ACPI_STATUS(status); 698 return_ACPI_STATUS(status);
674} 699}
675 700
diff --git a/drivers/acpi/parser/psopcode.c b/drivers/acpi/parser/psopcode.c
index 16d8b6cc3c22..9296e86761d7 100644
--- a/drivers/acpi/parser/psopcode.c
+++ b/drivers/acpi/parser/psopcode.c
@@ -185,459 +185,453 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
185/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */ 185/* Index Name Parser Args Interpreter Args ObjectType Class Type Flags */
186 186
187/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER, 187/* 00 */ ACPI_OP("Zero", ARGP_ZERO_OP, ARGI_ZERO_OP, ACPI_TYPE_INTEGER,
188 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 188 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
189/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER, 189/* 01 */ ACPI_OP("One", ARGP_ONE_OP, ARGI_ONE_OP, ACPI_TYPE_INTEGER,
190 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 190 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
191/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP, 191/* 02 */ ACPI_OP("Alias", ARGP_ALIAS_OP, ARGI_ALIAS_OP,
192 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT, 192 ACPI_TYPE_LOCAL_ALIAS, AML_CLASS_NAMED_OBJECT,
193 AML_TYPE_NAMED_SIMPLE, 193 AML_TYPE_NAMED_SIMPLE,
194 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 194 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
195 AML_NSNODE | AML_NAMED), 195 AML_NSNODE | AML_NAMED),
196/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY, 196/* 03 */ ACPI_OP("Name", ARGP_NAME_OP, ARGI_NAME_OP, ACPI_TYPE_ANY,
197 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX, 197 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
198 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 198 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
199 AML_NSNODE | AML_NAMED), 199 AML_NSNODE | AML_NAMED),
200/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP, 200/* 04 */ ACPI_OP("ByteConst", ARGP_BYTE_OP, ARGI_BYTE_OP,
201 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 201 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
202 AML_TYPE_LITERAL, AML_CONSTANT), 202 AML_TYPE_LITERAL, AML_CONSTANT),
203/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP, 203/* 05 */ ACPI_OP("WordConst", ARGP_WORD_OP, ARGI_WORD_OP,
204 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 204 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
205 AML_TYPE_LITERAL, AML_CONSTANT), 205 AML_TYPE_LITERAL, AML_CONSTANT),
206/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP, 206/* 06 */ ACPI_OP("DwordConst", ARGP_DWORD_OP, ARGI_DWORD_OP,
207 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 207 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
208 AML_TYPE_LITERAL, AML_CONSTANT), 208 AML_TYPE_LITERAL, AML_CONSTANT),
209/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP, 209/* 07 */ ACPI_OP("String", ARGP_STRING_OP, ARGI_STRING_OP,
210 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT, 210 ACPI_TYPE_STRING, AML_CLASS_ARGUMENT,
211 AML_TYPE_LITERAL, AML_CONSTANT), 211 AML_TYPE_LITERAL, AML_CONSTANT),
212/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP, 212/* 08 */ ACPI_OP("Scope", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
213 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT, 213 ACPI_TYPE_LOCAL_SCOPE, AML_CLASS_NAMED_OBJECT,
214 AML_TYPE_NAMED_NO_OBJ, 214 AML_TYPE_NAMED_NO_OBJ,
215 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 215 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
216 AML_NSNODE | AML_NAMED), 216 AML_NSNODE | AML_NAMED),
217/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP, 217/* 09 */ ACPI_OP("Buffer", ARGP_BUFFER_OP, ARGI_BUFFER_OP,
218 ACPI_TYPE_BUFFER, AML_CLASS_CREATE, 218 ACPI_TYPE_BUFFER, AML_CLASS_CREATE,
219 AML_TYPE_CREATE_OBJECT, 219 AML_TYPE_CREATE_OBJECT,
220 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), 220 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
221/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP, 221/* 0A */ ACPI_OP("Package", ARGP_PACKAGE_OP, ARGI_PACKAGE_OP,
222 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE, 222 ACPI_TYPE_PACKAGE, AML_CLASS_CREATE,
223 AML_TYPE_CREATE_OBJECT, 223 AML_TYPE_CREATE_OBJECT,
224 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT), 224 AML_HAS_ARGS | AML_DEFER | AML_CONSTANT),
225/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP, 225/* 0B */ ACPI_OP("Method", ARGP_METHOD_OP, ARGI_METHOD_OP,
226 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT, 226 ACPI_TYPE_METHOD, AML_CLASS_NAMED_OBJECT,
227 AML_TYPE_NAMED_COMPLEX, 227 AML_TYPE_NAMED_COMPLEX,
228 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 228 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
229 AML_NSNODE | AML_NAMED | AML_DEFER), 229 AML_NSNODE | AML_NAMED | AML_DEFER),
230/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0, 230/* 0C */ ACPI_OP("Local0", ARGP_LOCAL0, ARGI_LOCAL0,
231 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 231 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
232 AML_TYPE_LOCAL_VARIABLE, 0), 232 AML_TYPE_LOCAL_VARIABLE, 0),
233/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1, 233/* 0D */ ACPI_OP("Local1", ARGP_LOCAL1, ARGI_LOCAL1,
234 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 234 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
235 AML_TYPE_LOCAL_VARIABLE, 0), 235 AML_TYPE_LOCAL_VARIABLE, 0),
236/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2, 236/* 0E */ ACPI_OP("Local2", ARGP_LOCAL2, ARGI_LOCAL2,
237 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 237 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
238 AML_TYPE_LOCAL_VARIABLE, 0), 238 AML_TYPE_LOCAL_VARIABLE, 0),
239/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3, 239/* 0F */ ACPI_OP("Local3", ARGP_LOCAL3, ARGI_LOCAL3,
240 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 240 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
241 AML_TYPE_LOCAL_VARIABLE, 0), 241 AML_TYPE_LOCAL_VARIABLE, 0),
242/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4, 242/* 10 */ ACPI_OP("Local4", ARGP_LOCAL4, ARGI_LOCAL4,
243 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 243 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
244 AML_TYPE_LOCAL_VARIABLE, 0), 244 AML_TYPE_LOCAL_VARIABLE, 0),
245/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5, 245/* 11 */ ACPI_OP("Local5", ARGP_LOCAL5, ARGI_LOCAL5,
246 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 246 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
247 AML_TYPE_LOCAL_VARIABLE, 0), 247 AML_TYPE_LOCAL_VARIABLE, 0),
248/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6, 248/* 12 */ ACPI_OP("Local6", ARGP_LOCAL6, ARGI_LOCAL6,
249 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 249 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
250 AML_TYPE_LOCAL_VARIABLE, 0), 250 AML_TYPE_LOCAL_VARIABLE, 0),
251/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7, 251/* 13 */ ACPI_OP("Local7", ARGP_LOCAL7, ARGI_LOCAL7,
252 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 252 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
253 AML_TYPE_LOCAL_VARIABLE, 0), 253 AML_TYPE_LOCAL_VARIABLE, 0),
254/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0, 254/* 14 */ ACPI_OP("Arg0", ARGP_ARG0, ARGI_ARG0,
255 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 255 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
256 AML_TYPE_METHOD_ARGUMENT, 0), 256 AML_TYPE_METHOD_ARGUMENT, 0),
257/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1, 257/* 15 */ ACPI_OP("Arg1", ARGP_ARG1, ARGI_ARG1,
258 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 258 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
259 AML_TYPE_METHOD_ARGUMENT, 0), 259 AML_TYPE_METHOD_ARGUMENT, 0),
260/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2, 260/* 16 */ ACPI_OP("Arg2", ARGP_ARG2, ARGI_ARG2,
261 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 261 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
262 AML_TYPE_METHOD_ARGUMENT, 0), 262 AML_TYPE_METHOD_ARGUMENT, 0),
263/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3, 263/* 17 */ ACPI_OP("Arg3", ARGP_ARG3, ARGI_ARG3,
264 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 264 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
265 AML_TYPE_METHOD_ARGUMENT, 0), 265 AML_TYPE_METHOD_ARGUMENT, 0),
266/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4, 266/* 18 */ ACPI_OP("Arg4", ARGP_ARG4, ARGI_ARG4,
267 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 267 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
268 AML_TYPE_METHOD_ARGUMENT, 0), 268 AML_TYPE_METHOD_ARGUMENT, 0),
269/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5, 269/* 19 */ ACPI_OP("Arg5", ARGP_ARG5, ARGI_ARG5,
270 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 270 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
271 AML_TYPE_METHOD_ARGUMENT, 0), 271 AML_TYPE_METHOD_ARGUMENT, 0),
272/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6, 272/* 1A */ ACPI_OP("Arg6", ARGP_ARG6, ARGI_ARG6,
273 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 273 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
274 AML_TYPE_METHOD_ARGUMENT, 0), 274 AML_TYPE_METHOD_ARGUMENT, 0),
275/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY, 275/* 1B */ ACPI_OP("Store", ARGP_STORE_OP, ARGI_STORE_OP, ACPI_TYPE_ANY,
276 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 276 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
277 AML_FLAGS_EXEC_1A_1T_1R), 277 AML_FLAGS_EXEC_1A_1T_1R),
278/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY, 278/* 1C */ ACPI_OP("RefOf", ARGP_REF_OF_OP, ARGI_REF_OF_OP, ACPI_TYPE_ANY,
279 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, 279 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
280 AML_FLAGS_EXEC_1A_0T_1R), 280 AML_FLAGS_EXEC_1A_0T_1R),
281/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY, 281/* 1D */ ACPI_OP("Add", ARGP_ADD_OP, ARGI_ADD_OP, ACPI_TYPE_ANY,
282 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 282 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
283 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 283 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
284/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP, 284/* 1E */ ACPI_OP("Concatenate", ARGP_CONCAT_OP, ARGI_CONCAT_OP,
285 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 285 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
286 AML_TYPE_EXEC_2A_1T_1R, 286 AML_TYPE_EXEC_2A_1T_1R,
287 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 287 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
288/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP, 288/* 1F */ ACPI_OP("Subtract", ARGP_SUBTRACT_OP, ARGI_SUBTRACT_OP,
289 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 289 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
290 AML_TYPE_EXEC_2A_1T_1R, 290 AML_TYPE_EXEC_2A_1T_1R,
291 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 291 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
292/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP, 292/* 20 */ ACPI_OP("Increment", ARGP_INCREMENT_OP, ARGI_INCREMENT_OP,
293 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 293 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
294 AML_TYPE_EXEC_1A_0T_1R, 294 AML_TYPE_EXEC_1A_0T_1R,
295 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 295 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
296/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP, 296/* 21 */ ACPI_OP("Decrement", ARGP_DECREMENT_OP, ARGI_DECREMENT_OP,
297 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 297 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
298 AML_TYPE_EXEC_1A_0T_1R, 298 AML_TYPE_EXEC_1A_0T_1R,
299 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 299 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
300/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP, 300/* 22 */ ACPI_OP("Multiply", ARGP_MULTIPLY_OP, ARGI_MULTIPLY_OP,
301 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 301 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
302 AML_TYPE_EXEC_2A_1T_1R, 302 AML_TYPE_EXEC_2A_1T_1R,
303 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 303 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
304/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP, 304/* 23 */ ACPI_OP("Divide", ARGP_DIVIDE_OP, ARGI_DIVIDE_OP,
305 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 305 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
306 AML_TYPE_EXEC_2A_2T_1R, 306 AML_TYPE_EXEC_2A_2T_1R,
307 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT), 307 AML_FLAGS_EXEC_2A_2T_1R | AML_CONSTANT),
308/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP, 308/* 24 */ ACPI_OP("ShiftLeft", ARGP_SHIFT_LEFT_OP, ARGI_SHIFT_LEFT_OP,
309 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 309 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
310 AML_TYPE_EXEC_2A_1T_1R, 310 AML_TYPE_EXEC_2A_1T_1R,
311 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 311 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
312/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP, 312/* 25 */ ACPI_OP("ShiftRight", ARGP_SHIFT_RIGHT_OP, ARGI_SHIFT_RIGHT_OP,
313 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 313 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
314 AML_TYPE_EXEC_2A_1T_1R, 314 AML_TYPE_EXEC_2A_1T_1R,
315 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 315 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
316/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY, 316/* 26 */ ACPI_OP("And", ARGP_BIT_AND_OP, ARGI_BIT_AND_OP, ACPI_TYPE_ANY,
317 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 317 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
318 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 318 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
319/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP, 319/* 27 */ ACPI_OP("NAnd", ARGP_BIT_NAND_OP, ARGI_BIT_NAND_OP,
320 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 320 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
321 AML_TYPE_EXEC_2A_1T_1R, 321 AML_TYPE_EXEC_2A_1T_1R,
322 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 322 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
323/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY, 323/* 28 */ ACPI_OP("Or", ARGP_BIT_OR_OP, ARGI_BIT_OR_OP, ACPI_TYPE_ANY,
324 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 324 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
325 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 325 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
326/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY, 326/* 29 */ ACPI_OP("NOr", ARGP_BIT_NOR_OP, ARGI_BIT_NOR_OP, ACPI_TYPE_ANY,
327 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 327 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
328 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 328 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
329/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY, 329/* 2A */ ACPI_OP("XOr", ARGP_BIT_XOR_OP, ARGI_BIT_XOR_OP, ACPI_TYPE_ANY,
330 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 330 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
331 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT), 331 AML_FLAGS_EXEC_2A_1T_1R | AML_MATH | AML_CONSTANT),
332/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY, 332/* 2B */ ACPI_OP("Not", ARGP_BIT_NOT_OP, ARGI_BIT_NOT_OP, ACPI_TYPE_ANY,
333 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 333 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
334 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 334 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
335/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP, 335/* 2C */ ACPI_OP("FindSetLeftBit", ARGP_FIND_SET_LEFT_BIT_OP,
336 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY, 336 ARGI_FIND_SET_LEFT_BIT_OP, ACPI_TYPE_ANY,
337 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 337 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
338 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 338 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
339/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP, 339/* 2D */ ACPI_OP("FindSetRightBit", ARGP_FIND_SET_RIGHT_BIT_OP,
340 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY, 340 ARGI_FIND_SET_RIGHT_BIT_OP, ACPI_TYPE_ANY,
341 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 341 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
342 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 342 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
343/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP, 343/* 2E */ ACPI_OP("DerefOf", ARGP_DEREF_OF_OP, ARGI_DEREF_OF_OP,
344 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 344 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
345 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R), 345 AML_TYPE_EXEC_1A_0T_1R, AML_FLAGS_EXEC_1A_0T_1R),
346/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP, 346/* 2F */ ACPI_OP("Notify", ARGP_NOTIFY_OP, ARGI_NOTIFY_OP,
347 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 347 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
348 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R), 348 AML_TYPE_EXEC_2A_0T_0R, AML_FLAGS_EXEC_2A_0T_0R),
349/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP, 349/* 30 */ ACPI_OP("SizeOf", ARGP_SIZE_OF_OP, ARGI_SIZE_OF_OP,
350 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 350 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
351 AML_TYPE_EXEC_1A_0T_1R, 351 AML_TYPE_EXEC_1A_0T_1R,
352 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), 352 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
353/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY, 353/* 31 */ ACPI_OP("Index", ARGP_INDEX_OP, ARGI_INDEX_OP, ACPI_TYPE_ANY,
354 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 354 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
355 AML_FLAGS_EXEC_2A_1T_1R), 355 AML_FLAGS_EXEC_2A_1T_1R),
356/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY, 356/* 32 */ ACPI_OP("Match", ARGP_MATCH_OP, ARGI_MATCH_OP, ACPI_TYPE_ANY,
357 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R, 357 AML_CLASS_EXECUTE, AML_TYPE_EXEC_6A_0T_1R,
358 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT), 358 AML_FLAGS_EXEC_6A_0T_1R | AML_CONSTANT),
359/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP, 359/* 33 */ ACPI_OP("CreateDWordField", ARGP_CREATE_DWORD_FIELD_OP,
360 ARGI_CREATE_DWORD_FIELD_OP, 360 ARGI_CREATE_DWORD_FIELD_OP,
361 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 361 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
362 AML_TYPE_CREATE_FIELD, 362 AML_TYPE_CREATE_FIELD,
363 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 363 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
364 AML_DEFER | AML_CREATE), 364 AML_DEFER | AML_CREATE),
365/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP, 365/* 34 */ ACPI_OP("CreateWordField", ARGP_CREATE_WORD_FIELD_OP,
366 ARGI_CREATE_WORD_FIELD_OP, 366 ARGI_CREATE_WORD_FIELD_OP,
367 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 367 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
368 AML_TYPE_CREATE_FIELD, 368 AML_TYPE_CREATE_FIELD,
369 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 369 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
370 AML_DEFER | AML_CREATE), 370 AML_DEFER | AML_CREATE),
371/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP, 371/* 35 */ ACPI_OP("CreateByteField", ARGP_CREATE_BYTE_FIELD_OP,
372 ARGI_CREATE_BYTE_FIELD_OP, 372 ARGI_CREATE_BYTE_FIELD_OP,
373 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 373 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
374 AML_TYPE_CREATE_FIELD, 374 AML_TYPE_CREATE_FIELD,
375 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 375 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
376 AML_DEFER | AML_CREATE), 376 AML_DEFER | AML_CREATE),
377/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP, 377/* 36 */ ACPI_OP("CreateBitField", ARGP_CREATE_BIT_FIELD_OP,
378 ARGI_CREATE_BIT_FIELD_OP, 378 ARGI_CREATE_BIT_FIELD_OP,
379 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 379 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
380 AML_TYPE_CREATE_FIELD, 380 AML_TYPE_CREATE_FIELD,
381 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 381 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
382 AML_DEFER | AML_CREATE), 382 AML_DEFER | AML_CREATE),
383/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP, 383/* 37 */ ACPI_OP("ObjectType", ARGP_TYPE_OP, ARGI_TYPE_OP,
384 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 384 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
385 AML_TYPE_EXEC_1A_0T_1R, 385 AML_TYPE_EXEC_1A_0T_1R,
386 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE), 386 AML_FLAGS_EXEC_1A_0T_1R | AML_NO_OPERAND_RESOLVE),
387/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY, 387/* 38 */ ACPI_OP("LAnd", ARGP_LAND_OP, ARGI_LAND_OP, ACPI_TYPE_ANY,
388 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 388 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
389 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | 389 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
390 AML_CONSTANT),
391/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY, 390/* 39 */ ACPI_OP("LOr", ARGP_LOR_OP, ARGI_LOR_OP, ACPI_TYPE_ANY,
392 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 391 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
393 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | 392 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL_NUMERIC | AML_CONSTANT),
394 AML_CONSTANT),
395/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY, 393/* 3A */ ACPI_OP("LNot", ARGP_LNOT_OP, ARGI_LNOT_OP, ACPI_TYPE_ANY,
396 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R, 394 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_1R,
397 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT), 395 AML_FLAGS_EXEC_1A_0T_1R | AML_CONSTANT),
398/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP, 396/* 3B */ ACPI_OP("LEqual", ARGP_LEQUAL_OP, ARGI_LEQUAL_OP,
399 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 397 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
400 AML_TYPE_EXEC_2A_0T_1R, 398 AML_TYPE_EXEC_2A_0T_1R,
401 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 399 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
402/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP, 400/* 3C */ ACPI_OP("LGreater", ARGP_LGREATER_OP, ARGI_LGREATER_OP,
403 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 401 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
404 AML_TYPE_EXEC_2A_0T_1R, 402 AML_TYPE_EXEC_2A_0T_1R,
405 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 403 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
406/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY, 404/* 3D */ ACPI_OP("LLess", ARGP_LLESS_OP, ARGI_LLESS_OP, ACPI_TYPE_ANY,
407 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 405 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
408 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT), 406 AML_FLAGS_EXEC_2A_0T_1R | AML_LOGICAL | AML_CONSTANT),
409/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY, 407/* 3E */ ACPI_OP("If", ARGP_IF_OP, ARGI_IF_OP, ACPI_TYPE_ANY,
410 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 408 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
411/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY, 409/* 3F */ ACPI_OP("Else", ARGP_ELSE_OP, ARGI_ELSE_OP, ACPI_TYPE_ANY,
412 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 410 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
413/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY, 411/* 40 */ ACPI_OP("While", ARGP_WHILE_OP, ARGI_WHILE_OP, ACPI_TYPE_ANY,
414 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS), 412 AML_CLASS_CONTROL, AML_TYPE_CONTROL, AML_HAS_ARGS),
415/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY, 413/* 41 */ ACPI_OP("Noop", ARGP_NOOP_OP, ARGI_NOOP_OP, ACPI_TYPE_ANY,
416 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 414 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
417/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP, 415/* 42 */ ACPI_OP("Return", ARGP_RETURN_OP, ARGI_RETURN_OP,
418 ACPI_TYPE_ANY, AML_CLASS_CONTROL, 416 ACPI_TYPE_ANY, AML_CLASS_CONTROL,
419 AML_TYPE_CONTROL, AML_HAS_ARGS), 417 AML_TYPE_CONTROL, AML_HAS_ARGS),
420/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY, 418/* 43 */ ACPI_OP("Break", ARGP_BREAK_OP, ARGI_BREAK_OP, ACPI_TYPE_ANY,
421 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 419 AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
422/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP, 420/* 44 */ ACPI_OP("BreakPoint", ARGP_BREAK_POINT_OP, ARGI_BREAK_POINT_OP,
423 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 421 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
424/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER, 422/* 45 */ ACPI_OP("Ones", ARGP_ONES_OP, ARGI_ONES_OP, ACPI_TYPE_INTEGER,
425 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT), 423 AML_CLASS_ARGUMENT, AML_TYPE_CONSTANT, AML_CONSTANT),
426 424
427/* Prefixed opcodes (Two-byte opcodes with a prefix op) */ 425/* Prefixed opcodes (Two-byte opcodes with a prefix op) */
428 426
429/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX, 427/* 46 */ ACPI_OP("Mutex", ARGP_MUTEX_OP, ARGI_MUTEX_OP, ACPI_TYPE_MUTEX,
430 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 428 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
431 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 429 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
432 AML_NSNODE | AML_NAMED), 430 AML_NSNODE | AML_NAMED),
433/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT, 431/* 47 */ ACPI_OP("Event", ARGP_EVENT_OP, ARGI_EVENT_OP, ACPI_TYPE_EVENT,
434 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 432 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
435 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), 433 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
436/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP, 434/* 48 */ ACPI_OP("CondRefOf", ARGP_COND_REF_OF_OP, ARGI_COND_REF_OF_OP,
437 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 435 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
438 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), 436 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
439/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP, 437/* 49 */ ACPI_OP("CreateField", ARGP_CREATE_FIELD_OP,
440 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD, 438 ARGI_CREATE_FIELD_OP, ACPI_TYPE_BUFFER_FIELD,
441 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD, 439 AML_CLASS_CREATE, AML_TYPE_CREATE_FIELD,
442 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 440 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
443 AML_DEFER | AML_FIELD | AML_CREATE), 441 AML_DEFER | AML_FIELD | AML_CREATE),
444/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY, 442/* 4A */ ACPI_OP("Load", ARGP_LOAD_OP, ARGI_LOAD_OP, ACPI_TYPE_ANY,
445 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R, 443 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_0R,
446 AML_FLAGS_EXEC_1A_1T_0R), 444 AML_FLAGS_EXEC_1A_1T_0R),
447/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY, 445/* 4B */ ACPI_OP("Stall", ARGP_STALL_OP, ARGI_STALL_OP, ACPI_TYPE_ANY,
448 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 446 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
449 AML_FLAGS_EXEC_1A_0T_0R), 447 AML_FLAGS_EXEC_1A_0T_0R),
450/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY, 448/* 4C */ ACPI_OP("Sleep", ARGP_SLEEP_OP, ARGI_SLEEP_OP, ACPI_TYPE_ANY,
451 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 449 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
452 AML_FLAGS_EXEC_1A_0T_0R), 450 AML_FLAGS_EXEC_1A_0T_0R),
453/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP, 451/* 4D */ ACPI_OP("Acquire", ARGP_ACQUIRE_OP, ARGI_ACQUIRE_OP,
454 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 452 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
455 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R), 453 AML_TYPE_EXEC_2A_0T_1R, AML_FLAGS_EXEC_2A_0T_1R),
456/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP, 454/* 4E */ ACPI_OP("Signal", ARGP_SIGNAL_OP, ARGI_SIGNAL_OP,
457 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 455 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
458 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 456 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
459/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY, 457/* 4F */ ACPI_OP("Wait", ARGP_WAIT_OP, ARGI_WAIT_OP, ACPI_TYPE_ANY,
460 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R, 458 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_0T_1R,
461 AML_FLAGS_EXEC_2A_0T_1R), 459 AML_FLAGS_EXEC_2A_0T_1R),
462/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY, 460/* 50 */ ACPI_OP("Reset", ARGP_RESET_OP, ARGI_RESET_OP, ACPI_TYPE_ANY,
463 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R, 461 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_0T_0R,
464 AML_FLAGS_EXEC_1A_0T_0R), 462 AML_FLAGS_EXEC_1A_0T_0R),
465/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP, 463/* 51 */ ACPI_OP("Release", ARGP_RELEASE_OP, ARGI_RELEASE_OP,
466 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 464 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
467 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 465 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
468/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP, 466/* 52 */ ACPI_OP("FromBCD", ARGP_FROM_BCD_OP, ARGI_FROM_BCD_OP,
469 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 467 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
470 AML_TYPE_EXEC_1A_1T_1R, 468 AML_TYPE_EXEC_1A_1T_1R,
471 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 469 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
472/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY, 470/* 53 */ ACPI_OP("ToBCD", ARGP_TO_BCD_OP, ARGI_TO_BCD_OP, ACPI_TYPE_ANY,
473 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 471 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
474 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 472 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
475/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP, 473/* 54 */ ACPI_OP("Unload", ARGP_UNLOAD_OP, ARGI_UNLOAD_OP,
476 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 474 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
477 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R), 475 AML_TYPE_EXEC_1A_0T_0R, AML_FLAGS_EXEC_1A_0T_0R),
478/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP, 476/* 55 */ ACPI_OP("Revision", ARGP_REVISION_OP, ARGI_REVISION_OP,
479 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 477 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
480 AML_TYPE_CONSTANT, 0), 478 AML_TYPE_CONSTANT, 0),
481/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP, 479/* 56 */ ACPI_OP("Debug", ARGP_DEBUG_OP, ARGI_DEBUG_OP,
482 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 480 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
483 AML_TYPE_CONSTANT, 0), 481 AML_TYPE_CONSTANT, 0),
484/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY, 482/* 57 */ ACPI_OP("Fatal", ARGP_FATAL_OP, ARGI_FATAL_OP, ACPI_TYPE_ANY,
485 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R, 483 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_0T_0R,
486 AML_FLAGS_EXEC_3A_0T_0R), 484 AML_FLAGS_EXEC_3A_0T_0R),
487/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP, 485/* 58 */ ACPI_OP("OperationRegion", ARGP_REGION_OP, ARGI_REGION_OP,
488 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT, 486 ACPI_TYPE_REGION, AML_CLASS_NAMED_OBJECT,
489 AML_TYPE_NAMED_COMPLEX, 487 AML_TYPE_NAMED_COMPLEX,
490 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 488 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
491 AML_NSNODE | AML_NAMED | AML_DEFER), 489 AML_NSNODE | AML_NAMED | AML_DEFER),
492/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY, 490/* 59 */ ACPI_OP("Field", ARGP_FIELD_OP, ARGI_FIELD_OP, ACPI_TYPE_ANY,
493 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD, 491 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_FIELD,
494 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 492 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
495 AML_FIELD),
496/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP, 493/* 5A */ ACPI_OP("Device", ARGP_DEVICE_OP, ARGI_DEVICE_OP,
497 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT, 494 ACPI_TYPE_DEVICE, AML_CLASS_NAMED_OBJECT,
498 AML_TYPE_NAMED_NO_OBJ, 495 AML_TYPE_NAMED_NO_OBJ,
499 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 496 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
500 AML_NSNODE | AML_NAMED), 497 AML_NSNODE | AML_NAMED),
501/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP, 498/* 5B */ ACPI_OP("Processor", ARGP_PROCESSOR_OP, ARGI_PROCESSOR_OP,
502 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT, 499 ACPI_TYPE_PROCESSOR, AML_CLASS_NAMED_OBJECT,
503 AML_TYPE_NAMED_SIMPLE, 500 AML_TYPE_NAMED_SIMPLE,
504 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 501 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
505 AML_NSNODE | AML_NAMED), 502 AML_NSNODE | AML_NAMED),
506/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP, 503/* 5C */ ACPI_OP("PowerResource", ARGP_POWER_RES_OP, ARGI_POWER_RES_OP,
507 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT, 504 ACPI_TYPE_POWER, AML_CLASS_NAMED_OBJECT,
508 AML_TYPE_NAMED_SIMPLE, 505 AML_TYPE_NAMED_SIMPLE,
509 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 506 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
510 AML_NSNODE | AML_NAMED), 507 AML_NSNODE | AML_NAMED),
511/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP, 508/* 5D */ ACPI_OP("ThermalZone", ARGP_THERMAL_ZONE_OP,
512 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL, 509 ARGI_THERMAL_ZONE_OP, ACPI_TYPE_THERMAL,
513 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ, 510 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_NO_OBJ,
514 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 511 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
515 AML_NSNODE | AML_NAMED), 512 AML_NSNODE | AML_NAMED),
516/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP, 513/* 5E */ ACPI_OP("IndexField", ARGP_INDEX_FIELD_OP, ARGI_INDEX_FIELD_OP,
517 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 514 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
518 AML_TYPE_NAMED_FIELD, 515 AML_TYPE_NAMED_FIELD,
519 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 516 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
520 AML_FIELD),
521/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP, 517/* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
522 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 518 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
523 AML_TYPE_NAMED_FIELD, 519 AML_TYPE_NAMED_FIELD,
524 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 520 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
525 AML_FIELD),
526 521
527/* Internal opcodes that map to invalid AML opcodes */ 522/* Internal opcodes that map to invalid AML opcodes */
528 523
529/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP, 524/* 60 */ ACPI_OP("LNotEqual", ARGP_LNOTEQUAL_OP, ARGI_LNOTEQUAL_OP,
530 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 525 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
531 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), 526 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
532/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP, 527/* 61 */ ACPI_OP("LLessEqual", ARGP_LLESSEQUAL_OP, ARGI_LLESSEQUAL_OP,
533 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 528 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
534 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT), 529 AML_TYPE_BOGUS, AML_HAS_ARGS | AML_CONSTANT),
535/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP, 530/* 62 */ ACPI_OP("LGreaterEqual", ARGP_LGREATEREQUAL_OP,
536 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY, 531 ARGI_LGREATEREQUAL_OP, ACPI_TYPE_ANY,
537 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 532 AML_CLASS_INTERNAL, AML_TYPE_BOGUS,
538 AML_HAS_ARGS | AML_CONSTANT), 533 AML_HAS_ARGS | AML_CONSTANT),
539/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP, 534/* 63 */ ACPI_OP("-NamePath-", ARGP_NAMEPATH_OP, ARGI_NAMEPATH_OP,
540 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT, 535 ACPI_TYPE_LOCAL_REFERENCE, AML_CLASS_ARGUMENT,
541 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE), 536 AML_TYPE_LITERAL, AML_NSOBJECT | AML_NSNODE),
542/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP, 537/* 64 */ ACPI_OP("-MethodCall-", ARGP_METHODCALL_OP, ARGI_METHODCALL_OP,
543 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL, 538 ACPI_TYPE_METHOD, AML_CLASS_METHOD_CALL,
544 AML_TYPE_METHOD_CALL, 539 AML_TYPE_METHOD_CALL,
545 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE), 540 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE),
546/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP, 541/* 65 */ ACPI_OP("-ByteList-", ARGP_BYTELIST_OP, ARGI_BYTELIST_OP,
547 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT, 542 ACPI_TYPE_ANY, AML_CLASS_ARGUMENT,
548 AML_TYPE_LITERAL, 0), 543 AML_TYPE_LITERAL, 0),
549/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP, 544/* 66 */ ACPI_OP("-ReservedField-", ARGP_RESERVEDFIELD_OP,
550 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY, 545 ARGI_RESERVEDFIELD_OP, ACPI_TYPE_ANY,
551 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 546 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
552/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP, 547/* 67 */ ACPI_OP("-NamedField-", ARGP_NAMEDFIELD_OP, ARGI_NAMEDFIELD_OP,
553 ACPI_TYPE_ANY, AML_CLASS_INTERNAL, 548 ACPI_TYPE_ANY, AML_CLASS_INTERNAL,
554 AML_TYPE_BOGUS, 549 AML_TYPE_BOGUS,
555 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED), 550 AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE | AML_NAMED),
556/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP, 551/* 68 */ ACPI_OP("-AccessField-", ARGP_ACCESSFIELD_OP,
557 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY, 552 ARGI_ACCESSFIELD_OP, ACPI_TYPE_ANY,
558 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 553 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
559/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP, 554/* 69 */ ACPI_OP("-StaticString", ARGP_STATICSTRING_OP,
560 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY, 555 ARGI_STATICSTRING_OP, ACPI_TYPE_ANY,
561 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0), 556 AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0),
562/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 557/* 6A */ ACPI_OP("-Return Value-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
563 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN, 558 AML_CLASS_RETURN_VALUE, AML_TYPE_RETURN,
564 AML_HAS_ARGS | AML_HAS_RETVAL), 559 AML_HAS_ARGS | AML_HAS_RETVAL),
565/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID, 560/* 6B */ ACPI_OP("-UNKNOWN_OP-", ARG_NONE, ARG_NONE, ACPI_TYPE_INVALID,
566 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS), 561 AML_CLASS_UNKNOWN, AML_TYPE_BOGUS, AML_HAS_ARGS),
567/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 562/* 6C */ ACPI_OP("-ASCII_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
568 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS), 563 AML_CLASS_ASCII, AML_TYPE_BOGUS, AML_HAS_ARGS),
569/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY, 564/* 6D */ ACPI_OP("-PREFIX_ONLY-", ARG_NONE, ARG_NONE, ACPI_TYPE_ANY,
570 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS), 565 AML_CLASS_PREFIX, AML_TYPE_BOGUS, AML_HAS_ARGS),
571 566
572/* ACPI 2.0 opcodes */ 567/* ACPI 2.0 opcodes */
573 568
574/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP, 569/* 6E */ ACPI_OP("QwordConst", ARGP_QWORD_OP, ARGI_QWORD_OP,
575 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT, 570 ACPI_TYPE_INTEGER, AML_CLASS_ARGUMENT,
576 AML_TYPE_LITERAL, AML_CONSTANT), 571 AML_TYPE_LITERAL, AML_CONSTANT),
577 /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP, 572 /* 6F */ ACPI_OP("Package", /* Var */ ARGP_VAR_PACKAGE_OP,
578 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE, 573 ARGI_VAR_PACKAGE_OP, ACPI_TYPE_PACKAGE,
579 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT, 574 AML_CLASS_CREATE, AML_TYPE_CREATE_OBJECT,
580 AML_HAS_ARGS | AML_DEFER), 575 AML_HAS_ARGS | AML_DEFER),
581/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP, 576/* 70 */ ACPI_OP("ConcatenateResTemplate", ARGP_CONCAT_RES_OP,
582 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY, 577 ARGI_CONCAT_RES_OP, ACPI_TYPE_ANY,
583 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 578 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
584 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 579 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
585/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY, 580/* 71 */ ACPI_OP("Mod", ARGP_MOD_OP, ARGI_MOD_OP, ACPI_TYPE_ANY,
586 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R, 581 AML_CLASS_EXECUTE, AML_TYPE_EXEC_2A_1T_1R,
587 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 582 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
588/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP, 583/* 72 */ ACPI_OP("CreateQWordField", ARGP_CREATE_QWORD_FIELD_OP,
589 ARGI_CREATE_QWORD_FIELD_OP, 584 ARGI_CREATE_QWORD_FIELD_OP,
590 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE, 585 ACPI_TYPE_BUFFER_FIELD, AML_CLASS_CREATE,
591 AML_TYPE_CREATE_FIELD, 586 AML_TYPE_CREATE_FIELD,
592 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE | 587 AML_HAS_ARGS | AML_NSOBJECT | AML_NSNODE |
593 AML_DEFER | AML_CREATE), 588 AML_DEFER | AML_CREATE),
594/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP, 589/* 73 */ ACPI_OP("ToBuffer", ARGP_TO_BUFFER_OP, ARGI_TO_BUFFER_OP,
595 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 590 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
596 AML_TYPE_EXEC_1A_1T_1R, 591 AML_TYPE_EXEC_1A_1T_1R,
597 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 592 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
598/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP, 593/* 74 */ ACPI_OP("ToDecimalString", ARGP_TO_DEC_STR_OP,
599 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY, 594 ARGI_TO_DEC_STR_OP, ACPI_TYPE_ANY,
600 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R, 595 AML_CLASS_EXECUTE, AML_TYPE_EXEC_1A_1T_1R,
601 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 596 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
602/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP, 597/* 75 */ ACPI_OP("ToHexString", ARGP_TO_HEX_STR_OP, ARGI_TO_HEX_STR_OP,
603 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 598 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
604 AML_TYPE_EXEC_1A_1T_1R, 599 AML_TYPE_EXEC_1A_1T_1R,
605 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 600 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
606/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP, 601/* 76 */ ACPI_OP("ToInteger", ARGP_TO_INTEGER_OP, ARGI_TO_INTEGER_OP,
607 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 602 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
608 AML_TYPE_EXEC_1A_1T_1R, 603 AML_TYPE_EXEC_1A_1T_1R,
609 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT), 604 AML_FLAGS_EXEC_1A_1T_1R | AML_CONSTANT),
610/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP, 605/* 77 */ ACPI_OP("ToString", ARGP_TO_STRING_OP, ARGI_TO_STRING_OP,
611 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 606 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
612 AML_TYPE_EXEC_2A_1T_1R, 607 AML_TYPE_EXEC_2A_1T_1R,
613 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT), 608 AML_FLAGS_EXEC_2A_1T_1R | AML_CONSTANT),
614/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP, 609/* 78 */ ACPI_OP("CopyObject", ARGP_COPY_OP, ARGI_COPY_OP,
615 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 610 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
616 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R), 611 AML_TYPE_EXEC_1A_1T_1R, AML_FLAGS_EXEC_1A_1T_1R),
617/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY, 612/* 79 */ ACPI_OP("Mid", ARGP_MID_OP, ARGI_MID_OP, ACPI_TYPE_ANY,
618 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R, 613 AML_CLASS_EXECUTE, AML_TYPE_EXEC_3A_1T_1R,
619 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT), 614 AML_FLAGS_EXEC_3A_1T_1R | AML_CONSTANT),
620/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP, 615/* 7A */ ACPI_OP("Continue", ARGP_CONTINUE_OP, ARGI_CONTINUE_OP,
621 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0), 616 ACPI_TYPE_ANY, AML_CLASS_CONTROL, AML_TYPE_CONTROL, 0),
622/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP, 617/* 7B */ ACPI_OP("LoadTable", ARGP_LOAD_TABLE_OP, ARGI_LOAD_TABLE_OP,
623 ACPI_TYPE_ANY, AML_CLASS_EXECUTE, 618 ACPI_TYPE_ANY, AML_CLASS_EXECUTE,
624 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R), 619 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
625/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP, 620/* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
626 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION, 621 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
627 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE, 622 AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
628 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 623 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
629 AML_NSNODE | AML_NAMED), 624 AML_NSNODE | AML_NAMED),
630/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP, 625/* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
631 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT, 626 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
632 AML_TYPE_NAMED_NO_OBJ, 627 AML_TYPE_NAMED_NO_OBJ,
633 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | 628 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_NSNODE),
634 AML_NSNODE),
635 629
636/* ACPI 3.0 opcodes */ 630/* ACPI 3.0 opcodes */
637 631
638/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY, 632/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
639 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R, 633 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
640 AML_FLAGS_EXEC_0A_0T_1R) 634 AML_FLAGS_EXEC_0A_0T_1R)
641 635
642/*! [End] no source code translation !*/ 636/*! [End] no source code translation !*/
643}; 637};
diff --git a/drivers/acpi/resources/rscalc.c b/drivers/acpi/resources/rscalc.c
index 8c6d3fdec38a..0dd2ce8a3475 100644
--- a/drivers/acpi/resources/rscalc.c
+++ b/drivers/acpi/resources/rscalc.c
@@ -567,7 +567,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
567 (*sub_object_list)->string. 567 (*sub_object_list)->string.
568 length + 1); 568 length + 1);
569 } else { 569 } else {
570 temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node); 570 temp_size_needed +=
571 acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
571 } 572 }
572 } else { 573 } else {
573 /* 574 /*
diff --git a/drivers/acpi/resources/rscreate.c b/drivers/acpi/resources/rscreate.c
index cc48ab05676c..50da494c3ee2 100644
--- a/drivers/acpi/resources/rscreate.c
+++ b/drivers/acpi/resources/rscreate.c
@@ -267,16 +267,19 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
267 * If BIOS erroneously reversed the _PRT source_name and source_index, 267 * If BIOS erroneously reversed the _PRT source_name and source_index,
268 * then reverse them back. 268 * then reverse them back.
269 */ 269 */
270 if (ACPI_GET_OBJECT_TYPE (sub_object_list[3]) != ACPI_TYPE_INTEGER) { 270 if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
271 ACPI_TYPE_INTEGER) {
271 if (acpi_gbl_enable_interpreter_slack) { 272 if (acpi_gbl_enable_interpreter_slack) {
272 source_name_index = 3; 273 source_name_index = 3;
273 source_index_index = 2; 274 source_index_index = 2;
274 printk(KERN_WARNING "ACPI: Handling Garbled _PRT entry\n"); 275 printk(KERN_WARNING
276 "ACPI: Handling Garbled _PRT entry\n");
275 } else { 277 } else {
276 ACPI_ERROR((AE_INFO, 278 ACPI_ERROR((AE_INFO,
277 "(PRT[%X].source_index) Need Integer, found %s", 279 "(PRT[%X].source_index) Need Integer, found %s",
278 index, 280 index,
279 acpi_ut_get_object_type_name(sub_object_list[3]))); 281 acpi_ut_get_object_type_name
282 (sub_object_list[3])));
280 return_ACPI_STATUS(AE_BAD_DATA); 283 return_ACPI_STATUS(AE_BAD_DATA);
281 } 284 }
282 } 285 }
diff --git a/drivers/acpi/resources/rsdump.c b/drivers/acpi/resources/rsdump.c
index de20a5d6decf..46da116a4030 100644
--- a/drivers/acpi/resources/rsdump.c
+++ b/drivers/acpi/resources/rsdump.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_RESOURCES 47#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsdump") 48ACPI_MODULE_NAME("rsdump")
49
50#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) 49#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
51/* Local prototypes */ 50/* Local prototypes */
52static void acpi_rs_out_string(char *title, char *value); 51static void acpi_rs_out_string(char *title, char *value);
@@ -489,10 +488,9 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
489 /* 488 /*
490 * Optional resource_source for Address resources 489 * Optional resource_source for Address resources
491 */ 490 */
492 acpi_rs_dump_resource_source(ACPI_CAST_PTR 491 acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
493 (struct 492 acpi_resource_source,
494 acpi_resource_source, 493 target));
495 target));
496 break; 494 break;
497 495
498 default: 496 default:
diff --git a/drivers/acpi/resources/rsinfo.c b/drivers/acpi/resources/rsinfo.c
index 7e3c335ab320..2c2adb6292c1 100644
--- a/drivers/acpi/resources/rsinfo.c
+++ b/drivers/acpi/resources/rsinfo.c
@@ -142,7 +142,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
142}; 142};
143#endif 143#endif
144 144
145#endif /* ACPI_FUTURE_USAGE */ 145#endif /* ACPI_FUTURE_USAGE */
146/* 146/*
147 * Base sizes for external AML resource descriptors, indexed by internal type. 147 * Base sizes for external AML resource descriptors, indexed by internal type.
148 * Includes size of the descriptor header (1 byte for small descriptors, 148 * Includes size of the descriptor header (1 byte for small descriptors,
diff --git a/drivers/acpi/resources/rslist.c b/drivers/acpi/resources/rslist.c
index a92755c8877d..ca21e4660c79 100644
--- a/drivers/acpi/resources/rslist.c
+++ b/drivers/acpi/resources/rslist.c
@@ -153,10 +153,9 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
153 153
154 /* Perform the conversion */ 154 /* Perform the conversion */
155 155
156 status = acpi_rs_convert_resource_to_aml(resource, 156 status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
157 ACPI_CAST_PTR(union 157 aml_resource,
158 aml_resource, 158 aml),
159 aml),
160 acpi_gbl_set_resource_dispatch 159 acpi_gbl_set_resource_dispatch
161 [resource->type]); 160 [resource->type]);
162 if (ACPI_FAILURE(status)) { 161 if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/resources/rsmisc.c b/drivers/acpi/resources/rsmisc.c
index 3b63b561b94e..c7081afa893a 100644
--- a/drivers/acpi/resources/rsmisc.c
+++ b/drivers/acpi/resources/rsmisc.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_RESOURCES 47#define _COMPONENT ACPI_RESOURCES
48ACPI_MODULE_NAME("rsmisc") 48ACPI_MODULE_NAME("rsmisc")
49
50#define INIT_RESOURCE_TYPE(i) i->resource_offset 49#define INIT_RESOURCE_TYPE(i) i->resource_offset
51#define INIT_RESOURCE_LENGTH(i) i->aml_offset 50#define INIT_RESOURCE_LENGTH(i) i->aml_offset
52#define INIT_TABLE_LENGTH(i) i->value 51#define INIT_TABLE_LENGTH(i) i->value
@@ -429,8 +428,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
429 * Optional resource_source (Index and String) 428 * Optional resource_source (Index and String)
430 */ 429 */
431 aml_length = 430 aml_length =
432 acpi_rs_set_resource_source(aml, 431 acpi_rs_set_resource_source(aml, (acpi_rs_length)
433 (acpi_rs_length)
434 aml_length, source); 432 aml_length, source);
435 acpi_rs_set_resource_length(aml_length, aml); 433 acpi_rs_set_resource_length(aml_length, aml);
436 break; 434 break;
diff --git a/drivers/acpi/resources/rsutils.c b/drivers/acpi/resources/rsutils.c
index 2442a8f8df57..11c0bd7b9cfd 100644
--- a/drivers/acpi/resources/rsutils.c
+++ b/drivers/acpi/resources/rsutils.c
@@ -353,10 +353,8 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
353 * 353 *
354 * Zero the entire area of the buffer. 354 * Zero the entire area of the buffer.
355 */ 355 */
356 total_length = 356 total_length = (u32)
357 (u32) 357 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
358 ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
359 1;
360 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length); 358 total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
361 359
362 ACPI_MEMSET(resource_source->string_ptr, 0, total_length); 360 ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
diff --git a/drivers/acpi/resources/rsxface.c b/drivers/acpi/resources/rsxface.c
index 991f8901498c..f63813a358c5 100644
--- a/drivers/acpi/resources/rsxface.c
+++ b/drivers/acpi/resources/rsxface.c
@@ -217,7 +217,6 @@ acpi_get_current_resources(acpi_handle device_handle,
217} 217}
218 218
219ACPI_EXPORT_SYMBOL(acpi_get_current_resources) 219ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
220
221#ifdef ACPI_FUTURE_USAGE 220#ifdef ACPI_FUTURE_USAGE
222/******************************************************************************* 221/*******************************************************************************
223 * 222 *
@@ -261,7 +260,6 @@ acpi_get_possible_resources(acpi_handle device_handle,
261 260
262ACPI_EXPORT_SYMBOL(acpi_get_possible_resources) 261ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
263#endif /* ACPI_FUTURE_USAGE */ 262#endif /* ACPI_FUTURE_USAGE */
264
265/******************************************************************************* 263/*******************************************************************************
266 * 264 *
267 * FUNCTION: acpi_set_current_resources 265 * FUNCTION: acpi_set_current_resources
@@ -496,7 +494,6 @@ ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
496 * each resource in the list. 494 * each resource in the list.
497 * 495 *
498 ******************************************************************************/ 496 ******************************************************************************/
499
500acpi_status 497acpi_status
501acpi_walk_resources(acpi_handle device_handle, 498acpi_walk_resources(acpi_handle device_handle,
502 char *name, 499 char *name,
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 52b23471dd69..bc7e16ec8393 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -222,7 +222,7 @@ static struct hibernation_ops acpi_hibernation_ops = {
222 .enter = acpi_hibernation_enter, 222 .enter = acpi_hibernation_enter,
223 .finish = acpi_hibernation_finish, 223 .finish = acpi_hibernation_finish,
224}; 224};
225#endif /* CONFIG_SOFTWARE_SUSPEND */ 225#endif /* CONFIG_SOFTWARE_SUSPEND */
226 226
227/* 227/*
228 * Toshiba fails to preserve interrupts over S1, reinitialization 228 * Toshiba fails to preserve interrupts over S1, reinitialization
@@ -276,4 +276,3 @@ int __init acpi_sleep_init(void)
276 276
277 return 0; 277 return 0;
278} 278}
279
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 76b45f0b8341..61f1822cc350 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -349,8 +349,7 @@ acpi_system_write_alarm(struct file *file,
349 end: 349 end:
350 return_VALUE(result ? result : count); 350 return_VALUE(result ? result : count);
351} 351}
352#endif /* HAVE_ACPI_LEGACY_ALARM */ 352#endif /* HAVE_ACPI_LEGACY_ALARM */
353
354 353
355extern struct list_head acpi_wakeup_device_list; 354extern struct list_head acpi_wakeup_device_list;
356extern spinlock_t acpi_device_lock; 355extern spinlock_t acpi_device_lock;
@@ -380,8 +379,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
380 dev->wakeup.state.enabled ? "enabled" : "disabled"); 379 dev->wakeup.state.enabled ? "enabled" : "disabled");
381 if (ldev) 380 if (ldev)
382 seq_printf(seq, "%s:%s", 381 seq_printf(seq, "%s:%s",
383 ldev->bus ? ldev->bus->name : "no-bus", 382 ldev->bus ? ldev->bus->name : "no-bus",
384 ldev->bus_id); 383 ldev->bus_id);
385 seq_printf(seq, "\n"); 384 seq_printf(seq, "\n");
386 put_device(ldev); 385 put_device(ldev);
387 386
@@ -490,7 +489,7 @@ static u32 rtc_handler(void *context)
490 489
491 return ACPI_INTERRUPT_HANDLED; 490 return ACPI_INTERRUPT_HANDLED;
492} 491}
493#endif /* HAVE_ACPI_LEGACY_ALARM */ 492#endif /* HAVE_ACPI_LEGACY_ALARM */
494 493
495static int __init acpi_sleep_proc_init(void) 494static int __init acpi_sleep_proc_init(void)
496{ 495{
@@ -517,7 +516,7 @@ static int __init acpi_sleep_proc_init(void)
517 entry->proc_fops = &acpi_system_alarm_fops; 516 entry->proc_fops = &acpi_system_alarm_fops;
518 517
519 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); 518 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
520#endif /* HAVE_ACPI_LEGACY_ALARM */ 519#endif /* HAVE_ACPI_LEGACY_ALARM */
521 520
522 /* 'wakeup device' [R/W] */ 521 /* 'wakeup device' [R/W] */
523 entry = 522 entry =
diff --git a/drivers/acpi/tables/tbfadt.c b/drivers/acpi/tables/tbfadt.c
index 1db833eb2417..1285e91474fb 100644
--- a/drivers/acpi/tables/tbfadt.c
+++ b/drivers/acpi/tables/tbfadt.c
@@ -334,7 +334,8 @@ static void acpi_tb_convert_fadt(void)
334 (acpi_gbl_FADT.xpm1a_event_block.address + 334 (acpi_gbl_FADT.xpm1a_event_block.address +
335 pm1_register_length)); 335 pm1_register_length));
336 /* Don't forget to copy space_id of the GAS */ 336 /* Don't forget to copy space_id of the GAS */
337 acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id; 337 acpi_gbl_xpm1a_enable.space_id =
338 acpi_gbl_FADT.xpm1a_event_block.space_id;
338 339
339 /* The PM1B register block is optional, ignore if not present */ 340 /* The PM1B register block is optional, ignore if not present */
340 341
@@ -344,7 +345,8 @@ static void acpi_tb_convert_fadt(void)
344 (acpi_gbl_FADT.xpm1b_event_block. 345 (acpi_gbl_FADT.xpm1b_event_block.
345 address + pm1_register_length)); 346 address + pm1_register_length));
346 /* Don't forget to copy space_id of the GAS */ 347 /* Don't forget to copy space_id of the GAS */
347 acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id; 348 acpi_gbl_xpm1b_enable.space_id =
349 acpi_gbl_FADT.xpm1a_event_block.space_id;
348 350
349 } 351 }
350 352
diff --git a/drivers/acpi/tables/tbxface.c b/drivers/acpi/tables/tbxface.c
index 417ef5fa7666..5b302c4e293f 100644
--- a/drivers/acpi/tables/tbxface.c
+++ b/drivers/acpi/tables/tbxface.c
@@ -201,6 +201,7 @@ acpi_status acpi_reallocate_root_table(void)
201 201
202 return_ACPI_STATUS(AE_OK); 202 return_ACPI_STATUS(AE_OK);
203} 203}
204
204/******************************************************************************* 205/*******************************************************************************
205 * 206 *
206 * FUNCTION: acpi_load_table 207 * FUNCTION: acpi_load_table
@@ -262,7 +263,7 @@ ACPI_EXPORT_SYMBOL(acpi_load_table)
262acpi_status 263acpi_status
263acpi_get_table_header(char *signature, 264acpi_get_table_header(char *signature,
264 acpi_native_uint instance, 265 acpi_native_uint instance,
265 struct acpi_table_header *out_table_header) 266 struct acpi_table_header * out_table_header)
266{ 267{
267 acpi_native_uint i; 268 acpi_native_uint i;
268 acpi_native_uint j; 269 acpi_native_uint j;
@@ -321,7 +322,6 @@ acpi_get_table_header(char *signature,
321 322
322ACPI_EXPORT_SYMBOL(acpi_get_table_header) 323ACPI_EXPORT_SYMBOL(acpi_get_table_header)
323 324
324
325/****************************************************************************** 325/******************************************************************************
326 * 326 *
327 * FUNCTION: acpi_unload_table_id 327 * FUNCTION: acpi_unload_table_id
@@ -346,11 +346,11 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
346 continue; 346 continue;
347 } 347 }
348 /* 348 /*
349 * Delete all namespace objects owned by this table. Note that these 349 * Delete all namespace objects owned by this table. Note that these
350 * objects can appear anywhere in the namespace by virtue of the AML 350 * objects can appear anywhere in the namespace by virtue of the AML
351 * "Scope" operator. Thus, we need to track ownership by an ID, not 351 * "Scope" operator. Thus, we need to track ownership by an ID, not
352 * simply a position within the hierarchy 352 * simply a position within the hierarchy
353 */ 353 */
354 acpi_tb_delete_namespace_by_owner(i); 354 acpi_tb_delete_namespace_by_owner(i);
355 status = acpi_tb_release_owner_id(i); 355 status = acpi_tb_release_owner_id(i);
356 acpi_tb_set_table_loaded_flag(i, FALSE); 356 acpi_tb_set_table_loaded_flag(i, FALSE);
@@ -376,7 +376,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
376 *****************************************************************************/ 376 *****************************************************************************/
377acpi_status 377acpi_status
378acpi_get_table(char *signature, 378acpi_get_table(char *signature,
379 acpi_native_uint instance, struct acpi_table_header ** out_table) 379 acpi_native_uint instance, struct acpi_table_header **out_table)
380{ 380{
381 acpi_native_uint i; 381 acpi_native_uint i;
382 acpi_native_uint j; 382 acpi_native_uint j;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 589b98b7b216..1ada017d01ef 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -59,8 +59,6 @@
59#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0 59#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
60#define ACPI_THERMAL_NOTIFY_HOT 0xF1 60#define ACPI_THERMAL_NOTIFY_HOT 0xF1
61#define ACPI_THERMAL_MODE_ACTIVE 0x00 61#define ACPI_THERMAL_MODE_ACTIVE 0x00
62#define ACPI_THERMAL_MODE_PASSIVE 0x01
63#define ACPI_THERMAL_MODE_CRITICAL 0xff
64#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff" 62#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
65 63
66#define ACPI_THERMAL_MAX_ACTIVE 10 64#define ACPI_THERMAL_MAX_ACTIVE 10
@@ -86,9 +84,6 @@ static int acpi_thermal_resume(struct acpi_device *device);
86static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); 84static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
87static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); 85static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
88static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); 86static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
89static ssize_t acpi_thermal_write_trip_points(struct file *,
90 const char __user *, size_t,
91 loff_t *);
92static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file); 87static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
93static ssize_t acpi_thermal_write_cooling_mode(struct file *, 88static ssize_t acpi_thermal_write_cooling_mode(struct file *,
94 const char __user *, size_t, 89 const char __user *, size_t,
@@ -167,7 +162,6 @@ struct acpi_thermal {
167 unsigned long temperature; 162 unsigned long temperature;
168 unsigned long last_temperature; 163 unsigned long last_temperature;
169 unsigned long polling_frequency; 164 unsigned long polling_frequency;
170 u8 cooling_mode;
171 volatile u8 zombie; 165 volatile u8 zombie;
172 struct acpi_thermal_flags flags; 166 struct acpi_thermal_flags flags;
173 struct acpi_thermal_state state; 167 struct acpi_thermal_state state;
@@ -193,7 +187,6 @@ static const struct file_operations acpi_thermal_temp_fops = {
193static const struct file_operations acpi_thermal_trip_fops = { 187static const struct file_operations acpi_thermal_trip_fops = {
194 .open = acpi_thermal_trip_open_fs, 188 .open = acpi_thermal_trip_open_fs,
195 .read = seq_read, 189 .read = seq_read,
196 .write = acpi_thermal_write_trip_points,
197 .llseek = seq_lseek, 190 .llseek = seq_lseek,
198 .release = single_release, 191 .release = single_release,
199}; 192};
@@ -297,11 +290,6 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
297 if (ACPI_FAILURE(status)) 290 if (ACPI_FAILURE(status))
298 return -ENODEV; 291 return -ENODEV;
299 292
300 tz->cooling_mode = mode;
301
302 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling mode [%s]\n",
303 mode ? "passive" : "active"));
304
305 return 0; 293 return 0;
306} 294}
307 295
@@ -889,67 +877,6 @@ static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
889 return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data); 877 return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
890} 878}
891 879
892static ssize_t
893acpi_thermal_write_trip_points(struct file *file,
894 const char __user * buffer,
895 size_t count, loff_t * ppos)
896{
897 struct seq_file *m = file->private_data;
898 struct acpi_thermal *tz = m->private;
899
900 char *limit_string;
901 int num, critical, hot, passive;
902 int *active;
903 int i = 0;
904
905
906 limit_string = kzalloc(ACPI_THERMAL_MAX_LIMIT_STR_LEN, GFP_KERNEL);
907 if (!limit_string)
908 return -ENOMEM;
909
910 active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
911 if (!active) {
912 kfree(limit_string);
913 return -ENOMEM;
914 }
915
916 if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
917 count = -EINVAL;
918 goto end;
919 }
920
921 if (copy_from_user(limit_string, buffer, count)) {
922 count = -EFAULT;
923 goto end;
924 }
925
926 limit_string[count] = '\0';
927
928 num = sscanf(limit_string, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
929 &critical, &hot, &passive,
930 &active[0], &active[1], &active[2], &active[3], &active[4],
931 &active[5], &active[6], &active[7], &active[8],
932 &active[9]);
933 if (!(num >= 5 && num < (ACPI_THERMAL_MAX_ACTIVE + 3))) {
934 count = -EINVAL;
935 goto end;
936 }
937
938 tz->trips.critical.temperature = CELSIUS_TO_KELVIN(critical);
939 tz->trips.hot.temperature = CELSIUS_TO_KELVIN(hot);
940 tz->trips.passive.temperature = CELSIUS_TO_KELVIN(passive);
941 for (i = 0; i < num - 3; i++) {
942 if (!(tz->trips.active[i].flags.valid))
943 break;
944 tz->trips.active[i].temperature = CELSIUS_TO_KELVIN(active[i]);
945 }
946
947 end:
948 kfree(active);
949 kfree(limit_string);
950 return count;
951}
952
953static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset) 880static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
954{ 881{
955 struct acpi_thermal *tz = seq->private; 882 struct acpi_thermal *tz = seq->private;
@@ -958,15 +885,10 @@ static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
958 if (!tz) 885 if (!tz)
959 goto end; 886 goto end;
960 887
961 if (!tz->flags.cooling_mode) { 888 if (!tz->flags.cooling_mode)
962 seq_puts(seq, "<setting not supported>\n"); 889 seq_puts(seq, "<setting not supported>\n");
963 }
964
965 if (tz->cooling_mode == ACPI_THERMAL_MODE_CRITICAL)
966 seq_printf(seq, "cooling mode: critical\n");
967 else 890 else
968 seq_printf(seq, "cooling mode: %s\n", 891 seq_puts(seq, "0 - Active; 1 - Passive\n");
969 tz->cooling_mode ? "passive" : "active");
970 892
971 end: 893 end:
972 return 0; 894 return 0;
@@ -1223,28 +1145,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
1223 result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE); 1145 result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
1224 if (!result) 1146 if (!result)
1225 tz->flags.cooling_mode = 1; 1147 tz->flags.cooling_mode = 1;
1226 else {
1227 /* Oh,we have not _SCP method.
1228 Generally show cooling_mode by _ACx, _PSV,spec 12.2 */
1229 tz->flags.cooling_mode = 0;
1230 if (tz->trips.active[0].flags.valid
1231 && tz->trips.passive.flags.valid) {
1232 if (tz->trips.passive.temperature >
1233 tz->trips.active[0].temperature)
1234 tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
1235 else
1236 tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
1237 } else if (!tz->trips.active[0].flags.valid
1238 && tz->trips.passive.flags.valid) {
1239 tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
1240 } else if (tz->trips.active[0].flags.valid
1241 && !tz->trips.passive.flags.valid) {
1242 tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
1243 } else {
1244 /* _ACx and _PSV are optional, but _CRT is required */
1245 tz->cooling_mode = ACPI_THERMAL_MODE_CRITICAL;
1246 }
1247 }
1248 1148
1249 /* Get default polling frequency [_TZP] (optional) */ 1149 /* Get default polling frequency [_TZP] (optional) */
1250 if (tzp) 1150 if (tzp)
diff --git a/drivers/acpi/utilities/utalloc.c b/drivers/acpi/utilities/utalloc.c
index 55a764807499..6e56d5f7c43a 100644
--- a/drivers/acpi/utilities/utalloc.c
+++ b/drivers/acpi/utilities/utalloc.c
@@ -107,7 +107,6 @@ acpi_status acpi_ut_create_caches(void)
107 if (ACPI_FAILURE(status)) { 107 if (ACPI_FAILURE(status)) {
108 return (status); 108 return (status);
109 } 109 }
110
111#ifdef ACPI_DBG_TRACK_ALLOCATIONS 110#ifdef ACPI_DBG_TRACK_ALLOCATIONS
112 111
113 /* Memory allocation lists */ 112 /* Memory allocation lists */
diff --git a/drivers/acpi/utilities/utcache.c b/drivers/acpi/utilities/utcache.c
index 870f6edeb5f2..285a0f531760 100644
--- a/drivers/acpi/utilities/utcache.c
+++ b/drivers/acpi/utilities/utcache.c
@@ -45,7 +45,6 @@
45 45
46#define _COMPONENT ACPI_UTILITIES 46#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utcache") 47ACPI_MODULE_NAME("utcache")
48
49#ifdef ACPI_USE_LOCAL_CACHE 48#ifdef ACPI_USE_LOCAL_CACHE
50/******************************************************************************* 49/*******************************************************************************
51 * 50 *
@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("utcache")
64acpi_status 63acpi_status
65acpi_os_create_cache(char *cache_name, 64acpi_os_create_cache(char *cache_name,
66 u16 object_size, 65 u16 object_size,
67 u16 max_depth, struct acpi_memory_list **return_cache) 66 u16 max_depth, struct acpi_memory_list ** return_cache)
68{ 67{
69 struct acpi_memory_list *cache; 68 struct acpi_memory_list *cache;
70 69
diff --git a/drivers/acpi/utilities/utcopy.c b/drivers/acpi/utilities/utcopy.c
index 84d529db0a66..4c1e00874dff 100644
--- a/drivers/acpi/utilities/utcopy.c
+++ b/drivers/acpi/utilities/utcopy.c
@@ -814,7 +814,9 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
814 /* 814 /*
815 * Create the object array 815 * Create the object array
816 */ 816 */
817 target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *)); 817 target_object->package.elements =
818 ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.
819 count + 1) * sizeof(void *));
818 if (!target_object->package.elements) { 820 if (!target_object->package.elements) {
819 status = AE_NO_MEMORY; 821 status = AE_NO_MEMORY;
820 goto error_exit; 822 goto error_exit;
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index 61ad4f2daee2..c7e128e5369b 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -45,7 +45,6 @@
45 45
46#define _COMPONENT ACPI_UTILITIES 46#define _COMPONENT ACPI_UTILITIES
47ACPI_MODULE_NAME("utdebug") 47ACPI_MODULE_NAME("utdebug")
48
49#ifdef ACPI_DEBUG_OUTPUT 48#ifdef ACPI_DEBUG_OUTPUT
50static acpi_thread_id acpi_gbl_prev_thread_id; 49static acpi_thread_id acpi_gbl_prev_thread_id;
51static char *acpi_gbl_fn_entry_str = "----Entry"; 50static char *acpi_gbl_fn_entry_str = "----Entry";
@@ -181,7 +180,8 @@ acpi_ut_debug_print(u32 requested_debug_level,
181 if (ACPI_LV_THREADS & acpi_dbg_level) { 180 if (ACPI_LV_THREADS & acpi_dbg_level) {
182 acpi_os_printf 181 acpi_os_printf
183 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n", 182 ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
184 (unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id); 183 (unsigned long)acpi_gbl_prev_thread_id,
184 (unsigned long)thread_id);
185 } 185 }
186 186
187 acpi_gbl_prev_thread_id = thread_id; 187 acpi_gbl_prev_thread_id = thread_id;
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c
index 673a0caa4073..f777cebdc46d 100644
--- a/drivers/acpi/utilities/utdelete.c
+++ b/drivers/acpi/utilities/utdelete.c
@@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
170 acpi_os_delete_mutex(object->mutex.os_mutex); 170 acpi_os_delete_mutex(object->mutex.os_mutex);
171 acpi_gbl_global_lock_mutex = NULL; 171 acpi_gbl_global_lock_mutex = NULL;
172 } else { 172 } else {
173 acpi_ex_unlink_mutex(object);
173 acpi_os_delete_mutex(object->mutex.os_mutex); 174 acpi_os_delete_mutex(object->mutex.os_mutex);
174 } 175 }
175 break; 176 break;
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index af33358a964b..1621655d6e2b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -55,12 +55,10 @@ ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
55 * Static global variable initialization. 55 * Static global variable initialization.
56 * 56 *
57 ******************************************************************************/ 57 ******************************************************************************/
58
59/* 58/*
60 * We want the debug switches statically initialized so they 59 * We want the debug switches statically initialized so they
61 * are already set when the debugger is entered. 60 * are already set when the debugger is entered.
62 */ 61 */
63
64/* Debug switch - level and trace mask */ 62/* Debug switch - level and trace mask */
65u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT; 63u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
66 64
@@ -735,5 +733,5 @@ void acpi_ut_init_globals(void)
735} 733}
736 734
737ACPI_EXPORT_SYMBOL(acpi_dbg_level) 735ACPI_EXPORT_SYMBOL(acpi_dbg_level)
738ACPI_EXPORT_SYMBOL(acpi_dbg_layer) 736 ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
739ACPI_EXPORT_SYMBOL(acpi_gpe_count) 737 ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/utilities/utmisc.c b/drivers/acpi/utilities/utmisc.c
index 50133fffe420..2d19f71e9cfa 100644
--- a/drivers/acpi/utilities/utmisc.c
+++ b/drivers/acpi/utilities/utmisc.c
@@ -802,9 +802,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
802 802
803 valid_digits++; 803 valid_digits++;
804 804
805 if (sign_of0x 805 if (sign_of0x && ((valid_digits > 16)
806 && ((valid_digits > 16) 806 || ((valid_digits > 8) && mode32))) {
807 || ((valid_digits > 8) && mode32))) {
808 /* 807 /*
809 * This is to_integer operation case. 808 * This is to_integer operation case.
810 * No any restrictions for string-to-integer conversion, 809 * No any restrictions for string-to-integer conversion,
@@ -1049,6 +1048,7 @@ acpi_ut_exception(char *module_name,
1049 acpi_os_vprintf(format, args); 1048 acpi_os_vprintf(format, args);
1050 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION); 1049 acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
1051} 1050}
1051
1052EXPORT_SYMBOL(acpi_ut_exception); 1052EXPORT_SYMBOL(acpi_ut_exception);
1053 1053
1054void ACPI_INTERNAL_VAR_XFACE 1054void ACPI_INTERNAL_VAR_XFACE
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c
index cbad2ef5987d..4820bc86d1f5 100644
--- a/drivers/acpi/utilities/utmutex.c
+++ b/drivers/acpi/utilities/utmutex.c
@@ -244,7 +244,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
244 244
245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 245 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
246 "Thread %lX attempting to acquire Mutex [%s]\n", 246 "Thread %lX attempting to acquire Mutex [%s]\n",
247 (unsigned long) this_thread_id, 247 (unsigned long)this_thread_id,
248 acpi_ut_get_mutex_name(mutex_id))); 248 acpi_ut_get_mutex_name(mutex_id)));
249 249
250 status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, 250 status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
@@ -252,7 +252,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
252 if (ACPI_SUCCESS(status)) { 252 if (ACPI_SUCCESS(status)) {
253 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 253 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
254 "Thread %lX acquired Mutex [%s]\n", 254 "Thread %lX acquired Mutex [%s]\n",
255 (unsigned long) this_thread_id, 255 (unsigned long)this_thread_id,
256 acpi_ut_get_mutex_name(mutex_id))); 256 acpi_ut_get_mutex_name(mutex_id)));
257 257
258 acpi_gbl_mutex_info[mutex_id].use_count++; 258 acpi_gbl_mutex_info[mutex_id].use_count++;
@@ -260,7 +260,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
260 } else { 260 } else {
261 ACPI_EXCEPTION((AE_INFO, status, 261 ACPI_EXCEPTION((AE_INFO, status,
262 "Thread %lX could not acquire Mutex [%X]", 262 "Thread %lX could not acquire Mutex [%X]",
263 (unsigned long) this_thread_id, mutex_id)); 263 (unsigned long)this_thread_id, mutex_id));
264 } 264 }
265 265
266 return (status); 266 return (status);
@@ -287,7 +287,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
287 this_thread_id = acpi_os_get_thread_id(); 287 this_thread_id = acpi_os_get_thread_id();
288 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 288 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
289 "Thread %lX releasing Mutex [%s]\n", 289 "Thread %lX releasing Mutex [%s]\n",
290 (unsigned long) this_thread_id, 290 (unsigned long)this_thread_id,
291 acpi_ut_get_mutex_name(mutex_id))); 291 acpi_ut_get_mutex_name(mutex_id)));
292 292
293 if (mutex_id > ACPI_MAX_MUTEX) { 293 if (mutex_id > ACPI_MAX_MUTEX) {
diff --git a/drivers/acpi/utilities/utresrc.c b/drivers/acpi/utilities/utresrc.c
index e8fe1ba6cc24..cbbd3315a1e2 100644
--- a/drivers/acpi/utilities/utresrc.c
+++ b/drivers/acpi/utilities/utresrc.c
@@ -46,7 +46,6 @@
46 46
47#define _COMPONENT ACPI_UTILITIES 47#define _COMPONENT ACPI_UTILITIES
48ACPI_MODULE_NAME("utresrc") 48ACPI_MODULE_NAME("utresrc")
49
50#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER) 49#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
51/* 50/*
52 * Strings used to decode resource descriptors. 51 * Strings used to decode resource descriptors.
diff --git a/drivers/acpi/utilities/utxface.c b/drivers/acpi/utilities/utxface.c
index de3276f4f468..e9a57806cd34 100644
--- a/drivers/acpi/utilities/utxface.c
+++ b/drivers/acpi/utilities/utxface.c
@@ -337,7 +337,6 @@ acpi_status acpi_terminate(void)
337} 337}
338 338
339ACPI_EXPORT_SYMBOL(acpi_terminate) 339ACPI_EXPORT_SYMBOL(acpi_terminate)
340
341#ifdef ACPI_FUTURE_USAGE 340#ifdef ACPI_FUTURE_USAGE
342/******************************************************************************* 341/*******************************************************************************
343 * 342 *
@@ -470,7 +469,6 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
470 469
471ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler) 470ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
472#endif /* ACPI_FUTURE_USAGE */ 471#endif /* ACPI_FUTURE_USAGE */
473
474/***************************************************************************** 472/*****************************************************************************
475 * 473 *
476 * FUNCTION: acpi_purge_cached_objects 474 * FUNCTION: acpi_purge_cached_objects
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c7219663f2b9..f031b8732330 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers" 5menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
6 depends on HAS_IOMEM
6 7
7config ATA 8config ATA
8 tristate "ATA device support" 9 tristate "ATA device support"
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 0300e7f54cc4..2e18a63ead36 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -6,6 +6,7 @@
6# 6#
7 7
8menu "Auxiliary Display support" 8menu "Auxiliary Display support"
9 depends on PARPORT
9 10
10config KS0108 11config KS0108
11 tristate "KS0108 LCD Controller" 12 tristate "KS0108 LCD Controller"
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 17ee97f3a99b..b4c8319138b2 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -444,8 +444,6 @@ config CDROM_PKTCDVD_WCACHE
444 this option is dangerous unless the CD-RW media is known good, as we 444 this option is dangerous unless the CD-RW media is known good, as we
445 don't do deferred write error handling yet. 445 don't do deferred write error handling yet.
446 446
447source "drivers/s390/block/Kconfig"
448
449config ATA_OVER_ETH 447config ATA_OVER_ETH
450 tristate "ATA over Ethernet support" 448 tristate "ATA over Ethernet support"
451 depends on NET 449 depends on NET
@@ -453,6 +451,8 @@ config ATA_OVER_ETH
453 This driver provides Support for ATA over Ethernet block 451 This driver provides Support for ATA over Ethernet block
454 devices like the Coraid EtherDrive (R) Storage Blade. 452 devices like the Coraid EtherDrive (R) Storage Blade.
455 453
454source "drivers/s390/block/Kconfig"
455
456endmenu 456endmenu
457 457
458endif 458endif
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2df42fdcdc91..abcafac64738 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -6,6 +6,7 @@ menu "Character devices"
6 6
7config VT 7config VT
8 bool "Virtual terminal" if EMBEDDED 8 bool "Virtual terminal" if EMBEDDED
9 depends on !S390
9 select INPUT 10 select INPUT
10 default y if !VIOCONS 11 default y if !VIOCONS
11 ---help--- 12 ---help---
@@ -81,6 +82,7 @@ config VT_HW_CONSOLE_BINDING
81 82
82config SERIAL_NONSTANDARD 83config SERIAL_NONSTANDARD
83 bool "Non-standard serial port support" 84 bool "Non-standard serial port support"
85 depends on HAS_IOMEM
84 ---help--- 86 ---help---
85 Say Y here if you have any non-standard serial boards -- boards 87 Say Y here if you have any non-standard serial boards -- boards
86 which aren't supported using the standard "dumb" serial driver. 88 which aren't supported using the standard "dumb" serial driver.
@@ -765,7 +767,7 @@ config NVRAM
765 767
766config RTC 768config RTC
767 tristate "Enhanced Real Time Clock Support" 769 tristate "Enhanced Real Time Clock Support"
768 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH 770 depends on !PPC && !PARISC && !IA64 && !M68K && (!SPARC || PCI) && !FRV && !ARM && !SUPERH && !S390
769 ---help--- 771 ---help---
770 If you say Y here and create a character special file /dev/rtc with 772 If you say Y here and create a character special file /dev/rtc with
771 major number 10 and minor number 135 using mknod ("man mknod"), you 773 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -813,7 +815,7 @@ config SGI_IP27_RTC
813 815
814config GEN_RTC 816config GEN_RTC
815 tristate "Generic /dev/rtc emulation" 817 tristate "Generic /dev/rtc emulation"
816 depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV 818 depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC && !FRV && !S390
817 ---help--- 819 ---help---
818 If you say Y here and create a character special file /dev/rtc with 820 If you say Y here and create a character special file /dev/rtc with
819 major number 10 and minor number 135 using mknod ("man mknod"), you 821 major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -858,6 +860,7 @@ config COBALT_LCD
858 860
859config DTLK 861config DTLK
860 tristate "Double Talk PC internal speech card support" 862 tristate "Double Talk PC internal speech card support"
863 depends on ISA
861 help 864 help
862 This driver is for the DoubleTalk PC, a speech synthesizer 865 This driver is for the DoubleTalk PC, a speech synthesizer
863 manufactured by RC Systems (<http://www.rcsys.com/>). It is also 866 manufactured by RC Systems (<http://www.rcsys.com/>). It is also
@@ -1043,7 +1046,7 @@ config HPET_MMAP
1043 1046
1044config HANGCHECK_TIMER 1047config HANGCHECK_TIMER
1045 tristate "Hangcheck timer" 1048 tristate "Hangcheck timer"
1046 depends on X86 || IA64 || PPC64 1049 depends on X86 || IA64 || PPC64 || S390
1047 help 1050 help
1048 The hangcheck-timer module detects when the system has gone 1051 The hangcheck-timer module detects when the system has gone
1049 out to lunch past a certain margin. It can reboot the system 1052 out to lunch past a certain margin. It can reboot the system
@@ -1078,5 +1081,7 @@ config DEVPORT
1078 depends on ISA || PCI 1081 depends on ISA || PCI
1079 default y 1082 default y
1080 1083
1084source "drivers/s390/char/Kconfig"
1085
1081endmenu 1086endmenu
1082 1087
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index a6dcb2918157..b894f67fdf14 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -3,6 +3,8 @@
3# 3#
4 4
5menu "IPMI" 5menu "IPMI"
6 depends on HAS_IOMEM
7
6config IPMI_HANDLER 8config IPMI_HANDLER
7 tristate 'IPMI top-level message handler' 9 tristate 'IPMI top-level message handler'
8 help 10 help
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 11089be0691b..dc4e1ff7f56f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "TPM devices" 5menu "TPM devices"
6 depends on HAS_IOMEM
6 7
7config TCG_TPM 8config TCG_TPM
8 tristate "TPM Hardware Support" 9 tristate "TPM Hardware Support"
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f4c634504d1a..e678a33ea672 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -56,4 +56,26 @@ config CRYPTO_DEV_GEODE
56 To compile this driver as a module, choose M here: the module 56 To compile this driver as a module, choose M here: the module
57 will be called geode-aes. 57 will be called geode-aes.
58 58
59config ZCRYPT
60 tristate "Support for PCI-attached cryptographic adapters"
61 depends on S390
62 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
63 default "m"
64 help
65 Select this option if you want to use a PCI-attached cryptographic
66 adapter like:
67 + PCI Cryptographic Accelerator (PCICA)
68 + PCI Cryptographic Coprocessor (PCICC)
69 + PCI-X Cryptographic Coprocessor (PCIXCC)
70 + Crypto Express2 Coprocessor (CEX2C)
71 + Crypto Express2 Accelerator (CEX2A)
72
73config ZCRYPT_MONOLITHIC
74 bool "Monolithic zcrypt module"
75 depends on ZCRYPT="m"
76 help
77 Select this option if you want to have a single module z90crypt.ko
78 that contains all parts of the crypto device driver (ap bus,
79 request router and all the card drivers).
80
59endmenu 81endmenu
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 30d021d1a07c..72be6c63edfc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "DMA Engine support" 5menu "DMA Engine support"
6 depends on !S390
6 7
7config DMA_ENGINE 8config DMA_ENGINE
8 bool "Support for DMA engines" 9 bool "Support for DMA engines"
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4f0898400c6d..807c402df049 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -7,6 +7,7 @@
7# 7#
8 8
9menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)' 9menu 'EDAC - error detection and reporting (RAS) (EXPERIMENTAL)'
10 depends on HAS_IOMEM
10 11
11config EDAC 12config EDAC
12 tristate "EDAC core system error reporting (EXPERIMENTAL)" 13 tristate "EDAC core system error reporting (EXPERIMENTAL)"
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
new file mode 100644
index 000000000000..5932c72f9e42
--- /dev/null
+++ b/drivers/firewire/Kconfig
@@ -0,0 +1,61 @@
1# -*- shell-script -*-
2
3comment "An alternative FireWire stack is available with EXPERIMENTAL=y"
4 depends on EXPERIMENTAL=n
5
6config FIREWIRE
7 tristate "IEEE 1394 (FireWire) support (JUJU alternative stack, experimental)"
8 depends on EXPERIMENTAL
9 select CRC_ITU_T
10 help
11 IEEE 1394 describes a high performance serial bus, which is also
12 known as FireWire(tm) or i.Link(tm) and is used for connecting all
13 sorts of devices (most notably digital video cameras) to your
14 computer.
15
16 If you have FireWire hardware and want to use it, say Y here. This
17 is the core support only, you will also need to select a driver for
18 your IEEE 1394 adapter.
19
20 To compile this driver as a module, say M here: the module will be
21 called fw-core.
22
23 This is the "JUJU" FireWire stack, an alternative implementation
24 designed for robustness and simplicity. You can build either this
25 stack, or the classic stack (the ieee1394 driver, ohci1394 etc.)
26 or both.
27
28config FIREWIRE_OHCI
29 tristate "Support for OHCI FireWire host controllers"
30 depends on PCI && FIREWIRE
31 help
32 Enable this driver if you have a FireWire controller based
33 on the OHCI specification. For all practical purposes, this
34 is the only chipset in use, so say Y here.
35
36 To compile this driver as a module, say M here: The module will be
37 called fw-ohci.
38
39 If you also build ohci1394 of the classic IEEE 1394 driver stack,
40 blacklist either ohci1394 or fw-ohci to let hotplug load the desired
41 driver.
42
43config FIREWIRE_SBP2
44 tristate "Support for storage devices (SBP-2 protocol driver)"
45 depends on FIREWIRE && SCSI
46 help
47 This option enables you to use SBP-2 devices connected to a
48 FireWire bus. SBP-2 devices include storage devices like
49 harddisks and DVD drives, also some other FireWire devices
50 like scanners.
51
52 To compile this driver as a module, say M here: The module will be
53 called fw-sbp2.
54
55 You should also enable support for disks, CD-ROMs, etc. in the SCSI
56 configuration section.
57
58 If you also build sbp2 of the classic IEEE 1394 driver stack,
59 blacklist either sbp2 or fw-sbp2 to let hotplug load the desired
60 driver.
61
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
new file mode 100644
index 000000000000..fc7d59d4bce0
--- /dev/null
+++ b/drivers/firewire/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Linux IEEE 1394 implementation
3#
4
5fw-core-y += fw-card.o fw-topology.o fw-transaction.o fw-iso.o \
6 fw-device.o fw-cdev.o
7
8obj-$(CONFIG_FIREWIRE) += fw-core.o
9obj-$(CONFIG_FIREWIRE_OHCI) += fw-ohci.o
10obj-$(CONFIG_FIREWIRE_SBP2) += fw-sbp2.o
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
new file mode 100644
index 000000000000..636151a64add
--- /dev/null
+++ b/drivers/firewire/fw-card.c
@@ -0,0 +1,560 @@
1/*
2 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/module.h>
20#include <linux/errno.h>
21#include <linux/device.h>
22#include <linux/mutex.h>
23#include <linux/crc-itu-t.h>
24#include "fw-transaction.h"
25#include "fw-topology.h"
26#include "fw-device.h"
27
28int fw_compute_block_crc(u32 *block)
29{
30 __be32 be32_block[256];
31 int i, length;
32
33 length = (*block >> 16) & 0xff;
34 for (i = 0; i < length; i++)
35 be32_block[i] = cpu_to_be32(block[i + 1]);
36 *block |= crc_itu_t(0, (u8 *) be32_block, length * 4);
37
38 return length;
39}
40
41static DEFINE_MUTEX(card_mutex);
42static LIST_HEAD(card_list);
43
44static LIST_HEAD(descriptor_list);
45static int descriptor_count;
46
47#define BIB_CRC(v) ((v) << 0)
48#define BIB_CRC_LENGTH(v) ((v) << 16)
49#define BIB_INFO_LENGTH(v) ((v) << 24)
50
51#define BIB_LINK_SPEED(v) ((v) << 0)
52#define BIB_GENERATION(v) ((v) << 4)
53#define BIB_MAX_ROM(v) ((v) << 8)
54#define BIB_MAX_RECEIVE(v) ((v) << 12)
55#define BIB_CYC_CLK_ACC(v) ((v) << 16)
56#define BIB_PMC ((1) << 27)
57#define BIB_BMC ((1) << 28)
58#define BIB_ISC ((1) << 29)
59#define BIB_CMC ((1) << 30)
60#define BIB_IMC ((1) << 31)
61
62static u32 *
63generate_config_rom(struct fw_card *card, size_t *config_rom_length)
64{
65 struct fw_descriptor *desc;
66 static u32 config_rom[256];
67 int i, j, length;
68
69 /*
70 * Initialize contents of config rom buffer. On the OHCI
71 * controller, block reads to the config rom accesses the host
72 * memory, but quadlet read access the hardware bus info block
73 * registers. That's just crack, but it means we should make
74 * sure the contents of bus info block in host memory mathces
75 * the version stored in the OHCI registers.
76 */
77
78 memset(config_rom, 0, sizeof(config_rom));
79 config_rom[0] = BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0);
80 config_rom[1] = 0x31333934;
81
82 config_rom[2] =
83 BIB_LINK_SPEED(card->link_speed) |
84 BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
85 BIB_MAX_ROM(2) |
86 BIB_MAX_RECEIVE(card->max_receive) |
87 BIB_BMC | BIB_ISC | BIB_CMC | BIB_IMC;
88 config_rom[3] = card->guid >> 32;
89 config_rom[4] = card->guid;
90
91 /* Generate root directory. */
92 i = 5;
93 config_rom[i++] = 0;
94 config_rom[i++] = 0x0c0083c0; /* node capabilities */
95 j = i + descriptor_count;
96
97 /* Generate root directory entries for descriptors. */
98 list_for_each_entry (desc, &descriptor_list, link) {
99 if (desc->immediate > 0)
100 config_rom[i++] = desc->immediate;
101 config_rom[i] = desc->key | (j - i);
102 i++;
103 j += desc->length;
104 }
105
106 /* Update root directory length. */
107 config_rom[5] = (i - 5 - 1) << 16;
108
109 /* End of root directory, now copy in descriptors. */
110 list_for_each_entry (desc, &descriptor_list, link) {
111 memcpy(&config_rom[i], desc->data, desc->length * 4);
112 i += desc->length;
113 }
114
115 /* Calculate CRCs for all blocks in the config rom. This
116 * assumes that CRC length and info length are identical for
117 * the bus info block, which is always the case for this
118 * implementation. */
119 for (i = 0; i < j; i += length + 1)
120 length = fw_compute_block_crc(config_rom + i);
121
122 *config_rom_length = j;
123
124 return config_rom;
125}
126
127static void
128update_config_roms(void)
129{
130 struct fw_card *card;
131 u32 *config_rom;
132 size_t length;
133
134 list_for_each_entry (card, &card_list, link) {
135 config_rom = generate_config_rom(card, &length);
136 card->driver->set_config_rom(card, config_rom, length);
137 }
138}
139
140int
141fw_core_add_descriptor(struct fw_descriptor *desc)
142{
143 size_t i;
144
145 /*
146 * Check descriptor is valid; the length of all blocks in the
147 * descriptor has to add up to exactly the length of the
148 * block.
149 */
150 i = 0;
151 while (i < desc->length)
152 i += (desc->data[i] >> 16) + 1;
153
154 if (i != desc->length)
155 return -EINVAL;
156
157 mutex_lock(&card_mutex);
158
159 list_add_tail(&desc->link, &descriptor_list);
160 descriptor_count++;
161 if (desc->immediate > 0)
162 descriptor_count++;
163 update_config_roms();
164
165 mutex_unlock(&card_mutex);
166
167 return 0;
168}
169EXPORT_SYMBOL(fw_core_add_descriptor);
170
171void
172fw_core_remove_descriptor(struct fw_descriptor *desc)
173{
174 mutex_lock(&card_mutex);
175
176 list_del(&desc->link);
177 descriptor_count--;
178 if (desc->immediate > 0)
179 descriptor_count--;
180 update_config_roms();
181
182 mutex_unlock(&card_mutex);
183}
184EXPORT_SYMBOL(fw_core_remove_descriptor);
185
186static const char gap_count_table[] = {
187 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
188};
189
190struct bm_data {
191 struct fw_transaction t;
192 struct {
193 __be32 arg;
194 __be32 data;
195 } lock;
196 u32 old;
197 int rcode;
198 struct completion done;
199};
200
201static void
202complete_bm_lock(struct fw_card *card, int rcode,
203 void *payload, size_t length, void *data)
204{
205 struct bm_data *bmd = data;
206
207 if (rcode == RCODE_COMPLETE)
208 bmd->old = be32_to_cpu(*(__be32 *) payload);
209 bmd->rcode = rcode;
210 complete(&bmd->done);
211}
212
213static void
214fw_card_bm_work(struct work_struct *work)
215{
216 struct fw_card *card = container_of(work, struct fw_card, work.work);
217 struct fw_device *root;
218 struct bm_data bmd;
219 unsigned long flags;
220 int root_id, new_root_id, irm_id, gap_count, generation, grace;
221 int do_reset = 0;
222
223 spin_lock_irqsave(&card->lock, flags);
224
225 generation = card->generation;
226 root = card->root_node->data;
227 root_id = card->root_node->node_id;
228 grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
229
230 if (card->bm_generation + 1 == generation ||
231 (card->bm_generation != generation && grace)) {
232 /*
233 * This first step is to figure out who is IRM and
234 * then try to become bus manager. If the IRM is not
235 * well defined (e.g. does not have an active link
236 * layer or does not responds to our lock request, we
237 * will have to do a little vigilante bus management.
238 * In that case, we do a goto into the gap count logic
239 * so that when we do the reset, we still optimize the
240 * gap count. That could well save a reset in the
241 * next generation.
242 */
243
244 irm_id = card->irm_node->node_id;
245 if (!card->irm_node->link_on) {
246 new_root_id = card->local_node->node_id;
247 fw_notify("IRM has link off, making local node (%02x) root.\n",
248 new_root_id);
249 goto pick_me;
250 }
251
252 bmd.lock.arg = cpu_to_be32(0x3f);
253 bmd.lock.data = cpu_to_be32(card->local_node->node_id);
254
255 spin_unlock_irqrestore(&card->lock, flags);
256
257 init_completion(&bmd.done);
258 fw_send_request(card, &bmd.t, TCODE_LOCK_COMPARE_SWAP,
259 irm_id, generation,
260 SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
261 &bmd.lock, sizeof(bmd.lock),
262 complete_bm_lock, &bmd);
263 wait_for_completion(&bmd.done);
264
265 if (bmd.rcode == RCODE_GENERATION) {
266 /*
267 * Another bus reset happened. Just return,
268 * the BM work has been rescheduled.
269 */
270 return;
271 }
272
273 if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f)
274 /* Somebody else is BM, let them do the work. */
275 return;
276
277 spin_lock_irqsave(&card->lock, flags);
278 if (bmd.rcode != RCODE_COMPLETE) {
279 /*
280 * The lock request failed, maybe the IRM
281 * isn't really IRM capable after all. Let's
282 * do a bus reset and pick the local node as
283 * root, and thus, IRM.
284 */
285 new_root_id = card->local_node->node_id;
286 fw_notify("BM lock failed, making local node (%02x) root.\n",
287 new_root_id);
288 goto pick_me;
289 }
290 } else if (card->bm_generation != generation) {
291 /*
292 * OK, we weren't BM in the last generation, and it's
293 * less than 100ms since last bus reset. Reschedule
294 * this task 100ms from now.
295 */
296 spin_unlock_irqrestore(&card->lock, flags);
297 schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10));
298 return;
299 }
300
301 /*
302 * We're bus manager for this generation, so next step is to
303 * make sure we have an active cycle master and do gap count
304 * optimization.
305 */
306 card->bm_generation = generation;
307
308 if (root == NULL) {
309 /*
310 * Either link_on is false, or we failed to read the
311 * config rom. In either case, pick another root.
312 */
313 new_root_id = card->local_node->node_id;
314 } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) {
315 /*
316 * If we haven't probed this device yet, bail out now
317 * and let's try again once that's done.
318 */
319 spin_unlock_irqrestore(&card->lock, flags);
320 return;
321 } else if (root->config_rom[2] & BIB_CMC) {
322 /*
323 * FIXME: I suppose we should set the cmstr bit in the
324 * STATE_CLEAR register of this node, as described in
325 * 1394-1995, 8.4.2.6. Also, send out a force root
326 * packet for this node.
327 */
328 new_root_id = root_id;
329 } else {
330 /*
331 * Current root has an active link layer and we
332 * successfully read the config rom, but it's not
333 * cycle master capable.
334 */
335 new_root_id = card->local_node->node_id;
336 }
337
338 pick_me:
339 /* Now figure out what gap count to set. */
340 if (card->topology_type == FW_TOPOLOGY_A &&
341 card->root_node->max_hops < ARRAY_SIZE(gap_count_table))
342 gap_count = gap_count_table[card->root_node->max_hops];
343 else
344 gap_count = 63;
345
346 /*
347 * Finally, figure out if we should do a reset or not. If we've
 * done less than 5 resets with the same physical topology and we
349 * have either a new root or a new gap count setting, let's do it.
350 */
351
352 if (card->bm_retries++ < 5 &&
353 (card->gap_count != gap_count || new_root_id != root_id))
354 do_reset = 1;
355
356 spin_unlock_irqrestore(&card->lock, flags);
357
358 if (do_reset) {
359 fw_notify("phy config: card %d, new root=%x, gap_count=%d\n",
360 card->index, new_root_id, gap_count);
361 fw_send_phy_config(card, new_root_id, generation, gap_count);
362 fw_core_initiate_bus_reset(card, 1);
363 }
364}
365
/*
 * Timer hook: flush pending transactions for the card whose pointer
 * was stashed in the timer's data word by fw_card_initialize().
 */
static void
flush_timer_callback(unsigned long data)
{
	fw_flush_transactions((struct fw_card *)data);
}
373
374void
375fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
376 struct device *device)
377{
378 static atomic_t index = ATOMIC_INIT(-1);
379
380 kref_init(&card->kref);
381 card->index = atomic_inc_return(&index);
382 card->driver = driver;
383 card->device = device;
384 card->current_tlabel = 0;
385 card->tlabel_mask = 0;
386 card->color = 0;
387
388 INIT_LIST_HEAD(&card->transaction_list);
389 spin_lock_init(&card->lock);
390 setup_timer(&card->flush_timer,
391 flush_timer_callback, (unsigned long)card);
392
393 card->local_node = NULL;
394
395 INIT_DELAYED_WORK(&card->work, fw_card_bm_work);
396}
397EXPORT_SYMBOL(fw_card_initialize);
398
/*
 * Register an initialized card with the core and enable it.
 * Stores the link parameters, turns on the link-active and contender
 * bits in PHY register 4, generates the initial config ROM and hands
 * it to the driver's enable hook.
 *
 * Returns 0 on success or a negative error code (-EIO if the PHY
 * register update fails, otherwise whatever enable() returns).
 */
int
fw_card_add(struct fw_card *card,
	    u32 max_receive, u32 link_speed, u64 guid)
{
	u32 *config_rom;
	size_t length;

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	/* Activate link_on bit and contender bit in our self ID packets.*/
	if (card->driver->update_phy_reg(card, 4, 0,
					 PHY_LINK_ACTIVE | PHY_CONTENDER) < 0)
		return -EIO;

	/*
	 * The subsystem grabs a reference when the card is added and
	 * drops it when the driver calls fw_core_remove_card.
	 */
	fw_card_get(card);

	/* card_mutex serializes config ROM generation and card_list. */
	mutex_lock(&card_mutex);
	config_rom = generate_config_rom(card, &length);
	list_add_tail(&card->link, &card_list);
	mutex_unlock(&card_mutex);

	return card->driver->enable(card, config_rom, length);
}
EXPORT_SYMBOL(fw_card_add);
429
430
/*
 * The next few functions implement a dummy driver that is used once a
 * card driver shuts down an fw_card.  This allows the driver to
 * cleanly unload, as all IO to the card will be handled by the dummy
 * driver instead of calling into the (possibly) unloaded module.  The
 * dummy driver just fails all IO.
 */
438
439static int
440dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
441{
442 BUG();
443 return -1;
444}
445
446static int
447dummy_update_phy_reg(struct fw_card *card, int address,
448 int clear_bits, int set_bits)
449{
450 return -ENODEV;
451}
452
453static int
454dummy_set_config_rom(struct fw_card *card,
455 u32 *config_rom, size_t length)
456{
457 /*
458 * We take the card out of card_list before setting the dummy
459 * driver, so this should never get called.
460 */
461 BUG();
462 return -1;
463}
464
465static void
466dummy_send_request(struct fw_card *card, struct fw_packet *packet)
467{
468 packet->callback(packet, card, -ENODEV);
469}
470
471static void
472dummy_send_response(struct fw_card *card, struct fw_packet *packet)
473{
474 packet->callback(packet, card, -ENODEV);
475}
476
477static int
478dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
479{
480 return -ENOENT;
481}
482
483static int
484dummy_enable_phys_dma(struct fw_card *card,
485 int node_id, int generation)
486{
487 return -ENODEV;
488}
489
490static struct fw_card_driver dummy_driver = {
491 .name = "dummy",
492 .enable = dummy_enable,
493 .update_phy_reg = dummy_update_phy_reg,
494 .set_config_rom = dummy_set_config_rom,
495 .send_request = dummy_send_request,
496 .cancel_packet = dummy_cancel_packet,
497 .send_response = dummy_send_response,
498 .enable_phys_dma = dummy_enable_phys_dma,
499};
500
/*
 * Undo fw_card_add(): detach the card from the core and fence off all
 * further IO by swapping in the dummy driver.  The order matters: the
 * PHY bits are cleared and a bus reset initiated first, the card
 * leaves card_list before the driver pointer is replaced, and the
 * core's reference is dropped last.
 */
void
fw_core_remove_card(struct fw_card *card)
{
	/* Clear the link-active and contender bits set in fw_card_add(). */
	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_core_initiate_bus_reset(card, 1);

	mutex_lock(&card_mutex);
	list_del(&card->link);
	mutex_unlock(&card_mutex);

	/* Set up the dummy driver. */
	card->driver = &dummy_driver;

	/* Fail all transactions still in flight. */
	fw_flush_transactions(card);

	fw_destroy_nodes(card);

	/* Drop the reference taken in fw_card_add(). */
	fw_card_put(card);
}
EXPORT_SYMBOL(fw_core_remove_card);
522
523struct fw_card *
524fw_card_get(struct fw_card *card)
525{
526 kref_get(&card->kref);
527
528 return card;
529}
530EXPORT_SYMBOL(fw_card_get);
531
532static void
533release_card(struct kref *kref)
534{
535 struct fw_card *card = container_of(kref, struct fw_card, kref);
536
537 kfree(card);
538}
539
/*
 * An assumption for fw_card_put() is that the card driver allocates
 * the fw_card struct with kmalloc and that it has been shut down
 * before the last ref is dropped.
 */
545void
546fw_card_put(struct fw_card *card)
547{
548 kref_put(&card->kref, release_card);
549}
550EXPORT_SYMBOL(fw_card_put);
551
552int
553fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
554{
555 int reg = short_reset ? 5 : 1;
556 int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
557
558 return card->driver->update_phy_reg(card, reg, 0, bit);
559}
560EXPORT_SYMBOL(fw_core_initiate_bus_reset);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
new file mode 100644
index 000000000000..0fa5bd54c6a1
--- /dev/null
+++ b/drivers/firewire/fw-cdev.c
@@ -0,0 +1,961 @@
1/*
2 * Char device for device raw access
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/wait.h>
24#include <linux/errno.h>
25#include <linux/device.h>
26#include <linux/vmalloc.h>
27#include <linux/poll.h>
28#include <linux/delay.h>
29#include <linux/mm.h>
30#include <linux/idr.h>
31#include <linux/compat.h>
32#include <linux/firewire-cdev.h>
33#include <asm/uaccess.h>
34#include "fw-transaction.h"
35#include "fw-topology.h"
36#include "fw-device.h"
37
struct client;

/*
 * A per-client kernel object (pending transaction, address range,
 * descriptor, ...) addressable from userspace through a small integer
 * handle.  release() is invoked either explicitly via an ioctl or
 * implicitly when the file is closed.
 */
struct client_resource {
	struct list_head link;
	void (*release)(struct client *client, struct client_resource *r);
	u32 handle;
};

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct.
 */

struct event {
	/* Up to two chunks, copied back to back to userspace on read(). */
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

/* Event carrying an FW_CDEV_EVENT_BUS_RESET payload. */
struct bus_reset {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

/*
 * Event plus transaction state for one outbound request; the response
 * payload is allocated right behind this struct (ioctl_send_request).
 */
struct response {
	struct event event;
	struct fw_transaction transaction;
	struct client *client;
	struct client_resource resource;
	struct fw_cdev_event_response response;
};

/* Event carrying an FW_CDEV_EVENT_ISO_INTERRUPT payload. */
struct iso_interrupt {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

/* Per-open-file state of the character device. */
struct client {
	u32 version;			/* ABI version requested by userspace */
	struct fw_device *device;
	spinlock_t lock;		/* protects event_list and resource_list */
	u32 resource_handle;		/* next resource handle to hand out */
	struct list_head resource_list;
	struct list_head event_list;
	wait_queue_head_t wait;
	u64 bus_reset_closure;

	/* The client's single iso context and its mmap()'ed buffer. */
	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;

	struct list_head link;		/* entry in device->client_list */
};
90
/* Userspace pointers travel through the cdev ABI as 64-bit integers. */
static inline void __user *
u64_to_uptr(__u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
	return (__u64)(unsigned long)ptr;
}
102
/*
 * open(2): look up the fw_device behind the device node, allocate the
 * per-file client state and link it into the device's client list
 * (under the card lock, which protects that list).
 */
static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;
	unsigned long flags;

	device = fw_device_from_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL)
		return -ENOMEM;

	/* The client holds a device reference until release(). */
	client->device = fw_device_get(device);
	INIT_LIST_HEAD(&client->event_list);
	INIT_LIST_HEAD(&client->resource_list);
	spin_lock_init(&client->lock);
	init_waitqueue_head(&client->wait);

	file->private_data = client;

	spin_lock_irqsave(&device->card->lock, flags);
	list_add_tail(&client->link, &device->client_list);
	spin_unlock_irqrestore(&device->card->lock, flags);

	return 0;
}
131
132static void queue_event(struct client *client, struct event *event,
133 void *data0, size_t size0, void *data1, size_t size1)
134{
135 unsigned long flags;
136
137 event->v[0].data = data0;
138 event->v[0].size = size0;
139 event->v[1].data = data1;
140 event->v[1].size = size1;
141
142 spin_lock_irqsave(&client->lock, flags);
143
144 list_add_tail(&event->link, &client->event_list);
145 wake_up_interruptible(&client->wait);
146
147 spin_unlock_irqrestore(&client->lock, flags);
148}
149
/*
 * Remove the oldest event from the client's queue and copy it to
 * userspace.  Sleeps (interruptibly) until an event arrives or the
 * device is shut down; shutdown with an empty queue yields -ENODEV.
 * At most @count bytes are copied (the event's two chunks back to
 * back); the event is freed in any case.
 *
 * Returns the number of bytes copied or a negative error code.
 */
static int
dequeue_event(struct client *client, char __user *buffer, size_t count)
{
	unsigned long flags;
	struct event *event;
	size_t size, total;
	int i, retval;

	retval = wait_event_interruptible(client->wait,
					  !list_empty(&client->event_list) ||
					  fw_device_is_shutdown(client->device));
	if (retval < 0)
		return retval;

	if (list_empty(&client->event_list) &&
	    fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irqsave(&client->lock, flags);
	event = container_of(client->event_list.next, struct event, link);
	list_del(&event->link);
	spin_unlock_irqrestore(&client->lock, flags);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			retval = -EFAULT;
			goto out;
		}
		total += size;
	}
	retval = total;

 out:
	/* struct event is the head of the containing allocation. */
	kfree(event);

	return retval;
}
189
190static ssize_t
191fw_device_op_read(struct file *file,
192 char __user *buffer, size_t count, loff_t *offset)
193{
194 struct client *client = file->private_data;
195
196 return dequeue_event(client, buffer, count);
197}
198
199static void
200fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
201 struct client *client)
202{
203 struct fw_card *card = client->device->card;
204
205 event->closure = client->bus_reset_closure;
206 event->type = FW_CDEV_EVENT_BUS_RESET;
207 event->node_id = client->device->node_id;
208 event->local_node_id = card->local_node->node_id;
209 event->bm_node_id = 0; /* FIXME: We don't track the BM. */
210 event->irm_node_id = card->irm_node->node_id;
211 event->root_node_id = card->root_node->node_id;
212 event->generation = card->generation;
213}
214
215static void
216for_each_client(struct fw_device *device,
217 void (*callback)(struct client *client))
218{
219 struct fw_card *card = device->card;
220 struct client *c;
221 unsigned long flags;
222
223 spin_lock_irqsave(&card->lock, flags);
224
225 list_for_each_entry(c, &device->client_list, link)
226 callback(c);
227
228 spin_unlock_irqrestore(&card->lock, flags);
229}
230
231static void
232queue_bus_reset_event(struct client *client)
233{
234 struct bus_reset *bus_reset;
235
236 bus_reset = kzalloc(sizeof(*bus_reset), GFP_ATOMIC);
237 if (bus_reset == NULL) {
238 fw_notify("Out of memory when allocating bus reset event\n");
239 return;
240 }
241
242 fill_bus_reset_event(&bus_reset->reset, client);
243
244 queue_event(client, &bus_reset->event,
245 &bus_reset->reset, sizeof(bus_reset->reset), NULL, 0);
246}
247
248void fw_device_cdev_update(struct fw_device *device)
249{
250 for_each_client(device, queue_bus_reset_event);
251}
252
253static void wake_up_client(struct client *client)
254{
255 wake_up_interruptible(&client->wait);
256}
257
258void fw_device_cdev_remove(struct fw_device *device)
259{
260 for_each_client(device, wake_up_client);
261}
262
/*
 * GET_INFO ioctl: exchange ABI versions, optionally copy out (a
 * truncated view of) the device's config ROM and the current bus
 * reset state, and report the card index.  Also latches the client's
 * bus_reset_closure for future bus reset events.
 */
static int ioctl_get_info(struct client *client, void *buffer)
{
	struct fw_cdev_get_info *get_info = buffer;
	struct fw_cdev_event_bus_reset bus_reset;

	client->version = get_info->version;
	get_info->version = FW_CDEV_VERSION;

	if (get_info->rom != 0) {
		void __user *uptr = u64_to_uptr(get_info->rom);
		size_t want = get_info->rom_length;
		size_t have = client->device->config_rom_length * 4;

		/* Copy no more than the client asked for or we have. */
		if (copy_to_user(uptr, client->device->config_rom,
				 min(want, have)))
			return -EFAULT;
	}
	/* Always report the full ROM size in bytes. */
	get_info->rom_length = client->device->config_rom_length * 4;

	client->bus_reset_closure = get_info->bus_reset_closure;
	if (get_info->bus_reset != 0) {
		void __user *uptr = u64_to_uptr(get_info->bus_reset);

		fill_bus_reset_event(&bus_reset, client);
		if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
			return -EFAULT;
	}

	get_info->card = client->device->card->index;

	return 0;
}
295
296static void
297add_client_resource(struct client *client, struct client_resource *resource)
298{
299 unsigned long flags;
300
301 spin_lock_irqsave(&client->lock, flags);
302 list_add_tail(&resource->link, &client->resource_list);
303 resource->handle = client->resource_handle++;
304 spin_unlock_irqrestore(&client->lock, flags);
305}
306
/*
 * Look up and unlink the resource with @handle.  If @resource is
 * non-NULL the caller takes ownership of the unlinked resource;
 * otherwise its release() hook is invoked.  Returns -EINVAL when no
 * resource has that handle.
 */
static int
release_client_resource(struct client *client, u32 handle,
			struct client_resource **resource)
{
	struct client_resource *r;
	unsigned long flags;

	spin_lock_irqsave(&client->lock, flags);
	list_for_each_entry(r, &client->resource_list, link) {
		if (r->handle == handle) {
			list_del(&r->link);
			break;
		}
	}
	spin_unlock_irqrestore(&client->lock, flags);

	/*
	 * If the loop ran to completion, list_for_each_entry left the
	 * cursor pointing at the list head, i.e. nothing matched.
	 */
	if (&r->link == &client->resource_list)
		return -EINVAL;

	if (resource)
		*resource = r;
	else
		r->release(client, r);

	return 0;
}
333
334static void
335release_transaction(struct client *client, struct client_resource *resource)
336{
337 struct response *response =
338 container_of(resource, struct response, resource);
339
340 fw_cancel_transaction(client->device->card, &response->transaction);
341}
342
/*
 * Transaction completion callback: unlink the resource (the
 * transaction can no longer be cancelled) and queue an
 * FW_CDEV_EVENT_RESPONSE event, attaching the response payload.
 */
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct response *response = data;
	struct client *client = response->client;
	unsigned long flags;

	/* Never copy more than the buffer allocated for the response. */
	if (length < response->response.length)
		response->response.length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(response->response.data, payload,
		       response->response.length);

	spin_lock_irqsave(&client->lock, flags);
	list_del(&response->resource.link);
	spin_unlock_irqrestore(&client->lock, flags);

	response->response.type = FW_CDEV_EVENT_RESPONSE;
	response->response.rcode = rcode;
	queue_event(client, &response->event,
		    &response->response, sizeof(response->response),
		    response->response.data, response->response.length);
}
367
368static ssize_t ioctl_send_request(struct client *client, void *buffer)
369{
370 struct fw_device *device = client->device;
371 struct fw_cdev_send_request *request = buffer;
372 struct response *response;
373
374 /* What is the biggest size we'll accept, really? */
375 if (request->length > 4096)
376 return -EINVAL;
377
378 response = kmalloc(sizeof(*response) + request->length, GFP_KERNEL);
379 if (response == NULL)
380 return -ENOMEM;
381
382 response->client = client;
383 response->response.length = request->length;
384 response->response.closure = request->closure;
385
386 if (request->data &&
387 copy_from_user(response->response.data,
388 u64_to_uptr(request->data), request->length)) {
389 kfree(response);
390 return -EFAULT;
391 }
392
393 response->resource.release = release_transaction;
394 add_client_resource(client, &response->resource);
395
396 fw_send_request(device->card, &response->transaction,
397 request->tcode & 0x1f,
398 device->node->node_id,
399 request->generation,
400 device->node->max_speed,
401 request->offset,
402 response->response.data, request->length,
403 complete_transaction, response);
404
405 if (request->data)
406 return sizeof(request) + request->length;
407 else
408 return sizeof(request);
409}
410
/* An address range allocated to a client, tracked as a resource. */
struct address_handler {
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
	struct client_resource resource;
};

/* An inbound request waiting for the client to send a response. */
struct request {
	struct fw_request *request;
	void *data;
	size_t length;
	struct client_resource resource;
};

/* Event carrying an FW_CDEV_EVENT_REQUEST payload. */
struct request_event {
	struct event event;
	struct fw_cdev_event_request request;
};
429
430static void
431release_request(struct client *client, struct client_resource *resource)
432{
433 struct request *request =
434 container_of(resource, struct request, resource);
435
436 fw_send_response(client->device->card, request->request,
437 RCODE_CONFLICT_ERROR);
438 kfree(request);
439}
440
/*
 * Address handler callback for inbound requests to a client-allocated
 * range.  Tracks the request as a client resource (so an unanswered
 * request is failed on close, see release_request()) and queues an
 * FW_CDEV_EVENT_REQUEST event with the payload attached.  If memory
 * is tight, the request is failed immediately.
 */
static void
handle_request(struct fw_card *card, struct fw_request *r,
	       int tcode, int destination, int source,
	       int generation, int speed,
	       unsigned long long offset,
	       void *payload, size_t length, void *callback_data)
{
	struct address_handler *handler = callback_data;
	struct request *request;
	struct request_event *e;
	struct client *client = handler->client;

	request = kmalloc(sizeof(*request), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (request == NULL || e == NULL) {
		/* kfree(NULL) is fine for whichever one succeeded. */
		kfree(request);
		kfree(e);
		fw_send_response(card, r, RCODE_CONFLICT_ERROR);
		return;
	}

	request->request = r;
	request->data = payload;
	request->length = length;

	request->resource.release = release_request;
	add_client_resource(client, &request->resource);

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.handle = request->resource.handle;
	e->request.closure = handler->closure;

	queue_event(client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
}
479
480static void
481release_address_handler(struct client *client,
482 struct client_resource *resource)
483{
484 struct address_handler *handler =
485 container_of(resource, struct address_handler, resource);
486
487 fw_core_remove_address_handler(&handler->handler);
488 kfree(handler);
489}
490
491static int ioctl_allocate(struct client *client, void *buffer)
492{
493 struct fw_cdev_allocate *request = buffer;
494 struct address_handler *handler;
495 struct fw_address_region region;
496
497 handler = kmalloc(sizeof(*handler), GFP_KERNEL);
498 if (handler == NULL)
499 return -ENOMEM;
500
501 region.start = request->offset;
502 region.end = request->offset + request->length;
503 handler->handler.length = request->length;
504 handler->handler.address_callback = handle_request;
505 handler->handler.callback_data = handler;
506 handler->closure = request->closure;
507 handler->client = client;
508
509 if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
510 kfree(handler);
511 return -EBUSY;
512 }
513
514 handler->resource.release = release_address_handler;
515 add_client_resource(client, &handler->resource);
516 request->handle = handler->resource.handle;
517
518 return 0;
519}
520
521static int ioctl_deallocate(struct client *client, void *buffer)
522{
523 struct fw_cdev_deallocate *request = buffer;
524
525 return release_client_resource(client, request->handle, NULL);
526}
527
528static int ioctl_send_response(struct client *client, void *buffer)
529{
530 struct fw_cdev_send_response *request = buffer;
531 struct client_resource *resource;
532 struct request *r;
533
534 if (release_client_resource(client, request->handle, &resource) < 0)
535 return -EINVAL;
536 r = container_of(resource, struct request, resource);
537 if (request->length < r->length)
538 r->length = request->length;
539 if (copy_from_user(r->data, u64_to_uptr(request->data), r->length))
540 return -EFAULT;
541
542 fw_send_response(client->device->card, r->request, request->rcode);
543 kfree(r);
544
545 return 0;
546}
547
548static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
549{
550 struct fw_cdev_initiate_bus_reset *request = buffer;
551 int short_reset;
552
553 short_reset = (request->type == FW_CDEV_SHORT_RESET);
554
555 return fw_core_initiate_bus_reset(client->device->card, short_reset);
556}
557
/* A config ROM descriptor added by a client; data is stored inline. */
struct descriptor {
	struct fw_descriptor d;
	struct client_resource resource;
	u32 data[0];
};
563
564static void release_descriptor(struct client *client,
565 struct client_resource *resource)
566{
567 struct descriptor *descriptor =
568 container_of(resource, struct descriptor, resource);
569
570 fw_core_remove_descriptor(&descriptor->d);
571 kfree(descriptor);
572}
573
/*
 * ADD_DESCRIPTOR ioctl: copy a descriptor (at most 256 quadlets)
 * from userspace, register it with the core config ROM, and track it
 * as a client resource so it is removed on close.
 */
static int ioctl_add_descriptor(struct client *client, void *buffer)
{
	struct fw_cdev_add_descriptor *request = buffer;
	struct descriptor *descriptor;
	int retval;

	if (request->length > 256)
		return -EINVAL;

	/* request->length is in quadlets; data[] is allocated inline. */
	descriptor =
		kmalloc(sizeof(*descriptor) + request->length * 4, GFP_KERNEL);
	if (descriptor == NULL)
		return -ENOMEM;

	if (copy_from_user(descriptor->data,
			   u64_to_uptr(request->data), request->length * 4)) {
		kfree(descriptor);
		return -EFAULT;
	}

	descriptor->d.length = request->length;
	descriptor->d.immediate = request->immediate;
	descriptor->d.key = request->key;
	descriptor->d.data = descriptor->data;

	retval = fw_core_add_descriptor(&descriptor->d);
	if (retval < 0) {
		kfree(descriptor);
		return retval;
	}

	descriptor->resource.release = release_descriptor;
	add_client_resource(client, &descriptor->resource);
	request->handle = descriptor->resource.handle;

	return 0;
}
611
612static int ioctl_remove_descriptor(struct client *client, void *buffer)
613{
614 struct fw_cdev_remove_descriptor *request = buffer;
615
616 return release_client_resource(client, request->handle, NULL);
617}
618
619static void
620iso_callback(struct fw_iso_context *context, u32 cycle,
621 size_t header_length, void *header, void *data)
622{
623 struct client *client = data;
624 struct iso_interrupt *interrupt;
625
626 interrupt = kzalloc(sizeof(*interrupt) + header_length, GFP_ATOMIC);
627 if (interrupt == NULL)
628 return;
629
630 interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
631 interrupt->interrupt.closure = client->iso_closure;
632 interrupt->interrupt.cycle = cycle;
633 interrupt->interrupt.header_length = header_length;
634 memcpy(interrupt->interrupt.header, header, header_length);
635 queue_event(client, &interrupt->event,
636 &interrupt->interrupt,
637 sizeof(interrupt->interrupt) + header_length, NULL, 0);
638}
639
/*
 * CREATE_ISO_CONTEXT ioctl: validate the parameters and create the
 * client's single iso context (receive or transmit).  Only one
 * context per client is supported, hence the fixed handle 0.
 */
static int ioctl_create_iso_context(struct client *client, void *buffer)
{
	struct fw_cdev_create_iso_context *request = buffer;

	if (request->channel > 63)
		return -EINVAL;

	switch (request->type) {
	case FW_ISO_CONTEXT_RECEIVE:
		/* Receive contexts need a quadlet-aligned header size. */
		if (request->header_size < 4 || (request->header_size & 3))
			return -EINVAL;

		break;

	case FW_ISO_CONTEXT_TRANSMIT:
		if (request->speed > SCODE_3200)
			return -EINVAL;

		break;

	default:
		return -EINVAL;
	}

	client->iso_closure = request->closure;
	client->iso_context = fw_iso_context_create(client->device->card,
						    request->type,
						    request->channel,
						    request->speed,
						    request->header_size,
						    iso_callback, client);
	if (IS_ERR(client->iso_context))
		return PTR_ERR(client->iso_context);

	/* We only support one context at this time. */
	request->handle = 0;

	return 0;
}
679
/*
 * QUEUE_ISO ioctl: parse a userspace array of variable-length
 * fw_cdev_iso_packet structs and queue each packet on the client's
 * iso context.  On return, request->packets/size/data are advanced
 * past what was consumed, and the number of packets queued is
 * returned.
 */
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	/* One access_ok() up front allows __copy_from_user() below. */
	if (!access_ok(VERIFY_READ, request->packets, request->size))
		return -EFAULT;

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (__copy_from_user(&u.packet, p, sizeof(*p)))
			return -EFAULT;

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			header_length = 0;
		}

		/* Inline headers follow each packet's fixed-size part. */
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	/* Tell userspace how far we got, for easy resubmission. */
	request->size -= uptr_to_u64(p) - request->packets;
	request->packets = uptr_to_u64(p);
	request->data = client->vm_start + payload;

	return count;
}
767
768static int ioctl_start_iso(struct client *client, void *buffer)
769{
770 struct fw_cdev_start_iso *request = buffer;
771
772 if (request->handle != 0)
773 return -EINVAL;
774 if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
775 if (request->tags == 0 || request->tags > 15)
776 return -EINVAL;
777
778 if (request->sync > 15)
779 return -EINVAL;
780 }
781
782 return fw_iso_context_start(client->iso_context, request->cycle,
783 request->sync, request->tags);
784}
785
786static int ioctl_stop_iso(struct client *client, void *buffer)
787{
788 struct fw_cdev_stop_iso *request = buffer;
789
790 if (request->handle != 0)
791 return -EINVAL;
792
793 return fw_iso_context_stop(client->iso_context);
794}
795
/*
 * Handler table indexed by _IOC_NR(cmd) in dispatch_ioctl(); the
 * order must match the FW_CDEV_IOC_* command numbering of the cdev
 * ABI (see the linux/firewire-cdev.h include above).
 */
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
};
810
/*
 * Generic ioctl marshaling: the argument struct (at most 256 bytes,
 * size encoded in the command) is copied into a kernel buffer, handed
 * to the handler selected by _IOC_NR(cmd), and copied back if the
 * command direction includes a read.  Handlers thus only ever touch
 * kernel memory.
 */
static int
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
	char buffer[256];
	int retval;

	/* Only accept our ioctl type and known command numbers. */
	if (_IOC_TYPE(cmd) != '#' ||
	    _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
		return -EINVAL;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	retval = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
	if (retval < 0)
		return retval;

	if (_IOC_DIR(cmd) & _IOC_READ) {
		if (_IOC_SIZE(cmd) > sizeof(buffer) ||
		    copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}
839
840static long
841fw_device_op_ioctl(struct file *file,
842 unsigned int cmd, unsigned long arg)
843{
844 struct client *client = file->private_data;
845
846 return dispatch_ioctl(client, cmd, (void __user *) arg);
847}
848
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point: same dispatch, pointer via compat_ptr(). */
static long
fw_device_op_compat_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
859
860static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
861{
862 struct client *client = file->private_data;
863 enum dma_data_direction direction;
864 unsigned long size;
865 int page_count, retval;
866
867 /* FIXME: We could support multiple buffers, but we don't. */
868 if (client->buffer.pages != NULL)
869 return -EBUSY;
870
871 if (!(vma->vm_flags & VM_SHARED))
872 return -EINVAL;
873
874 if (vma->vm_start & ~PAGE_MASK)
875 return -EINVAL;
876
877 client->vm_start = vma->vm_start;
878 size = vma->vm_end - vma->vm_start;
879 page_count = size >> PAGE_SHIFT;
880 if (size & ~PAGE_MASK)
881 return -EINVAL;
882
883 if (vma->vm_flags & VM_WRITE)
884 direction = DMA_TO_DEVICE;
885 else
886 direction = DMA_FROM_DEVICE;
887
888 retval = fw_iso_buffer_init(&client->buffer, client->device->card,
889 page_count, direction);
890 if (retval < 0)
891 return retval;
892
893 retval = fw_iso_buffer_map(&client->buffer, vma);
894 if (retval < 0)
895 fw_iso_buffer_destroy(&client->buffer, client->device->card);
896
897 return retval;
898}
899
/*
 * release(2): tear down everything the client created — iso buffer
 * and context, outstanding resources (their release() hooks fail
 * pending inbound requests and cancel outbound ones), undelivered
 * events — then unlink from the device and drop the references
 * taken at open().
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;
	struct client_resource *r, *next_r;
	unsigned long flags;

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	list_for_each_entry_safe(r, next_r, &client->resource_list, link)
		r->release(client, r);

	/*
	 * FIXME: We should wait for the async tasklets to stop
	 * running before freeing the memory.
	 */

	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	spin_lock_irqsave(&client->device->card->lock, flags);
	list_del(&client->link);
	spin_unlock_irqrestore(&client->device->card->lock, flags);

	fw_device_put(client->device);
	kfree(client);

	return 0;
}
933
934static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
935{
936 struct client *client = file->private_data;
937 unsigned int mask = 0;
938
939 poll_wait(file, &client->wait, pt);
940
941 if (fw_device_is_shutdown(client->device))
942 mask |= POLLHUP | POLLERR;
943 if (!list_empty(&client->event_list))
944 mask |= POLLIN | POLLRDNORM;
945
946 return mask;
947}
948
/* File operations of the firewire character device. */
const struct file_operations fw_device_ops = {
	.owner = THIS_MODULE,
	.open = fw_device_op_open,
	.read = fw_device_op_read,
	.unlocked_ioctl = fw_device_op_ioctl,
	.poll = fw_device_op_poll,
	.release = fw_device_op_release,
	.mmap = fw_device_op_mmap,

#ifdef CONFIG_COMPAT
	.compat_ioctl = fw_device_op_compat_ioctl,
#endif
};
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c
new file mode 100644
index 000000000000..c1ce465d9710
--- /dev/null
+++ b/drivers/firewire/fw-device.c
@@ -0,0 +1,813 @@
1/*
2 * Device probing and sysfs code.
3 *
4 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include <linux/kthread.h>
25#include <linux/device.h>
26#include <linux/delay.h>
27#include <linux/idr.h>
28#include <linux/rwsem.h>
29#include <asm/semaphore.h>
30#include <linux/ctype.h>
31#include "fw-transaction.h"
32#include "fw-topology.h"
33#include "fw-device.h"
34
35void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
36{
37 ci->p = p + 1;
38 ci->end = ci->p + (p[0] >> 16);
39}
40EXPORT_SYMBOL(fw_csr_iterator_init);
41
42int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
43{
44 *key = *ci->p >> 24;
45 *value = *ci->p & 0xffffff;
46
47 return ci->p++ < ci->end;
48}
49EXPORT_SYMBOL(fw_csr_iterator_next);
50
51static int is_fw_unit(struct device *dev);
52
53static int match_unit_directory(u32 * directory, const struct fw_device_id *id)
54{
55 struct fw_csr_iterator ci;
56 int key, value, match;
57
58 match = 0;
59 fw_csr_iterator_init(&ci, directory);
60 while (fw_csr_iterator_next(&ci, &key, &value)) {
61 if (key == CSR_VENDOR && value == id->vendor)
62 match |= FW_MATCH_VENDOR;
63 if (key == CSR_MODEL && value == id->model)
64 match |= FW_MATCH_MODEL;
65 if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
66 match |= FW_MATCH_SPECIFIER_ID;
67 if (key == CSR_VERSION && value == id->version)
68 match |= FW_MATCH_VERSION;
69 }
70
71 return (match & id->match_flags) == id->match_flags;
72}
73
74static int fw_unit_match(struct device *dev, struct device_driver *drv)
75{
76 struct fw_unit *unit = fw_unit(dev);
77 struct fw_driver *driver = fw_driver(drv);
78 int i;
79
80 /* We only allow binding to fw_units. */
81 if (!is_fw_unit(dev))
82 return 0;
83
84 for (i = 0; driver->id_table[i].match_flags != 0; i++) {
85 if (match_unit_directory(unit->directory, &driver->id_table[i]))
86 return 1;
87 }
88
89 return 0;
90}
91
92static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
93{
94 struct fw_device *device = fw_device(unit->device.parent);
95 struct fw_csr_iterator ci;
96
97 int key, value;
98 int vendor = 0;
99 int model = 0;
100 int specifier_id = 0;
101 int version = 0;
102
103 fw_csr_iterator_init(&ci, &device->config_rom[5]);
104 while (fw_csr_iterator_next(&ci, &key, &value)) {
105 switch (key) {
106 case CSR_VENDOR:
107 vendor = value;
108 break;
109 case CSR_MODEL:
110 model = value;
111 break;
112 }
113 }
114
115 fw_csr_iterator_init(&ci, unit->directory);
116 while (fw_csr_iterator_next(&ci, &key, &value)) {
117 switch (key) {
118 case CSR_SPECIFIER_ID:
119 specifier_id = value;
120 break;
121 case CSR_VERSION:
122 version = value;
123 break;
124 }
125 }
126
127 return snprintf(buffer, buffer_size,
128 "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
129 vendor, model, specifier_id, version);
130}
131
/*
 * uevent callback for fw_unit devices: export a MODALIAS environment
 * variable so userspace (udev/modprobe) can autoload a matching driver.
 */
static int
fw_unit_uevent(struct device *dev, char **envp, int num_envp,
	       char *buffer, int buffer_size)
{
	struct fw_unit *unit = fw_unit(dev);
	char modalias[64];
	int length = 0;
	int i = 0;

	/* NOTE(review): truncation to 63 chars is silently ignored here. */
	get_modalias(unit, modalias, sizeof(modalias));

	if (add_uevent_var(envp, num_envp, &i,
			   buffer, buffer_size, &length,
			   "MODALIAS=%s", modalias))
		return -ENOMEM;

	/* Terminate the environment array. */
	envp[i] = NULL;

	return 0;
}
152
/* The "firewire" bus; devices and units hang off it, drivers bind via
 * fw_unit_match(). */
struct bus_type fw_bus_type = {
	.name = "firewire",
	.match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);
158
/* Take a reference on the device's embedded struct device and return it. */
struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}
165
/* Drop a reference taken with fw_device_get(); may free the device. */
void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}
170
/*
 * Final release for a struct fw_device, called by the driver core once
 * the last reference is dropped.  Detaches the topology node's back
 * pointer, releases the node and card references and frees the device.
 */
static void fw_device_release(struct device *dev)
{
	struct fw_device *device = fw_device(dev);
	unsigned long flags;

	/*
	 * Take the card lock so we don't set this to NULL while a
	 * FW_NODE_UPDATED callback is being handled.
	 */
	spin_lock_irqsave(&device->card->lock, flags);
	device->node->data = NULL;
	spin_unlock_irqrestore(&device->card->lock, flags);

	fw_node_put(device->node);
	fw_card_put(device->card);
	kfree(device->config_rom);
	kfree(device);
}
189
/*
 * Allow the device physical DMA access, delegating to the card driver
 * with the device's current node id and generation.
 */
int fw_device_enable_phys_dma(struct fw_device *device)
{
	return device->card->driver->enable_phys_dma(device->card,
						     device->node_id,
						     device->generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);
197
/* A sysfs attribute bound to one config-ROM key (CSR_VENDOR, ...). */
struct config_rom_attribute {
	struct device_attribute attr;
	u32 key;
};
202
/*
 * sysfs show() for immediate config-ROM entries.  Scans the relevant
 * directory (the unit directory for fw_units, the root directory at
 * quadlet 5 otherwise) for the attribute's key.  A NULL @buf is used
 * by init_fw_attribute_group() to probe whether the key exists; the
 * "buf ? PAGE_SIZE : 0" size makes snprintf() format nothing then.
 */
static ssize_t
show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	u32 *dir;
	int key, value;

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (attr->key == key)
			return snprintf(buf, buf ? PAGE_SIZE : 0,
					"0x%06x\n", value);

	/* Key not present in this directory. */
	return -ENOENT;
}
225
226#define IMMEDIATE_ATTR(name, key) \
227 { __ATTR(name, S_IRUGO, show_immediate, NULL), key }
228
/*
 * sysfs show() for textual descriptor leaves.  Finds the descriptor
 * leaf that immediately follows an entry with the attribute's key,
 * validates it, and copies its text (byte-swapped to big-endian order)
 * into @buf, stripping trailing whitespace/NULs and appending '\n'.
 * A NULL @buf probes for existence and returns the raw byte length.
 */
static ssize_t
show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	u32 *dir, *block = NULL, *p, *end;
	int length, key, value, last_key = 0;
	char *b;

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	/* A descriptor leaf describes the entry right before it. */
	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (attr->key == last_key &&
		    key == (CSR_DESCRIPTOR | CSR_LEAF))
			block = ci.p - 1 + value;
		last_key = key;
	}

	if (block == NULL)
		return -ENOENT;

	/* Leaf length in quadlets, capped; must cover the 2-quadlet
	 * specifier plus at least one quadlet of text. */
	length = min(block[0] >> 16, 256U);
	if (length < 3)
		return -ENOENT;

	/* Only the all-zero language/character-set specifier is handled. */
	if (block[1] != 0 || block[2] != 0)
		/* Unknown encoding. */
		return -ENOENT;

	if (buf == NULL)
		return length * 4;

	b = buf;
	end = &block[length + 1];
	for (p = &block[3]; p < end; p++, b += 4)
		* (u32 *) b = (__force u32) __cpu_to_be32(*p);

	/* Strip trailing whitespace and add newline. */
	while (b--, (isspace(*b) || *b == '\0') && b > buf);
	strcpy(b + 1, "\n");

	return b + 2 - buf;
}
277
278#define TEXT_LEAF_ATTR(name, key) \
279 { __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
280
/* All config-ROM-derived sysfs attributes; each is only registered for
 * a given device/unit if its key is actually present (see
 * init_fw_attribute_group()). */
static struct config_rom_attribute config_rom_attributes[] = {
	IMMEDIATE_ATTR(vendor, CSR_VENDOR),
	IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
	IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
	IMMEDIATE_ATTR(version, CSR_VERSION),
	IMMEDIATE_ATTR(model, CSR_MODEL),
	TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
	TEXT_LEAF_ATTR(model_name, CSR_MODEL),
	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
291
/*
 * Populate @group with the fixed attributes in @attrs plus every
 * config_rom_attribute whose key is present for @dev (probed by
 * calling show() with a NULL buffer), then point dev->groups at it.
 * Must run before device registration so sysfs files appear atomically.
 */
static void
init_fw_attribute_group(struct device *dev,
			struct device_attribute *attrs,
			struct fw_attribute_group *group)
{
	struct device_attribute *attr;
	int i, j;

	for (j = 0; attrs[j].attr.name != NULL; j++)
		group->attrs[j] = &attrs[j].attr;

	for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
		attr = &config_rom_attributes[i].attr;
		/* show() returning < 0 means the key is absent; skip it. */
		if (attr->show(dev, attr, NULL) < 0)
			continue;
		group->attrs[j++] = &attr->attr;
	}

	/* Leave room for the NULL terminator written below. */
	BUG_ON(j >= ARRAY_SIZE(group->attrs));
	group->attrs[j++] = NULL;
	group->groups[0] = &group->group;
	group->groups[1] = NULL;
	group->group.attrs = group->attrs;
	dev->groups = group->groups;
}
317
318static ssize_t
319modalias_show(struct device *dev,
320 struct device_attribute *attr, char *buf)
321{
322 struct fw_unit *unit = fw_unit(dev);
323 int length;
324
325 length = get_modalias(unit, buf, PAGE_SIZE);
326 strcpy(buf + length, "\n");
327
328 return length + 1;
329}
330
331static ssize_t
332rom_index_show(struct device *dev,
333 struct device_attribute *attr, char *buf)
334{
335 struct fw_device *device = fw_device(dev->parent);
336 struct fw_unit *unit = fw_unit(dev);
337
338 return snprintf(buf, PAGE_SIZE, "%d\n",
339 (int)(unit->directory - device->config_rom));
340}
341
/* Fixed sysfs attributes of every fw_unit. */
static struct device_attribute fw_unit_attributes[] = {
	__ATTR_RO(modalias),
	__ATTR_RO(rom_index),
	__ATTR_NULL,
};
347
/*
 * sysfs "config_rom" attribute: raw copy of the cached config ROM.
 * NOTE(review): assumes config_rom_length * 4 <= PAGE_SIZE — confirm
 * (read_bus_info_block() caps the ROM at 256 quadlets = 1024 bytes).
 */
static ssize_t
config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);

	memcpy(buf, device->config_rom, device->config_rom_length * 4);

	return device->config_rom_length * 4;
}
357
358static ssize_t
359guid_show(struct device *dev, struct device_attribute *attr, char *buf)
360{
361 struct fw_device *device = fw_device(dev);
362 u64 guid;
363
364 guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4];
365
366 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
367 (unsigned long long)guid);
368}
369
/* Fixed sysfs attributes of every fw_device. */
static struct device_attribute fw_device_attributes[] = {
	__ATTR_RO(config_rom),
	__ATTR_RO(guid),
	__ATTR_NULL,
};
375
/* Completion cookie shared between read_rom() and its async callback. */
struct read_quadlet_callback_data {
	struct completion done;
	int rcode;	/* transaction result code */
	u32 data;	/* quadlet read, valid iff rcode == RCODE_COMPLETE */
};
381
/*
 * Async completion for read_rom(): stash the quadlet (on success) and
 * the rcode, then wake the waiter.
 */
static void
complete_transaction(struct fw_card *card, int rcode,
		     void *payload, size_t length, void *data)
{
	struct read_quadlet_callback_data *callback_data = data;

	if (rcode == RCODE_COMPLETE)
		callback_data->data = be32_to_cpu(*(__be32 *)payload);
	callback_data->rcode = rcode;
	complete(&callback_data->done);
}
393
/*
 * Synchronously read config-ROM quadlet @index from @device at the
 * standard config-ROM base address, at the lowest speed (S100).
 * Sleeps until the transaction completes; returns the rcode.
 */
static int read_rom(struct fw_device *device, int index, u32 * data)
{
	struct read_quadlet_callback_data callback_data;
	struct fw_transaction t;
	u64 offset;

	init_completion(&callback_data.done);

	/* CSR config ROM base: 0xfffff0000400. */
	offset = 0xfffff0000400ULL + index * 4;
	fw_send_request(device->card, &t, TCODE_READ_QUADLET_REQUEST,
			device->node_id,
			device->generation, SCODE_100,
			offset, NULL, 4, complete_transaction, &callback_data);

	wait_for_completion(&callback_data.done);

	*data = callback_data.data;

	return callback_data.rcode;
}
414
415static int read_bus_info_block(struct fw_device *device)
416{
417 static u32 rom[256];
418 u32 stack[16], sp, key;
419 int i, end, length;
420
421 /* First read the bus info block. */
422 for (i = 0; i < 5; i++) {
423 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
424 return -1;
425 /*
426 * As per IEEE1212 7.2, during power-up, devices can
427 * reply with a 0 for the first quadlet of the config
428 * rom to indicate that they are booting (for example,
429 * if the firmware is on the disk of a external
430 * harddisk). In that case we just fail, and the
431 * retry mechanism will try again later.
432 */
433 if (i == 0 && rom[i] == 0)
434 return -1;
435 }
436
437 /*
438 * Now parse the config rom. The config rom is a recursive
439 * directory structure so we parse it using a stack of
440 * references to the blocks that make up the structure. We
441 * push a reference to the root directory on the stack to
442 * start things off.
443 */
444 length = i;
445 sp = 0;
446 stack[sp++] = 0xc0000005;
447 while (sp > 0) {
448 /*
449 * Pop the next block reference of the stack. The
450 * lower 24 bits is the offset into the config rom,
451 * the upper 8 bits are the type of the reference the
452 * block.
453 */
454 key = stack[--sp];
455 i = key & 0xffffff;
456 if (i >= ARRAY_SIZE(rom))
457 /*
458 * The reference points outside the standard
459 * config rom area, something's fishy.
460 */
461 return -1;
462
463 /* Read header quadlet for the block to get the length. */
464 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
465 return -1;
466 end = i + (rom[i] >> 16) + 1;
467 i++;
468 if (end > ARRAY_SIZE(rom))
469 /*
470 * This block extends outside standard config
471 * area (and the array we're reading it
472 * into). That's broken, so ignore this
473 * device.
474 */
475 return -1;
476
477 /*
478 * Now read in the block. If this is a directory
479 * block, check the entries as we read them to see if
480 * it references another block, and push it in that case.
481 */
482 while (i < end) {
483 if (read_rom(device, i, &rom[i]) != RCODE_COMPLETE)
484 return -1;
485 if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
486 sp < ARRAY_SIZE(stack))
487 stack[sp++] = i + rom[i];
488 i++;
489 }
490 if (length < i)
491 length = i;
492 }
493
494 device->config_rom = kmalloc(length * 4, GFP_KERNEL);
495 if (device->config_rom == NULL)
496 return -1;
497 memcpy(device->config_rom, rom, length * 4);
498 device->config_rom_length = length;
499
500 return 0;
501}
502
/* Final release for an fw_unit once its last reference is dropped. */
static void fw_unit_release(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);

	kfree(unit);
}
509
/* Device type marking fw_units; also how is_fw_unit() recognizes them. */
static struct device_type fw_unit_type = {
	.uevent = fw_unit_uevent,
	.release = fw_unit_release,
};
514
/* True iff @dev is an fw_unit (identified by its device_type). */
static int is_fw_unit(struct device *dev)
{
	return dev->type == &fw_unit_type;
}
519
/*
 * Scan the device's root directory (config ROM quadlet 5 onwards) and
 * register one child fw_unit per unit directory entry.  Failures for a
 * single unit are non-fatal; remaining units are still created.
 */
static void create_units(struct fw_device *device)
{
	struct fw_csr_iterator ci;
	struct fw_unit *unit;
	int key, value, i;

	i = 0;
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;

		/*
		 * Get the address of the unit directory and try to
		 * match the drivers id_tables against it.
		 */
		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (unit == NULL) {
			fw_error("failed to allocate memory for unit\n");
			continue;
		}

		/* The entry's value is an offset relative to itself. */
		unit->directory = ci.p + value - 1;
		unit->device.bus = &fw_bus_type;
		unit->device.type = &fw_unit_type;
		unit->device.parent = &device->device;
		/* Unit bus ids are "<device_bus_id>.<n>". */
		snprintf(unit->device.bus_id, sizeof(unit->device.bus_id),
			 "%s.%d", device->device.bus_id, i++);

		init_fw_attribute_group(&unit->device,
					fw_unit_attributes,
					&unit->attribute_group);
		if (device_register(&unit->device) < 0)
			goto skip_unit;

		continue;

	skip_unit:
		kfree(unit);
	}
}
561
/* device_for_each_child() helper: unregister one child unit. */
static int shutdown_unit(struct device *device, void *data)
{
	device_unregister(device);

	return 0;
}
568
/* Minor-number -> fw_device map for the character devices, protected
 * by idr_rwsem; fw_cdev_major is set up elsewhere at module init. */
static DECLARE_RWSEM(idr_rwsem);
static DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
572
/*
 * Look up the fw_device registered under @devt's minor number.
 * NOTE(review): no reference is taken before the lock is dropped —
 * callers must ensure the device cannot go away while they use it.
 */
struct fw_device *fw_device_from_devt(dev_t devt)
{
	struct fw_device *device;

	down_read(&idr_rwsem);
	device = idr_find(&fw_device_idr, MINOR(devt));
	up_read(&idr_rwsem);

	return device;
}
583
/*
 * Workqueue handler tearing down a device that left the bus: drop its
 * minor number, detach open character-device clients, unregister all
 * child units and finally the device itself.
 */
static void fw_device_shutdown(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor = MINOR(device->device.devt);

	down_write(&idr_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&idr_rwsem);

	fw_device_cdev_remove(device);
	device_for_each_child(&device->device, NULL, shutdown_unit);
	device_unregister(&device->device);
}
598
/* Device type for fw_devices (as opposed to their child fw_units). */
static struct device_type fw_device_type = {
	.release = fw_device_release,
};
602
603/*
604 * These defines control the retry behavior for reading the config
605 * rom. It shouldn't be necessary to tweak these; if the device
606 * doesn't respond to a config rom read within 10 seconds, it's not
607 * going to respond at all. As for the initial delay, a lot of
608 * devices will be able to respond within half a second after bus
609 * reset. On the other hand, it's not really worth being more
610 * aggressive than that, since it scales pretty well; if 10 devices
611 * are plugged in, they're all getting read within one second.
612 */
613
614#define MAX_RETRIES 10
615#define RETRY_DELAY (3 * HZ)
616#define INITIAL_DELAY (HZ / 2)
617
/*
 * Workqueue handler that finishes bringing up a newly discovered node:
 * read its config ROM (retrying up to MAX_RETRIES times), allocate a
 * character-device minor, register the device and its units, and move
 * it from INITIALIZING to RUNNING — unless the node vanished meanwhile,
 * in which case it is shut down again.
 */
static void fw_device_init(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor, err;

	/*
	 * All failure paths here set node->data to NULL, so that we
	 * don't try to do device_for_each_child() on a kfree()'d
	 * device.
	 */

	if (read_bus_info_block(device) < 0) {
		if (device->config_rom_retries < MAX_RETRIES) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);
		} else {
			fw_notify("giving up on config rom for node id %x\n",
				  device->node_id);
			if (device->node == device->card->root_node)
				schedule_delayed_work(&device->card->work, 0);
			/*
			 * NOTE(review): releases directly instead of
			 * put_device() — the device was never added, so
			 * this relies on no other references existing;
			 * confirm.
			 */
			fw_device_release(&device->device);
		}
		return;
	}

	/* Allocate a chardev minor for this device. */
	err = -ENOMEM;
	down_write(&idr_rwsem);
	if (idr_pre_get(&fw_device_idr, GFP_KERNEL))
		err = idr_get_new(&fw_device_idr, device, &minor);
	up_write(&idr_rwsem);
	if (err < 0)
		goto error;

	device->device.bus = &fw_bus_type;
	device->device.type = &fw_device_type;
	device->device.parent = device->card->device;
	device->device.devt = MKDEV(fw_cdev_major, minor);
	snprintf(device->device.bus_id, sizeof(device->device.bus_id),
		 "fw%d", minor);

	init_fw_attribute_group(&device->device,
				fw_device_attributes,
				&device->attribute_group);
	if (device_add(&device->device)) {
		fw_error("Failed to add device.\n");
		goto error_with_cdev;
	}

	create_units(device);

	/*
	 * Transition the device to running state. If it got pulled
	 * out from under us while we did the intialization work, we
	 * have to shut down the device again here. Normally, though,
	 * fw_node_event will be responsible for shutting it down when
	 * necessary. We have to use the atomic cmpxchg here to avoid
	 * racing with the FW_NODE_DESTROYED case in
	 * fw_node_event().
	 */
	if (atomic_cmpxchg(&device->state,
		    FW_DEVICE_INITIALIZING,
		    FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN)
		fw_device_shutdown(&device->work.work);
	else
		fw_notify("created new fw device %s (%d config rom retries)\n",
			  device->device.bus_id, device->config_rom_retries);

	/*
	 * Reschedule the IRM work if we just finished reading the
	 * root node config rom. If this races with a bus reset we
	 * just end up running the IRM work a couple of extra times -
	 * pretty harmless.
	 */
	if (device->node == device->card->root_node)
		schedule_delayed_work(&device->card->work, 0);

	return;

 error_with_cdev:
	down_write(&idr_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&idr_rwsem);
 error:
	put_device(&device->device);
}
704
/*
 * device_for_each_child() helper: notify a bound unit driver that its
 * parent device survived a bus reset.  The device semaphore serializes
 * the update callback against the driver core.
 */
static int update_unit(struct device *dev, void *data)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_driver *driver = (struct fw_driver *)dev->driver;

	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
		down(&dev->sem);
		driver->update(unit);
		up(&dev->sem);
	}

	return 0;
}
718
/*
 * Workqueue handler run after FW_NODE_UPDATED: propagate the new
 * node_id/generation to chardev clients and unit drivers.
 */
static void fw_device_update(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);

	fw_device_cdev_update(device);
	device_for_each_child(&device->device, NULL, update_unit);
}
727
/*
 * Topology callback: react to nodes appearing, changing or vanishing.
 * Creation and most teardown work is deferred to the workqueue via
 * device->work; this function only does the minimal atomic-context
 * bookkeeping (it can be called with GFP_ATOMIC constraints, see the
 * kzalloc below).
 */
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
	struct fw_device *device;

	switch (event) {
	case FW_NODE_CREATED:
	case FW_NODE_LINK_ON:
		if (!node->link_on)
			break;

		device = kzalloc(sizeof(*device), GFP_ATOMIC);
		if (device == NULL)
			break;

		/*
		 * Do minimal intialization of the device here, the
		 * rest will happen in fw_device_init(). We need the
		 * card and node so we can read the config rom and we
		 * need to do device_initialize() now so
		 * device_for_each_child() in FW_NODE_UPDATED is
		 * doesn't freak out.
		 */
		device_initialize(&device->device);
		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
		device->card = fw_card_get(card);
		device->node = fw_node_get(node);
		device->node_id = node->node_id;
		device->generation = card->generation;
		INIT_LIST_HEAD(&device->client_list);

		/*
		 * Set the node data to point back to this device so
		 * FW_NODE_UPDATED callbacks can update the node_id
		 * and generation for the device.
		 */
		node->data = device;

		/*
		 * Many devices are slow to respond after bus resets,
		 * especially if they are bus powered and go through
		 * power-up after getting plugged in. We schedule the
		 * first config rom scan half a second after bus reset.
		 */
		INIT_DELAYED_WORK(&device->work, fw_device_init);
		schedule_delayed_work(&device->work, INITIAL_DELAY);
		break;

	case FW_NODE_UPDATED:
		if (!node->link_on || node->data == NULL)
			break;

		/* Refresh addressing info; defer the rest to the workqueue. */
		device = node->data;
		device->node_id = node->node_id;
		device->generation = card->generation;
		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
			schedule_delayed_work(&device->work, 0);
		}
		break;

	case FW_NODE_DESTROYED:
	case FW_NODE_LINK_OFF:
		if (!node->data)
			break;

		/*
		 * Destroy the device associated with the node. There
		 * are two cases here: either the device is fully
		 * initialized (FW_DEVICE_RUNNING) or we're in the
		 * process of reading its config rom
		 * (FW_DEVICE_INITIALIZING). If it is fully
		 * initialized we can reuse device->work to schedule a
		 * full fw_device_shutdown(). If not, there's work
		 * scheduled to read it's config rom, and we just put
		 * the device in shutdown state to have that code fail
		 * to create the device.
		 */
		device = node->data;
		if (atomic_xchg(&device->state,
				FW_DEVICE_SHUTDOWN) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
			schedule_delayed_work(&device->work, 0);
		}
		break;
	}
}
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h
new file mode 100644
index 000000000000..0ba9d64ccf4c
--- /dev/null
+++ b/drivers/firewire/fw-device.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_device_h
20#define __fw_device_h
21
22#include <linux/fs.h>
23#include <linux/cdev.h>
24#include <asm/atomic.h>
25
/* Lifecycle of a struct fw_device; stored atomically in device->state
 * and driven by fw_device_init()/fw_node_event(). */
enum fw_device_state {
	FW_DEVICE_INITIALIZING,
	FW_DEVICE_RUNNING,
	FW_DEVICE_SHUTDOWN,
};
31
/* Per-device/unit sysfs attribute group storage, filled in by
 * init_fw_attribute_group(); attrs[] holds fixed attributes plus
 * present config-ROM attributes plus a NULL terminator. */
struct fw_attribute_group {
	struct attribute_group *groups[2];
	struct attribute_group group;
	struct attribute *attrs[11];
};
37
/* One FireWire node with an active link, as exposed on the fw bus. */
struct fw_device {
	atomic_t state;			/* enum fw_device_state */
	struct fw_node *node;		/* topology node, holds a ref */
	int node_id;			/* refreshed on every bus reset */
	int generation;			/* bus generation of node_id */
	struct fw_card *card;		/* owning controller, holds a ref */
	struct device device;
	struct list_head link;
	struct list_head client_list;	/* open /dev/fw* clients */
	u32 *config_rom;		/* cached ROM, kmalloc'ed */
	size_t config_rom_length;	/* in quadlets */
	int config_rom_retries;
	struct delayed_work work;	/* init/update/shutdown work */
	struct fw_attribute_group attribute_group;
};
53
/* Upcast from the embedded struct device to the fw_device. */
static inline struct fw_device *
fw_device(struct device *dev)
{
	return container_of(dev, struct fw_device, device);
}
59
/* True once the device has been (or is being) torn down. */
static inline int
fw_device_is_shutdown(struct fw_device *device)
{
	return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
}
65
66struct fw_device *fw_device_get(struct fw_device *device);
67void fw_device_put(struct fw_device *device);
68int fw_device_enable_phys_dma(struct fw_device *device);
69
70void fw_device_cdev_update(struct fw_device *device);
71void fw_device_cdev_remove(struct fw_device *device);
72
73struct fw_device *fw_device_from_devt(dev_t devt);
74extern int fw_cdev_major;
75
/* One unit directory of a device; the entity drivers bind to. */
struct fw_unit {
	struct device device;
	u32 *directory;		/* points into parent's config_rom */
	struct fw_attribute_group attribute_group;
};
81
/* Upcast from the embedded struct device to the fw_unit. */
static inline struct fw_unit *
fw_unit(struct device *dev)
{
	return container_of(dev, struct fw_unit, device);
}
87
88#define CSR_OFFSET 0x40
89#define CSR_LEAF 0x80
90#define CSR_DIRECTORY 0xc0
91
92#define CSR_DESCRIPTOR 0x01
93#define CSR_VENDOR 0x03
94#define CSR_HARDWARE_VERSION 0x04
95#define CSR_NODE_CAPABILITIES 0x0c
96#define CSR_UNIT 0x11
97#define CSR_SPECIFIER_ID 0x12
98#define CSR_VERSION 0x13
99#define CSR_DEPENDENT_INFO 0x14
100#define CSR_MODEL 0x17
101#define CSR_INSTANCE 0x18
102
103#define SBP2_COMMAND_SET_SPECIFIER 0x38
104#define SBP2_COMMAND_SET 0x39
105#define SBP2_COMMAND_SET_REVISION 0x3b
106#define SBP2_FIRMWARE_REVISION 0x3c
107
/* Cursor over the entries of one config-ROM (CSR) block. */
struct fw_csr_iterator {
	u32 *p;		/* next entry to read */
	u32 *end;	/* one past the last entry */
};
112
113void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
114int fw_csr_iterator_next(struct fw_csr_iterator *ci,
115 int *key, int *value);
116
117#define FW_MATCH_VENDOR 0x0001
118#define FW_MATCH_MODEL 0x0002
119#define FW_MATCH_SPECIFIER_ID 0x0004
120#define FW_MATCH_VERSION 0x0008
121
/* One driver match entry; match_flags selects which fields must match
 * the unit's config-ROM directory (see match_unit_directory()). */
struct fw_device_id {
	u32 match_flags;
	u32 vendor;
	u32 model;
	u32 specifier_id;
	u32 version;
	void *driver_data;
};
130
/* A driver on the fw bus; binds to fw_units via id_table. */
struct fw_driver {
	struct device_driver driver;
	/* Called when the parent device sits through a bus reset. */
	void (*update) (struct fw_unit *unit);
	const struct fw_device_id *id_table;
};
137
/* Upcast from the embedded device_driver to the fw_driver. */
static inline struct fw_driver *
fw_driver(struct device_driver *drv)
{
	return container_of(drv, struct fw_driver, driver);
}
143
144extern const struct file_operations fw_device_ops;
145
146#endif /* __fw_device_h */
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
new file mode 100644
index 000000000000..2b640e9be6de
--- /dev/null
+++ b/drivers/firewire/fw-iso.c
@@ -0,0 +1,163 @@
1/*
2 * Isochronous IO functionality
3 *
4 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/dma-mapping.h>
24#include <linux/vmalloc.h>
25#include <linux/mm.h>
26
27#include "fw-transaction.h"
28#include "fw-topology.h"
29#include "fw-device.h"
30
31int
32fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
33 int page_count, enum dma_data_direction direction)
34{
35 int i, j, retval = -ENOMEM;
36 dma_addr_t address;
37
38 buffer->page_count = page_count;
39 buffer->direction = direction;
40
41 buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
42 GFP_KERNEL);
43 if (buffer->pages == NULL)
44 goto out;
45
46 for (i = 0; i < buffer->page_count; i++) {
47 buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
48 if (buffer->pages[i] == NULL)
49 goto out_pages;
50
51 address = dma_map_page(card->device, buffer->pages[i],
52 0, PAGE_SIZE, direction);
53 if (dma_mapping_error(address)) {
54 __free_page(buffer->pages[i]);
55 goto out_pages;
56 }
57 set_page_private(buffer->pages[i], address);
58 }
59
60 return 0;
61
62 out_pages:
63 for (j = 0; j < i; j++) {
64 address = page_private(buffer->pages[j]);
65 dma_unmap_page(card->device, address,
66 PAGE_SIZE, DMA_TO_DEVICE);
67 __free_page(buffer->pages[j]);
68 }
69 kfree(buffer->pages);
70 out:
71 buffer->pages = NULL;
72 return retval;
73}
74
/*
 * Map all pages of an iso buffer into a userspace VMA, page by page.
 * Returns 0 or the first vm_insert_page() error (partially inserted
 * pages are cleaned up with the VMA by the caller/mm).
 */
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
	unsigned long uaddr;
	int i, retval;

	uaddr = vma->vm_start;
	for (i = 0; i < buffer->page_count; i++) {
		retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
		if (retval)
			return retval;
		uaddr += PAGE_SIZE;
	}

	return 0;
}
90
91void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
92 struct fw_card *card)
93{
94 int i;
95 dma_addr_t address;
96
97 for (i = 0; i < buffer->page_count; i++) {
98 address = page_private(buffer->pages[i]);
99 dma_unmap_page(card->device, address,
100 PAGE_SIZE, DMA_TO_DEVICE);
101 __free_page(buffer->pages[i]);
102 }
103
104 kfree(buffer->pages);
105 buffer->pages = NULL;
106}
107
/*
 * Ask the card driver for an iso context of the given type and fill in
 * the generic fields.  Returns the context or an ERR_PTR from the
 * driver's allocate_iso_context().
 */
struct fw_iso_context *
fw_iso_context_create(struct fw_card *card, int type,
		      int channel, int speed, size_t header_size,
		      fw_iso_callback_t callback, void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = card->driver->allocate_iso_context(card, type, header_size);
	if (IS_ERR(ctx))
		return ctx;

	ctx->card = card;
	ctx->type = type;
	ctx->channel = channel;
	ctx->speed = speed;
	ctx->header_size = header_size;
	ctx->callback = callback;
	ctx->callback_data = callback_data;

	return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
130
/* Free an iso context via the owning card driver. */
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
	struct fw_card *card = ctx->card;

	card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
138
/* Start iso transmission/reception; parameters pass through to the
 * card driver's start_iso(). */
int
fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
{
	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
145
/* Queue one iso packet, with payload at @payload bytes into @buffer,
 * via the card driver's queue_iso(). */
int
fw_iso_context_queue(struct fw_iso_context *ctx,
		     struct fw_iso_packet *packet,
		     struct fw_iso_buffer *buffer,
		     unsigned long payload)
{
	struct fw_card *card = ctx->card;

	return card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
157
/* Stop a running iso context via the card driver's stop_iso(). */
int
fw_iso_context_stop(struct fw_iso_context *ctx)
{
	return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
new file mode 100644
index 000000000000..1f5c70461b8b
--- /dev/null
+++ b/drivers/firewire/fw-ohci.c
@@ -0,0 +1,1943 @@
1/*
2 * Driver for OHCI 1394 controllers
3 *
4 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/dma-mapping.h>
29
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-ohci.h"
35
/*
 * Bits of the 'control' word in an OHCI DMA descriptor: the command
 * type in bits 15-12, then status write-back, key, ping, irq policy,
 * branch policy and wait condition in the lower bits.
 */
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)
49
/*
 * Generic OHCI DMA descriptor.  The field layout and the 16-byte
 * alignment are dictated by the controller; res_count and
 * transfer_status are written back by the hardware on completion.
 */
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
58
/*
 * Dual-buffer isochronous receive descriptor: splits each packet
 * into a first buffer (typically the header) and a second buffer.
 * Layout is hardware-defined, hence the fixed field order and
 * 16-byte alignment.
 */
struct db_descriptor {
	__le16 first_size;
	__le16 control;
	__le16 second_req_count;
	__le16 first_req_count;
	__le32 branch_address;
	__le16 second_res_count;	/* written back by the controller */
	__le16 first_res_count;		/* written back by the controller */
	__le32 reserved0;
	__le32 first_buffer;
	__le32 second_buffer;
	__le32 reserved1;
} __attribute__((aligned(16)));
72
/*
 * Register offsets within a per-context register bank, given the
 * bank's base offset: ContextControlSet, ContextControlClear,
 * CommandPtr and ContextMatch.
 */
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
77
/*
 * One page of the asynchronous-receive ring.  The DMA descriptor
 * sits at the start of the page, received packet data follows in
 * 'data', and 'next' links to the following page of the ring.
 */
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};
83
/* State of one asynchronous receive (AR) DMA context. */
struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;	/* page currently consumed */
	struct ar_buffer *last_buffer;		/* tail of the page ring */
	void *pointer;				/* read position in current page */
	u32 regs;				/* context register bank offset */
	struct tasklet_struct tasklet;
};
92
struct context;

/*
 * Invoked for each completed descriptor block while walking the DMA
 * program; return non-zero to keep iterating, 0 to stop.
 */
typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * Generic DMA context: a ring of struct descriptor inside one
 * streaming-mapped buffer, used for the AT contexts and layered
 * under the isochronous contexts.
 */
struct context {
	struct fw_ohci *ohci;
	u32 regs;			/* context register bank offset */

	struct descriptor *buffer;	/* the descriptor ring */
	dma_addr_t buffer_bus;
	size_t buffer_size;
	struct descriptor *head_descriptor;	/* next free slot */
	struct descriptor *tail_descriptor;	/* oldest pending block */
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;	/* descriptor to branch from */

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
114
/* Field builders for the isochronous transmit packet header quadlets. */
#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
121
/*
 * Isochronous context: the public fw_iso_context plus the generic
 * DMA context and a header accumulation area.
 */
struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;		/* packet header bytes gathered for the client */
	size_t header_length;	/* bytes currently in 'header' */
};
128
/* Size in bytes of the DMA-visible config rom image. */
#define CONFIG_ROM_SIZE 1024

/* Per-controller state for one OHCI 1394 host adapter. */
struct fw_ohci {
	struct fw_card card;		/* embedded generic card */

	u32 version;			/* OHCI version register value */
	__iomem char *registers;	/* MMIO register window */
	dma_addr_t self_id_bus;		/* self-ID receive buffer (bus addr) */
	__le32 *self_id_cpu;		/* self-ID receive buffer (CPU view) */
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;		/* generation of incoming requests */
	u32 bus_seconds;		/* cycle64Seconds overflow counter */

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;
	u32 self_id_buffer[512];	/* byte-swapped copy of the self IDs */

	/* Config rom buffers */
	__be32 *config_rom;		/* image currently exposed to the bus */
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;	/* pending image, swapped in on bus reset */
	dma_addr_t next_config_rom_bus;
	u32 next_header;		/* header quadlet, written last */

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_mask;		/* bitmask of free IT contexts */
	struct iso_context *it_context_list;
	u32 ir_context_mask;		/* bitmask of free IR contexts */
	struct iso_context *ir_context_list;
};
168
/* Map the embedded generic card back to its containing fw_ohci. */
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
173
/* IT/IR ContextControl bits. */
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

/* Common ContextControl bits for all DMA contexts. */
#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

/* Retry limits programmed into ATRetries. */
#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

/* Misc driver constants: char major, MMIO size, polling and buffers. */
#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
#define ISO_BUFFER_SIZE			(64 * 1024)
#define AT_BUFFER_SIZE			4096

/* Name used for the char device region and the shared interrupt. */
static char ohci_driver_name[] = KBUILD_MODNAME;
201
/* Write one 32-bit OHCI register at the given byte offset. */
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}
206
/* Read one 32-bit OHCI register at the given byte offset. */
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}
211
/* Force posted MMIO writes out to the controller. */
static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
217
218static int
219ohci_update_phy_reg(struct fw_card *card, int addr,
220 int clear_bits, int set_bits)
221{
222 struct fw_ohci *ohci = fw_ohci(card);
223 u32 val, old;
224
225 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
226 msleep(2);
227 val = reg_read(ohci, OHCI1394_PhyControl);
228 if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
229 fw_error("failed to set phy reg bits.\n");
230 return -EBUSY;
231 }
232
233 old = OHCI1394_PhyControl_ReadData(val);
234 old = (old & ~clear_bits) | set_bits;
235 reg_write(ohci, OHCI1394_PhyControl,
236 OHCI1394_PhyControl_Write(addr, old));
237
238 return 0;
239}
240
/*
 * Append one freshly allocated page to the AR context's buffer ring:
 * build an INPUT_MORE descriptor at the head of the page, link it
 * after the current last buffer and wake the context.
 *
 * Returns 0 on success, -ENOMEM if allocation or mapping fails.
 * May be called from tasklet context (GFP_ATOMIC).
 */
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	/* The descriptor covers the page minus its own header. */
	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						    DESCRIPTOR_STATUS |
						    DESCRIPTOR_BRANCH_ALWAYS);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	/* Hand the descriptor to the device before linking it in. */
	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* Low bit 1 in the branch address = Z count of one descriptor. */
	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}
279
/*
 * Decode one async packet at 'buffer' in an AR page and dispatch it
 * to the core as a request or response.  Returns a pointer just past
 * the packet (header + payload + trailing status quadlet) so the
 * caller can iterate over back-to-back packets.
 */
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		/* Quadlet data is carried unswapped in the fourth quadlet. */
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		/* data_length is the upper half of the fourth quadlet. */
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	/*
	 * NOTE(review): tcodes not listed above would leave
	 * header_length/payload_length uninitialized — confirm the
	 * controller can never deliver such a packet here.
	 */
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	/* The controller appends one status quadlet after the payload. */
	status = le32_to_cpu(buffer[length]);

	p.ack = ((status >> 16) & 0x1f) - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	/*
	 * The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}
352
/*
 * Consume received async packets from the AR page ring.  Two cases:
 * the current page is exhausted (res_count == 0), in which case a
 * packet may be split across this page and the next and must be
 * reassembled before parsing; otherwise just parse everything that
 * arrived since the last run.
 */
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/*
		 * This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet.
		 */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 ab->descriptor.data_address - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		/* Unconsumed bytes at the tail of the finished page... */
		size = buffer + PAGE_SIZE - ctx->pointer;
		/* ...plus the bytes already received into the next page. */
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		/*
		 * NOTE(review): 'buffer' has advanced to 'end' here; if
		 * size + rest ever reaches PAGE_SIZE this frees the
		 * wrong page — confirm reassembled data always fits
		 * strictly inside one page.
		 */
		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}
403
/*
 * Initialize an AR context: seed the page ring with two pages and
 * start the DMA context.  A throwaway on-stack ar_buffer acts as the
 * initial 'last_buffer' so ar_context_add_page() has something to
 * link the first real page after; only its branch_address and next
 * fields are consumed before the stack frame dies.
 *
 * Always returns 0.  NOTE(review): failures of ar_context_add_page()
 * are not checked here — confirm that is acceptable at init time.
 */
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	/* Point the context at the first real page (Z=1 in the low bit). */
	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ab.descriptor.branch_address);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	return 0;
}
425
/*
 * Walk the descriptor ring from the tail, invoking the context
 * callback for each completed descriptor block and advancing the
 * tail.  Stops when the callback returns 0 (block not finished) or
 * the end of the programmed chain (branch_address == 0) is reached.
 */
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct fw_ohci *ohci = ctx->ohci;
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ctx->buffer_size, DMA_TO_DEVICE);

	d = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0) {
		address = le32_to_cpu(last->branch_address);
		/* Low nybble of the branch address is the Z block count. */
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof(*d);
		/* z == 2 means an immediate-data block; last is then d. */
		last = (z == 2) ? d : d + z - 1;

		if (!ctx->callback(ctx, d, last))
			break;

		ctx->tail_descriptor = d;
		ctx->tail_descriptor_last = last;
	}
}
453
/*
 * Set up a generic DMA context: allocate and map the descriptor ring
 * buffer, register the completion callback, and plant the initial
 * dummy descriptor.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
	     size_t buffer_size, u32 regs,
	     descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->buffer_size = buffer_size;
	ctx->buffer = kmalloc(buffer_size, GFP_KERNEL);
	if (ctx->buffer == NULL)
		return -ENOMEM;

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       buffer_size, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus)) {
		kfree(ctx->buffer);
		return -ENOMEM;
	}

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent. That way we
	 * have a descriptor to append DMA programs to. Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full.
	 */

	memset(ctx->head_descriptor, 0, sizeof(*ctx->head_descriptor));
	ctx->head_descriptor->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	/* 0x8011 = xferStatus with ACTIVE|RUN + ack_complete: looks done. */
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return 0;
}
497
498static void
499context_release(struct context *ctx)
500{
501 struct fw_card *card = &ctx->ohci->card;
502
503 dma_unmap_single(card->device, ctx->buffer_bus,
504 ctx->buffer_size, DMA_TO_DEVICE);
505 kfree(ctx->buffer);
506}
507
/*
 * Reserve 'z' physically contiguous descriptors from the ring and
 * return them zeroed, with their bus address in *d_bus.  Returns
 * NULL when the ring cannot fit z contiguous slots — either in the
 * space up to the tail or, after wrapping, at the buffer start.
 * Note: the ring invariant (one dummy element) means head may never
 * catch up to tail, hence the strict <= / < comparisons below.
 */
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
{
	struct descriptor *d, *tail, *end;

	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ctx->buffer_size / sizeof(*d);

	if (d + z <= tail) {
		goto has_space;
	} else if (d > tail && d + z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z <= tail) {
		/* Wrap around to the start of the buffer. */
		d = ctx->buffer;
		goto has_space;
	}

	return NULL;

 has_space:
	memset(d, 0, z * sizeof(*d));
	*d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	return d;
}
534
/*
 * Start the DMA context: load CommandPtr with the branch address of
 * the last completed block (which points at the pending program),
 * clear all control bits, then set RUN plus any extra bits.
 */
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	flush_writes(ohci);
}
545
/*
 * Link a block of z descriptors (obtained from
 * context_get_descriptors) into the live DMA program: advance the
 * head past the block plus 'extra' padding slots, patch the previous
 * block's branch address to point at it (Z count in the low nybble),
 * sync the ring for the device and wake the context.
 */
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;

	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof(*d);

	ctx->head_descriptor = d + z + extra;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	/* z == 2 is an immediate block whose 'last' descriptor is d itself. */
	ctx->prev_descriptor = z == 2 ? d : d + z - 1;

	dma_sync_single_for_device(ctx->ohci->card.device, ctx->buffer_bus,
				   ctx->buffer_size, DMA_TO_DEVICE);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}
563
/*
 * Stop the DMA context: clear RUN, then poll up to 10 times (1ms
 * apart) for the ACTIVE bit to drop, logging while it lingers.
 */
static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			break;

		fw_notify("context_stop: still active (0x%08x)\n", reg);
		msleep(1);
	}
}
581
/*
 * Per-packet bookkeeping stored in the spare fourth descriptor of an
 * AT block, so handle_at_packet() can recover the fw_packet.
 */
struct driver_data {
	struct fw_packet *packet;
};
585
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
591static int
592at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
593{
594 struct fw_ohci *ohci = ctx->ohci;
595 dma_addr_t d_bus, payload_bus;
596 struct driver_data *driver_data;
597 struct descriptor *d, *last;
598 __le32 *header;
599 int z, tcode;
600 u32 reg;
601
602 d = context_get_descriptors(ctx, 4, &d_bus);
603 if (d == NULL) {
604 packet->ack = RCODE_SEND_ERROR;
605 return -1;
606 }
607
608 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
609 d[0].res_count = cpu_to_le16(packet->timestamp);
610
611 /*
612 * The DMA format for asyncronous link packets is different
613 * from the IEEE1394 layout, so shift the fields around
614 * accordingly. If header_length is 8, it's a PHY packet, to
615 * which we need to prepend an extra quadlet.
616 */
617
618 header = (__le32 *) &d[1];
619 if (packet->header_length > 8) {
620 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
621 (packet->speed << 16));
622 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
623 (packet->header[0] & 0xffff0000));
624 header[2] = cpu_to_le32(packet->header[2]);
625
626 tcode = (packet->header[0] >> 4) & 0x0f;
627 if (TCODE_IS_BLOCK_PACKET(tcode))
628 header[3] = cpu_to_le32(packet->header[3]);
629 else
630 header[3] = (__force __le32) packet->header[3];
631
632 d[0].req_count = cpu_to_le16(packet->header_length);
633 } else {
634 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
635 (packet->speed << 16));
636 header[1] = cpu_to_le32(packet->header[0]);
637 header[2] = cpu_to_le32(packet->header[1]);
638 d[0].req_count = cpu_to_le16(12);
639 }
640
641 driver_data = (struct driver_data *) &d[3];
642 driver_data->packet = packet;
643 packet->driver_data = driver_data;
644
645 if (packet->payload_length > 0) {
646 payload_bus =
647 dma_map_single(ohci->card.device, packet->payload,
648 packet->payload_length, DMA_TO_DEVICE);
649 if (dma_mapping_error(payload_bus)) {
650 packet->ack = RCODE_SEND_ERROR;
651 return -1;
652 }
653
654 d[2].req_count = cpu_to_le16(packet->payload_length);
655 d[2].data_address = cpu_to_le32(payload_bus);
656 last = &d[2];
657 z = 3;
658 } else {
659 last = &d[0];
660 z = 2;
661 }
662
663 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
664 DESCRIPTOR_IRQ_ALWAYS |
665 DESCRIPTOR_BRANCH_ALWAYS);
666
667 /* FIXME: Document how the locking works. */
668 if (ohci->generation != packet->generation) {
669 packet->ack = RCODE_GENERATION;
670 return -1;
671 }
672
673 context_append(ctx, d, z, 4 - z);
674
675 /* If the context isn't already running, start it up. */
676 reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
677 if ((reg & CONTEXT_RUN) == 0)
678 context_run(ctx, 0);
679
680 return 0;
681}
682
/*
 * Completion handler (context callback) for AT descriptor blocks:
 * unmap the payload, translate the OHCI event code into an ack/rcode
 * and invoke the packet's completion callback.
 *
 * Returns 0 if the block is not yet finished (stop iterating),
 * 1 to continue with the next block.
 */
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	dma_addr_t payload_bus;
	int evt;

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	payload_bus = le32_to_cpu(last->data_address);
	if (payload_bus != 0)
		dma_unmap_single(ohci->card.device, payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed should give same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		/*
		 * Using a valid (current) generation count, but the
		 * node is not on the bus or not sending acks.
		 */
		packet->ack = RCODE_NO_ACK;
		break;

	/* OHCI biases real 1394 ack codes by 0x10 in the event field. */
	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
752
/* Field extractors for 1394 async packet header quadlets. */
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
758
/*
 * Serve a read of our own config rom locally (the request was
 * addressed to this node) and synthesize the response.  Rejects
 * out-of-range offsets with RCODE_ADDRESS_ERROR and non-read tcodes
 * with RCODE_TYPE_ERROR.
 */
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	/* Offset of the read within the config rom image. */
	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
785
786static void
787handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
788{
789 struct fw_packet response;
790 int tcode, length, ext_tcode, sel;
791 __be32 *payload, lock_old;
792 u32 lock_arg, lock_data;
793
794 tcode = HEADER_GET_TCODE(packet->header[0]);
795 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
796 payload = packet->payload;
797 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
798
799 if (tcode == TCODE_LOCK_REQUEST &&
800 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
801 lock_arg = be32_to_cpu(payload[0]);
802 lock_data = be32_to_cpu(payload[1]);
803 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
804 lock_arg = 0;
805 lock_data = 0;
806 } else {
807 fw_fill_response(&response, packet->header,
808 RCODE_TYPE_ERROR, NULL, 0);
809 goto out;
810 }
811
812 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
813 reg_write(ohci, OHCI1394_CSRData, lock_data);
814 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
815 reg_write(ohci, OHCI1394_CSRControl, sel);
816
817 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
818 lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
819 else
820 fw_notify("swap not done yet\n");
821
822 fw_fill_response(&response, packet->header,
823 RCODE_COMPLETE, &lock_old, sizeof(lock_old));
824 out:
825 fw_core_handle_response(&ohci->card, &response);
826}
827
/*
 * Dispatch a packet addressed to our own node: fake the ack that the
 * link would have generated, route config-rom reads and CSR lock
 * registers to the local handlers, and pass everything else to the
 * core as an ordinary request/response.
 */
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	/* A locally handled request is acked pending before processing. */
	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	/* Rebuild the 48-bit destination offset from the header. */
	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	/* A locally handled response completes immediately. */
	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
868
/*
 * Transmit on an AT context.  Packets addressed to ourselves in the
 * current generation are short-circuited to handle_local_request()
 * (outside the lock — it may call back into the core); everything
 * else is queued under ohci->lock, and queue failures are reported
 * through the packet callback with the ack set by the queue routine.
 */
static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	retval = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (retval < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);

}
891
/*
 * Bus reset bottom half: read the new node ID and the self-ID
 * packets out of the DMA buffer (validating their generation against
 * a concurrent reset), stop the AT contexts, complete any pending
 * config rom swap, and report the topology to the core.
 */
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer. Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	/* Each self ID is followed by its bit-inverted copy. */
	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/*
	 * Check the consistency of the self IDs we just read. The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer. If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data. The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out. If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also. If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it. Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers. Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}
984
/*
 * Shared interrupt handler: acknowledge all pending events and
 * schedule the matching tasklets (bus reset, AR/AT contexts, and
 * each IR/IT context whose event bit is set).  Also maintains the
 * software seconds counter on cycle-timer rollover.
 */
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event, cycle_time;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;	/* not our interrupt (shared line) */

	/* Ack everything we saw in one write. */
	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	/* One bit per IR context; drain them lowest-first. */
	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	/* Likewise for the IT contexts. */
	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].context.tasklet);
		iso_event &= ~(1 << i);
	}

	/* Count 64-second rollovers of the isochronous cycle timer. */
	if (event & OHCI1394_cycle64Seconds) {
		cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		if ((cycle_time & 0x80000000) == 0)
			ohci->bus_seconds++;
	}

	return IRQ_HANDLED;
}
1039
1040static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
1041{
1042 struct fw_ohci *ohci = fw_ohci(card);
1043 struct pci_dev *dev = to_pci_dev(card->device);
1044
1045 /*
1046 * When the link is not yet enabled, the atomic config rom
1047 * update mechanism described below in ohci_set_config_rom()
1048 * is not active. We have to update ConfigRomHeader and
1049 * BusOptions manually, and the write to ConfigROMmap takes
1050 * effect immediately. We tie this to the enabling of the
1051 * link, so we have a valid config rom before enabling - the
1052 * OHCI requires that ConfigROMhdr and BusOptions have valid
1053 * values before enabling.
1054 *
1055 * However, when the ConfigROMmap is written, some controllers
1056 * always read back quadlets 0 and 2 from the config rom to
1057 * the ConfigRomHeader and BusOptions registers on bus reset.
1058 * They shouldn't do that in this initial case where the link
1059 * isn't enabled. This means we have to use the same
1060 * workaround here, setting the bus header to 0 and then write
1061 * the right values in the bus reset tasklet.
1062 */
1063
1064 ohci->next_config_rom =
1065 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1066 &ohci->next_config_rom_bus, GFP_KERNEL);
1067 if (ohci->next_config_rom == NULL)
1068 return -ENOMEM;
1069
1070 memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
1071 fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);
1072
1073 ohci->next_header = config_rom[0];
1074 ohci->next_config_rom[0] = 0;
1075 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
1076 reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
1077 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
1078
1079 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
1080
1081 if (request_irq(dev->irq, irq_handler,
1082 IRQF_SHARED, ohci_driver_name, ohci)) {
1083 fw_error("Failed to allocate shared interrupt %d.\n",
1084 dev->irq);
1085 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1086 ohci->config_rom, ohci->config_rom_bus);
1087 return -EIO;
1088 }
1089
1090 reg_write(ohci, OHCI1394_HCControlSet,
1091 OHCI1394_HCControl_linkEnable |
1092 OHCI1394_HCControl_BIBimageValid);
1093 flush_writes(ohci);
1094
1095 /*
1096 * We are ready to go, initiate bus reset to finish the
1097 * initialization.
1098 */
1099
1100 fw_core_initiate_bus_reset(&ohci->card, 1);
1101
1102 return 0;
1103}
1104
/*
 * Install a new config rom on a running controller using the atomic
 * update mechanism of OHCI section 5.5.6 (see the long comment below).
 *
 * Returns 0 on success, -ENOMEM if the DMA buffer cannot be allocated,
 * or -EBUSY if a previous update is still pending (ohci->next_config_rom
 * has not yet been consumed by the bus reset tasklet).
 */
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on litte endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
	 */

	/* Allocate outside the spinlock; GFP_KERNEL may sleep. */
	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		/* Blank the header quadlet; the bus reset tasklet
		 * writes the saved real value (see comment above). */
		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		/* An update is already staged; drop our buffer. */
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}
1184
1185static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
1186{
1187 struct fw_ohci *ohci = fw_ohci(card);
1188
1189 at_context_transmit(&ohci->at_request_ctx, packet);
1190}
1191
1192static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
1193{
1194 struct fw_ohci *ohci = fw_ohci(card);
1195
1196 at_context_transmit(&ohci->at_response_ctx, packet);
1197}
1198
/*
 * Try to cancel a pending AT request packet.
 *
 * Returns 0 if the packet had not been acked yet — it is then detached
 * from its descriptor and completed here with RCODE_CANCELLED — or
 * -ENOENT if packet->ack is already set, in which case the normal
 * completion path will (or did) run.
 */
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int retval = -ENOENT;

	/* Keep the AT tasklet from completing the packet concurrently. */
	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	/* Unlink the packet from its descriptor and complete it here. */
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	retval = 0;

 out:
	tasklet_enable(&ctx->tasklet);

	return retval;
}
1221
/*
 * Enable physical DMA request filtering for the given node.
 *
 * Returns 0 on success or -ESTALE if a bus reset has occurred since the
 * caller sampled `generation' (the node_id would then be meaningless).
 */
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/*
	 * FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
	 */

	/* Lock so the generation check and register write are atomic
	 * with respect to bus resets. */
	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */

	/* Node numbers 0-62 map to individual filter bits; 63 is the
	 * catch-all used for remote buses (see note above). */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
}
1257
1258static u64
1259ohci_get_bus_time(struct fw_card *card)
1260{
1261 struct fw_ohci *ohci = fw_ohci(card);
1262 u32 cycle_time;
1263 u64 bus_time;
1264
1265 cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1266 bus_time = ((u64) ohci->bus_seconds << 32) | cycle_time;
1267
1268 return bus_time;
1269}
1270
/*
 * Completion handler for dual-buffer IR descriptors: copy the received
 * per-packet headers into ctx->header and, when the descriptor asked
 * for an interrupt, deliver the accumulated headers via the context
 * callback.
 *
 * Returns 0 while the descriptor is still in flight (stops the
 * context's descriptor iteration), 1 once it has been consumed.
 */
static int handle_ir_dualbuffer_packet(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct db_descriptor *db = (struct db_descriptor *) d;
	__le32 *ir_header;
	size_t header_length;
	void *p, *end;
	int i;

	if (db->first_res_count > 0 && db->second_res_count > 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	/* Bytes of header data the controller actually filled in. */
	header_length = le16_to_cpu(db->first_req_count) -
		le16_to_cpu(db->first_res_count);

	i = ctx->header_length;
	p = db + 1;		/* header data follows the descriptor */
	end = p + header_length;
	while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
		/*
		 * The iso header is byteswapped to little endian by
		 * the controller, but the remaining header quadlets
		 * are big endian.  We want to present all the headers
		 * as big endian, so we have to swap the first
		 * quadlet.
		 */
		*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
		memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
		i += ctx->base.header_size;
		/* Each on-the-wire header carries 4 extra bytes (the
		 * status word; see ohci_queue_iso_receive_dualbuffer). */
		p += ctx->base.header_size + 4;
	}

	ctx->header_length = i;

	if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
		ir_header = (__le32 *) (db + 1);
		/* Low 16 bits of the first quadlet are passed as the
		 * cycle argument to the callback. */
		ctx->base.callback(&ctx->base,
				   le32_to_cpu(ir_header[0]) & 0xffff,
				   ctx->header_length, ctx->header,
				   ctx->base.callback_data);
		ctx->header_length = 0;
	}

	return 1;
}
1320
/*
 * Completion handler for IT descriptors.
 *
 * Returns 0 while the last descriptor is still in flight (stops the
 * context's descriptor iteration), 1 once it has completed.  Invokes
 * the context callback when the descriptor requested an interrupt.
 */
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);

	if (last->transfer_status == 0)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
		/* NOTE(review): res_count appears to carry the transmit
		 * timestamp passed as the cycle argument — confirm
		 * against the OHCI IT descriptor definition. */
		ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
				   0, NULL, ctx->base.callback_data);

	return 1;
}
1338
/*
 * Allocate a free isochronous context of the requested type.
 *
 * Returns &ctx->base on success; ERR_PTR(-EBUSY) when all contexts of
 * that type are in use, ERR_PTR(-EINVAL) for receive contexts on
 * pre-1.1 controllers (no dual-buffer support), or ERR_PTR(-ENOMEM)
 * on allocation failure.
 */
static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	descriptor_callback_t callback;
	u32 *mask, regs;
	unsigned long flags;
	int index, retval = -ENOMEM;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		callback = handle_it_packet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		callback = handle_ir_dualbuffer_packet;
	}

	/* FIXME: We need a fallback for pre 1.1 OHCI. */
	if (callback == handle_ir_dualbuffer_packet &&
	    ohci->version < OHCI_VERSION_1_1)
		return ERR_PTR(-EINVAL);

	/* Claim the lowest free context bit under the lock. */
	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	if (type == FW_ISO_CONTEXT_TRANSMIT)
		regs = OHCI1394_IsoXmitContextBase(index);
	else
		regs = OHCI1394_IsoRcvContextBase(index);

	ctx = &list[index];
	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	/* Page used to accumulate per-packet headers for the callback. */
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL)
		goto out;

	retval = context_init(&ctx->context, ohci, ISO_BUFFER_SIZE,
			      regs, callback);
	if (retval < 0)
		goto out_with_header;

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	/* Give the claimed context bit back on failure. */
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(retval);
}
1401
/*
 * Start an isochronous context, optionally waiting for a specific
 * start cycle (cycle >= 0 enables cycle matching).  For receive
 * contexts, `sync' and `tags' are folded into the context match
 * register along with the channel number.  Always returns 0.
 */
static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control, match;
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			/* Start transmitting on the requested cycle. */
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		/* Clear any stale event, then unmask this context. */
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
	} else {
		index = ctx - ohci->ir_context_list;
		control = IR_CONTEXT_DUAL_BUFFER_MODE | IR_CONTEXT_ISOCH_HEADER;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		/* Clear any stale event, then unmask this context. */
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);
	}

	return 0;
}
1437
/*
 * Stop an isochronous context: mask its interrupt, flush the MMIO
 * write so the mask takes effect, then stop the DMA context itself.
 * Always returns 0.
 */
static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	}
	/* Make sure the interrupt is masked before stopping the context. */
	flush_writes(ohci);
	context_stop(&ctx->context);

	return 0;
}
1456
/*
 * Release an isochronous context: stop it, free its DMA resources and
 * header page, and return its bit to the appropriate context mask so
 * it can be allocated again.
 */
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}
1480
/*
 * Queue one isochronous transmit packet.
 *
 * Builds a descriptor chain of z + header_z descriptors:
 *  - unless p->skip, an immediate-key descriptor pair holding the
 *    8-byte iso packet header (counts as 2 of z),
 *  - one descriptor pointing at the caller-supplied extra header data
 *    (if p->header_length > 0), stored in the trailing header_z
 *    descriptor slots,
 *  - one descriptor per page spanned by the payload.
 *
 * Returns 0 on success or -ENOMEM if no descriptors are available.
 */
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
			struct fw_iso_packet *packet,
			struct fw_iso_buffer *buffer,
			unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		/* Immediate descriptor pair carrying the 8-byte iso
		 * packet header (d[0] control, d[1] the header data). */
		d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		/* Extra header data lives in the header_z descriptor
		 * slots at d[z]; point d[2] at it by bus address. */
		d[2].req_count = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	/* One descriptor per page the payload touches. */
	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page = payload_index >> PAGE_SHIFT;
		offset = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	/* z == 2 means only the immediate pair exists, which is
	 * addressed through its first descriptor. */
	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}
1579
/*
 * Queue one isochronous receive packet in dual-buffer mode.
 *
 * Each dual-buffer descriptor receives the per-packet headers into a
 * first buffer (located right after the descriptor itself) and the
 * payload into a second buffer, split page by page across the
 * caller-supplied iso buffer.  An optional leading descriptor with
 * DESCRIPTOR_WAIT implements p->skip.
 *
 * Returns 0 on success or -ENOMEM if no descriptors are available.
 */
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
				  struct fw_iso_packet *packet,
				  struct fw_iso_buffer *buffer,
				  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct db_descriptor *db = NULL;
	struct descriptor *d;
	struct fw_iso_packet *p;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, length, rest;
	int page, offset, packet_count, header_size;

	/*
	 * FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate..
	 */

	if (packet->skip) {
		/* Descriptor that waits (DESCRIPTOR_WAIT) without
		 * storing payload, implementing the skip request. */
		d = context_get_descriptors(&ctx->context, 2, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS |
					  DESCRIPTOR_WAIT);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		context_append(&ctx->context, d, 2, 0);
	}

	p = packet;
	z = 2;

	/*
	 * The OHCI controller puts the status word in the header
	 * buffer too, so we need 4 extra bytes per packet.
	 */
	packet_count = p->header_length / ctx->base.header_size;
	header_size = packet_count * (ctx->base.header_size + 4);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest = p->payload_length;

	/* FIXME: OHCI 1.0 doesn't support dual buffer receive */
	/* FIXME: make packet-per-buffer/dual-buffer a context option */
	while (rest > 0) {
		/* One dual-buffer descriptor per page of payload. */
		d = context_get_descriptors(&ctx->context,
					    z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		db = (struct db_descriptor *) d;
		db->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_BRANCH_ALWAYS);
		db->first_size = cpu_to_le16(ctx->base.header_size + 4);
		db->first_req_count = cpu_to_le16(header_size);
		db->first_res_count = db->first_req_count;
		/* Header buffer sits directly behind the descriptor. */
		db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;

		db->second_req_count = cpu_to_le16(length);
		db->second_res_count = db->second_req_count;
		page_bus = page_private(buffer->pages[page]);
		db->second_buffer = cpu_to_le32(page_bus + offset);

		/* Interrupt on the last chunk if the caller asked. */
		if (p->interrupt && length == rest)
			db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		page++;
	}

	return 0;
}
1665
1666static int
1667ohci_queue_iso(struct fw_iso_context *base,
1668 struct fw_iso_packet *packet,
1669 struct fw_iso_buffer *buffer,
1670 unsigned long payload)
1671{
1672 struct iso_context *ctx = container_of(base, struct iso_context, base);
1673
1674 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
1675 return ohci_queue_iso_transmit(base, packet, buffer, payload);
1676 else if (ctx->context.ohci->version >= OHCI_VERSION_1_1)
1677 return ohci_queue_iso_receive_dualbuffer(base, packet,
1678 buffer, payload);
1679 else
1680 /* FIXME: Implement fallback for OHCI 1.0 controllers. */
1681 return -EINVAL;
1682}
1683
/* Card driver operations exported to the firewire core. */
static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.get_bus_time		= ohci_get_bus_time,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};
1701
1702static int software_reset(struct fw_ohci *ohci)
1703{
1704 int i;
1705
1706 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1707
1708 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1709 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1710 OHCI1394_HCControl_softReset) == 0)
1711 return 0;
1712 msleep(1);
1713 }
1714
1715 return -EBUSY;
1716}
1717
1718static int __devinit
1719pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
1720{
1721 struct fw_ohci *ohci;
1722 u32 bus_options, max_receive, link_speed;
1723 u64 guid;
1724 int err;
1725 size_t size;
1726
1727 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
1728 if (ohci == NULL) {
1729 fw_error("Could not malloc fw_ohci data.\n");
1730 return -ENOMEM;
1731 }
1732
1733 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
1734
1735 err = pci_enable_device(dev);
1736 if (err) {
1737 fw_error("Failed to enable OHCI hardware.\n");
1738 goto fail_put_card;
1739 }
1740
1741 pci_set_master(dev);
1742 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
1743 pci_set_drvdata(dev, ohci);
1744
1745 spin_lock_init(&ohci->lock);
1746
1747 tasklet_init(&ohci->bus_reset_tasklet,
1748 bus_reset_tasklet, (unsigned long)ohci);
1749
1750 err = pci_request_region(dev, 0, ohci_driver_name);
1751 if (err) {
1752 fw_error("MMIO resource unavailable\n");
1753 goto fail_disable;
1754 }
1755
1756 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
1757 if (ohci->registers == NULL) {
1758 fw_error("Failed to remap registers\n");
1759 err = -ENXIO;
1760 goto fail_iomem;
1761 }
1762
1763 if (software_reset(ohci)) {
1764 fw_error("Failed to reset ohci card.\n");
1765 err = -EBUSY;
1766 goto fail_registers;
1767 }
1768
1769 /*
1770 * Now enable LPS, which we need in order to start accessing
1771 * most of the registers. In fact, on some cards (ALI M5251),
1772 * accessing registers in the SClk domain without LPS enabled
1773 * will lock up the machine. Wait 50msec to make sure we have
1774 * full link enabled.
1775 */
1776 reg_write(ohci, OHCI1394_HCControlSet,
1777 OHCI1394_HCControl_LPS |
1778 OHCI1394_HCControl_postedWriteEnable);
1779 flush_writes(ohci);
1780 msleep(50);
1781
1782 reg_write(ohci, OHCI1394_HCControlClear,
1783 OHCI1394_HCControl_noByteSwapData);
1784
1785 reg_write(ohci, OHCI1394_LinkControlSet,
1786 OHCI1394_LinkControl_rcvSelfID |
1787 OHCI1394_LinkControl_cycleTimerEnable |
1788 OHCI1394_LinkControl_cycleMaster);
1789
1790 ar_context_init(&ohci->ar_request_ctx, ohci,
1791 OHCI1394_AsReqRcvContextControlSet);
1792
1793 ar_context_init(&ohci->ar_response_ctx, ohci,
1794 OHCI1394_AsRspRcvContextControlSet);
1795
1796 context_init(&ohci->at_request_ctx, ohci, AT_BUFFER_SIZE,
1797 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
1798
1799 context_init(&ohci->at_response_ctx, ohci, AT_BUFFER_SIZE,
1800 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
1801
1802 reg_write(ohci, OHCI1394_ATRetries,
1803 OHCI1394_MAX_AT_REQ_RETRIES |
1804 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
1805 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));
1806
1807 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
1808 ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
1809 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
1810 size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
1811 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
1812
1813 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
1814 ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
1815 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
1816 size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
1817 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
1818
1819 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
1820 fw_error("Out of memory for it/ir contexts.\n");
1821 err = -ENOMEM;
1822 goto fail_registers;
1823 }
1824
1825 /* self-id dma buffer allocation */
1826 ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
1827 SELF_ID_BUF_SIZE,
1828 &ohci->self_id_bus,
1829 GFP_KERNEL);
1830 if (ohci->self_id_cpu == NULL) {
1831 fw_error("Out of memory for self ID buffer.\n");
1832 err = -ENOMEM;
1833 goto fail_registers;
1834 }
1835
1836 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
1837 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
1838 reg_write(ohci, OHCI1394_IntEventClear, ~0);
1839 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
1840 reg_write(ohci, OHCI1394_IntMaskSet,
1841 OHCI1394_selfIDComplete |
1842 OHCI1394_RQPkt | OHCI1394_RSPkt |
1843 OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
1844 OHCI1394_isochRx | OHCI1394_isochTx |
1845 OHCI1394_masterIntEnable |
1846 OHCI1394_cycle64Seconds);
1847
1848 bus_options = reg_read(ohci, OHCI1394_BusOptions);
1849 max_receive = (bus_options >> 12) & 0xf;
1850 link_speed = bus_options & 0x7;
1851 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
1852 reg_read(ohci, OHCI1394_GUIDLo);
1853
1854 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
1855 if (err < 0)
1856 goto fail_self_id;
1857
1858 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
1859 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
1860 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff);
1861
1862 return 0;
1863
1864 fail_self_id:
1865 dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
1866 ohci->self_id_cpu, ohci->self_id_bus);
1867 fail_registers:
1868 kfree(ohci->it_context_list);
1869 kfree(ohci->ir_context_list);
1870 pci_iounmap(dev, ohci->registers);
1871 fail_iomem:
1872 pci_release_region(dev, 0);
1873 fail_disable:
1874 pci_disable_device(dev);
1875 fail_put_card:
1876 fw_card_put(&ohci->card);
1877
1878 return err;
1879}
1880
/*
 * Tear down one controller: mask all interrupts, unregister the card
 * from the core, soft reset the hardware, then release IRQ, DMA
 * buffers, context lists, MMIO mapping and the PCI device.
 */
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	/* Mask everything and make sure it hit the hardware before the
	 * card is pulled out from under the core. */
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
			  ohci->self_id_cpu, ohci->self_id_bus);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	fw_card_put(&ohci->card);

	fw_notify("Removed fw-ohci device.\n");
}
1908
/* Bind to any PCI device with the FireWire OHCI programming interface. */
static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);
1915
/* PCI driver glue; registered in fw_ohci_init(). */
static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};
1922
1923MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1924MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
1925MODULE_LICENSE("GPL");
1926
1927/* Provide a module alias so root-on-sbp2 initrds don't break. */
1928#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
1929MODULE_ALIAS("ohci1394");
1930#endif
1931
/* Module entry point: register the PCI driver. */
static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}
1936
/* Module exit point: unregister the PCI driver (removes all cards). */
static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}
1941
1942module_init(fw_ohci_init);
1943module_exit(fw_ohci_cleanup);
diff --git a/drivers/firewire/fw-ohci.h b/drivers/firewire/fw-ohci.h
new file mode 100644
index 000000000000..fa15706397d7
--- /dev/null
+++ b/drivers/firewire/fw-ohci.h
@@ -0,0 +1,153 @@
1#ifndef __fw_ohci_h
2#define __fw_ohci_h
3
4/* OHCI register map */
5
6#define OHCI1394_Version 0x000
7#define OHCI1394_GUID_ROM 0x004
8#define OHCI1394_ATRetries 0x008
9#define OHCI1394_CSRData 0x00C
10#define OHCI1394_CSRCompareData 0x010
11#define OHCI1394_CSRControl 0x014
12#define OHCI1394_ConfigROMhdr 0x018
13#define OHCI1394_BusID 0x01C
14#define OHCI1394_BusOptions 0x020
15#define OHCI1394_GUIDHi 0x024
16#define OHCI1394_GUIDLo 0x028
17#define OHCI1394_ConfigROMmap 0x034
18#define OHCI1394_PostedWriteAddressLo 0x038
19#define OHCI1394_PostedWriteAddressHi 0x03C
20#define OHCI1394_VendorID 0x040
21#define OHCI1394_HCControlSet 0x050
22#define OHCI1394_HCControlClear 0x054
23#define OHCI1394_HCControl_BIBimageValid 0x80000000
24#define OHCI1394_HCControl_noByteSwapData 0x40000000
25#define OHCI1394_HCControl_programPhyEnable 0x00800000
26#define OHCI1394_HCControl_aPhyEnhanceEnable 0x00400000
27#define OHCI1394_HCControl_LPS 0x00080000
28#define OHCI1394_HCControl_postedWriteEnable 0x00040000
29#define OHCI1394_HCControl_linkEnable 0x00020000
30#define OHCI1394_HCControl_softReset 0x00010000
31#define OHCI1394_SelfIDBuffer 0x064
32#define OHCI1394_SelfIDCount 0x068
33#define OHCI1394_IRMultiChanMaskHiSet 0x070
34#define OHCI1394_IRMultiChanMaskHiClear 0x074
35#define OHCI1394_IRMultiChanMaskLoSet 0x078
36#define OHCI1394_IRMultiChanMaskLoClear 0x07C
37#define OHCI1394_IntEventSet 0x080
38#define OHCI1394_IntEventClear 0x084
39#define OHCI1394_IntMaskSet 0x088
40#define OHCI1394_IntMaskClear 0x08C
41#define OHCI1394_IsoXmitIntEventSet 0x090
42#define OHCI1394_IsoXmitIntEventClear 0x094
43#define OHCI1394_IsoXmitIntMaskSet 0x098
44#define OHCI1394_IsoXmitIntMaskClear 0x09C
45#define OHCI1394_IsoRecvIntEventSet 0x0A0
46#define OHCI1394_IsoRecvIntEventClear 0x0A4
47#define OHCI1394_IsoRecvIntMaskSet 0x0A8
48#define OHCI1394_IsoRecvIntMaskClear 0x0AC
49#define OHCI1394_InitialBandwidthAvailable 0x0B0
50#define OHCI1394_InitialChannelsAvailableHi 0x0B4
51#define OHCI1394_InitialChannelsAvailableLo 0x0B8
52#define OHCI1394_FairnessControl 0x0DC
53#define OHCI1394_LinkControlSet 0x0E0
54#define OHCI1394_LinkControlClear 0x0E4
55#define OHCI1394_LinkControl_rcvSelfID (1 << 9)
56#define OHCI1394_LinkControl_rcvPhyPkt (1 << 10)
57#define OHCI1394_LinkControl_cycleTimerEnable (1 << 20)
58#define OHCI1394_LinkControl_cycleMaster (1 << 21)
59#define OHCI1394_LinkControl_cycleSource (1 << 22)
60#define OHCI1394_NodeID 0x0E8
61#define OHCI1394_NodeID_idValid 0x80000000
62#define OHCI1394_PhyControl 0x0EC
63#define OHCI1394_PhyControl_Read(addr) (((addr) << 8) | 0x00008000)
64#define OHCI1394_PhyControl_ReadDone 0x80000000
65#define OHCI1394_PhyControl_ReadData(r) (((r) & 0x00ff0000) >> 16)
66#define OHCI1394_PhyControl_Write(addr, data) (((addr) << 8) | (data) | 0x00004000)
67#define OHCI1394_PhyControl_WriteDone 0x00004000
68#define OHCI1394_IsochronousCycleTimer 0x0F0
69#define OHCI1394_AsReqFilterHiSet 0x100
70#define OHCI1394_AsReqFilterHiClear 0x104
71#define OHCI1394_AsReqFilterLoSet 0x108
72#define OHCI1394_AsReqFilterLoClear 0x10C
73#define OHCI1394_PhyReqFilterHiSet 0x110
74#define OHCI1394_PhyReqFilterHiClear 0x114
75#define OHCI1394_PhyReqFilterLoSet 0x118
76#define OHCI1394_PhyReqFilterLoClear 0x11C
77#define OHCI1394_PhyUpperBound 0x120
78
79#define OHCI1394_AsReqTrContextBase 0x180
80#define OHCI1394_AsReqTrContextControlSet 0x180
81#define OHCI1394_AsReqTrContextControlClear 0x184
82#define OHCI1394_AsReqTrCommandPtr 0x18C
83
84#define OHCI1394_AsRspTrContextBase 0x1A0
85#define OHCI1394_AsRspTrContextControlSet 0x1A0
86#define OHCI1394_AsRspTrContextControlClear 0x1A4
87#define OHCI1394_AsRspTrCommandPtr 0x1AC
88
89#define OHCI1394_AsReqRcvContextBase 0x1C0
90#define OHCI1394_AsReqRcvContextControlSet 0x1C0
91#define OHCI1394_AsReqRcvContextControlClear 0x1C4
92#define OHCI1394_AsReqRcvCommandPtr 0x1CC
93
94#define OHCI1394_AsRspRcvContextBase 0x1E0
95#define OHCI1394_AsRspRcvContextControlSet 0x1E0
96#define OHCI1394_AsRspRcvContextControlClear 0x1E4
97#define OHCI1394_AsRspRcvCommandPtr 0x1EC
98
99/* Isochronous transmit registers */
100#define OHCI1394_IsoXmitContextBase(n) (0x200 + 16 * (n))
101#define OHCI1394_IsoXmitContextControlSet(n) (0x200 + 16 * (n))
102#define OHCI1394_IsoXmitContextControlClear(n) (0x204 + 16 * (n))
103#define OHCI1394_IsoXmitCommandPtr(n) (0x20C + 16 * (n))
104
105/* Isochronous receive registers */
106#define OHCI1394_IsoRcvContextBase(n) (0x400 + 32 * (n))
107#define OHCI1394_IsoRcvContextControlSet(n) (0x400 + 32 * (n))
108#define OHCI1394_IsoRcvContextControlClear(n) (0x404 + 32 * (n))
109#define OHCI1394_IsoRcvCommandPtr(n) (0x40C + 32 * (n))
110#define OHCI1394_IsoRcvContextMatch(n) (0x410 + 32 * (n))
111
112/* Interrupts Mask/Events */
113#define OHCI1394_reqTxComplete 0x00000001
114#define OHCI1394_respTxComplete 0x00000002
115#define OHCI1394_ARRQ 0x00000004
116#define OHCI1394_ARRS 0x00000008
117#define OHCI1394_RQPkt 0x00000010
118#define OHCI1394_RSPkt 0x00000020
119#define OHCI1394_isochTx 0x00000040
120#define OHCI1394_isochRx 0x00000080
121#define OHCI1394_postedWriteErr 0x00000100
122#define OHCI1394_lockRespErr 0x00000200
123#define OHCI1394_selfIDComplete 0x00010000
124#define OHCI1394_busReset 0x00020000
125#define OHCI1394_phy 0x00080000
126#define OHCI1394_cycleSynch 0x00100000
127#define OHCI1394_cycle64Seconds 0x00200000
128#define OHCI1394_cycleLost 0x00400000
129#define OHCI1394_cycleInconsistent 0x00800000
130#define OHCI1394_unrecoverableError 0x01000000
131#define OHCI1394_cycleTooLong 0x02000000
132#define OHCI1394_phyRegRcvd 0x04000000
133#define OHCI1394_masterIntEnable 0x80000000
134
135#define OHCI1394_evt_no_status 0x0
136#define OHCI1394_evt_long_packet 0x2
137#define OHCI1394_evt_missing_ack 0x3
138#define OHCI1394_evt_underrun 0x4
139#define OHCI1394_evt_overrun 0x5
140#define OHCI1394_evt_descriptor_read 0x6
141#define OHCI1394_evt_data_read 0x7
142#define OHCI1394_evt_data_write 0x8
143#define OHCI1394_evt_bus_reset 0x9
144#define OHCI1394_evt_timeout 0xa
145#define OHCI1394_evt_tcode_err 0xb
146#define OHCI1394_evt_reserved_b 0xc
147#define OHCI1394_evt_reserved_c 0xd
148#define OHCI1394_evt_unknown 0xe
149#define OHCI1394_evt_flushed 0xf
150
151#define OHCI1394_phy_tcode 0xe
152
153#endif /* __fw_ohci_h */
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
new file mode 100644
index 000000000000..68300414e5f4
--- /dev/null
+++ b/drivers/firewire/fw-sbp2.c
@@ -0,0 +1,1147 @@
1/*
2 * SBP2 driver (SCSI over IEEE1394)
3 *
4 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 * The basic structure of this driver is based on the old storage driver,
23 * drivers/ieee1394/sbp2.c, originally written by
24 * James Goodwin <jamesg@filanet.com>
25 * with later contributions and ongoing maintenance from
26 * Ben Collins <bcollins@debian.org>,
27 * Stefan Richter <stefanr@s5r6.in-berlin.de>
28 * and many others.
29 */
30
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/mod_devicetable.h>
34#include <linux/device.h>
35#include <linux/scatterlist.h>
36#include <linux/dma-mapping.h>
37#include <linux/timer.h>
38
39#include <scsi/scsi.h>
40#include <scsi/scsi_cmnd.h>
41#include <scsi/scsi_dbg.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44
45#include "fw-transaction.h"
46#include "fw-topology.h"
47#include "fw-device.h"
48
/* I don't know why the SCSI stack doesn't define something like this... */
typedef void (*scsi_done_fn_t)(struct scsi_cmnd *);

static const char sbp2_driver_name[] = "sbp2";

/*
 * Per-unit driver state; lives in the hostdata of the Scsi_Host and is
 * reference-counted via 'kref' (released by release_sbp2_device).
 */
struct sbp2_device {
	struct kref kref;
	struct fw_unit *unit;
	struct fw_address_handler address_handler;	/* status FIFO for ORB completions */
	struct list_head orb_list;	/* in-flight ORBs, protected by card->lock */
	u64 management_agent_address;
	u64 command_block_agent_address;
	u32 workarounds;		/* SBP2_WORKAROUND_* bits */
	int login_id;

	/*
	 * We cache these addresses and only update them once we've
	 * logged in or reconnected to the sbp2 device. That way, any
	 * IO to the device will automatically fail and get retried if
	 * it happens in a window where the device is not ready to
	 * handle it (e.g. after a bus reset but before we reconnect).
	 */
	int node_id;
	int address_high;
	int generation;

	int retries;			/* login/reconnect attempts so far */
	struct delayed_work work;	/* runs sbp2_login or sbp2_reconnect */
};

#define SBP2_MAX_SG_ELEMENT_LENGTH	0xf000
#define SBP2_MAX_SECTORS		255	/* Max sectors supported */
#define SBP2_ORB_TIMEOUT		2000	/* Timeout in ms */

#define SBP2_ORB_NULL			0x80000000

#define SBP2_DIRECTION_TO_MEDIA		0x0
#define SBP2_DIRECTION_FROM_MEDIA	0x1

/* Unit directory keys */
#define SBP2_COMMAND_SET_SPECIFIER	0x38
#define SBP2_COMMAND_SET		0x39
#define SBP2_COMMAND_SET_REVISION	0x3b
#define SBP2_FIRMWARE_REVISION		0x3c

/* Flags for detected oddities and brokeness */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_OVERRIDE	0x100

/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST		0x0
#define SBP2_QUERY_LOGINS_REQUEST	0x1
#define SBP2_RECONNECT_REQUEST		0x3
#define SBP2_SET_PASSWORD_REQUEST	0x4
#define SBP2_LOGOUT_REQUEST		0x7
#define SBP2_ABORT_TASK_REQUEST		0xb
#define SBP2_ABORT_TASK_SET		0xc
#define SBP2_LOGICAL_UNIT_RESET		0xe
#define SBP2_TARGET_RESET_REQUEST	0xf

/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE		0x00
#define SBP2_AGENT_RESET		0x04
#define SBP2_ORB_POINTER		0x08
#define SBP2_DOORBELL			0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE	0x14

/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE	0x0
#define SBP2_STATUS_TRANSPORT_FAILURE	0x1
#define SBP2_STATUS_ILLEGAL_REQUEST	0x2
#define SBP2_STATUS_VENDOR_DEPENDENT	0x3

/* Field extractors for the first status quadlet (see struct sbp2_status). */
#define STATUS_GET_ORB_HIGH(v)		((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v)	(((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v)		(((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v)		(((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v)		(((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v)		(((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v)		((v).orb_low)
#define STATUS_GET_DATA(v)		((v).data)

/* Host-order copy of a target status write (filled by sbp2_status_write). */
struct sbp2_status {
	u32 status;
	u32 orb_low;	/* low 32 bits of the completed ORB's bus address */
	u8 data[24];	/* remaining payload, e.g. sense data */
};

/* 48-bit address split into two quadlets, as used in ORBs on the wire. */
struct sbp2_pointer {
	u32 high;
	u32 low;
};

/* Common part of every ORB we keep in sbp2_device.orb_list. */
struct sbp2_orb {
	struct fw_transaction t;
	dma_addr_t request_bus;	/* bus address of the mapped request block */
	int rcode;		/* rcode of the ORB-pointer/doorbell write */
	struct sbp2_pointer pointer;
	void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
	struct list_head link;
};
153
/* Bit fields of a management ORB's 'misc' quadlet. */
#define MANAGEMENT_ORB_LUN(v)			((v))
#define MANAGEMENT_ORB_FUNCTION(v)		((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v)		((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE		((1) << 28)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define MANAGEMENT_ORB_NOTIFY			((1) << 31)

/* Bit fields of a management ORB's 'length' quadlet. */
#define MANAGEMENT_ORB_RESPONSE_LENGTH(v)	((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v)	((v) << 16)

/* Management ORB: the on-the-wire request plus host-side bookkeeping. */
struct sbp2_management_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer password;
		struct sbp2_pointer response;
		u32 misc;
		u32 length;
		struct sbp2_pointer status_fifo;
	} request;
	__be32 response[4];	/* mapped DMA_FROM_DEVICE; target writes here */
	dma_addr_t response_bus;
	struct completion done;	/* completed by complete_management_orb() */
	struct sbp2_status status;
};

/* Field extractors for the first quadlet of a login response. */
#define LOGIN_RESPONSE_GET_LOGIN_ID(v)	((v).misc & 0xffff)
#define LOGIN_RESPONSE_GET_LENGTH(v)	(((v).misc >> 16) & 0xffff)

struct sbp2_login_response {
	u32 misc;
	struct sbp2_pointer command_block_agent;
	u32 reconnect_hold;
};
/* Bit fields of a command ORB's 'misc' quadlet. */
#define COMMAND_ORB_DATA_SIZE(v)	((v))
#define COMMAND_ORB_PAGE_SIZE(v)	((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT	((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v)	((v) << 20)
#define COMMAND_ORB_SPEED(v)		((v) << 24)
#define COMMAND_ORB_DIRECTION(v)	((v) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v)	((v) << 29)
#define COMMAND_ORB_NOTIFY		((1) << 31)

/* Command ORB: carries one SCSI command plus its data description. */
struct sbp2_command_orb {
	struct sbp2_orb base;
	struct {
		struct sbp2_pointer next;
		struct sbp2_pointer data_descriptor;
		u32 misc;
		u8 command_block[12];
	} request;
	struct scsi_cmnd *cmd;
	scsi_done_fn_t done;
	struct fw_unit *unit;

	struct sbp2_pointer page_table[SG_ALL];	/* sbp2 page table for sg lists */
	dma_addr_t page_table_bus;
	dma_addr_t request_buffer_bus;
};
212
/*
 * List of devices with known bugs.
 *
 * The firmware_revision field, masked with 0xffff00, is the best
 * indicator for the type of bridge chip of a device.  It yields a few
 * false positives but this did not break correctly behaving devices
 * so far.  We use ~0 as a wildcard, since the 24 bit values we get
 * from the config rom can never match that.
 */
static const struct {
	u32 firmware_revision;
	u32 model;
	unsigned workarounds;	/* SBP2_WORKAROUND_* bits to apply */
} sbp2_workarounds_table[] = {
	/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
		.firmware_revision	= 0x002800,
		.model			= 0x001010,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36 |
					  SBP2_WORKAROUND_MODE_SENSE_8,
	},
	/* Initio bridges, actually only needed for some older ones */ {
		.firmware_revision	= 0x000200,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_INQUIRY_36,
	},
	/* Symbios bridge */ {
		.firmware_revision	= 0xa0b800,
		.model			= ~0,
		.workarounds		= SBP2_WORKAROUND_128K_MAX_TRANS,
	},

	/*
	 * There are iPods (2nd gen, 3rd gen) with model_id == 0, but
	 * these iPods do not feature the read_capacity bug according
	 * to one report.  Read_capacity behaviour as well as model_id
	 * could change due to Apple-supplied firmware updates though.
	 */

	/* iPod 4th generation. */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000021,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod mini */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x000023,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	},
	/* iPod Photo */ {
		.firmware_revision	= 0x0a2700,
		.model			= 0x00007e,
		.workarounds		= SBP2_WORKAROUND_FIX_CAPACITY,
	}
};
267
/*
 * Address handler for the unit's status FIFO.  The target posts ORB
 * completion status here as a block write; we match the status against
 * a pending ORB by the ORB's bus address and dispatch to its callback.
 */
static void
sbp2_status_write(struct fw_card *card, struct fw_request *request,
		  int tcode, int destination, int source,
		  int generation, int speed,
		  unsigned long long offset,
		  void *payload, size_t length, void *callback_data)
{
	struct sbp2_device *sd = callback_data;
	struct sbp2_orb *orb;
	struct sbp2_status status;
	size_t header_size;
	unsigned long flags;

	/* A status write must be a block write of 1..sizeof(status) bytes. */
	if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
	    length == 0 || length > sizeof(status)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	/* First two quadlets are the byte-swapped header; the rest is raw. */
	header_size = min(length, 2 * sizeof(u32));
	fw_memcpy_from_be32(&status, payload, header_size);
	if (length > header_size)
		memcpy(status.data, payload + 8, length - header_size);
	if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
		fw_notify("non-orb related status write, not handled\n");
		fw_send_response(card, request, RCODE_COMPLETE);
		return;
	}

	/* Lookup the orb corresponding to this status write. */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(orb, &sd->orb_list, link) {
		if (STATUS_GET_ORB_HIGH(status) == 0 &&
		    STATUS_GET_ORB_LOW(status) == orb->request_bus &&
		    orb->rcode == RCODE_COMPLETE) {
			list_del(&orb->link);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	/* Loop fell off the end iff there was no match (list idiom). */
	if (&orb->link != &sd->orb_list)
		orb->callback(orb, &status);
	else
		fw_error("status write for unknown orb\n");

	fw_send_response(card, request, RCODE_COMPLETE);
}
316
317static void
318complete_transaction(struct fw_card *card, int rcode,
319 void *payload, size_t length, void *data)
320{
321 struct sbp2_orb *orb = data;
322 unsigned long flags;
323
324 orb->rcode = rcode;
325 if (rcode != RCODE_COMPLETE) {
326 spin_lock_irqsave(&card->lock, flags);
327 list_del(&orb->link);
328 spin_unlock_irqrestore(&card->lock, flags);
329 orb->callback(orb, NULL);
330 }
331}
332
333static void
334sbp2_send_orb(struct sbp2_orb *orb, struct fw_unit *unit,
335 int node_id, int generation, u64 offset)
336{
337 struct fw_device *device = fw_device(unit->device.parent);
338 struct sbp2_device *sd = unit->device.driver_data;
339 unsigned long flags;
340
341 orb->pointer.high = 0;
342 orb->pointer.low = orb->request_bus;
343 fw_memcpy_to_be32(&orb->pointer, &orb->pointer, sizeof(orb->pointer));
344
345 spin_lock_irqsave(&device->card->lock, flags);
346 list_add_tail(&orb->link, &sd->orb_list);
347 spin_unlock_irqrestore(&device->card->lock, flags);
348
349 fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
350 node_id, generation,
351 device->node->max_speed, offset,
352 &orb->pointer, sizeof(orb->pointer),
353 complete_transaction, orb);
354}
355
356static int sbp2_cancel_orbs(struct fw_unit *unit)
357{
358 struct fw_device *device = fw_device(unit->device.parent);
359 struct sbp2_device *sd = unit->device.driver_data;
360 struct sbp2_orb *orb, *next;
361 struct list_head list;
362 unsigned long flags;
363 int retval = -ENOENT;
364
365 INIT_LIST_HEAD(&list);
366 spin_lock_irqsave(&device->card->lock, flags);
367 list_splice_init(&sd->orb_list, &list);
368 spin_unlock_irqrestore(&device->card->lock, flags);
369
370 list_for_each_entry_safe(orb, next, &list, link) {
371 retval = 0;
372 if (fw_cancel_transaction(device->card, &orb->t) == 0)
373 continue;
374
375 orb->rcode = RCODE_CANCELLED;
376 orb->callback(orb, NULL);
377 }
378
379 return retval;
380}
381
382static void
383complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
384{
385 struct sbp2_management_orb *orb =
386 (struct sbp2_management_orb *)base_orb;
387
388 if (status)
389 memcpy(&orb->status, status, sizeof(*status));
390 complete(&orb->done);
391}
392
393static int
394sbp2_send_management_orb(struct fw_unit *unit, int node_id, int generation,
395 int function, int lun, void *response)
396{
397 struct fw_device *device = fw_device(unit->device.parent);
398 struct sbp2_device *sd = unit->device.driver_data;
399 struct sbp2_management_orb *orb;
400 int retval = -ENOMEM;
401
402 orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
403 if (orb == NULL)
404 return -ENOMEM;
405
406 /*
407 * The sbp2 device is going to send a block read request to
408 * read out the request from host memory, so map it for dma.
409 */
410 orb->base.request_bus =
411 dma_map_single(device->card->device, &orb->request,
412 sizeof(orb->request), DMA_TO_DEVICE);
413 if (dma_mapping_error(orb->base.request_bus))
414 goto out;
415
416 orb->response_bus =
417 dma_map_single(device->card->device, &orb->response,
418 sizeof(orb->response), DMA_FROM_DEVICE);
419 if (dma_mapping_error(orb->response_bus))
420 goto out;
421
422 orb->request.response.high = 0;
423 orb->request.response.low = orb->response_bus;
424
425 orb->request.misc =
426 MANAGEMENT_ORB_NOTIFY |
427 MANAGEMENT_ORB_FUNCTION(function) |
428 MANAGEMENT_ORB_LUN(lun);
429 orb->request.length =
430 MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response));
431
432 orb->request.status_fifo.high = sd->address_handler.offset >> 32;
433 orb->request.status_fifo.low = sd->address_handler.offset;
434
435 /*
436 * FIXME: Yeah, ok this isn't elegant, we hardwire exclusive
437 * login and 1 second reconnect time. The reconnect setting
438 * is probably fine, but the exclusive login should be an option.
439 */
440 if (function == SBP2_LOGIN_REQUEST) {
441 orb->request.misc |=
442 MANAGEMENT_ORB_EXCLUSIVE |
443 MANAGEMENT_ORB_RECONNECT(0);
444 }
445
446 fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));
447
448 init_completion(&orb->done);
449 orb->base.callback = complete_management_orb;
450
451 sbp2_send_orb(&orb->base, unit,
452 node_id, generation, sd->management_agent_address);
453
454 wait_for_completion_timeout(&orb->done,
455 msecs_to_jiffies(SBP2_ORB_TIMEOUT));
456
457 retval = -EIO;
458 if (sbp2_cancel_orbs(unit) == 0) {
459 fw_error("orb reply timed out, rcode=0x%02x\n",
460 orb->base.rcode);
461 goto out;
462 }
463
464 if (orb->base.rcode != RCODE_COMPLETE) {
465 fw_error("management write failed, rcode 0x%02x\n",
466 orb->base.rcode);
467 goto out;
468 }
469
470 if (STATUS_GET_RESPONSE(orb->status) != 0 ||
471 STATUS_GET_SBP_STATUS(orb->status) != 0) {
472 fw_error("error status: %d:%d\n",
473 STATUS_GET_RESPONSE(orb->status),
474 STATUS_GET_SBP_STATUS(orb->status));
475 goto out;
476 }
477
478 retval = 0;
479 out:
480 dma_unmap_single(device->card->device, orb->base.request_bus,
481 sizeof(orb->request), DMA_TO_DEVICE);
482 dma_unmap_single(device->card->device, orb->response_bus,
483 sizeof(orb->response), DMA_FROM_DEVICE);
484
485 if (response)
486 fw_memcpy_from_be32(response,
487 orb->response, sizeof(orb->response));
488 kfree(orb);
489
490 return retval;
491}
492
493static void
494complete_agent_reset_write(struct fw_card *card, int rcode,
495 void *payload, size_t length, void *data)
496{
497 struct fw_transaction *t = data;
498
499 kfree(t);
500}
501
/*
 * Reset the target's command block agent by writing a zero quadlet to
 * its AGENT_RESET register.  Fire-and-forget: the transaction is freed
 * by complete_agent_reset_write().  Returns -ENOMEM if the transaction
 * cannot be allocated, 0 otherwise.
 */
static int sbp2_agent_reset(struct fw_unit *unit)
{
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_device *sd = unit->device.driver_data;
	struct fw_transaction *t;
	/* static so the payload stays valid after this function returns */
	static u32 zero;

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (t == NULL)
		return -ENOMEM;

	fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
			sd->node_id, sd->generation, SCODE_400,
			sd->command_block_agent_address + SBP2_AGENT_RESET,
			&zero, sizeof(zero), complete_agent_reset_write, t);

	return 0;
}
520
static void sbp2_reconnect(struct work_struct *work);
static struct scsi_host_template scsi_driver_template;

/*
 * Final teardown, called when the last kref on the sbp2_device is
 * dropped: log out of the target, remove the SCSI host and the status
 * FIFO handler, and drop the references on the unit and the host.
 */
static void
release_sbp2_device(struct kref *kref)
{
	struct sbp2_device *sd = container_of(kref, struct sbp2_device, kref);
	/* sd is the hostdata of its Scsi_Host; recover the host from it. */
	struct Scsi_Host *host =
		container_of((void *)sd, struct Scsi_Host, hostdata[0]);

	sbp2_send_management_orb(sd->unit, sd->node_id, sd->generation,
				 SBP2_LOGOUT_REQUEST, sd->login_id, NULL);

	scsi_remove_host(host);
	fw_core_remove_address_handler(&sd->address_handler);
	fw_notify("removed sbp2 unit %s\n", sd->unit->device.bus_id);
	put_device(&sd->unit->device);
	scsi_host_put(host);
}
540
/*
 * Workqueue function: log in to the target.  On failure, reschedules
 * itself up to 5 times (the scheduled work holds a kref); on success,
 * records the topology snapshot in sd, arms the work item as
 * sbp2_reconnect for future bus resets, and registers the SCSI device.
 */
static void sbp2_login(struct work_struct *work)
{
	struct sbp2_device *sd =
		container_of(work, struct sbp2_device, work.work);
	struct Scsi_Host *host =
		container_of((void *)sd, struct Scsi_Host, hostdata[0]);
	struct fw_unit *unit = sd->unit;
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_login_response response;
	int generation, node_id, local_node_id, lun, retval;

	/* FIXME: Make this work for multi-lun devices. */
	lun = 0;

	/* Snapshot topology; committed to sd only after a successful login. */
	generation = device->card->generation;
	node_id = device->node->node_id;
	local_node_id = device->card->local_node->node_id;

	if (sbp2_send_management_orb(unit, node_id, generation,
				     SBP2_LOGIN_REQUEST, lun, &response) < 0) {
		if (sd->retries++ < 5) {
			/* Retry in ~1/5 s; the pending work keeps its kref. */
			schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
		} else {
			fw_error("failed to login to %s\n",
				 unit->device.bus_id);
			kref_put(&sd->kref, release_sbp2_device);
		}
		return;
	}

	sd->generation = generation;
	sd->node_id = node_id;
	sd->address_high = local_node_id << 16;

	/* Get command block agent offset and login id. */
	sd->command_block_agent_address =
		((u64) (response.command_block_agent.high & 0xffff) << 32) |
		response.command_block_agent.low;
	sd->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response);

	fw_notify("logged in to sbp2 unit %s (%d retries)\n",
		  unit->device.bus_id, sd->retries);
	fw_notify(" - management_agent_address: 0x%012llx\n",
		  (unsigned long long) sd->management_agent_address);
	fw_notify(" - command_block_agent_address: 0x%012llx\n",
		  (unsigned long long) sd->command_block_agent_address);
	fw_notify(" - status write address: 0x%012llx\n",
		  (unsigned long long) sd->address_handler.offset);

#if 0
	/* FIXME: The linux1394 sbp2 does this last step. */
	sbp2_set_busy_timeout(scsi_id);
#endif

	/* From now on, bus resets trigger reconnect instead of login. */
	PREPARE_DELAYED_WORK(&sd->work, sbp2_reconnect);
	sbp2_agent_reset(unit);

	/* FIXME: Loop over luns here. */
	lun = 0;
	retval = scsi_add_device(host, 0, 0, lun);
	if (retval < 0) {
		sbp2_send_management_orb(unit, sd->node_id, sd->generation,
					 SBP2_LOGOUT_REQUEST, sd->login_id,
					 NULL);
		/*
		 * Set this back to sbp2_login so we fall back and
		 * retry login on bus reset.
		 */
		PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
	}
	/* Drop the reference taken when this work was scheduled. */
	kref_put(&sd->kref, release_sbp2_device);
}
613
/*
 * Bus probe: allocate the Scsi_Host (with struct sbp2_device as its
 * hostdata), register the status FIFO address handler and the SCSI
 * host, read the management agent address / firmware revision / model
 * from the unit directory, apply quirk workarounds, and schedule the
 * initial login.  Returns 0 on success or a negative errno.
 */
static int sbp2_probe(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_device *sd;
	struct fw_csr_iterator ci;
	struct Scsi_Host *host;
	int i, key, value, err;
	u32 model, firmware_revision;

	err = -ENOMEM;
	host = scsi_host_alloc(&scsi_driver_template, sizeof(*sd));
	if (host == NULL)
		goto fail;

	sd = (struct sbp2_device *) host->hostdata;
	unit->device.driver_data = sd;
	sd->unit = unit;
	INIT_LIST_HEAD(&sd->orb_list);
	kref_init(&sd->kref);

	/* Claim a 0x100-byte status FIFO; ORBs point the target at it. */
	sd->address_handler.length = 0x100;
	sd->address_handler.address_callback = sbp2_status_write;
	sd->address_handler.callback_data = sd;

	err = fw_core_add_address_handler(&sd->address_handler,
					  &fw_high_memory_region);
	if (err < 0)
		goto fail_host;

	err = fw_device_enable_phys_dma(device);
	if (err < 0)
		goto fail_address_handler;

	err = scsi_add_host(host, &unit->device);
	if (err < 0)
		goto fail_address_handler;

	/*
	 * Scan unit directory to get management agent address,
	 * firmware revison and model.  Initialize firmware_revision
	 * and model to values that wont match anything in our table.
	 */
	firmware_revision = 0xff000000;
	model = 0xff000000;
	fw_csr_iterator_init(&ci, unit->directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_DEPENDENT_INFO | CSR_OFFSET:
			sd->management_agent_address =
				0xfffff0000000ULL + 4 * value;
			break;
		case SBP2_FIRMWARE_REVISION:
			firmware_revision = value;
			break;
		case CSR_MODEL:
			model = value;
			break;
		}
	}

	/* Match against the quirk table; first hit wins. */
	for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
		if (sbp2_workarounds_table[i].firmware_revision !=
		    (firmware_revision & 0xffffff00))
			continue;
		if (sbp2_workarounds_table[i].model != model &&
		    sbp2_workarounds_table[i].model != ~0)
			continue;
		sd->workarounds |= sbp2_workarounds_table[i].workarounds;
		break;
	}

	if (sd->workarounds)
		fw_notify("Workarounds for node %s: 0x%x "
			  "(firmware_revision 0x%06x, model_id 0x%06x)\n",
			  unit->device.bus_id,
			  sd->workarounds, firmware_revision, model);

	get_device(&unit->device);

	/*
	 * We schedule work to do the login so we can easily
	 * reschedule retries.  Always get the ref before scheduling
	 * work.
	 */
	INIT_DELAYED_WORK(&sd->work, sbp2_login);
	if (schedule_delayed_work(&sd->work, 0))
		kref_get(&sd->kref);

	return 0;

 fail_address_handler:
	fw_core_remove_address_handler(&sd->address_handler);
 fail_host:
	scsi_host_put(host);
 fail:
	return err;
}
712
713static int sbp2_remove(struct device *dev)
714{
715 struct fw_unit *unit = fw_unit(dev);
716 struct sbp2_device *sd = unit->device.driver_data;
717
718 kref_put(&sd->kref, release_sbp2_device);
719
720 return 0;
721}
722
/*
 * Workqueue function armed after a successful login: re-establish the
 * existing login after a bus reset.  Retries up to 5 times, then falls
 * back to a full re-login via sbp2_login.
 */
static void sbp2_reconnect(struct work_struct *work)
{
	struct sbp2_device *sd =
		container_of(work, struct sbp2_device, work.work);
	struct fw_unit *unit = sd->unit;
	struct fw_device *device = fw_device(unit->device.parent);
	int generation, node_id, local_node_id;

	/* Snapshot topology; committed to sd only after reconnect succeeds. */
	generation = device->card->generation;
	node_id = device->node->node_id;
	local_node_id = device->card->local_node->node_id;

	if (sbp2_send_management_orb(unit, node_id, generation,
				     SBP2_RECONNECT_REQUEST,
				     sd->login_id, NULL) < 0) {
		if (sd->retries++ >= 5) {
			fw_error("failed to reconnect to %s\n",
				 unit->device.bus_id);
			/* Fall back and try to log in again. */
			sd->retries = 0;
			PREPARE_DELAYED_WORK(&sd->work, sbp2_login);
		}
		/* Retry (reconnect or login) in ~1/5 s, keeping the kref. */
		schedule_delayed_work(&sd->work, DIV_ROUND_UP(HZ, 5));
		return;
	}

	sd->generation = generation;
	sd->node_id = node_id;
	sd->address_high = local_node_id << 16;

	fw_notify("reconnected to unit %s (%d retries)\n",
		  unit->device.bus_id, sd->retries);
	sbp2_agent_reset(unit);
	/* Flush ORBs that were in flight across the bus reset. */
	sbp2_cancel_orbs(unit);
	/* Drop the reference taken when this work was scheduled. */
	kref_put(&sd->kref, release_sbp2_device);
}
759
760static void sbp2_update(struct fw_unit *unit)
761{
762 struct fw_device *device = fw_device(unit->device.parent);
763 struct sbp2_device *sd = unit->device.driver_data;
764
765 sd->retries = 0;
766 fw_device_enable_phys_dma(device);
767 if (schedule_delayed_work(&sd->work, 0))
768 kref_get(&sd->kref);
769}
770
#define SBP2_UNIT_SPEC_ID_ENTRY	0x0000609e
#define SBP2_SW_VERSION_ENTRY	0x00010483

/* Bind to any unit advertising the SBP-2 specifier id and version. */
static const struct fw_device_id sbp2_id_table[] = {
	{
		.match_flags  = FW_MATCH_SPECIFIER_ID | FW_MATCH_VERSION,
		.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
		.version      = SBP2_SW_VERSION_ENTRY,
	},
	{ }
};

static struct fw_driver sbp2_driver = {
	.driver   = {
		.owner  = THIS_MODULE,
		.name   = sbp2_driver_name,
		.bus    = &fw_bus_type,
		.probe  = sbp2_probe,
		.remove = sbp2_remove,
	},
	.update   = sbp2_update,	/* called on bus reset */
	.id_table = sbp2_id_table,
};
794
/*
 * Translate the SBP-2 sense block carried in a status write into a
 * fixed-format SCSI sense buffer, and return the SCSI result word.
 * sbp2_status holds the fields in a different order than SPC's
 * fixed-format sense data, hence the shuffled copy below.
 */
static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
	int sam_status;

	sense_data[0] = 0x70;			/* current error, fixed format */
	sense_data[1] = 0x0;
	sense_data[2] = sbp2_status[1];		/* sense key */
	sense_data[3] = sbp2_status[4];		/* information bytes */
	sense_data[4] = sbp2_status[5];
	sense_data[5] = sbp2_status[6];
	sense_data[6] = sbp2_status[7];
	sense_data[7] = 10;			/* additional sense length */
	sense_data[8] = sbp2_status[8];
	sense_data[9] = sbp2_status[9];
	sense_data[10] = sbp2_status[10];
	sense_data[11] = sbp2_status[11];
	sense_data[12] = sbp2_status[2];	/* ASC */
	sense_data[13] = sbp2_status[3];	/* ASCQ */
	sense_data[14] = sbp2_status[12];
	sense_data[15] = sbp2_status[13];

	sam_status = sbp2_status[0] & 0x3f;

	switch (sam_status) {
	case SAM_STAT_GOOD:
	case SAM_STAT_CHECK_CONDITION:
	case SAM_STAT_CONDITION_MET:
	case SAM_STAT_BUSY:
	case SAM_STAT_RESERVATION_CONFLICT:
	case SAM_STAT_COMMAND_TERMINATED:
		return DID_OK << 16 | sam_status;

	default:
		return DID_ERROR << 16;
	}
}
832
833static void
834complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
835{
836 struct sbp2_command_orb *orb = (struct sbp2_command_orb *)base_orb;
837 struct fw_unit *unit = orb->unit;
838 struct fw_device *device = fw_device(unit->device.parent);
839 struct scatterlist *sg;
840 int result;
841
842 if (status != NULL) {
843 if (STATUS_GET_DEAD(*status))
844 sbp2_agent_reset(unit);
845
846 switch (STATUS_GET_RESPONSE(*status)) {
847 case SBP2_STATUS_REQUEST_COMPLETE:
848 result = DID_OK << 16;
849 break;
850 case SBP2_STATUS_TRANSPORT_FAILURE:
851 result = DID_BUS_BUSY << 16;
852 break;
853 case SBP2_STATUS_ILLEGAL_REQUEST:
854 case SBP2_STATUS_VENDOR_DEPENDENT:
855 default:
856 result = DID_ERROR << 16;
857 break;
858 }
859
860 if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
861 result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
862 orb->cmd->sense_buffer);
863 } else {
864 /*
865 * If the orb completes with status == NULL, something
866 * went wrong, typically a bus reset happened mid-orb
867 * or when sending the write (less likely).
868 */
869 result = DID_BUS_BUSY << 16;
870 }
871
872 dma_unmap_single(device->card->device, orb->base.request_bus,
873 sizeof(orb->request), DMA_TO_DEVICE);
874
875 if (orb->cmd->use_sg > 0) {
876 sg = (struct scatterlist *)orb->cmd->request_buffer;
877 dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
878 orb->cmd->sc_data_direction);
879 }
880
881 if (orb->page_table_bus != 0)
882 dma_unmap_single(device->card->device, orb->page_table_bus,
883 sizeof(orb->page_table_bus), DMA_TO_DEVICE);
884
885 if (orb->request_buffer_bus != 0)
886 dma_unmap_single(device->card->device, orb->request_buffer_bus,
887 sizeof(orb->request_buffer_bus),
888 DMA_FROM_DEVICE);
889
890 orb->cmd->result = result;
891 orb->done(orb->cmd);
892 kfree(orb);
893}
894
/*
 * Map the command's scatterlist for DMA and describe it in the ORB:
 * either as a single immediate data pointer (one small sg element) or
 * as an SBP-2 page table.  Returns 0 on success, -ENOMEM on mapping
 * failure.
 */
static int sbp2_command_orb_map_scatterlist(struct sbp2_command_orb *orb)
{
	struct sbp2_device *sd =
		(struct sbp2_device *)orb->cmd->device->host->hostdata;
	struct fw_unit *unit = sd->unit;
	struct fw_device *device = fw_device(unit->device.parent);
	struct scatterlist *sg;
	int sg_len, l, i, j, count;
	size_t size;
	dma_addr_t sg_addr;

	sg = (struct scatterlist *)orb->cmd->request_buffer;
	count = dma_map_sg(device->card->device, sg, orb->cmd->use_sg,
			   orb->cmd->sc_data_direction);
	if (count == 0)
		goto fail;

	/*
	 * Handle the special case where there is only one element in
	 * the scatter list by converting it to an immediate block
	 * request.  This is also a workaround for broken devices such
	 * as the second generation iPod which doesn't support page
	 * tables.
	 */
	if (count == 1 && sg_dma_len(sg) < SBP2_MAX_SG_ELEMENT_LENGTH) {
		orb->request.data_descriptor.high = sd->address_high;
		orb->request.data_descriptor.low = sg_dma_address(sg);
		orb->request.misc |=
			COMMAND_ORB_DATA_SIZE(sg_dma_len(sg));
		return 0;
	}

	/*
	 * Convert the scatterlist to an sbp2 page table.  If any
	 * scatterlist entries are too big for sbp2, we split them as we
	 * go.  Even if we ask the block I/O layer to not give us sg
	 * elements larger than 65535 bytes, some IOMMUs may merge sg elements
	 * during DMA mapping, and Linux currently doesn't prevent this.
	 */
	for (i = 0, j = 0; i < count; i++) {
		sg_len = sg_dma_len(sg + i);
		sg_addr = sg_dma_address(sg + i);
		while (sg_len) {
			l = min(sg_len, SBP2_MAX_SG_ELEMENT_LENGTH);
			orb->page_table[j].low = sg_addr;
			orb->page_table[j].high = (l << 16);
			sg_addr += l;
			sg_len -= l;
			j++;
		}
	}

	/* Only map the j entries actually used. */
	size = sizeof(orb->page_table[0]) * j;

	/*
	 * The data_descriptor pointer is the one case where we need
	 * to fill in the node ID part of the address.  All other
	 * pointers assume that the data referenced reside on the
	 * initiator (i.e. us), but data_descriptor can refer to data
	 * on other nodes so we need to put our ID in descriptor.high.
	 */

	orb->page_table_bus =
		dma_map_single(device->card->device, orb->page_table,
			       size, DMA_TO_DEVICE);
	if (dma_mapping_error(orb->page_table_bus))
		goto fail_page_table;
	orb->request.data_descriptor.high = sd->address_high;
	orb->request.data_descriptor.low = orb->page_table_bus;
	orb->request.misc |=
		COMMAND_ORB_PAGE_TABLE_PRESENT |
		COMMAND_ORB_DATA_SIZE(j);

	fw_memcpy_to_be32(orb->page_table, orb->page_table, size);

	return 0;

 fail_page_table:
	dma_unmap_sg(device->card->device, sg, orb->cmd->use_sg,
		     orb->cmd->sc_data_direction);
 fail:
	return -ENOMEM;
}
978
979/* SCSI stack integration */
980
/* SCSI stack integration */

/*
 * queuecommand hook: build a command ORB for 'cmd', map its request
 * block (and scatterlist, if any) for DMA, and hand it to the target's
 * command block agent.  Returns 0 on acceptance (including the
 * DMA_BIDIRECTIONAL reject, which completes the command with an error)
 * or SCSI_MLQUEUE_HOST_BUSY on resource shortage.
 */
static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
{
	struct sbp2_device *sd =
		(struct sbp2_device *)cmd->device->host->hostdata;
	struct fw_unit *unit = sd->unit;
	struct fw_device *device = fw_device(unit->device.parent);
	struct sbp2_command_orb *orb;

	/*
	 * Bidirectional commands are not yet implemented, and unknown
	 * transfer direction not handled.
	 */
	if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) {
		fw_error("Cannot handle DMA_BIDIRECTIONAL - rejecting command");
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}

	orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
	if (orb == NULL) {
		fw_notify("failed to alloc orb\n");
		goto fail_alloc;
	}

	/* Initialize rcode to something not RCODE_COMPLETE. */
	orb->base.rcode = -1;
	orb->base.request_bus =
		dma_map_single(device->card->device, &orb->request,
			       sizeof(orb->request), DMA_TO_DEVICE);
	if (dma_mapping_error(orb->base.request_bus))
		goto fail_mapping;

	orb->unit = unit;
	orb->done = done;
	orb->cmd  = cmd;

	/* No chained ORBs: next pointer is the NULL marker. */
	orb->request.next.high = SBP2_ORB_NULL;
	orb->request.next.low = 0x0;
	/*
	 * At speed 100 we can do 512 bytes per packet, at speed 200,
	 * 1024 bytes per packet etc.  The SBP-2 max_payload field
	 * specifies the max payload size as 2 ^ (max_payload + 2), so
	 * if we set this to max_speed + 7, we get the right value.
	 */
	orb->request.misc =
		COMMAND_ORB_MAX_PAYLOAD(device->node->max_speed + 7) |
		COMMAND_ORB_SPEED(device->node->max_speed) |
		COMMAND_ORB_NOTIFY;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_FROM_MEDIA);
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		orb->request.misc |=
			COMMAND_ORB_DIRECTION(SBP2_DIRECTION_TO_MEDIA);

	if (cmd->use_sg && sbp2_command_orb_map_scatterlist(orb) < 0)
		goto fail_map_payload;

	fw_memcpy_to_be32(&orb->request, &orb->request, sizeof(orb->request));

	memset(orb->request.command_block,
	       0, sizeof(orb->request.command_block));
	memcpy(orb->request.command_block, cmd->cmnd, COMMAND_SIZE(*cmd->cmnd));

	orb->base.callback = complete_command_orb;

	sbp2_send_orb(&orb->base, unit, sd->node_id, sd->generation,
		      sd->command_block_agent_address + SBP2_ORB_POINTER);

	return 0;

 fail_map_payload:
	dma_unmap_single(device->card->device, orb->base.request_bus,
			 sizeof(orb->request), DMA_TO_DEVICE);
 fail_mapping:
	kfree(orb);
 fail_alloc:
	return SCSI_MLQUEUE_HOST_BUSY;
}
1062
1063static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
1064{
1065 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1066
1067 sdev->allow_restart = 1;
1068
1069 if (sd->workarounds & SBP2_WORKAROUND_INQUIRY_36)
1070 sdev->inquiry_len = 36;
1071 return 0;
1072}
1073
1074static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
1075{
1076 struct sbp2_device *sd = (struct sbp2_device *)sdev->host->hostdata;
1077 struct fw_unit *unit = sd->unit;
1078
1079 sdev->use_10_for_rw = 1;
1080
1081 if (sdev->type == TYPE_ROM)
1082 sdev->use_10_for_ms = 1;
1083 if (sdev->type == TYPE_DISK &&
1084 sd->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
1085 sdev->skip_ms_page_8 = 1;
1086 if (sd->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) {
1087 fw_notify("setting fix_capacity for %s\n", unit->device.bus_id);
1088 sdev->fix_capacity = 1;
1089 }
1090
1091 return 0;
1092}
1093
1094/*
1095 * Called by scsi stack when something has really gone wrong. Usually
1096 * called when a command has timed-out for some reason.
1097 */
1098static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
1099{
1100 struct sbp2_device *sd =
1101 (struct sbp2_device *)cmd->device->host->hostdata;
1102 struct fw_unit *unit = sd->unit;
1103
1104 fw_notify("sbp2_scsi_abort\n");
1105 sbp2_agent_reset(unit);
1106 sbp2_cancel_orbs(unit);
1107
1108 return SUCCESS;
1109}
1110
static struct scsi_host_template scsi_driver_template = {
	.module			= THIS_MODULE,
	.name			= "SBP-2 IEEE-1394",
	.proc_name		= (char *)sbp2_driver_name,
	.queuecommand		= sbp2_scsi_queuecommand,
	.slave_alloc		= sbp2_scsi_slave_alloc,
	.slave_configure	= sbp2_scsi_slave_configure,
	.eh_abort_handler	= sbp2_scsi_abort,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,	/* matches page_table[SG_ALL] */
	.use_clustering		= ENABLE_CLUSTERING,
	.cmd_per_lun		= 1,	/* one outstanding command per lun */
	.can_queue		= 1,	/* no ORB chaining yet */
};
1125
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);

/* Provide a module alias so root-on-sbp2 initrds don't break. */
/* NOTE(review): the guard presumably avoids clashing with the old
 * ieee1394 sbp2 driver when that one is built as a module -- confirm. */
#ifndef CONFIG_IEEE1394_SBP2_MODULE
MODULE_ALIAS("sbp2");
#endif
1135
/* Module entry point: register the driver with the firewire bus. */
static int __init sbp2_init(void)
{
	return driver_register(&sbp2_driver.driver);
}

/* Module exit point: unregister the driver. */
static void __exit sbp2_cleanup(void)
{
	driver_unregister(&sbp2_driver.driver);
}

module_init(sbp2_init);
module_exit(sbp2_cleanup);
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
new file mode 100644
index 000000000000..7aebb8ae0efa
--- /dev/null
+++ b/drivers/firewire/fw-topology.c
@@ -0,0 +1,537 @@
1/*
2 * Incremental bus scan, based on bus topology
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/wait.h>
23#include <linux/errno.h>
24#include "fw-transaction.h"
25#include "fw-topology.h"
26
/* Field extractors for IEEE 1394 self-ID packet quadlets (first packet
 * and, below, the extended packets). */
27#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
28#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
29#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
30#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
31#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
32#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
33#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
34#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
35
36#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
37
/*
 * Count the ports reported by the self-ID packet(s) starting at @sid,
 * filling in the total and child port counts.  Returns the pointer just
 * past the consumed packet(s), or NULL when an extended packet is not
 * flagged as extended or its sequence number is out of order.
 */
38static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
39{
40 u32 q;
41 int port_type, shift, seq;
42
43 *total_port_count = 0;
44 *child_port_count = 0;
45
46 shift = 6;
47 q = *sid;
48 seq = 0;
49
50 while (1) {
51 port_type = (q >> shift) & 0x03;
52 switch (port_type) {
53 case SELFID_PORT_CHILD:
54 (*child_port_count)++;
 /* fall through: a child port also counts as connected */
55 case SELFID_PORT_PARENT:
56 case SELFID_PORT_NCONN:
57 (*total_port_count)++;
 /* fall through: NONE is not counted at all */
58 case SELFID_PORT_NONE:
59 break;
60 }
61
62 shift -= 2;
63 if (shift == 0) {
64 if (!SELF_ID_MORE_PACKETS(q))
65 return sid + 1;
66
67 shift = 16;
68 sid++;
69 q = *sid;
70
71 /*
72 * Check that the extra packets actually are
73 * extended self ID packets and that the
74 * sequence numbers in the extended self ID
75 * packets increase as expected.
76 */
77
78 if (!SELF_ID_EXTENDED(q) ||
79 seq != SELF_ID_EXT_SEQUENCE(q))
80 return NULL;
81
82 seq++;
83 }
84 }
85}
86
/*
 * Return the 2-bit port type of port @port_index from the self-ID
 * packet(s) at @sid.  The "+ 5" offset skips the non-port fields: the
 * first three ports live in quadlet 0 starting at bit 6 (matching the
 * initial shift in count_ports()), later ports in the extended quadlets
 * starting at bit 16.
 */
87static int get_port_type(u32 *sid, int port_index)
88{
89 int index, shift;
90
91 index = (port_index + 5) / 8;
92 shift = 16 - ((port_index + 5) & 7) * 2;
93 return (sid[index] >> shift) & 0x03;
94}
95
/*
 * Allocate and initialize an fw_node from the first self-ID quadlet
 * @sid.  GFP_ATOMIC because this is called from build_tree(), which
 * runs under card->lock (see fw_core_handle_bus_reset()).  The returned
 * node starts with one reference; returns NULL on allocation failure.
 */
96static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
97{
98 struct fw_node *node;
99
100 node = kzalloc(sizeof(*node) + port_count * sizeof(node->ports[0]),
101 GFP_ATOMIC);
102 if (node == NULL)
103 return NULL;
104
105 node->color = color;
106 node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
107 node->link_on = SELF_ID_LINK_ON(sid);
108 node->phy_speed = SELF_ID_PHY_SPEED(sid);
109 node->port_count = port_count;
110
111 atomic_set(&node->ref_count, 1);
112 INIT_LIST_HEAD(&node->link);
113
114 return node;
115}
116
117/*
118 * Compute the maximum hop count for this node and it's children. The
119 * maximum hop count is the maximum number of connections between any
120 * two nodes in the subtree rooted at this node. We need this for
121 * setting the gap count. As we build the tree bottom up in
122 * build_tree() below, this is fairly easy to do: for each node we
123 * maintain the max hop count and the max depth, ie the number of hops
124 * to the furthest leaf. Computing the max hop count breaks down into
125 * two cases: either the path goes through this node, in which case
126 * the hop count is the sum of the two biggest child depths plus 2.
127 * Or it could be the case that the max hop path is entirely
128 * containted in a child tree, in which case the max hop count is just
129 * the max hop count of this child.
130 */
131static void update_hop_count(struct fw_node *node)
132{
 /* depths[0] >= depths[1]: the two deepest child subtrees (-1 = none). */
133 int depths[2] = { -1, -1 };
134 int max_child_hops = 0;
135 int i;
136
137 for (i = 0; i < node->port_count; i++) {
138 if (node->ports[i].node == NULL)
139 continue;
140
141 if (node->ports[i].node->max_hops > max_child_hops)
142 max_child_hops = node->ports[i].node->max_hops;
143
144 if (node->ports[i].node->max_depth > depths[0]) {
145 depths[1] = depths[0];
146 depths[0] = node->ports[i].node->max_depth;
147 } else if (node->ports[i].node->max_depth > depths[1])
148 depths[1] = node->ports[i].node->max_depth;
149 }
150
 /* A leaf ends up with max_depth == 0 and max_hops == 0. */
151 node->max_depth = depths[0] + 1;
152 node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
153}
154
155
156/**
157 * build_tree - Build the tree representation of the topology
158 * @self_ids: array of self IDs to create the tree from
159 * @self_id_count: the length of the self_ids array
160 * @local_id: the node ID of the local node
161 *
162 * This function builds the tree representation of the topology given
163 * by the self IDs from the latest bus reset. During the construction
164 * of the tree, the function checks that the self IDs are valid and
165 * internally consistent. On succcess this funtions returns the
166 * fw_node corresponding to the local card otherwise NULL.
 *
 * NOTE(review): the parameter names above are stale — the function
 * actually takes (card, sid, self_id_count); there is no local_id
 * parameter.  Also, on the error paths below, fw_nodes allocated so
 * far are not freed here.
167 */
168static struct fw_node *build_tree(struct fw_card *card,
169 u32 *sid, int self_id_count)
170{
171 struct fw_node *node, *child, *local_node, *irm_node;
172 struct list_head stack, *h;
173 u32 *next_sid, *end, q;
174 int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
175 int gap_count, topology_type;
176
177 local_node = NULL;
178 node = NULL;
179 INIT_LIST_HEAD(&stack);
180 stack_depth = 0;
181 end = sid + self_id_count;
182 phy_id = 0;
183 irm_node = NULL;
184 gap_count = SELF_ID_GAP_COUNT(*sid);
185 topology_type = 0;
186
 /*
  * Self IDs arrive in ascending PHY ID order, children before
  * parents; completed subtrees are kept on "stack" until their
  * parent's self ID is processed.
  */
187 while (sid < end) {
188 next_sid = count_ports(sid, &port_count, &child_port_count);
189
190 if (next_sid == NULL) {
191 fw_error("Inconsistent extended self IDs.\n");
192 return NULL;
193 }
194
195 q = *sid;
196 if (phy_id != SELF_ID_PHY_ID(q)) {
197 fw_error("PHY ID mismatch in self ID: %d != %d.\n",
198 phy_id, SELF_ID_PHY_ID(q));
199 return NULL;
200 }
201
202 if (child_port_count > stack_depth) {
203 fw_error("Topology stack underflow\n");
204 return NULL;
205 }
206
207 /*
208 * Seek back from the top of our stack to find the
209 * start of the child nodes for this node.
210 */
211 for (i = 0, h = &stack; i < child_port_count; i++)
212 h = h->prev;
213 child = fw_node(h);
214
215 node = fw_node_create(q, port_count, card->color);
216 if (node == NULL) {
217 fw_error("Out of memory while building topology.");
218 return NULL;
219 }
220
 /* The local node is the one whose PHY ID matches our node_id. */
221 if (phy_id == (card->node_id & 0x3f))
222 local_node = node;
223
224 if (SELF_ID_CONTENDER(q))
225 irm_node = node;
226
227 if (node->phy_speed == SCODE_BETA)
228 topology_type |= FW_TOPOLOGY_B;
229 else
230 topology_type |= FW_TOPOLOGY_A;
231
232 parent_count = 0;
233
234 for (i = 0; i < port_count; i++) {
235 switch (get_port_type(sid, i)) {
236 case SELFID_PORT_PARENT:
237 /*
238 * Who's your daddy? We dont know the
239 * parent node at this time, so we
240 * temporarily abuse node->color for
241 * remembering the entry in the
242 * node->ports array where the parent
243 * node should be. Later, when we
244 * handle the parent node, we fix up
245 * the reference.
246 */
247 parent_count++;
248 node->color = i;
249 break;
250
251 case SELFID_PORT_CHILD:
252 node->ports[i].node = child;
253 /*
254 * Fix up parent reference for this
255 * child node.
256 */
257 child->ports[child->color].node = node;
258 child->color = card->color;
259 child = fw_node(child->link.next);
260 break;
261 }
262 }
263
264 /*
265 * Check that the node reports exactly one parent
266 * port, except for the root, which of course should
267 * have no parents.
268 */
269 if ((next_sid == end && parent_count != 0) ||
270 (next_sid < end && parent_count != 1)) {
271 fw_error("Parent port inconsistency for node %d: "
272 "parent_count=%d\n", phy_id, parent_count);
273 return NULL;
274 }
275
276 /* Pop the child nodes off the stack and push the new node. */
277 __list_del(h->prev, &stack);
278 list_add_tail(&node->link, &stack);
279 stack_depth += 1 - child_port_count;
280
281 /*
282 * If all PHYs does not report the same gap count
283 * setting, we fall back to 63 which will force a gap
284 * count reconfiguration and a reset.
285 */
286 if (SELF_ID_GAP_COUNT(q) != gap_count)
287 gap_count = 63;
288
289 update_hop_count(node);
290
291 sid = next_sid;
292 phy_id++;
293 }
294
 /* The last node processed (highest PHY ID) is the bus root. */
295 card->root_node = node;
296 card->irm_node = irm_node;
297 card->gap_count = gap_count;
298 card->topology_type = topology_type;
299
300 return local_node;
301}
302
303typedef void (*fw_node_callback_t)(struct fw_card * card,
304 struct fw_node * node,
305 struct fw_node * parent);
306
/*
 * Breadth-first traversal of the tree below @root, invoking @callback
 * on each node together with its parent (NULL for @root).  Nodes are
 * queued via their ->link field; ->color marks visited nodes, and the
 * one already-colored neighbor of a node is its parent.  An extra
 * reference is taken on each queued node and dropped only after all
 * callbacks have run, so a callback may safely drop the tree's own
 * reference (cf. report_lost_node()).
 */
307static void
308for_each_fw_node(struct fw_card *card, struct fw_node *root,
309 fw_node_callback_t callback)
310{
311 struct list_head list;
312 struct fw_node *node, *next, *child, *parent;
313 int i;
314
315 INIT_LIST_HEAD(&list);
316
317 fw_node_get(root);
318 list_add_tail(&root->link, &list);
319 parent = NULL;
320 list_for_each_entry(node, &list, link) {
321 node->color = card->color;
322
323 for (i = 0; i < node->port_count; i++) {
324 child = node->ports[i].node;
325 if (!child)
326 continue;
327 if (child->color == card->color)
328 parent = child;
329 else {
330 fw_node_get(child);
331 list_add_tail(&child->link, &list);
332 }
333 }
334
335 callback(card, node, parent);
336 }
337
338 list_for_each_entry_safe(node, next, &list, link)
339 fw_node_put(node);
340}
341
/* for_each_fw_node() callback: emit FW_NODE_DESTROYED and drop the
 * tree's reference to the node. */
342static void
343report_lost_node(struct fw_card *card,
344 struct fw_node *node, struct fw_node *parent)
345{
346 fw_node_event(card, node, FW_NODE_DESTROYED);
347 fw_node_put(node);
348}
349
/* for_each_fw_node() callback: derive max_speed and b_path from the
 * parent's values (the root just uses its own PHY speed), then announce
 * the node with FW_NODE_CREATED. */
350static void
351report_found_node(struct fw_card *card,
352 struct fw_node *node, struct fw_node *parent)
353{
354 int b_path = (node->phy_speed == SCODE_BETA);
355
356 if (parent != NULL) {
357 /* min() macro doesn't work here with gcc 3.4 */
358 node->max_speed = parent->max_speed < node->phy_speed ?
359 parent->max_speed : node->phy_speed;
360 node->b_path = parent->b_path && b_path;
361 } else {
362 node->max_speed = node->phy_speed;
363 node->b_path = b_path;
364 }
365
366 fw_node_event(card, node, FW_NODE_CREATED);
367}
368
/*
 * Emit FW_NODE_DESTROYED for every node reachable from the local node
 * and release the tree's references.  card->color is bumped first so
 * the traversal's visited-marking works regardless of the tree's
 * previous color.
 */
369void fw_destroy_nodes(struct fw_card *card)
370{
371 unsigned long flags;
372
373 spin_lock_irqsave(&card->lock, flags);
374 card->color++;
375 if (card->local_node != NULL)
376 for_each_fw_node(card, card->local_node, report_lost_node);
377 spin_unlock_irqrestore(&card->lock, flags);
378}
379
/*
 * Splice the subtree hanging off @node1's port @port over to the same
 * port of @node0, fixing the subtree root's back-pointer so it names
 * @node0 as its parent.
 */
380static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
381{
382 struct fw_node *tree;
383 int i;
384
385 tree = node1->ports[port].node;
386 node0->ports[port].node = tree;
387 for (i = 0; i < tree->port_count; i++) {
388 if (tree->ports[i].node == node1) {
389 tree->ports[i].node = node0;
390 break;
391 }
392 }
393}
394
395/**
396 * update_tree - compare the old topology tree for card with the new
397 * one specified by root. Queue the nodes and mark them as either
398 * found, lost or updated. Update the nodes in the card topology tree
399 * as we go.
 *
 * The two trees are walked in lockstep (list0 = old, list1 = new);
 * matching nodes are assumed to have identical port counts (see the
 * assert comment below).  NOTE(review): matched node1 entries from the
 * new tree do not appear to be released here — possible leak; confirm
 * against fw_node_put() usage elsewhere.
400 */
401static void
402update_tree(struct fw_card *card, struct fw_node *root)
403{
404 struct list_head list0, list1;
405 struct fw_node *node0, *node1;
406 int i, event;
407
408 INIT_LIST_HEAD(&list0);
409 list_add_tail(&card->local_node->link, &list0);
410 INIT_LIST_HEAD(&list1);
411 list_add_tail(&root->link, &list1);
412
413 node0 = fw_node(list0.next);
414 node1 = fw_node(list1.next);
415
416 while (&node0->link != &list0) {
417
418 /* assert(node0->port_count == node1->port_count); */
419 if (node0->link_on && !node1->link_on)
420 event = FW_NODE_LINK_OFF;
421 else if (!node0->link_on && node1->link_on)
422 event = FW_NODE_LINK_ON;
423 else
424 event = FW_NODE_UPDATED;
425
426 node0->node_id = node1->node_id;
427 node0->color = card->color;
428 node0->link_on = node1->link_on;
429 node0->initiated_reset = node1->initiated_reset;
430 node0->max_hops = node1->max_hops;
431 node1->color = card->color;
432 fw_node_event(card, node0, event);
433
434 if (card->root_node == node1)
435 card->root_node = node0;
436 if (card->irm_node == node1)
437 card->irm_node = node0;
438
 /* Walk the ports pairwise: kept, lost, or newly connected. */
439 for (i = 0; i < node0->port_count; i++) {
440 if (node0->ports[i].node && node1->ports[i].node) {
441 /*
442 * This port didn't change, queue the
443 * connected node for further
444 * investigation.
445 */
446 if (node0->ports[i].node->color == card->color)
447 continue;
448 list_add_tail(&node0->ports[i].node->link,
449 &list0);
450 list_add_tail(&node1->ports[i].node->link,
451 &list1);
452 } else if (node0->ports[i].node) {
453 /*
454 * The nodes connected here were
455 * unplugged; unref the lost nodes and
456 * queue FW_NODE_LOST callbacks for
457 * them.
458 */
459
460 for_each_fw_node(card, node0->ports[i].node,
461 report_lost_node);
462 node0->ports[i].node = NULL;
463 } else if (node1->ports[i].node) {
464 /*
465 * One or more node were connected to
466 * this port. Move the new nodes into
467 * the tree and queue FW_NODE_CREATED
468 * callbacks for them.
469 */
470 move_tree(node0, node1, i);
471 for_each_fw_node(card, node0->ports[i].node,
472 report_found_node);
473 }
474 }
475
476 node0 = fw_node(node0->link.next);
477 node1 = fw_node(node1->link.next);
478 }
479}
480
/*
 * Refresh the TOPOLOGY_MAP CSR contents: bump the generation counter,
 * store node and self-ID counts plus the raw self IDs, then recompute
 * the block CRC/length quadlet.
 */
481static void
482update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
483{
484 int node_count;
485
486 card->topology_map[1]++;
487 node_count = (card->root_node->node_id & 0x3f) + 1;
488 card->topology_map[2] = (node_count << 16) | self_id_count;
489 card->topology_map[0] = (self_id_count + 2) << 16;
490 memcpy(&card->topology_map[3], self_ids, self_id_count * 4);
491 fw_compute_block_crc(card->topology_map);
492}
493
/*
 * Entry point from the card driver after a bus reset, with the new
 * self-ID buffer.  Flushes pending transactions first, then — under
 * card->lock — rebuilds the topology tree, refreshes the topology map
 * CSR and emits node events: either reporting the whole tree as new
 * (first reset) or diffing against the previous tree.
 */
494void
495fw_core_handle_bus_reset(struct fw_card *card,
496 int node_id, int generation,
497 int self_id_count, u32 * self_ids)
498{
499 struct fw_node *local_node;
500 unsigned long flags;
501
502 fw_flush_transactions(card);
503
504 spin_lock_irqsave(&card->lock, flags);
505
506 /*
507 * If the new topology has a different self_id_count the topology
508 * changed, either nodes were added or removed. In that case we
509 * reset the IRM reset counter.
510 */
511 if (card->self_id_count != self_id_count)
512 card->bm_retries = 0;
513
514 card->node_id = node_id;
515 card->generation = generation;
516 card->reset_jiffies = jiffies;
517 schedule_delayed_work(&card->work, 0);
518
519 local_node = build_tree(card, self_ids, self_id_count);
520
521 update_topology_map(card, self_ids, self_id_count);
522
 /* New traversal epoch for the visited-marking in for_each_fw_node(). */
523 card->color++;
524
525 if (local_node == NULL) {
526 fw_error("topology build failed\n");
527 /* FIXME: We need to issue a bus reset in this case. */
528 } else if (card->local_node == NULL) {
529 card->local_node = local_node;
530 for_each_fw_node(card, local_node, report_found_node);
531 } else {
532 update_tree(card, local_node);
533 }
534
535 spin_unlock_irqrestore(&card->lock, flags);
536}
537EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/fw-topology.h b/drivers/firewire/fw-topology.h
new file mode 100644
index 000000000000..363b6cbcd0b3
--- /dev/null
+++ b/drivers/firewire/fw-topology.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_topology_h
20#define __fw_topology_h
21
/* Topology class bits, set per node from its PHY speed in build_tree()
 * (beta-mode PHY -> B, otherwise A; MIXED is both bits). */
22enum {
23 FW_TOPOLOGY_A = 0x01,
24 FW_TOPOLOGY_B = 0x02,
25 FW_TOPOLOGY_MIXED = 0x03,
26};
27
/* Node life-cycle events delivered via fw_node_event(). */
28enum {
29 FW_NODE_CREATED = 0x00,
30 FW_NODE_UPDATED = 0x01,
31 FW_NODE_DESTROYED = 0x02,
32 FW_NODE_LINK_ON = 0x03,
33 FW_NODE_LINK_OFF = 0x04,
34};
35
/* One PHY port: the peer node (NULL when unconnected) and port speed. */
36struct fw_port {
37 struct fw_node *node;
38 unsigned speed : 3; /* S100, S200, ... S3200 */
39};
40
/* A node in the bus topology tree, refcounted via ref_count and sized
 * by the trailing flexible ports[] array (see fw_node_create()). */
41struct fw_node {
42 u16 node_id;
43 u8 color;
44 u8 port_count;
45 unsigned link_on : 1;
46 unsigned initiated_reset : 1;
47 unsigned b_path : 1;
48 u8 phy_speed : 3; /* As in the self ID packet. */
49 u8 max_speed : 5; /* Minimum of all phy-speeds and port speeds on
50 * the path from the local node to this node. */
51 u8 max_depth : 4; /* Maximum depth to any leaf node */
52 u8 max_hops : 4; /* Max hops in this sub tree */
53 atomic_t ref_count;
54
55 /* For serializing node topology into a list. */
56 struct list_head link;
57
58 /* Upper layer specific data. */
59 void *data;
60
61 struct fw_port ports[0];
62};
63
/* Container-of helper: map a queued ->link entry back to its fw_node. */
64static inline struct fw_node *
65fw_node(struct list_head *l)
66{
67 return list_entry(l, struct fw_node, link);
68}
69
/* Take a reference on @node; returns @node so calls can be chained. */
70static inline struct fw_node *
71fw_node_get(struct fw_node *node)
72{
73 atomic_inc(&node->ref_count);
74
75 return node;
76}
77
/* Drop a reference; frees the node when the count reaches zero. */
78static inline void
79fw_node_put(struct fw_node *node)
80{
81 if (atomic_dec_and_test(&node->ref_count))
82 kfree(node);
83}
84
/* Prototypes; the definitions live outside this header. */
85void
86fw_destroy_nodes(struct fw_card *card);
87
88int
89fw_compute_block_crc(u32 *block);
90
91
92#endif /* __fw_topology_h */
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
new file mode 100644
index 000000000000..80d0121463d0
--- /dev/null
+++ b/drivers/firewire/fw-transaction.c
@@ -0,0 +1,910 @@
1/*
2 * Core IEEE1394 transaction logic
3 *
4 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/interrupt.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/poll.h>
28#include <linux/list.h>
29#include <linux/kthread.h>
30#include <asm/uaccess.h>
31#include <asm/semaphore.h>
32
33#include "fw-transaction.h"
34#include "fw-topology.h"
35#include "fw-device.h"
36
/*
 * Pack (HEADER_*) and unpack (HEADER_GET_*) helpers for the quadlets of
 * IEEE 1394 asynchronous packet headers, plus PHY configuration packet
 * field builders used by fw_send_phy_config().
 */
37#define HEADER_PRI(pri) ((pri) << 0)
38#define HEADER_TCODE(tcode) ((tcode) << 4)
39#define HEADER_RETRY(retry) ((retry) << 8)
40#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
41#define HEADER_DESTINATION(destination) ((destination) << 16)
42#define HEADER_SOURCE(source) ((source) << 16)
43#define HEADER_RCODE(rcode) ((rcode) << 12)
44#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
45#define HEADER_DATA_LENGTH(length) ((length) << 16)
46#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
47
48#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
49#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
50#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
51#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
52#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
53#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
54#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
55#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
56
57#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
58#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
59#define PHY_IDENTIFIER(id) ((id) << 30)
60
/*
 * Remove @transaction from the card's pending list (releasing its
 * tlabel) and complete it with @rcode.  Returns 0 on success, or
 * -ENOENT when the transaction was no longer pending, in which case the
 * callback is not invoked.  The "was it found?" test works because an
 * exhausted list_for_each_entry leaves &t->link equal to the list head.
 */
61static int
62close_transaction(struct fw_transaction *transaction,
63 struct fw_card *card, int rcode,
64 u32 *payload, size_t length)
65{
66 struct fw_transaction *t;
67 unsigned long flags;
68
69 spin_lock_irqsave(&card->lock, flags);
70 list_for_each_entry(t, &card->transaction_list, link) {
71 if (t == transaction) {
72 list_del(&t->link);
73 card->tlabel_mask &= ~(1 << t->tlabel);
74 break;
75 }
76 }
77 spin_unlock_irqrestore(&card->lock, flags);
78
 /* Callback runs outside the lock. */
79 if (&t->link != &card->transaction_list) {
80 t->callback(card, rcode, payload, length, t->callback_data);
81 return 0;
82 }
83
84 return -ENOENT;
85}
86
87/*
88 * Only valid for transactions that are potentially pending (ie have
89 * been sent).
 *
 * Returns 0 when the packet was still queued and could be cancelled
 * (the packet callback then completes the transaction), otherwise the
 * result of close_transaction(): 0 if it was still pending, -ENOENT if
 * it had already completed.
90 */
91int
92fw_cancel_transaction(struct fw_card *card,
93 struct fw_transaction *transaction)
94{
95 /*
96 * Cancel the packet transmission if it's still queued. That
97 * will call the packet transmission callback which cancels
98 * the transaction.
99 */
100
101 if (card->driver->cancel_packet(card, &transaction->packet) == 0)
102 return 0;
103
104 /*
105 * If the request packet has already been sent, we need to see
106 * if the transaction is still pending and remove it in that case.
107 */
108
109 return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
110}
111EXPORT_SYMBOL(fw_cancel_transaction);
112
/*
 * Packet-transmission callback for request packets: map the link-layer
 * ack code to an rcode and close the transaction.  ACK_PENDING only
 * records the timestamp — the transaction stays on the pending list,
 * waiting for the response subaction.
 */
113static void
114transmit_complete_callback(struct fw_packet *packet,
115 struct fw_card *card, int status)
116{
117 struct fw_transaction *t =
118 container_of(packet, struct fw_transaction, packet);
119
120 switch (status) {
121 case ACK_COMPLETE:
122 close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
123 break;
124 case ACK_PENDING:
125 t->timestamp = packet->timestamp;
126 break;
127 case ACK_BUSY_X:
128 case ACK_BUSY_A:
129 case ACK_BUSY_B:
130 close_transaction(t, card, RCODE_BUSY, NULL, 0);
131 break;
132 case ACK_DATA_ERROR:
133 close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
134 break;
135 case ACK_TYPE_ERROR:
136 close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
137 break;
138 default:
139 /*
140 * In this case the ack is really a juju specific
141 * rcode, so just forward that to the callback.
142 */
143 close_transaction(t, card, status, NULL, 0);
144 break;
145 }
146}
147
/*
 * Fill in @packet's header (and payload pointer, for writes/locks) for
 * an asynchronous request.  tcodes above 0x10 are a local convention:
 * they encode TCODE_LOCK_REQUEST plus extended tcode (tcode - 0x10).
 * header[1] carries the high 16 bits of the 48-bit @offset, header[2]
 * the low 32 bits.
 */
148static void
149fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
150 int node_id, int source_id, int generation, int speed,
151 unsigned long long offset, void *payload, size_t length)
152{
153 int ext_tcode;
154
155 if (tcode > 0x10) {
156 ext_tcode = tcode - 0x10;
157 tcode = TCODE_LOCK_REQUEST;
158 } else
159 ext_tcode = 0;
160
161 packet->header[0] =
162 HEADER_RETRY(RETRY_X) |
163 HEADER_TLABEL(tlabel) |
164 HEADER_TCODE(tcode) |
165 HEADER_DESTINATION(node_id);
166 packet->header[1] =
167 HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
168 packet->header[2] =
169 offset;
170
171 switch (tcode) {
172 case TCODE_WRITE_QUADLET_REQUEST:
173 packet->header[3] = *(u32 *)payload;
174 packet->header_length = 16;
175 packet->payload_length = 0;
176 break;
177
178 case TCODE_LOCK_REQUEST:
179 case TCODE_WRITE_BLOCK_REQUEST:
180 packet->header[3] =
181 HEADER_DATA_LENGTH(length) |
182 HEADER_EXTENDED_TCODE(ext_tcode);
183 packet->header_length = 16;
184 packet->payload = payload;
185 packet->payload_length = length;
186 break;
187
188 case TCODE_READ_QUADLET_REQUEST:
189 packet->header_length = 12;
190 packet->payload_length = 0;
191 break;
192
193 case TCODE_READ_BLOCK_REQUEST:
194 packet->header[3] =
195 HEADER_DATA_LENGTH(length) |
196 HEADER_EXTENDED_TCODE(ext_tcode);
197 packet->header_length = 16;
198 packet->payload_length = 0;
199 break;
200 }
201
202 packet->speed = speed;
203 packet->generation = generation;
204 packet->ack = 0;
205}
206
207/**
208 * This function provides low-level access to the IEEE1394 transaction
209 * logic. Most C programs would use either fw_read(), fw_write() or
210 * fw_lock() instead - those function are convenience wrappers for
211 * this function. The fw_send_request() function is primarily
212 * provided as a flexible, one-stop entry point for languages bindings
213 * and protocol bindings.
214 *
215 * FIXME: Document this function further, in particular the possible
216 * values for rcode in the callback. In short, we map ACK_COMPLETE to
217 * RCODE_COMPLETE, internal errors set errno and set rcode to
218 * RCODE_SEND_ERROR (which is out of range for standard ieee1394
219 * rcodes). All other rcodes are forwarded unchanged. For all
220 * errors, payload is NULL, length is 0.
221 *
222 * Can not expect the callback to be called before the function
223 * returns, though this does happen in some cases (ACK_COMPLETE and
224 * errors).
225 *
226 * The payload is only used for write requests and must not be freed
227 * until the callback has been called.
228 *
229 * @param card the card from which to send the request
230 * @param tcode the tcode for this transaction. Do not use
231 * TCODE_LOCK_REQUEST directly, insted use TCODE_LOCK_MASK_SWAP
232 * etc. to specify tcode and ext_tcode.
233 * @param node_id the destination node ID (bus ID and PHY ID concatenated)
234 * @param generation the generation for which node_id is valid
235 * @param speed the speed to use for sending the request
236 * @param offset the 48 bit offset on the destination node
237 * @param payload the data payload for the request subaction
238 * @param length the length in bytes of the data to read
239 * @param callback function to be called when the transaction is completed
240 * @param callback_data pointer to arbitrary data, which will be
241 * passed to the callback
242 */
243void
244fw_send_request(struct fw_card *card, struct fw_transaction *t,
245 int tcode, int node_id, int generation, int speed,
246 unsigned long long offset,
247 void *payload, size_t length,
248 fw_transaction_callback_t callback, void *callback_data)
249{
250 unsigned long flags;
251 int tlabel, source;
252
253 /*
254 * Bump the flush timer up 100ms first of all so we
255 * don't race with a flush timer callback.
256 */
257
258 mod_timer(&card->flush_timer, jiffies + DIV_ROUND_UP(HZ, 10));
259
260 /*
261 * Allocate tlabel from the bitmap and put the transaction on
262 * the list while holding the card spinlock.
263 */
264
265 spin_lock_irqsave(&card->lock, flags);
266
267 source = card->node_id;
268 tlabel = card->current_tlabel;
 /*
  * Only the next tlabel in round-robin order is tried; if it is
  * still in flight the request fails immediately with
  * RCODE_SEND_ERROR (no search for another free label).
  */
269 if (card->tlabel_mask & (1 << tlabel)) {
270 spin_unlock_irqrestore(&card->lock, flags);
271 callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
272 return;
273 }
274
275 card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
276 card->tlabel_mask |= (1 << tlabel);
277
278 list_add_tail(&t->link, &card->transaction_list);
279
280 spin_unlock_irqrestore(&card->lock, flags);
281
282 /* Initialize rest of transaction, fill out packet and send it. */
283 t->node_id = node_id;
284 t->tlabel = tlabel;
285 t->callback = callback;
286 t->callback_data = callback_data;
287
288 fw_fill_request(&t->packet, tcode, t->tlabel,
289 node_id, source, generation,
290 speed, offset, payload, length);
291 t->packet.callback = transmit_complete_callback;
292
293 card->driver->send_request(card, &t->packet);
294}
295EXPORT_SYMBOL(fw_send_request);
296
/* PHY packets are fire-and-forget: free the packet kzalloc'ed by
 * send_phy_packet() once the link layer is done with it. */
297static void
298transmit_phy_packet_callback(struct fw_packet *packet,
299 struct fw_card *card, int status)
300{
301 kfree(packet);
302}
303
/*
 * Transmit a PHY packet: the data quadlet followed by its bitwise
 * complement, sent at S100.  Allocation failure is silently ignored
 * (best-effort send); the packet frees itself via the callback.
 */
304static void send_phy_packet(struct fw_card *card, u32 data, int generation)
305{
306 struct fw_packet *packet;
307
308 packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
309 if (packet == NULL)
310 return;
311
312 packet->header[0] = data;
313 packet->header[1] = ~data;
314 packet->header_length = 8;
315 packet->payload_length = 0;
316 packet->speed = SCODE_100;
317 packet->generation = generation;
318 packet->callback = transmit_phy_packet_callback;
319
320 card->driver->send_request(card, packet);
321}
322
/*
 * Broadcast a PHY configuration packet that forces @node_id to become
 * root (R bit) and sets the bus gap count to @gap_count (T bit); both
 * flag bits are set by the PHY_CONFIG_* macros above.
 */
323void fw_send_phy_config(struct fw_card *card,
324 int node_id, int generation, int gap_count)
325{
326 u32 q;
327
328 q = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
329 PHY_CONFIG_ROOT_ID(node_id) |
330 PHY_CONFIG_GAP_COUNT(gap_count);
331
332 send_phy_packet(card, q, generation);
333}
334
/*
 * Cancel every pending transaction: splice the whole pending list out
 * and clear the tlabel bitmap under the lock, then cancel each packet
 * and complete its transaction with RCODE_CANCELLED outside the lock.
 */
335void fw_flush_transactions(struct fw_card *card)
336{
337 struct fw_transaction *t, *next;
338 struct list_head list;
339 unsigned long flags;
340
341 INIT_LIST_HEAD(&list);
342 spin_lock_irqsave(&card->lock, flags);
343 list_splice_init(&card->transaction_list, &list);
344 card->tlabel_mask = 0;
345 spin_unlock_irqrestore(&card->lock, flags);
346
347 list_for_each_entry_safe(t, next, &list, link) {
348 card->driver->cancel_packet(card, &t->packet);
349
350 /*
351 * At this point cancel_packet will never call the
352 * transaction callback, since we just took all the
353 * transactions out of the list. So do it here.
354 */
355 t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
356 }
357}
358
/* Return the first handler on @list whose [offset, offset+length) range
 * overlaps the given range, or NULL if none does. */
359static struct fw_address_handler *
360lookup_overlapping_address_handler(struct list_head *list,
361 unsigned long long offset, size_t length)
362{
363 struct fw_address_handler *handler;
364
365 list_for_each_entry(handler, list, link) {
366 if (handler->offset < offset + length &&
367 offset < handler->offset + handler->length)
368 return handler;
369 }
370
371 return NULL;
372}
373
/* Return the first handler on @list whose range fully contains the
 * given [offset, offset+length) range, or NULL if none does. */
374static struct fw_address_handler *
375lookup_enclosing_address_handler(struct list_head *list,
376 unsigned long long offset, size_t length)
377{
378 struct fw_address_handler *handler;
379
380 list_for_each_entry(handler, list, link) {
381 if (handler->offset <= offset &&
382 offset + length <= handler->offset + handler->length)
383 return handler;
384 }
385
386 return NULL;
387}
388
/*
 * Global address-handler registry and the standard 1394 address-space
 * regions handlers can be placed in.
 * NOTE(review): fw_csr_region ends at ...0800 but fw_unit_space_region
 * starts at ...0900, leaving a 0x100 gap — confirm this is intentional.
 */
389static DEFINE_SPINLOCK(address_handler_lock);
390static LIST_HEAD(address_handler_list);
391
392const struct fw_address_region fw_low_memory_region =
393 { .start = 0x000000000000ULL, .end = 0x000100000000ULL, };
394const struct fw_address_region fw_high_memory_region =
395 { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, };
396const struct fw_address_region fw_private_region =
397 { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
398const struct fw_address_region fw_csr_region =
399 { .start = 0xfffff0000000ULL, .end = 0xfffff0000800ULL, };
400const struct fw_address_region fw_unit_space_region =
401 { .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
402EXPORT_SYMBOL(fw_low_memory_region);
403EXPORT_SYMBOL(fw_high_memory_region);
404EXPORT_SYMBOL(fw_private_region);
405EXPORT_SYMBOL(fw_csr_region);
406EXPORT_SYMBOL(fw_unit_space_region);
407
408/**
409 * Allocate a range of addresses in the node space of the OHCI
410 * controller. When a request is received that falls within the
411 * specified address range, the specified callback is invoked. The
412 * parameters passed to the callback give the details of the
413 * particular request
 *
 * The caller must set handler->length before calling.  Returns 0 on
 * success (handler->offset is then the allocated start address), or
 * -EBUSY if @region contains no free sub-range of that length.
414 */
415int
416fw_core_add_address_handler(struct fw_address_handler *handler,
417 const struct fw_address_region *region)
418{
419 struct fw_address_handler *other;
420 unsigned long flags;
421 int ret = -EBUSY;
422
423 spin_lock_irqsave(&address_handler_lock, flags);
424
 /* First-fit scan: skip past each handler that conflicts. */
425 handler->offset = region->start;
426 while (handler->offset + handler->length <= region->end) {
427 other =
428 lookup_overlapping_address_handler(&address_handler_list,
429 handler->offset,
430 handler->length);
431 if (other != NULL) {
432 handler->offset += other->length;
433 } else {
434 list_add_tail(&handler->link, &address_handler_list);
435 ret = 0;
436 break;
437 }
438 }
439
440 spin_unlock_irqrestore(&address_handler_lock, flags);
441
442 return ret;
443}
444EXPORT_SYMBOL(fw_core_add_address_handler);
445
446/**
447 * Deallocate a range of addresses allocated with fw_allocate. This
448 * will call the associated callback one last time with a the special
449 * tcode TCODE_DEALLOCATE, to let the client destroy the registered
450 * callback data. For convenience, the callback parameters offset and
451 * length are set to the start and the length respectively for the
452 * deallocated region, payload is set to NULL.
453 */
454void fw_core_remove_address_handler(struct fw_address_handler *handler)
455{
456 unsigned long flags;
457
458 spin_lock_irqsave(&address_handler_lock, flags);
459 list_del(&handler->link);
460 spin_unlock_irqrestore(&address_handler_lock, flags);
461}
462EXPORT_SYMBOL(fw_core_remove_address_handler);
463
464struct fw_request {
465 struct fw_packet response;
466 u32 request_header[4];
467 int ack;
468 u32 length;
469 u32 data[0];
470};
471
472static void
473free_response_callback(struct fw_packet *packet,
474 struct fw_card *card, int status)
475{
476 struct fw_request *request;
477
478 request = container_of(packet, struct fw_request, response);
479 kfree(request);
480}
481
482void
483fw_fill_response(struct fw_packet *response, u32 *request_header,
484 int rcode, void *payload, size_t length)
485{
486 int tcode, tlabel, extended_tcode, source, destination;
487
488 tcode = HEADER_GET_TCODE(request_header[0]);
489 tlabel = HEADER_GET_TLABEL(request_header[0]);
490 source = HEADER_GET_DESTINATION(request_header[0]);
491 destination = HEADER_GET_SOURCE(request_header[1]);
492 extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
493
494 response->header[0] =
495 HEADER_RETRY(RETRY_1) |
496 HEADER_TLABEL(tlabel) |
497 HEADER_DESTINATION(destination);
498 response->header[1] =
499 HEADER_SOURCE(source) |
500 HEADER_RCODE(rcode);
501 response->header[2] = 0;
502
503 switch (tcode) {
504 case TCODE_WRITE_QUADLET_REQUEST:
505 case TCODE_WRITE_BLOCK_REQUEST:
506 response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
507 response->header_length = 12;
508 response->payload_length = 0;
509 break;
510
511 case TCODE_READ_QUADLET_REQUEST:
512 response->header[0] |=
513 HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
514 if (payload != NULL)
515 response->header[3] = *(u32 *)payload;
516 else
517 response->header[3] = 0;
518 response->header_length = 16;
519 response->payload_length = 0;
520 break;
521
522 case TCODE_READ_BLOCK_REQUEST:
523 case TCODE_LOCK_REQUEST:
524 response->header[0] |= HEADER_TCODE(tcode + 2);
525 response->header[3] =
526 HEADER_DATA_LENGTH(length) |
527 HEADER_EXTENDED_TCODE(extended_tcode);
528 response->header_length = 16;
529 response->payload = payload;
530 response->payload_length = length;
531 break;
532
533 default:
534 BUG();
535 return;
536 }
537}
538EXPORT_SYMBOL(fw_fill_response);
539
540static struct fw_request *
541allocate_request(struct fw_packet *p)
542{
543 struct fw_request *request;
544 u32 *data, length;
545 int request_tcode, t;
546
547 request_tcode = HEADER_GET_TCODE(p->header[0]);
548 switch (request_tcode) {
549 case TCODE_WRITE_QUADLET_REQUEST:
550 data = &p->header[3];
551 length = 4;
552 break;
553
554 case TCODE_WRITE_BLOCK_REQUEST:
555 case TCODE_LOCK_REQUEST:
556 data = p->payload;
557 length = HEADER_GET_DATA_LENGTH(p->header[3]);
558 break;
559
560 case TCODE_READ_QUADLET_REQUEST:
561 data = NULL;
562 length = 4;
563 break;
564
565 case TCODE_READ_BLOCK_REQUEST:
566 data = NULL;
567 length = HEADER_GET_DATA_LENGTH(p->header[3]);
568 break;
569
570 default:
571 BUG();
572 return NULL;
573 }
574
575 request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
576 if (request == NULL)
577 return NULL;
578
579 t = (p->timestamp & 0x1fff) + 4000;
580 if (t >= 8000)
581 t = (p->timestamp & ~0x1fff) + 0x2000 + t - 8000;
582 else
583 t = (p->timestamp & ~0x1fff) + t;
584
585 request->response.speed = p->speed;
586 request->response.timestamp = t;
587 request->response.generation = p->generation;
588 request->response.ack = 0;
589 request->response.callback = free_response_callback;
590 request->ack = p->ack;
591 request->length = length;
592 if (data)
593 memcpy(request->data, data, length);
594
595 memcpy(request->request_header, p->header, sizeof(p->header));
596
597 return request;
598}
599
600void
601fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
602{
603 /*
604 * Broadcast packets are reported as ACK_COMPLETE, so this
605 * check is sufficient to ensure we don't send response to
606 * broadcast packets or posted writes.
607 */
608 if (request->ack != ACK_PENDING)
609 return;
610
611 if (rcode == RCODE_COMPLETE)
612 fw_fill_response(&request->response, request->request_header,
613 rcode, request->data, request->length);
614 else
615 fw_fill_response(&request->response, request->request_header,
616 rcode, NULL, 0);
617
618 card->driver->send_response(card, &request->response);
619}
620EXPORT_SYMBOL(fw_send_response);
621
622void
623fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
624{
625 struct fw_address_handler *handler;
626 struct fw_request *request;
627 unsigned long long offset;
628 unsigned long flags;
629 int tcode, destination, source;
630
631 if (p->payload_length > 2048) {
632 /* FIXME: send error response. */
633 return;
634 }
635
636 if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
637 return;
638
639 request = allocate_request(p);
640 if (request == NULL) {
641 /* FIXME: send statically allocated busy packet. */
642 return;
643 }
644
645 offset =
646 ((unsigned long long)
647 HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) | p->header[2];
648 tcode = HEADER_GET_TCODE(p->header[0]);
649 destination = HEADER_GET_DESTINATION(p->header[0]);
650 source = HEADER_GET_SOURCE(p->header[0]);
651
652 spin_lock_irqsave(&address_handler_lock, flags);
653 handler = lookup_enclosing_address_handler(&address_handler_list,
654 offset, request->length);
655 spin_unlock_irqrestore(&address_handler_lock, flags);
656
657 /*
658 * FIXME: lookup the fw_node corresponding to the sender of
659 * this request and pass that to the address handler instead
660 * of the node ID. We may also want to move the address
661 * allocations to fw_node so we only do this callback if the
662 * upper layers registered it for this node.
663 */
664
665 if (handler == NULL)
666 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
667 else
668 handler->address_callback(card, request,
669 tcode, destination, source,
670 p->generation, p->speed, offset,
671 request->data, request->length,
672 handler->callback_data);
673}
674EXPORT_SYMBOL(fw_core_handle_request);
675
676void
677fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
678{
679 struct fw_transaction *t;
680 unsigned long flags;
681 u32 *data;
682 size_t data_length;
683 int tcode, tlabel, destination, source, rcode;
684
685 tcode = HEADER_GET_TCODE(p->header[0]);
686 tlabel = HEADER_GET_TLABEL(p->header[0]);
687 destination = HEADER_GET_DESTINATION(p->header[0]);
688 source = HEADER_GET_SOURCE(p->header[1]);
689 rcode = HEADER_GET_RCODE(p->header[1]);
690
691 spin_lock_irqsave(&card->lock, flags);
692 list_for_each_entry(t, &card->transaction_list, link) {
693 if (t->node_id == source && t->tlabel == tlabel) {
694 list_del(&t->link);
695 card->tlabel_mask &= ~(1 << t->tlabel);
696 break;
697 }
698 }
699 spin_unlock_irqrestore(&card->lock, flags);
700
701 if (&t->link == &card->transaction_list) {
702 fw_notify("Unsolicited response (source %x, tlabel %x)\n",
703 source, tlabel);
704 return;
705 }
706
707 /*
708 * FIXME: sanity check packet, is length correct, does tcodes
709 * and addresses match.
710 */
711
712 switch (tcode) {
713 case TCODE_READ_QUADLET_RESPONSE:
714 data = (u32 *) &p->header[3];
715 data_length = 4;
716 break;
717
718 case TCODE_WRITE_RESPONSE:
719 data = NULL;
720 data_length = 0;
721 break;
722
723 case TCODE_READ_BLOCK_RESPONSE:
724 case TCODE_LOCK_RESPONSE:
725 data = p->payload;
726 data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
727 break;
728
729 default:
730 /* Should never happen, this is just to shut up gcc. */
731 data = NULL;
732 data_length = 0;
733 break;
734 }
735
736 t->callback(card, rcode, data, data_length, t->callback_data);
737}
738EXPORT_SYMBOL(fw_core_handle_response);
739
740const struct fw_address_region topology_map_region =
741 { .start = 0xfffff0001000ull, .end = 0xfffff0001400ull, };
742
743static void
744handle_topology_map(struct fw_card *card, struct fw_request *request,
745 int tcode, int destination, int source,
746 int generation, int speed,
747 unsigned long long offset,
748 void *payload, size_t length, void *callback_data)
749{
750 int i, start, end;
751 u32 *map;
752
753 if (!TCODE_IS_READ_REQUEST(tcode)) {
754 fw_send_response(card, request, RCODE_TYPE_ERROR);
755 return;
756 }
757
758 if ((offset & 3) > 0 || (length & 3) > 0) {
759 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
760 return;
761 }
762
763 start = (offset - topology_map_region.start) / 4;
764 end = start + length / 4;
765 map = payload;
766
767 for (i = 0; i < length / 4; i++)
768 map[i] = cpu_to_be32(card->topology_map[start + i]);
769
770 fw_send_response(card, request, RCODE_COMPLETE);
771}
772
773static struct fw_address_handler topology_map = {
774 .length = 0x200,
775 .address_callback = handle_topology_map,
776};
777
778const struct fw_address_region registers_region =
779 { .start = 0xfffff0000000ull, .end = 0xfffff0000400ull, };
780
781static void
782handle_registers(struct fw_card *card, struct fw_request *request,
783 int tcode, int destination, int source,
784 int generation, int speed,
785 unsigned long long offset,
786 void *payload, size_t length, void *callback_data)
787{
788 int reg = offset - CSR_REGISTER_BASE;
789 unsigned long long bus_time;
790 __be32 *data = payload;
791
792 switch (reg) {
793 case CSR_CYCLE_TIME:
794 case CSR_BUS_TIME:
795 if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
796 fw_send_response(card, request, RCODE_TYPE_ERROR);
797 break;
798 }
799
800 bus_time = card->driver->get_bus_time(card);
801 if (reg == CSR_CYCLE_TIME)
802 *data = cpu_to_be32(bus_time);
803 else
804 *data = cpu_to_be32(bus_time >> 25);
805 fw_send_response(card, request, RCODE_COMPLETE);
806 break;
807
808 case CSR_BUS_MANAGER_ID:
809 case CSR_BANDWIDTH_AVAILABLE:
810 case CSR_CHANNELS_AVAILABLE_HI:
811 case CSR_CHANNELS_AVAILABLE_LO:
812 /*
813 * FIXME: these are handled by the OHCI hardware and
814 * the stack never sees these request. If we add
815 * support for a new type of controller that doesn't
816 * handle this in hardware we need to deal with these
817 * transactions.
818 */
819 BUG();
820 break;
821
822 case CSR_BUSY_TIMEOUT:
823 /* FIXME: Implement this. */
824 default:
825 fw_send_response(card, request, RCODE_ADDRESS_ERROR);
826 break;
827 }
828}
829
830static struct fw_address_handler registers = {
831 .length = 0x400,
832 .address_callback = handle_registers,
833};
834
835MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
836MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
837MODULE_LICENSE("GPL");
838
839static const u32 vendor_textual_descriptor[] = {
840 /* textual descriptor leaf () */
841 0x00060000,
842 0x00000000,
843 0x00000000,
844 0x4c696e75, /* L i n u */
845 0x78204669, /* x F i */
846 0x72657769, /* r e w i */
847 0x72650000, /* r e */
848};
849
850static const u32 model_textual_descriptor[] = {
851 /* model descriptor leaf () */
852 0x00030000,
853 0x00000000,
854 0x00000000,
855 0x4a756a75, /* J u j u */
856};
857
858static struct fw_descriptor vendor_id_descriptor = {
859 .length = ARRAY_SIZE(vendor_textual_descriptor),
860 .immediate = 0x03d00d1e,
861 .key = 0x81000000,
862 .data = vendor_textual_descriptor,
863};
864
865static struct fw_descriptor model_id_descriptor = {
866 .length = ARRAY_SIZE(model_textual_descriptor),
867 .immediate = 0x17000001,
868 .key = 0x81000000,
869 .data = model_textual_descriptor,
870};
871
872static int __init fw_core_init(void)
873{
874 int retval;
875
876 retval = bus_register(&fw_bus_type);
877 if (retval < 0)
878 return retval;
879
880 fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
881 if (fw_cdev_major < 0) {
882 bus_unregister(&fw_bus_type);
883 return fw_cdev_major;
884 }
885
886 retval = fw_core_add_address_handler(&topology_map,
887 &topology_map_region);
888 BUG_ON(retval < 0);
889
890 retval = fw_core_add_address_handler(&registers,
891 &registers_region);
892 BUG_ON(retval < 0);
893
894 /* Add the vendor textual descriptor. */
895 retval = fw_core_add_descriptor(&vendor_id_descriptor);
896 BUG_ON(retval < 0);
897 retval = fw_core_add_descriptor(&model_id_descriptor);
898 BUG_ON(retval < 0);
899
900 return 0;
901}
902
903static void __exit fw_core_cleanup(void)
904{
905 unregister_chrdev(fw_cdev_major, "firewire");
906 bus_unregister(&fw_bus_type);
907}
908
909module_init(fw_core_init);
910module_exit(fw_core_cleanup);
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h
new file mode 100644
index 000000000000..acdc3be38c61
--- /dev/null
+++ b/drivers/firewire/fw-transaction.h
@@ -0,0 +1,458 @@
1/*
2 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18
19#ifndef __fw_transaction_h
20#define __fw_transaction_h
21
22#include <linux/device.h>
23#include <linux/timer.h>
24#include <linux/interrupt.h>
25#include <linux/list.h>
26#include <linux/fs.h>
27#include <linux/dma-mapping.h>
28#include <linux/firewire-constants.h>
29
30#define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
31#define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
32#define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
33#define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
34#define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
35#define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
36
37#define LOCAL_BUS 0xffc0
38
39#define SELFID_PORT_CHILD 0x3
40#define SELFID_PORT_PARENT 0x2
41#define SELFID_PORT_NCONN 0x1
42#define SELFID_PORT_NONE 0x0
43
44#define PHY_PACKET_CONFIG 0x0
45#define PHY_PACKET_LINK_ON 0x1
46#define PHY_PACKET_SELF_ID 0x2
47
48/* Bit fields _within_ the PHY registers. */
49#define PHY_LINK_ACTIVE 0x80
50#define PHY_CONTENDER 0x40
51#define PHY_BUS_RESET 0x40
52#define PHY_BUS_SHORT_RESET 0x40
53
54#define CSR_REGISTER_BASE 0xfffff0000000ULL
55
56/* register offsets relative to CSR_REGISTER_BASE */
57#define CSR_STATE_CLEAR 0x0
58#define CSR_STATE_SET 0x4
59#define CSR_NODE_IDS 0x8
60#define CSR_RESET_START 0xc
61#define CSR_SPLIT_TIMEOUT_HI 0x18
62#define CSR_SPLIT_TIMEOUT_LO 0x1c
63#define CSR_CYCLE_TIME 0x200
64#define CSR_BUS_TIME 0x204
65#define CSR_BUSY_TIMEOUT 0x210
66#define CSR_BUS_MANAGER_ID 0x21c
67#define CSR_BANDWIDTH_AVAILABLE 0x220
68#define CSR_CHANNELS_AVAILABLE 0x224
69#define CSR_CHANNELS_AVAILABLE_HI 0x224
70#define CSR_CHANNELS_AVAILABLE_LO 0x228
71#define CSR_BROADCAST_CHANNEL 0x234
72#define CSR_CONFIG_ROM 0x400
73#define CSR_CONFIG_ROM_END 0x800
74#define CSR_FCP_COMMAND 0xB00
75#define CSR_FCP_RESPONSE 0xD00
76#define CSR_FCP_END 0xF00
77#define CSR_TOPOLOGY_MAP 0x1000
78#define CSR_TOPOLOGY_MAP_END 0x1400
79#define CSR_SPEED_MAP 0x2000
80#define CSR_SPEED_MAP_END 0x3000
81
82#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
83#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
84#define fw_debug(s, args...) printk(KERN_DEBUG KBUILD_MODNAME ": " s, ## args)
85
86static inline void
87fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
88{
89 u32 *dst = _dst;
90 u32 *src = _src;
91 int i;
92
93 for (i = 0; i < size / 4; i++)
94 dst[i] = cpu_to_be32(src[i]);
95}
96
97static inline void
98fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
99{
100 fw_memcpy_from_be32(_dst, _src, size);
101}
102
103struct fw_card;
104struct fw_packet;
105struct fw_node;
106struct fw_request;
107
108struct fw_descriptor {
109 struct list_head link;
110 size_t length;
111 u32 immediate;
112 u32 key;
113 const u32 *data;
114};
115
116int fw_core_add_descriptor(struct fw_descriptor *desc);
117void fw_core_remove_descriptor(struct fw_descriptor *desc);
118
119typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
120 struct fw_card *card, int status);
121
122typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
123 void *data,
124 size_t length,
125 void *callback_data);
126
127typedef void (*fw_address_callback_t)(struct fw_card *card,
128 struct fw_request *request,
129 int tcode, int destination, int source,
130 int generation, int speed,
131 unsigned long long offset,
132 void *data, size_t length,
133 void *callback_data);
134
135typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
136 int node_id, int generation,
137 u32 *self_ids,
138 int self_id_count,
139 void *callback_data);
140
141struct fw_packet {
142 int speed;
143 int generation;
144 u32 header[4];
145 size_t header_length;
146 void *payload;
147 size_t payload_length;
148 u32 timestamp;
149
150 /*
151 * This callback is called when the packet transmission has
152 * completed; for successful transmission, the status code is
153 * the ack received from the destination, otherwise it's a
154 * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO.
155 * The callback can be called from tasklet context and thus
156 * must never block.
157 */
158 fw_packet_callback_t callback;
159 int ack;
160 struct list_head link;
161 void *driver_data;
162};
163
164struct fw_transaction {
165 int node_id; /* The generation is implied; it is always the current. */
166 int tlabel;
167 int timestamp;
168 struct list_head link;
169
170 struct fw_packet packet;
171
172 /*
173 * The data passed to the callback is valid only during the
174 * callback.
175 */
176 fw_transaction_callback_t callback;
177 void *callback_data;
178};
179
180static inline struct fw_packet *
181fw_packet(struct list_head *l)
182{
183 return list_entry(l, struct fw_packet, link);
184}
185
186struct fw_address_handler {
187 u64 offset;
188 size_t length;
189 fw_address_callback_t address_callback;
190 void *callback_data;
191 struct list_head link;
192};
193
194
195struct fw_address_region {
196 u64 start;
197 u64 end;
198};
199
200extern const struct fw_address_region fw_low_memory_region;
201extern const struct fw_address_region fw_high_memory_region;
202extern const struct fw_address_region fw_private_region;
203extern const struct fw_address_region fw_csr_region;
204extern const struct fw_address_region fw_unit_space_region;
205
206int fw_core_add_address_handler(struct fw_address_handler *handler,
207 const struct fw_address_region *region);
208void fw_core_remove_address_handler(struct fw_address_handler *handler);
209void fw_fill_response(struct fw_packet *response, u32 *request_header,
210 int rcode, void *payload, size_t length);
211void fw_send_response(struct fw_card *card,
212 struct fw_request *request, int rcode);
213
214extern struct bus_type fw_bus_type;
215
216struct fw_card {
217 const struct fw_card_driver *driver;
218 struct device *device;
219 struct kref kref;
220
221 int node_id;
222 int generation;
223 /* This is the generation used for timestamping incoming requests. */
224 int request_generation;
225 int current_tlabel, tlabel_mask;
226 struct list_head transaction_list;
227 struct timer_list flush_timer;
228 unsigned long reset_jiffies;
229
230 unsigned long long guid;
231 int max_receive;
232 int link_speed;
233 int config_rom_generation;
234
235 /*
236 * We need to store up to 4 self ID for a maximum of 63
237 * devices plus 3 words for the topology map header.
238 */
239 int self_id_count;
240 u32 topology_map[252 + 3];
241
242 spinlock_t lock; /* Take this lock when handling the lists in
243 * this struct. */
244 struct fw_node *local_node;
245 struct fw_node *root_node;
246 struct fw_node *irm_node;
247 int color;
248 int gap_count;
249 int topology_type;
250
251 int index;
252
253 struct list_head link;
254
255 /* Work struct for BM duties. */
256 struct delayed_work work;
257 int bm_retries;
258 int bm_generation;
259};
260
261struct fw_card *fw_card_get(struct fw_card *card);
262void fw_card_put(struct fw_card *card);
263
264/*
265 * The iso packet format allows for an immediate header/payload part
266 * stored in 'header' immediately after the packet info plus an
267 * indirect payload part that is pointer to by the 'payload' field.
268 * Applications can use one or the other or both to implement simple
269 * low-bandwidth streaming (e.g. audio) or more advanced
270 * scatter-gather streaming (e.g. assembling video frame automatically).
271 */
272
273struct fw_iso_packet {
274 u16 payload_length; /* Length of indirect payload. */
275 u32 interrupt : 1; /* Generate interrupt on this packet */
276 u32 skip : 1; /* Set to not send packet at all. */
277 u32 tag : 2;
278 u32 sy : 4;
279 u32 header_length : 8; /* Length of immediate header. */
280 u32 header[0];
281};
282
283#define FW_ISO_CONTEXT_TRANSMIT 0
284#define FW_ISO_CONTEXT_RECEIVE 1
285
286#define FW_ISO_CONTEXT_MATCH_TAG0 1
287#define FW_ISO_CONTEXT_MATCH_TAG1 2
288#define FW_ISO_CONTEXT_MATCH_TAG2 4
289#define FW_ISO_CONTEXT_MATCH_TAG3 8
290#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
291
292struct fw_iso_context;
293
294typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
295 u32 cycle,
296 size_t header_length,
297 void *header,
298 void *data);
299
300/*
301 * An iso buffer is just a set of pages mapped for DMA in the
302 * specified direction. Since the pages are to be used for DMA, they
303 * are not mapped into the kernel virtual address space. We store the
304 * DMA address in the page private. The helper function
305 * fw_iso_buffer_map() will map the pages into a given vma.
306 */
307
308struct fw_iso_buffer {
309 enum dma_data_direction direction;
310 struct page **pages;
311 int page_count;
312};
313
314struct fw_iso_context {
315 struct fw_card *card;
316 int type;
317 int channel;
318 int speed;
319 size_t header_size;
320 fw_iso_callback_t callback;
321 void *callback_data;
322};
323
324int
325fw_iso_buffer_init(struct fw_iso_buffer *buffer,
326 struct fw_card *card,
327 int page_count,
328 enum dma_data_direction direction);
329int
330fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
331void
332fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
333
334struct fw_iso_context *
335fw_iso_context_create(struct fw_card *card, int type,
336 int channel, int speed, size_t header_size,
337 fw_iso_callback_t callback, void *callback_data);
338
339void
340fw_iso_context_destroy(struct fw_iso_context *ctx);
341
342int
343fw_iso_context_queue(struct fw_iso_context *ctx,
344 struct fw_iso_packet *packet,
345 struct fw_iso_buffer *buffer,
346 unsigned long payload);
347
348int
349fw_iso_context_start(struct fw_iso_context *ctx,
350 int cycle, int sync, int tags);
351
352int
353fw_iso_context_stop(struct fw_iso_context *ctx);
354
355struct fw_card_driver {
356 const char *name;
357
358 /*
359 * Enable the given card with the given initial config rom.
360 * This function is expected to activate the card, and either
361 * enable the PHY or set the link_on bit and initiate a bus
362 * reset.
363 */
364 int (*enable)(struct fw_card *card, u32 *config_rom, size_t length);
365
366 int (*update_phy_reg)(struct fw_card *card, int address,
367 int clear_bits, int set_bits);
368
369 /*
370 * Update the config rom for an enabled card. This function
371 * should change the config rom that is presented on the bus
372 * an initiate a bus reset.
373 */
374 int (*set_config_rom)(struct fw_card *card,
375 u32 *config_rom, size_t length);
376
377 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
378 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
379 /* Calling cancel is valid once a packet has been submitted. */
380 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
381
382 /*
383 * Allow the specified node ID to do direct DMA out and in of
384 * host memory. The card will disable this for all node when
385 * a bus reset happens, so driver need to reenable this after
386 * bus reset. Returns 0 on success, -ENODEV if the card
387 * doesn't support this, -ESTALE if the generation doesn't
388 * match.
389 */
390 int (*enable_phys_dma)(struct fw_card *card,
391 int node_id, int generation);
392
393 u64 (*get_bus_time)(struct fw_card *card);
394
395 struct fw_iso_context *
396 (*allocate_iso_context)(struct fw_card *card,
397 int type, size_t header_size);
398 void (*free_iso_context)(struct fw_iso_context *ctx);
399
400 int (*start_iso)(struct fw_iso_context *ctx,
401 s32 cycle, u32 sync, u32 tags);
402
403 int (*queue_iso)(struct fw_iso_context *ctx,
404 struct fw_iso_packet *packet,
405 struct fw_iso_buffer *buffer,
406 unsigned long payload);
407
408 int (*stop_iso)(struct fw_iso_context *ctx);
409};
410
411int
412fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
413
414void
415fw_send_request(struct fw_card *card, struct fw_transaction *t,
416 int tcode, int node_id, int generation, int speed,
417 unsigned long long offset,
418 void *data, size_t length,
419 fw_transaction_callback_t callback, void *callback_data);
420
421int fw_cancel_transaction(struct fw_card *card,
422 struct fw_transaction *transaction);
423
424void fw_flush_transactions(struct fw_card *card);
425
426void fw_send_phy_config(struct fw_card *card,
427 int node_id, int generation, int gap_count);
428
429/*
430 * Called by the topology code to inform the device code of node
431 * activity; found, lost, or updated nodes.
432 */
433void
434fw_node_event(struct fw_card *card, struct fw_node *node, int event);
435
436/* API used by card level drivers */
437
438void
439fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
440 struct device *device);
441int
442fw_card_add(struct fw_card *card,
443 u32 max_receive, u32 link_speed, u64 guid);
444
445void
446fw_core_remove_card(struct fw_card *card);
447
448void
449fw_core_handle_bus_reset(struct fw_card *card,
450 int node_id, int generation,
451 int self_id_count, u32 *self_ids);
452void
453fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
454
455void
456fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
457
458#endif /* __fw_transaction_h */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 3ba3a5221c41..4d1cb5b855d1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig HWMON 5menuconfig HWMON
6 tristate "Hardware Monitoring support" 6 tristate "Hardware Monitoring support"
7 depends on HAS_IOMEM
7 default y 8 default y
8 help 9 help
9 Hardware monitoring devices let you monitor the hardware health 10 Hardware monitoring devices let you monitor the hardware health
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
index 18210164e307..ca7095d96ad0 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/hwmon/ams/ams-input.c
@@ -87,7 +87,7 @@ static void ams_input_enable(void)
87 ams_info.idev->id.vendor = 0; 87 ams_info.idev->id.vendor = 0;
88 ams_info.idev->open = ams_input_open; 88 ams_info.idev->open = ams_input_open;
89 ams_info.idev->close = ams_input_close; 89 ams_info.idev->close = ams_input_close;
90 ams_info.idev->cdev.dev = &ams_info.of_dev->dev; 90 ams_info.idev->dev.parent = &ams_info.of_dev->dev;
91 91
92 input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0); 92 input_set_abs_params(ams_info.idev, ABS_X, -50, 50, 3, 0);
93 input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0); 93 input_set_abs_params(ams_info.idev, ABS_Y, -50, 50, 3, 0);
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index b51c104a28a2..0c160675b3ac 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1100,7 +1100,7 @@ static int applesmc_create_accelerometer(void)
1100 /* initialize the input class */ 1100 /* initialize the input class */
1101 applesmc_idev->name = "applesmc"; 1101 applesmc_idev->name = "applesmc";
1102 applesmc_idev->id.bustype = BUS_HOST; 1102 applesmc_idev->id.bustype = BUS_HOST;
1103 applesmc_idev->cdev.dev = &pdev->dev; 1103 applesmc_idev->dev.parent = &pdev->dev;
1104 applesmc_idev->evbit[0] = BIT(EV_ABS); 1104 applesmc_idev->evbit[0] = BIT(EV_ABS);
1105 applesmc_idev->open = applesmc_idev_open; 1105 applesmc_idev->open = applesmc_idev_open;
1106 applesmc_idev->close = applesmc_idev_close; 1106 applesmc_idev->close = applesmc_idev_close;
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index f82fa2d23f95..e0cf5e6fe5bc 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -574,7 +574,7 @@ static int __init hdaps_init(void)
574 574
575 /* initialize the input class */ 575 /* initialize the input class */
576 hdaps_idev->name = "hdaps"; 576 hdaps_idev->name = "hdaps";
577 hdaps_idev->cdev.dev = &pdev->dev; 577 hdaps_idev->dev.parent = &pdev->dev;
578 hdaps_idev->evbit[0] = BIT(EV_ABS); 578 hdaps_idev->evbit[0] = BIT(EV_ABS);
579 input_set_abs_params(hdaps_idev, ABS_X, 579 input_set_abs_params(hdaps_idev, ABS_X,
580 -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT); 580 -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 434a61b415a3..96867347bcbf 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig I2C 5menuconfig I2C
6 tristate "I2C support" 6 tristate "I2C support"
7 depends on HAS_IOMEM
7 ---help--- 8 ---help---
8 I2C (pronounce: I-square-C) is a slow serial bus protocol used in 9 I2C (pronounce: I-square-C) is a slow serial bus protocol used in
9 many micro controller applications and developed by Philips. SMBus, 10 many micro controller applications and developed by Philips. SMBus,
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index f35156c58922..9c8b6d5eaec9 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/version.h> 17#include <linux/version.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/err.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <linux/types.h> 21#include <linux/types.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
@@ -226,13 +227,14 @@ static int __devinit at91_i2c_probe(struct platform_device *pdev)
226 adapter->algo = &at91_algorithm; 227 adapter->algo = &at91_algorithm;
227 adapter->class = I2C_CLASS_HWMON; 228 adapter->class = I2C_CLASS_HWMON;
228 adapter->dev.parent = &pdev->dev; 229 adapter->dev.parent = &pdev->dev;
230 /* adapter->id == 0 ... only one TWI controller for now */
229 231
230 platform_set_drvdata(pdev, adapter); 232 platform_set_drvdata(pdev, adapter);
231 233
232 clk_enable(twi_clk); /* enable peripheral clock */ 234 clk_enable(twi_clk); /* enable peripheral clock */
233 at91_twi_hwinit(); /* initialize TWI controller */ 235 at91_twi_hwinit(); /* initialize TWI controller */
234 236
235 rc = i2c_add_adapter(adapter); 237 rc = i2c_add_numbered_adapter(adapter);
236 if (rc) { 238 if (rc) {
237 dev_err(&pdev->dev, "Adapter %s registration failed\n", 239 dev_err(&pdev->dev, "Adapter %s registration failed\n",
238 adapter->name); 240 adapter->name);
@@ -295,6 +297,9 @@ static int at91_i2c_resume(struct platform_device *pdev)
295#define at91_i2c_resume NULL 297#define at91_i2c_resume NULL
296#endif 298#endif
297 299
300/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
301MODULE_ALIAS("at91_i2c");
302
298static struct platform_driver at91_i2c_driver = { 303static struct platform_driver at91_i2c_driver = {
299 .probe = at91_i2c_probe, 304 .probe = at91_i2c_probe,
300 .remove = __devexit_p(at91_i2c_remove), 305 .remove = __devexit_p(at91_i2c_remove),
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 873544ab598e..8a0a99b93641 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -548,7 +548,7 @@ static inline void i2c_pxa_stop_message(struct pxa_i2c *i2c)
548 */ 548 */
549 icr = readl(_ICR(i2c)); 549 icr = readl(_ICR(i2c));
550 icr &= ~(ICR_STOP | ICR_ACKNAK); 550 icr &= ~(ICR_STOP | ICR_ACKNAK);
551 writel(icr, _IRC(i2c)); 551 writel(icr, _ICR(i2c));
552} 552}
553 553
554/* 554/*
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 1d06b415ede9..9040809d2c25 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -7,6 +7,7 @@
7if BLOCK 7if BLOCK
8 8
9menu "ATA/ATAPI/MFM/RLL support" 9menu "ATA/ATAPI/MFM/RLL support"
10 depends on HAS_IOMEM
10 11
11config IDE 12config IDE
12 tristate "ATA/ATAPI/MFM/RLL support" 13 tristate "ATA/ATAPI/MFM/RLL support"
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 61d7809a5a26..8012b3b0ce75 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -1,4 +1,7 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on PCI || BROKEN
3
4source "drivers/firewire/Kconfig"
2 5
3config IEEE1394 6config IEEE1394
4 tristate "IEEE 1394 (FireWire) support" 7 tristate "IEEE 1394 (FireWire) support"
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 37deaae49190..994decc7bcf2 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -1,4 +1,5 @@
1menu "InfiniBand support" 1menu "InfiniBand support"
2 depends on HAS_IOMEM
2 3
3config INFINIBAND 4config INFINIBAND
4 depends on PCI || BROKEN 5 depends on PCI || BROKEN
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 0e9b69535ad6..f814fb3a469d 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Input device support" 5menu "Input device support"
6 depends on !S390
6 7
7config INPUT 8config INPUT
8 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED 9 tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index c90afeea54aa..d42fe89cddf6 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "ISDN subsystem" 5menu "ISDN subsystem"
6 depends on !S390
6 7
7config ISDN 8config ISDN
8 tristate "ISDN support" 9 tristate "ISDN support"
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index 703cc88d1ef9..e8e37d826478 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -2,6 +2,7 @@
2# KVM configuration 2# KVM configuration
3# 3#
4menu "Virtualization" 4menu "Virtualization"
5 depends on X86
5 6
6config KVM 7config KVM
7 tristate "Kernel-based Virtual Machine (KVM) support" 8 tristate "Kernel-based Virtual Machine (KVM) support"
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 80acd08f0e97..87d2046f866c 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "LED devices" 2menu "LED devices"
3 depends on HAS_IOMEM
3 4
4config NEW_LEDS 5config NEW_LEDS
5 bool "LED Support" 6 bool "LED Support"
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 97ee870b265d..3a95cc5e029c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -271,21 +271,25 @@ static int raid1_end_read_request(struct bio *bio, unsigned int bytes_done, int
271 */ 271 */
272 update_head_pos(mirror, r1_bio); 272 update_head_pos(mirror, r1_bio);
273 273
274 if (uptodate || (conf->raid_disks - conf->mddev->degraded) <= 1) { 274 if (uptodate)
275 /* 275 set_bit(R1BIO_Uptodate, &r1_bio->state);
276 * Set R1BIO_Uptodate in our master bio, so that 276 else {
277 * we will return a good error code for to the higher 277 /* If all other devices have failed, we want to return
278 * levels even if IO on some other mirrored buffer fails. 278 * the error upwards rather than fail the last device.
279 * 279 * Here we redefine "uptodate" to mean "Don't want to retry"
280 * The 'master' represents the composite IO operation to
281 * user-side. So if something waits for IO, then it will
282 * wait for the 'master' bio.
283 */ 280 */
284 if (uptodate) 281 unsigned long flags;
285 set_bit(R1BIO_Uptodate, &r1_bio->state); 282 spin_lock_irqsave(&conf->device_lock, flags);
283 if (r1_bio->mddev->degraded == conf->raid_disks ||
284 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
285 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
286 uptodate = 1;
287 spin_unlock_irqrestore(&conf->device_lock, flags);
288 }
286 289
290 if (uptodate)
287 raid_end_bio_io(r1_bio); 291 raid_end_bio_io(r1_bio);
288 } else { 292 else {
289 /* 293 /*
290 * oops, read error: 294 * oops, read error:
291 */ 295 */
@@ -992,13 +996,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
992 unsigned long flags; 996 unsigned long flags;
993 spin_lock_irqsave(&conf->device_lock, flags); 997 spin_lock_irqsave(&conf->device_lock, flags);
994 mddev->degraded++; 998 mddev->degraded++;
999 set_bit(Faulty, &rdev->flags);
995 spin_unlock_irqrestore(&conf->device_lock, flags); 1000 spin_unlock_irqrestore(&conf->device_lock, flags);
996 /* 1001 /*
997 * if recovery is running, make sure it aborts. 1002 * if recovery is running, make sure it aborts.
998 */ 1003 */
999 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1004 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
1000 } 1005 } else
1001 set_bit(Faulty, &rdev->flags); 1006 set_bit(Faulty, &rdev->flags);
1002 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1007 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1003 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n" 1008 printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
1004 " Operation continuing on %d devices\n", 1009 " Operation continuing on %d devices\n",
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 91d25798ae4a..3a80e0cc7369 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Multimedia devices" 5menu "Multimedia devices"
6 depends on HAS_IOMEM
6 7
7config VIDEO_DEV 8config VIDEO_DEV
8 tristate "Video For Linux" 9 tristate "Video For Linux"
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
index 71037f91c222..c88cc75ab49b 100644
--- a/drivers/message/fusion/Kconfig
+++ b/drivers/message/fusion/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "Fusion MPT device support" 2menu "Fusion MPT device support"
3 depends on PCI
3 4
4config FUSION 5config FUSION
5 bool 6 bool
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
index 6443392bffff..f4ac21e5771e 100644
--- a/drivers/message/i2o/Kconfig
+++ b/drivers/message/i2o/Kconfig
@@ -1,5 +1,6 @@
1 1
2menu "I2O device support" 2menu "I2O device support"
3 depends on PCI
3 4
4config I2O 5config I2O
5 tristate "I2O support" 6 tristate "I2O support"
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ab6e985275b2..a20a51efe118 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Multifunction device drivers" 5menu "Multifunction device drivers"
6 depends on HAS_IOMEM
6 7
7config MFD_SM501 8config MFD_SM501
8 tristate "Support for Silicon Motion SM501" 9 tristate "Support for Silicon Motion SM501"
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 877e7909a0e5..2f2fbffafbe0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -130,7 +130,7 @@ config SONY_LAPTOP
130 130
131 Read <file:Documentation/sony-laptop.txt> for more information. 131 Read <file:Documentation/sony-laptop.txt> for more information.
132 132
133config SONY_LAPTOP_OLD 133config SONYPI_COMPAT
134 bool "Sonypi compatibility" 134 bool "Sonypi compatibility"
135 depends on SONY_LAPTOP 135 depends on SONY_LAPTOP
136 ---help--- 136 ---help---
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 65c32a95e121..4f9060a2a2f2 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -30,7 +30,7 @@
30 * Eric Burghard - LED display support for W1N 30 * Eric Burghard - LED display support for W1N
31 * Josh Green - Light Sens support 31 * Josh Green - Light Sens support
32 * Thomas Tuttle - His first patch for led support was very helpfull 32 * Thomas Tuttle - His first patch for led support was very helpfull
33 * 33 * Sam Lin - GPS support
34 */ 34 */
35 35
36#include <linux/autoconf.h> 36#include <linux/autoconf.h>
@@ -48,7 +48,7 @@
48#include <acpi/acpi_bus.h> 48#include <acpi/acpi_bus.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50 50
51#define ASUS_LAPTOP_VERSION "0.41" 51#define ASUS_LAPTOP_VERSION "0.42"
52 52
53#define ASUS_HOTK_NAME "Asus Laptop Support" 53#define ASUS_HOTK_NAME "Asus Laptop Support"
54#define ASUS_HOTK_CLASS "hotkey" 54#define ASUS_HOTK_CLASS "hotkey"
@@ -83,6 +83,7 @@
83#define PLED_ON 0x20 //Phone LED 83#define PLED_ON 0x20 //Phone LED
84#define GLED_ON 0x40 //Gaming LED 84#define GLED_ON 0x40 //Gaming LED
85#define LCD_ON 0x80 //LCD backlight 85#define LCD_ON 0x80 //LCD backlight
86#define GPS_ON 0x100 //GPS
86 87
87#define ASUS_LOG ASUS_HOTK_FILE ": " 88#define ASUS_LOG ASUS_HOTK_FILE ": "
88#define ASUS_ERR KERN_ERR ASUS_LOG 89#define ASUS_ERR KERN_ERR ASUS_LOG
@@ -148,7 +149,7 @@ ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
148ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G 149ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
149 M6A M6V VX-1 V6J V6V W3Z */ 150 M6A M6V VX-1 V6J V6V W3Z */
150 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V 151 "\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
151 S5A M5A z33A W1Jc W2V */ 152 S5A M5A z33A W1Jc W2V G1 */
152 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */ 153 "\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
153 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */ 154 "\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
154 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */ 155 "\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
@@ -162,6 +163,12 @@ ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L
162ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ 163ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
163ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ 164ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
164 165
166/* GPS */
167/* R2H use different handle for GPS on/off */
168ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
169ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
170ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
171
165/* 172/*
166 * This is the main structure, we can use it to store anything interesting 173 * This is the main structure, we can use it to store anything interesting
167 * about the hotk device 174 * about the hotk device
@@ -278,12 +285,28 @@ static int read_wireless_status(int mask)
278 return (hotk->status & mask) ? 1 : 0; 285 return (hotk->status & mask) ? 1 : 0;
279} 286}
280 287
288static int read_gps_status(void)
289{
290 ulong status;
291 acpi_status rv = AE_OK;
292
293 rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
294 if (ACPI_FAILURE(rv))
295 printk(ASUS_WARNING "Error reading GPS status\n");
296 else
297 return status ? 1 : 0;
298
299 return (hotk->status & GPS_ON) ? 1 : 0;
300}
301
281/* Generic LED functions */ 302/* Generic LED functions */
282static int read_status(int mask) 303static int read_status(int mask)
283{ 304{
284 /* There is a special method for both wireless devices */ 305 /* There is a special method for both wireless devices */
285 if (mask == BT_ON || mask == WL_ON) 306 if (mask == BT_ON || mask == WL_ON)
286 return read_wireless_status(mask); 307 return read_wireless_status(mask);
308 else if (mask == GPS_ON)
309 return read_gps_status();
287 310
288 return (hotk->status & mask) ? 1 : 0; 311 return (hotk->status & mask) ? 1 : 0;
289} 312}
@@ -299,6 +322,10 @@ static void write_status(acpi_handle handle, int out, int mask)
299 case GLED_ON: 322 case GLED_ON:
300 out = (out & 0x1) + 1; 323 out = (out & 0x1) + 1;
301 break; 324 break;
325 case GPS_ON:
326 handle = (out) ? gps_on_handle : gps_off_handle;
327 out = 0x02;
328 break;
302 default: 329 default:
303 out &= 0x1; 330 out &= 0x1;
304 break; 331 break;
@@ -667,6 +694,21 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
667 return rv; 694 return rv;
668} 695}
669 696
697/*
698 * GPS
699 */
700static ssize_t show_gps(struct device *dev,
701 struct device_attribute *attr, char *buf)
702{
703 return sprintf(buf, "%d\n", read_status(GPS_ON));
704}
705
706static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
707 const char *buf, size_t count)
708{
709 return store_status(buf, count, NULL, GPS_ON);
710}
711
670static void asus_hotk_notify(acpi_handle handle, u32 event, void *data) 712static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
671{ 713{
672 /* TODO Find a better way to handle events count. */ 714 /* TODO Find a better way to handle events count. */
@@ -715,6 +757,7 @@ static ASUS_CREATE_DEVICE_ATTR(display);
715static ASUS_CREATE_DEVICE_ATTR(ledd); 757static ASUS_CREATE_DEVICE_ATTR(ledd);
716static ASUS_CREATE_DEVICE_ATTR(ls_switch); 758static ASUS_CREATE_DEVICE_ATTR(ls_switch);
717static ASUS_CREATE_DEVICE_ATTR(ls_level); 759static ASUS_CREATE_DEVICE_ATTR(ls_level);
760static ASUS_CREATE_DEVICE_ATTR(gps);
718 761
719static struct attribute *asuspf_attributes[] = { 762static struct attribute *asuspf_attributes[] = {
720 &dev_attr_infos.attr, 763 &dev_attr_infos.attr,
@@ -724,6 +767,7 @@ static struct attribute *asuspf_attributes[] = {
724 &dev_attr_ledd.attr, 767 &dev_attr_ledd.attr,
725 &dev_attr_ls_switch.attr, 768 &dev_attr_ls_switch.attr,
726 &dev_attr_ls_level.attr, 769 &dev_attr_ls_level.attr,
770 &dev_attr_gps.attr,
727 NULL 771 NULL
728}; 772};
729 773
@@ -763,6 +807,9 @@ static void asus_hotk_add_fs(void)
763 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); 807 ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
764 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); 808 ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
765 } 809 }
810
811 if (gps_status_handle && gps_on_handle && gps_off_handle)
812 ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
766} 813}
767 814
768static int asus_handle_init(char *name, acpi_handle * handle, 815static int asus_handle_init(char *name, acpi_handle * handle,
@@ -890,9 +937,13 @@ static int asus_hotk_get_info(void)
890 937
891 /* There is a lot of models with "ALSL", but a few get 938 /* There is a lot of models with "ALSL", but a few get
892 a real light sens, so we need to check it. */ 939 a real light sens, so we need to check it. */
893 if (ASUS_HANDLE_INIT(ls_switch)) 940 if (!ASUS_HANDLE_INIT(ls_switch))
894 ASUS_HANDLE_INIT(ls_level); 941 ASUS_HANDLE_INIT(ls_level);
895 942
943 ASUS_HANDLE_INIT(gps_on);
944 ASUS_HANDLE_INIT(gps_off);
945 ASUS_HANDLE_INIT(gps_status);
946
896 kfree(model); 947 kfree(model);
897 948
898 return AE_OK; 949 return AE_OK;
@@ -950,7 +1001,7 @@ static int asus_hotk_add(struct acpi_device *device)
950 * We install the handler, it will receive the hotk in parameter, so, we 1001 * We install the handler, it will receive the hotk in parameter, so, we
951 * could add other data to the hotk struct 1002 * could add other data to the hotk struct
952 */ 1003 */
953 status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, 1004 status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
954 asus_hotk_notify, hotk); 1005 asus_hotk_notify, hotk);
955 if (ACPI_FAILURE(status)) 1006 if (ACPI_FAILURE(status))
956 printk(ASUS_ERR "Error installing notify handler\n"); 1007 printk(ASUS_ERR "Error installing notify handler\n");
@@ -981,6 +1032,9 @@ static int asus_hotk_add(struct acpi_device *device)
981 if (ls_level_handle) 1032 if (ls_level_handle)
982 set_light_sens_level(hotk->light_level); 1033 set_light_sens_level(hotk->light_level);
983 1034
1035 /* GPS is on by default */
1036 write_status(NULL, 1, GPS_ON);
1037
984 end: 1038 end:
985 if (result) { 1039 if (result) {
986 kfree(hotk->name); 1040 kfree(hotk->name);
@@ -997,7 +1051,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
997 if (!device || !acpi_driver_data(device)) 1051 if (!device || !acpi_driver_data(device))
998 return -EINVAL; 1052 return -EINVAL;
999 1053
1000 status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY, 1054 status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
1001 asus_hotk_notify); 1055 asus_hotk_notify);
1002 if (ACPI_FAILURE(status)) 1056 if (ACPI_FAILURE(status))
1003 printk(ASUS_ERR "Error removing notify handler\n"); 1057 printk(ASUS_ERR "Error removing notify handler\n");
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 68c4b58525ba..41e901f53e7c 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -85,7 +85,7 @@ static int set_lcd_level(int level)
85 buf[0] = 0x80; 85 buf[0] = 0x80;
86 buf[1] = (u8) (level*31); 86 buf[1] = (u8) (level*31);
87 87
88 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0); 88 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1);
89} 89}
90 90
91static int get_lcd_level(void) 91static int get_lcd_level(void)
@@ -93,7 +93,7 @@ static int get_lcd_level(void)
93 u8 wdata = 0, rdata; 93 u8 wdata = 0, rdata;
94 int result; 94 int result;
95 95
96 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); 96 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
97 if (result < 0) 97 if (result < 0)
98 return result; 98 return result;
99 99
@@ -105,7 +105,7 @@ static int get_auto_brightness(void)
105 u8 wdata = 4, rdata; 105 u8 wdata = 4, rdata;
106 int result; 106 int result;
107 107
108 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1); 108 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
109 if (result < 0) 109 if (result < 0)
110 return result; 110 return result;
111 111
@@ -119,14 +119,14 @@ static int set_auto_brightness(int enable)
119 119
120 wdata[0] = 4; 120 wdata[0] = 4;
121 121
122 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1); 122 result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1);
123 if (result < 0) 123 if (result < 0)
124 return result; 124 return result;
125 125
126 wdata[0] = 0x84; 126 wdata[0] = 0x84;
127 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0); 127 wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
128 128
129 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0); 129 return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1);
130} 130}
131 131
132static int get_wireless_state(int *wlan, int *bluetooth) 132static int get_wireless_state(int *wlan, int *bluetooth)
@@ -134,7 +134,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
134 u8 wdata = 0, rdata; 134 u8 wdata = 0, rdata;
135 int result; 135 int result;
136 136
137 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1); 137 result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
138 if (result < 0) 138 if (result < 0)
139 return -1; 139 return -1;
140 140
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index c15c1f61bd1b..8ee0321ef1c8 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -63,7 +63,7 @@
63#include <asm/uaccess.h> 63#include <asm/uaccess.h>
64#include <linux/sonypi.h> 64#include <linux/sonypi.h>
65#include <linux/sony-laptop.h> 65#include <linux/sony-laptop.h>
66#ifdef CONFIG_SONY_LAPTOP_OLD 66#ifdef CONFIG_SONYPI_COMPAT
67#include <linux/poll.h> 67#include <linux/poll.h>
68#include <linux/miscdevice.h> 68#include <linux/miscdevice.h>
69#endif 69#endif
@@ -114,7 +114,7 @@ MODULE_PARM_DESC(camera,
114 "set this to 1 to enable Motion Eye camera controls " 114 "set this to 1 to enable Motion Eye camera controls "
115 "(only use it if you have a C1VE or C1VN model)"); 115 "(only use it if you have a C1VE or C1VN model)");
116 116
117#ifdef CONFIG_SONY_LAPTOP_OLD 117#ifdef CONFIG_SONYPI_COMPAT
118static int minor = -1; 118static int minor = -1;
119module_param(minor, int, 0); 119module_param(minor, int, 0);
120MODULE_PARM_DESC(minor, 120MODULE_PARM_DESC(minor,
@@ -1504,7 +1504,7 @@ static struct attribute_group spic_attribute_group = {
1504}; 1504};
1505 1505
1506/******** SONYPI compatibility **********/ 1506/******** SONYPI compatibility **********/
1507#ifdef CONFIG_SONY_LAPTOP_OLD 1507#ifdef CONFIG_SONYPI_COMPAT
1508 1508
1509/* battery / brightness / temperature addresses */ 1509/* battery / brightness / temperature addresses */
1510#define SONYPI_BAT_FLAGS 0x81 1510#define SONYPI_BAT_FLAGS 0x81
@@ -1798,7 +1798,7 @@ static void sonypi_compat_exit(void)
1798static int sonypi_compat_init(void) { return 0; } 1798static int sonypi_compat_init(void) { return 0; }
1799static void sonypi_compat_exit(void) { } 1799static void sonypi_compat_exit(void) { }
1800static void sonypi_compat_report_event(u8 event) { } 1800static void sonypi_compat_report_event(u8 event) { }
1801#endif /* CONFIG_SONY_LAPTOP_OLD */ 1801#endif /* CONFIG_SONYPI_COMPAT */
1802 1802
1803/* 1803/*
1804 * ACPI callbacks 1804 * ACPI callbacks
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 45b7d53b949c..c0b41e8bcd9d 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -4,6 +4,7 @@
4 4
5menuconfig MMC 5menuconfig MMC
6 tristate "MMC/SD card support" 6 tristate "MMC/SD card support"
7 depends on HAS_IOMEM
7 help 8 help
8 MMC is the "multi-media card" bus protocol. 9 MMC is the "multi-media card" bus protocol.
9 10
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index c1b47db29bd2..fbec8cd55e38 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -2,6 +2,7 @@
2 2
3menuconfig MTD 3menuconfig MTD
4 tristate "Memory Technology Device (MTD) support" 4 tristate "Memory Technology Device (MTD) support"
5 depends on HAS_IOMEM
5 help 6 help
6 Memory Technology Devices are flash, RAM and similar chips, often 7 Memory Technology Devices are flash, RAM and similar chips, often
7 used for solid state file systems on embedded devices. This option 8 used for solid state file systems on embedded devices. This option
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f994f129f3d8..c0d3101eb6a0 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "PHY device support" 5menu "PHY device support"
6 depends on !S390
6 7
7config PHYLIB 8config PHYLIB
8 tristate "PHY Device support and infrastructure" 9 tristate "PHY Device support and infrastructure"
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index e273347dc606..e3f5bb0fe603 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Wireless LAN" 5menu "Wireless LAN"
6 depends on !S390
6 7
7config WLAN_PRE80211 8config WLAN_PRE80211
8 bool "Wireless LAN (pre-802.11)" 9 bool "Wireless LAN (pre-802.11)"
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 36c6a1bfe558..f46c69e4ed82 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -6,6 +6,7 @@
6# 6#
7 7
8menu "Parallel port support" 8menu "Parallel port support"
9 depends on HAS_IOMEM
9 10
10config PARPORT 11config PARPORT
11 tristate "Parallel port support" 12 tristate "Parallel port support"
diff --git a/drivers/pnp/Kconfig b/drivers/pnp/Kconfig
index c5143201419a..1959cef8e9de 100644
--- a/drivers/pnp/Kconfig
+++ b/drivers/pnp/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Plug and Play support" 5menu "Plug and Play support"
6 depends on HAS_IOMEM
6 7
7config PNP 8config PNP
8 bool "Plug and Play support" 9 bool "Plug and Play support"
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 76422eded36e..1759baad439c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Real Time Clock" 5menu "Real Time Clock"
6 depends on !S390
6 7
7config RTC_LIB 8config RTC_LIB
8 tristate 9 tristate
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index b250c5354503..e879b212cf43 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -1,11 +1,9 @@
1if S390 && BLOCK
2
3comment "S/390 block device drivers" 1comment "S/390 block device drivers"
4 depends on S390 2 depends on S390 && BLOCK
5 3
6config BLK_DEV_XPRAM 4config BLK_DEV_XPRAM
7 tristate "XPRAM disk support" 5 tristate "XPRAM disk support"
8 depends on S390 6 depends on S390 && BLOCK
9 help 7 help
10 Select this option if you want to use your expanded storage on S/390 8 Select this option if you want to use your expanded storage on S/390
11 or zSeries as a disk. This is useful as a _fast_ swap device if you 9 or zSeries as a disk. This is useful as a _fast_ swap device if you
@@ -15,12 +13,13 @@ config BLK_DEV_XPRAM
15 13
16config DCSSBLK 14config DCSSBLK
17 tristate "DCSSBLK support" 15 tristate "DCSSBLK support"
16 depends on S390 && BLOCK
18 help 17 help
19 Support for dcss block device 18 Support for dcss block device
20 19
21config DASD 20config DASD
22 tristate "Support for DASD devices" 21 tristate "Support for DASD devices"
23 depends on CCW 22 depends on CCW && BLOCK
24 help 23 help
25 Enable this option if you want to access DASDs directly utilizing 24 Enable this option if you want to access DASDs directly utilizing
26 S/390s channel subsystem commands. This is necessary for running 25 S/390s channel subsystem commands. This is necessary for running
@@ -62,5 +61,3 @@ config DASD_EER
62 This driver provides a character device interface to the 61 This driver provides a character device interface to the
63 DASD extended error reporting. This is only needed if you want to 62 DASD extended error reporting. This is only needed if you want to
64 use applications written for the EER facility. 63 use applications written for the EER facility.
65
66endif
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 977521013fe8..bfeca57098fa 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2174,9 +2174,10 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
2174 return ret; 2174 return ret;
2175} 2175}
2176 2176
2177struct dasd_ccw_req * dasd_generic_build_rdc(struct dasd_device *device, 2177static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2178 void *rdc_buffer, 2178 void *rdc_buffer,
2179 int rdc_buffer_size, char *magic) 2179 int rdc_buffer_size,
2180 char *magic)
2180{ 2181{
2181 struct dasd_ccw_req *cqr; 2182 struct dasd_ccw_req *cqr;
2182 struct ccw1 *ccw; 2183 struct ccw1 *ccw;
@@ -2219,6 +2220,7 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
2219 dasd_sfree_request(cqr, cqr->device); 2220 dasd_sfree_request(cqr, cqr->device);
2220 return ret; 2221 return ret;
2221} 2222}
2223EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2222 2224
2223static int __init 2225static int __init
2224dasd_init(void) 2226dasd_init(void)
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e810e4a44ed4..eccac1c3b71b 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -50,6 +50,7 @@ struct dasd_diag_private {
50 struct dasd_diag_rw_io iob; 50 struct dasd_diag_rw_io iob;
51 struct dasd_diag_init_io iib; 51 struct dasd_diag_init_io iib;
52 blocknum_t pt_block; 52 blocknum_t pt_block;
53 struct ccw_dev_id dev_id;
53}; 54};
54 55
55struct dasd_diag_req { 56struct dasd_diag_req {
@@ -102,7 +103,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
102 iib = &private->iib; 103 iib = &private->iib;
103 memset(iib, 0, sizeof (struct dasd_diag_init_io)); 104 memset(iib, 0, sizeof (struct dasd_diag_init_io));
104 105
105 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 106 iib->dev_nr = private->dev_id.devno;
106 iib->block_size = blocksize; 107 iib->block_size = blocksize;
107 iib->offset = offset; 108 iib->offset = offset;
108 iib->flaga = DASD_DIAG_FLAGA_DEFAULT; 109 iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
@@ -127,7 +128,7 @@ mdsk_term_io(struct dasd_device * device)
127 private = (struct dasd_diag_private *) device->private; 128 private = (struct dasd_diag_private *) device->private;
128 iib = &private->iib; 129 iib = &private->iib;
129 memset(iib, 0, sizeof (struct dasd_diag_init_io)); 130 memset(iib, 0, sizeof (struct dasd_diag_init_io));
130 iib->dev_nr = _ccw_device_get_device_number(device->cdev); 131 iib->dev_nr = private->dev_id.devno;
131 rc = dia250(iib, TERM_BIO); 132 rc = dia250(iib, TERM_BIO);
132 return rc; 133 return rc;
133} 134}
@@ -166,7 +167,7 @@ dasd_start_diag(struct dasd_ccw_req * cqr)
166 private = (struct dasd_diag_private *) device->private; 167 private = (struct dasd_diag_private *) device->private;
167 dreq = (struct dasd_diag_req *) cqr->data; 168 dreq = (struct dasd_diag_req *) cqr->data;
168 169
169 private->iob.dev_nr = _ccw_device_get_device_number(device->cdev); 170 private->iob.dev_nr = private->dev_id.devno;
170 private->iob.key = 0; 171 private->iob.key = 0;
171 private->iob.flags = DASD_DIAG_RWFLAG_ASYNC; 172 private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
172 private->iob.block_count = dreq->block_count; 173 private->iob.block_count = dreq->block_count;
@@ -323,11 +324,12 @@ dasd_diag_check_device(struct dasd_device *device)
323 "memory allocation failed for private data"); 324 "memory allocation failed for private data");
324 return -ENOMEM; 325 return -ENOMEM;
325 } 326 }
327 ccw_device_get_id(device->cdev, &private->dev_id);
326 device->private = (void *) private; 328 device->private = (void *) private;
327 } 329 }
328 /* Read Device Characteristics */ 330 /* Read Device Characteristics */
329 rdc_data = (void *) &(private->rdc_data); 331 rdc_data = (void *) &(private->rdc_data);
330 rdc_data->dev_nr = _ccw_device_get_device_number(device->cdev); 332 rdc_data->dev_nr = private->dev_id.devno;
331 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics); 333 rdc_data->rdc_len = sizeof (struct dasd_diag_characteristics);
332 334
333 rc = diag210((struct diag210 *) rdc_data); 335 rc = diag210((struct diag210 *) rdc_data);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c9583fbc2a7d..418b4e63a4fa 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -450,9 +450,9 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
450 return 0; 450 return 0;
451} 451}
452 452
453struct dasd_ccw_req * dasd_eckd_build_rcd_lpm(struct dasd_device *device, 453static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
454 void *rcd_buffer, 454 void *rcd_buffer,
455 struct ciw *ciw, __u8 lpm) 455 struct ciw *ciw, __u8 lpm)
456{ 456{
457 struct dasd_ccw_req *cqr; 457 struct dasd_ccw_req *cqr;
458 struct ccw1 *ccw; 458 struct ccw1 *ccw;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 758cfb542865..672eb0a3dd0b 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -255,6 +255,7 @@ dasd_ioctl_information(struct dasd_device *device,
255 unsigned long flags; 255 unsigned long flags;
256 int rc; 256 int rc;
257 struct ccw_device *cdev; 257 struct ccw_device *cdev;
258 struct ccw_dev_id dev_id;
258 259
259 if (!device->discipline->fill_info) 260 if (!device->discipline->fill_info)
260 return -EINVAL; 261 return -EINVAL;
@@ -270,8 +271,9 @@ dasd_ioctl_information(struct dasd_device *device,
270 } 271 }
271 272
272 cdev = device->cdev; 273 cdev = device->cdev;
274 ccw_device_get_id(cdev, &dev_id);
273 275
274 dasd_info->devno = _ccw_device_get_device_number(device->cdev); 276 dasd_info->devno = dev_id.devno;
275 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev); 277 dasd_info->schid = _ccw_device_get_subchannel_number(device->cdev);
276 dasd_info->cu_type = cdev->id.cu_type; 278 dasd_info->cu_type = cdev->id.cu_type;
277 dasd_info->cu_model = cdev->id.cu_model; 279 dasd_info->cu_model = cdev->id.cu_model;
diff --git a/drivers/s390/Kconfig b/drivers/s390/char/Kconfig
index 165af398fdea..66102a184322 100644
--- a/drivers/s390/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -1,69 +1,9 @@
1config CCW
2 bool
3 default y
4
5source "drivers/block/Kconfig"
6
7source "drivers/md/Kconfig"
8
9
10menu "Character device drivers"
11
12config UNIX98_PTYS
13 bool "Unix98 PTY support"
14 ---help---
15 A pseudo terminal (PTY) is a software device consisting of two
16 halves: a master and a slave. The slave device behaves identical to
17 a physical terminal; the master device is used by a process to
18 read data from and write data to the slave, thereby emulating a
19 terminal. Typical programs for the master side are telnet servers
20 and xterms.
21
22 Linux has traditionally used the BSD-like names /dev/ptyxx for
23 masters and /dev/ttyxx for slaves of pseudo terminals. This scheme
24 has a number of problems. The GNU C library glibc 2.1 and later,
25 however, supports the Unix98 naming standard: in order to acquire a
26 pseudo terminal, a process opens /dev/ptmx; the number of the pseudo
27 terminal is then made available to the process and the pseudo
28 terminal slave can be accessed as /dev/pts/<number>. What was
29 traditionally /dev/ttyp2 will then be /dev/pts/2, for example.
30
31 The entries in /dev/pts/ are created on the fly by a virtual
32 file system; therefore, if you say Y here you should say Y to
33 "/dev/pts file system for Unix98 PTYs" as well.
34
35 If you want to say Y here, you need to have the C library glibc 2.1
36 or later (equal to libc-6.1, check with "ls -l /lib/libc.so.*").
37 Read the instructions in <file:Documentation/Changes> pertaining to
38 pseudo terminals. It's safe to say N.
39
40config UNIX98_PTY_COUNT
41 int "Maximum number of Unix98 PTYs in use (0-2048)"
42 depends on UNIX98_PTYS
43 default "256"
44 help
45 The maximum number of Unix98 PTYs that can be used at any one time.
46 The default is 256, and should be enough for desktop systems. Server
47 machines which support incoming telnet/rlogin/ssh connections and/or
48 serve several X terminals may want to increase this: every incoming
49 connection and every xterm uses up one PTY.
50
51 When not in use, each additional set of 256 PTYs occupy
52 approximately 8 KB of kernel memory on 32-bit architectures.
53
54config HANGCHECK_TIMER
55 tristate "Hangcheck timer"
56 help
57 The hangcheck-timer module detects when the system has gone
58 out to lunch past a certain margin. It can reboot the system
59 or merely print a warning.
60
61source "drivers/char/watchdog/Kconfig"
62
63comment "S/390 character device drivers" 1comment "S/390 character device drivers"
2 depends on S390
64 3
65config TN3270 4config TN3270
66 tristate "Support for locally attached 3270 terminals" 5 tristate "Support for locally attached 3270 terminals"
6 depends on CCW
67 help 7 help
68 Include support for IBM 3270 terminals. 8 Include support for IBM 3270 terminals.
69 9
@@ -88,6 +28,7 @@ config TN3270_CONSOLE
88 28
89config TN3215 29config TN3215
90 bool "Support for 3215 line mode terminal" 30 bool "Support for 3215 line mode terminal"
31 depends on CCW
91 help 32 help
92 Include support for IBM 3215 line-mode terminals. 33 Include support for IBM 3215 line-mode terminals.
93 34
@@ -99,12 +40,19 @@ config TN3215_CONSOLE
99 Linux system console. 40 Linux system console.
100 41
101config CCW_CONSOLE 42config CCW_CONSOLE
102 bool 43 bool
103 depends on TN3215_CONSOLE || TN3270_CONSOLE 44 depends on TN3215_CONSOLE || TN3270_CONSOLE
104 default y 45 default y
105 46
47config SCLP
48 bool "Support for SCLP"
49 depends on S390
50 help
51 Include support for the SCLP interface to the service element.
52
106config SCLP_TTY 53config SCLP_TTY
107 bool "Support for SCLP line mode terminal" 54 bool "Support for SCLP line mode terminal"
55 depends on SCLP
108 help 56 help
109 Include support for IBM SCLP line-mode terminals. 57 Include support for IBM SCLP line-mode terminals.
110 58
@@ -117,6 +65,7 @@ config SCLP_CONSOLE
117 65
118config SCLP_VT220_TTY 66config SCLP_VT220_TTY
119 bool "Support for SCLP VT220-compatible terminal" 67 bool "Support for SCLP VT220-compatible terminal"
68 depends on SCLP
120 help 69 help
121 Include support for an IBM SCLP VT220-compatible terminal. 70 Include support for an IBM SCLP VT220-compatible terminal.
122 71
@@ -129,6 +78,7 @@ config SCLP_VT220_CONSOLE
129 78
130config SCLP_CPI 79config SCLP_CPI
131 tristate "Control-Program Identification" 80 tristate "Control-Program Identification"
81 depends on SCLP
132 help 82 help
133 This option enables the hardware console interface for system 83 This option enables the hardware console interface for system
134 identification. This is commonly used for workload management and 84 identification. This is commonly used for workload management and
@@ -140,6 +90,7 @@ config SCLP_CPI
140 90
141config S390_TAPE 91config S390_TAPE
142 tristate "S/390 tape device support" 92 tristate "S/390 tape device support"
93 depends on CCW
143 help 94 help
144 Select this option if you want to access channel-attached tape 95 Select this option if you want to access channel-attached tape
145 devices on IBM S/390 or zSeries. 96 devices on IBM S/390 or zSeries.
@@ -194,6 +145,7 @@ config VMLOGRDR
194 145
195config VMCP 146config VMCP
196 tristate "Support for the z/VM CP interface (VM only)" 147 tristate "Support for the z/VM CP interface (VM only)"
148 depends on S390
197 help 149 help
198 Select this option if you want to be able to interact with the control 150 Select this option if you want to be able to interact with the control
199 program on z/VM 151 program on z/VM
@@ -207,33 +159,8 @@ config MONREADER
207 159
208config MONWRITER 160config MONWRITER
209 tristate "API for writing z/VM monitor service records" 161 tristate "API for writing z/VM monitor service records"
162 depends on S390
210 default "m" 163 default "m"
211 help 164 help
212 Character device driver for writing z/VM monitor service records 165 Character device driver for writing z/VM monitor service records
213 166
214endmenu
215
216menu "Cryptographic devices"
217
218config ZCRYPT
219 tristate "Support for PCI-attached cryptographic adapters"
220 select ZCRYPT_MONOLITHIC if ZCRYPT="y"
221 default "m"
222 help
223 Select this option if you want to use a PCI-attached cryptographic
224 adapter like:
225 + PCI Cryptographic Accelerator (PCICA)
226 + PCI Cryptographic Coprocessor (PCICC)
227 + PCI-X Cryptographic Coprocessor (PCIXCC)
228 + Crypto Express2 Coprocessor (CEX2C)
229 + Crypto Express2 Accelerator (CEX2A)
230
231config ZCRYPT_MONOLITHIC
232 bool "Monolithic zcrypt module"
233 depends on ZCRYPT="m"
234 help
235 Select this option if you want to have a single module z90crypt.ko
236 that contains all parts of the crypto device driver (ap bus,
237 request router and all the card drivers).
238
239endmenu
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 8df7b1323c05..67009bfa093e 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -97,7 +97,7 @@ static u8 user_data_sever[16] = {
97 * Create the 8 bytes EBCDIC DCSS segment name from 97 * Create the 8 bytes EBCDIC DCSS segment name from
98 * an ASCII name, incl. padding 98 * an ASCII name, incl. padding
99 */ 99 */
100static inline void dcss_mkname(char *ascii_name, char *ebcdic_name) 100static void dcss_mkname(char *ascii_name, char *ebcdic_name)
101{ 101{
102 int i; 102 int i;
103 103
@@ -191,7 +191,7 @@ static inline u32 mon_rec_end(struct mon_msg *monmsg)
191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); 191 return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
192} 192}
193 193
194static inline int mon_check_mca(struct mon_msg *monmsg) 194static int mon_check_mca(struct mon_msg *monmsg)
195{ 195{
196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || 196 if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
197 (mon_rec_start(monmsg) < mon_dcss_start) || 197 (mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -209,8 +209,8 @@ static inline int mon_check_mca(struct mon_msg *monmsg)
209 return 0; 209 return 0;
210} 210}
211 211
212static inline int mon_send_reply(struct mon_msg *monmsg, 212static int mon_send_reply(struct mon_msg *monmsg,
213 struct mon_private *monpriv) 213 struct mon_private *monpriv)
214{ 214{
215 int rc; 215 int rc;
216 216
@@ -236,7 +236,7 @@ static inline int mon_send_reply(struct mon_msg *monmsg,
236 return 0; 236 return 0;
237} 237}
238 238
239static inline void mon_free_mem(struct mon_private *monpriv) 239static void mon_free_mem(struct mon_private *monpriv)
240{ 240{
241 int i; 241 int i;
242 242
@@ -246,7 +246,7 @@ static inline void mon_free_mem(struct mon_private *monpriv)
246 kfree(monpriv); 246 kfree(monpriv);
247} 247}
248 248
249static inline struct mon_private *mon_alloc_mem(void) 249static struct mon_private *mon_alloc_mem(void)
250{ 250{
251 int i; 251 int i;
252 struct mon_private *monpriv; 252 struct mon_private *monpriv;
@@ -307,7 +307,7 @@ static inline void mon_next_mca(struct mon_msg *monmsg)
307 monmsg->pos = 0; 307 monmsg->pos = 0;
308} 308}
309 309
310static inline struct mon_msg *mon_next_message(struct mon_private *monpriv) 310static struct mon_msg *mon_next_message(struct mon_private *monpriv)
311{ 311{
312 struct mon_msg *monmsg; 312 struct mon_msg *monmsg;
313 313
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 8facd14adb7c..f6ef90ee3e7d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -589,9 +589,10 @@ static int
589__raw3270_size_device_vm(struct raw3270 *rp) 589__raw3270_size_device_vm(struct raw3270 *rp)
590{ 590{
591 int rc, model; 591 int rc, model;
592 struct ccw_dev_id dev_id;
592 593
593 raw3270_init_diag210.vrdcdvno = 594 ccw_device_get_id(rp->cdev, &dev_id);
594 _ccw_device_get_device_number(rp->cdev); 595 raw3270_init_diag210.vrdcdvno = dev_id.devno;
595 raw3270_init_diag210.vrdclen = sizeof(struct diag210); 596 raw3270_init_diag210.vrdclen = sizeof(struct diag210);
596 rc = diag210(&raw3270_init_diag210); 597 rc = diag210(&raw3270_init_diag210);
597 if (rc) 598 if (rc)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 87ac4a3ad49d..dbb99d1b6f57 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -132,6 +132,9 @@ int sclp_deactivate(void);
132int sclp_reactivate(void); 132int sclp_reactivate(void);
133int sclp_service_call(sclp_cmdw_t command, void *sccb); 133int sclp_service_call(sclp_cmdw_t command, void *sccb);
134 134
135int sclp_sdias_init(void);
136void sclp_sdias_exit(void);
137
135/* useful inlines */ 138/* useful inlines */
136 139
137/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */ 140/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 52283daddaef..1c064976b32b 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -66,9 +66,9 @@ static DEFINE_MUTEX(sdias_mutex);
66 66
67static void sdias_callback(struct sclp_req *request, void *data) 67static void sdias_callback(struct sclp_req *request, void *data)
68{ 68{
69 struct sdias_sccb *sccb; 69 struct sdias_sccb *cbsccb;
70 70
71 sccb = (struct sdias_sccb *) request->sccb; 71 cbsccb = (struct sdias_sccb *) request->sccb;
72 sclp_req_done = 1; 72 sclp_req_done = 1;
73 wake_up(&sdias_wq); /* Inform caller, that request is complete */ 73 wake_up(&sdias_wq); /* Inform caller, that request is complete */
74 TRACE("callback done\n"); 74 TRACE("callback done\n");
@@ -229,7 +229,7 @@ out:
229 return rc; 229 return rc;
230} 230}
231 231
232int __init sdias_init(void) 232int __init sclp_sdias_init(void)
233{ 233{
234 int rc; 234 int rc;
235 235
@@ -248,7 +248,7 @@ int __init sdias_init(void)
248 return 0; 248 return 0;
249} 249}
250 250
251void __exit sdias_exit(void) 251void __exit sclp_sdias_exit(void)
252{ 252{
253 debug_unregister(sdias_dbf); 253 debug_unregister(sdias_dbf);
254 sclp_unregister(&sclp_sdias_register); 254 sclp_unregister(&sclp_sdias_register);
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 89d439316a53..66eb0688d523 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -21,6 +21,7 @@
21#include <asm/debug.h> 21#include <asm/debug.h>
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/irqflags.h> 23#include <asm/irqflags.h>
24#include "sclp.h"
24 25
25#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) 26#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
26#define MSG(x...) printk( KERN_ALERT x ) 27#define MSG(x...) printk( KERN_ALERT x )
@@ -564,8 +565,6 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr)
564 get_cpu_id(&hdr->cpu_id); 565 get_cpu_id(&hdr->cpu_id);
565} 566}
566 567
567extern int sdias_init(void);
568
569static int __init zcore_init(void) 568static int __init zcore_init(void)
570{ 569{
571 unsigned char arch; 570 unsigned char arch;
@@ -582,7 +581,7 @@ static int __init zcore_init(void)
582 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); 581 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
583 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); 582 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
584 583
585 rc = sdias_init(); 584 rc = sclp_sdias_init();
586 if (rc) 585 if (rc)
587 goto fail; 586 goto fail;
588 587
@@ -634,12 +633,10 @@ fail:
634 return rc; 633 return rc;
635} 634}
636 635
637extern void sdias_exit(void);
638
639static void __exit zcore_exit(void) 636static void __exit zcore_exit(void)
640{ 637{
641 debug_unregister(zcore_dbf); 638 debug_unregister(zcore_dbf);
642 sdias_exit(); 639 sclp_sdias_exit();
643 diag308(DIAG308_REL_HSA, NULL); 640 diag308(DIAG308_REL_HSA, NULL);
644} 641}
645 642
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 27c6d9e55b23..dfca0ef139fd 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -191,8 +191,7 @@ static int css_register_subchannel(struct subchannel *sch)
191 return ret; 191 return ret;
192} 192}
193 193
194int 194static int css_probe_device(struct subchannel_id schid)
195css_probe_device(struct subchannel_id schid)
196{ 195{
197 int ret; 196 int ret;
198 struct subchannel *sch; 197 struct subchannel *sch;
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 71fcfdc42800..ed7977531c3f 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -138,9 +138,7 @@ struct css_driver {
138 * all css_drivers have the css_bus_type 138 * all css_drivers have the css_bus_type
139 */ 139 */
140extern struct bus_type css_bus_type; 140extern struct bus_type css_bus_type;
141extern struct css_driver io_subchannel_driver;
142 141
143extern int css_probe_device(struct subchannel_id);
144extern int css_sch_device_register(struct subchannel *); 142extern int css_sch_device_register(struct subchannel *);
145extern void css_sch_device_unregister(struct subchannel *); 143extern void css_sch_device_unregister(struct subchannel *);
146extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); 144extern struct subchannel * get_subchannel_by_schid(struct subchannel_id);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a23ff582db9d..a8b373f69cf0 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -129,7 +129,7 @@ static void io_subchannel_verify(struct device *);
129static void io_subchannel_ioterm(struct device *); 129static void io_subchannel_ioterm(struct device *);
130static void io_subchannel_shutdown(struct subchannel *); 130static void io_subchannel_shutdown(struct subchannel *);
131 131
132struct css_driver io_subchannel_driver = { 132static struct css_driver io_subchannel_driver = {
133 .subchannel_type = SUBCHANNEL_TYPE_IO, 133 .subchannel_type = SUBCHANNEL_TYPE_IO,
134 .drv = { 134 .drv = {
135 .name = "io_subchannel", 135 .name = "io_subchannel",
@@ -546,7 +546,7 @@ static struct attribute_group ccwdev_attr_group = {
546 .attrs = ccwdev_attrs, 546 .attrs = ccwdev_attrs,
547}; 547};
548 548
549struct attribute_group *ccwdev_attr_groups[] = { 549static struct attribute_group *ccwdev_attr_groups[] = {
550 &ccwdev_attr_group, 550 &ccwdev_attr_group,
551 NULL, 551 NULL,
552}; 552};
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 16f59fcb66b1..a5d263fb55ae 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -616,6 +616,17 @@ ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
616 return chp_get_chp_desc(chpid); 616 return chp_get_chp_desc(chpid);
617} 617}
618 618
619/**
620 * ccw_device_get_id - obtain a ccw device id
621 * @cdev: device to obtain the id for
622 * @dev_id: where to fill in the values
623 */
624void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
625{
626 *dev_id = cdev->private->dev_id;
627}
628EXPORT_SYMBOL(ccw_device_get_id);
629
619// FIXME: these have to go: 630// FIXME: these have to go:
620 631
621int 632int
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index f770018fe1d5..e70aeb7a3781 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -1983,6 +1983,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
1983 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) 1983 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1984 qdio_mark_q(q); 1984 qdio_mark_q(q);
1985 else { 1985 else {
1986 qdio_perf_stat_dec(&perf_stats.tl_runs);
1986 __qdio_inbound_processing(q); 1987 __qdio_inbound_processing(q);
1987 } 1988 }
1988 } 1989 }
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index f98fa465df0a..eada69dec4fe 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -3,7 +3,7 @@ menu "S/390 network device drivers"
3 3
4config LCS 4config LCS
5 tristate "Lan Channel Station Interface" 5 tristate "Lan Channel Station Interface"
6 depends on NETDEVICES && (NET_ETHERNET || TR || FDDI) 6 depends on CCW && NETDEVICES && (NET_ETHERNET || TR || FDDI)
7 help 7 help
8 Select this option if you want to use LCS networking on IBM S/390 8 Select this option if you want to use LCS networking on IBM S/390
9 or zSeries. This device driver supports Token Ring (IEEE 802.5), 9 or zSeries. This device driver supports Token Ring (IEEE 802.5),
@@ -13,7 +13,7 @@ config LCS
13 13
14config CTC 14config CTC
15 tristate "CTC device support" 15 tristate "CTC device support"
16 depends on NETDEVICES 16 depends on CCW && NETDEVICES
17 help 17 help
18 Select this option if you want to use channel-to-channel networking 18 Select this option if you want to use channel-to-channel networking
19 on IBM S/390 or zSeries. This device driver supports real CTC 19 on IBM S/390 or zSeries. This device driver supports real CTC
@@ -42,7 +42,7 @@ config SMSGIUCV
42 42
43config CLAW 43config CLAW
44 tristate "CLAW device support" 44 tristate "CLAW device support"
45 depends on NETDEVICES 45 depends on CCW && NETDEVICES
46 help 46 help
47 This driver supports channel attached CLAW devices. 47 This driver supports channel attached CLAW devices.
48 CLAW is Common Link Access for Workstation. Common devices 48 CLAW is Common Link Access for Workstation. Common devices
@@ -52,7 +52,7 @@ config CLAW
52 52
53config QETH 53config QETH
54 tristate "Gigabit Ethernet device support" 54 tristate "Gigabit Ethernet device support"
55 depends on NETDEVICES && IP_MULTICAST && QDIO 55 depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
56 help 56 help
57 This driver supports the IBM S/390 and zSeries OSA Express adapters 57 This driver supports the IBM S/390 and zSeries OSA Express adapters
58 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN 58 in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
index f54fdfdbf06f..f29a4bc4f6f2 100644
--- a/drivers/s390/net/qeth_mpc.c
+++ b/drivers/s390/net/qeth_mpc.c
@@ -162,7 +162,7 @@ struct ipa_rc_msg {
162 char *msg; 162 char *msg;
163}; 163};
164 164
165struct ipa_rc_msg qeth_ipa_rc_msg[] = { 165static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
166 {IPA_RC_SUCCESS, "success"}, 166 {IPA_RC_SUCCESS, "success"},
167 {IPA_RC_NOTSUPP, "Command not supported"}, 167 {IPA_RC_NOTSUPP, "Command not supported"},
168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, 168 {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -226,7 +226,7 @@ struct ipa_cmd_names {
226 char *name; 226 char *name;
227}; 227};
228 228
229struct ipa_cmd_names qeth_ipa_cmd_names[] = { 229static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
230 {IPA_CMD_STARTLAN, "startlan"}, 230 {IPA_CMD_STARTLAN, "startlan"},
231 {IPA_CMD_STOPLAN, "stoplan"}, 231 {IPA_CMD_STOPLAN, "stoplan"},
232 {IPA_CMD_SETVMAC, "setvmac"}, 232 {IPA_CMD_SETVMAC, "setvmac"},
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 324899c96efe..ddff40c4212c 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -607,8 +607,7 @@ zfcp_sg_list_free(struct zfcp_sg_list *sg_list)
607 * @sg_count: elements in array 607 * @sg_count: elements in array
608 * Return: size of entire scatter-gather list 608 * Return: size of entire scatter-gather list
609 */ 609 */
610size_t 610static size_t zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
611zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count)
612{ 611{
613 unsigned int i; 612 unsigned int i;
614 struct scatterlist *p; 613 struct scatterlist *p;
@@ -975,8 +974,7 @@ zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
975 mempool_destroy(adapter->pool.data_gid_pn); 974 mempool_destroy(adapter->pool.data_gid_pn);
976} 975}
977 976
978void 977static void zfcp_dummy_release(struct device *dev)
979zfcp_dummy_release(struct device *dev)
980{ 978{
981 return; 979 return;
982} 980}
@@ -1336,7 +1334,7 @@ zfcp_nameserver_enqueue(struct zfcp_adapter *adapter)
1336 1334
1337#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC 1335#define ZFCP_LOG_AREA ZFCP_LOG_AREA_FC
1338 1336
1339void 1337static void
1340zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter, 1338zfcp_fsf_incoming_els_rscn(struct zfcp_adapter *adapter,
1341 struct fsf_status_read_buffer *status_buffer) 1339 struct fsf_status_read_buffer *status_buffer)
1342{ 1340{
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d8191d115c14..5f3212440f68 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -478,7 +478,7 @@ static struct debug_view zfcp_hba_dbf_view = {
478 NULL 478 NULL
479}; 479};
480 480
481void 481static void
482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, 482_zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req,
483 u32 s_id, u32 d_id, void *buffer, int buflen) 483 u32 s_id, u32 d_id, void *buffer, int buflen)
484{ 484{
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index e8efe938c4e7..a6f5bfbb777b 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -5,6 +5,7 @@
5# 5#
6 6
7menu "Serial drivers" 7menu "Serial drivers"
8 depends on HAS_IOMEM
8 9
9# 10#
10# The new 8250/16550 serial drivers 11# The new 8250/16550 serial drivers
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index da73205e54cd..0985193dc57d 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -92,6 +92,8 @@ struct uart_sunzilog_port {
92#define SUNZILOG_FLAG_REGS_HELD 0x00000040 92#define SUNZILOG_FLAG_REGS_HELD 0x00000040
93#define SUNZILOG_FLAG_TX_STOPPED 0x00000080 93#define SUNZILOG_FLAG_TX_STOPPED 0x00000080
94#define SUNZILOG_FLAG_TX_ACTIVE 0x00000100 94#define SUNZILOG_FLAG_TX_ACTIVE 0x00000100
95#define SUNZILOG_FLAG_ESCC 0x00000200
96#define SUNZILOG_FLAG_ISR_HANDLER 0x00000400
95 97
96 unsigned int cflag; 98 unsigned int cflag;
97 99
@@ -174,9 +176,11 @@ static void sunzilog_clear_fifo(struct zilog_channel __iomem *channel)
174/* This function must only be called when the TX is not busy. The UART 176/* This function must only be called when the TX is not busy. The UART
175 * port lock must be held and local interrupts disabled. 177 * port lock must be held and local interrupts disabled.
176 */ 178 */
177static void __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *regs) 179static int __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *regs)
178{ 180{
179 int i; 181 int i;
182 int escc;
183 unsigned char r15;
180 184
181 /* Let pending transmits finish. */ 185 /* Let pending transmits finish. */
182 for (i = 0; i < 1000; i++) { 186 for (i = 0; i < 1000; i++) {
@@ -229,11 +233,25 @@ static void __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *
229 write_zsreg(channel, R14, regs[R14]); 233 write_zsreg(channel, R14, regs[R14]);
230 234
231 /* External status interrupt control. */ 235 /* External status interrupt control. */
232 write_zsreg(channel, R15, regs[R15]); 236 write_zsreg(channel, R15, (regs[R15] | WR7pEN) & ~FIFOEN);
237
238 /* ESCC Extension Register */
239 r15 = read_zsreg(channel, R15);
240 if (r15 & 0x01) {
241 write_zsreg(channel, R7, regs[R7p]);
242
243 /* External status interrupt and FIFO control. */
244 write_zsreg(channel, R15, regs[R15] & ~WR7pEN);
245 escc = 1;
246 } else {
247 /* Clear FIFO bit case it is an issue */
248 regs[R15] &= ~FIFOEN;
249 escc = 0;
250 }
233 251
234 /* Reset external status interrupts. */ 252 /* Reset external status interrupts. */
235 write_zsreg(channel, R0, RES_EXT_INT); 253 write_zsreg(channel, R0, RES_EXT_INT); /* First Latch */
236 write_zsreg(channel, R0, RES_EXT_INT); 254 write_zsreg(channel, R0, RES_EXT_INT); /* Second Latch */
237 255
238 /* Rewrite R3/R5, this time without enables masked. */ 256 /* Rewrite R3/R5, this time without enables masked. */
239 write_zsreg(channel, R3, regs[R3]); 257 write_zsreg(channel, R3, regs[R3]);
@@ -241,6 +259,8 @@ static void __load_zsregs(struct zilog_channel __iomem *channel, unsigned char *
241 259
242 /* Rewrite R1, this time without IRQ enabled masked. */ 260 /* Rewrite R1, this time without IRQ enabled masked. */
243 write_zsreg(channel, R1, regs[R1]); 261 write_zsreg(channel, R1, regs[R1]);
262
263 return escc;
244} 264}
245 265
246/* Reprogram the Zilog channel HW registers with the copies found in the 266/* Reprogram the Zilog channel HW registers with the copies found in the
@@ -731,7 +751,7 @@ static void sunzilog_enable_ms(struct uart_port *port)
731 up->curregs[R15] = new_reg; 751 up->curregs[R15] = new_reg;
732 752
733 /* NOTE: Not subject to 'transmitter active' rule. */ 753 /* NOTE: Not subject to 'transmitter active' rule. */
734 write_zsreg(channel, R15, up->curregs[R15]); 754 write_zsreg(channel, R15, up->curregs[R15] & ~WR7pEN);
735 } 755 }
736} 756}
737 757
@@ -861,44 +881,44 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
861 up->curregs[R14] = BRSRC | BRENAB; 881 up->curregs[R14] = BRSRC | BRENAB;
862 882
863 /* Character size, stop bits, and parity. */ 883 /* Character size, stop bits, and parity. */
864 up->curregs[3] &= ~RxN_MASK; 884 up->curregs[R3] &= ~RxN_MASK;
865 up->curregs[5] &= ~TxN_MASK; 885 up->curregs[R5] &= ~TxN_MASK;
866 switch (cflag & CSIZE) { 886 switch (cflag & CSIZE) {
867 case CS5: 887 case CS5:
868 up->curregs[3] |= Rx5; 888 up->curregs[R3] |= Rx5;
869 up->curregs[5] |= Tx5; 889 up->curregs[R5] |= Tx5;
870 up->parity_mask = 0x1f; 890 up->parity_mask = 0x1f;
871 break; 891 break;
872 case CS6: 892 case CS6:
873 up->curregs[3] |= Rx6; 893 up->curregs[R3] |= Rx6;
874 up->curregs[5] |= Tx6; 894 up->curregs[R5] |= Tx6;
875 up->parity_mask = 0x3f; 895 up->parity_mask = 0x3f;
876 break; 896 break;
877 case CS7: 897 case CS7:
878 up->curregs[3] |= Rx7; 898 up->curregs[R3] |= Rx7;
879 up->curregs[5] |= Tx7; 899 up->curregs[R5] |= Tx7;
880 up->parity_mask = 0x7f; 900 up->parity_mask = 0x7f;
881 break; 901 break;
882 case CS8: 902 case CS8:
883 default: 903 default:
884 up->curregs[3] |= Rx8; 904 up->curregs[R3] |= Rx8;
885 up->curregs[5] |= Tx8; 905 up->curregs[R5] |= Tx8;
886 up->parity_mask = 0xff; 906 up->parity_mask = 0xff;
887 break; 907 break;
888 }; 908 };
889 up->curregs[4] &= ~0x0c; 909 up->curregs[R4] &= ~0x0c;
890 if (cflag & CSTOPB) 910 if (cflag & CSTOPB)
891 up->curregs[4] |= SB2; 911 up->curregs[R4] |= SB2;
892 else 912 else
893 up->curregs[4] |= SB1; 913 up->curregs[R4] |= SB1;
894 if (cflag & PARENB) 914 if (cflag & PARENB)
895 up->curregs[4] |= PAR_ENAB; 915 up->curregs[R4] |= PAR_ENAB;
896 else 916 else
897 up->curregs[4] &= ~PAR_ENAB; 917 up->curregs[R4] &= ~PAR_ENAB;
898 if (!(cflag & PARODD)) 918 if (!(cflag & PARODD))
899 up->curregs[4] |= PAR_EVEN; 919 up->curregs[R4] |= PAR_EVEN;
900 else 920 else
901 up->curregs[4] &= ~PAR_EVEN; 921 up->curregs[R4] &= ~PAR_EVEN;
902 922
903 up->port.read_status_mask = Rx_OVR; 923 up->port.read_status_mask = Rx_OVR;
904 if (iflag & INPCK) 924 if (iflag & INPCK)
@@ -952,7 +972,9 @@ sunzilog_set_termios(struct uart_port *port, struct ktermios *termios,
952 972
953static const char *sunzilog_type(struct uart_port *port) 973static const char *sunzilog_type(struct uart_port *port)
954{ 974{
955 return "zs"; 975 struct uart_sunzilog_port *up = UART_ZILOG(port);
976
977 return (up->flags & SUNZILOG_FLAG_ESCC) ? "zs (ESCC)" : "zs";
956} 978}
957 979
958/* We do not request/release mappings of the registers here, this 980/* We do not request/release mappings of the registers here, this
@@ -1170,7 +1192,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
1170 1192
1171 spin_lock_irqsave(&up->port.lock, flags); 1193 spin_lock_irqsave(&up->port.lock, flags);
1172 1194
1173 up->curregs[R15] = BRKIE; 1195 up->curregs[R15] |= BRKIE;
1174 sunzilog_convert_to_zs(up, con->cflag, 0, brg); 1196 sunzilog_convert_to_zs(up, con->cflag, 0, brg);
1175 1197
1176 sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); 1198 sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
@@ -1229,7 +1251,7 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
1229 baud = 4800; 1251 baud = 4800;
1230 } 1252 }
1231 1253
1232 up->curregs[R15] = BRKIE; 1254 up->curregs[R15] |= BRKIE;
1233 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR); 1255 brg = BPS_TO_BRG(baud, ZS_CLOCK / ZS_CLOCK_DIVISOR);
1234 sunzilog_convert_to_zs(up, up->cflag, 0, brg); 1256 sunzilog_convert_to_zs(up, up->cflag, 0, brg);
1235 sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS); 1257 sunzilog_set_mctrl(&up->port, TIOCM_DTR | TIOCM_RTS);
@@ -1283,8 +1305,18 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
1283 1305
1284 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB | 1306 if (up->flags & (SUNZILOG_FLAG_CONS_KEYB |
1285 SUNZILOG_FLAG_CONS_MOUSE)) { 1307 SUNZILOG_FLAG_CONS_MOUSE)) {
1308 up->curregs[R1] = EXT_INT_ENAB | INT_ALL_Rx | TxINT_ENAB;
1309 up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
1310 up->curregs[R3] = RxENAB | Rx8;
1311 up->curregs[R5] = TxENAB | Tx8;
1312 up->curregs[R6] = 0x00; /* SDLC Address */
1313 up->curregs[R7] = 0x7E; /* SDLC Flag */
1314 up->curregs[R9] = NV;
1315 up->curregs[R7p] = 0x00;
1286 sunzilog_init_kbdms(up, up->port.line); 1316 sunzilog_init_kbdms(up, up->port.line);
1287 up->curregs[R9] |= (NV | MIE); 1317 /* Only enable interrupts if an ISR handler available */
1318 if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
1319 up->curregs[R9] |= MIE;
1288 write_zsreg(channel, R9, up->curregs[R9]); 1320 write_zsreg(channel, R9, up->curregs[R9]);
1289 } else { 1321 } else {
1290 /* Normal serial TTY. */ 1322 /* Normal serial TTY. */
@@ -1293,7 +1325,9 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
1293 up->curregs[R4] = PAR_EVEN | X16CLK | SB1; 1325 up->curregs[R4] = PAR_EVEN | X16CLK | SB1;
1294 up->curregs[R3] = RxENAB | Rx8; 1326 up->curregs[R3] = RxENAB | Rx8;
1295 up->curregs[R5] = TxENAB | Tx8; 1327 up->curregs[R5] = TxENAB | Tx8;
1296 up->curregs[R9] = NV | MIE; 1328 up->curregs[R6] = 0x00; /* SDLC Address */
1329 up->curregs[R7] = 0x7E; /* SDLC Flag */
1330 up->curregs[R9] = NV;
1297 up->curregs[R10] = NRZ; 1331 up->curregs[R10] = NRZ;
1298 up->curregs[R11] = TCBR | RCBR; 1332 up->curregs[R11] = TCBR | RCBR;
1299 baud = 9600; 1333 baud = 9600;
@@ -1301,7 +1335,14 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
1301 up->curregs[R12] = (brg & 0xff); 1335 up->curregs[R12] = (brg & 0xff);
1302 up->curregs[R13] = (brg >> 8) & 0xff; 1336 up->curregs[R13] = (brg >> 8) & 0xff;
1303 up->curregs[R14] = BRSRC | BRENAB; 1337 up->curregs[R14] = BRSRC | BRENAB;
1304 __load_zsregs(channel, up->curregs); 1338 up->curregs[R15] = FIFOEN; /* Use FIFO if on ESCC */
1339 up->curregs[R7p] = TxFIFO_LVL | RxFIFO_LVL;
1340 if (__load_zsregs(channel, up->curregs)) {
1341 up->flags |= SUNZILOG_FLAG_ESCC;
1342 }
1343 /* Only enable interrupts if an ISR handler available */
1344 if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
1345 up->curregs[R9] |= MIE;
1305 write_zsreg(channel, R9, up->curregs[R9]); 1346 write_zsreg(channel, R9, up->curregs[R9]);
1306 } 1347 }
1307 1348
@@ -1390,12 +1431,14 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
1390 return err; 1431 return err;
1391 } 1432 }
1392 } else { 1433 } else {
1393 printk(KERN_INFO "%s: Keyboard at MMIO %lx (irq = %d) " 1434 printk(KERN_INFO "%s: Keyboard at MMIO 0x%lx (irq = %d) "
1394 "is a zs\n", 1435 "is a %s\n",
1395 op->dev.bus_id, up[0].port.mapbase, op->irqs[0]); 1436 op->dev.bus_id, up[0].port.mapbase, op->irqs[0],
1396 printk(KERN_INFO "%s: Mouse at MMIO %lx (irq = %d) " 1437 sunzilog_type (&up[0].port));
1397 "is a zs\n", 1438 printk(KERN_INFO "%s: Mouse at MMIO 0x%lx (irq = %d) "
1398 op->dev.bus_id, up[1].port.mapbase, op->irqs[0]); 1439 "is a %s\n",
1440 op->dev.bus_id, up[1].port.mapbase, op->irqs[0],
1441 sunzilog_type (&up[1].port));
1399 } 1442 }
1400 1443
1401 dev_set_drvdata(&op->dev, &up[0]); 1444 dev_set_drvdata(&op->dev, &up[0]);
@@ -1487,10 +1530,23 @@ static int __init sunzilog_init(void)
1487 goto out_unregister_uart; 1530 goto out_unregister_uart;
1488 1531
1489 if (zilog_irq != -1) { 1532 if (zilog_irq != -1) {
1533 struct uart_sunzilog_port *up = sunzilog_irq_chain;
1490 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED, 1534 err = request_irq(zilog_irq, sunzilog_interrupt, IRQF_SHARED,
1491 "zs", sunzilog_irq_chain); 1535 "zs", sunzilog_irq_chain);
1492 if (err) 1536 if (err)
1493 goto out_unregister_driver; 1537 goto out_unregister_driver;
1538
1539 /* Enable Interrupts */
1540 while (up) {
1541 struct zilog_channel __iomem *channel;
1542
1543 /* printk (KERN_INFO "Enable IRQ for ZILOG Hardware %p\n", up); */
1544 channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1545 up->flags |= SUNZILOG_FLAG_ISR_HANDLER;
1546 up->curregs[R9] |= MIE;
1547 write_zsreg(channel, R9, up->curregs[R9]);
1548 up = up->next;
1549 }
1494 } 1550 }
1495 1551
1496out: 1552out:
@@ -1515,6 +1571,20 @@ static void __exit sunzilog_exit(void)
1515 of_unregister_driver(&zs_driver); 1571 of_unregister_driver(&zs_driver);
1516 1572
1517 if (zilog_irq != -1) { 1573 if (zilog_irq != -1) {
1574 struct uart_sunzilog_port *up = sunzilog_irq_chain;
1575
1576 /* Disable Interrupts */
1577 while (up) {
1578 struct zilog_channel __iomem *channel;
1579
1580 /* printk (KERN_INFO "Disable IRQ for ZILOG Hardware %p\n", up); */
1581 channel = ZILOG_CHANNEL_FROM_PORT(&up->port);
1582 up->flags &= ~SUNZILOG_FLAG_ISR_HANDLER;
1583 up->curregs[R9] &= ~MIE;
1584 write_zsreg(channel, R9, up->curregs[R9]);
1585 up = up->next;
1586 }
1587
1518 free_irq(zilog_irq, sunzilog_irq_chain); 1588 free_irq(zilog_irq, sunzilog_irq_chain);
1519 zilog_irq = -1; 1589 zilog_irq = -1;
1520 } 1590 }
diff --git a/drivers/serial/sunzilog.h b/drivers/serial/sunzilog.h
index 7939b6d71270..5dec7b47cc38 100644
--- a/drivers/serial/sunzilog.h
+++ b/drivers/serial/sunzilog.h
@@ -13,7 +13,8 @@ struct zilog_layout {
13 struct zilog_channel channelA; 13 struct zilog_channel channelA;
14}; 14};
15 15
16#define NUM_ZSREGS 16 16#define NUM_ZSREGS 17
17#define R7p 16 /* Written as R7 with P15 bit 0 set */
17 18
18/* Conversion routines to/from brg time constants from/to bits 19/* Conversion routines to/from brg time constants from/to bits
19 * per second. 20 * per second.
@@ -127,6 +128,15 @@ struct zilog_layout {
127 128
128/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */ 129/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
129 130
131/* Write Register 7' (ESCC Only) */
132#define AUTO_TxFLAG 1 /* Automatic Tx SDLC Flag */
133#define AUTO_EOM_RST 2 /* Automatic EOM Reset */
134#define AUTOnRTS 4 /* Automatic /RTS pin deactivation */
135#define RxFIFO_LVL 8 /* Receive FIFO interrupt level */
136#define nDTRnREQ 0x10 /* /DTR/REQ timing */
137#define TxFIFO_LVL 0x20 /* Transmit FIFO interrupt level */
138#define EXT_RD_EN 0x40 /* Extended read register enable */
139
130/* Write Register 8 (transmit buffer) */ 140/* Write Register 8 (transmit buffer) */
131 141
132/* Write Register 9 (Master interrupt control) */ 142/* Write Register 9 (Master interrupt control) */
@@ -135,6 +145,7 @@ struct zilog_layout {
135#define DLC 4 /* Disable Lower Chain */ 145#define DLC 4 /* Disable Lower Chain */
136#define MIE 8 /* Master Interrupt Enable */ 146#define MIE 8 /* Master Interrupt Enable */
137#define STATHI 0x10 /* Status high */ 147#define STATHI 0x10 /* Status high */
148#define SWIACK 0x20 /* Software Interrupt Ack (not on NMOS) */
138#define NORESET 0 /* No reset on write to R9 */ 149#define NORESET 0 /* No reset on write to R9 */
139#define CHRB 0x40 /* Reset channel B */ 150#define CHRB 0x40 /* Reset channel B */
140#define CHRA 0x80 /* Reset channel A */ 151#define CHRA 0x80 /* Reset channel A */
@@ -187,7 +198,9 @@ struct zilog_layout {
187#define SNRZI 0xe0 /* Set NRZI mode */ 198#define SNRZI 0xe0 /* Set NRZI mode */
188 199
189/* Write Register 15 (external/status interrupt control) */ 200/* Write Register 15 (external/status interrupt control) */
201#define WR7pEN 1 /* WR7' Enable (ESCC only) */
190#define ZCIE 2 /* Zero count IE */ 202#define ZCIE 2 /* Zero count IE */
203#define FIFOEN 4 /* FIFO Enable (ESCC only) */
191#define DCDIE 8 /* DCD IE */ 204#define DCDIE 8 /* DCD IE */
192#define SYNCIE 0x10 /* Sync/hunt IE */ 205#define SYNCIE 0x10 /* Sync/hunt IE */
193#define CTSIE 0x20 /* CTS IE */ 206#define CTSIE 0x20 /* CTS IE */
@@ -241,6 +254,10 @@ struct zilog_layout {
241#define CHATxIP 0x10 /* Channel A Tx IP */ 254#define CHATxIP 0x10 /* Channel A Tx IP */
242#define CHARxIP 0x20 /* Channel A Rx IP */ 255#define CHARxIP 0x20 /* Channel A Rx IP */
243 256
257/* Read Register 6 (LSB frame byte count [Not on NMOS]) */
258
259/* Read Register 7 (MSB frame byte count and FIFO status [Not on NMOS]) */
260
244/* Read Register 8 (receive data register) */ 261/* Read Register 8 (receive data register) */
245 262
246/* Read Register 10 (misc status bits) */ 263/* Read Register 10 (misc status bits) */
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 07c587ec71be..7c9d37f651e3 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -6,6 +6,7 @@
6# fully appropriate there, so it'd need some thought to do well. 6# fully appropriate there, so it'd need some thought to do well.
7# 7#
8menu "SPI support" 8menu "SPI support"
9 depends on HAS_IOMEM
9 10
10config SPI 11config SPI
11 bool "SPI support" 12 bool "SPI support"
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 7625b1816baf..dd1d6a53f3c0 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Telephony Support" 5menu "Telephony Support"
6 depends on HAS_IOMEM
6 7
7config PHONE 8config PHONE
8 tristate "Linux telephony support" 9 tristate "Linux telephony support"
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 278a22cea5bf..15499b7e33f4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "USB support" 5menu "USB support"
6 depends on HAS_IOMEM
6 7
7# Host-side USB depends on having a host controller 8# Host-side USB depends on having a host controller
8# NOTE: dummy_hcd is always an option, but it's ignored here ... 9# NOTE: dummy_hcd is always an option, but it's ignored here ...
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9a256d2ff9dc..f54438828cb9 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4 4
5menu "Graphics support" 5menu "Graphics support"
6 depends on HAS_IOMEM
6 7
7source "drivers/video/backlight/Kconfig" 8source "drivers/video/backlight/Kconfig"
8source "drivers/video/display/Kconfig" 9source "drivers/video/display/Kconfig"
diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig
index c287a9ae4fdd..ca75b3ad3a2e 100644
--- a/drivers/w1/Kconfig
+++ b/drivers/w1/Kconfig
@@ -1,4 +1,5 @@
1menu "Dallas's 1-wire bus" 1menu "Dallas's 1-wire bus"
2 depends on HAS_IOMEM
2 3
3config W1 4config W1
4 tristate "Dallas's 1-wire support" 5 tristate "Dallas's 1-wire support"