author    | David S. Miller <davem@davemloft.net> | 2008-03-18 02:44:31 -0400
committer | David S. Miller <davem@davemloft.net> | 2008-03-18 02:44:31 -0400
commit    | 2f633928cbba8a5858bb39b11e7219a41b0fbef5 (patch)
tree      | 9a82f4b7f2c3afe4b0208d8e44ea61bae90a7d22 /drivers
parent    | 5e226e4d9016daee170699f8a4188a5505021756 (diff)
parent    | bde4f8fa8db2abd5ac9c542d76012d0fedab050f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
264 files changed, 7579 insertions, 2835 deletions
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
index 28a5fbc6aa1a..93d80a1c36f9 100644
--- a/drivers/acorn/char/defkeymap-l7200.c
+++ b/drivers/acorn/char/defkeymap-l7200.c
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
347 | }; | 347 | }; |
348 | 348 | ||
349 | struct kbdiacruc accent_table[MAX_DIACR] = { | 349 | struct kbdiacruc accent_table[MAX_DIACR] = { |
350 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 350 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
351 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 351 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
352 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 352 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
353 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 353 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
354 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 354 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
355 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 355 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
356 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 356 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
357 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 357 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
358 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 358 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
359 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 359 | {',', 'C', 0307}, {',', 'c', 0347}, |
360 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 360 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
361 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 361 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
362 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 362 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
363 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 363 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
364 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 364 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
365 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 365 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
366 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 366 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
367 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 367 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
368 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 368 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
369 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 369 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
370 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 370 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
371 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 371 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
372 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 372 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
373 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 373 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
374 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 374 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
375 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 375 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
376 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 376 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
377 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 377 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
378 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 378 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
379 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 379 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
380 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 380 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
381 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 381 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
382 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 382 | {'s', 's', 0337}, {'"', 'y', 0377}, |
383 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 383 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
384 | }; | 384 | }; |
385 | 385 | ||
386 | unsigned int accent_table_size = 68; | 386 | unsigned int accent_table_size = 68; |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index f688c214be0c..b4f5e8542829 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -283,34 +283,22 @@ config ACPI_TOSHIBA | |||
283 | If you have a legacy free Toshiba laptop (such as the Libretto L1 | 283 | If you have a legacy free Toshiba laptop (such as the Libretto L1 |
284 | series), say Y. | 284 | series), say Y. |
285 | 285 | ||
286 | config ACPI_CUSTOM_DSDT | 286 | config ACPI_CUSTOM_DSDT_FILE |
287 | bool "Include Custom DSDT" | 287 | string "Custom DSDT Table file to include" |
288 | default "" | ||
288 | depends on !STANDALONE | 289 | depends on !STANDALONE |
289 | default n | ||
290 | help | 290 | help |
291 | This option supports a custom DSDT by linking it into the kernel. | 291 | This option supports a custom DSDT by linking it into the kernel. |
292 | See Documentation/acpi/dsdt-override.txt | 292 | See Documentation/acpi/dsdt-override.txt |
293 | 293 | ||
294 | If unsure, say N. | ||
295 | |||
296 | config ACPI_CUSTOM_DSDT_FILE | ||
297 | string "Custom DSDT Table file to include" | ||
298 | depends on ACPI_CUSTOM_DSDT | ||
299 | default "" | ||
300 | help | ||
301 | Enter the full path name to the file which includes the AmlCode | 294 | Enter the full path name to the file which includes the AmlCode |
302 | declaration. | 295 | declaration. |
303 | 296 | ||
304 | config ACPI_CUSTOM_DSDT_INITRD | 297 | If unsure, don't enter a file name. |
305 | bool "Read Custom DSDT from initramfs" | ||
306 | depends on BLK_DEV_INITRD | ||
307 | default n | ||
308 | help | ||
309 | This option supports a custom DSDT by optionally loading it from initrd. | ||
310 | See Documentation/acpi/dsdt-override.txt | ||
311 | 298 | ||
312 | If you are not using this feature now, but may use it later, | 299 | config ACPI_CUSTOM_DSDT |
313 | it is safe to say Y here. | 300 | bool |
301 | default ACPI_CUSTOM_DSDT_FILE != "" | ||
314 | 302 | ||
315 | config ACPI_BLACKLIST_YEAR | 303 | config ACPI_BLACKLIST_YEAR |
316 | int "Disable ACPI for systems before Jan 1st this year" if X86_32 | 304 | int "Disable ACPI for systems before Jan 1st this year" if X86_32 |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index ce3c0a2cbac4..5b6760e0f957 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -776,7 +776,7 @@ static int __init acpi_init(void) | |||
776 | 776 | ||
777 | acpi_kobj = kobject_create_and_add("acpi", firmware_kobj); | 777 | acpi_kobj = kobject_create_and_add("acpi", firmware_kobj); |
778 | if (!acpi_kobj) { | 778 | if (!acpi_kobj) { |
779 | printk(KERN_WARNING "%s: kset create error\n", __FUNCTION__); | 779 | printk(KERN_WARNING "%s: kset create error\n", __func__); |
780 | acpi_kobj = NULL; | 780 | acpi_kobj = NULL; |
781 | } | 781 | } |
782 | 782 | ||
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 24a7865a57cb..6c5da83cdb68 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -449,6 +449,7 @@ static int acpi_button_add(struct acpi_device *device) | |||
449 | input->phys = button->phys; | 449 | input->phys = button->phys; |
450 | input->id.bustype = BUS_HOST; | 450 | input->id.bustype = BUS_HOST; |
451 | input->id.product = button->type; | 451 | input->id.product = button->type; |
452 | input->dev.parent = &device->dev; | ||
452 | 453 | ||
453 | switch (button->type) { | 454 | switch (button->type) { |
454 | case ACPI_BUTTON_TYPE_POWER: | 455 | case ACPI_BUTTON_TYPE_POWER: |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index caf873c14bfb..e7e197e3a4ff 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -129,6 +129,7 @@ static struct acpi_ec { | |||
129 | struct mutex lock; | 129 | struct mutex lock; |
130 | wait_queue_head_t wait; | 130 | wait_queue_head_t wait; |
131 | struct list_head list; | 131 | struct list_head list; |
132 | atomic_t irq_count; | ||
132 | u8 handlers_installed; | 133 | u8 handlers_installed; |
133 | } *boot_ec, *first_ec; | 134 | } *boot_ec, *first_ec; |
134 | 135 | ||
@@ -181,6 +182,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) | |||
181 | { | 182 | { |
182 | int ret = 0; | 183 | int ret = 0; |
183 | 184 | ||
185 | atomic_set(&ec->irq_count, 0); | ||
186 | |||
184 | if (unlikely(event == ACPI_EC_EVENT_OBF_1 && | 187 | if (unlikely(event == ACPI_EC_EVENT_OBF_1 && |
185 | test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags))) | 188 | test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags))) |
186 | force_poll = 1; | 189 | force_poll = 1; |
@@ -227,6 +230,7 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll) | |||
227 | while (time_before(jiffies, delay)) { | 230 | while (time_before(jiffies, delay)) { |
228 | if (acpi_ec_check_status(ec, event)) | 231 | if (acpi_ec_check_status(ec, event)) |
229 | goto end; | 232 | goto end; |
233 | msleep(5); | ||
230 | } | 234 | } |
231 | } | 235 | } |
232 | pr_err(PREFIX "acpi_ec_wait timeout," | 236 | pr_err(PREFIX "acpi_ec_wait timeout," |
@@ -529,6 +533,13 @@ static u32 acpi_ec_gpe_handler(void *data) | |||
529 | struct acpi_ec *ec = data; | 533 | struct acpi_ec *ec = data; |
530 | 534 | ||
531 | pr_debug(PREFIX "~~~> interrupt\n"); | 535 | pr_debug(PREFIX "~~~> interrupt\n"); |
536 | atomic_inc(&ec->irq_count); | ||
537 | if (atomic_read(&ec->irq_count) > 5) { | ||
538 | pr_err(PREFIX "GPE storm detected, disabling EC GPE\n"); | ||
539 | acpi_disable_gpe(NULL, ec->gpe, ACPI_ISR); | ||
540 | clear_bit(EC_FLAGS_GPE_MODE, &ec->flags); | ||
541 | return ACPI_INTERRUPT_HANDLED; | ||
542 | } | ||
532 | clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); | 543 | clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags); |
533 | if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) | 544 | if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) |
534 | wake_up(&ec->wait); | 545 | wake_up(&ec->wait); |
@@ -943,11 +954,7 @@ int __init acpi_ec_ecdt_probe(void) | |||
943 | boot_ec->command_addr = ecdt_ptr->control.address; | 954 | boot_ec->command_addr = ecdt_ptr->control.address; |
944 | boot_ec->data_addr = ecdt_ptr->data.address; | 955 | boot_ec->data_addr = ecdt_ptr->data.address; |
945 | boot_ec->gpe = ecdt_ptr->gpe; | 956 | boot_ec->gpe = ecdt_ptr->gpe; |
946 | if (ACPI_FAILURE(acpi_get_handle(NULL, ecdt_ptr->id, | 957 | boot_ec->handle = ACPI_ROOT_OBJECT; |
947 | &boot_ec->handle))) { | ||
948 | pr_info("Failed to locate handle for boot EC\n"); | ||
949 | boot_ec->handle = ACPI_ROOT_OBJECT; | ||
950 | } | ||
951 | } else { | 958 | } else { |
952 | /* This workaround is needed only on some broken machines, | 959 | /* This workaround is needed only on some broken machines, |
953 | * which require early EC, but fail to provide ECDT */ | 960 | * which require early EC, but fail to provide ECDT */ |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 8edba7b678eb..a697fb6cf050 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -91,10 +91,6 @@ static DEFINE_SPINLOCK(acpi_res_lock); | |||
91 | #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ | 91 | #define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ |
92 | static char osi_additional_string[OSI_STRING_LENGTH_MAX]; | 92 | static char osi_additional_string[OSI_STRING_LENGTH_MAX]; |
93 | 93 | ||
94 | #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD | ||
95 | static int acpi_no_initrd_override; | ||
96 | #endif | ||
97 | |||
98 | /* | 94 | /* |
99 | * "Ode to _OSI(Linux)" | 95 | * "Ode to _OSI(Linux)" |
100 | * | 96 | * |
@@ -324,67 +320,6 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val, | |||
324 | return AE_OK; | 320 | return AE_OK; |
325 | } | 321 | } |
326 | 322 | ||
327 | #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD | ||
328 | static struct acpi_table_header *acpi_find_dsdt_initrd(void) | ||
329 | { | ||
330 | struct file *firmware_file; | ||
331 | mm_segment_t oldfs; | ||
332 | unsigned long len, len2; | ||
333 | struct acpi_table_header *dsdt_buffer, *ret = NULL; | ||
334 | struct kstat stat; | ||
335 | char *ramfs_dsdt_name = "/DSDT.aml"; | ||
336 | |||
337 | printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT\n"); | ||
338 | |||
339 | /* | ||
340 | * Never do this at home, only the user-space is allowed to open a file. | ||
341 | * The clean way would be to use the firmware loader. | ||
342 | * But this code must be run before there is any userspace available. | ||
343 | * A static/init firmware infrastructure doesn't exist yet... | ||
344 | */ | ||
345 | if (vfs_stat(ramfs_dsdt_name, &stat) < 0) | ||
346 | return ret; | ||
347 | |||
348 | len = stat.size; | ||
349 | /* check especially against empty files */ | ||
350 | if (len <= 4) { | ||
351 | printk(KERN_ERR PREFIX "Failed: DSDT only %lu bytes.\n", len); | ||
352 | return ret; | ||
353 | } | ||
354 | |||
355 | firmware_file = filp_open(ramfs_dsdt_name, O_RDONLY, 0); | ||
356 | if (IS_ERR(firmware_file)) { | ||
357 | printk(KERN_ERR PREFIX "Failed to open %s.\n", ramfs_dsdt_name); | ||
358 | return ret; | ||
359 | } | ||
360 | |||
361 | dsdt_buffer = kmalloc(len, GFP_ATOMIC); | ||
362 | if (!dsdt_buffer) { | ||
363 | printk(KERN_ERR PREFIX "Failed to allocate %lu bytes.\n", len); | ||
364 | goto err; | ||
365 | } | ||
366 | |||
367 | oldfs = get_fs(); | ||
368 | set_fs(KERNEL_DS); | ||
369 | len2 = vfs_read(firmware_file, (char __user *)dsdt_buffer, len, | ||
370 | &firmware_file->f_pos); | ||
371 | set_fs(oldfs); | ||
372 | if (len2 < len) { | ||
373 | printk(KERN_ERR PREFIX "Failed to read %lu bytes from %s.\n", | ||
374 | len, ramfs_dsdt_name); | ||
375 | ACPI_FREE(dsdt_buffer); | ||
376 | goto err; | ||
377 | } | ||
378 | |||
379 | printk(KERN_INFO PREFIX "Found %lu byte DSDT in %s.\n", | ||
380 | len, ramfs_dsdt_name); | ||
381 | ret = dsdt_buffer; | ||
382 | err: | ||
383 | filp_close(firmware_file, NULL); | ||
384 | return ret; | ||
385 | } | ||
386 | #endif | ||
387 | |||
388 | acpi_status | 323 | acpi_status |
389 | acpi_os_table_override(struct acpi_table_header * existing_table, | 324 | acpi_os_table_override(struct acpi_table_header * existing_table, |
390 | struct acpi_table_header ** new_table) | 325 | struct acpi_table_header ** new_table) |
@@ -398,16 +333,6 @@ acpi_os_table_override(struct acpi_table_header * existing_table, | |||
398 | if (strncmp(existing_table->signature, "DSDT", 4) == 0) | 333 | if (strncmp(existing_table->signature, "DSDT", 4) == 0) |
399 | *new_table = (struct acpi_table_header *)AmlCode; | 334 | *new_table = (struct acpi_table_header *)AmlCode; |
400 | #endif | 335 | #endif |
401 | #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD | ||
402 | if ((strncmp(existing_table->signature, "DSDT", 4) == 0) && | ||
403 | !acpi_no_initrd_override) { | ||
404 | struct acpi_table_header *initrd_table; | ||
405 | |||
406 | initrd_table = acpi_find_dsdt_initrd(); | ||
407 | if (initrd_table) | ||
408 | *new_table = initrd_table; | ||
409 | } | ||
410 | #endif | ||
411 | if (*new_table != NULL) { | 336 | if (*new_table != NULL) { |
412 | printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " | 337 | printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], " |
413 | "this is unsafe: tainting kernel\n", | 338 | "this is unsafe: tainting kernel\n", |
@@ -418,15 +343,6 @@ acpi_os_table_override(struct acpi_table_header * existing_table, | |||
418 | return AE_OK; | 343 | return AE_OK; |
419 | } | 344 | } |
420 | 345 | ||
421 | #ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD | ||
422 | static int __init acpi_no_initrd_override_setup(char *s) | ||
423 | { | ||
424 | acpi_no_initrd_override = 1; | ||
425 | return 1; | ||
426 | } | ||
427 | __setup("acpi_no_initrd_override", acpi_no_initrd_override_setup); | ||
428 | #endif | ||
429 | |||
430 | static irqreturn_t acpi_irq(int irq, void *dev_id) | 346 | static irqreturn_t acpi_irq(int irq, void *dev_id) |
431 | { | 347 | { |
432 | u32 handled; | 348 | u32 handled; |
@@ -1237,7 +1153,7 @@ int acpi_check_resource_conflict(struct resource *res) | |||
1237 | 1153 | ||
1238 | if (clash) { | 1154 | if (clash) { |
1239 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { | 1155 | if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { |
1240 | printk(KERN_INFO "%sACPI: %s resource %s [0x%llx-0x%llx]" | 1156 | printk("%sACPI: %s resource %s [0x%llx-0x%llx]" |
1241 | " conflicts with ACPI region %s" | 1157 | " conflicts with ACPI region %s" |
1242 | " [0x%llx-0x%llx]\n", | 1158 | " [0x%llx-0x%llx]\n", |
1243 | acpi_enforce_resources == ENFORCE_RESOURCES_LAX | 1159 | acpi_enforce_resources == ENFORCE_RESOURCES_LAX |
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7f19859580c7..7af414a3c63e 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -25,6 +25,7 @@ | |||
25 | */ | 25 | */ |
26 | 26 | ||
27 | 27 | ||
28 | #include <linux/dmi.h> | ||
28 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 30 | #include <linux/module.h> |
30 | #include <linux/init.h> | 31 | #include <linux/init.h> |
@@ -76,6 +77,101 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment, | |||
76 | return NULL; | 77 | return NULL; |
77 | } | 78 | } |
78 | 79 | ||
80 | /* http://bugzilla.kernel.org/show_bug.cgi?id=4773 */ | ||
81 | static struct dmi_system_id medion_md9580[] = { | ||
82 | { | ||
83 | .ident = "Medion MD9580-F laptop", | ||
84 | .matches = { | ||
85 | DMI_MATCH(DMI_SYS_VENDOR, "MEDIONNB"), | ||
86 | DMI_MATCH(DMI_PRODUCT_NAME, "A555"), | ||
87 | }, | ||
88 | }, | ||
89 | { } | ||
90 | }; | ||
91 | |||
92 | /* http://bugzilla.kernel.org/show_bug.cgi?id=5044 */ | ||
93 | static struct dmi_system_id dell_optiplex[] = { | ||
94 | { | ||
95 | .ident = "Dell Optiplex GX1", | ||
96 | .matches = { | ||
97 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), | ||
98 | DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX1 600S+"), | ||
99 | }, | ||
100 | }, | ||
101 | { } | ||
102 | }; | ||
103 | |||
104 | /* http://bugzilla.kernel.org/show_bug.cgi?id=10138 */ | ||
105 | static struct dmi_system_id hp_t5710[] = { | ||
106 | { | ||
107 | .ident = "HP t5710", | ||
108 | .matches = { | ||
109 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
110 | DMI_MATCH(DMI_PRODUCT_NAME, "hp t5000 series"), | ||
111 | DMI_MATCH(DMI_BOARD_NAME, "098Ch"), | ||
112 | }, | ||
113 | }, | ||
114 | { } | ||
115 | }; | ||
116 | |||
117 | struct prt_quirk { | ||
118 | struct dmi_system_id *system; | ||
119 | unsigned int segment; | ||
120 | unsigned int bus; | ||
121 | unsigned int device; | ||
122 | unsigned char pin; | ||
123 | char *source; /* according to BIOS */ | ||
124 | char *actual_source; | ||
125 | }; | ||
126 | |||
127 | /* | ||
128 | * These systems have incorrect _PRT entries. The BIOS claims the PCI | ||
129 | * interrupt at the listed segment/bus/device/pin is connected to the first | ||
130 | * link device, but it is actually connected to the second. | ||
131 | */ | ||
132 | static struct prt_quirk prt_quirks[] = { | ||
133 | { medion_md9580, 0, 0, 9, 'A', | ||
134 | "\\_SB_.PCI0.ISA.LNKA", | ||
135 | "\\_SB_.PCI0.ISA.LNKB"}, | ||
136 | { dell_optiplex, 0, 0, 0xd, 'A', | ||
137 | "\\_SB_.LNKB", | ||
138 | "\\_SB_.LNKA"}, | ||
139 | { hp_t5710, 0, 0, 1, 'A', | ||
140 | "\\_SB_.PCI0.LNK1", | ||
141 | "\\_SB_.PCI0.LNK3"}, | ||
142 | }; | ||
143 | |||
144 | static void | ||
145 | do_prt_fixups(struct acpi_prt_entry *entry, struct acpi_pci_routing_table *prt) | ||
146 | { | ||
147 | int i; | ||
148 | struct prt_quirk *quirk; | ||
149 | |||
150 | for (i = 0; i < ARRAY_SIZE(prt_quirks); i++) { | ||
151 | quirk = &prt_quirks[i]; | ||
152 | |||
153 | /* All current quirks involve link devices, not GSIs */ | ||
154 | if (!prt->source) | ||
155 | continue; | ||
156 | |||
157 | if (dmi_check_system(quirk->system) && | ||
158 | entry->id.segment == quirk->segment && | ||
159 | entry->id.bus == quirk->bus && | ||
160 | entry->id.device == quirk->device && | ||
161 | entry->pin + 'A' == quirk->pin && | ||
162 | !strcmp(prt->source, quirk->source) && | ||
163 | strlen(prt->source) >= strlen(quirk->actual_source)) { | ||
164 | printk(KERN_WARNING PREFIX "firmware reports " | ||
165 | "%04x:%02x:%02x[%c] connected to %s; " | ||
166 | "changing to %s\n", | ||
167 | entry->id.segment, entry->id.bus, | ||
168 | entry->id.device, 'A' + entry->pin, | ||
169 | prt->source, quirk->actual_source); | ||
170 | strcpy(prt->source, quirk->actual_source); | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | |||
79 | static int | 175 | static int |
80 | acpi_pci_irq_add_entry(acpi_handle handle, | 176 | acpi_pci_irq_add_entry(acpi_handle handle, |
81 | int segment, int bus, struct acpi_pci_routing_table *prt) | 177 | int segment, int bus, struct acpi_pci_routing_table *prt) |
@@ -96,6 +192,8 @@ acpi_pci_irq_add_entry(acpi_handle handle, | |||
96 | entry->id.function = prt->address & 0xFFFF; | 192 | entry->id.function = prt->address & 0xFFFF; |
97 | entry->pin = prt->pin; | 193 | entry->pin = prt->pin; |
98 | 194 | ||
195 | do_prt_fixups(entry, prt); | ||
196 | |||
99 | /* | 197 | /* |
100 | * Type 1: Dynamic | 198 | * Type 1: Dynamic |
101 | * --------------- | 199 | * --------------- |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index f14ff1ffab29..c3fed31166b5 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -184,7 +184,7 @@ static void acpi_pci_bridge_scan(struct acpi_device *device) | |||
184 | } | 184 | } |
185 | } | 185 | } |
186 | 186 | ||
187 | static int acpi_pci_root_add(struct acpi_device *device) | 187 | static int __devinit acpi_pci_root_add(struct acpi_device *device) |
188 | { | 188 | { |
189 | int result = 0; | 189 | int result = 0; |
190 | struct acpi_pci_root *root = NULL; | 190 | struct acpi_pci_root *root = NULL; |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a3cc8a98255c..36a68fa114e3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -840,17 +840,19 @@ static int is_processor_present(acpi_handle handle) | |||
840 | 840 | ||
841 | 841 | ||
842 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); | 842 | status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); |
843 | /* | ||
844 | * if a processor object does not have an _STA object, | ||
845 | * OSPM assumes that the processor is present. | ||
846 | */ | ||
847 | if (status == AE_NOT_FOUND) | ||
848 | return 1; | ||
849 | 843 | ||
850 | if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT)) | 844 | if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT)) |
851 | return 1; | 845 | return 1; |
852 | 846 | ||
853 | ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present")); | 847 | /* |
848 | * _STA is mandatory for a processor that supports hot plug | ||
849 | */ | ||
850 | if (status == AE_NOT_FOUND) | ||
851 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
852 | "Processor does not support hot plug\n")); | ||
853 | else | ||
854 | ACPI_EXCEPTION((AE_INFO, status, | ||
855 | "Processor Device is not present")); | ||
854 | return 0; | 856 | return 0; |
855 | } | 857 | } |
856 | 858 | ||
@@ -886,8 +888,8 @@ int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) | |||
886 | return 0; | 888 | return 0; |
887 | } | 889 | } |
888 | 890 | ||
889 | static void | 891 | static void __ref acpi_processor_hotplug_notify(acpi_handle handle, |
890 | acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data) | 892 | u32 event, void *data) |
891 | { | 893 | { |
892 | struct acpi_processor *pr; | 894 | struct acpi_processor *pr; |
893 | struct acpi_device *device = NULL; | 895 | struct acpi_device *device = NULL; |
@@ -897,9 +899,10 @@ acpi_processor_hotplug_notify(acpi_handle handle, u32 event, void *data) | |||
897 | switch (event) { | 899 | switch (event) { |
898 | case ACPI_NOTIFY_BUS_CHECK: | 900 | case ACPI_NOTIFY_BUS_CHECK: |
899 | case ACPI_NOTIFY_DEVICE_CHECK: | 901 | case ACPI_NOTIFY_DEVICE_CHECK: |
900 | printk("Processor driver received %s event\n", | 902 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
903 | "Processor driver received %s event\n", | ||
901 | (event == ACPI_NOTIFY_BUS_CHECK) ? | 904 | (event == ACPI_NOTIFY_BUS_CHECK) ? |
902 | "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK"); | 905 | "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK")); |
903 | 906 | ||
904 | if (!is_processor_present(handle)) | 907 | if (!is_processor_present(handle)) |
905 | break; | 908 | break; |
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 3fac011f9cf9..57570ac47803 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -609,7 +609,8 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) | |||
609 | status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); | 609 | status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer); |
610 | if (ACPI_SUCCESS(status)) { | 610 | if (ACPI_SUCCESS(status)) { |
611 | obj = buffer.pointer; | 611 | obj = buffer.pointer; |
612 | status = acpi_get_handle(NULL, obj->string.pointer, ejd); | 612 | status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer, |
613 | ejd); | ||
613 | kfree(buffer.pointer); | 614 | kfree(buffer.pointer); |
614 | } | 615 | } |
615 | return status; | 616 | return status; |
@@ -966,7 +967,7 @@ static void acpi_device_set_id(struct acpi_device *device, | |||
966 | case ACPI_BUS_TYPE_DEVICE: | 967 | case ACPI_BUS_TYPE_DEVICE: |
967 | status = acpi_get_object_info(handle, &buffer); | 968 | status = acpi_get_object_info(handle, &buffer); |
968 | if (ACPI_FAILURE(status)) { | 969 | if (ACPI_FAILURE(status)) { |
969 | printk(KERN_ERR PREFIX "%s: Error reading device info\n", __FUNCTION__); | 970 | printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); |
970 | return; | 971 | return; |
971 | } | 972 | } |
972 | 973 | ||
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 293a1cbb47c0..d2f71a54726c 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -504,7 +504,7 @@ static void acpi_power_off_prepare(void) | |||
504 | static void acpi_power_off(void) | 504 | static void acpi_power_off(void) |
505 | { | 505 | { |
506 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ | 506 | /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ |
507 | printk("%s called\n", __FUNCTION__); | 507 | printk("%s called\n", __func__); |
508 | local_irq_disable(); | 508 | local_irq_disable(); |
509 | acpi_enable_wakeup_device(ACPI_STATE_S5); | 509 | acpi_enable_wakeup_device(ACPI_STATE_S5); |
510 | acpi_enter_sleep_state(ACPI_STATE_S5); | 510 | acpi_enter_sleep_state(ACPI_STATE_S5); |
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 55cf4c05bb74..4749f379a915 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -319,7 +319,7 @@ void acpi_irq_stats_init(void) | |||
319 | goto fail; | 319 | goto fail; |
320 | 320 | ||
321 | for (i = 0; i < num_counters; ++i) { | 321 | for (i = 0; i < num_counters; ++i) { |
322 | char buffer[10]; | 322 | char buffer[12]; |
323 | char *name; | 323 | char *name; |
324 | 324 | ||
325 | if (i < num_gpes) | 325 | if (i < num_gpes) |
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8d4b79b4f933..c4e00ac8ea85 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -879,6 +879,8 @@ static void acpi_thermal_check(void *data) | |||
879 | } | 879 | } |
880 | 880 | ||
881 | /* sys I/F for generic thermal sysfs support */ | 881 | /* sys I/F for generic thermal sysfs support */ |
882 | #define KELVIN_TO_MILLICELSIUS(t) (t * 100 - 273200) | ||
883 | |||
882 | static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf) | 884 | static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf) |
883 | { | 885 | { |
884 | struct acpi_thermal *tz = thermal->devdata; | 886 | struct acpi_thermal *tz = thermal->devdata; |
@@ -886,7 +888,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf) | |||
886 | if (!tz) | 888 | if (!tz) |
887 | return -EINVAL; | 889 | return -EINVAL; |
888 | 890 | ||
889 | return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(tz->temperature)); | 891 | return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(tz->temperature)); |
890 | } | 892 | } |
891 | 893 | ||
892 | static const char enabled[] = "kernel"; | 894 | static const char enabled[] = "kernel"; |
@@ -980,21 +982,21 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, | |||
980 | 982 | ||
981 | if (tz->trips.critical.flags.valid) { | 983 | if (tz->trips.critical.flags.valid) { |
982 | if (!trip) | 984 | if (!trip) |
983 | return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS( | 985 | return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS( |
984 | tz->trips.critical.temperature)); | 986 | tz->trips.critical.temperature)); |
985 | trip--; | 987 | trip--; |
986 | } | 988 | } |
987 | 989 | ||
988 | if (tz->trips.hot.flags.valid) { | 990 | if (tz->trips.hot.flags.valid) { |
989 | if (!trip) | 991 | if (!trip) |
990 | return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS( | 992 | return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS( |
991 | tz->trips.hot.temperature)); | 993 | tz->trips.hot.temperature)); |
992 | trip--; | 994 | trip--; |
993 | } | 995 | } |
994 | 996 | ||
995 | if (tz->trips.passive.flags.valid) { | 997 | if (tz->trips.passive.flags.valid) { |
996 | if (!trip) | 998 | if (!trip) |
997 | return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS( | 999 | return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS( |
998 | tz->trips.passive.temperature)); | 1000 | tz->trips.passive.temperature)); |
999 | trip--; | 1001 | trip--; |
1000 | } | 1002 | } |
@@ -1002,7 +1004,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, | |||
1002 | for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && | 1004 | for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && |
1003 | tz->trips.active[i].flags.valid; i++) { | 1005 | tz->trips.active[i].flags.valid; i++) { |
1004 | if (!trip) | 1006 | if (!trip) |
1005 | return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS( | 1007 | return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS( |
1006 | tz->trips.active[i].temperature)); | 1008 | tz->trips.active[i].temperature)); |
1007 | trip--; | 1009 | trip--; |
1008 | } | 1010 | } |
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index 9e8c20c6a0b7..0a43c8e0eff3 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -99,6 +99,13 @@ MODULE_LICENSE("GPL"); | |||
99 | #define HCI_VIDEO_OUT_CRT 0x2 | 99 | #define HCI_VIDEO_OUT_CRT 0x2 |
100 | #define HCI_VIDEO_OUT_TV 0x4 | 100 | #define HCI_VIDEO_OUT_TV 0x4 |
101 | 101 | ||
102 | static const struct acpi_device_id toshiba_device_ids[] = { | ||
103 | {"TOS6200", 0}, | ||
104 | {"TOS1900", 0}, | ||
105 | {"", 0}, | ||
106 | }; | ||
107 | MODULE_DEVICE_TABLE(acpi, toshiba_device_ids); | ||
108 | |||
102 | /* utility | 109 | /* utility |
103 | */ | 110 | */ |
104 | 111 | ||
diff --git a/drivers/acpi/utilities/utdebug.c b/drivers/acpi/utilities/utdebug.c
index c7e128e5369b..7361204b1eef 100644
--- a/drivers/acpi/utilities/utdebug.c
+++ b/drivers/acpi/utilities/utdebug.c
@@ -109,7 +109,7 @@ void acpi_ut_track_stack_ptr(void) | |||
109 | * RETURN: Updated pointer to the function name | 109 | * RETURN: Updated pointer to the function name |
110 | * | 110 | * |
111 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. | 111 | * DESCRIPTION: Remove the "Acpi" prefix from the function name, if present. |
112 | * This allows compiler macros such as __FUNCTION__ to be used | 112 | * This allows compiler macros such as __func__ to be used |
113 | * with no change to the debug output. | 113 | * with no change to the debug output. |
114 | * | 114 | * |
115 | ******************************************************************************/ | 115 | ******************************************************************************/ |
diff --git a/drivers/acpi/utilities/utobject.c b/drivers/acpi/utilities/utobject.c
index 76ee766c84f9..e08b3fa6639f 100644
--- a/drivers/acpi/utilities/utobject.c
+++ b/drivers/acpi/utilities/utobject.c
@@ -432,7 +432,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object, | |||
432 | * element -- which is legal) | 432 | * element -- which is legal) |
433 | */ | 433 | */ |
434 | if (!internal_object) { | 434 | if (!internal_object) { |
435 | *obj_length = 0; | 435 | *obj_length = sizeof(union acpi_object); |
436 | return_ACPI_STATUS(AE_OK); | 436 | return_ACPI_STATUS(AE_OK); |
437 | } | 437 | } |
438 | 438 | ||
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index eba55b7d6c95..44ea60cf21c0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -407,6 +407,12 @@ acpi_evaluate_reference(acpi_handle handle, | |||
407 | break; | 407 | break; |
408 | } | 408 | } |
409 | 409 | ||
410 | if (!element->reference.handle) { | ||
411 | printk(KERN_WARNING PREFIX "Invalid reference in" | ||
412 | " package %s\n", pathname); | ||
413 | status = AE_NULL_ENTRY; | ||
414 | break; | ||
415 | } | ||
410 | /* Get the acpi_handle. */ | 416 | /* Get the acpi_handle. */ |
411 | 417 | ||
412 | list->handles[i] = element->reference.handle; | 418 | list->handles[i] = element->reference.handle; |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 12cce69b5441..1bc0c74f2755 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -713,7 +713,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device) | |||
713 | 713 | ||
714 | kfree(obj); | 714 | kfree(obj); |
715 | 715 | ||
716 | if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){ | 716 | if (device->cap._BCL && device->cap._BCM && max_level > 0) { |
717 | int result; | 717 | int result; |
718 | static int count = 0; | 718 | static int count = 0; |
719 | char *name; | 719 | char *name; |
@@ -1201,7 +1201,7 @@ static int acpi_video_bus_ROM_seq_show(struct seq_file *seq, void *offset) | |||
1201 | if (!video) | 1201 | if (!video) |
1202 | goto end; | 1202 | goto end; |
1203 | 1203 | ||
1204 | printk(KERN_INFO PREFIX "Please implement %s\n", __FUNCTION__); | 1204 | printk(KERN_INFO PREFIX "Please implement %s\n", __func__); |
1205 | seq_printf(seq, "<TODO>\n"); | 1205 | seq_printf(seq, "<TODO>\n"); |
1206 | 1206 | ||
1207 | end: | 1207 | end: |
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
index efacc9f8bfe3..c33b1c6e93b1 100644
--- a/drivers/acpi/wmi.c
+++ b/drivers/acpi/wmi.c
@@ -293,7 +293,7 @@ struct acpi_buffer *out) | |||
293 | { | 293 | { |
294 | struct guid_block *block = NULL; | 294 | struct guid_block *block = NULL; |
295 | struct wmi_block *wblock = NULL; | 295 | struct wmi_block *wblock = NULL; |
296 | acpi_handle handle; | 296 | acpi_handle handle, wc_handle; |
297 | acpi_status status, wc_status = AE_ERROR; | 297 | acpi_status status, wc_status = AE_ERROR; |
298 | struct acpi_object_list input, wc_input; | 298 | struct acpi_object_list input, wc_input; |
299 | union acpi_object wc_params[1], wq_params[1]; | 299 | union acpi_object wc_params[1], wq_params[1]; |
@@ -338,8 +338,10 @@ struct acpi_buffer *out) | |||
338 | * expensive, but have no corresponding WCxx method. So we | 338 | * expensive, but have no corresponding WCxx method. So we |
339 | * should not fail if this happens. | 339 | * should not fail if this happens. |
340 | */ | 340 | */ |
341 | wc_status = acpi_evaluate_object(handle, wc_method, | 341 | wc_status = acpi_get_handle(handle, wc_method, &wc_handle); |
342 | &wc_input, NULL); | 342 | if (ACPI_SUCCESS(wc_status)) |
343 | wc_status = acpi_evaluate_object(handle, wc_method, | ||
344 | &wc_input, NULL); | ||
343 | } | 345 | } |
344 | 346 | ||
345 | strcpy(method, "WQ"); | 347 | strcpy(method, "WQ"); |
@@ -351,7 +353,7 @@ struct acpi_buffer *out) | |||
351 | * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if | 353 | * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if |
352 | * the WQxx method failed - we should disable collection anyway. | 354 | * the WQxx method failed - we should disable collection anyway. |
353 | */ | 355 | */ |
354 | if ((block->flags & ACPI_WMI_EXPENSIVE) && wc_status) { | 356 | if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) { |
355 | wc_params[0].integer.value = 0; | 357 | wc_params[0].integer.value = 0; |
356 | status = acpi_evaluate_object(handle, | 358 | status = acpi_evaluate_object(handle, |
357 | wc_method, &wc_input, NULL); | 359 | wc_method, &wc_input, NULL); |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index ba8f7f4dfa11..e469647330de 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -538,6 +538,15 @@ config PATA_RADISYS | |||
538 | 538 | ||
539 | If unsure, say N. | 539 | If unsure, say N. |
540 | 540 | ||
541 | config PATA_RB500 | ||
542 | tristate "RouterBoard 500 PATA CompactFlash support" | ||
543 | depends on MIKROTIK_RB500 | ||
544 | help | ||
545 | This option enables support for the RouterBoard 500 | ||
546 | PATA CompactFlash controller. | ||
547 | |||
548 | If unsure, say N. | ||
549 | |||
541 | config PATA_RZ1000 | 550 | config PATA_RZ1000 |
542 | tristate "PC Tech RZ1000 PATA support" | 551 | tristate "PC Tech RZ1000 PATA support" |
543 | depends on PCI | 552 | depends on PCI |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 701651e37c89..0511e6f0bb58 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o | |||
55 | obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o | 55 | obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o |
56 | obj-$(CONFIG_PATA_QDI) += pata_qdi.o | 56 | obj-$(CONFIG_PATA_QDI) += pata_qdi.o |
57 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o | 57 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o |
58 | obj-$(CONFIG_PATA_RB500) += pata_rb500_cf.o | ||
58 | obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o | 59 | obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o |
59 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o | 60 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o |
60 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o | 61 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1db93b619074..17ee6ed985d9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -49,6 +49,10 @@ | |||
49 | #define DRV_NAME "ahci" | 49 | #define DRV_NAME "ahci" |
50 | #define DRV_VERSION "3.0" | 50 | #define DRV_VERSION "3.0" |
51 | 51 | ||
52 | static int ahci_skip_host_reset; | ||
53 | module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444); | ||
54 | MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)"); | ||
55 | |||
52 | static int ahci_enable_alpm(struct ata_port *ap, | 56 | static int ahci_enable_alpm(struct ata_port *ap, |
53 | enum link_pm policy); | 57 | enum link_pm policy); |
54 | static void ahci_disable_alpm(struct ata_port *ap); | 58 | static void ahci_disable_alpm(struct ata_port *ap); |
@@ -186,6 +190,7 @@ enum { | |||
186 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ | 190 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ |
187 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ | 191 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ |
188 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ | 192 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ |
193 | AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ | ||
189 | 194 | ||
190 | /* ap->flags bits */ | 195 | /* ap->flags bits */ |
191 | 196 | ||
@@ -255,6 +260,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap); | |||
255 | static void ahci_p5wdh_error_handler(struct ata_port *ap); | 260 | static void ahci_p5wdh_error_handler(struct ata_port *ap); |
256 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); | 261 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); |
257 | static int ahci_port_resume(struct ata_port *ap); | 262 | static int ahci_port_resume(struct ata_port *ap); |
263 | static void ahci_dev_config(struct ata_device *dev); | ||
258 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); | 264 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); |
259 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, | 265 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, |
260 | u32 opts); | 266 | u32 opts); |
@@ -294,6 +300,8 @@ static const struct ata_port_operations ahci_ops = { | |||
294 | .check_altstatus = ahci_check_status, | 300 | .check_altstatus = ahci_check_status, |
295 | .dev_select = ata_noop_dev_select, | 301 | .dev_select = ata_noop_dev_select, |
296 | 302 | ||
303 | .dev_config = ahci_dev_config, | ||
304 | |||
297 | .tf_read = ahci_tf_read, | 305 | .tf_read = ahci_tf_read, |
298 | 306 | ||
299 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | 307 | .qc_defer = sata_pmp_qc_defer_cmd_switch, |
@@ -425,7 +433,7 @@ static const struct ata_port_info ahci_port_info[] = { | |||
425 | /* board_ahci_sb600 */ | 433 | /* board_ahci_sb600 */ |
426 | { | 434 | { |
427 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | | 435 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | |
428 | AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), | 436 | AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), |
429 | .flags = AHCI_FLAG_COMMON, | 437 | .flags = AHCI_FLAG_COMMON, |
430 | .link_flags = AHCI_LFLAG_COMMON, | 438 | .link_flags = AHCI_LFLAG_COMMON, |
431 | .pio_mask = 0x1f, /* pio0-4 */ | 439 | .pio_mask = 0x1f, /* pio0-4 */ |
@@ -563,6 +571,18 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
563 | { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ | 571 | { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */ |
564 | { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ | 572 | { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */ |
565 | { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ | 573 | { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */ |
574 | { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */ | ||
575 | { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */ | ||
576 | { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */ | ||
577 | { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */ | ||
578 | { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */ | ||
579 | { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */ | ||
580 | { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */ | ||
581 | { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */ | ||
582 | { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */ | ||
583 | { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */ | ||
584 | { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */ | ||
585 | { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */ | ||
566 | 586 | ||
567 | /* SiS */ | 587 | /* SiS */ |
568 | { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ | 588 | { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */ |
@@ -571,6 +591,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
571 | 591 | ||
572 | /* Marvell */ | 592 | /* Marvell */ |
573 | { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ | 593 | { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ |
594 | { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ | ||
574 | 595 | ||
575 | /* Generic, PCI class code for AHCI */ | 596 | /* Generic, PCI class code for AHCI */ |
576 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 597 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
@@ -645,6 +666,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
645 | void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; | 666 | void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR]; |
646 | u32 cap, port_map; | 667 | u32 cap, port_map; |
647 | int i; | 668 | int i; |
669 | int mv; | ||
648 | 670 | ||
649 | /* make sure AHCI mode is enabled before accessing CAP */ | 671 | /* make sure AHCI mode is enabled before accessing CAP */ |
650 | ahci_enable_ahci(mmio); | 672 | ahci_enable_ahci(mmio); |
@@ -668,7 +690,7 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
668 | cap &= ~HOST_CAP_NCQ; | 690 | cap &= ~HOST_CAP_NCQ; |
669 | } | 691 | } |
670 | 692 | ||
671 | if ((cap && HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { | 693 | if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) { |
672 | dev_printk(KERN_INFO, &pdev->dev, | 694 | dev_printk(KERN_INFO, &pdev->dev, |
673 | "controller can't do PMP, turning off CAP_PMP\n"); | 695 | "controller can't do PMP, turning off CAP_PMP\n"); |
674 | cap &= ~HOST_CAP_PMP; | 696 | cap &= ~HOST_CAP_PMP; |
@@ -680,12 +702,16 @@ static void ahci_save_initial_config(struct pci_dev *pdev, | |||
680 | * presence register, as bit 4 (counting from 0) | 702 | * presence register, as bit 4 (counting from 0) |
681 | */ | 703 | */ |
682 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { | 704 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { |
705 | if (pdev->device == 0x6121) | ||
706 | mv = 0x3; | ||
707 | else | ||
708 | mv = 0xf; | ||
683 | dev_printk(KERN_ERR, &pdev->dev, | 709 | dev_printk(KERN_ERR, &pdev->dev, |
684 | "MV_AHCI HACK: port_map %x -> %x\n", | 710 | "MV_AHCI HACK: port_map %x -> %x\n", |
685 | hpriv->port_map, | 711 | port_map, |
686 | hpriv->port_map & 0xf); | 712 | port_map & mv); |
687 | 713 | ||
688 | port_map &= 0xf; | 714 | port_map &= mv; |
689 | } | 715 | } |
690 | 716 | ||
691 | /* cross check port_map and cap.n_ports */ | 717 | /* cross check port_map and cap.n_ports */ |
@@ -1072,29 +1098,35 @@ static int ahci_reset_controller(struct ata_host *host) | |||
1072 | ahci_enable_ahci(mmio); | 1098 | ahci_enable_ahci(mmio); |
1073 | 1099 | ||
1074 | /* global controller reset */ | 1100 | /* global controller reset */ |
1075 | tmp = readl(mmio + HOST_CTL); | 1101 | if (!ahci_skip_host_reset) { |
1076 | if ((tmp & HOST_RESET) == 0) { | 1102 | tmp = readl(mmio + HOST_CTL); |
1077 | writel(tmp | HOST_RESET, mmio + HOST_CTL); | 1103 | if ((tmp & HOST_RESET) == 0) { |
1078 | readl(mmio + HOST_CTL); /* flush */ | 1104 | writel(tmp | HOST_RESET, mmio + HOST_CTL); |
1079 | } | 1105 | readl(mmio + HOST_CTL); /* flush */ |
1106 | } | ||
1080 | 1107 | ||
1081 | /* reset must complete within 1 second, or | 1108 | /* reset must complete within 1 second, or |
1082 | * the hardware should be considered fried. | 1109 | * the hardware should be considered fried. |
1083 | */ | 1110 | */ |
1084 | ssleep(1); | 1111 | ssleep(1); |
1085 | 1112 | ||
1086 | tmp = readl(mmio + HOST_CTL); | 1113 | tmp = readl(mmio + HOST_CTL); |
1087 | if (tmp & HOST_RESET) { | 1114 | if (tmp & HOST_RESET) { |
1088 | dev_printk(KERN_ERR, host->dev, | 1115 | dev_printk(KERN_ERR, host->dev, |
1089 | "controller reset failed (0x%x)\n", tmp); | 1116 | "controller reset failed (0x%x)\n", tmp); |
1090 | return -EIO; | 1117 | return -EIO; |
1091 | } | 1118 | } |
1092 | 1119 | ||
1093 | /* turn on AHCI mode */ | 1120 | /* turn on AHCI mode */ |
1094 | ahci_enable_ahci(mmio); | 1121 | ahci_enable_ahci(mmio); |
1095 | 1122 | ||
1096 | /* some registers might be cleared on reset. restore initial values */ | 1123 | /* Some registers might be cleared on reset. Restore |
1097 | ahci_restore_initial_config(host); | 1124 | * initial values. |
1125 | */ | ||
1126 | ahci_restore_initial_config(host); | ||
1127 | } else | ||
1128 | dev_printk(KERN_INFO, host->dev, | ||
1129 | "skipping global host reset\n"); | ||
1098 | 1130 | ||
1099 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { | 1131 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
1100 | u16 tmp16; | 1132 | u16 tmp16; |
@@ -1146,9 +1178,14 @@ static void ahci_init_controller(struct ata_host *host) | |||
1146 | int i; | 1178 | int i; |
1147 | void __iomem *port_mmio; | 1179 | void __iomem *port_mmio; |
1148 | u32 tmp; | 1180 | u32 tmp; |
1181 | int mv; | ||
1149 | 1182 | ||
1150 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { | 1183 | if (hpriv->flags & AHCI_HFLAG_MV_PATA) { |
1151 | port_mmio = __ahci_port_base(host, 4); | 1184 | if (pdev->device == 0x6121) |
1185 | mv = 2; | ||
1186 | else | ||
1187 | mv = 4; | ||
1188 | port_mmio = __ahci_port_base(host, mv); | ||
1152 | 1189 | ||
1153 | writel(0, port_mmio + PORT_IRQ_MASK); | 1190 | writel(0, port_mmio + PORT_IRQ_MASK); |
1154 | 1191 | ||
@@ -1176,6 +1213,14 @@ static void ahci_init_controller(struct ata_host *host) | |||
1176 | VPRINTK("HOST_CTL 0x%x\n", tmp); | 1213 | VPRINTK("HOST_CTL 0x%x\n", tmp); |
1177 | } | 1214 | } |
1178 | 1215 | ||
1216 | static void ahci_dev_config(struct ata_device *dev) | ||
1217 | { | ||
1218 | struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; | ||
1219 | |||
1220 | if (hpriv->flags & AHCI_HFLAG_SECT255) | ||
1221 | dev->max_sectors = 255; | ||
1222 | } | ||
1223 | |||
1179 | static unsigned int ahci_dev_classify(struct ata_port *ap) | 1224 | static unsigned int ahci_dev_classify(struct ata_port *ap) |
1180 | { | 1225 | { |
1181 | void __iomem *port_mmio = ahci_port_base(ap); | 1226 | void __iomem *port_mmio = ahci_port_base(ap); |
@@ -2217,7 +2262,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2217 | if (rc) | 2262 | if (rc) |
2218 | return rc; | 2263 | return rc; |
2219 | 2264 | ||
2220 | rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); | 2265 | /* AHCI controllers often implement SFF compatible interface. |
2266 | * Grab all PCI BARs just in case. | ||
2267 | */ | ||
2268 | rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME); | ||
2221 | if (rc == -EBUSY) | 2269 | if (rc == -EBUSY) |
2222 | pcim_pin_device(pdev); | 2270 | pcim_pin_device(pdev); |
2223 | if (rc) | 2271 | if (rc) |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 9e8ec19260af..bf98a566adac 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -118,45 +118,77 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap) | |||
118 | ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; | 118 | ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; |
119 | } | 119 | } |
120 | 120 | ||
121 | static void ata_acpi_handle_hotplug(struct ata_port *ap, struct kobject *kobj, | 121 | static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev, |
122 | u32 event) | 122 | u32 event) |
123 | { | 123 | { |
124 | char event_string[12]; | 124 | char event_string[12]; |
125 | char *envp[] = { event_string, NULL }; | 125 | char *envp[] = { event_string, NULL }; |
126 | struct ata_eh_info *ehi = &ap->link.eh_info; | 126 | struct ata_eh_info *ehi; |
127 | 127 | struct kobject *kobj = NULL; | |
128 | if (event == 0 || event == 1) { | 128 | int wait = 0; |
129 | unsigned long flags; | 129 | unsigned long flags; |
130 | spin_lock_irqsave(ap->lock, flags); | 130 | |
131 | ata_ehi_clear_desc(ehi); | 131 | if (!ap) |
132 | ata_ehi_push_desc(ehi, "ACPI event"); | 132 | ap = dev->link->ap; |
133 | ata_ehi_hotplugged(ehi); | 133 | ehi = &ap->link.eh_info; |
134 | ata_port_freeze(ap); | 134 | |
135 | spin_unlock_irqrestore(ap->lock, flags); | 135 | spin_lock_irqsave(ap->lock, flags); |
136 | |||
137 | switch (event) { | ||
138 | case ACPI_NOTIFY_BUS_CHECK: | ||
139 | case ACPI_NOTIFY_DEVICE_CHECK: | ||
140 | ata_ehi_push_desc(ehi, "ACPI event"); | ||
141 | ata_ehi_hotplugged(ehi); | ||
142 | ata_port_freeze(ap); | ||
143 | break; | ||
144 | |||
145 | case ACPI_NOTIFY_EJECT_REQUEST: | ||
146 | ata_ehi_push_desc(ehi, "ACPI event"); | ||
147 | if (dev) | ||
148 | dev->flags |= ATA_DFLAG_DETACH; | ||
149 | else { | ||
150 | struct ata_link *tlink; | ||
151 | struct ata_device *tdev; | ||
152 | |||
153 | ata_port_for_each_link(tlink, ap) | ||
154 | ata_link_for_each_dev(tdev, tlink) | ||
155 | tdev->flags |= ATA_DFLAG_DETACH; | ||
156 | } | ||
157 | |||
158 | ata_port_schedule_eh(ap); | ||
159 | wait = 1; | ||
160 | break; | ||
136 | } | 161 | } |
137 | 162 | ||
163 | if (dev) { | ||
164 | if (dev->sdev) | ||
165 | kobj = &dev->sdev->sdev_gendev.kobj; | ||
166 | } else | ||
167 | kobj = &ap->dev->kobj; | ||
168 | |||
138 | if (kobj) { | 169 | if (kobj) { |
139 | sprintf(event_string, "BAY_EVENT=%d", event); | 170 | sprintf(event_string, "BAY_EVENT=%d", event); |
140 | kobject_uevent_env(kobj, KOBJ_CHANGE, envp); | 171 | kobject_uevent_env(kobj, KOBJ_CHANGE, envp); |
141 | } | 172 | } |
173 | |||
174 | spin_unlock_irqrestore(ap->lock, flags); | ||
175 | |||
176 | if (wait) | ||
177 | ata_port_wait_eh(ap); | ||
142 | } | 178 | } |
143 | 179 | ||
144 | static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) | 180 | static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data) |
145 | { | 181 | { |
146 | struct ata_device *dev = data; | 182 | struct ata_device *dev = data; |
147 | struct kobject *kobj = NULL; | ||
148 | 183 | ||
149 | if (dev->sdev) | 184 | ata_acpi_handle_hotplug(NULL, dev, event); |
150 | kobj = &dev->sdev->sdev_gendev.kobj; | ||
151 | |||
152 | ata_acpi_handle_hotplug(dev->link->ap, kobj, event); | ||
153 | } | 185 | } |
154 | 186 | ||
155 | static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) | 187 | static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) |
156 | { | 188 | { |
157 | struct ata_port *ap = data; | 189 | struct ata_port *ap = data; |
158 | 190 | ||
159 | ata_acpi_handle_hotplug(ap, &ap->dev->kobj, event); | 191 | ata_acpi_handle_hotplug(ap, NULL, event); |
160 | } | 192 | } |
161 | 193 | ||
162 | /** | 194 | /** |
@@ -191,20 +223,30 @@ void ata_acpi_associate(struct ata_host *host) | |||
191 | else | 223 | else |
192 | ata_acpi_associate_ide_port(ap); | 224 | ata_acpi_associate_ide_port(ap); |
193 | 225 | ||
194 | if (ap->acpi_handle) | 226 | if (ap->acpi_handle) { |
195 | acpi_install_notify_handler (ap->acpi_handle, | 227 | acpi_install_notify_handler(ap->acpi_handle, |
196 | ACPI_SYSTEM_NOTIFY, | 228 | ACPI_SYSTEM_NOTIFY, |
197 | ata_acpi_ap_notify, | 229 | ata_acpi_ap_notify, ap); |
198 | ap); | 230 | #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) |
231 | /* we might be on a docking station */ | ||
232 | register_hotplug_dock_device(ap->acpi_handle, | ||
233 | ata_acpi_ap_notify, ap); | ||
234 | #endif | ||
235 | } | ||
199 | 236 | ||
200 | for (j = 0; j < ata_link_max_devices(&ap->link); j++) { | 237 | for (j = 0; j < ata_link_max_devices(&ap->link); j++) { |
201 | struct ata_device *dev = &ap->link.device[j]; | 238 | struct ata_device *dev = &ap->link.device[j]; |
202 | 239 | ||
203 | if (dev->acpi_handle) | 240 | if (dev->acpi_handle) { |
204 | acpi_install_notify_handler (dev->acpi_handle, | 241 | acpi_install_notify_handler(dev->acpi_handle, |
205 | ACPI_SYSTEM_NOTIFY, | 242 | ACPI_SYSTEM_NOTIFY, |
206 | ata_acpi_dev_notify, | 243 | ata_acpi_dev_notify, dev); |
207 | dev); | 244 | #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) |
245 | /* we might be on a docking station */ | ||
246 | register_hotplug_dock_device(dev->acpi_handle, | ||
247 | ata_acpi_dev_notify, dev); | ||
248 | #endif | ||
249 | } | ||
208 | } | 250 | } |
209 | } | 251 | } |
210 | } | 252 | } |
@@ -382,7 +424,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) | |||
382 | 424 | ||
383 | if (ata_msg_probe(ap)) | 425 | if (ata_msg_probe(ap)) |
384 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n", | 426 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n", |
385 | __FUNCTION__, ap->port_no); | 427 | __func__, ap->port_no); |
386 | 428 | ||
387 | /* _GTF has no input parameters */ | 429 | /* _GTF has no input parameters */ |
388 | status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output); | 430 | status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output); |
@@ -402,7 +444,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) | |||
402 | if (ata_msg_probe(ap)) | 444 | if (ata_msg_probe(ap)) |
403 | ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: " | 445 | ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: " |
404 | "length or ptr is NULL (0x%llx, 0x%p)\n", | 446 | "length or ptr is NULL (0x%llx, 0x%p)\n", |
405 | __FUNCTION__, | 447 | __func__, |
406 | (unsigned long long)output.length, | 448 | (unsigned long long)output.length, |
407 | output.pointer); | 449 | output.pointer); |
408 | rc = -EINVAL; | 450 | rc = -EINVAL; |
@@ -432,7 +474,7 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf) | |||
432 | if (ata_msg_probe(ap)) | 474 | if (ata_msg_probe(ap)) |
433 | ata_dev_printk(dev, KERN_DEBUG, | 475 | ata_dev_printk(dev, KERN_DEBUG, |
434 | "%s: returning gtf=%p, gtf_count=%d\n", | 476 | "%s: returning gtf=%p, gtf_count=%d\n", |
435 | __FUNCTION__, *gtf, rc); | 477 | __func__, *gtf, rc); |
436 | } | 478 | } |
437 | return rc; | 479 | return rc; |
438 | 480 | ||
@@ -725,7 +767,7 @@ static int ata_acpi_push_id(struct ata_device *dev) | |||
725 | 767 | ||
726 | if (ata_msg_probe(ap)) | 768 | if (ata_msg_probe(ap)) |
727 | ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n", | 769 | ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n", |
728 | __FUNCTION__, dev->devno, ap->port_no); | 770 | __func__, dev->devno, ap->port_no); |
729 | 771 | ||
730 | /* Give the drive Identify data to the drive via the _SDD method */ | 772 | /* Give the drive Identify data to the drive via the _SDD method */ |
731 | /* _SDD: set up input parameters */ | 773 | /* _SDD: set up input parameters */ |
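The libata-acpi hunks above register the same notify callbacks a second time with the ACPI dock driver, guarded so the calls exist only when dock support is built in, either statically or as a module. The guard is the standard kconfig idiom for "feature present in any form"; a tiny stand-alone illustration (CONFIG_FOO is a made-up option, not part of this patch):

    #include <stdio.h>

    /*
     * For a tristate option FOO, the kernel build defines CONFIG_FOO when it
     * is built in and CONFIG_FOO_MODULE when it is built as a module, so a
     * builtin caller has to test both to know whether the feature exists.
     */
    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    #define FOO_AVAILABLE 1
    #else
    #define FOO_AVAILABLE 0
    #endif

    int main(void)
    {
            printf("optional feature available: %s\n",
                   FOO_AVAILABLE ? "yes" : "no");
            return 0;
    }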
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index fbc24358ada0..4bbe31f98ef8 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -106,14 +106,15 @@ static struct ata_force_ent *ata_force_tbl; | |||
106 | static int ata_force_tbl_size; | 106 | static int ata_force_tbl_size; |
107 | 107 | ||
108 | static char ata_force_param_buf[PAGE_SIZE] __initdata; | 108 | static char ata_force_param_buf[PAGE_SIZE] __initdata; |
109 | module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444); | 109 | /* param_buf is thrown away after initialization, disallow read */ |
110 | module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); | ||
110 | MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); | 111 | MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); |
111 | 112 | ||
112 | int atapi_enabled = 1; | 113 | int atapi_enabled = 1; |
113 | module_param(atapi_enabled, int, 0444); | 114 | module_param(atapi_enabled, int, 0444); |
114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 115 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); |
115 | 116 | ||
116 | int atapi_dmadir = 0; | 117 | static int atapi_dmadir = 0; |
117 | module_param(atapi_dmadir, int, 0444); | 118 | module_param(atapi_dmadir, int, 0444); |
118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 119 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); |
119 | 120 | ||
@@ -1719,7 +1720,7 @@ void ata_port_flush_task(struct ata_port *ap) | |||
1719 | cancel_rearming_delayed_work(&ap->port_task); | 1720 | cancel_rearming_delayed_work(&ap->port_task); |
1720 | 1721 | ||
1721 | if (ata_msg_ctl(ap)) | 1722 | if (ata_msg_ctl(ap)) |
1722 | ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__); | 1723 | ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); |
1723 | } | 1724 | } |
1724 | 1725 | ||
1725 | static void ata_qc_complete_internal(struct ata_queued_cmd *qc) | 1726 | static void ata_qc_complete_internal(struct ata_queued_cmd *qc) |
@@ -2056,7 +2057,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, | |||
2056 | int rc; | 2057 | int rc; |
2057 | 2058 | ||
2058 | if (ata_msg_ctl(ap)) | 2059 | if (ata_msg_ctl(ap)) |
2059 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); | 2060 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); |
2060 | 2061 | ||
2061 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ | 2062 | ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ |
2062 | retry: | 2063 | retry: |
@@ -2253,12 +2254,12 @@ int ata_dev_configure(struct ata_device *dev) | |||
2253 | 2254 | ||
2254 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { | 2255 | if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { |
2255 | ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", | 2256 | ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", |
2256 | __FUNCTION__); | 2257 | __func__); |
2257 | return 0; | 2258 | return 0; |
2258 | } | 2259 | } |
2259 | 2260 | ||
2260 | if (ata_msg_probe(ap)) | 2261 | if (ata_msg_probe(ap)) |
2261 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); | 2262 | ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); |
2262 | 2263 | ||
2263 | /* set horkage */ | 2264 | /* set horkage */ |
2264 | dev->horkage |= ata_dev_blacklisted(dev); | 2265 | dev->horkage |= ata_dev_blacklisted(dev); |
@@ -2279,7 +2280,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2279 | ata_dev_printk(dev, KERN_DEBUG, | 2280 | ata_dev_printk(dev, KERN_DEBUG, |
2280 | "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " | 2281 | "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " |
2281 | "85:%04x 86:%04x 87:%04x 88:%04x\n", | 2282 | "85:%04x 86:%04x 87:%04x 88:%04x\n", |
2282 | __FUNCTION__, | 2283 | __func__, |
2283 | id[49], id[82], id[83], id[84], | 2284 | id[49], id[82], id[83], id[84], |
2284 | id[85], id[86], id[87], id[88]); | 2285 | id[85], id[86], id[87], id[88]); |
2285 | 2286 | ||
@@ -2511,13 +2512,13 @@ int ata_dev_configure(struct ata_device *dev) | |||
2511 | 2512 | ||
2512 | if (ata_msg_probe(ap)) | 2513 | if (ata_msg_probe(ap)) |
2513 | ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", | 2514 | ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", |
2514 | __FUNCTION__, ata_chk_status(ap)); | 2515 | __func__, ata_chk_status(ap)); |
2515 | return 0; | 2516 | return 0; |
2516 | 2517 | ||
2517 | err_out_nosup: | 2518 | err_out_nosup: |
2518 | if (ata_msg_probe(ap)) | 2519 | if (ata_msg_probe(ap)) |
2519 | ata_dev_printk(dev, KERN_DEBUG, | 2520 | ata_dev_printk(dev, KERN_DEBUG, |
2520 | "%s: EXIT, err\n", __FUNCTION__); | 2521 | "%s: EXIT, err\n", __func__); |
2521 | return rc; | 2522 | return rc; |
2522 | } | 2523 | } |
2523 | 2524 | ||
@@ -6567,6 +6568,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6567 | ata_lpm_enable(host); | 6568 | ata_lpm_enable(host); |
6568 | 6569 | ||
6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 6570 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
6571 | if (rc == 0) | ||
6572 | host->dev->power.power_state = mesg; | ||
6570 | return rc; | 6573 | return rc; |
6571 | } | 6574 | } |
6572 | 6575 | ||
@@ -6585,6 +6588,7 @@ void ata_host_resume(struct ata_host *host) | |||
6585 | { | 6588 | { |
6586 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 6589 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, |
6587 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 6590 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6591 | host->dev->power.power_state = PMSG_ON; | ||
6588 | 6592 | ||
6589 | /* reenable link pm */ | 6593 | /* reenable link pm */ |
6590 | ata_lpm_disable(host); | 6594 | ata_lpm_disable(host); |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 698ce2cea52c..681252fd8143 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -2150,6 +2150,15 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2150 | ap->ops->set_piomode(ap, dev); | 2150 | ap->ops->set_piomode(ap, dev); |
2151 | } | 2151 | } |
2152 | 2152 | ||
2153 | if (!softreset && !hardreset) { | ||
2154 | if (verbose) | ||
2155 | ata_link_printk(link, KERN_INFO, "no reset method " | ||
2156 | "available, skipping reset\n"); | ||
2157 | if (!(lflags & ATA_LFLAG_ASSUME_CLASS)) | ||
2158 | lflags |= ATA_LFLAG_ASSUME_ATA; | ||
2159 | goto done; | ||
2160 | } | ||
2161 | |||
2153 | /* Determine which reset to use and record in ehc->i.action. | 2162 | /* Determine which reset to use and record in ehc->i.action. |
2154 | * prereset() may examine and modify it. | 2163 | * prereset() may examine and modify it. |
2155 | */ | 2164 | */ |
@@ -2254,6 +2263,7 @@ int ata_eh_reset(struct ata_link *link, int classify, | |||
2254 | lflags |= ATA_LFLAG_ASSUME_ATA; | 2263 | lflags |= ATA_LFLAG_ASSUME_ATA; |
2255 | } | 2264 | } |
2256 | 2265 | ||
2266 | done: | ||
2257 | ata_link_for_each_dev(dev, link) { | 2267 | ata_link_for_each_dev(dev, link) { |
2258 | /* After the reset, the device state is PIO 0 and the | 2268 | /* After the reset, the device state is PIO 0 and the |
2259 | * controller state is undefined. Reset also wakes up | 2269 | * controller state is undefined. Reset also wakes up |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0562b0a49f3b..8f0e8f2bc628 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, | |||
862 | struct request_queue *q = sdev->request_queue; | 862 | struct request_queue *q = sdev->request_queue; |
863 | void *buf; | 863 | void *buf; |
864 | 864 | ||
865 | /* set the min alignment */ | 865 | /* set the min alignment and padding */ |
866 | blk_queue_update_dma_alignment(sdev->request_queue, | 866 | blk_queue_update_dma_alignment(sdev->request_queue, |
867 | ATA_DMA_PAD_SZ - 1); | 867 | ATA_DMA_PAD_SZ - 1); |
868 | blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); | ||
868 | 869 | ||
869 | /* configure draining */ | 870 | /* configure draining */ |
870 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); | 871 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); |
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
1694 | u8 *rbuf; | 1695 | u8 *rbuf; |
1695 | unsigned int buflen, rc; | 1696 | unsigned int buflen, rc; |
1696 | struct scsi_cmnd *cmd = args->cmd; | 1697 | struct scsi_cmnd *cmd = args->cmd; |
1698 | unsigned long flags; | ||
1699 | |||
1700 | local_irq_save(flags); | ||
1697 | 1701 | ||
1698 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); | 1702 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); |
1699 | memset(rbuf, 0, buflen); | 1703 | memset(rbuf, 0, buflen); |
1700 | rc = actor(args, rbuf, buflen); | 1704 | rc = actor(args, rbuf, buflen); |
1701 | ata_scsi_rbuf_put(cmd, rbuf); | 1705 | ata_scsi_rbuf_put(cmd, rbuf); |
1702 | 1706 | ||
1707 | local_irq_restore(flags); | ||
1708 | |||
1703 | if (rc == 0) | 1709 | if (rc == 0) |
1704 | cmd->result = SAM_STAT_GOOD; | 1710 | cmd->result = SAM_STAT_GOOD; |
1705 | args->done(cmd); | 1711 | args->done(cmd); |
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2473 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { | 2479 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { |
2474 | u8 *buf = NULL; | 2480 | u8 *buf = NULL; |
2475 | unsigned int buflen; | 2481 | unsigned int buflen; |
2482 | unsigned long flags; | ||
2483 | |||
2484 | local_irq_save(flags); | ||
2476 | 2485 | ||
2477 | buflen = ata_scsi_rbuf_get(cmd, &buf); | 2486 | buflen = ata_scsi_rbuf_get(cmd, &buf); |
2478 | 2487 | ||
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2490 | } | 2499 | } |
2491 | 2500 | ||
2492 | ata_scsi_rbuf_put(cmd, buf); | 2501 | ata_scsi_rbuf_put(cmd, buf); |
2502 | |||
2503 | local_irq_restore(flags); | ||
2493 | } | 2504 | } |
2494 | 2505 | ||
2495 | cmd->result = SAM_STAT_GOOD; | 2506 | cmd->result = SAM_STAT_GOOD; |
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2528 | } | 2539 | } |
2529 | 2540 | ||
2530 | qc->tf.command = ATA_CMD_PACKET; | 2541 | qc->tf.command = ATA_CMD_PACKET; |
2531 | qc->nbytes = scsi_bufflen(scmd); | 2542 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2532 | 2543 | ||
2533 | /* check whether ATAPI DMA is safe */ | 2544 | /* check whether ATAPI DMA is safe */ |
2534 | if (!using_pio && ata_check_atapi_dma(qc)) | 2545 | if (!using_pio && ata_check_atapi_dma(qc)) |
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2539 | * want to set it properly, and for DMA where it is | 2550 | * want to set it properly, and for DMA where it is |
2540 | * effectively meaningless. | 2551 | * effectively meaningless. |
2541 | */ | 2552 | */ |
2542 | nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); | 2553 | nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); |
2543 | 2554 | ||
2544 | /* Most ATAPI devices which honor transfer chunk size don't | 2555 | /* Most ATAPI devices which honor transfer chunk size don't |
2545 | * behave according to the spec when odd chunk size which | 2556 | * behave according to the spec when odd chunk size which |
@@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2865 | * TODO: find out if we need to do more here to | 2876 | * TODO: find out if we need to do more here to |
2866 | * cover scatter/gather case. | 2877 | * cover scatter/gather case. |
2867 | */ | 2878 | */ |
2868 | qc->nbytes = scsi_bufflen(scmd); | 2879 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2869 | 2880 | ||
2870 | /* request result TF and be quiet about device error */ | 2881 | /* request result TF and be quiet about device error */ |
2871 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | 2882 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; |
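Two related changes run through the libata-scsi hunks above: the SCSI queue is now told about the ATA DMA pad boundary via blk_queue_dma_pad(), and qc->nbytes for ATAPI and pass-through commands includes scmd->request->extra_len, the bytes the block layer appends to reach that boundary. The arithmetic is easiest to see in isolation; the sketch below assumes ATA_DMA_PAD_SZ is 4, its value in this era's <linux/libata.h>:

    #include <stdio.h>

    #define ATA_DMA_PAD_SZ 4        /* assumed pad boundary */

    /* Round a transfer length up to the pad boundary, as the block layer
     * does once blk_queue_dma_pad(q, ATA_DMA_PAD_SZ - 1) is in effect. */
    static unsigned int pad_up(unsigned int len)
    {
            return (len + ATA_DMA_PAD_SZ - 1) & ~(unsigned int)(ATA_DMA_PAD_SZ - 1);
    }

    int main(void)
    {
            unsigned int bufflen   = 510;                       /* scsi_bufflen(scmd)  */
            unsigned int extra_len = pad_up(bufflen) - bufflen; /* request->extra_len  */

            /* qc->nbytes must match what the DMA engine will really move */
            printf("bufflen=%u extra_len=%u nbytes=%u\n",
                   bufflen, extra_len, bufflen + extra_len);
            return 0;
    }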
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 60cd4b179766..20dc572fb45a 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c | |||
@@ -56,7 +56,8 @@ u8 ata_irq_on(struct ata_port *ap) | |||
56 | ap->ctl &= ~ATA_NIEN; | 56 | ap->ctl &= ~ATA_NIEN; |
57 | ap->last_ctl = ap->ctl; | 57 | ap->last_ctl = ap->ctl; |
58 | 58 | ||
59 | iowrite8(ap->ctl, ioaddr->ctl_addr); | 59 | if (ioaddr->ctl_addr) |
60 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
60 | tmp = ata_wait_idle(ap); | 61 | tmp = ata_wait_idle(ap); |
61 | 62 | ||
62 | ap->ops->irq_clear(ap); | 63 | ap->ops->irq_clear(ap); |
@@ -81,12 +82,14 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) | |||
81 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | 82 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; |
82 | 83 | ||
83 | if (tf->ctl != ap->last_ctl) { | 84 | if (tf->ctl != ap->last_ctl) { |
84 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 85 | if (ioaddr->ctl_addr) |
86 | iowrite8(tf->ctl, ioaddr->ctl_addr); | ||
85 | ap->last_ctl = tf->ctl; | 87 | ap->last_ctl = tf->ctl; |
86 | ata_wait_idle(ap); | 88 | ata_wait_idle(ap); |
87 | } | 89 | } |
88 | 90 | ||
89 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | 91 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { |
92 | WARN_ON(!ioaddr->ctl_addr); | ||
90 | iowrite8(tf->hob_feature, ioaddr->feature_addr); | 93 | iowrite8(tf->hob_feature, ioaddr->feature_addr); |
91 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); | 94 | iowrite8(tf->hob_nsect, ioaddr->nsect_addr); |
92 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); | 95 | iowrite8(tf->hob_lbal, ioaddr->lbal_addr); |
@@ -167,14 +170,17 @@ void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) | |||
167 | tf->device = ioread8(ioaddr->device_addr); | 170 | tf->device = ioread8(ioaddr->device_addr); |
168 | 171 | ||
169 | if (tf->flags & ATA_TFLAG_LBA48) { | 172 | if (tf->flags & ATA_TFLAG_LBA48) { |
170 | iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); | 173 | if (likely(ioaddr->ctl_addr)) { |
171 | tf->hob_feature = ioread8(ioaddr->error_addr); | 174 | iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr); |
172 | tf->hob_nsect = ioread8(ioaddr->nsect_addr); | 175 | tf->hob_feature = ioread8(ioaddr->error_addr); |
173 | tf->hob_lbal = ioread8(ioaddr->lbal_addr); | 176 | tf->hob_nsect = ioread8(ioaddr->nsect_addr); |
174 | tf->hob_lbam = ioread8(ioaddr->lbam_addr); | 177 | tf->hob_lbal = ioread8(ioaddr->lbal_addr); |
175 | tf->hob_lbah = ioread8(ioaddr->lbah_addr); | 178 | tf->hob_lbam = ioread8(ioaddr->lbam_addr); |
176 | iowrite8(tf->ctl, ioaddr->ctl_addr); | 179 | tf->hob_lbah = ioread8(ioaddr->lbah_addr); |
177 | ap->last_ctl = tf->ctl; | 180 | iowrite8(tf->ctl, ioaddr->ctl_addr); |
181 | ap->last_ctl = tf->ctl; | ||
182 | } else | ||
183 | WARN_ON(1); | ||
178 | } | 184 | } |
179 | } | 185 | } |
180 | 186 | ||
@@ -352,7 +358,8 @@ void ata_bmdma_freeze(struct ata_port *ap) | |||
352 | ap->ctl |= ATA_NIEN; | 358 | ap->ctl |= ATA_NIEN; |
353 | ap->last_ctl = ap->ctl; | 359 | ap->last_ctl = ap->ctl; |
354 | 360 | ||
355 | iowrite8(ap->ctl, ioaddr->ctl_addr); | 361 | if (ioaddr->ctl_addr) |
362 | iowrite8(ap->ctl, ioaddr->ctl_addr); | ||
356 | 363 | ||
357 | /* Under certain circumstances, some controllers raise IRQ on | 364 | /* Under certain circumstances, some controllers raise IRQ on |
358 | * ATA_NIEN manipulation. Also, many controllers fail to mask | 365 | * ATA_NIEN manipulation. Also, many controllers fail to mask |
@@ -459,13 +466,14 @@ void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, | |||
459 | */ | 466 | */ |
460 | void ata_bmdma_error_handler(struct ata_port *ap) | 467 | void ata_bmdma_error_handler(struct ata_port *ap) |
461 | { | 468 | { |
462 | ata_reset_fn_t hardreset; | 469 | ata_reset_fn_t softreset = NULL, hardreset = NULL; |
463 | 470 | ||
464 | hardreset = NULL; | 471 | if (ap->ioaddr.ctl_addr) |
472 | softreset = ata_std_softreset; | ||
465 | if (sata_scr_valid(&ap->link)) | 473 | if (sata_scr_valid(&ap->link)) |
466 | hardreset = sata_std_hardreset; | 474 | hardreset = sata_std_hardreset; |
467 | 475 | ||
468 | ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset, | 476 | ata_bmdma_drive_eh(ap, ata_std_prereset, softreset, hardreset, |
469 | ata_std_postreset); | 477 | ata_std_postreset); |
470 | } | 478 | } |
471 | 479 | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 6036dedfe377..aa884f71a12a 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -56,7 +56,6 @@ enum { | |||
56 | extern unsigned int ata_print_id; | 56 | extern unsigned int ata_print_id; |
57 | extern struct workqueue_struct *ata_aux_wq; | 57 | extern struct workqueue_struct *ata_aux_wq; |
58 | extern int atapi_enabled; | 58 | extern int atapi_enabled; |
59 | extern int atapi_dmadir; | ||
60 | extern int atapi_passthru16; | 59 | extern int atapi_passthru16; |
61 | extern int libata_fua; | 60 | extern int libata_fua; |
62 | extern int libata_noacpi; | 61 | extern int libata_noacpi; |
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 7e68edf3c0f3..8786455c901d 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c | |||
@@ -295,7 +295,7 @@ static void ali_lock_sectors(struct ata_device *adev) | |||
295 | static int ali_check_atapi_dma(struct ata_queued_cmd *qc) | 295 | static int ali_check_atapi_dma(struct ata_queued_cmd *qc) |
296 | { | 296 | { |
297 | /* If it's not a media command, it's not worth it */ | 297 | /* If it's not a media command, it's not worth it */ |

298 | if (qc->nbytes < 2048) | 298 | if (atapi_cmd_type(qc->cdb[0]) == ATAPI_MISC) |
299 | return -EOPNOTSUPP; | 299 | return -EOPNOTSUPP; |
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 0713872cf65c..a742efa0da2b 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/libata.h> | 27 | #include <linux/libata.h> |
28 | 28 | ||
29 | #define DRV_NAME "pata_hpt366" | 29 | #define DRV_NAME "pata_hpt366" |
30 | #define DRV_VERSION "0.6.1" | 30 | #define DRV_VERSION "0.6.2" |
31 | 31 | ||
32 | struct hpt_clock { | 32 | struct hpt_clock { |
33 | u8 xfer_speed; | 33 | u8 xfer_speed; |
@@ -180,9 +180,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
181 | mask &= ~ATA_MASK_UDMA; | 181 | mask &= ~ATA_MASK_UDMA; |
182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) | 182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) |
183 | mask &= ~(0x07 << ATA_SHIFT_UDMA); | 183 | mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) | 184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) |
185 | mask &= ~(0x0F << ATA_SHIFT_UDMA); | 185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); |
186 | } | 186 | } |
187 | return ata_pci_default_filter(adev, mask); | 187 | return ata_pci_default_filter(adev, mask); |
188 | } | 188 | } |
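This filter fix, together with the matching hpt37x and serverworks hunks further down, inverts which UDMA bits get cleared for blacklisted drives. Assuming ATA_SHIFT_UDMA is 8, as in this era's <linux/ata.h>, 0x07 << 8 covers UDMA0-2 and 0xF8 << 8 covers UDMA3-7, so the old code removed the slow modes and left the troublesome fast ones selectable; the new masks clear the fast modes instead. A stand-alone check of that arithmetic:

    #include <stdio.h>

    #define ATA_SHIFT_UDMA 8        /* assumed: UDMA bits sit at 8..14 */

    static void show(const char *tag, unsigned long mask)
    {
            int mode;

            printf("%-30s UDMA modes left enabled:", tag);
            for (mode = 0; mode <= 6; mode++)
                    if (mask & (1UL << (ATA_SHIFT_UDMA + mode)))
                            printf(" %d", mode);
            printf("\n");
    }

    int main(void)
    {
            unsigned long full = 0x7fUL << ATA_SHIFT_UDMA;  /* drive claims UDMA0-6 */

            show("old: mask &= ~(0x07 << shift)", full & ~(0x07UL << ATA_SHIFT_UDMA));
            show("new: mask &= ~(0xF8 << shift)", full & ~(0xF8UL << ATA_SHIFT_UDMA));
            return 0;
    }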
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 68eb34929cec..9a10878b2ad8 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/libata.h> | 24 | #include <linux/libata.h> |
25 | 25 | ||
26 | #define DRV_NAME "pata_hpt37x" | 26 | #define DRV_NAME "pata_hpt37x" |
27 | #define DRV_VERSION "0.6.9" | 27 | #define DRV_VERSION "0.6.11" |
28 | 28 | ||
29 | struct hpt_clock { | 29 | struct hpt_clock { |
30 | u8 xfer_speed; | 30 | u8 xfer_speed; |
@@ -281,7 +281,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) | |||
281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
282 | mask &= ~ATA_MASK_UDMA; | 282 | mask &= ~ATA_MASK_UDMA; |
283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
284 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 284 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
285 | } | 285 | } |
286 | return ata_pci_default_filter(adev, mask); | 286 | return ata_pci_default_filter(adev, mask); |
287 | } | 287 | } |
@@ -297,7 +297,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) | |||
297 | { | 297 | { |
298 | if (adev->class == ATA_DEV_ATA) { | 298 | if (adev->class == ATA_DEV_ATA) { |
299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
300 | mask &= ~ (0x1F << ATA_SHIFT_UDMA); | 300 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
301 | } | 301 | } |
302 | return ata_pci_default_filter(adev, mask); | 302 | return ata_pci_default_filter(adev, mask); |
303 | } | 303 | } |
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index 028af5dbeed6..511c89b9bae8 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #undef PDC_DEBUG | 39 | #undef PDC_DEBUG |
40 | 40 | ||
41 | #ifdef PDC_DEBUG | 41 | #ifdef PDC_DEBUG |
42 | #define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) | 42 | #define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) |
43 | #else | 43 | #else |
44 | #define PDPRINTK(fmt, args...) | 44 | #define PDPRINTK(fmt, args...) |
45 | #endif | 45 | #endif |
diff --git a/drivers/ata/pata_rb500_cf.c b/drivers/ata/pata_rb500_cf.c new file mode 100644 index 000000000000..4ce9b03fe6c8 --- /dev/null +++ b/drivers/ata/pata_rb500_cf.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * A low-level PATA driver to handle a Compact Flash connected on | ||
3 | * Mikrotik's RouterBoard 532 board. | ||
4 | * | ||
5 | * Copyright (C) 2007 Gabor Juhos <juhosg at openwrt.org> | ||
6 | * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org> | ||
7 | * | ||
8 | * This file was based on: drivers/ata/pata_ixp4xx_cf.c | ||
9 | * Copyright (C) 2006-07 Tower Technologies | ||
10 | * Author: Alessandro Zummo <a.zummo@towertech.it> | ||
11 | * | ||
12 | * Also was based on the driver for Linux 2.4.xx published by Mikrotik for | ||
13 | * their RouterBoard 1xx and 5xx series devices. The original Mikrotik code | ||
14 | * seems not to have a license. | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License version 2 as | ||
18 | * published by the Free Software Foundation. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | |||
26 | #include <linux/io.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/irq.h> | ||
29 | |||
30 | #include <linux/libata.h> | ||
31 | #include <scsi/scsi_host.h> | ||
32 | |||
33 | #include <asm/gpio.h> | ||
34 | |||
35 | #define DRV_NAME "pata-rb500-cf" | ||
36 | #define DRV_VERSION "0.1.0" | ||
37 | #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" | ||
38 | |||
39 | #define RB500_CF_MAXPORTS 1 | ||
40 | #define RB500_CF_IO_DELAY 400 | ||
41 | |||
42 | #define RB500_CF_REG_CMD 0x0800 | ||
43 | #define RB500_CF_REG_CTRL 0x080E | ||
44 | #define RB500_CF_REG_DATA 0x0C00 | ||
45 | |||
46 | struct rb500_cf_info { | ||
47 | void __iomem *iobase; | ||
48 | unsigned int gpio_line; | ||
49 | int frozen; | ||
50 | unsigned int irq; | ||
51 | }; | ||
52 | |||
53 | /* ------------------------------------------------------------------------ */ | ||
54 | |||
55 | static inline void rb500_pata_finish_io(struct ata_port *ap) | ||
56 | { | ||
57 | struct ata_host *ah = ap->host; | ||
58 | struct rb500_cf_info *info = ah->private_data; | ||
59 | |||
60 | ata_altstatus(ap); | ||
61 | ndelay(RB500_CF_IO_DELAY); | ||
62 | |||
63 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | ||
64 | } | ||
65 | |||
66 | static void rb500_pata_exec_command(struct ata_port *ap, | ||
67 | const struct ata_taskfile *tf) | ||
68 | { | ||
69 | writeb(tf->command, ap->ioaddr.command_addr); | ||
70 | rb500_pata_finish_io(ap); | ||
71 | } | ||
72 | |||
73 | static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf, | ||
74 | unsigned int buflen, int write_data) | ||
75 | { | ||
76 | struct ata_port *ap = adev->link->ap; | ||
77 | void __iomem *ioaddr = ap->ioaddr.data_addr; | ||
78 | |||
79 | if (write_data) { | ||
80 | for (; buflen > 0; buflen--, buf++) | ||
81 | writeb(*buf, ioaddr); | ||
82 | } else { | ||
83 | for (; buflen > 0; buflen--, buf++) | ||
84 | *buf = readb(ioaddr); | ||
85 | } | ||
86 | |||
87 | rb500_pata_finish_io(adev->link->ap); | ||
88 | } | ||
89 | |||
90 | static void rb500_pata_freeze(struct ata_port *ap) | ||
91 | { | ||
92 | struct rb500_cf_info *info = ap->host->private_data; | ||
93 | |||
94 | info->frozen = 1; | ||
95 | } | ||
96 | |||
97 | static void rb500_pata_thaw(struct ata_port *ap) | ||
98 | { | ||
99 | struct rb500_cf_info *info = ap->host->private_data; | ||
100 | |||
101 | info->frozen = 0; | ||
102 | } | ||
103 | |||
104 | static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance) | ||
105 | { | ||
106 | struct ata_host *ah = dev_instance; | ||
107 | struct rb500_cf_info *info = ah->private_data; | ||
108 | |||
109 | if (gpio_get_value(info->gpio_line)) { | ||
110 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW); | ||
111 | if (!info->frozen) | ||
112 | ata_interrupt(info->irq, dev_instance); | ||
113 | } else { | ||
114 | set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH); | ||
115 | } | ||
116 | |||
117 | return IRQ_HANDLED; | ||
118 | } | ||
119 | |||
120 | static void rb500_pata_irq_clear(struct ata_port *ap) | ||
121 | { | ||
122 | } | ||
123 | |||
124 | static int rb500_pata_port_start(struct ata_port *ap) | ||
125 | { | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static struct ata_port_operations rb500_pata_port_ops = { | ||
130 | .tf_load = ata_tf_load, | ||
131 | .tf_read = ata_tf_read, | ||
132 | |||
133 | .exec_command = rb500_pata_exec_command, | ||
134 | .check_status = ata_check_status, | ||
135 | .dev_select = ata_std_dev_select, | ||
136 | |||
137 | .data_xfer = rb500_pata_data_xfer, | ||
138 | |||
139 | .qc_prep = ata_qc_prep, | ||
140 | .qc_issue = ata_qc_issue_prot, | ||
141 | |||
142 | .freeze = rb500_pata_freeze, | ||
143 | .thaw = rb500_pata_thaw, | ||
144 | .error_handler = ata_bmdma_error_handler, | ||
145 | |||
146 | .irq_handler = rb500_pata_irq_handler, | ||
147 | .irq_clear = rb500_pata_irq_clear, | ||
148 | .irq_on = ata_irq_on, | ||
149 | |||
150 | .port_start = rb500_pata_port_start, | ||
151 | }; | ||
152 | |||
153 | /* ------------------------------------------------------------------------ */ | ||
154 | |||
155 | static struct scsi_host_template rb500_pata_sht = { | ||
156 | .module = THIS_MODULE, | ||
157 | .name = DRV_NAME, | ||
158 | .ioctl = ata_scsi_ioctl, | ||
159 | .queuecommand = ata_scsi_queuecmd, | ||
160 | .slave_configure = ata_scsi_slave_config, | ||
161 | .slave_destroy = ata_scsi_slave_destroy, | ||
162 | .bios_param = ata_std_bios_param, | ||
163 | .proc_name = DRV_NAME, | ||
164 | |||
165 | .can_queue = ATA_DEF_QUEUE, | ||
166 | .this_id = ATA_SHT_THIS_ID, | ||
167 | .sg_tablesize = LIBATA_MAX_PRD, | ||
168 | .dma_boundary = ATA_DMA_BOUNDARY, | ||
169 | .cmd_per_lun = ATA_SHT_CMD_PER_LUN, | ||
170 | .emulated = ATA_SHT_EMULATED, | ||
171 | .use_clustering = ATA_SHT_USE_CLUSTERING, | ||
172 | }; | ||
173 | |||
174 | /* ------------------------------------------------------------------------ */ | ||
175 | |||
176 | static void rb500_pata_setup_ports(struct ata_host *ah) | ||
177 | { | ||
178 | struct rb500_cf_info *info = ah->private_data; | ||
179 | struct ata_port *ap; | ||
180 | |||
181 | ap = ah->ports[0]; | ||
182 | |||
183 | ap->ops = &rb500_pata_port_ops; | ||
184 | ap->pio_mask = 0x1f; /* PIO4 */ | ||
185 | ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO; | ||
186 | |||
187 | ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_CMD; | ||
188 | ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL; | ||
189 | ap->ioaddr.altstatus_addr = info->iobase + RB500_CF_REG_CTRL; | ||
190 | |||
191 | ata_std_ports(&ap->ioaddr); | ||
192 | |||
193 | ap->ioaddr.data_addr = info->iobase + RB500_CF_REG_DATA; | ||
194 | } | ||
195 | |||
196 | static __devinit int rb500_pata_driver_probe(struct platform_device *pdev) | ||
197 | { | ||
198 | unsigned int irq; | ||
199 | int gpio; | ||
200 | struct resource *res; | ||
201 | struct ata_host *ah; | ||
202 | struct rb500_cf_info *info; | ||
203 | int ret; | ||
204 | |||
205 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
206 | if (!res) { | ||
207 | dev_err(&pdev->dev, "no IOMEM resource found\n"); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | irq = platform_get_irq(pdev, 0); | ||
212 | if (irq <= 0) { | ||
213 | dev_err(&pdev->dev, "no IRQ resource found\n"); | ||
214 | return -ENOENT; | ||
215 | } | ||
216 | |||
217 | gpio = irq_to_gpio(irq); | ||
218 | if (gpio < 0) { | ||
219 | dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); | ||
220 | return -ENOENT; | ||
221 | } | ||
222 | |||
223 | ret = gpio_request(gpio, DRV_NAME); | ||
224 | if (ret) { | ||
225 | dev_err(&pdev->dev, "GPIO request failed\n"); | ||
226 | return ret; | ||
227 | } | ||
228 | |||
229 | /* allocate host */ | ||
230 | ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS); | ||
231 | if (!ah) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | platform_set_drvdata(pdev, ah); | ||
235 | |||
236 | info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); | ||
237 | if (!info) | ||
238 | return -ENOMEM; | ||
239 | |||
240 | ah->private_data = info; | ||
241 | info->gpio_line = gpio; | ||
242 | info->irq = irq; | ||
243 | |||
244 | info->iobase = devm_ioremap_nocache(&pdev->dev, res->start, | ||
245 | res->end - res->start + 1); | ||
246 | if (!info->iobase) | ||
247 | return -ENOMEM; | ||
248 | |||
249 | ret = gpio_direction_input(gpio); | ||
250 | if (ret) { | ||
251 | dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n", | ||
252 | ret); | ||
253 | goto err_free_gpio; | ||
254 | } | ||
255 | |||
256 | rb500_pata_setup_ports(ah); | ||
257 | |||
258 | ret = ata_host_activate(ah, irq, rb500_pata_irq_handler, | ||
259 | IRQF_TRIGGER_LOW, &rb500_pata_sht); | ||
260 | if (ret) | ||
261 | goto err_free_gpio; | ||
262 | |||
263 | return 0; | ||
264 | |||
265 | err_free_gpio: | ||
266 | gpio_free(gpio); | ||
267 | |||
268 | return ret; | ||
269 | } | ||
270 | |||
271 | static __devexit int rb500_pata_driver_remove(struct platform_device *pdev) | ||
272 | { | ||
273 | struct ata_host *ah = platform_get_drvdata(pdev); | ||
274 | struct rb500_cf_info *info = ah->private_data; | ||
275 | |||
276 | ata_host_detach(ah); | ||
277 | gpio_free(info->gpio_line); | ||
278 | |||
279 | return 0; | ||
280 | } | ||
281 | |||
282 | static struct platform_driver rb500_pata_platform_driver = { | ||
283 | .probe = rb500_pata_driver_probe, | ||
284 | .remove = __devexit_p(rb500_pata_driver_remove), | ||
285 | .driver = { | ||
286 | .name = DRV_NAME, | ||
287 | .owner = THIS_MODULE, | ||
288 | }, | ||
289 | }; | ||
290 | |||
291 | /* ------------------------------------------------------------------------ */ | ||
292 | |||
293 | #define DRV_INFO DRV_DESC " version " DRV_VERSION | ||
294 | |||
295 | static int __init rb500_pata_module_init(void) | ||
296 | { | ||
297 | printk(KERN_INFO DRV_INFO "\n"); | ||
298 | |||
299 | return platform_driver_register(&rb500_pata_platform_driver); | ||
300 | } | ||
301 | |||
302 | static void __exit rb500_pata_module_exit(void) | ||
303 | { | ||
304 | platform_driver_unregister(&rb500_pata_platform_driver); | ||
305 | } | ||
306 | |||
307 | MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>"); | ||
308 | MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); | ||
309 | MODULE_DESCRIPTION(DRV_DESC); | ||
310 | MODULE_VERSION(DRV_VERSION); | ||
311 | MODULE_LICENSE("GPL"); | ||
312 | |||
313 | module_init(rb500_pata_module_init); | ||
314 | module_exit(rb500_pata_module_exit); | ||
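Nothing in the new driver creates the device it binds to; it relies on board-support code registering a platform device named "pata-rb500-cf" with one memory window for the CF registers and one GPIO-routed interrupt. A hedged sketch of such board code follows; the base address and IRQ number are placeholders, and the real RB532 setup in the MIPS board files may look different:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    /* Illustrative values only. */
    static struct resource rb500_cf_resources[] = {
            {
                    .start = 0x18000000,                    /* CF register window */
                    .end   = 0x18000000 + 0x1000 - 1,
                    .flags = IORESOURCE_MEM,
            },
            {
                    .start = 149,                           /* GPIO-backed IRQ    */
                    .end   = 149,
                    .flags = IORESOURCE_IRQ,
            },
    };

    static struct platform_device rb500_cf_device = {
            .name          = "pata-rb500-cf",               /* must match DRV_NAME */
            .id            = -1,
            .resource      = rb500_cf_resources,
            .num_resources = ARRAY_SIZE(rb500_cf_resources),
    };

    static int __init rb500_cf_register(void)
    {
            return platform_device_register(&rb500_cf_device);
    }
    arch_initcall(rb500_cf_register);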
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 9c523fbf529e..a589c0fa0dbb 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c | |||
@@ -226,7 +226,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo | |||
226 | 226 | ||
227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { | 227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { |
228 | if (!strcmp(p, model_num)) | 228 | if (!strcmp(p, model_num)) |
229 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 229 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
230 | } | 230 | } |
231 | return ata_pci_default_filter(adev, mask); | 231 | return ata_pci_default_filter(adev, mask); |
232 | } | 232 | } |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c index 69f651e0bc98..840d1c4a7850 100644 --- a/drivers/ata/sata_svw.c +++ b/drivers/ata/sata_svw.c | |||
@@ -45,6 +45,8 @@ | |||
45 | #include <linux/interrupt.h> | 45 | #include <linux/interrupt.h> |
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <scsi/scsi_host.h> | 47 | #include <scsi/scsi_host.h> |
48 | #include <scsi/scsi_cmnd.h> | ||
49 | #include <scsi/scsi.h> | ||
48 | #include <linux/libata.h> | 50 | #include <linux/libata.h> |
49 | 51 | ||
50 | #ifdef CONFIG_PPC_OF | 52 | #ifdef CONFIG_PPC_OF |
@@ -59,6 +61,7 @@ enum { | |||
59 | /* ap->flags bits */ | 61 | /* ap->flags bits */ |
60 | K2_FLAG_SATA_8_PORTS = (1 << 24), | 62 | K2_FLAG_SATA_8_PORTS = (1 << 24), |
61 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), | 63 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), |
64 | K2_FLAG_BAR_POS_3 = (1 << 26), | ||
62 | 65 | ||
63 | /* Taskfile registers offsets */ | 66 | /* Taskfile registers offsets */ |
64 | K2_SATA_TF_CMD_OFFSET = 0x00, | 67 | K2_SATA_TF_CMD_OFFSET = 0x00, |
@@ -88,8 +91,10 @@ enum { | |||
88 | /* Port stride */ | 91 | /* Port stride */ |
89 | K2_SATA_PORT_OFFSET = 0x100, | 92 | K2_SATA_PORT_OFFSET = 0x100, |
90 | 93 | ||
91 | board_svw4 = 0, | 94 | chip_svw4 = 0, |
92 | board_svw8 = 1, | 95 | chip_svw8 = 1, |
96 | chip_svw42 = 2, /* bar 3 */ | ||
97 | chip_svw43 = 3, /* bar 5 */ | ||
93 | }; | 98 | }; |
94 | 99 | ||
95 | static u8 k2_stat_check_status(struct ata_port *ap); | 100 | static u8 k2_stat_check_status(struct ata_port *ap); |
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap); | |||
97 | 102 | ||
98 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) | 103 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) |
99 | { | 104 | { |
105 | u8 cmnd = qc->scsicmd->cmnd[0]; | ||
106 | |||
100 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) | 107 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) |
101 | return -1; /* ATAPI DMA not supported */ | 108 | return -1; /* ATAPI DMA not supported */ |
109 | else { | ||
110 | switch (cmnd) { | ||
111 | case READ_10: | ||
112 | case READ_12: | ||
113 | case READ_16: | ||
114 | case WRITE_10: | ||
115 | case WRITE_12: | ||
116 | case WRITE_16: | ||
117 | return 0; | ||
118 | |||
119 | default: | ||
120 | return -1; | ||
121 | } | ||
102 | 122 | ||
103 | return 0; | 123 | } |
104 | } | 124 | } |
105 | 125 | ||
106 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | 126 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = { | |||
354 | }; | 374 | }; |
355 | 375 | ||
356 | static const struct ata_port_info k2_port_info[] = { | 376 | static const struct ata_port_info k2_port_info[] = { |
357 | /* board_svw4 */ | 377 | /* chip_svw4 */ |
358 | { | 378 | { |
359 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 379 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
360 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, | 380 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, |
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = { | |||
363 | .udma_mask = ATA_UDMA6, | 383 | .udma_mask = ATA_UDMA6, |
364 | .port_ops = &k2_sata_ops, | 384 | .port_ops = &k2_sata_ops, |
365 | }, | 385 | }, |
366 | /* board_svw8 */ | 386 | /* chip_svw8 */ |
367 | { | 387 | { |
368 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 388 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
369 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | | 389 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = { | |||
373 | .udma_mask = ATA_UDMA6, | 393 | .udma_mask = ATA_UDMA6, |
374 | .port_ops = &k2_sata_ops, | 394 | .port_ops = &k2_sata_ops, |
375 | }, | 395 | }, |
396 | /* chip_svw42 */ | ||
397 | { | ||
398 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
399 | ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, | ||
400 | .pio_mask = 0x1f, | ||
401 | .mwdma_mask = 0x07, | ||
402 | .udma_mask = ATA_UDMA6, | ||
403 | .port_ops = &k2_sata_ops, | ||
404 | }, | ||
405 | /* chip_svw43 */ | ||
406 | { | ||
407 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
408 | ATA_FLAG_MMIO, | ||
409 | .pio_mask = 0x1f, | ||
410 | .mwdma_mask = 0x07, | ||
411 | .udma_mask = ATA_UDMA6, | ||
412 | .port_ops = &k2_sata_ops, | ||
413 | }, | ||
376 | }; | 414 | }; |
377 | 415 | ||
378 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) | 416 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) |
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
402 | { &k2_port_info[ent->driver_data], NULL }; | 440 | { &k2_port_info[ent->driver_data], NULL }; |
403 | struct ata_host *host; | 441 | struct ata_host *host; |
404 | void __iomem *mmio_base; | 442 | void __iomem *mmio_base; |
405 | int n_ports, i, rc; | 443 | int n_ports, i, rc, bar_pos; |
406 | 444 | ||
407 | if (!printed_version++) | 445 | if (!printed_version++) |
408 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 446 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
416 | if (!host) | 454 | if (!host) |
417 | return -ENOMEM; | 455 | return -ENOMEM; |
418 | 456 | ||
457 | bar_pos = 5; | ||
458 | if (ppi[0]->flags & K2_FLAG_BAR_POS_3) | ||
459 | bar_pos = 3; | ||
419 | /* | 460 | /* |
420 | * If this driver happens to only be useful on Apple's K2, then | 461 | * If this driver happens to only be useful on Apple's K2, then |
421 | * we should check that here as it has a normal Serverworks ID | 462 | * we should check that here as it has a normal Serverworks ID |
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
428 | * Check if we have resources mapped at all (second function may | 469 | * Check if we have resources mapped at all (second function may |
429 | * have been disabled by firmware) | 470 | * have been disabled by firmware) |
430 | */ | 471 | */ |
431 | if (pci_resource_len(pdev, 5) == 0) | 472 | if (pci_resource_len(pdev, bar_pos) == 0) { |
473 | /* In IDE mode we need to pin the device to ensure that | ||
474 | pcim_release does not clear the busmaster bit in config | ||
475 | space; clearing it causes busmaster DMA to fail on | ||
476 | ports 3 & 4 */ | ||
477 | pcim_pin_device(pdev); | ||
432 | return -ENODEV; | 478 | return -ENODEV; |
479 | } | ||
433 | 480 | ||
434 | /* Request and iomap PCI regions */ | 481 | /* Request and iomap PCI regions */ |
435 | rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); | 482 | rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); |
436 | if (rc == -EBUSY) | 483 | if (rc == -EBUSY) |
437 | pcim_pin_device(pdev); | 484 | pcim_pin_device(pdev); |
438 | if (rc) | 485 | if (rc) |
439 | return rc; | 486 | return rc; |
440 | host->iomap = pcim_iomap_table(pdev); | 487 | host->iomap = pcim_iomap_table(pdev); |
441 | mmio_base = host->iomap[5]; | 488 | mmio_base = host->iomap[bar_pos]; |
442 | 489 | ||
443 | /* different controllers have different number of ports - currently 4 or 8 */ | 490 | /* different controllers have different number of ports - currently 4 or 8 */ |
444 | /* All ports are on the same function. Multi-function device is no | 491 | /* All ports are on the same function. Multi-function device is no |
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
483 | * controller | 530 | * controller |
484 | * */ | 531 | * */ |
485 | static const struct pci_device_id k2_sata_pci_tbl[] = { | 532 | static const struct pci_device_id k2_sata_pci_tbl[] = { |
486 | { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, | 533 | { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, |
487 | { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, | 534 | { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 }, |
488 | { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, | 535 | { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 }, |
489 | { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, | 536 | { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, |
490 | { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, | 537 | { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, |
538 | { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, | ||
539 | { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, | ||
491 | 540 | ||
492 | { } | 541 | { } |
493 | }; | 542 | }; |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 9c0070b5bd3e..7de543d1d0b4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) | 621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
622 | { | 622 | { |
623 | /* see if we live in a "glue" directory */ | 623 | /* see if we live in a "glue" directory */ |
624 | if (!dev->class || glue_dir->kset != &dev->class->class_dirs) | 624 | if (!glue_dir || !dev->class || |
625 | glue_dir->kset != &dev->class->class_dirs) | ||
625 | return; | 626 | return; |
626 | 627 | ||
627 | kobject_put(glue_dir); | 628 | kobject_put(glue_dir); |
@@ -770,17 +771,10 @@ int device_add(struct device *dev) | |||
770 | struct class_interface *class_intf; | 771 | struct class_interface *class_intf; |
771 | int error; | 772 | int error; |
772 | 773 | ||
773 | error = pm_sleep_lock(); | ||
774 | if (error) { | ||
775 | dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__); | ||
776 | dump_stack(); | ||
777 | return error; | ||
778 | } | ||
779 | |||
780 | dev = get_device(dev); | 774 | dev = get_device(dev); |
781 | if (!dev || !strlen(dev->bus_id)) { | 775 | if (!dev || !strlen(dev->bus_id)) { |
782 | error = -EINVAL; | 776 | error = -EINVAL; |
783 | goto Error; | 777 | goto Done; |
784 | } | 778 | } |
785 | 779 | ||
786 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); | 780 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); |
@@ -843,11 +837,9 @@ int device_add(struct device *dev) | |||
843 | } | 837 | } |
844 | Done: | 838 | Done: |
845 | put_device(dev); | 839 | put_device(dev); |
846 | pm_sleep_unlock(); | ||
847 | return error; | 840 | return error; |
848 | BusError: | 841 | BusError: |
849 | device_pm_remove(dev); | 842 | device_pm_remove(dev); |
850 | dpm_sysfs_remove(dev); | ||
851 | PMError: | 843 | PMError: |
852 | if (dev->bus) | 844 | if (dev->bus) |
853 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 845 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index efaf282c438c..911ec600fe71 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -648,7 +648,7 @@ u64 dma_get_required_mask(struct device *dev) | |||
648 | high_totalram += high_totalram - 1; | 648 | high_totalram += high_totalram - 1; |
649 | mask = (((u64)high_totalram) << 32) + 0xffffffff; | 649 | mask = (((u64)high_totalram) << 32) + 0xffffffff; |
650 | } | 650 | } |
651 | return mask & *dev->dma_mask; | 651 | return mask; |
652 | } | 652 | } |
653 | EXPORT_SYMBOL_GPL(dma_get_required_mask); | 653 | EXPORT_SYMBOL_GPL(dma_get_required_mask); |
654 | #endif | 654 | #endif |
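The platform.c one-liner stops AND-ing the computed requirement with the device's current dma_mask. The helper exists so a driver can ask what mask the installed memory calls for before deciding what to set, and clipping the answer against the default 32-bit mask hid exactly the information being asked for (that reading of the intent is inferred from the code, not stated in the patch). A small illustration of the clipping effect, with the totalram formula replaced by a fixed example value:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical box whose RAM-derived requirement is a 35-bit mask. */
            unsigned long long required     = 0x7ffffffffULL;
            unsigned long long dev_dma_mask = 0xffffffffULL;   /* untouched 32-bit default */

            printf("platform requirement : 0x%llx\n", required);
            printf("old return value     : 0x%llx  (clipped, looks like plain 32-bit)\n",
                   required & dev_dma_mask);
            printf("new return value     : 0x%llx\n", required);
            return 0;
    }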
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index ee9d1c8db0d6..d887d5cb5bef 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -48,7 +48,6 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | LIST_HEAD(dpm_active); | 50 | LIST_HEAD(dpm_active); |
51 | static LIST_HEAD(dpm_locked); | ||
52 | static LIST_HEAD(dpm_off); | 51 | static LIST_HEAD(dpm_off); |
53 | static LIST_HEAD(dpm_off_irq); | 52 | static LIST_HEAD(dpm_off_irq); |
54 | static LIST_HEAD(dpm_destroy); | 53 | static LIST_HEAD(dpm_destroy); |
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev) | |||
81 | */ | 80 | */ |
82 | void device_pm_remove(struct device *dev) | 81 | void device_pm_remove(struct device *dev) |
83 | { | 82 | { |
84 | /* | ||
85 | * If this function is called during a suspend, it will be blocked, | ||
86 | * because we're holding the device's semaphore at that time, which may | ||
87 | * lead to a deadlock. In that case we want to print a warning. | ||
88 | * However, it may also be called by unregister_dropped_devices() with | ||
89 | * the device's semaphore released, in which case the warning should | ||
90 | * not be printed. | ||
91 | */ | ||
92 | if (down_trylock(&dev->sem)) { | ||
93 | if (down_read_trylock(&pm_sleep_rwsem)) { | ||
94 | /* No suspend in progress, wait on dev->sem */ | ||
95 | down(&dev->sem); | ||
96 | up_read(&pm_sleep_rwsem); | ||
97 | } else { | ||
98 | /* Suspend in progress, we may deadlock */ | ||
99 | dev_warn(dev, "Suspicious %s during suspend\n", | ||
100 | __FUNCTION__); | ||
101 | dump_stack(); | ||
102 | /* The user has been warned ... */ | ||
103 | down(&dev->sem); | ||
104 | } | ||
105 | } | ||
106 | pr_debug("PM: Removing info for %s:%s\n", | 83 | pr_debug("PM: Removing info for %s:%s\n", |
107 | dev->bus ? dev->bus->name : "No Bus", | 84 | dev->bus ? dev->bus->name : "No Bus", |
108 | kobject_name(&dev->kobj)); | 85 | kobject_name(&dev->kobj)); |
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev) | |||
110 | dpm_sysfs_remove(dev); | 87 | dpm_sysfs_remove(dev); |
111 | list_del_init(&dev->power.entry); | 88 | list_del_init(&dev->power.entry); |
112 | mutex_unlock(&dpm_list_mtx); | 89 | mutex_unlock(&dpm_list_mtx); |
113 | up(&dev->sem); | ||
114 | } | 90 | } |
115 | 91 | ||
116 | /** | 92 | /** |
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev) | |||
230 | TRACE_DEVICE(dev); | 206 | TRACE_DEVICE(dev); |
231 | TRACE_RESUME(0); | 207 | TRACE_RESUME(0); |
232 | 208 | ||
209 | down(&dev->sem); | ||
210 | |||
233 | if (dev->bus && dev->bus->resume) { | 211 | if (dev->bus && dev->bus->resume) { |
234 | dev_dbg(dev,"resuming\n"); | 212 | dev_dbg(dev,"resuming\n"); |
235 | error = dev->bus->resume(dev); | 213 | error = dev->bus->resume(dev); |
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev) | |||
245 | error = dev->class->resume(dev); | 223 | error = dev->class->resume(dev); |
246 | } | 224 | } |
247 | 225 | ||
226 | up(&dev->sem); | ||
227 | |||
248 | TRACE_RESUME(error); | 228 | TRACE_RESUME(error); |
249 | return error; | 229 | return error; |
250 | } | 230 | } |
@@ -266,7 +246,7 @@ static void dpm_resume(void) | |||
266 | struct list_head *entry = dpm_off.next; | 246 | struct list_head *entry = dpm_off.next; |
267 | struct device *dev = to_device(entry); | 247 | struct device *dev = to_device(entry); |
268 | 248 | ||
269 | list_move_tail(entry, &dpm_locked); | 249 | list_move_tail(entry, &dpm_active); |
270 | mutex_unlock(&dpm_list_mtx); | 250 | mutex_unlock(&dpm_list_mtx); |
271 | resume_device(dev); | 251 | resume_device(dev); |
272 | mutex_lock(&dpm_list_mtx); | 252 | mutex_lock(&dpm_list_mtx); |
@@ -275,25 +255,6 @@ static void dpm_resume(void) | |||
275 | } | 255 | } |
276 | 256 | ||
277 | /** | 257 | /** |
278 | * unlock_all_devices - Release each device's semaphore | ||
279 | * | ||
280 | * Go through the dpm_off list. Put each device on the dpm_active | ||
281 | * list and unlock it. | ||
282 | */ | ||
283 | static void unlock_all_devices(void) | ||
284 | { | ||
285 | mutex_lock(&dpm_list_mtx); | ||
286 | while (!list_empty(&dpm_locked)) { | ||
287 | struct list_head *entry = dpm_locked.prev; | ||
288 | struct device *dev = to_device(entry); | ||
289 | |||
290 | list_move(entry, &dpm_active); | ||
291 | up(&dev->sem); | ||
292 | } | ||
293 | mutex_unlock(&dpm_list_mtx); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * unregister_dropped_devices - Unregister devices scheduled for removal | 258 | * unregister_dropped_devices - Unregister devices scheduled for removal |
298 | * | 259 | * |
299 | * Unregister all devices on the dpm_destroy list. | 260 | * Unregister all devices on the dpm_destroy list. |
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void) | |||
305 | struct list_head *entry = dpm_destroy.next; | 266 | struct list_head *entry = dpm_destroy.next; |
306 | struct device *dev = to_device(entry); | 267 | struct device *dev = to_device(entry); |
307 | 268 | ||
308 | up(&dev->sem); | ||
309 | mutex_unlock(&dpm_list_mtx); | 269 | mutex_unlock(&dpm_list_mtx); |
310 | /* This also removes the device from the list */ | 270 | /* This also removes the device from the list */ |
311 | device_unregister(dev); | 271 | device_unregister(dev); |
@@ -324,7 +284,6 @@ void device_resume(void) | |||
324 | { | 284 | { |
325 | might_sleep(); | 285 | might_sleep(); |
326 | dpm_resume(); | 286 | dpm_resume(); |
327 | unlock_all_devices(); | ||
328 | unregister_dropped_devices(); | 287 | unregister_dropped_devices(); |
329 | up_write(&pm_sleep_rwsem); | 288 | up_write(&pm_sleep_rwsem); |
330 | } | 289 | } |
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state) | |||
388 | struct list_head *entry = dpm_off.prev; | 347 | struct list_head *entry = dpm_off.prev; |
389 | struct device *dev = to_device(entry); | 348 | struct device *dev = to_device(entry); |
390 | 349 | ||
391 | list_del_init(&dev->power.entry); | ||
392 | error = suspend_device_late(dev, state); | 350 | error = suspend_device_late(dev, state); |
393 | if (error) { | 351 | if (error) { |
394 | printk(KERN_ERR "Could not power down device %s: " | 352 | printk(KERN_ERR "Could not power down device %s: " |
395 | "error %d\n", | 353 | "error %d\n", |
396 | kobject_name(&dev->kobj), error); | 354 | kobject_name(&dev->kobj), error); |
397 | if (list_empty(&dev->power.entry)) | ||
398 | list_add(&dev->power.entry, &dpm_off); | ||
399 | break; | 355 | break; |
400 | } | 356 | } |
401 | if (list_empty(&dev->power.entry)) | 357 | if (!list_empty(&dev->power.entry)) |
402 | list_add(&dev->power.entry, &dpm_off_irq); | 358 | list_move(&dev->power.entry, &dpm_off_irq); |
403 | } | 359 | } |
404 | 360 | ||
405 | if (!error) | 361 | if (!error) |
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
419 | { | 375 | { |
420 | int error = 0; | 376 | int error = 0; |
421 | 377 | ||
378 | down(&dev->sem); | ||
379 | |||
422 | if (dev->power.power_state.event) { | 380 | if (dev->power.power_state.event) { |
423 | dev_dbg(dev, "PM: suspend %d-->%d\n", | 381 | dev_dbg(dev, "PM: suspend %d-->%d\n", |
424 | dev->power.power_state.event, state.event); | 382 | dev->power.power_state.event, state.event); |
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
441 | error = dev->bus->suspend(dev, state); | 399 | error = dev->bus->suspend(dev, state); |
442 | suspend_report_result(dev->bus->suspend, error); | 400 | suspend_report_result(dev->bus->suspend, error); |
443 | } | 401 | } |
402 | |||
403 | up(&dev->sem); | ||
404 | |||
444 | return error; | 405 | return error; |
445 | } | 406 | } |
446 | 407 | ||
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state) | |||
461 | int error = 0; | 422 | int error = 0; |
462 | 423 | ||
463 | mutex_lock(&dpm_list_mtx); | 424 | mutex_lock(&dpm_list_mtx); |
464 | while (!list_empty(&dpm_locked)) { | 425 | while (!list_empty(&dpm_active)) { |
465 | struct list_head *entry = dpm_locked.prev; | 426 | struct list_head *entry = dpm_active.prev; |
466 | struct device *dev = to_device(entry); | 427 | struct device *dev = to_device(entry); |
467 | 428 | ||
468 | list_del_init(&dev->power.entry); | ||
469 | mutex_unlock(&dpm_list_mtx); | 429 | mutex_unlock(&dpm_list_mtx); |
470 | error = suspend_device(dev, state); | 430 | error = suspend_device(dev, state); |
431 | mutex_lock(&dpm_list_mtx); | ||
471 | if (error) { | 432 | if (error) { |
472 | printk(KERN_ERR "Could not suspend device %s: " | 433 | printk(KERN_ERR "Could not suspend device %s: " |
473 | "error %d%s\n", | 434 | "error %d%s\n", |
@@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state) | |||
476 | (error == -EAGAIN ? | 437 | (error == -EAGAIN ? |
477 | " (please convert to suspend_late)" : | 438 | " (please convert to suspend_late)" : |
478 | "")); | 439 | "")); |
479 | mutex_lock(&dpm_list_mtx); | ||
480 | if (list_empty(&dev->power.entry)) | ||
481 | list_add(&dev->power.entry, &dpm_locked); | ||
482 | break; | 440 | break; |
483 | } | 441 | } |
484 | mutex_lock(&dpm_list_mtx); | 442 | if (!list_empty(&dev->power.entry)) |
485 | if (list_empty(&dev->power.entry)) | 443 | list_move(&dev->power.entry, &dpm_off); |
486 | list_add(&dev->power.entry, &dpm_off); | ||
487 | } | 444 | } |
488 | mutex_unlock(&dpm_list_mtx); | 445 | mutex_unlock(&dpm_list_mtx); |
489 | 446 | ||
@@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state) | |||
491 | } | 448 | } |
492 | 449 | ||
493 | /** | 450 | /** |
494 | * lock_all_devices - Acquire every device's semaphore | ||
495 | * | ||
496 | * Go through the dpm_active list. Carefully lock each device's | ||
497 | * semaphore and put it in on the dpm_locked list. | ||
498 | */ | ||
499 | static void lock_all_devices(void) | ||
500 | { | ||
501 | mutex_lock(&dpm_list_mtx); | ||
502 | while (!list_empty(&dpm_active)) { | ||
503 | struct list_head *entry = dpm_active.next; | ||
504 | struct device *dev = to_device(entry); | ||
505 | |||
506 | /* Required locking order is dev->sem first, | ||
507 | * then dpm_list_mutex. Hence this awkward code. | ||
508 | */ | ||
509 | get_device(dev); | ||
510 | mutex_unlock(&dpm_list_mtx); | ||
511 | down(&dev->sem); | ||
512 | mutex_lock(&dpm_list_mtx); | ||
513 | |||
514 | if (list_empty(entry)) | ||
515 | up(&dev->sem); /* Device was removed */ | ||
516 | else | ||
517 | list_move_tail(entry, &dpm_locked); | ||
518 | put_device(dev); | ||
519 | } | ||
520 | mutex_unlock(&dpm_list_mtx); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * device_suspend - Save state and stop all devices in system. | 451 | * device_suspend - Save state and stop all devices in system. |
525 | * @state: new power management state | 452 | * @state: new power management state |
526 | * | 453 | * |
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state) | |||
533 | 460 | ||
534 | might_sleep(); | 461 | might_sleep(); |
535 | down_write(&pm_sleep_rwsem); | 462 | down_write(&pm_sleep_rwsem); |
536 | lock_all_devices(); | ||
537 | error = dpm_suspend(state); | 463 | error = dpm_suspend(state); |
538 | if (error) | 464 | if (error) |
539 | device_resume(); | 465 | device_resume(); |
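The power/main.c rework removes the global lock_all_devices()/unlock_all_devices() passes and the dpm_locked list; instead, resume_device() and suspend_device() take the device's own semaphore just around the PM callbacks, and the lists are manipulated under dpm_list_mtx by moving entries rather than deleting and re-adding them. A condensed, non-verbatim sketch of the per-device shape after the change:

    /* Sketch only - not a compilable excerpt of drivers/base/power/main.c. */
    static int suspend_one_device(struct device *dev, pm_message_t state)
    {
            int error = 0;

            down(&dev->sem);                /* serialize with the driver core */
            if (dev->class && dev->class->suspend)
                    error = dev->class->suspend(dev, state);
            if (!error && dev->bus && dev->bus->suspend)
                    error = dev->bus->suspend(dev, state);
            up(&dev->sem);                  /* held only for this one device  */

            return error;
    }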
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 2f79c55acdcc..8e13fd942163 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -133,6 +133,7 @@ int sysdev_class_register(struct sysdev_class * cls) | |||
133 | pr_debug("Registering sysdev class '%s'\n", | 133 | pr_debug("Registering sysdev class '%s'\n", |
134 | kobject_name(&cls->kset.kobj)); | 134 | kobject_name(&cls->kset.kobj)); |
135 | INIT_LIST_HEAD(&cls->drivers); | 135 | INIT_LIST_HEAD(&cls->drivers); |
136 | memset(&cls->kset.kobj, 0x00, sizeof(struct kobject)); | ||
136 | cls->kset.kobj.parent = &system_kset->kobj; | 137 | cls->kset.kobj.parent = &system_kset->kobj; |
137 | cls->kset.kobj.ktype = &ktype_sysdev_class; | 138 | cls->kset.kobj.ktype = &ktype_sysdev_class; |
138 | cls->kset.kobj.kset = system_kset; | 139 | cls->kset.kobj.kset = system_kset; |
@@ -227,6 +228,9 @@ int sysdev_register(struct sys_device * sysdev) | |||
227 | 228 | ||
228 | pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj)); | 229 | pr_debug("Registering sys device '%s'\n", kobject_name(&sysdev->kobj)); |
229 | 230 | ||
231 | /* initialize the kobject to 0, in case it had previously been used */ | ||
232 | memset(&sysdev->kobj, 0x00, sizeof(struct kobject)); | ||
233 | |||
230 | /* Make sure the kset is set */ | 234 | /* Make sure the kset is set */ |
231 | sysdev->kobj.kset = &cls->kset; | 235 | sysdev->kobj.kset = &cls->kset; |
232 | 236 | ||
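
Both sys.c hunks add the same precaution: an embedded kobject that may have been registered and released before is wiped to zero so no stale pointers or flags survive into the new registration. A minimal sketch of that idea for a hypothetical container object (my_dev, the ktype argument and the name are illustrative, not part of the patch, and kobject_init_and_add() is assumed as the registration path):

struct my_dev {
	struct kobject kobj;
	/* ... driver state ... */
};

static int my_dev_register(struct my_dev *d, struct kobj_type *ktype,
			   struct kobject *parent, const char *name)
{
	/* start from a clean kobject in case d was registered before */
	memset(&d->kobj, 0, sizeof(d->kobj));
	return kobject_init_and_add(&d->kobj, ktype, parent, "%s", name);
}
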
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c index f25e7c6b2d27..40bca48abc12 100644 --- a/drivers/base/transport_class.c +++ b/drivers/base/transport_class.c | |||
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont, | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * transport_setup_device - declare a new dev for transport class association | 129 | * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. |
130 | * but don't make it visible yet. | ||
131 | * | ||
132 | * @dev: the generic device representing the entity being added | 130 | * @dev: the generic device representing the entity being added |
133 | * | 131 | * |
134 | * Usually, dev represents some component in the HBA system (either | 132 | * Usually, dev represents some component in the HBA system (either |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9715be3f2487..55bd35c0f082 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/blkpg.h> | 33 | #include <linux/blkpg.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/seq_file.h> | ||
36 | #include <linux/init.h> | 37 | #include <linux/init.h> |
37 | #include <linux/hdreg.h> | 38 | #include <linux/hdreg.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
@@ -131,7 +132,6 @@ static struct board_type products[] = { | |||
131 | /*define how many times we will try a command because of bus resets */ | 132 | /*define how many times we will try a command because of bus resets */ |
132 | #define MAX_CMD_RETRIES 3 | 133 | #define MAX_CMD_RETRIES 3 |
133 | 134 | ||
134 | #define READ_AHEAD 1024 | ||
135 | #define MAX_CTLR 32 | 135 | #define MAX_CTLR 32 |
136 | 136 | ||
137 | /* Originally cciss driver only supports 8 major numbers */ | 137 | /* Originally cciss driver only supports 8 major numbers */ |
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | |||
174 | static void fail_all_cmds(unsigned long ctlr); | 174 | static void fail_all_cmds(unsigned long ctlr); |
175 | 175 | ||
176 | #ifdef CONFIG_PROC_FS | 176 | #ifdef CONFIG_PROC_FS |
177 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | ||
178 | int length, int *eof, void *data); | ||
179 | static void cciss_procinit(int i); | 177 | static void cciss_procinit(int i); |
180 | #else | 178 | #else |
181 | static void cciss_procinit(int i) | 179 | static void cciss_procinit(int i) |
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr, | |||
240 | */ | 238 | */ |
241 | #define ENG_GIG 1000000000 | 239 | #define ENG_GIG 1000000000 |
242 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 240 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
241 | #define ENGAGE_SCSI "engage scsi" | ||
243 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 242 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
244 | "UNKNOWN" | 243 | "UNKNOWN" |
245 | }; | 244 | }; |
246 | 245 | ||
247 | static struct proc_dir_entry *proc_cciss; | 246 | static struct proc_dir_entry *proc_cciss; |
248 | 247 | ||
249 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | 248 | static void cciss_seq_show_header(struct seq_file *seq) |
250 | int length, int *eof, void *data) | ||
251 | { | 249 | { |
252 | off_t pos = 0; | 250 | ctlr_info_t *h = seq->private; |
253 | off_t len = 0; | 251 | |
254 | int size, i, ctlr; | 252 | seq_printf(seq, "%s: HP %s Controller\n" |
255 | ctlr_info_t *h = (ctlr_info_t *) data; | 253 | "Board ID: 0x%08lx\n" |
256 | drive_info_struct *drv; | 254 | "Firmware Version: %c%c%c%c\n" |
257 | unsigned long flags; | 255 | "IRQ: %d\n" |
258 | sector_t vol_sz, vol_sz_frac; | 256 | "Logical drives: %d\n" |
257 | "Current Q depth: %d\n" | ||
258 | "Current # commands on controller: %d\n" | ||
259 | "Max Q depth since init: %d\n" | ||
260 | "Max # commands on controller since init: %d\n" | ||
261 | "Max SG entries since init: %d\n", | ||
262 | h->devname, | ||
263 | h->product_name, | ||
264 | (unsigned long)h->board_id, | ||
265 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
266 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
267 | h->num_luns, | ||
268 | h->Qdepth, h->commands_outstanding, | ||
269 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
259 | 270 | ||
260 | ctlr = h->ctlr; | 271 | #ifdef CONFIG_CISS_SCSI_TAPE |
272 | cciss_seq_tape_report(seq, h->ctlr); | ||
273 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
274 | } | ||
275 | |||
276 | static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) | ||
277 | { | ||
278 | ctlr_info_t *h = seq->private; | ||
279 | unsigned ctlr = h->ctlr; | ||
280 | unsigned long flags; | ||
261 | 281 | ||
262 | /* prevent displaying bogus info during configuration | 282 | /* prevent displaying bogus info during configuration |
263 | * or deconfiguration of a logical volume | 283 | * or deconfiguration of a logical volume |
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | |||
265 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 285 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
266 | if (h->busy_configuring) { | 286 | if (h->busy_configuring) { |
267 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 287 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
268 | return -EBUSY; | 288 | return ERR_PTR(-EBUSY); |
269 | } | 289 | } |
270 | h->busy_configuring = 1; | 290 | h->busy_configuring = 1; |
271 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 291 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
272 | 292 | ||
273 | size = sprintf(buffer, "%s: HP %s Controller\n" | 293 | if (*pos == 0) |
274 | "Board ID: 0x%08lx\n" | 294 | cciss_seq_show_header(seq); |
275 | "Firmware Version: %c%c%c%c\n" | ||
276 | "IRQ: %d\n" | ||
277 | "Logical drives: %d\n" | ||
278 | "Max sectors: %d\n" | ||
279 | "Current Q depth: %d\n" | ||
280 | "Current # commands on controller: %d\n" | ||
281 | "Max Q depth since init: %d\n" | ||
282 | "Max # commands on controller since init: %d\n" | ||
283 | "Max SG entries since init: %d\n\n", | ||
284 | h->devname, | ||
285 | h->product_name, | ||
286 | (unsigned long)h->board_id, | ||
287 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
288 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
289 | h->num_luns, | ||
290 | h->cciss_max_sectors, | ||
291 | h->Qdepth, h->commands_outstanding, | ||
292 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
293 | |||
294 | pos += size; | ||
295 | len += size; | ||
296 | cciss_proc_tape_report(ctlr, buffer, &pos, &len); | ||
297 | for (i = 0; i <= h->highest_lun; i++) { | ||
298 | |||
299 | drv = &h->drv[i]; | ||
300 | if (drv->heads == 0) | ||
301 | continue; | ||
302 | 295 | ||
303 | vol_sz = drv->nr_blocks; | 296 | return pos; |
304 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | 297 | } |
305 | vol_sz_frac *= 100; | 298 | |
306 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 299 | static int cciss_seq_show(struct seq_file *seq, void *v) |
300 | { | ||
301 | sector_t vol_sz, vol_sz_frac; | ||
302 | ctlr_info_t *h = seq->private; | ||
303 | unsigned ctlr = h->ctlr; | ||
304 | loff_t *pos = v; | ||
305 | drive_info_struct *drv = &h->drv[*pos]; | ||
306 | |||
307 | if (*pos > h->highest_lun) | ||
308 | return 0; | ||
309 | |||
310 | if (drv->heads == 0) | ||
311 | return 0; | ||
312 | |||
313 | vol_sz = drv->nr_blocks; | ||
314 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | ||
315 | vol_sz_frac *= 100; | ||
316 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | ||
317 | |||
318 | if (drv->raid_level > 5) | ||
319 | drv->raid_level = RAID_UNKNOWN; | ||
320 | seq_printf(seq, "cciss/c%dd%d:" | ||
321 | "\t%4u.%02uGB\tRAID %s\n", | ||
322 | ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, | ||
323 | raid_label[drv->raid_level]); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
328 | { | ||
329 | ctlr_info_t *h = seq->private; | ||
330 | |||
331 | if (*pos > h->highest_lun) | ||
332 | return NULL; | ||
333 | *pos += 1; | ||
334 | |||
335 | return pos; | ||
336 | } | ||
337 | |||
338 | static void cciss_seq_stop(struct seq_file *seq, void *v) | ||
339 | { | ||
340 | ctlr_info_t *h = seq->private; | ||
341 | |||
342 | /* Only reset h->busy_configuring if we succeeded in setting | ||
343 | * it during cciss_seq_start. */ | ||
344 | if (v == ERR_PTR(-EBUSY)) | ||
345 | return; | ||
307 | 346 | ||
308 | if (drv->raid_level > 5) | ||
309 | drv->raid_level = RAID_UNKNOWN; | ||
310 | size = sprintf(buffer + len, "cciss/c%dd%d:" | ||
311 | "\t%4u.%02uGB\tRAID %s\n", | ||
312 | ctlr, i, (int)vol_sz, (int)vol_sz_frac, | ||
313 | raid_label[drv->raid_level]); | ||
314 | pos += size; | ||
315 | len += size; | ||
316 | } | ||
317 | |||
318 | *eof = 1; | ||
319 | *start = buffer + offset; | ||
320 | len -= offset; | ||
321 | if (len > length) | ||
322 | len = length; | ||
323 | h->busy_configuring = 0; | 347 | h->busy_configuring = 0; |
324 | return len; | ||
325 | } | 348 | } |
326 | 349 | ||
327 | static int | 350 | static struct seq_operations cciss_seq_ops = { |
328 | cciss_proc_write(struct file *file, const char __user *buffer, | 351 | .start = cciss_seq_start, |
329 | unsigned long count, void *data) | 352 | .show = cciss_seq_show, |
353 | .next = cciss_seq_next, | ||
354 | .stop = cciss_seq_stop, | ||
355 | }; | ||
356 | |||
357 | static int cciss_seq_open(struct inode *inode, struct file *file) | ||
330 | { | 358 | { |
331 | unsigned char cmd[80]; | 359 | int ret = seq_open(file, &cciss_seq_ops); |
332 | int len; | 360 | struct seq_file *seq = file->private_data; |
333 | #ifdef CONFIG_CISS_SCSI_TAPE | 361 | |
334 | ctlr_info_t *h = (ctlr_info_t *) data; | 362 | if (!ret) |
335 | int rc; | 363 | seq->private = PDE(inode)->data; |
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static ssize_t | ||
369 | cciss_proc_write(struct file *file, const char __user *buf, | ||
370 | size_t length, loff_t *ppos) | ||
371 | { | ||
372 | int err; | ||
373 | char *buffer; | ||
374 | |||
375 | #ifndef CONFIG_CISS_SCSI_TAPE | ||
376 | return -EINVAL; | ||
336 | #endif | 377 | #endif |
337 | 378 | ||
338 | if (count > sizeof(cmd) - 1) | 379 | if (!buf || length > PAGE_SIZE - 1) |
339 | return -EINVAL; | 380 | return -EINVAL; |
340 | if (copy_from_user(cmd, buffer, count)) | 381 | |
341 | return -EFAULT; | 382 | buffer = (char *)__get_free_page(GFP_KERNEL); |
342 | cmd[count] = '\0'; | 383 | if (!buffer) |
343 | len = strlen(cmd); // above 3 lines ensure safety | 384 | return -ENOMEM; |
344 | if (len && cmd[len - 1] == '\n') | 385 | |
345 | cmd[--len] = '\0'; | 386 | err = -EFAULT; |
346 | # ifdef CONFIG_CISS_SCSI_TAPE | 387 | if (copy_from_user(buffer, buf, length)) |
347 | if (strcmp("engage scsi", cmd) == 0) { | 388 | goto out; |
389 | buffer[length] = '\0'; | ||
390 | |||
391 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
392 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { | ||
393 | struct seq_file *seq = file->private_data; | ||
394 | ctlr_info_t *h = seq->private; | ||
395 | int rc; | ||
396 | |||
348 | rc = cciss_engage_scsi(h->ctlr); | 397 | rc = cciss_engage_scsi(h->ctlr); |
349 | if (rc != 0) | 398 | if (rc != 0) |
350 | return -rc; | 399 | err = -rc; |
351 | return count; | 400 | else |
352 | } | 401 | err = length; |
402 | } else | ||
403 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
404 | err = -EINVAL; | ||
353 | /* might be nice to have "disengage" too, but it's not | 405 | /* might be nice to have "disengage" too, but it's not |
354 | safely possible. (only 1 module use count, lock issues.) */ | 406 | safely possible. (only 1 module use count, lock issues.) */ |
355 | # endif | 407 | |
356 | return -EINVAL; | 408 | out: |
409 | free_page((unsigned long)buffer); | ||
410 | return err; | ||
357 | } | 411 | } |
358 | 412 | ||
359 | /* | 413 | static struct file_operations cciss_proc_fops = { |
360 | * Get us a file in /proc/cciss that says something about each controller. | 414 | .owner = THIS_MODULE, |
361 | * Create /proc/cciss if it doesn't exist yet. | 415 | .open = cciss_seq_open, |
362 | */ | 416 | .read = seq_read, |
417 | .llseek = seq_lseek, | ||
418 | .release = seq_release, | ||
419 | .write = cciss_proc_write, | ||
420 | }; | ||
421 | |||
363 | static void __devinit cciss_procinit(int i) | 422 | static void __devinit cciss_procinit(int i) |
364 | { | 423 | { |
365 | struct proc_dir_entry *pde; | 424 | struct proc_dir_entry *pde; |
366 | 425 | ||
367 | if (proc_cciss == NULL) { | 426 | if (proc_cciss == NULL) |
368 | proc_cciss = proc_mkdir("cciss", proc_root_driver); | 427 | proc_cciss = proc_mkdir("cciss", proc_root_driver); |
369 | if (!proc_cciss) | 428 | if (!proc_cciss) |
370 | return; | 429 | return; |
371 | } | 430 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | |
431 | S_IROTH, proc_cciss, | ||
432 | &cciss_proc_fops); | ||
433 | if (!pde) | ||
434 | return; | ||
372 | 435 | ||
373 | pde = create_proc_read_entry(hba[i]->devname, | 436 | pde->data = hba[i]; |
374 | S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, | ||
375 | proc_cciss, cciss_proc_get_info, hba[i]); | ||
376 | pde->write_proc = cciss_proc_write; | ||
377 | } | 437 | } |
378 | #endif /* CONFIG_PROC_FS */ | 438 | #endif /* CONFIG_PROC_FS */ |
379 | 439 | ||
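
The block above converts the cciss /proc file from the old read_proc/write_proc callbacks to the seq_file iterator interface, with the per-controller data handed over via PDE(inode)->data in cciss_seq_open(). As a reference for the shape of that interface, here is a minimal, self-contained sketch (the ex_* names and NITEMS are hypothetical, not taken from the patch):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#define NITEMS 4				/* hypothetical item count */

static void *ex_seq_start(struct seq_file *m, loff_t *pos)
{
	return (*pos < NITEMS) ? pos : NULL;	/* NULL ends the iteration */
}

static void *ex_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return (*pos < NITEMS) ? pos : NULL;
}

static void ex_seq_stop(struct seq_file *m, void *v)
{
}

static int ex_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "item %lld\n", *(loff_t *)v);
	return 0;
}

static struct seq_operations ex_seq_ops = {
	.start	= ex_seq_start,
	.next	= ex_seq_next,
	.stop	= ex_seq_stop,
	.show	= ex_seq_show,
};

static int ex_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ex_seq_ops);
}

static const struct file_operations ex_fops = {
	.owner		= THIS_MODULE,
	.open		= ex_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
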
@@ -1341,7 +1401,6 @@ geo_inq: | |||
1341 | disk->private_data = &h->drv[drv_index]; | 1401 | disk->private_data = &h->drv[drv_index]; |
1342 | 1402 | ||
1343 | /* Set up queue information */ | 1403 | /* Set up queue information */ |
1344 | disk->queue->backing_dev_info.ra_pages = READ_AHEAD; | ||
1345 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); | 1404 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); |
1346 | 1405 | ||
1347 | /* This is a hardware imposed limit. */ | 1406 | /* This is a hardware imposed limit. */ |
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3434 | } | 3493 | } |
3435 | drv->queue = q; | 3494 | drv->queue = q; |
3436 | 3495 | ||
3437 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
3438 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 3496 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); |
3439 | 3497 | ||
3440 | /* This is a hardware imposed limit. */ | 3498 | /* This is a hardware imposed limit. */ |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 55178e9973a0..45ac09300eb3 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr) | |||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static void | 1406 | static void |
1407 | cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) | 1407 | cciss_seq_tape_report(struct seq_file *seq, int ctlr) |
1408 | { | 1408 | { |
1409 | unsigned long flags; | 1409 | unsigned long flags; |
1410 | int size; | ||
1411 | |||
1412 | *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline | ||
1413 | 1410 | ||
1414 | CPQ_TAPE_LOCK(ctlr, flags); | 1411 | CPQ_TAPE_LOCK(ctlr, flags); |
1415 | size = sprintf(buffer + *len, | 1412 | seq_printf(seq, |
1416 | "Sequential access devices: %d\n\n", | 1413 | "Sequential access devices: %d\n\n", |
1417 | ccissscsi[ctlr].ndevices); | 1414 | ccissscsi[ctlr].ndevices); |
1418 | CPQ_TAPE_UNLOCK(ctlr, flags); | 1415 | CPQ_TAPE_UNLOCK(ctlr, flags); |
1419 | *pos += size; *len += size; | ||
1420 | } | 1416 | } |
1421 | 1417 | ||
1418 | |||
1422 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | 1419 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
1423 | * complaining. Doing a host- or bus-reset can't do anything good here. | 1420 | * complaining. Doing a host- or bus-reset can't do anything good here. |
1424 | * Despite what it might say in scsi_error.c, there may well be commands | 1421 | * Despite what it might say in scsi_error.c, there may well be commands |
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1498 | #define cciss_scsi_setup(cntl_num) | 1495 | #define cciss_scsi_setup(cntl_num) |
1499 | #define cciss_unregister_scsi(ctlr) | 1496 | #define cciss_unregister_scsi(ctlr) |
1500 | #define cciss_register_scsi(ctlr) | 1497 | #define cciss_register_scsi(ctlr) |
1501 | #define cciss_proc_tape_report(ctlr, buffer, pos, len) | ||
1502 | 1498 | ||
1503 | #endif /* CONFIG_CISS_SCSI_TAPE */ | 1499 | #endif /* CONFIG_CISS_SCSI_TAPE */ |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 32c79a55511b..7652e87d60c5 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -217,7 +217,6 @@ static int use_virtual_dma; | |||
217 | */ | 217 | */ |
218 | 218 | ||
219 | static DEFINE_SPINLOCK(floppy_lock); | 219 | static DEFINE_SPINLOCK(floppy_lock); |
220 | static struct completion device_release; | ||
221 | 220 | ||
222 | static unsigned short virtual_dma_port = 0x3f0; | 221 | static unsigned short virtual_dma_port = 0x3f0; |
223 | irqreturn_t floppy_interrupt(int irq, void *dev_id); | 222 | irqreturn_t floppy_interrupt(int irq, void *dev_id); |
@@ -4144,7 +4143,6 @@ DEVICE_ATTR(cmos,S_IRUGO,floppy_cmos_show,NULL); | |||
4144 | 4143 | ||
4145 | static void floppy_device_release(struct device *dev) | 4144 | static void floppy_device_release(struct device *dev) |
4146 | { | 4145 | { |
4147 | complete(&device_release); | ||
4148 | } | 4146 | } |
4149 | 4147 | ||
4150 | static struct platform_device floppy_device[N_DRIVE]; | 4148 | static struct platform_device floppy_device[N_DRIVE]; |
@@ -4539,7 +4537,6 @@ void cleanup_module(void) | |||
4539 | { | 4537 | { |
4540 | int drive; | 4538 | int drive; |
4541 | 4539 | ||
4542 | init_completion(&device_release); | ||
4543 | blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); | 4540 | blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); |
4544 | unregister_blkdev(FLOPPY_MAJOR, "fd"); | 4541 | unregister_blkdev(FLOPPY_MAJOR, "fd"); |
4545 | 4542 | ||
@@ -4564,8 +4561,6 @@ void cleanup_module(void) | |||
4564 | 4561 | ||
4565 | /* eject disk, if any */ | 4562 | /* eject disk, if any */ |
4566 | fd_eject(0); | 4563 | fd_eject(0); |
4567 | |||
4568 | wait_for_completion(&device_release); | ||
4569 | } | 4564 | } |
4570 | 4565 | ||
4571 | module_param(floppy, charp, 0); | 4566 | module_param(floppy, charp, 0); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 674cd66dcaba..18feb1c7c33b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd) | |||
849 | /* | 849 | /* |
850 | * speed is given as the normal factor, e.g. 4 for 4x | 850 | * speed is given as the normal factor, e.g. 4 for 4x |
851 | */ | 851 | */ |
852 | static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) | 852 | static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, |
853 | unsigned write_speed, unsigned read_speed) | ||
853 | { | 854 | { |
854 | struct packet_command cgc; | 855 | struct packet_command cgc; |
855 | struct request_sense sense; | 856 | struct request_sense sense; |
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, | |||
1776 | return pkt_generic_packet(pd, &cgc); | 1777 | return pkt_generic_packet(pd, &cgc); |
1777 | } | 1778 | } |
1778 | 1779 | ||
1779 | static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | 1780 | static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, |
1781 | long *last_written) | ||
1780 | { | 1782 | { |
1781 | disc_information di; | 1783 | disc_information di; |
1782 | track_information ti; | 1784 | track_information ti; |
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | |||
1813 | /* | 1815 | /* |
1814 | * write mode select package based on pd->settings | 1816 | * write mode select package based on pd->settings |
1815 | */ | 1817 | */ |
1816 | static int pkt_set_write_settings(struct pktcdvd_device *pd) | 1818 | static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) |
1817 | { | 1819 | { |
1818 | struct packet_command cgc; | 1820 | struct packet_command cgc; |
1819 | struct request_sense sense; | 1821 | struct request_sense sense; |
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) | |||
1972 | return 1; | 1974 | return 1; |
1973 | } | 1975 | } |
1974 | 1976 | ||
1975 | static int pkt_probe_settings(struct pktcdvd_device *pd) | 1977 | static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) |
1976 | { | 1978 | { |
1977 | struct packet_command cgc; | 1979 | struct packet_command cgc; |
1978 | unsigned char buf[12]; | 1980 | unsigned char buf[12]; |
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) | |||
2071 | /* | 2073 | /* |
2072 | * enable/disable write caching on drive | 2074 | * enable/disable write caching on drive |
2073 | */ | 2075 | */ |
2074 | static int pkt_write_caching(struct pktcdvd_device *pd, int set) | 2076 | static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, |
2077 | int set) | ||
2075 | { | 2078 | { |
2076 | struct packet_command cgc; | 2079 | struct packet_command cgc; |
2077 | struct request_sense sense; | 2080 | struct request_sense sense; |
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) | |||
2116 | /* | 2119 | /* |
2117 | * Returns drive maximum write speed | 2120 | * Returns drive maximum write speed |
2118 | */ | 2121 | */ |
2119 | static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) | 2122 | static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, |
2123 | unsigned *write_speed) | ||
2120 | { | 2124 | { |
2121 | struct packet_command cgc; | 2125 | struct packet_command cgc; |
2122 | struct request_sense sense; | 2126 | struct request_sense sense; |
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = { | |||
2177 | /* | 2181 | /* |
2178 | * reads the maximum media speed from ATIP | 2182 | * reads the maximum media speed from ATIP |
2179 | */ | 2183 | */ |
2180 | static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | 2184 | static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, |
2185 | unsigned *speed) | ||
2181 | { | 2186 | { |
2182 | struct packet_command cgc; | 2187 | struct packet_command cgc; |
2183 | struct request_sense sense; | 2188 | struct request_sense sense; |
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | |||
2249 | } | 2254 | } |
2250 | } | 2255 | } |
2251 | 2256 | ||
2252 | static int pkt_perform_opc(struct pktcdvd_device *pd) | 2257 | static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) |
2253 | { | 2258 | { |
2254 | struct packet_command cgc; | 2259 | struct packet_command cgc; |
2255 | struct request_sense sense; | 2260 | struct request_sense sense; |
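
The pktcdvd hunks mark helpers that keep a struct packet_command and struct request_sense on the stack as noinline_for_stack, so the compiler cannot inline those large frames into their callers and inflate the callers' stack usage. A hypothetical helper showing the annotation (my_dev and my_send are made-up names):

static noinline_for_stack int fill_and_send(struct my_dev *dev)
{
	unsigned char buf[512];			/* deliberately large local */

	memset(buf, 0, sizeof(buf));
	/* ... build the command in buf ... */
	return my_send(dev, buf, sizeof(buf));
}
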
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 9e61fca46117..41ca721d2523 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
@@ -528,8 +528,7 @@ static int block_event_to_scatterlist(const struct vioblocklpevent *bevent, | |||
528 | numsg = VIOMAXBLOCKDMA; | 528 | numsg = VIOMAXBLOCKDMA; |
529 | 529 | ||
530 | *total_len = 0; | 530 | *total_len = 0; |
531 | memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA); | 531 | sg_init_table(sg, VIOMAXBLOCKDMA); |
532 | |||
533 | for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) { | 532 | for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) { |
534 | sg_dma_address(&sg[i]) = rw_data->dma_info[i].token; | 533 | sg_dma_address(&sg[i]) = rw_data->dma_info[i].token; |
535 | sg_dma_len(&sg[i]) = rw_data->dma_info[i].len; | 534 | sg_dma_len(&sg[i]) = rw_data->dma_info[i].len; |
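
The viodasd change replaces a plain memset() of the scatterlist with sg_init_table(), which zeroes the array, marks the last entry as the end of the table, and (with CONFIG_DEBUG_SG) adds the debug cookie the scatterlist helpers check. A small sketch of the same initialization (init_dma_sg is a made-up wrapper around the calls used above):

#include <linux/scatterlist.h>

static void init_dma_sg(struct scatterlist *sg, dma_addr_t token, u32 len)
{
	sg_init_table(sg, VIOMAXBLOCKDMA);	/* zero + end-of-table mark */
	sg_dma_address(&sg[0]) = token;
	sg_dma_len(&sg[0]) = len;
}
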
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 3b1a68d6eddb..0cfbe8c594a5 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -238,6 +238,7 @@ static int virtblk_probe(struct virtio_device *vdev) | |||
238 | vblk->disk->first_minor = index_to_minor(index); | 238 | vblk->disk->first_minor = index_to_minor(index); |
239 | vblk->disk->private_data = vblk; | 239 | vblk->disk->private_data = vblk; |
240 | vblk->disk->fops = &virtblk_fops; | 240 | vblk->disk->fops = &virtblk_fops; |
241 | vblk->disk->driverfs_dev = &vdev->dev; | ||
241 | index++; | 242 | index++; |
242 | 243 | ||
243 | /* If barriers are supported, tell block layer that queue is ordered */ | 244 | /* If barriers are supported, tell block layer that queue is ordered */ |
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index 4f8a744c90b7..8b884f87d8b7 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c | |||
@@ -116,6 +116,7 @@ static struct usb_device_id blacklist_ids[] = { | |||
116 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, | 116 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, |
117 | 117 | ||
118 | /* Broadcom BCM2045 */ | 118 | /* Broadcom BCM2045 */ |
119 | { USB_DEVICE(0x0a5c, 0x2039), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | ||
119 | { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | 120 | { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, |
120 | 121 | ||
121 | /* IBM/Lenovo ThinkPad with Broadcom chip */ | 122 | /* IBM/Lenovo ThinkPad with Broadcom chip */ |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index db259e60289b..12f5baea439b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1152,8 +1152,8 @@ clean_up_and_return: | |||
1152 | /* This code is similar to that in open_for_data. The routine is called | 1152 | /* This code is similar to that in open_for_data. The routine is called |
1153 | whenever an audio play operation is requested. | 1153 | whenever an audio play operation is requested. |
1154 | */ | 1154 | */ |
1155 | int check_for_audio_disc(struct cdrom_device_info * cdi, | 1155 | static int check_for_audio_disc(struct cdrom_device_info * cdi, |
1156 | struct cdrom_device_ops * cdo) | 1156 | struct cdrom_device_ops * cdo) |
1157 | { | 1157 | { |
1158 | int ret; | 1158 | int ret; |
1159 | tracktype tracks; | 1159 | tracktype tracks; |
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped index 0aa419a61767..d2208dfe3f67 100644 --- a/drivers/char/defkeymap.c_shipped +++ b/drivers/char/defkeymap.c_shipped | |||
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | struct kbdiacruc accent_table[MAX_DIACR] = { | 225 | struct kbdiacruc accent_table[MAX_DIACR] = { |
226 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 226 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
227 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 227 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
228 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 228 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
229 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 229 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
230 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 230 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
231 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 231 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
232 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 232 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
233 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 233 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
234 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 234 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
235 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 235 | {',', 'C', 0307}, {',', 'c', 0347}, |
236 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 236 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
237 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 237 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
238 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 238 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
239 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 239 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
240 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 240 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
241 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 241 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
242 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 242 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
243 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 243 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
244 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 244 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
245 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 245 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
246 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 246 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
247 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 247 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
248 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 248 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
249 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 249 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
250 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 250 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
251 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 251 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
252 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 252 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
253 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 253 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
254 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 254 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
255 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 255 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
256 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 256 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
257 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 257 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
258 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 258 | {'s', 's', 0337}, {'"', 'y', 0377}, |
259 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 259 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
260 | }; | 260 | }; |
261 | 261 | ||
262 | unsigned int accent_table_size = 68; | 262 | unsigned int accent_table_size = 68; |
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index c01e26d9ee5e..f3fe62067344 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -2484,6 +2484,7 @@ static int __init espserial_init(void) | |||
2484 | return 0; | 2484 | return 0; |
2485 | } | 2485 | } |
2486 | 2486 | ||
2487 | spin_lock_init(&info->lock); | ||
2487 | /* rx_trigger, tx_trigger are needed by autoconfig */ | 2488 | /* rx_trigger, tx_trigger are needed by autoconfig */ |
2488 | info->config.rx_trigger = rx_trigger; | 2489 | info->config.rx_trigger = rx_trigger; |
2489 | info->config.tx_trigger = tx_trigger; | 2490 | info->config.tx_trigger = tx_trigger; |
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 85d596a3c18c..eba2883b630e 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev, | |||
1527 | msleep(10); | 1527 | msleep(10); |
1528 | 1528 | ||
1529 | portcount = inw(base + 0x2); | 1529 | portcount = inw(base + 0x2); |
1530 | if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && | 1530 | if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && |
1531 | portcount != 8 && portcount != 16)) { | 1531 | portcount != 8 && portcount != 16)) { |
1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", | 1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", |
1533 | card + 1); | 1533 | card + 1); |
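
The isicom change is a pure C precedence fix: '!' binds tighter than '&', so the old test !inw(base + 0xe) & 0x1 evaluated as (!inw(...)) & 0x1 and effectively checked whether the whole status word was zero rather than testing bit 0. A tiny illustration with a plain integer:

static int bit0_is_clear(unsigned int status)
{
	/* wrong: parsed as (!status) & 0x1, i.e. "status == 0" */
	/* return !status & 0x1; */

	/* right: mask the bit first, then negate the result */
	return !(status & 0x1);
}
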
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index dfaab2322de3..6d0dc5f9b6bb 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
@@ -190,6 +190,14 @@ enum card_type { | |||
190 | F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */ | 190 | F32_8 = 8192, /* 3072 bytes downl. + 1024 bytes uplink * 2 -> 8192 */ |
191 | }; | 191 | }; |
192 | 192 | ||
193 | /* Initialization states a card can be in */ | ||
194 | enum card_state { | ||
195 | NOZOMI_STATE_UNKNOWN = 0, | ||
196 | NOZOMI_STATE_ENABLED = 1, /* pci device enabled */ | ||
197 | NOZOMI_STATE_ALLOCATED = 2, /* config setup done */ | ||
198 | NOZOMI_STATE_READY = 3, /* flowcontrols received */ | ||
199 | }; | ||
200 | |||
193 | /* Two different toggle channels exist */ | 201 | /* Two different toggle channels exist */ |
194 | enum channel_type { | 202 | enum channel_type { |
195 | CH_A = 0, | 203 | CH_A = 0, |
@@ -385,6 +393,7 @@ struct nozomi { | |||
385 | spinlock_t spin_mutex; /* secures access to registers and tty */ | 393 | spinlock_t spin_mutex; /* secures access to registers and tty */ |
386 | 394 | ||
387 | unsigned int index_start; | 395 | unsigned int index_start; |
396 | enum card_state state; | ||
388 | u32 open_ttys; | 397 | u32 open_ttys; |
389 | }; | 398 | }; |
390 | 399 | ||
@@ -686,6 +695,7 @@ static int nozomi_read_config_table(struct nozomi *dc) | |||
686 | dc->last_ier = dc->last_ier | CTRL_DL; | 695 | dc->last_ier = dc->last_ier | CTRL_DL; |
687 | writew(dc->last_ier, dc->reg_ier); | 696 | writew(dc->last_ier, dc->reg_ier); |
688 | 697 | ||
698 | dc->state = NOZOMI_STATE_ALLOCATED; | ||
689 | dev_info(&dc->pdev->dev, "Initialization OK!\n"); | 699 | dev_info(&dc->pdev->dev, "Initialization OK!\n"); |
690 | return 1; | 700 | return 1; |
691 | } | 701 | } |
@@ -944,6 +954,14 @@ static int receive_flow_control(struct nozomi *dc) | |||
944 | case CTRL_APP2: | 954 | case CTRL_APP2: |
945 | port = PORT_APP2; | 955 | port = PORT_APP2; |
946 | enable_ier = APP2_DL; | 956 | enable_ier = APP2_DL; |
957 | if (dc->state == NOZOMI_STATE_ALLOCATED) { | ||
958 | /* | ||
959 | * After card initialization the flow control | ||
960 | * received for APP2 is always the last | ||
961 | */ | ||
962 | dc->state = NOZOMI_STATE_READY; | ||
963 | dev_info(&dc->pdev->dev, "Device READY!\n"); | ||
964 | } | ||
947 | break; | 965 | break; |
948 | default: | 966 | default: |
949 | dev_err(&dc->pdev->dev, | 967 | dev_err(&dc->pdev->dev, |
@@ -1366,22 +1384,12 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
1366 | 1384 | ||
1367 | dc->pdev = pdev; | 1385 | dc->pdev = pdev; |
1368 | 1386 | ||
1369 | /* Find out what card type it is */ | ||
1370 | nozomi_get_card_type(dc); | ||
1371 | |||
1372 | ret = pci_enable_device(dc->pdev); | 1387 | ret = pci_enable_device(dc->pdev); |
1373 | if (ret) { | 1388 | if (ret) { |
1374 | dev_err(&pdev->dev, "Failed to enable PCI Device\n"); | 1389 | dev_err(&pdev->dev, "Failed to enable PCI Device\n"); |
1375 | goto err_free; | 1390 | goto err_free; |
1376 | } | 1391 | } |
1377 | 1392 | ||
1378 | start = pci_resource_start(dc->pdev, 0); | ||
1379 | if (start == 0) { | ||
1380 | dev_err(&pdev->dev, "No I/O address for card detected\n"); | ||
1381 | ret = -ENODEV; | ||
1382 | goto err_disable_device; | ||
1383 | } | ||
1384 | |||
1385 | ret = pci_request_regions(dc->pdev, NOZOMI_NAME); | 1393 | ret = pci_request_regions(dc->pdev, NOZOMI_NAME); |
1386 | if (ret) { | 1394 | if (ret) { |
1387 | dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", | 1395 | dev_err(&pdev->dev, "I/O address 0x%04x already in use\n", |
@@ -1389,6 +1397,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
1389 | goto err_disable_device; | 1397 | goto err_disable_device; |
1390 | } | 1398 | } |
1391 | 1399 | ||
1400 | start = pci_resource_start(dc->pdev, 0); | ||
1401 | if (start == 0) { | ||
1402 | dev_err(&pdev->dev, "No I/O address for card detected\n"); | ||
1403 | ret = -ENODEV; | ||
1404 | goto err_rel_regs; | ||
1405 | } | ||
1406 | |||
1407 | /* Find out what card type it is */ | ||
1408 | nozomi_get_card_type(dc); | ||
1409 | |||
1392 | dc->base_addr = ioremap(start, dc->card_type); | 1410 | dc->base_addr = ioremap(start, dc->card_type); |
1393 | if (!dc->base_addr) { | 1411 | if (!dc->base_addr) { |
1394 | dev_err(&pdev->dev, "Unable to map card MMIO\n"); | 1412 | dev_err(&pdev->dev, "Unable to map card MMIO\n"); |
@@ -1425,6 +1443,14 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
1425 | dc->index_start = ndev_idx * MAX_PORT; | 1443 | dc->index_start = ndev_idx * MAX_PORT; |
1426 | ndevs[ndev_idx] = dc; | 1444 | ndevs[ndev_idx] = dc; |
1427 | 1445 | ||
1446 | pci_set_drvdata(pdev, dc); | ||
1447 | |||
1448 | /* Enable RESET interrupt */ | ||
1449 | dc->last_ier = RESET; | ||
1450 | iowrite16(dc->last_ier, dc->reg_ier); | ||
1451 | |||
1452 | dc->state = NOZOMI_STATE_ENABLED; | ||
1453 | |||
1428 | for (i = 0; i < MAX_PORT; i++) { | 1454 | for (i = 0; i < MAX_PORT; i++) { |
1429 | mutex_init(&dc->port[i].tty_sem); | 1455 | mutex_init(&dc->port[i].tty_sem); |
1430 | dc->port[i].tty_open_count = 0; | 1456 | dc->port[i].tty_open_count = 0; |
@@ -1433,12 +1459,6 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
1433 | &pdev->dev); | 1459 | &pdev->dev); |
1434 | } | 1460 | } |
1435 | 1461 | ||
1436 | /* Enable RESET interrupt. */ | ||
1437 | dc->last_ier = RESET; | ||
1438 | writew(dc->last_ier, dc->reg_ier); | ||
1439 | |||
1440 | pci_set_drvdata(pdev, dc); | ||
1441 | |||
1442 | return 0; | 1462 | return 0; |
1443 | 1463 | ||
1444 | err_free_sbuf: | 1464 | err_free_sbuf: |
@@ -1553,7 +1573,7 @@ static int ntty_open(struct tty_struct *tty, struct file *file) | |||
1553 | struct nozomi *dc = get_dc_by_tty(tty); | 1573 | struct nozomi *dc = get_dc_by_tty(tty); |
1554 | unsigned long flags; | 1574 | unsigned long flags; |
1555 | 1575 | ||
1556 | if (!port || !dc) | 1576 | if (!port || !dc || dc->state != NOZOMI_STATE_READY) |
1557 | return -ENODEV; | 1577 | return -ENODEV; |
1558 | 1578 | ||
1559 | if (mutex_lock_interruptible(&port->tty_sem)) | 1579 | if (mutex_lock_interruptible(&port->tty_sem)) |
@@ -1716,6 +1736,10 @@ static int ntty_tiocmget(struct tty_struct *tty, struct file *file) | |||
1716 | static int ntty_tiocmset(struct tty_struct *tty, struct file *file, | 1736 | static int ntty_tiocmset(struct tty_struct *tty, struct file *file, |
1717 | unsigned int set, unsigned int clear) | 1737 | unsigned int set, unsigned int clear) |
1718 | { | 1738 | { |
1739 | struct nozomi *dc = get_dc_by_tty(tty); | ||
1740 | unsigned long flags; | ||
1741 | |||
1742 | spin_lock_irqsave(&dc->spin_mutex, flags); | ||
1719 | if (set & TIOCM_RTS) | 1743 | if (set & TIOCM_RTS) |
1720 | set_rts(tty, 1); | 1744 | set_rts(tty, 1); |
1721 | else if (clear & TIOCM_RTS) | 1745 | else if (clear & TIOCM_RTS) |
@@ -1725,6 +1749,7 @@ static int ntty_tiocmset(struct tty_struct *tty, struct file *file, | |||
1725 | set_dtr(tty, 1); | 1749 | set_dtr(tty, 1); |
1726 | else if (clear & TIOCM_DTR) | 1750 | else if (clear & TIOCM_DTR) |
1727 | set_dtr(tty, 0); | 1751 | set_dtr(tty, 0); |
1752 | spin_unlock_irqrestore(&dc->spin_mutex, flags); | ||
1728 | 1753 | ||
1729 | return 0; | 1754 | return 0; |
1730 | } | 1755 | } |
@@ -1762,7 +1787,7 @@ static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp) | |||
1762 | icount.brk = cnow.brk; | 1787 | icount.brk = cnow.brk; |
1763 | icount.buf_overrun = cnow.buf_overrun; | 1788 | icount.buf_overrun = cnow.buf_overrun; |
1764 | 1789 | ||
1765 | return copy_to_user(argp, &icount, sizeof(icount)); | 1790 | return copy_to_user(argp, &icount, sizeof(icount)) ? -EFAULT : 0; |
1766 | } | 1791 | } |
1767 | 1792 | ||
1768 | static int ntty_ioctl(struct tty_struct *tty, struct file *file, | 1793 | static int ntty_ioctl(struct tty_struct *tty, struct file *file, |
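
One of the nozomi fixes above corrects a common return-convention mistake: copy_to_user() returns the number of bytes it could not copy, not an errno, so an ioctl path has to map a non-zero result to -EFAULT instead of returning it directly. A sketch with a hypothetical counter structure (my_counters is not from the patch):

static int my_get_counters(const struct my_counters *src, void __user *argp)
{
	struct my_counters tmp = *src;

	if (copy_to_user(argp, &tmp, sizeof(tmp)))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}
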
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index ff35230058d3..d793e68b3e0d 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network, | |||
377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { | 377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { |
378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; | 378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; |
379 | 379 | ||
380 | if (!tty) | ||
381 | continue; | ||
382 | |||
380 | /* | 383 | /* |
381 | * If it's associated with a tty (other than the RAS channel | 384 | * If it's associated with a tty (other than the RAS channel |
382 | * when we're online), then send the data to that tty. The RAS | 385 | * when we're online), then send the data to that tty. The RAS |
383 | * channel's data is handled above - it always goes through | 386 | * channel's data is handled above - it always goes through |
384 | * ppp_generic. | 387 | * ppp_generic. |
385 | */ | 388 | */ |
386 | if (tty && channel_idx == IPW_CHANNEL_RAS | 389 | if (channel_idx == IPW_CHANNEL_RAS |
387 | && (network->ras_control_lines & | 390 | && (network->ras_control_lines & |
388 | IPW_CONTROL_LINE_DCD) != 0 | 391 | IPW_CONTROL_LINE_DCD) != 0 |
389 | && ipwireless_tty_is_modem(tty)) { | 392 | && ipwireless_tty_is_modem(tty)) { |
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 8fc4fe4e38f1..3f9d0a9ac36d 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
@@ -1620,14 +1620,8 @@ static int __init rc_init_drivers(void) | |||
1620 | 1620 | ||
1621 | static void rc_release_drivers(void) | 1621 | static void rc_release_drivers(void) |
1622 | { | 1622 | { |
1623 | unsigned long flags; | ||
1624 | |||
1625 | spin_lock_irqsave(&riscom_lock, flags); | ||
1626 | |||
1627 | tty_unregister_driver(riscom_driver); | 1623 | tty_unregister_driver(riscom_driver); |
1628 | put_tty_driver(riscom_driver); | 1624 | put_tty_driver(riscom_driver); |
1629 | |||
1630 | spin_unlock_irqrestore(&riscom_lock, flags); | ||
1631 | } | 1625 | } |
1632 | 1626 | ||
1633 | #ifndef MODULE | 1627 | #ifndef MODULE |
@@ -1715,7 +1709,7 @@ static int __init riscom8_init_module (void) | |||
1715 | 1709 | ||
1716 | if (iobase || iobase1 || iobase2 || iobase3) { | 1710 | if (iobase || iobase1 || iobase2 || iobase3) { |
1717 | for(i = 0; i < RC_NBOARD; i++) | 1711 | for(i = 0; i < RC_NBOARD; i++) |
1718 | rc_board[0].base = 0; | 1712 | rc_board[i].base = 0; |
1719 | } | 1713 | } |
1720 | 1714 | ||
1721 | if (iobase) | 1715 | if (iobase) |
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 78b151c4d20f..5c3142b6f1fc 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1; | |||
110 | #define hpet_set_rtc_irq_bit(arg) 0 | 110 | #define hpet_set_rtc_irq_bit(arg) 0 |
111 | #define hpet_rtc_timer_init() do { } while (0) | 111 | #define hpet_rtc_timer_init() do { } while (0) |
112 | #define hpet_rtc_dropped_irq() 0 | 112 | #define hpet_rtc_dropped_irq() 0 |
113 | #define hpet_register_irq_handler(h) 0 | 113 | #define hpet_register_irq_handler(h) ({ 0; }) |
114 | #define hpet_unregister_irq_handler(h) 0 | 114 | #define hpet_unregister_irq_handler(h) ({ 0; }) |
115 | #ifdef RTC_IRQ | 115 | #ifdef RTC_IRQ |
116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | 116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) |
117 | { | 117 | { |
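
The rtc.c stubs are rewritten as GCC statement expressions: ({ 0; }) is a block that acts as an expression whose value is that of its last statement, here the constant 0, so the stub behaves more like a real function call when its result is ignored (presumably to keep the compiler quiet in builds without HPET support). A hypothetical stub of the same shape:

/* GCC statement expression: evaluates to the value of its last
 * statement, here the constant 0.  The argument is ignored. */
#define my_stub(arg)	({ 0; })

static int use_stub(void)
{
	return my_stub(42);		/* always 0 */
}
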
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c0e08c7bca2f..5ff83df67b44 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty) | |||
2109 | sx_out(bp, CD186x_CAR, port_No(port)); | 2109 | sx_out(bp, CD186x_CAR, port_No(port)); |
2110 | spin_unlock_irqrestore(&bp->lock, flags); | 2110 | spin_unlock_irqrestore(&bp->lock, flags); |
2111 | if (I_IXOFF(tty)) { | 2111 | if (I_IXOFF(tty)) { |
2112 | spin_unlock_irqrestore(&bp->lock, flags); | ||
2113 | sx_wait_CCR(bp); | 2112 | sx_wait_CCR(bp); |
2114 | spin_lock_irqsave(&bp->lock, flags); | 2113 | spin_lock_irqsave(&bp->lock, flags); |
2115 | sx_out(bp, CD186x_CCR, CCR_SSCH2); | 2114 | sx_out(bp, CD186x_CCR, CCR_SSCH2); |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 367be9175061..9b58b894f823 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) | |||
702 | if (is_switch) { | 702 | if (is_switch) { |
703 | set_leds(); | 703 | set_leds(); |
704 | compute_shiftstate(); | 704 | compute_shiftstate(); |
705 | notify_update(vc); | ||
705 | } | 706 | } |
706 | } | 707 | } |
707 | 708 | ||
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index dfea2bde162b..f577daedb630 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c | |||
@@ -73,8 +73,8 @@ | |||
73 | #define XHI_BUFFER_START 0 | 73 | #define XHI_BUFFER_START 0 |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * buffer_icap_get_status: Get the contents of the status register. | 76 | * buffer_icap_get_status - Get the contents of the status register. |
77 | * @parameter base_address: is the base address of the device | 77 | * @base_address: is the base address of the device |
78 | * | 78 | * |
79 | * The status register contains the ICAP status and the done bit. | 79 | * The status register contains the ICAP status and the done bit. |
80 | * | 80 | * |
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * buffer_icap_get_bram: Reads data from the storage buffer bram. | 97 | * buffer_icap_get_bram - Reads data from the storage buffer bram. |
98 | * @parameter base_address: contains the base address of the component. | 98 | * @base_address: contains the base address of the component. |
99 | * @parameter offset: The word offset from which the data should be read. | 99 | * @offset: The word offset from which the data should be read. |
100 | * | 100 | * |
101 | * A bram is used as a configuration memory cache. One frame of data can | 101 | * A bram is used as a configuration memory cache. One frame of data can |
102 | * be stored in this "storage buffer". | 102 | * be stored in this "storage buffer". |
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * buffer_icap_busy: Return true if the icap device is busy | 111 | * buffer_icap_busy - Return true if the icap device is busy |
112 | * @parameter base_address: is the base address of the device | 112 | * @base_address: is the base address of the device |
113 | * | 113 | * |
114 | * The queries the low order bit of the status register, which | 114 | * The queries the low order bit of the status register, which |
115 | * indicates whether the current configuration or readback operation | 115 | * indicates whether the current configuration or readback operation |
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * buffer_icap_busy: Return true if the icap device is not busy | 124 | * buffer_icap_busy - Return true if the icap device is not busy |
125 | * @parameter base_address: is the base address of the device | 125 | * @base_address: is the base address of the device |
126 | * | 126 | * |
127 | * The queries the low order bit of the status register, which | 127 | * The queries the low order bit of the status register, which |
128 | * indicates whether the current configuration or readback operation | 128 | * indicates whether the current configuration or readback operation |
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /** | 136 | /** |
137 | * buffer_icap_set_size: Set the size register. | 137 | * buffer_icap_set_size - Set the size register. |
138 | * @parameter base_address: is the base address of the device | 138 | * @base_address: is the base address of the device |
139 | * @parameter data: The size in bytes. | 139 | * @data: The size in bytes. |
140 | * | 140 | * |
141 | * The size register holds the number of 8 bit bytes to transfer between | 141 | * The size register holds the number of 8 bit bytes to transfer between |
142 | * bram and the icap (or icap to bram). | 142 | * bram and the icap (or icap to bram). |
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address, | |||
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * buffer_icap_mSetoffsetReg: Set the bram offset register. | 151 | * buffer_icap_set_offset - Set the bram offset register. |
152 | * @parameter base_address: contains the base address of the device. | 152 | * @base_address: contains the base address of the device. |
153 | * @parameter data: is the value to be written to the data register. | 153 | * @data: is the value to be written to the data register. |
154 | * | 154 | * |
155 | * The bram offset register holds the starting bram address to transfer | 155 | * The bram offset register holds the starting bram address to transfer |
156 | * data from during configuration or write data to during readback. | 156 | * data from during configuration or write data to during readback. |
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address, | |||
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. | 165 | * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. |
166 | * @parameter base_address: contains the base address of the device. | 166 | * @base_address: contains the base address of the device. |
167 | * @parameter data: is the value to be written to the data register. | 167 | * @data: is the value to be written to the data register. |
168 | * | 168 | * |
169 | * The RNC register determines the direction of the data transfer. It | 169 | * The RNC register determines the direction of the data transfer. It |
170 | * controls whether a configuration or readback take place. Writing to | 170 | * controls whether a configuration or readback take place. Writing to |
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address, | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /** | 180 | /** |
181 | * buffer_icap_set_bram: Write data to the storage buffer bram. | 181 | * buffer_icap_set_bram - Write data to the storage buffer bram. |
182 | * @parameter base_address: contains the base address of the component. | 182 | * @base_address: contains the base address of the component. |
183 | * @parameter offset: The word offset at which the data should be written. | 183 | * @offset: The word offset at which the data should be written. |
184 | * @parameter data: The value to be written to the bram offset. | 184 | * @data: The value to be written to the bram offset. |
185 | * | 185 | * |
186 | * A bram is used as a configuration memory cache. One frame of data can | 186 | * A bram is used as a configuration memory cache. One frame of data can |
187 | * be stored in this "storage buffer". | 187 | * be stored in this "storage buffer". |
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. | 196 | * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. |
197 | * @parameter drvdata: a pointer to the drvdata. | 197 | * @drvdata: a pointer to the drvdata. |
198 | * @parameter offset: The storage buffer start address. | 198 | * @offset: The storage buffer start address. |
199 | * @parameter count: The number of words (32 bit) to read from the | 199 | * @count: The number of words (32 bit) to read from the |
200 | * device (ICAP). | 200 | * device (ICAP). |
201 | **/ | 201 | **/ |
202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | 202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, |
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | |||
227 | }; | 227 | }; |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. | 230 | * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. |
231 | * @parameter drvdata: a pointer to the drvdata. | 231 | * @drvdata: a pointer to the drvdata. |
232 | * @parameter offset: The storage buffer start address. | 232 | * @offset: The storage buffer start address. |
233 | * @parameter count: The number of words (32 bit) to read from the | 233 | * @count: The number of words (32 bit) to read from the |
234 | * device (ICAP). | 234 | * device (ICAP). |
235 | **/ | 235 | **/ |
236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | 236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, |
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * buffer_icap_reset: Reset the logic of the icap device. | 264 | * buffer_icap_reset - Reset the logic of the icap device. |
265 | * @parameter drvdata: a pointer to the drvdata. | 265 | * @drvdata: a pointer to the drvdata. |
266 | * | 266 | * |
267 | * Writing to the status register resets the ICAP logic in an internal | 267 | * Writing to the status register resets the ICAP logic in an internal |
268 | * version of the core. For the version of the core published in EDK, | 268 | * version of the core. For the version of the core published in EDK, |
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * buffer_icap_set_configuration: Load a partial bitstream from system memory. | 277 | * buffer_icap_set_configuration - Load a partial bitstream from system memory. |
278 | * @parameter drvdata: a pointer to the drvdata. | 278 | * @drvdata: a pointer to the drvdata. |
279 | * @parameter data: Kernel address of the partial bitstream. | 279 | * @data: Kernel address of the partial bitstream. |
280 | * @parameter size: the size of the partial bitstream in 32 bit words. | 280 | * @size: the size of the partial bitstream in 32 bit words. |
281 | **/ | 281 | **/ |
282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
283 | u32 size) | 283 | u32 size) |
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | |||
333 | }; | 333 | }; |
334 | 334 | ||
335 | /** | 335 | /** |
336 | * buffer_icap_get_configuration: Read configuration data from the device. | 336 | * buffer_icap_get_configuration - Read configuration data from the device. |
337 | * @parameter drvdata: a pointer to the drvdata. | 337 | * @drvdata: a pointer to the drvdata. |
338 | * @parameter data: Address of the data representing the partial bitstream | 338 | * @data: Address of the data representing the partial bitstream |
339 | * @parameter size: the size of the partial bitstream in 32 bit words. | 339 | * @size: the size of the partial bitstream in 32 bit words. |
340 | **/ | 340 | **/ |
341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
342 | u32 size) | 342 | u32 size) |
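The buffer_icap hunks above only touch documentation: the old "name: description" / "@parameter foo:" comment style becomes kernel-doc ("name - description", one "@foo:" line per argument). A minimal stand-alone sketch of the resulting layout, built around a hypothetical byte-swap helper rather than any function from this driver:

#include <stdint.h>
#include <stdio.h>

/**
 * icap_word_swap - Byte-swap one 32-bit configuration word.
 * @word: the word to swap.
 *
 * The layout above ("name - summary", then one "@arg:" line per
 * parameter) is what these hunks convert to; the helper itself is
 * hypothetical and not part of the driver.
 */
static uint32_t icap_word_swap(uint32_t word)
{
        return (word >> 24) | ((word >> 8) & 0x0000ff00u) |
               ((word << 8) & 0x00ff0000u) | (word << 24);
}

int main(void)
{
        printf("0x%08x\n", icap_word_swap(0x12345678u));   /* 0x78563412 */
        return 0;
}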
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 0988314694a6..6f45dbd47125 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c | |||
@@ -94,9 +94,9 @@ | |||
94 | 94 | ||
95 | 95 | ||
96 | /** | 96 | /** |
97 | * fifo_icap_fifo_write: Write data to the write FIFO. | 97 | * fifo_icap_fifo_write - Write data to the write FIFO. |
98 | * @parameter drvdata: a pointer to the drvdata. | 98 | * @drvdata: a pointer to the drvdata. |
99 | * @parameter data: the 32-bit value to be written to the FIFO. | 99 | * @data: the 32-bit value to be written to the FIFO. |
100 | * | 100 | * |
101 | * This function will silently fail if the fifo is full. | 101 | * This function will silently fail if the fifo is full. |
102 | **/ | 102 | **/ |
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * fifo_icap_fifo_read: Read data from the Read FIFO. | 111 | * fifo_icap_fifo_read - Read data from the Read FIFO. |
112 | * @parameter drvdata: a pointer to the drvdata. | 112 | * @drvdata: a pointer to the drvdata. |
113 | * | 113 | * |
114 | * This function will silently fail if the fifo is empty. | 114 | * This function will silently fail if the fifo is empty. |
115 | **/ | 115 | **/ |
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * fifo_icap_set_read_size: Set the size register. | 124 | * fifo_icap_set_read_size - Set the size register. |
125 | * @parameter drvdata: a pointer to the drvdata. | 125 | * @drvdata: a pointer to the drvdata. |
126 | * @parameter data: the size of the following read transaction, in words. | 126 | * @data: the size of the following read transaction, in words. |
127 | **/ | 127 | **/ |
128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | 128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, |
129 | u32 data) | 129 | u32 data) |
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * fifo_icap_start_config: Initiate a configuration (write) to the device. | 135 | * fifo_icap_start_config - Initiate a configuration (write) to the device. |
136 | * @parameter drvdata: a pointer to the drvdata. | 136 | * @drvdata: a pointer to the drvdata. |
137 | **/ | 137 | **/ |
138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | 138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) |
139 | { | 139 | { |
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * fifo_icap_start_readback: Initiate a readback from the device. | 145 | * fifo_icap_start_readback - Initiate a readback from the device. |
146 | * @parameter drvdata: a pointer to the drvdata. | 146 | * @drvdata: a pointer to the drvdata. |
147 | **/ | 147 | **/ |
148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | 148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) |
149 | { | 149 | { |
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * fifo_icap_busy: Return true if the ICAP is still processing a transaction. | 155 | * fifo_icap_busy - Return true if the ICAP is still processing a transaction. |
156 | * @parameter drvdata: a pointer to the drvdata. | 156 | * @drvdata: a pointer to the drvdata. |
157 | **/ | 157 | **/ |
158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | 158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) |
159 | { | 159 | { |
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | /** | 165 | /** |
166 | * fifo_icap_write_fifo_vacancy: Query the write fifo available space. | 166 | * fifo_icap_write_fifo_vacancy - Query the write fifo available space. |
167 | * @parameter drvdata: a pointer to the drvdata. | 167 | * @drvdata: a pointer to the drvdata. |
168 | * | 168 | * |
169 | * Return the number of words that can be safely pushed into the write fifo. | 169 | * Return the number of words that can be safely pushed into the write fifo. |
170 | **/ | 170 | **/ |
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy( | |||
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * fifo_icap_read_fifo_occupancy: Query the read fifo available data. | 178 | * fifo_icap_read_fifo_occupancy - Query the read fifo available data. |
179 | * @parameter drvdata: a pointer to the drvdata. | 179 | * @drvdata: a pointer to the drvdata. |
180 | * | 180 | * |
181 | * Return the number of words that can be safely read from the read fifo. | 181 | * Return the number of words that can be safely read from the read fifo. |
182 | **/ | 182 | **/ |
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy( | |||
187 | } | 187 | } |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * fifo_icap_set_configuration: Send configuration data to the ICAP. | 190 | * fifo_icap_set_configuration - Send configuration data to the ICAP. |
191 | * @parameter drvdata: a pointer to the drvdata. | 191 | * @drvdata: a pointer to the drvdata. |
192 | * @parameter frame_buffer: a pointer to the data to be written to the | 192 | * @frame_buffer: a pointer to the data to be written to the |
193 | * ICAP device. | 193 | * ICAP device. |
194 | * @parameter num_words: the number of words (32 bit) to write to the ICAP | 194 | * @num_words: the number of words (32 bit) to write to the ICAP |
195 | * device. | 195 | * device. |
196 | 196 | ||
197 | * This function writes the given user data to the Write FIFO in | 197 | * This function writes the given user data to the Write FIFO in |
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
269 | * fifo_icap_get_configuration: Read configuration data from the device. | 269 | * fifo_icap_get_configuration - Read configuration data from the device. |
270 | * @parameter drvdata: a pointer to the drvdata. | 270 | * @drvdata: a pointer to the drvdata. |
271 | * @parameter data: Address of the data representing the partial bitstream | 271 | * @data: Address of the data representing the partial bitstream |
272 | * @parameter size: the size of the partial bitstream in 32 bit words. | 272 | * @size: the size of the partial bitstream in 32 bit words. |
273 | * | 273 | * |
274 | * This function reads the specified number of words from the ICAP device in | 274 | * This function reads the specified number of words from the ICAP device in |
275 | * the polled mode. | 275 | * the polled mode. |
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, | |||
335 | } | 335 | } |
336 | 336 | ||
337 | /** | 337 | /** |
338 | * buffer_icap_reset: Reset the logic of the icap device. | 338 | * buffer_icap_reset - Reset the logic of the icap device. |
339 | * @parameter drvdata: a pointer to the drvdata. | 339 | * @drvdata: a pointer to the drvdata. |
340 | * | 340 | * |
341 | * This function forces the software reset of the complete HWICAP device. | 341 | * This function forces the software reset of the complete HWICAP device. |
342 | * All the registers will return to the default value and the FIFO is also | 342 | * All the registers will return to the default value and the FIFO is also |
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata) | |||
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. | 363 | * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. |
364 | * @parameter drvdata: a pointer to the drvdata. | 364 | * @drvdata: a pointer to the drvdata. |
365 | */ | 365 | */ |
366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) | 366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) |
367 | { | 367 | { |
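fifo_icap_set_configuration() (its body is not shown in these hunks) pushes the bitstream through the write FIFO in polled mode: query the vacancy, push at most that many words, kick off the transfer, and repeat until done. A self-contained sketch of that chunking loop against a software FIFO model; the depth, the names and the drain step are illustrative, not the real core's behaviour:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FIFO_DEPTH 8                    /* illustrative depth only */

static uint32_t fifo[FIFO_DEPTH];
static unsigned int fifo_fill;

static unsigned int fifo_vacancy(void)
{
        return FIFO_DEPTH - fifo_fill;
}

static void fifo_write(uint32_t word)
{
        if (fifo_fill < FIFO_DEPTH)     /* the real core drops extra words silently */
                fifo[fifo_fill++] = word;
}

static void start_config_and_drain(void)
{
        /* stands in for "start the transfer, poll until not busy" */
        printf("flushing %u words to the device\n", fifo_fill);
        fifo_fill = 0;
}

static void set_configuration(const uint32_t *data, unsigned int num_words)
{
        while (num_words) {
                unsigned int chunk = fifo_vacancy();

                if (chunk > num_words)
                        chunk = num_words;
                for (unsigned int i = 0; i < chunk; i++)
                        fifo_write(*data++);
                start_config_and_drain();
                num_words -= chunk;
        }
}

int main(void)
{
        uint32_t bitstream[20];

        memset(bitstream, 0xff, sizeof(bitstream));
        set_configuration(bitstream, 20);       /* flushes 8 + 8 + 4 words */
        return 0;
}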
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 24f6aef0fd3c..2284fa2a5a57 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -84,7 +84,7 @@ | |||
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/poll.h> | 85 | #include <linux/poll.h> |
86 | #include <linux/proc_fs.h> | 86 | #include <linux/proc_fs.h> |
87 | #include <asm/semaphore.h> | 87 | #include <linux/mutex.h> |
88 | #include <linux/sysctl.h> | 88 | #include <linux/sysctl.h> |
89 | #include <linux/version.h> | 89 | #include <linux/version.h> |
90 | #include <linux/fs.h> | 90 | #include <linux/fs.h> |
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO); | |||
119 | 119 | ||
120 | /* An array, which is set to true when the device is registered. */ | 120 | /* An array, which is set to true when the device is registered. */ |
121 | static bool probed_devices[HWICAP_DEVICES]; | 121 | static bool probed_devices[HWICAP_DEVICES]; |
122 | static struct mutex icap_sem; | ||
122 | 123 | ||
123 | static struct class *icap_class; | 124 | static struct class *icap_class; |
124 | 125 | ||
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = { | |||
199 | }; | 200 | }; |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * hwicap_command_desync: Send a DESYNC command to the ICAP port. | 203 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. |
203 | * @parameter drvdata: a pointer to the drvdata. | 204 | * @drvdata: a pointer to the drvdata. |
204 | * | 205 | * |
205 | * This command desynchronizes the ICAP. After this command, a | 206 | * This command desynchronizes the ICAP. After this command, a |
206 | * bitstream containing a NULL packet, followed by a SYNCH packet is | 207 | * bitstream containing a NULL packet, followed by a SYNCH packet is |
207 | * required before the ICAP will recognize commands. | 208 | * required before the ICAP will recognize commands. |
208 | */ | 209 | */ |
209 | int hwicap_command_desync(struct hwicap_drvdata *drvdata) | 210 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) |
210 | { | 211 | { |
211 | u32 buffer[4]; | 212 | u32 buffer[4]; |
212 | u32 index = 0; | 213 | u32 index = 0; |
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata) | |||
228 | } | 229 | } |
229 | 230 | ||
230 | /** | 231 | /** |
231 | * hwicap_command_capture: Send a CAPTURE command to the ICAP port. | 232 | * hwicap_get_configuration_register - Query a configuration register. |
232 | * @parameter drvdata: a pointer to the drvdata. | 233 | * @drvdata: a pointer to the drvdata. |
233 | * | 234 | * @reg: a constant which represents the configuration |
234 | * This command captures all of the flip flop states so they will be | ||
235 | * available during readback. One can use this command instead of | ||
236 | * enabling the CAPTURE block in the design. | ||
237 | */ | ||
238 | int hwicap_command_capture(struct hwicap_drvdata *drvdata) | ||
239 | { | ||
240 | u32 buffer[7]; | ||
241 | u32 index = 0; | ||
242 | |||
243 | /* | ||
244 | * Create the data to be written to the ICAP. | ||
245 | */ | ||
246 | buffer[index++] = XHI_DUMMY_PACKET; | ||
247 | buffer[index++] = XHI_SYNC_PACKET; | ||
248 | buffer[index++] = XHI_NOOP_PACKET; | ||
249 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; | ||
250 | buffer[index++] = XHI_CMD_GCAPTURE; | ||
251 | buffer[index++] = XHI_DUMMY_PACKET; | ||
252 | buffer[index++] = XHI_DUMMY_PACKET; | ||
253 | |||
254 | /* | ||
255 | * Write the data to the FIFO and initiate the transfer of data | ||
256 | * present in the FIFO to the ICAP device. | ||
257 | */ | ||
258 | return drvdata->config->set_configuration(drvdata, | ||
259 | &buffer[0], index); | ||
260 | |||
261 | } | ||
262 | |||
263 | /** | ||
264 | * hwicap_get_configuration_register: Query a configuration register. | ||
265 | * @parameter drvdata: a pointer to the drvdata. | ||
266 | * @parameter reg: a constant which represents the configuration | ||
267 | * register value to be returned. | 235 | * register value to be returned. |
268 | * Examples: XHI_IDCODE, XHI_FLR. | 236 | * Examples: XHI_IDCODE, XHI_FLR. |
269 | * @parameter RegData: returns the value of the register. | 237 | * @reg_data: returns the value of the register. |
270 | * | 238 | * |
271 | * Sends a query packet to the ICAP and then receives the response. | 239 | * Sends a query packet to the ICAP and then receives the response. |
272 | * The icap is left in Synched state. | 240 | * The icap is left in Synched state. |
273 | */ | 241 | */ |
274 | int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | 242 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, |
275 | u32 reg, u32 *RegData) | 243 | u32 reg, u32 *reg_data) |
276 | { | 244 | { |
277 | int status; | 245 | int status; |
278 | u32 buffer[6]; | 246 | u32 buffer[6]; |
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | |||
300 | /* | 268 | /* |
301 | * Read the configuration register | 269 | * Read the configuration register |
302 | */ | 270 | */ |
303 | status = drvdata->config->get_configuration(drvdata, RegData, 1); | 271 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); |
304 | if (status) | 272 | if (status) |
305 | return status; | 273 | return status; |
306 | 274 | ||
307 | return 0; | 275 | return 0; |
308 | } | 276 | } |
309 | 277 | ||
310 | int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | 278 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) |
311 | { | 279 | { |
312 | int status; | 280 | int status; |
313 | u32 idcode; | 281 | u32 idcode; |
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | |||
344 | } | 312 | } |
345 | 313 | ||
346 | static ssize_t | 314 | static ssize_t |
347 | hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | 315 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
348 | { | 316 | { |
349 | struct hwicap_drvdata *drvdata = file->private_data; | 317 | struct hwicap_drvdata *drvdata = file->private_data; |
350 | ssize_t bytes_to_read = 0; | 318 | ssize_t bytes_to_read = 0; |
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
353 | u32 bytes_remaining; | 321 | u32 bytes_remaining; |
354 | int status; | 322 | int status; |
355 | 323 | ||
356 | if (down_interruptible(&drvdata->sem)) | 324 | status = mutex_lock_interruptible(&drvdata->sem); |
357 | return -ERESTARTSYS; | 325 | if (status) |
326 | return status; | ||
358 | 327 | ||
359 | if (drvdata->read_buffer_in_use) { | 328 | if (drvdata->read_buffer_in_use) { |
360 | /* If there are leftover bytes in the buffer, just */ | 329 | /* If there are leftover bytes in the buffer, just */ |
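Besides swapping the semaphore for a mutex, this hunk changes the error path: instead of always answering -ERESTARTSYS, whatever mutex_lock_interruptible() returns is passed straight back to the caller. A user-space sketch of that propagation, with a stand-in lock function and error value (neither is the kernel API):

#include <stdio.h>

#define ERR_INTERRUPTED (-4)            /* stand-in for a -EINTR style code */

static int interrupted;                 /* pretend a signal arrived when non-zero */

static int lock_interruptible(void)
{
        return interrupted ? ERR_INTERRUPTED : 0;       /* 0 on success */
}

static void unlock(void)
{
}

static int do_read(int *out)
{
        int status = lock_interruptible();

        if (status)
                return status;          /* propagate whatever the lock reported */

        *out = 42;                      /* the real work happens under the lock */
        status = 0;

        unlock();
        return status;
}

int main(void)
{
        int value = 0;

        interrupted = 1;
        printf("interrupted: %d\n", do_read(&value));
        interrupted = 0;
        printf("ok: %d, value=%d\n", do_read(&value), value);
        return 0;
}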
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
370 | goto error; | 339 | goto error; |
371 | } | 340 | } |
372 | drvdata->read_buffer_in_use -= bytes_to_read; | 341 | drvdata->read_buffer_in_use -= bytes_to_read; |
373 | memcpy(drvdata->read_buffer + bytes_to_read, | 342 | memmove(drvdata->read_buffer, |
374 | drvdata->read_buffer, 4 - bytes_to_read); | 343 | drvdata->read_buffer + bytes_to_read, |
344 | 4 - bytes_to_read); | ||
375 | } else { | 345 | } else { |
376 | /* Get new data from the ICAP, and return what was requested. */ | 346 | /* Get new data from the ICAP, and return what was requested. */ |
377 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); | 347 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); |
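The read path keeps up to three leftover bytes in a 4-byte holding buffer; after handing some of them out it slides the rest to the front of the same buffer, so source and destination overlap and memcpy() is not allowed there. A stand-alone illustration of the corrected shift with memmove():

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char read_buffer[4] = { 'A', 'B', 'C', 'D' };
        unsigned int in_use = 4;
        unsigned int handed_out = 1;    /* one byte already copied to the caller */

        in_use -= handed_out;
        /* slide the remaining bytes to the front; the regions overlap */
        memmove(read_buffer, read_buffer + handed_out, in_use);

        printf("%.3s\n", (char *)read_buffer);  /* prints BCD */
        return 0;
}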
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
414 | status = -EFAULT; | 384 | status = -EFAULT; |
415 | goto error; | 385 | goto error; |
416 | } | 386 | } |
417 | memcpy(kbuf, drvdata->read_buffer, bytes_remaining); | 387 | memcpy(drvdata->read_buffer, |
388 | kbuf, | ||
389 | bytes_remaining); | ||
418 | drvdata->read_buffer_in_use = bytes_remaining; | 390 | drvdata->read_buffer_in_use = bytes_remaining; |
419 | free_page((unsigned long)kbuf); | 391 | free_page((unsigned long)kbuf); |
420 | } | 392 | } |
421 | status = bytes_to_read; | 393 | status = bytes_to_read; |
422 | error: | 394 | error: |
423 | up(&drvdata->sem); | 395 | mutex_unlock(&drvdata->sem); |
424 | return status; | 396 | return status; |
425 | } | 397 | } |
426 | 398 | ||
427 | static ssize_t | 399 | static ssize_t |
428 | hwicap_write(struct file *file, const char *buf, | 400 | hwicap_write(struct file *file, const char __user *buf, |
429 | size_t count, loff_t *ppos) | 401 | size_t count, loff_t *ppos) |
430 | { | 402 | { |
431 | struct hwicap_drvdata *drvdata = file->private_data; | 403 | struct hwicap_drvdata *drvdata = file->private_data; |
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf, | |||
435 | ssize_t len; | 407 | ssize_t len; |
436 | ssize_t status; | 408 | ssize_t status; |
437 | 409 | ||
438 | if (down_interruptible(&drvdata->sem)) | 410 | status = mutex_lock_interruptible(&drvdata->sem); |
439 | return -ERESTARTSYS; | 411 | if (status) |
412 | return status; | ||
440 | 413 | ||
441 | left += drvdata->write_buffer_in_use; | 414 | left += drvdata->write_buffer_in_use; |
442 | 415 | ||
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf, | |||
465 | memcpy(kbuf, drvdata->write_buffer, | 438 | memcpy(kbuf, drvdata->write_buffer, |
466 | drvdata->write_buffer_in_use); | 439 | drvdata->write_buffer_in_use); |
467 | if (copy_from_user( | 440 | if (copy_from_user( |
468 | (((char *)kbuf) + (drvdata->write_buffer_in_use)), | 441 | (((char *)kbuf) + drvdata->write_buffer_in_use), |
469 | buf + written, | 442 | buf + written, |
470 | len - (drvdata->write_buffer_in_use))) { | 443 | len - (drvdata->write_buffer_in_use))) { |
471 | free_page((unsigned long)kbuf); | 444 | free_page((unsigned long)kbuf); |
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf, | |||
508 | free_page((unsigned long)kbuf); | 481 | free_page((unsigned long)kbuf); |
509 | status = written; | 482 | status = written; |
510 | error: | 483 | error: |
511 | up(&drvdata->sem); | 484 | mutex_unlock(&drvdata->sem); |
512 | return status; | 485 | return status; |
513 | } | 486 | } |
514 | 487 | ||
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
519 | 492 | ||
520 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); | 493 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); |
521 | 494 | ||
522 | if (down_interruptible(&drvdata->sem)) | 495 | status = mutex_lock_interruptible(&drvdata->sem); |
523 | return -ERESTARTSYS; | 496 | if (status) |
497 | return status; | ||
524 | 498 | ||
525 | if (drvdata->is_open) { | 499 | if (drvdata->is_open) { |
526 | status = -EBUSY; | 500 | status = -EBUSY; |
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
539 | drvdata->is_open = 1; | 513 | drvdata->is_open = 1; |
540 | 514 | ||
541 | error: | 515 | error: |
542 | up(&drvdata->sem); | 516 | mutex_unlock(&drvdata->sem); |
543 | return status; | 517 | return status; |
544 | } | 518 | } |
545 | 519 | ||
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
549 | int i; | 523 | int i; |
550 | int status = 0; | 524 | int status = 0; |
551 | 525 | ||
552 | if (down_interruptible(&drvdata->sem)) | 526 | mutex_lock(&drvdata->sem); |
553 | return -ERESTARTSYS; | ||
554 | 527 | ||
555 | if (drvdata->write_buffer_in_use) { | 528 | if (drvdata->write_buffer_in_use) { |
556 | /* Flush write buffer. */ | 529 | /* Flush write buffer. */ |
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
569 | 542 | ||
570 | error: | 543 | error: |
571 | drvdata->is_open = 0; | 544 | drvdata->is_open = 0; |
572 | up(&drvdata->sem); | 545 | mutex_unlock(&drvdata->sem); |
573 | return status; | 546 | return status; |
574 | } | 547 | } |
575 | 548 | ||
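Once device_create() publishes the node (presumably /dev/icap0 for the first instance, given the "%s%d" name and standard udev handling), reconfiguring the FPGA from user space is just a write of the partial bitstream to that node. A hedged usage sketch; the device path is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char buf[4096];
        ssize_t n;
        int in, icap;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <partial-bitstream>\n", argv[0]);
                return 1;
        }
        in = open(argv[1], O_RDONLY);
        icap = open("/dev/icap0", O_WRONLY);    /* device node name assumed */
        if (in < 0 || icap < 0) {
                perror("open");
                return 1;
        }
        while ((n = read(in, buf, sizeof(buf))) > 0)
                if (write(icap, buf, n) != n) { /* short writes treated as errors */
                        perror("write");
                        return 1;
                }
        close(in);
        close(icap);
        return 0;
}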
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
592 | 565 | ||
593 | dev_info(dev, "Xilinx icap port driver\n"); | 566 | dev_info(dev, "Xilinx icap port driver\n"); |
594 | 567 | ||
568 | mutex_lock(&icap_sem); | ||
569 | |||
595 | if (id < 0) { | 570 | if (id < 0) { |
596 | for (id = 0; id < HWICAP_DEVICES; id++) | 571 | for (id = 0; id < HWICAP_DEVICES; id++) |
597 | if (!probed_devices[id]) | 572 | if (!probed_devices[id]) |
598 | break; | 573 | break; |
599 | } | 574 | } |
600 | if (id < 0 || id >= HWICAP_DEVICES) { | 575 | if (id < 0 || id >= HWICAP_DEVICES) { |
576 | mutex_unlock(&icap_sem); | ||
601 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); | 577 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); |
602 | return -EINVAL; | 578 | return -EINVAL; |
603 | } | 579 | } |
604 | if (probed_devices[id]) { | 580 | if (probed_devices[id]) { |
581 | mutex_unlock(&icap_sem); | ||
605 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", | 582 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", |
606 | DRIVER_NAME, id); | 583 | DRIVER_NAME, id); |
607 | return -EBUSY; | 584 | return -EBUSY; |
608 | } | 585 | } |
609 | 586 | ||
610 | probed_devices[id] = 1; | 587 | probed_devices[id] = 1; |
588 | mutex_unlock(&icap_sem); | ||
611 | 589 | ||
612 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); | 590 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); |
613 | 591 | ||
614 | drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); | 592 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); |
615 | if (!drvdata) { | 593 | if (!drvdata) { |
616 | dev_err(dev, "Couldn't allocate device private record\n"); | 594 | dev_err(dev, "Couldn't allocate device private record\n"); |
617 | return -ENOMEM; | 595 | retval = -ENOMEM; |
596 | goto failed0; | ||
618 | } | 597 | } |
619 | memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata)); | ||
620 | dev_set_drvdata(dev, (void *)drvdata); | 598 | dev_set_drvdata(dev, (void *)drvdata); |
621 | 599 | ||
622 | if (!regs_res) { | 600 | if (!regs_res) { |
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
648 | drvdata->config = config; | 626 | drvdata->config = config; |
649 | drvdata->config_regs = config_regs; | 627 | drvdata->config_regs = config_regs; |
650 | 628 | ||
651 | init_MUTEX(&drvdata->sem); | 629 | mutex_init(&drvdata->sem); |
652 | drvdata->is_open = 0; | 630 | drvdata->is_open = 0; |
653 | 631 | ||
654 | dev_info(dev, "ioremap %lx to %p with size %x\n", | 632 | dev_info(dev, "ioremap %lx to %p with size %x\n", |
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
663 | goto failed3; | 641 | goto failed3; |
664 | } | 642 | } |
665 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ | 643 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ |
666 | class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); | 644 | device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id); |
667 | return 0; /* success */ | 645 | return 0; /* success */ |
668 | 646 | ||
669 | failed3: | 647 | failed3: |
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
675 | failed1: | 653 | failed1: |
676 | kfree(drvdata); | 654 | kfree(drvdata); |
677 | 655 | ||
656 | failed0: | ||
657 | mutex_lock(&icap_sem); | ||
658 | probed_devices[id] = 0; | ||
659 | mutex_unlock(&icap_sem); | ||
660 | |||
678 | return retval; | 661 | return retval; |
679 | } | 662 | } |
680 | 663 | ||
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev) | |||
699 | if (!drvdata) | 682 | if (!drvdata) |
700 | return 0; | 683 | return 0; |
701 | 684 | ||
702 | class_device_destroy(icap_class, drvdata->devt); | 685 | device_destroy(icap_class, drvdata->devt); |
703 | cdev_del(&drvdata->cdev); | 686 | cdev_del(&drvdata->cdev); |
704 | iounmap(drvdata->base_address); | 687 | iounmap(drvdata->base_address); |
705 | release_mem_region(drvdata->mem_start, drvdata->mem_size); | 688 | release_mem_region(drvdata->mem_start, drvdata->mem_size); |
706 | kfree(drvdata); | 689 | kfree(drvdata); |
707 | dev_set_drvdata(dev, NULL); | 690 | dev_set_drvdata(dev, NULL); |
708 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
709 | 691 | ||
692 | mutex_lock(&icap_sem); | ||
693 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
694 | mutex_unlock(&icap_sem); | ||
710 | return 0; /* success */ | 695 | return 0; /* success */ |
711 | } | 696 | } |
712 | 697 | ||
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = { | |||
821 | }; | 806 | }; |
822 | 807 | ||
823 | /* Registration helpers to keep the number of #ifdefs to a minimum */ | 808 | /* Registration helpers to keep the number of #ifdefs to a minimum */ |
824 | static inline int __devinit hwicap_of_register(void) | 809 | static inline int __init hwicap_of_register(void) |
825 | { | 810 | { |
826 | pr_debug("hwicap: calling of_register_platform_driver()\n"); | 811 | pr_debug("hwicap: calling of_register_platform_driver()\n"); |
827 | return of_register_platform_driver(&hwicap_of_driver); | 812 | return of_register_platform_driver(&hwicap_of_driver); |
828 | } | 813 | } |
829 | 814 | ||
830 | static inline void __devexit hwicap_of_unregister(void) | 815 | static inline void __exit hwicap_of_unregister(void) |
831 | { | 816 | { |
832 | of_unregister_platform_driver(&hwicap_of_driver); | 817 | of_unregister_platform_driver(&hwicap_of_driver); |
833 | } | 818 | } |
834 | #else /* CONFIG_OF */ | 819 | #else /* CONFIG_OF */ |
835 | /* CONFIG_OF not enabled; do nothing helpers */ | 820 | /* CONFIG_OF not enabled; do nothing helpers */ |
836 | static inline int __devinit hwicap_of_register(void) { return 0; } | 821 | static inline int __init hwicap_of_register(void) { return 0; } |
837 | static inline void __devexit hwicap_of_unregister(void) { } | 822 | static inline void __exit hwicap_of_unregister(void) { } |
838 | #endif /* CONFIG_OF */ | 823 | #endif /* CONFIG_OF */ |
839 | 824 | ||
840 | static int __devinit hwicap_module_init(void) | 825 | static int __init hwicap_module_init(void) |
841 | { | 826 | { |
842 | dev_t devt; | 827 | dev_t devt; |
843 | int retval; | 828 | int retval; |
844 | 829 | ||
845 | icap_class = class_create(THIS_MODULE, "xilinx_config"); | 830 | icap_class = class_create(THIS_MODULE, "xilinx_config"); |
831 | mutex_init(&icap_sem); | ||
846 | 832 | ||
847 | if (xhwicap_major) { | 833 | if (xhwicap_major) { |
848 | devt = MKDEV(xhwicap_major, xhwicap_minor); | 834 | devt = MKDEV(xhwicap_major, xhwicap_minor); |
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void) | |||
883 | return retval; | 869 | return retval; |
884 | } | 870 | } |
885 | 871 | ||
886 | static void __devexit hwicap_module_cleanup(void) | 872 | static void __exit hwicap_module_cleanup(void) |
887 | { | 873 | { |
888 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); | 874 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); |
889 | 875 | ||
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index ae771cac1629..405fee7e189b 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -48,9 +48,9 @@ struct hwicap_drvdata { | |||
48 | u8 write_buffer[4]; | 48 | u8 write_buffer[4]; |
49 | u32 read_buffer_in_use; /* Always in [0,3] */ | 49 | u32 read_buffer_in_use; /* Always in [0,3] */ |
50 | u8 read_buffer[4]; | 50 | u8 read_buffer[4]; |
51 | u32 mem_start; /* phys. address of the control registers */ | 51 | resource_size_t mem_start;/* phys. address of the control registers */ |
52 | u32 mem_end; /* phys. address of the control registers */ | 52 | resource_size_t mem_end; /* phys. address of the control registers */ |
53 | u32 mem_size; | 53 | resource_size_t mem_size; |
54 | void __iomem *base_address;/* virt. address of the control registers */ | 54 | void __iomem *base_address;/* virt. address of the control registers */ |
55 | 55 | ||
56 | struct device *dev; | 56 | struct device *dev; |
@@ -61,7 +61,7 @@ struct hwicap_drvdata { | |||
61 | const struct config_registers *config_regs; | 61 | const struct config_registers *config_regs; |
62 | void *private_data; | 62 | void *private_data; |
63 | bool is_open; | 63 | bool is_open; |
64 | struct semaphore sem; | 64 | struct mutex sem; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct hwicap_driver_config { | 67 | struct hwicap_driver_config { |
@@ -164,29 +164,29 @@ struct config_registers { | |||
164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL | 164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * hwicap_type_1_read: Generates a Type 1 read packet header. | 167 | * hwicap_type_1_read - Generates a Type 1 read packet header. |
168 | * @parameter: Register is the address of the register to be read back. | 168 | * @reg: is the address of the register to be read back. |
169 | * | 169 | * |
170 | * Generates a Type 1 read packet header, which is used to indirectly | 170 | * Generates a Type 1 read packet header, which is used to indirectly |
171 | * read registers in the configuration logic. This packet must then | 171 | * read registers in the configuration logic. This packet must then |
172 | * be sent through the icap device, and a return packet received with | 172 | * be sent through the icap device, and a return packet received with |
173 | * the information. | 173 | * the information. |
174 | **/ | 174 | **/ |
175 | static inline u32 hwicap_type_1_read(u32 Register) | 175 | static inline u32 hwicap_type_1_read(u32 reg) |
176 | { | 176 | { |
177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
178 | (Register << XHI_REGISTER_SHIFT) | | 178 | (reg << XHI_REGISTER_SHIFT) | |
179 | (XHI_OP_READ << XHI_OP_SHIFT); | 179 | (XHI_OP_READ << XHI_OP_SHIFT); |
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * hwicap_type_1_write: Generates a Type 1 write packet header | 183 | * hwicap_type_1_write - Generates a Type 1 write packet header |
184 | * @parameter: Register is the address of the register to be written. | 184 | * @reg: is the address of the register to be written. |
185 | **/ | 185 | **/ |
186 | static inline u32 hwicap_type_1_write(u32 Register) | 186 | static inline u32 hwicap_type_1_write(u32 reg) |
187 | { | 187 | { |
188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
189 | (Register << XHI_REGISTER_SHIFT) | | 189 | (reg << XHI_REGISTER_SHIFT) | |
190 | (XHI_OP_WRITE << XHI_OP_SHIFT); | 190 | (XHI_OP_WRITE << XHI_OP_SHIFT); |
191 | } | 191 | } |
192 | 192 | ||
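The Register to reg rename does not change the packet layout: a type 1 header is still the type, register address and opcode fields shifted into a single 32-bit word. A stand-alone illustration of that packing; the field positions below are made up, and the real XHI_*_SHIFT values are defined elsewhere in this header:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field positions only, not the real XHI_* constants. */
#define TYPE_SHIFT      29
#define REGISTER_SHIFT  13
#define OP_SHIFT        27
#define TYPE_1          1u
#define OP_READ         1u
#define OP_WRITE        2u

static uint32_t type_1_read(uint32_t reg)
{
        return (TYPE_1 << TYPE_SHIFT) | (reg << REGISTER_SHIFT) |
               (OP_READ << OP_SHIFT);
}

static uint32_t type_1_write(uint32_t reg)
{
        return (TYPE_1 << TYPE_SHIFT) | (reg << REGISTER_SHIFT) |
               (OP_WRITE << OP_SHIFT);
}

int main(void)
{
        printf("read  hdr: 0x%08x\n", type_1_read(0x0c));
        printf("write hdr: 0x%08x\n", type_1_write(0x0c));
        return 0;
}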
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 89a29cd93783..35a26a3e5f68 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -671,13 +671,13 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
671 | { | 671 | { |
672 | struct cpufreq_policy * policy = to_policy(kobj); | 672 | struct cpufreq_policy * policy = to_policy(kobj); |
673 | struct freq_attr * fattr = to_attr(attr); | 673 | struct freq_attr * fattr = to_attr(attr); |
674 | ssize_t ret; | 674 | ssize_t ret = -EINVAL; |
675 | policy = cpufreq_cpu_get(policy->cpu); | 675 | policy = cpufreq_cpu_get(policy->cpu); |
676 | if (!policy) | 676 | if (!policy) |
677 | return -EINVAL; | 677 | goto no_policy; |
678 | 678 | ||
679 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 679 | if (lock_policy_rwsem_read(policy->cpu) < 0) |
680 | return -EINVAL; | 680 | goto fail; |
681 | 681 | ||
682 | if (fattr->show) | 682 | if (fattr->show) |
683 | ret = fattr->show(policy, buf); | 683 | ret = fattr->show(policy, buf); |
@@ -685,8 +685,9 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
685 | ret = -EIO; | 685 | ret = -EIO; |
686 | 686 | ||
687 | unlock_policy_rwsem_read(policy->cpu); | 687 | unlock_policy_rwsem_read(policy->cpu); |
688 | 688 | fail: | |
689 | cpufreq_cpu_put(policy); | 689 | cpufreq_cpu_put(policy); |
690 | no_policy: | ||
690 | return ret; | 691 | return ret; |
691 | } | 692 | } |
692 | 693 | ||
@@ -695,13 +696,13 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
695 | { | 696 | { |
696 | struct cpufreq_policy * policy = to_policy(kobj); | 697 | struct cpufreq_policy * policy = to_policy(kobj); |
697 | struct freq_attr * fattr = to_attr(attr); | 698 | struct freq_attr * fattr = to_attr(attr); |
698 | ssize_t ret; | 699 | ssize_t ret = -EINVAL; |
699 | policy = cpufreq_cpu_get(policy->cpu); | 700 | policy = cpufreq_cpu_get(policy->cpu); |
700 | if (!policy) | 701 | if (!policy) |
701 | return -EINVAL; | 702 | goto no_policy; |
702 | 703 | ||
703 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 704 | if (lock_policy_rwsem_write(policy->cpu) < 0) |
704 | return -EINVAL; | 705 | goto fail; |
705 | 706 | ||
706 | if (fattr->store) | 707 | if (fattr->store) |
707 | ret = fattr->store(policy, buf, count); | 708 | ret = fattr->store(policy, buf, count); |
@@ -709,8 +710,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
709 | ret = -EIO; | 710 | ret = -EIO; |
710 | 711 | ||
711 | unlock_policy_rwsem_write(policy->cpu); | 712 | unlock_policy_rwsem_write(policy->cpu); |
712 | 713 | fail: | |
713 | cpufreq_cpu_put(policy); | 714 | cpufreq_cpu_put(policy); |
715 | no_policy: | ||
714 | return ret; | 716 | return ret; |
715 | } | 717 | } |
716 | 718 | ||
@@ -1775,7 +1777,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1775 | return NOTIFY_OK; | 1777 | return NOTIFY_OK; |
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = | 1780 | static struct notifier_block __refdata cpufreq_cpu_notifier = |
1779 | { | 1781 | { |
1780 | .notifier_call = cpufreq_cpu_callback, | 1782 | .notifier_call = cpufreq_cpu_callback, |
1781 | }; | 1783 | }; |
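The cpufreq show()/store() rework seeds ret with -EINVAL and unwinds through labels, so the reference taken by cpufreq_cpu_get() is dropped on every path once the policy has been acquired. A minimal stand-alone function of the same shape (all helpers are invented, and the lock attempt is forced to fail to show the unwind):

#include <stdio.h>

#define EINVAL_ERR (-22)                /* stand-in for -EINVAL */

static int get_policy(void)  { return 1; }      /* non-zero means success */
static void put_policy(void) { printf("policy released\n"); }
static int take_lock(void)   { return -1; }     /* pretend the lock attempt fails */
static void drop_lock(void)  { }

static int show(void)
{
        int ret = EINVAL_ERR;           /* default result until something succeeds */

        if (!get_policy())
                goto no_policy;

        if (take_lock() < 0)
                goto fail;              /* policy was taken: must still be released */

        ret = 0;                        /* the real attribute callback would run here */
        drop_lock();
fail:
        put_policy();
no_policy:
        return ret;
}

int main(void)
{
        printf("show() = %d\n", show());
        return 0;
}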
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1b8312b02006..070421a5480e 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -323,7 +323,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
323 | return NOTIFY_OK; | 323 | return NOTIFY_OK; |
324 | } | 324 | } |
325 | 325 | ||
326 | static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata = | 326 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = |
327 | { | 327 | { |
328 | .notifier_call = cpufreq_stat_cpu_callback, | 328 | .notifier_call = cpufreq_stat_cpu_callback, |
329 | }; | 329 | }; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a703deffb795..27340a7b19dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC |
8 | depends on !HIGHMEM64G | 8 | depends on !HIGHMEM64G |
9 | help | 9 | help |
10 | DMA engines can do asynchronous data transfers without | 10 | DMA engines can do asynchronous data transfers without |
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA | |||
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config FSL_DMA | ||
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | ||
42 | depends on PPC | ||
43 | select DMA_ENGINE | ||
44 | ---help--- | ||
45 | Enable support for the Freescale DMA engine. It currently | ||
46 | supports the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | ||
47 | The MPC8349 and MPC8360 are also supported. | ||
48 | |||
49 | config FSL_DMA_SELFTEST | ||
50 | bool "Enable the self test for each DMA channel" | ||
51 | depends on FSL_DMA | ||
52 | default y | ||
53 | ---help--- | ||
54 | Enable the self test for each DMA channel. A self test will be | ||
55 | performed after the channel is probed to ensure the DMA works well. | ||
56 | |||
40 | config DMA_ENGINE | 57 | config DMA_ENGINE |
41 | bool | 58 | bool |
42 | 59 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b152cd84e123..c8036d945902 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o | |||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 29965231b912..8db0e7f9d3f4 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -357,7 +357,7 @@ int dma_async_device_register(struct dma_device *device) | |||
357 | !device->device_prep_dma_zero_sum); | 357 | !device->device_prep_dma_zero_sum); |
358 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | 358 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && |
359 | !device->device_prep_dma_memset); | 359 | !device->device_prep_dma_memset); |
360 | BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) && | 360 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
361 | !device->device_prep_dma_interrupt); | 361 | !device->device_prep_dma_interrupt); |
362 | 362 | ||
363 | BUG_ON(!device->device_alloc_chan_resources); | 363 | BUG_ON(!device->device_alloc_chan_resources); |
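The dmaengine fix is a copy-and-paste correction: a device advertising DMA_INTERRUPT must supply device_prep_dma_interrupt, not device_prep_dma_zero_sum. The same kind of registration-time sanity check in a self-contained form (the capability bits and structure are invented for illustration):

#include <assert.h>
#include <stdio.h>

#define CAP_MEMCPY      (1u << 0)
#define CAP_INTERRUPT   (1u << 1)

struct engine {
        unsigned int cap_mask;
        void (*prep_memcpy)(void);
        void (*prep_interrupt)(void);
};

static void prep_memcpy_impl(void) { }

static void register_engine(const struct engine *e)
{
        /* each advertised capability must come with its callback */
        assert(!(e->cap_mask & CAP_MEMCPY) || e->prep_memcpy);
        assert(!(e->cap_mask & CAP_INTERRUPT) || e->prep_interrupt);
        printf("engine registered\n");
}

int main(void)
{
        struct engine e = {
                .cap_mask = CAP_MEMCPY,
                .prep_memcpy = prep_memcpy_impl,
                .prep_interrupt = NULL, /* fine: CAP_INTERRUPT not claimed */
        };

        register_engine(&e);
        return 0;
}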
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c new file mode 100644 index 000000000000..ad2f938597e2 --- /dev/null +++ b/drivers/dma/fsldma.c | |||
@@ -0,0 +1,1097 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: | ||
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
9 | * | ||
10 | * Description: | ||
11 | * DMA engine driver for the Freescale MPC8540 DMA controller, which | ||
12 | * also fits the MPC8560, MPC8555, MPC8548, MPC8641, etc. | ||
13 | * Support for the MPC8349 DMA controller is also included. | ||
14 | * | ||
15 | * This is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | |||
32 | #include "fsldma.h" | ||
33 | |||
34 | static void dma_init(struct fsl_dma_chan *fsl_chan) | ||
35 | { | ||
36 | /* Reset the channel */ | ||
37 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); | ||
38 | |||
39 | switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
40 | case FSL_DMA_IP_85XX: | ||
41 | /* Set the channel to the modes below: | ||
42 | * EIE - Error interrupt enable | ||
43 | * EOSIE - End of segments interrupt enable (basic mode) | ||
44 | * EOLNIE - End of links interrupt enable | ||
45 | */ | ||
46 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | ||
47 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
48 | break; | ||
49 | case FSL_DMA_IP_83XX: | ||
50 | /* Set the channel to the modes below: | ||
51 | * EOTIE - End-of-transfer interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, | ||
54 | 32); | ||
55 | break; | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) | ||
61 | { | ||
62 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); | ||
63 | } | ||
64 | |||
65 | static u32 get_sr(struct fsl_dma_chan *fsl_chan) | ||
66 | { | ||
67 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); | ||
68 | } | ||
69 | |||
70 | static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, | ||
71 | struct fsl_dma_ld_hw *hw, u32 count) | ||
72 | { | ||
73 | hw->count = CPU_TO_DMA(fsl_chan, count, 32); | ||
74 | } | ||
75 | |||
76 | static void set_desc_src(struct fsl_dma_chan *fsl_chan, | ||
77 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | ||
78 | { | ||
79 | u64 snoop_bits; | ||
80 | |||
81 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
82 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
83 | hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | ||
84 | } | ||
85 | |||
86 | static void set_desc_dest(struct fsl_dma_chan *fsl_chan, | ||
87 | struct fsl_dma_ld_hw *hw, dma_addr_t dest) | ||
88 | { | ||
89 | u64 snoop_bits; | ||
90 | |||
91 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
92 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
93 | hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); | ||
94 | } | ||
95 | |||
96 | static void set_desc_next(struct fsl_dma_chan *fsl_chan, | ||
97 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | ||
98 | { | ||
99 | u64 snoop_bits; | ||
100 | |||
101 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
102 | ? FSL_DMA_SNEN : 0; | ||
103 | hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | ||
104 | } | ||
105 | |||
106 | static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
107 | { | ||
108 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); | ||
109 | } | ||
110 | |||
111 | static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) | ||
112 | { | ||
113 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; | ||
114 | } | ||
115 | |||
116 | static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
117 | { | ||
118 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); | ||
119 | } | ||
120 | |||
121 | static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) | ||
122 | { | ||
123 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); | ||
124 | } | ||
125 | |||
126 | static int dma_is_idle(struct fsl_dma_chan *fsl_chan) | ||
127 | { | ||
128 | u32 sr = get_sr(fsl_chan); | ||
129 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | ||
130 | } | ||
131 | |||
132 | static void dma_start(struct fsl_dma_chan *fsl_chan) | ||
133 | { | ||
134 | u32 mr_set = 0; | ||
135 | |||
136 | if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | ||
137 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); | ||
138 | mr_set |= FSL_DMA_MR_EMP_EN; | ||
139 | } else | ||
140 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
141 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
142 | & ~FSL_DMA_MR_EMP_EN, 32); | ||
143 | |||
144 | if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | ||
145 | mr_set |= FSL_DMA_MR_EMS_EN; | ||
146 | else | ||
147 | mr_set |= FSL_DMA_MR_CS; | ||
148 | |||
149 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
150 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
151 | | mr_set, 32); | ||
152 | } | ||
153 | |||
154 | static void dma_halt(struct fsl_dma_chan *fsl_chan) | ||
155 | { | ||
156 | int i = 0; | ||
157 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
158 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, | ||
159 | 32); | ||
160 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
161 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS | ||
162 | | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); | ||
163 | |||
164 | while (!dma_is_idle(fsl_chan) && (i++ < 100)) | ||
165 | udelay(10); | ||
166 | if (i >= 100 && !dma_is_idle(fsl_chan)) | ||
167 | dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | ||
168 | } | ||
169 | |||
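dma_start() and dma_halt() drive the channel through read-modify-write cycles on the mode register via DMA_IN()/DMA_OUT(). The same pattern against a plain variable, with invented bit values standing in for the FSL_DMA_MR_* flags:

#include <stdint.h>
#include <stdio.h>

#define MR_CS           (1u << 0)       /* illustrative "channel start" bit */
#define MR_EMS_EN       (1u << 1)       /* illustrative "external start" bit */
#define MR_CA           (1u << 2)       /* illustrative "channel abort" bit */

static uint32_t mode_reg;               /* stands in for the memory-mapped register */

static uint32_t reg_in(void)      { return mode_reg; }
static void reg_out(uint32_t val) { mode_reg = val; }

static void channel_start(int external_start)
{
        uint32_t mr_set = external_start ? MR_EMS_EN : MR_CS;

        reg_out(reg_in() | mr_set);     /* read-modify-write: only add bits */
}

static void channel_halt(void)
{
        reg_out(reg_in() & ~(MR_CS | MR_EMS_EN | MR_CA));
}

int main(void)
{
        channel_start(0);
        printf("after start: 0x%08x\n", mode_reg);
        channel_halt();
        printf("after halt:  0x%08x\n", mode_reg);
        return 0;
}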
170 | static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | ||
171 | struct fsl_desc_sw *desc) | ||
172 | { | ||
173 | desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
174 | DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, | ||
175 | 64); | ||
176 | } | ||
177 | |||
178 | static void append_ld_queue(struct fsl_dma_chan *fsl_chan, | ||
179 | struct fsl_desc_sw *new_desc) | ||
180 | { | ||
181 | struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | ||
182 | |||
183 | if (list_empty(&fsl_chan->ld_queue)) | ||
184 | return; | ||
185 | |||
186 | /* Link to the new descriptor's physical address and | ||
187 | * enable the End-of-segment interrupt for | ||
188 | * the last link descriptor. | ||
189 | * (the previous node's next link descriptor) | ||
190 | * | ||
191 | * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set. | ||
192 | */ | ||
193 | queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
194 | new_desc->async_tx.phys | FSL_DMA_EOSIE | | ||
195 | (((fsl_chan->feature & FSL_DMA_IP_MASK) | ||
196 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | ||
201 | * @fsl_chan : Freescale DMA channel | ||
202 | * @size : Address loop size, 0 for disable loop | ||
203 | * | ||
204 | * Set the source address hold (loop) transfer size. The hold or | ||
205 | * loop size applies while the DMA transfers data from the source | ||
206 | * address (SA): with a loop size of 4, the DMA reads from SA, | ||
207 | * SA + 1, SA + 2, SA + 3, then loops back to SA, SA + 1 ... | ||
208 | * and so on. | ||
209 | */ | ||
210 | static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
211 | { | ||
212 | switch (size) { | ||
213 | case 0: | ||
214 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
215 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
216 | (~FSL_DMA_MR_SAHE), 32); | ||
217 | break; | ||
218 | case 1: | ||
219 | case 2: | ||
220 | case 4: | ||
221 | case 8: | ||
222 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
223 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
224 | FSL_DMA_MR_SAHE | (__ilog2(size) << 14), | ||
225 | 32); | ||
226 | break; | ||
227 | } | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * fsl_chan_set_dest_loop_size - Set destination address hold transfer size | ||
232 | * @fsl_chan : Freescale DMA channel | ||
233 | * @size : Address loop size, 0 for disable loop | ||
234 | * | ||
235 | * Set the destination address hold (loop) transfer size. The hold or | ||
236 | * loop size applies while the DMA transfers data to the destination | ||
237 | * address (TA): with a loop size of 4, the DMA writes to TA, | ||
238 | * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1 ... | ||
239 | * and so on. | ||
240 | */ | ||
241 | static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
242 | { | ||
243 | switch (size) { | ||
244 | case 0: | ||
245 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
246 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
247 | (~FSL_DMA_MR_DAHE), 32); | ||
248 | break; | ||
249 | case 1: | ||
250 | case 2: | ||
251 | case 4: | ||
252 | case 8: | ||
253 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
254 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
255 | FSL_DMA_MR_DAHE | (__ilog2(size) << 16), | ||
256 | 32); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | ||
263 | * @fsl_chan : Freescale DMA channel | ||
264 | * @size : Pause control size, 0 for disable external pause control. | ||
265 | * The maximum is 1024. | ||
266 | * | ||
267 | * The Freescale DMA channel can be controlled by the external | ||
268 | * signal DREQ#. The pause control size is how many bytes are allowed | ||
269 | * to transfer before pausing the channel, after which a new assertion | ||
270 | * of DREQ# resumes channel operation. | ||
271 | */ | ||
272 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | ||
273 | { | ||
274 | if (size > 1024) | ||
275 | return; | ||
276 | |||
277 | if (size) { | ||
278 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
279 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
280 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
281 | 32); | ||
282 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | ||
283 | } else | ||
284 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * fsl_chan_toggle_ext_start - Toggle channel external start status | ||
289 | * @fsl_chan : Freescale DMA channel | ||
290 | * @enable : 0 is disabled, 1 is enabled. | ||
291 | * | ||
292 | * If enable the external start, the channel can be started by an | ||
293 | * external DMA start pin. So the dma_start() does not start the | ||
294 | * transfer immediately. The DMA channel will wait for the | ||
295 | * control pin asserted. | ||
296 | */ | ||
297 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | ||
298 | { | ||
299 | if (enable) | ||
300 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | ||
301 | else | ||
302 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | ||
303 | } | ||
304 | |||
305 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
306 | { | ||
307 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | ||
308 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | ||
309 | unsigned long flags; | ||
310 | dma_cookie_t cookie; | ||
311 | |||
312 | /* cookie increment and adding to ld_queue must be atomic */ | ||
313 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
314 | |||
315 | cookie = fsl_chan->common.cookie; | ||
316 | cookie++; | ||
317 | if (cookie < 0) | ||
318 | cookie = 1; | ||
319 | desc->async_tx.cookie = cookie; | ||
320 | fsl_chan->common.cookie = desc->async_tx.cookie; | ||
321 | |||
322 | append_ld_queue(fsl_chan, desc); | ||
323 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | ||
324 | |||
325 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
326 | |||
327 | return cookie; | ||
328 | } | ||
329 | |||
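Cookies are signed and only ever move forward; when the increment would go negative the submit path restarts at 1, keeping zero and negative values free for "no cookie" and error codes. The wrap in isolation (the overflow is checked explicitly here so the sample stays well defined):

#include <limits.h>
#include <stdio.h>

/* Next cookie, keeping 0 and negative values reserved. */
static int next_cookie(int cookie)
{
        if (cookie >= INT_MAX || cookie < 0)
                return 1;               /* wrap back to the first valid cookie */
        return cookie + 1;
}

int main(void)
{
        printf("%d\n", next_cookie(5));         /* 6 */
        printf("%d\n", next_cookie(INT_MAX));   /* 1 */
        return 0;
}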
330 | /** | ||
331 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | ||
332 | * @fsl_chan : Freescale DMA channel | ||
333 | * | ||
334 | * Return - The descriptor allocated. NULL for failed. | ||
335 | */ | ||
336 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | ||
337 | struct fsl_dma_chan *fsl_chan) | ||
338 | { | ||
339 | dma_addr_t pdesc; | ||
340 | struct fsl_desc_sw *desc_sw; | ||
341 | |||
342 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
343 | if (desc_sw) { | ||
344 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | ||
345 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
346 | &fsl_chan->common); | ||
347 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
348 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | ||
349 | desc_sw->async_tx.phys = pdesc; | ||
350 | } | ||
351 | |||
352 | return desc_sw; | ||
353 | } | ||
354 | |||
355 | |||
356 | /** | ||
357 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * @fsl_chan : Freescale DMA channel | ||
359 | * | ||
360 | * This function will create a dma pool for descriptor allocation. | ||
361 | * | ||
362 | * Return - The number of descriptors allocated. | ||
363 | */ | ||
364 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | ||
365 | { | ||
366 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
367 | LIST_HEAD(tmp_list); | ||
368 | |||
369 | /* The descriptor must be aligned to 32 bytes to meet the | ||
370 | * FSL DMA specification requirement. | ||
371 | */ | ||
372 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | ||
373 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | ||
374 | 32, 0); | ||
375 | if (!fsl_chan->desc_pool) { | ||
376 | dev_err(fsl_chan->dev, "No memory for channel %d " | ||
377 | "descriptor dma pool.\n", fsl_chan->id); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
386 | * @fsl_chan : Freescale DMA channel | ||
387 | */ | ||
388 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | ||
389 | { | ||
390 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
391 | struct fsl_desc_sw *desc, *_desc; | ||
392 | unsigned long flags; | ||
393 | |||
394 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | ||
395 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
396 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
397 | #ifdef FSL_DMA_LD_DEBUG | ||
398 | dev_dbg(fsl_chan->dev, | ||
399 | "LD %p will be released.\n", desc); | ||
400 | #endif | ||
401 | list_del(&desc->node); | ||
402 | /* free link descriptor */ | ||
403 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
404 | } | ||
405 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
406 | dma_pool_destroy(fsl_chan->desc_pool); | ||
407 | } | ||
408 | |||
409 | static struct dma_async_tx_descriptor * | ||
410 | fsl_dma_prep_interrupt(struct dma_chan *chan) | ||
411 | { | ||
412 | struct fsl_dma_chan *fsl_chan; | ||
413 | struct fsl_desc_sw *new; | ||
414 | |||
415 | if (!chan) | ||
416 | return NULL; | ||
417 | |||
418 | fsl_chan = to_fsl_chan(chan); | ||
419 | |||
420 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
421 | if (!new) { | ||
422 | dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); | ||
423 | return NULL; | ||
424 | } | ||
425 | |||
426 | new->async_tx.cookie = -EBUSY; | ||
427 | new->async_tx.ack = 0; | ||
428 | |||
429 | /* Set End-of-link on the last link descriptor of the new list */ | ||
430 | set_ld_eol(fsl_chan, new); | ||
431 | |||
432 | return &new->async_tx; | ||
433 | } | ||
434 | |||
435 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | ||
436 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
437 | size_t len, unsigned long flags) | ||
438 | { | ||
439 | struct fsl_dma_chan *fsl_chan; | ||
440 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | ||
441 | size_t copy; | ||
442 | LIST_HEAD(link_chain); | ||
443 | |||
444 | if (!chan) | ||
445 | return NULL; | ||
446 | |||
447 | if (!len) | ||
448 | return NULL; | ||
449 | |||
450 | fsl_chan = to_fsl_chan(chan); | ||
451 | |||
452 | do { | ||
453 | |||
454 | /* Allocate the link descriptor from DMA pool */ | ||
455 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
456 | if (!new) { | ||
457 | dev_err(fsl_chan->dev, | ||
458 | "No free memory for link descriptor\n"); | ||
459 | return NULL; | ||
460 | } | ||
461 | #ifdef FSL_DMA_LD_DEBUG | ||
462 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
463 | #endif | ||
464 | |||
465 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | ||
466 | |||
467 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
468 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
469 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | ||
470 | |||
471 | if (!first) | ||
472 | first = new; | ||
473 | else | ||
474 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | ||
475 | |||
476 | new->async_tx.cookie = 0; | ||
477 | new->async_tx.ack = 1; | ||
478 | |||
479 | prev = new; | ||
480 | len -= copy; | ||
481 | dma_src += copy; | ||
482 | dma_dest += copy; | ||
483 | |||
484 | /* Insert the link descriptor to the LD ring */ | ||
485 | list_add_tail(&new->node, &first->async_tx.tx_list); | ||
486 | } while (len); | ||
487 | |||
488 | new->async_tx.ack = 0; /* client is in control of this ack */ | ||
489 | new->async_tx.cookie = -EBUSY; | ||
490 | |||
491 | /* Set End-of-link to the last link descriptor of the new list */ | ||
492 | set_ld_eol(fsl_chan, new); | ||
493 | |||
494 | return first ? &first->async_tx : NULL; | ||
495 | } | ||
496 | |||
497 | /** | ||
498 | * fsl_dma_update_completed_cookie - Update the completed cookie. | ||
499 | * @fsl_chan : Freescale DMA channel | ||
500 | */ | ||
501 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | ||
502 | { | ||
503 | struct fsl_desc_sw *cur_desc, *desc; | ||
504 | dma_addr_t ld_phy; | ||
505 | |||
506 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | ||
507 | |||
508 | if (ld_phy) { | ||
509 | cur_desc = NULL; | ||
510 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | ||
511 | if (desc->async_tx.phys == ld_phy) { | ||
512 | cur_desc = desc; | ||
513 | break; | ||
514 | } | ||
515 | |||
516 | if (cur_desc && cur_desc->async_tx.cookie) { | ||
517 | if (dma_is_idle(fsl_chan)) | ||
518 | fsl_chan->completed_cookie = | ||
519 | cur_desc->async_tx.cookie; | ||
520 | else | ||
521 | fsl_chan->completed_cookie = | ||
522 | cur_desc->async_tx.cookie - 1; | ||
523 | } | ||
524 | } | ||
525 | } | ||
526 | |||
527 | /** | ||
528 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
529 | * @fsl_chan : Freescale DMA channel | ||
530 | * | ||
531 | * This function cleans up the ld_queue of the DMA channel: it runs the | ||
532 | * callback of each completed link descriptor, removes the descriptor | ||
533 | * from the queue and returns it to the descriptor pool. | ||
534 | */ | ||
535 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | ||
536 | { | ||
537 | struct fsl_desc_sw *desc, *_desc; | ||
538 | unsigned long flags; | ||
539 | |||
540 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
541 | |||
542 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | ||
543 | fsl_chan->completed_cookie); | ||
544 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
545 | dma_async_tx_callback callback; | ||
546 | void *callback_param; | ||
547 | |||
548 | if (dma_async_is_complete(desc->async_tx.cookie, | ||
549 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
550 | == DMA_IN_PROGRESS) | ||
551 | break; | ||
552 | |||
553 | callback = desc->async_tx.callback; | ||
554 | callback_param = desc->async_tx.callback_param; | ||
555 | |||
556 | /* Remove from ld_queue list */ | ||
557 | list_del(&desc->node); | ||
558 | |||
559 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n", | ||
560 | desc); | ||
561 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
562 | |||
563 | /* Run the link descriptor callback function */ | ||
564 | if (callback) { | ||
565 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
566 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | ||
567 | desc); | ||
568 | callback(callback_param); | ||
569 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
570 | } | ||
571 | } | ||
572 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | ||
577 | * @fsl_chan : Freescale DMA channel | ||
578 | */ | ||
579 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | ||
580 | { | ||
581 | struct list_head *ld_node; | ||
582 | dma_addr_t next_dest_addr; | ||
583 | unsigned long flags; | ||
584 | |||
585 | if (!dma_is_idle(fsl_chan)) | ||
586 | return; | ||
587 | |||
588 | dma_halt(fsl_chan); | ||
589 | |||
590 | /* If there are link descriptors in the queue that have | ||
591 | * not been transferred yet, start the transfer. | ||
592 | */ | ||
593 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
594 | |||
595 | /* Find the first untransferred descriptor */ | ||
596 | for (ld_node = fsl_chan->ld_queue.next; | ||
597 | (ld_node != &fsl_chan->ld_queue) | ||
598 | && (dma_async_is_complete( | ||
599 | to_fsl_desc(ld_node)->async_tx.cookie, | ||
600 | fsl_chan->completed_cookie, | ||
601 | fsl_chan->common.cookie) == DMA_SUCCESS); | ||
602 | ld_node = ld_node->next); | ||
603 | |||
604 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
605 | |||
606 | if (ld_node != &fsl_chan->ld_queue) { | ||
607 | /* Get the ld start address from ld_queue */ | ||
608 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | ||
609 | dev_dbg(fsl_chan->dev, "xfer LDs starting from %p\n", | ||
610 | (void *)next_dest_addr); | ||
611 | set_cdar(fsl_chan, next_dest_addr); | ||
612 | dma_start(fsl_chan); | ||
613 | } else { | ||
614 | set_cdar(fsl_chan, 0); | ||
615 | set_ndar(fsl_chan, 0); | ||
616 | } | ||
617 | } | ||
618 | |||
619 | /** | ||
620 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | ||
621 | * @fsl_chan : Freescale DMA channel | ||
622 | */ | ||
623 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
624 | { | ||
625 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
626 | |||
627 | #ifdef FSL_DMA_LD_DEBUG | ||
628 | struct fsl_desc_sw *ld; | ||
629 | unsigned long flags; | ||
630 | |||
631 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
632 | if (list_empty(&fsl_chan->ld_queue)) { | ||
633 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
634 | return; | ||
635 | } | ||
636 | |||
637 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
638 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
639 | int i; | ||
640 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
641 | fsl_chan->id, ld->async_tx.phys); | ||
642 | for (i = 0; i < 8; i++) | ||
643 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
644 | i, *(((u32 *)&ld->hw) + i)); | ||
645 | } | ||
646 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
647 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
648 | #endif | ||
649 | |||
650 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
651 | } | ||
652 | |||
653 | static void fsl_dma_dependency_added(struct dma_chan *chan) | ||
654 | { | ||
655 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
656 | |||
657 | fsl_chan_ld_cleanup(fsl_chan); | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * fsl_dma_is_complete - Determine the DMA status | ||
662 | * @fsl_chan : Freescale DMA channel | ||
663 | */ | ||
664 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | ||
665 | dma_cookie_t cookie, | ||
666 | dma_cookie_t *done, | ||
667 | dma_cookie_t *used) | ||
668 | { | ||
669 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
670 | dma_cookie_t last_used; | ||
671 | dma_cookie_t last_complete; | ||
672 | |||
673 | fsl_chan_ld_cleanup(fsl_chan); | ||
674 | |||
675 | last_used = chan->cookie; | ||
676 | last_complete = fsl_chan->completed_cookie; | ||
677 | |||
678 | if (done) | ||
679 | *done = last_complete; | ||
680 | |||
681 | if (used) | ||
682 | *used = last_used; | ||
683 | |||
684 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
685 | } | ||
686 | |||
687 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | ||
688 | { | ||
689 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
690 | u32 stat; | ||
691 | |||
692 | stat = get_sr(fsl_chan); | ||
693 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | ||
694 | fsl_chan->id, stat); | ||
695 | set_sr(fsl_chan, stat); /* Clear the event register */ | ||
696 | |||
697 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | ||
698 | if (!stat) | ||
699 | return IRQ_NONE; | ||
700 | |||
701 | if (stat & FSL_DMA_SR_TE) | ||
702 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | ||
703 | |||
704 | /* If the link descriptor segment transfer finishes, | ||
705 | * we will recycle the used descriptor. | ||
706 | */ | ||
707 | if (stat & FSL_DMA_SR_EOSI) { | ||
708 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | ||
709 | dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n", | ||
710 | (void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan)); | ||
711 | stat &= ~FSL_DMA_SR_EOSI; | ||
712 | fsl_dma_update_completed_cookie(fsl_chan); | ||
713 | } | ||
714 | |||
715 | /* If the current transfer hit the end of the link chain, | ||
716 | * we should clear the Channel Start bit to prepare for | ||
717 | * the next transfer. | ||
718 | */ | ||
719 | if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) { | ||
720 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | ||
721 | stat &= ~FSL_DMA_SR_EOLNI; | ||
722 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
723 | } | ||
724 | |||
725 | if (stat) | ||
726 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | ||
727 | stat); | ||
728 | |||
729 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | ||
730 | tasklet_schedule(&fsl_chan->tasklet); | ||
731 | return IRQ_HANDLED; | ||
732 | } | ||
733 | |||
734 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | ||
735 | { | ||
736 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | ||
737 | u32 gsr; | ||
738 | int ch_nr; | ||
739 | |||
740 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | ||
741 | : in_le32(fdev->reg_base); | ||
742 | ch_nr = (32 - ffs(gsr)) / 8; | ||
743 | |||
744 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | ||
745 | fdev->chan[ch_nr]) : IRQ_NONE; | ||
746 | } | ||
747 | |||
748 | static void dma_do_tasklet(unsigned long data) | ||
749 | { | ||
750 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
751 | fsl_chan_ld_cleanup(fsl_chan); | ||
752 | } | ||
753 | |||
754 | #ifdef FSL_DMA_CALLBACKTEST | ||
755 | static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) | ||
756 | { | ||
757 | if (fsl_chan) | ||
758 | dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); | ||
759 | } | ||
760 | #endif | ||
761 | |||
762 | #ifdef CONFIG_FSL_DMA_SELFTEST | ||
763 | static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | ||
764 | { | ||
765 | struct dma_chan *chan; | ||
766 | int err = 0; | ||
767 | dma_addr_t dma_dest, dma_src; | ||
768 | dma_cookie_t cookie; | ||
769 | u8 *src, *dest; | ||
770 | int i; | ||
771 | size_t test_size; | ||
772 | struct dma_async_tx_descriptor *tx1, *tx2, *tx3; | ||
773 | |||
774 | test_size = 4096; | ||
775 | |||
776 | src = kmalloc(test_size * 2, GFP_KERNEL); | ||
777 | if (!src) { | ||
778 | dev_err(fsl_chan->dev, | ||
779 | "selftest: Cannot alloc memory for test!\n"); | ||
780 | err = -ENOMEM; | ||
781 | goto out; | ||
782 | } | ||
783 | |||
784 | dest = src + test_size; | ||
785 | |||
786 | for (i = 0; i < test_size; i++) | ||
787 | src[i] = (u8) i; | ||
788 | |||
789 | chan = &fsl_chan->common; | ||
790 | |||
791 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
792 | dev_err(fsl_chan->dev, | ||
793 | "selftest: Cannot alloc resources for DMA\n"); | ||
794 | err = -ENODEV; | ||
795 | goto out; | ||
796 | } | ||
797 | |||
798 | /* TX 1 */ | ||
799 | dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, | ||
800 | DMA_TO_DEVICE); | ||
801 | dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, | ||
802 | DMA_FROM_DEVICE); | ||
803 | tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); | ||
804 | async_tx_ack(tx1); | ||
805 | |||
806 | cookie = fsl_dma_tx_submit(tx1); | ||
807 | fsl_dma_memcpy_issue_pending(chan); | ||
808 | msleep(2); | ||
809 | |||
810 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
811 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
812 | err = -ENODEV; | ||
813 | goto out; | ||
814 | } | ||
815 | |||
816 | /* Test free and re-alloc channel resources */ | ||
817 | fsl_dma_free_chan_resources(chan); | ||
818 | |||
819 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
820 | dev_err(fsl_chan->dev, | ||
821 | "selftest: Cannot alloc resources for DMA\n"); | ||
822 | err = -ENODEV; | ||
823 | goto free_resources; | ||
824 | } | ||
825 | |||
826 | /* Continue to test | ||
827 | * TX 2 | ||
828 | */ | ||
829 | dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, | ||
830 | test_size / 4, DMA_TO_DEVICE); | ||
831 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, | ||
832 | test_size / 4, DMA_FROM_DEVICE); | ||
833 | tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
834 | async_tx_ack(tx2); | ||
835 | |||
836 | /* TX 3 */ | ||
837 | dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, | ||
838 | test_size / 4, DMA_TO_DEVICE); | ||
839 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, | ||
840 | test_size / 4, DMA_FROM_DEVICE); | ||
841 | tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
842 | async_tx_ack(tx3); | ||
843 | |||
844 | /* Test submitting the prepared transactions out of order */ | ||
845 | cookie = fsl_dma_tx_submit(tx3); | ||
846 | cookie = fsl_dma_tx_submit(tx2); | ||
847 | |||
848 | #ifdef FSL_DMA_CALLBACKTEST | ||
849 | if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) | ||
850 | dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { | ||
851 | tx3->callback = fsl_dma_callback_test; | ||
852 | tx3->callback_param = fsl_chan; | ||
853 | } | ||
854 | #endif | ||
855 | fsl_dma_memcpy_issue_pending(chan); | ||
856 | msleep(2); | ||
857 | |||
858 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
859 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
860 | err = -ENODEV; | ||
861 | goto free_resources; | ||
862 | } | ||
863 | |||
864 | err = memcmp(src, dest, test_size); | ||
865 | if (err) { | ||
866 | for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); | ||
867 | i++); | ||
868 | dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%ld is " | ||
869 | "wrong! src 0x%x, dest 0x%x\n", | ||
870 | i, (long)test_size, *(src + i), *(dest + i)); | ||
871 | } | ||
872 | |||
873 | free_resources: | ||
874 | fsl_dma_free_chan_resources(chan); | ||
875 | out: | ||
876 | kfree(src); | ||
877 | return err; | ||
878 | } | ||
879 | #endif | ||
880 | |||
881 | static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | ||
882 | const struct of_device_id *match) | ||
883 | { | ||
884 | struct fsl_dma_device *fdev; | ||
885 | struct fsl_dma_chan *new_fsl_chan; | ||
886 | int err; | ||
887 | |||
888 | fdev = dev_get_drvdata(dev->dev.parent); | ||
889 | BUG_ON(!fdev); | ||
890 | |||
891 | /* alloc channel */ | ||
892 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | ||
893 | if (!new_fsl_chan) { | ||
894 | dev_err(&dev->dev, "No free memory for allocating " | ||
895 | "dma channels!\n"); | ||
896 | err = -ENOMEM; | ||
897 | goto err; | ||
898 | } | ||
899 | |||
900 | /* get dma channel register base */ | ||
901 | err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); | ||
902 | if (err) { | ||
903 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
904 | dev->node->full_name); | ||
905 | goto err; | ||
906 | } | ||
907 | |||
908 | new_fsl_chan->feature = *(u32 *)match->data; | ||
909 | |||
910 | if (!fdev->feature) | ||
911 | fdev->feature = new_fsl_chan->feature; | ||
912 | |||
913 | /* If the DMA device's feature is different from its channels', | ||
914 | * report the bug. | ||
915 | */ | ||
916 | WARN_ON(fdev->feature != new_fsl_chan->feature); | ||
917 | |||
918 | new_fsl_chan->dev = &dev->dev; | ||
919 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | ||
920 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | ||
921 | |||
922 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
923 | if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
924 | dev_err(&dev->dev, "There is no channel %d!\n", | ||
925 | new_fsl_chan->id); | ||
926 | err = -EINVAL; | ||
927 | goto err; | ||
928 | } | ||
929 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
930 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
931 | (unsigned long)new_fsl_chan); | ||
932 | |||
933 | /* Init the channel */ | ||
934 | dma_init(new_fsl_chan); | ||
935 | |||
936 | /* Clear the CDAR register */ | ||
937 | set_cdar(new_fsl_chan, 0); | ||
938 | |||
939 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
940 | case FSL_DMA_IP_85XX: | ||
941 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | ||
942 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | ||
943 | case FSL_DMA_IP_83XX: | ||
944 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | ||
945 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | ||
946 | } | ||
947 | |||
948 | spin_lock_init(&new_fsl_chan->desc_lock); | ||
949 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | ||
950 | |||
951 | new_fsl_chan->common.device = &fdev->common; | ||
952 | |||
953 | /* Add the channel to DMA device channel list */ | ||
954 | list_add_tail(&new_fsl_chan->common.device_node, | ||
955 | &fdev->common.channels); | ||
956 | fdev->common.chancnt++; | ||
957 | |||
958 | new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); | ||
959 | if (new_fsl_chan->irq != NO_IRQ) { | ||
960 | err = request_irq(new_fsl_chan->irq, | ||
961 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
962 | "fsldma-channel", new_fsl_chan); | ||
963 | if (err) { | ||
964 | dev_err(&dev->dev, "DMA channel %s request_irq error " | ||
965 | "with return %d\n", dev->node->full_name, err); | ||
966 | goto err; | ||
967 | } | ||
968 | } | ||
969 | |||
970 | #ifdef CONFIG_FSL_DMA_SELFTEST | ||
971 | err = fsl_dma_self_test(new_fsl_chan); | ||
972 | if (err) | ||
973 | goto err; | ||
974 | #endif | ||
975 | |||
976 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
977 | match->compatible, new_fsl_chan->irq); | ||
978 | |||
979 | return 0; | ||
980 | err: | ||
981 | dma_halt(new_fsl_chan); | ||
982 | iounmap(new_fsl_chan->reg_base); | ||
983 | free_irq(new_fsl_chan->irq, new_fsl_chan); | ||
984 | list_del(&new_fsl_chan->common.device_node); | ||
985 | kfree(new_fsl_chan); | ||
986 | return err; | ||
987 | } | ||
988 | |||
989 | const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; | ||
990 | const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; | ||
991 | |||
992 | static struct of_device_id of_fsl_dma_chan_ids[] = { | ||
993 | { | ||
994 | .compatible = "fsl,mpc8540-dma-channel", | ||
995 | .data = (void *)&mpc8540_dma_ip_feature, | ||
996 | }, | ||
997 | { | ||
998 | .compatible = "fsl,mpc8349-dma-channel", | ||
999 | .data = (void *)&mpc8349_dma_ip_feature, | ||
1000 | }, | ||
1001 | {} | ||
1002 | }; | ||
1003 | |||
1004 | static struct of_platform_driver of_fsl_dma_chan_driver = { | ||
1005 | .name = "of-fsl-dma-channel", | ||
1006 | .match_table = of_fsl_dma_chan_ids, | ||
1007 | .probe = of_fsl_dma_chan_probe, | ||
1008 | }; | ||
1009 | |||
1010 | static __init int of_fsl_dma_chan_init(void) | ||
1011 | { | ||
1012 | return of_register_platform_driver(&of_fsl_dma_chan_driver); | ||
1013 | } | ||
1014 | |||
1015 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | ||
1016 | const struct of_device_id *match) | ||
1017 | { | ||
1018 | int err; | ||
1019 | unsigned int irq; | ||
1020 | struct fsl_dma_device *fdev; | ||
1021 | |||
1022 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | ||
1023 | if (!fdev) { | ||
1024 | dev_err(&dev->dev, "Not enough memory for 'priv'\n"); | ||
1025 | err = -ENOMEM; | ||
1026 | goto err; | ||
1027 | } | ||
1028 | fdev->dev = &dev->dev; | ||
1029 | INIT_LIST_HEAD(&fdev->common.channels); | ||
1030 | |||
1031 | /* get DMA controller register base */ | ||
1032 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | ||
1033 | if (err) { | ||
1034 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
1035 | dev->node->full_name); | ||
1036 | goto err; | ||
1037 | } | ||
1038 | |||
1039 | dev_info(&dev->dev, "Probing the Freescale DMA driver for %s " | ||
1040 | "controller at %p...\n", | ||
1041 | match->compatible, (void *)fdev->reg.start); | ||
1042 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
1043 | - fdev->reg.start + 1); | ||
1044 | |||
1045 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | ||
1046 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1047 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | ||
1048 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | ||
1049 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | ||
1050 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | ||
1051 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | ||
1052 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | ||
1053 | fdev->common.device_dependency_added = fsl_dma_dependency_added; | ||
1054 | fdev->common.dev = &dev->dev; | ||
1055 | |||
1056 | irq = irq_of_parse_and_map(dev->node, 0); | ||
1057 | if (irq != NO_IRQ) { | ||
1058 | err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
1059 | "fsldma-device", fdev); | ||
1060 | if (err) { | ||
1061 | dev_err(&dev->dev, "DMA device request_irq error " | ||
1062 | "with return %d\n", err); | ||
1063 | goto err; | ||
1064 | } | ||
1065 | } | ||
1066 | |||
1067 | dev_set_drvdata(&(dev->dev), fdev); | ||
1068 | of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); | ||
1069 | |||
1070 | dma_async_device_register(&fdev->common); | ||
1071 | return 0; | ||
1072 | |||
1073 | err: | ||
1074 | iounmap(fdev->reg_base); | ||
1075 | kfree(fdev); | ||
1076 | return err; | ||
1077 | } | ||
1078 | |||
1079 | static struct of_device_id of_fsl_dma_ids[] = { | ||
1080 | { .compatible = "fsl,mpc8540-dma", }, | ||
1081 | { .compatible = "fsl,mpc8349-dma", }, | ||
1082 | {} | ||
1083 | }; | ||
1084 | |||
1085 | static struct of_platform_driver of_fsl_dma_driver = { | ||
1086 | .name = "of-fsl-dma", | ||
1087 | .match_table = of_fsl_dma_ids, | ||
1088 | .probe = of_fsl_dma_probe, | ||
1089 | }; | ||
1090 | |||
1091 | static __init int of_fsl_dma_init(void) | ||
1092 | { | ||
1093 | return of_register_platform_driver(&of_fsl_dma_driver); | ||
1094 | } | ||
1095 | |||
1096 | subsys_initcall(of_fsl_dma_chan_init); | ||
1097 | subsys_initcall(of_fsl_dma_init); | ||
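For context, a dmaengine client drives a channel registered by this driver through the generic operations wired up in of_fsl_dma_probe() above, in the same sequence the self-test uses. The sketch below is a hypothetical illustration only: the function name and the busy-wait loop are made up, and the buffers are assumed to be DMA-mapped already.

/* Hypothetical client sketch -- not part of this patch. */
static int example_fsl_memcpy(struct dma_chan *chan,
		dma_addr_t dma_dest, dma_addr_t dma_src, size_t len)
{
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Build the link descriptor chain (fsl_dma_prep_memcpy). */
	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
	if (!tx)
		return -ENOMEM;

	/* Queue the chain on ld_queue (fsl_dma_tx_submit). */
	cookie = tx->tx_submit(tx);

	/* Kick the hardware (fsl_dma_memcpy_issue_pending). */
	dma_dev->device_issue_pending(chan);

	/* Poll for completion (fsl_dma_is_complete); a real client
	 * would sleep or use a callback instead of spinning.
	 */
	while (dma_dev->device_is_tx_complete(chan, cookie, NULL, NULL)
			!= DMA_SUCCESS)
		cpu_relax();

	return 0;
}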
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h new file mode 100644 index 000000000000..ba78c42121ba --- /dev/null +++ b/drivers/dma/fsldma.h | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
6 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
7 | * | ||
8 | * This is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __DMA_FSLDMA_H | ||
15 | #define __DMA_FSLDMA_H | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /* Define data structures needed by Freescale | ||
22 | * MPC8540 and MPC8349 DMA controllers. | ||
23 | */ | ||
24 | #define FSL_DMA_MR_CS 0x00000001 | ||
25 | #define FSL_DMA_MR_CC 0x00000002 | ||
26 | #define FSL_DMA_MR_CA 0x00000008 | ||
27 | #define FSL_DMA_MR_EIE 0x00000040 | ||
28 | #define FSL_DMA_MR_XFE 0x00000020 | ||
29 | #define FSL_DMA_MR_EOLNIE 0x00000100 | ||
30 | #define FSL_DMA_MR_EOLSIE 0x00000080 | ||
31 | #define FSL_DMA_MR_EOSIE 0x00000200 | ||
32 | #define FSL_DMA_MR_CDSM 0x00000010 | ||
33 | #define FSL_DMA_MR_CTM 0x00000004 | ||
34 | #define FSL_DMA_MR_EMP_EN 0x00200000 | ||
35 | #define FSL_DMA_MR_EMS_EN 0x00040000 | ||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | ||
37 | #define FSL_DMA_MR_SAHE 0x00001000 | ||
38 | |||
39 | /* Special MR definition for MPC8349 */ | ||
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | ||
41 | |||
42 | #define FSL_DMA_SR_CH 0x00000020 | ||
43 | #define FSL_DMA_SR_CB 0x00000004 | ||
44 | #define FSL_DMA_SR_TE 0x00000080 | ||
45 | #define FSL_DMA_SR_EOSI 0x00000002 | ||
46 | #define FSL_DMA_SR_EOLSI 0x00000001 | ||
47 | #define FSL_DMA_SR_EOCDI 0x00000001 | ||
48 | #define FSL_DMA_SR_EOLNI 0x00000008 | ||
49 | |||
50 | #define FSL_DMA_SATR_SBPATMU 0x20000000 | ||
51 | #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 | ||
52 | #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 | ||
53 | #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 | ||
54 | #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 | ||
55 | #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 | ||
56 | |||
57 | #define FSL_DMA_DATR_DBPATMU 0x20000000 | ||
58 | #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 | ||
59 | #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 | ||
60 | #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 | ||
61 | |||
62 | #define FSL_DMA_EOL ((u64)0x1) | ||
63 | #define FSL_DMA_SNEN ((u64)0x10) | ||
64 | #define FSL_DMA_EOSIE 0x8 | ||
65 | #define FSL_DMA_NLDA_MASK (~(u64)0x1f) | ||
66 | |||
67 | #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu | ||
68 | |||
69 | #define FSL_DMA_DGSR_TE 0x80 | ||
70 | #define FSL_DMA_DGSR_CH 0x20 | ||
71 | #define FSL_DMA_DGSR_PE 0x10 | ||
72 | #define FSL_DMA_DGSR_EOLNI 0x08 | ||
73 | #define FSL_DMA_DGSR_CB 0x04 | ||
74 | #define FSL_DMA_DGSR_EOSI 0x02 | ||
75 | #define FSL_DMA_DGSR_EOLSI 0x01 | ||
76 | |||
77 | struct fsl_dma_ld_hw { | ||
78 | u64 __bitwise src_addr; | ||
79 | u64 __bitwise dst_addr; | ||
80 | u64 __bitwise next_ln_addr; | ||
81 | u32 __bitwise count; | ||
82 | u32 __bitwise reserve; | ||
83 | } __attribute__((aligned(32))); | ||
84 | |||
85 | struct fsl_desc_sw { | ||
86 | struct fsl_dma_ld_hw hw; | ||
87 | struct list_head node; | ||
88 | struct dma_async_tx_descriptor async_tx; | ||
89 | struct list_head *ld; | ||
90 | void *priv; | ||
91 | } __attribute__((aligned(32))); | ||
92 | |||
93 | struct fsl_dma_chan_regs { | ||
94 | u32 __bitwise mr; /* 0x00 - Mode Register */ | ||
95 | u32 __bitwise sr; /* 0x04 - Status Register */ | ||
96 | u64 __bitwise cdar; /* 0x08 - Current descriptor address register */ | ||
97 | u64 __bitwise sar; /* 0x10 - Source Address Register */ | ||
98 | u64 __bitwise dar; /* 0x18 - Destination Address Register */ | ||
99 | u32 __bitwise bcr; /* 0x20 - Byte Count Register */ | ||
100 | u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */ | ||
101 | }; | ||
102 | |||
103 | struct fsl_dma_chan; | ||
104 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | ||
105 | |||
106 | struct fsl_dma_device { | ||
107 | void __iomem *reg_base; /* DGSR register base */ | ||
108 | struct resource reg; /* Resource for register */ | ||
109 | struct device *dev; | ||
110 | struct dma_device common; | ||
111 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | ||
112 | u32 feature; /* The same as DMA channels */ | ||
113 | }; | ||
114 | |||
115 | /* Define macros for fsl_dma_chan->feature property */ | ||
116 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | ||
117 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | ||
118 | |||
119 | #define FSL_DMA_IP_MASK 0x00000ff0 | ||
120 | #define FSL_DMA_IP_85XX 0x00000010 | ||
121 | #define FSL_DMA_IP_83XX 0x00000020 | ||
122 | |||
123 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | ||
124 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | ||
125 | |||
126 | struct fsl_dma_chan { | ||
127 | struct fsl_dma_chan_regs __iomem *reg_base; | ||
128 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
129 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
130 | struct list_head ld_queue; /* Link descriptors queue */ | ||
131 | struct dma_chan common; /* DMA common channel */ | ||
132 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
133 | struct device *dev; /* Channel device */ | ||
134 | struct resource reg; /* Resource for register */ | ||
135 | int irq; /* Channel IRQ */ | ||
136 | int id; /* Raw id of this channel */ | ||
137 | struct tasklet_struct tasklet; | ||
138 | u32 feature; | ||
139 | |||
140 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | ||
141 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | ||
142 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
143 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
144 | }; | ||
145 | |||
146 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | ||
147 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | ||
148 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | ||
149 | |||
150 | #ifndef __powerpc64__ | ||
151 | static u64 in_be64(const u64 __iomem *addr) | ||
152 | { | ||
153 | return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1)); | ||
154 | } | ||
155 | |||
156 | static void out_be64(u64 __iomem *addr, u64 val) | ||
157 | { | ||
158 | out_be32((u32 *)addr, val >> 32); | ||
159 | out_be32((u32 *)addr + 1, (u32)val); | ||
160 | } | ||
161 | |||
162 | /* There are no asm instructions for 64-bit byte-reversed loads and stores */ | ||
163 | static u64 in_le64(const u64 __iomem *addr) | ||
164 | { | ||
165 | return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr)); | ||
166 | } | ||
167 | |||
168 | static void out_le64(u64 __iomem *addr, u64 val) | ||
169 | { | ||
170 | out_le32((u32 *)addr + 1, val >> 32); | ||
171 | out_le32((u32 *)addr, (u32)val); | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | #define DMA_IN(fsl_chan, addr, width) \ | ||
176 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
177 | in_be##width(addr) : in_le##width(addr)) | ||
178 | #define DMA_OUT(fsl_chan, addr, val, width) \ | ||
179 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
180 | out_be##width(addr, val) : out_le##width(addr, val)) | ||
181 | |||
182 | #define DMA_TO_CPU(fsl_chan, d, width) \ | ||
183 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
184 | be##width##_to_cpu(d) : le##width##_to_cpu(d)) | ||
185 | #define CPU_TO_DMA(fsl_chan, c, width) \ | ||
186 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
187 | cpu_to_be##width(c) : cpu_to_le##width(c)) | ||
188 | |||
189 | #endif /* __DMA_FSLDMA_H */ | ||
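The DMA_IN/DMA_OUT macros above pick big- or little-endian MMIO accessors per channel at each call site, keyed off FSL_DMA_BIG_ENDIAN in the feature word. As a hedged illustration (fsldma.c defines its own get_*/set_* helpers along these lines; the names below are invented), register accessors built on them could look like:

/* Hypothetical accessors -- illustrative only, assuming fsldma.h. */
static u32 example_get_sr(struct fsl_dma_chan *fsl_chan)
{
	/* Expands to in_be32() or in_le32() depending on the
	 * FSL_DMA_BIG_ENDIAN bit in fsl_chan->feature.
	 */
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void example_set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	/* 64-bit accesses go through the in_be64()/in_le64() helpers
	 * defined above for 32-bit PowerPC builds.
	 */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr, 64);
}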
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c1..4017d9e7acd2 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
714 | new->len = len; | 714 | new->len = len; |
715 | new->dst = dma_dest; | 715 | new->dst = dma_dest; |
716 | new->src = dma_src; | 716 | new->src = dma_src; |
717 | new->async_tx.ack = 0; | ||
717 | return &new->async_tx; | 718 | return &new->async_tx; |
718 | } else | 719 | } else |
719 | return NULL; | 720 | return NULL; |
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
741 | new->len = len; | 742 | new->len = len; |
742 | new->dst = dma_dest; | 743 | new->dst = dma_dest; |
743 | new->src = dma_src; | 744 | new->src = dma_src; |
745 | new->async_tx.ack = 0; | ||
744 | return &new->async_tx; | 746 | return &new->async_tx; |
745 | } else | 747 | } else |
746 | return NULL; | 748 | return NULL; |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 3986d54492bd..f82b0906d466 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -140,7 +140,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan) | |||
140 | int busy = iop_chan_is_busy(iop_chan); | 140 | int busy = iop_chan_is_busy(iop_chan); |
141 | int seen_current = 0, slot_cnt = 0, slots_per_op = 0; | 141 | int seen_current = 0, slot_cnt = 0, slots_per_op = 0; |
142 | 142 | ||
143 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 143 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
144 | /* free completed slots from the chain starting with | 144 | /* free completed slots from the chain starting with |
145 | * the oldest descriptor | 145 | * the oldest descriptor |
146 | */ | 146 | */ |
@@ -438,7 +438,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
438 | spin_unlock_bh(&iop_chan->lock); | 438 | spin_unlock_bh(&iop_chan->lock); |
439 | 439 | ||
440 | dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", | 440 | dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n", |
441 | __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx); | 441 | __func__, sw_desc->async_tx.cookie, sw_desc->idx); |
442 | 442 | ||
443 | return cookie; | 443 | return cookie; |
444 | } | 444 | } |
@@ -520,7 +520,7 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan) | |||
520 | struct iop_adma_desc_slot *sw_desc, *grp_start; | 520 | struct iop_adma_desc_slot *sw_desc, *grp_start; |
521 | int slot_cnt, slots_per_op; | 521 | int slot_cnt, slots_per_op; |
522 | 522 | ||
523 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 523 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
524 | 524 | ||
525 | spin_lock_bh(&iop_chan->lock); | 525 | spin_lock_bh(&iop_chan->lock); |
526 | slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); | 526 | slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan); |
@@ -548,7 +548,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, | |||
548 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 548 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); |
549 | 549 | ||
550 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 550 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
551 | __FUNCTION__, len); | 551 | __func__, len); |
552 | 552 | ||
553 | spin_lock_bh(&iop_chan->lock); | 553 | spin_lock_bh(&iop_chan->lock); |
554 | slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); | 554 | slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op); |
@@ -580,7 +580,7 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest, | |||
580 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); | 580 | BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT)); |
581 | 581 | ||
582 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", | 582 | dev_dbg(iop_chan->device->common.dev, "%s len: %u\n", |
583 | __FUNCTION__, len); | 583 | __func__, len); |
584 | 584 | ||
585 | spin_lock_bh(&iop_chan->lock); | 585 | spin_lock_bh(&iop_chan->lock); |
586 | slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); | 586 | slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op); |
@@ -614,7 +614,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest, | |||
614 | 614 | ||
615 | dev_dbg(iop_chan->device->common.dev, | 615 | dev_dbg(iop_chan->device->common.dev, |
616 | "%s src_cnt: %d len: %u flags: %lx\n", | 616 | "%s src_cnt: %d len: %u flags: %lx\n", |
617 | __FUNCTION__, src_cnt, len, flags); | 617 | __func__, src_cnt, len, flags); |
618 | 618 | ||
619 | spin_lock_bh(&iop_chan->lock); | 619 | spin_lock_bh(&iop_chan->lock); |
620 | slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); | 620 | slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op); |
@@ -648,7 +648,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
648 | return NULL; | 648 | return NULL; |
649 | 649 | ||
650 | dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", | 650 | dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", |
651 | __FUNCTION__, src_cnt, len); | 651 | __func__, src_cnt, len); |
652 | 652 | ||
653 | spin_lock_bh(&iop_chan->lock); | 653 | spin_lock_bh(&iop_chan->lock); |
654 | slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); | 654 | slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op); |
@@ -659,7 +659,7 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src, | |||
659 | iop_desc_set_zero_sum_byte_count(grp_start, len); | 659 | iop_desc_set_zero_sum_byte_count(grp_start, len); |
660 | grp_start->xor_check_result = result; | 660 | grp_start->xor_check_result = result; |
661 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", | 661 | pr_debug("\t%s: grp_start->xor_check_result: %p\n", |
662 | __FUNCTION__, grp_start->xor_check_result); | 662 | __func__, grp_start->xor_check_result); |
663 | sw_desc->unmap_src_cnt = src_cnt; | 663 | sw_desc->unmap_src_cnt = src_cnt; |
664 | sw_desc->unmap_len = len; | 664 | sw_desc->unmap_len = len; |
665 | while (src_cnt--) | 665 | while (src_cnt--) |
@@ -700,7 +700,7 @@ static void iop_adma_free_chan_resources(struct dma_chan *chan) | |||
700 | iop_chan->last_used = NULL; | 700 | iop_chan->last_used = NULL; |
701 | 701 | ||
702 | dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", | 702 | dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n", |
703 | __FUNCTION__, iop_chan->slots_allocated); | 703 | __func__, iop_chan->slots_allocated); |
704 | spin_unlock_bh(&iop_chan->lock); | 704 | spin_unlock_bh(&iop_chan->lock); |
705 | 705 | ||
706 | /* one is ok since we left it on there on purpose */ | 706 | /* one is ok since we left it on there on purpose */ |
@@ -753,7 +753,7 @@ static irqreturn_t iop_adma_eot_handler(int irq, void *data) | |||
753 | { | 753 | { |
754 | struct iop_adma_chan *chan = data; | 754 | struct iop_adma_chan *chan = data; |
755 | 755 | ||
756 | dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); | 756 | dev_dbg(chan->device->common.dev, "%s\n", __func__); |
757 | 757 | ||
758 | tasklet_schedule(&chan->irq_tasklet); | 758 | tasklet_schedule(&chan->irq_tasklet); |
759 | 759 | ||
@@ -766,7 +766,7 @@ static irqreturn_t iop_adma_eoc_handler(int irq, void *data) | |||
766 | { | 766 | { |
767 | struct iop_adma_chan *chan = data; | 767 | struct iop_adma_chan *chan = data; |
768 | 768 | ||
769 | dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__); | 769 | dev_dbg(chan->device->common.dev, "%s\n", __func__); |
770 | 770 | ||
771 | tasklet_schedule(&chan->irq_tasklet); | 771 | tasklet_schedule(&chan->irq_tasklet); |
772 | 772 | ||
@@ -823,7 +823,7 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device) | |||
823 | int err = 0; | 823 | int err = 0; |
824 | struct iop_adma_chan *iop_chan; | 824 | struct iop_adma_chan *iop_chan; |
825 | 825 | ||
826 | dev_dbg(device->common.dev, "%s\n", __FUNCTION__); | 826 | dev_dbg(device->common.dev, "%s\n", __func__); |
827 | 827 | ||
828 | src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); | 828 | src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL); |
829 | if (!src) | 829 | if (!src) |
@@ -906,7 +906,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device) | |||
906 | int err = 0; | 906 | int err = 0; |
907 | struct iop_adma_chan *iop_chan; | 907 | struct iop_adma_chan *iop_chan; |
908 | 908 | ||
909 | dev_dbg(device->common.dev, "%s\n", __FUNCTION__); | 909 | dev_dbg(device->common.dev, "%s\n", __func__); |
910 | 910 | ||
911 | for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { | 911 | for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) { |
912 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); | 912 | xor_srcs[src_idx] = alloc_page(GFP_KERNEL); |
@@ -1159,7 +1159,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) | |||
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", | 1161 | dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n", |
1162 | __FUNCTION__, adev->dma_desc_pool_virt, | 1162 | __func__, adev->dma_desc_pool_virt, |
1163 | (void *) adev->dma_desc_pool); | 1163 | (void *) adev->dma_desc_pool); |
1164 | 1164 | ||
1165 | adev->id = plat_data->hw_id; | 1165 | adev->id = plat_data->hw_id; |
@@ -1289,7 +1289,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) | |||
1289 | dma_cookie_t cookie; | 1289 | dma_cookie_t cookie; |
1290 | int slot_cnt, slots_per_op; | 1290 | int slot_cnt, slots_per_op; |
1291 | 1291 | ||
1292 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 1292 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
1293 | 1293 | ||
1294 | spin_lock_bh(&iop_chan->lock); | 1294 | spin_lock_bh(&iop_chan->lock); |
1295 | slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); | 1295 | slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op); |
@@ -1346,7 +1346,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1346 | dma_cookie_t cookie; | 1346 | dma_cookie_t cookie; |
1347 | int slot_cnt, slots_per_op; | 1347 | int slot_cnt, slots_per_op; |
1348 | 1348 | ||
1349 | dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__); | 1349 | dev_dbg(iop_chan->device->common.dev, "%s\n", __func__); |
1350 | 1350 | ||
1351 | spin_lock_bh(&iop_chan->lock); | 1351 | spin_lock_bh(&iop_chan->lock); |
1352 | slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); | 1352 | slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op); |
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig index fe9e768cfbc4..25bdc2dd9ce1 100644 --- a/drivers/firewire/Kconfig +++ b/drivers/firewire/Kconfig | |||
@@ -1,5 +1,3 @@ | |||
1 | # -*- shell-script -*- | ||
2 | |||
3 | comment "An alternative FireWire stack is available with EXPERIMENTAL=y" | 1 | comment "An alternative FireWire stack is available with EXPERIMENTAL=y" |
4 | depends on EXPERIMENTAL=n | 2 | depends on EXPERIMENTAL=n |
5 | 3 | ||
@@ -21,27 +19,7 @@ config FIREWIRE | |||
21 | NOTE: | 19 | NOTE: |
22 | 20 | ||
23 | You should only build ONE of the stacks, unless you REALLY know what | 21 | You should only build ONE of the stacks, unless you REALLY know what |
24 | you are doing. If you install both, you should configure them only as | 22 | you are doing. |
25 | modules rather than link them statically, and you should blacklist one | ||
26 | of the concurrent low-level drivers in /etc/modprobe.conf. Add either | ||
27 | |||
28 | blacklist firewire-ohci | ||
29 | or | ||
30 | blacklist ohci1394 | ||
31 | |||
32 | there depending on which driver you DON'T want to have auto-loaded. | ||
33 | You can optionally do the same with the other IEEE 1394/ FireWire | ||
34 | drivers. | ||
35 | |||
36 | If you have an old modprobe which doesn't implement the blacklist | ||
37 | directive, use either | ||
38 | |||
39 | install firewire-ohci /bin/true | ||
40 | or | ||
41 | install ohci1394 /bin/true | ||
42 | |||
43 | and so on, depending on which modules you DON't want to have | ||
44 | auto-loaded. | ||
45 | 23 | ||
46 | config FIREWIRE_OHCI | 24 | config FIREWIRE_OHCI |
47 | tristate "Support for OHCI FireWire host controllers" | 25 | tristate "Support for OHCI FireWire host controllers" |
@@ -57,8 +35,24 @@ config FIREWIRE_OHCI | |||
57 | 35 | ||
58 | NOTE: | 36 | NOTE: |
59 | 37 | ||
60 | If you also build ohci1394 of the classic stack, blacklist either | 38 | You should only build ohci1394 or firewire-ohci, but not both. |
61 | ohci1394 or firewire-ohci to let hotplug load only the desired driver. | 39 | If you nevertheless want to install both, you should configure them |
40 | only as modules and blacklist the driver(s) which you don't want to | ||
41 | have auto-loaded. Add either | ||
42 | |||
43 | blacklist firewire-ohci | ||
44 | or | ||
45 | blacklist ohci1394 | ||
46 | blacklist video1394 | ||
47 | blacklist dv1394 | ||
48 | |||
49 | to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf | ||
50 | depending on your distribution. The latter two modules should be | ||
51 | blacklisted together with ohci1394 because they depend on ohci1394. | ||
52 | |||
53 | If you have an old modprobe which doesn't implement the blacklist | ||
54 | directive, use "install modulename /bin/true" for the modules to be | ||
55 | blacklisted. | ||
62 | 56 | ||
63 | config FIREWIRE_SBP2 | 57 | config FIREWIRE_SBP2 |
64 | tristate "Support for storage devices (SBP-2 protocol driver)" | 58 | tristate "Support for storage devices (SBP-2 protocol driver)" |
@@ -75,9 +69,3 @@ config FIREWIRE_SBP2 | |||
75 | 69 | ||
76 | You should also enable support for disks, CD-ROMs, etc. in the SCSI | 70 | You should also enable support for disks, CD-ROMs, etc. in the SCSI |
77 | configuration section. | 71 | configuration section. |
78 | |||
79 | NOTE: | ||
80 | |||
81 | If you also build sbp2 of the classic stack, blacklist either sbp2 | ||
82 | or firewire-sbp2 to let hotplug load only the desired driver. | ||
83 | |||
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 3e9719948a8e..a03462750b95 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/crc-itu-t.h> | 24 | #include <linux/crc-itu-t.h> |
@@ -214,17 +215,29 @@ static void | |||
214 | fw_card_bm_work(struct work_struct *work) | 215 | fw_card_bm_work(struct work_struct *work) |
215 | { | 216 | { |
216 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 217 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
217 | struct fw_device *root; | 218 | struct fw_device *root_device; |
219 | struct fw_node *root_node, *local_node; | ||
218 | struct bm_data bmd; | 220 | struct bm_data bmd; |
219 | unsigned long flags; | 221 | unsigned long flags; |
220 | int root_id, new_root_id, irm_id, gap_count, generation, grace; | 222 | int root_id, new_root_id, irm_id, gap_count, generation, grace; |
221 | int do_reset = 0; | 223 | int do_reset = 0; |
222 | 224 | ||
223 | spin_lock_irqsave(&card->lock, flags); | 225 | spin_lock_irqsave(&card->lock, flags); |
226 | local_node = card->local_node; | ||
227 | root_node = card->root_node; | ||
228 | |||
229 | if (local_node == NULL) { | ||
230 | spin_unlock_irqrestore(&card->lock, flags); | ||
231 | return; | ||
232 | } | ||
233 | fw_node_get(local_node); | ||
234 | fw_node_get(root_node); | ||
224 | 235 | ||
225 | generation = card->generation; | 236 | generation = card->generation; |
226 | root = card->root_node->data; | 237 | root_device = root_node->data; |
227 | root_id = card->root_node->node_id; | 238 | if (root_device) |
239 | fw_device_get(root_device); | ||
240 | root_id = root_node->node_id; | ||
228 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); | 241 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); |
229 | 242 | ||
230 | if (card->bm_generation + 1 == generation || | 243 | if (card->bm_generation + 1 == generation || |
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) | |||
243 | 256 | ||
244 | irm_id = card->irm_node->node_id; | 257 | irm_id = card->irm_node->node_id; |
245 | if (!card->irm_node->link_on) { | 258 | if (!card->irm_node->link_on) { |
246 | new_root_id = card->local_node->node_id; | 259 | new_root_id = local_node->node_id; |
247 | fw_notify("IRM has link off, making local node (%02x) root.\n", | 260 | fw_notify("IRM has link off, making local node (%02x) root.\n", |
248 | new_root_id); | 261 | new_root_id); |
249 | goto pick_me; | 262 | goto pick_me; |
250 | } | 263 | } |
251 | 264 | ||
252 | bmd.lock.arg = cpu_to_be32(0x3f); | 265 | bmd.lock.arg = cpu_to_be32(0x3f); |
253 | bmd.lock.data = cpu_to_be32(card->local_node->node_id); | 266 | bmd.lock.data = cpu_to_be32(local_node->node_id); |
254 | 267 | ||
255 | spin_unlock_irqrestore(&card->lock, flags); | 268 | spin_unlock_irqrestore(&card->lock, flags); |
256 | 269 | ||
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) | |||
267 | * Another bus reset happened. Just return, | 280 | * Another bus reset happened. Just return, |
268 | * the BM work has been rescheduled. | 281 | * the BM work has been rescheduled. |
269 | */ | 282 | */ |
270 | return; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) | 286 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) |
274 | /* Somebody else is BM, let them do the work. */ | 287 | /* Somebody else is BM, let them do the work. */ |
275 | return; | 288 | goto out; |
276 | 289 | ||
277 | spin_lock_irqsave(&card->lock, flags); | 290 | spin_lock_irqsave(&card->lock, flags); |
278 | if (bmd.rcode != RCODE_COMPLETE) { | 291 | if (bmd.rcode != RCODE_COMPLETE) { |
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) | |||
282 | * do a bus reset and pick the local node as | 295 | * do a bus reset and pick the local node as |
283 | * root, and thus, IRM. | 296 | * root, and thus, IRM. |
284 | */ | 297 | */ |
285 | new_root_id = card->local_node->node_id; | 298 | new_root_id = local_node->node_id; |
286 | fw_notify("BM lock failed, making local node (%02x) root.\n", | 299 | fw_notify("BM lock failed, making local node (%02x) root.\n", |
287 | new_root_id); | 300 | new_root_id); |
288 | goto pick_me; | 301 | goto pick_me; |
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) | |||
295 | */ | 308 | */ |
296 | spin_unlock_irqrestore(&card->lock, flags); | 309 | spin_unlock_irqrestore(&card->lock, flags); |
297 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); | 310 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); |
298 | return; | 311 | goto out; |
299 | } | 312 | } |
300 | 313 | ||
301 | /* | 314 | /* |
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) | |||
305 | */ | 318 | */ |
306 | card->bm_generation = generation; | 319 | card->bm_generation = generation; |
307 | 320 | ||
308 | if (root == NULL) { | 321 | if (root_device == NULL) { |
309 | /* | 322 | /* |
310 | * Either link_on is false, or we failed to read the | 323 | * Either link_on is false, or we failed to read the |
311 | * config rom. In either case, pick another root. | 324 | * config rom. In either case, pick another root. |
312 | */ | 325 | */ |
313 | new_root_id = card->local_node->node_id; | 326 | new_root_id = local_node->node_id; |
314 | } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { | 327 | } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { |
315 | /* | 328 | /* |
316 | * If we haven't probed this device yet, bail out now | 329 | * If we haven't probed this device yet, bail out now |
317 | * and let's try again once that's done. | 330 | * and let's try again once that's done. |
318 | */ | 331 | */ |
319 | spin_unlock_irqrestore(&card->lock, flags); | 332 | spin_unlock_irqrestore(&card->lock, flags); |
320 | return; | 333 | goto out; |
321 | } else if (root->config_rom[2] & BIB_CMC) { | 334 | } else if (root_device->config_rom[2] & BIB_CMC) { |
322 | /* | 335 | /* |
323 | * FIXME: I suppose we should set the cmstr bit in the | 336 | * FIXME: I suppose we should set the cmstr bit in the |
324 | * STATE_CLEAR register of this node, as described in | 337 | * STATE_CLEAR register of this node, as described in |
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) | |||
332 | * successfully read the config rom, but it's not | 345 | * successfully read the config rom, but it's not |
333 | * cycle master capable. | 346 | * cycle master capable. |
334 | */ | 347 | */ |
335 | new_root_id = card->local_node->node_id; | 348 | new_root_id = local_node->node_id; |
336 | } | 349 | } |
337 | 350 | ||
338 | pick_me: | 351 | pick_me: |
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) | |||
341 | * the typically much larger 1394b beta repeater delays though. | 354 | * the typically much larger 1394b beta repeater delays though. |
342 | */ | 355 | */ |
343 | if (!card->beta_repeaters_present && | 356 | if (!card->beta_repeaters_present && |
344 | card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 357 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
345 | gap_count = gap_count_table[card->root_node->max_hops]; | 358 | gap_count = gap_count_table[root_node->max_hops]; |
346 | else | 359 | else |
347 | gap_count = 63; | 360 | gap_count = 63; |
348 | 361 | ||
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) | |||
364 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 377 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
365 | fw_core_initiate_bus_reset(card, 1); | 378 | fw_core_initiate_bus_reset(card, 1); |
366 | } | 379 | } |
380 | out: | ||
381 | if (root_device) | ||
382 | fw_device_put(root_device); | ||
383 | fw_node_put(root_node); | ||
384 | fw_node_put(local_node); | ||
367 | } | 385 | } |
368 | 386 | ||
369 | static void | 387 | static void |
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | |||
381 | static atomic_t index = ATOMIC_INIT(-1); | 399 | static atomic_t index = ATOMIC_INIT(-1); |
382 | 400 | ||
383 | kref_init(&card->kref); | 401 | kref_init(&card->kref); |
402 | atomic_set(&card->device_count, 0); | ||
384 | card->index = atomic_inc_return(&index); | 403 | card->index = atomic_inc_return(&index); |
385 | card->driver = driver; | 404 | card->driver = driver; |
386 | card->device = device; | 405 | card->device = device; |
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) | |||
511 | card->driver = &dummy_driver; | 530 | card->driver = &dummy_driver; |
512 | 531 | ||
513 | fw_destroy_nodes(card); | 532 | fw_destroy_nodes(card); |
514 | flush_scheduled_work(); | 533 | /* |
534 | * Wait for all device workqueue jobs to finish. Otherwise the | ||
535 | * firewire-core module could be unloaded before the jobs ran. | ||
536 | */ | ||
537 | while (atomic_read(&card->device_count) > 0) | ||
538 | msleep(100); | ||
515 | 539 | ||
540 | cancel_delayed_work_sync(&card->work); | ||
516 | fw_flush_transactions(card); | 541 | fw_flush_transactions(card); |
517 | del_timer_sync(&card->flush_timer); | 542 | del_timer_sync(&card->flush_timer); |
518 | 543 | ||
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 7e73cbaa4121..46bc197a047f 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c | |||
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
109 | struct client *client; | 109 | struct client *client; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | 111 | ||
112 | device = fw_device_from_devt(inode->i_rdev); | 112 | device = fw_device_get_by_devt(inode->i_rdev); |
113 | if (device == NULL) | 113 | if (device == NULL) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 116 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
117 | if (client == NULL) | 117 | if (client == NULL) { |
118 | fw_device_put(device); | ||
118 | return -ENOMEM; | 119 | return -ENOMEM; |
120 | } | ||
119 | 121 | ||
120 | client->device = fw_device_get(device); | 122 | client->device = device; |
121 | INIT_LIST_HEAD(&client->event_list); | 123 | INIT_LIST_HEAD(&client->event_list); |
122 | INIT_LIST_HEAD(&client->resource_list); | 124 | INIT_LIST_HEAD(&client->resource_list); |
123 | spin_lock_init(&client->lock); | 125 | spin_lock_init(&client->lock); |
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) | |||
644 | struct fw_cdev_create_iso_context *request = buffer; | 646 | struct fw_cdev_create_iso_context *request = buffer; |
645 | struct fw_iso_context *context; | 647 | struct fw_iso_context *context; |
646 | 648 | ||
649 | /* We only support one context at this time. */ | ||
650 | if (client->iso_context != NULL) | ||
651 | return -EBUSY; | ||
652 | |||
647 | if (request->channel > 63) | 653 | if (request->channel > 63) |
648 | return -EINVAL; | 654 | return -EINVAL; |
649 | 655 | ||
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) | |||
790 | { | 796 | { |
791 | struct fw_cdev_start_iso *request = buffer; | 797 | struct fw_cdev_start_iso *request = buffer; |
792 | 798 | ||
793 | if (request->handle != 0) | 799 | if (client->iso_context == NULL || request->handle != 0) |
794 | return -EINVAL; | 800 | return -EINVAL; |
801 | |||
795 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 802 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
796 | if (request->tags == 0 || request->tags > 15) | 803 | if (request->tags == 0 || request->tags > 15) |
797 | return -EINVAL; | 804 | return -EINVAL; |
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) | |||
808 | { | 815 | { |
809 | struct fw_cdev_stop_iso *request = buffer; | 816 | struct fw_cdev_stop_iso *request = buffer; |
810 | 817 | ||
811 | if (request->handle != 0) | 818 | if (client->iso_context == NULL || request->handle != 0) |
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | 820 | ||
814 | return fw_iso_context_stop(client->iso_context); | 821 | return fw_iso_context_stop(client->iso_context); |
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index de9066e69adf..870125a3638e 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c | |||
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { | |||
150 | }; | 150 | }; |
151 | EXPORT_SYMBOL(fw_bus_type); | 151 | EXPORT_SYMBOL(fw_bus_type); |
152 | 152 | ||
153 | struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
165 | static void fw_device_release(struct device *dev) | 153 | static void fw_device_release(struct device *dev) |
166 | { | 154 | { |
167 | struct fw_device *device = fw_device(dev); | 155 | struct fw_device *device = fw_device(dev); |
156 | struct fw_card *card = device->card; | ||
168 | unsigned long flags; | 157 | unsigned long flags; |
169 | 158 | ||
170 | /* | 159 | /* |
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) | |||
176 | spin_unlock_irqrestore(&device->card->lock, flags); | 165 | spin_unlock_irqrestore(&device->card->lock, flags); |
177 | 166 | ||
178 | fw_node_put(device->node); | 167 | fw_node_put(device->node); |
179 | fw_card_put(device->card); | ||
180 | kfree(device->config_rom); | 168 | kfree(device->config_rom); |
181 | kfree(device); | 169 | kfree(device); |
170 | atomic_dec(&card->device_count); | ||
182 | } | 171 | } |
183 | 172 | ||
184 | int fw_device_enable_phys_dma(struct fw_device *device) | 173 | int fw_device_enable_phys_dma(struct fw_device *device) |
@@ -358,12 +347,9 @@ static ssize_t | |||
358 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 347 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) |
359 | { | 348 | { |
360 | struct fw_device *device = fw_device(dev); | 349 | struct fw_device *device = fw_device(dev); |
361 | u64 guid; | ||
362 | |||
363 | guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; | ||
364 | 350 | ||
365 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | 351 | return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", |
366 | (unsigned long long)guid); | 352 | device->config_rom[3], device->config_rom[4]); |
367 | } | 353 | } |
368 | 354 | ||
369 | static struct device_attribute fw_device_attributes[] = { | 355 | static struct device_attribute fw_device_attributes[] = { |
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); | |||
610 | static DEFINE_IDR(fw_device_idr); | 596 | static DEFINE_IDR(fw_device_idr); |
611 | int fw_cdev_major; | 597 | int fw_cdev_major; |
612 | 598 | ||
613 | struct fw_device *fw_device_from_devt(dev_t devt) | 599 | struct fw_device *fw_device_get_by_devt(dev_t devt) |
614 | { | 600 | { |
615 | struct fw_device *device; | 601 | struct fw_device *device; |
616 | 602 | ||
617 | down_read(&idr_rwsem); | 603 | down_read(&idr_rwsem); |
618 | device = idr_find(&fw_device_idr, MINOR(devt)); | 604 | device = idr_find(&fw_device_idr, MINOR(devt)); |
605 | if (device) | ||
606 | fw_device_get(device); | ||
619 | up_read(&idr_rwsem); | 607 | up_read(&idr_rwsem); |
620 | 608 | ||
621 | return device; | 609 | return device; |
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) | |||
627 | container_of(work, struct fw_device, work.work); | 615 | container_of(work, struct fw_device, work.work); |
628 | int minor = MINOR(device->device.devt); | 616 | int minor = MINOR(device->device.devt); |
629 | 617 | ||
630 | down_write(&idr_rwsem); | ||
631 | idr_remove(&fw_device_idr, minor); | ||
632 | up_write(&idr_rwsem); | ||
633 | |||
634 | fw_device_cdev_remove(device); | 618 | fw_device_cdev_remove(device); |
635 | device_for_each_child(&device->device, NULL, shutdown_unit); | 619 | device_for_each_child(&device->device, NULL, shutdown_unit); |
636 | device_unregister(&device->device); | 620 | device_unregister(&device->device); |
621 | |||
622 | down_write(&idr_rwsem); | ||
623 | idr_remove(&fw_device_idr, minor); | ||
624 | up_write(&idr_rwsem); | ||
625 | fw_device_put(device); | ||
637 | } | 626 | } |
638 | 627 | ||
639 | static struct device_type fw_device_type = { | 628 | static struct device_type fw_device_type = { |
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) | |||
668 | */ | 657 | */ |
669 | 658 | ||
670 | if (read_bus_info_block(device, device->generation) < 0) { | 659 | if (read_bus_info_block(device, device->generation) < 0) { |
671 | if (device->config_rom_retries < MAX_RETRIES) { | 660 | if (device->config_rom_retries < MAX_RETRIES && |
661 | atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { | ||
672 | device->config_rom_retries++; | 662 | device->config_rom_retries++; |
673 | schedule_delayed_work(&device->work, RETRY_DELAY); | 663 | schedule_delayed_work(&device->work, RETRY_DELAY); |
674 | } else { | 664 | } else { |
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) | |||
682 | } | 672 | } |
683 | 673 | ||
684 | err = -ENOMEM; | 674 | err = -ENOMEM; |
675 | |||
676 | fw_device_get(device); | ||
685 | down_write(&idr_rwsem); | 677 | down_write(&idr_rwsem); |
686 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) | 678 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) |
687 | err = idr_get_new(&fw_device_idr, device, &minor); | 679 | err = idr_get_new(&fw_device_idr, device, &minor); |
688 | up_write(&idr_rwsem); | 680 | up_write(&idr_rwsem); |
681 | |||
689 | if (err < 0) | 682 | if (err < 0) |
690 | goto error; | 683 | goto error; |
691 | 684 | ||
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) | |||
717 | */ | 710 | */ |
718 | if (atomic_cmpxchg(&device->state, | 711 | if (atomic_cmpxchg(&device->state, |
719 | FW_DEVICE_INITIALIZING, | 712 | FW_DEVICE_INITIALIZING, |
720 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) | 713 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { |
721 | fw_device_shutdown(&device->work.work); | 714 | fw_device_shutdown(&device->work.work); |
722 | else | 715 | } else { |
723 | fw_notify("created new fw device %s " | 716 | if (device->config_rom_retries) |
724 | "(%d config rom retries, S%d00)\n", | 717 | fw_notify("created device %s: GUID %08x%08x, S%d00, " |
725 | device->device.bus_id, device->config_rom_retries, | 718 | "%d config ROM retries\n", |
726 | 1 << device->max_speed); | 719 | device->device.bus_id, |
720 | device->config_rom[3], device->config_rom[4], | ||
721 | 1 << device->max_speed, | ||
722 | device->config_rom_retries); | ||
723 | else | ||
724 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
725 | device->device.bus_id, | ||
726 | device->config_rom[3], device->config_rom[4], | ||
727 | 1 << device->max_speed); | ||
728 | } | ||
727 | 729 | ||
728 | /* | 730 | /* |
729 | * Reschedule the IRM work if we just finished reading the | 731 | * Reschedule the IRM work if we just finished reading the |
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) | |||
741 | idr_remove(&fw_device_idr, minor); | 743 | idr_remove(&fw_device_idr, minor); |
742 | up_write(&idr_rwsem); | 744 | up_write(&idr_rwsem); |
743 | error: | 745 | error: |
744 | put_device(&device->device); | 746 | fw_device_put(device); /* fw_device_idr's reference */ |
747 | |||
748 | put_device(&device->device); /* our reference */ | ||
745 | } | 749 | } |
746 | 750 | ||
747 | static int update_unit(struct device *dev, void *data) | 751 | static int update_unit(struct device *dev, void *data) |
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
791 | */ | 795 | */ |
792 | device_initialize(&device->device); | 796 | device_initialize(&device->device); |
793 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); | 797 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); |
794 | device->card = fw_card_get(card); | 798 | atomic_inc(&card->device_count); |
799 | device->card = card; | ||
795 | device->node = fw_node_get(node); | 800 | device->node = fw_node_get(node); |
796 | device->node_id = node->node_id; | 801 | device->node_id = node->node_id; |
797 | device->generation = card->generation; | 802 | device->generation = card->generation; |
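Note on the fw-device.c hunk above: the renamed fw_device_get_by_devt() takes its reference while idr_rwsem is still held for reading, and fw_device_shutdown() now removes the IDR entry only after unregistering the device and then drops that reference. A hypothetical sketch of the lookup half of that pattern, using a plain rwlock in place of the IDR (illustrative only):

#include <pthread.h>
#include <stddef.h>

struct obj { int refcount; };

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct obj *table_slot;          /* stands in for the idr entry */

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static struct obj *lookup_get(void)
{
	struct obj *o;

	pthread_rwlock_rdlock(&table_lock);
	o = table_slot;
	if (o != NULL)
		obj_get(o);             /* taken while the table is still locked */
	pthread_rwlock_unlock(&table_lock);

	return o;                       /* caller must put the reference */
}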
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 0854fe2bc110..78ecd3991b7f 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h | |||
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) | |||
76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | 76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; |
77 | } | 77 | } |
78 | 78 | ||
79 | struct fw_device *fw_device_get(struct fw_device *device); | 79 | static inline struct fw_device * |
80 | void fw_device_put(struct fw_device *device); | 80 | fw_device_get(struct fw_device *device) |
81 | { | ||
82 | get_device(&device->device); | ||
83 | |||
84 | return device; | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | fw_device_put(struct fw_device *device) | ||
89 | { | ||
90 | put_device(&device->device); | ||
91 | } | ||
92 | |||
93 | struct fw_device *fw_device_get_by_devt(dev_t devt); | ||
81 | int fw_device_enable_phys_dma(struct fw_device *device); | 94 | int fw_device_enable_phys_dma(struct fw_device *device); |
82 | 95 | ||
83 | void fw_device_cdev_update(struct fw_device *device); | 96 | void fw_device_cdev_update(struct fw_device *device); |
84 | void fw_device_cdev_remove(struct fw_device *device); | 97 | void fw_device_cdev_remove(struct fw_device *device); |
85 | 98 | ||
86 | struct fw_device *fw_device_from_devt(dev_t devt); | ||
87 | extern int fw_cdev_major; | 99 | extern int fw_cdev_major; |
88 | 100 | ||
89 | struct fw_unit { | 101 | struct fw_unit { |
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c index 7ebad3c14cb8..996d61f0d460 100644 --- a/drivers/firewire/fw-ohci.c +++ b/drivers/firewire/fw-ohci.c | |||
@@ -33,6 +33,10 @@ | |||
33 | #include <asm/page.h> | 33 | #include <asm/page.h> |
34 | #include <asm/system.h> | 34 | #include <asm/system.h> |
35 | 35 | ||
36 | #ifdef CONFIG_PPC_PMAC | ||
37 | #include <asm/pmac_feature.h> | ||
38 | #endif | ||
39 | |||
36 | #include "fw-ohci.h" | 40 | #include "fw-ohci.h" |
37 | #include "fw-transaction.h" | 41 | #include "fw-transaction.h" |
38 | 42 | ||
@@ -175,6 +179,7 @@ struct fw_ohci { | |||
175 | int generation; | 179 | int generation; |
176 | int request_generation; | 180 | int request_generation; |
177 | u32 bus_seconds; | 181 | u32 bus_seconds; |
182 | bool old_uninorth; | ||
178 | 183 | ||
179 | /* | 184 | /* |
180 | * Spinlock for accessing fw_ohci data. Never call out of | 185 | * Spinlock for accessing fw_ohci data. Never call out of |
@@ -276,19 +281,13 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
276 | { | 281 | { |
277 | struct device *dev = ctx->ohci->card.device; | 282 | struct device *dev = ctx->ohci->card.device; |
278 | struct ar_buffer *ab; | 283 | struct ar_buffer *ab; |
279 | dma_addr_t ab_bus; | 284 | dma_addr_t uninitialized_var(ab_bus); |
280 | size_t offset; | 285 | size_t offset; |
281 | 286 | ||
282 | ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC); | 287 | ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); |
283 | if (ab == NULL) | 288 | if (ab == NULL) |
284 | return -ENOMEM; | 289 | return -ENOMEM; |
285 | 290 | ||
286 | ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
287 | if (dma_mapping_error(ab_bus)) { | ||
288 | free_page((unsigned long) ab); | ||
289 | return -ENOMEM; | ||
290 | } | ||
291 | |||
292 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); | 291 | memset(&ab->descriptor, 0, sizeof(ab->descriptor)); |
293 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | | 292 | ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | |
294 | DESCRIPTOR_STATUS | | 293 | DESCRIPTOR_STATUS | |
@@ -299,8 +298,6 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
299 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); | 298 | ab->descriptor.res_count = cpu_to_le16(PAGE_SIZE - offset); |
300 | ab->descriptor.branch_address = 0; | 299 | ab->descriptor.branch_address = 0; |
301 | 300 | ||
302 | dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
303 | |||
304 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); | 301 | ctx->last_buffer->descriptor.branch_address = cpu_to_le32(ab_bus | 1); |
305 | ctx->last_buffer->next = ab; | 302 | ctx->last_buffer->next = ab; |
306 | ctx->last_buffer = ab; | 303 | ctx->last_buffer = ab; |
@@ -311,15 +308,22 @@ static int ar_context_add_page(struct ar_context *ctx) | |||
311 | return 0; | 308 | return 0; |
312 | } | 309 | } |
313 | 310 | ||
311 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) | ||
312 | #define cond_le32_to_cpu(v) \ | ||
313 | (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) | ||
314 | #else | ||
315 | #define cond_le32_to_cpu(v) le32_to_cpu(v) | ||
316 | #endif | ||
317 | |||
314 | static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | 318 | static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) |
315 | { | 319 | { |
316 | struct fw_ohci *ohci = ctx->ohci; | 320 | struct fw_ohci *ohci = ctx->ohci; |
317 | struct fw_packet p; | 321 | struct fw_packet p; |
318 | u32 status, length, tcode; | 322 | u32 status, length, tcode; |
319 | 323 | ||
320 | p.header[0] = le32_to_cpu(buffer[0]); | 324 | p.header[0] = cond_le32_to_cpu(buffer[0]); |
321 | p.header[1] = le32_to_cpu(buffer[1]); | 325 | p.header[1] = cond_le32_to_cpu(buffer[1]); |
322 | p.header[2] = le32_to_cpu(buffer[2]); | 326 | p.header[2] = cond_le32_to_cpu(buffer[2]); |
323 | 327 | ||
324 | tcode = (p.header[0] >> 4) & 0x0f; | 328 | tcode = (p.header[0] >> 4) & 0x0f; |
325 | switch (tcode) { | 329 | switch (tcode) { |
@@ -331,7 +335,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
331 | break; | 335 | break; |
332 | 336 | ||
333 | case TCODE_READ_BLOCK_REQUEST : | 337 | case TCODE_READ_BLOCK_REQUEST : |
334 | p.header[3] = le32_to_cpu(buffer[3]); | 338 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
335 | p.header_length = 16; | 339 | p.header_length = 16; |
336 | p.payload_length = 0; | 340 | p.payload_length = 0; |
337 | break; | 341 | break; |
@@ -340,7 +344,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
340 | case TCODE_READ_BLOCK_RESPONSE: | 344 | case TCODE_READ_BLOCK_RESPONSE: |
341 | case TCODE_LOCK_REQUEST: | 345 | case TCODE_LOCK_REQUEST: |
342 | case TCODE_LOCK_RESPONSE: | 346 | case TCODE_LOCK_RESPONSE: |
343 | p.header[3] = le32_to_cpu(buffer[3]); | 347 | p.header[3] = cond_le32_to_cpu(buffer[3]); |
344 | p.header_length = 16; | 348 | p.header_length = 16; |
345 | p.payload_length = p.header[3] >> 16; | 349 | p.payload_length = p.header[3] >> 16; |
346 | break; | 350 | break; |
@@ -357,7 +361,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
357 | 361 | ||
358 | /* FIXME: What to do about evt_* errors? */ | 362 | /* FIXME: What to do about evt_* errors? */ |
359 | length = (p.header_length + p.payload_length + 3) / 4; | 363 | length = (p.header_length + p.payload_length + 3) / 4; |
360 | status = le32_to_cpu(buffer[length]); | 364 | status = cond_le32_to_cpu(buffer[length]); |
361 | 365 | ||
362 | p.ack = ((status >> 16) & 0x1f) - 16; | 366 | p.ack = ((status >> 16) & 0x1f) - 16; |
363 | p.speed = (status >> 21) & 0x7; | 367 | p.speed = (status >> 21) & 0x7; |
@@ -375,7 +379,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) | |||
375 | */ | 379 | */ |
376 | 380 | ||
377 | if (p.ack + 16 == 0x09) | 381 | if (p.ack + 16 == 0x09) |
378 | ohci->request_generation = (buffer[2] >> 16) & 0xff; | 382 | ohci->request_generation = (p.header[2] >> 16) & 0xff; |
379 | else if (ctx == &ohci->ar_request_ctx) | 383 | else if (ctx == &ohci->ar_request_ctx) |
380 | fw_core_handle_request(&ohci->card, &p); | 384 | fw_core_handle_request(&ohci->card, &p); |
381 | else | 385 | else |
@@ -397,6 +401,7 @@ static void ar_context_tasklet(unsigned long data) | |||
397 | 401 | ||
398 | if (d->res_count == 0) { | 402 | if (d->res_count == 0) { |
399 | size_t size, rest, offset; | 403 | size_t size, rest, offset; |
404 | dma_addr_t buffer_bus; | ||
400 | 405 | ||
401 | /* | 406 | /* |
402 | * This descriptor is finished and we may have a | 407 | * This descriptor is finished and we may have a |
@@ -405,9 +410,7 @@ static void ar_context_tasklet(unsigned long data) | |||
405 | */ | 410 | */ |
406 | 411 | ||
407 | offset = offsetof(struct ar_buffer, data); | 412 | offset = offsetof(struct ar_buffer, data); |
408 | dma_unmap_single(ohci->card.device, | 413 | buffer_bus = le32_to_cpu(ab->descriptor.data_address) - offset; |
409 | le32_to_cpu(ab->descriptor.data_address) - offset, | ||
410 | PAGE_SIZE, DMA_BIDIRECTIONAL); | ||
411 | 414 | ||
412 | buffer = ab; | 415 | buffer = ab; |
413 | ab = ab->next; | 416 | ab = ab->next; |
@@ -423,7 +426,8 @@ static void ar_context_tasklet(unsigned long data) | |||
423 | while (buffer < end) | 426 | while (buffer < end) |
424 | buffer = handle_ar_packet(ctx, buffer); | 427 | buffer = handle_ar_packet(ctx, buffer); |
425 | 428 | ||
426 | free_page((unsigned long)buffer); | 429 | dma_free_coherent(ohci->card.device, PAGE_SIZE, |
430 | buffer, buffer_bus); | ||
427 | ar_context_add_page(ctx); | 431 | ar_context_add_page(ctx); |
428 | } else { | 432 | } else { |
429 | buffer = ctx->pointer; | 433 | buffer = ctx->pointer; |
@@ -532,7 +536,7 @@ static int | |||
532 | context_add_buffer(struct context *ctx) | 536 | context_add_buffer(struct context *ctx) |
533 | { | 537 | { |
534 | struct descriptor_buffer *desc; | 538 | struct descriptor_buffer *desc; |
535 | dma_addr_t bus_addr; | 539 | dma_addr_t uninitialized_var(bus_addr); |
536 | int offset; | 540 | int offset; |
537 | 541 | ||
538 | /* | 542 | /* |
@@ -1022,13 +1026,14 @@ static void bus_reset_tasklet(unsigned long data) | |||
1022 | */ | 1026 | */ |
1023 | 1027 | ||
1024 | self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; | 1028 | self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff; |
1025 | generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; | 1029 | generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff; |
1026 | rmb(); | 1030 | rmb(); |
1027 | 1031 | ||
1028 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { | 1032 | for (i = 1, j = 0; j < self_id_count; i += 2, j++) { |
1029 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) | 1033 | if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) |
1030 | fw_error("inconsistent self IDs\n"); | 1034 | fw_error("inconsistent self IDs\n"); |
1031 | ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]); | 1035 | ohci->self_id_buffer[j] = |
1036 | cond_le32_to_cpu(ohci->self_id_cpu[i]); | ||
1032 | } | 1037 | } |
1033 | rmb(); | 1038 | rmb(); |
1034 | 1039 | ||
@@ -1316,7 +1321,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length) | |||
1316 | unsigned long flags; | 1321 | unsigned long flags; |
1317 | int retval = -EBUSY; | 1322 | int retval = -EBUSY; |
1318 | __be32 *next_config_rom; | 1323 | __be32 *next_config_rom; |
1319 | dma_addr_t next_config_rom_bus; | 1324 | dma_addr_t uninitialized_var(next_config_rom_bus); |
1320 | 1325 | ||
1321 | ohci = fw_ohci(card); | 1326 | ohci = fw_ohci(card); |
1322 | 1327 | ||
@@ -1487,7 +1492,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1487 | void *p, *end; | 1492 | void *p, *end; |
1488 | int i; | 1493 | int i; |
1489 | 1494 | ||
1490 | if (db->first_res_count > 0 && db->second_res_count > 0) { | 1495 | if (db->first_res_count != 0 && db->second_res_count != 0) { |
1491 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { | 1496 | if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { |
1492 | /* This descriptor isn't done yet, stop iteration. */ | 1497 | /* This descriptor isn't done yet, stop iteration. */ |
1493 | return 0; | 1498 | return 0; |
@@ -1513,7 +1518,7 @@ static int handle_ir_dualbuffer_packet(struct context *context, | |||
1513 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); | 1518 | memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4); |
1514 | i += ctx->base.header_size; | 1519 | i += ctx->base.header_size; |
1515 | ctx->excess_bytes += | 1520 | ctx->excess_bytes += |
1516 | (le32_to_cpu(*(u32 *)(p + 4)) >> 16) & 0xffff; | 1521 | (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; |
1517 | p += ctx->base.header_size + 4; | 1522 | p += ctx->base.header_size + 4; |
1518 | } | 1523 | } |
1519 | ctx->header_length = i; | 1524 | ctx->header_length = i; |
@@ -2048,6 +2053,18 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2048 | int err; | 2053 | int err; |
2049 | size_t size; | 2054 | size_t size; |
2050 | 2055 | ||
2056 | #ifdef CONFIG_PPC_PMAC | ||
2057 | /* Necessary on some machines if fw-ohci was loaded/unloaded before */ | ||
2058 | if (machine_is(powermac)) { | ||
2059 | struct device_node *ofn = pci_device_to_OF_node(dev); | ||
2060 | |||
2061 | if (ofn) { | ||
2062 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); | ||
2063 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); | ||
2064 | } | ||
2065 | } | ||
2066 | #endif /* CONFIG_PPC_PMAC */ | ||
2067 | |||
2051 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); | 2068 | ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); |
2052 | if (ohci == NULL) { | 2069 | if (ohci == NULL) { |
2053 | fw_error("Could not malloc fw_ohci data.\n"); | 2070 | fw_error("Could not malloc fw_ohci data.\n"); |
@@ -2066,6 +2083,10 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) | |||
2066 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); | 2083 | pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); |
2067 | pci_set_drvdata(dev, ohci); | 2084 | pci_set_drvdata(dev, ohci); |
2068 | 2085 | ||
2086 | #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) | ||
2087 | ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE && | ||
2088 | dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW; | ||
2089 | #endif | ||
2069 | spin_lock_init(&ohci->lock); | 2090 | spin_lock_init(&ohci->lock); |
2070 | 2091 | ||
2071 | tasklet_init(&ohci->bus_reset_tasklet, | 2092 | tasklet_init(&ohci->bus_reset_tasklet, |
@@ -2182,6 +2203,19 @@ static void pci_remove(struct pci_dev *dev) | |||
2182 | pci_disable_device(dev); | 2203 | pci_disable_device(dev); |
2183 | fw_card_put(&ohci->card); | 2204 | fw_card_put(&ohci->card); |
2184 | 2205 | ||
2206 | #ifdef CONFIG_PPC_PMAC | ||
2207 | /* On UniNorth, power down the cable and turn off the chip clock | ||
2208 | * to save power on laptops */ | ||
2209 | if (machine_is(powermac)) { | ||
2210 | struct device_node *ofn = pci_device_to_OF_node(dev); | ||
2211 | |||
2212 | if (ofn) { | ||
2213 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); | ||
2214 | pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); | ||
2215 | } | ||
2216 | } | ||
2217 | #endif /* CONFIG_PPC_PMAC */ | ||
2218 | |||
2185 | fw_notify("Removed fw-ohci device.\n"); | 2219 | fw_notify("Removed fw-ohci device.\n"); |
2186 | } | 2220 | } |
2187 | 2221 | ||
@@ -2202,6 +2236,16 @@ static int pci_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2202 | if (err) | 2236 | if (err) |
2203 | fw_error("pci_set_power_state failed with %d\n", err); | 2237 | fw_error("pci_set_power_state failed with %d\n", err); |
2204 | 2238 | ||
2239 | /* PowerMac suspend code comes last */ | ||
2240 | #ifdef CONFIG_PPC_PMAC | ||
2241 | if (machine_is(powermac)) { | ||
2242 | struct device_node *ofn = pci_device_to_OF_node(pdev); | ||
2243 | |||
2244 | if (ofn) | ||
2245 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); | ||
2246 | } | ||
2247 | #endif /* CONFIG_PPC_PMAC */ | ||
2248 | |||
2205 | return 0; | 2249 | return 0; |
2206 | } | 2250 | } |
2207 | 2251 | ||
@@ -2210,6 +2254,16 @@ static int pci_resume(struct pci_dev *pdev) | |||
2210 | struct fw_ohci *ohci = pci_get_drvdata(pdev); | 2254 | struct fw_ohci *ohci = pci_get_drvdata(pdev); |
2211 | int err; | 2255 | int err; |
2212 | 2256 | ||
2257 | /* PowerMac resume code comes first */ | ||
2258 | #ifdef CONFIG_PPC_PMAC | ||
2259 | if (machine_is(powermac)) { | ||
2260 | struct device_node *ofn = pci_device_to_OF_node(pdev); | ||
2261 | |||
2262 | if (ofn) | ||
2263 | pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); | ||
2264 | } | ||
2265 | #endif /* CONFIG_PPC_PMAC */ | ||
2266 | |||
2213 | pci_set_power_state(pdev, PCI_D0); | 2267 | pci_set_power_state(pdev, PCI_D0); |
2214 | pci_restore_state(pdev); | 2268 | pci_restore_state(pdev); |
2215 | err = pci_enable_device(pdev); | 2269 | err = pci_enable_device(pdev); |
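Note on the fw-ohci.c hunks above: on old Apple UniNorth controllers (big-endian PPC32), received quadlets already arrive in host byte order, so the new cond_le32_to_cpu() skips the little-endian conversion whenever ohci->old_uninorth is set. A small, hypothetical userspace sketch of that conditional byte interpretation (illustrative only, not the driver's macro):

#include <stdint.h>

/* interpret a 32-bit value as little-endian regardless of host byte order */
static uint32_t le32_to_host(uint32_t v)
{
	const uint8_t *b = (const uint8_t *)&v;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

/* quirky controllers already deliver host byte order: convert only if needed */
static uint32_t cond_le32_to_host(uint32_t v, int old_uninorth)
{
	return old_uninorth ? v : le32_to_host(v);
}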
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 19ece9b6d742..62b4e47d0cc0 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c | |||
@@ -28,14 +28,15 @@ | |||
28 | * and many others. | 28 | * and many others. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/mod_devicetable.h> | ||
32 | #include <linux/module.h> | 37 | #include <linux/module.h> |
33 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
34 | #include <linux/mod_devicetable.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
40 | #include <linux/stringify.h> | 41 | #include <linux/stringify.h> |
41 | #include <linux/timer.h> | 42 | #include <linux/timer.h> |
@@ -47,9 +48,9 @@ | |||
47 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 49 | #include <scsi/scsi_host.h> |
49 | 50 | ||
50 | #include "fw-transaction.h" | ||
51 | #include "fw-topology.h" | ||
52 | #include "fw-device.h" | 51 | #include "fw-device.h" |
52 | #include "fw-topology.h" | ||
53 | #include "fw-transaction.h" | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * So far only bridges from Oxford Semiconductor are known to support | 56 | * So far only bridges from Oxford Semiconductor are known to support |
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
82 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 83 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
83 | * Don't use this with devices which don't have this bug. | 84 | * Don't use this with devices which don't have this bug. |
84 | * | 85 | * |
86 | * - delay inquiry | ||
87 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
88 | * | ||
85 | * - override internal blacklist | 89 | * - override internal blacklist |
86 | * Instead of adding to the built-in blacklist, use only the workarounds | 90 | * Instead of adding to the built-in blacklist, use only the workarounds |
87 | * specified in the module load parameter. | 91 | * specified in the module load parameter. |
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
91 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 95 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
92 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 96 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
93 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 97 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
98 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
99 | #define SBP2_INQUIRY_DELAY 12 | ||
94 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 100 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
95 | 101 | ||
96 | static int sbp2_param_workarounds; | 102 | static int sbp2_param_workarounds; |
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
100 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 106 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
101 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 107 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
102 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 108 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
109 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
103 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 110 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
104 | ", or a combination)"); | 111 | ", or a combination)"); |
105 | 112 | ||
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; | |||
115 | struct sbp2_logical_unit { | 122 | struct sbp2_logical_unit { |
116 | struct sbp2_target *tgt; | 123 | struct sbp2_target *tgt; |
117 | struct list_head link; | 124 | struct list_head link; |
118 | struct scsi_device *sdev; | ||
119 | struct fw_address_handler address_handler; | 125 | struct fw_address_handler address_handler; |
120 | struct list_head orb_list; | 126 | struct list_head orb_list; |
121 | 127 | ||
@@ -132,6 +138,8 @@ struct sbp2_logical_unit { | |||
132 | int generation; | 138 | int generation; |
133 | int retries; | 139 | int retries; |
134 | struct delayed_work work; | 140 | struct delayed_work work; |
141 | bool has_sdev; | ||
142 | bool blocked; | ||
135 | }; | 143 | }; |
136 | 144 | ||
137 | /* | 145 | /* |
@@ -141,16 +149,18 @@ struct sbp2_logical_unit { | |||
141 | struct sbp2_target { | 149 | struct sbp2_target { |
142 | struct kref kref; | 150 | struct kref kref; |
143 | struct fw_unit *unit; | 151 | struct fw_unit *unit; |
152 | const char *bus_id; | ||
153 | struct list_head lu_list; | ||
144 | 154 | ||
145 | u64 management_agent_address; | 155 | u64 management_agent_address; |
146 | int directory_id; | 156 | int directory_id; |
147 | int node_id; | 157 | int node_id; |
148 | int address_high; | 158 | int address_high; |
149 | 159 | unsigned int workarounds; | |
150 | unsigned workarounds; | ||
151 | struct list_head lu_list; | ||
152 | |||
153 | unsigned int mgt_orb_timeout; | 160 | unsigned int mgt_orb_timeout; |
161 | |||
162 | int dont_block; /* counter for each logical unit */ | ||
163 | int blocked; /* ditto */ | ||
154 | }; | 164 | }; |
155 | 165 | ||
156 | /* | 166 | /* |
@@ -160,9 +170,10 @@ struct sbp2_target { | |||
160 | */ | 170 | */ |
161 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | 171 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ |
162 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | 172 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ |
163 | #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ | 173 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ |
164 | #define SBP2_ORB_NULL 0x80000000 | 174 | #define SBP2_ORB_NULL 0x80000000 |
165 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 | 175 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 |
176 | #define SBP2_RETRY_LIMIT 0xf /* 15 retries */ | ||
166 | 177 | ||
167 | #define SBP2_DIRECTION_TO_MEDIA 0x0 | 178 | #define SBP2_DIRECTION_TO_MEDIA 0x0 |
168 | #define SBP2_DIRECTION_FROM_MEDIA 0x1 | 179 | #define SBP2_DIRECTION_FROM_MEDIA 0x1 |
@@ -297,7 +308,7 @@ struct sbp2_command_orb { | |||
297 | static const struct { | 308 | static const struct { |
298 | u32 firmware_revision; | 309 | u32 firmware_revision; |
299 | u32 model; | 310 | u32 model; |
300 | unsigned workarounds; | 311 | unsigned int workarounds; |
301 | } sbp2_workarounds_table[] = { | 312 | } sbp2_workarounds_table[] = { |
302 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | 313 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { |
303 | .firmware_revision = 0x002800, | 314 | .firmware_revision = 0x002800, |
@@ -305,6 +316,11 @@ static const struct { | |||
305 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 316 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
306 | SBP2_WORKAROUND_MODE_SENSE_8, | 317 | SBP2_WORKAROUND_MODE_SENSE_8, |
307 | }, | 318 | }, |
319 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
320 | .firmware_revision = 0x002800, | ||
321 | .model = 0x000000, | ||
322 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
323 | }, | ||
308 | /* Initio bridges, actually only needed for some older ones */ { | 324 | /* Initio bridges, actually only needed for some older ones */ { |
309 | .firmware_revision = 0x000200, | 325 | .firmware_revision = 0x000200, |
310 | .model = ~0, | 326 | .model = ~0, |
@@ -315,6 +331,11 @@ static const struct { | |||
315 | .model = ~0, | 331 | .model = ~0, |
316 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | 332 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, |
317 | }, | 333 | }, |
334 | /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { | ||
335 | .firmware_revision = 0x002600, | ||
336 | .model = ~0, | ||
337 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
338 | }, | ||
318 | 339 | ||
319 | /* | 340 | /* |
320 | * There are iPods (2nd gen, 3rd gen) with model_id == 0, but | 341 | * There are iPods (2nd gen, 3rd gen) with model_id == 0, but |
@@ -501,6 +522,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
501 | unsigned int timeout; | 522 | unsigned int timeout; |
502 | int retval = -ENOMEM; | 523 | int retval = -ENOMEM; |
503 | 524 | ||
525 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
526 | return 0; | ||
527 | |||
504 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 528 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
505 | if (orb == NULL) | 529 | if (orb == NULL) |
506 | return -ENOMEM; | 530 | return -ENOMEM; |
@@ -553,20 +577,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
553 | 577 | ||
554 | retval = -EIO; | 578 | retval = -EIO; |
555 | if (sbp2_cancel_orbs(lu) == 0) { | 579 | if (sbp2_cancel_orbs(lu) == 0) { |
556 | fw_error("orb reply timed out, rcode=0x%02x\n", | 580 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", |
557 | orb->base.rcode); | 581 | lu->tgt->bus_id, orb->base.rcode); |
558 | goto out; | 582 | goto out; |
559 | } | 583 | } |
560 | 584 | ||
561 | if (orb->base.rcode != RCODE_COMPLETE) { | 585 | if (orb->base.rcode != RCODE_COMPLETE) { |
562 | fw_error("management write failed, rcode 0x%02x\n", | 586 | fw_error("%s: management write failed, rcode 0x%02x\n", |
563 | orb->base.rcode); | 587 | lu->tgt->bus_id, orb->base.rcode); |
564 | goto out; | 588 | goto out; |
565 | } | 589 | } |
566 | 590 | ||
567 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 591 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
568 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 592 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
569 | fw_error("error status: %d:%d\n", | 593 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, |
570 | STATUS_GET_RESPONSE(orb->status), | 594 | STATUS_GET_RESPONSE(orb->status), |
571 | STATUS_GET_SBP_STATUS(orb->status)); | 595 | STATUS_GET_SBP_STATUS(orb->status)); |
572 | goto out; | 596 | goto out; |
@@ -590,29 +614,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
590 | 614 | ||
591 | static void | 615 | static void |
592 | complete_agent_reset_write(struct fw_card *card, int rcode, | 616 | complete_agent_reset_write(struct fw_card *card, int rcode, |
593 | void *payload, size_t length, void *data) | 617 | void *payload, size_t length, void *done) |
594 | { | 618 | { |
595 | struct fw_transaction *t = data; | 619 | complete(done); |
620 | } | ||
596 | 621 | ||
597 | kfree(t); | 622 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
623 | { | ||
624 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
625 | DECLARE_COMPLETION_ONSTACK(done); | ||
626 | struct fw_transaction t; | ||
627 | static u32 z; | ||
628 | |||
629 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
630 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
631 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
632 | &z, sizeof(z), complete_agent_reset_write, &done); | ||
633 | wait_for_completion(&done); | ||
598 | } | 634 | } |
599 | 635 | ||
600 | static int sbp2_agent_reset(struct sbp2_logical_unit *lu) | 636 | static void |
637 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | ||
638 | void *payload, size_t length, void *data) | ||
639 | { | ||
640 | kfree(data); | ||
641 | } | ||
642 | |||
643 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) | ||
601 | { | 644 | { |
602 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 645 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
603 | struct fw_transaction *t; | 646 | struct fw_transaction *t; |
604 | static u32 zero; | 647 | static u32 z; |
605 | 648 | ||
606 | t = kzalloc(sizeof(*t), GFP_ATOMIC); | 649 | t = kmalloc(sizeof(*t), GFP_ATOMIC); |
607 | if (t == NULL) | 650 | if (t == NULL) |
608 | return -ENOMEM; | 651 | return; |
609 | 652 | ||
610 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 653 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
611 | lu->tgt->node_id, lu->generation, device->max_speed, | 654 | lu->tgt->node_id, lu->generation, device->max_speed, |
612 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 655 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
613 | &zero, sizeof(zero), complete_agent_reset_write, t); | 656 | &z, sizeof(z), complete_agent_reset_write_no_wait, t); |
657 | } | ||
614 | 658 | ||
615 | return 0; | 659 | static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) |
660 | { | ||
661 | struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; | ||
662 | unsigned long flags; | ||
663 | |||
664 | /* serialize with comparisons of lu->generation and card->generation */ | ||
665 | spin_lock_irqsave(&card->lock, flags); | ||
666 | lu->generation = generation; | ||
667 | spin_unlock_irqrestore(&card->lock, flags); | ||
668 | } | ||
669 | |||
670 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
671 | { | ||
672 | /* | ||
673 | * We may access dont_block without taking card->lock here: | ||
674 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
675 | * are currently serialized against each other. | ||
676 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
677 | * dont_block is rather harmless, it simply misses its first chance. | ||
678 | */ | ||
679 | --lu->tgt->dont_block; | ||
680 | } | ||
681 | |||
682 | /* | ||
683 | * Blocks lu->tgt if all of the following conditions are met: | ||
684 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
685 | * logical units have been finished (indicated by dont_block == 0). | ||
686 | * - lu->generation is stale. | ||
687 | * | ||
688 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
689 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
690 | * unblock the target. | ||
691 | */ | ||
692 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
693 | { | ||
694 | struct sbp2_target *tgt = lu->tgt; | ||
695 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
696 | struct Scsi_Host *shost = | ||
697 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
698 | unsigned long flags; | ||
699 | |||
700 | spin_lock_irqsave(&card->lock, flags); | ||
701 | if (!tgt->dont_block && !lu->blocked && | ||
702 | lu->generation != card->generation) { | ||
703 | lu->blocked = true; | ||
704 | if (++tgt->blocked == 1) { | ||
705 | scsi_block_requests(shost); | ||
706 | fw_notify("blocked %s\n", lu->tgt->bus_id); | ||
707 | } | ||
708 | } | ||
709 | spin_unlock_irqrestore(&card->lock, flags); | ||
710 | } | ||
711 | |||
712 | /* | ||
713 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
714 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
715 | * card->lock protected section. On the other hand, running it inside | ||
716 | * the section might clash with shost->host_lock. | ||
717 | */ | ||
718 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
719 | { | ||
720 | struct sbp2_target *tgt = lu->tgt; | ||
721 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
722 | struct Scsi_Host *shost = | ||
723 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
724 | unsigned long flags; | ||
725 | bool unblock = false; | ||
726 | |||
727 | spin_lock_irqsave(&card->lock, flags); | ||
728 | if (lu->blocked && lu->generation == card->generation) { | ||
729 | lu->blocked = false; | ||
730 | unblock = --tgt->blocked == 0; | ||
731 | } | ||
732 | spin_unlock_irqrestore(&card->lock, flags); | ||
733 | |||
734 | if (unblock) { | ||
735 | scsi_unblock_requests(shost); | ||
736 | fw_notify("unblocked %s\n", lu->tgt->bus_id); | ||
737 | } | ||
738 | } | ||
739 | |||
740 | /* | ||
741 | * Prevents future blocking of tgt and unblocks it. | ||
742 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
743 | * card->lock protected section. On the other hand, running it inside | ||
744 | * the section might clash with shost->host_lock. | ||
745 | */ | ||
746 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
747 | { | ||
748 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
749 | struct Scsi_Host *shost = | ||
750 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
751 | unsigned long flags; | ||
752 | |||
753 | spin_lock_irqsave(&card->lock, flags); | ||
754 | ++tgt->dont_block; | ||
755 | spin_unlock_irqrestore(&card->lock, flags); | ||
756 | |||
757 | scsi_unblock_requests(shost); | ||
758 | } | ||
759 | |||
760 | static int sbp2_lun2int(u16 lun) | ||
761 | { | ||
762 | struct scsi_lun eight_bytes_lun; | ||
763 | |||
764 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
765 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
766 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
767 | |||
768 | return scsilun_to_int(&eight_bytes_lun); | ||
616 | } | 769 | } |
617 | 770 | ||
618 | static void sbp2_release_target(struct kref *kref) | 771 | static void sbp2_release_target(struct kref *kref) |
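Note on the fw-sbp2.c hunk above: sbp2_conditionally_block()/_unblock() keep a per-target count of blocked logical units, calling scsi_block_requests() only when the first unit goes stale and scsi_unblock_requests() only when the last one catches up, while dont_block suppresses blocking until login and SCSI probing are finished. A hypothetical sketch of that counting scheme with dont_block, the locking, and the SCSI calls stripped out (illustrative only):

#include <stdbool.h>

struct lu  { bool blocked; };
struct tgt { int blocked; bool host_blocked; };

static void conditionally_block(struct tgt *t, struct lu *u, bool stale)
{
	if (!u->blocked && stale) {
		u->blocked = true;
		if (++t->blocked == 1)
			t->host_blocked = true;   /* scsi_block_requests() in the patch */
	}
}

static void conditionally_unblock(struct tgt *t, struct lu *u, bool stale)
{
	if (u->blocked && !stale) {
		u->blocked = false;
		if (--t->blocked == 0)
			t->host_blocked = false;  /* scsi_unblock_requests() in the patch */
	}
}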
@@ -621,26 +774,31 @@ static void sbp2_release_target(struct kref *kref) | |||
621 | struct sbp2_logical_unit *lu, *next; | 774 | struct sbp2_logical_unit *lu, *next; |
622 | struct Scsi_Host *shost = | 775 | struct Scsi_Host *shost = |
623 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 776 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
777 | struct scsi_device *sdev; | ||
624 | struct fw_device *device = fw_device(tgt->unit->device.parent); | 778 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
625 | 779 | ||
626 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | 780 | /* prevent deadlocks */ |
627 | if (lu->sdev) | 781 | sbp2_unblock(tgt); |
628 | scsi_remove_device(lu->sdev); | ||
629 | 782 | ||
630 | if (!fw_device_is_shutdown(device)) | 783 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { |
631 | sbp2_send_management_orb(lu, tgt->node_id, | 784 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); |
632 | lu->generation, SBP2_LOGOUT_REQUEST, | 785 | if (sdev) { |
633 | lu->login_id, NULL); | 786 | scsi_remove_device(sdev); |
787 | scsi_device_put(sdev); | ||
788 | } | ||
789 | sbp2_send_management_orb(lu, tgt->node_id, lu->generation, | ||
790 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
634 | 791 | ||
635 | fw_core_remove_address_handler(&lu->address_handler); | 792 | fw_core_remove_address_handler(&lu->address_handler); |
636 | list_del(&lu->link); | 793 | list_del(&lu->link); |
637 | kfree(lu); | 794 | kfree(lu); |
638 | } | 795 | } |
639 | scsi_remove_host(shost); | 796 | scsi_remove_host(shost); |
640 | fw_notify("released %s\n", tgt->unit->device.bus_id); | 797 | fw_notify("released %s\n", tgt->bus_id); |
641 | 798 | ||
642 | put_device(&tgt->unit->device); | 799 | put_device(&tgt->unit->device); |
643 | scsi_host_put(shost); | 800 | scsi_host_put(shost); |
801 | fw_device_put(device); | ||
644 | } | 802 | } |
645 | 803 | ||
646 | static struct workqueue_struct *sbp2_wq; | 804 | static struct workqueue_struct *sbp2_wq; |
@@ -660,39 +818,72 @@ static void sbp2_target_put(struct sbp2_target *tgt) | |||
660 | kref_put(&tgt->kref, sbp2_release_target); | 818 | kref_put(&tgt->kref, sbp2_release_target); |
661 | } | 819 | } |
662 | 820 | ||
821 | static void | ||
822 | complete_set_busy_timeout(struct fw_card *card, int rcode, | ||
823 | void *payload, size_t length, void *done) | ||
824 | { | ||
825 | complete(done); | ||
826 | } | ||
827 | |||
828 | static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu) | ||
829 | { | ||
830 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
831 | DECLARE_COMPLETION_ONSTACK(done); | ||
832 | struct fw_transaction t; | ||
833 | static __be32 busy_timeout; | ||
834 | |||
835 | /* FIXME: we should try to set dual-phase cycle_limit too */ | ||
836 | busy_timeout = cpu_to_be32(SBP2_RETRY_LIMIT); | ||
837 | |||
838 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
839 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
840 | CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &busy_timeout, | ||
841 | sizeof(busy_timeout), complete_set_busy_timeout, &done); | ||
842 | wait_for_completion(&done); | ||
843 | } | ||
844 | |||
663 | static void sbp2_reconnect(struct work_struct *work); | 845 | static void sbp2_reconnect(struct work_struct *work); |
664 | 846 | ||
665 | static void sbp2_login(struct work_struct *work) | 847 | static void sbp2_login(struct work_struct *work) |
666 | { | 848 | { |
667 | struct sbp2_logical_unit *lu = | 849 | struct sbp2_logical_unit *lu = |
668 | container_of(work, struct sbp2_logical_unit, work.work); | 850 | container_of(work, struct sbp2_logical_unit, work.work); |
669 | struct Scsi_Host *shost = | 851 | struct sbp2_target *tgt = lu->tgt; |
670 | container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); | 852 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
853 | struct Scsi_Host *shost; | ||
671 | struct scsi_device *sdev; | 854 | struct scsi_device *sdev; |
672 | struct scsi_lun eight_bytes_lun; | ||
673 | struct fw_unit *unit = lu->tgt->unit; | ||
674 | struct fw_device *device = fw_device(unit->device.parent); | ||
675 | struct sbp2_login_response response; | 855 | struct sbp2_login_response response; |
676 | int generation, node_id, local_node_id; | 856 | int generation, node_id, local_node_id; |
677 | 857 | ||
858 | if (fw_device_is_shutdown(device)) | ||
859 | goto out; | ||
860 | |||
678 | generation = device->generation; | 861 | generation = device->generation; |
679 | smp_rmb(); /* node_id must not be older than generation */ | 862 | smp_rmb(); /* node_id must not be older than generation */ |
680 | node_id = device->node_id; | 863 | node_id = device->node_id; |
681 | local_node_id = device->card->node_id; | 864 | local_node_id = device->card->node_id; |
682 | 865 | ||
866 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
867 | if (lu->has_sdev) | ||
868 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
869 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
870 | |||
683 | if (sbp2_send_management_orb(lu, node_id, generation, | 871 | if (sbp2_send_management_orb(lu, node_id, generation, |
684 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | 872 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { |
685 | if (lu->retries++ < 5) | 873 | if (lu->retries++ < 5) { |
686 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 874 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
687 | else | 875 | } else { |
688 | fw_error("failed to login to %s LUN %04x\n", | 876 | fw_error("%s: failed to login to LUN %04x\n", |
689 | unit->device.bus_id, lu->lun); | 877 | tgt->bus_id, lu->lun); |
878 | /* Let any waiting I/O fail from now on. */ | ||
879 | sbp2_unblock(lu->tgt); | ||
880 | } | ||
690 | goto out; | 881 | goto out; |
691 | } | 882 | } |
692 | 883 | ||
693 | lu->generation = generation; | 884 | tgt->node_id = node_id; |
694 | lu->tgt->node_id = node_id; | 885 | tgt->address_high = local_node_id << 16; |
695 | lu->tgt->address_high = local_node_id << 16; | 886 | sbp2_set_generation(lu, generation); |
696 | 887 | ||
697 | /* Get command block agent offset and login id. */ | 888 | /* Get command block agent offset and login id. */ |
698 | lu->command_block_agent_address = | 889 | lu->command_block_agent_address = |
@@ -700,37 +891,67 @@ static void sbp2_login(struct work_struct *work) | |||
700 | response.command_block_agent.low; | 891 | response.command_block_agent.low; |
701 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); | 892 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); |
702 | 893 | ||
703 | fw_notify("logged in to %s LUN %04x (%d retries)\n", | 894 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", |
704 | unit->device.bus_id, lu->lun, lu->retries); | 895 | tgt->bus_id, lu->lun, lu->retries); |
705 | 896 | ||
706 | #if 0 | 897 | /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ |
707 | /* FIXME: The linux1394 sbp2 does this last step. */ | 898 | sbp2_set_busy_timeout(lu); |
708 | sbp2_set_busy_timeout(scsi_id); | ||
709 | #endif | ||
710 | 899 | ||
711 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 900 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); |
712 | sbp2_agent_reset(lu); | 901 | sbp2_agent_reset(lu); |
713 | 902 | ||
714 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | 903 | /* This was a re-login. */ |
715 | eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; | 904 | if (lu->has_sdev) { |
716 | eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; | 905 | sbp2_cancel_orbs(lu); |
906 | sbp2_conditionally_unblock(lu); | ||
907 | goto out; | ||
908 | } | ||
717 | 909 | ||
718 | sdev = __scsi_add_device(shost, 0, 0, | 910 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) |
719 | scsilun_to_int(&eight_bytes_lun), lu); | 911 | ssleep(SBP2_INQUIRY_DELAY); |
720 | if (IS_ERR(sdev)) { | 912 | |
721 | sbp2_send_management_orb(lu, node_id, generation, | 913 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
722 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | 914 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); |
723 | /* | 915 | /* |
724 | * Set this back to sbp2_login so we fall back and | 916 | * FIXME: We are unable to perform reconnects while in sbp2_login(). |
725 | * retry login on bus reset. | 917 | * Therefore __scsi_add_device() will get into trouble if a bus reset |
726 | */ | 918 | * happens in parallel. It will either fail or leave us with an |
727 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 919 | * unusable sdev. As a workaround we check for this and retry the |
728 | } else { | 920 | * whole login and SCSI probing. |
729 | lu->sdev = sdev; | 921 | */ |
922 | |||
923 | /* Reported error during __scsi_add_device() */ | ||
924 | if (IS_ERR(sdev)) | ||
925 | goto out_logout_login; | ||
926 | |||
927 | /* Unreported error during __scsi_add_device() */ | ||
928 | smp_rmb(); /* get current card generation */ | ||
929 | if (generation != device->card->generation) { | ||
930 | scsi_remove_device(sdev); | ||
730 | scsi_device_put(sdev); | 931 | scsi_device_put(sdev); |
932 | goto out_logout_login; | ||
731 | } | 933 | } |
934 | |||
935 | /* No error during __scsi_add_device() */ | ||
936 | lu->has_sdev = true; | ||
937 | scsi_device_put(sdev); | ||
938 | sbp2_allow_block(lu); | ||
939 | goto out; | ||
940 | |||
941 | out_logout_login: | ||
942 | smp_rmb(); /* generation may have changed */ | ||
943 | generation = device->generation; | ||
944 | smp_rmb(); /* node_id must not be older than generation */ | ||
945 | |||
946 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
947 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
948 | /* | ||
949 | * If a bus reset happened, sbp2_update will have requeued | ||
950 | * lu->work already. Reset the work from reconnect to login. | ||
951 | */ | ||
952 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
732 | out: | 953 | out: |
733 | sbp2_target_put(lu->tgt); | 954 | sbp2_target_put(tgt); |
734 | } | 955 | } |
735 | 956 | ||
736 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 957 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
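Note on the sbp2_login()-related hunks above: sbp2_agent_reset() and the new sbp2_set_busy_timeout() submit an asynchronous transaction whose callback merely signals an on-stack completion, then wait for it, turning the async request into a synchronous one. A hypothetical userspace sketch of that pattern, with a POSIX semaphore standing in for the kernel completion (illustrative only):

#include <semaphore.h>

struct request { sem_t done; };

/* callback run when the asynchronous request finishes */
static void request_callback(struct request *r)
{
	sem_post(&r->done);
}

/* submit() is a stand-in for fw_send_request(); it must eventually invoke
 * request_callback() on the request it was given. */
static void send_request_and_wait(void (*submit)(struct request *))
{
	struct request r;

	sem_init(&r.done, 0, 0);
	submit(&r);
	sem_wait(&r.done);      /* block until the callback has signalled */
	sem_destroy(&r.done);
}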
@@ -751,10 +972,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
751 | return -ENOMEM; | 972 | return -ENOMEM; |
752 | } | 973 | } |
753 | 974 | ||
754 | lu->tgt = tgt; | 975 | lu->tgt = tgt; |
755 | lu->sdev = NULL; | 976 | lu->lun = lun_entry & 0xffff; |
756 | lu->lun = lun_entry & 0xffff; | 977 | lu->retries = 0; |
757 | lu->retries = 0; | 978 | lu->has_sdev = false; |
979 | lu->blocked = false; | ||
980 | ++tgt->dont_block; | ||
758 | INIT_LIST_HEAD(&lu->orb_list); | 981 | INIT_LIST_HEAD(&lu->orb_list); |
759 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 982 | INIT_DELAYED_WORK(&lu->work, sbp2_login); |
760 | 983 | ||
@@ -813,7 +1036,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | |||
813 | if (timeout > tgt->mgt_orb_timeout) | 1036 | if (timeout > tgt->mgt_orb_timeout) |
814 | fw_notify("%s: config rom contains %ds " | 1037 | fw_notify("%s: config rom contains %ds " |
815 | "management ORB timeout, limiting " | 1038 | "management ORB timeout, limiting " |
816 | "to %ds\n", tgt->unit->device.bus_id, | 1039 | "to %ds\n", tgt->bus_id, |
817 | timeout / 1000, | 1040 | timeout / 1000, |
818 | tgt->mgt_orb_timeout / 1000); | 1041 | tgt->mgt_orb_timeout / 1000); |
819 | break; | 1042 | break; |
@@ -836,12 +1059,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
836 | u32 firmware_revision) | 1059 | u32 firmware_revision) |
837 | { | 1060 | { |
838 | int i; | 1061 | int i; |
839 | unsigned w = sbp2_param_workarounds; | 1062 | unsigned int w = sbp2_param_workarounds; |
840 | 1063 | ||
841 | if (w) | 1064 | if (w) |
842 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1065 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " |
843 | "if you need the workarounds parameter for %s\n", | 1066 | "if you need the workarounds parameter for %s\n", |
844 | tgt->unit->device.bus_id); | 1067 | tgt->bus_id); |
845 | 1068 | ||
846 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1069 | if (w & SBP2_WORKAROUND_OVERRIDE) |
847 | goto out; | 1070 | goto out; |
@@ -863,8 +1086,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
863 | if (w) | 1086 | if (w) |
864 | fw_notify("Workarounds for %s: 0x%x " | 1087 | fw_notify("Workarounds for %s: 0x%x " |
865 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1088 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
866 | tgt->unit->device.bus_id, | 1089 | tgt->bus_id, w, firmware_revision, model); |
867 | w, firmware_revision, model); | ||
868 | tgt->workarounds = w; | 1090 | tgt->workarounds = w; |
869 | } | 1091 | } |
870 | 1092 | ||
@@ -888,6 +1110,7 @@ static int sbp2_probe(struct device *dev) | |||
888 | tgt->unit = unit; | 1110 | tgt->unit = unit; |
889 | kref_init(&tgt->kref); | 1111 | kref_init(&tgt->kref); |
890 | INIT_LIST_HEAD(&tgt->lu_list); | 1112 | INIT_LIST_HEAD(&tgt->lu_list); |
1113 | tgt->bus_id = unit->device.bus_id; | ||
891 | 1114 | ||
892 | if (fw_device_enable_phys_dma(device) < 0) | 1115 | if (fw_device_enable_phys_dma(device) < 0) |
893 | goto fail_shost_put; | 1116 | goto fail_shost_put; |
@@ -895,6 +1118,8 @@ static int sbp2_probe(struct device *dev) | |||
895 | if (scsi_add_host(shost, &unit->device) < 0) | 1118 | if (scsi_add_host(shost, &unit->device) < 0) |
896 | goto fail_shost_put; | 1119 | goto fail_shost_put; |
897 | 1120 | ||
1121 | fw_device_get(device); | ||
1122 | |||
898 | /* Initialize to values that won't match anything in our table. */ | 1123 | /* Initialize to values that won't match anything in our table. */ |
899 | firmware_revision = 0xff000000; | 1124 | firmware_revision = 0xff000000; |
900 | model = 0xff000000; | 1125 | model = 0xff000000; |
@@ -938,10 +1163,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
938 | { | 1163 | { |
939 | struct sbp2_logical_unit *lu = | 1164 | struct sbp2_logical_unit *lu = |
940 | container_of(work, struct sbp2_logical_unit, work.work); | 1165 | container_of(work, struct sbp2_logical_unit, work.work); |
941 | struct fw_unit *unit = lu->tgt->unit; | 1166 | struct sbp2_target *tgt = lu->tgt; |
942 | struct fw_device *device = fw_device(unit->device.parent); | 1167 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
943 | int generation, node_id, local_node_id; | 1168 | int generation, node_id, local_node_id; |
944 | 1169 | ||
1170 | if (fw_device_is_shutdown(device)) | ||
1171 | goto out; | ||
1172 | |||
945 | generation = device->generation; | 1173 | generation = device->generation; |
946 | smp_rmb(); /* node_id must not be older than generation */ | 1174 | smp_rmb(); /* node_id must not be older than generation */ |
947 | node_id = device->node_id; | 1175 | node_id = device->node_id; |
@@ -950,10 +1178,17 @@ static void sbp2_reconnect(struct work_struct *work) | |||
950 | if (sbp2_send_management_orb(lu, node_id, generation, | 1178 | if (sbp2_send_management_orb(lu, node_id, generation, |
951 | SBP2_RECONNECT_REQUEST, | 1179 | SBP2_RECONNECT_REQUEST, |
952 | lu->login_id, NULL) < 0) { | 1180 | lu->login_id, NULL) < 0) { |
953 | if (lu->retries++ >= 5) { | 1181 | /* |
954 | fw_error("failed to reconnect to %s\n", | 1182 | * If reconnect was impossible even though we are in the |
955 | unit->device.bus_id); | 1183 | * current generation, fall back and try to log in again. |
956 | /* Fall back and try to log in again. */ | 1184 | * |
1185 | * We could check for "Function rejected" status, but | ||
1186 | * looking at the bus generation is simpler and more general. | ||
1187 | */ | ||
1188 | smp_rmb(); /* get current card generation */ | ||
1189 | if (generation == device->card->generation || | ||
1190 | lu->retries++ >= 5) { | ||
1191 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
957 | lu->retries = 0; | 1192 | lu->retries = 0; |
958 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 1193 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
959 | } | 1194 | } |
@@ -961,17 +1196,18 @@ static void sbp2_reconnect(struct work_struct *work) | |||
961 | goto out; | 1196 | goto out; |
962 | } | 1197 | } |
963 | 1198 | ||
964 | lu->generation = generation; | 1199 | tgt->node_id = node_id; |
965 | lu->tgt->node_id = node_id; | 1200 | tgt->address_high = local_node_id << 16; |
966 | lu->tgt->address_high = local_node_id << 16; | 1201 | sbp2_set_generation(lu, generation); |
967 | 1202 | ||
968 | fw_notify("reconnected to %s LUN %04x (%d retries)\n", | 1203 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", |
969 | unit->device.bus_id, lu->lun, lu->retries); | 1204 | tgt->bus_id, lu->lun, lu->retries); |
970 | 1205 | ||
971 | sbp2_agent_reset(lu); | 1206 | sbp2_agent_reset(lu); |
972 | sbp2_cancel_orbs(lu); | 1207 | sbp2_cancel_orbs(lu); |
1208 | sbp2_conditionally_unblock(lu); | ||
973 | out: | 1209 | out: |
974 | sbp2_target_put(lu->tgt); | 1210 | sbp2_target_put(tgt); |
975 | } | 1211 | } |
976 | 1212 | ||
977 | static void sbp2_update(struct fw_unit *unit) | 1213 | static void sbp2_update(struct fw_unit *unit) |
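The reconnect hunk above pairs the generation read with smp_rmb() before node_id, and rechecks the card's generation after a failed reconnect to decide between retrying and falling back to a full login. A hedged sketch of that ordering follows; struct example_device and send_reconnect_orb() are hypothetical stand-ins, not the fw-sbp2 types.

/* Sketch only: types and the ORB helper below are hypothetical. */
#include <linux/errno.h>
#include <asm/system.h>		/* smp_rmb() */

struct example_device {
	int generation;		/* bumped on every bus reset */
	int node_id;		/* only valid together with its generation */
	int card_generation;	/* latest generation seen by the card */
};

static int send_reconnect_orb(int node_id, int generation);	/* hypothetical */

static int example_reconnect(struct example_device *dev, int *retries)
{
	int generation, node_id;

	generation = dev->generation;
	smp_rmb();	/* node_id must not be older than generation */
	node_id = dev->node_id;

	if (send_reconnect_orb(node_id, generation) < 0) {
		smp_rmb();	/* pick up the current card generation */
		if (generation == dev->card_generation || (*retries)++ >= 5)
			return -EAGAIN;	/* still current: fall back to login */
		return -EBUSY;	/* a bus reset raced us: retry reconnect */
	}
	*retries = 0;
	return 0;
}

The point of the double read barrier is that a node_id is meaningless without the generation it belongs to; comparing against the card's current generation distinguishes "the target really rejected us" from "the bus reset under our feet".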
@@ -986,6 +1222,7 @@ static void sbp2_update(struct fw_unit *unit) | |||
986 | * Iteration over tgt->lu_list is therefore safe here. | 1222 | * Iteration over tgt->lu_list is therefore safe here. |
987 | */ | 1223 | */ |
988 | list_for_each_entry(lu, &tgt->lu_list, link) { | 1224 | list_for_each_entry(lu, &tgt->lu_list, link) { |
1225 | sbp2_conditionally_block(lu); | ||
989 | lu->retries = 0; | 1226 | lu->retries = 0; |
990 | sbp2_queue_work(lu, 0); | 1227 | sbp2_queue_work(lu, 0); |
991 | } | 1228 | } |
@@ -1063,7 +1300,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1063 | 1300 | ||
1064 | if (status != NULL) { | 1301 | if (status != NULL) { |
1065 | if (STATUS_GET_DEAD(*status)) | 1302 | if (STATUS_GET_DEAD(*status)) |
1066 | sbp2_agent_reset(orb->lu); | 1303 | sbp2_agent_reset_no_wait(orb->lu); |
1067 | 1304 | ||
1068 | switch (STATUS_GET_RESPONSE(*status)) { | 1305 | switch (STATUS_GET_RESPONSE(*status)) { |
1069 | case SBP2_STATUS_REQUEST_COMPLETE: | 1306 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1089,6 +1326,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1089 | * or when sending the write (less likely). | 1326 | * or when sending the write (less likely). |
1090 | */ | 1327 | */ |
1091 | result = DID_BUS_BUSY << 16; | 1328 | result = DID_BUS_BUSY << 16; |
1329 | sbp2_conditionally_block(orb->lu); | ||
1092 | } | 1330 | } |
1093 | 1331 | ||
1094 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1332 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1197,7 +1435,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1197 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1435 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1198 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 1436 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
1199 | struct sbp2_command_orb *orb; | 1437 | struct sbp2_command_orb *orb; |
1200 | unsigned max_payload; | 1438 | unsigned int max_payload; |
1201 | int retval = SCSI_MLQUEUE_HOST_BUSY; | 1439 | int retval = SCSI_MLQUEUE_HOST_BUSY; |
1202 | 1440 | ||
1203 | /* | 1441 | /* |
@@ -1275,6 +1513,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | |||
1275 | { | 1513 | { |
1276 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1514 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1277 | 1515 | ||
1516 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1517 | if (!lu) | ||
1518 | return -ENOSYS; | ||
1519 | |||
1278 | sdev->allow_restart = 1; | 1520 | sdev->allow_restart = 1; |
1279 | 1521 | ||
1280 | /* | 1522 | /* |
@@ -1319,7 +1561,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1319 | { | 1561 | { |
1320 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1562 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1321 | 1563 | ||
1322 | fw_notify("sbp2_scsi_abort\n"); | 1564 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); |
1323 | sbp2_agent_reset(lu); | 1565 | sbp2_agent_reset(lu); |
1324 | sbp2_cancel_orbs(lu); | 1566 | sbp2_cancel_orbs(lu); |
1325 | 1567 | ||
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9aa..d2c7a3d7e1cb 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <asm/bug.h> | ||
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
25 | #include "fw-transaction.h" | 26 | #include "fw-transaction.h" |
26 | #include "fw-topology.h" | 27 | #include "fw-topology.h" |
@@ -383,6 +384,7 @@ void fw_destroy_nodes(struct fw_card *card) | |||
383 | card->color++; | 384 | card->color++; |
384 | if (card->local_node != NULL) | 385 | if (card->local_node != NULL) |
385 | for_each_fw_node(card, card->local_node, report_lost_node); | 386 | for_each_fw_node(card, card->local_node, report_lost_node); |
387 | card->local_node = NULL; | ||
386 | spin_unlock_irqrestore(&card->lock, flags); | 388 | spin_unlock_irqrestore(&card->lock, flags); |
387 | } | 389 | } |
388 | 390 | ||
@@ -423,8 +425,8 @@ update_tree(struct fw_card *card, struct fw_node *root) | |||
423 | node1 = fw_node(list1.next); | 425 | node1 = fw_node(list1.next); |
424 | 426 | ||
425 | while (&node0->link != &list0) { | 427 | while (&node0->link != &list0) { |
428 | WARN_ON(node0->port_count != node1->port_count); | ||
426 | 429 | ||
427 | /* assert(node0->port_count == node1->port_count); */ | ||
428 | if (node0->link_on && !node1->link_on) | 430 | if (node0->link_on && !node1->link_on) |
429 | event = FW_NODE_LINK_OFF; | 431 | event = FW_NODE_LINK_OFF; |
430 | else if (!node0->link_on && node1->link_on) | 432 | else if (!node0->link_on && node1->link_on) |
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c index 7fcc59dedf08..99529e59a0b1 100644 --- a/drivers/firewire/fw-transaction.c +++ b/drivers/firewire/fw-transaction.c | |||
@@ -751,7 +751,7 @@ handle_topology_map(struct fw_card *card, struct fw_request *request, | |||
751 | void *payload, size_t length, void *callback_data) | 751 | void *payload, size_t length, void *callback_data) |
752 | { | 752 | { |
753 | int i, start, end; | 753 | int i, start, end; |
754 | u32 *map; | 754 | __be32 *map; |
755 | 755 | ||
756 | if (!TCODE_IS_READ_REQUEST(tcode)) { | 756 | if (!TCODE_IS_READ_REQUEST(tcode)) { |
757 | fw_send_response(card, request, RCODE_TYPE_ERROR); | 757 | fw_send_response(card, request, RCODE_TYPE_ERROR); |
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b57408..a43bb22912f9 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 31 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
31 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 32 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
@@ -85,12 +86,12 @@ | |||
85 | static inline void | 86 | static inline void |
86 | fw_memcpy_from_be32(void *_dst, void *_src, size_t size) | 87 | fw_memcpy_from_be32(void *_dst, void *_src, size_t size) |
87 | { | 88 | { |
88 | u32 *dst = _dst; | 89 | u32 *dst = _dst; |
89 | u32 *src = _src; | 90 | __be32 *src = _src; |
90 | int i; | 91 | int i; |
91 | 92 | ||
92 | for (i = 0; i < size / 4; i++) | 93 | for (i = 0; i < size / 4; i++) |
93 | dst[i] = cpu_to_be32(src[i]); | 94 | dst[i] = be32_to_cpu(src[i]); |
94 | } | 95 | } |
95 | 96 | ||
96 | static inline void | 97 | static inline void |
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; | |||
219 | struct fw_card { | 220 | struct fw_card { |
220 | const struct fw_card_driver *driver; | 221 | const struct fw_card_driver *driver; |
221 | struct device *device; | 222 | struct device *device; |
223 | atomic_t device_count; | ||
222 | struct kref kref; | 224 | struct kref kref; |
223 | 225 | ||
224 | int node_id; | 226 | int node_id; |
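The fw-transaction.h hunk above fixes the direction of the byte-order conversion: data arriving off the wire is big-endian, so the source pointer is typed __be32 and each word goes through be32_to_cpu(). A minimal standalone sketch of the same idea, with an illustrative helper name:

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Copy a buffer of big-endian 32-bit words into host byte order.
 * Sketch only; it mirrors the corrected fw_memcpy_from_be32() above.
 */
static inline void copy_from_be32(void *_dst, const void *_src, size_t size)
{
	u32 *dst = _dst;
	const __be32 *src = _src;	/* wire data is big-endian */
	size_t i;

	for (i = 0; i < size / 4; i++)
		dst[i] = be32_to_cpu(src[i]);
}

Typing the source as __be32 also lets sparse flag any future caller that tries to convert in the wrong direction.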
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 92583cd4bffd..6e72fd31184d 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -184,6 +184,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) | |||
184 | gc->direction_output = pca953x_gpio_direction_output; | 184 | gc->direction_output = pca953x_gpio_direction_output; |
185 | gc->get = pca953x_gpio_get_value; | 185 | gc->get = pca953x_gpio_get_value; |
186 | gc->set = pca953x_gpio_set_value; | 186 | gc->set = pca953x_gpio_set_value; |
187 | gc->can_sleep = 1; | ||
187 | 188 | ||
188 | gc->base = chip->gpio_start; | 189 | gc->base = chip->gpio_start; |
189 | gc->ngpio = gpios; | 190 | gc->ngpio = gpios; |
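Setting gc->can_sleep above tells gpiolib that this expander sits behind I2C and its accessors may sleep, so consumers must use the *_cansleep variants outside atomic context. A small hedged consumer example; the GPIO number is made up:

#include <linux/gpio.h>

#define DEMO_GPIO	200	/* hypothetical line on the pca953x expander */

static int demo_set_line(void)
{
	int err = gpio_request(DEMO_GPIO, "demo");
	if (err)
		return err;

	gpio_direction_output(DEMO_GPIO, 0);
	/* the chip has can_sleep set, so use the sleeping accessor */
	gpio_set_value_cansleep(DEMO_GPIO, 1);
	gpio_free(DEMO_GPIO);
	return 0;
}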
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index 573abe440842..2fa43183d375 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c | |||
@@ -335,7 +335,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev, | |||
335 | u8 temp; | 335 | u8 temp; |
336 | 336 | ||
337 | /* driver_data might come from user-space, so check it */ | 337 | /* driver_data might come from user-space, so check it */ |
338 | if (id->driver_data > ARRAY_SIZE(chipname)) | 338 | if (id->driver_data >= ARRAY_SIZE(chipname)) |
339 | return -EINVAL; | 339 | return -EINVAL; |
340 | 340 | ||
341 | if (amd756_ioport) { | 341 | if (amd756_ioport) { |
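The one-character i2c-amd756 fix above matters because driver_data is used as an index into chipname[]: with ">" the index may equal the array size and read one element past the end. The safe pattern, reduced to a sketch with placeholder data:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

static const char *const chipname_demo[] = { "AMD756", "AMD766", "AMD768" };

static const char *lookup_chipname(unsigned long idx)
{
	/* idx may come from user space: valid indices are 0 .. n-1 */
	if (idx >= ARRAY_SIZE(chipname_demo))
		return NULL;
	return chipname_demo[idx];
}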
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile index 501f00cea782..e47aca0ca5ae 100644 --- a/drivers/i2c/chips/Makefile +++ b/drivers/i2c/chips/Makefile | |||
@@ -1,6 +1,13 @@ | |||
1 | # | 1 | # |
2 | # Makefile for miscellaneous I2C chip drivers. | 2 | # Makefile for miscellaneous I2C chip drivers. |
3 | # | 3 | # |
4 | # Think twice before you add a new driver to this directory. | ||
5 | # Device drivers are better grouped according to the functionality they | ||
6 | # implement rather than to the bus they are connected to. In particular: | ||
7 | # * Hardware monitoring chip drivers go to drivers/hwmon | ||
8 | # * RTC chip drivers go to drivers/rtc | ||
9 | # * I/O expander drivers go to drivers/gpio | ||
10 | # | ||
4 | 11 | ||
5 | obj-$(CONFIG_DS1682) += ds1682.o | 12 | obj-$(CONFIG_DS1682) += ds1682.o |
6 | obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o | 13 | obj-$(CONFIG_SENSORS_EEPROM) += eeprom.o |
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 96da22e9a5a4..fd84b2a36338 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
@@ -90,12 +90,16 @@ static int i2c_device_probe(struct device *dev) | |||
90 | { | 90 | { |
91 | struct i2c_client *client = to_i2c_client(dev); | 91 | struct i2c_client *client = to_i2c_client(dev); |
92 | struct i2c_driver *driver = to_i2c_driver(dev->driver); | 92 | struct i2c_driver *driver = to_i2c_driver(dev->driver); |
93 | int status; | ||
93 | 94 | ||
94 | if (!driver->probe) | 95 | if (!driver->probe) |
95 | return -ENODEV; | 96 | return -ENODEV; |
96 | client->driver = driver; | 97 | client->driver = driver; |
97 | dev_dbg(dev, "probe\n"); | 98 | dev_dbg(dev, "probe\n"); |
98 | return driver->probe(client); | 99 | status = driver->probe(client); |
100 | if (status) | ||
101 | client->driver = NULL; | ||
102 | return status; | ||
99 | } | 103 | } |
100 | 104 | ||
101 | static int i2c_device_remove(struct device *dev) | 105 | static int i2c_device_remove(struct device *dev) |
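The i2c-core hunk above keeps client->driver consistent with reality: the back-pointer is set just before calling ->probe(), and cleared again if probe fails, so later code never believes an unbound device is bound. A hedged sketch of the same bookkeeping with simplified stand-in types:

#include <linux/errno.h>

/* Simplified stand-ins for struct i2c_client / struct i2c_driver. */
struct demo_client { struct demo_driver *driver; };
struct demo_driver { int (*probe)(struct demo_client *); };

static int demo_device_probe(struct demo_client *client,
			     struct demo_driver *driver)
{
	int status;

	if (!driver->probe)
		return -ENODEV;

	client->driver = driver;	/* optimistically bind */
	status = driver->probe(client);
	if (status)
		client->driver = NULL;	/* probe failed: undo the binding */
	return status;
}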
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index df752e690e47..eed6d8e1b5c7 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -50,7 +50,7 @@ menuconfig IDE | |||
50 | To compile this driver as a module, choose M here: the | 50 | To compile this driver as a module, choose M here: the |
51 | module will be called ide. | 51 | module will be called ide. |
52 | 52 | ||
53 | For further information, please read <file:Documentation/ide.txt>. | 53 | For further information, please read <file:Documentation/ide/ide.txt>. |
54 | 54 | ||
55 | If unsure, say Y. | 55 | If unsure, say Y. |
56 | 56 | ||
@@ -77,7 +77,7 @@ config BLK_DEV_IDE | |||
77 | Useful information about large (>540 MB) IDE disks, multiple | 77 | Useful information about large (>540 MB) IDE disks, multiple |
78 | interfaces, what to do if ATA/IDE devices are not automatically | 78 | interfaces, what to do if ATA/IDE devices are not automatically |
79 | detected, sound card ATA/IDE ports, module support, and other | 79 | detected, sound card ATA/IDE ports, module support, and other |
80 | topics, is contained in <file:Documentation/ide.txt>. For detailed | 80 | topics, is contained in <file:Documentation/ide/ide.txt>. For detailed |
81 | information about hard drives, consult the Disk-HOWTO and the | 81 | information about hard drives, consult the Disk-HOWTO and the |
82 | Multi-Disk-HOWTO, available from | 82 | Multi-Disk-HOWTO, available from |
83 | <http://www.tldp.org/docs.html#howto>. | 83 | <http://www.tldp.org/docs.html#howto>. |
@@ -87,7 +87,7 @@ config BLK_DEV_IDE | |||
87 | <ftp://ibiblio.org/pub/Linux/system/hardware/>. | 87 | <ftp://ibiblio.org/pub/Linux/system/hardware/>. |
88 | 88 | ||
89 | To compile this driver as a module, choose M here and read | 89 | To compile this driver as a module, choose M here and read |
90 | <file:Documentation/ide.txt>. The module will be called ide-mod. | 90 | <file:Documentation/ide/ide.txt>. The module will be called ide-mod. |
91 | Do not compile this driver as a module if your root file system (the | 91 | Do not compile this driver as a module if your root file system (the |
92 | one containing the directory /) is located on an IDE device. | 92 | one containing the directory /) is located on an IDE device. |
93 | 93 | ||
@@ -98,7 +98,7 @@ config BLK_DEV_IDE | |||
98 | 98 | ||
99 | if BLK_DEV_IDE | 99 | if BLK_DEV_IDE |
100 | 100 | ||
101 | comment "Please see Documentation/ide.txt for help/info on IDE drives" | 101 | comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" |
102 | 102 | ||
103 | config BLK_DEV_IDE_SATA | 103 | config BLK_DEV_IDE_SATA |
104 | bool "Support for SATA (deprecated; conflicts with libata SATA driver)" | 104 | bool "Support for SATA (deprecated; conflicts with libata SATA driver)" |
@@ -235,8 +235,8 @@ config BLK_DEV_IDETAPE | |||
235 | along with other IDE devices, as "hdb" or "hdc", or something | 235 | along with other IDE devices, as "hdb" or "hdc", or something |
236 | similar, and will be mapped to a character device such as "ht0" | 236 | similar, and will be mapped to a character device such as "ht0" |
237 | (check the boot messages with dmesg). Be sure to consult the | 237 | (check the boot messages with dmesg). Be sure to consult the |
238 | <file:drivers/ide/ide-tape.c> and <file:Documentation/ide.txt> files | 238 | <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.txt> |
239 | for usage information. | 239 | files for usage information. |
240 | 240 | ||
241 | To compile this driver as a module, choose M here: the | 241 | To compile this driver as a module, choose M here: the |
242 | module will be called ide-tape. | 242 | module will be called ide-tape. |
@@ -358,7 +358,7 @@ config BLK_DEV_CMD640 | |||
358 | 358 | ||
359 | The CMD640 chip is also used on add-in cards by Acculogic, and on | 359 | The CMD640 chip is also used on add-in cards by Acculogic, and on |
360 | the "CSA-6400E PCI to IDE controller" that some people have. For | 360 | the "CSA-6400E PCI to IDE controller" that some people have. For |
361 | details, read <file:Documentation/ide.txt>. | 361 | details, read <file:Documentation/ide/ide.txt>. |
362 | 362 | ||
363 | config BLK_DEV_CMD640_ENHANCED | 363 | config BLK_DEV_CMD640_ENHANCED |
364 | bool "CMD640 enhanced support" | 364 | bool "CMD640 enhanced support" |
@@ -366,7 +366,7 @@ config BLK_DEV_CMD640_ENHANCED | |||
366 | help | 366 | help |
367 | This option includes support for setting/autotuning PIO modes and | 367 | This option includes support for setting/autotuning PIO modes and |
368 | prefetch on CMD640 IDE interfaces. For details, read | 368 | prefetch on CMD640 IDE interfaces. For details, read |
369 | <file:Documentation/ide.txt>. If you have a CMD640 IDE interface | 369 | <file:Documentation/ide/ide.txt>. If you have a CMD640 IDE interface |
370 | and your BIOS does not already do this for you, then say Y here. | 370 | and your BIOS does not already do this for you, then say Y here. |
371 | Otherwise say N. | 371 | Otherwise say N. |
372 | 372 | ||
@@ -1069,9 +1069,9 @@ config BLK_DEV_ALI14XX | |||
1069 | This driver is enabled at runtime using the "ali14xx.probe" kernel | 1069 | This driver is enabled at runtime using the "ali14xx.probe" kernel |
1070 | boot parameter. It enables support for the secondary IDE interface | 1070 | boot parameter. It enables support for the secondary IDE interface |
1071 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster | 1071 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster |
1072 | I/O speeds to be set as well. See the files | 1072 | I/O speeds to be set as well. |
1073 | <file:Documentation/ide.txt> and <file:drivers/ide/legacy/ali14xx.c> | 1073 | See the files <file:Documentation/ide/ide.txt> and |
1074 | for more info. | 1074 | <file:drivers/ide/legacy/ali14xx.c> for more info. |
1075 | 1075 | ||
1076 | config BLK_DEV_DTC2278 | 1076 | config BLK_DEV_DTC2278 |
1077 | tristate "DTC-2278 support" | 1077 | tristate "DTC-2278 support" |
@@ -1079,7 +1079,7 @@ config BLK_DEV_DTC2278 | |||
1079 | This driver is enabled at runtime using the "dtc2278.probe" kernel | 1079 | This driver is enabled at runtime using the "dtc2278.probe" kernel |
1080 | boot parameter. It enables support for the secondary IDE interface | 1080 | boot parameter. It enables support for the secondary IDE interface |
1081 | of the DTC-2278 card, and permits faster I/O speeds to be set as | 1081 | of the DTC-2278 card, and permits faster I/O speeds to be set as |
1082 | well. See the <file:Documentation/ide.txt> and | 1082 | well. See the <file:Documentation/ide/ide.txt> and |
1083 | <file:drivers/ide/legacy/dtc2278.c> files for more info. | 1083 | <file:drivers/ide/legacy/dtc2278.c> files for more info. |
1084 | 1084 | ||
1085 | config BLK_DEV_HT6560B | 1085 | config BLK_DEV_HT6560B |
@@ -1088,7 +1088,7 @@ config BLK_DEV_HT6560B | |||
1088 | This driver is enabled at runtime using the "ht6560b.probe" kernel | 1088 | This driver is enabled at runtime using the "ht6560b.probe" kernel |
1089 | boot parameter. It enables support for the secondary IDE interface | 1089 | boot parameter. It enables support for the secondary IDE interface |
1090 | of the Holtek card, and permits faster I/O speeds to be set as well. | 1090 | of the Holtek card, and permits faster I/O speeds to be set as well. |
1091 | See the <file:Documentation/ide.txt> and | 1091 | See the <file:Documentation/ide/ide.txt> and |
1092 | <file:drivers/ide/legacy/ht6560b.c> files for more info. | 1092 | <file:drivers/ide/legacy/ht6560b.c> files for more info. |
1093 | 1093 | ||
1094 | config BLK_DEV_QD65XX | 1094 | config BLK_DEV_QD65XX |
@@ -1096,7 +1096,7 @@ config BLK_DEV_QD65XX | |||
1096 | help | 1096 | help |
1097 | This driver is enabled at runtime using the "qd65xx.probe" kernel | 1097 | This driver is enabled at runtime using the "qd65xx.probe" kernel |
1098 | boot parameter. It permits faster I/O speeds to be set. See the | 1098 | boot parameter. It permits faster I/O speeds to be set. See the |
1099 | <file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> | 1099 | <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> |
1100 | for more info. | 1100 | for more info. |
1101 | 1101 | ||
1102 | config BLK_DEV_UMC8672 | 1102 | config BLK_DEV_UMC8672 |
@@ -1105,7 +1105,7 @@ config BLK_DEV_UMC8672 | |||
1105 | This driver is enabled at runtime using the "umc8672.probe" kernel | 1105 | This driver is enabled at runtime using the "umc8672.probe" kernel |
1106 | boot parameter. It enables support for the secondary IDE interface | 1106 | boot parameter. It enables support for the secondary IDE interface |
1107 | of the UMC-8672, and permits faster I/O speeds to be set as well. | 1107 | of the UMC-8672, and permits faster I/O speeds to be set as well. |
1108 | See the files <file:Documentation/ide.txt> and | 1108 | See the files <file:Documentation/ide/ide.txt> and |
1109 | <file:drivers/ide/legacy/umc8672.c> for more info. | 1109 | <file:drivers/ide/legacy/umc8672.c> for more info. |
1110 | 1110 | ||
1111 | endif | 1111 | endif |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 310e497b5838..c8d0e8715997 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector, | |||
670 | * and attempt to recover if there are problems. Returns 0 if everything's | 670 | * and attempt to recover if there are problems. Returns 0 if everything's |
671 | * ok; nonzero if the request has been terminated. | 671 | * ok; nonzero if the request has been terminated. |
672 | */ | 672 | */ |
673 | static | 673 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, |
674 | int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | 674 | int len, int ireason, int rw) |
675 | { | 675 | { |
676 | /* | 676 | /* |
677 | * ireason == 0: the drive wants to receive data from us | 677 | * ireason == 0: the drive wants to receive data from us |
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | |||
701 | drive->name, __FUNCTION__, ireason); | 701 | drive->name, __FUNCTION__, ireason); |
702 | } | 702 | } |
703 | 703 | ||
704 | if (rq->cmd_type == REQ_TYPE_ATA_PC) | ||
705 | rq->cmd_flags |= REQ_FAILED; | ||
706 | |||
704 | cdrom_end_request(drive, 0); | 707 | cdrom_end_request(drive, 0); |
705 | return -1; | 708 | return -1; |
706 | } | 709 | } |
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1071 | /* | 1074 | /* |
1072 | * check which way to transfer data | 1075 | * check which way to transfer data |
1073 | */ | 1076 | */ |
1074 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | 1077 | if (ide_cd_check_ireason(drive, rq, len, ireason, write)) |
1075 | if (ide_cd_check_ireason(drive, len, ireason, write)) | 1078 | return ide_stopped; |
1076 | return ide_stopped; | ||
1077 | 1079 | ||
1078 | if (blk_fs_request(rq) && write == 0) { | 1080 | if (blk_fs_request(rq)) { |
1081 | if (write == 0) { | ||
1079 | int nskip; | 1082 | int nskip; |
1080 | 1083 | ||
1081 | if (ide_cd_check_transfer_size(drive, len)) { | 1084 | if (ide_cd_check_transfer_size(drive, len)) { |
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1101 | if (ireason == 0) { | 1104 | if (ireason == 0) { |
1102 | write = 1; | 1105 | write = 1; |
1103 | xferfunc = HWIF(drive)->atapi_output_bytes; | 1106 | xferfunc = HWIF(drive)->atapi_output_bytes; |
1104 | } else if (ireason == 2 || (ireason == 1 && | 1107 | } else { |
1105 | (blk_fs_request(rq) || blk_pc_request(rq)))) { | ||
1106 | write = 0; | 1108 | write = 0; |
1107 | xferfunc = HWIF(drive)->atapi_input_bytes; | 1109 | xferfunc = HWIF(drive)->atapi_input_bytes; |
1108 | } else { | ||
1109 | printk(KERN_ERR "%s: %s: The drive " | ||
1110 | "appears confused (ireason = 0x%02x). " | ||
1111 | "Trying to recover by ending request.\n", | ||
1112 | drive->name, __FUNCTION__, ireason); | ||
1113 | goto end_request; | ||
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | /* | 1112 | /* |
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1182 | else | 1178 | else |
1183 | rq->data += blen; | 1179 | rq->data += blen; |
1184 | } | 1180 | } |
1181 | if (!write && blk_sense_request(rq)) | ||
1182 | rq->sense_len += blen; | ||
1185 | } | 1183 | } |
1186 | 1184 | ||
1187 | if (write && blk_sense_request(rq)) | ||
1188 | rq->sense_len += thislen; | ||
1189 | |||
1190 | /* | 1185 | /* |
1191 | * pad, if necessary | 1186 | * pad, if necessary |
1192 | */ | 1187 | */ |
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { | |||
1931 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1926 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1932 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1927 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1933 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1928 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1929 | { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | ||
1934 | { NULL, NULL, 0 } | 1930 | { NULL, NULL, 0 } |
1935 | }; | 1931 | }; |
1936 | 1932 | ||
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index b68284de4e85..6d147ce6782f 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
@@ -457,6 +457,10 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, | |||
457 | layer. the packet must be complete, as we do not | 457 | layer. the packet must be complete, as we do not |
458 | touch it at all. */ | 458 | touch it at all. */ |
459 | ide_cd_init_rq(drive, &req); | 459 | ide_cd_init_rq(drive, &req); |
460 | |||
461 | if (cgc->data_direction == CGC_DATA_WRITE) | ||
462 | req.cmd_flags |= REQ_RW; | ||
463 | |||
460 | memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE); | 464 | memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE); |
461 | if (cgc->sense) | 465 | if (cgc->sense) |
462 | memset(cgc->sense, 0, sizeof(struct request_sense)); | 466 | memset(cgc->sense, 0, sizeof(struct request_sense)); |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 8f5bed471050..39501d130256 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive) | |||
867 | 867 | ||
868 | /* Only print cache size when it was specified */ | 868 | /* Only print cache size when it was specified */ |
869 | if (id->buf_size) | 869 | if (id->buf_size) |
870 | printk (" w/%dKiB Cache", id->buf_size/2); | 870 | printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2); |
871 | 871 | ||
872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", | 872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", |
873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); | 873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); |
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive) | |||
949 | return; | 949 | return; |
950 | } | 950 | } |
951 | 951 | ||
952 | printk("Shutdown: %s\n", drive->name); | 952 | printk(KERN_INFO "Shutdown: %s\n", drive->name); |
953 | |||
953 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); | 954 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); |
954 | } | 955 | } |
955 | 956 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d0e7b537353e..d61e5788d310 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -1,9 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * IDE DMA support (including IDE PCI BM-DMA). | ||
3 | * | ||
2 | * Copyright (C) 1995-1998 Mark Lord | 4 | * Copyright (C) 1995-1998 Mark Lord |
3 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 5 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
4 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz | 6 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz |
5 | * | 7 | * |
6 | * May be copied or modified under the terms of the GNU General Public License | 8 | * May be copied or modified under the terms of the GNU General Public License |
9 | * | ||
10 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
7 | */ | 11 | */ |
8 | 12 | ||
9 | /* | 13 | /* |
@@ -11,49 +15,6 @@ | |||
11 | */ | 15 | */ |
12 | 16 | ||
13 | /* | 17 | /* |
14 | * This module provides support for the bus-master IDE DMA functions | ||
15 | * of various PCI chipsets, including the Intel PIIX (i82371FB for | ||
16 | * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and | ||
17 | * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset) | ||
18 | * ("PIIX" stands for "PCI ISA IDE Xcellerator"). | ||
19 | * | ||
20 | * Pretty much the same code works for other IDE PCI bus-mastering chipsets. | ||
21 | * | ||
22 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
23 | * | ||
24 | * By default, DMA support is prepared for use, but is currently enabled only | ||
25 | * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single), | ||
26 | * or which are recognized as "good" (see table below). Drives with only mode0 | ||
27 | * or mode1 (multi/single) DMA should also work with this chipset/driver | ||
28 | * (eg. MC2112A) but are not enabled by default. | ||
29 | * | ||
30 | * Use "hdparm -i" to view modes supported by a given drive. | ||
31 | * | ||
32 | * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling | ||
33 | * DMA support, but must be (re-)compiled against this kernel version or later. | ||
34 | * | ||
35 | * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting. | ||
36 | * If problems arise, ide.c will disable DMA operation after a few retries. | ||
37 | * This error recovery mechanism works and has been extremely well exercised. | ||
38 | * | ||
39 | * IDE drives, depending on their vintage, may support several different modes | ||
40 | * of DMA operation. The boot-time modes are indicated with a "*" in | ||
41 | * the "hdparm -i" listing, and can be changed with *knowledgeable* use of | ||
42 | * the "hdparm -X" feature. There is seldom a need to do this, as drives | ||
43 | * normally power-up with their "best" PIO/DMA modes enabled. | ||
44 | * | ||
45 | * Testing has been done with a rather extensive number of drives, | ||
46 | * with Quantum & Western Digital models generally outperforming the pack, | ||
47 | * and Fujitsu & Conner (and some Seagate which are really Conner) drives | ||
48 | * showing more lackluster throughput. | ||
49 | * | ||
50 | * Keep an eye on /var/adm/messages for "DMA disabled" messages. | ||
51 | * | ||
52 | * Some people have reported trouble with Intel Zappa motherboards. | ||
53 | * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0, | ||
54 | * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe | ||
55 | * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this). | ||
56 | * | ||
57 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for | 18 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for |
58 | * fixing the problem with the BIOS on some Acer motherboards. | 19 | * fixing the problem with the BIOS on some Acer motherboards. |
59 | * | 20 | * |
@@ -65,11 +26,6 @@ | |||
65 | * | 26 | * |
66 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> | 27 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> |
67 | * for supplying a Promise UDMA board & WD UDMA drive for this work! | 28 | * for supplying a Promise UDMA board & WD UDMA drive for this work! |
68 | * | ||
69 | * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports. | ||
70 | * | ||
71 | * ATA-66/100 and recovery functions, I forgot the rest...... | ||
72 | * | ||
73 | */ | 29 | */ |
74 | 30 | ||
75 | #include <linux/module.h> | 31 | #include <linux/module.h> |
@@ -757,7 +713,7 @@ static int ide_tune_dma(ide_drive_t *drive) | |||
757 | } | 713 | } |
758 | 714 | ||
759 | if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) | 715 | if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) |
760 | return 0; | 716 | return 1; |
761 | 717 | ||
762 | if (ide_set_dma_mode(drive, speed)) | 718 | if (ide_set_dma_mode(drive, speed)) |
763 | return 0; | 719 | return 0; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4a2cb2868226..194ecb0049eb 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
756 | 756 | ||
757 | BUG_ON(hwif->present); | 757 | BUG_ON(hwif->present); |
758 | 758 | ||
759 | if (hwif->noprobe) | 759 | if (hwif->noprobe || |
760 | (hwif->drives[0].noprobe && hwif->drives[1].noprobe)) | ||
760 | return -EACCES; | 761 | return -EACCES; |
761 | 762 | ||
762 | /* | 763 | /* |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0598ecfd5f37..43e0e0557776 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive) | |||
3765 | g->fops = &idetape_block_ops; | 3765 | g->fops = &idetape_block_ops; |
3766 | ide_register_region(g); | 3766 | ide_register_region(g); |
3767 | 3767 | ||
3768 | printk(KERN_WARNING "It is possible that this driver does not have any" | ||
3769 | " users anymore and, as a result, it will be REMOVED soon." | ||
3770 | " Please notify Bart <bzolnier@gmail.com> or Boris" | ||
3771 | " <petkovbb@gmail.com> in case you still need it.\n"); | ||
3772 | |||
3768 | return 0; | 3773 | return 0; |
3769 | 3774 | ||
3770 | out_free_tape: | 3775 | out_free_tape: |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 477833f0daf5..9976f9d627d4 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore) | |||
590 | hwif->extra_ports = 0; | 590 | hwif->extra_ports = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | ||
594 | * Note that we only release the standard ports, | ||
595 | * and do not even try to handle any extra ports | ||
596 | * allocated for weird IDE interface chipsets. | ||
597 | */ | ||
598 | ide_hwif_release_regions(hwif); | 593 | ide_hwif_release_regions(hwif); |
599 | 594 | ||
600 | /* copy original settings */ | 595 | /* copy original settings */ |
@@ -672,7 +667,6 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *), | |||
672 | 667 | ||
673 | do { | 668 | do { |
674 | hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]); | 669 | hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]); |
675 | index = hwif->index; | ||
676 | if (hwif) | 670 | if (hwif) |
677 | goto found; | 671 | goto found; |
678 | for (index = 0; index < MAX_HWIFS; index++) | 672 | for (index = 0; index < MAX_HWIFS; index++) |
@@ -680,6 +674,7 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *), | |||
680 | } while (retry--); | 674 | } while (retry--); |
681 | return -1; | 675 | return -1; |
682 | found: | 676 | found: |
677 | index = hwif->index; | ||
683 | if (hwif->present) | 678 | if (hwif->present) |
684 | ide_unregister(index, 0, 1); | 679 | ide_unregister(index, 0, 1); |
685 | else if (!hwif->hold) | 680 | else if (!hwif->hold) |
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1036 | drive->nice1 = (arg >> IDE_NICE_1) & 1; | 1031 | drive->nice1 = (arg >> IDE_NICE_1) & 1; |
1037 | return 0; | 1032 | return 0; |
1038 | case HDIO_DRIVE_RESET: | 1033 | case HDIO_DRIVE_RESET: |
1039 | { | 1034 | if (!capable(CAP_SYS_ADMIN)) |
1040 | unsigned long flags; | 1035 | return -EACCES; |
1041 | if (!capable(CAP_SYS_ADMIN)) return -EACCES; | 1036 | |
1042 | |||
1043 | /* | 1037 | /* |
1044 | * Abort the current command on the | 1038 | * Abort the current command on the |
1045 | * group if there is one, taking | 1039 | * group if there is one, taking |
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1058 | ide_abort(drive, "drive reset"); | 1052 | ide_abort(drive, "drive reset"); |
1059 | 1053 | ||
1060 | BUG_ON(HWGROUP(drive)->handler); | 1054 | BUG_ON(HWGROUP(drive)->handler); |
1061 | 1055 | ||
1062 | /* Ensure nothing gets queued after we | 1056 | /* Ensure nothing gets queued after we |
1063 | drop the lock. Reset will clear the busy */ | 1057 | drop the lock. Reset will clear the busy */ |
1064 | 1058 | ||
1065 | HWGROUP(drive)->busy = 1; | 1059 | HWGROUP(drive)->busy = 1; |
1066 | spin_unlock_irqrestore(&ide_lock, flags); | 1060 | spin_unlock_irqrestore(&ide_lock, flags); |
1067 | (void) ide_do_reset(drive); | 1061 | (void) ide_do_reset(drive); |
1068 | 1062 | ||
1069 | return 0; | 1063 | return 0; |
1070 | } | ||
1071 | |||
1072 | case HDIO_GET_BUSSTATE: | 1064 | case HDIO_GET_BUSSTATE: |
1073 | if (!capable(CAP_SYS_ADMIN)) | 1065 | if (!capable(CAP_SYS_ADMIN)) |
1074 | return -EACCES; | 1066 | return -EACCES; |
@@ -1188,7 +1180,7 @@ static int __initdata is_chipset_set[MAX_HWIFS]; | |||
1188 | * ide_setup() gets called VERY EARLY during initialization, | 1180 | * ide_setup() gets called VERY EARLY during initialization, |
1189 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". | 1181 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". |
1190 | * | 1182 | * |
1191 | * Remember to update Documentation/ide.txt if you change something here. | 1183 | * Remember to update Documentation/ide/ide.txt if you change something here. |
1192 | */ | 1184 | */ |
1193 | static int __init ide_setup(char *s) | 1185 | static int __init ide_setup(char *s) |
1194 | { | 1186 | { |
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s) | |||
1449 | 1441 | ||
1450 | case -1: /* "noprobe" */ | 1442 | case -1: /* "noprobe" */ |
1451 | hwif->noprobe = 1; | 1443 | hwif->noprobe = 1; |
1452 | goto done; | 1444 | goto obsolete_option; |
1453 | 1445 | ||
1454 | case 1: /* base */ | 1446 | case 1: /* base */ |
1455 | vals[1] = vals[0] + 0x206; /* default ctl */ | 1447 | vals[1] = vals[0] + 0x206; /* default ctl */ |
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index bba29df5f21d..2f4f47ad602f 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) | |||
334 | hwif->drives[1].drive_data = t2; | 334 | hwif->drives[1].drive_data = t2; |
335 | } | 335 | } |
336 | 336 | ||
337 | /* | ||
338 | * qd_unsetup: | ||
339 | * | ||
340 | * called to unsetup an ata channel : back to default values, unlinks tuning | ||
341 | */ | ||
342 | /* | ||
343 | static void __exit qd_unsetup(ide_hwif_t *hwif) | ||
344 | { | ||
345 | u8 config = hwif->config_data; | ||
346 | int base = hwif->select_data; | ||
347 | void *set_pio_mode = (void *)hwif->set_pio_mode; | ||
348 | |||
349 | if (hwif->chipset != ide_qd65xx) | ||
350 | return; | ||
351 | |||
352 | printk(KERN_NOTICE "%s: back to defaults\n", hwif->name); | ||
353 | |||
354 | hwif->selectproc = NULL; | ||
355 | hwif->set_pio_mode = NULL; | ||
356 | |||
357 | if (set_pio_mode == (void *)qd6500_set_pio_mode) { | ||
358 | // will do it for both | ||
359 | outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
360 | } else if (set_pio_mode == (void *)qd6580_set_pio_mode) { | ||
361 | if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { | ||
362 | outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
363 | outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1])); | ||
364 | } else { | ||
365 | outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
366 | } | ||
367 | } else { | ||
368 | printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n"); | ||
369 | printk(KERN_WARNING "keeping settings !\n"); | ||
370 | } | ||
371 | } | ||
372 | */ | ||
373 | |||
374 | static const struct ide_port_info qd65xx_port_info __initdata = { | 337 | static const struct ide_port_info qd65xx_port_info __initdata = { |
375 | .chipset = ide_qd65xx, | 338 | .chipset = ide_qd65xx, |
376 | .host_flags = IDE_HFLAG_IO_32BIT | | 339 | .host_flags = IDE_HFLAG_IO_32BIT | |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base) | |||
444 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", | 407 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", |
445 | config, control, QD_ID3); | 408 | config, control, QD_ID3); |
446 | 409 | ||
410 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
411 | |||
447 | if (control & QD_CONTR_SEC_DISABLED) { | 412 | if (control & QD_CONTR_SEC_DISABLED) { |
448 | /* secondary disabled */ | 413 | /* secondary disabled */ |
449 | 414 | ||
@@ -460,8 +425,6 @@ static int __init qd_probe(int base) | |||
460 | 425 | ||
461 | ide_device_add(idx, &qd65xx_port_info); | 426 | ide_device_add(idx, &qd65xx_port_info); |
462 | 427 | ||
463 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
464 | |||
465 | return 1; | 428 | return 1; |
466 | } else { | 429 | } else { |
467 | ide_hwif_t *mate; | 430 | ide_hwif_t *mate; |
@@ -487,8 +450,6 @@ static int __init qd_probe(int base) | |||
487 | 450 | ||
488 | ide_device_add(idx, &qd65xx_port_info); | 451 | ide_device_add(idx, &qd65xx_port_info); |
489 | 452 | ||
490 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
491 | |||
492 | return 0; /* no other qd65xx possible */ | 453 | return 0; /* no other qd65xx possible */ |
493 | } | 454 | } |
494 | } | 455 | } |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index bd24dad3cfc6..ec667982809c 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void) | |||
787 | /* | 787 | /* |
788 | * Try to enable the secondary interface, if not already enabled | 788 | * Try to enable the secondary interface, if not already enabled |
789 | */ | 789 | */ |
790 | if (cmd_hwif1->noprobe) { | 790 | if (cmd_hwif1->noprobe || |
791 | (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) { | ||
791 | port2 = "not probed"; | 792 | port2 = "not probed"; |
792 | } else { | 793 | } else { |
793 | b = get_cmd640_reg(CNTRL); | 794 | b = get_cmd640_reg(CNTRL); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index d0f7bb8b8adf..6357bb6269ab 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1570 | if (rev < 3) | 1570 | if (rev < 3) |
1571 | info = &hpt36x; | 1571 | info = &hpt36x; |
1572 | else { | 1572 | else { |
1573 | static const struct hpt_info *hpt37x_info[] = | 1573 | switch (min_t(u8, rev, 6)) { |
1574 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; | 1574 | case 3: info = &hpt370; break; |
1575 | 1575 | case 4: info = &hpt370a; break; | |
1576 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; | 1576 | case 5: info = &hpt372; break; |
1577 | case 6: info = &hpt372n; break; | ||
1578 | } | ||
1577 | idx++; | 1579 | idx++; |
1578 | } | 1580 | } |
1579 | break; | 1581 | break; |
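The hpt366 hunk above replaces indexing into a local array with an explicit switch clamped by min_t(), which makes the revision-to-chip mapping readable and keeps an out-of-range revision from silently selecting the wrong descriptor. A sketch of the pattern with placeholder chip descriptors:

#include <linux/types.h>
#include <linux/kernel.h>	/* min_t() */

struct demo_chip_info { const char *name; };

static const struct demo_chip_info demo_hpt370  = { "HPT370"  };
static const struct demo_chip_info demo_hpt370a = { "HPT370A" };
static const struct demo_chip_info demo_hpt372  = { "HPT372"  };
static const struct demo_chip_info demo_hpt372n = { "HPT372N" };

static const struct demo_chip_info *demo_pick_chip(u8 rev)
{
	/* Revisions above 6 behave like 6; below 3 are handled elsewhere. */
	switch (min_t(u8, rev, 6)) {
	case 3: return &demo_hpt370;
	case 4: return &demo_hpt370a;
	case 5: return &demo_hpt372;
	case 6: return &demo_hpt372n;
	}
	return NULL;	/* rev < 3: caller falls back to the HPT36x info */
}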
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1626 | return ide_setup_pci_device(dev, &d); | 1628 | return ide_setup_pci_device(dev, &d); |
1627 | } | 1629 | } |
1628 | 1630 | ||
1629 | static const struct pci_device_id hpt366_pci_tbl[] = { | 1631 | static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { |
1630 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, | 1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, |
1631 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, | 1633 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, |
1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, | 1634 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 28e155a9e2a5..f53f72daae34 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
184 | * Don't use this with devices which don't have this bug. | 184 | * Don't use this with devices which don't have this bug. |
185 | * | 185 | * |
186 | * - delay inquiry | ||
187 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
188 | * | ||
186 | * - override internal blacklist | 189 | * - override internal blacklist |
187 | * Instead of adding to the built-in blacklist, use only the workarounds | 190 | * Instead of adding to the built-in blacklist, use only the workarounds |
188 | * specified in the module load parameter. | 191 | * specified in the module load parameter. |
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
195 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 198 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
196 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 199 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
197 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 200 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
201 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
198 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 202 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
199 | ", or a combination)"); | 203 | ", or a combination)"); |
200 | 204 | ||
@@ -357,6 +361,11 @@ static const struct { | |||
357 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 361 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
358 | SBP2_WORKAROUND_MODE_SENSE_8, | 362 | SBP2_WORKAROUND_MODE_SENSE_8, |
359 | }, | 363 | }, |
364 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
365 | .firmware_revision = 0x002800, | ||
366 | .model_id = 0x000000, | ||
367 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
368 | }, | ||
360 | /* Initio bridges, actually only needed for some older ones */ { | 369 | /* Initio bridges, actually only needed for some older ones */ { |
361 | .firmware_revision = 0x000200, | 370 | .firmware_revision = 0x000200, |
362 | .model_id = SBP2_ROM_VALUE_WILDCARD, | 371 | .model_id = SBP2_ROM_VALUE_WILDCARD, |
@@ -367,6 +376,11 @@ static const struct { | |||
367 | .model_id = SBP2_ROM_VALUE_WILDCARD, | 376 | .model_id = SBP2_ROM_VALUE_WILDCARD, |
368 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | 377 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, |
369 | }, | 378 | }, |
379 | /* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ { | ||
380 | .firmware_revision = 0x002600, | ||
381 | .model_id = SBP2_ROM_VALUE_WILDCARD, | ||
382 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
383 | }, | ||
370 | /* iPod 4th generation */ { | 384 | /* iPod 4th generation */ { |
371 | .firmware_revision = 0x0a2700, | 385 | .firmware_revision = 0x0a2700, |
372 | .model_id = 0x000021, | 386 | .model_id = 0x000021, |
@@ -914,6 +928,9 @@ static int sbp2_start_device(struct sbp2_lu *lu) | |||
914 | sbp2_agent_reset(lu, 1); | 928 | sbp2_agent_reset(lu, 1); |
915 | sbp2_max_speed_and_size(lu); | 929 | sbp2_max_speed_and_size(lu); |
916 | 930 | ||
931 | if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
932 | ssleep(SBP2_INQUIRY_DELAY); | ||
933 | |||
917 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); | 934 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); |
918 | if (error) { | 935 | if (error) { |
919 | SBP2_ERR("scsi_add_device failed"); | 936 | SBP2_ERR("scsi_add_device failed"); |
@@ -1962,6 +1979,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
1962 | { | 1979 | { |
1963 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; | 1980 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; |
1964 | 1981 | ||
1982 | if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0) | ||
1983 | return -ENODEV; | ||
1984 | |||
1965 | lu->sdev = sdev; | 1985 | lu->sdev = sdev; |
1966 | sdev->allow_restart = 1; | 1986 | sdev->allow_restart = 1; |
1967 | 1987 | ||
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index d2ecb0d8a1bb..80d8e097b065 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -343,6 +343,8 @@ enum sbp2lu_state_types { | |||
343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
346 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
347 | #define SBP2_INQUIRY_DELAY 12 | ||
346 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 348 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
347 | 349 | ||
348 | #endif /* SBP2_H */ | 350 | #endif /* SBP2_H */ |
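The new ieee1394 sbp2 blacklist entries above rely on SBP2_ROM_VALUE_WILDCARD to match any model ID for a given firmware revision. The sketch below shows how such a table can be scanned; the structure, wildcard value, and revision mask are illustrative assumptions, not the driver's actual definitions.

#include <linux/types.h>

#define DEMO_ROM_WILDCARD	0xffffff	/* stand-in wildcard value */

struct demo_workaround {
	u32 firmware_revision;
	u32 model_id;
	unsigned int workarounds;
};

static unsigned int demo_lookup_workarounds(const struct demo_workaround *tbl,
					    int n, u32 firmware_revision,
					    u32 model_id)
{
	int i;

	for (i = 0; i < n; i++) {
		/* assumed: only the upper bytes of the revision are compared */
		if (tbl[i].firmware_revision != (firmware_revision & 0xffff00))
			continue;
		if (tbl[i].model_id != DEMO_ROM_WILDCARD &&
		    tbl[i].model_id != model_id)
			continue;
		return tbl[i].workarounds;
	}
	return 0;
}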
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index b10ade92efed..4df405157086 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -3759,6 +3759,7 @@ static void cm_remove_one(struct ib_device *device) | |||
3759 | port = cm_dev->port[i-1]; | 3759 | port = cm_dev->port[i-1]; |
3760 | ib_modify_port(device, port->port_num, 0, &port_modify); | 3760 | ib_modify_port(device, port->port_num, 0, &port_modify); |
3761 | ib_unregister_mad_agent(port->mad_agent); | 3761 | ib_unregister_mad_agent(port->mad_agent); |
3762 | flush_workqueue(cm.wq); | ||
3762 | cm_remove_port_fs(port); | 3763 | cm_remove_port_fs(port); |
3763 | } | 3764 | } |
3764 | kobject_put(&cm_dev->dev_obj); | 3765 | kobject_put(&cm_dev->dev_obj); |
@@ -3813,6 +3814,7 @@ static void __exit ib_cm_cleanup(void) | |||
3813 | cancel_delayed_work(&timewait_info->work.work); | 3814 | cancel_delayed_work(&timewait_info->work.work); |
3814 | spin_unlock_irq(&cm.lock); | 3815 | spin_unlock_irq(&cm.lock); |
3815 | 3816 | ||
3817 | ib_unregister_client(&cm_client); | ||
3816 | destroy_workqueue(cm.wq); | 3818 | destroy_workqueue(cm.wq); |
3817 | 3819 | ||
3818 | list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { | 3820 | list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) { |
@@ -3820,7 +3822,6 @@ static void __exit ib_cm_cleanup(void) | |||
3820 | kfree(timewait_info); | 3822 | kfree(timewait_info); |
3821 | } | 3823 | } |
3822 | 3824 | ||
3823 | ib_unregister_client(&cm_client); | ||
3824 | class_unregister(&cm_class); | 3825 | class_unregister(&cm_class); |
3825 | idr_destroy(&cm.local_id_table); | 3826 | idr_destroy(&cm.local_id_table); |
3826 | } | 3827 | } |
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 7f00347364f7..06d502c06a4d 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c | |||
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, | |||
139 | static void ib_fmr_batch_release(struct ib_fmr_pool *pool) | 139 | static void ib_fmr_batch_release(struct ib_fmr_pool *pool) |
140 | { | 140 | { |
141 | int ret; | 141 | int ret; |
142 | struct ib_pool_fmr *fmr, *next; | 142 | struct ib_pool_fmr *fmr; |
143 | LIST_HEAD(unmap_list); | 143 | LIST_HEAD(unmap_list); |
144 | LIST_HEAD(fmr_list); | 144 | LIST_HEAD(fmr_list); |
145 | 145 | ||
@@ -158,20 +158,6 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool) | |||
158 | #endif | 158 | #endif |
159 | } | 159 | } |
160 | 160 | ||
161 | /* | ||
162 | * The free_list may hold FMRs that have been put there | ||
163 | * because they haven't reached the max_remap count. | ||
164 | * Invalidate their mapping as well. | ||
165 | */ | ||
166 | list_for_each_entry_safe(fmr, next, &pool->free_list, list) { | ||
167 | if (fmr->remap_count == 0) | ||
168 | continue; | ||
169 | hlist_del_init(&fmr->cache_node); | ||
170 | fmr->remap_count = 0; | ||
171 | list_add_tail(&fmr->fmr->list, &fmr_list); | ||
172 | list_move(&fmr->list, &unmap_list); | ||
173 | } | ||
174 | |||
175 | list_splice(&pool->dirty_list, &unmap_list); | 161 | list_splice(&pool->dirty_list, &unmap_list); |
176 | INIT_LIST_HEAD(&pool->dirty_list); | 162 | INIT_LIST_HEAD(&pool->dirty_list); |
177 | pool->dirty_len = 0; | 163 | pool->dirty_len = 0; |
@@ -384,6 +370,11 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) | |||
384 | 370 | ||
385 | i = 0; | 371 | i = 0; |
386 | list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { | 372 | list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { |
373 | if (fmr->remap_count) { | ||
374 | INIT_LIST_HEAD(&fmr_list); | ||
375 | list_add_tail(&fmr->fmr->list, &fmr_list); | ||
376 | ib_unmap_fmr(&fmr_list); | ||
377 | } | ||
387 | ib_dealloc_fmr(fmr->fmr); | 378 | ib_dealloc_fmr(fmr->fmr); |
388 | list_del(&fmr->list); | 379 | list_del(&fmr->list); |
389 | kfree(fmr); | 380 | kfree(fmr); |
@@ -407,8 +398,23 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool); | |||
407 | */ | 398 | */ |
408 | int ib_flush_fmr_pool(struct ib_fmr_pool *pool) | 399 | int ib_flush_fmr_pool(struct ib_fmr_pool *pool) |
409 | { | 400 | { |
410 | int serial = atomic_inc_return(&pool->req_ser); | 401 | int serial; |
402 | struct ib_pool_fmr *fmr, *next; | ||
403 | |||
404 | /* | ||
405 | * The free_list holds FMRs that may have been used | ||
406 | * but have not been remapped enough times to be dirty. | ||
407 | * Put them on the dirty list now so that the cleanup | ||
408 | * thread will reap them too. | ||
409 | */ | ||
410 | spin_lock_irq(&pool->pool_lock); | ||
411 | list_for_each_entry_safe(fmr, next, &pool->free_list, list) { | ||
412 | if (fmr->remap_count > 0) | ||
413 | list_move(&fmr->list, &pool->dirty_list); | ||
414 | } | ||
415 | spin_unlock_irq(&pool->pool_lock); | ||
411 | 416 | ||
417 | serial = atomic_inc_return(&pool->req_ser); | ||
412 | wake_up_process(pool->thread); | 418 | wake_up_process(pool->thread); |
413 | 419 | ||
414 | if (wait_event_interruptible(pool->force_wait, | 420 | if (wait_event_interruptible(pool->force_wait, |
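The flush path above first walks the free list under pool_lock and moves every FMR that has been mapped at least once onto the dirty list, so the cleanup thread it then wakes will unmap those as well. A reduced sketch of that "promote, then kick the worker" step, with hypothetical types in place of the ib_fmr_pool structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_fmr {
	struct list_head list;
	int remap_count;
};

struct demo_pool {
	spinlock_t lock;
	struct list_head free_list;
	struct list_head dirty_list;
};

/* Move used-but-not-yet-dirty entries so the reaper also unmaps them. */
static void demo_promote_used_fmrs(struct demo_pool *pool)
{
	struct demo_fmr *fmr, *next;

	spin_lock_irq(&pool->lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->lock);
}

list_for_each_entry_safe() is required because list_move() unlinks the entry the cursor is standing on.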
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 223b1aa7d92b..81c9195b512a 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -839,6 +839,7 @@ static void cm_work_handler(struct work_struct *_work) | |||
839 | unsigned long flags; | 839 | unsigned long flags; |
840 | int empty; | 840 | int empty; |
841 | int ret = 0; | 841 | int ret = 0; |
842 | int destroy_id; | ||
842 | 843 | ||
843 | spin_lock_irqsave(&cm_id_priv->lock, flags); | 844 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
844 | empty = list_empty(&cm_id_priv->work_list); | 845 | empty = list_empty(&cm_id_priv->work_list); |
@@ -857,9 +858,9 @@ static void cm_work_handler(struct work_struct *_work) | |||
857 | destroy_cm_id(&cm_id_priv->id); | 858 | destroy_cm_id(&cm_id_priv->id); |
858 | } | 859 | } |
859 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); | 860 | BUG_ON(atomic_read(&cm_id_priv->refcount)==0); |
861 | destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags); | ||
860 | if (iwcm_deref_id(cm_id_priv)) { | 862 | if (iwcm_deref_id(cm_id_priv)) { |
861 | if (test_bit(IWCM_F_CALLBACK_DESTROY, | 863 | if (destroy_id) { |
862 | &cm_id_priv->flags)) { | ||
863 | BUG_ON(!list_empty(&cm_id_priv->work_list)); | 864 | BUG_ON(!list_empty(&cm_id_priv->work_list)); |
864 | free_cm_id(cm_id_priv); | 865 | free_cm_id(cm_id_priv); |
865 | } | 866 | } |
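Note: the iwcm.c hunk fixes an ordering problem around the final reference drop. IWCM_F_CALLBACK_DESTROY is now sampled into destroy_id while the work handler still holds its reference, because once iwcm_deref_id() releases the last reference the cm_id_priv may be freed and testing its flags afterwards would be a use-after-free. A small illustration of the pattern, with simplified non-atomic types standing in for the kernel's refcounting:

/* Anything still needed from a refcounted object must be read
 * *before* dropping the reference, because the drop may be the last
 * one.  Illustrative types only; the kernel uses atomic_t. */
#include <stdbool.h>
#include <stdlib.h>

struct obj {
    int refcount;
    unsigned long flags;
};

#define F_CALLBACK_DESTROY  0x1

static bool deref_obj(struct obj *o)        /* returns true on last put */
{
    return --o->refcount == 0;
}

static void work_done(struct obj *o)
{
    /* read the flag while the reference still pins the object */
    bool destroy = o->flags & F_CALLBACK_DESTROY;

    if (deref_obj(o)) {
        if (destroy)
            free(o);                        /* stands in for free_cm_id() */
    }
    /* touching o->flags here would be a use-after-free */
}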
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index 73bfd1656f86..b8797c66676d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c | |||
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list, | |||
136 | 136 | ||
137 | /* Find largest page shift we can use to cover buffers */ | 137 | /* Find largest page shift we can use to cover buffers */ |
138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) | 138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) |
139 | if (num_phys_buf > 1) { | 139 | if ((1ULL << *shift) & mask) |
140 | if ((1ULL << *shift) & mask) | 140 | break; |
141 | break; | ||
142 | } else | ||
143 | if (1ULL << *shift >= | ||
144 | buffer_list[0].size + | ||
145 | (buffer_list[0].addr & ((1ULL << *shift) - 1))) | ||
146 | break; | ||
147 | 141 | ||
148 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); | 142 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); |
149 | buffer_list[0].addr &= ~0ull << *shift; | 143 | buffer_list[0].addr &= ~0ull << *shift; |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index df1838f8f94d..b2ea9210467f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -189,7 +189,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
189 | return ERR_PTR(-ENOMEM); | 189 | return ERR_PTR(-ENOMEM); |
190 | } | 190 | } |
191 | chp->rhp = rhp; | 191 | chp->rhp = rhp; |
192 | chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1; | 192 | chp->ibcq.cqe = 1 << chp->cq.size_log2; |
193 | spin_lock_init(&chp->lock); | 193 | spin_lock_init(&chp->lock); |
194 | atomic_set(&chp->refcnt, 1); | 194 | atomic_set(&chp->refcnt, 1); |
195 | init_waitqueue_head(&chp->wait); | 195 | init_waitqueue_head(&chp->wait); |
@@ -819,8 +819,11 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd, | |||
819 | kfree(qhp); | 819 | kfree(qhp); |
820 | return ERR_PTR(-ENOMEM); | 820 | return ERR_PTR(-ENOMEM); |
821 | } | 821 | } |
822 | |||
822 | attrs->cap.max_recv_wr = rqsize - 1; | 823 | attrs->cap.max_recv_wr = rqsize - 1; |
823 | attrs->cap.max_send_wr = sqsize; | 824 | attrs->cap.max_send_wr = sqsize; |
825 | attrs->cap.max_inline_data = T3_MAX_INLINE; | ||
826 | |||
824 | qhp->rhp = rhp; | 827 | qhp->rhp = rhp; |
825 | qhp->attr.pd = php->pdid; | 828 | qhp->attr.pd = php->pdid; |
826 | qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; | 829 | qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid; |
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h index 414621095540..591901aab6b7 100644 --- a/drivers/infiniband/hw/ipath/ipath_common.h +++ b/drivers/infiniband/hw/ipath/ipath_common.h | |||
@@ -75,7 +75,7 @@ | |||
75 | #define IPATH_IB_LINKDOWN 0 | 75 | #define IPATH_IB_LINKDOWN 0 |
76 | #define IPATH_IB_LINKARM 1 | 76 | #define IPATH_IB_LINKARM 1 |
77 | #define IPATH_IB_LINKACTIVE 2 | 77 | #define IPATH_IB_LINKACTIVE 2 |
78 | #define IPATH_IB_LINKINIT 3 | 78 | #define IPATH_IB_LINKDOWN_ONLY 3 |
79 | #define IPATH_IB_LINKDOWN_SLEEP 4 | 79 | #define IPATH_IB_LINKDOWN_SLEEP 4 |
80 | #define IPATH_IB_LINKDOWN_DISABLE 5 | 80 | #define IPATH_IB_LINKDOWN_DISABLE 5 |
81 | #define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ | 81 | #define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */ |
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index d5ff6ca2db30..ca4d0acc6786 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -851,8 +851,7 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first, | |||
851 | * -ETIMEDOUT state can have multiple states set, for any of several | 851 | * -ETIMEDOUT state can have multiple states set, for any of several |
852 | * transitions. | 852 | * transitions. |
853 | */ | 853 | */ |
854 | static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, | 854 | int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs) |
855 | int msecs) | ||
856 | { | 855 | { |
857 | dd->ipath_state_wanted = state; | 856 | dd->ipath_state_wanted = state; |
858 | wait_event_interruptible_timeout(ipath_state_wait, | 857 | wait_event_interruptible_timeout(ipath_state_wait, |
@@ -1656,8 +1655,8 @@ void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl) | |||
1656 | static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | 1655 | static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) |
1657 | { | 1656 | { |
1658 | static const char *what[4] = { | 1657 | static const char *what[4] = { |
1659 | [0] = "DOWN", | 1658 | [0] = "NOP", |
1660 | [INFINIPATH_IBCC_LINKCMD_INIT] = "INIT", | 1659 | [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN", |
1661 | [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED", | 1660 | [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED", |
1662 | [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE" | 1661 | [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE" |
1663 | }; | 1662 | }; |
@@ -1672,9 +1671,9 @@ static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) | |||
1672 | (dd, dd->ipath_kregs->kr_ibcstatus) >> | 1671 | (dd, dd->ipath_kregs->kr_ibcstatus) >> |
1673 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & | 1672 | INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & |
1674 | INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); | 1673 | INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); |
1675 | /* flush all queued sends when going to DOWN or INIT, to be sure that | 1674 | /* flush all queued sends when going to DOWN to be sure that |
1676 | * they don't block MAD packets */ | 1675 | * they don't block MAD packets */ |
1677 | if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) | 1676 | if (linkcmd == INFINIPATH_IBCC_LINKCMD_DOWN) |
1678 | ipath_cancel_sends(dd, 1); | 1677 | ipath_cancel_sends(dd, 1); |
1679 | 1678 | ||
1680 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, | 1679 | ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, |
@@ -1687,6 +1686,13 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) | |||
1687 | int ret; | 1686 | int ret; |
1688 | 1687 | ||
1689 | switch (newstate) { | 1688 | switch (newstate) { |
1689 | case IPATH_IB_LINKDOWN_ONLY: | ||
1690 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN << | ||
1691 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
1692 | /* don't wait */ | ||
1693 | ret = 0; | ||
1694 | goto bail; | ||
1695 | |||
1690 | case IPATH_IB_LINKDOWN: | 1696 | case IPATH_IB_LINKDOWN: |
1691 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << | 1697 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << |
1692 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); | 1698 | INFINIPATH_IBCC_LINKINITCMD_SHIFT); |
@@ -1709,16 +1715,6 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) | |||
1709 | ret = 0; | 1715 | ret = 0; |
1710 | goto bail; | 1716 | goto bail; |
1711 | 1717 | ||
1712 | case IPATH_IB_LINKINIT: | ||
1713 | if (dd->ipath_flags & IPATH_LINKINIT) { | ||
1714 | ret = 0; | ||
1715 | goto bail; | ||
1716 | } | ||
1717 | ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT << | ||
1718 | INFINIPATH_IBCC_LINKCMD_SHIFT); | ||
1719 | lstate = IPATH_LINKINIT; | ||
1720 | break; | ||
1721 | |||
1722 | case IPATH_IB_LINKARM: | 1718 | case IPATH_IB_LINKARM: |
1723 | if (dd->ipath_flags & IPATH_LINKARMED) { | 1719 | if (dd->ipath_flags & IPATH_LINKARMED) { |
1724 | ret = 0; | 1720 | ret = 0; |
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 4cc0f95ea877..ecf3f7ff7717 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h | |||
@@ -767,6 +767,7 @@ void ipath_kreceive(struct ipath_portdata *); | |||
767 | int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); | 767 | int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); |
768 | int ipath_reset_device(int); | 768 | int ipath_reset_device(int); |
769 | void ipath_get_faststats(unsigned long); | 769 | void ipath_get_faststats(unsigned long); |
770 | int ipath_wait_linkstate(struct ipath_devdata *, u32, int); | ||
770 | int ipath_set_linkstate(struct ipath_devdata *, u8); | 771 | int ipath_set_linkstate(struct ipath_devdata *, u8); |
771 | int ipath_set_mtu(struct ipath_devdata *, u16); | 772 | int ipath_set_mtu(struct ipath_devdata *, u16); |
772 | int ipath_set_lid(struct ipath_devdata *, u32, u8); | 773 | int ipath_set_lid(struct ipath_devdata *, u32, u8); |
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index d98d5f103700..b34b91d3723a 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c | |||
@@ -555,10 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
555 | /* FALLTHROUGH */ | 555 | /* FALLTHROUGH */ |
556 | case IB_PORT_DOWN: | 556 | case IB_PORT_DOWN: |
557 | if (lstate == 0) | 557 | if (lstate == 0) |
558 | if (get_linkdowndefaultstate(dd)) | 558 | lstate = IPATH_IB_LINKDOWN_ONLY; |
559 | lstate = IPATH_IB_LINKDOWN_SLEEP; | ||
560 | else | ||
561 | lstate = IPATH_IB_LINKDOWN; | ||
562 | else if (lstate == 1) | 559 | else if (lstate == 1) |
563 | lstate = IPATH_IB_LINKDOWN_SLEEP; | 560 | lstate = IPATH_IB_LINKDOWN_SLEEP; |
564 | else if (lstate == 2) | 561 | else if (lstate == 2) |
@@ -568,6 +565,8 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, | |||
568 | else | 565 | else |
569 | goto err; | 566 | goto err; |
570 | ipath_set_linkstate(dd, lstate); | 567 | ipath_set_linkstate(dd, lstate); |
568 | ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED | | ||
569 | IPATH_LINKACTIVE, 1000); | ||
571 | break; | 570 | break; |
572 | case IB_PORT_ARMED: | 571 | case IB_PORT_ARMED: |
573 | ipath_set_linkstate(dd, IPATH_IB_LINKARM); | 572 | ipath_set_linkstate(dd, IPATH_IB_LINKARM); |
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 80dc623cee40..087ed3166479 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c | |||
@@ -329,8 +329,9 @@ struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn) | |||
329 | /** | 329 | /** |
330 | * ipath_reset_qp - initialize the QP state to the reset state | 330 | * ipath_reset_qp - initialize the QP state to the reset state |
331 | * @qp: the QP to reset | 331 | * @qp: the QP to reset |
332 | * @type: the QP type | ||
332 | */ | 333 | */ |
333 | static void ipath_reset_qp(struct ipath_qp *qp) | 334 | static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type) |
334 | { | 335 | { |
335 | qp->remote_qpn = 0; | 336 | qp->remote_qpn = 0; |
336 | qp->qkey = 0; | 337 | qp->qkey = 0; |
@@ -342,7 +343,7 @@ static void ipath_reset_qp(struct ipath_qp *qp) | |||
342 | qp->s_psn = 0; | 343 | qp->s_psn = 0; |
343 | qp->r_psn = 0; | 344 | qp->r_psn = 0; |
344 | qp->r_msn = 0; | 345 | qp->r_msn = 0; |
345 | if (qp->ibqp.qp_type == IB_QPT_RC) { | 346 | if (type == IB_QPT_RC) { |
346 | qp->s_state = IB_OPCODE_RC_SEND_LAST; | 347 | qp->s_state = IB_OPCODE_RC_SEND_LAST; |
347 | qp->r_state = IB_OPCODE_RC_SEND_LAST; | 348 | qp->r_state = IB_OPCODE_RC_SEND_LAST; |
348 | } else { | 349 | } else { |
@@ -414,7 +415,7 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) | |||
414 | wc.wr_id = qp->r_wr_id; | 415 | wc.wr_id = qp->r_wr_id; |
415 | wc.opcode = IB_WC_RECV; | 416 | wc.opcode = IB_WC_RECV; |
416 | wc.status = err; | 417 | wc.status = err; |
417 | ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); | 418 | ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); |
418 | } | 419 | } |
419 | wc.status = IB_WC_WR_FLUSH_ERR; | 420 | wc.status = IB_WC_WR_FLUSH_ERR; |
420 | 421 | ||
@@ -534,7 +535,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
534 | 535 | ||
535 | switch (new_state) { | 536 | switch (new_state) { |
536 | case IB_QPS_RESET: | 537 | case IB_QPS_RESET: |
537 | ipath_reset_qp(qp); | 538 | ipath_reset_qp(qp, ibqp->qp_type); |
538 | break; | 539 | break; |
539 | 540 | ||
540 | case IB_QPS_ERR: | 541 | case IB_QPS_ERR: |
@@ -647,7 +648,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
647 | attr->port_num = 1; | 648 | attr->port_num = 1; |
648 | attr->timeout = qp->timeout; | 649 | attr->timeout = qp->timeout; |
649 | attr->retry_cnt = qp->s_retry_cnt; | 650 | attr->retry_cnt = qp->s_retry_cnt; |
650 | attr->rnr_retry = qp->s_rnr_retry; | 651 | attr->rnr_retry = qp->s_rnr_retry_cnt; |
651 | attr->alt_port_num = 0; | 652 | attr->alt_port_num = 0; |
652 | attr->alt_timeout = 0; | 653 | attr->alt_timeout = 0; |
653 | 654 | ||
@@ -839,7 +840,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, | |||
839 | goto bail_qp; | 840 | goto bail_qp; |
840 | } | 841 | } |
841 | qp->ip = NULL; | 842 | qp->ip = NULL; |
842 | ipath_reset_qp(qp); | 843 | ipath_reset_qp(qp, init_attr->qp_type); |
843 | break; | 844 | break; |
844 | 845 | ||
845 | default: | 846 | default: |
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 459e46e2c016..40f3e37d7adc 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c | |||
@@ -1196,6 +1196,10 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev, | |||
1196 | list_move_tail(&qp->timerwait, | 1196 | list_move_tail(&qp->timerwait, |
1197 | &dev->pending[dev->pending_index]); | 1197 | &dev->pending[dev->pending_index]); |
1198 | spin_unlock(&dev->pending_lock); | 1198 | spin_unlock(&dev->pending_lock); |
1199 | |||
1200 | if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) | ||
1201 | qp->s_retry = qp->s_retry_cnt; | ||
1202 | |||
1199 | /* | 1203 | /* |
1200 | * Update the RDMA receive state but do the copy w/o | 1204 | * Update the RDMA receive state but do the copy w/o |
1201 | * holding the locks and blocking interrupts. | 1205 | * holding the locks and blocking interrupts. |
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index 6d2a17f9c1da..92ad73a7fff0 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h | |||
@@ -185,7 +185,7 @@ | |||
185 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 | 185 | #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 |
186 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 | 186 | #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 |
187 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL | 187 | #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL |
188 | #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ | 188 | #define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */ |
189 | #define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ | 189 | #define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */ |
190 | #define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ | 190 | #define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */ |
191 | #define INFINIPATH_IBCC_LINKCMD_SHIFT 18 | 191 | #define INFINIPATH_IBCC_LINKCMD_SHIFT 18 |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7f8853b44ee1..b2112f5a422f 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
567 | 567 | ||
568 | /* Init the adapter */ | 568 | /* Init the adapter */ |
569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); | 569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); |
570 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
571 | if (!nesdev->nesadapter) { | 570 | if (!nesdev->nesadapter) { |
572 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); | 571 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); |
573 | ret = -ENOMEM; | 572 | ret = -ENOMEM; |
574 | goto bail5; | 573 | goto bail5; |
575 | } | 574 | } |
575 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
576 | 576 | ||
577 | /* nesdev->base_doorbell_index = | 577 | /* nesdev->base_doorbell_index = |
578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ | 578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index fd57e8a1582f..a48b288618ec 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -285,6 +285,21 @@ struct nes_device { | |||
285 | }; | 285 | }; |
286 | 286 | ||
287 | 287 | ||
288 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | ||
289 | { | ||
290 | u32 crc_value; | ||
291 | crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad)); | ||
292 | |||
293 | /* | ||
294 | * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc | ||
295 | * state in cpu order"), behavior of crc32c changes on | ||
296 | * big-endian platforms. Our algorithm expects the previous | ||
297 | * behavior; otherwise we have RDMA connection establishment | ||
298 | * issue on big-endian. | ||
299 | */ | ||
300 | return cpu_to_le32(crc_value); | ||
301 | } | ||
302 | |||
288 | static inline void | 303 | static inline void |
289 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | 304 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) |
290 | { | 305 | { |
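Note: the new get_crc_value() helper above exists because, as its comment says, commit ef19454b made crc32c() keep its intermediate state in CPU order, which changed the computed hash key on big-endian machines; converting the result to little-endian restores the byte order the connection-establishment hash expects. A rough stand-alone sketch of the idea, where crc32c_sw() is only a placeholder and not a real CRC32C implementation:

#include <stddef.h>
#include <stdint.h>

/* Placeholder for a real CRC32C routine (the kernel uses crc32c()). */
static uint32_t crc32c_sw(uint32_t seed, const void *buf, size_t len)
{
    const uint8_t *p = buf;
    uint32_t crc = seed;
    while (len--)
        crc = (crc << 1) ^ *p++;    /* NOT CRC32C; stand-in only */
    return crc;
}

static inline uint32_t to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap32(v);    /* force little-endian byte order */
#else
    return v;                       /* already little-endian */
#endif
}

/* Mirrors the intent of get_crc_value(): keep the CRC in the byte
 * order the hash hardware expects, regardless of host endianness. */
static uint32_t quad_hash_crc(const void *quad, size_t len)
{
    return to_le32(crc32c_sw(~0u, quad, len));
}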
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index bd5cfeaac203..39adb267fb15 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
370 | int ret = 0; | 370 | int ret = 0; |
371 | u32 was_timer_set; | 371 | u32 was_timer_set; |
372 | 372 | ||
373 | if (!cm_node) | ||
374 | return -EINVAL; | ||
373 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | 375 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); |
374 | if (!new_send) | 376 | if (!new_send) |
375 | return -1; | 377 | return -1; |
376 | if (!cm_node) | ||
377 | return -EINVAL; | ||
378 | 378 | ||
379 | /* new_send->timetosend = currenttime */ | 379 | /* new_send->timetosend = currenttime */ |
380 | new_send->retrycount = NES_DEFAULT_RETRYS; | 380 | new_send->retrycount = NES_DEFAULT_RETRYS; |
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
948 | 948 | ||
949 | kfree(listener); | 949 | kfree(listener); |
950 | listener = NULL; | ||
950 | ret = 0; | 951 | ret = 0; |
951 | cm_listens_destroyed++; | 952 | cm_listens_destroyed++; |
952 | } else { | 953 | } else { |
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2319 | struct iw_cm_event cm_event; | 2320 | struct iw_cm_event cm_event; |
2320 | struct nes_hw_qp_wqe *wqe; | 2321 | struct nes_hw_qp_wqe *wqe; |
2321 | struct nes_v4_quad nes_quad; | 2322 | struct nes_v4_quad nes_quad; |
2323 | u32 crc_value; | ||
2322 | int ret; | 2324 | int ret; |
2323 | 2325 | ||
2324 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 2326 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2435 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2437 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2436 | 2438 | ||
2437 | /* Produce hash key */ | 2439 | /* Produce hash key */ |
2438 | nesqp->hte_index = cpu_to_be32( | 2440 | crc_value = get_crc_value(&nes_quad); |
2439 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2441 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2440 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 2442 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2441 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 2443 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2442 | 2444 | ||
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2750 | struct iw_cm_event cm_event; | 2752 | struct iw_cm_event cm_event; |
2751 | struct nes_hw_qp_wqe *wqe; | 2753 | struct nes_hw_qp_wqe *wqe; |
2752 | struct nes_v4_quad nes_quad; | 2754 | struct nes_v4_quad nes_quad; |
2755 | u32 crc_value; | ||
2753 | int ret; | 2756 | int ret; |
2754 | 2757 | ||
2755 | /* get all our handles */ | 2758 | /* get all our handles */ |
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2827 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2830 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2828 | 2831 | ||
2829 | /* Produce hash key */ | 2832 | /* Produce hash key */ |
2830 | nesqp->hte_index = cpu_to_be32( | 2833 | crc_value = get_crc_value(&nes_quad); |
2831 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2834 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2832 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 2835 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
2833 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 2836 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
2834 | 2837 | ||
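Note: besides switching the hash-key computation to get_crc_value(), the nes_cm.c hunks reorder schedule_nes_timer() so the cm_node argument is validated before new_send is allocated; with the old order an early -EINVAL return leaked the allocation. A minimal sketch of that check-before-allocate pattern, with made-up types:

#include <errno.h>
#include <stdlib.h>

struct cm_node;                       /* opaque, illustrative */
struct timer_entry { int retrycount; };

static int schedule_send(struct cm_node *node)
{
    struct timer_entry *entry;

    if (!node)                        /* validate arguments first ...        */
        return -EINVAL;

    entry = calloc(1, sizeof(*entry));
    if (!entry)                       /* ... so an early error leaks nothing */
        return -1;

    entry->retrycount = 3;
    /* ... in the driver, entry is queued on the node's timer list ... */
    free(entry);                      /* placeholder for that hand-off       */
    return 0;
}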
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 7c4c0fbf0abd..49e53e4c1ebe 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev) | |||
156 | 156 | ||
157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | 157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); |
158 | 158 | ||
159 | if (shared_timer->cq_count_old < cq_count) { | 159 | if (shared_timer->cq_count_old <= cq_count) |
160 | if (cq_count > shared_timer->threshold_low) | 160 | shared_timer->cq_direction_downward = 0; |
161 | shared_timer->cq_direction_downward=0; | 161 | else |
162 | } | ||
163 | if (shared_timer->cq_count_old >= cq_count) | ||
164 | shared_timer->cq_direction_downward++; | 162 | shared_timer->cq_direction_downward++; |
165 | shared_timer->cq_count_old = cq_count; | 163 | shared_timer->cq_count_old = cq_count; |
166 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { | 164 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { |
167 | if (cq_count <= shared_timer->threshold_low) { | 165 | if (cq_count <= shared_timer->threshold_low && |
166 | shared_timer->threshold_low > 4) { | ||
168 | shared_timer->threshold_low = shared_timer->threshold_low/2; | 167 | shared_timer->threshold_low = shared_timer->threshold_low/2; |
169 | shared_timer->cq_direction_downward=0; | 168 | shared_timer->cq_direction_downward=0; |
170 | nesdev->currcq_count = 0; | 169 | nesdev->currcq_count = 0; |
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev) | |||
1728 | nesdev->int_req &= ~NES_INT_TIMER; | 1727 | nesdev->int_req &= ~NES_INT_TIMER; |
1729 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1728 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1730 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1729 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1731 | nesadapter->tune_timer.timer_in_use_old = 0; | ||
1732 | } | 1730 | } |
1733 | nesdev->deepcq_count = 0; | 1731 | nesdev->deepcq_count = 0; |
1734 | return 1; | 1732 | return 1; |
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param) | |||
1867 | nesdev->int_req &= ~NES_INT_TIMER; | 1865 | nesdev->int_req &= ~NES_INT_TIMER; |
1868 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1866 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1869 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1867 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1870 | nesdev->nesadapter->tune_timer.timer_in_use_old = 0; | ||
1871 | } else { | 1868 | } else { |
1872 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | 1869 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); |
1873 | } | 1870 | } |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1e10df550c9e..b7e2844f096b 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -962,7 +962,7 @@ struct nes_arp_entry { | |||
962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 | 962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 |
963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 | 963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 |
964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 | 964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 |
965 | #define NES_NIC_CQ_DOWNWARD_TREND 8 | 965 | #define NES_NIC_CQ_DOWNWARD_TREND 16 |
966 | 966 | ||
967 | struct nes_hw_tune_timer { | 967 | struct nes_hw_tune_timer { |
968 | //u16 cq_count; | 968 | //u16 cq_count; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4dafbe16e82a..a651e9d9f0ef 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, | |||
929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); | 929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); |
930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", | 930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", |
931 | nespd->mmap_db_index, nespd->pd_id); | 931 | nespd->mmap_db_index, nespd->pd_id); |
932 | if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { | 932 | if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { |
933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); | 933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); |
934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | 934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); |
935 | kfree(nespd); | 935 | kfree(nespd); |
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
1327 | (long long unsigned int)req.user_wqe_buffers); | 1327 | (long long unsigned int)req.user_wqe_buffers); |
1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | 1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); |
1329 | kfree(nesqp->allocated_buffer); | 1329 | kfree(nesqp->allocated_buffer); |
1330 | return ERR_PTR(-ENOMEM); | 1330 | return ERR_PTR(-EFAULT); |
1331 | } | 1331 | } |
1332 | } | 1332 | } |
1333 | 1333 | ||
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1674 | } | 1674 | } |
1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", | 1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", |
1676 | (unsigned long)req.user_cq_buffer, entries); | 1676 | (unsigned long)req.user_cq_buffer, entries); |
1677 | err = 1; | ||
1677 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { | 1678 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { |
1678 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { | 1679 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { |
1679 | list_del(&nespbl->list); | 1680 | list_del(&nespbl->list); |
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1686 | if (err) { | 1687 | if (err) { |
1687 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | 1688 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); |
1688 | kfree(nescq); | 1689 | kfree(nescq); |
1689 | return ERR_PTR(err); | 1690 | return ERR_PTR(-EFAULT); |
1690 | } | 1691 | } |
1691 | 1692 | ||
1692 | pbl_entries = nespbl->pbl_size >> 3; | 1693 | pbl_entries = nespbl->pbl_size >> 3; |
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1831 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 1832 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
1832 | } | 1833 | } |
1833 | } | 1834 | } |
1834 | nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," | ||
1835 | " minor code = 0x%04X\n", | ||
1836 | nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); | ||
1837 | if (!context) | 1835 | if (!context) |
1838 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1836 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1839 | nescq->hw_cq.cq_pbase); | 1837 | nescq->hw_cq.cq_pbase); |
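Note: the nes_alloc_pd() hunk above is a textbook off-by-one fix: with NES_MAX_USER_DB_REGIONS doorbell slots the valid indices are 0..N-1, so the error path must trigger on index >= N rather than index > N. A tiny demo of the difference:

#include <stdio.h>

#define MAX_REGIONS 4   /* stands in for NES_MAX_USER_DB_REGIONS */

int main(void)
{
    int index = MAX_REGIONS;            /* "no free slot found" result  */

    if (index > MAX_REGIONS)            /* old test: index == 4 slips by */
        printf("old check rejects %d\n", index);
    if (index >= MAX_REGIONS)           /* new test: correctly rejected  */
        printf("new check rejects %d\n", index);
    return 0;
}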
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 52b1bebfa744..2490b2d79dbb 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/icmp.h> | 38 | #include <net/icmp.h> |
39 | #include <linux/icmpv6.h> | 39 | #include <linux/icmpv6.h> |
40 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
41 | #include <linux/vmalloc.h> | ||
41 | 42 | ||
42 | #include "ipoib.h" | 43 | #include "ipoib.h" |
43 | 44 | ||
@@ -637,6 +638,7 @@ static inline int post_send(struct ipoib_dev_priv *priv, | |||
637 | priv->tx_sge[0].addr = addr; | 638 | priv->tx_sge[0].addr = addr; |
638 | priv->tx_sge[0].length = len; | 639 | priv->tx_sge[0].length = len; |
639 | 640 | ||
641 | priv->tx_wr.num_sge = 1; | ||
640 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; | 642 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; |
641 | 643 | ||
642 | return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); | 644 | return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); |
@@ -1030,13 +1032,13 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, | |||
1030 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | 1032 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); |
1031 | int ret; | 1033 | int ret; |
1032 | 1034 | ||
1033 | p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, | 1035 | p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring); |
1034 | GFP_KERNEL); | ||
1035 | if (!p->tx_ring) { | 1036 | if (!p->tx_ring) { |
1036 | ipoib_warn(priv, "failed to allocate tx ring\n"); | 1037 | ipoib_warn(priv, "failed to allocate tx ring\n"); |
1037 | ret = -ENOMEM; | 1038 | ret = -ENOMEM; |
1038 | goto err_tx; | 1039 | goto err_tx; |
1039 | } | 1040 | } |
1041 | memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring); | ||
1040 | 1042 | ||
1041 | p->qp = ipoib_cm_create_tx_qp(p->dev, p); | 1043 | p->qp = ipoib_cm_create_tx_qp(p->dev, p); |
1042 | if (IS_ERR(p->qp)) { | 1044 | if (IS_ERR(p->qp)) { |
@@ -1077,6 +1079,7 @@ err_id: | |||
1077 | ib_destroy_qp(p->qp); | 1079 | ib_destroy_qp(p->qp); |
1078 | err_qp: | 1080 | err_qp: |
1079 | p->qp = NULL; | 1081 | p->qp = NULL; |
1082 | vfree(p->tx_ring); | ||
1080 | err_tx: | 1083 | err_tx: |
1081 | return ret; | 1084 | return ret; |
1082 | } | 1085 | } |
@@ -1127,7 +1130,7 @@ timeout: | |||
1127 | if (p->qp) | 1130 | if (p->qp) |
1128 | ib_destroy_qp(p->qp); | 1131 | ib_destroy_qp(p->qp); |
1129 | 1132 | ||
1130 | kfree(p->tx_ring); | 1133 | vfree(p->tx_ring); |
1131 | kfree(p); | 1134 | kfree(p); |
1132 | } | 1135 | } |
1133 | 1136 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index f96477a8ca5a..57282048865c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/init.h> | 41 | #include <linux/init.h> |
42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
43 | #include <linux/kernel.h> | 43 | #include <linux/kernel.h> |
44 | #include <linux/vmalloc.h> | ||
44 | 45 | ||
45 | #include <linux/if_arp.h> /* For ARPHRD_xxx */ | 46 | #include <linux/if_arp.h> /* For ARPHRD_xxx */ |
46 | 47 | ||
@@ -887,13 +888,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
887 | goto out; | 888 | goto out; |
888 | } | 889 | } |
889 | 890 | ||
890 | priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring, | 891 | priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring); |
891 | GFP_KERNEL); | ||
892 | if (!priv->tx_ring) { | 892 | if (!priv->tx_ring) { |
893 | printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", | 893 | printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n", |
894 | ca->name, ipoib_sendq_size); | 894 | ca->name, ipoib_sendq_size); |
895 | goto out_rx_ring_cleanup; | 895 | goto out_rx_ring_cleanup; |
896 | } | 896 | } |
897 | memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring); | ||
897 | 898 | ||
898 | /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ | 899 | /* priv->tx_head, tx_tail & tx_outstanding are already 0 */ |
899 | 900 | ||
@@ -903,7 +904,7 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
903 | return 0; | 904 | return 0; |
904 | 905 | ||
905 | out_tx_ring_cleanup: | 906 | out_tx_ring_cleanup: |
906 | kfree(priv->tx_ring); | 907 | vfree(priv->tx_ring); |
907 | 908 | ||
908 | out_rx_ring_cleanup: | 909 | out_rx_ring_cleanup: |
909 | kfree(priv->rx_ring); | 910 | kfree(priv->rx_ring); |
@@ -928,7 +929,7 @@ void ipoib_dev_cleanup(struct net_device *dev) | |||
928 | ipoib_ib_dev_cleanup(dev); | 929 | ipoib_ib_dev_cleanup(dev); |
929 | 930 | ||
930 | kfree(priv->rx_ring); | 931 | kfree(priv->rx_ring); |
931 | kfree(priv->tx_ring); | 932 | vfree(priv->tx_ring); |
932 | 933 | ||
933 | priv->rx_ring = NULL; | 934 | priv->rx_ring = NULL; |
934 | priv->tx_ring = NULL; | 935 | priv->tx_ring = NULL; |
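Note: both ipoib hunks replace kzalloc() of the TX ring with vmalloc() plus memset() (and kfree() with vfree()). The ring is driver bookkeeping rather than a buffer handed to the device, so it does not need physically contiguous pages, and vmalloc() avoids high-order allocation failures when ipoib_sendq_size is large. A user-space analogue of the allocation, with malloc() standing in for vmalloc() and an illustrative slot type:

#include <stdlib.h>
#include <string.h>

struct tx_slot { void *skb; unsigned long long mapping; };

static struct tx_slot *alloc_tx_ring(size_t entries)
{
    /* vmalloc() in the kernel; plain malloc() stands in for it here */
    struct tx_slot *ring = malloc(entries * sizeof(*ring));

    if (!ring)
        return NULL;
    memset(ring, 0, entries * sizeof(*ring));   /* vmalloc is not zeroed */
    return ring;
}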
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 2628339e3a99..31a53c5bcb13 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -650,7 +650,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) | |||
650 | */ | 650 | */ |
651 | spin_lock(&priv->lock); | 651 | spin_lock(&priv->lock); |
652 | 652 | ||
653 | if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) || | 653 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || |
654 | !priv->broadcast || | 654 | !priv->broadcast || |
655 | !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { | 655 | !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) { |
656 | ++dev->stats.tx_dropped; | 656 | ++dev->stats.tx_dropped; |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 714b8db02b29..993f0a8ff28f 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -237,36 +237,32 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) | |||
237 | static | 237 | static |
238 | struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) | 238 | struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) |
239 | { | 239 | { |
240 | struct list_head *p_list; | 240 | struct iser_device *device; |
241 | struct iser_device *device = NULL; | ||
242 | 241 | ||
243 | mutex_lock(&ig.device_list_mutex); | 242 | mutex_lock(&ig.device_list_mutex); |
244 | 243 | ||
245 | p_list = ig.device_list.next; | 244 | list_for_each_entry(device, &ig.device_list, ig_list) |
246 | while (p_list != &ig.device_list) { | ||
247 | device = list_entry(p_list, struct iser_device, ig_list); | ||
248 | /* find if there's a match using the node GUID */ | 245 | /* find if there's a match using the node GUID */ |
249 | if (device->ib_device->node_guid == cma_id->device->node_guid) | 246 | if (device->ib_device->node_guid == cma_id->device->node_guid) |
250 | break; | 247 | goto inc_refcnt; |
251 | } | ||
252 | 248 | ||
253 | if (device == NULL) { | 249 | device = kzalloc(sizeof *device, GFP_KERNEL); |
254 | device = kzalloc(sizeof *device, GFP_KERNEL); | 250 | if (device == NULL) |
255 | if (device == NULL) | 251 | goto out; |
256 | goto out; | 252 | |
257 | /* assign this device to the device */ | 253 | /* assign this device to the device */ |
258 | device->ib_device = cma_id->device; | 254 | device->ib_device = cma_id->device; |
259 | /* init the device and link it into ig device list */ | 255 | /* init the device and link it into ig device list */ |
260 | if (iser_create_device_ib_res(device)) { | 256 | if (iser_create_device_ib_res(device)) { |
261 | kfree(device); | 257 | kfree(device); |
262 | device = NULL; | 258 | device = NULL; |
263 | goto out; | 259 | goto out; |
264 | } | ||
265 | list_add(&device->ig_list, &ig.device_list); | ||
266 | } | 260 | } |
267 | out: | 261 | list_add(&device->ig_list, &ig.device_list); |
268 | BUG_ON(device == NULL); | 262 | |
263 | inc_refcnt: | ||
269 | device->refcount++; | 264 | device->refcount++; |
265 | out: | ||
270 | mutex_unlock(&ig.device_list_mutex); | 266 | mutex_unlock(&ig.device_list_mutex); |
271 | return device; | 267 | return device; |
272 | } | 268 | } |
@@ -372,6 +368,12 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
372 | int ret; | 368 | int ret; |
373 | 369 | ||
374 | device = iser_device_find_by_ib_device(cma_id); | 370 | device = iser_device_find_by_ib_device(cma_id); |
371 | if (!device) { | ||
372 | iser_err("device lookup/creation failed\n"); | ||
373 | iser_connect_error(cma_id); | ||
374 | return; | ||
375 | } | ||
376 | |||
375 | ib_conn = (struct iser_conn *)cma_id->context; | 377 | ib_conn = (struct iser_conn *)cma_id->context; |
376 | ib_conn->device = device; | 378 | ib_conn->device = device; |
377 | 379 | ||
@@ -380,7 +382,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) | |||
380 | iser_err("resolve route failed: %d\n", ret); | 382 | iser_err("resolve route failed: %d\n", ret); |
381 | iser_connect_error(cma_id); | 383 | iser_connect_error(cma_id); |
382 | } | 384 | } |
383 | return; | ||
384 | } | 385 | } |
385 | 386 | ||
386 | static void iser_route_handler(struct rdma_cm_id *cma_id) | 387 | static void iser_route_handler(struct rdma_cm_id *cma_id) |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 8b10d9f23bef..c5263d63aca3 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP | |||
42 | 42 | ||
43 | config INPUT_APANEL | 43 | config INPUT_APANEL |
44 | tristate "Fujitsu Lifebook Application Panel buttons" | 44 | tristate "Fujitsu Lifebook Application Panel buttons" |
45 | depends on X86 | 45 | depends on X86 && I2C && LEDS_CLASS |
46 | select I2C_I801 | ||
47 | select INPUT_POLLDEV | 46 | select INPUT_POLLDEV |
48 | select CHECK_SIGNATURE | 47 | select CHECK_SIGNATURE |
49 | help | 48 | help |
50 | Say Y here for support of the Application Panel buttons, used on | 49 | Say Y here for support of the Application Panel buttons, used on |
51 | Fujitsu Lifebook. These are attached to the mainboard through | 50 | Fujitsu Lifebook. These are attached to the mainboard through |
52 | an SMBus interface managed by the I2C Intel ICH (i801) driver. | 51 | an SMBus interface managed by the I2C Intel ICH (i801) driver, |
52 | which you should also build for this kernel. | ||
53 | 53 | ||
54 | To compile this driver as a module, choose M here: the module will | 54 | To compile this driver as a module, choose M here: the module will |
55 | be called apanel. | 55 | be called apanel. |
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h index dd22d91f8b39..c972e5d03a3f 100644 --- a/drivers/input/serio/i8042.h +++ b/drivers/input/serio/i8042.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #if defined(CONFIG_MACH_JAZZ) | 17 | #if defined(CONFIG_MACH_JAZZ) |
18 | #include "i8042-jazzio.h" | 18 | #include "i8042-jazzio.h" |
19 | #elif defined(CONFIG_SGI_IP22) | 19 | #elif defined(CONFIG_SGI_HAS_I8042) |
20 | #include "i8042-ip22io.h" | 20 | #include "i8042-ip22io.h" |
21 | #elif defined(CONFIG_PPC) | 21 | #elif defined(CONFIG_PPC) |
22 | #include "i8042-ppcio.h" | 22 | #include "i8042-ppcio.h" |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index aacedec4986f..827c32c16795 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
@@ -637,7 +637,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | |||
637 | err("maximum number of devices exceeded"); | 637 | err("maximum number of devices exceeded"); |
638 | return NULL; | 638 | return NULL; |
639 | } | 639 | } |
640 | mutex_init(&cs->mutex); | ||
641 | 640 | ||
642 | gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); | 641 | gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); |
643 | cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); | 642 | cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); |
@@ -898,8 +897,10 @@ int gigaset_shutdown(struct cardstate *cs) | |||
898 | { | 897 | { |
899 | mutex_lock(&cs->mutex); | 898 | mutex_lock(&cs->mutex); |
900 | 899 | ||
901 | if (!(cs->flags & VALID_MINOR)) | 900 | if (!(cs->flags & VALID_MINOR)) { |
901 | mutex_unlock(&cs->mutex); | ||
902 | return -1; | 902 | return -1; |
903 | } | ||
903 | 904 | ||
904 | cs->waiting = 1; | 905 | cs->waiting = 1; |
905 | 906 | ||
@@ -1086,6 +1087,7 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | |||
1086 | drv->cs[i].driver = drv; | 1087 | drv->cs[i].driver = drv; |
1087 | drv->cs[i].ops = drv->ops; | 1088 | drv->cs[i].ops = drv->ops; |
1088 | drv->cs[i].minor_index = i; | 1089 | drv->cs[i].minor_index = i; |
1090 | mutex_init(&drv->cs[i].mutex); | ||
1089 | } | 1091 | } |
1090 | 1092 | ||
1091 | gigaset_if_initdriver(drv, procname, devname); | 1093 | gigaset_if_initdriver(drv, procname, devname); |
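Note: the gigaset changes address two lock-related problems: gigaset_shutdown() previously returned with cs->mutex still held when the VALID_MINOR flag was clear, and the per-device mutex is now initialised in gigaset_initdriver(), where the cardstate slots are created, rather than later in gigaset_initcs(). A pthread-based sketch of the unlock-on-early-return fix, with an illustrative cardstate layout:

#include <pthread.h>

struct cardstate { pthread_mutex_t mutex; unsigned flags; int waiting; };
#define VALID_MINOR 0x01

static int shutdown_card(struct cardstate *cs)
{
    pthread_mutex_lock(&cs->mutex);

    if (!(cs->flags & VALID_MINOR)) {
        pthread_mutex_unlock(&cs->mutex);   /* was missing before the fix */
        return -1;
    }

    cs->waiting = 1;
    /* ... issue the shutdown event and wait for completion ... */
    pthread_mutex_unlock(&cs->mutex);
    return 0;
}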
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 7993e01f9fc5..76043dedba5b 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c | |||
@@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
725 | 725 | ||
726 | switch (adapter->type) { | 726 | switch (adapter->type) { |
727 | case AVM_FRITZ_PCIV2: | 727 | case AVM_FRITZ_PCIV2: |
728 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
729 | "fcpcipnp", adapter); | ||
730 | break; | ||
731 | case AVM_FRITZ_PCI: | ||
732 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
733 | "fcpcipnp", adapter); | ||
734 | break; | ||
735 | case AVM_FRITZ_PNP: | ||
736 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
737 | "fcpcipnp", adapter); | ||
738 | break; | ||
739 | } | ||
740 | if (retval) | ||
741 | goto err_region; | ||
742 | |||
743 | switch (adapter->type) { | ||
744 | case AVM_FRITZ_PCIV2: | ||
745 | case AVM_FRITZ_PCI: | 728 | case AVM_FRITZ_PCI: |
746 | val = inl(adapter->io); | 729 | val = inl(adapter->io); |
747 | break; | 730 | break; |
@@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
796 | 779 | ||
797 | switch (adapter->type) { | 780 | switch (adapter->type) { |
798 | case AVM_FRITZ_PCIV2: | 781 | case AVM_FRITZ_PCIV2: |
782 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
783 | "fcpcipnp", adapter); | ||
784 | break; | ||
785 | case AVM_FRITZ_PCI: | ||
786 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
787 | "fcpcipnp", adapter); | ||
788 | break; | ||
789 | case AVM_FRITZ_PNP: | ||
790 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
791 | "fcpcipnp", adapter); | ||
792 | break; | ||
793 | } | ||
794 | if (retval) | ||
795 | goto err_region; | ||
796 | |||
797 | switch (adapter->type) { | ||
798 | case AVM_FRITZ_PCIV2: | ||
799 | fcpci2_init(adapter); | 799 | fcpci2_init(adapter); |
800 | isacsx_setup(&adapter->isac); | 800 | isacsx_setup(&adapter->isac); |
801 | break; | 801 | break; |
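Note: the fcpcipnp_setup() hunks move the request_irq() calls from early in the function to just before the per-chip init; on a shared line the handler can fire as soon as request_irq() returns, so registering it only after the I/O state it depends on is set up is the safer ordering (that motivation is inferred from the change, not stated in it). A simplified sketch with a stub in place of request_irq():

struct adapter { int irq; int ready; };

typedef void (*irq_handler_t)(int irq, void *dev);

/* Stand-in for the kernel's request_irq(); only the ordering matters. */
static int request_irq_stub(int irq, irq_handler_t handler, void *dev)
{
    (void)irq; (void)handler; (void)dev;
    return 0;
}

static void adapter_irq(int irq, void *dev)
{
    struct adapter *a = dev;
    (void)irq;
    if (!a->ready)          /* with the old ordering this could be hit */
        return;
    /* ... acknowledge and handle the interrupt ... */
}

static int adapter_setup(struct adapter *a)
{
    /* 1. program the hardware and initialise the state the handler uses */
    a->ready = 1;
    /* 2. only then register for (possibly shared) interrupts */
    return request_irq_stub(a->irq, adapter_irq, a);
}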
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index f93de4a30355..78f7660c1d0e 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c | |||
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) | |||
906 | sprintf(rs, "\r\n0-2"); | 906 | sprintf(rs, "\r\n0-2"); |
907 | isdn_tty_at_cout(rs, info); | 907 | isdn_tty_at_cout(rs, info); |
908 | } else { | 908 | } else { |
909 | if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) | 909 | if ((f->phase != ISDN_FAX_PHASE_D) || |
910 | (!(info->faxonline & 1))) | ||
910 | PARSE_ERROR1; | 911 | PARSE_ERROR1; |
911 | par = isdn_getnum(p); | 912 | par = isdn_getnum(p); |
912 | if ((par < 0) || (par > 2)) | 913 | if ((par < 0) || (par > 2)) |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 655ef9a3f4df..a335c85a736e 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1289 | } | 1289 | } |
1290 | break; | 1290 | break; |
1291 | case ISDN_CMD_CLREAZ: | 1291 | case ISDN_CMD_CLREAZ: |
1292 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1292 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1293 | return -ENODEV; | 1293 | return -ENODEV; |
1294 | if (card->leased) | 1294 | if (card->leased) |
1295 | break; | 1295 | break; |
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1333 | } | 1333 | } |
1334 | break; | 1334 | break; |
1335 | case ISDN_CMD_SETL3: | 1335 | case ISDN_CMD_SETL3: |
1336 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1336 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1337 | return -ENODEV; | 1337 | return -ENODEV; |
1338 | return 0; | 1338 | return 0; |
1339 | default: | 1339 | default: |
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) | |||
1380 | isdnloop_card *card = isdnloop_findcard(id); | 1380 | isdnloop_card *card = isdnloop_findcard(id); |
1381 | 1381 | ||
1382 | if (card) { | 1382 | if (card) { |
1383 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1383 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1384 | return -ENODEV; | 1384 | return -ENODEV; |
1385 | return (isdnloop_writecmd(buf, len, 1, card)); | 1385 | return (isdnloop_writecmd(buf, len, 1, card)); |
1386 | } | 1386 | } |
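Note: the isdn_ttyfax.c and isdnloop.c hunks all fix the same C precedence bug: `!` binds more tightly than `&`, so `!card->flags & ISDNLOOP_FLAGS_RUNNING` evaluates as `(!card->flags) & ISDNLOOP_FLAGS_RUNNING` and is effectively always false whenever any flag is set. The short program below shows the two forms disagreeing:

#include <stdio.h>

#define FLAGS_RUNNING 0x04

int main(void)
{
    unsigned flags = 0x01;   /* card exists but FLAGS_RUNNING is clear */

    /* old test: `!' binds tighter than `&', so this is (!flags) & 0x04 == 0
     * and the "not running" case is never caught */
    printf("old form: %d\n", !flags & FLAGS_RUNNING);

    /* fixed test: parentheses give the intended meaning */
    printf("new form: %d\n", !(flags & FLAGS_RUNNING));
    return 0;
}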
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 7743d73768df..c632c08cbbdc 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c | |||
@@ -69,11 +69,22 @@ static __init int map_switcher(void) | |||
69 | switcher_page[i] = virt_to_page(addr); | 69 | switcher_page[i] = virt_to_page(addr); |
70 | } | 70 | } |
71 | 71 | ||
72 | /* First we check that the Switcher won't overlap the fixmap area at | ||
73 | * the top of memory. It's currently nowhere near, but it could have | ||
74 | * very strange effects if it ever happened. */ | ||
75 | if (SWITCHER_ADDR + (TOTAL_SWITCHER_PAGES+1)*PAGE_SIZE > FIXADDR_START){ | ||
76 | err = -ENOMEM; | ||
77 | printk("lguest: mapping switcher would thwack fixmap\n"); | ||
78 | goto free_pages; | ||
79 | } | ||
80 | |||
72 | /* Now we reserve the "virtual memory area" we want: 0xFFC00000 | 81 | /* Now we reserve the "virtual memory area" we want: 0xFFC00000 |
73 | * (SWITCHER_ADDR). We might not get it in theory, but in practice | 82 | * (SWITCHER_ADDR). We might not get it in theory, but in practice |
74 | * it's worked so far. */ | 83 | * it's worked so far. The end address needs +1 because __get_vm_area |
84 | * allocates an extra guard page, so we need space for that. */ | ||
75 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, | 85 | switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, |
76 | VM_ALLOC, SWITCHER_ADDR, VMALLOC_END); | 86 | VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR |
87 | + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); | ||
77 | if (!switcher_vma) { | 88 | if (!switcher_vma) { |
78 | err = -ENOMEM; | 89 | err = -ENOMEM; |
79 | printk("lguest: could not map switcher pages high\n"); | 90 | printk("lguest: could not map switcher pages high\n"); |
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 85d42d3d01a9..2221485b0773 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c | |||
@@ -241,15 +241,16 @@ static ssize_t write(struct file *file, const char __user *in, | |||
241 | cpu = &lg->cpus[cpu_id]; | 241 | cpu = &lg->cpus[cpu_id]; |
242 | if (!cpu) | 242 | if (!cpu) |
243 | return -EINVAL; | 243 | return -EINVAL; |
244 | } | ||
245 | 244 | ||
246 | /* Once the Guest is dead, all you can do is read() why it died. */ | 245 | /* Once the Guest is dead, you can only read() why it died. */ |
247 | if (lg && lg->dead) | 246 | if (lg->dead) |
248 | return -ENOENT; | 247 | return -ENOENT; |
249 | 248 | ||
250 | /* If you're not the task which owns the Guest, you can only break */ | 249 | /* If you're not the task which owns the Guest, all you can do |
251 | if (lg && current != cpu->tsk && req != LHREQ_BREAK) | 250 | * is break the Launcher out of running the Guest. */ |
252 | return -EPERM; | 251 | if (current != cpu->tsk && req != LHREQ_BREAK) |
252 | return -EPERM; | ||
253 | } | ||
253 | 254 | ||
254 | switch (req) { | 255 | switch (req) { |
255 | case LHREQ_INITIALIZE: | 256 | case LHREQ_INITIALIZE: |
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 275f23c2deb4..a7f64a9d67e0 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c | |||
@@ -391,7 +391,7 @@ static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable) | |||
391 | { | 391 | { |
392 | unsigned int i; | 392 | unsigned int i; |
393 | for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) | 393 | for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) |
394 | if (lg->pgdirs[i].gpgdir == pgtable) | 394 | if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable) |
395 | break; | 395 | break; |
396 | return i; | 396 | return i; |
397 | } | 397 | } |
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 741a2e3f4fc6..a348bb0791d3 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c | |||
@@ -17,7 +17,7 @@ | |||
17 | 17 | ||
18 | static struct backlight_ops pmu_backlight_data; | 18 | static struct backlight_ops pmu_backlight_data; |
19 | static DEFINE_SPINLOCK(pmu_backlight_lock); | 19 | static DEFINE_SPINLOCK(pmu_backlight_lock); |
20 | static int sleeping; | 20 | static int sleeping, uses_pmu_bl; |
21 | static u8 bl_curve[FB_BACKLIGHT_LEVELS]; | 21 | static u8 bl_curve[FB_BACKLIGHT_LEVELS]; |
22 | 22 | ||
23 | static void pmu_backlight_init_curve(u8 off, u8 min, u8 max) | 23 | static void pmu_backlight_init_curve(u8 off, u8 min, u8 max) |
@@ -128,7 +128,7 @@ void pmu_backlight_set_sleep(int sleep) | |||
128 | 128 | ||
129 | spin_lock_irqsave(&pmu_backlight_lock, flags); | 129 | spin_lock_irqsave(&pmu_backlight_lock, flags); |
130 | sleeping = sleep; | 130 | sleeping = sleep; |
131 | if (pmac_backlight) { | 131 | if (pmac_backlight && uses_pmu_bl) { |
132 | if (sleep) { | 132 | if (sleep) { |
133 | struct adb_request req; | 133 | struct adb_request req; |
134 | 134 | ||
@@ -166,6 +166,7 @@ void __init pmu_backlight_init() | |||
166 | printk(KERN_ERR "PMU Backlight registration failed\n"); | 166 | printk(KERN_ERR "PMU Backlight registration failed\n"); |
167 | return; | 167 | return; |
168 | } | 168 | } |
169 | uses_pmu_bl = 1; | ||
169 | bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; | 170 | bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; |
170 | pmu_backlight_init_curve(0x7F, 0x46, 0x0E); | 171 | pmu_backlight_init_curve(0x7F, 0x46, 0x0E); |
171 | 172 | ||
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index ebec663d5d37..d6365a9f0637 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c | |||
@@ -2528,7 +2528,7 @@ EXPORT_SYMBOL(pmu_wait_complete); | |||
2528 | EXPORT_SYMBOL(pmu_suspend); | 2528 | EXPORT_SYMBOL(pmu_suspend); |
2529 | EXPORT_SYMBOL(pmu_resume); | 2529 | EXPORT_SYMBOL(pmu_resume); |
2530 | EXPORT_SYMBOL(pmu_unlock); | 2530 | EXPORT_SYMBOL(pmu_unlock); |
2531 | #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32) | 2531 | #if defined(CONFIG_PPC32) |
2532 | EXPORT_SYMBOL(pmu_enable_irled); | 2532 | EXPORT_SYMBOL(pmu_enable_irled); |
2533 | EXPORT_SYMBOL(pmu_battery_count); | 2533 | EXPORT_SYMBOL(pmu_battery_count); |
2534 | EXPORT_SYMBOL(pmu_batteries); | 2534 | EXPORT_SYMBOL(pmu_batteries); |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7aeceedcf7d4..c14dacdacfac 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1045,8 +1045,14 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1045 | if (bitmap == NULL) | 1045 | if (bitmap == NULL) |
1046 | return; | 1046 | return; |
1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) | 1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) |
1048 | return; | 1048 | goto done; |
1049 | |||
1049 | bitmap->daemon_lastrun = jiffies; | 1050 | bitmap->daemon_lastrun = jiffies; |
1051 | if (bitmap->allclean) { | ||
1052 | bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
1053 | return; | ||
1054 | } | ||
1055 | bitmap->allclean = 1; | ||
1050 | 1056 | ||
1051 | for (j = 0; j < bitmap->chunks; j++) { | 1057 | for (j = 0; j < bitmap->chunks; j++) { |
1052 | bitmap_counter_t *bmc; | 1058 | bitmap_counter_t *bmc; |
@@ -1068,8 +1074,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1068 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); | 1074 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); |
1069 | 1075 | ||
1070 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1076 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1071 | if (need_write) | 1077 | if (need_write) { |
1072 | write_page(bitmap, page, 0); | 1078 | write_page(bitmap, page, 0); |
1079 | bitmap->allclean = 0; | ||
1080 | } | ||
1073 | continue; | 1081 | continue; |
1074 | } | 1082 | } |
1075 | 1083 | ||
@@ -1098,6 +1106,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1098 | /* | 1106 | /* |
1099 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); | 1107 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); |
1100 | */ | 1108 | */ |
1109 | if (*bmc) | ||
1110 | bitmap->allclean = 0; | ||
1111 | |||
1101 | if (*bmc == 2) { | 1112 | if (*bmc == 2) { |
1102 | *bmc=1; /* maybe clear the bit next time */ | 1113 | *bmc=1; /* maybe clear the bit next time */ |
1103 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1114 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
@@ -1132,6 +1143,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1132 | } | 1143 | } |
1133 | } | 1144 | } |
1134 | 1145 | ||
1146 | done: | ||
1147 | if (bitmap->allclean == 0) | ||
1148 | bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; | ||
1135 | } | 1149 | } |
1136 | 1150 | ||
1137 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1151 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
@@ -1226,6 +1240,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
1226 | sectors -= blocks; | 1240 | sectors -= blocks; |
1227 | else sectors = 0; | 1241 | else sectors = 0; |
1228 | } | 1242 | } |
1243 | bitmap->allclean = 0; | ||
1229 | return 0; | 1244 | return 0; |
1230 | } | 1245 | } |
1231 | 1246 | ||
@@ -1296,6 +1311,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | |||
1296 | } | 1311 | } |
1297 | } | 1312 | } |
1298 | spin_unlock_irq(&bitmap->lock); | 1313 | spin_unlock_irq(&bitmap->lock); |
1314 | bitmap->allclean = 0; | ||
1299 | return rv; | 1315 | return rv; |
1300 | } | 1316 | } |
1301 | 1317 | ||
@@ -1332,6 +1348,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab | |||
1332 | } | 1348 | } |
1333 | unlock: | 1349 | unlock: |
1334 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1350 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1351 | bitmap->allclean = 0; | ||
1335 | } | 1352 | } |
1336 | 1353 | ||
1337 | void bitmap_close_sync(struct bitmap *bitmap) | 1354 | void bitmap_close_sync(struct bitmap *bitmap) |
@@ -1399,7 +1416,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n | |||
1399 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1416 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
1400 | } | 1417 | } |
1401 | spin_unlock_irq(&bitmap->lock); | 1418 | spin_unlock_irq(&bitmap->lock); |
1402 | 1419 | bitmap->allclean = 0; | |
1403 | } | 1420 | } |
1404 | 1421 | ||
1405 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ | 1422 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ |
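The bitmap.c hunks above introduce an allclean flag: a daemon pass that finds nothing left to do marks the bitmap clean, a second consecutive clean pass parks the md thread on MAX_SCHEDULE_TIMEOUT, and any write or resync activity clears the flag and restores the normal daemon_sleep polling period. A minimal userspace sketch of that back-off scheme (the names and timeout values are stand-ins, not kernel API):

#include <stdio.h>

#define NORMAL_TIMEOUT 5        /* stands in for daemon_sleep * HZ    */
#define PARKED_TIMEOUT (-1)     /* stands in for MAX_SCHEDULE_TIMEOUT */

struct bmap { int allclean; int dirty; };
struct thr  { long timeout; };

/* One daemon pass, mirroring the patched bitmap_daemon_work() flow:
 * entering with allclean still set parks the thread; any remaining
 * dirty state clears allclean so polling continues. */
static void daemon_work(struct bmap *b, struct thr *t)
{
        if (b->allclean) {
                t->timeout = PARKED_TIMEOUT;
                return;
        }
        b->allclean = 1;         /* assume clean until proven otherwise */
        if (b->dirty)
                b->allclean = 0; /* counters/pages still need attention */
        if (!b->allclean)
                t->timeout = NORMAL_TIMEOUT;
}

/* New writes (bitmap_startwrite() etc. in the patch) clear allclean. */
static void start_write(struct bmap *b) { b->allclean = 0; b->dirty = 1; }

int main(void)
{
        struct bmap b = { 0, 1 };
        struct thr  t = { NORMAL_TIMEOUT };

        daemon_work(&b, &t);     /* dirty pass: keep polling       */
        b.dirty = 0;
        daemon_work(&b, &t);     /* first clean pass               */
        daemon_work(&b, &t);     /* second clean pass: park        */
        printf("timeout after idle passes: %ld\n", t.timeout);

        start_write(&b);
        daemon_work(&b, &t);     /* activity: polling resumes      */
        printf("timeout after new write: %ld\n", t.timeout);
        return 0;
}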
diff --git a/drivers/md/md.c b/drivers/md/md.c index 7da6ec244e15..ccbbf63727cc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; | 1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; |
1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; | 1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; |
1107 | if (rdev->sb_size & bmask) | 1107 | if (rdev->sb_size & bmask) |
1108 | rdev-> sb_size = (rdev->sb_size | bmask)+1; | 1108 | rdev->sb_size = (rdev->sb_size | bmask) + 1; |
1109 | |||
1110 | if (minor_version | ||
1111 | && rdev->data_offset < sb_offset + (rdev->sb_size/512)) | ||
1112 | return -EINVAL; | ||
1109 | 1113 | ||
1110 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) | 1114 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) |
1111 | rdev->desc_nr = -1; | 1115 | rdev->desc_nr = -1; |
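The new check in super_1_load() above only applies when minor_version is non-zero and rejects an rdev whose data area would overlap the superblock itself: data_offset must lie past sb_offset plus the superblock size rounded up to the device's hard sector size, expressed in 512-byte sectors. A standalone illustration of the rounding and of the overlap test; all numbers are made-up examples:

#include <stdio.h>

int main(void)
{
        unsigned int max_dev  = 384;               /* example value        */
        unsigned int sb_size  = max_dev * 2 + 256; /* 1024 bytes           */
        unsigned int hardsect = 4096;              /* example 4K device    */
        unsigned int bmask    = hardsect - 1;

        /* Round sb_size up to the hard sector size, as in the hunk. */
        if (sb_size & bmask)
                sb_size = (sb_size | bmask) + 1;   /* -> 4096 bytes        */

        unsigned long long sb_offset   = 8;        /* sectors, example     */
        unsigned long long data_offset = 10;       /* sectors, example     */

        printf("sb_size rounded to %u bytes (%u sectors)\n",
               sb_size, sb_size / 512);
        if (data_offset < sb_offset + sb_size / 512)
                printf("rejected: data area overlaps the superblock\n");
        else
                printf("ok\n");
        return 0;
}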
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1137 | else | 1141 | else |
1138 | ret = 0; | 1142 | ret = 0; |
1139 | } | 1143 | } |
1140 | if (minor_version) | 1144 | if (minor_version) |
1141 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; | 1145 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; |
1142 | else | 1146 | else |
1143 | rdev->size = rdev->sb_offset; | 1147 | rdev->size = rdev->sb_offset; |
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
1499 | free_disk_sb(rdev); | 1503 | free_disk_sb(rdev); |
1500 | list_del_init(&rdev->same_set); | 1504 | list_del_init(&rdev->same_set); |
1501 | #ifndef MODULE | 1505 | #ifndef MODULE |
1502 | md_autodetect_dev(rdev->bdev->bd_dev); | 1506 | if (test_bit(AutoDetected, &rdev->flags)) |
1507 | md_autodetect_dev(rdev->bdev->bd_dev); | ||
1503 | #endif | 1508 | #endif |
1504 | unlock_rdev(rdev); | 1509 | unlock_rdev(rdev); |
1505 | kobject_put(&rdev->kobj); | 1510 | kobject_put(&rdev->kobj); |
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
1996 | char *e; | 2001 | char *e; |
1997 | unsigned long long size = simple_strtoull(buf, &e, 10); | 2002 | unsigned long long size = simple_strtoull(buf, &e, 10); |
1998 | unsigned long long oldsize = rdev->size; | 2003 | unsigned long long oldsize = rdev->size; |
2004 | mddev_t *my_mddev = rdev->mddev; | ||
2005 | |||
1999 | if (e==buf || (*e && *e != '\n')) | 2006 | if (e==buf || (*e && *e != '\n')) |
2000 | return -EINVAL; | 2007 | return -EINVAL; |
2001 | if (rdev->mddev->pers) | 2008 | if (my_mddev->pers) |
2002 | return -EBUSY; | 2009 | return -EBUSY; |
2003 | rdev->size = size; | 2010 | rdev->size = size; |
2004 | if (size > oldsize && rdev->mddev->external) { | 2011 | if (size > oldsize && rdev->mddev->external) { |
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2011 | int overlap = 0; | 2018 | int overlap = 0; |
2012 | struct list_head *tmp, *tmp2; | 2019 | struct list_head *tmp, *tmp2; |
2013 | 2020 | ||
2014 | mddev_unlock(rdev->mddev); | 2021 | mddev_unlock(my_mddev); |
2015 | for_each_mddev(mddev, tmp) { | 2022 | for_each_mddev(mddev, tmp) { |
2016 | mdk_rdev_t *rdev2; | 2023 | mdk_rdev_t *rdev2; |
2017 | 2024 | ||
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2031 | break; | 2038 | break; |
2032 | } | 2039 | } |
2033 | } | 2040 | } |
2034 | mddev_lock(rdev->mddev); | 2041 | mddev_lock(my_mddev); |
2035 | if (overlap) { | 2042 | if (overlap) { |
2036 | /* Someone else could have slipped in a size | 2043 | /* Someone else could have slipped in a size |
2037 | * change here, but doing so is just silly. | 2044 | * change here, but doing so is just silly. |
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2043 | return -EBUSY; | 2050 | return -EBUSY; |
2044 | } | 2051 | } |
2045 | } | 2052 | } |
2046 | if (size < rdev->mddev->size || rdev->mddev->size == 0) | 2053 | if (size < my_mddev->size || my_mddev->size == 0) |
2047 | rdev->mddev->size = size; | 2054 | my_mddev->size = size; |
2048 | return len; | 2055 | return len; |
2049 | } | 2056 | } |
2050 | 2057 | ||
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
2065 | { | 2072 | { |
2066 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2073 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2067 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2074 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2075 | mddev_t *mddev = rdev->mddev; | ||
2076 | ssize_t rv; | ||
2068 | 2077 | ||
2069 | if (!entry->show) | 2078 | if (!entry->show) |
2070 | return -EIO; | 2079 | return -EIO; |
2071 | return entry->show(rdev, page); | 2080 | |
2081 | rv = mddev ? mddev_lock(mddev) : -EBUSY; | ||
2082 | if (!rv) { | ||
2083 | if (rdev->mddev == NULL) | ||
2084 | rv = -EBUSY; | ||
2085 | else | ||
2086 | rv = entry->show(rdev, page); | ||
2087 | mddev_unlock(mddev); | ||
2088 | } | ||
2089 | return rv; | ||
2072 | } | 2090 | } |
2073 | 2091 | ||
2074 | static ssize_t | 2092 | static ssize_t |
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2077 | { | 2095 | { |
2078 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2096 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2079 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2097 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2080 | int rv; | 2098 | ssize_t rv; |
2099 | mddev_t *mddev = rdev->mddev; | ||
2081 | 2100 | ||
2082 | if (!entry->store) | 2101 | if (!entry->store) |
2083 | return -EIO; | 2102 | return -EIO; |
2084 | if (!capable(CAP_SYS_ADMIN)) | 2103 | if (!capable(CAP_SYS_ADMIN)) |
2085 | return -EACCES; | 2104 | return -EACCES; |
2086 | rv = mddev_lock(rdev->mddev); | 2105 | rv = mddev ? mddev_lock(mddev): -EBUSY; |
2087 | if (!rv) { | 2106 | if (!rv) { |
2088 | rv = entry->store(rdev, page, length); | 2107 | if (rdev->mddev == NULL) |
2108 | rv = -EBUSY; | ||
2109 | else | ||
2110 | rv = entry->store(rdev, page, length); | ||
2089 | mddev_unlock(rdev->mddev); | 2111 | mddev_unlock(rdev->mddev); |
2090 | } | 2112 | } |
2091 | return rv; | 2113 | return rv; |
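rdev_attr_show() and rdev_attr_store() now snapshot rdev->mddev, lock through that snapshot, and re-check that the rdev is still attached before calling the handler, so a sysfs access racing with rdev removal fails with -EBUSY instead of using a stale pointer. A userspace analogue of that lock-then-revalidate pattern, with a pthread mutex standing in for mddev_lock(); all names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

struct owner  { pthread_mutex_t lock; };
struct member { struct owner *owner; int value; };

/* Take the lock through a local snapshot of the back-pointer, then
 * re-check the pointer under the lock: a concurrent detach may have
 * cleared it between the snapshot and the lock acquisition. */
static int member_show(struct member *m, int *out)
{
        struct owner *o = m->owner;     /* snapshot, may go stale */
        int rv;

        if (!o)
                return -EBUSY;
        pthread_mutex_lock(&o->lock);
        if (m->owner == NULL) {         /* detached while we waited */
                rv = -EBUSY;
        } else {
                *out = m->value;
                rv = 0;
        }
        pthread_mutex_unlock(&o->lock);
        return rv;
}

int main(void)
{
        struct owner  o = { PTHREAD_MUTEX_INITIALIZER };
        struct member m = { &o, 42 };
        int v, rc = member_show(&m, &v);

        printf("rc=%d value=%d\n", rc, rc ? 0 : v);
        return 0;
}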
@@ -5127,7 +5149,7 @@ static int md_seq_show(struct seq_file *seq, void *v) | |||
5127 | if (mddev->ro==1) | 5149 | if (mddev->ro==1) |
5128 | seq_printf(seq, " (read-only)"); | 5150 | seq_printf(seq, " (read-only)"); |
5129 | if (mddev->ro==2) | 5151 | if (mddev->ro==2) |
5130 | seq_printf(seq, "(auto-read-only)"); | 5152 | seq_printf(seq, " (auto-read-only)"); |
5131 | seq_printf(seq, " %s", mddev->pers->name); | 5153 | seq_printf(seq, " %s", mddev->pers->name); |
5132 | } | 5154 | } |
5133 | 5155 | ||
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
5351 | mddev->ro = 0; | 5373 | mddev->ro = 0; |
5352 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 5374 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
5353 | md_wakeup_thread(mddev->thread); | 5375 | md_wakeup_thread(mddev->thread); |
5376 | md_wakeup_thread(mddev->sync_thread); | ||
5354 | } | 5377 | } |
5355 | atomic_inc(&mddev->writes_pending); | 5378 | atomic_inc(&mddev->writes_pending); |
5356 | if (mddev->in_sync) { | 5379 | if (mddev->in_sync) { |
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part) | |||
6021 | MD_BUG(); | 6044 | MD_BUG(); |
6022 | continue; | 6045 | continue; |
6023 | } | 6046 | } |
6047 | set_bit(AutoDetected, &rdev->flags); | ||
6024 | list_add(&rdev->same_set, &pending_raid_disks); | 6048 | list_add(&rdev->same_set, &pending_raid_disks); |
6025 | i_passed++; | 6049 | i_passed++; |
6026 | } | 6050 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5c7fef091cec..ff61b309129a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits) | |||
592 | } | 592 | } |
593 | 593 | ||
594 | 594 | ||
595 | static int flush_pending_writes(conf_t *conf) | ||
596 | { | ||
597 | /* Any writes that have been queued but are awaiting | ||
598 | * bitmap updates get flushed here. | ||
599 | * We return 1 if any requests were actually submitted. | ||
600 | */ | ||
601 | int rv = 0; | ||
602 | |||
603 | spin_lock_irq(&conf->device_lock); | ||
604 | |||
605 | if (conf->pending_bio_list.head) { | ||
606 | struct bio *bio; | ||
607 | bio = bio_list_get(&conf->pending_bio_list); | ||
608 | blk_remove_plug(conf->mddev->queue); | ||
609 | spin_unlock_irq(&conf->device_lock); | ||
610 | /* flush any pending bitmap writes to | ||
611 | * disk before proceeding w/ I/O */ | ||
612 | bitmap_unplug(conf->mddev->bitmap); | ||
613 | |||
614 | while (bio) { /* submit pending writes */ | ||
615 | struct bio *next = bio->bi_next; | ||
616 | bio->bi_next = NULL; | ||
617 | generic_make_request(bio); | ||
618 | bio = next; | ||
619 | } | ||
620 | rv = 1; | ||
621 | } else | ||
622 | spin_unlock_irq(&conf->device_lock); | ||
623 | return rv; | ||
624 | } | ||
625 | |||
595 | /* Barriers.... | 626 | /* Barriers.... |
596 | * Sometimes we need to suspend IO while we do something else, | 627 | * Sometimes we need to suspend IO while we do something else, |
597 | * either some resync/recovery, or reconfigure the array. | 628 | * either some resync/recovery, or reconfigure the array. |
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf) | |||
673 | /* stop syncio and normal IO and wait for everything to | 704 | /* stop syncio and normal IO and wait for everything to |
674 | * go quiet. | 705 | * go quiet. |
675 | * We increment barrier and nr_waiting, and then | 706 | * We increment barrier and nr_waiting, and then |
676 | * wait until barrier+nr_pending match nr_queued+2 | 707 | * wait until nr_pending match nr_queued+1 |
708 | * This is called in the context of one normal IO request | ||
709 | * that has failed. Thus any sync request that might be pending | ||
710 | * will be blocked by nr_pending, and we need to wait for | ||
711 | * pending IO requests to complete or be queued for re-try. | ||
712 | * Thus the number queued (nr_queued) plus this request (1) | ||
713 | * must match the number of pending IOs (nr_pending) before | ||
714 | * we continue. | ||
677 | */ | 715 | */ |
678 | spin_lock_irq(&conf->resync_lock); | 716 | spin_lock_irq(&conf->resync_lock); |
679 | conf->barrier++; | 717 | conf->barrier++; |
680 | conf->nr_waiting++; | 718 | conf->nr_waiting++; |
681 | wait_event_lock_irq(conf->wait_barrier, | 719 | wait_event_lock_irq(conf->wait_barrier, |
682 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 720 | conf->nr_pending == conf->nr_queued+1, |
683 | conf->resync_lock, | 721 | conf->resync_lock, |
684 | raid1_unplug(conf->mddev->queue)); | 722 | ({ flush_pending_writes(conf); |
723 | raid1_unplug(conf->mddev->queue); })); | ||
685 | spin_unlock_irq(&conf->resync_lock); | 724 | spin_unlock_irq(&conf->resync_lock); |
686 | } | 725 | } |
687 | static void unfreeze_array(conf_t *conf) | 726 | static void unfreeze_array(conf_t *conf) |
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
907 | blk_plug_device(mddev->queue); | 946 | blk_plug_device(mddev->queue); |
908 | spin_unlock_irqrestore(&conf->device_lock, flags); | 947 | spin_unlock_irqrestore(&conf->device_lock, flags); |
909 | 948 | ||
949 | /* In case raid1d snuck into freeze_array */ | ||
950 | wake_up(&conf->wait_barrier); | ||
951 | |||
910 | if (do_sync) | 952 | if (do_sync) |
911 | md_wakeup_thread(mddev->thread); | 953 | md_wakeup_thread(mddev->thread); |
912 | #if 0 | 954 | #if 0 |
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev) | |||
1473 | 1515 | ||
1474 | for (;;) { | 1516 | for (;;) { |
1475 | char b[BDEVNAME_SIZE]; | 1517 | char b[BDEVNAME_SIZE]; |
1476 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1477 | |||
1478 | if (conf->pending_bio_list.head) { | ||
1479 | bio = bio_list_get(&conf->pending_bio_list); | ||
1480 | blk_remove_plug(mddev->queue); | ||
1481 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1482 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1483 | bitmap_unplug(mddev->bitmap); | ||
1484 | 1518 | ||
1485 | while (bio) { /* submit pending writes */ | 1519 | unplug += flush_pending_writes(conf); |
1486 | struct bio *next = bio->bi_next; | ||
1487 | bio->bi_next = NULL; | ||
1488 | generic_make_request(bio); | ||
1489 | bio = next; | ||
1490 | } | ||
1491 | unplug = 1; | ||
1492 | 1520 | ||
1493 | continue; | 1521 | spin_lock_irqsave(&conf->device_lock, flags); |
1494 | } | 1522 | if (list_empty(head)) { |
1495 | 1523 | spin_unlock_irqrestore(&conf->device_lock, flags); | |
1496 | if (list_empty(head)) | ||
1497 | break; | 1524 | break; |
1525 | } | ||
1498 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); | 1526 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); |
1499 | list_del(head->prev); | 1527 | list_del(head->prev); |
1500 | conf->nr_queued--; | 1528 | conf->nr_queued--; |
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev) | |||
1590 | } | 1618 | } |
1591 | } | 1619 | } |
1592 | } | 1620 | } |
1593 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1594 | if (unplug) | 1621 | if (unplug) |
1595 | unplug_slaves(mddev); | 1622 | unplug_slaves(mddev); |
1596 | } | 1623 | } |
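The raid1.c changes above pull the queued-write handling out of raid1d() into flush_pending_writes(): the whole pending_bio_list is detached while device_lock is held, the lock is dropped, bitmap writes are flushed, and only then are the bios submitted one by one along their bi_next chain. A simplified userspace analogue of that detach-under-lock, submit-unlocked pattern (it omits the plug removal and the bitmap flush):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node  { struct node *next; int id; };
struct queue { pthread_mutex_t lock; struct node *head; };

/* Detach the chain while holding the lock, then walk and "submit"
 * the entries with the lock dropped, as flush_pending_writes() does
 * with conf->pending_bio_list. Returns 1 if anything was submitted. */
static int flush_pending(struct queue *q)
{
        struct node *n;

        pthread_mutex_lock(&q->lock);
        n = q->head;
        q->head = NULL;
        pthread_mutex_unlock(&q->lock);

        if (!n)
                return 0;
        while (n) {
                struct node *next = n->next;
                n->next = NULL;
                printf("submit %d\n", n->id);  /* generic_make_request() here */
                free(n);
                n = next;
        }
        return 1;
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = q.head;
                q.head = n;
        }
        printf("flushed: %d\n", flush_pending(&q));
        return 0;
}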
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 017f58113c33..32389d2f18fc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
537 | current_distance = abs(r10_bio->devs[slot].addr - | 537 | current_distance = abs(r10_bio->devs[slot].addr - |
538 | conf->mirrors[disk].head_position); | 538 | conf->mirrors[disk].head_position); |
539 | 539 | ||
540 | /* Find the disk whose head is closest */ | 540 | /* Find the disk whose head is closest, |
541 | * or - for far > 1 - find the closest to partition beginning */ | ||
541 | 542 | ||
542 | for (nslot = slot; nslot < conf->copies; nslot++) { | 543 | for (nslot = slot; nslot < conf->copies; nslot++) { |
543 | int ndisk = r10_bio->devs[nslot].devnum; | 544 | int ndisk = r10_bio->devs[nslot].devnum; |
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
557 | slot = nslot; | 558 | slot = nslot; |
558 | break; | 559 | break; |
559 | } | 560 | } |
560 | new_distance = abs(r10_bio->devs[nslot].addr - | 561 | |
561 | conf->mirrors[ndisk].head_position); | 562 | /* for far > 1 always use the lowest address */ |
563 | if (conf->far_copies > 1) | ||
564 | new_distance = r10_bio->devs[nslot].addr; | ||
565 | else | ||
566 | new_distance = abs(r10_bio->devs[nslot].addr - | ||
567 | conf->mirrors[ndisk].head_position); | ||
562 | if (new_distance < current_distance) { | 568 | if (new_distance < current_distance) { |
563 | current_distance = new_distance; | 569 | current_distance = new_distance; |
564 | disk = ndisk; | 570 | disk = ndisk; |
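For far layouts (far_copies > 1) the read balancer above stops using head-position seek distance and simply prefers the copy at the lowest device address, normally the first copy in the fast outer zone. A tiny standalone comparison of the two metrics with made-up numbers:

#include <stdio.h>
#include <stdlib.h>

/* Pick a "distance" the way the patched read_balance() does: for
 * far_copies > 1 it is just the copy's start address, otherwise it is
 * the seek distance from the disk's last head position. */
static long distance(int far_copies, long addr, long head_position)
{
        return far_copies > 1 ? addr : labs(addr - head_position);
}

int main(void)
{
        long head   = 100000;
        long copy_a = 4096;     /* early (outer) copy, far from the head */
        long copy_b = 98304;    /* later copy, close to the current head */

        printf("near layout prefers %s\n",
               distance(1, copy_a, head) < distance(1, copy_b, head) ? "A" : "B");
        printf("far  layout prefers %s\n",
               distance(2, copy_a, head) < distance(2, copy_b, head) ? "A" : "B");
        return 0;
}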
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits) | |||
629 | return ret; | 635 | return ret; |
630 | } | 636 | } |
631 | 637 | ||
632 | 638 | static int flush_pending_writes(conf_t *conf) | |
639 | { | ||
640 | /* Any writes that have been queued but are awaiting | ||
641 | * bitmap updates get flushed here. | ||
642 | * We return 1 if any requests were actually submitted. | ||
643 | */ | ||
644 | int rv = 0; | ||
645 | |||
646 | spin_lock_irq(&conf->device_lock); | ||
647 | |||
648 | if (conf->pending_bio_list.head) { | ||
649 | struct bio *bio; | ||
650 | bio = bio_list_get(&conf->pending_bio_list); | ||
651 | blk_remove_plug(conf->mddev->queue); | ||
652 | spin_unlock_irq(&conf->device_lock); | ||
653 | /* flush any pending bitmap writes to disk | ||
654 | * before proceeding w/ I/O */ | ||
655 | bitmap_unplug(conf->mddev->bitmap); | ||
656 | |||
657 | while (bio) { /* submit pending writes */ | ||
658 | struct bio *next = bio->bi_next; | ||
659 | bio->bi_next = NULL; | ||
660 | generic_make_request(bio); | ||
661 | bio = next; | ||
662 | } | ||
663 | rv = 1; | ||
664 | } else | ||
665 | spin_unlock_irq(&conf->device_lock); | ||
666 | return rv; | ||
667 | } | ||
633 | /* Barriers.... | 668 | /* Barriers.... |
634 | * Sometimes we need to suspend IO while we do something else, | 669 | * Sometimes we need to suspend IO while we do something else, |
635 | * either some resync/recovery, or reconfigure the array. | 670 | * either some resync/recovery, or reconfigure the array. |
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf) | |||
712 | /* stop syncio and normal IO and wait for everything to | 747 | /* stop syncio and normal IO and wait for everything to |
713 | * go quiet. | 748 | * go quiet. |
714 | * We increment barrier and nr_waiting, and then | 749 | * We increment barrier and nr_waiting, and then |
715 | * wait until barrier+nr_pending match nr_queued+2 | 750 | * wait until nr_pending match nr_queued+1 |
751 | * This is called in the context of one normal IO request | ||
752 | * that has failed. Thus any sync request that might be pending | ||
753 | * will be blocked by nr_pending, and we need to wait for | ||
754 | * pending IO requests to complete or be queued for re-try. | ||
755 | * Thus the number queued (nr_queued) plus this request (1) | ||
756 | * must match the number of pending IOs (nr_pending) before | ||
757 | * we continue. | ||
716 | */ | 758 | */ |
717 | spin_lock_irq(&conf->resync_lock); | 759 | spin_lock_irq(&conf->resync_lock); |
718 | conf->barrier++; | 760 | conf->barrier++; |
719 | conf->nr_waiting++; | 761 | conf->nr_waiting++; |
720 | wait_event_lock_irq(conf->wait_barrier, | 762 | wait_event_lock_irq(conf->wait_barrier, |
721 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 763 | conf->nr_pending == conf->nr_queued+1, |
722 | conf->resync_lock, | 764 | conf->resync_lock, |
723 | raid10_unplug(conf->mddev->queue)); | 765 | ({ flush_pending_writes(conf); |
766 | raid10_unplug(conf->mddev->queue); })); | ||
724 | spin_unlock_irq(&conf->resync_lock); | 767 | spin_unlock_irq(&conf->resync_lock); |
725 | } | 768 | } |
726 | 769 | ||
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
892 | blk_plug_device(mddev->queue); | 935 | blk_plug_device(mddev->queue); |
893 | spin_unlock_irqrestore(&conf->device_lock, flags); | 936 | spin_unlock_irqrestore(&conf->device_lock, flags); |
894 | 937 | ||
938 | /* In case raid10d snuck in to freeze_array */ | ||
939 | wake_up(&conf->wait_barrier); | ||
940 | |||
895 | if (do_sync) | 941 | if (do_sync) |
896 | md_wakeup_thread(mddev->thread); | 942 | md_wakeup_thread(mddev->thread); |
897 | 943 | ||
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev) | |||
1464 | 1510 | ||
1465 | for (;;) { | 1511 | for (;;) { |
1466 | char b[BDEVNAME_SIZE]; | 1512 | char b[BDEVNAME_SIZE]; |
1467 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1468 | 1513 | ||
1469 | if (conf->pending_bio_list.head) { | 1514 | unplug += flush_pending_writes(conf); |
1470 | bio = bio_list_get(&conf->pending_bio_list); | ||
1471 | blk_remove_plug(mddev->queue); | ||
1472 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1473 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1474 | bitmap_unplug(mddev->bitmap); | ||
1475 | |||
1476 | while (bio) { /* submit pending writes */ | ||
1477 | struct bio *next = bio->bi_next; | ||
1478 | bio->bi_next = NULL; | ||
1479 | generic_make_request(bio); | ||
1480 | bio = next; | ||
1481 | } | ||
1482 | unplug = 1; | ||
1483 | |||
1484 | continue; | ||
1485 | } | ||
1486 | 1515 | ||
1487 | if (list_empty(head)) | 1516 | spin_lock_irqsave(&conf->device_lock, flags); |
1517 | if (list_empty(head)) { | ||
1518 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1488 | break; | 1519 | break; |
1520 | } | ||
1489 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); | 1521 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); |
1490 | list_del(head->prev); | 1522 | list_del(head->prev); |
1491 | conf->nr_queued--; | 1523 | conf->nr_queued--; |
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev) | |||
1548 | } | 1580 | } |
1549 | } | 1581 | } |
1550 | } | 1582 | } |
1551 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1552 | if (unplug) | 1583 | if (unplug) |
1553 | unplug_slaves(mddev); | 1584 | unplug_slaves(mddev); |
1554 | } | 1585 | } |
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1787 | if (j == conf->copies) { | 1818 | if (j == conf->copies) { |
1788 | /* Cannot recover, so abort the recovery */ | 1819 | /* Cannot recover, so abort the recovery */ |
1789 | put_buf(r10_bio); | 1820 | put_buf(r10_bio); |
1821 | if (rb2) | ||
1822 | atomic_dec(&rb2->remaining); | ||
1790 | r10_bio = rb2; | 1823 | r10_bio = rb2; |
1791 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) | 1824 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) |
1792 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", | 1825 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", |
diff --git a/drivers/memstick/Kconfig b/drivers/memstick/Kconfig index 1093fdb07297..f0ca41c20323 100644 --- a/drivers/memstick/Kconfig +++ b/drivers/memstick/Kconfig | |||
@@ -8,7 +8,7 @@ menuconfig MEMSTICK | |||
8 | Sony MemoryStick is a proprietary storage/extension card protocol. | 8 | Sony MemoryStick is a proprietary storage/extension card protocol. |
9 | 9 | ||
10 | If you want MemoryStick support, you should say Y here and also | 10 | If you want MemoryStick support, you should say Y here and also |
11 | to the specific driver for your MMC interface. | 11 | to the specific driver for your MemoryStick interface. |
12 | 12 | ||
13 | if MEMSTICK | 13 | if MEMSTICK |
14 | 14 | ||
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index bba467fe4bce..de80dba12f9b 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | 19 | ||
20 | #define DRIVER_NAME "memstick" | 20 | #define DRIVER_NAME "memstick" |
21 | #define DRIVER_VERSION "0.2" | ||
22 | 21 | ||
23 | static unsigned int cmd_retries = 3; | 22 | static unsigned int cmd_retries = 3; |
24 | module_param(cmd_retries, uint, 0644); | 23 | module_param(cmd_retries, uint, 0644); |
@@ -236,7 +235,7 @@ int memstick_next_req(struct memstick_host *host, struct memstick_request **mrq) | |||
236 | rc = host->card->next_request(host->card, mrq); | 235 | rc = host->card->next_request(host->card, mrq); |
237 | 236 | ||
238 | if (!rc) | 237 | if (!rc) |
239 | host->retries = cmd_retries; | 238 | host->retries = cmd_retries > 1 ? cmd_retries - 1 : 1; |
240 | else | 239 | else |
241 | *mrq = NULL; | 240 | *mrq = NULL; |
242 | 241 | ||
@@ -271,7 +270,7 @@ void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc, | |||
271 | mrq->data_dir = READ; | 270 | mrq->data_dir = READ; |
272 | 271 | ||
273 | mrq->sg = *sg; | 272 | mrq->sg = *sg; |
274 | mrq->io_type = MEMSTICK_IO_SG; | 273 | mrq->long_data = 1; |
275 | 274 | ||
276 | if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) | 275 | if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) |
277 | mrq->need_card_int = 1; | 276 | mrq->need_card_int = 1; |
@@ -306,7 +305,7 @@ void memstick_init_req(struct memstick_request *mrq, unsigned char tpc, | |||
306 | if (mrq->data_dir == WRITE) | 305 | if (mrq->data_dir == WRITE) |
307 | memcpy(mrq->data, buf, mrq->data_len); | 306 | memcpy(mrq->data, buf, mrq->data_len); |
308 | 307 | ||
309 | mrq->io_type = MEMSTICK_IO_VAL; | 308 | mrq->long_data = 0; |
310 | 309 | ||
311 | if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) | 310 | if (tpc == MS_TPC_SET_CMD || tpc == MS_TPC_EX_SET_CMD) |
312 | mrq->need_card_int = 1; | 311 | mrq->need_card_int = 1; |
@@ -561,6 +560,31 @@ void memstick_free_host(struct memstick_host *host) | |||
561 | } | 560 | } |
562 | EXPORT_SYMBOL(memstick_free_host); | 561 | EXPORT_SYMBOL(memstick_free_host); |
563 | 562 | ||
563 | /** | ||
564 | * memstick_suspend_host - notify bus driver of host suspension | ||
565 | * @host - host to use | ||
566 | */ | ||
567 | void memstick_suspend_host(struct memstick_host *host) | ||
568 | { | ||
569 | mutex_lock(&host->lock); | ||
570 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); | ||
571 | mutex_unlock(&host->lock); | ||
572 | } | ||
573 | EXPORT_SYMBOL(memstick_suspend_host); | ||
574 | |||
575 | /** | ||
576 | * memstick_resume_host - notify bus driver of host resumption | ||
577 | * @host - host to use | ||
578 | */ | ||
579 | void memstick_resume_host(struct memstick_host *host) | ||
580 | { | ||
581 | mutex_lock(&host->lock); | ||
582 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); | ||
583 | mutex_unlock(&host->lock); | ||
584 | memstick_detect_change(host); | ||
585 | } | ||
586 | EXPORT_SYMBOL(memstick_resume_host); | ||
587 | |||
564 | int memstick_register_driver(struct memstick_driver *drv) | 588 | int memstick_register_driver(struct memstick_driver *drv) |
565 | { | 589 | { |
566 | drv->driver.bus = &memstick_bus_type; | 590 | drv->driver.bus = &memstick_bus_type; |
@@ -611,4 +635,3 @@ module_exit(memstick_exit); | |||
611 | MODULE_AUTHOR("Alex Dubov"); | 635 | MODULE_AUTHOR("Alex Dubov"); |
612 | MODULE_LICENSE("GPL"); | 636 | MODULE_LICENSE("GPL"); |
613 | MODULE_DESCRIPTION("Sony MemoryStick core driver"); | 637 | MODULE_DESCRIPTION("Sony MemoryStick core driver"); |
614 | MODULE_VERSION(DRIVER_VERSION); | ||
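memstick_suspend_host() and memstick_resume_host() added above give host drivers one call to power a socket down or back up under the host mutex, with a media re-detect on resume. A rough sketch of how a PCI host driver's power-management hooks might use them; apart from those two exported helpers, every name here is illustrative and this is not code from the patch:

#include <linux/pci.h>
#include <linux/memstick.h>

/* Hypothetical host driver context: one memstick_host per socket. */
struct example_host {
        struct pci_dev *pdev;
        struct memstick_host *msh;
};

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct example_host *h = pci_get_drvdata(pdev);

        memstick_suspend_host(h->msh);     /* powers the socket off */
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int example_resume(struct pci_dev *pdev)
{
        struct example_host *h = pci_get_drvdata(pdev);
        int rc;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        rc = pci_enable_device(pdev);
        if (rc)
                return rc;
        memstick_resume_host(h->msh);      /* power on and re-detect media */
        return 0;
}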
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 423ad8cf4bb9..1d637e4561d3 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c | |||
@@ -16,10 +16,10 @@ | |||
16 | #include <linux/idr.h> | 16 | #include <linux/idr.h> |
17 | #include <linux/hdreg.h> | 17 | #include <linux/hdreg.h> |
18 | #include <linux/kthread.h> | 18 | #include <linux/kthread.h> |
19 | #include <linux/delay.h> | ||
19 | #include <linux/memstick.h> | 20 | #include <linux/memstick.h> |
20 | 21 | ||
21 | #define DRIVER_NAME "mspro_block" | 22 | #define DRIVER_NAME "mspro_block" |
22 | #define DRIVER_VERSION "0.2" | ||
23 | 23 | ||
24 | static int major; | 24 | static int major; |
25 | module_param(major, int, 0644); | 25 | module_param(major, int, 0644); |
@@ -110,6 +110,17 @@ struct mspro_mbr { | |||
110 | unsigned int sectors_per_partition; | 110 | unsigned int sectors_per_partition; |
111 | } __attribute__((packed)); | 111 | } __attribute__((packed)); |
112 | 112 | ||
113 | struct mspro_specfile { | ||
114 | char name[8]; | ||
115 | char ext[3]; | ||
116 | unsigned char attr; | ||
117 | unsigned char reserved[10]; | ||
118 | unsigned short time; | ||
119 | unsigned short date; | ||
120 | unsigned short cluster; | ||
121 | unsigned int size; | ||
122 | } __attribute__((packed)); | ||
123 | |||
113 | struct mspro_devinfo { | 124 | struct mspro_devinfo { |
114 | unsigned short cylinders; | 125 | unsigned short cylinders; |
115 | unsigned short heads; | 126 | unsigned short heads; |
@@ -293,6 +304,20 @@ static ssize_t mspro_block_attr_show_sysinfo(struct device *dev, | |||
293 | dev_attr); | 304 | dev_attr); |
294 | struct mspro_sys_info *x_sys = x_attr->data; | 305 | struct mspro_sys_info *x_sys = x_attr->data; |
295 | ssize_t rc = 0; | 306 | ssize_t rc = 0; |
307 | int date_tz = 0, date_tz_f = 0; | ||
308 | |||
309 | if (x_sys->assembly_date[0] > 0x80U) { | ||
310 | date_tz = (~x_sys->assembly_date[0]) + 1; | ||
311 | date_tz_f = date_tz & 3; | ||
312 | date_tz >>= 2; | ||
313 | date_tz = -date_tz; | ||
314 | date_tz_f *= 15; | ||
315 | } else if (x_sys->assembly_date[0] < 0x80U) { | ||
316 | date_tz = x_sys->assembly_date[0]; | ||
317 | date_tz_f = date_tz & 3; | ||
318 | date_tz >>= 2; | ||
319 | date_tz_f *= 15; | ||
320 | } | ||
296 | 321 | ||
297 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n", | 322 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "class: %x\n", |
298 | x_sys->class); | 323 | x_sys->class); |
@@ -305,8 +330,8 @@ static ssize_t mspro_block_attr_show_sysinfo(struct device *dev, | |||
305 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n", | 330 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "page size: %x\n", |
306 | be16_to_cpu(x_sys->page_size)); | 331 | be16_to_cpu(x_sys->page_size)); |
307 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: " | 332 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "assembly date: " |
308 | "%d %04u-%02u-%02u %02u:%02u:%02u\n", | 333 | "GMT%+d:%d %04u-%02u-%02u %02u:%02u:%02u\n", |
309 | x_sys->assembly_date[0], | 334 | date_tz, date_tz_f, |
310 | be16_to_cpu(*(unsigned short *) | 335 | be16_to_cpu(*(unsigned short *) |
311 | &x_sys->assembly_date[1]), | 336 | &x_sys->assembly_date[1]), |
312 | x_sys->assembly_date[3], x_sys->assembly_date[4], | 337 | x_sys->assembly_date[3], x_sys->assembly_date[4], |
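The date_tz/date_tz_f computation added above treats assembly_date[0] as a GMT offset counted in quarter hours: the low two bits select 0/15/30/45 minutes, the remaining bits give whole hours, and values above 0x80 are taken as negative via two's complement. A standalone check of the positive-offset path with an example byte:

#include <stdio.h>

/* Decode a positive GMT offset byte the same way the sysinfo handler
 * above does: quarter hours, low two bits mapped to minutes. */
int main(void)
{
        unsigned char b = 0x26;            /* 38 quarter hours, example */
        int tz   = b;
        int tz_f = tz & 3;

        tz >>= 2;
        tz_f *= 15;
        printf("assembly_date[0]=0x%02x -> GMT%+d:%d\n", b, tz, tz_f);
        /* prints GMT+9:30 */
        return 0;
}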
@@ -398,6 +423,41 @@ static ssize_t mspro_block_attr_show_mbr(struct device *dev, | |||
398 | return rc; | 423 | return rc; |
399 | } | 424 | } |
400 | 425 | ||
426 | static ssize_t mspro_block_attr_show_specfile(struct device *dev, | ||
427 | struct device_attribute *attr, | ||
428 | char *buffer) | ||
429 | { | ||
430 | struct mspro_sys_attr *x_attr = container_of(attr, | ||
431 | struct mspro_sys_attr, | ||
432 | dev_attr); | ||
433 | struct mspro_specfile *x_spfile = x_attr->data; | ||
434 | char name[9], ext[4]; | ||
435 | ssize_t rc = 0; | ||
436 | |||
437 | memcpy(name, x_spfile->name, 8); | ||
438 | name[8] = 0; | ||
439 | memcpy(ext, x_spfile->ext, 3); | ||
440 | ext[3] = 0; | ||
441 | |||
442 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "name: %s\n", name); | ||
443 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "ext: %s\n", ext); | ||
444 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "attribute: %x\n", | ||
445 | x_spfile->attr); | ||
446 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "time: %d:%d:%d\n", | ||
447 | x_spfile->time >> 11, | ||
448 | (x_spfile->time >> 5) & 0x3f, | ||
449 | (x_spfile->time & 0x1f) * 2); | ||
450 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "date: %d-%d-%d\n", | ||
451 | (x_spfile->date >> 9) + 1980, | ||
452 | (x_spfile->date >> 5) & 0xf, | ||
453 | x_spfile->date & 0x1f); | ||
454 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "start cluster: %x\n", | ||
455 | x_spfile->cluster); | ||
456 | rc += scnprintf(buffer + rc, PAGE_SIZE - rc, "size: %x\n", | ||
457 | x_spfile->size); | ||
458 | return rc; | ||
459 | } | ||
460 | |||
401 | static ssize_t mspro_block_attr_show_devinfo(struct device *dev, | 461 | static ssize_t mspro_block_attr_show_devinfo(struct device *dev, |
402 | struct device_attribute *attr, | 462 | struct device_attribute *attr, |
403 | char *buffer) | 463 | char *buffer) |
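The special-file attribute handler added above unpacks DOS/FAT-style date and time fields: hours in the top five bits of time, minutes in the next six, two-second units in the low five, and the year counted from 1980. A standalone decode of one example pair of values:

#include <stdio.h>

/* Mirrors the bit layout used in mspro_block_attr_show_specfile();
 * the two packed values are made-up examples. */
int main(void)
{
        unsigned short time = 0x9420;
        unsigned short date = 0x3862;

        printf("time: %d:%d:%d\n",
               time >> 11, (time >> 5) & 0x3f, (time & 0x1f) * 2);
        printf("date: %d-%d-%d\n",
               (date >> 9) + 1980, (date >> 5) & 0xf, date & 0x1f);
        /* prints 18:33:0 and 2008-3-2 */
        return 0;
}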
@@ -430,6 +490,9 @@ static sysfs_show_t mspro_block_attr_show(unsigned char tag) | |||
430 | return mspro_block_attr_show_modelname; | 490 | return mspro_block_attr_show_modelname; |
431 | case MSPRO_BLOCK_ID_MBR: | 491 | case MSPRO_BLOCK_ID_MBR: |
432 | return mspro_block_attr_show_mbr; | 492 | return mspro_block_attr_show_mbr; |
493 | case MSPRO_BLOCK_ID_SPECFILEVALUES1: | ||
494 | case MSPRO_BLOCK_ID_SPECFILEVALUES2: | ||
495 | return mspro_block_attr_show_specfile; | ||
433 | case MSPRO_BLOCK_ID_DEVINFO: | 496 | case MSPRO_BLOCK_ID_DEVINFO: |
434 | return mspro_block_attr_show_devinfo; | 497 | return mspro_block_attr_show_devinfo; |
435 | default: | 498 | default: |
@@ -629,7 +692,7 @@ static void mspro_block_process_request(struct memstick_dev *card, | |||
629 | param.system = msb->system; | 692 | param.system = msb->system; |
630 | param.data_count = cpu_to_be16(page_count); | 693 | param.data_count = cpu_to_be16(page_count); |
631 | param.data_address = cpu_to_be32((uint32_t)t_sec); | 694 | param.data_address = cpu_to_be32((uint32_t)t_sec); |
632 | param.cmd_param = 0; | 695 | param.tpc_param = 0; |
633 | 696 | ||
634 | msb->data_dir = rq_data_dir(req); | 697 | msb->data_dir = rq_data_dir(req); |
635 | msb->transfer_cmd = msb->data_dir == READ | 698 | msb->transfer_cmd = msb->data_dir == READ |
@@ -758,10 +821,10 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card) | |||
758 | struct memstick_host *host = card->host; | 821 | struct memstick_host *host = card->host; |
759 | struct mspro_block_data *msb = memstick_get_drvdata(card); | 822 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
760 | struct mspro_param_register param = { | 823 | struct mspro_param_register param = { |
761 | .system = 0, | 824 | .system = MEMSTICK_SYS_PAR4, |
762 | .data_count = 0, | 825 | .data_count = 0, |
763 | .data_address = 0, | 826 | .data_address = 0, |
764 | .cmd_param = 0 | 827 | .tpc_param = 0 |
765 | }; | 828 | }; |
766 | 829 | ||
767 | card->next_request = h_mspro_block_req_init; | 830 | card->next_request = h_mspro_block_req_init; |
@@ -773,8 +836,8 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card) | |||
773 | if (card->current_mrq.error) | 836 | if (card->current_mrq.error) |
774 | return card->current_mrq.error; | 837 | return card->current_mrq.error; |
775 | 838 | ||
776 | msb->system = 0; | 839 | msb->system = MEMSTICK_SYS_PAR4; |
777 | host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PARALLEL); | 840 | host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4); |
778 | 841 | ||
779 | card->next_request = h_mspro_block_req_init; | 842 | card->next_request = h_mspro_block_req_init; |
780 | msb->mrq_handler = h_mspro_block_default; | 843 | msb->mrq_handler = h_mspro_block_default; |
@@ -783,8 +846,24 @@ static int mspro_block_switch_to_parallel(struct memstick_dev *card) | |||
783 | wait_for_completion(&card->mrq_complete); | 846 | wait_for_completion(&card->mrq_complete); |
784 | 847 | ||
785 | if (card->current_mrq.error) { | 848 | if (card->current_mrq.error) { |
786 | msb->system = 0x80; | 849 | msb->system = MEMSTICK_SYS_SERIAL; |
850 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF); | ||
851 | msleep(1000); | ||
852 | host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_ON); | ||
787 | host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); | 853 | host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_SERIAL); |
854 | |||
855 | if (memstick_set_rw_addr(card)) | ||
856 | return card->current_mrq.error; | ||
857 | |||
858 | param.system = msb->system; | ||
859 | |||
860 | card->next_request = h_mspro_block_req_init; | ||
861 | msb->mrq_handler = h_mspro_block_default; | ||
862 | memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, ¶m, | ||
863 | sizeof(param)); | ||
864 | memstick_new_req(host); | ||
865 | wait_for_completion(&card->mrq_complete); | ||
866 | |||
788 | return -EFAULT; | 867 | return -EFAULT; |
789 | } | 868 | } |
790 | 869 | ||
@@ -802,7 +881,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card) | |||
802 | .system = msb->system, | 881 | .system = msb->system, |
803 | .data_count = cpu_to_be16(1), | 882 | .data_count = cpu_to_be16(1), |
804 | .data_address = 0, | 883 | .data_address = 0, |
805 | .cmd_param = 0 | 884 | .tpc_param = 0 |
806 | }; | 885 | }; |
807 | struct mspro_attribute *attr = NULL; | 886 | struct mspro_attribute *attr = NULL; |
808 | struct mspro_sys_attr *s_attr = NULL; | 887 | struct mspro_sys_attr *s_attr = NULL; |
@@ -922,7 +1001,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card) | |||
922 | param.system = msb->system; | 1001 | param.system = msb->system; |
923 | param.data_count = cpu_to_be16((rc / msb->page_size) + 1); | 1002 | param.data_count = cpu_to_be16((rc / msb->page_size) + 1); |
924 | param.data_address = cpu_to_be32(addr / msb->page_size); | 1003 | param.data_address = cpu_to_be32(addr / msb->page_size); |
925 | param.cmd_param = 0; | 1004 | param.tpc_param = 0; |
926 | 1005 | ||
927 | sg_init_one(&msb->req_sg[0], buffer, | 1006 | sg_init_one(&msb->req_sg[0], buffer, |
928 | be16_to_cpu(param.data_count) * msb->page_size); | 1007 | be16_to_cpu(param.data_count) * msb->page_size); |
@@ -964,7 +1043,7 @@ static int mspro_block_init_card(struct memstick_dev *card) | |||
964 | struct memstick_host *host = card->host; | 1043 | struct memstick_host *host = card->host; |
965 | int rc = 0; | 1044 | int rc = 0; |
966 | 1045 | ||
967 | msb->system = 0x80; | 1046 | msb->system = MEMSTICK_SYS_SERIAL; |
968 | card->reg_addr.r_offset = offsetof(struct mspro_register, status); | 1047 | card->reg_addr.r_offset = offsetof(struct mspro_register, status); |
969 | card->reg_addr.r_length = sizeof(struct ms_status_register); | 1048 | card->reg_addr.r_length = sizeof(struct ms_status_register); |
970 | card->reg_addr.w_offset = offsetof(struct mspro_register, param); | 1049 | card->reg_addr.w_offset = offsetof(struct mspro_register, param); |
@@ -973,7 +1052,7 @@ static int mspro_block_init_card(struct memstick_dev *card) | |||
973 | if (memstick_set_rw_addr(card)) | 1052 | if (memstick_set_rw_addr(card)) |
974 | return -EIO; | 1053 | return -EIO; |
975 | 1054 | ||
976 | if (host->caps & MEMSTICK_CAP_PARALLEL) { | 1055 | if (host->caps & MEMSTICK_CAP_PAR4) { |
977 | if (mspro_block_switch_to_parallel(card)) | 1056 | if (mspro_block_switch_to_parallel(card)) |
978 | printk(KERN_WARNING "%s: could not switch to " | 1057 | printk(KERN_WARNING "%s: could not switch to " |
979 | "parallel interface\n", card->dev.bus_id); | 1058 | "parallel interface\n", card->dev.bus_id); |
@@ -1348,4 +1427,3 @@ MODULE_LICENSE("GPL"); | |||
1348 | MODULE_AUTHOR("Alex Dubov"); | 1427 | MODULE_AUTHOR("Alex Dubov"); |
1349 | MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); | 1428 | MODULE_DESCRIPTION("Sony MemoryStickPro block device driver"); |
1350 | MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl); | 1429 | MODULE_DEVICE_TABLE(memstick, mspro_block_id_tbl); |
1351 | MODULE_VERSION(DRIVER_VERSION); | ||
diff --git a/drivers/memstick/host/Kconfig b/drivers/memstick/host/Kconfig index c002fcc3c879..4ce5c8dffb68 100644 --- a/drivers/memstick/host/Kconfig +++ b/drivers/memstick/host/Kconfig | |||
@@ -20,3 +20,13 @@ config MEMSTICK_TIFM_MS | |||
20 | To compile this driver as a module, choose M here: the | 20 | To compile this driver as a module, choose M here: the |
21 | module will be called tifm_ms. | 21 | module will be called tifm_ms. |
22 | 22 | ||
23 | config MEMSTICK_JMICRON_38X | ||
24 | tristate "JMicron JMB38X MemoryStick interface support (EXPERIMENTAL)" | ||
25 | depends on EXPERIMENTAL && PCI | ||
26 | |||
27 | help | ||
28 | Say Y here if you want to be able to access MemoryStick cards with | ||
29 | the JMicron(R) JMB38X MemoryStick card reader. | ||
30 | |||
31 | To compile this driver as a module, choose M here: the | ||
32 | module will be called jmb38x_ms. | ||
diff --git a/drivers/memstick/host/Makefile b/drivers/memstick/host/Makefile index ee666380efa1..12530e4311d3 100644 --- a/drivers/memstick/host/Makefile +++ b/drivers/memstick/host/Makefile | |||
@@ -3,8 +3,8 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | ifeq ($(CONFIG_MEMSTICK_DEBUG),y) | 5 | ifeq ($(CONFIG_MEMSTICK_DEBUG),y) |
6 | EXTRA_CFLAGS += -DDEBUG | 6 | EXTRA_CFLAGS += -DDEBUG |
7 | endif | 7 | endif |
8 | 8 | ||
9 | obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o | 9 | obj-$(CONFIG_MEMSTICK_TIFM_MS) += tifm_ms.o |
10 | 10 | obj-$(CONFIG_MEMSTICK_JMICRON_38X) += jmb38x_ms.o | |
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c new file mode 100644 index 000000000000..03fe8783b1ee --- /dev/null +++ b/drivers/memstick/host/jmb38x_ms.c | |||
@@ -0,0 +1,945 @@ | |||
1 | /* | ||
2 | * jmb38x_ms.c - JMicron jmb38x MemoryStick card reader | ||
3 | * | ||
4 | * Copyright (C) 2008 Alex Dubov <oakad@yahoo.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/pci.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/highmem.h> | ||
17 | #include <linux/memstick.h> | ||
18 | |||
19 | #define DRIVER_NAME "jmb38x_ms" | ||
20 | |||
21 | static int no_dma; | ||
22 | module_param(no_dma, bool, 0644); | ||
23 | |||
24 | enum { | ||
25 | DMA_ADDRESS = 0x00, | ||
26 | BLOCK = 0x04, | ||
27 | DMA_CONTROL = 0x08, | ||
28 | TPC_P0 = 0x0c, | ||
29 | TPC_P1 = 0x10, | ||
30 | TPC = 0x14, | ||
31 | HOST_CONTROL = 0x18, | ||
32 | DATA = 0x1c, | ||
33 | STATUS = 0x20, | ||
34 | INT_STATUS = 0x24, | ||
35 | INT_STATUS_ENABLE = 0x28, | ||
36 | INT_SIGNAL_ENABLE = 0x2c, | ||
37 | TIMER = 0x30, | ||
38 | TIMER_CONTROL = 0x34, | ||
39 | PAD_OUTPUT_ENABLE = 0x38, | ||
40 | PAD_PU_PD = 0x3c, | ||
41 | CLOCK_DELAY = 0x40, | ||
42 | ADMA_ADDRESS = 0x44, | ||
43 | CLOCK_CONTROL = 0x48, | ||
44 | LED_CONTROL = 0x4c, | ||
45 | VERSION = 0x50 | ||
46 | }; | ||
47 | |||
48 | struct jmb38x_ms_host { | ||
49 | struct jmb38x_ms *chip; | ||
50 | void __iomem *addr; | ||
51 | spinlock_t lock; | ||
52 | int id; | ||
53 | char host_id[DEVICE_ID_SIZE]; | ||
54 | int irq; | ||
55 | unsigned int block_pos; | ||
56 | unsigned long timeout_jiffies; | ||
57 | struct timer_list timer; | ||
58 | struct memstick_request *req; | ||
59 | unsigned char eject:1, | ||
60 | use_dma:1; | ||
61 | unsigned char cmd_flags; | ||
62 | unsigned char io_pos; | ||
63 | unsigned int io_word[2]; | ||
64 | }; | ||
65 | |||
66 | struct jmb38x_ms { | ||
67 | struct pci_dev *pdev; | ||
68 | int host_cnt; | ||
69 | struct memstick_host *hosts[]; | ||
70 | }; | ||
71 | |||
72 | #define BLOCK_COUNT_MASK 0xffff0000 | ||
73 | #define BLOCK_SIZE_MASK 0x00000fff | ||
74 | |||
75 | #define DMA_CONTROL_ENABLE 0x00000001 | ||
76 | |||
77 | #define TPC_DATA_SEL 0x00008000 | ||
78 | #define TPC_DIR 0x00004000 | ||
79 | #define TPC_WAIT_INT 0x00002000 | ||
80 | #define TPC_GET_INT 0x00000800 | ||
81 | #define TPC_CODE_SZ_MASK 0x00000700 | ||
82 | #define TPC_DATA_SZ_MASK 0x00000007 | ||
83 | |||
84 | #define HOST_CONTROL_RESET_REQ 0x00008000 | ||
85 | #define HOST_CONTROL_REI 0x00004000 | ||
86 | #define HOST_CONTROL_LED 0x00000400 | ||
87 | #define HOST_CONTROL_FAST_CLK 0x00000200 | ||
88 | #define HOST_CONTROL_RESET 0x00000100 | ||
89 | #define HOST_CONTROL_POWER_EN 0x00000080 | ||
90 | #define HOST_CONTROL_CLOCK_EN 0x00000040 | ||
91 | #define HOST_CONTROL_IF_SHIFT 4 | ||
92 | |||
93 | #define HOST_CONTROL_IF_SERIAL 0x0 | ||
94 | #define HOST_CONTROL_IF_PAR4 0x1 | ||
95 | #define HOST_CONTROL_IF_PAR8 0x3 | ||
96 | |||
97 | #define STATUS_HAS_MEDIA 0x00000400 | ||
98 | #define STATUS_FIFO_EMPTY 0x00000200 | ||
99 | #define STATUS_FIFO_FULL 0x00000100 | ||
100 | |||
101 | #define INT_STATUS_TPC_ERR 0x00080000 | ||
102 | #define INT_STATUS_CRC_ERR 0x00040000 | ||
103 | #define INT_STATUS_TIMER_TO 0x00020000 | ||
104 | #define INT_STATUS_HSK_TO 0x00010000 | ||
105 | #define INT_STATUS_ANY_ERR 0x00008000 | ||
106 | #define INT_STATUS_FIFO_WRDY 0x00000080 | ||
107 | #define INT_STATUS_FIFO_RRDY 0x00000040 | ||
108 | #define INT_STATUS_MEDIA_OUT 0x00000010 | ||
109 | #define INT_STATUS_MEDIA_IN 0x00000008 | ||
110 | #define INT_STATUS_DMA_BOUNDARY 0x00000004 | ||
111 | #define INT_STATUS_EOTRAN 0x00000002 | ||
112 | #define INT_STATUS_EOTPC 0x00000001 | ||
113 | |||
114 | #define INT_STATUS_ALL 0x000f801f | ||
115 | |||
116 | #define PAD_OUTPUT_ENABLE_MS 0x0F3F | ||
117 | |||
118 | #define PAD_PU_PD_OFF 0x7FFF0000 | ||
119 | #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000 | ||
120 | #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000 | ||
121 | |||
122 | enum { | ||
123 | CMD_READY = 0x01, | ||
124 | FIFO_READY = 0x02, | ||
125 | REG_DATA = 0x04, | ||
126 | AUTO_GET_INT = 0x08 | ||
127 | }; | ||
128 | |||
129 | static unsigned int jmb38x_ms_read_data(struct jmb38x_ms_host *host, | ||
130 | unsigned char *buf, unsigned int length) | ||
131 | { | ||
132 | unsigned int off = 0; | ||
133 | |||
134 | while (host->io_pos && length) { | ||
135 | buf[off++] = host->io_word[0] & 0xff; | ||
136 | host->io_word[0] >>= 8; | ||
137 | length--; | ||
138 | host->io_pos--; | ||
139 | } | ||
140 | |||
141 | if (!length) | ||
142 | return off; | ||
143 | |||
144 | while (!(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { | ||
145 | if (length < 4) | ||
146 | break; | ||
147 | *(unsigned int *)(buf + off) = __raw_readl(host->addr + DATA); | ||
148 | length -= 4; | ||
149 | off += 4; | ||
150 | } | ||
151 | |||
152 | if (length | ||
153 | && !(STATUS_FIFO_EMPTY & readl(host->addr + STATUS))) { | ||
154 | host->io_word[0] = readl(host->addr + DATA); | ||
155 | for (host->io_pos = 4; host->io_pos; --host->io_pos) { | ||
156 | buf[off++] = host->io_word[0] & 0xff; | ||
157 | host->io_word[0] >>= 8; | ||
158 | length--; | ||
159 | if (!length) | ||
160 | break; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | return off; | ||
165 | } | ||
166 | |||
167 | static unsigned int jmb38x_ms_read_reg_data(struct jmb38x_ms_host *host, | ||
168 | unsigned char *buf, | ||
169 | unsigned int length) | ||
170 | { | ||
171 | unsigned int off = 0; | ||
172 | |||
173 | while (host->io_pos > 4 && length) { | ||
174 | buf[off++] = host->io_word[0] & 0xff; | ||
175 | host->io_word[0] >>= 8; | ||
176 | length--; | ||
177 | host->io_pos--; | ||
178 | } | ||
179 | |||
180 | if (!length) | ||
181 | return off; | ||
182 | |||
183 | while (host->io_pos && length) { | ||
184 | buf[off++] = host->io_word[1] & 0xff; | ||
185 | host->io_word[1] >>= 8; | ||
186 | length--; | ||
187 | host->io_pos--; | ||
188 | } | ||
189 | |||
190 | return off; | ||
191 | } | ||
192 | |||
193 | static unsigned int jmb38x_ms_write_data(struct jmb38x_ms_host *host, | ||
194 | unsigned char *buf, | ||
195 | unsigned int length) | ||
196 | { | ||
197 | unsigned int off = 0; | ||
198 | |||
199 | if (host->io_pos) { | ||
200 | while (host->io_pos < 4 && length) { | ||
201 | host->io_word[0] |= buf[off++] << (host->io_pos * 8); | ||
202 | host->io_pos++; | ||
203 | length--; | ||
204 | } | ||
205 | } | ||
206 | |||
207 | if (host->io_pos == 4 | ||
208 | && !(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { | ||
209 | writel(host->io_word[0], host->addr + DATA); | ||
210 | host->io_pos = 0; | ||
211 | host->io_word[0] = 0; | ||
212 | } else if (host->io_pos) { | ||
213 | return off; | ||
214 | } | ||
215 | |||
216 | if (!length) | ||
217 | return off; | ||
218 | |||
219 | while (!(STATUS_FIFO_FULL & readl(host->addr + STATUS))) { | ||
220 | if (length < 4) | ||
221 | break; | ||
222 | |||
223 | __raw_writel(*(unsigned int *)(buf + off), | ||
224 | host->addr + DATA); | ||
225 | length -= 4; | ||
226 | off += 4; | ||
227 | } | ||
228 | |||
229 | switch (length) { | ||
230 | case 3: | ||
231 | host->io_word[0] |= buf[off + 2] << 16; | ||
232 | host->io_pos++; | ||
233 | case 2: | ||
234 | host->io_word[0] |= buf[off + 1] << 8; | ||
235 | host->io_pos++; | ||
236 | case 1: | ||
237 | host->io_word[0] |= buf[off]; | ||
238 | host->io_pos++; | ||
239 | } | ||
240 | |||
241 | off += host->io_pos; | ||
242 | |||
243 | return off; | ||
244 | } | ||
245 | |||
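jmb38x_ms_write_data() above moves data to the FIFO a 32-bit word at a time; when fewer than four bytes remain, the fall-through switch packs them into the io_word staging register so they can be flushed later. A standalone run of that trailing-byte packing with example bytes:

#include <stdio.h>

int main(void)
{
        unsigned char buf[3] = { 0xaa, 0xbb, 0xcc };
        unsigned int io_word = 0;
        unsigned int io_pos = 0;
        unsigned int length = 3;
        unsigned int off = 0;

        /* Same shifts as the tail of jmb38x_ms_write_data(): byte 0
         * lands in the least significant byte of the staging word. */
        switch (length) {
        case 3:
                io_word |= buf[off + 2] << 16;
                io_pos++;
                /* fall through */
        case 2:
                io_word |= buf[off + 1] << 8;
                io_pos++;
                /* fall through */
        case 1:
                io_word |= buf[off];
                io_pos++;
        }
        printf("io_word = 0x%08x, io_pos = %u\n", io_word, io_pos);
        /* prints io_word = 0x00ccbbaa, io_pos = 3 */
        return 0;
}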
246 | static unsigned int jmb38x_ms_write_reg_data(struct jmb38x_ms_host *host, | ||
247 | unsigned char *buf, | ||
248 | unsigned int length) | ||
249 | { | ||
250 | unsigned int off = 0; | ||
251 | |||
252 | while (host->io_pos < 4 && length) { | ||
253 | host->io_word[0] &= ~(0xff << (host->io_pos * 8)); | ||
254 | host->io_word[0] |= buf[off++] << (host->io_pos * 8); | ||
255 | host->io_pos++; | ||
256 | length--; | ||
257 | } | ||
258 | |||
259 | if (!length) | ||
260 | return off; | ||
261 | |||
262 | while (host->io_pos < 8 && length) { | ||
263 | host->io_word[1] &= ~(0xff << (host->io_pos * 8)); | ||
264 | host->io_word[1] |= buf[off++] << (host->io_pos * 8); | ||
265 | host->io_pos++; | ||
266 | length--; | ||
267 | } | ||
268 | |||
269 | return off; | ||
270 | } | ||
271 | |||
272 | static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host) | ||
273 | { | ||
274 | unsigned int length; | ||
275 | unsigned int off; | ||
276 | unsigned int t_size, p_off, p_cnt; | ||
277 | unsigned char *buf; | ||
278 | struct page *pg; | ||
279 | unsigned long flags = 0; | ||
280 | |||
281 | if (host->req->long_data) { | ||
282 | length = host->req->sg.length - host->block_pos; | ||
283 | off = host->req->sg.offset + host->block_pos; | ||
284 | } else { | ||
285 | length = host->req->data_len - host->block_pos; | ||
286 | off = 0; | ||
287 | } | ||
288 | |||
289 | while (length) { | ||
290 | if (host->req->long_data) { | ||
291 | pg = nth_page(sg_page(&host->req->sg), | ||
292 | off >> PAGE_SHIFT); | ||
293 | p_off = offset_in_page(off); | ||
294 | p_cnt = PAGE_SIZE - p_off; | ||
295 | p_cnt = min(p_cnt, length); | ||
296 | |||
297 | local_irq_save(flags); | ||
298 | buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off; | ||
299 | } else { | ||
300 | buf = host->req->data + host->block_pos; | ||
301 | p_cnt = host->req->data_len - host->block_pos; | ||
302 | } | ||
303 | |||
304 | if (host->req->data_dir == WRITE) | ||
305 | t_size = !(host->cmd_flags & REG_DATA) | ||
306 | ? jmb38x_ms_write_data(host, buf, p_cnt) | ||
307 | : jmb38x_ms_write_reg_data(host, buf, p_cnt); | ||
308 | else | ||
309 | t_size = !(host->cmd_flags & REG_DATA) | ||
310 | ? jmb38x_ms_read_data(host, buf, p_cnt) | ||
311 | : jmb38x_ms_read_reg_data(host, buf, p_cnt); | ||
312 | |||
313 | if (host->req->long_data) { | ||
314 | kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ); | ||
315 | local_irq_restore(flags); | ||
316 | } | ||
317 | |||
318 | if (!t_size) | ||
319 | break; | ||
320 | host->block_pos += t_size; | ||
321 | length -= t_size; | ||
322 | off += t_size; | ||
323 | } | ||
324 | |||
325 | if (!length && host->req->data_dir == WRITE) { | ||
326 | if (host->cmd_flags & REG_DATA) { | ||
327 | writel(host->io_word[0], host->addr + TPC_P0); | ||
328 | writel(host->io_word[1], host->addr + TPC_P1); | ||
329 | } else if (host->io_pos) { | ||
330 | writel(host->io_word[0], host->addr + DATA); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | return length; | ||
335 | } | ||
336 | |||
337 | static int jmb38x_ms_issue_cmd(struct memstick_host *msh) | ||
338 | { | ||
339 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
340 | unsigned char *data; | ||
341 | unsigned int data_len, cmd, t_val; | ||
342 | |||
343 | if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) { | ||
344 | dev_dbg(msh->cdev.dev, "no media status\n"); | ||
345 | host->req->error = -ETIME; | ||
346 | return host->req->error; | ||
347 | } | ||
348 | |||
349 | dev_dbg(msh->cdev.dev, "control %08x\n", | ||
350 | readl(host->addr + HOST_CONTROL)); | ||
351 | dev_dbg(msh->cdev.dev, "status %08x\n", readl(host->addr + INT_STATUS)); | ||
352 | dev_dbg(msh->cdev.dev, "hstatus %08x\n", readl(host->addr + STATUS)); | ||
353 | |||
354 | host->cmd_flags = 0; | ||
355 | host->block_pos = 0; | ||
356 | host->io_pos = 0; | ||
357 | host->io_word[0] = 0; | ||
358 | host->io_word[1] = 0; | ||
359 | |||
360 | cmd = host->req->tpc << 16; | ||
361 | cmd |= TPC_DATA_SEL; | ||
362 | |||
363 | if (host->req->data_dir == READ) | ||
364 | cmd |= TPC_DIR; | ||
365 | if (host->req->need_card_int) | ||
366 | cmd |= TPC_WAIT_INT; | ||
367 | if (host->req->get_int_reg) | ||
368 | cmd |= TPC_GET_INT; | ||
369 | |||
370 | data = host->req->data; | ||
371 | |||
372 | host->use_dma = !no_dma; | ||
373 | |||
374 | if (host->req->long_data) { | ||
375 | data_len = host->req->sg.length; | ||
376 | } else { | ||
377 | data_len = host->req->data_len; | ||
378 | host->use_dma = 0; | ||
379 | } | ||
380 | |||
381 | if (data_len <= 8) { | ||
382 | cmd &= ~(TPC_DATA_SEL | 0xf); | ||
383 | host->cmd_flags |= REG_DATA; | ||
384 | cmd |= data_len & 0xf; | ||
385 | host->use_dma = 0; | ||
386 | } | ||
387 | |||
388 | if (host->use_dma) { | ||
389 | if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1, | ||
390 | host->req->data_dir == READ | ||
391 | ? PCI_DMA_FROMDEVICE | ||
392 | : PCI_DMA_TODEVICE)) { | ||
393 | host->req->error = -ENOMEM; | ||
394 | return host->req->error; | ||
395 | } | ||
396 | data_len = sg_dma_len(&host->req->sg); | ||
397 | writel(sg_dma_address(&host->req->sg), | ||
398 | host->addr + DMA_ADDRESS); | ||
399 | writel(((1 << 16) & BLOCK_COUNT_MASK) | ||
400 | | (data_len & BLOCK_SIZE_MASK), | ||
401 | host->addr + BLOCK); | ||
402 | writel(DMA_CONTROL_ENABLE, host->addr + DMA_CONTROL); | ||
403 | } else if (!(host->cmd_flags & REG_DATA)) { | ||
404 | writel(((1 << 16) & BLOCK_COUNT_MASK) | ||
405 | | (data_len & BLOCK_SIZE_MASK), | ||
406 | host->addr + BLOCK); | ||
407 | t_val = readl(host->addr + INT_STATUS_ENABLE); | ||
408 | t_val |= host->req->data_dir == READ | ||
409 | ? INT_STATUS_FIFO_RRDY | ||
410 | : INT_STATUS_FIFO_WRDY; | ||
411 | |||
412 | writel(t_val, host->addr + INT_STATUS_ENABLE); | ||
413 | writel(t_val, host->addr + INT_SIGNAL_ENABLE); | ||
414 | } else { | ||
415 | cmd &= ~(TPC_DATA_SEL | 0xf); | ||
416 | host->cmd_flags |= REG_DATA; | ||
417 | cmd |= data_len & 0xf; | ||
418 | |||
419 | if (host->req->data_dir == WRITE) { | ||
420 | jmb38x_ms_transfer_data(host); | ||
421 | writel(host->io_word[0], host->addr + TPC_P0); | ||
422 | writel(host->io_word[1], host->addr + TPC_P1); | ||
423 | } | ||
424 | } | ||
425 | |||
426 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | ||
427 | writel(HOST_CONTROL_LED | readl(host->addr + HOST_CONTROL), | ||
428 | host->addr + HOST_CONTROL); | ||
429 | host->req->error = 0; | ||
430 | |||
431 | writel(cmd, host->addr + TPC); | ||
432 | dev_dbg(msh->cdev.dev, "executing TPC %08x, len %x\n", cmd, data_len); | ||
433 | |||
434 | return 0; | ||
435 | } | ||
436 | |||
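jmb38x_ms_issue_cmd() above composes the TPC register value: the TPC code in bits 16 and up, direction and interrupt flags, and either TPC_DATA_SEL for FIFO or DMA transfers or, for short register transfers of eight bytes or less, the length in the low nibble with TPC_DATA_SEL cleared. A standalone composition of one such word; the TPC code used is just an example value:

#include <stdio.h>

#define TPC_DATA_SEL  0x00008000
#define TPC_DIR       0x00004000
#define TPC_WAIT_INT  0x00002000
#define TPC_GET_INT   0x00000800

int main(void)
{
        unsigned int tpc = 0x04;        /* example TPC code          */
        unsigned int data_len = 8;      /* 8 bytes of register data  */
        unsigned int cmd = tpc << 16;

        cmd |= TPC_DATA_SEL;
        cmd |= TPC_DIR;                 /* READ direction            */
        if (data_len <= 8) {
                /* register-data path: inline length, no FIFO select */
                cmd &= ~(TPC_DATA_SEL | 0xf);
                cmd |= data_len & 0xf;
        }
        printf("TPC word = 0x%08x\n", cmd);   /* prints 0x00044008 */
        return 0;
}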
437 | static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last) | ||
438 | { | ||
439 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
440 | unsigned int t_val = 0; | ||
441 | int rc; | ||
442 | |||
443 | del_timer(&host->timer); | ||
444 | |||
445 | dev_dbg(msh->cdev.dev, "c control %08x\n", | ||
446 | readl(host->addr + HOST_CONTROL)); | ||
447 | dev_dbg(msh->cdev.dev, "c status %08x\n", | ||
448 | readl(host->addr + INT_STATUS)); | ||
449 | dev_dbg(msh->cdev.dev, "c hstatus %08x\n", readl(host->addr + STATUS)); | ||
450 | |||
451 | if (host->req->get_int_reg) { | ||
452 | t_val = readl(host->addr + TPC_P0); | ||
453 | host->req->int_reg = (t_val & 0xff); | ||
454 | } | ||
455 | |||
456 | if (host->use_dma) { | ||
457 | writel(0, host->addr + DMA_CONTROL); | ||
458 | pci_unmap_sg(host->chip->pdev, &host->req->sg, 1, | ||
459 | host->req->data_dir == READ | ||
460 | ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); | ||
461 | } else { | ||
462 | t_val = readl(host->addr + INT_STATUS_ENABLE); | ||
463 | if (host->req->data_dir == READ) | ||
464 | t_val &= ~INT_STATUS_FIFO_RRDY; | ||
465 | else | ||
466 | t_val &= ~INT_STATUS_FIFO_WRDY; | ||
467 | |||
468 | writel(t_val, host->addr + INT_STATUS_ENABLE); | ||
469 | writel(t_val, host->addr + INT_SIGNAL_ENABLE); | ||
470 | } | ||
471 | |||
472 | writel((~HOST_CONTROL_LED) & readl(host->addr + HOST_CONTROL), | ||
473 | host->addr + HOST_CONTROL); | ||
474 | |||
475 | if (!last) { | ||
476 | do { | ||
477 | rc = memstick_next_req(msh, &host->req); | ||
478 | } while (!rc && jmb38x_ms_issue_cmd(msh)); | ||
479 | } else { | ||
480 | do { | ||
481 | rc = memstick_next_req(msh, &host->req); | ||
482 | if (!rc) | ||
483 | host->req->error = -ETIME; | ||
484 | } while (!rc); | ||
485 | } | ||
486 | } | ||
487 | |||
488 | static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id) | ||
489 | { | ||
490 | struct memstick_host *msh = dev_id; | ||
491 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
492 | unsigned int irq_status; | ||
493 | |||
494 | spin_lock(&host->lock); | ||
495 | irq_status = readl(host->addr + INT_STATUS); | ||
496 | dev_dbg(&host->chip->pdev->dev, "irq_status = %08x\n", irq_status); | ||
497 | if (irq_status == 0 || irq_status == (~0)) { | ||
498 | spin_unlock(&host->lock); | ||
499 | return IRQ_NONE; | ||
500 | } | ||
501 | |||
502 | if (host->req) { | ||
503 | if (irq_status & INT_STATUS_ANY_ERR) { | ||
504 | if (irq_status & INT_STATUS_CRC_ERR) | ||
505 | host->req->error = -EILSEQ; | ||
506 | else | ||
507 | host->req->error = -ETIME; | ||
508 | } else { | ||
509 | if (host->use_dma) { | ||
510 | if (irq_status & INT_STATUS_EOTRAN) | ||
511 | host->cmd_flags |= FIFO_READY; | ||
512 | } else { | ||
513 | if (irq_status & (INT_STATUS_FIFO_RRDY | ||
514 | | INT_STATUS_FIFO_WRDY)) | ||
515 | jmb38x_ms_transfer_data(host); | ||
516 | |||
517 | if (irq_status & INT_STATUS_EOTRAN) { | ||
518 | jmb38x_ms_transfer_data(host); | ||
519 | host->cmd_flags |= FIFO_READY; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | if (irq_status & INT_STATUS_EOTPC) { | ||
524 | host->cmd_flags |= CMD_READY; | ||
525 | if (host->cmd_flags & REG_DATA) { | ||
526 | if (host->req->data_dir == READ) { | ||
527 | host->io_word[0] | ||
528 | = readl(host->addr | ||
529 | + TPC_P0); | ||
530 | host->io_word[1] | ||
531 | = readl(host->addr | ||
532 | + TPC_P1); | ||
533 | host->io_pos = 8; | ||
534 | |||
535 | jmb38x_ms_transfer_data(host); | ||
536 | } | ||
537 | host->cmd_flags |= FIFO_READY; | ||
538 | } | ||
539 | } | ||
540 | } | ||
541 | } | ||
542 | |||
543 | if (irq_status & (INT_STATUS_MEDIA_IN | INT_STATUS_MEDIA_OUT)) { | ||
544 | dev_dbg(&host->chip->pdev->dev, "media changed\n"); | ||
545 | memstick_detect_change(msh); | ||
546 | } | ||
547 | |||
548 | writel(irq_status, host->addr + INT_STATUS); | ||
549 | |||
550 | if (host->req | ||
551 | && (((host->cmd_flags & CMD_READY) | ||
552 | && (host->cmd_flags & FIFO_READY)) | ||
553 | || host->req->error)) | ||
554 | jmb38x_ms_complete_cmd(msh, 0); | ||
555 | |||
556 | spin_unlock(&host->lock); | ||
557 | return IRQ_HANDLED; | ||
558 | } | ||
559 | |||
560 | static void jmb38x_ms_abort(unsigned long data) | ||
561 | { | ||
562 | struct memstick_host *msh = (struct memstick_host *)data; | ||
563 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
564 | unsigned long flags; | ||
565 | |||
566 | dev_dbg(&host->chip->pdev->dev, "abort\n"); | ||
567 | spin_lock_irqsave(&host->lock, flags); | ||
568 | if (host->req) { | ||
569 | host->req->error = -ETIME; | ||
570 | jmb38x_ms_complete_cmd(msh, 0); | ||
571 | } | ||
572 | spin_unlock_irqrestore(&host->lock, flags); | ||
573 | } | ||
574 | |||
575 | static void jmb38x_ms_request(struct memstick_host *msh) | ||
576 | { | ||
577 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
578 | unsigned long flags; | ||
579 | int rc; | ||
580 | |||
581 | spin_lock_irqsave(&host->lock, flags); | ||
582 | if (host->req) { | ||
583 | spin_unlock_irqrestore(&host->lock, flags); | ||
584 | BUG(); | ||
585 | return; | ||
586 | } | ||
587 | |||
588 | do { | ||
589 | rc = memstick_next_req(msh, &host->req); | ||
590 | } while (!rc && jmb38x_ms_issue_cmd(msh)); | ||
591 | spin_unlock_irqrestore(&host->lock, flags); | ||
592 | } | ||
593 | |||
594 | static void jmb38x_ms_reset(struct jmb38x_ms_host *host) | ||
595 | { | ||
596 | unsigned int host_ctl = readl(host->addr + HOST_CONTROL); | ||
597 | |||
598 | writel(host_ctl | HOST_CONTROL_RESET_REQ | HOST_CONTROL_RESET, | ||
599 | host->addr + HOST_CONTROL); | ||
600 | |||
601 | while (HOST_CONTROL_RESET_REQ | ||
602 | & (host_ctl = readl(host->addr + HOST_CONTROL))) { | ||
603 | ndelay(100); | ||
604 | dev_dbg(&host->chip->pdev->dev, "reset\n"); | ||
605 | } | ||
606 | |||
607 | writel(INT_STATUS_ALL, host->addr + INT_STATUS_ENABLE); | ||
608 | writel(INT_STATUS_ALL, host->addr + INT_SIGNAL_ENABLE); | ||
609 | |||
610 | dev_dbg(&host->chip->pdev->dev, "reset\n"); | ||
611 | } | ||
612 | |||
613 | static void jmb38x_ms_set_param(struct memstick_host *msh, | ||
614 | enum memstick_param param, | ||
615 | int value) | ||
616 | { | ||
617 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
618 | unsigned int host_ctl; | ||
619 | unsigned long flags; | ||
620 | |||
621 | spin_lock_irqsave(&host->lock, flags); | ||
622 | |||
623 | switch (param) { | ||
624 | case MEMSTICK_POWER: | ||
625 | if (value == MEMSTICK_POWER_ON) { | ||
626 | jmb38x_ms_reset(host); | ||
627 | |||
628 | writel(host->id ? PAD_PU_PD_ON_MS_SOCK1 | ||
629 | : PAD_PU_PD_ON_MS_SOCK0, | ||
630 | host->addr + PAD_PU_PD); | ||
631 | |||
632 | writel(PAD_OUTPUT_ENABLE_MS, | ||
633 | host->addr + PAD_OUTPUT_ENABLE); | ||
634 | |||
635 | host_ctl = readl(host->addr + HOST_CONTROL); | ||
636 | host_ctl |= 7; | ||
637 | writel(host_ctl | (HOST_CONTROL_POWER_EN | ||
638 | | HOST_CONTROL_CLOCK_EN), | ||
639 | host->addr + HOST_CONTROL); | ||
640 | |||
641 | dev_dbg(&host->chip->pdev->dev, "power on\n"); | ||
642 | } else if (value == MEMSTICK_POWER_OFF) { | ||
643 | writel(readl(host->addr + HOST_CONTROL) | ||
644 | & ~(HOST_CONTROL_POWER_EN | ||
645 | | HOST_CONTROL_CLOCK_EN), | ||
646 | host->addr + HOST_CONTROL); | ||
647 | writel(0, host->addr + PAD_OUTPUT_ENABLE); | ||
648 | writel(PAD_PU_PD_OFF, host->addr + PAD_PU_PD); | ||
649 | dev_dbg(&host->chip->pdev->dev, "power off\n"); | ||
650 | } | ||
651 | break; | ||
652 | case MEMSTICK_INTERFACE: | ||
653 | /* jmb38x_ms_reset(host); */ | ||
654 | |||
655 | host_ctl = readl(host->addr + HOST_CONTROL); | ||
656 | host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT); | ||
657 | /* host_ctl |= 7; */ | ||
658 | |||
659 | if (value == MEMSTICK_SERIAL) { | ||
660 | host_ctl &= ~HOST_CONTROL_FAST_CLK; | ||
661 | host_ctl |= HOST_CONTROL_IF_SERIAL | ||
662 | << HOST_CONTROL_IF_SHIFT; | ||
663 | host_ctl |= HOST_CONTROL_REI; | ||
664 | writel(0, host->addr + CLOCK_DELAY); | ||
665 | } else if (value == MEMSTICK_PAR4) { | ||
666 | host_ctl |= HOST_CONTROL_FAST_CLK; | ||
667 | host_ctl |= HOST_CONTROL_IF_PAR4 | ||
668 | << HOST_CONTROL_IF_SHIFT; | ||
669 | host_ctl &= ~HOST_CONTROL_REI; | ||
670 | writel(4, host->addr + CLOCK_DELAY); | ||
671 | } else if (value == MEMSTICK_PAR8) { | ||
672 | host_ctl |= HOST_CONTROL_FAST_CLK; | ||
673 | host_ctl |= HOST_CONTROL_IF_PAR8 | ||
674 | << HOST_CONTROL_IF_SHIFT; | ||
675 | host_ctl &= ~HOST_CONTROL_REI; | ||
676 | writel(4, host->addr + CLOCK_DELAY); | ||
677 | } | ||
678 | writel(host_ctl, host->addr + HOST_CONTROL); | ||
679 | break; | ||
680 | }; | ||
681 | |||
682 | spin_unlock_irqrestore(&host->lock, flags); | ||
683 | } | ||
684 | |||
685 | #ifdef CONFIG_PM | ||
686 | |||
687 | static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state) | ||
688 | { | ||
689 | struct jmb38x_ms *jm = pci_get_drvdata(dev); | ||
690 | int cnt; | ||
691 | |||
692 | for (cnt = 0; cnt < jm->host_cnt; ++cnt) { | ||
693 | if (!jm->hosts[cnt]) | ||
694 | break; | ||
695 | memstick_suspend_host(jm->hosts[cnt]); | ||
696 | } | ||
697 | |||
698 | pci_save_state(dev); | ||
699 | pci_enable_wake(dev, pci_choose_state(dev, state), 0); | ||
700 | pci_disable_device(dev); | ||
701 | pci_set_power_state(dev, pci_choose_state(dev, state)); | ||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | static int jmb38x_ms_resume(struct pci_dev *dev) | ||
706 | { | ||
707 | struct jmb38x_ms *jm = pci_get_drvdata(dev); | ||
708 | int rc; | ||
709 | |||
710 | pci_set_power_state(dev, PCI_D0); | ||
711 | pci_restore_state(dev); | ||
712 | rc = pci_enable_device(dev); | ||
713 | if (rc) | ||
714 | return rc; | ||
715 | pci_set_master(dev); | ||
716 | |||
717 | pci_read_config_dword(dev, 0xac, &rc); | ||
718 | pci_write_config_dword(dev, 0xac, rc | 0x00470000); | ||
719 | |||
720 | for (rc = 0; rc < jm->host_cnt; ++rc) { | ||
721 | if (!jm->hosts[rc]) | ||
722 | break; | ||
723 | memstick_resume_host(jm->hosts[rc]); | ||
724 | memstick_detect_change(jm->hosts[rc]); | ||
725 | } | ||
726 | |||
727 | return 0; | ||
728 | } | ||
729 | |||
730 | #else | ||
731 | |||
732 | #define jmb38x_ms_suspend NULL | ||
733 | #define jmb38x_ms_resume NULL | ||
734 | |||
735 | #endif /* CONFIG_PM */ | ||
736 | |||
737 | static int jmb38x_ms_count_slots(struct pci_dev *pdev) | ||
738 | { | ||
739 | int cnt, rc = 0; | ||
740 | |||
741 | for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) { | ||
742 | if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt))) | ||
743 | break; | ||
744 | |||
745 | if (256 != pci_resource_len(pdev, cnt)) | ||
746 | break; | ||
747 | |||
748 | ++rc; | ||
749 | } | ||
750 | return rc; | ||
751 | } | ||
752 | |||
753 | static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) | ||
754 | { | ||
755 | struct memstick_host *msh; | ||
756 | struct jmb38x_ms_host *host; | ||
757 | |||
758 | msh = memstick_alloc_host(sizeof(struct jmb38x_ms_host), | ||
759 | &jm->pdev->dev); | ||
760 | if (!msh) | ||
761 | return NULL; | ||
762 | |||
763 | host = memstick_priv(msh); | ||
764 | host->chip = jm; | ||
765 | host->addr = ioremap(pci_resource_start(jm->pdev, cnt), | ||
766 | pci_resource_len(jm->pdev, cnt)); | ||
767 | if (!host->addr) | ||
768 | goto err_out_free; | ||
769 | |||
770 | spin_lock_init(&host->lock); | ||
771 | host->id = cnt; | ||
772 | snprintf(host->host_id, DEVICE_ID_SIZE, DRIVER_NAME ":slot%d", | ||
773 | host->id); | ||
774 | host->irq = jm->pdev->irq; | ||
775 | host->timeout_jiffies = msecs_to_jiffies(4000); | ||
776 | msh->request = jmb38x_ms_request; | ||
777 | msh->set_param = jmb38x_ms_set_param; | ||
778 | /* | ||
779 | msh->caps = MEMSTICK_CAP_AUTO_GET_INT | MEMSTICK_CAP_PAR4 | ||
780 | | MEMSTICK_CAP_PAR8; | ||
781 | */ | ||
782 | msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; | ||
783 | |||
784 | setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh); | ||
785 | |||
786 | if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id, | ||
787 | msh)) | ||
788 | return msh; | ||
789 | |||
790 | iounmap(host->addr); | ||
791 | err_out_free: | ||
792 | kfree(msh); | ||
793 | return NULL; | ||
794 | } | ||
795 | |||
796 | static void jmb38x_ms_free_host(struct memstick_host *msh) | ||
797 | { | ||
798 | struct jmb38x_ms_host *host = memstick_priv(msh); | ||
799 | |||
800 | free_irq(host->irq, msh); | ||
801 | iounmap(host->addr); | ||
802 | memstick_free_host(msh); | ||
803 | } | ||
804 | |||
805 | static int jmb38x_ms_probe(struct pci_dev *pdev, | ||
806 | const struct pci_device_id *dev_id) | ||
807 | { | ||
808 | struct jmb38x_ms *jm; | ||
809 | int pci_dev_busy = 0; | ||
810 | int rc, cnt; | ||
811 | |||
812 | rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
813 | if (rc) | ||
814 | return rc; | ||
815 | |||
816 | rc = pci_enable_device(pdev); | ||
817 | if (rc) | ||
818 | return rc; | ||
819 | |||
820 | pci_set_master(pdev); | ||
821 | |||
822 | rc = pci_request_regions(pdev, DRIVER_NAME); | ||
823 | if (rc) { | ||
824 | pci_dev_busy = 1; | ||
825 | goto err_out; | ||
826 | } | ||
827 | |||
828 | pci_read_config_dword(pdev, 0xac, &rc); | ||
829 | pci_write_config_dword(pdev, 0xac, rc | 0x00470000); | ||
830 | |||
831 | cnt = jmb38x_ms_count_slots(pdev); | ||
832 | if (!cnt) { | ||
833 | rc = -ENODEV; | ||
834 | pci_dev_busy = 1; | ||
835 | goto err_out; | ||
836 | } | ||
837 | |||
838 | jm = kzalloc(sizeof(struct jmb38x_ms) | ||
839 | + cnt * sizeof(struct memstick_host *), GFP_KERNEL); | ||
840 | if (!jm) { | ||
841 | rc = -ENOMEM; | ||
842 | goto err_out_int; | ||
843 | } | ||
844 | |||
845 | jm->pdev = pdev; | ||
846 | jm->host_cnt = cnt; | ||
847 | pci_set_drvdata(pdev, jm); | ||
848 | |||
849 | for (cnt = 0; cnt < jm->host_cnt; ++cnt) { | ||
850 | jm->hosts[cnt] = jmb38x_ms_alloc_host(jm, cnt); | ||
851 | if (!jm->hosts[cnt]) | ||
852 | break; | ||
853 | |||
854 | rc = memstick_add_host(jm->hosts[cnt]); | ||
855 | |||
856 | if (rc) { | ||
857 | jmb38x_ms_free_host(jm->hosts[cnt]); | ||
858 | jm->hosts[cnt] = NULL; | ||
859 | break; | ||
860 | } | ||
861 | } | ||
862 | |||
863 | if (cnt) | ||
864 | return 0; | ||
865 | |||
866 | rc = -ENODEV; | ||
867 | |||
868 | pci_set_drvdata(pdev, NULL); | ||
869 | kfree(jm); | ||
870 | err_out_int: | ||
871 | pci_release_regions(pdev); | ||
872 | err_out: | ||
873 | if (!pci_dev_busy) | ||
874 | pci_disable_device(pdev); | ||
875 | return rc; | ||
876 | } | ||
877 | |||
878 | static void jmb38x_ms_remove(struct pci_dev *dev) | ||
879 | { | ||
880 | struct jmb38x_ms *jm = pci_get_drvdata(dev); | ||
881 | struct jmb38x_ms_host *host; | ||
882 | int cnt; | ||
883 | unsigned long flags; | ||
884 | |||
885 | for (cnt = 0; cnt < jm->host_cnt; ++cnt) { | ||
886 | if (!jm->hosts[cnt]) | ||
887 | break; | ||
888 | |||
889 | host = memstick_priv(jm->hosts[cnt]); | ||
890 | |||
891 | writel(0, host->addr + INT_SIGNAL_ENABLE); | ||
892 | writel(0, host->addr + INT_STATUS_ENABLE); | ||
893 | mmiowb(); | ||
894 | dev_dbg(&jm->pdev->dev, "interrupts off\n"); | ||
895 | spin_lock_irqsave(&host->lock, flags); | ||
896 | if (host->req) { | ||
897 | host->req->error = -ETIME; | ||
898 | jmb38x_ms_complete_cmd(jm->hosts[cnt], 1); | ||
899 | } | ||
900 | spin_unlock_irqrestore(&host->lock, flags); | ||
901 | |||
902 | memstick_remove_host(jm->hosts[cnt]); | ||
903 | dev_dbg(&jm->pdev->dev, "host removed\n"); | ||
904 | |||
905 | jmb38x_ms_free_host(jm->hosts[cnt]); | ||
906 | } | ||
907 | |||
908 | pci_set_drvdata(dev, NULL); | ||
909 | pci_release_regions(dev); | ||
910 | pci_disable_device(dev); | ||
911 | kfree(jm); | ||
912 | } | ||
913 | |||
914 | static struct pci_device_id jmb38x_ms_id_tbl [] = { | ||
915 | { PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID, | ||
916 | PCI_ANY_ID, 0, 0, 0 }, | ||
917 | { } | ||
918 | }; | ||
919 | |||
920 | static struct pci_driver jmb38x_ms_driver = { | ||
921 | .name = DRIVER_NAME, | ||
922 | .id_table = jmb38x_ms_id_tbl, | ||
923 | .probe = jmb38x_ms_probe, | ||
924 | .remove = jmb38x_ms_remove, | ||
925 | .suspend = jmb38x_ms_suspend, | ||
926 | .resume = jmb38x_ms_resume | ||
927 | }; | ||
928 | |||
929 | static int __init jmb38x_ms_init(void) | ||
930 | { | ||
931 | return pci_register_driver(&jmb38x_ms_driver); | ||
932 | } | ||
933 | |||
934 | static void __exit jmb38x_ms_exit(void) | ||
935 | { | ||
936 | pci_unregister_driver(&jmb38x_ms_driver); | ||
937 | } | ||
938 | |||
939 | MODULE_AUTHOR("Alex Dubov"); | ||
940 | MODULE_DESCRIPTION("JMicron jmb38x MemoryStick driver"); | ||
941 | MODULE_LICENSE("GPL"); | ||
942 | MODULE_DEVICE_TABLE(pci, jmb38x_ms_id_tbl); | ||
943 | |||
944 | module_init(jmb38x_ms_init); | ||
945 | module_exit(jmb38x_ms_exit); | ||
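
A note on the request loop that appears in jmb38x_ms_request() and jmb38x_ms_complete_cmd() above: memstick_next_req() returns 0 and hands back the next queued request, while jmb38x_ms_issue_cmd() returns non-zero only when the command could not even be started, so the do/while keeps pulling requests until the queue is empty or a command has actually been handed to the hardware. The fragment below is a minimal userspace sketch of that control flow only; mock_next_req, mock_issue_cmd and struct mock_req are invented stand-ins, not kernel APIs.

        /* Userspace mock of the request-draining loop; names are invented. */
        #include <stdio.h>

        struct mock_req { int id; int will_fail; int error; };

        static struct mock_req queue[] = {
                { .id = 1, .will_fail = 1 },    /* rejected immediately, loop continues */
                { .id = 2, .will_fail = 0 },    /* accepted, loop stops */
                { .id = 3, .will_fail = 0 },
        };
        static int q_pos;

        /* Analogue of memstick_next_req(): 0 plus a request, or non-zero when
         * the queue is empty. */
        static int mock_next_req(struct mock_req **req)
        {
                if (q_pos >= (int)(sizeof(queue) / sizeof(queue[0])))
                        return 1;
                *req = &queue[q_pos++];
                return 0;
        }

        /* Analogue of jmb38x_ms_issue_cmd(): 0 means "started, wait for the
         * interrupt"; non-zero means "failed at once, fetch the next request". */
        static int mock_issue_cmd(struct mock_req *req)
        {
                if (req->will_fail) {
                        req->error = -1;
                        printf("req %d rejected\n", req->id);
                        return req->error;
                }
                printf("req %d issued\n", req->id);
                return 0;
        }

        int main(void)
        {
                struct mock_req *req;
                int rc;

                do {
                        rc = mock_next_req(&req);
                } while (!rc && mock_issue_cmd(req));

                return 0;
        }

Compiled as plain C this prints "req 1 rejected" then "req 2 issued", mirroring how a request that fails at issue time is skipped and the next one is started.
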
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index 4fb24215bd95..2b5bf52a8302 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c | |||
@@ -20,293 +20,315 @@ | |||
20 | #include <asm/io.h> | 20 | #include <asm/io.h> |
21 | 21 | ||
22 | #define DRIVER_NAME "tifm_ms" | 22 | #define DRIVER_NAME "tifm_ms" |
23 | #define DRIVER_VERSION "0.1" | ||
24 | 23 | ||
25 | static int no_dma; | 24 | static int no_dma; |
26 | module_param(no_dma, bool, 0644); | 25 | module_param(no_dma, bool, 0644); |
27 | 26 | ||
28 | #define TIFM_MS_TIMEOUT 0x00100 | 27 | /* |
29 | #define TIFM_MS_BADCRC 0x00200 | 28 | * Some control bits of TIFM appear to conform to Sony's reference design, |
30 | #define TIFM_MS_EOTPC 0x01000 | 29 | * so I'm just assuming they all are. |
31 | #define TIFM_MS_INT 0x02000 | 30 | */ |
32 | |||
33 | /* The meaning of the bit majority in this constant is unknown. */ | ||
34 | #define TIFM_MS_SERIAL 0x04010 | ||
35 | 31 | ||
36 | #define TIFM_MS_SYS_LATCH 0x00100 | 32 | #define TIFM_MS_STAT_DRQ 0x04000 |
37 | #define TIFM_MS_SYS_NOT_RDY 0x00800 | 33 | #define TIFM_MS_STAT_MSINT 0x02000 |
38 | #define TIFM_MS_SYS_DATA 0x10000 | 34 | #define TIFM_MS_STAT_RDY 0x01000 |
35 | #define TIFM_MS_STAT_CRC 0x00200 | ||
36 | #define TIFM_MS_STAT_TOE 0x00100 | ||
37 | #define TIFM_MS_STAT_EMP 0x00020 | ||
38 | #define TIFM_MS_STAT_FUL 0x00010 | ||
39 | #define TIFM_MS_STAT_CED 0x00008 | ||
40 | #define TIFM_MS_STAT_ERR 0x00004 | ||
41 | #define TIFM_MS_STAT_BRQ 0x00002 | ||
42 | #define TIFM_MS_STAT_CNK 0x00001 | ||
43 | |||
44 | #define TIFM_MS_SYS_DMA 0x10000 | ||
45 | #define TIFM_MS_SYS_RESET 0x08000 | ||
46 | #define TIFM_MS_SYS_SRAC 0x04000 | ||
47 | #define TIFM_MS_SYS_INTEN 0x02000 | ||
48 | #define TIFM_MS_SYS_NOCRC 0x01000 | ||
49 | #define TIFM_MS_SYS_INTCLR 0x00800 | ||
50 | #define TIFM_MS_SYS_MSIEN 0x00400 | ||
51 | #define TIFM_MS_SYS_FCLR 0x00200 | ||
52 | #define TIFM_MS_SYS_FDIR 0x00100 | ||
53 | #define TIFM_MS_SYS_DAM 0x00080 | ||
54 | #define TIFM_MS_SYS_DRM 0x00040 | ||
55 | #define TIFM_MS_SYS_DRQSL 0x00020 | ||
56 | #define TIFM_MS_SYS_REI 0x00010 | ||
57 | #define TIFM_MS_SYS_REO 0x00008 | ||
58 | #define TIFM_MS_SYS_BSY_MASK 0x00007 | ||
59 | |||
60 | #define TIFM_MS_SYS_FIFO (TIFM_MS_SYS_INTEN | TIFM_MS_SYS_MSIEN \ | ||
61 | | TIFM_MS_SYS_FCLR | TIFM_MS_SYS_BSY_MASK) | ||
39 | 62 | ||
40 | /* Hardware flags */ | 63 | /* Hardware flags */ |
41 | enum { | 64 | enum { |
42 | CMD_READY = 0x0001, | 65 | CMD_READY = 0x01, |
43 | FIFO_READY = 0x0002, | 66 | FIFO_READY = 0x02, |
44 | CARD_READY = 0x0004, | 67 | CARD_INT = 0x04 |
45 | DATA_CARRY = 0x0008 | ||
46 | }; | 68 | }; |
47 | 69 | ||
48 | struct tifm_ms { | 70 | struct tifm_ms { |
49 | struct tifm_dev *dev; | 71 | struct tifm_dev *dev; |
50 | unsigned short eject:1, | 72 | struct timer_list timer; |
51 | no_dma:1; | 73 | struct memstick_request *req; |
52 | unsigned short cmd_flags; | ||
53 | unsigned int mode_mask; | 74 | unsigned int mode_mask; |
54 | unsigned int block_pos; | 75 | unsigned int block_pos; |
55 | unsigned long timeout_jiffies; | 76 | unsigned long timeout_jiffies; |
56 | 77 | unsigned char eject:1, | |
57 | struct timer_list timer; | 78 | use_dma:1; |
58 | struct memstick_request *req; | 79 | unsigned char cmd_flags; |
80 | unsigned char io_pos; | ||
59 | unsigned int io_word; | 81 | unsigned int io_word; |
60 | }; | 82 | }; |
61 | 83 | ||
62 | static void tifm_ms_read_fifo(struct tifm_ms *host, unsigned int fifo_offset, | 84 | static unsigned int tifm_ms_read_data(struct tifm_ms *host, |
63 | struct page *pg, unsigned int page_off, | 85 | unsigned char *buf, unsigned int length) |
64 | unsigned int length) | ||
65 | { | 86 | { |
66 | struct tifm_dev *sock = host->dev; | 87 | struct tifm_dev *sock = host->dev; |
67 | unsigned int cnt = 0, off = 0; | 88 | unsigned int off = 0; |
68 | unsigned char *buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + page_off; | 89 | |
90 | while (host->io_pos && length) { | ||
91 | buf[off++] = host->io_word & 0xff; | ||
92 | host->io_word >>= 8; | ||
93 | length--; | ||
94 | host->io_pos--; | ||
95 | } | ||
69 | 96 | ||
70 | if (host->cmd_flags & DATA_CARRY) { | 97 | if (!length) |
71 | while ((fifo_offset & 3) && length) { | 98 | return off; |
99 | |||
100 | while (!(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) { | ||
101 | if (length < 4) | ||
102 | break; | ||
103 | *(unsigned int *)(buf + off) = __raw_readl(sock->addr | ||
104 | + SOCK_MS_DATA); | ||
105 | length -= 4; | ||
106 | off += 4; | ||
107 | } | ||
108 | |||
109 | if (length | ||
110 | && !(TIFM_MS_STAT_EMP & readl(sock->addr + SOCK_MS_STATUS))) { | ||
111 | host->io_word = readl(sock->addr + SOCK_MS_DATA); | ||
112 | for (host->io_pos = 4; host->io_pos; --host->io_pos) { | ||
72 | buf[off++] = host->io_word & 0xff; | 113 | buf[off++] = host->io_word & 0xff; |
73 | host->io_word >>= 8; | 114 | host->io_word >>= 8; |
74 | length--; | 115 | length--; |
75 | fifo_offset++; | 116 | if (!length) |
117 | break; | ||
76 | } | 118 | } |
77 | if (!(fifo_offset & 3)) | ||
78 | host->cmd_flags &= ~DATA_CARRY; | ||
79 | if (!length) | ||
80 | return; | ||
81 | } | 119 | } |
82 | 120 | ||
83 | do { | 121 | return off; |
84 | host->io_word = readl(sock->addr + SOCK_FIFO_ACCESS | ||
85 | + fifo_offset); | ||
86 | cnt = 4; | ||
87 | while (length && cnt) { | ||
88 | buf[off++] = (host->io_word >> 8) & 0xff; | ||
89 | cnt--; | ||
90 | length--; | ||
91 | } | ||
92 | fifo_offset += 4 - cnt; | ||
93 | } while (length); | ||
94 | |||
95 | if (cnt) | ||
96 | host->cmd_flags |= DATA_CARRY; | ||
97 | |||
98 | kunmap_atomic(buf - page_off, KM_BIO_DST_IRQ); | ||
99 | } | 122 | } |
100 | 123 | ||
101 | static void tifm_ms_write_fifo(struct tifm_ms *host, unsigned int fifo_offset, | 124 | static unsigned int tifm_ms_write_data(struct tifm_ms *host, |
102 | struct page *pg, unsigned int page_off, | 125 | unsigned char *buf, unsigned int length) |
103 | unsigned int length) | ||
104 | { | 126 | { |
105 | struct tifm_dev *sock = host->dev; | 127 | struct tifm_dev *sock = host->dev; |
106 | unsigned int cnt = 0, off = 0; | 128 | unsigned int off = 0; |
107 | unsigned char *buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + page_off; | ||
108 | 129 | ||
109 | if (host->cmd_flags & DATA_CARRY) { | 130 | if (host->io_pos) { |
110 | while (fifo_offset & 3) { | 131 | while (host->io_pos < 4 && length) { |
111 | host->io_word |= buf[off++] << (8 * (fifo_offset & 3)); | 132 | host->io_word |= buf[off++] << (host->io_pos * 8); |
133 | host->io_pos++; | ||
112 | length--; | 134 | length--; |
113 | fifo_offset++; | ||
114 | } | 135 | } |
115 | if (!(fifo_offset & 3)) { | ||
116 | writel(host->io_word, sock->addr + SOCK_FIFO_ACCESS | ||
117 | + fifo_offset - 4); | ||
118 | |||
119 | host->cmd_flags &= ~DATA_CARRY; | ||
120 | } | ||
121 | if (!length) | ||
122 | return; | ||
123 | } | 136 | } |
124 | 137 | ||
125 | do { | 138 | if (host->io_pos == 4 |
126 | cnt = 4; | 139 | && !(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) { |
140 | writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), | ||
141 | sock->addr + SOCK_MS_SYSTEM); | ||
142 | writel(host->io_word, sock->addr + SOCK_MS_DATA); | ||
143 | host->io_pos = 0; | ||
127 | host->io_word = 0; | 144 | host->io_word = 0; |
128 | while (length && cnt) { | 145 | } else if (host->io_pos) { |
129 | host->io_word |= buf[off++] << (4 - cnt); | 146 | return off; |
130 | cnt--; | 147 | } |
131 | length--; | ||
132 | } | ||
133 | fifo_offset += 4 - cnt; | ||
134 | if (!cnt) | ||
135 | writel(host->io_word, sock->addr + SOCK_FIFO_ACCESS | ||
136 | + fifo_offset - 4); | ||
137 | |||
138 | } while (length); | ||
139 | |||
140 | if (cnt) | ||
141 | host->cmd_flags |= DATA_CARRY; | ||
142 | 148 | ||
143 | kunmap_atomic(buf - page_off, KM_BIO_SRC_IRQ); | 149 | if (!length) |
144 | } | 150 | return off; |
145 | 151 | ||
146 | static void tifm_ms_move_block(struct tifm_ms *host, unsigned int length) | 152 | while (!(TIFM_MS_STAT_FUL & readl(sock->addr + SOCK_MS_STATUS))) { |
147 | { | 153 | if (length < 4) |
148 | unsigned int t_size; | 154 | break; |
149 | unsigned int off = host->req->sg.offset + host->block_pos; | 155 | writel(TIFM_MS_SYS_FDIR | readl(sock->addr + SOCK_MS_SYSTEM), |
150 | unsigned int p_off, p_cnt; | 156 | sock->addr + SOCK_MS_SYSTEM); |
151 | struct page *pg; | 157 | __raw_writel(*(unsigned int *)(buf + off), |
152 | unsigned long flags; | 158 | sock->addr + SOCK_MS_DATA); |
159 | length -= 4; | ||
160 | off += 4; | ||
161 | } | ||
153 | 162 | ||
154 | dev_dbg(&host->dev->dev, "moving block\n"); | 163 | switch (length) { |
155 | local_irq_save(flags); | 164 | case 3: |
156 | t_size = length; | 165 | host->io_word |= buf[off + 2] << 16; |
157 | while (t_size) { | 166 | host->io_pos++; |
158 | pg = nth_page(sg_page(&host->req->sg), off >> PAGE_SHIFT); | 167 | case 2: |
159 | p_off = offset_in_page(off); | 168 | host->io_word |= buf[off + 1] << 8; |
160 | p_cnt = PAGE_SIZE - p_off; | 169 | host->io_pos++; |
161 | p_cnt = min(p_cnt, t_size); | 170 | case 1: |
171 | host->io_word |= buf[off]; | ||
172 | host->io_pos++; | ||
173 | } | ||
162 | 174 | ||
163 | if (host->req->data_dir == WRITE) | 175 | off += host->io_pos; |
164 | tifm_ms_write_fifo(host, length - t_size, | ||
165 | pg, p_off, p_cnt); | ||
166 | else | ||
167 | tifm_ms_read_fifo(host, length - t_size, | ||
168 | pg, p_off, p_cnt); | ||
169 | 176 | ||
170 | t_size -= p_cnt; | 177 | return off; |
171 | } | ||
172 | local_irq_restore(flags); | ||
173 | } | 178 | } |
174 | 179 | ||
175 | static int tifm_ms_transfer_data(struct tifm_ms *host, int skip) | 180 | static unsigned int tifm_ms_transfer_data(struct tifm_ms *host) |
176 | { | 181 | { |
177 | struct tifm_dev *sock = host->dev; | 182 | struct tifm_dev *sock = host->dev; |
178 | unsigned int length = host->req->sg.length - host->block_pos; | 183 | unsigned int length; |
184 | unsigned int off; | ||
185 | unsigned int t_size, p_off, p_cnt; | ||
186 | unsigned char *buf; | ||
187 | struct page *pg; | ||
188 | unsigned long flags = 0; | ||
189 | |||
190 | if (host->req->long_data) { | ||
191 | length = host->req->sg.length - host->block_pos; | ||
192 | off = host->req->sg.offset + host->block_pos; | ||
193 | } else { | ||
194 | length = host->req->data_len - host->block_pos; | ||
195 | off = 0; | ||
196 | } | ||
197 | dev_dbg(&sock->dev, "fifo data transfer, %d, %d\n", length, | ||
198 | host->block_pos); | ||
199 | |||
200 | while (length) { | ||
201 | if (host->req->long_data) { | ||
202 | pg = nth_page(sg_page(&host->req->sg), | ||
203 | off >> PAGE_SHIFT); | ||
204 | p_off = offset_in_page(off); | ||
205 | p_cnt = PAGE_SIZE - p_off; | ||
206 | p_cnt = min(p_cnt, length); | ||
207 | |||
208 | local_irq_save(flags); | ||
209 | buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off; | ||
210 | } else { | ||
211 | buf = host->req->data + host->block_pos; | ||
212 | p_cnt = host->req->data_len - host->block_pos; | ||
213 | } | ||
179 | 214 | ||
180 | if (!length) | 215 | t_size = host->req->data_dir == WRITE |
181 | return 1; | 216 | ? tifm_ms_write_data(host, buf, p_cnt) |
217 | : tifm_ms_read_data(host, buf, p_cnt); | ||
182 | 218 | ||
183 | if (length > TIFM_FIFO_SIZE) | 219 | if (host->req->long_data) { |
184 | length = TIFM_FIFO_SIZE; | 220 | kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ); |
221 | local_irq_restore(flags); | ||
222 | } | ||
185 | 223 | ||
186 | if (!skip) { | 224 | if (!t_size) |
187 | tifm_ms_move_block(host, length); | 225 | break; |
188 | host->block_pos += length; | 226 | host->block_pos += t_size; |
227 | length -= t_size; | ||
228 | off += t_size; | ||
189 | } | 229 | } |
190 | 230 | ||
191 | if ((host->req->data_dir == READ) | 231 | dev_dbg(&sock->dev, "fifo data transfer, %d remaining\n", length); |
192 | && (host->block_pos == host->req->sg.length)) | 232 | if (!length && (host->req->data_dir == WRITE)) { |
193 | return 1; | 233 | if (host->io_pos) { |
194 | 234 | writel(TIFM_MS_SYS_FDIR | |
195 | writel(ilog2(length) - 2, sock->addr + SOCK_FIFO_PAGE_SIZE); | 235 | | readl(sock->addr + SOCK_MS_SYSTEM), |
196 | if (host->req->data_dir == WRITE) | 236 | sock->addr + SOCK_MS_SYSTEM); |
197 | writel((1 << 8) | TIFM_DMA_TX, sock->addr + SOCK_DMA_CONTROL); | 237 | writel(host->io_word, sock->addr + SOCK_MS_DATA); |
198 | else | 238 | } |
199 | writel((1 << 8), sock->addr + SOCK_DMA_CONTROL); | 239 | writel(TIFM_MS_SYS_FDIR |
240 | | readl(sock->addr + SOCK_MS_SYSTEM), | ||
241 | sock->addr + SOCK_MS_SYSTEM); | ||
242 | writel(0, sock->addr + SOCK_MS_DATA); | ||
243 | } else { | ||
244 | readl(sock->addr + SOCK_MS_DATA); | ||
245 | } | ||
200 | 246 | ||
201 | return 0; | 247 | return length; |
202 | } | 248 | } |
203 | 249 | ||
204 | static int tifm_ms_issue_cmd(struct tifm_ms *host) | 250 | static int tifm_ms_issue_cmd(struct tifm_ms *host) |
205 | { | 251 | { |
206 | struct tifm_dev *sock = host->dev; | 252 | struct tifm_dev *sock = host->dev; |
207 | unsigned char *data; | 253 | unsigned char *data; |
208 | unsigned int data_len = 0, cmd = 0, cmd_mask = 0, cnt, tval = 0; | 254 | unsigned int data_len, cmd, sys_param; |
209 | 255 | ||
210 | host->cmd_flags = 0; | 256 | host->cmd_flags = 0; |
257 | host->block_pos = 0; | ||
258 | host->io_pos = 0; | ||
259 | host->io_word = 0; | ||
260 | host->cmd_flags = 0; | ||
211 | 261 | ||
212 | if (host->req->io_type == MEMSTICK_IO_SG) { | 262 | data = host->req->data; |
213 | if (!host->no_dma) { | ||
214 | if (1 != tifm_map_sg(sock, &host->req->sg, 1, | ||
215 | host->req->data_dir == READ | ||
216 | ? PCI_DMA_FROMDEVICE | ||
217 | : PCI_DMA_TODEVICE)) { | ||
218 | host->req->error = -ENOMEM; | ||
219 | return host->req->error; | ||
220 | } | ||
221 | data_len = sg_dma_len(&host->req->sg); | ||
222 | } else | ||
223 | data_len = host->req->sg.length; | ||
224 | |||
225 | writel(TIFM_FIFO_INT_SETALL, | ||
226 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | ||
227 | writel(TIFM_FIFO_ENABLE, | ||
228 | sock->addr + SOCK_FIFO_CONTROL); | ||
229 | writel(TIFM_FIFO_INTMASK, | ||
230 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
231 | 263 | ||
232 | if (!host->no_dma) { | 264 | host->use_dma = !no_dma; |
233 | writel(ilog2(data_len) - 2, | ||
234 | sock->addr + SOCK_FIFO_PAGE_SIZE); | ||
235 | writel(sg_dma_address(&host->req->sg), | ||
236 | sock->addr + SOCK_DMA_ADDRESS); | ||
237 | if (host->req->data_dir == WRITE) | ||
238 | writel((1 << 8) | TIFM_DMA_TX | TIFM_DMA_EN, | ||
239 | sock->addr + SOCK_DMA_CONTROL); | ||
240 | else | ||
241 | writel((1 << 8) | TIFM_DMA_EN, | ||
242 | sock->addr + SOCK_DMA_CONTROL); | ||
243 | } else { | ||
244 | tifm_ms_transfer_data(host, | ||
245 | host->req->data_dir == READ); | ||
246 | } | ||
247 | 265 | ||
248 | cmd_mask = readl(sock->addr + SOCK_MS_SYSTEM); | 266 | if (host->req->long_data) { |
249 | cmd_mask |= TIFM_MS_SYS_DATA | TIFM_MS_SYS_NOT_RDY; | 267 | data_len = host->req->sg.length; |
250 | writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); | 268 | if (!is_power_of_2(data_len)) |
251 | } else if (host->req->io_type == MEMSTICK_IO_VAL) { | 269 | host->use_dma = 0; |
252 | data = host->req->data; | 270 | } else { |
253 | data_len = host->req->data_len; | 271 | data_len = host->req->data_len; |
272 | host->use_dma = 0; | ||
273 | } | ||
254 | 274 | ||
255 | cmd_mask = host->mode_mask | 0x2607; /* unknown constant */ | 275 | writel(TIFM_FIFO_INT_SETALL, |
256 | 276 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | |
257 | if (host->req->data_dir == WRITE) { | 277 | writel(TIFM_FIFO_ENABLE, |
258 | cmd_mask |= TIFM_MS_SYS_LATCH; | 278 | sock->addr + SOCK_FIFO_CONTROL); |
259 | writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); | 279 | |
260 | for (cnt = 0; (data_len - cnt) >= 4; cnt += 4) { | 280 | if (host->use_dma) { |
261 | writel(TIFM_MS_SYS_LATCH | 281 | if (1 != tifm_map_sg(sock, &host->req->sg, 1, |
262 | | readl(sock->addr + SOCK_MS_SYSTEM), | 282 | host->req->data_dir == READ |
263 | sock->addr + SOCK_MS_SYSTEM); | 283 | ? PCI_DMA_FROMDEVICE |
264 | __raw_writel(*(unsigned int *)(data + cnt), | 284 | : PCI_DMA_TODEVICE)) { |
265 | sock->addr + SOCK_MS_DATA); | 285 | host->req->error = -ENOMEM; |
266 | dev_dbg(&sock->dev, "writing %x\n", | 286 | return host->req->error; |
267 | *(int *)(data + cnt)); | 287 | } |
268 | } | 288 | data_len = sg_dma_len(&host->req->sg); |
269 | switch (data_len - cnt) { | ||
270 | case 3: | ||
271 | tval |= data[cnt + 2] << 16; | ||
272 | case 2: | ||
273 | tval |= data[cnt + 1] << 8; | ||
274 | case 1: | ||
275 | tval |= data[cnt]; | ||
276 | writel(TIFM_MS_SYS_LATCH | ||
277 | | readl(sock->addr + SOCK_MS_SYSTEM), | ||
278 | sock->addr + SOCK_MS_SYSTEM); | ||
279 | writel(tval, sock->addr + SOCK_MS_DATA); | ||
280 | dev_dbg(&sock->dev, "writing %x\n", tval); | ||
281 | } | ||
282 | 289 | ||
283 | writel(TIFM_MS_SYS_LATCH | 290 | writel(ilog2(data_len) - 2, |
284 | | readl(sock->addr + SOCK_MS_SYSTEM), | 291 | sock->addr + SOCK_FIFO_PAGE_SIZE); |
285 | sock->addr + SOCK_MS_SYSTEM); | 292 | writel(TIFM_FIFO_INTMASK, |
286 | writel(0, sock->addr + SOCK_MS_DATA); | 293 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); |
287 | dev_dbg(&sock->dev, "writing %x\n", 0); | 294 | sys_param = TIFM_DMA_EN | (1 << 8); |
295 | if (host->req->data_dir == WRITE) | ||
296 | sys_param |= TIFM_DMA_TX; | ||
297 | |||
298 | writel(TIFM_FIFO_INTMASK, | ||
299 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); | ||
288 | 300 | ||
289 | } else | 301 | writel(sg_dma_address(&host->req->sg), |
290 | writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); | 302 | sock->addr + SOCK_DMA_ADDRESS); |
303 | writel(sys_param, sock->addr + SOCK_DMA_CONTROL); | ||
304 | } else { | ||
305 | writel(host->mode_mask | TIFM_MS_SYS_FIFO, | ||
306 | sock->addr + SOCK_MS_SYSTEM); | ||
291 | 307 | ||
292 | cmd_mask = readl(sock->addr + SOCK_MS_SYSTEM); | 308 | writel(TIFM_FIFO_MORE, |
293 | cmd_mask &= ~TIFM_MS_SYS_DATA; | 309 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET); |
294 | cmd_mask |= TIFM_MS_SYS_NOT_RDY; | 310 | } |
295 | dev_dbg(&sock->dev, "mask %x\n", cmd_mask); | ||
296 | writel(cmd_mask, sock->addr + SOCK_MS_SYSTEM); | ||
297 | } else | ||
298 | BUG(); | ||
299 | 311 | ||
300 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); | 312 | mod_timer(&host->timer, jiffies + host->timeout_jiffies); |
301 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), | 313 | writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL), |
302 | sock->addr + SOCK_CONTROL); | 314 | sock->addr + SOCK_CONTROL); |
303 | host->req->error = 0; | 315 | host->req->error = 0; |
304 | 316 | ||
317 | sys_param = readl(sock->addr + SOCK_MS_SYSTEM); | ||
318 | sys_param |= TIFM_MS_SYS_INTCLR; | ||
319 | |||
320 | if (host->use_dma) | ||
321 | sys_param |= TIFM_MS_SYS_DMA; | ||
322 | else | ||
323 | sys_param &= ~TIFM_MS_SYS_DMA; | ||
324 | |||
325 | writel(sys_param, sock->addr + SOCK_MS_SYSTEM); | ||
326 | |||
305 | cmd = (host->req->tpc & 0xf) << 12; | 327 | cmd = (host->req->tpc & 0xf) << 12; |
306 | cmd |= data_len; | 328 | cmd |= data_len; |
307 | writel(cmd, sock->addr + SOCK_MS_COMMAND); | 329 | writel(cmd, sock->addr + SOCK_MS_COMMAND); |
308 | 330 | ||
309 | dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, cmd_mask); | 331 | dev_dbg(&sock->dev, "executing TPC %x, %x\n", cmd, sys_param); |
310 | return 0; | 332 | return 0; |
311 | } | 333 | } |
312 | 334 | ||
@@ -314,47 +336,20 @@ static void tifm_ms_complete_cmd(struct tifm_ms *host) | |||
314 | { | 336 | { |
315 | struct tifm_dev *sock = host->dev; | 337 | struct tifm_dev *sock = host->dev; |
316 | struct memstick_host *msh = tifm_get_drvdata(sock); | 338 | struct memstick_host *msh = tifm_get_drvdata(sock); |
317 | unsigned int tval = 0, data_len; | ||
318 | unsigned char *data; | ||
319 | int rc; | 339 | int rc; |
320 | 340 | ||
321 | del_timer(&host->timer); | 341 | del_timer(&host->timer); |
322 | if (host->req->io_type == MEMSTICK_IO_SG) { | ||
323 | if (!host->no_dma) | ||
324 | tifm_unmap_sg(sock, &host->req->sg, 1, | ||
325 | host->req->data_dir == READ | ||
326 | ? PCI_DMA_FROMDEVICE | ||
327 | : PCI_DMA_TODEVICE); | ||
328 | } else if (host->req->io_type == MEMSTICK_IO_VAL) { | ||
329 | writel(~TIFM_MS_SYS_DATA & readl(sock->addr + SOCK_MS_SYSTEM), | ||
330 | sock->addr + SOCK_MS_SYSTEM); | ||
331 | |||
332 | data = host->req->data; | ||
333 | data_len = host->req->data_len; | ||
334 | 342 | ||
335 | if (host->req->data_dir == READ) { | 343 | if (host->use_dma) |
336 | for (rc = 0; (data_len - rc) >= 4; rc += 4) | 344 | tifm_unmap_sg(sock, &host->req->sg, 1, |
337 | *(int *)(data + rc) | 345 | host->req->data_dir == READ |
338 | = __raw_readl(sock->addr | 346 | ? PCI_DMA_FROMDEVICE |
339 | + SOCK_MS_DATA); | 347 | : PCI_DMA_TODEVICE); |
340 | |||
341 | if (data_len - rc) | ||
342 | tval = readl(sock->addr + SOCK_MS_DATA); | ||
343 | switch (data_len - rc) { | ||
344 | case 3: | ||
345 | data[rc + 2] = (tval >> 16) & 0xff; | ||
346 | case 2: | ||
347 | data[rc + 1] = (tval >> 8) & 0xff; | ||
348 | case 1: | ||
349 | data[rc] = tval & 0xff; | ||
350 | } | ||
351 | readl(sock->addr + SOCK_MS_DATA); | ||
352 | } | ||
353 | } | ||
354 | 348 | ||
355 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), | 349 | writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL), |
356 | sock->addr + SOCK_CONTROL); | 350 | sock->addr + SOCK_CONTROL); |
357 | 351 | ||
352 | dev_dbg(&sock->dev, "TPC complete\n"); | ||
358 | do { | 353 | do { |
359 | rc = memstick_next_req(msh, &host->req); | 354 | rc = memstick_next_req(msh, &host->req); |
360 | } while (!rc && tifm_ms_issue_cmd(host)); | 355 | } while (!rc && tifm_ms_issue_cmd(host)); |
@@ -365,11 +360,10 @@ static int tifm_ms_check_status(struct tifm_ms *host) | |||
365 | if (!host->req->error) { | 360 | if (!host->req->error) { |
366 | if (!(host->cmd_flags & CMD_READY)) | 361 | if (!(host->cmd_flags & CMD_READY)) |
367 | return 1; | 362 | return 1; |
368 | if ((host->req->io_type == MEMSTICK_IO_SG) | 363 | if (!(host->cmd_flags & FIFO_READY)) |
369 | && !(host->cmd_flags & FIFO_READY)) | ||
370 | return 1; | 364 | return 1; |
371 | if (host->req->need_card_int | 365 | if (host->req->need_card_int |
372 | && !(host->cmd_flags & CARD_READY)) | 366 | && !(host->cmd_flags & CARD_INT)) |
373 | return 1; | 367 | return 1; |
374 | } | 368 | } |
375 | return 0; | 369 | return 0; |
@@ -379,18 +373,24 @@ static int tifm_ms_check_status(struct tifm_ms *host) | |||
379 | static void tifm_ms_data_event(struct tifm_dev *sock) | 373 | static void tifm_ms_data_event(struct tifm_dev *sock) |
380 | { | 374 | { |
381 | struct tifm_ms *host; | 375 | struct tifm_ms *host; |
382 | unsigned int fifo_status = 0; | 376 | unsigned int fifo_status = 0, host_status = 0; |
383 | int rc = 1; | 377 | int rc = 1; |
384 | 378 | ||
385 | spin_lock(&sock->lock); | 379 | spin_lock(&sock->lock); |
386 | host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); | 380 | host = memstick_priv((struct memstick_host *)tifm_get_drvdata(sock)); |
387 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); | 381 | fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS); |
388 | dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n", | 382 | host_status = readl(sock->addr + SOCK_MS_STATUS); |
389 | fifo_status, host->cmd_flags); | 383 | dev_dbg(&sock->dev, |
384 | "data event: fifo_status %x, host_status %x, flags %x\n", | ||
385 | fifo_status, host_status, host->cmd_flags); | ||
390 | 386 | ||
391 | if (host->req) { | 387 | if (host->req) { |
392 | if (fifo_status & TIFM_FIFO_READY) { | 388 | if (host->use_dma && (fifo_status & 1)) { |
393 | if (!host->no_dma || tifm_ms_transfer_data(host, 0)) { | 389 | host->cmd_flags |= FIFO_READY; |
390 | rc = tifm_ms_check_status(host); | ||
391 | } | ||
392 | if (!host->use_dma && (fifo_status & TIFM_FIFO_MORE)) { | ||
393 | if (!tifm_ms_transfer_data(host)) { | ||
394 | host->cmd_flags |= FIFO_READY; | 394 | host->cmd_flags |= FIFO_READY; |
395 | rc = tifm_ms_check_status(host); | 395 | rc = tifm_ms_check_status(host); |
396 | } | 396 | } |
@@ -419,9 +419,9 @@ static void tifm_ms_card_event(struct tifm_dev *sock) | |||
419 | host_status, host->cmd_flags); | 419 | host_status, host->cmd_flags); |
420 | 420 | ||
421 | if (host->req) { | 421 | if (host->req) { |
422 | if (host_status & TIFM_MS_TIMEOUT) | 422 | if (host_status & TIFM_MS_STAT_TOE) |
423 | host->req->error = -ETIME; | 423 | host->req->error = -ETIME; |
424 | else if (host_status & TIFM_MS_BADCRC) | 424 | else if (host_status & TIFM_MS_STAT_CRC) |
425 | host->req->error = -EILSEQ; | 425 | host->req->error = -EILSEQ; |
426 | 426 | ||
427 | if (host->req->error) { | 427 | if (host->req->error) { |
@@ -430,18 +430,17 @@ static void tifm_ms_card_event(struct tifm_dev *sock) | |||
430 | writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); | 430 | writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); |
431 | } | 431 | } |
432 | 432 | ||
433 | if (host_status & TIFM_MS_EOTPC) | 433 | if (host_status & TIFM_MS_STAT_RDY) |
434 | host->cmd_flags |= CMD_READY; | 434 | host->cmd_flags |= CMD_READY; |
435 | if (host_status & TIFM_MS_INT) | 435 | |
436 | host->cmd_flags |= CARD_READY; | 436 | if (host_status & TIFM_MS_STAT_MSINT) |
437 | host->cmd_flags |= CARD_INT; | ||
437 | 438 | ||
438 | rc = tifm_ms_check_status(host); | 439 | rc = tifm_ms_check_status(host); |
439 | 440 | ||
440 | } | 441 | } |
441 | 442 | ||
442 | writel(TIFM_MS_SYS_NOT_RDY | readl(sock->addr + SOCK_MS_SYSTEM), | 443 | writel(TIFM_MS_SYS_INTCLR | readl(sock->addr + SOCK_MS_SYSTEM), |
443 | sock->addr + SOCK_MS_SYSTEM); | ||
444 | writel((~TIFM_MS_SYS_DATA) & readl(sock->addr + SOCK_MS_SYSTEM), | ||
445 | sock->addr + SOCK_MS_SYSTEM); | 444 | sock->addr + SOCK_MS_SYSTEM); |
446 | 445 | ||
447 | if (!rc) | 446 | if (!rc) |
@@ -497,15 +496,26 @@ static void tifm_ms_set_param(struct memstick_host *msh, | |||
497 | 496 | ||
498 | switch (param) { | 497 | switch (param) { |
499 | case MEMSTICK_POWER: | 498 | case MEMSTICK_POWER: |
500 | /* this is set by card detection mechanism */ | 499 | /* also affected by media detection mechanism */ |
500 | if (value == MEMSTICK_POWER_ON) { | ||
501 | host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; | ||
502 | writel(TIFM_MS_SYS_RESET, sock->addr + SOCK_MS_SYSTEM); | ||
503 | writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, | ||
504 | sock->addr + SOCK_MS_SYSTEM); | ||
505 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); | ||
506 | } else if (value == MEMSTICK_POWER_OFF) { | ||
507 | writel(TIFM_MS_SYS_FCLR | TIFM_MS_SYS_INTCLR, | ||
508 | sock->addr + SOCK_MS_SYSTEM); | ||
509 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); | ||
510 | } | ||
501 | break; | 511 | break; |
502 | case MEMSTICK_INTERFACE: | 512 | case MEMSTICK_INTERFACE: |
503 | if (value == MEMSTICK_SERIAL) { | 513 | if (value == MEMSTICK_SERIAL) { |
504 | host->mode_mask = TIFM_MS_SERIAL; | 514 | host->mode_mask = TIFM_MS_SYS_SRAC | TIFM_MS_SYS_REI; |
505 | writel((~TIFM_CTRL_FAST_CLK) | 515 | writel((~TIFM_CTRL_FAST_CLK) |
506 | & readl(sock->addr + SOCK_CONTROL), | 516 | & readl(sock->addr + SOCK_CONTROL), |
507 | sock->addr + SOCK_CONTROL); | 517 | sock->addr + SOCK_CONTROL); |
508 | } else if (value == MEMSTICK_PARALLEL) { | 518 | } else if (value == MEMSTICK_PAR4) { |
509 | host->mode_mask = 0; | 519 | host->mode_mask = 0; |
510 | writel(TIFM_CTRL_FAST_CLK | 520 | writel(TIFM_CTRL_FAST_CLK |
511 | | readl(sock->addr + SOCK_CONTROL), | 521 | | readl(sock->addr + SOCK_CONTROL), |
@@ -532,21 +542,6 @@ static void tifm_ms_abort(unsigned long data) | |||
532 | tifm_eject(host->dev); | 542 | tifm_eject(host->dev); |
533 | } | 543 | } |
534 | 544 | ||
535 | static int tifm_ms_initialize_host(struct tifm_ms *host) | ||
536 | { | ||
537 | struct tifm_dev *sock = host->dev; | ||
538 | struct memstick_host *msh = tifm_get_drvdata(sock); | ||
539 | |||
540 | host->mode_mask = TIFM_MS_SERIAL; | ||
541 | writel(0x8000, sock->addr + SOCK_MS_SYSTEM); | ||
542 | writel(0x0200 | TIFM_MS_SYS_NOT_RDY, sock->addr + SOCK_MS_SYSTEM); | ||
543 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); | ||
544 | if (tifm_has_ms_pif(sock)) | ||
545 | msh->caps |= MEMSTICK_CAP_PARALLEL; | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static int tifm_ms_probe(struct tifm_dev *sock) | 545 | static int tifm_ms_probe(struct tifm_dev *sock) |
551 | { | 546 | { |
552 | struct memstick_host *msh; | 547 | struct memstick_host *msh; |
@@ -568,7 +563,6 @@ static int tifm_ms_probe(struct tifm_dev *sock) | |||
568 | tifm_set_drvdata(sock, msh); | 563 | tifm_set_drvdata(sock, msh); |
569 | host->dev = sock; | 564 | host->dev = sock; |
570 | host->timeout_jiffies = msecs_to_jiffies(1000); | 565 | host->timeout_jiffies = msecs_to_jiffies(1000); |
571 | host->no_dma = no_dma; | ||
572 | 566 | ||
573 | setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); | 567 | setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); |
574 | 568 | ||
@@ -576,10 +570,10 @@ static int tifm_ms_probe(struct tifm_dev *sock) | |||
576 | msh->set_param = tifm_ms_set_param; | 570 | msh->set_param = tifm_ms_set_param; |
577 | sock->card_event = tifm_ms_card_event; | 571 | sock->card_event = tifm_ms_card_event; |
578 | sock->data_event = tifm_ms_data_event; | 572 | sock->data_event = tifm_ms_data_event; |
579 | rc = tifm_ms_initialize_host(host); | 573 | if (tifm_has_ms_pif(sock)) |
574 | msh->caps |= MEMSTICK_CAP_PAR4; | ||
580 | 575 | ||
581 | if (!rc) | 576 | rc = memstick_add_host(msh); |
582 | rc = memstick_add_host(msh); | ||
583 | if (!rc) | 577 | if (!rc) |
584 | return 0; | 578 | return 0; |
585 | 579 | ||
@@ -601,7 +595,7 @@ static void tifm_ms_remove(struct tifm_dev *sock) | |||
601 | writel(TIFM_FIFO_INT_SETALL, | 595 | writel(TIFM_FIFO_INT_SETALL, |
602 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); | 596 | sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR); |
603 | writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); | 597 | writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL); |
604 | if ((host->req->io_type == MEMSTICK_IO_SG) && !host->no_dma) | 598 | if (host->use_dma) |
605 | tifm_unmap_sg(sock, &host->req->sg, 1, | 599 | tifm_unmap_sg(sock, &host->req->sg, 1, |
606 | host->req->data_dir == READ | 600 | host->req->data_dir == READ |
607 | ? PCI_DMA_TODEVICE | 601 | ? PCI_DMA_TODEVICE |
@@ -617,10 +611,6 @@ static void tifm_ms_remove(struct tifm_dev *sock) | |||
617 | spin_unlock_irqrestore(&sock->lock, flags); | 611 | spin_unlock_irqrestore(&sock->lock, flags); |
618 | 612 | ||
619 | memstick_remove_host(msh); | 613 | memstick_remove_host(msh); |
620 | |||
621 | writel(0x0200 | TIFM_MS_SYS_NOT_RDY, sock->addr + SOCK_MS_SYSTEM); | ||
622 | writel(0xffffffff, sock->addr + SOCK_MS_STATUS); | ||
623 | |||
624 | memstick_free_host(msh); | 614 | memstick_free_host(msh); |
625 | } | 615 | } |
626 | 616 | ||
@@ -628,17 +618,17 @@ static void tifm_ms_remove(struct tifm_dev *sock) | |||
628 | 618 | ||
629 | static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state) | 619 | static int tifm_ms_suspend(struct tifm_dev *sock, pm_message_t state) |
630 | { | 620 | { |
621 | struct memstick_host *msh = tifm_get_drvdata(sock); | ||
622 | |||
623 | memstick_suspend_host(msh); | ||
631 | return 0; | 624 | return 0; |
632 | } | 625 | } |
633 | 626 | ||
634 | static int tifm_ms_resume(struct tifm_dev *sock) | 627 | static int tifm_ms_resume(struct tifm_dev *sock) |
635 | { | 628 | { |
636 | struct memstick_host *msh = tifm_get_drvdata(sock); | 629 | struct memstick_host *msh = tifm_get_drvdata(sock); |
637 | struct tifm_ms *host = memstick_priv(msh); | ||
638 | |||
639 | tifm_ms_initialize_host(host); | ||
640 | memstick_detect_change(msh); | ||
641 | 630 | ||
631 | memstick_resume_host(msh); | ||
642 | return 0; | 632 | return 0; |
643 | } | 633 | } |
644 | 634 | ||
@@ -679,7 +669,6 @@ MODULE_AUTHOR("Alex Dubov"); | |||
679 | MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver"); | 669 | MODULE_DESCRIPTION("TI FlashMedia MemoryStick driver"); |
680 | MODULE_LICENSE("GPL"); | 670 | MODULE_LICENSE("GPL"); |
681 | MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl); | 671 | MODULE_DEVICE_TABLE(tifm, tifm_ms_id_tbl); |
682 | MODULE_VERSION(DRIVER_VERSION); | ||
683 | 672 | ||
684 | module_init(tifm_ms_init); | 673 | module_init(tifm_ms_init); |
685 | module_exit(tifm_ms_exit); | 674 | module_exit(tifm_ms_exit); |
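
The end of tifm_ms_write_data() above folds up to three leftover bytes into the 32-bit FIFO word with a deliberate switch fall-through, least significant byte first. The standalone sketch below reproduces just that packing step so the byte layout is easy to check; pack_tail and the sample values are illustrative, not part of the driver.

        /* Sketch of the trailing-byte packing used by tifm_ms_write_data();
         * userspace-only, not the driver code itself. */
        #include <stdio.h>

        static unsigned int pack_tail(const unsigned char *buf, unsigned int length,
                                      unsigned int *bytes_used)
        {
                unsigned int io_word = 0;
                unsigned int io_pos = 0;

                switch (length) {
                case 3:
                        io_word |= buf[2] << 16;
                        io_pos++;
                        /* fall through */
                case 2:
                        io_word |= buf[1] << 8;
                        io_pos++;
                        /* fall through */
                case 1:
                        io_word |= buf[0];
                        io_pos++;
                }

                *bytes_used = io_pos;
                return io_word;
        }

        int main(void)
        {
                const unsigned char tail[] = { 0xaa, 0xbb, 0xcc };
                unsigned int used;
                unsigned int word = pack_tail(tail, 3, &used);

                /* Prints 00ccbbaa / 3: buf[0] lands in bits 0-7, buf[2] in bits 16-23. */
                printf("%08x / %u\n", word, used);
                return 0;
        }

For the bytes 0xaa, 0xbb, 0xcc the output is 00ccbbaa / 3, matching the shifts applied in the driver before the partially filled word is written out.
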
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 0c303c84b37b..6b6df8679585 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx) | |||
632 | 632 | ||
633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
634 | /** | 634 | /** |
635 | * mpt_event_register - Register protocol-specific event callback | 635 | * mpt_event_register - Register protocol-specific event callback handler. |
636 | * handler. | ||
637 | * @cb_idx: previously registered (via mpt_register) callback handle | 636 | * @cb_idx: previously registered (via mpt_register) callback handle |
638 | * @ev_cbfunc: callback function | 637 | * @ev_cbfunc: callback function |
639 | * | 638 | * |
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc) | |||
654 | 653 | ||
655 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 654 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
656 | /** | 655 | /** |
657 | * mpt_event_deregister - Deregister protocol-specific event callback | 656 | * mpt_event_deregister - Deregister protocol-specific event callback handler |
658 | * handler. | ||
659 | * @cb_idx: previously registered callback handle | 657 | * @cb_idx: previously registered callback handle |
660 | * | 658 | * |
661 | * Each protocol-specific driver should call this routine | 659 | * Each protocol-specific driver should call this routine |
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx) | |||
765 | 763 | ||
766 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 764 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
767 | /** | 765 | /** |
768 | * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) | 766 | * mpt_get_msg_frame - Obtain an MPT request frame from the pool |
769 | * allocated per MPT adapter. | ||
770 | * @cb_idx: Handle of registered MPT protocol driver | 767 | * @cb_idx: Handle of registered MPT protocol driver |
771 | * @ioc: Pointer to MPT adapter structure | 768 | * @ioc: Pointer to MPT adapter structure |
772 | * | 769 | * |
770 | * Obtain an MPT request frame from the pool (of 1024) that are | ||
771 | * allocated per MPT adapter. | ||
772 | * | ||
773 | * Returns pointer to a MPT request frame or %NULL if none are available | 773 | * Returns pointer to a MPT request frame or %NULL if none are available |
774 | * or IOC is not active. | 774 | * or IOC is not active. |
775 | */ | 775 | */ |
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc) | |||
834 | 834 | ||
835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
836 | /** | 836 | /** |
837 | * mpt_put_msg_frame - Send a protocol specific MPT request frame | 837 | * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC |
838 | * to a IOC. | ||
839 | * @cb_idx: Handle of registered MPT protocol driver | 838 | * @cb_idx: Handle of registered MPT protocol driver |
840 | * @ioc: Pointer to MPT adapter structure | 839 | * @ioc: Pointer to MPT adapter structure |
841 | * @mf: Pointer to MPT request frame | 840 | * @mf: Pointer to MPT request frame |
842 | * | 841 | * |
843 | * This routine posts a MPT request frame to the request post FIFO of a | 842 | * This routine posts an MPT request frame to the request post FIFO of a |
844 | * specific MPT adapter. | 843 | * specific MPT adapter. |
845 | */ | 844 | */ |
846 | void | 845 | void |
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) | |||
868 | } | 867 | } |
869 | 868 | ||
870 | /** | 869 | /** |
871 | * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame | 870 | * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame |
872 | * to a IOC using hi priority request queue. | ||
873 | * @cb_idx: Handle of registered MPT protocol driver | 871 | * @cb_idx: Handle of registered MPT protocol driver |
874 | * @ioc: Pointer to MPT adapter structure | 872 | * @ioc: Pointer to MPT adapter structure |
875 | * @mf: Pointer to MPT request frame | 873 | * @mf: Pointer to MPT request frame |
876 | * | 874 | * |
877 | * This routine posts a MPT request frame to the request post FIFO of a | 875 | * Send a protocol-specific MPT request frame to an IOC using |
876 | * hi-priority request queue. | ||
877 | * | ||
878 | * This routine posts an MPT request frame to the request post FIFO of a | ||
878 | * specific MPT adapter. | 879 | * specific MPT adapter. |
879 | **/ | 880 | **/ |
880 | void | 881 | void |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f77b329f6923..78734e25edd5 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1701,6 +1701,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, | |||
1701 | if (error) | 1701 | if (error) |
1702 | goto out_free_consistent; | 1702 | goto out_free_consistent; |
1703 | 1703 | ||
1704 | if (!buffer->NumPhys) { | ||
1705 | error = -ENODEV; | ||
1706 | goto out_free_consistent; | ||
1707 | } | ||
1708 | |||
1704 | /* save config data */ | 1709 | /* save config data */ |
1705 | port_info->num_phys = buffer->NumPhys; | 1710 | port_info->num_phys = buffer->NumPhys; |
1706 | port_info->phy_info = kcalloc(port_info->num_phys, | 1711 | port_info->phy_info = kcalloc(port_info->num_phys, |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index af1de0ccee2f..0c252f60c4c1 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) | |||
1533 | * | 1533 | * |
1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). | 1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). |
1535 | * | 1535 | * |
1536 | * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC | 1536 | * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC |
1537 | * will be active. | 1537 | * will be active. |
1538 | * | 1538 | * |
1539 | * Returns 0 for SUCCESS, or %FAILED. | 1539 | * Returns 0 for SUCCESS, or %FAILED. |
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR | |||
2537 | 2537 | ||
2538 | /** | 2538 | /** |
2539 | * mptscsih_get_scsi_lookup | 2539 | * mptscsih_get_scsi_lookup |
2540 | * | ||
2541 | * retrieves scmd entry from ScsiLookup[] array list | ||
2542 | * | ||
2543 | * @ioc: Pointer to MPT_ADAPTER structure | 2540 | * @ioc: Pointer to MPT_ADAPTER structure |
2544 | * @i: index into the array | 2541 | * @i: index into the array |
2545 | * | 2542 | * |
2546 | * Returns the scsi_cmd pointer | 2543 | * retrieves scmd entry from ScsiLookup[] array list |
2547 | * | 2544 | * |
2545 | * Returns the scsi_cmd pointer | ||
2548 | **/ | 2546 | **/ |
2549 | static struct scsi_cmnd * | 2547 | static struct scsi_cmnd * |
2550 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2548 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) |
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | |||
2561 | 2559 | ||
2562 | /** | 2560 | /** |
2563 | * mptscsih_getclear_scsi_lookup | 2561 | * mptscsih_getclear_scsi_lookup |
2564 | * | ||
2565 | * retrieves and clears scmd entry from ScsiLookup[] array list | ||
2566 | * | ||
2567 | * @ioc: Pointer to MPT_ADAPTER structure | 2562 | * @ioc: Pointer to MPT_ADAPTER structure |
2568 | * @i: index into the array | 2563 | * @i: index into the array |
2569 | * | 2564 | * |
2570 | * Returns the scsi_cmd pointer | 2565 | * retrieves and clears scmd entry from ScsiLookup[] array list |
2571 | * | 2566 | * |
2567 | * Returns the scsi_cmd pointer | ||
2572 | **/ | 2568 | **/ |
2573 | static struct scsi_cmnd * | 2569 | static struct scsi_cmnd * |
2574 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2570 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) |
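
Most of the mptbase.c and mptscsih.c hunks above simply reflow kernel-doc comments: the one-line summary stays on the line with the function name, the @parameter descriptions follow, and the longer prose moves below the parameter block. As a reference, a kernel-doc shaped comment on a made-up function looks roughly like this (example_send_frame and its arguments are invented for illustration):

        /**
         * example_send_frame - Post a request frame to the adapter
         * @cb_idx: handle of the registered protocol driver
         * @ioc:    opaque pointer to the adapter structure
         *
         * The longer, multi-sentence description follows the parameter block,
         * separated from it by a blank comment line.
         *
         * Returns 0 on success or a negative errno value on failure.
         */
        static int example_send_frame(unsigned char cb_idx, void *ioc)
        {
                (void)cb_idx;
                (void)ioc;
                return 0;
        }
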
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index afd82966f9a0..13bac53db69a 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
@@ -48,31 +48,13 @@ struct sm501_devdata { | |||
48 | unsigned int pdev_id; | 48 | unsigned int pdev_id; |
49 | unsigned int irq; | 49 | unsigned int irq; |
50 | void __iomem *regs; | 50 | void __iomem *regs; |
51 | unsigned int rev; | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define MHZ (1000 * 1000) | 54 | #define MHZ (1000 * 1000) |
54 | 55 | ||
55 | #ifdef DEBUG | 56 | #ifdef DEBUG |
56 | static const unsigned int misc_div[] = { | 57 | static const unsigned int div_tab[] = { |
57 | [0] = 1, | ||
58 | [1] = 2, | ||
59 | [2] = 4, | ||
60 | [3] = 8, | ||
61 | [4] = 16, | ||
62 | [5] = 32, | ||
63 | [6] = 64, | ||
64 | [7] = 128, | ||
65 | [8] = 3, | ||
66 | [9] = 6, | ||
67 | [10] = 12, | ||
68 | [11] = 24, | ||
69 | [12] = 48, | ||
70 | [13] = 96, | ||
71 | [14] = 192, | ||
72 | [15] = 384, | ||
73 | }; | ||
74 | |||
75 | static const unsigned int px_div[] = { | ||
76 | [0] = 1, | 58 | [0] = 1, |
77 | [1] = 2, | 59 | [1] = 2, |
78 | [2] = 4, | 60 | [2] = 4, |
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = { | |||
101 | 83 | ||
102 | static unsigned long decode_div(unsigned long pll2, unsigned long val, | 84 | static unsigned long decode_div(unsigned long pll2, unsigned long val, |
103 | unsigned int lshft, unsigned int selbit, | 85 | unsigned int lshft, unsigned int selbit, |
104 | unsigned long mask, const unsigned int *dtab) | 86 | unsigned long mask) |
105 | { | 87 | { |
106 | if (val & selbit) | 88 | if (val & selbit) |
107 | pll2 = 288 * MHZ; | 89 | pll2 = 288 * MHZ; |
108 | 90 | ||
109 | return pll2 / dtab[(val >> lshft) & mask]; | 91 | return pll2 / div_tab[(val >> lshft) & mask]; |
110 | } | 92 | } |
111 | 93 | ||
112 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) | 94 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) |
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
141 | } | 123 | } |
142 | 124 | ||
143 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; | 125 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; |
144 | sdclk0 /= misc_div[((misct >> 8) & 0xf)]; | 126 | sdclk0 /= div_tab[((misct >> 8) & 0xf)]; |
145 | 127 | ||
146 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; | 128 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; |
147 | sdclk1 /= misc_div[((misct >> 16) & 0xf)]; | 129 | sdclk1 /= div_tab[((misct >> 16) & 0xf)]; |
148 | 130 | ||
149 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", | 131 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", |
150 | misct, pm0, pm1); | 132 | misct, pm0, pm1); |
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
158 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 140 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
159 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 141 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
160 | (pmc & 3 ) == 0 ? '*' : '-', | 142 | (pmc & 3 ) == 0 ? '*' : '-', |
161 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), | 143 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)), |
162 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), | 144 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)), |
163 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), | 145 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)), |
164 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); | 146 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15))); |
165 | 147 | ||
166 | dev_dbg(sm->dev, "PM1[%c]: " | 148 | dev_dbg(sm->dev, "PM1[%c]: " |
167 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 149 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
168 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 150 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
169 | (pmc & 3 ) == 1 ? '*' : '-', | 151 | (pmc & 3 ) == 1 ? '*' : '-', |
170 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), | 152 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)), |
171 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), | 153 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)), |
172 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), | 154 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)), |
173 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); | 155 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15))); |
174 | } | 156 | } |
175 | 157 | ||
176 | static void sm501_dump_regs(struct sm501_devdata *sm) | 158 | static void sm501_dump_regs(struct sm501_devdata *sm) |
@@ -436,46 +418,108 @@ struct sm501_clock { | |||
436 | unsigned long mclk; | 418 | unsigned long mclk; |
437 | int divider; | 419 | int divider; |
438 | int shift; | 420 | int shift; |
421 | unsigned int m, n, k; | ||
439 | }; | 422 | }; |
440 | 423 | ||
424 | /* sm501_calc_clock | ||
425 | * | ||
426 | * Calculates the nearest discrete clock frequency that | ||
427 | * can be achieved with the specified input clock. | ||
428 | * the maximum divisor is 3 or 5 | ||
429 | */ | ||
430 | |||
431 | static int sm501_calc_clock(unsigned long freq, | ||
432 | struct sm501_clock *clock, | ||
433 | int max_div, | ||
434 | unsigned long mclk, | ||
435 | long *best_diff) | ||
436 | { | ||
437 | int ret = 0; | ||
438 | int divider; | ||
439 | int shift; | ||
440 | long diff; | ||
441 | |||
442 | /* try dividers 1 and 3 for CRT and for panel, | ||
443 | try divider 5 for panel only.*/ | ||
444 | |||
445 | for (divider = 1; divider <= max_div; divider += 2) { | ||
446 | /* try all 8 shift values.*/ | ||
447 | for (shift = 0; shift < 8; shift++) { | ||
448 | /* Calculate difference to requested clock */ | ||
449 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
450 | if (diff < 0) | ||
451 | diff = -diff; | ||
452 | |||
453 | /* If it is less than the current, use it */ | ||
454 | if (diff < *best_diff) { | ||
455 | *best_diff = diff; | ||
456 | |||
457 | clock->mclk = mclk; | ||
458 | clock->divider = divider; | ||
459 | clock->shift = shift; | ||
460 | ret = 1; | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /* sm501_calc_pll | ||
469 | * | ||
470 | * Calculates the nearest discrete clock frequency that can be | ||
471 | * achieved using the programmable PLL. | ||
472 | * the maximum divisor is 3 or 5 | ||
473 | */ | ||
474 | |||
475 | static unsigned long sm501_calc_pll(unsigned long freq, | ||
476 | struct sm501_clock *clock, | ||
477 | int max_div) | ||
478 | { | ||
479 | unsigned long mclk; | ||
480 | unsigned int m, n, k; | ||
481 | long best_diff = 999999999; | ||
482 | |||
483 | /* | ||
484 | * The SM502 datasheet doesn't specify the min/max values for M and N. | ||
485 | * N = 1 at least doesn't work in practice. | ||
486 | */ | ||
487 | for (m = 2; m <= 255; m++) { | ||
488 | for (n = 2; n <= 127; n++) { | ||
489 | for (k = 0; k <= 1; k++) { | ||
490 | mclk = (24000000UL * m / n) >> k; | ||
491 | |||
492 | if (sm501_calc_clock(freq, clock, max_div, | ||
493 | mclk, &best_diff)) { | ||
494 | clock->m = m; | ||
495 | clock->n = n; | ||
496 | clock->k = k; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Return best clock. */ | ||
503 | return clock->mclk / (clock->divider << clock->shift); | ||
504 | } | ||
505 | |||
441 | /* sm501_select_clock | 506 | /* sm501_select_clock |
442 | * | 507 | * |
443 | * selects nearest discrete clock frequency the SM501 can achive | 508 | * Calculates the nearest discrete clock frequency that can be |
509 | * achieved using the 288MHz and 336MHz PLLs. | ||
444 | * the maximum divisor is 3 or 5 | 510 | * the maximum divisor is 3 or 5 |
445 | */ | 511 | */ |
512 | |||
446 | static unsigned long sm501_select_clock(unsigned long freq, | 513 | static unsigned long sm501_select_clock(unsigned long freq, |
447 | struct sm501_clock *clock, | 514 | struct sm501_clock *clock, |
448 | int max_div) | 515 | int max_div) |
449 | { | 516 | { |
450 | unsigned long mclk; | 517 | unsigned long mclk; |
451 | int divider; | ||
452 | int shift; | ||
453 | long diff; | ||
454 | long best_diff = 999999999; | 518 | long best_diff = 999999999; |
455 | 519 | ||
456 | /* Try 288MHz and 336MHz clocks. */ | 520 | /* Try 288MHz and 336MHz clocks. */ |
457 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { | 521 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { |
458 | /* try dividers 1 and 3 for CRT and for panel, | 522 | sm501_calc_clock(freq, clock, max_div, mclk, &best_diff); |
459 | try divider 5 for panel only.*/ | ||
460 | |||
461 | for (divider = 1; divider <= max_div; divider += 2) { | ||
462 | /* try all 8 shift values.*/ | ||
463 | for (shift = 0; shift < 8; shift++) { | ||
464 | /* Calculate difference to requested clock */ | ||
465 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
466 | if (diff < 0) | ||
467 | diff = -diff; | ||
468 | |||
469 | /* If it is less than the current, use it */ | ||
470 | if (diff < best_diff) { | ||
471 | best_diff = diff; | ||
472 | |||
473 | clock->mclk = mclk; | ||
474 | clock->divider = divider; | ||
475 | clock->shift = shift; | ||
476 | } | ||
477 | } | ||
478 | } | ||
479 | } | 523 | } |
480 | 524 | ||
481 | /* Return best clock. */ | 525 | /* Return best clock. */ |
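The new sm501_calc_clock()/sm501_calc_pll() pair is a brute-force search: for every programmable-PLL setting mclk = (24 MHz * M / N) >> K, every output divider (1, 3 and, for the panel clock, 5) and every power-of-two post-shift (0..7), it keeps whichever combination lands closest to the requested rate. A stand-alone sketch of the same search, using a simple round-to-nearest divide where the driver calls sm501fb_round_div() (that helper is defined elsewhere, so its exact rounding is an assumption here):

#include <stdio.h>

/* round-to-nearest division; assumed to behave like the driver's
 * sm501fb_round_div() helper */
static unsigned long long round_div(unsigned long long num, unsigned long long denom)
{
	return (num + denom / 2) / denom;
}

int main(void)
{
	unsigned long long target = 25175000;	/* e.g. a VGA pixel clock, in Hz */
	unsigned long long best = 0, best_diff = ~0ULL;
	unsigned int bm = 0, bn = 0, bk = 0, bdiv = 0, bshift = 0;
	unsigned int m, n, k, div, shift;

	for (m = 2; m <= 255; m++)
		for (n = 2; n <= 127; n++)
			for (k = 0; k <= 1; k++) {
				/* programmable PLL: (24 MHz * M / N) >> K */
				unsigned long long mclk = (24000000ULL * m / n) >> k;

				/* output stage: divider 1/3/5, then /2^shift */
				for (div = 1; div <= 5; div += 2)
					for (shift = 0; shift < 8; shift++) {
						unsigned long long f = round_div(mclk, div << shift);
						unsigned long long d = f > target ? f - target : target - f;

						if (d < best_diff) {
							best_diff = d;
							best = f;
							bm = m; bn = n; bk = k;
							bdiv = div; bshift = shift;
						}
					}
			}

	printf("closest %llu Hz: M=%u N=%u K=%u, divider %u, shift %u\n",
	       best, bm, bn, bk, bdiv, bshift);
	return 0;
}

The 2..255 / 2..127 / 0..1 loop bounds mirror the ranges the driver tries; as its own comment notes, the datasheet does not spell out the legal M/N limits.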
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev, | |||
497 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); | 541 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); |
498 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); | 542 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); |
499 | unsigned char reg; | 543 | unsigned char reg; |
544 | unsigned int pll_reg = 0; | ||
500 | unsigned long sm501_freq; /* the actual frequency achieved */ | 545 | unsigned long sm501_freq; /* the actual frequency achieved */ |
501 | 546 | ||
502 | struct sm501_clock to; | 547 | struct sm501_clock to; |
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev, | |||
511 | * requested frequency the value must be multiplied by | 556 | * requested frequency the value must be multiplied by |
512 | * 2. This clock also has an additional pre divisor */ | 557 | * 2. This clock also has an additional pre divisor */ |
513 | 558 | ||
514 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 559 | if (sm->rev >= 0xC0) { |
515 | reg=to.shift & 0x07;/* bottom 3 bits are shift */ | 560 | /* SM502 -> use the programmable PLL */ |
516 | if (to.divider == 3) | 561 | sm501_freq = (sm501_calc_pll(2 * req_freq, |
517 | reg |= 0x08; /* /3 divider required */ | 562 | &to, 5) / 2); |
518 | else if (to.divider == 5) | 563 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ |
519 | reg |= 0x10; /* /5 divider required */ | 564 | if (to.divider == 3) |
520 | if (to.mclk != 288000000) | 565 | reg |= 0x08; /* /3 divider required */ |
521 | reg |= 0x20; /* which mclk pll is source */ | 566 | else if (to.divider == 5) |
567 | reg |= 0x10; /* /5 divider required */ | ||
568 | reg |= 0x40; /* select the programmable PLL */ | ||
569 | pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; | ||
570 | } else { | ||
571 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
572 | &to, 5) / 2); | ||
573 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ | ||
574 | if (to.divider == 3) | ||
575 | reg |= 0x08; /* /3 divider required */ | ||
576 | else if (to.divider == 5) | ||
577 | reg |= 0x10; /* /5 divider required */ | ||
578 | if (to.mclk != 288000000) | ||
579 | reg |= 0x20; /* which mclk pll is source */ | ||
580 | } | ||
522 | break; | 581 | break; |
523 | 582 | ||
524 | case SM501_CLOCK_V2XCLK: | 583 | case SM501_CLOCK_V2XCLK: |
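For SM502 parts (revision 0xC0 and later) the chosen M/N/K values are then packed into the programmable-PLL control word: M occupies the low 8 bits, N bits 8..14, K bit 15, and 0x20000 sets bit 17 (presumably the PLL enable/select control; the layout is inferred from the expression above, not from a datasheet). A quick worked packing for hypothetical values:

/* hypothetical setting: M = 96, N = 12, K = 0  ->  (24 MHz * 96 / 12) >> 0 = 192 MHz */
unsigned int m = 96, n = 12, k = 0;
unsigned int pll_reg = 0x20000 | (k << 15) | (n << 8) | m;	/* = 0x20C60 */

The search bounds in sm501_calc_pll() (M <= 255, N <= 127, K <= 1) fit exactly into those field widths.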
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev, | |||
579 | } | 638 | } |
580 | 639 | ||
581 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); | 640 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); |
641 | |||
642 | if (pll_reg) | ||
643 | writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL); | ||
644 | |||
582 | sm501_sync_regs(sm); | 645 | sm501_sync_regs(sm); |
583 | 646 | ||
584 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", | 647 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", |
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock); | |||
599 | * finds the closest available frequency for a given clock | 662 | * finds the closest available frequency for a given clock |
600 | */ | 663 | */ |
601 | 664 | ||
602 | unsigned long sm501_find_clock(int clksrc, | 665 | unsigned long sm501_find_clock(struct device *dev, |
666 | int clksrc, | ||
603 | unsigned long req_freq) | 667 | unsigned long req_freq) |
604 | { | 668 | { |
669 | struct sm501_devdata *sm = dev_get_drvdata(dev); | ||
605 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ | 670 |
606 | struct sm501_clock to; | 671 | struct sm501_clock to; |
607 | 672 | ||
608 | switch (clksrc) { | 673 | switch (clksrc) { |
609 | case SM501_CLOCK_P2XCLK: | 674 | case SM501_CLOCK_P2XCLK: |
610 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 675 | if (sm->rev >= 0xC0) { |
676 | /* SM502 -> use the programmable PLL */ | ||
677 | sm501_freq = (sm501_calc_pll(2 * req_freq, | ||
678 | &to, 5) / 2); | ||
679 | } else { | ||
680 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
681 | &to, 5) / 2); | ||
682 | } | ||
611 | break; | 683 | break; |
612 | 684 | ||
613 | case SM501_CLOCK_V2XCLK: | 685 | case SM501_CLOCK_V2XCLK: |
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm) | |||
914 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", | 986 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", |
915 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); | 987 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); |
916 | 988 | ||
989 | sm->rev = devid & SM501_DEVICEID_REVMASK; | ||
990 | |||
917 | sm501_dump_gate(sm); | 991 | sm501_dump_gate(sm); |
918 | 992 | ||
919 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); | 993 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); |
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 982e27b86d10..962817e49fba 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -108,6 +108,7 @@ config ACER_WMI | |||
108 | depends on ACPI | 108 | depends on ACPI |
109 | depends on LEDS_CLASS | 109 | depends on LEDS_CLASS |
110 | depends on BACKLIGHT_CLASS_DEVICE | 110 | depends on BACKLIGHT_CLASS_DEVICE |
111 | depends on SERIO_I8042 | ||
111 | select ACPI_WMI | 112 | select ACPI_WMI |
112 | ---help--- | 113 | ---help--- |
113 | This is a driver for newer Acer (and Wistron) laptops. It adds | 114 | This is a driver for newer Acer (and Wistron) laptops. It adds |
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c index 74d12b4a3abd..dd13a3749927 100644 --- a/drivers/misc/acer-wmi.c +++ b/drivers/misc/acer-wmi.c | |||
@@ -219,6 +219,15 @@ static struct dmi_system_id acer_quirks[] = { | |||
219 | }, | 219 | }, |
220 | { | 220 | { |
221 | .callback = dmi_matched, | 221 | .callback = dmi_matched, |
222 | .ident = "Acer Aspire 3610", | ||
223 | .matches = { | ||
224 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
225 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"), | ||
226 | }, | ||
227 | .driver_data = &quirk_acer_travelmate_2490, | ||
228 | }, | ||
229 | { | ||
230 | .callback = dmi_matched, | ||
222 | .ident = "Acer Aspire 5100", | 231 | .ident = "Acer Aspire 5100", |
223 | .matches = { | 232 | .matches = { |
224 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 233 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
@@ -228,6 +237,15 @@ static struct dmi_system_id acer_quirks[] = { | |||
228 | }, | 237 | }, |
229 | { | 238 | { |
230 | .callback = dmi_matched, | 239 | .callback = dmi_matched, |
240 | .ident = "Acer Aspire 5610", | ||
241 | .matches = { | ||
242 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
243 | DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), | ||
244 | }, | ||
245 | .driver_data = &quirk_acer_travelmate_2490, | ||
246 | }, | ||
247 | { | ||
248 | .callback = dmi_matched, | ||
231 | .ident = "Acer Aspire 5630", | 249 | .ident = "Acer Aspire 5630", |
232 | .matches = { | 250 | .matches = { |
233 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | 251 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), |
@@ -761,11 +779,11 @@ enum led_brightness value) | |||
761 | } | 779 | } |
762 | 780 | ||
763 | static struct led_classdev mail_led = { | 781 | static struct led_classdev mail_led = { |
764 | .name = "acer-mail:green", | 782 | .name = "acer-wmi::mail", |
765 | .brightness_set = mail_led_set, | 783 | .brightness_set = mail_led_set, |
766 | }; | 784 | }; |
767 | 785 | ||
768 | static int __init acer_led_init(struct device *dev) | 786 | static int __devinit acer_led_init(struct device *dev) |
769 | { | 787 | { |
770 | return led_classdev_register(dev, &mail_led); | 788 | return led_classdev_register(dev, &mail_led); |
771 | } | 789 | } |
@@ -798,7 +816,7 @@ static struct backlight_ops acer_bl_ops = { | |||
798 | .update_status = update_bl_status, | 816 | .update_status = update_bl_status, |
799 | }; | 817 | }; |
800 | 818 | ||
801 | static int __init acer_backlight_init(struct device *dev) | 819 | static int __devinit acer_backlight_init(struct device *dev) |
802 | { | 820 | { |
803 | struct backlight_device *bd; | 821 | struct backlight_device *bd; |
804 | 822 | ||
@@ -817,7 +835,7 @@ static int __init acer_backlight_init(struct device *dev) | |||
817 | return 0; | 835 | return 0; |
818 | } | 836 | } |
819 | 837 | ||
820 | static void __exit acer_backlight_exit(void) | 838 | static void acer_backlight_exit(void) |
821 | { | 839 | { |
822 | backlight_device_unregister(acer_backlight_device); | 840 | backlight_device_unregister(acer_backlight_device); |
823 | } | 841 | } |
@@ -1052,11 +1070,12 @@ static int __init acer_wmi_init(void) | |||
1052 | 1070 | ||
1053 | if (wmi_has_guid(WMID_GUID2) && interface) { | 1071 | if (wmi_has_guid(WMID_GUID2) && interface) { |
1054 | if (ACPI_FAILURE(WMID_set_capabilities())) { | 1072 | if (ACPI_FAILURE(WMID_set_capabilities())) { |
1055 | printk(ACER_ERR "Unable to detect available devices\n"); | 1073 | printk(ACER_ERR "Unable to detect available WMID " |
1074 | "devices\n"); | ||
1056 | return -ENODEV; | 1075 | return -ENODEV; |
1057 | } | 1076 | } |
1058 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { | 1077 | } else if (!wmi_has_guid(WMID_GUID2) && interface) { |
1059 | printk(ACER_ERR "Unable to detect available devices\n"); | 1078 | printk(ACER_ERR "No WMID device detection method found\n"); |
1060 | return -ENODEV; | 1079 | return -ENODEV; |
1061 | } | 1080 | } |
1062 | 1081 | ||
@@ -1064,21 +1083,20 @@ static int __init acer_wmi_init(void) | |||
1064 | interface = &AMW0_interface; | 1083 | interface = &AMW0_interface; |
1065 | 1084 | ||
1066 | if (ACPI_FAILURE(AMW0_set_capabilities())) { | 1085 | if (ACPI_FAILURE(AMW0_set_capabilities())) { |
1067 | printk(ACER_ERR "Unable to detect available devices\n"); | 1086 | printk(ACER_ERR "Unable to detect available AMW0 " |
1087 | "devices\n"); | ||
1068 | return -ENODEV; | 1088 | return -ENODEV; |
1069 | } | 1089 | } |
1070 | } | 1090 | } |
1071 | 1091 | ||
1072 | if (wmi_has_guid(AMW0_GUID1)) { | 1092 | if (wmi_has_guid(AMW0_GUID1)) |
1073 | if (ACPI_FAILURE(AMW0_find_mailled())) | 1093 | AMW0_find_mailled(); |
1074 | printk(ACER_ERR "Unable to detect mail LED\n"); | ||
1075 | } | ||
1076 | 1094 | ||
1077 | find_quirks(); | 1095 | find_quirks(); |
1078 | 1096 | ||
1079 | if (!interface) { | 1097 | if (!interface) { |
1080 | printk(ACER_ERR "No or unsupported WMI interface, unable to "); | 1098 | printk(ACER_ERR "No or unsupported WMI interface, unable to " |
1081 | printk(KERN_CONT "load.\n"); | 1099 | "load\n"); |
1082 | return -ENODEV; | 1100 | return -ENODEV; |
1083 | } | 1101 | } |
1084 | 1102 | ||
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c index 899e3f75f288..02ff3d19b1cc 100644 --- a/drivers/misc/sony-laptop.c +++ b/drivers/misc/sony-laptop.c | |||
@@ -315,7 +315,7 @@ static void sony_laptop_report_input_event(u8 event) | |||
315 | break; | 315 | break; |
316 | 316 | ||
317 | default: | 317 | default: |
318 | if (event > ARRAY_SIZE(sony_laptop_input_index)) { | 318 | if (event >= ARRAY_SIZE(sony_laptop_input_index)) { |
319 | dprintk("sony_laptop_report_input_event, event not known: %d\n", event); | 319 | dprintk("sony_laptop_report_input_event, event not known: %d\n", event); |
320 | break; | 320 | break; |
321 | } | 321 | } |
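The sony-laptop change is a classic off-by-one: for an array of N entries the valid indices are 0..N-1, so the reject test must be event >= ARRAY_SIZE(...), not event > ARRAY_SIZE(...), otherwise event == N would read one element past the end. A stand-alone illustration (hypothetical table, not the driver's):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const names[] = { "zero", "one", "two" };

static const char *lookup(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(names))	/* '>' here would let idx == 3 through */
		return "unknown";
	return names[idx];
}

int main(void)
{
	printf("%s %s\n", lookup(2), lookup(3));	/* prints "two unknown" */
	return 0;
}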
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index bb269d0c677e..6cb781262f94 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c | |||
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status) | |||
1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) | 1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) |
1079 | return -EIO; | 1079 | return -EIO; |
1080 | 1080 | ||
1081 | return ((s & TP_HOTKEY_TABLET_MASK) != 0); | 1081 | *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); |
1082 | return 0; | ||
1082 | } | 1083 | } |
1083 | 1084 | ||
1084 | /* | 1085 | /* |
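hotkey_get_tablet_mode() is declared to report the tablet state through its *status argument and to use the return value only for errors; before this fix it returned the state itself, so a caller following that contract would mistake "tablet mode" for a failure. The intended calling pattern looks roughly like this (the caller shown is illustrative, not lifted from the driver):

int s, rc;

rc = hotkey_get_tablet_mode(&s);
if (rc < 0)
	return rc;	/* the ACPI MHKG query itself failed */
/* only now is 's' meaningful: non-zero means the machine is folded into tablet mode */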
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 63a089b29545..67503ea71d21 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c | |||
@@ -368,6 +368,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev, | |||
368 | goto err_out_irq; | 368 | goto err_out_irq; |
369 | 369 | ||
370 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), | 370 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), |
371 | fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | ||
372 | writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1), | ||
371 | fm->addr + FM_SET_INTERRUPT_ENABLE); | 373 | fm->addr + FM_SET_INTERRUPT_ENABLE); |
372 | return 0; | 374 | return 0; |
373 | 375 | ||
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 20d5c7bd940a..1c14a186f000 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c | |||
@@ -180,7 +180,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host) | |||
180 | host->sg_pos++; | 180 | host->sg_pos++; |
181 | if (host->sg_pos == host->sg_len) { | 181 | if (host->sg_pos == host->sg_len) { |
182 | if ((r_data->flags & MMC_DATA_WRITE) | 182 | if ((r_data->flags & MMC_DATA_WRITE) |
183 | && DATA_CARRY) | 183 | && (host->cmd_flags & DATA_CARRY)) |
184 | writel(host->bounce_buf_data[0], | 184 | writel(host->bounce_buf_data[0], |
185 | host->dev->addr | 185 | host->dev->addr |
186 | + SOCK_MMCSD_DATA); | 186 | + SOCK_MMCSD_DATA); |
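The tifm_sd condition previously tested the constant DATA_CARRY itself instead of testing that bit in host->cmd_flags; since the macro expands to a non-zero value, the old "&& DATA_CARRY" was always true and the stale-byte flush ran on every write. A tiny demonstration of the difference (the flag values here are placeholders, not the driver's):

#include <stdio.h>

#define MMC_DATA_WRITE	(1 << 0)	/* placeholder values */
#define DATA_CARRY	(1 << 3)

int main(void)
{
	unsigned int r_flags = MMC_DATA_WRITE;	/* it is a write transfer... */
	unsigned int cmd_flags = 0;		/* ...but no byte is pending in the bounce buffer */

	if ((r_flags & MMC_DATA_WRITE) && DATA_CARRY)
		printf("buggy test fires: would write a stale byte\n");

	if ((r_flags & MMC_DATA_WRITE) && (cmd_flags & DATA_CARRY))
		printf("fixed test fires\n");
	else
		printf("fixed test stays quiet\n");
	return 0;
}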
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 6ac81e35355c..275960462970 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -1000,8 +1000,8 @@ static int __init ubi_init(void) | |||
1000 | mutex_unlock(&ubi_devices_mutex); | 1000 | mutex_unlock(&ubi_devices_mutex); |
1001 | if (err < 0) { | 1001 | if (err < 0) { |
1002 | put_mtd_device(mtd); | 1002 | put_mtd_device(mtd); |
1003 | printk(KERN_ERR "UBI error: cannot attach %s\n", | 1003 | printk(KERN_ERR "UBI error: cannot attach mtd%d\n", |
1004 | p->name); | 1004 | mtd->index); |
1005 | goto out_detach; | 1005 | goto out_detach; |
1006 | } | 1006 | } |
1007 | } | 1007 | } |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 457710615261..a548c1d28fa8 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -217,11 +217,11 @@ struct ubi_volume { | |||
217 | void *upd_buf; | 217 | void *upd_buf; |
218 | 218 | ||
219 | int *eba_tbl; | 219 | int *eba_tbl; |
220 | int checked:1; | 220 | unsigned int checked:1; |
221 | int corrupted:1; | 221 | unsigned int corrupted:1; |
222 | int upd_marker:1; | 222 | unsigned int upd_marker:1; |
223 | int updating:1; | 223 | unsigned int updating:1; |
224 | int changing_leb:1; | 224 | unsigned int changing_leb:1; |
225 | 225 | ||
226 | #ifdef CONFIG_MTD_UBI_GLUEBI | 226 | #ifdef CONFIG_MTD_UBI_GLUEBI |
227 | /* | 227 | /* |
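The ubi.h change turns five 1-bit flags from int into unsigned int. The signedness of a plain 'int' bitfield is implementation-defined, and where it is signed a 1-bit field can only hold 0 and -1, so tests like 'vol->checked == 1' can quietly fail. A small user-space reproduction of the pitfall:

#include <stdio.h>

struct flags_signed   { int checked:1; };
struct flags_unsigned { unsigned int checked:1; };

int main(void)
{
	struct flags_signed s = { 0 };
	struct flags_unsigned u = { 0 };

	/* with a signed 1-bit field, storing 1 typically reads back as -1 */
	s.checked = 1;
	u.checked = 1;

	printf("signed: %d   unsigned: %u\n", s.checked, u.checked);
	printf("s.checked == 1 ? %s\n", s.checked == 1 ? "yes" : "no");
	return 0;
}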
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index a3ca2257e601..5be58d85c639 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -376,7 +376,9 @@ out_sysfs: | |||
376 | get_device(&vol->dev); | 376 | get_device(&vol->dev); |
377 | volume_sysfs_close(vol); | 377 | volume_sysfs_close(vol); |
378 | out_gluebi: | 378 | out_gluebi: |
379 | ubi_destroy_gluebi(vol); | 379 | if (ubi_destroy_gluebi(vol)) |
380 | dbg_err("cannot destroy gluebi for volume %d:%d", | ||
381 | ubi->ubi_num, vol_id); | ||
380 | out_cdev: | 382 | out_cdev: |
381 | cdev_del(&vol->cdev); | 383 | cdev_del(&vol->cdev); |
382 | out_mapping: | 384 | out_mapping: |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 56fc3fbce838..af36b12be278 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -519,6 +519,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
519 | if (ubi->autoresize_vol_id != -1) { | 519 | if (ubi->autoresize_vol_id != -1) { |
520 | ubi_err("more than one auto-resize volume (%d " | 520 | ubi_err("more than one auto-resize volume (%d " |
521 | "and %d)", ubi->autoresize_vol_id, i); | 521 | "and %d)", ubi->autoresize_vol_id, i); |
522 | kfree(vol); | ||
522 | return -EINVAL; | 523 | return -EINVAL; |
523 | } | 524 | } |
524 | 525 | ||
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0fbf1bbbaee9..d7a3ea88eddb 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1253 | 1253 | ||
1254 | /* Setup interrupt handlers. */ | 1254 | /* Setup interrupt handlers. */ |
1255 | for (idp = id; idp->name; idp++) { | 1255 | for (idp = id; idp->name; idp++) { |
1256 | if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) | 1256 | if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0) |
1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); | 1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); |
1258 | } | 1258 | } |
1259 | 1259 | ||
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1382 | 1382 | ||
1383 | /* Setup interrupt handlers. */ | 1383 | /* Setup interrupt handlers. */ |
1384 | for (idp = id; idp->name; idp++) { | 1384 | for (idp = id; idp->name; idp++) { |
1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) | 1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0) |
1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1387 | } | 1387 | } |
1388 | 1388 | ||
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1553 | 1553 | ||
1554 | /* Setup interrupt handlers. */ | 1554 | /* Setup interrupt handlers. */ |
1555 | for (idp = id; idp->name; idp++) { | 1555 | for (idp = id; idp->name; idp++) { |
1556 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1556 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1558 | } | 1558 | } |
1559 | 1559 | ||
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1680 | 1680 | ||
1681 | /* Setup interrupt handlers. */ | 1681 | /* Setup interrupt handlers. */ |
1682 | for (idp = id; idp->name; idp++) { | 1682 | for (idp = id; idp->name; idp++) { |
1683 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1683 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", | 1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", |
1685 | idp->name, b+idp->irq); | 1685 | idp->name, b+idp->irq); |
1686 | } | 1686 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 19fd4cb0ddf8..b58472cf76f8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -203,8 +203,11 @@ again: | |||
203 | if (received < budget) { | 203 | if (received < budget) { |
204 | netif_rx_complete(vi->dev, napi); | 204 | netif_rx_complete(vi->dev, napi); |
205 | if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) | 205 | if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) |
206 | && netif_rx_reschedule(vi->dev, napi)) | 206 | && napi_schedule_prep(napi)) { |
207 | vi->rvq->vq_ops->disable_cb(vi->rvq); | ||
208 | __netif_rx_schedule(vi->dev, napi); | ||
207 | goto again; | 209 | goto again; |
210 | } | ||
208 | } | 211 | } |
209 | 212 | ||
210 | return received; | 213 | return received; |
@@ -278,10 +281,11 @@ again: | |||
278 | pr_debug("%s: virtio not prepared to send\n", dev->name); | 281 | pr_debug("%s: virtio not prepared to send\n", dev->name); |
279 | netif_stop_queue(dev); | 282 | netif_stop_queue(dev); |
280 | 283 | ||
281 | /* Activate callback for using skbs: if this fails it | 284 | /* Activate callback for using skbs: if this returns false it |
282 | * means some were used in the meantime. */ | 285 | * means some were used in the meantime. */ |
283 | if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { | 286 | if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { |
284 | printk("Unlikely: restart svq failed\n"); | 287 | printk("Unlikely: restart svq race\n"); |
288 | vi->svq->vq_ops->disable_cb(vi->svq); | ||
285 | netif_start_queue(dev); | 289 | netif_start_queue(dev); |
286 | goto again; | 290 | goto again; |
287 | } | 291 | } |
@@ -294,6 +298,15 @@ again: | |||
294 | return 0; | 298 | return 0; |
295 | } | 299 | } |
296 | 300 | ||
301 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
302 | static void virtnet_netpoll(struct net_device *dev) | ||
303 | { | ||
304 | struct virtnet_info *vi = netdev_priv(dev); | ||
305 | |||
306 | napi_schedule(&vi->napi); | ||
307 | } | ||
308 | #endif | ||
309 | |||
297 | static int virtnet_open(struct net_device *dev) | 310 | static int virtnet_open(struct net_device *dev) |
298 | { | 311 | { |
299 | struct virtnet_info *vi = netdev_priv(dev); | 312 | struct virtnet_info *vi = netdev_priv(dev); |
@@ -336,6 +349,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
336 | dev->stop = virtnet_close; | 349 | dev->stop = virtnet_close; |
337 | dev->hard_start_xmit = start_xmit; | 350 | dev->hard_start_xmit = start_xmit; |
338 | dev->features = NETIF_F_HIGHDMA; | 351 | dev->features = NETIF_F_HIGHDMA; |
352 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
353 | dev->poll_controller = virtnet_netpoll; | ||
354 | #endif | ||
339 | SET_NETDEV_DEV(dev, &vdev->dev); | 355 | SET_NETDEV_DEV(dev, &vdev->dev); |
340 | 356 | ||
341 | /* Do we support "hardware" checksums? */ | 357 | /* Do we support "hardware" checksums? */ |
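Both virtio_net hunks close the same window: after enable_cb() re-arms the virtqueue callback, buffers may already have been consumed, so the driver must notice that (enable_cb() returning false), take polling ownership back, disable the callback again and loop. The receive-side epilogue again, with editorial comments added (the code is the hunk's, the comments are not):

	if (received < budget) {
		netif_rx_complete(vi->dev, napi);		/* leave polling mode */
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))	/* work raced in... */
		    && napi_schedule_prep(napi)) {		/* ...and we won the right to repoll */
			vi->rvq->vq_ops->disable_cb(vi->rvq);	/* we'll poll it ourselves */
			__netif_rx_schedule(vi->dev, napi);
			goto again;
		}
	}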
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 1d3b84b4af3f..553a9905299a 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig | |||
@@ -103,6 +103,11 @@ config IOMMU_SBA | |||
103 | depends on PCI_LBA | 103 | depends on PCI_LBA |
104 | default PCI_LBA | 104 | default PCI_LBA |
105 | 105 | ||
106 | config IOMMU_HELPER | ||
107 | bool | ||
108 | depends on IOMMU_SBA || IOMMU_CCIO | ||
109 | default y | ||
110 | |||
106 | #config PCI_EPIC | 111 | #config PCI_EPIC |
107 | # bool "EPIC/SAGA PCI support" | 112 | # bool "EPIC/SAGA PCI support" |
108 | # depends on PCI | 113 | # depends on PCI |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d08b284de196..62db3c3fe4dc 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/iommu-helper.h> | ||
46 | 47 | ||
47 | #include <asm/byteorder.h> | 48 | #include <asm/byteorder.h> |
48 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ | 49 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ |
@@ -302,13 +303,17 @@ static int ioc_count; | |||
302 | */ | 303 | */ |
303 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ | 304 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ |
304 | for(; res_ptr < res_end; ++res_ptr) { \ | 305 | for(; res_ptr < res_end; ++res_ptr) { \ |
305 | if(0 == (*res_ptr & mask)) { \ | 306 | int ret;\ |
306 | *res_ptr |= mask; \ | 307 | unsigned int idx;\ |
307 | res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ | 308 | idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ |
308 | ioc->res_hint = res_idx + (size >> 3); \ | 309 | ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\ |
309 | goto resource_found; \ | 310 | if ((0 == (*res_ptr & mask)) && !ret) { \ |
310 | } \ | 311 | *res_ptr |= mask; \ |
311 | } | 312 | res_idx = idx;\ |
313 | ioc->res_hint = res_idx + (size >> 3); \ | ||
314 | goto resource_found; \ | ||
315 | } \ | ||
316 | } | ||
312 | 317 | ||
313 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ | 318 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ |
314 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ | 319 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ |
@@ -341,10 +346,11 @@ static int ioc_count; | |||
341 | * of available pages for the requested size. | 346 | * of available pages for the requested size. |
342 | */ | 347 | */ |
343 | static int | 348 | static int |
344 | ccio_alloc_range(struct ioc *ioc, size_t size) | 349 | ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
345 | { | 350 | { |
346 | unsigned int pages_needed = size >> IOVP_SHIFT; | 351 | unsigned int pages_needed = size >> IOVP_SHIFT; |
347 | unsigned int res_idx; | 352 | unsigned int res_idx; |
353 | unsigned long boundary_size; | ||
348 | #ifdef CCIO_SEARCH_TIME | 354 | #ifdef CCIO_SEARCH_TIME |
349 | unsigned long cr_start = mfctl(16); | 355 | unsigned long cr_start = mfctl(16); |
350 | #endif | 356 | #endif |
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size) | |||
360 | ** ggg sacrifices another 710 to the computer gods. | 366 | ** ggg sacrifices another 710 to the computer gods. |
361 | */ | 367 | */ |
362 | 368 | ||
369 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, | ||
370 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; | ||
371 | |||
363 | if (pages_needed <= 8) { | 372 | if (pages_needed <= 8) { |
364 | /* | 373 | /* |
365 | * LAN traffic will not thrash the TLB IFF the same NIC | 374 | * LAN traffic will not thrash the TLB IFF the same NIC |
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
760 | ioc->msingle_pages += size >> IOVP_SHIFT; | 769 | ioc->msingle_pages += size >> IOVP_SHIFT; |
761 | #endif | 770 | #endif |
762 | 771 | ||
763 | idx = ccio_alloc_range(ioc, size); | 772 | idx = ccio_alloc_range(ioc, dev, size); |
764 | iovp = (dma_addr_t)MKIOVP(idx); | 773 | iovp = (dma_addr_t)MKIOVP(idx); |
765 | 774 | ||
766 | pdir_start = &(ioc->pdir_base[idx]); | 775 | pdir_start = &(ioc->pdir_base[idx]); |
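The ccio allocator (and sba_iommu further down) now refuses any bitmap range that would straddle the device's DMA segment boundary: boundary_size is that boundary converted to IOVP pages, and iommu_is_span_boundary() rejects a candidate start index when start + pages crosses a multiple of it. The check is small enough to restate stand-alone; this mirrors the helper from linux/iommu-helper.h, with arbitrary demo numbers:

#include <stdio.h>

/* would an allocation of 'nr' pages starting at page 'index' (plus a fixed
 * 'shift' offset) cross a multiple of boundary_size pages?
 * boundary_size must be a power of two. */
static int is_span_boundary(unsigned long index, unsigned long nr,
			    unsigned long shift, unsigned long boundary_size)
{
	unsigned long offset = (shift + index) & (boundary_size - 1);

	return offset + nr > boundary_size;
}

int main(void)
{
	/* a 4 GiB segment boundary with 4 KiB IOVP pages -> 0x100000 pages */
	unsigned long long seg_boundary = 0xffffffffULL;
	unsigned long boundary_size = (seg_boundary + 1) >> 12;

	printf("%d\n", is_span_boundary(0x0ffff0, 8, 0, boundary_size)); /* 0: stays below 4 GiB */
	printf("%d\n", is_span_boundary(0x0ffffc, 8, 0, boundary_size)); /* 1: would straddle it */
	return 0;
}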
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 97ba8286c596..a9c46cc2db37 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h | |||
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
96 | 96 | ||
97 | static inline unsigned int | 97 | static inline unsigned int |
98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | 98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, |
99 | struct scatterlist *startsg, int nents, | 99 | struct scatterlist *startsg, int nents, |
100 | int (*iommu_alloc_range)(struct ioc *, size_t)) | 100 | int (*iommu_alloc_range)(struct ioc *, struct device *, size_t)) |
101 | { | 101 | { |
102 | struct scatterlist *contig_sg; /* contig chunk head */ | 102 | struct scatterlist *contig_sg; /* contig chunk head */ |
103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); | 166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); |
167 | sg_dma_address(contig_sg) = | 167 | sg_dma_address(contig_sg) = |
168 | PIDE_FLAG | 168 | PIDE_FLAG |
169 | | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) | 169 | | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT) |
170 | | dma_offset; | 170 | | dma_offset; |
171 | n_mappings++; | 171 | n_mappings++; |
172 | } | 172 | } |
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index de34aa9d3136..f9f9a5f1bbd0 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c | |||
@@ -829,7 +829,7 @@ static ssize_t pdcs_autoboot_write(struct kobject *kobj, | |||
829 | struct kobj_attribute *attr, | 829 | struct kobj_attribute *attr, |
830 | const char *buf, size_t count) | 830 | const char *buf, size_t count) |
831 | { | 831 | { |
832 | return pdcs_auto_write(kset, attr, buf, count, PF_AUTOBOOT); | 832 | return pdcs_auto_write(kobj, attr, buf, count, PF_AUTOBOOT); |
833 | } | 833 | } |
834 | 834 | ||
835 | /** | 835 | /** |
@@ -845,7 +845,7 @@ static ssize_t pdcs_autosearch_write(struct kobject *kobj, | |||
845 | struct kobj_attribute *attr, | 845 | struct kobj_attribute *attr, |
846 | const char *buf, size_t count) | 846 | const char *buf, size_t count) |
847 | { | 847 | { |
848 | return pdcs_auto_write(kset, attr, buf, count, PF_AUTOSEARCH); | 848 | return pdcs_auto_write(kobj, attr, buf, count, PF_AUTOSEARCH); |
849 | } | 849 | } |
850 | 850 | ||
851 | /** | 851 | /** |
@@ -1066,7 +1066,7 @@ pdc_stable_init(void) | |||
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | /* Don't forget the root entries */ | 1068 | /* Don't forget the root entries */ |
1069 | error = sysfs_create_group(stable_kobj, pdcs_attr_group); | 1069 | error = sysfs_create_group(stable_kobj, &pdcs_attr_group); |
1070 | 1070 | ||
1071 | /* register the paths kset as a child of the stable kset */ | 1071 | /* register the paths kset as a child of the stable kset */ |
1072 | paths_kset = kset_create_and_add("paths", NULL, stable_kobj); | 1072 | paths_kset = kset_create_and_add("paths", NULL, stable_kobj); |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d06627c3f353..8c4d2c13d5f2 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/iommu-helper.h> | ||
32 | 33 | ||
33 | #include <asm/byteorder.h> | 34 | #include <asm/byteorder.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
313 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) | 314 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) |
314 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | 315 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) |
315 | 316 | ||
317 | static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, | ||
318 | unsigned int bitshiftcnt) | ||
319 | { | ||
320 | return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) | ||
321 | + bitshiftcnt; | ||
322 | } | ||
316 | 323 | ||
317 | /** | 324 | /** |
318 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | 325 | * sba_search_bitmap - find free space in IO PDIR resource bitmap |
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
324 | * Cool perf optimization: search for log2(size) bits at a time. | 331 | * Cool perf optimization: search for log2(size) bits at a time. |
325 | */ | 332 | */ |
326 | static SBA_INLINE unsigned long | 333 | static SBA_INLINE unsigned long |
327 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | 334 | sba_search_bitmap(struct ioc *ioc, struct device *dev, |
335 | unsigned long bits_wanted) | ||
328 | { | 336 | { |
329 | unsigned long *res_ptr = ioc->res_hint; | 337 | unsigned long *res_ptr = ioc->res_hint; |
330 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | 338 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
331 | unsigned long pide = ~0UL; | 339 | unsigned long pide = ~0UL, tpide; |
340 | unsigned long boundary_size; | ||
341 | unsigned long shift; | ||
342 | int ret; | ||
343 | |||
344 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, | ||
345 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; | ||
346 | |||
347 | #if defined(ZX1_SUPPORT) | ||
348 | BUG_ON(ioc->ibase & ~IOVP_MASK); | ||
349 | shift = ioc->ibase >> IOVP_SHIFT; | ||
350 | #else | ||
351 | shift = 0; | ||
352 | #endif | ||
332 | 353 | ||
333 | if (bits_wanted > (BITS_PER_LONG/2)) { | 354 | if (bits_wanted > (BITS_PER_LONG/2)) { |
334 | /* Search word at a time - no mask needed */ | 355 | /* Search word at a time - no mask needed */ |
335 | for(; res_ptr < res_end; ++res_ptr) { | 356 | for(; res_ptr < res_end; ++res_ptr) { |
336 | if (*res_ptr == 0) { | 357 | tpide = ptr_to_pide(ioc, res_ptr, 0); |
358 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
359 | shift, | ||
360 | boundary_size); | ||
361 | if ((*res_ptr == 0) && !ret) { | ||
337 | *res_ptr = RESMAP_MASK(bits_wanted); | 362 | *res_ptr = RESMAP_MASK(bits_wanted); |
338 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 363 | pide = tpide; |
339 | pide <<= 3; /* convert to bit address */ | ||
340 | break; | 364 | break; |
341 | } | 365 | } |
342 | } | 366 | } |
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
365 | { | 389 | { |
366 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | 390 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); |
367 | WARN_ON(mask == 0); | 391 | WARN_ON(mask == 0); |
368 | if(((*res_ptr) & mask) == 0) { | 392 | tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); |
393 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
394 | shift, | ||
395 | boundary_size); | ||
396 | if ((((*res_ptr) & mask) == 0) && !ret) { | ||
369 | *res_ptr |= mask; /* mark resources busy! */ | 397 | *res_ptr |= mask; /* mark resources busy! */ |
370 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 398 | pide = tpide; |
371 | pide <<= 3; /* convert to bit address */ | ||
372 | pide += bitshiftcnt; | ||
373 | break; | 399 | break; |
374 | } | 400 | } |
375 | mask >>= o; | 401 | mask >>= o; |
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
404 | * resource bit map. | 430 | * resource bit map. |
405 | */ | 431 | */ |
406 | static int | 432 | static int |
407 | sba_alloc_range(struct ioc *ioc, size_t size) | 433 | sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
408 | { | 434 | { |
409 | unsigned int pages_needed = size >> IOVP_SHIFT; | 435 | unsigned int pages_needed = size >> IOVP_SHIFT; |
410 | #ifdef SBA_COLLECT_STATS | 436 | #ifdef SBA_COLLECT_STATS |
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
412 | #endif | 438 | #endif |
413 | unsigned long pide; | 439 | unsigned long pide; |
414 | 440 | ||
415 | pide = sba_search_bitmap(ioc, pages_needed); | 441 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
416 | if (pide >= (ioc->res_size << 3)) { | 442 | if (pide >= (ioc->res_size << 3)) { |
417 | pide = sba_search_bitmap(ioc, pages_needed); | 443 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
418 | if (pide >= (ioc->res_size << 3)) | 444 | if (pide >= (ioc->res_size << 3)) |
419 | panic("%s: I/O MMU @ %p is out of mapping resources\n", | 445 | panic("%s: I/O MMU @ %p is out of mapping resources\n", |
420 | __FILE__, ioc->ioc_hpa); | 446 | __FILE__, ioc->ioc_hpa); |
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
710 | ioc->msingle_calls++; | 736 | ioc->msingle_calls++; |
711 | ioc->msingle_pages += size >> IOVP_SHIFT; | 737 | ioc->msingle_pages += size >> IOVP_SHIFT; |
712 | #endif | 738 | #endif |
713 | pide = sba_alloc_range(ioc, size); | 739 | pide = sba_alloc_range(ioc, dev, size); |
714 | iovp = (dma_addr_t) pide << IOVP_SHIFT; | 740 | iovp = (dma_addr_t) pide << IOVP_SHIFT; |
715 | 741 | ||
716 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | 742 | DBG_RUN("%s() 0x%p -> 0x%lx\n", |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ef5a6a245f5f..d708358326e5 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -143,15 +143,21 @@ void pci_bus_add_devices(struct pci_bus *bus) | |||
143 | /* register the bus with sysfs as the parent is now | 143 | /* register the bus with sysfs as the parent is now |
144 | * properly registered. */ | 144 | * properly registered. */ |
145 | child_bus = dev->subordinate; | 145 | child_bus = dev->subordinate; |
146 | if (child_bus->is_added) | ||
147 | continue; | ||
146 | child_bus->dev.parent = child_bus->bridge; | 148 | child_bus->dev.parent = child_bus->bridge; |
147 | retval = device_register(&child_bus->dev); | 149 | retval = device_register(&child_bus->dev); |
148 | if (!retval) | 150 | if (retval) |
151 | dev_err(&dev->dev, "Error registering pci_bus," | ||
152 | " continuing...\n"); | ||
153 | else { | ||
154 | child_bus->is_added = 1; | ||
149 | retval = device_create_file(&child_bus->dev, | 155 | retval = device_create_file(&child_bus->dev, |
150 | &dev_attr_cpuaffinity); | 156 | &dev_attr_cpuaffinity); |
157 | } | ||
151 | if (retval) | 158 | if (retval) |
152 | dev_err(&dev->dev, "Error registering pci_bus" | 159 | dev_err(&dev->dev, "Error creating cpuaffinity" |
153 | " device bridge symlink," | 160 | " file, continuing...\n"); |
154 | " continuing...\n"); | ||
155 | } | 161 | } |
156 | } | 162 | } |
157 | } | 163 | } |
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index a590ef682153..4d4a64478404 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | 6 | ||
7 | unsigned int pci_do_scan_bus(struct pci_bus *bus) | 7 | unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus) |
8 | { | 8 | { |
9 | unsigned int max; | 9 | unsigned int max; |
10 | 10 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cf22f9e01e00..5e50008d1181 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle) | |||
1085 | * This function should be called per *physical slot*, | 1085 | * This function should be called per *physical slot*, |
1086 | * not per each slot object in ACPI namespace. | 1086 | * not per each slot object in ACPI namespace. |
1087 | */ | 1087 | */ |
1088 | static int enable_device(struct acpiphp_slot *slot) | 1088 | static int __ref enable_device(struct acpiphp_slot *slot) |
1089 | { | 1089 | { |
1090 | struct pci_dev *dev; | 1090 | struct pci_dev *dev; |
1091 | struct pci_bus *bus = slot->bridge->pci_bus; | 1091 | struct pci_bus *bus = slot->bridge->pci_bus; |
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 5e9be44817cb..b3515fc4cd38 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) | |||
250 | * Device configuration functions | 250 | * Device configuration functions |
251 | */ | 251 | */ |
252 | 252 | ||
253 | int cpci_configure_slot(struct slot* slot) | 253 | int __ref cpci_configure_slot(struct slot *slot) |
254 | { | 254 | { |
255 | struct pci_bus *parent; | 255 | struct pci_bus *parent; |
256 | int fn; | 256 | int fn; |
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 600ed7b67ae7..bbccde9f228f 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c | |||
@@ -963,6 +963,7 @@ static int __init ebda_rsrc_controller (void) | |||
963 | 963 | ||
964 | bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num); | 964 | bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num); |
965 | if (!bus_info_ptr1) { | 965 | if (!bus_info_ptr1) { |
966 | kfree(tmp_slot); | ||
966 | rc = -ENODEV; | 967 | rc = -ENODEV; |
967 | goto error; | 968 | goto error; |
968 | } | 969 | } |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6eba9b2cfb90..698975a6a21c 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot) | |||
711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); | 711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); |
712 | if (retval) { | 712 | if (retval) { |
713 | err("%s: Write command failed!\n", __FUNCTION__); | 713 | err("%s: Write command failed!\n", __FUNCTION__); |
714 | return -1; | 714 | retval = -1; |
715 | goto out; | ||
715 | } | 716 | } |
716 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 717 | dbg("%s: SLOTCTRL %x write cmd %x\n", |
717 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 718 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
722 | * removed from the slot/adapter. | 723 | * removed from the slot/adapter. |
723 | */ | 724 | */ |
724 | msleep(1000); | 725 | msleep(1000); |
725 | 726 | out: | |
726 | if (changed) | 727 | if (changed) |
727 | pcie_unmask_bad_dllp(ctrl); | 728 | pcie_unmask_bad_dllp(ctrl); |
728 | 729 | ||
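The pciehp change is the usual "error path must still undo earlier state" pattern: pcie_unmask_bad_dllp() has to run whether or not the write command succeeded, so the early return is replaced by a jump to a shared out: label. The shape of the idiom in miniature (the helper names here are placeholders):

static int power_off_slot_sketch(struct slot *slot)
{
	int changed = mask_bad_dllp_if_needed(slot);	/* state that must be undone */
	int retval;

	retval = write_power_off_cmd(slot);
	if (retval) {
		retval = -1;
		goto out;		/* don't return yet: the mask is still in place */
	}

	msleep(1000);			/* let the adapter finish powering down */
out:
	if (changed)
		unmask_bad_dllp(slot);	/* undone on success and on failure */
	return retval;
}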
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index dd50713966d1..9372a840b63d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
167 | } | 167 | } |
168 | } | 168 | } |
169 | 169 | ||
170 | static int pciehp_add_bridge(struct pci_dev *dev) | 170 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
171 | { | 171 | { |
172 | struct pci_bus *parent = dev->bus; | 172 | struct pci_bus *parent = dev->bus; |
173 | int pass, busnr, start = parent->secondary; | 173 | int pass, busnr, start = parent->secondary; |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 0a6b25ef194c..a69a21520895 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | int shpchp_configure_device(struct slot *p_slot) | 99 | int __ref shpchp_configure_device(struct slot *p_slot) |
100 | { | 100 | { |
101 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 4a23654184fc..72f7476930c8 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -272,21 +272,29 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
272 | { | 272 | { |
273 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); | 273 | acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); |
274 | acpi_handle tmp; | 274 | acpi_handle tmp; |
275 | static int state_conv[] = { | 275 | static const u8 state_conv[] = { |
276 | [0] = 0, | 276 | [PCI_D0] = ACPI_STATE_D0, |
277 | [1] = 1, | 277 | [PCI_D1] = ACPI_STATE_D1, |
278 | [2] = 2, | 278 | [PCI_D2] = ACPI_STATE_D2, |
279 | [3] = 3, | 279 | [PCI_D3hot] = ACPI_STATE_D3, |
280 | [4] = 3 | 280 | [PCI_D3cold] = ACPI_STATE_D3 |
281 | }; | 281 | }; |
282 | int acpi_state = state_conv[(int __force) state]; | ||
283 | 282 | ||
284 | if (!handle) | 283 | if (!handle) |
285 | return -ENODEV; | 284 | return -ENODEV; |
286 | /* If the ACPI device has _EJ0, ignore the device */ | 285 | /* If the ACPI device has _EJ0, ignore the device */ |
287 | if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) | 286 | if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) |
288 | return 0; | 287 | return 0; |
289 | return acpi_bus_set_power(handle, acpi_state); | 288 | |
289 | switch (state) { | ||
290 | case PCI_D0: | ||
291 | case PCI_D1: | ||
292 | case PCI_D2: | ||
293 | case PCI_D3hot: | ||
294 | case PCI_D3cold: | ||
295 | return acpi_bus_set_power(handle, state_conv[state]); | ||
296 | } | ||
297 | return -EINVAL; | ||
290 | } | 298 | } |
291 | 299 | ||
292 | 300 | ||
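The pci-acpi rework replaces a bare array index with a table keyed by the PCI power-state constants plus an explicit switch, so an out-of-range pci_power_t can no longer walk off the end of state_conv, and unknown states get -EINVAL instead of a bogus ACPI call. The same defensive shape in a self-contained example (the enum values and mapping are invented):

#include <stdio.h>

enum pci_power { PCI_D0, PCI_D1, PCI_D2, PCI_D3HOT, PCI_D3COLD, PCI_UNKNOWN };

static const unsigned char state_conv[] = {
	[PCI_D0]     = 0,
	[PCI_D1]     = 1,
	[PCI_D2]     = 2,
	[PCI_D3HOT]  = 3,
	[PCI_D3COLD] = 3,	/* cold maps onto the same ACPI D3 state */
};

static int set_power_state(enum pci_power state)
{
	switch (state) {
	case PCI_D0: case PCI_D1: case PCI_D2:
	case PCI_D3HOT: case PCI_D3COLD:
		return state_conv[state];	/* stand-in for the acpi_bus_set_power() call */
	default:
		return -22;			/* stand-in for -EINVAL: reject anything else */
	}
}

int main(void)
{
	printf("%d %d\n", set_power_state(PCI_D3HOT), set_power_state(PCI_UNKNOWN));	/* 3 -22 */
	return 0;
}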
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4d23b9fb551b..2db2e4bb0d1e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | void pci_read_bridge_bases(struct pci_bus *child) | 289 | void __devinit pci_read_bridge_bases(struct pci_bus *child) |
290 | { | 290 | { |
291 | struct pci_dev *dev = child->self; | 291 | struct pci_dev *dev = child->self; |
292 | u8 io_base_lo, io_limit_lo; | 292 | u8 io_base_lo, io_limit_lo; |
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) | |||
472 | * them, we proceed to assigning numbers to the remaining buses in | 472 | * them, we proceed to assigning numbers to the remaining buses in |
473 | * order to avoid overlaps between old and new bus numbers. | 473 | * order to avoid overlaps between old and new bus numbers. |
474 | */ | 474 | */ |
475 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) | 475 | int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) |
476 | { | 476 | { |
477 | struct pci_bus *child; | 477 | struct pci_bus *child; |
478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1008 | return nr; | 1008 | return nr; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | unsigned int pci_scan_child_bus(struct pci_bus *bus) | 1011 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1012 | { | 1012 | { |
1013 | unsigned int devfn, pass, max = bus->secondary; | 1013 | unsigned int devfn, pass, max = bus->secondary; |
1014 | struct pci_dev *dev; | 1014 | struct pci_dev *dev; |
@@ -1116,7 +1116,7 @@ err_out: | |||
1116 | return NULL; | 1116 | return NULL; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | struct pci_bus *pci_scan_bus_parented(struct device *parent, | 1119 | struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, |
1120 | int bus, struct pci_ops *ops, void *sysdata) | 1120 | int bus, struct pci_ops *ops, void *sysdata) |
1121 | { | 1121 | { |
1122 | struct pci_bus *b; | 1122 | struct pci_bus *b; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbad4a9f264f..e9a333d98552 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1652 | pci_write_config_byte(dev, 0x75, 0x1); | 1652 | pci_write_config_byte(dev, 0x75, 0x1); |
1653 | pci_write_config_byte(dev, 0x77, 0x0); | 1653 | pci_write_config_byte(dev, 0x77, 0x0); |
1654 | 1654 | ||
1655 | printk(KERN_INFO | 1655 | dev_info(&dev->dev, |
1656 | "PCI: VIA CX700 PCI parking/caching fixup on %s\n", | 1656 | "Disabling VIA CX700 PCI parking/caching\n"); |
1657 | pci_name(dev)); | ||
1658 | } | 1657 | } |
1659 | } | 1658 | } |
1660 | } | 1659 | } |
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2 | |||
1726 | quirk_msi_ht_cap); | 1725 | quirk_msi_ht_cap); |
1727 | 1726 | ||
1728 | 1727 | ||
1729 | /* | ||
1730 | * Force enable MSI mapping capability on HT bridges | ||
1731 | */ | ||
1732 | static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev) | ||
1733 | { | ||
1734 | int pos, ttl = 48; | ||
1735 | |||
1736 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); | ||
1737 | while (pos && ttl--) { | ||
1738 | u8 flags; | ||
1739 | |||
1740 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { | ||
1741 | printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n", | ||
1742 | pci_name(dev)); | ||
1743 | |||
1744 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | ||
1745 | flags | HT_MSI_FLAGS_ENABLE); | ||
1746 | } | ||
1747 | pos = pci_find_next_ht_capability(dev, pos, | ||
1748 | HT_CAPTYPE_MSI_MAPPING); | ||
1749 | } | ||
1750 | } | ||
1751 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1752 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1753 | quirk_msi_ht_cap_enable); | ||
1754 | |||
1755 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. | 1728 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. |
1756 | * MSI are supported if the MSI capability set in any of these mappings. | 1729 | * MSI are supported if the MSI capability set in any of these mappings. |
1757 | */ | 1730 | */ |
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) | |||
1778 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | 1751 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, |
1779 | quirk_nvidia_ck804_msi_ht_cap); | 1752 | quirk_nvidia_ck804_msi_ht_cap); |
1780 | 1753 | ||
1781 | /* | 1754 | /* Force enable MSI mapping capability on HT bridges */ |
1782 | * Force enable MSI mapping capability on HT bridges */ | 1755 | static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) |
1783 | static inline void ht_enable_msi_mapping(struct pci_dev *dev) | ||
1784 | { | 1756 | { |
1785 | int pos, ttl = 48; | 1757 | int pos, ttl = 48; |
1786 | 1758 | ||
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev) | |||
1799 | HT_CAPTYPE_MSI_MAPPING); | 1771 | HT_CAPTYPE_MSI_MAPPING); |
1800 | } | 1772 | } |
1801 | } | 1773 | } |
1774 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1775 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1776 | ht_enable_msi_mapping); | ||
1802 | 1777 | ||
1803 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | 1778 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) |
1804 | { | 1779 | { |
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | |||
1830 | 1805 | ||
1831 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, | 1806 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, |
1832 | &flags) == 0) { | 1807 | &flags) == 0) { |
1833 | dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); | 1808 | dev_info(&dev->dev, "Disabling HT MSI mapping"); |
1834 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | 1809 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, |
1835 | flags & ~HT_MSI_FLAGS_ENABLE); | 1810 | flags & ~HT_MSI_FLAGS_ENABLE); |
1836 | } | 1811 | } |
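[Editor's note] Both quirks touched in this file walk the chain of HyperTransport MSI-mapping capabilities and flip HT_MSI_FLAGS_ENABLE, one setting it and one clearing it. A hedged sketch distilling the two into a single helper, using the same kernel APIs the patch uses (the helper name and the bool parameter are illustrative, not the driver's):

    #include <linux/types.h>
    #include <linux/pci.h>

    /* Sketch: visit every HT MSI-mapping capability and set or clear the enable bit. */
    static void ht_msi_mapping_set(struct pci_dev *dev, bool enable)
    {
            int pos, ttl = 48;      /* bound the walk in case the capability list loops */

            pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
            while (pos && ttl--) {
                    u8 flags;

                    if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) {
                            if (enable)
                                    flags |= HT_MSI_FLAGS_ENABLE;
                            else
                                    flags &= ~HT_MSI_FLAGS_ENABLE;
                            pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags);
                    }
                    pos = pci_find_next_ht_capability(dev, pos,
                                                      HT_CAPTYPE_MSI_MAPPING);
            }
    }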
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index a98b2470b9ea..bd5c0e031398 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev) | |||
242 | #endif /* 0 */ | 242 | #endif /* 0 */ |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * pci_cleanup_rom - internal routine for freeing the ROM copy created | 245 | * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy |
246 | * by pci_map_rom_copy called from remove.c | ||
247 | * @pdev: pointer to pci device struct | 246 | * @pdev: pointer to pci device struct |
248 | * | 247 | * |
249 | * Free the copied ROM if we allocated one. | 248 | * Free the copied ROM if we allocated one. |
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 4065139753b6..37993206ae5d 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/pnp.h> | 18 | #include <linux/pnp.h> |
19 | #include <linux/io.h> | 19 | #include <linux/io.h> |
20 | #include <linux/dmi.h> | ||
21 | #include <linux/kallsyms.h> | 20 | #include <linux/kallsyms.h> |
22 | #include "base.h" | 21 | #include "base.h" |
23 | 22 | ||
@@ -109,42 +108,73 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev) | |||
109 | "pnp: SB audio device quirk - increasing port range\n"); | 108 | "pnp: SB audio device quirk - increasing port range\n"); |
110 | } | 109 | } |
111 | 110 | ||
112 | static void quirk_supermicro_h8dce_system(struct pnp_dev *dev) | 111 | |
112 | #include <linux/pci.h> | ||
113 | |||
114 | static void quirk_system_pci_resources(struct pnp_dev *dev) | ||
113 | { | 115 | { |
114 | int i; | 116 | struct pci_dev *pdev = NULL; |
115 | static struct dmi_system_id supermicro_h8dce[] = { | 117 | resource_size_t pnp_start, pnp_end, pci_start, pci_end; |
116 | { | 118 | int i, j; |
117 | .ident = "Supermicro H8DCE", | ||
118 | .matches = { | ||
119 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | ||
120 | DMI_MATCH(DMI_PRODUCT_NAME, "H8DCE"), | ||
121 | }, | ||
122 | }, | ||
123 | { } | ||
124 | }; | ||
125 | |||
126 | if (!dmi_check_system(supermicro_h8dce)) | ||
127 | return; | ||
128 | 119 | ||
129 | /* | 120 | /* |
130 | * On the Supermicro H8DCE, there's a system device with resources | 121 | * Some BIOSes have PNP motherboard devices with resources that |
131 | * that overlap BAR 6 of the built-in SATA PCI adapter. If the PNP | 122 | * partially overlap PCI BARs. The PNP system driver claims these |
132 | * system device claims them, the sata_nv driver won't be able to. | 123 | * motherboard resources, which prevents the normal PCI driver from |
133 | * More details at: | 124 | * requesting them later. |
134 | * https://bugzilla.redhat.com/show_bug.cgi?id=280641 | 125 | * |
135 | * https://bugzilla.redhat.com/show_bug.cgi?id=313491 | 126 | * This patch disables the PNP resources that conflict with PCI BARs |
136 | * http://lkml.org/lkml/2008/1/9/449 | 127 | * so they won't be claimed by the PNP system driver. |
137 | * http://thread.gmane.org/gmane.linux.acpi.devel/27312 | ||
138 | */ | 128 | */ |
139 | for (i = 0; i < PNP_MAX_MEM; i++) { | 129 | for_each_pci_dev(pdev) { |
140 | if (pnp_mem_valid(dev, i) && pnp_mem_len(dev, i) && | 130 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
141 | (pnp_mem_start(dev, i) & 0xdfef0000) == 0xdfef0000) { | 131 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM) || |
142 | dev_warn(&dev->dev, "disabling 0x%llx-0x%llx to prevent" | 132 | pci_resource_len(pdev, i) == 0) |
143 | " conflict with sata_nv PCI device\n", | 133 | continue; |
144 | (unsigned long long) pnp_mem_start(dev, i), | 134 | |
145 | (unsigned long long) (pnp_mem_start(dev, i) + | 135 | pci_start = pci_resource_start(pdev, i); |
146 | pnp_mem_len(dev, i) - 1)); | 136 | pci_end = pci_resource_end(pdev, i); |
147 | pnp_mem_flags(dev, i) = 0; | 137 | for (j = 0; j < PNP_MAX_MEM; j++) { |
138 | if (!pnp_mem_valid(dev, j) || | ||
139 | pnp_mem_len(dev, j) == 0) | ||
140 | continue; | ||
141 | |||
142 | pnp_start = pnp_mem_start(dev, j); | ||
143 | pnp_end = pnp_mem_end(dev, j); | ||
144 | |||
145 | /* | ||
146 | * If the PNP region doesn't overlap the PCI | ||
147 | * region at all, there's no problem. | ||
148 | */ | ||
149 | if (pnp_end < pci_start || pnp_start > pci_end) | ||
150 | continue; | ||
151 | |||
152 | /* | ||
153 | * If the PNP region completely encloses (or is | ||
154 | * at least as large as) the PCI region, that's | ||
155 | * also OK. For example, this happens when the | ||
156 | * PNP device describes a bridge with PCI | ||
157 | * behind it. | ||
158 | */ | ||
159 | if (pnp_start <= pci_start && | ||
160 | pnp_end >= pci_end) | ||
161 | continue; | ||
162 | |||
163 | /* | ||
164 | * Otherwise, the PNP region overlaps *part* of | ||
165 | * the PCI region, and that might prevent a PCI | ||
166 | * driver from requesting its resources. | ||
167 | */ | ||
168 | dev_warn(&dev->dev, "mem resource " | ||
169 | "(0x%llx-0x%llx) overlaps %s BAR %d " | ||
170 | "(0x%llx-0x%llx), disabling\n", | ||
171 | (unsigned long long) pnp_start, | ||
172 | (unsigned long long) pnp_end, | ||
173 | pci_name(pdev), i, | ||
174 | (unsigned long long) pci_start, | ||
175 | (unsigned long long) pci_end); | ||
176 | pnp_mem_flags(dev, j) = 0; | ||
177 | } | ||
148 | } | 178 | } |
149 | } | 179 | } |
150 | } | 180 | } |
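[Editor's note] The core of the new quirk_system_pci_resources is a three-way interval test: a PNP region that is disjoint from the PCI BAR is fine, one that fully encloses the BAR is also fine (e.g. a bridge window), and only a partial overlap gets disabled. A self-contained hedged restatement of that test in plain C (the enum and function names are illustrative, not kernel code):

    enum overlap { DISJOINT, ENCLOSING, PARTIAL };

    /* Classify how [pnp_start, pnp_end] relates to [pci_start, pci_end] (inclusive ends). */
    static enum overlap classify(unsigned long long pnp_start, unsigned long long pnp_end,
                                 unsigned long long pci_start, unsigned long long pci_end)
    {
            if (pnp_end < pci_start || pnp_start > pci_end)
                    return DISJOINT;   /* no overlap at all: leave the PNP resource alone */
            if (pnp_start <= pci_start && pnp_end >= pci_end)
                    return ENCLOSING;  /* PNP region covers the whole BAR: also fine */
            return PARTIAL;            /* partial overlap: the quirk disables this one */
    }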
@@ -169,8 +199,8 @@ static struct pnp_fixup pnp_fixups[] = { | |||
169 | {"CTL0043", quirk_sb16audio_resources}, | 199 | {"CTL0043", quirk_sb16audio_resources}, |
170 | {"CTL0044", quirk_sb16audio_resources}, | 200 | {"CTL0044", quirk_sb16audio_resources}, |
171 | {"CTL0045", quirk_sb16audio_resources}, | 201 | {"CTL0045", quirk_sb16audio_resources}, |
172 | {"PNP0c01", quirk_supermicro_h8dce_system}, | 202 | {"PNP0c01", quirk_system_pci_resources}, |
173 | {"PNP0c02", quirk_supermicro_h8dce_system}, | 203 | {"PNP0c02", quirk_system_pci_resources}, |
174 | {""} | 204 | {""} |
175 | }; | 205 | }; |
176 | 206 | ||
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 5480119ff9d3..3ce9f3defc12 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO | 81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure |
82 | * device id structure | ||
83 | * @id: the RIO device id structure to match against | 82 | * @id: the RIO device id structure to match against |
84 | * @dev: the RIO device structure to match against | 83 | * @dev: the RIO device structure to match against |
85 | * | 84 | * |
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev) | |||
137 | * rio_register_driver - register a new RIO driver | 136 | * rio_register_driver - register a new RIO driver |
138 | * @rdrv: the RIO driver structure to register | 137 | * @rdrv: the RIO driver structure to register |
139 | * | 138 | * |
140 | * Adds a &struct rio_driver to the list of registered drivers | 139 | * Adds a &struct rio_driver to the list of registered drivers. |
141 | * Returns a negative value on error, otherwise 0. If no error | 140 | * Returns a negative value on error, otherwise 0. If no error |
142 | * occurred, the driver remains registered even if no device | 141 | * occurred, the driver remains registered even if no device |
143 | * was claimed during registration. | 142 | * was claimed during registration. |
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv) | |||
167 | } | 166 | } |
168 | 167 | ||
169 | /** | 168 | /** |
170 | * rio_match_bus - Tell if a RIO device structure has a matching RIO | 169 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure |
171 | * driver device id structure | ||
172 | * @dev: the standard device structure to match against | 170 | * @dev: the standard device structure to match against |
173 | * @drv: the standard driver structure containing the ids to match against | 171 | * @drv: the standard driver structure containing the ids to match against |
174 | * | 172 | * |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6402d699072b..82f5ad9c3af4 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -250,6 +250,15 @@ config RTC_DRV_TWL92330 | |||
250 | platforms. The support is integrated with the rest of | 250 | platforms. The support is integrated with the rest of |
251 | the Menelaus driver; it's not separate module. | 251 | the Menelaus driver; it's not separate module. |
252 | 252 | ||
253 | config RTC_DRV_S35390A | ||
254 | tristate "Seiko Instruments S-35390A" | ||
255 | help | ||
256 | If you say yes here you will get support for the Seiko | ||
257 | Instruments S-35390A. | ||
258 | |||
259 | This driver can also be built as a module. If so the module | ||
260 | will be called rtc-s35390a. | ||
261 | |||
253 | endif # I2C | 262 | endif # I2C |
254 | 263 | ||
255 | comment "SPI RTC drivers" | 264 | comment "SPI RTC drivers" |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ec703f34ab86..872f1218ff9f 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o | |||
45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o | 45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o |
46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o | 46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o |
47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o | 47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o |
48 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o | ||
48 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o | 49 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o |
49 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | 50 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o |
50 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 51 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c new file mode 100644 index 000000000000..e8abc90c32c5 --- /dev/null +++ b/drivers/rtc/rtc-s35390a.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Seiko Instruments S-35390A RTC Driver | ||
3 | * | ||
4 | * Copyright (c) 2007 Byron Bradley | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/rtc.h> | ||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/bitrev.h> | ||
16 | #include <linux/bcd.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #define S35390A_CMD_STATUS1 0 | ||
20 | #define S35390A_CMD_STATUS2 1 | ||
21 | #define S35390A_CMD_TIME1 2 | ||
22 | |||
23 | #define S35390A_BYTE_YEAR 0 | ||
24 | #define S35390A_BYTE_MONTH 1 | ||
25 | #define S35390A_BYTE_DAY 2 | ||
26 | #define S35390A_BYTE_WDAY 3 | ||
27 | #define S35390A_BYTE_HOURS 4 | ||
28 | #define S35390A_BYTE_MINS 5 | ||
29 | #define S35390A_BYTE_SECS 6 | ||
30 | |||
31 | #define S35390A_FLAG_POC 0x01 | ||
32 | #define S35390A_FLAG_BLD 0x02 | ||
33 | #define S35390A_FLAG_24H 0x40 | ||
34 | #define S35390A_FLAG_RESET 0x80 | ||
35 | #define S35390A_FLAG_TEST 0x01 | ||
36 | |||
37 | struct s35390a { | ||
38 | struct i2c_client *client[8]; | ||
39 | struct rtc_device *rtc; | ||
40 | int twentyfourhour; | ||
41 | }; | ||
42 | |||
43 | static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
44 | { | ||
45 | struct i2c_client *client = s35390a->client[reg]; | ||
46 | struct i2c_msg msg[] = { | ||
47 | { client->addr, 0, len, buf }, | ||
48 | }; | ||
49 | |||
50 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
51 | return -EIO; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
57 | { | ||
58 | struct i2c_client *client = s35390a->client[reg]; | ||
59 | struct i2c_msg msg[] = { | ||
60 | { client->addr, I2C_M_RD, len, buf }, | ||
61 | }; | ||
62 | |||
63 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
64 | return -EIO; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int s35390a_reset(struct s35390a *s35390a) | ||
70 | { | ||
71 | char buf[1]; | ||
72 | |||
73 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) | ||
74 | return -EIO; | ||
75 | |||
76 | if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) | ||
77 | return 0; | ||
78 | |||
79 | buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); | ||
80 | buf[0] &= 0xf0; | ||
81 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
82 | } | ||
83 | |||
84 | static int s35390a_disable_test_mode(struct s35390a *s35390a) | ||
85 | { | ||
86 | char buf[1]; | ||
87 | |||
88 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) | ||
89 | return -EIO; | ||
90 | |||
91 | if (!(buf[0] & S35390A_FLAG_TEST)) | ||
92 | return 0; | ||
93 | |||
94 | buf[0] &= ~S35390A_FLAG_TEST; | ||
95 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); | ||
96 | } | ||
97 | |||
98 | static char s35390a_hr2reg(struct s35390a *s35390a, int hour) | ||
99 | { | ||
100 | if (s35390a->twentyfourhour) | ||
101 | return BIN2BCD(hour); | ||
102 | |||
103 | if (hour < 12) | ||
104 | return BIN2BCD(hour); | ||
105 | |||
106 | return 0x40 | BIN2BCD(hour - 12); | ||
107 | } | ||
108 | |||
109 | static int s35390a_reg2hr(struct s35390a *s35390a, char reg) | ||
110 | { | ||
111 | unsigned hour; | ||
112 | |||
113 | if (s35390a->twentyfourhour) | ||
114 | return BCD2BIN(reg & 0x3f); | ||
115 | |||
116 | hour = BCD2BIN(reg & 0x3f); | ||
117 | if (reg & 0x40) | ||
118 | hour += 12; | ||
119 | |||
120 | return hour; | ||
121 | } | ||
122 | |||
123 | static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
124 | { | ||
125 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
126 | int i, err; | ||
127 | char buf[7]; | ||
128 | |||
129 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " | ||
130 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
131 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
132 | tm->tm_wday); | ||
133 | |||
134 | buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100); | ||
135 | buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1); | ||
136 | buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday); | ||
137 | buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday); | ||
138 | buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); | ||
139 | buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min); | ||
140 | buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec); | ||
141 | |||
142 | /* This chip expects the bits of each byte to be in reverse order */ | ||
143 | for (i = 0; i < 7; ++i) | ||
144 | buf[i] = bitrev8(buf[i]); | ||
145 | |||
146 | err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
147 | |||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
152 | { | ||
153 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
154 | char buf[7]; | ||
155 | int i, err; | ||
156 | |||
157 | err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | |||
161 | /* This chip returns the bits of each byte in reverse order */ | ||
162 | for (i = 0; i < 7; ++i) | ||
163 | buf[i] = bitrev8(buf[i]); | ||
164 | |||
165 | tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]); | ||
166 | tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]); | ||
167 | tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); | ||
168 | tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]); | ||
169 | tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]); | ||
170 | tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1; | ||
171 | tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100; | ||
172 | |||
173 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " | ||
174 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
175 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
176 | tm->tm_wday); | ||
177 | |||
178 | return rtc_valid_tm(tm); | ||
179 | } | ||
180 | |||
181 | static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
182 | { | ||
183 | return s35390a_get_datetime(to_i2c_client(dev), tm); | ||
184 | } | ||
185 | |||
186 | static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
187 | { | ||
188 | return s35390a_set_datetime(to_i2c_client(dev), tm); | ||
189 | } | ||
190 | |||
191 | static const struct rtc_class_ops s35390a_rtc_ops = { | ||
192 | .read_time = s35390a_rtc_read_time, | ||
193 | .set_time = s35390a_rtc_set_time, | ||
194 | }; | ||
195 | |||
196 | static struct i2c_driver s35390a_driver; | ||
197 | |||
198 | static int s35390a_probe(struct i2c_client *client) | ||
199 | { | ||
200 | int err; | ||
201 | unsigned int i; | ||
202 | struct s35390a *s35390a; | ||
203 | struct rtc_time tm; | ||
204 | char buf[1]; | ||
205 | |||
206 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
207 | err = -ENODEV; | ||
208 | goto exit; | ||
209 | } | ||
210 | |||
211 | s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); | ||
212 | if (!s35390a) { | ||
213 | err = -ENOMEM; | ||
214 | goto exit; | ||
215 | } | ||
216 | |||
217 | s35390a->client[0] = client; | ||
218 | i2c_set_clientdata(client, s35390a); | ||
219 | |||
220 | /* This chip uses multiple addresses, use dummy devices for them */ | ||
221 | for (i = 1; i < 8; ++i) { | ||
222 | s35390a->client[i] = i2c_new_dummy(client->adapter, | ||
223 | client->addr + i, "rtc-s35390a"); | ||
224 | if (!s35390a->client[i]) { | ||
225 | dev_err(&client->dev, "Address %02x unavailable\n", | ||
226 | client->addr + i); | ||
227 | err = -EBUSY; | ||
228 | goto exit_dummy; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | err = s35390a_reset(s35390a); | ||
233 | if (err < 0) { | ||
234 | dev_err(&client->dev, "error resetting chip\n"); | ||
235 | goto exit_dummy; | ||
236 | } | ||
237 | |||
238 | err = s35390a_disable_test_mode(s35390a); | ||
239 | if (err < 0) { | ||
240 | dev_err(&client->dev, "error disabling test mode\n"); | ||
241 | goto exit_dummy; | ||
242 | } | ||
243 | |||
244 | err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
245 | if (err < 0) { | ||
246 | dev_err(&client->dev, "error checking 12/24 hour mode\n"); | ||
247 | goto exit_dummy; | ||
248 | } | ||
249 | if (buf[0] & S35390A_FLAG_24H) | ||
250 | s35390a->twentyfourhour = 1; | ||
251 | else | ||
252 | s35390a->twentyfourhour = 0; | ||
253 | |||
254 | if (s35390a_get_datetime(client, &tm) < 0) | ||
255 | dev_warn(&client->dev, "clock needs to be set\n"); | ||
256 | |||
257 | s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, | ||
258 | &client->dev, &s35390a_rtc_ops, THIS_MODULE); | ||
259 | |||
260 | if (IS_ERR(s35390a->rtc)) { | ||
261 | err = PTR_ERR(s35390a->rtc); | ||
262 | goto exit_dummy; | ||
263 | } | ||
264 | return 0; | ||
265 | |||
266 | exit_dummy: | ||
267 | for (i = 1; i < 8; ++i) | ||
268 | if (s35390a->client[i]) | ||
269 | i2c_unregister_device(s35390a->client[i]); | ||
270 | kfree(s35390a); | ||
271 | i2c_set_clientdata(client, NULL); | ||
272 | |||
273 | exit: | ||
274 | return err; | ||
275 | } | ||
276 | |||
277 | static int s35390a_remove(struct i2c_client *client) | ||
278 | { | ||
279 | unsigned int i; | ||
280 | |||
281 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
282 | for (i = 1; i < 8; ++i) | ||
283 | if (s35390a->client[i]) | ||
284 | i2c_unregister_device(s35390a->client[i]); | ||
285 | |||
286 | rtc_device_unregister(s35390a->rtc); | ||
287 | kfree(s35390a); | ||
288 | i2c_set_clientdata(client, NULL); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static struct i2c_driver s35390a_driver = { | ||
294 | .driver = { | ||
295 | .name = "rtc-s35390a", | ||
296 | }, | ||
297 | .probe = s35390a_probe, | ||
298 | .remove = s35390a_remove, | ||
299 | }; | ||
300 | |||
301 | static int __init s35390a_rtc_init(void) | ||
302 | { | ||
303 | return i2c_add_driver(&s35390a_driver); | ||
304 | } | ||
305 | |||
306 | static void __exit s35390a_rtc_exit(void) | ||
307 | { | ||
308 | i2c_del_driver(&s35390a_driver); | ||
309 | } | ||
310 | |||
311 | MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); | ||
312 | MODULE_DESCRIPTION("S35390A RTC driver"); | ||
313 | MODULE_LICENSE("GPL"); | ||
314 | |||
315 | module_init(s35390a_rtc_init); | ||
316 | module_exit(s35390a_rtc_exit); | ||
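[Editor's note] The unusual detail in this new driver is that the S-35390A takes each register byte with its bits in reverse order, on top of the usual BCD packing. A stand-alone hedged illustration of the encoding of one field (user-space C with local helpers; the kernel driver instead uses bitrev8() from linux/bitrev.h and BIN2BCD from linux/bcd.h):

    #include <stdio.h>

    static unsigned char bin2bcd(unsigned char v)      /* 34 -> 0x34 */
    {
            return (unsigned char)(((v / 10) << 4) | (v % 10));
    }

    static unsigned char bitrev8(unsigned char b)      /* reverse bit order within a byte */
    {
            unsigned char r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    r |= (unsigned char)(((b >> i) & 1) << (7 - i));
            return r;
    }

    int main(void)
    {
            unsigned char minutes = 34;
            unsigned char wire = bitrev8(bin2bcd(minutes));

            /* 34 -> BCD 0x34 -> bit-reversed 0x2c as written on the I2C bus */
            printf("min=%u bcd=0x%02x wire=0x%02x\n", minutes, bin2bcd(minutes), wire);
            return 0;
    }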
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index f69714a0e9e7..b19db20a0bef 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2310,10 +2310,8 @@ static int | |||
2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) | 2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) |
2311 | { | 2311 | { |
2312 | 2312 | ||
2313 | /* check failed CCW */ | 2313 | if (cqr1->startdev != cqr2->startdev) |
2314 | if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) { | 2314 | return 0; |
2315 | // return 0; /* CCW doesn't match */ | ||
2316 | } | ||
2317 | 2315 | ||
2318 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) | 2316 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) |
2319 | return 0; | 2317 | return 0; |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 28a86f070048..556063e8f7a9 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -62,8 +62,10 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
62 | return 0; | 62 | return 0; |
63 | if (device->block) | 63 | if (device->block) |
64 | block = device->block; | 64 | block = device->block; |
65 | else | 65 | else { |
66 | dasd_put_device(device); | ||
66 | return 0; | 67 | return 0; |
68 | } | ||
67 | /* Print device number. */ | 69 | /* Print device number. */ |
68 | seq_printf(m, "%s", device->cdev->dev.bus_id); | 70 | seq_printf(m, "%s", device->cdev->dev.bus_id); |
69 | /* Print discipline string. */ | 71 | /* Print discipline string. */ |
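[Editor's note] The dasd_proc fix above is a reference-count balance: the show routine obtained the device earlier in the function (outside this hunk), so the early return taken when the device has no block structure must drop that reference too. A generic hedged sketch of the pattern, with a hypothetical device type and demo_get()/demo_put() helpers standing in for the dasd ones:

    #include <linux/seq_file.h>

    /* Hypothetical device type and get/put helpers for illustration only. */
    struct demo_device { const char *name; void *block; };
    int demo_get(struct demo_device *dev);
    void demo_put(struct demo_device *dev);

    static int demo_show(struct seq_file *m, struct demo_device *dev)
    {
            if (demo_get(dev))
                    return 0;
            if (!dev->block) {
                    demo_put(dev);          /* the reference the old code leaked */
                    return 0;
            }
            seq_printf(m, "%s\n", dev->name);
            demo_put(dev);                  /* normal path drops it as before */
            return 0;
    }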
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 389346cda6c8..07c7f31081bc 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = { | |||
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kbdiacruc accent_table[MAX_DIACR] = { | 153 | struct kbdiacruc accent_table[MAX_DIACR] = { |
154 | {'^', 'c', '\003'}, {'^', 'd', '\004'}, | 154 | {'^', 'c', 0003}, {'^', 'd', 0004}, |
155 | {'^', 'z', '\032'}, {'^', '\012', '\000'}, | 155 | {'^', 'z', 0032}, {'^', 0012, 0000}, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | unsigned int accent_table_size = 4; | 158 | unsigned int accent_table_size = 4; |
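[Editor's note] The accent_table entries are fields of struct kbdiacruc, which holds plain unsigned integers, so spelling the values as octal integers (0003, 0012, ...) states the intended code point directly instead of going through a character constant. The distinction matters for values above 0177 on targets where plain char is signed, as the small hedged example below shows (user-space C; behaviour of the character constant is implementation-defined):

    #include <stdio.h>

    int main(void)
    {
            /* Where plain char is signed (e.g. x86), '\300' commonly sign-extends to -64. */
            unsigned int from_char_const = '\300';  /* commonly 0xffffffc0 */
            unsigned int from_octal      = 0300;    /* always 192 */

            printf("0x%x vs 0x%x\n", from_char_const, from_octal);
            return 0;
    }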
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 92f527201792..f7b258dfd52c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -367,7 +367,7 @@ sclp_vt220_timeout(unsigned long data) | |||
367 | sclp_vt220_emit_current(); | 367 | sclp_vt220_emit_current(); |
368 | } | 368 | } |
369 | 369 | ||
370 | #define BUFFER_MAX_DELAY HZ/2 | 370 | #define BUFFER_MAX_DELAY HZ/20 |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * Internal implementation of the write function. Write COUNT bytes of data | 373 | * Internal implementation of the write function. Write COUNT bytes of data |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index d0c6fd3b1c19..7b0b81901297 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -490,10 +490,12 @@ static int ap_device_probe(struct device *dev) | |||
490 | int rc; | 490 | int rc; |
491 | 491 | ||
492 | ap_dev->drv = ap_drv; | 492 | ap_dev->drv = ap_drv; |
493 | spin_lock_bh(&ap_device_lock); | ||
494 | list_add(&ap_dev->list, &ap_device_list); | ||
495 | spin_unlock_bh(&ap_device_lock); | ||
496 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 493 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
494 | if (!rc) { | ||
495 | spin_lock_bh(&ap_device_lock); | ||
496 | list_add(&ap_dev->list, &ap_device_list); | ||
497 | spin_unlock_bh(&ap_device_lock); | ||
498 | } | ||
497 | return rc; | 499 | return rc; |
498 | } | 500 | } |
499 | 501 | ||
@@ -532,11 +534,11 @@ static int ap_device_remove(struct device *dev) | |||
532 | 534 | ||
533 | ap_flush_queue(ap_dev); | 535 | ap_flush_queue(ap_dev); |
534 | del_timer_sync(&ap_dev->timeout); | 536 | del_timer_sync(&ap_dev->timeout); |
535 | if (ap_drv->remove) | ||
536 | ap_drv->remove(ap_dev); | ||
537 | spin_lock_bh(&ap_device_lock); | 537 | spin_lock_bh(&ap_device_lock); |
538 | list_del_init(&ap_dev->list); | 538 | list_del_init(&ap_dev->list); |
539 | spin_unlock_bh(&ap_device_lock); | 539 | spin_unlock_bh(&ap_device_lock); |
540 | if (ap_drv->remove) | ||
541 | ap_drv->remove(ap_dev); | ||
540 | spin_lock_bh(&ap_dev->lock); | 542 | spin_lock_bh(&ap_dev->lock); |
541 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); | 543 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); |
542 | spin_unlock_bh(&ap_dev->lock); | 544 | spin_unlock_bh(&ap_dev->lock); |
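[Editor's note] Both ap_bus hunks tighten ordering against ap_device_list: a device is now added to the list only after its probe callback succeeds, and it is taken off the list before the driver's remove callback runs, so list walkers never see a device whose driver state is half set up or half torn down. A hedged sketch of the probe-side idea, with hypothetical demo_* names standing in for the ap_bus ones:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_dev { struct list_head list; };

    static LIST_HEAD(demo_list);
    static DEFINE_SPINLOCK(demo_list_lock);

    /* Hypothetical probe helper: publish on the bus list only on success. */
    static int demo_probe_one(struct demo_dev *ddev, int (*probe)(struct demo_dev *))
    {
            int rc = probe ? probe(ddev) : -ENODEV;

            if (!rc) {
                    spin_lock_bh(&demo_list_lock);
                    list_add(&ddev->list, &demo_list);
                    spin_unlock_bh(&demo_list_lock);
            }
            return rc;
    }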
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index 32f513b1b78a..eb8efdcefe48 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h | |||
@@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun); | |||
102 | int asd_clear_aca(struct domain_device *, u8 *lun); | 102 | int asd_clear_aca(struct domain_device *, u8 *lun); |
103 | int asd_clear_task_set(struct domain_device *, u8 *lun); | 103 | int asd_clear_task_set(struct domain_device *, u8 *lun); |
104 | int asd_lu_reset(struct domain_device *, u8 *lun); | 104 | int asd_lu_reset(struct domain_device *, u8 *lun); |
105 | int asd_I_T_nexus_reset(struct domain_device *dev); | ||
105 | int asd_query_task(struct sas_task *); | 106 | int asd_query_task(struct sas_task *); |
106 | 107 | ||
107 | /* ---------- Adapter and Port management ---------- */ | 108 | /* ---------- Adapter and Port management ---------- */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h index 150f6706d23f..abc757559c1a 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.h +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h | |||
@@ -140,7 +140,7 @@ struct asd_ascb { | |||
140 | 140 | ||
141 | /* internally generated command */ | 141 | /* internally generated command */ |
142 | struct timer_list timer; | 142 | struct timer_list timer; |
143 | struct completion completion; | 143 | struct completion *completion; |
144 | u8 tag_valid:1; | 144 | u8 tag_valid:1; |
145 | __be16 tag; /* error recovery only */ | 145 | __be16 tag; /* error recovery only */ |
146 | 146 | ||
@@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, | |||
294 | ascb->timer.function = NULL; | 294 | ascb->timer.function = NULL; |
295 | init_timer(&ascb->timer); | 295 | init_timer(&ascb->timer); |
296 | ascb->tc_index = -1; | 296 | ascb->tc_index = -1; |
297 | init_completion(&ascb->completion); | ||
298 | } | 297 | } |
299 | 298 | ||
300 | /* Must be called with the tc_index_lock held! | 299 | /* Must be called with the tc_index_lock held! |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 5d761eb67442..88d1e731b65e 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { | |||
1003 | .lldd_abort_task_set = asd_abort_task_set, | 1003 | .lldd_abort_task_set = asd_abort_task_set, |
1004 | .lldd_clear_aca = asd_clear_aca, | 1004 | .lldd_clear_aca = asd_clear_aca, |
1005 | .lldd_clear_task_set = asd_clear_task_set, | 1005 | .lldd_clear_task_set = asd_clear_task_set, |
1006 | .lldd_I_T_nexus_reset = NULL, | 1006 | .lldd_I_T_nexus_reset = asd_I_T_nexus_reset, |
1007 | .lldd_lu_reset = asd_lu_reset, | 1007 | .lldd_lu_reset = asd_lu_reset, |
1008 | .lldd_query_task = asd_query_task, | 1008 | .lldd_query_task = asd_query_task, |
1009 | 1009 | ||
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 965d4bb999d9..008df9ab92a5 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c | |||
@@ -343,11 +343,13 @@ Again: | |||
343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
344 | task->task_state_flags |= SAS_TASK_STATE_DONE; | 344 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { | 345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { |
346 | struct completion *completion = ascb->completion; | ||
346 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 347 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
347 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " | 348 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " |
348 | "stat 0x%x but aborted by upper layer!\n", | 349 | "stat 0x%x but aborted by upper layer!\n", |
349 | task, opcode, ts->resp, ts->stat); | 350 | task, opcode, ts->resp, ts->stat); |
350 | complete(&ascb->completion); | 351 | if (completion) |
352 | complete(completion); | ||
351 | } else { | 353 | } else { |
352 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 354 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
353 | task->lldd_task = NULL; | 355 | task->lldd_task = NULL; |
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 144f5ad20453..b9ac8f703a1d 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c | |||
@@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb, | |||
53 | return res; | 53 | return res; |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline void asd_timedout_common(unsigned long data) | 56 | /* ---------- CLEAR NEXUS ---------- */ |
57 | { | ||
58 | struct asd_ascb *ascb = (void *) data; | ||
59 | struct asd_seq_data *seq = &ascb->ha->seq; | ||
60 | unsigned long flags; | ||
61 | 57 | ||
62 | spin_lock_irqsave(&seq->pend_q_lock, flags); | 58 | struct tasklet_completion_status { |
63 | seq->pending--; | 59 | int dl_opcode; |
64 | list_del_init(&ascb->list); | 60 | int tmf_state; |
65 | spin_unlock_irqrestore(&seq->pend_q_lock, flags); | 61 | u8 tag_valid:1; |
66 | } | 62 | __be16 tag; |
63 | }; | ||
64 | |||
65 | #define DECLARE_TCS(tcs) \ | ||
66 | struct tasklet_completion_status tcs = { \ | ||
67 | .dl_opcode = 0, \ | ||
68 | .tmf_state = 0, \ | ||
69 | .tag_valid = 0, \ | ||
70 | .tag = 0, \ | ||
71 | } | ||
67 | 72 | ||
68 | /* ---------- CLEAR NEXUS ---------- */ | ||
69 | 73 | ||
70 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, | 74 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, |
71 | struct done_list_struct *dl) | 75 | struct done_list_struct *dl) |
72 | { | 76 | { |
77 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
73 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 78 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
74 | if (!del_timer(&ascb->timer)) { | 79 | if (!del_timer(&ascb->timer)) { |
75 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); | 80 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); |
76 | return; | 81 | return; |
77 | } | 82 | } |
78 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); | 83 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); |
79 | ascb->uldd_task = (void *) (unsigned long) dl->opcode; | 84 | tcs->dl_opcode = dl->opcode; |
80 | complete(&ascb->completion); | 85 | complete(ascb->completion); |
86 | asd_ascb_free(ascb); | ||
81 | } | 87 | } |
82 | 88 | ||
83 | static void asd_clear_nexus_timedout(unsigned long data) | 89 | static void asd_clear_nexus_timedout(unsigned long data) |
84 | { | 90 | { |
85 | struct asd_ascb *ascb = (void *) data; | 91 | struct asd_ascb *ascb = (void *)data; |
92 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
86 | 93 | ||
87 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 94 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
88 | asd_timedout_common(data); | 95 | tcs->dl_opcode = TMF_RESP_FUNC_FAILED; |
89 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 96 | complete(ascb->completion); |
90 | complete(&ascb->completion); | ||
91 | } | 97 | } |
92 | 98 | ||
93 | #define CLEAR_NEXUS_PRE \ | 99 | #define CLEAR_NEXUS_PRE \ |
100 | struct asd_ascb *ascb; \ | ||
101 | struct scb *scb; \ | ||
102 | int res; \ | ||
103 | DECLARE_COMPLETION_ONSTACK(completion); \ | ||
104 | DECLARE_TCS(tcs); \ | ||
105 | \ | ||
94 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ | 106 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ |
95 | res = 1; \ | 107 | res = 1; \ |
96 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ | 108 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ |
97 | if (!ascb) \ | 109 | if (!ascb) \ |
98 | return -ENOMEM; \ | 110 | return -ENOMEM; \ |
99 | \ | 111 | \ |
112 | ascb->completion = &completion; \ | ||
113 | ascb->uldd_task = &tcs; \ | ||
100 | scb = ascb->scb; \ | 114 | scb = ascb->scb; \ |
101 | scb->header.opcode = CLEAR_NEXUS | 115 | scb->header.opcode = CLEAR_NEXUS |
102 | 116 | ||
@@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data) | |||
107 | if (res) \ | 121 | if (res) \ |
108 | goto out_err; \ | 122 | goto out_err; \ |
109 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ | 123 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ |
110 | wait_for_completion(&ascb->completion); \ | 124 | wait_for_completion(&completion); \ |
111 | res = (int) (unsigned long) ascb->uldd_task; \ | 125 | res = tcs.dl_opcode; \ |
112 | if (res == TC_NO_ERROR) \ | 126 | if (res == TC_NO_ERROR) \ |
113 | res = TMF_RESP_FUNC_COMPLETE; \ | 127 | res = TMF_RESP_FUNC_COMPLETE; \ |
128 | return res; \ | ||
114 | out_err: \ | 129 | out_err: \ |
115 | asd_ascb_free(ascb); \ | 130 | asd_ascb_free(ascb); \ |
116 | return res | 131 | return res |
@@ -118,9 +133,6 @@ out_err: \ | |||
118 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | 133 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) |
119 | { | 134 | { |
120 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; | 135 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; |
121 | struct asd_ascb *ascb; | ||
122 | struct scb *scb; | ||
123 | int res; | ||
124 | 136 | ||
125 | CLEAR_NEXUS_PRE; | 137 | CLEAR_NEXUS_PRE; |
126 | scb->clear_nexus.nexus = NEXUS_ADAPTER; | 138 | scb->clear_nexus.nexus = NEXUS_ADAPTER; |
@@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | |||
130 | int asd_clear_nexus_port(struct asd_sas_port *port) | 142 | int asd_clear_nexus_port(struct asd_sas_port *port) |
131 | { | 143 | { |
132 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; | 144 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; |
133 | struct asd_ascb *ascb; | ||
134 | struct scb *scb; | ||
135 | int res; | ||
136 | 145 | ||
137 | CLEAR_NEXUS_PRE; | 146 | CLEAR_NEXUS_PRE; |
138 | scb->clear_nexus.nexus = NEXUS_PORT; | 147 | scb->clear_nexus.nexus = NEXUS_PORT; |
@@ -140,29 +149,73 @@ int asd_clear_nexus_port(struct asd_sas_port *port) | |||
140 | CLEAR_NEXUS_POST; | 149 | CLEAR_NEXUS_POST; |
141 | } | 150 | } |
142 | 151 | ||
143 | #if 0 | 152 | enum clear_nexus_phase { |
144 | static int asd_clear_nexus_I_T(struct domain_device *dev) | 153 | NEXUS_PHASE_PRE, |
154 | NEXUS_PHASE_POST, | ||
155 | NEXUS_PHASE_RESUME, | ||
156 | }; | ||
157 | |||
158 | static int asd_clear_nexus_I_T(struct domain_device *dev, | ||
159 | enum clear_nexus_phase phase) | ||
145 | { | 160 | { |
146 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 161 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
147 | struct asd_ascb *ascb; | ||
148 | struct scb *scb; | ||
149 | int res; | ||
150 | 162 | ||
151 | CLEAR_NEXUS_PRE; | 163 | CLEAR_NEXUS_PRE; |
152 | scb->clear_nexus.nexus = NEXUS_I_T; | 164 | scb->clear_nexus.nexus = NEXUS_I_T; |
153 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; | 165 | switch (phase) { |
166 | case NEXUS_PHASE_PRE: | ||
167 | scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX; | ||
168 | break; | ||
169 | case NEXUS_PHASE_POST: | ||
170 | scb->clear_nexus.flags = SEND_Q | NOTINQ; | ||
171 | break; | ||
172 | case NEXUS_PHASE_RESUME: | ||
173 | scb->clear_nexus.flags = RESUME_TX; | ||
174 | } | ||
154 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) | 175 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) |
155 | dev->lldd_dev); | 176 | dev->lldd_dev); |
156 | CLEAR_NEXUS_POST; | 177 | CLEAR_NEXUS_POST; |
157 | } | 178 | } |
158 | #endif | 179 | |
180 | int asd_I_T_nexus_reset(struct domain_device *dev) | ||
181 | { | ||
182 | int res, tmp_res, i; | ||
183 | struct sas_phy *phy = sas_find_local_phy(dev); | ||
184 | /* Standard mandates link reset for ATA (type 0) and | ||
185 | * hard reset for SSP (type 1) */ | ||
186 | int reset_type = (dev->dev_type == SATA_DEV || | ||
187 | (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; | ||
188 | |||
189 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); | ||
190 | /* send a hard reset */ | ||
191 | ASD_DPRINTK("sending %s reset to %s\n", | ||
192 | reset_type ? "hard" : "soft", phy->dev.bus_id); | ||
193 | res = sas_phy_reset(phy, reset_type); | ||
194 | if (res == TMF_RESP_FUNC_COMPLETE) { | ||
195 | /* wait for the maximum settle time */ | ||
196 | msleep(500); | ||
197 | /* clear all outstanding commands (keep nexus suspended) */ | ||
198 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST); | ||
199 | } | ||
200 | for (i = 0 ; i < 3; i++) { | ||
201 | tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME); | ||
202 | if (tmp_res == TC_RESUME) | ||
203 | return res; | ||
204 | msleep(500); | ||
205 | } | ||
206 | |||
207 | /* This is a bit of a problem: the sequencer is still suspended | ||
208 | * and is refusing to resume. Hope it will resume on a bigger hammer | ||
209 | * or the disk is lost */ | ||
210 | dev_printk(KERN_ERR, &phy->dev, | ||
211 | "Failed to resume nexus after reset 0x%x\n", tmp_res); | ||
212 | |||
213 | return TMF_RESP_FUNC_FAILED; | ||
214 | } | ||
159 | 215 | ||
160 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) | 216 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) |
161 | { | 217 | { |
162 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 218 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
163 | struct asd_ascb *ascb; | ||
164 | struct scb *scb; | ||
165 | int res; | ||
166 | 219 | ||
167 | CLEAR_NEXUS_PRE; | 220 | CLEAR_NEXUS_PRE; |
168 | scb->clear_nexus.nexus = NEXUS_I_T_L; | 221 | scb->clear_nexus.nexus = NEXUS_I_T_L; |
@@ -177,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task) | |||
177 | { | 230 | { |
178 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 231 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
179 | struct asd_ascb *tascb = task->lldd_task; | 232 | struct asd_ascb *tascb = task->lldd_task; |
180 | struct asd_ascb *ascb; | ||
181 | struct scb *scb; | ||
182 | int res; | ||
183 | 233 | ||
184 | CLEAR_NEXUS_PRE; | 234 | CLEAR_NEXUS_PRE; |
185 | scb->clear_nexus.nexus = NEXUS_TAG; | 235 | scb->clear_nexus.nexus = NEXUS_TAG; |
@@ -195,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
195 | { | 245 | { |
196 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 246 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
197 | struct asd_ascb *tascb = task->lldd_task; | 247 | struct asd_ascb *tascb = task->lldd_task; |
198 | struct asd_ascb *ascb; | ||
199 | struct scb *scb; | ||
200 | int res; | ||
201 | 248 | ||
202 | CLEAR_NEXUS_PRE; | 249 | CLEAR_NEXUS_PRE; |
203 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; | 250 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; |
@@ -213,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
213 | static void asd_tmf_timedout(unsigned long data) | 260 | static void asd_tmf_timedout(unsigned long data) |
214 | { | 261 | { |
215 | struct asd_ascb *ascb = (void *) data; | 262 | struct asd_ascb *ascb = (void *) data; |
263 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
216 | 264 | ||
217 | ASD_DPRINTK("tmf timed out\n"); | 265 | ASD_DPRINTK("tmf timed out\n"); |
218 | asd_timedout_common(data); | 266 | tcs->tmf_state = TMF_RESP_FUNC_FAILED; |
219 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 267 | complete(ascb->completion); |
220 | complete(&ascb->completion); | ||
221 | } | 268 | } |
222 | 269 | ||
223 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | 270 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, |
@@ -269,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | |||
269 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, | 316 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, |
270 | struct done_list_struct *dl) | 317 | struct done_list_struct *dl) |
271 | { | 318 | { |
319 | struct tasklet_completion_status *tcs; | ||
320 | |||
272 | if (!del_timer(&ascb->timer)) | 321 | if (!del_timer(&ascb->timer)) |
273 | return; | 322 | return; |
274 | 323 | ||
324 | tcs = ascb->uldd_task; | ||
275 | ASD_DPRINTK("tmf tasklet complete\n"); | 325 | ASD_DPRINTK("tmf tasklet complete\n"); |
276 | 326 | ||
277 | if (dl->opcode == TC_SSP_RESP) | 327 | tcs->dl_opcode = dl->opcode; |
278 | ascb->uldd_task = (void *) (unsigned long) | 328 | |
279 | asd_get_tmf_resp_tasklet(ascb, dl); | 329 | if (dl->opcode == TC_SSP_RESP) { |
280 | else | 330 | tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl); |
281 | ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; | 331 | tcs->tag_valid = ascb->tag_valid; |
332 | tcs->tag = ascb->tag; | ||
333 | } | ||
282 | 334 | ||
283 | complete(&ascb->completion); | 335 | complete(ascb->completion); |
336 | asd_ascb_free(ascb); | ||
284 | } | 337 | } |
285 | 338 | ||
286 | static inline int asd_clear_nexus(struct sas_task *task) | 339 | static inline int asd_clear_nexus(struct sas_task *task) |
@@ -288,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task) | |||
288 | int res = TMF_RESP_FUNC_FAILED; | 341 | int res = TMF_RESP_FUNC_FAILED; |
289 | int leftover; | 342 | int leftover; |
290 | struct asd_ascb *tascb = task->lldd_task; | 343 | struct asd_ascb *tascb = task->lldd_task; |
344 | DECLARE_COMPLETION_ONSTACK(completion); | ||
291 | unsigned long flags; | 345 | unsigned long flags; |
292 | 346 | ||
347 | tascb->completion = &completion; | ||
348 | |||
293 | ASD_DPRINTK("task not done, clearing nexus\n"); | 349 | ASD_DPRINTK("task not done, clearing nexus\n"); |
294 | if (tascb->tag_valid) | 350 | if (tascb->tag_valid) |
295 | res = asd_clear_nexus_tag(task); | 351 | res = asd_clear_nexus_tag(task); |
296 | else | 352 | else |
297 | res = asd_clear_nexus_index(task); | 353 | res = asd_clear_nexus_index(task); |
298 | leftover = wait_for_completion_timeout(&tascb->completion, | 354 | leftover = wait_for_completion_timeout(&completion, |
299 | AIC94XX_SCB_TIMEOUT); | 355 | AIC94XX_SCB_TIMEOUT); |
356 | tascb->completion = NULL; | ||
300 | ASD_DPRINTK("came back from clear nexus\n"); | 357 | ASD_DPRINTK("came back from clear nexus\n"); |
301 | spin_lock_irqsave(&task->task_state_lock, flags); | 358 | spin_lock_irqsave(&task->task_state_lock, flags); |
302 | if (leftover < 1) | 359 | if (leftover < 1) |
@@ -350,6 +407,11 @@ int asd_abort_task(struct sas_task *task) | |||
350 | struct asd_ascb *ascb = NULL; | 407 | struct asd_ascb *ascb = NULL; |
351 | struct scb *scb; | 408 | struct scb *scb; |
352 | int leftover; | 409 | int leftover; |
410 | DECLARE_TCS(tcs); | ||
411 | DECLARE_COMPLETION_ONSTACK(completion); | ||
412 | DECLARE_COMPLETION_ONSTACK(tascb_completion); | ||
413 | |||
414 | tascb->completion = &tascb_completion; | ||
353 | 415 | ||
354 | spin_lock_irqsave(&task->task_state_lock, flags); | 416 | spin_lock_irqsave(&task->task_state_lock, flags); |
355 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 417 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -363,8 +425,10 @@ int asd_abort_task(struct sas_task *task) | |||
363 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 425 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
364 | if (!ascb) | 426 | if (!ascb) |
365 | return -ENOMEM; | 427 | return -ENOMEM; |
366 | scb = ascb->scb; | ||
367 | 428 | ||
429 | ascb->uldd_task = &tcs; | ||
430 | ascb->completion = &completion; | ||
431 | scb = ascb->scb; | ||
368 | scb->header.opcode = SCB_ABORT_TASK; | 432 | scb->header.opcode = SCB_ABORT_TASK; |
369 | 433 | ||
370 | switch (task->task_proto) { | 434 | switch (task->task_proto) { |
@@ -406,13 +470,12 @@ int asd_abort_task(struct sas_task *task) | |||
406 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, | 470 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, |
407 | asd_tmf_timedout); | 471 | asd_tmf_timedout); |
408 | if (res) | 472 | if (res) |
409 | goto out; | 473 | goto out_free; |
410 | wait_for_completion(&ascb->completion); | 474 | wait_for_completion(&completion); |
411 | ASD_DPRINTK("tmf came back\n"); | 475 | ASD_DPRINTK("tmf came back\n"); |
412 | 476 | ||
413 | res = (int) (unsigned long) ascb->uldd_task; | 477 | tascb->tag = tcs.tag; |
414 | tascb->tag = ascb->tag; | 478 | tascb->tag_valid = tcs.tag_valid; |
415 | tascb->tag_valid = ascb->tag_valid; | ||
416 | 479 | ||
417 | spin_lock_irqsave(&task->task_state_lock, flags); | 480 | spin_lock_irqsave(&task->task_state_lock, flags); |
418 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 481 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -423,63 +486,68 @@ int asd_abort_task(struct sas_task *task) | |||
423 | } | 486 | } |
424 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 487 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
425 | 488 | ||
426 | switch (res) { | 489 | if (tcs.dl_opcode == TC_SSP_RESP) { |
427 | /* The task to be aborted has been sent to the device. | 490 | /* The task to be aborted has been sent to the device. |
428 | * We got a Response IU for the ABORT TASK TMF. */ | 491 | * We got a Response IU for the ABORT TASK TMF. */ |
429 | case TC_NO_ERROR + 0xFF00: | 492 | if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE) |
430 | case TMF_RESP_FUNC_COMPLETE: | 493 | res = asd_clear_nexus(task); |
431 | case TMF_RESP_FUNC_FAILED: | 494 | else |
432 | res = asd_clear_nexus(task); | 495 | res = tcs.tmf_state; |
433 | break; | 496 | } else if (tcs.dl_opcode == TC_NO_ERROR && |
434 | case TMF_RESP_INVALID_FRAME: | 497 | tcs.tmf_state == TMF_RESP_FUNC_FAILED) { |
435 | case TMF_RESP_OVERLAPPED_TAG: | 498 | /* timeout */ |
436 | case TMF_RESP_FUNC_ESUPP: | ||
437 | case TMF_RESP_NO_LUN: | ||
438 | goto out_done; break; | ||
439 | } | ||
440 | /* In the following we assume that the managing layer | ||
441 | * will _never_ make a mistake, when issuing ABORT TASK. | ||
442 | */ | ||
443 | switch (res) { | ||
444 | default: | ||
445 | res = asd_clear_nexus(task); | ||
446 | /* fallthrough */ | ||
447 | case TC_NO_ERROR + 0xFF00: | ||
448 | case TMF_RESP_FUNC_COMPLETE: | ||
449 | break; | ||
450 | /* The task hasn't been sent to the device xor we never got | ||
451 | * a (sane) Response IU for the ABORT TASK TMF. | ||
452 | */ | ||
453 | case TF_NAK_RECV + 0xFF00: | ||
454 | res = TMF_RESP_INVALID_FRAME; | ||
455 | break; | ||
456 | case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */ | ||
457 | res = TMF_RESP_FUNC_FAILED; | 499 | res = TMF_RESP_FUNC_FAILED; |
458 | leftover = wait_for_completion_timeout(&tascb->completion, | 500 | } else { |
459 | AIC94XX_SCB_TIMEOUT); | 501 | /* In the following we assume that the managing layer |
460 | spin_lock_irqsave(&task->task_state_lock, flags); | 502 | * will _never_ make a mistake, when issuing ABORT |
461 | if (leftover < 1) | 503 | * TASK. |
504 | */ | ||
505 | switch (tcs.dl_opcode) { | ||
506 | default: | ||
507 | res = asd_clear_nexus(task); | ||
508 | /* fallthrough */ | ||
509 | case TC_NO_ERROR: | ||
510 | break; | ||
511 | /* The task hasn't been sent to the device xor | ||
512 | * we never got a (sane) Response IU for the | ||
513 | * ABORT TASK TMF. | ||
514 | */ | ||
515 | case TF_NAK_RECV: | ||
516 | res = TMF_RESP_INVALID_FRAME; | ||
517 | break; | ||
518 | case TF_TMF_TASK_DONE: /* done but not reported yet */ | ||
462 | res = TMF_RESP_FUNC_FAILED; | 519 | res = TMF_RESP_FUNC_FAILED; |
463 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | 520 | leftover = |
521 | wait_for_completion_timeout(&tascb_completion, | ||
522 | AIC94XX_SCB_TIMEOUT); | ||
523 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
524 | if (leftover < 1) | ||
525 | res = TMF_RESP_FUNC_FAILED; | ||
526 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | ||
527 | res = TMF_RESP_FUNC_COMPLETE; | ||
528 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
529 | break; | ||
530 | case TF_TMF_NO_TAG: | ||
531 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ | ||
532 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ | ||
464 | res = TMF_RESP_FUNC_COMPLETE; | 533 | res = TMF_RESP_FUNC_COMPLETE; |
465 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 534 | break; |
466 | goto out_done; | 535 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
467 | case TF_TMF_NO_TAG + 0xFF00: | 536 | res = TMF_RESP_FUNC_ESUPP; |
468 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 537 | break; |
469 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 538 | } |
470 | res = TMF_RESP_FUNC_COMPLETE; | ||
471 | goto out_done; | ||
472 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | ||
473 | res = TMF_RESP_FUNC_ESUPP; | ||
474 | goto out; | ||
475 | } | 539 | } |
476 | out_done: | 540 | out_done: |
541 | tascb->completion = NULL; | ||
477 | if (res == TMF_RESP_FUNC_COMPLETE) { | 542 | if (res == TMF_RESP_FUNC_COMPLETE) { |
478 | task->lldd_task = NULL; | 543 | task->lldd_task = NULL; |
479 | mb(); | 544 | mb(); |
480 | asd_ascb_free(tascb); | 545 | asd_ascb_free(tascb); |
481 | } | 546 | } |
482 | out: | 547 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
548 | return res; | ||
549 | |||
550 | out_free: | ||
483 | asd_ascb_free(ascb); | 551 | asd_ascb_free(ascb); |
484 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); | 552 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
485 | return res; | 553 | return res; |
@@ -507,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
507 | struct asd_ascb *ascb; | 575 | struct asd_ascb *ascb; |
508 | int res = 1; | 576 | int res = 1; |
509 | struct scb *scb; | 577 | struct scb *scb; |
578 | DECLARE_COMPLETION_ONSTACK(completion); | ||
579 | DECLARE_TCS(tcs); | ||
510 | 580 | ||
511 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) | 581 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) |
512 | return TMF_RESP_FUNC_ESUPP; | 582 | return TMF_RESP_FUNC_ESUPP; |
@@ -514,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
514 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 584 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
515 | if (!ascb) | 585 | if (!ascb) |
516 | return -ENOMEM; | 586 | return -ENOMEM; |
587 | |||
588 | ascb->completion = &completion; | ||
589 | ascb->uldd_task = &tcs; | ||
517 | scb = ascb->scb; | 590 | scb = ascb->scb; |
518 | 591 | ||
519 | if (tmf == TMF_QUERY_TASK) | 592 | if (tmf == TMF_QUERY_TASK) |
@@ -546,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
546 | asd_tmf_timedout); | 619 | asd_tmf_timedout); |
547 | if (res) | 620 | if (res) |
548 | goto out_err; | 621 | goto out_err; |
549 | wait_for_completion(&ascb->completion); | 622 | wait_for_completion(&completion); |
550 | res = (int) (unsigned long) ascb->uldd_task; | ||
551 | 623 | ||
552 | switch (res) { | 624 | switch (tcs.dl_opcode) { |
553 | case TC_NO_ERROR + 0xFF00: | 625 | case TC_NO_ERROR: |
554 | res = TMF_RESP_FUNC_COMPLETE; | 626 | res = TMF_RESP_FUNC_COMPLETE; |
555 | break; | 627 | break; |
556 | case TF_NAK_RECV + 0xFF00: | 628 | case TF_NAK_RECV: |
557 | res = TMF_RESP_INVALID_FRAME; | 629 | res = TMF_RESP_INVALID_FRAME; |
558 | break; | 630 | break; |
559 | case TF_TMF_TASK_DONE + 0xFF00: | 631 | case TF_TMF_TASK_DONE: |
560 | res = TMF_RESP_FUNC_FAILED; | 632 | res = TMF_RESP_FUNC_FAILED; |
561 | break; | 633 | break; |
562 | case TF_TMF_NO_TAG + 0xFF00: | 634 | case TF_TMF_NO_TAG: |
563 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 635 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ |
564 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 636 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ |
565 | res = TMF_RESP_FUNC_COMPLETE; | 637 | res = TMF_RESP_FUNC_COMPLETE; |
566 | break; | 638 | break; |
567 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | 639 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
568 | res = TMF_RESP_FUNC_ESUPP; | 640 | res = TMF_RESP_FUNC_ESUPP; |
569 | break; | 641 | break; |
570 | default: | 642 | default: |
571 | /* Allow TMF response codes to propagate upwards */ | 643 | /* Allow TMF response codes to propagate upwards */ |
644 | res = tcs.dl_opcode; | ||
572 | break; | 645 | break; |
573 | } | 646 | } |
647 | return res; | ||
574 | out_err: | 648 | out_err: |
575 | asd_ascb_free(ascb); | 649 | asd_ascb_free(ascb); |
576 | return res; | 650 | return res; |
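[Editor's note] The recurring change in this file is that the ascb no longer embeds a struct completion and no longer smuggles the result through uldd_task as a casted integer (the old 0xFF00 offset scheme); instead the submitting thread puts a completion and a small status struct on its own stack and the completion handler fills them in. A generic hedged sketch of that hand-off (the demo_* names are illustrative, not the driver's):

    #include <linux/completion.h>

    struct demo_status {            /* stack-allocated result carrier */
            int dl_opcode;
            int tmf_state;
    };

    struct demo_cmd {               /* stands in for struct asd_ascb */
            struct completion *completion;
            void *uldd_task;
    };

    /* Runs in tasklet context when the firmware reports completion. */
    static void demo_complete(struct demo_cmd *cmd, int opcode)
    {
            struct demo_status *st = cmd->uldd_task;

            st->dl_opcode = opcode;
            complete(cmd->completion);      /* wake the sleeping submitter */
    }

    static int demo_submit_and_wait(struct demo_cmd *cmd)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct demo_status st = { 0, 0 };

            cmd->completion = &done;
            cmd->uldd_task = &st;
            /* ... post cmd to hardware, arm a timeout that also calls complete() ... */
            wait_for_completion(&done);
            return st.dl_opcode;
    }

Keeping the completion and status on the waiter's stack also lets the handler free the ascb itself once it has signalled, which is why several hunks above add asd_ascb_free() right after complete().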
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 57786502e3ec..0393707bdfce 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -48,7 +48,7 @@ struct class_device_attribute; | |||
48 | /*The limit of outstanding scsi command that firmware can handle*/ | 48 | /*The limit of outstanding scsi command that firmware can handle*/ |
49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 |
50 | #define ARCMSR_MAX_FREECCB_NUM 320 | 50 | #define ARCMSR_MAX_FREECCB_NUM 320 |
51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" | 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" |
52 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
53 | #define ARCMSR_MAX_XFER_SECTORS 512 | 53 | #define ARCMSR_MAX_XFER_SECTORS 512 |
54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 6d67f5c0eb8e..27ebd336409b 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application, | |||
160 | static void gdth_clear_events(void); | 160 | static void gdth_clear_events(void); |
161 | 161 | ||
162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
163 | char *buffer, ushort count, int to_buffer); | 163 | char *buffer, ushort count); |
164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); |
165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); | 165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); |
166 | 166 | ||
@@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
182 | unsigned int cmd, unsigned long arg); | 182 | unsigned int cmd, unsigned long arg); |
183 | 183 | ||
184 | static void gdth_flush(gdth_ha_str *ha); | 184 | static void gdth_flush(gdth_ha_str *ha); |
185 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf); | ||
186 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 185 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); |
187 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 186 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
188 | struct gdth_cmndinfo *cmndinfo); | 187 | struct gdth_cmndinfo *cmndinfo); |
@@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd, | |||
417 | #include "gdth_proc.h" | 416 | #include "gdth_proc.h" |
418 | #include "gdth_proc.c" | 417 | #include "gdth_proc.c" |
419 | 418 | ||
420 | /* notifier block to get a notify on system shutdown/halt/reboot */ | ||
421 | static struct notifier_block gdth_notifier = { | ||
422 | gdth_halt, NULL, 0 | ||
423 | }; | ||
424 | static int notifier_disabled = 0; | ||
425 | |||
426 | static gdth_ha_str *gdth_find_ha(int hanum) | 419 | static gdth_ha_str *gdth_find_ha(int hanum) |
427 | { | 420 | { |
428 | gdth_ha_str *ha; | 421 | gdth_ha_str *ha; |
@@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) | |||
445 | for (i=0; i<GDTH_MAXCMDS; ++i) { | 438 | for (i=0; i<GDTH_MAXCMDS; ++i) { |
446 | if (ha->cmndinfo[i].index == 0) { | 439 | if (ha->cmndinfo[i].index == 0) { |
447 | priv = &ha->cmndinfo[i]; | 440 | priv = &ha->cmndinfo[i]; |
448 | priv->index = i+1; | ||
449 | memset(priv, 0, sizeof(*priv)); | 441 | memset(priv, 0, sizeof(*priv)); |
442 | priv->index = i+1; | ||
450 | break; | 443 | break; |
451 | } | 444 | } |
452 | } | 445 | } |
@@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
493 | gdth_ha_str *ha = shost_priv(sdev->host); | 486 | gdth_ha_str *ha = shost_priv(sdev->host); |
494 | Scsi_Cmnd *scp; | 487 | Scsi_Cmnd *scp; |
495 | struct gdth_cmndinfo cmndinfo; | 488 | struct gdth_cmndinfo cmndinfo; |
496 | struct scatterlist one_sg; | ||
497 | DECLARE_COMPLETION_ONSTACK(wait); | 489 | DECLARE_COMPLETION_ONSTACK(wait); |
498 | int rval; | 490 | int rval; |
499 | 491 | ||
@@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
507 | /* use request field to save the ptr. to completion struct. */ | 499 | /* use request field to save the ptr. to completion struct. */ |
508 | scp->request = (struct request *)&wait; | 500 | scp->request = (struct request *)&wait; |
509 | scp->timeout_per_command = timeout*HZ; | 501 | scp->timeout_per_command = timeout*HZ; |
510 | sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd)); | ||
511 | gdth_set_sglist(scp, &one_sg); | ||
512 | gdth_set_sg_count(scp, 1); | ||
513 | gdth_set_bufflen(scp, sizeof(*gdtcmd)); | ||
514 | scp->cmd_len = 12; | 502 | scp->cmd_len = 12; |
515 | memcpy(scp->cmnd, cmnd, 12); | 503 | memcpy(scp->cmnd, cmnd, 12); |
516 | cmndinfo.priority = IOCTL_PRI; | 504 | cmndinfo.priority = IOCTL_PRI; |
505 | cmndinfo.internal_cmd_str = gdtcmd; | ||
517 | cmndinfo.internal_command = 1; | 506 | cmndinfo.internal_command = 1; |
518 | 507 | ||
519 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); | 508 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); |
@@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha) | |||
2355 | * buffers, kmap_atomic() as needed. | 2344 | * buffers, kmap_atomic() as needed. |
2356 | */ | 2345 | */ |
2357 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2346 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
2358 | char *buffer, ushort count, int to_buffer) | 2347 | char *buffer, ushort count) |
2359 | { | 2348 | { |
2360 | ushort cpcount,i, max_sg = gdth_sg_count(scp); | 2349 | ushort cpcount,i, max_sg = gdth_sg_count(scp); |
2361 | ushort cpsum,cpnow; | 2350 | ushort cpsum,cpnow; |
@@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2381 | } | 2370 | } |
2382 | local_irq_save(flags); | 2371 | local_irq_save(flags); |
2383 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; | 2372 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; |
2384 | if (to_buffer) | 2373 | memcpy(address, buffer, cpnow); |
2385 | memcpy(buffer, address, cpnow); | ||
2386 | else | ||
2387 | memcpy(address, buffer, cpnow); | ||
2388 | flush_dcache_page(sg_page(sl)); | 2374 | flush_dcache_page(sg_page(sl)); |
2389 | kunmap_atomic(address, KM_BIO_SRC_IRQ); | 2375 | kunmap_atomic(address, KM_BIO_SRC_IRQ); |
2390 | local_irq_restore(flags); | 2376 | local_irq_restore(flags); |
@@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2438 | strcpy(inq.vendor,ha->oem_name); | 2424 | strcpy(inq.vendor,ha->oem_name); |
2439 | sprintf(inq.product,"Host Drive #%02d",t); | 2425 | sprintf(inq.product,"Host Drive #%02d",t); |
2440 | strcpy(inq.revision," "); | 2426 | strcpy(inq.revision," "); |
2441 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0); | 2427 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data)); |
2442 | break; | 2428 | break; |
2443 | 2429 | ||
2444 | case REQUEST_SENSE: | 2430 | case REQUEST_SENSE: |
@@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2448 | sd.key = NO_SENSE; | 2434 | sd.key = NO_SENSE; |
2449 | sd.info = 0; | 2435 | sd.info = 0; |
2450 | sd.add_length= 0; | 2436 | sd.add_length= 0; |
2451 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0); | 2437 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data)); |
2452 | break; | 2438 | break; |
2453 | 2439 | ||
2454 | case MODE_SENSE: | 2440 | case MODE_SENSE: |
@@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2460 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; | 2446 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; |
2461 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; | 2447 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; |
2462 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); | 2448 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); |
2463 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0); | 2449 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data)); |
2464 | break; | 2450 | break; |
2465 | 2451 | ||
2466 | case READ_CAPACITY: | 2452 | case READ_CAPACITY: |
@@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2470 | else | 2456 | else |
2471 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); | 2457 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); |
2472 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); | 2458 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); |
2473 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0); | 2459 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); |
2474 | break; | 2460 | break; |
2475 | 2461 | ||
2476 | case SERVICE_ACTION_IN: | 2462 | case SERVICE_ACTION_IN: |
@@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2482 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); | 2468 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); |
2483 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); | 2469 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); |
2484 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, | 2470 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, |
2485 | sizeof(gdth_rdcap16_data), 0); | 2471 | sizeof(gdth_rdcap16_data)); |
2486 | } else { | 2472 | } else { |
2487 | scp->result = DID_ABORT << 16; | 2473 | scp->result = DID_ABORT << 16; |
2488 | } | 2474 | } |
@@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2852 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2838 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) |
2853 | { | 2839 | { |
2854 | register gdth_cmd_str *cmdp; | 2840 | register gdth_cmd_str *cmdp; |
2841 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
2855 | int cmd_index; | 2842 | int cmd_index; |
2856 | 2843 | ||
2857 | cmdp= ha->pccb; | 2844 | cmdp= ha->pccb; |
@@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2860 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) | 2847 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) |
2861 | return 0; | 2848 | return 0; |
2862 | 2849 | ||
2863 | gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1); | 2850 | *cmdp = *cmndinfo->internal_cmd_str; |
2864 | cmdp->RequestBuffer = scp; | 2851 | cmdp->RequestBuffer = scp; |
2865 | 2852 | ||
2866 | /* search free command index */ | 2853 | /* search free command index */ |
@@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data) | |||
3794 | gdth_ha_str *ha; | 3781 | gdth_ha_str *ha; |
3795 | ulong flags; | 3782 | ulong flags; |
3796 | 3783 | ||
3784 | BUG_ON(list_empty(&gdth_instances)); | ||
3785 | |||
3797 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); | 3786 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); |
3798 | spin_lock_irqsave(&ha->smp_lock, flags); | 3787 | spin_lock_irqsave(&ha->smp_lock, flags); |
3799 | 3788 | ||
@@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha) | |||
4669 | } | 4658 | } |
4670 | } | 4659 | } |
4671 | 4660 | ||
4672 | /* shutdown routine */ | ||
4673 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
4674 | { | ||
4675 | gdth_ha_str *ha; | ||
4676 | #ifndef __alpha__ | ||
4677 | gdth_cmd_str gdtcmd; | ||
4678 | char cmnd[MAX_COMMAND_SIZE]; | ||
4679 | #endif | ||
4680 | |||
4681 | if (notifier_disabled) | ||
4682 | return NOTIFY_OK; | ||
4683 | |||
4684 | TRACE2(("gdth_halt() event %d\n",(int)event)); | ||
4685 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
4686 | return NOTIFY_DONE; | ||
4687 | |||
4688 | notifier_disabled = 1; | ||
4689 | printk("GDT-HA: Flushing all host drives .. "); | ||
4690 | list_for_each_entry(ha, &gdth_instances, list) { | ||
4691 | gdth_flush(ha); | ||
4692 | |||
4693 | #ifndef __alpha__ | ||
4694 | /* controller reset */ | ||
4695 | memset(cmnd, 0xff, MAX_COMMAND_SIZE); | ||
4696 | gdtcmd.BoardNode = LOCALBOARD; | ||
4697 | gdtcmd.Service = CACHESERVICE; | ||
4698 | gdtcmd.OpCode = GDT_RESET; | ||
4699 | TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum)); | ||
4700 | gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL); | ||
4701 | #endif | ||
4702 | } | ||
4703 | printk("Done.\n"); | ||
4704 | |||
4705 | #ifdef GDTH_STATISTICS | ||
4706 | del_timer(&gdth_timer); | ||
4707 | #endif | ||
4708 | return NOTIFY_OK; | ||
4709 | } | ||
4710 | |||
4711 | /* configure lun */ | 4661 | /* configure lun */ |
4712 | static int gdth_slave_configure(struct scsi_device *sdev) | 4662 | static int gdth_slave_configure(struct scsi_device *sdev) |
4713 | { | 4663 | { |
@@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5142 | 5092 | ||
5143 | scsi_remove_host(shp); | 5093 | scsi_remove_host(shp); |
5144 | 5094 | ||
5095 | gdth_flush(ha); | ||
5096 | |||
5145 | if (ha->sdev) { | 5097 | if (ha->sdev) { |
5146 | scsi_free_host_dev(ha->sdev); | 5098 | scsi_free_host_dev(ha->sdev); |
5147 | ha->sdev = NULL; | 5099 | ha->sdev = NULL; |
5148 | } | 5100 | } |
5149 | 5101 | ||
5150 | gdth_flush(ha); | ||
5151 | |||
5152 | if (shp->irq) | 5102 | if (shp->irq) |
5153 | free_irq(shp->irq,ha); | 5103 | free_irq(shp->irq,ha); |
5154 | 5104 | ||
@@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5174 | scsi_host_put(shp); | 5124 | scsi_host_put(shp); |
5175 | } | 5125 | } |
5176 | 5126 | ||
5127 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
5128 | { | ||
5129 | gdth_ha_str *ha; | ||
5130 | |||
5131 | TRACE2(("gdth_halt() event %d\n", (int)event)); | ||
5132 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
5133 | return NOTIFY_DONE; | ||
5134 | |||
5135 | list_for_each_entry(ha, &gdth_instances, list) | ||
5136 | gdth_flush(ha); | ||
5137 | |||
5138 | return NOTIFY_OK; | ||
5139 | } | ||
5140 | |||
5141 | static struct notifier_block gdth_notifier = { | ||
5142 | gdth_halt, NULL, 0 | ||
5143 | }; | ||
5144 | |||
5177 | static int __init gdth_init(void) | 5145 | static int __init gdth_init(void) |
5178 | { | 5146 | { |
5179 | if (disable) { | 5147 | if (disable) { |
@@ -5236,7 +5204,6 @@ static int __init gdth_init(void) | |||
5236 | add_timer(&gdth_timer); | 5204 | add_timer(&gdth_timer); |
5237 | #endif | 5205 | #endif |
5238 | major = register_chrdev(0,"gdth", &gdth_fops); | 5206 | major = register_chrdev(0,"gdth", &gdth_fops); |
5239 | notifier_disabled = 0; | ||
5240 | register_reboot_notifier(&gdth_notifier); | 5207 | register_reboot_notifier(&gdth_notifier); |
5241 | gdth_polling = FALSE; | 5208 | gdth_polling = FALSE; |
5242 | return 0; | 5209 | return 0; |
@@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void) | |||
5246 | { | 5213 | { |
5247 | gdth_ha_str *ha; | 5214 | gdth_ha_str *ha; |
5248 | 5215 | ||
5249 | list_for_each_entry(ha, &gdth_instances, list) | 5216 | unregister_chrdev(major, "gdth"); |
5250 | gdth_remove_one(ha); | 5217 | unregister_reboot_notifier(&gdth_notifier); |
5251 | 5218 | ||
5252 | #ifdef GDTH_STATISTICS | 5219 | #ifdef GDTH_STATISTICS |
5253 | del_timer(&gdth_timer); | 5220 | del_timer_sync(&gdth_timer); |
5254 | #endif | 5221 | #endif |
5255 | unregister_chrdev(major,"gdth"); | 5222 | |
5256 | unregister_reboot_notifier(&gdth_notifier); | 5223 | list_for_each_entry(ha, &gdth_instances, list) |
5224 | gdth_remove_one(ha); | ||
5257 | } | 5225 | } |
5258 | 5226 | ||
5259 | module_init(gdth_init); | 5227 | module_init(gdth_init); |
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 1434c6b0297c..26e4e92515e0 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -915,6 +915,7 @@ typedef struct { | |||
915 | struct gdth_cmndinfo { /* per-command private info */ | 915 | struct gdth_cmndinfo { /* per-command private info */ |
916 | int index; | 916 | int index; |
917 | int internal_command; /* don't call scsi_done */ | 917 | int internal_command; /* don't call scsi_done */ |
918 | gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */ | ||
918 | dma_addr_t sense_paddr; /* sense dma-addr */ | 919 | dma_addr_t sense_paddr; /* sense dma-addr */ |
919 | unchar priority; | 920 | unchar priority; |
920 | int timeout; | 921 | int timeout; |
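
Taken together, the gdth.c and gdth.h hunks stop pushing driver-internal ioctl commands through a fake scatterlist: __gdth_execute() now records a pointer to the prepared gdth_cmd_str in the per-command cmndinfo, and gdth_special_cmd() copies it into the controller command block with a single struct assignment. A simplified sketch of that hand-off follows, using hypothetical cut-down structures rather than the real gdth types.

#include <stdio.h>

/* Hypothetical, much-reduced stand-ins for gdth_cmd_str / gdth_cmndinfo. */
struct cmd_sketch { unsigned int opcode; unsigned int service; };
struct cmndinfo_sketch {
        int internal_command;
        struct cmd_sketch *internal_cmd_str;   /* carrier for internal cmds */
};

int main(void)
{
        struct cmd_sketch gdtcmd = { .opcode = 0x0c, .service = 3 };
        struct cmndinfo_sketch ci = {
                .internal_command = 1,
                .internal_cmd_str = &gdtcmd,   /* set up in __gdth_execute() */
        };
        struct cmd_sketch controller_cmd;

        /* gdth_special_cmd(): one struct copy instead of a buffer walk */
        controller_cmd = *ci.internal_cmd_str;

        printf("opcode 0x%x service %u\n",
               controller_cmd.opcode, controller_cmd.service);
        return 0;
}
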
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index bd62131b97a1..e5881e92d0fb 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, | |||
290 | int err = 0; | 290 | int err = 0; |
291 | 291 | ||
292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], | 292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], |
293 | cmd->usg_sg); | 293 | scsi_sg_count(sc)); |
294 | 294 | ||
295 | if (scsi_sg_count(sc)) | 295 | if (scsi_sg_count(sc)) |
296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); | 296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); |
@@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
838 | if (!shost) | 838 | if (!shost) |
839 | goto free_vport; | 839 | goto free_vport; |
840 | shost->transportt = ibmvstgt_transport_template; | 840 | shost->transportt = ibmvstgt_transport_template; |
841 | err = scsi_tgt_alloc_queue(shost); | ||
842 | if (err) | ||
843 | goto put_host; | ||
844 | 841 | ||
845 | target = host_to_srp_target(shost); | 842 | target = host_to_srp_target(shost); |
846 | target->shost = shost; | 843 | target->shost = shost; |
@@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
872 | if (err) | 869 | if (err) |
873 | goto destroy_queue; | 870 | goto destroy_queue; |
874 | 871 | ||
872 | err = scsi_tgt_alloc_queue(shost); | ||
873 | if (err) | ||
874 | goto destroy_queue; | ||
875 | |||
875 | return 0; | 876 | return 0; |
876 | destroy_queue: | 877 | destroy_queue: |
877 | crq_queue_destroy(target); | 878 | crq_queue_destroy(target); |
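
Moving scsi_tgt_alloc_queue() so it runs after crq_queue_create() makes the queue the last resource set up in probe, so a failure there unwinds through the same destroy_queue label as later errors; in the old ordering the queue was allocated first but only a put_host was done on failure. A generic sketch of the goto-based unwind ordering this relies on; the step names are purely illustrative.

#include <stdio.h>

/* Illustrative init/teardown pairs; not the driver's real functions. */
static int step_a(void)  { puts("crq created");     return 0; }
static void undo_a(void) { puts("crq destroyed"); }
static int step_b(void)  { puts("queue allocated"); return -1; /* simulate failure */ }

static int probe_sketch(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;
        err = step_b();         /* allocated last ...                         */
        if (err)
                goto undo_a;    /* ... so failure unwinds everything before it */
        return 0;
undo_a:
        undo_a();
out:
        return err;
}

int main(void)
{
        return probe_sketch() ? 1 : 0;
}
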
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 59f8445eab0d..bdd7de7da39a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; | 1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | if (!is_power_of_2(cmds_max) || | 1711 | if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET || |
1712 | cmds_max >= ISCSI_MGMT_ITT_OFFSET) { | 1712 | cmds_max < 2) { |
1713 | if (cmds_max != 0) | 1713 | if (cmds_max != 0) |
1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " | 1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " |
1715 | "can_queue must be a power of 2 and between " | 1715 | "can_queue must be a power of 2 and between " |
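
The tightened libiscsi check now also rejects a can_queue below 2, in addition to the existing power-of-two and ISCSI_MGMT_ITT_OFFSET constraints. A stand-alone sketch of the same predicate follows; the ISCSI_MGMT_ITT_OFFSET value used here is an assumed placeholder for illustration, not taken from this patch.

#include <stdbool.h>
#include <stdio.h>

#define ISCSI_MGMT_ITT_OFFSET 0xa00   /* assumed value, for illustration only */

static bool is_power_of_2(unsigned int n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors the tightened check: power of two, below the management ITT
 * window, and at least 2 commands. */
static bool cmds_max_valid(unsigned int cmds_max)
{
        return is_power_of_2(cmds_max) &&
               cmds_max < ISCSI_MGMT_ITT_OFFSET &&
               cmds_max >= 2;
}

int main(void)
{
        unsigned int tests[] = { 0, 1, 2, 64, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                printf("%u -> %s\n", tests[i],
                       cmds_max_valid(tests[i]) ? "ok" : "rejected");
        return 0;
}
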
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 7cd05b599a12..b0e5ac372a32 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap) | |||
236 | struct domain_device *dev = ap->private_data; | 236 | struct domain_device *dev = ap->private_data; |
237 | struct sas_internal *i = | 237 | struct sas_internal *i = |
238 | to_sas_internal(dev->port->ha->core.shost->transportt); | 238 | to_sas_internal(dev->port->ha->core.shost->transportt); |
239 | int res = 0; | 239 | int res = TMF_RESP_FUNC_FAILED; |
240 | 240 | ||
241 | if (i->dft->lldd_I_T_nexus_reset) | 241 | if (i->dft->lldd_I_T_nexus_reset) |
242 | res = i->dft->lldd_I_T_nexus_reset(dev); | 242 | res = i->dft->lldd_I_T_nexus_reset(dev); |
243 | 243 | ||
244 | if (res) | 244 | if (res != TMF_RESP_FUNC_COMPLETE) |
245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); | 245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); |
246 | 246 | ||
247 | switch (dev->sata_dev.command_set) { | 247 | switch (dev->sata_dev.command_set) { |
@@ -656,21 +656,6 @@ out: | |||
656 | return res; | 656 | return res; |
657 | } | 657 | } |
658 | 658 | ||
659 | static void sas_sata_propagate_sas_addr(struct domain_device *dev) | ||
660 | { | ||
661 | unsigned long flags; | ||
662 | struct asd_sas_port *port = dev->port; | ||
663 | struct asd_sas_phy *phy; | ||
664 | |||
665 | BUG_ON(dev->parent); | ||
666 | |||
667 | memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
668 | spin_lock_irqsave(&port->phy_list_lock, flags); | ||
669 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
670 | memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
671 | spin_unlock_irqrestore(&port->phy_list_lock, flags); | ||
672 | } | ||
673 | |||
674 | #define ATA_IDENTIFY_DEV 0xEC | 659 | #define ATA_IDENTIFY_DEV 0xEC |
675 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 | 660 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 |
676 | #define ATA_SET_FEATURES 0xEF | 661 | #define ATA_SET_FEATURES 0xEF |
@@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev) | |||
728 | goto out_err; | 713 | goto out_err; |
729 | } | 714 | } |
730 | cont1: | 715 | cont1: |
731 | /* Get WWN */ | ||
732 | if (dev->port->oob_mode != SATA_OOB_MODE) { | ||
733 | memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr, | ||
734 | SAS_ADDR_SIZE); | ||
735 | } else if (dev->sata_dev.command_set == ATA_COMMAND_SET && | ||
736 | (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000) | ||
737 | == 0x5000) { | ||
738 | int i; | ||
739 | |||
740 | for (i = 0; i < 4; i++) { | ||
741 | dev->sas_addr[2*i] = | ||
742 | (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8; | ||
743 | dev->sas_addr[2*i+1] = | ||
744 | le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF; | ||
745 | } | ||
746 | } | ||
747 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); | ||
748 | if (!dev->parent) | ||
749 | sas_sata_propagate_sas_addr(dev); | ||
750 | |||
751 | /* XXX Hint: register this SATA device with SATL. | 716 | /* XXX Hint: register this SATA device with SATL. |
752 | When this returns, dev->sata_dev->lu is alive and | 717 | When this returns, dev->sata_dev->lu is alive and |
753 | present. | 718 | present. |
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e1e2d085c920..39ae68a3b0ef 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
92 | if (!port->phy) | 92 | if (!port->phy) |
93 | port->phy = phy->phy; | 93 | port->phy = phy->phy; |
94 | 94 | ||
95 | SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id, | ||
96 | port->id, port->phy_mask); | ||
97 | |||
98 | if (*(u64 *)port->attached_sas_addr == 0) { | 95 | if (*(u64 *)port->attached_sas_addr == 0) { |
99 | port->class = phy->class; | 96 | port->class = phy->class; |
100 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, | 97 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, |
@@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
115 | } | 112 | } |
116 | sas_port_add_phy(port->port, phy->phy); | 113 | sas_port_add_phy(port->port, phy->phy); |
117 | 114 | ||
115 | SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n", | ||
116 | phy->phy->dev.bus_id,port->port->dev.bus_id, | ||
117 | port->phy_mask, | ||
118 | SAS_ADDR(port->attached_sas_addr)); | ||
119 | |||
118 | if (port->port_dev) | 120 | if (port->port_dev) |
119 | port->port_dev->pathways = port->num_phys; | 121 | port->port_dev->pathways = port->num_phys; |
120 | 122 | ||
@@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work) | |||
255 | static void sas_init_port(struct asd_sas_port *port, | 257 | static void sas_init_port(struct asd_sas_port *port, |
256 | struct sas_ha_struct *sas_ha, int i) | 258 | struct sas_ha_struct *sas_ha, int i) |
257 | { | 259 | { |
260 | memset(port, 0, sizeof(*port)); | ||
258 | port->id = i; | 261 | port->id = i; |
259 | INIT_LIST_HEAD(&port->dev_list); | 262 | INIT_LIST_HEAD(&port->dev_list); |
260 | spin_lock_init(&port->phy_list_lock); | 263 | spin_lock_init(&port->phy_list_lock); |
261 | INIT_LIST_HEAD(&port->phy_list); | 264 | INIT_LIST_HEAD(&port->phy_list); |
262 | port->num_phys = 0; | ||
263 | port->phy_mask = 0; | ||
264 | port->ha = sas_ha; | 265 | port->ha = sas_ha; |
265 | 266 | ||
266 | spin_lock_init(&port->dev_list_lock); | 267 | spin_lock_init(&port->dev_list_lock); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 704ea06a6e50..1f8241563c6c 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -434,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev) | |||
434 | } | 434 | } |
435 | 435 | ||
436 | /* Find the sas_phy that's attached to this device */ | 436 | /* Find the sas_phy that's attached to this device */ |
437 | static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | 437 | struct sas_phy *sas_find_local_phy(struct domain_device *dev) |
438 | { | 438 | { |
439 | struct domain_device *pdev = dev->parent; | 439 | struct domain_device *pdev = dev->parent; |
440 | struct ex_phy *exphy = NULL; | 440 | struct ex_phy *exphy = NULL; |
@@ -456,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | |||
456 | BUG_ON(!exphy); | 456 | BUG_ON(!exphy); |
457 | return exphy->phy; | 457 | return exphy->phy; |
458 | } | 458 | } |
459 | EXPORT_SYMBOL_GPL(sas_find_local_phy); | ||
459 | 460 | ||
460 | /* Attempt to send a LUN reset message to a device */ | 461 | /* Attempt to send a LUN reset message to a device */ |
461 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | 462 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) |
@@ -482,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
482 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | 483 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) |
483 | { | 484 | { |
484 | struct domain_device *dev = cmd_to_domain_dev(cmd); | 485 | struct domain_device *dev = cmd_to_domain_dev(cmd); |
485 | struct sas_phy *phy = find_local_sas_phy(dev); | 486 | struct sas_phy *phy = sas_find_local_phy(dev); |
486 | int res; | 487 | int res; |
487 | 488 | ||
488 | res = sas_phy_reset(phy, 1); | 489 | res = sas_phy_reset(phy, 1); |
@@ -497,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | |||
497 | } | 498 | } |
498 | 499 | ||
499 | /* Try to reset a device */ | 500 | /* Try to reset a device */ |
500 | static int try_to_reset_cmd_device(struct Scsi_Host *shost, | 501 | static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) |
501 | struct scsi_cmnd *cmd) | ||
502 | { | 502 | { |
503 | int res; | 503 | int res; |
504 | struct Scsi_Host *shost = cmd->device->host; | ||
504 | 505 | ||
505 | if (!shost->hostt->eh_device_reset_handler) | 506 | if (!shost->hostt->eh_device_reset_handler) |
506 | goto try_bus_reset; | 507 | goto try_bus_reset; |
@@ -540,6 +541,12 @@ Again: | |||
540 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; | 541 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; |
541 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 542 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
542 | 543 | ||
544 | if (need_reset) { | ||
545 | SAS_DPRINTK("%s: task 0x%p requests reset\n", | ||
546 | __FUNCTION__, task); | ||
547 | goto reset; | ||
548 | } | ||
549 | |||
543 | SAS_DPRINTK("trying to find task 0x%p\n", task); | 550 | SAS_DPRINTK("trying to find task 0x%p\n", task); |
544 | res = sas_scsi_find_task(task); | 551 | res = sas_scsi_find_task(task); |
545 | 552 | ||
@@ -550,18 +557,15 @@ Again: | |||
550 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, | 557 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, |
551 | task); | 558 | task); |
552 | sas_eh_finish_cmd(cmd); | 559 | sas_eh_finish_cmd(cmd); |
553 | if (need_reset) | ||
554 | try_to_reset_cmd_device(shost, cmd); | ||
555 | continue; | 560 | continue; |
556 | case TASK_IS_ABORTED: | 561 | case TASK_IS_ABORTED: |
557 | SAS_DPRINTK("%s: task 0x%p is aborted\n", | 562 | SAS_DPRINTK("%s: task 0x%p is aborted\n", |
558 | __FUNCTION__, task); | 563 | __FUNCTION__, task); |
559 | sas_eh_finish_cmd(cmd); | 564 | sas_eh_finish_cmd(cmd); |
560 | if (need_reset) | ||
561 | try_to_reset_cmd_device(shost, cmd); | ||
562 | continue; | 565 | continue; |
563 | case TASK_IS_AT_LU: | 566 | case TASK_IS_AT_LU: |
564 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); | 567 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); |
568 | reset: | ||
565 | tmf_resp = sas_recover_lu(task->dev, cmd); | 569 | tmf_resp = sas_recover_lu(task->dev, cmd); |
566 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 570 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
567 | SAS_DPRINTK("dev %016llx LU %x is " | 571 | SAS_DPRINTK("dev %016llx LU %x is " |
@@ -569,8 +573,6 @@ Again: | |||
569 | SAS_ADDR(task->dev), | 573 | SAS_ADDR(task->dev), |
570 | cmd->device->lun); | 574 | cmd->device->lun); |
571 | sas_eh_finish_cmd(cmd); | 575 | sas_eh_finish_cmd(cmd); |
572 | if (need_reset) | ||
573 | try_to_reset_cmd_device(shost, cmd); | ||
574 | sas_scsi_clear_queue_lu(work_q, cmd); | 576 | sas_scsi_clear_queue_lu(work_q, cmd); |
575 | goto Again; | 577 | goto Again; |
576 | } | 578 | } |
@@ -581,15 +583,15 @@ Again: | |||
581 | task); | 583 | task); |
582 | tmf_resp = sas_recover_I_T(task->dev); | 584 | tmf_resp = sas_recover_I_T(task->dev); |
583 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 585 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
586 | struct domain_device *dev = task->dev; | ||
584 | SAS_DPRINTK("I_T %016llx recovered\n", | 587 | SAS_DPRINTK("I_T %016llx recovered\n", |
585 | SAS_ADDR(task->dev->sas_addr)); | 588 | SAS_ADDR(task->dev->sas_addr)); |
586 | sas_eh_finish_cmd(cmd); | 589 | sas_eh_finish_cmd(cmd); |
587 | if (need_reset) | 590 | sas_scsi_clear_queue_I_T(work_q, dev); |
588 | try_to_reset_cmd_device(shost, cmd); | ||
589 | sas_scsi_clear_queue_I_T(work_q, task->dev); | ||
590 | goto Again; | 591 | goto Again; |
591 | } | 592 | } |
592 | /* Hammer time :-) */ | 593 | /* Hammer time :-) */ |
594 | try_to_reset_cmd_device(cmd); | ||
593 | if (i->dft->lldd_clear_nexus_port) { | 595 | if (i->dft->lldd_clear_nexus_port) { |
594 | struct asd_sas_port *port = task->dev->port; | 596 | struct asd_sas_port *port = task->dev->port; |
595 | SAS_DPRINTK("clearing nexus for port:%d\n", | 597 | SAS_DPRINTK("clearing nexus for port:%d\n", |
@@ -599,8 +601,6 @@ Again: | |||
599 | SAS_DPRINTK("clear nexus port:%d " | 601 | SAS_DPRINTK("clear nexus port:%d " |
600 | "succeeded\n", port->id); | 602 | "succeeded\n", port->id); |
601 | sas_eh_finish_cmd(cmd); | 603 | sas_eh_finish_cmd(cmd); |
602 | if (need_reset) | ||
603 | try_to_reset_cmd_device(shost, cmd); | ||
604 | sas_scsi_clear_queue_port(work_q, | 604 | sas_scsi_clear_queue_port(work_q, |
605 | port); | 605 | port); |
606 | goto Again; | 606 | goto Again; |
@@ -613,8 +613,6 @@ Again: | |||
613 | SAS_DPRINTK("clear nexus ha " | 613 | SAS_DPRINTK("clear nexus ha " |
614 | "succeeded\n"); | 614 | "succeeded\n"); |
615 | sas_eh_finish_cmd(cmd); | 615 | sas_eh_finish_cmd(cmd); |
616 | if (need_reset) | ||
617 | try_to_reset_cmd_device(shost, cmd); | ||
618 | goto clear_q; | 616 | goto clear_q; |
619 | } | 617 | } |
620 | } | 618 | } |
@@ -628,8 +626,6 @@ Again: | |||
628 | cmd->device->lun); | 626 | cmd->device->lun); |
629 | 627 | ||
630 | sas_eh_finish_cmd(cmd); | 628 | sas_eh_finish_cmd(cmd); |
631 | if (need_reset) | ||
632 | try_to_reset_cmd_device(shost, cmd); | ||
633 | goto clear_q; | 629 | goto clear_q; |
634 | } | 630 | } |
635 | } | 631 | } |
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c index d4a6ac3c9c47..5ec0665b3a3d 100644 --- a/drivers/scsi/mvsas.c +++ b/drivers/scsi/mvsas.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "mvsas" | 42 | #define DRV_NAME "mvsas" |
43 | #define DRV_VERSION "0.5" | 43 | #define DRV_VERSION "0.5.1" |
44 | #define _MV_DUMP 0 | 44 | #define _MV_DUMP 0 |
45 | #define MVS_DISABLE_NVRAM | 45 | #define MVS_DISABLE_NVRAM |
46 | #define MVS_DISABLE_MSI | 46 | #define MVS_DISABLE_MSI |
@@ -1005,7 +1005,7 @@ err_out: | |||
1005 | return rc; | 1005 | return rc; |
1006 | #else | 1006 | #else |
1007 | /* FIXME , For SAS target mode */ | 1007 | /* FIXME , For SAS target mode */ |
1008 | memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8); | 1008 | memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); |
1009 | return 0; | 1009 | return 0; |
1010 | #endif | 1010 | #endif |
1011 | } | 1011 | } |
@@ -1330,7 +1330,7 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | |||
1330 | 1330 | ||
1331 | mvs_hba_cq_dump(mvi); | 1331 | mvs_hba_cq_dump(mvi); |
1332 | 1332 | ||
1333 | if (unlikely(rx_desc & RXQ_DONE)) | 1333 | if (likely(rx_desc & RXQ_DONE)) |
1334 | mvs_slot_complete(mvi, rx_desc); | 1334 | mvs_slot_complete(mvi, rx_desc); |
1335 | if (rx_desc & RXQ_ATTN) { | 1335 | if (rx_desc & RXQ_ATTN) { |
1336 | attn = true; | 1336 | attn = true; |
@@ -2720,9 +2720,8 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi) | |||
2720 | msleep(100); | 2720 | msleep(100); |
2721 | /* init and reset phys */ | 2721 | /* init and reset phys */ |
2722 | for (i = 0; i < mvi->chip->n_phy; i++) { | 2722 | for (i = 0; i < mvi->chip->n_phy; i++) { |
2723 | /* FIXME: is this the correct dword order? */ | 2723 | u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); |
2724 | u32 lo = *((u32 *)&mvi->sas_addr[0]); | 2724 | u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); |
2725 | u32 hi = *((u32 *)&mvi->sas_addr[4]); | ||
2726 | 2725 | ||
2727 | mvs_detect_porttype(mvi, i); | 2726 | mvs_detect_porttype(mvi, i); |
2728 | 2727 | ||
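
Both mvsas address hunks are byte-order fixes: the target-mode placeholder WWN is now stored most-significant byte first, and the per-phy setup builds the high dword from sas_addr[0..3] and the low dword from sas_addr[4..7] with be32_to_cpu instead of raw native-endian loads. A small sketch of that split, applied to the placeholder address from the hunk above:

#include <stdio.h>
#include <stdint.h>

/* Portable stand-in for be32_to_cpu(): assemble a big-endian dword. */
static uint32_t be32_load(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        /* The placeholder SAS address from the hunk above, MSB first. */
        const uint8_t sas_addr[8] = { 0x50, 0x05, 0x04, 0x30,
                                      0x11, 0xab, 0x00, 0x00 };
        uint32_t hi = be32_load(&sas_addr[0]);  /* 0x50050430 */
        uint32_t lo = be32_load(&sas_addr[4]);  /* 0x11ab0000 */

        printf("hi=0x%08x lo=0x%08x\n", hi, lo);
        return 0;
}
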
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 0cd614a0fa73..fad6cb5cba28 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf) | |||
124 | } | 124 | } |
125 | req_len += sgpnt->length; | 125 | req_len += sgpnt->length; |
126 | } | 126 | } |
127 | scsi_set_resid(cmd, req_len - act_len); | 127 | scsi_set_resid(cmd, buflen - act_len); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
@@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = { | |||
427 | .cmd_per_lun = 1, | 427 | .cmd_per_lun = 1, |
428 | .emulated = 1, /* only sg driver uses this */ | 428 | .emulated = 1, /* only sg driver uses this */ |
429 | .max_sectors = PS3ROM_MAX_SECTORS, | 429 | .max_sectors = PS3ROM_MAX_SECTORS, |
430 | .use_clustering = ENABLE_CLUSTERING, | 430 | .use_clustering = DISABLE_CLUSTERING, |
431 | .module = THIS_MODULE, | 431 | .module = THIS_MODULE, |
432 | }; | 432 | }; |
433 | 433 | ||
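
The ps3rom residual fix reports the unfilled portion of the buffer the midlayer actually asked for: resid becomes the request length minus the bytes copied, rather than the sum of scatterlist segment lengths minus that count. A trivial worked example of the corrected arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int buflen  = 4096;   /* bytes the midlayer asked for        */
        unsigned int act_len = 2048;   /* bytes the device actually returned  */

        /* equivalent of scsi_set_resid(cmd, buflen - act_len) */
        printf("resid = %u\n", buflen - act_len);   /* 2048 bytes left over */
        return 0;
}
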
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 6226d88479f5..c1808763d40e 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
39 | ms_pkt->entry_count = 1; | 39 | ms_pkt->entry_count = 1; |
40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); | 40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); |
41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
42 | ms_pkt->timeout = __constant_cpu_to_le16(25); | 42 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
75 | ct_pkt->entry_type = CT_IOCB_TYPE; | 75 | ct_pkt->entry_type = CT_IOCB_TYPE; |
76 | ct_pkt->entry_count = 1; | 76 | ct_pkt->entry_count = 1; |
77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); | 77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); |
78 | ct_pkt->timeout = __constant_cpu_to_le16(25); | 78 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1144 | ms_pkt->entry_count = 1; | 1144 | ms_pkt->entry_count = 1; |
1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); | 1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); |
1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
1147 | ms_pkt->timeout = __constant_cpu_to_le16(59); | 1147 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1181 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1181 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1182 | ct_pkt->entry_count = 1; | 1182 | ct_pkt->entry_count = 1; |
1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1184 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1184 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1761 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1761 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1762 | ct_pkt->entry_count = 1; | 1762 | ct_pkt->entry_count = 1; |
1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1764 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1764 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
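
Each fixed 25- or 59-second CT/MS timeout above is replaced by twice the resource allocation timeout. ha->r_a_tov is held in tenths of a second, so r_a_tov / 10 * 2 gives 2 * R_A_TOV in seconds; with the 100-tenth minimum set in the qla_init.c hunks below, that floor works out to 20 seconds. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int r_a_tov = 100;              /* tenths of a second (new minimum) */
        unsigned int timeout = r_a_tov / 10 * 2; /* seconds, as placed in the IOCB   */

        printf("timeout = %u seconds\n", timeout);  /* prints 20 */
        return 0;
}
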
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d5c7853e7eba..364be7d06875 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) | |||
1733 | ha->login_timeout = nv->login_timeout; | 1733 | ha->login_timeout = nv->login_timeout; |
1734 | icb->login_timeout = nv->login_timeout; | 1734 | icb->login_timeout = nv->login_timeout; |
1735 | 1735 | ||
1736 | /* Set minimum RATOV to 200 tenths of a second. */ | 1736 | /* Set minimum RATOV to 100 tenths of a second. */ |
1737 | ha->r_a_tov = 200; | 1737 | ha->r_a_tov = 100; |
1738 | 1738 | ||
1739 | ha->loop_reset_delay = nv->reset_delay; | 1739 | ha->loop_reset_delay = nv->reset_delay; |
1740 | 1740 | ||
@@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) | |||
3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); | 3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); |
3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); | 3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); |
3647 | 3647 | ||
3648 | /* Set minimum RATOV to 200 tenths of a second. */ | 3648 | /* Set minimum RATOV to 100 tenths of a second. */ |
3649 | ha->r_a_tov = 200; | 3649 | ha->r_a_tov = 100; |
3650 | 3650 | ||
3651 | ha->loop_reset_delay = nv->reset_delay; | 3651 | ha->loop_reset_delay = nv->reset_delay; |
3652 | 3652 | ||
@@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) | |||
4022 | return; | 4022 | return; |
4023 | 4023 | ||
4024 | ret = qla2x00_stop_firmware(ha); | 4024 | ret = qla2x00_stop_firmware(ha); |
4025 | for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { | 4025 | for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && |
4026 | retries ; retries--) { | ||
4026 | qla2x00_reset_chip(ha); | 4027 | qla2x00_reset_chip(ha); |
4027 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) | 4028 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) |
4028 | continue; | 4029 | continue; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14e6f22944b7..f0337036c7bb 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
958 | } | 958 | } |
959 | } | 959 | } |
960 | 960 | ||
961 | /* Check for overrun. */ | ||
962 | if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && | ||
963 | scsi_status & SS_RESIDUAL_OVER) | ||
964 | comp_status = CS_DATA_OVERRUN; | ||
965 | |||
961 | /* | 966 | /* |
962 | * Based on Host and scsi status generate status code for Linux | 967 | * Based on Host and scsi status generate status code for Linux |
963 | */ | 968 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 99d29fff836d..bb103580e1ba 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport) | |||
2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; | 2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; |
2207 | tsk->p.tsk.entry_count = 1; | 2207 | tsk->p.tsk.entry_count = 1; |
2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); | 2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); |
2209 | tsk->p.tsk.timeout = __constant_cpu_to_le16(25); | 2209 | tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); | 2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); |
2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; | 2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; |
2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; | 2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c5742cc15abb..ea08a129fee9 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.00-k8" | 10 | #define QLA2XXX_VERSION "8.02.00-k9" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 10b3b9a620f3..109c5f5985ec 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, | |||
1299 | ddb_entry->fw_ddb_device_state = state; | 1299 | ddb_entry->fw_ddb_device_state = state; |
1300 | /* Device is back online. */ | 1300 | /* Device is back online. */ |
1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { |
1302 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1302 | atomic_set(&ddb_entry->port_down_timer, | 1303 | atomic_set(&ddb_entry->port_down_timer, |
1303 | ha->port_down_retry_count); | 1304 | ha->port_down_retry_count); |
1304 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1306 | atomic_set(&ddb_entry->relogin_timer, 0); | 1306 | atomic_set(&ddb_entry->relogin_timer, 0); |
1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index c3c59d763037..8b92f348f02c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); | |||
75 | static int qla4xxx_slave_alloc(struct scsi_device *device); | 75 | static int qla4xxx_slave_alloc(struct scsi_device *device); |
76 | static int qla4xxx_slave_configure(struct scsi_device *device); | 76 | static int qla4xxx_slave_configure(struct scsi_device *device); |
77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); | 77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); |
78 | static void qla4xxx_scan_start(struct Scsi_Host *shost); | ||
78 | 79 | ||
79 | static struct scsi_host_template qla4xxx_driver_template = { | 80 | static struct scsi_host_template qla4xxx_driver_template = { |
80 | .module = THIS_MODULE, | 81 | .module = THIS_MODULE, |
@@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
90 | .slave_destroy = qla4xxx_slave_destroy, | 91 | .slave_destroy = qla4xxx_slave_destroy, |
91 | 92 | ||
92 | .scan_finished = iscsi_scan_finished, | 93 | .scan_finished = iscsi_scan_finished, |
94 | .scan_start = qla4xxx_scan_start, | ||
93 | 95 | ||
94 | .this_id = -1, | 96 | .this_id = -1, |
95 | .cmd_per_lun = 3, | 97 | .cmd_per_lun = 3, |
@@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha) | |||
299 | return ddb_entry; | 301 | return ddb_entry; |
300 | } | 302 | } |
301 | 303 | ||
304 | static void qla4xxx_scan_start(struct Scsi_Host *shost) | ||
305 | { | ||
306 | struct scsi_qla_host *ha = shost_priv(shost); | ||
307 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
308 | |||
309 | /* finish setup of sessions that were already setup in firmware */ | ||
310 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
311 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
312 | qla4xxx_add_sess(ddb_entry); | ||
313 | } | ||
314 | } | ||
315 | |||
302 | /* | 316 | /* |
303 | * Timer routines | 317 | * Timer routines |
304 | */ | 318 | */ |
@@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha) | |||
864 | * qla4xxx_recover_adapter - recovers adapter after a fatal error | 878 | * qla4xxx_recover_adapter - recovers adapter after a fatal error |
865 | * @ha: Pointer to host adapter structure. | 879 | * @ha: Pointer to host adapter structure. |
866 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list | 880 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list |
867 | * after adapter recovery has completed. | 881 | * |
868 | * 0=preserve ddb list, 1=destroy and rebuild ddb list | 882 | * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild |
883 | * ddb list. | ||
869 | **/ | 884 | **/ |
870 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | 885 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, |
871 | uint8_t renew_ddb_list) | 886 | uint8_t renew_ddb_list) |
@@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
874 | 889 | ||
875 | /* Stall incoming I/O until we are done */ | 890 | /* Stall incoming I/O until we are done */ |
876 | clear_bit(AF_ONLINE, &ha->flags); | 891 | clear_bit(AF_ONLINE, &ha->flags); |
892 | |||
877 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, | 893 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, |
878 | __func__)); | 894 | __func__)); |
879 | 895 | ||
@@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1176 | int ret = -ENODEV, status; | 1192 | int ret = -ENODEV, status; |
1177 | struct Scsi_Host *host; | 1193 | struct Scsi_Host *host; |
1178 | struct scsi_qla_host *ha; | 1194 | struct scsi_qla_host *ha; |
1179 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
1180 | uint8_t init_retry_count = 0; | 1195 | uint8_t init_retry_count = 0; |
1181 | char buf[34]; | 1196 | char buf[34]; |
1182 | 1197 | ||
@@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1295 | if (ret) | 1310 | if (ret) |
1296 | goto probe_failed; | 1311 | goto probe_failed; |
1297 | 1312 | ||
1298 | /* Update transport device information for all devices. */ | ||
1299 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
1300 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
1301 | if (qla4xxx_add_sess(ddb_entry)) | ||
1302 | goto remove_host; | ||
1303 | } | ||
1304 | |||
1305 | printk(KERN_INFO | 1313 | printk(KERN_INFO |
1306 | " QLogic iSCSI HBA Driver version: %s\n" | 1314 | " QLogic iSCSI HBA Driver version: %s\n" |
1307 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", | 1315 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", |
@@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1311 | scsi_scan_host(host); | 1319 | scsi_scan_host(host); |
1312 | return 0; | 1320 | return 0; |
1313 | 1321 | ||
1314 | remove_host: | ||
1315 | qla4xxx_free_ddb_list(ha); | ||
1316 | scsi_remove_host(host); | ||
1317 | |||
1318 | probe_failed: | 1322 | probe_failed: |
1319 | qla4xxx_free_adapter(ha); | 1323 | qla4xxx_free_adapter(ha); |
1320 | scsi_host_put(ha->host); | 1324 | scsi_host_put(ha->host); |
@@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1600 | return FAILED; | 1604 | return FAILED; |
1601 | } | 1605 | } |
1602 | 1606 | ||
1603 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) { | 1607 | /* make sure the dpc thread is stopped while we reset the hba */ |
1608 | clear_bit(AF_ONLINE, &ha->flags); | ||
1609 | flush_workqueue(ha->dpc_thread); | ||
1610 | |||
1611 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) | ||
1604 | return_status = SUCCESS; | 1612 | return_status = SUCCESS; |
1605 | } | ||
1606 | 1613 | ||
1607 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", | 1614 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", |
1608 | return_status == FAILED ? "FAILED" : "SUCCEEDED"); | 1615 | return_status == FAILED ? "FAILED" : "SUCCEEDED"); |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fecba05b4e77..e5c6f6af8765 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
757 | "Notifying upper driver of completion " | 757 | "Notifying upper driver of completion " |
758 | "(result %x)\n", cmd->result)); | 758 | "(result %x)\n", cmd->result)); |
759 | 759 | ||
760 | good_bytes = scsi_bufflen(cmd); | 760 | good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; |
761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { | 761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
762 | drv = scsi_cmd_to_driver(cmd); | 762 | drv = scsi_cmd_to_driver(cmd); |
763 | if (drv->done) | 763 | if (drv->done) |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1dc165ad17fb..e67c14e31bab 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | /** | 1579 | /** |
1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the | 1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the target. |
1581 | * target. | ||
1582 | * @parent: host to scan | 1581 | * @parent: host to scan |
1583 | * @channel: channel to scan | 1582 | * @channel: channel to scan |
1584 | * @id: target id to scan | 1583 | * @id: target id to scan |
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 3677fbb30b72..a0f308bd145b 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost, | |||
103 | if (!cmd) | 103 | if (!cmd) |
104 | goto release_rq; | 104 | goto release_rq; |
105 | 105 | ||
106 | memset(cmd, 0, sizeof(*cmd)); | ||
107 | cmd->sc_data_direction = data_dir; | 106 | cmd->sc_data_direction = data_dir; |
108 | cmd->jiffies_at_alloc = jiffies; | 107 | cmd->jiffies_at_alloc = jiffies; |
109 | cmd->request = rq; | 108 | cmd->request = rq; |
@@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, | |||
382 | scsi_release_buffers(cmd); | 381 | scsi_release_buffers(cmd); |
383 | goto unmap_rq; | 382 | goto unmap_rq; |
384 | } | 383 | } |
384 | /* | ||
385 | * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the | ||
386 | * length for us. | ||
387 | */ | ||
388 | cmd->sdb.length = rq->data_len; | ||
385 | 389 | ||
386 | return 0; | 390 | return 0; |
387 | 391 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9981682d5302..ca7bb6f63bde 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #define ISCSI_SESSION_ATTRS 19 | 33 | #define ISCSI_SESSION_ATTRS 19 |
34 | #define ISCSI_CONN_ATTRS 13 | 34 | #define ISCSI_CONN_ATTRS 13 |
35 | #define ISCSI_HOST_ATTRS 4 | 35 | #define ISCSI_HOST_ATTRS 4 |
36 | #define ISCSI_TRANSPORT_VERSION "2.0-868" | 36 | #define ISCSI_TRANSPORT_VERSION "2.0-869" |
37 | 37 | ||
38 | struct iscsi_internal { | 38 | struct iscsi_internal { |
39 | int daemon_pid; | 39 | int daemon_pid; |
@@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work) | |||
373 | scsi_target_unblock(&session->dev); | 373 | scsi_target_unblock(&session->dev); |
374 | } | 374 | } |
375 | 375 | ||
376 | static void __iscsi_unblock_session(struct iscsi_cls_session *session) | 376 | static void __iscsi_unblock_session(struct work_struct *work) |
377 | { | ||
378 | if (!cancel_delayed_work(&session->recovery_work)) | ||
379 | flush_workqueue(iscsi_eh_timer_workq); | ||
380 | scsi_target_unblock(&session->dev); | ||
381 | } | ||
382 | |||
383 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
384 | { | 377 | { |
378 | struct iscsi_cls_session *session = | ||
379 | container_of(work, struct iscsi_cls_session, | ||
380 | unblock_work); | ||
385 | struct Scsi_Host *shost = iscsi_session_to_shost(session); | 381 | struct Scsi_Host *shost = iscsi_session_to_shost(session); |
386 | struct iscsi_host *ihost = shost->shost_data; | 382 | struct iscsi_host *ihost = shost->shost_data; |
387 | unsigned long flags; | 383 | unsigned long flags; |
388 | 384 | ||
385 | /* | ||
386 | * The recovery and unblock work get run from the same workqueue, | ||
387 | * so try to cancel it if it was going to run after this unblock. | ||
388 | */ | ||
389 | cancel_delayed_work(&session->recovery_work); | ||
389 | spin_lock_irqsave(&session->lock, flags); | 390 | spin_lock_irqsave(&session->lock, flags); |
390 | session->state = ISCSI_SESSION_LOGGED_IN; | 391 | session->state = ISCSI_SESSION_LOGGED_IN; |
391 | spin_unlock_irqrestore(&session->lock, flags); | 392 | spin_unlock_irqrestore(&session->lock, flags); |
392 | 393 | /* start IO */ | |
393 | __iscsi_unblock_session(session); | 394 | scsi_target_unblock(&session->dev); |
394 | /* | 395 | /* |
395 | * Only do kernel scanning if the driver is properly hooked into | 396 | * Only do kernel scanning if the driver is properly hooked into |
396 | * the async scanning code (drivers like iscsi_tcp do login and | 397 | * the async scanning code (drivers like iscsi_tcp do login and |
@@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session) | |||
401 | atomic_inc(&ihost->nr_scans); | 402 | atomic_inc(&ihost->nr_scans); |
402 | } | 403 | } |
403 | } | 404 | } |
405 | |||
406 | /** | ||
407 | * iscsi_unblock_session - set a session as logged in and start IO. | ||
408 | * @session: iscsi session | ||
409 | * | ||
410 | * Mark a session as ready to accept IO. | ||
411 | */ | ||
412 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
413 | { | ||
414 | queue_work(iscsi_eh_timer_workq, &session->unblock_work); | ||
415 | /* | ||
416 | * make sure all the events have completed before tell the driver | ||
417 | * it is safe | ||
418 | */ | ||
419 | flush_workqueue(iscsi_eh_timer_workq); | ||
420 | } | ||
404 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); | 421 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); |
405 | 422 | ||
406 | void iscsi_block_session(struct iscsi_cls_session *session) | 423 | static void __iscsi_block_session(struct work_struct *work) |
407 | { | 424 | { |
425 | struct iscsi_cls_session *session = | ||
426 | container_of(work, struct iscsi_cls_session, | ||
427 | block_work); | ||
408 | unsigned long flags; | 428 | unsigned long flags; |
409 | 429 | ||
410 | spin_lock_irqsave(&session->lock, flags); | 430 | spin_lock_irqsave(&session->lock, flags); |
411 | session->state = ISCSI_SESSION_FAILED; | 431 | session->state = ISCSI_SESSION_FAILED; |
412 | spin_unlock_irqrestore(&session->lock, flags); | 432 | spin_unlock_irqrestore(&session->lock, flags); |
413 | |||
414 | scsi_target_block(&session->dev); | 433 | scsi_target_block(&session->dev); |
415 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, | 434 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, |
416 | session->recovery_tmo * HZ); | 435 | session->recovery_tmo * HZ); |
417 | } | 436 | } |
437 | |||
438 | void iscsi_block_session(struct iscsi_cls_session *session) | ||
439 | { | ||
440 | queue_work(iscsi_eh_timer_workq, &session->block_work); | ||
441 | } | ||
418 | EXPORT_SYMBOL_GPL(iscsi_block_session); | 442 | EXPORT_SYMBOL_GPL(iscsi_block_session); |
419 | 443 | ||
420 | static void __iscsi_unbind_session(struct work_struct *work) | 444 | static void __iscsi_unbind_session(struct work_struct *work) |
@@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, | |||
463 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); | 487 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); |
464 | INIT_LIST_HEAD(&session->host_list); | 488 | INIT_LIST_HEAD(&session->host_list); |
465 | INIT_LIST_HEAD(&session->sess_list); | 489 | INIT_LIST_HEAD(&session->sess_list); |
490 | INIT_WORK(&session->unblock_work, __iscsi_unblock_session); | ||
491 | INIT_WORK(&session->block_work, __iscsi_block_session); | ||
466 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); | 492 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); |
467 | INIT_WORK(&session->scan_work, iscsi_scan_session); | 493 | INIT_WORK(&session->scan_work, iscsi_scan_session); |
468 | spin_lock_init(&session->lock); | 494 | spin_lock_init(&session->lock); |
@@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session) | |||
575 | list_del(&session->sess_list); | 601 | list_del(&session->sess_list); |
576 | spin_unlock_irqrestore(&sesslock, flags); | 602 | spin_unlock_irqrestore(&sesslock, flags); |
577 | 603 | ||
604 | /* make sure there are no blocks/unblocks queued */ | ||
605 | flush_workqueue(iscsi_eh_timer_workq); | ||
606 | /* make sure the timedout callout is not running */ | ||
607 | if (!cancel_delayed_work(&session->recovery_work)) | ||
608 | flush_workqueue(iscsi_eh_timer_workq); | ||
578 | /* | 609 | /* |
579 | * If we are blocked let commands flow again. The lld or iscsi | 610 | * If we are blocked let commands flow again. The lld or iscsi |
580 | * layer should set up the queuecommand to fail commands. | 611 | * layer should set up the queuecommand to fail commands. |
612 | * We assume that LLD will not be calling block/unblock while | ||
613 | * removing the session. | ||
581 | */ | 614 | */ |
582 | spin_lock_irqsave(&session->lock, flags); | 615 | spin_lock_irqsave(&session->lock, flags); |
583 | session->state = ISCSI_SESSION_FREE; | 616 | session->state = ISCSI_SESSION_FREE; |
584 | spin_unlock_irqrestore(&session->lock, flags); | 617 | spin_unlock_irqrestore(&session->lock, flags); |
585 | __iscsi_unblock_session(session); | ||
586 | __iscsi_unbind_session(&session->unbind_work); | ||
587 | 618 | ||
588 | /* flush running scans */ | 619 | scsi_target_unblock(&session->dev); |
620 | /* flush running scans then delete devices */ | ||
589 | flush_workqueue(ihost->scan_workq); | 621 | flush_workqueue(ihost->scan_workq); |
590 | /* | 622 | __iscsi_unbind_session(&session->unbind_work); |
591 | * If the session dropped while removing devices then we need to make | ||
592 | * sure it is not blocked | ||
593 | */ | ||
594 | if (!cancel_delayed_work(&session->recovery_work)) | ||
595 | flush_workqueue(iscsi_eh_timer_workq); | ||
596 | 623 | ||
597 | /* hw iscsi may not have removed all connections from session */ | 624 | /* hw iscsi may not have removed all connections from session */ |
598 | err = device_for_each_child(&session->dev, NULL, | 625 | err = device_for_each_child(&session->dev, NULL, |
@@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu); | |||
802 | 829 | ||
803 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | 830 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) |
804 | { | 831 | { |
805 | struct iscsi_cls_session *session = iscsi_conn_to_session(conn); | ||
806 | struct nlmsghdr *nlh; | 832 | struct nlmsghdr *nlh; |
807 | struct sk_buff *skb; | 833 | struct sk_buff *skb; |
808 | struct iscsi_uevent *ev; | 834 | struct iscsi_uevent *ev; |
809 | struct iscsi_internal *priv; | 835 | struct iscsi_internal *priv; |
810 | int len = NLMSG_SPACE(sizeof(*ev)); | 836 | int len = NLMSG_SPACE(sizeof(*ev)); |
811 | unsigned long flags; | ||
812 | 837 | ||
813 | priv = iscsi_if_transport_lookup(conn->transport); | 838 | priv = iscsi_if_transport_lookup(conn->transport); |
814 | if (!priv) | 839 | if (!priv) |
815 | return; | 840 | return; |
816 | 841 | ||
817 | spin_lock_irqsave(&session->lock, flags); | ||
818 | if (session->state == ISCSI_SESSION_LOGGED_IN) | ||
819 | session->state = ISCSI_SESSION_FAILED; | ||
820 | spin_unlock_irqrestore(&session->lock, flags); | ||
821 | |||
822 | skb = alloc_skb(len, GFP_ATOMIC); | 842 | skb = alloc_skb(len, GFP_ATOMIC); |
823 | if (!skb) { | 843 | if (!skb) { |
824 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " | 844 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " |
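The scsi_transport_iscsi.c changes above convert session block/unblock from direct calls into work items queued on iscsi_eh_timer_workq, so they run serialized with the recovery-timeout work and callers can flush the queue to wait for them. The pattern is the standard work_struct one; a small hedged sketch with illustrative names (not the transport class's real structures):

	#include <linux/workqueue.h>

	struct example_session {
		struct work_struct block_work;
		/* ... session state ... */
	};

	/* Runs in workqueue context; recover the session from the embedded work. */
	static void example_block_fn(struct work_struct *work)
	{
		struct example_session *s =
			container_of(work, struct example_session, block_work);

		/* the blocking part of the operation happens here */
		(void)s;
	}

	/* init:   INIT_WORK(&s->block_work, example_block_fn);
	 * caller: queue_work(some_workqueue, &s->block_work);
	 *         flush_workqueue(some_workqueue) where the caller must wait.
	 */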
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6f09cbd7fc48..97c68d021d28 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c | |||
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = { | |||
91 | /* Archtek America Corp. */ | 91 | /* Archtek America Corp. */ |
92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ | 92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ |
93 | { "GVC000F", 0 }, | 93 | { "GVC000F", 0 }, |
94 | /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ | ||
95 | { "GVC0303", 0 }, | ||
94 | /* Hayes */ | 96 | /* Hayes */ |
95 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ | 97 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ |
96 | { "HAY0001", 0 }, | 98 | { "HAY0001", 0 }, |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b82595cf13e8..cf627cd1b4c8 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -686,7 +686,7 @@ config UART0_RTS_PIN | |||
686 | 686 | ||
687 | config SERIAL_BFIN_UART1 | 687 | config SERIAL_BFIN_UART1 |
688 | bool "Enable UART1" | 688 | bool "Enable UART1" |
689 | depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) | 689 | depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561) |
690 | help | 690 | help |
691 | Enable UART1 | 691 | Enable UART1 |
692 | 692 | ||
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS | |||
699 | 699 | ||
700 | config UART1_CTS_PIN | 700 | config UART1_CTS_PIN |
701 | int "UART1 CTS pin" | 701 | int "UART1 CTS pin" |
702 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 702 | depends on BFIN_UART1_CTSRTS && !BF54x |
703 | default -1 | 703 | default -1 |
704 | help | 704 | help |
705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
706 | 706 | ||
707 | config UART1_RTS_PIN | 707 | config UART1_RTS_PIN |
708 | int "UART1 RTS pin" | 708 | int "UART1 RTS pin" |
709 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 709 | depends on BFIN_UART1_CTSRTS && !BF54x |
710 | default -1 | 710 | default -1 |
711 | help | 711 | help |
712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index ac2a3ef28d55..0aa345b9a38b 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -1,30 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/serial/bfin_5xx.c | 2 | * Blackfin On-Chip Serial Driver |
3 | * Based on: Based on drivers/serial/sa1100.c | ||
4 | * Author: Aubrey Li <aubrey.li@analog.com> | ||
5 | * | 3 | * |
6 | * Created: | 4 | * Copyright 2006-2007 Analog Devices Inc. |
7 | * Description: Driver for blackfin 5xx serial ports | ||
8 | * | 5 | * |
9 | * Modified: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
10 | * Copyright 2006 Analog Devices Inc. | ||
11 | * | 7 | * |
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 8 | * Licensed under the GPL-2 or later. |
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | 9 | */ |
29 | 10 | ||
30 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 11 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
@@ -67,14 +48,12 @@ | |||
67 | #define DMA_RX_XCOUNT 512 | 48 | #define DMA_RX_XCOUNT 512 |
68 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) | 49 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) |
69 | 50 | ||
70 | #define DMA_RX_FLUSH_JIFFIES 5 | 51 | #define DMA_RX_FLUSH_JIFFIES (HZ / 50) |
71 | 52 | ||
72 | #ifdef CONFIG_SERIAL_BFIN_DMA | 53 | #ifdef CONFIG_SERIAL_BFIN_DMA |
73 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); | 54 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); |
74 | #else | 55 | #else |
75 | static void bfin_serial_do_work(struct work_struct *work); | ||
76 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); | 56 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); |
77 | static void local_put_char(struct bfin_serial_port *uart, char ch); | ||
78 | #endif | 57 | #endif |
79 | 58 | ||
80 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | 59 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); |
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | |||
85 | static void bfin_serial_stop_tx(struct uart_port *port) | 64 | static void bfin_serial_stop_tx(struct uart_port *port) |
86 | { | 65 | { |
87 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 66 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
67 | struct circ_buf *xmit = &uart->port.info->xmit; | ||
68 | #if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) | ||
69 | unsigned short ier; | ||
70 | #endif | ||
88 | 71 | ||
89 | while (!(UART_GET_LSR(uart) & TEMT)) | 72 | while (!(UART_GET_LSR(uart) & TEMT)) |
90 | continue; | 73 | cpu_relax(); |
91 | 74 | ||
92 | #ifdef CONFIG_SERIAL_BFIN_DMA | 75 | #ifdef CONFIG_SERIAL_BFIN_DMA |
93 | disable_dma(uart->tx_dma_channel); | 76 | disable_dma(uart->tx_dma_channel); |
77 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); | ||
78 | uart->port.icount.tx += uart->tx_count; | ||
79 | uart->tx_count = 0; | ||
80 | uart->tx_done = 1; | ||
94 | #else | 81 | #else |
95 | #ifdef CONFIG_BF54x | 82 | #ifdef CONFIG_BF54x |
96 | /* Waiting for Transmission Finished */ | ||
97 | while (!(UART_GET_LSR(uart) & TFI)) | ||
98 | continue; | ||
99 | /* Clear TFI bit */ | 83 | /* Clear TFI bit */ |
100 | UART_PUT_LSR(uart, TFI); | 84 | UART_PUT_LSR(uart, TFI); |
101 | UART_CLEAR_IER(uart, ETBEI); | 85 | UART_CLEAR_IER(uart, ETBEI); |
102 | #else | 86 | #else |
103 | unsigned short ier; | ||
104 | |||
105 | ier = UART_GET_IER(uart); | 87 | ier = UART_GET_IER(uart); |
106 | ier &= ~ETBEI; | 88 | ier &= ~ETBEI; |
107 | UART_PUT_IER(uart, ier); | 89 | UART_PUT_IER(uart, ier); |
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port) | |||
117 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 99 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
118 | 100 | ||
119 | #ifdef CONFIG_SERIAL_BFIN_DMA | 101 | #ifdef CONFIG_SERIAL_BFIN_DMA |
120 | bfin_serial_dma_tx_chars(uart); | 102 | if (uart->tx_done) |
103 | bfin_serial_dma_tx_chars(uart); | ||
121 | #else | 104 | #else |
122 | #ifdef CONFIG_BF54x | 105 | #ifdef CONFIG_BF54x |
123 | UART_SET_IER(uart, ETBEI); | 106 | UART_SET_IER(uart, ETBEI); |
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void) | |||
209 | } | 192 | } |
210 | #endif | 193 | #endif |
211 | 194 | ||
212 | #ifdef CONFIG_SERIAL_BFIN_PIO | 195 | #if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO) |
213 | static void local_put_char(struct bfin_serial_port *uart, char ch) | 196 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) |
214 | { | 197 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) |
215 | unsigned short status; | 198 | #else |
216 | int flags = 0; | 199 | # define UART_GET_ANOMALY_THRESHOLD(uart) 0 |
217 | 200 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) | |
218 | spin_lock_irqsave(&uart->port.lock, flags); | 201 | #endif |
219 | |||
220 | do { | ||
221 | status = UART_GET_LSR(uart); | ||
222 | } while (!(status & THRE)); | ||
223 | |||
224 | UART_PUT_CHAR(uart, ch); | ||
225 | SSYNC(); | ||
226 | |||
227 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
228 | } | ||
229 | 202 | ||
203 | #ifdef CONFIG_SERIAL_BFIN_PIO | ||
230 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | 204 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) |
231 | { | 205 | { |
232 | struct tty_struct *tty = uart->port.info->tty; | 206 | struct tty_struct *tty = uart->port.info->tty; |
233 | unsigned int status, ch, flg; | 207 | unsigned int status, ch, flg; |
234 | static int in_break = 0; | 208 | static struct timeval anomaly_start = { .tv_sec = 0 }; |
235 | #ifdef CONFIG_KGDB_UART | 209 | #ifdef CONFIG_KGDB_UART |
236 | struct pt_regs *regs = get_irq_regs(); | 210 | struct pt_regs *regs = get_irq_regs(); |
237 | #endif | 211 | #endif |
238 | 212 | ||
239 | status = UART_GET_LSR(uart); | 213 | status = UART_GET_LSR(uart); |
214 | UART_CLEAR_LSR(uart); | ||
215 | |||
240 | ch = UART_GET_CHAR(uart); | 216 | ch = UART_GET_CHAR(uart); |
241 | uart->port.icount.rx++; | 217 | uart->port.icount.rx++; |
242 | 218 | ||
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
262 | #endif | 238 | #endif |
263 | 239 | ||
264 | if (ANOMALY_05000230) { | 240 | if (ANOMALY_05000230) { |
265 | /* The BF533 family of processors have a nice misbehavior where | 241 | /* The BF533 (and BF561) family of processors have a nice anomaly |
266 | * they continuously generate characters for a "single" break. | 242 | * where they continuously generate characters for a "single" break. |
267 | * We have to basically ignore this flood until the "next" valid | 243 | * We have to basically ignore this flood until the "next" valid |
268 | * character comes across. All other Blackfin families operate | 244 | * character comes across. Due to the nature of the flood, it is |
269 | * properly though. | 245 | * not possible to reliably catch bytes that are sent too quickly |
246 | * after this break. So application code talking to the Blackfin | ||
247 | * which sends a break signal must allow at least 1.5 character | ||
248 | * times after the end of the break for things to stabilize. This | ||
249 | * timeout was picked as it must absolutely be larger than 1 | ||
250 | * character time +/- some percent. So 1.5 sounds good. All other | ||
251 | * Blackfin families operate properly. Woo. | ||
270 | * Note: While Anomaly 05000230 does not directly address this, | 252 | * Note: While Anomaly 05000230 does not directly address this, |
271 | * the changes that went in for it also fixed this issue. | 253 | * the changes that went in for it also fixed this issue. |
254 | * That anomaly was fixed in 0.5+ silicon. I like bunnies. | ||
272 | */ | 255 | */ |
273 | if (in_break) { | 256 | if (anomaly_start.tv_sec) { |
274 | if (ch != 0) { | 257 | struct timeval curr; |
275 | in_break = 0; | 258 | suseconds_t usecs; |
276 | ch = UART_GET_CHAR(uart); | 259 | |
277 | if (bfin_revid() < 5) | 260 | if ((~ch & (~ch + 1)) & 0xff) |
278 | return; | 261 | goto known_good_char; |
279 | } else | 262 | |
280 | return; | 263 | do_gettimeofday(&curr); |
264 | if (curr.tv_sec - anomaly_start.tv_sec > 1) | ||
265 | goto known_good_char; | ||
266 | |||
267 | usecs = 0; | ||
268 | if (curr.tv_sec != anomaly_start.tv_sec) | ||
269 | usecs += USEC_PER_SEC; | ||
270 | usecs += curr.tv_usec - anomaly_start.tv_usec; | ||
271 | |||
272 | if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) | ||
273 | goto known_good_char; | ||
274 | |||
275 | if (ch) | ||
276 | anomaly_start.tv_sec = 0; | ||
277 | else | ||
278 | anomaly_start = curr; | ||
279 | |||
280 | return; | ||
281 | |||
282 | known_good_char: | ||
283 | anomaly_start.tv_sec = 0; | ||
281 | } | 284 | } |
282 | } | 285 | } |
283 | 286 | ||
284 | if (status & BI) { | 287 | if (status & BI) { |
285 | if (ANOMALY_05000230) | 288 | if (ANOMALY_05000230) |
286 | in_break = 1; | 289 | if (bfin_revid() < 5) |
290 | do_gettimeofday(&anomaly_start); | ||
287 | uart->port.icount.brk++; | 291 | uart->port.icount.brk++; |
288 | if (uart_handle_break(&uart->port)) | 292 | if (uart_handle_break(&uart->port)) |
289 | goto ignore_char; | 293 | goto ignore_char; |
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
324 | UART_PUT_CHAR(uart, uart->port.x_char); | 328 | UART_PUT_CHAR(uart, uart->port.x_char); |
325 | uart->port.icount.tx++; | 329 | uart->port.icount.tx++; |
326 | uart->port.x_char = 0; | 330 | uart->port.x_char = 0; |
327 | return; | ||
328 | } | 331 | } |
329 | /* | 332 | /* |
330 | * Check the modem control lines before | 333 | * Check the modem control lines before |
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
337 | return; | 340 | return; |
338 | } | 341 | } |
339 | 342 | ||
340 | local_put_char(uart, xmit->buf[xmit->tail]); | 343 | while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { |
341 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 344 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); |
342 | uart->port.icount.tx++; | 345 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
346 | uart->port.icount.tx++; | ||
347 | SSYNC(); | ||
348 | } | ||
343 | 349 | ||
344 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 350 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
345 | uart_write_wakeup(&uart->port); | 351 | uart_write_wakeup(&uart->port); |
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) | |||
352 | { | 358 | { |
353 | struct bfin_serial_port *uart = dev_id; | 359 | struct bfin_serial_port *uart = dev_id; |
354 | 360 | ||
355 | #ifdef CONFIG_BF54x | ||
356 | unsigned short status; | ||
357 | spin_lock(&uart->port.lock); | ||
358 | status = UART_GET_LSR(uart); | ||
359 | while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) { | ||
360 | bfin_serial_rx_chars(uart); | ||
361 | status = UART_GET_LSR(uart); | ||
362 | } | ||
363 | spin_unlock(&uart->port.lock); | ||
364 | #else | ||
365 | spin_lock(&uart->port.lock); | 361 | spin_lock(&uart->port.lock); |
366 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) | 362 | while (UART_GET_LSR(uart) & DR) |
367 | bfin_serial_rx_chars(uart); | 363 | bfin_serial_rx_chars(uart); |
368 | spin_unlock(&uart->port.lock); | 364 | spin_unlock(&uart->port.lock); |
369 | #endif | 365 | |
370 | return IRQ_HANDLED; | 366 | return IRQ_HANDLED; |
371 | } | 367 | } |
372 | 368 | ||
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) | |||
374 | { | 370 | { |
375 | struct bfin_serial_port *uart = dev_id; | 371 | struct bfin_serial_port *uart = dev_id; |
376 | 372 | ||
377 | #ifdef CONFIG_BF54x | ||
378 | unsigned short status; | ||
379 | spin_lock(&uart->port.lock); | 373 | spin_lock(&uart->port.lock); |
380 | status = UART_GET_LSR(uart); | 374 | if (UART_GET_LSR(uart) & THRE) |
381 | while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) { | ||
382 | bfin_serial_tx_chars(uart); | 375 | bfin_serial_tx_chars(uart); |
383 | status = UART_GET_LSR(uart); | ||
384 | } | ||
385 | spin_unlock(&uart->port.lock); | 376 | spin_unlock(&uart->port.lock); |
386 | #else | 377 | |
387 | spin_lock(&uart->port.lock); | ||
388 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY) | ||
389 | bfin_serial_tx_chars(uart); | ||
390 | spin_unlock(&uart->port.lock); | ||
391 | #endif | ||
392 | return IRQ_HANDLED; | 378 | return IRQ_HANDLED; |
393 | } | 379 | } |
380 | #endif | ||
394 | 381 | ||
395 | 382 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | |
396 | static void bfin_serial_do_work(struct work_struct *work) | 383 | static void bfin_serial_do_work(struct work_struct *work) |
397 | { | 384 | { |
398 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); | 385 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); |
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
406 | { | 393 | { |
407 | struct circ_buf *xmit = &uart->port.info->xmit; | 394 | struct circ_buf *xmit = &uart->port.info->xmit; |
408 | unsigned short ier; | 395 | unsigned short ier; |
409 | int flags = 0; | ||
410 | |||
411 | if (!uart->tx_done) | ||
412 | return; | ||
413 | 396 | ||
414 | uart->tx_done = 0; | 397 | uart->tx_done = 0; |
415 | 398 | ||
399 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
400 | uart->tx_count = 0; | ||
401 | uart->tx_done = 1; | ||
402 | return; | ||
403 | } | ||
404 | |||
416 | if (uart->port.x_char) { | 405 | if (uart->port.x_char) { |
417 | UART_PUT_CHAR(uart, uart->port.x_char); | 406 | UART_PUT_CHAR(uart, uart->port.x_char); |
418 | uart->port.icount.tx++; | 407 | uart->port.icount.tx++; |
419 | uart->port.x_char = 0; | 408 | uart->port.x_char = 0; |
420 | uart->tx_done = 1; | ||
421 | return; | ||
422 | } | 409 | } |
410 | |||
423 | /* | 411 | /* |
424 | * Check the modem control lines before | 412 | * Check the modem control lines before |
425 | * transmitting anything. | 413 | * transmitting anything. |
426 | */ | 414 | */ |
427 | bfin_serial_mctrl_check(uart); | 415 | bfin_serial_mctrl_check(uart); |
428 | 416 | ||
429 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
430 | bfin_serial_stop_tx(&uart->port); | ||
431 | uart->tx_done = 1; | ||
432 | return; | ||
433 | } | ||
434 | |||
435 | spin_lock_irqsave(&uart->port.lock, flags); | ||
436 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); | 417 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); |
437 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) | 418 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) |
438 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; | 419 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; |
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
448 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); | 429 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); |
449 | set_dma_x_modify(uart->tx_dma_channel, 1); | 430 | set_dma_x_modify(uart->tx_dma_channel, 1); |
450 | enable_dma(uart->tx_dma_channel); | 431 | enable_dma(uart->tx_dma_channel); |
432 | |||
451 | #ifdef CONFIG_BF54x | 433 | #ifdef CONFIG_BF54x |
452 | UART_SET_IER(uart, ETBEI); | 434 | UART_SET_IER(uart, ETBEI); |
453 | #else | 435 | #else |
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
455 | ier |= ETBEI; | 437 | ier |= ETBEI; |
456 | UART_PUT_IER(uart, ier); | 438 | UART_PUT_IER(uart, ier); |
457 | #endif | 439 | #endif |
458 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
459 | } | 440 | } |
460 | 441 | ||
461 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | 442 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) |
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
464 | int i, flg, status; | 445 | int i, flg, status; |
465 | 446 | ||
466 | status = UART_GET_LSR(uart); | 447 | status = UART_GET_LSR(uart); |
467 | uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; | 448 | UART_CLEAR_LSR(uart); |
449 | |||
450 | uart->port.icount.rx += | ||
451 | CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, | ||
452 | UART_XMIT_SIZE); | ||
468 | 453 | ||
469 | if (status & BI) { | 454 | if (status & BI) { |
470 | uart->port.icount.brk++; | 455 | uart->port.icount.brk++; |
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
490 | else | 475 | else |
491 | flg = TTY_NORMAL; | 476 | flg = TTY_NORMAL; |
492 | 477 | ||
493 | for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { | 478 | for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) { |
494 | if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) | 479 | if (i >= UART_XMIT_SIZE) |
495 | goto dma_ignore_char; | 480 | i = 0; |
496 | uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); | 481 | if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) |
482 | uart_insert_char(&uart->port, status, OE, | ||
483 | uart->rx_dma_buf.buf[i], flg); | ||
497 | } | 484 | } |
498 | 485 | ||
499 | dma_ignore_char: | 486 | dma_ignore_char: |
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
503 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | 490 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) |
504 | { | 491 | { |
505 | int x_pos, pos; | 492 | int x_pos, pos; |
506 | int flags = 0; | ||
507 | |||
508 | bfin_serial_dma_tx_chars(uart); | ||
509 | 493 | ||
510 | spin_lock_irqsave(&uart->port.lock, flags); | 494 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); |
511 | x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); | 495 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); |
496 | uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; | ||
497 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) | ||
498 | uart->rx_dma_nrows = 0; | ||
499 | x_pos = DMA_RX_XCOUNT - x_pos; | ||
512 | if (x_pos == DMA_RX_XCOUNT) | 500 | if (x_pos == DMA_RX_XCOUNT) |
513 | x_pos = 0; | 501 | x_pos = 0; |
514 | 502 | ||
515 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; | 503 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; |
516 | 504 | if (pos != uart->rx_dma_buf.tail) { | |
517 | if (pos>uart->rx_dma_buf.tail) { | 505 | uart->rx_dma_buf.head = pos; |
518 | uart->rx_dma_buf.tail = pos; | ||
519 | bfin_serial_dma_rx_chars(uart); | 506 | bfin_serial_dma_rx_chars(uart); |
520 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail; | 507 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
521 | } | 508 | } |
522 | spin_unlock_irqrestore(&uart->port.lock, flags); | 509 | |
523 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; | 510 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; |
524 | add_timer(&(uart->rx_dma_timer)); | 511 | add_timer(&(uart->rx_dma_timer)); |
525 | } | 512 | } |
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
532 | 519 | ||
533 | spin_lock(&uart->port.lock); | 520 | spin_lock(&uart->port.lock); |
534 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { | 521 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { |
535 | clear_dma_irqstat(uart->tx_dma_channel); | ||
536 | disable_dma(uart->tx_dma_channel); | 522 | disable_dma(uart->tx_dma_channel); |
523 | clear_dma_irqstat(uart->tx_dma_channel); | ||
537 | #ifdef CONFIG_BF54x | 524 | #ifdef CONFIG_BF54x |
538 | UART_CLEAR_IER(uart, ETBEI); | 525 | UART_CLEAR_IER(uart, ETBEI); |
539 | #else | 526 | #else |
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
541 | ier &= ~ETBEI; | 528 | ier &= ~ETBEI; |
542 | UART_PUT_IER(uart, ier); | 529 | UART_PUT_IER(uart, ier); |
543 | #endif | 530 | #endif |
544 | xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); | 531 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); |
545 | uart->port.icount.tx+=uart->tx_count; | 532 | uart->port.icount.tx += uart->tx_count; |
546 | 533 | ||
547 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 534 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
548 | uart_write_wakeup(&uart->port); | 535 | uart_write_wakeup(&uart->port); |
549 | 536 | ||
550 | if (uart_circ_empty(xmit)) | 537 | bfin_serial_dma_tx_chars(uart); |
551 | bfin_serial_stop_tx(&uart->port); | ||
552 | uart->tx_done = 1; | ||
553 | } | 538 | } |
554 | 539 | ||
555 | spin_unlock(&uart->port.lock); | 540 | spin_unlock(&uart->port.lock); |
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
561 | struct bfin_serial_port *uart = dev_id; | 546 | struct bfin_serial_port *uart = dev_id; |
562 | unsigned short irqstat; | 547 | unsigned short irqstat; |
563 | 548 | ||
564 | uart->rx_dma_nrows++; | ||
565 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { | ||
566 | uart->rx_dma_nrows = 0; | ||
567 | uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; | ||
568 | bfin_serial_dma_rx_chars(uart); | ||
569 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; | ||
570 | } | ||
571 | spin_lock(&uart->port.lock); | 549 | spin_lock(&uart->port.lock); |
572 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 550 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
573 | clear_dma_irqstat(uart->rx_dma_channel); | 551 | clear_dma_irqstat(uart->rx_dma_channel); |
574 | |||
575 | spin_unlock(&uart->port.lock); | 552 | spin_unlock(&uart->port.lock); |
553 | |||
554 | del_timer(&(uart->rx_dma_timer)); | ||
555 | uart->rx_dma_timer.expires = jiffies; | ||
556 | add_timer(&(uart->rx_dma_timer)); | ||
557 | |||
576 | return IRQ_HANDLED; | 558 | return IRQ_HANDLED; |
577 | } | 559 | } |
578 | #endif | 560 | #endif |
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port) | |||
599 | if (uart->cts_pin < 0) | 581 | if (uart->cts_pin < 0) |
600 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; | 582 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; |
601 | 583 | ||
584 | # ifdef BF54x | ||
585 | if (UART_GET_MSR(uart) & CTS) | ||
586 | # else | ||
602 | if (gpio_get_value(uart->cts_pin)) | 587 | if (gpio_get_value(uart->cts_pin)) |
588 | # endif | ||
603 | return TIOCM_DSR | TIOCM_CAR; | 589 | return TIOCM_DSR | TIOCM_CAR; |
604 | else | 590 | else |
605 | #endif | 591 | #endif |
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
614 | return; | 600 | return; |
615 | 601 | ||
616 | if (mctrl & TIOCM_RTS) | 602 | if (mctrl & TIOCM_RTS) |
603 | # ifdef BF54x | ||
604 | UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS); | ||
605 | # else | ||
617 | gpio_set_value(uart->rts_pin, 0); | 606 | gpio_set_value(uart->rts_pin, 0); |
607 | # endif | ||
618 | else | 608 | else |
609 | # ifdef BF54x | ||
610 | UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS); | ||
611 | # else | ||
619 | gpio_set_value(uart->rts_pin, 1); | 612 | gpio_set_value(uart->rts_pin, 1); |
613 | # endif | ||
620 | #endif | 614 | #endif |
621 | } | 615 | } |
622 | 616 | ||
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) | |||
627 | { | 621 | { |
628 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 622 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
629 | unsigned int status; | 623 | unsigned int status; |
630 | # ifdef CONFIG_SERIAL_BFIN_DMA | ||
631 | struct uart_info *info = uart->port.info; | 624 | struct uart_info *info = uart->port.info; |
632 | struct tty_struct *tty = info->tty; | 625 | struct tty_struct *tty = info->tty; |
633 | 626 | ||
634 | status = bfin_serial_get_mctrl(&uart->port); | 627 | status = bfin_serial_get_mctrl(&uart->port); |
628 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
635 | if (!(status & TIOCM_CTS)) { | 629 | if (!(status & TIOCM_CTS)) { |
636 | tty->hw_stopped = 1; | 630 | tty->hw_stopped = 1; |
631 | schedule_work(&uart->cts_workqueue); | ||
637 | } else { | 632 | } else { |
638 | tty->hw_stopped = 0; | 633 | tty->hw_stopped = 0; |
639 | } | 634 | } |
640 | # else | ||
641 | status = bfin_serial_get_mctrl(&uart->port); | ||
642 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
643 | if (!(status & TIOCM_CTS)) | ||
644 | schedule_work(&uart->cts_workqueue); | ||
645 | # endif | ||
646 | #endif | 635 | #endif |
647 | } | 636 | } |
648 | 637 | ||
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port) | |||
743 | disable_dma(uart->rx_dma_channel); | 732 | disable_dma(uart->rx_dma_channel); |
744 | free_dma(uart->rx_dma_channel); | 733 | free_dma(uart->rx_dma_channel); |
745 | del_timer(&(uart->rx_dma_timer)); | 734 | del_timer(&(uart->rx_dma_timer)); |
735 | dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); | ||
746 | #else | 736 | #else |
747 | #ifdef CONFIG_KGDB_UART | 737 | #ifdef CONFIG_KGDB_UART |
748 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | 738 | if (uart->port.line != CONFIG_KGDB_UART_PORT) |
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
814 | quot = uart_get_divisor(port, baud); | 804 | quot = uart_get_divisor(port, baud); |
815 | spin_lock_irqsave(&uart->port.lock, flags); | 805 | spin_lock_irqsave(&uart->port.lock, flags); |
816 | 806 | ||
807 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); | ||
808 | |||
817 | do { | 809 | do { |
818 | lsr = UART_GET_LSR(uart); | 810 | lsr = UART_GET_LSR(uart); |
819 | } while (!(lsr & TEMT)); | 811 | } while (!(lsr & TEMT)); |
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void) | |||
956 | bfin_serial_ports[i].rx_dma_channel = | 948 | bfin_serial_ports[i].rx_dma_channel = |
957 | bfin_serial_resource[i].uart_rx_dma_channel; | 949 | bfin_serial_resource[i].uart_rx_dma_channel; |
958 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); | 950 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); |
959 | #else | ||
960 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
961 | #endif | 951 | #endif |
962 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 952 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
953 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
963 | bfin_serial_ports[i].cts_pin = | 954 | bfin_serial_ports[i].cts_pin = |
964 | bfin_serial_resource[i].uart_cts_pin; | 955 | bfin_serial_resource[i].uart_cts_pin; |
965 | bfin_serial_ports[i].rts_pin = | 956 | bfin_serial_ports[i].rts_pin = |
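Among the bfin_5xx.c changes, set_termios now programs the break-anomaly threshold as USEC_PER_SEC / baud * 15, i.e. roughly 1.5 character times assuming the usual 10-bit frame, which matches the comment added in the RX path. A quick standalone check of that arithmetic (plain userspace C, not driver code):

	#include <stdio.h>

	#define USEC_PER_SEC 1000000L

	int main(void)
	{
		long baud      = 57600;
		long bit_us    = USEC_PER_SEC / baud;        /* ~17 us per bit        */
		long char_us   = 10 * bit_us;                /* start + 8 data + stop */
		long threshold = USEC_PER_SEC / baud * 15;   /* as programmed above   */

		printf("char time %ld us, threshold %ld us (~1.5 char times)\n",
		       char_us, threshold);
		return 0;
	}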
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 348ee2c19b58..c2bb11c02bde 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c | |||
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up) | |||
421 | up->port.icount.tx++; | 421 | up->port.icount.tx++; |
422 | if (uart_circ_empty(xmit)) | 422 | if (uart_circ_empty(xmit)) |
423 | break; | 423 | break; |
424 | while (!serial_in(up, UART_LSR) & UART_LSR_THRE); | 424 | while (!(serial_in(up, UART_LSR) & UART_LSR_THRE)); |
425 | 425 | ||
426 | } while (--count > 0); | 426 | } while (--count > 0); |
427 | 427 | ||
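The one-character m32r_sio.c fix above is a classic precedence bug: '!' binds tighter than '&', so the old busy-wait condition evaluated (!lsr) & UART_LSR_THRE, which is zero for every possible lsr value, and the loop never actually waited for the transmitter. A standalone demonstration (not kernel code):

	#include <stdio.h>

	#define UART_LSR_THRE 0x20

	int main(void)
	{
		unsigned int lsr = 0x01;   /* transmitter still busy (THRE clear) */

		/* old form: (!lsr) & 0x20 -- always 0, so the wait exits at once */
		printf("buggy   condition: %d\n", !lsr & UART_LSR_THRE);
		/* fixed form: stays 1 while THRE is clear, so the loop waits */
		printf("correct condition: %d\n", !(lsr & UART_LSR_THRE));
		return 0;
	}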
diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index a64d85821996..c0e50a461055 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c | |||
@@ -138,7 +138,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = { | |||
138 | { /* end of list */ }, | 138 | { /* end of list */ }, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | static struct of_platform_driver __devinitdata of_platform_serial_driver = { | 141 | static struct of_platform_driver of_platform_serial_driver = { |
142 | .owner = THIS_MODULE, | 142 | .owner = THIS_MODULE, |
143 | .name = "of_serial", | 143 | .name = "of_serial", |
144 | .probe = of_platform_serial_probe, | 144 | .probe = of_platform_serial_probe, |
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 9ce12cb2cebc..a8c116b80bff 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/console.h> | 42 | #include <linux/console.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/serial_sci.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_CPU_FREQ | 46 | #ifdef CONFIG_CPU_FREQ |
46 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
@@ -54,7 +55,6 @@ | |||
54 | #include <asm/kgdb.h> | 55 | #include <asm/kgdb.h> |
55 | #endif | 56 | #endif |
56 | 57 | ||
57 | #include <asm/sci.h> | ||
58 | #include "sh-sci.h" | 58 | #include "sh-sci.h" |
59 | 59 | ||
60 | struct sci_port { | 60 | struct sci_port { |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 9cfcfd8dad5e..617efb1640b1 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Core maple bus functionality | 2 | * Core maple bus functionality |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Adrian McMenamin | 4 | * Copyright (C) 2007, 2008 Adrian McMenamin |
5 | * | 5 | * |
6 | * Based on 2.4 code by: | 6 | * Based on 2.4 code by: |
7 | * | 7 | * |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -54,7 +53,7 @@ static struct device maple_bus; | |||
54 | static int subdevice_map[MAPLE_PORTS]; | 53 | static int subdevice_map[MAPLE_PORTS]; |
55 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; | 54 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; |
56 | static unsigned long maple_pnp_time; | 55 | static unsigned long maple_pnp_time; |
57 | static int started, scanning, liststatus, realscan; | 56 | static int started, scanning, liststatus, fullscan; |
58 | static struct kmem_cache *maple_queue_cache; | 57 | static struct kmem_cache *maple_queue_cache; |
59 | 58 | ||
60 | struct maple_device_specify { | 59 | struct maple_device_specify { |
@@ -62,6 +61,9 @@ struct maple_device_specify { | |||
62 | int unit; | 61 | int unit; |
63 | }; | 62 | }; |
64 | 63 | ||
64 | static bool checked[4]; | ||
65 | static struct maple_device *baseunits[4]; | ||
66 | |||
65 | /** | 67 | /** |
66 | * maple_driver_register - register a device driver | 68 | * maple_driver_register - register a device driver |
67 | * automatically makes the driver bus a maple bus | 69 | * automatically makes the driver bus a maple bus |
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
309 | else | 311 | else |
310 | break; | 312 | break; |
311 | 313 | ||
312 | if (realscan) { | 314 | printk(KERN_INFO "Maple device detected: %s\n", |
313 | printk(KERN_INFO "Maple device detected: %s\n", | 315 | mdev->product_name); |
314 | mdev->product_name); | 316 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); |
315 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); | ||
316 | } | ||
317 | 317 | ||
318 | function = be32_to_cpu(mdev->devinfo.function); | 318 | function = be32_to_cpu(mdev->devinfo.function); |
319 | 319 | ||
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
323 | mdev->driver = &maple_dummy_driver; | 323 | mdev->driver = &maple_dummy_driver; |
324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); | 324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); |
325 | } else { | 325 | } else { |
326 | if (realscan) | 326 | printk(KERN_INFO |
327 | printk(KERN_INFO | 327 | "Maple bus at (%d, %d): Function 0x%lX\n", |
328 | "Maple bus at (%d, %d): Function 0x%lX\n", | 328 | mdev->port, mdev->unit, function); |
329 | mdev->port, mdev->unit, function); | ||
330 | 329 | ||
331 | matched = | 330 | matched = |
332 | bus_for_each_drv(&maple_bus_type, NULL, mdev, | 331 | bus_for_each_drv(&maple_bus_type, NULL, mdev, |
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
334 | 333 | ||
335 | if (matched == 0) { | 334 | if (matched == 0) { |
336 | /* Driver does not exist yet */ | 335 | /* Driver does not exist yet */ |
337 | if (realscan) | 336 | printk(KERN_INFO |
338 | printk(KERN_INFO | 337 | "No maple driver found.\n"); |
339 | "No maple driver found.\n"); | ||
340 | mdev->driver = &maple_dummy_driver; | 338 | mdev->driver = &maple_dummy_driver; |
341 | } | 339 | } |
342 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, | 340 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, |
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev, | |||
472 | maple_detach_driver(mdev); | 470 | maple_detach_driver(mdev); |
473 | return; | 471 | return; |
474 | } | 472 | } |
475 | if (!started) { | 473 | if (!started || !fullscan) { |
476 | printk(KERN_INFO "No maple devices attached to port %d\n", | 474 | if (checked[mdev->port] == false) { |
477 | mdev->port); | 475 | checked[mdev->port] = true; |
476 | printk(KERN_INFO "No maple devices attached" | ||
477 | " to port %d\n", mdev->port); | ||
478 | } | ||
478 | return; | 479 | return; |
479 | } | 480 | } |
480 | maple_clean_submap(mdev); | 481 | maple_clean_submap(mdev); |
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
485 | char *recvbuf) | 486 | char *recvbuf) |
486 | { | 487 | { |
487 | char submask; | 488 | char submask; |
488 | if ((!started) || (scanning == 2)) { | 489 | if (!started || (scanning == 2) || !fullscan) { |
489 | maple_attach_driver(mdev); | 490 | if ((mdev->unit == 0) && (checked[mdev->port] == false)) { |
491 | checked[mdev->port] = true; | ||
492 | maple_attach_driver(mdev); | ||
493 | } else { | ||
494 | if (mdev->unit != 0) | ||
495 | maple_attach_driver(mdev); | ||
496 | } | ||
490 | return; | 497 | return; |
491 | } | 498 | } |
492 | if (mdev->unit == 0) { | 499 | if (mdev->unit == 0) { |
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work) | |||
505 | struct maple_device *dev; | 512 | struct maple_device *dev; |
506 | char *recvbuf; | 513 | char *recvbuf; |
507 | enum maple_code code; | 514 | enum maple_code code; |
515 | int i; | ||
508 | 516 | ||
509 | if (!maple_dma_done()) | 517 | if (!maple_dma_done()) |
510 | return; | 518 | return; |
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work) | |||
557 | } else | 565 | } else |
558 | scanning = 0; | 566 | scanning = 0; |
559 | 567 | ||
568 | if (!fullscan) { | ||
569 | fullscan = 1; | ||
570 | for (i = 0; i < MAPLE_PORTS; i++) { | ||
571 | if (checked[i] == false) { | ||
572 | fullscan = 0; | ||
573 | dev = baseunits[i]; | ||
574 | dev->mq->command = | ||
575 | MAPLE_COMMAND_DEVINFO; | ||
576 | dev->mq->length = 0; | ||
577 | maple_add_packet(dev->mq); | ||
578 | } | ||
579 | } | ||
580 | } | ||
560 | if (started == 0) | 581 | if (started == 0) |
561 | started = 1; | 582 | started = 1; |
562 | } | 583 | } |
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void) | |||
694 | 715 | ||
695 | /* setup maple ports */ | 716 | /* setup maple ports */ |
696 | for (i = 0; i < MAPLE_PORTS; i++) { | 717 | for (i = 0; i < MAPLE_PORTS; i++) { |
718 | checked[i] = false; | ||
697 | mdev[i] = maple_alloc_dev(i, 0); | 719 | mdev[i] = maple_alloc_dev(i, 0); |
720 | baseunits[i] = mdev[i]; | ||
698 | if (!mdev[i]) { | 721 | if (!mdev[i]) { |
699 | while (i-- > 0) | 722 | while (i-- > 0) |
700 | maple_free_dev(mdev[i]); | 723 | maple_free_dev(mdev[i]); |
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void) | |||
703 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; | 726 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; |
704 | mdev[i]->mq->length = 0; | 727 | mdev[i]->mq->length = 0; |
705 | maple_add_packet(mdev[i]->mq); | 728 | maple_add_packet(mdev[i]->mq); |
706 | /* delay aids hardware detection */ | ||
707 | mdelay(5); | ||
708 | subdevice_map[i] = 0; | 729 | subdevice_map[i] = 0; |
709 | } | 730 | } |
710 | 731 | ||
711 | realscan = 1; | ||
712 | /* setup maplebus hardware */ | 732 | /* setup maplebus hardware */ |
713 | maplebus_dma_reset(); | 733 | maplebus_dma_reset(); |
714 | /* initial detection */ | 734 | /* initial detection */ |
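The maple.c rework above drops the fixed mdelay(5) probe delay in favour of per-port bookkeeping: a port is marked in checked[] once its base unit answers (or is reported empty), and after each DMA pass any still-unanswered port gets another DEVINFO request until fullscan latches. A simplified sketch of that loop with stand-in names:

	#define EXAMPLE_PORTS 4

	static int example_checked[EXAMPLE_PORTS];
	static int example_fullscan;

	/* Called at the end of each DMA pass: re-query ports that have not
	 * answered yet; fullscan only latches once every port is accounted for.
	 */
	static void example_rescan(void (*send_devinfo)(int port))
	{
		int i;

		example_fullscan = 1;
		for (i = 0; i < EXAMPLE_PORTS; i++) {
			if (!example_checked[i]) {
				example_fullscan = 0;
				send_devinfo(i);
			}
		}
	}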
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index c47a650183a1..41a3d00c4515 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c | |||
@@ -99,7 +99,7 @@ static dbdev_tab_t au1550_spi_mem_dbdev = | |||
99 | static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); | 99 | static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw); |
100 | 100 | ||
101 | 101 | ||
102 | /** | 102 | /* |
103 | * compute BRG and DIV bits to setup spi clock based on main input clock rate | 103 | * compute BRG and DIV bits to setup spi clock based on main input clock rate |
104 | * that was specified in platform data structure | 104 | * that was specified in platform data structure |
105 | * according to au1550 datasheet: | 105 | * according to au1550 datasheet: |
@@ -650,7 +650,7 @@ static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
650 | return hw->txrx_bufs(spi, t); | 650 | return hw->txrx_bufs(spi, t); |
651 | } | 651 | } |
652 | 652 | ||
653 | static irqreturn_t au1550_spi_irq(int irq, void *dev, struct pt_regs *regs) | 653 | static irqreturn_t au1550_spi_irq(int irq, void *dev) |
654 | { | 654 | { |
655 | struct au1550_spi *hw = dev; | 655 | struct au1550_spi *hw = dev; |
656 | return hw->irq_callback(hw); | 656 | return hw->irq_callback(hw); |
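The au1550_spi.c hunk above simply drops the stale pt_regs argument: interrupt handlers in this tree take only (irq, dev_id) and return irqreturn_t. The minimal form of the prototype, for reference:

	#include <linux/interrupt.h>

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		/* dev_id is whatever was passed to request_irq() */
		return IRQ_HANDLED;
	}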
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 253ed5682a6d..a86315a0c5b8 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi { | |||
42 | 42 | ||
43 | /* driver internal data */ | 43 | /* driver internal data */ |
44 | struct mpc52xx_psc __iomem *psc; | 44 | struct mpc52xx_psc __iomem *psc; |
45 | struct mpc52xx_psc_fifo __iomem *fifo; | ||
45 | unsigned int irq; | 46 | unsigned int irq; |
46 | u8 bits_per_word; | 47 | u8 bits_per_word; |
47 | u8 busy; | 48 | u8 busy; |
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
139 | { | 140 | { |
140 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); | 141 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); |
141 | struct mpc52xx_psc __iomem *psc = mps->psc; | 142 | struct mpc52xx_psc __iomem *psc = mps->psc; |
143 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
142 | unsigned rb = 0; /* number of bytes receieved */ | 144 | unsigned rb = 0; /* number of bytes receieved */ |
143 | unsigned sb = 0; /* number of bytes sent */ | 145 | unsigned sb = 0; /* number of bytes sent */ |
144 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; | 146 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; |
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
190 | out_8(&psc->mode, 0); | 192 | out_8(&psc->mode, 0); |
191 | } else { | 193 | } else { |
192 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 194 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
193 | out_be16(&psc->rfalarm, rfalarm); | 195 | out_be16(&fifo->rfalarm, rfalarm); |
194 | } | 196 | } |
195 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); | 197 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); |
196 | wait_for_completion(&mps->done); | 198 | wait_for_completion(&mps->done); |
197 | recv_at_once = in_be16(&psc->rfnum); | 199 | recv_at_once = in_be16(&fifo->rfnum); |
198 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); | 200 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); |
199 | 201 | ||
200 | send_at_once = recv_at_once; | 202 | send_at_once = recv_at_once; |
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) | |||
331 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | 333 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) |
332 | { | 334 | { |
333 | struct mpc52xx_psc __iomem *psc = mps->psc; | 335 | struct mpc52xx_psc __iomem *psc = mps->psc; |
336 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
334 | u32 mclken_div; | 337 | u32 mclken_div; |
335 | int ret = 0; | 338 | int ret = 0; |
336 | 339 | ||
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
346 | /* Disable interrupts, interrupts are based on alarm level */ | 349 | /* Disable interrupts, interrupts are based on alarm level */ |
347 | out_be16(&psc->mpc52xx_psc_imr, 0); | 350 | out_be16(&psc->mpc52xx_psc_imr, 0); |
348 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); | 351 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); |
349 | out_8(&psc->rfcntl, 0); | 352 | out_8(&fifo->rfcntl, 0); |
350 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 353 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
351 | 354 | ||
352 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ | 355 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ |
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
419 | ret = -EFAULT; | 422 | ret = -EFAULT; |
420 | goto free_master; | 423 | goto free_master; |
421 | } | 424 | } |
425 | /* On the 5200, fifo regs are immediately ajacent to the psc regs */ | ||
426 | mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); | ||
422 | 427 | ||
423 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", | 428 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", |
424 | mps); | 429 | mps); |
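The mpc52xx_psc_spi.c changes stop addressing FIFO registers through the PSC struct and instead derive a separate FIFO pointer, relying on the 5200 layout noted in the added comment: the FIFO block sits immediately after the PSC block. The pointer arithmetic in isolation (illustrative types, plain pointers standing in for __iomem):

	struct example_psc  { unsigned char regs[0x40]; };   /* stand-in PSC register block  */
	struct example_fifo { unsigned char regs[0x20]; };   /* stand-in FIFO register block */

	static struct example_fifo *example_fifo_base(struct example_psc *psc)
	{
		/* FIFO registers start right after the PSC registers */
		return (struct example_fifo *)((char *)psc + sizeof(*psc));
	}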
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index f7f8580edad8..71e881419cdd 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -344,12 +344,14 @@ static void bitbang_work(struct work_struct *work) | |||
344 | t->rx_dma = t->tx_dma = 0; | 344 | t->rx_dma = t->tx_dma = 0; |
345 | status = bitbang->txrx_bufs(spi, t); | 345 | status = bitbang->txrx_bufs(spi, t); |
346 | } | 346 | } |
347 | if (status > 0) | ||
348 | m->actual_length += status; | ||
347 | if (status != t->len) { | 349 | if (status != t->len) { |
348 | if (status > 0) | 350 | /* always report some kind of error */ |
349 | status = -EMSGSIZE; | 351 | if (status >= 0) |
352 | status = -EREMOTEIO; | ||
350 | break; | 353 | break; |
351 | } | 354 | } |
352 | m->actual_length += status; | ||
353 | status = 0; | 355 | status = 0; |
354 | 356 | ||
355 | /* protocol tweaks before next transfer */ | 357 | /* protocol tweaks before next transfer */ |
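The spi_bitbang.c change above fixes the accounting for short transfers: whatever bytes did move are now always added to m->actual_length, and a short (or zero-length) result is reported as -EREMOTEIO instead of -EMSGSIZE. The accounting rule, pulled out into a small helper purely for illustration:

	#include <errno.h>

	/* Illustrative helper (not the driver's code): count the bytes that
	 * actually moved, then report a short transfer as -EREMOTEIO.
	 */
	static int example_account(int status, int len, unsigned int *actual_length)
	{
		if (status > 0)
			*actual_length += status;
		if (status != len)
			return (status >= 0) ? -EREMOTEIO : status;
		return 0;
	}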
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 69f19f224875..3ab313ed441c 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig THERMAL | 5 | menuconfig THERMAL |
6 | bool "Generic Thermal sysfs driver" | 6 | bool "Generic Thermal sysfs driver" |
7 | select HWMON | ||
7 | default y | 8 | default y |
8 | help | 9 | help |
9 | Generic Thermal Sysfs driver offers a generic mechanism for | 10 | Generic Thermal Sysfs driver offers a generic mechanism for |
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal.c index 8b86e53ccf7a..41bd4c805ace 100644 --- a/drivers/thermal/thermal.c +++ b/drivers/thermal/thermal.c | |||
@@ -30,8 +30,10 @@ | |||
30 | #include <linux/idr.h> | 30 | #include <linux/idr.h> |
31 | #include <linux/thermal.h> | 31 | #include <linux/thermal.h> |
32 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
33 | #include <linux/hwmon.h> | ||
34 | #include <linux/hwmon-sysfs.h> | ||
33 | 35 | ||
34 | MODULE_AUTHOR("Zhang Rui") | 36 | MODULE_AUTHOR("Zhang Rui"); |
35 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); | 37 | MODULE_DESCRIPTION("Generic thermal management sysfs support"); |
36 | MODULE_LICENSE("GPL"); | 38 | MODULE_LICENSE("GPL"); |
37 | 39 | ||
@@ -56,6 +58,9 @@ static LIST_HEAD(thermal_tz_list); | |||
56 | static LIST_HEAD(thermal_cdev_list); | 58 | static LIST_HEAD(thermal_cdev_list); |
57 | static DEFINE_MUTEX(thermal_list_lock); | 59 | static DEFINE_MUTEX(thermal_list_lock); |
58 | 60 | ||
61 | static struct device *thermal_hwmon; | ||
62 | #define MAX_THERMAL_ZONES 10 | ||
63 | |||
59 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) | 64 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) |
60 | { | 65 | { |
61 | int err; | 66 | int err; |
@@ -87,7 +92,67 @@ static void release_idr(struct idr *idr, struct mutex *lock, int id) | |||
87 | mutex_unlock(lock); | 92 | mutex_unlock(lock); |
88 | } | 93 | } |
89 | 94 | ||
90 | /* sys I/F for thermal zone */ | 95 | /* hwmon sys I/F*/ |
96 | static ssize_t | ||
97 | name_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
98 | { | ||
99 | return sprintf(buf, "thermal_sys_class\n"); | ||
100 | } | ||
101 | |||
102 | static ssize_t | ||
103 | temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
104 | { | ||
105 | struct thermal_zone_device *tz; | ||
106 | struct sensor_device_attribute *sensor_attr | ||
107 | = to_sensor_dev_attr(attr); | ||
108 | |||
109 | list_for_each_entry(tz, &thermal_tz_list, node) | ||
110 | if (tz->id == sensor_attr->index) | ||
111 | return tz->ops->get_temp(tz, buf); | ||
112 | |||
113 | return -ENODEV; | ||
114 | } | ||
115 | |||
116 | static ssize_t | ||
117 | temp_crit_show(struct device *dev, struct device_attribute *attr, | ||
118 | char *buf) | ||
119 | { | ||
120 | struct thermal_zone_device *tz; | ||
121 | struct sensor_device_attribute *sensor_attr | ||
122 | = to_sensor_dev_attr(attr); | ||
123 | |||
124 | list_for_each_entry(tz, &thermal_tz_list, node) | ||
125 | if (tz->id == sensor_attr->index) | ||
126 | return tz->ops->get_trip_temp(tz, 0, buf); | ||
127 | |||
128 | return -ENODEV; | ||
129 | } | ||
130 | |||
131 | static DEVICE_ATTR(name, 0444, name_show, NULL); | ||
132 | static struct sensor_device_attribute sensor_attrs[] = { | ||
133 | SENSOR_ATTR(temp1_input, 0444, temp_input_show, NULL, 0), | ||
134 | SENSOR_ATTR(temp1_crit, 0444, temp_crit_show, NULL, 0), | ||
135 | SENSOR_ATTR(temp2_input, 0444, temp_input_show, NULL, 1), | ||
136 | SENSOR_ATTR(temp2_crit, 0444, temp_crit_show, NULL, 1), | ||
137 | SENSOR_ATTR(temp3_input, 0444, temp_input_show, NULL, 2), | ||
138 | SENSOR_ATTR(temp3_crit, 0444, temp_crit_show, NULL, 2), | ||
139 | SENSOR_ATTR(temp4_input, 0444, temp_input_show, NULL, 3), | ||
140 | SENSOR_ATTR(temp4_crit, 0444, temp_crit_show, NULL, 3), | ||
141 | SENSOR_ATTR(temp5_input, 0444, temp_input_show, NULL, 4), | ||
142 | SENSOR_ATTR(temp5_crit, 0444, temp_crit_show, NULL, 4), | ||
143 | SENSOR_ATTR(temp6_input, 0444, temp_input_show, NULL, 5), | ||
144 | SENSOR_ATTR(temp6_crit, 0444, temp_crit_show, NULL, 5), | ||
145 | SENSOR_ATTR(temp7_input, 0444, temp_input_show, NULL, 6), | ||
146 | SENSOR_ATTR(temp7_crit, 0444, temp_crit_show, NULL, 6), | ||
147 | SENSOR_ATTR(temp8_input, 0444, temp_input_show, NULL, 7), | ||
148 | SENSOR_ATTR(temp8_crit, 0444, temp_crit_show, NULL, 7), | ||
149 | SENSOR_ATTR(temp9_input, 0444, temp_input_show, NULL, 8), | ||
150 | SENSOR_ATTR(temp9_crit, 0444, temp_crit_show, NULL, 8), | ||
151 | SENSOR_ATTR(temp10_input, 0444, temp_input_show, NULL, 9), | ||
152 | SENSOR_ATTR(temp10_crit, 0444, temp_crit_show, NULL, 9), | ||
153 | }; | ||
154 | |||
155 | /* thermal zone sys I/F */ | ||
91 | 156 | ||
92 | #define to_thermal_zone(_dev) \ | 157 | #define to_thermal_zone(_dev) \ |
93 | container_of(_dev, struct thermal_zone_device, device) | 158 | container_of(_dev, struct thermal_zone_device, device) |
@@ -214,7 +279,7 @@ do { \ | |||
214 | device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \ | 279 | device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \ |
215 | } while (0) | 280 | } while (0) |
216 | 281 | ||
217 | /* sys I/F for cooling device */ | 282 | /* cooling device sys I/F */ |
218 | #define to_cooling_device(_dev) \ | 283 | #define to_cooling_device(_dev) \ |
219 | container_of(_dev, struct thermal_cooling_device, device) | 284 | container_of(_dev, struct thermal_cooling_device, device) |
220 | 285 | ||
@@ -447,6 +512,9 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type, | |||
447 | struct thermal_zone_device *pos; | 512 | struct thermal_zone_device *pos; |
448 | int result; | 513 | int result; |
449 | 514 | ||
515 | if (!type) | ||
516 | return ERR_PTR(-EINVAL); | ||
517 | |||
450 | if (strlen(type) >= THERMAL_NAME_LENGTH) | 518 | if (strlen(type) >= THERMAL_NAME_LENGTH) |
451 | return ERR_PTR(-EINVAL); | 519 | return ERR_PTR(-EINVAL); |
452 | 520 | ||
@@ -477,11 +545,9 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type, | |||
477 | } | 545 | } |
478 | 546 | ||
479 | /* sys I/F */ | 547 | /* sys I/F */ |
480 | if (type) { | 548 | result = device_create_file(&cdev->device, &dev_attr_cdev_type); |
481 | result = device_create_file(&cdev->device, &dev_attr_cdev_type); | 549 | if (result) |
482 | if (result) | 550 | goto unregister; |
483 | goto unregister; | ||
484 | } | ||
485 | 551 | ||
486 | result = device_create_file(&cdev->device, &dev_attr_max_state); | 552 | result = device_create_file(&cdev->device, &dev_attr_max_state); |
487 | if (result) | 553 | if (result) |
@@ -547,8 +613,8 @@ void thermal_cooling_device_unregister(struct | |||
547 | tz->ops->unbind(tz, cdev); | 613 | tz->ops->unbind(tz, cdev); |
548 | } | 614 | } |
549 | mutex_unlock(&thermal_list_lock); | 615 | mutex_unlock(&thermal_list_lock); |
550 | if (cdev->type[0]) | 616 | |
551 | device_remove_file(&cdev->device, &dev_attr_cdev_type); | 617 | device_remove_file(&cdev->device, &dev_attr_cdev_type); |
552 | device_remove_file(&cdev->device, &dev_attr_max_state); | 618 | device_remove_file(&cdev->device, &dev_attr_max_state); |
553 | device_remove_file(&cdev->device, &dev_attr_cur_state); | 619 | device_remove_file(&cdev->device, &dev_attr_cur_state); |
554 | 620 | ||
@@ -580,6 +646,9 @@ struct thermal_zone_device *thermal_zone_device_register(char *type, | |||
580 | int result; | 646 | int result; |
581 | int count; | 647 | int count; |
582 | 648 | ||
649 | if (!type) | ||
650 | return ERR_PTR(-EINVAL); | ||
651 | |||
583 | if (strlen(type) >= THERMAL_NAME_LENGTH) | 652 | if (strlen(type) >= THERMAL_NAME_LENGTH) |
584 | return ERR_PTR(-EINVAL); | 653 | return ERR_PTR(-EINVAL); |
585 | 654 | ||
@@ -601,6 +670,13 @@ struct thermal_zone_device *thermal_zone_device_register(char *type, | |||
601 | kfree(tz); | 670 | kfree(tz); |
602 | return ERR_PTR(result); | 671 | return ERR_PTR(result); |
603 | } | 672 | } |
673 | if (tz->id >= MAX_THERMAL_ZONES) { | ||
674 | printk(KERN_ERR PREFIX | ||
675 | "Too many thermal zones\n"); | ||
676 | release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); | ||
677 | kfree(tz); | ||
678 | return ERR_PTR(-EINVAL); | ||
679 | } | ||
604 | 680 | ||
605 | strcpy(tz->type, type); | 681 | strcpy(tz->type, type); |
606 | tz->ops = ops; | 682 | tz->ops = ops; |
@@ -615,13 +691,28 @@ struct thermal_zone_device *thermal_zone_device_register(char *type, | |||
615 | return ERR_PTR(result); | 691 | return ERR_PTR(result); |
616 | } | 692 | } |
617 | 693 | ||
618 | /* sys I/F */ | 694 | /* hwmon sys I/F */ |
619 | if (type) { | 695 | result = device_create_file(thermal_hwmon, |
620 | result = device_create_file(&tz->device, &dev_attr_type); | 696 | &sensor_attrs[tz->id * 2].dev_attr); |
621 | if (result) | 697 | if (result) |
622 | goto unregister; | 698 | goto unregister; |
699 | |||
700 | if (trips > 0) { | ||
701 | char buf[40]; | ||
702 | result = tz->ops->get_trip_type(tz, 0, buf); | ||
703 | if (result > 0 && !strcmp(buf, "critical\n")) { | ||
704 | result = device_create_file(thermal_hwmon, | ||
705 | &sensor_attrs[tz->id * 2 + 1].dev_attr); | ||
706 | if (result) | ||
707 | goto unregister; | ||
708 | } | ||
623 | } | 709 | } |
624 | 710 | ||
711 | /* sys I/F */ | ||
712 | result = device_create_file(&tz->device, &dev_attr_type); | ||
713 | if (result) | ||
714 | goto unregister; | ||
715 | |||
625 | result = device_create_file(&tz->device, &dev_attr_temp); | 716 | result = device_create_file(&tz->device, &dev_attr_temp); |
626 | if (result) | 717 | if (result) |
627 | goto unregister; | 718 | goto unregister; |
@@ -687,8 +778,17 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
687 | tz->ops->unbind(tz, cdev); | 778 | tz->ops->unbind(tz, cdev); |
688 | mutex_unlock(&thermal_list_lock); | 779 | mutex_unlock(&thermal_list_lock); |
689 | 780 | ||
690 | if (tz->type[0]) | 781 | device_remove_file(thermal_hwmon, |
691 | device_remove_file(&tz->device, &dev_attr_type); | 782 | &sensor_attrs[tz->id * 2].dev_attr); |
783 | if (tz->trips > 0) { | ||
784 | char buf[40]; | ||
785 | if (tz->ops->get_trip_type(tz, 0, buf) > 0) | ||
786 | if (!strcmp(buf, "critical\n")) | ||
787 | device_remove_file(thermal_hwmon, | ||
788 | &sensor_attrs[tz->id * 2 + 1].dev_attr); | ||
789 | } | ||
790 | |||
791 | device_remove_file(&tz->device, &dev_attr_type); | ||
692 | device_remove_file(&tz->device, &dev_attr_temp); | 792 | device_remove_file(&tz->device, &dev_attr_temp); |
693 | if (tz->ops->get_mode) | 793 | if (tz->ops->get_mode) |
694 | device_remove_file(&tz->device, &dev_attr_mode); | 794 | device_remove_file(&tz->device, &dev_attr_mode); |
@@ -705,6 +805,19 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
705 | 805 | ||
706 | EXPORT_SYMBOL(thermal_zone_device_unregister); | 806 | EXPORT_SYMBOL(thermal_zone_device_unregister); |
707 | 807 | ||
808 | static void thermal_exit(void) | ||
809 | { | ||
810 | if (thermal_hwmon) { | ||
811 | device_remove_file(thermal_hwmon, &dev_attr_name); | ||
812 | hwmon_device_unregister(thermal_hwmon); | ||
813 | } | ||
814 | class_unregister(&thermal_class); | ||
815 | idr_destroy(&thermal_tz_idr); | ||
816 | idr_destroy(&thermal_cdev_idr); | ||
817 | mutex_destroy(&thermal_idr_lock); | ||
818 | mutex_destroy(&thermal_list_lock); | ||
819 | } | ||
820 | |||
708 | static int __init thermal_init(void) | 821 | static int __init thermal_init(void) |
709 | { | 822 | { |
710 | int result = 0; | 823 | int result = 0; |
@@ -716,16 +829,20 @@ static int __init thermal_init(void) | |||
716 | mutex_destroy(&thermal_idr_lock); | 829 | mutex_destroy(&thermal_idr_lock); |
717 | mutex_destroy(&thermal_list_lock); | 830 | mutex_destroy(&thermal_list_lock); |
718 | } | 831 | } |
719 | return result; | ||
720 | } | ||
721 | 832 | ||
722 | static void __exit thermal_exit(void) | 833 | thermal_hwmon = hwmon_device_register(NULL); |
723 | { | 834 | if (IS_ERR(thermal_hwmon)) { |
724 | class_unregister(&thermal_class); | 835 | result = PTR_ERR(thermal_hwmon); |
725 | idr_destroy(&thermal_tz_idr); | 836 | thermal_hwmon = NULL; |
726 | idr_destroy(&thermal_cdev_idr); | 837 | printk(KERN_ERR PREFIX |
727 | mutex_destroy(&thermal_idr_lock); | 838 | "unable to register hwmon device\n"); |
728 | mutex_destroy(&thermal_list_lock); | 839 | thermal_exit(); |
840 | return result; | ||
841 | } | ||
842 | |||
843 | result = device_create_file(thermal_hwmon, &dev_attr_name); | ||
844 | |||
845 | return result; | ||
729 | } | 846 | } |
730 | 847 | ||
731 | subsys_initcall(thermal_init); | 848 | subsys_initcall(thermal_init); |
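The thermal.c hunks above export every registered thermal zone through a single hwmon class device: the zone whose IDR id is N backs the temp<N+1>_input attribute, and temp<N+1>_crit is added only when trip point 0 reports itself as "critical". A small userspace sketch of that naming rule follows; the hwmon sysfs path and the helper name are assumptions made for illustration, not part of the patch.

#include <stdio.h>

/* Illustration only: derive the hwmon attribute paths exported above for
 * a given thermal zone id. The hwmon0 index is an assumed example; the
 * real index depends on registration order. */
static void zone_attr_paths(int zone_id, char *input, char *crit, size_t len)
{
	snprintf(input, len, "/sys/class/hwmon/hwmon0/temp%d_input", zone_id + 1);
	snprintf(crit, len, "/sys/class/hwmon/hwmon0/temp%d_crit", zone_id + 1);
}

int main(void)
{
	char input[128], crit[128];

	zone_attr_paths(0, input, crit, sizeof(input));
	printf("%s\n%s\n", input, crit);	/* temp1_input and temp1_crit for zone id 0 */
	return 0;
}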
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 5c33cdb9cac7..a2b0aa48b8ea 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS | |||
87 | If you are unsure about this, say N here. | 87 | If you are unsure about this, say N here. |
88 | 88 | ||
89 | config USB_SUSPEND | 89 | config USB_SUSPEND |
90 | bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" | 90 | bool "USB selective suspend/resume and wakeup" |
91 | depends on USB && PM && EXPERIMENTAL | 91 | depends on USB && PM |
92 | help | 92 | help |
93 | If you say Y here, you can use driver calls or the sysfs | 93 | If you say Y here, you can use driver calls or the sysfs |
94 | "power/state" file to suspend or resume individual USB | 94 | "power/level" file to suspend or resume individual USB |
95 | peripherals. | 95 | peripherals and to enable or disable autosuspend (see |
96 | Documentation/usb/power-management.txt for more details). | ||
96 | 97 | ||
97 | Also, USB "remote wakeup" signaling is supported, whereby some | 98 | Also, USB "remote wakeup" signaling is supported, whereby some |
98 | USB devices (like keyboards and network adapters) can wake up | 99 | USB devices (like keyboards and network adapters) can wake up |
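As a concrete illustration of the help text above, autosuspend for a single device can be toggled by writing to its power/level file. The device path below is a made-up example and "auto" is only one of the accepted values; Documentation/usb/power-management.txt describes the full interface.

#include <stdio.h>

/* Sketch only: enable autosuspend for one (hypothetical) device by
 * writing "auto" to its power/level attribute. */
int main(void)
{
	const char *path = "/sys/bus/usb/devices/1-1/power/level";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("auto", f);
	fclose(f);
	return 0;
}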
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f90ab5e94c58..d9d1eb19f2a1 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -28,35 +28,38 @@ | |||
28 | * devices is broken... | 28 | * devices is broken... |
29 | */ | 29 | */ |
30 | static const struct usb_device_id usb_quirk_list[] = { | 30 | static const struct usb_device_id usb_quirk_list[] = { |
31 | /* Action Semiconductor flash disk */ | ||
32 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255}, | ||
33 | |||
34 | /* CBM - Flash disk */ | 31 | /* CBM - Flash disk */ |
35 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 32 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
33 | |||
36 | /* HP 5300/5370C scanner */ | 34 | /* HP 5300/5370C scanner */ |
37 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, | 35 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = |
36 | USB_QUIRK_STRING_FETCH_255 }, | ||
38 | 37 | ||
39 | /* Creative SB Audigy 2 NX */ | 38 | /* Creative SB Audigy 2 NX */ |
40 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 39 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
41 | 40 | ||
41 | /* Philips PSC805 audio device */ | ||
42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
43 | |||
42 | /* Roland SC-8820 */ | 44 | /* Roland SC-8820 */ |
43 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
44 | 46 | ||
45 | /* Edirol SD-20 */ | 47 | /* Edirol SD-20 */ |
46 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, | 48 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
47 | 49 | ||
48 | /* INTEL VALUE SSD */ | ||
49 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
50 | |||
51 | /* M-Systems Flash Disk Pioneers */ | 50 | /* M-Systems Flash Disk Pioneers */ |
52 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 52 | ||
54 | /* Philips PSC805 audio device */ | 53 | /* Action Semiconductor flash disk */ |
55 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 54 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
55 | USB_QUIRK_STRING_FETCH_255 }, | ||
56 | 56 | ||
57 | /* SKYMEDI USB_DRIVE */ | 57 | /* SKYMEDI USB_DRIVE */ |
58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, | 58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 59 | ||
60 | /* INTEL VALUE SSD */ | ||
61 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
62 | |||
60 | { } /* terminating entry must be last */ | 63 | { } /* terminating entry must be last */ |
61 | }; | 64 | }; |
62 | 65 | ||
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e984060c984..1f0db51190cc 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, | |||
99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); | 99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * usb_altnum_to_altsetting - get the altsetting structure with a given | 102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. |
103 | * alternate setting number. | ||
104 | * @intf: the interface containing the altsetting in question | 103 | * @intf: the interface containing the altsetting in question |
105 | * @altnum: the desired alternate setting number | 104 | * @altnum: the desired alternate setting number |
106 | * | 105 | * |
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void) | |||
234 | * singlethreaded. Its job doesn't justify running on more | 233 | * singlethreaded. Its job doesn't justify running on more |
235 | * than one CPU. | 234 | * than one CPU. |
236 | */ | 235 | */ |
237 | ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); | 236 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); |
238 | if (!ksuspend_usb_wq) | 237 | if (!ksuspend_usb_wq) |
239 | return -ENOMEM; | 238 | return -ENOMEM; |
240 | return 0; | 239 | return 0; |
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf); | |||
442 | */ | 441 | */ |
443 | 442 | ||
444 | /** | 443 | /** |
445 | * usb_lock_device_for_reset - cautiously acquire the lock for a | 444 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure |
446 | * usb device structure | ||
447 | * @udev: device that's being locked | 445 | * @udev: device that's being locked |
448 | * @iface: interface bound to the driver making the request (optional) | 446 | * @iface: interface bound to the driver making the request (optional) |
449 | * | 447 | * |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index c13955164686..6f45dd669b33 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -131,7 +131,7 @@ config USB_ATMEL_USBA | |||
131 | 131 | ||
132 | config USB_GADGET_FSL_USB2 | 132 | config USB_GADGET_FSL_USB2 |
133 | boolean "Freescale Highspeed USB DR Peripheral Controller" | 133 | boolean "Freescale Highspeed USB DR Peripheral Controller" |
134 | depends on MPC834x || PPC_MPC831x | 134 | depends on FSL_SOC |
135 | select USB_GADGET_DUALSPEED | 135 | select USB_GADGET_DUALSPEED |
136 | help | 136 | help |
137 | Some of Freescale PowerPC processors have a High Speed | 137 | Some of Freescale PowerPC processors have a High Speed |
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4f6bfa100f2a..2c32bd08ee7d 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
@@ -92,7 +92,6 @@ struct printer_dev { | |||
92 | u8 *current_rx_buf; | 92 | u8 *current_rx_buf; |
93 | u8 printer_status; | 93 | u8 printer_status; |
94 | u8 reset_printer; | 94 | u8 reset_printer; |
95 | struct class_device *printer_class_dev; | ||
96 | struct cdev printer_cdev; | 95 | struct cdev printer_cdev; |
97 | struct device *pdev; | 96 | struct device *pdev; |
98 | u8 printer_cdev_open; | 97 | u8 printer_cdev_open; |
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 4402d6f042d9..096c41cc40d1 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c | |||
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0"; | |||
103 | #error "Can't configure both IXP and PXA" | 103 | #error "Can't configure both IXP and PXA" |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* IXP doesn't yet support <linux/clk.h> */ | ||
107 | #define clk_get(dev,name) NULL | ||
108 | #define clk_enable(clk) do { } while (0) | ||
109 | #define clk_disable(clk) do { } while (0) | ||
110 | #define clk_put(clk) do { } while (0) | ||
111 | |||
106 | #endif | 112 | #endif |
107 | 113 | ||
108 | #include "pxa2xx_udc.h" | 114 | #include "pxa2xx_udc.h" |
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *); | |||
934 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not | 940 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not |
935 | * in active use. | 941 | * in active use. |
936 | */ | 942 | */ |
937 | static int pullup(struct pxa2xx_udc *udc, int is_active) | 943 | static int pullup(struct pxa2xx_udc *udc) |
938 | { | 944 | { |
939 | is_active = is_active && udc->vbus && udc->pullup; | 945 | int is_active = udc->vbus && udc->pullup && !udc->suspended; |
940 | DMSG("%s\n", is_active ? "active" : "inactive"); | 946 | DMSG("%s\n", is_active ? "active" : "inactive"); |
941 | if (is_active) | 947 | if (is_active) { |
942 | udc_enable(udc); | 948 | if (!udc->active) { |
943 | else { | 949 | udc->active = 1; |
944 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | 950 | /* Enable clock for USB device */ |
945 | DMSG("disconnect %s\n", udc->driver | 951 | clk_enable(udc->clk); |
946 | ? udc->driver->driver.name | 952 | udc_enable(udc); |
947 | : "(no driver)"); | ||
948 | stop_activity(udc, udc->driver); | ||
949 | } | 953 | } |
950 | udc_disable(udc); | 954 | } else { |
955 | if (udc->active) { | ||
956 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | ||
957 | DMSG("disconnect %s\n", udc->driver | ||
958 | ? udc->driver->driver.name | ||
959 | : "(no driver)"); | ||
960 | stop_activity(udc, udc->driver); | ||
961 | } | ||
962 | udc_disable(udc); | ||
963 | /* Disable clock for USB device */ | ||
964 | clk_disable(udc->clk); | ||
965 | udc->active = 0; | ||
966 | } | ||
967 | |||
951 | } | 968 | } |
952 | return 0; | 969 | return 0; |
953 | } | 970 | } |
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) | |||
958 | struct pxa2xx_udc *udc; | 975 | struct pxa2xx_udc *udc; |
959 | 976 | ||
960 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); | 977 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); |
961 | udc->vbus = is_active = (is_active != 0); | 978 | udc->vbus = (is_active != 0); |
962 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); | 979 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); |
963 | pullup(udc, is_active); | 980 | pullup(udc); |
964 | return 0; | 981 | return 0; |
965 | } | 982 | } |
966 | 983 | ||
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) | |||
975 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 992 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
976 | return -EOPNOTSUPP; | 993 | return -EOPNOTSUPP; |
977 | 994 | ||
978 | is_active = (is_active != 0); | 995 | udc->pullup = (is_active != 0); |
979 | udc->pullup = is_active; | 996 | pullup(udc); |
980 | pullup(udc, is_active); | ||
981 | return 0; | 997 | return 0; |
982 | } | 998 | } |
983 | 999 | ||
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = { | |||
997 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | 1013 | #ifdef CONFIG_USB_GADGET_DEBUG_FS |
998 | 1014 | ||
999 | static int | 1015 | static int |
1000 | udc_seq_show(struct seq_file *m, void *d) | 1016 | udc_seq_show(struct seq_file *m, void *_d) |
1001 | { | 1017 | { |
1002 | struct pxa2xx_udc *dev = m->private; | 1018 | struct pxa2xx_udc *dev = m->private; |
1003 | unsigned long flags; | 1019 | unsigned long flags; |
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev) | |||
1146 | 1162 | ||
1147 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1163 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1148 | 1164 | ||
1149 | #ifdef CONFIG_ARCH_PXA | ||
1150 | /* Disable clock for USB device */ | ||
1151 | clk_disable(dev->clk); | ||
1152 | #endif | ||
1153 | |||
1154 | ep0_idle (dev); | 1165 | ep0_idle (dev); |
1155 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 1166 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
1156 | } | 1167 | } |
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev) | |||
1191 | { | 1202 | { |
1192 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1203 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1193 | 1204 | ||
1194 | #ifdef CONFIG_ARCH_PXA | ||
1195 | /* Enable clock for USB device */ | ||
1196 | clk_enable(dev->clk); | ||
1197 | #endif | ||
1198 | |||
1199 | /* try to clear these bits before we enable the udc */ | 1205 | /* try to clear these bits before we enable the udc */ |
1200 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); | 1206 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); |
1201 | 1207 | ||
@@ -1286,7 +1292,7 @@ fail: | |||
1286 | * for set_configuration as well as eventual disconnect. | 1292 | * for set_configuration as well as eventual disconnect. |
1287 | */ | 1293 | */ |
1288 | DMSG("registered gadget driver '%s'\n", driver->driver.name); | 1294 | DMSG("registered gadget driver '%s'\n", driver->driver.name); |
1289 | pullup(dev, 1); | 1295 | pullup(dev); |
1290 | dump_state(dev); | 1296 | dump_state(dev); |
1291 | return 0; | 1297 | return 0; |
1292 | } | 1298 | } |
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1329 | return -EINVAL; | 1335 | return -EINVAL; |
1330 | 1336 | ||
1331 | local_irq_disable(); | 1337 | local_irq_disable(); |
1332 | pullup(dev, 0); | 1338 | dev->pullup = 0; |
1339 | pullup(dev); | ||
1333 | stop_activity(dev, driver); | 1340 | stop_activity(dev, driver); |
1334 | local_irq_enable(); | 1341 | local_irq_enable(); |
1335 | 1342 | ||
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev) | |||
2131 | if (irq < 0) | 2138 | if (irq < 0) |
2132 | return -ENODEV; | 2139 | return -ENODEV; |
2133 | 2140 | ||
2134 | #ifdef CONFIG_ARCH_PXA | ||
2135 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); | 2141 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); |
2136 | if (IS_ERR(dev->clk)) { | 2142 | if (IS_ERR(dev->clk)) { |
2137 | retval = PTR_ERR(dev->clk); | 2143 | retval = PTR_ERR(dev->clk); |
2138 | goto err_clk; | 2144 | goto err_clk; |
2139 | } | 2145 | } |
2140 | #endif | ||
2141 | 2146 | ||
2142 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, | 2147 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, |
2143 | dev->has_cfr ? "" : " (!cfr)", | 2148 | dev->has_cfr ? "" : " (!cfr)", |
@@ -2250,10 +2255,8 @@ lubbock_fail0: | |||
2250 | if (dev->mach->gpio_vbus) | 2255 | if (dev->mach->gpio_vbus) |
2251 | gpio_free(dev->mach->gpio_vbus); | 2256 | gpio_free(dev->mach->gpio_vbus); |
2252 | err_gpio_vbus: | 2257 | err_gpio_vbus: |
2253 | #ifdef CONFIG_ARCH_PXA | ||
2254 | clk_put(dev->clk); | 2258 | clk_put(dev->clk); |
2255 | err_clk: | 2259 | err_clk: |
2256 | #endif | ||
2257 | return retval; | 2260 | return retval; |
2258 | } | 2261 | } |
2259 | 2262 | ||
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2269 | if (dev->driver) | 2272 | if (dev->driver) |
2270 | return -EBUSY; | 2273 | return -EBUSY; |
2271 | 2274 | ||
2272 | udc_disable(dev); | 2275 | dev->pullup = 0; |
2276 | pullup(dev); | ||
2277 | |||
2273 | remove_debug_files(dev); | 2278 | remove_debug_files(dev); |
2274 | 2279 | ||
2275 | if (dev->got_irq) { | 2280 | if (dev->got_irq) { |
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2289 | if (dev->mach->gpio_pullup) | 2294 | if (dev->mach->gpio_pullup) |
2290 | gpio_free(dev->mach->gpio_pullup); | 2295 | gpio_free(dev->mach->gpio_pullup); |
2291 | 2296 | ||
2292 | #ifdef CONFIG_ARCH_PXA | ||
2293 | clk_put(dev->clk); | 2297 | clk_put(dev->clk); |
2294 | #endif | ||
2295 | 2298 | ||
2296 | platform_set_drvdata(pdev, NULL); | 2299 | platform_set_drvdata(pdev, NULL); |
2297 | the_controller = NULL; | 2300 | the_controller = NULL; |
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2317 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | 2320 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) |
2318 | { | 2321 | { |
2319 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2322 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2323 | unsigned long flags; | ||
2320 | 2324 | ||
2321 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 2325 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
2322 | WARN("USB host won't detect disconnect!\n"); | 2326 | WARN("USB host won't detect disconnect!\n"); |
2323 | pullup(udc, 0); | 2327 | udc->suspended = 1; |
2328 | |||
2329 | local_irq_save(flags); | ||
2330 | pullup(udc); | ||
2331 | local_irq_restore(flags); | ||
2324 | 2332 | ||
2325 | return 0; | 2333 | return 0; |
2326 | } | 2334 | } |
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | |||
2328 | static int pxa2xx_udc_resume(struct platform_device *dev) | 2336 | static int pxa2xx_udc_resume(struct platform_device *dev) |
2329 | { | 2337 | { |
2330 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2338 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2339 | unsigned long flags; | ||
2331 | 2340 | ||
2332 | pullup(udc, 1); | 2341 | udc->suspended = 0; |
2342 | local_irq_save(flags); | ||
2343 | pullup(udc); | ||
2344 | local_irq_restore(flags); | ||
2333 | 2345 | ||
2334 | return 0; | 2346 | return 0; |
2335 | } | 2347 | } |
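The reworked pullup() above folds VBUS presence, the gadget's pull-up request and the new suspended flag into one activity decision, and the 48 MHz clock now follows that decision instead of being toggled inside udc_enable()/udc_disable(). A compilable restatement of the predicate, purely for illustration (the helper name is invented):

#include <stdio.h>

/* Not driver code: just the condition the reworked pullup() acts on.
 * When it becomes true the driver enables the clock and the UDC; when
 * it becomes false it stops activity, then disables the UDC and clock. */
static int udc_should_be_active(int vbus, int pullup_requested, int suspended)
{
	return vbus && pullup_requested && !suspended;
}

int main(void)
{
	printf("%d\n", udc_should_be_active(1, 1, 0));	/* 1: connect */
	printf("%d\n", udc_should_be_active(1, 1, 1));	/* 0: suspend wins */
	printf("%d\n", udc_should_be_active(0, 1, 0));	/* 0: no VBUS */
	return 0;
}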
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index b67e3ff5e4eb..e2c19e88c875 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h | |||
@@ -119,7 +119,9 @@ struct pxa2xx_udc { | |||
119 | has_cfr : 1, | 119 | has_cfr : 1, |
120 | req_pending : 1, | 120 | req_pending : 1, |
121 | req_std : 1, | 121 | req_std : 1, |
122 | req_config : 1; | 122 | req_config : 1, |
123 | suspended : 1, | ||
124 | active : 1; | ||
123 | 125 | ||
124 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) | 126 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) |
125 | struct timer_list timer; | 127 | struct timer_list timer; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b8ad55aff842..46ee7f4c0912 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -281,23 +281,44 @@ static void ehci_iaa_watchdog(unsigned long param) | |||
281 | { | 281 | { |
282 | struct ehci_hcd *ehci = (struct ehci_hcd *) param; | 282 | struct ehci_hcd *ehci = (struct ehci_hcd *) param; |
283 | unsigned long flags; | 283 | unsigned long flags; |
284 | u32 status, cmd; | ||
285 | 284 | ||
286 | spin_lock_irqsave (&ehci->lock, flags); | 285 | spin_lock_irqsave (&ehci->lock, flags); |
287 | WARN_ON(!ehci->reclaim); | ||
288 | 286 | ||
289 | status = ehci_readl(ehci, &ehci->regs->status); | 287 | /* Lost IAA irqs wedge things badly; seen first with a vt8235. |
290 | cmd = ehci_readl(ehci, &ehci->regs->command); | 288 | * So we need this watchdog, but must protect it against both |
291 | ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd); | 289 | * (a) SMP races against real IAA firing and retriggering, and |
292 | 290 | * (b) clean HC shutdown, when IAA watchdog was pending. | |
293 | /* lost IAA irqs wedge things badly; seen first with a vt8235 */ | 291 | */ |
294 | if (ehci->reclaim) { | 292 | if (ehci->reclaim |
295 | if (status & STS_IAA) { | 293 | && !timer_pending(&ehci->iaa_watchdog) |
296 | ehci_vdbg (ehci, "lost IAA\n"); | 294 | && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) { |
295 | u32 cmd, status; | ||
296 | |||
297 | /* If we get here, IAA is *REALLY* late. It's barely | ||
298 | * conceivable that the system is so busy that CMD_IAAD | ||
299 | * is still legitimately set, so let's be sure it's | ||
300 | * clear before we read STS_IAA. (The HC should clear | ||
301 | * CMD_IAAD when it sets STS_IAA.) | ||
302 | */ | ||
303 | cmd = ehci_readl(ehci, &ehci->regs->command); | ||
304 | if (cmd & CMD_IAAD) | ||
305 | ehci_writel(ehci, cmd & ~CMD_IAAD, | ||
306 | &ehci->regs->command); | ||
307 | |||
308 | /* If IAA is set here it either legitimately triggered | ||
309 | * before we cleared IAAD above (but _way_ late, so we'll | ||
310 | * still count it as lost) ... or a silicon erratum: | ||
311 | * - VIA seems to set IAA without triggering the IRQ; | ||
312 | * - IAAD potentially cleared without setting IAA. | ||
313 | */ | ||
314 | status = ehci_readl(ehci, &ehci->regs->status); | ||
315 | if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { | ||
297 | COUNT (ehci->stats.lost_iaa); | 316 | COUNT (ehci->stats.lost_iaa); |
298 | ehci_writel(ehci, STS_IAA, &ehci->regs->status); | 317 | ehci_writel(ehci, STS_IAA, &ehci->regs->status); |
299 | } | 318 | } |
300 | ehci_writel(ehci, cmd & ~CMD_IAAD, &ehci->regs->command); | 319 | |
320 | ehci_vdbg(ehci, "IAA watchdog: status %x cmd %x\n", | ||
321 | status, cmd); | ||
301 | end_unlink_async(ehci); | 322 | end_unlink_async(ehci); |
302 | } | 323 | } |
303 | 324 | ||
@@ -631,7 +652,7 @@ static int ehci_run (struct usb_hcd *hcd) | |||
631 | static irqreturn_t ehci_irq (struct usb_hcd *hcd) | 652 | static irqreturn_t ehci_irq (struct usb_hcd *hcd) |
632 | { | 653 | { |
633 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); | 654 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); |
634 | u32 status, pcd_status = 0; | 655 | u32 status, pcd_status = 0, cmd; |
635 | int bh; | 656 | int bh; |
636 | 657 | ||
637 | spin_lock (&ehci->lock); | 658 | spin_lock (&ehci->lock); |
@@ -652,7 +673,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) | |||
652 | 673 | ||
653 | /* clear (just) interrupts */ | 674 | /* clear (just) interrupts */ |
654 | ehci_writel(ehci, status, &ehci->regs->status); | 675 | ehci_writel(ehci, status, &ehci->regs->status); |
655 | ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ | 676 | cmd = ehci_readl(ehci, &ehci->regs->command); |
656 | bh = 0; | 677 | bh = 0; |
657 | 678 | ||
658 | #ifdef EHCI_VERBOSE_DEBUG | 679 | #ifdef EHCI_VERBOSE_DEBUG |
@@ -673,8 +694,17 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) | |||
673 | 694 | ||
674 | /* complete the unlinking of some qh [4.15.2.3] */ | 695 | /* complete the unlinking of some qh [4.15.2.3] */ |
675 | if (status & STS_IAA) { | 696 | if (status & STS_IAA) { |
676 | COUNT (ehci->stats.reclaim); | 697 | /* guard against (alleged) silicon errata */ |
677 | end_unlink_async(ehci); | 698 | if (cmd & CMD_IAAD) { |
699 | ehci_writel(ehci, cmd & ~CMD_IAAD, | ||
700 | &ehci->regs->command); | ||
701 | ehci_dbg(ehci, "IAA with IAAD still set?\n"); | ||
702 | } | ||
703 | if (ehci->reclaim) { | ||
704 | COUNT(ehci->stats.reclaim); | ||
705 | end_unlink_async(ehci); | ||
706 | } else | ||
707 | ehci_dbg(ehci, "IAA with nothing to reclaim?\n"); | ||
678 | } | 708 | } |
679 | 709 | ||
680 | /* remote wakeup [4.3.1] */ | 710 | /* remote wakeup [4.3.1] */ |
@@ -781,7 +811,7 @@ static int ehci_urb_enqueue ( | |||
781 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) | 811 | static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) |
782 | { | 812 | { |
783 | /* failfast */ | 813 | /* failfast */ |
784 | if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) | 814 | if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim) |
785 | end_unlink_async(ehci); | 815 | end_unlink_async(ehci); |
786 | 816 | ||
787 | /* if it's not linked then there's nothing to do */ | 817 | /* if it's not linked then there's nothing to do */ |
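The watchdog rework above is careful about ordering: it clears the CMD_IAAD doorbell first, then samples STS_IAA, and counts a lost IAA either when the status bit is set or when the controller had already cleared the doorbell on its own. A standalone sketch of that decision; the bit values are local stand-ins, not the real EHCI register layout.

#include <stdio.h>

/* Stand-in masks for illustration only. */
#define DEMO_CMD_IAAD	(1u << 6)
#define DEMO_STS_IAA	(1u << 5)

/* Mirrors the watchdog's test on the command value read before IAAD was
 * cleared: a set status bit, or an already-clear doorbell, means the IAA
 * interrupt was lost and gets counted and acknowledged. */
static int iaa_lost(unsigned int status, unsigned int cmd)
{
	return (status & DEMO_STS_IAA) || !(cmd & DEMO_CMD_IAAD);
}

int main(void)
{
	printf("%d\n", iaa_lost(DEMO_STS_IAA, DEMO_CMD_IAAD));	/* 1: status already set */
	printf("%d\n", iaa_lost(0, 0));				/* 1: doorbell vanished */
	printf("%d\n", iaa_lost(0, DEMO_CMD_IAAD));		/* 0: merely very late */
	return 0;
}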
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 776a97f33914..2e49de820b14 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
319 | if (likely (last->urb != urb)) { | 319 | if (likely (last->urb != urb)) { |
320 | ehci_urb_done(ehci, last->urb, last_status); | 320 | ehci_urb_done(ehci, last->urb, last_status); |
321 | count++; | 321 | count++; |
322 | last_status = -EINPROGRESS; | ||
322 | } | 323 | } |
323 | ehci_qtd_free (ehci, last); | 324 | ehci_qtd_free (ehci, last); |
324 | last = NULL; | 325 | last = NULL; |
325 | last_status = -EINPROGRESS; | ||
326 | } | 326 | } |
327 | 327 | ||
328 | /* ignore urbs submitted during completions we reported */ | 328 | /* ignore urbs submitted during completions we reported */ |
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0130fd8571e4..d7071c855758 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
911 | buf[0] = 0; | 911 | buf[0] = 0; |
912 | 912 | ||
913 | for (i = 0; i < ports; i++) { | 913 | for (i = 0; i < ports; i++) { |
914 | u32 status = isp116x->rhport[i] = | 914 | u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); |
915 | isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); | ||
916 | 915 | ||
917 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | 916 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
918 | | RH_PS_OCIC | RH_PS_PRSC)) { | 917 | | RH_PS_OCIC | RH_PS_PRSC)) { |
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1031 | DBG("GetPortStatus\n"); | 1030 | DBG("GetPortStatus\n"); |
1032 | if (!wIndex || wIndex > ports) | 1031 | if (!wIndex || wIndex > ports) |
1033 | goto error; | 1032 | goto error; |
1034 | tmp = isp116x->rhport[--wIndex]; | 1033 | spin_lock_irqsave(&isp116x->lock, flags); |
1034 | tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1); | ||
1035 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1035 | *(__le32 *) buf = cpu_to_le32(tmp); | 1036 | *(__le32 *) buf = cpu_to_le32(tmp); |
1036 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); | 1037 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); |
1037 | break; | 1038 | break; |
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1080 | spin_lock_irqsave(&isp116x->lock, flags); | 1081 | spin_lock_irqsave(&isp116x->lock, flags); |
1081 | isp116x_write_reg32(isp116x, wIndex | 1082 | isp116x_write_reg32(isp116x, wIndex |
1082 | ? HCRHPORT2 : HCRHPORT1, tmp); | 1083 | ? HCRHPORT2 : HCRHPORT1, tmp); |
1083 | isp116x->rhport[wIndex] = | ||
1084 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1085 | spin_unlock_irqrestore(&isp116x->lock, flags); | 1084 | spin_unlock_irqrestore(&isp116x->lock, flags); |
1086 | break; | 1085 | break; |
1087 | case SetPortFeature: | 1086 | case SetPortFeature: |
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1095 | spin_lock_irqsave(&isp116x->lock, flags); | 1094 | spin_lock_irqsave(&isp116x->lock, flags); |
1096 | isp116x_write_reg32(isp116x, wIndex | 1095 | isp116x_write_reg32(isp116x, wIndex |
1097 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); | 1096 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); |
1097 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1098 | break; | 1098 | break; |
1099 | case USB_PORT_FEAT_POWER: | 1099 | case USB_PORT_FEAT_POWER: |
1100 | DBG("USB_PORT_FEAT_POWER\n"); | 1100 | DBG("USB_PORT_FEAT_POWER\n"); |
1101 | spin_lock_irqsave(&isp116x->lock, flags); | 1101 | spin_lock_irqsave(&isp116x->lock, flags); |
1102 | isp116x_write_reg32(isp116x, wIndex | 1102 | isp116x_write_reg32(isp116x, wIndex |
1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); | 1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); |
1104 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1104 | break; | 1105 | break; |
1105 | case USB_PORT_FEAT_RESET: | 1106 | case USB_PORT_FEAT_RESET: |
1106 | DBG("USB_PORT_FEAT_RESET\n"); | 1107 | DBG("USB_PORT_FEAT_RESET\n"); |
1107 | root_port_reset(isp116x, wIndex); | 1108 | root_port_reset(isp116x, wIndex); |
1108 | spin_lock_irqsave(&isp116x->lock, flags); | ||
1109 | break; | 1109 | break; |
1110 | default: | 1110 | default: |
1111 | goto error; | 1111 | goto error; |
1112 | } | 1112 | } |
1113 | isp116x->rhport[wIndex] = | ||
1114 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1115 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1116 | break; | 1113 | break; |
1117 | 1114 | ||
1118 | default: | 1115 | default: |
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index b91e2edd9c5c..595b90a99848 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h | |||
@@ -270,7 +270,6 @@ struct isp116x { | |||
270 | u32 rhdesca; | 270 | u32 rhdesca; |
271 | u32 rhdescb; | 271 | u32 rhdescb; |
272 | u32 rhstatus; | 272 | u32 rhstatus; |
273 | u32 rhport[2]; | ||
274 | 273 | ||
275 | /* async schedule: control, bulk */ | 274 | /* async schedule: control, bulk */ |
276 | struct list_head async; | 275 | struct list_head async; |
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 08c65c1a3771..779d07851a4d 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c | |||
@@ -94,6 +94,7 @@ static struct usb_device_id id_table_earthmate [] = { | |||
94 | 94 | ||
95 | static struct usb_device_id id_table_cyphidcomrs232 [] = { | 95 | static struct usb_device_id id_table_cyphidcomrs232 [] = { |
96 | { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, | 96 | { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, |
97 | { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, | ||
97 | { } /* Terminating entry */ | 98 | { } /* Terminating entry */ |
98 | }; | 99 | }; |
99 | 100 | ||
@@ -106,6 +107,7 @@ static struct usb_device_id id_table_combined [] = { | |||
106 | { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, | 107 | { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, |
107 | { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, | 108 | { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, |
108 | { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, | 109 | { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, |
110 | { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, | ||
109 | { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, | 111 | { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, |
110 | { } /* Terminating entry */ | 112 | { } /* Terminating entry */ |
111 | }; | 113 | }; |
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index e1c7c27e18b7..0388065bb794 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h | |||
@@ -19,6 +19,10 @@ | |||
19 | #define VENDOR_ID_CYPRESS 0x04b4 | 19 | #define VENDOR_ID_CYPRESS 0x04b4 |
20 | #define PRODUCT_ID_CYPHIDCOM 0x5500 | 20 | #define PRODUCT_ID_CYPHIDCOM 0x5500 |
21 | 21 | ||
22 | /* Powercom UPS, chip CY7C63723 */ | ||
23 | #define VENDOR_ID_POWERCOM 0x0d9f | ||
24 | #define PRODUCT_ID_UPS 0x0002 | ||
25 | |||
22 | /* Nokia CA-42 USB to serial cable */ | 26 | /* Nokia CA-42 USB to serial cable */ |
23 | #define VENDOR_ID_DAZZLE 0x07d0 | 27 | #define VENDOR_ID_DAZZLE 0x07d0 |
24 | #define PRODUCT_ID_CA42 0x4101 | 28 | #define PRODUCT_ID_CA42 0x4101 |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76db2fef4657..3abb3c863647 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | static int ftdi_jtag_probe (struct usb_serial *serial); | 94 | static int ftdi_jtag_probe (struct usb_serial *serial); |
95 | static int ftdi_mtxorb_hack_setup (struct usb_serial *serial); | ||
95 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); | 96 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); |
96 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); | 97 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); |
97 | 98 | ||
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = { | |||
99 | .probe = ftdi_jtag_probe, | 100 | .probe = ftdi_jtag_probe, |
100 | }; | 101 | }; |
101 | 102 | ||
103 | static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { | ||
104 | .probe = ftdi_mtxorb_hack_setup, | ||
105 | }; | ||
106 | |||
102 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { | 107 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { |
103 | .port_probe = ftdi_USB_UIRT_setup, | 108 | .port_probe = ftdi_USB_UIRT_setup, |
104 | }; | 109 | }; |
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = { | |||
161 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, | 166 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, |
162 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, | 167 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, |
163 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, | 168 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, |
169 | { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), | ||
170 | .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, | ||
164 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, | 171 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, |
165 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, | 172 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, |
166 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, | 173 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, |
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = { | |||
274 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 281 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
275 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 282 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
276 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 283 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
284 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | ||
277 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 285 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
278 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 286 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
279 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 287 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
@@ -351,6 +359,7 @@ static struct usb_device_id id_table_combined [] = { | |||
351 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, | 359 | { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, |
352 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, | 360 | { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, |
353 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, | 361 | { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) }, |
362 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) }, | ||
354 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), | 363 | { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), |
355 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 364 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
356 | { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), | 365 | { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), |
@@ -1088,6 +1097,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1088 | return 0; | 1097 | return 0; |
1089 | } | 1098 | } |
1090 | 1099 | ||
1100 | /* | ||
1101 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. | ||
1102 | * We have to correct it if we want to read from it. | ||
1103 | */ | ||
1104 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) | ||
1105 | { | ||
1106 | struct usb_host_endpoint *ep = serial->dev->ep_in[1]; | ||
1107 | struct usb_endpoint_descriptor *ep_desc = &ep->desc; | ||
1108 | |||
1109 | if (ep->enabled && ep_desc->wMaxPacketSize == 0) { | ||
1110 | ep_desc->wMaxPacketSize = 0x40; | ||
1111 | info("Fixing invalid wMaxPacketSize on read pipe"); | ||
1112 | } | ||
1113 | |||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1091 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect | 1117 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect |
1092 | * it is called when the usb device is disconnected | 1118 | * it is called when the usb device is disconnected |
1093 | * | 1119 | * |
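The Matrix Orbital workaround above fixes the descriptor rather than the device: a reported wMaxPacketSize of 0 on the second IN endpoint is bumped to 0x40 so reads can be queued at all. A self-contained restatement using a stand-in descriptor type (not the kernel's struct usb_endpoint_descriptor):

#include <stdio.h>

/* Stand-in type for illustration; the real quirk patches the field in
 * the endpoint descriptor reached via serial->dev->ep_in[1]. */
struct demo_ep_desc {
	unsigned short wMaxPacketSize;
};

static void demo_fix_mtxorb_ep(struct demo_ep_desc *desc)
{
	if (desc->wMaxPacketSize == 0)
		desc->wMaxPacketSize = 0x40;	/* 64-byte full-speed bulk packet */
}

int main(void)
{
	struct demo_ep_desc ep = { 0 };

	demo_fix_mtxorb_ep(&ep);
	printf("0x%02x\n", ep.wMaxPacketSize);	/* prints 0x40 */
	return 0;
}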
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 6eee2ab914ec..6da539ede0ee 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -102,6 +102,13 @@ | |||
102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
104 | 104 | ||
105 | /* | ||
106 | * The following are the values for the Matrix Orbital VK204-25-USB | ||
107 | * display, which uses the FT232RL. | ||
108 | */ | ||
109 | #define MTXORB_VK_VID 0x1b3d | ||
110 | #define MTXORB_VK_PID 0x0158 | ||
111 | |||
105 | /* Interbiometrics USB I/O Board */ | 112 | /* Interbiometrics USB I/O Board */ |
106 | /* Developed for Interbiometrics by Rudolf Gugler */ | 113 | /* Developed for Interbiometrics by Rudolf Gugler */ |
107 | #define INTERBIOMETRICS_VID 0x1209 | 114 | #define INTERBIOMETRICS_VID 0x1209 |
@@ -550,6 +557,9 @@ | |||
550 | #define TML_VID 0x1B91 /* Vendor ID */ | 557 | #define TML_VID 0x1B91 /* Vendor ID */ |
551 | #define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ | 558 | #define TML_USB_SERIAL_PID 0x0064 /* USB - Serial Converter */ |
552 | 559 | ||
560 | /* Propox devices */ | ||
561 | #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 | ||
562 | |||
553 | /* Commands */ | 563 | /* Commands */ |
554 | #define FTDI_SIO_RESET 0 /* Reset the port */ | 564 | #define FTDI_SIO_RESET 0 /* Reset the port */ |
555 | #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ | 565 | #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ |
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 97fa3c428435..7cfce9dabb90 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
@@ -323,7 +323,7 @@ static void flush_and_resubmit_read_urb (struct usb_serial_port *port) | |||
323 | room = tty_buffer_request_room(tty, urb->actual_length); | 323 | room = tty_buffer_request_room(tty, urb->actual_length); |
324 | if (room) { | 324 | if (room) { |
325 | tty_insert_flip_string(tty, urb->transfer_buffer, room); | 325 | tty_insert_flip_string(tty, urb->transfer_buffer, room); |
326 | tty_flip_buffer_push(tty); /* is this allowed from an URB callback ? */ | 326 | tty_flip_buffer_push(tty); |
327 | } | 327 | } |
328 | } | 328 | } |
329 | 329 | ||
@@ -349,10 +349,12 @@ void usb_serial_generic_read_bulk_callback (struct urb *urb) | |||
349 | 349 | ||
350 | /* Throttle the device if requested by tty */ | 350 | /* Throttle the device if requested by tty */ |
351 | spin_lock_irqsave(&port->lock, flags); | 351 | spin_lock_irqsave(&port->lock, flags); |
352 | if (!(port->throttled = port->throttle_req)) | 352 | if (!(port->throttled = port->throttle_req)) { |
353 | /* Handle data and continue reading from device */ | 353 | spin_unlock_irqrestore(&port->lock, flags); |
354 | flush_and_resubmit_read_urb(port); | 354 | flush_and_resubmit_read_urb(port); |
355 | spin_unlock_irqrestore(&port->lock, flags); | 355 | } else { |
356 | spin_unlock_irqrestore(&port->lock, flags); | ||
357 | } | ||
356 | } | 358 | } |
357 | EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback); | 359 | EXPORT_SYMBOL_GPL(usb_serial_generic_read_bulk_callback); |
358 | 360 | ||
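The throttling change above keeps port->lock only long enough to latch the throttle request; pushing data to the tty and resubmitting the URB now happen after the unlock. A pthread-based sketch of that decide-under-lock, work-outside-lock shape (all names invented for illustration):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static int throttle_req, throttled;

/* Mirrors the shape of the fixed callback: sample the request and update
 * the throttled state under the lock, then drop the lock before doing
 * the heavier completion work. */
static void on_read_complete(void (*flush_and_resubmit)(void))
{
	int do_work;

	pthread_mutex_lock(&port_lock);
	throttled = throttle_req;
	do_work = !throttled;
	pthread_mutex_unlock(&port_lock);

	if (do_work)
		flush_and_resubmit();
}

static void demo_work(void) { puts("flush and resubmit"); }

int main(void)
{
	on_read_complete(demo_work);
	return 0;
}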
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 869ecd374cb4..aeeb9cb20999 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -110,11 +110,20 @@ | |||
110 | 110 | ||
111 | /* vendor id and device id defines */ | 111 | /* vendor id and device id defines */ |
112 | 112 | ||
113 | /* The native mos7840/7820 component */ | ||
113 | #define USB_VENDOR_ID_MOSCHIP 0x9710 | 114 | #define USB_VENDOR_ID_MOSCHIP 0x9710 |
114 | #define MOSCHIP_DEVICE_ID_7840 0x7840 | 115 | #define MOSCHIP_DEVICE_ID_7840 0x7840 |
115 | #define MOSCHIP_DEVICE_ID_7820 0x7820 | 116 | #define MOSCHIP_DEVICE_ID_7820 0x7820 |
117 | /* The native component can have its vendor/device ids overridden | ||
118 | * in vendor-specific implementations. Such devices can be handled | ||
119 | * by making a change here, in moschip_port_id_table, and in | ||
120 | * moschip_id_table_combined | ||
121 | */ | ||
122 | #define USB_VENDOR_ID_BANDB 0x0856 | ||
123 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
124 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
116 | 125 | ||
117 | /* Interrupt Rotinue Defines */ | 126 | /* Interrupt Routine Defines */ |
118 | 127 | ||
119 | #define SERIAL_IIR_RLS 0x06 | 128 | #define SERIAL_IIR_RLS 0x06 |
120 | #define SERIAL_IIR_MS 0x00 | 129 | #define SERIAL_IIR_MS 0x00 |
@@ -159,12 +168,16 @@ | |||
159 | static struct usb_device_id moschip_port_id_table[] = { | 168 | static struct usb_device_id moschip_port_id_table[] = { |
160 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 169 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
161 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 170 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
171 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
172 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
162 | {} /* terminating entry */ | 173 | {} /* terminating entry */ |
163 | }; | 174 | }; |
164 | 175 | ||
165 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | 176 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { |
166 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 177 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
167 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 178 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
168 | {} /* terminating entry */ | 181 | {} /* terminating entry */ |
169 | }; | 182 | }; |
170 | 183 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af2674c57414..a396fbbdc9c2 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -111,6 +111,42 @@ static int option_send_setup(struct usb_serial_port *port); | |||
111 | #define HUAWEI_PRODUCT_E220BIS 0x1004 | 111 | #define HUAWEI_PRODUCT_E220BIS 0x1004 |
112 | 112 | ||
113 | #define NOVATELWIRELESS_VENDOR_ID 0x1410 | 113 | #define NOVATELWIRELESS_VENDOR_ID 0x1410 |
114 | |||
115 | /* MERLIN EVDO PRODUCTS */ | ||
116 | #define NOVATELWIRELESS_PRODUCT_V640 0x1100 | ||
117 | #define NOVATELWIRELESS_PRODUCT_V620 0x1110 | ||
118 | #define NOVATELWIRELESS_PRODUCT_V740 0x1120 | ||
119 | #define NOVATELWIRELESS_PRODUCT_V720 0x1130 | ||
120 | |||
121 | /* MERLIN HSDPA/HSPA PRODUCTS */ | ||
122 | #define NOVATELWIRELESS_PRODUCT_U730 0x1400 | ||
123 | #define NOVATELWIRELESS_PRODUCT_U740 0x1410 | ||
124 | #define NOVATELWIRELESS_PRODUCT_U870 0x1420 | ||
125 | #define NOVATELWIRELESS_PRODUCT_XU870 0x1430 | ||
126 | #define NOVATELWIRELESS_PRODUCT_X950D 0x1450 | ||
127 | |||
128 | /* EXPEDITE PRODUCTS */ | ||
129 | #define NOVATELWIRELESS_PRODUCT_EV620 0x2100 | ||
130 | #define NOVATELWIRELESS_PRODUCT_ES720 0x2110 | ||
131 | #define NOVATELWIRELESS_PRODUCT_E725 0x2120 | ||
132 | #define NOVATELWIRELESS_PRODUCT_EU730 0x2400 | ||
133 | #define NOVATELWIRELESS_PRODUCT_EU740 0x2410 | ||
134 | #define NOVATELWIRELESS_PRODUCT_EU870D 0x2420 | ||
135 | |||
136 | /* OVATION PRODUCTS */ | ||
137 | #define NOVATELWIRELESS_PRODUCT_MC727 0x4100 | ||
138 | #define NOVATELWIRELESS_PRODUCT_MC950D 0x4400 | ||
139 | |||
140 | /* FUTURE NOVATEL PRODUCTS */ | ||
141 | #define NOVATELWIRELESS_PRODUCT_EVDO_1 0x6000 | ||
142 | #define NOVATELWIRELESS_PRODUCT_HSPA_1 0x7000 | ||
143 | #define NOVATELWIRELESS_PRODUCT_EMBEDDED_1 0x8000 | ||
144 | #define NOVATELWIRELESS_PRODUCT_GLOBAL_1 0x9000 | ||
145 | #define NOVATELWIRELESS_PRODUCT_EVDO_2 0x6001 | ||
146 | #define NOVATELWIRELESS_PRODUCT_HSPA_2 0x7001 | ||
147 | #define NOVATELWIRELESS_PRODUCT_EMBEDDED_2 0x8001 | ||
148 | #define NOVATELWIRELESS_PRODUCT_GLOBAL_2 0x9001 | ||
149 | |||
114 | #define DELL_VENDOR_ID 0x413C | 150 | #define DELL_VENDOR_ID 0x413C |
115 | 151 | ||
116 | #define KYOCERA_VENDOR_ID 0x0c88 | 152 | #define KYOCERA_VENDOR_ID 0x0c88 |
@@ -120,6 +156,9 @@ static int option_send_setup(struct usb_serial_port *port); | |||
120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 | 156 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 |
121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 | 157 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 |
122 | 158 | ||
159 | #define AXESSTEL_VENDOR_ID 0x1726 | ||
160 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | ||
161 | |||
123 | #define BANDRICH_VENDOR_ID 0x1A8D | 162 | #define BANDRICH_VENDOR_ID 0x1A8D |
124 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 163 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
125 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 164 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -165,21 +204,34 @@ static struct usb_device_id option_ids[] = { | |||
165 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, | 204 | { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, |
166 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, | 205 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, |
167 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, | 206 | { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, |
168 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1100) }, /* Novatel Merlin XS620/S640 */ | 207 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ |
169 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1110) }, /* Novatel Merlin S620 */ | 208 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ |
170 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1120) }, /* Novatel Merlin EX720 */ | 209 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, /* Novatel Merlin EX720/V740/X720 */ |
171 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1130) }, /* Novatel Merlin S720 */ | 210 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V720) }, /* Novatel Merlin V720/S720/PC720 */ |
172 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1400) }, /* Novatel U730 */ | 211 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U730) }, /* Novatel U730/U740 (VF version) */ |
173 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1410) }, /* Novatel U740 */ | 212 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U740) }, /* Novatel U740 */ |
174 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1420) }, /* Novatel EU870 */ | 213 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U870) }, /* Novatel U870 */ |
175 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x1430) }, /* Novatel Merlin XU870 HSDPA/3G */ | 214 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_XU870) }, /* Novatel Merlin XU870 HSDPA/3G */ |
176 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2100) }, /* Novatel EV620 CDMA/EV-DO */ | 215 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_X950D) }, /* Novatel X950D */ |
177 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2110) }, /* Novatel Merlin ES620 / Merlin ES720 / Ovation U720 */ | 216 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EV620) }, /* Novatel EV620/ES620 CDMA/EV-DO */ |
217 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_ES720) }, /* Novatel ES620/ES720/U720/USB720 */ | ||
218 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E725) }, /* Novatel E725/E726 */ | ||
178 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ | 219 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2130) }, /* Novatel Merlin ES620 SM Bus */ |
179 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x2410) }, /* Novatel EU740 */ | 220 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU730) }, /* Novatel EU730 and Vodafone EU740 */ |
180 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4100) }, /* Novatel U727 */ | 221 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU740) }, /* Novatel non-Vodafone EU740 */ |
181 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x4400) }, /* Novatel MC950 */ | 222 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EU870D) }, /* Novatel EU850D/EU860D/EU870D */ |
223 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC950D) }, /* Novatel MC930D/MC950D */ | ||
224 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC727) }, /* Novatel MC727/U727/USB727 */ | ||
182 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */ | 225 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 0x5010) }, /* Novatel U727 */ |
226 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_1) }, /* Novatel EVDO product */ | ||
227 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_1) }, /* Novatel HSPA product */ | ||
228 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_1) }, /* Novatel Embedded product */ | ||
229 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_1) }, /* Novatel Global product */ | ||
230 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_2) }, /* Novatel EVDO product */ | ||
231 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_2) }, /* Novatel HSPA product */ | ||
232 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EMBEDDED_2) }, /* Novatel Embedded product */ | ||
233 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_GLOBAL_2) }, /* Novatel Global product */ | ||
234 | |||
183 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ | 235 | { USB_DEVICE(DELL_VENDOR_ID, 0x8114) }, /* Dell Wireless 5700 Mobile Broadband CDMA/EVDO Mini-Card == Novatel Expedite EV620 CDMA/EV-DO */ |
184 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 236 | { USB_DEVICE(DELL_VENDOR_ID, 0x8115) }, /* Dell Wireless 5500 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
185 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ | 237 | { USB_DEVICE(DELL_VENDOR_ID, 0x8116) }, /* Dell Wireless 5505 Mobile Broadband HSDPA Mini-Card == Novatel Expedite EU740 HSDPA/3G */ |
@@ -192,6 +244,7 @@ static struct usb_device_id option_ids[] = { | |||
192 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ | 244 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ |
193 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, | 245 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, |
194 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 246 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
247 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | ||
195 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 248 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
196 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 249 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
197 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 250 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
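Two different match macros appear in the table above: USB_DEVICE() matches every interface of a vendor/product pair, while USB_DEVICE_AND_INTERFACE_INFO() additionally keys on the interface class/subclass/protocol, which is how the Huawei entries bind only to the vendor-specific (0xff/0xff/0xff) serial interfaces. A minimal sketch of such a table follows; the IDs and names are hypothetical, not entries from this driver.

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID   0x1234          /* hypothetical vendor */
#define EXAMPLE_PRODUCT_ID  0x5678          /* hypothetical product */

static const struct usb_device_id example_ids[] = {
        /* match every interface of this device */
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID) },
        /* match only its vendor-specific (ff/ff/ff) interfaces */
        { USB_DEVICE_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID,
                EXAMPLE_PRODUCT_ID, 0xff, 0xff, 0xff) },
        { }                                 /* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);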
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 958f5b17847c..b9b8ede61fb3 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c | |||
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
170 | 170 | ||
171 | if (!sg) | 171 | if (!sg) |
172 | sg = scsi_sglist(srb); | 172 | sg = scsi_sglist(srb); |
173 | buflen = min(buflen, scsi_bufflen(srb)); | ||
174 | 173 | ||
175 | /* This loop handles a single s-g list entry, which may | 174 | /* This loop handles a single s-g list entry, which may |
176 | * include multiple pages. Find the initial page structure | 175 | * include multiple pages. Find the initial page structure |
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer, | |||
232 | unsigned int offset = 0; | 231 | unsigned int offset = 0; |
233 | struct scatterlist *sg = NULL; | 232 | struct scatterlist *sg = NULL; |
234 | 233 | ||
234 | buflen = min(buflen, scsi_bufflen(srb)); | ||
235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, | 235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, |
236 | TO_XFER_BUF); | 236 | TO_XFER_BUF); |
237 | if (buflen < scsi_bufflen(srb)) | 237 | if (buflen < scsi_bufflen(srb)) |
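The protocol.c hunk above moves the min() clamp against scsi_bufflen() out of usb_stor_access_xfer_buf() and into usb_stor_set_xfer_buf(), so the length is bounded once, right before data is written into the SCSI buffer. A small, self-contained sketch of the clamping idiom itself, using a hypothetical helper rather than the driver's functions:

#include <stddef.h>
#include <string.h>

/* Copy at most dst_len bytes and report how much was actually copied,
 * so the caller can detect a short transfer, much as the storage code
 * does with "if (buflen < scsi_bufflen(srb))". */
static size_t copy_clamped(void *dst, size_t dst_len,
                           const void *src, size_t src_len)
{
        size_t n = src_len < dst_len ? src_len : dst_len;

        memcpy(dst, src, n);
        return n;
}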
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c index d43a3415e12f..6d14327c921d 100644 --- a/drivers/usb/storage/sddr55.c +++ b/drivers/usb/storage/sddr55.c | |||
@@ -522,8 +522,8 @@ int sddr55_reset(struct us_data *us) { | |||
522 | 522 | ||
523 | static unsigned long sddr55_get_capacity(struct us_data *us) { | 523 | static unsigned long sddr55_get_capacity(struct us_data *us) { |
524 | 524 | ||
525 | unsigned char manufacturerID; | 525 | unsigned char uninitialized_var(manufacturerID); |
526 | unsigned char deviceID; | 526 | unsigned char uninitialized_var(deviceID); |
527 | int result; | 527 | int result; |
528 | struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; | 528 | struct sddr55_card_info *info = (struct sddr55_card_info *)us->extra; |
529 | 529 | ||
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 758435f8a6f8..e0b0580705e4 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -553,6 +553,19 @@ config FB_BF54X_LQ043 | |||
553 | help | 553 | help |
554 | This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD | 554 | This is the framebuffer device driver for a SHARP LQ043T1DG01 TFT LCD |
555 | 555 | ||
556 | config FB_BFIN_T350MCQB | ||
557 | tristate "Varitronix COG-T350MCQB TFT LCD display (BF527 EZKIT)" | ||
558 | depends on FB && BLACKFIN | ||
559 | select BFIN_GPTIMERS | ||
560 | select FB_CFB_FILLRECT | ||
561 | select FB_CFB_COPYAREA | ||
562 | select FB_CFB_IMAGEBLIT | ||
563 | help | ||
564 | This is the framebuffer device driver for the Varitronix VL-PS-COG-T350MCQB-01 TFT LCD display. | ||
565 | This display is a QVGA 320x240 24-bit RGB panel interfaced through an 8-bit wide PPI. | ||
566 | It uses PPI[0..7], PPI_FS1, PPI_FS2 and PPI_CLK. | ||
567 | |||
568 | |||
556 | config FB_STI | 569 | config FB_STI |
557 | tristate "HP STI frame buffer device support" | 570 | tristate "HP STI frame buffer device support" |
558 | depends on FB && PARISC | 571 | depends on FB && PARISC |
diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 83e02b3429b6..03371c789039 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile | |||
@@ -122,6 +122,7 @@ obj-$(CONFIG_FB_EFI) += efifb.o | |||
122 | obj-$(CONFIG_FB_VGA16) += vga16fb.o | 122 | obj-$(CONFIG_FB_VGA16) += vga16fb.o |
123 | obj-$(CONFIG_FB_OF) += offb.o | 123 | obj-$(CONFIG_FB_OF) += offb.o |
124 | obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o | 124 | obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o |
125 | obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o | ||
125 | 126 | ||
126 | # the test framebuffer is last | 127 | # the test framebuffer is last |
127 | obj-$(CONFIG_FB_VIRTUAL) += vfb.o | 128 | obj-$(CONFIG_FB_VIRTUAL) += vfb.o |
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c index 0ce791e6f79c..986a550c0439 100644 --- a/drivers/video/bf54x-lq043fb.c +++ b/drivers/video/bf54x-lq043fb.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * | 9 | * |
10 | * Modified: | 10 | * Modified: |
11 | * Copyright 2004-2007 Analog Devices Inc. | 11 | * Copyright 2007-2008 Analog Devices Inc. |
12 | * | 12 | * |
13 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 13 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ |
14 | * | 14 | * |
@@ -241,7 +241,7 @@ static int request_ports(struct bfin_bf54xfb_info *fbi) | |||
241 | u16 eppi_req_18[] = EPPI0_18; | 241 | u16 eppi_req_18[] = EPPI0_18; |
242 | u16 disp = fbi->mach_info->disp; | 242 | u16 disp = fbi->mach_info->disp; |
243 | 243 | ||
244 | if (gpio_request(disp, NULL)) { | 244 | if (gpio_request(disp, DRIVER_NAME)) { |
245 | printk(KERN_ERR "Requesting GPIO %d faild\n", disp); | 245 | printk(KERN_ERR "Requesting GPIO %d faild\n", disp); |
246 | return -EFAULT; | 246 | return -EFAULT; |
247 | } | 247 | } |
@@ -672,7 +672,7 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev) | |||
672 | &bfin_lq043fb_bl_ops); | 672 | &bfin_lq043fb_bl_ops); |
673 | bl_dev->props.max_brightness = 255; | 673 | bl_dev->props.max_brightness = 255; |
674 | 674 | ||
675 | lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); | 675 | lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops); |
676 | lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); | 676 | lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); |
677 | #endif | 677 | #endif |
678 | 678 | ||
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c new file mode 100644 index 000000000000..a2bb2de9e020 --- /dev/null +++ b/drivers/video/bfin-t350mcqb-fb.c | |||
@@ -0,0 +1,685 @@ | |||
1 | /* | ||
2 | * File: drivers/video/bfin-t350mcqb-fb.c | ||
3 | * Based on: | ||
4 | * Author: Michael Hennerich <hennerich@blackfin.uclinux.org> | ||
5 | * | ||
6 | * Created: | ||
7 | * Description: Blackfin LCD Framebuffer driver | ||
8 | * | ||
9 | * | ||
10 | * Modified: | ||
11 | * Copyright 2004-2007 Analog Devices Inc. | ||
12 | * | ||
13 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * This program is distributed in the hope that it will be useful, | ||
21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
23 | * GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with this program; if not, see the file COPYING, or write | ||
27 | * to the Free Software Foundation, Inc., | ||
28 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
29 | */ | ||
30 | |||
31 | #include <linux/module.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/errno.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/fb.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/types.h> | ||
38 | #include <linux/interrupt.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/backlight.h> | ||
41 | #include <linux/lcd.h> | ||
42 | #include <linux/dma-mapping.h> | ||
43 | #include <linux/platform_device.h> | ||
44 | |||
45 | #include <asm/blackfin.h> | ||
46 | #include <asm/irq.h> | ||
47 | #include <asm/dma-mapping.h> | ||
48 | #include <asm/dma.h> | ||
49 | #include <asm/portmux.h> | ||
50 | #include <asm/gptimers.h> | ||
51 | |||
52 | #define NO_BL_SUPPORT | ||
53 | |||
54 | #define LCD_X_RES 320 /* Horizontal Resolution */ | ||
55 | #define LCD_Y_RES 240 /* Vertical Resolution */ | ||
56 | #define LCD_BPP 24 /* Bits Per Pixel */ | ||
57 | |||
58 | #define DMA_BUS_SIZE 16 | ||
59 | #define LCD_CLK (12*1000*1000) /* 12MHz */ | ||
60 | |||
61 | #define CLOCKS_PER_PIX 3 | ||
62 | |||
63 | /* | ||
64 | * HS and VS timing parameters (all in number of PPI clk ticks) | ||
65 | */ | ||
66 | |||
67 | #define U_LINE 1 /* Blanking Lines */ | ||
68 | |||
69 | #define H_ACTPIX (LCD_X_RES * CLOCKS_PER_PIX) /* active horizontal pixel */ | ||
70 | #define H_PERIOD (408 * CLOCKS_PER_PIX) /* HS period */ | ||
71 | #define H_PULSE 90 /* HS pulse width */ | ||
72 | #define H_START 204 /* first valid pixel */ | ||
73 | |||
74 | #define V_LINES (LCD_Y_RES + U_LINE) /* total vertical lines */ | ||
75 | #define V_PULSE (3 * H_PERIOD) /* VS pulse width (1-5 H_PERIODs) */ | ||
76 | #define V_PERIOD (H_PERIOD * V_LINES) /* VS period */ | ||
77 | |||
78 | #define ACTIVE_VIDEO_MEM_OFFSET (U_LINE * H_ACTPIX) | ||
79 | |||
80 | #define BFIN_LCD_NBR_PALETTE_ENTRIES 256 | ||
81 | |||
82 | #define DRIVER_NAME "bfin-t350mcqb" | ||
83 | static char driver_name[] = DRIVER_NAME; | ||
84 | |||
85 | struct bfin_t350mcqbfb_info { | ||
86 | struct fb_info *fb; | ||
87 | struct device *dev; | ||
88 | unsigned char *fb_buffer; /* RGB Buffer */ | ||
89 | dma_addr_t dma_handle; | ||
90 | int lq043_mmap; | ||
91 | int lq043_open_cnt; | ||
92 | int irq; | ||
93 | spinlock_t lock; /* lock */ | ||
94 | }; | ||
95 | |||
96 | static int nocursor; | ||
97 | module_param(nocursor, int, 0644); | ||
98 | MODULE_PARM_DESC(nocursor, "cursor enable/disable"); | ||
99 | |||
100 | #define PPI_TX_MODE 0x2 | ||
101 | #define PPI_XFER_TYPE_11 0xC | ||
102 | #define PPI_PORT_CFG_01 0x10 | ||
103 | #define PPI_PACK_EN 0x80 | ||
104 | #define PPI_POLS_1 0x8000 | ||
105 | |||
106 | static void bfin_t350mcqb_config_ppi(struct bfin_t350mcqbfb_info *fbi) | ||
107 | { | ||
108 | bfin_write_PPI_DELAY(H_START); | ||
109 | bfin_write_PPI_COUNT(H_ACTPIX-1); | ||
110 | bfin_write_PPI_FRAME(V_LINES); | ||
111 | |||
112 | bfin_write_PPI_CONTROL(PPI_TX_MODE | /* output mode , PORT_DIR */ | ||
113 | PPI_XFER_TYPE_11 | /* sync mode XFR_TYPE */ | ||
114 | PPI_PORT_CFG_01 | /* two frame sync PORT_CFG */ | ||
115 | PPI_PACK_EN | /* packing enabled PACK_EN */ | ||
116 | PPI_POLS_1); /* falling edge syncs POLS */ | ||
117 | } | ||
118 | |||
119 | static inline void bfin_t350mcqb_disable_ppi(void) | ||
120 | { | ||
121 | bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() & ~PORT_EN); | ||
122 | } | ||
123 | |||
124 | static inline void bfin_t350mcqb_enable_ppi(void) | ||
125 | { | ||
126 | bfin_write_PPI_CONTROL(bfin_read_PPI_CONTROL() | PORT_EN); | ||
127 | } | ||
128 | |||
129 | static void bfin_t350mcqb_start_timers(void) | ||
130 | { | ||
131 | unsigned long flags; | ||
132 | |||
133 | local_irq_save(flags); | ||
134 | enable_gptimers(TIMER1bit); | ||
135 | enable_gptimers(TIMER0bit); | ||
136 | local_irq_restore(flags); | ||
137 | } | ||
138 | |||
139 | static void bfin_t350mcqb_stop_timers(void) | ||
140 | { | ||
141 | disable_gptimers(TIMER0bit | TIMER1bit); | ||
142 | |||
143 | set_gptimer_status(0, TIMER_STATUS_TRUN0 | TIMER_STATUS_TRUN1 | | ||
144 | TIMER_STATUS_TIMIL0 | TIMER_STATUS_TIMIL1 | | ||
145 | TIMER_STATUS_TOVF0 | TIMER_STATUS_TOVF1); | ||
146 | |||
147 | } | ||
148 | |||
149 | static void bfin_t350mcqb_init_timers(void) | ||
150 | { | ||
151 | |||
152 | bfin_t350mcqb_stop_timers(); | ||
153 | |||
154 | set_gptimer_period(TIMER0_id, H_PERIOD); | ||
155 | set_gptimer_pwidth(TIMER0_id, H_PULSE); | ||
156 | set_gptimer_config(TIMER0_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | | ||
157 | TIMER_TIN_SEL | TIMER_CLK_SEL| | ||
158 | TIMER_EMU_RUN); | ||
159 | |||
160 | set_gptimer_period(TIMER1_id, V_PERIOD); | ||
161 | set_gptimer_pwidth(TIMER1_id, V_PULSE); | ||
162 | set_gptimer_config(TIMER1_id, TIMER_MODE_PWM | TIMER_PERIOD_CNT | | ||
163 | TIMER_TIN_SEL | TIMER_CLK_SEL | | ||
164 | TIMER_EMU_RUN); | ||
165 | |||
166 | } | ||
167 | |||
168 | static void bfin_t350mcqb_config_dma(struct bfin_t350mcqbfb_info *fbi) | ||
169 | { | ||
170 | |||
171 | set_dma_config(CH_PPI, | ||
172 | set_bfin_dma_config(DIR_READ, DMA_FLOW_AUTO, | ||
173 | INTR_DISABLE, DIMENSION_2D, | ||
174 | DATA_SIZE_16, | ||
175 | DMA_NOSYNC_KEEP_DMA_BUF)); | ||
176 | set_dma_x_count(CH_PPI, (LCD_X_RES * LCD_BPP) / DMA_BUS_SIZE); | ||
177 | set_dma_x_modify(CH_PPI, DMA_BUS_SIZE / 8); | ||
178 | set_dma_y_count(CH_PPI, V_LINES); | ||
179 | |||
180 | set_dma_y_modify(CH_PPI, DMA_BUS_SIZE / 8); | ||
181 | set_dma_start_addr(CH_PPI, (unsigned long)fbi->fb_buffer); | ||
182 | |||
183 | } | ||
184 | |||
185 | static int bfin_t350mcqb_request_ports(int action) | ||
186 | { | ||
187 | u16 ppi0_req_8[] = {P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, | ||
188 | P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, | ||
189 | P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, | ||
190 | P_PPI0_D6, P_PPI0_D7, 0}; | ||
191 | |||
192 | if (action) { | ||
193 | if (peripheral_request_list(ppi0_req_8, DRIVER_NAME)) { | ||
194 | printk(KERN_ERR "Requesting Peripherals faild\n"); | ||
195 | return -EFAULT; | ||
196 | } | ||
197 | } else | ||
198 | peripheral_free_list(ppi0_req_8); | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | static int bfin_t350mcqb_fb_open(struct fb_info *info, int user) | ||
204 | { | ||
205 | struct bfin_t350mcqbfb_info *fbi = info->par; | ||
206 | |||
207 | spin_lock(&fbi->lock); | ||
208 | fbi->lq043_open_cnt++; | ||
209 | |||
210 | if (fbi->lq043_open_cnt <= 1) { | ||
211 | |||
212 | bfin_t350mcqb_disable_ppi(); | ||
213 | SSYNC(); | ||
214 | |||
215 | bfin_t350mcqb_config_dma(fbi); | ||
216 | bfin_t350mcqb_config_ppi(fbi); | ||
217 | bfin_t350mcqb_init_timers(); | ||
218 | |||
219 | /* start dma */ | ||
220 | enable_dma(CH_PPI); | ||
221 | bfin_t350mcqb_enable_ppi(); | ||
222 | bfin_t350mcqb_start_timers(); | ||
223 | } | ||
224 | |||
225 | spin_unlock(&fbi->lock); | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static int bfin_t350mcqb_fb_release(struct fb_info *info, int user) | ||
231 | { | ||
232 | struct bfin_t350mcqbfb_info *fbi = info->par; | ||
233 | |||
234 | spin_lock(&fbi->lock); | ||
235 | |||
236 | fbi->lq043_open_cnt--; | ||
237 | fbi->lq043_mmap = 0; | ||
238 | |||
239 | if (fbi->lq043_open_cnt <= 0) { | ||
240 | bfin_t350mcqb_disable_ppi(); | ||
241 | SSYNC(); | ||
242 | disable_dma(CH_PPI); | ||
243 | bfin_t350mcqb_stop_timers(); | ||
244 | memset(fbi->fb_buffer, 0, info->fix.smem_len); | ||
245 | } | ||
246 | |||
247 | spin_unlock(&fbi->lock); | ||
248 | |||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static int bfin_t350mcqb_fb_check_var(struct fb_var_screeninfo *var, | ||
253 | struct fb_info *info) | ||
254 | { | ||
255 | |||
256 | if (var->bits_per_pixel != LCD_BPP) { | ||
257 | pr_debug("%s: depth not supported: %u BPP\n", __FUNCTION__, | ||
258 | var->bits_per_pixel); | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | |||
262 | if (info->var.xres != var->xres || info->var.yres != var->yres || | ||
263 | info->var.xres_virtual != var->xres_virtual || | ||
264 | info->var.yres_virtual != var->yres_virtual) { | ||
265 | pr_debug("%s: Resolution not supported: X%u x Y%u \n", | ||
266 | __FUNCTION__, var->xres, var->yres); | ||
267 | return -EINVAL; | ||
268 | } | ||
269 | |||
270 | /* | ||
271 | * Memory limit | ||
272 | */ | ||
273 | |||
274 | if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) { | ||
275 | pr_debug("%s: Memory Limit requested yres_virtual = %u\n", | ||
276 | __FUNCTION__, var->yres_virtual); | ||
277 | return -ENOMEM; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static int bfin_t350mcqb_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) | ||
284 | { | ||
285 | struct bfin_t350mcqbfb_info *fbi = info->par; | ||
286 | |||
287 | if (fbi->lq043_mmap) | ||
288 | return -1; | ||
289 | |||
290 | spin_lock(&fbi->lock); | ||
291 | fbi->lq043_mmap = 1; | ||
292 | spin_unlock(&fbi->lock); | ||
293 | |||
294 | vma->vm_start = (unsigned long)(fbi->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET); | ||
295 | |||
296 | vma->vm_end = vma->vm_start + info->fix.smem_len; | ||
297 | /* For those who don't understand how mmap works, go read | ||
298 | * Documentation/nommu-mmap.txt. | ||
299 | * For those that do, you will know that the VM_MAYSHARE flag | ||
300 | * must be set in vma->vm_flags on noMMU systems. | ||
301 | * Other flags can be set, and are documented in | ||
302 | * include/linux/mm.h | ||
303 | */ | ||
304 | vma->vm_flags |= VM_MAYSHARE; | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | int bfin_t350mcqb_fb_cursor(struct fb_info *info, struct fb_cursor *cursor) | ||
310 | { | ||
311 | if (nocursor) | ||
312 | return 0; | ||
313 | else | ||
314 | return -EINVAL; /* just to force soft_cursor() call */ | ||
315 | } | ||
316 | |||
317 | static int bfin_t350mcqb_fb_setcolreg(u_int regno, u_int red, u_int green, | ||
318 | u_int blue, u_int transp, | ||
319 | struct fb_info *info) | ||
320 | { | ||
321 | if (regno >= BFIN_LCD_NBR_PALETTE_ENTRIES) | ||
322 | return -EINVAL; | ||
323 | |||
324 | if (info->var.grayscale) { | ||
325 | /* grayscale = 0.30*R + 0.59*G + 0.11*B */ | ||
326 | red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8; | ||
327 | } | ||
328 | |||
329 | if (info->fix.visual == FB_VISUAL_TRUECOLOR) { | ||
330 | |||
331 | u32 value; | ||
332 | /* Place color in the pseudopalette */ | ||
333 | if (regno >= 16) /* pseudo_palette has 16 entries */ | ||
334 | return -EINVAL; | ||
335 | |||
336 | red >>= (16 - info->var.red.length); | ||
337 | green >>= (16 - info->var.green.length); | ||
338 | blue >>= (16 - info->var.blue.length); | ||
339 | |||
340 | value = (red << info->var.red.offset) | | ||
341 | (green << info->var.green.offset) | | ||
342 | (blue << info->var.blue.offset); | ||
343 | value &= 0xFFFFFF; | ||
344 | |||
345 | ((u32 *) (info->pseudo_palette))[regno] = value; | ||
346 | |||
347 | } | ||
348 | |||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static struct fb_ops bfin_t350mcqb_fb_ops = { | ||
353 | .owner = THIS_MODULE, | ||
354 | .fb_open = bfin_t350mcqb_fb_open, | ||
355 | .fb_release = bfin_t350mcqb_fb_release, | ||
356 | .fb_check_var = bfin_t350mcqb_fb_check_var, | ||
357 | .fb_fillrect = cfb_fillrect, | ||
358 | .fb_copyarea = cfb_copyarea, | ||
359 | .fb_imageblit = cfb_imageblit, | ||
360 | .fb_mmap = bfin_t350mcqb_fb_mmap, | ||
361 | .fb_cursor = bfin_t350mcqb_fb_cursor, | ||
362 | .fb_setcolreg = bfin_t350mcqb_fb_setcolreg, | ||
363 | }; | ||
364 | |||
365 | #ifndef NO_BL_SUPPORT | ||
366 | static int bl_get_brightness(struct backlight_device *bd) | ||
367 | { | ||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | static struct backlight_ops bfin_lq043fb_bl_ops = { | ||
372 | .get_brightness = bl_get_brightness, | ||
373 | }; | ||
374 | |||
375 | static struct backlight_device *bl_dev; | ||
376 | |||
377 | static int bfin_lcd_get_power(struct lcd_device *dev) | ||
378 | { | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | static int bfin_lcd_set_power(struct lcd_device *dev, int power) | ||
383 | { | ||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | static int bfin_lcd_get_contrast(struct lcd_device *dev) | ||
388 | { | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | static int bfin_lcd_set_contrast(struct lcd_device *dev, int contrast) | ||
393 | { | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int bfin_lcd_check_fb(struct fb_info *fi) | ||
399 | { | ||
400 | if (!fi || (fi == &bfin_t350mcqb_fb)) | ||
401 | return 1; | ||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static struct lcd_ops bfin_lcd_ops = { | ||
406 | .get_power = bfin_lcd_get_power, | ||
407 | .set_power = bfin_lcd_set_power, | ||
408 | .get_contrast = bfin_lcd_get_contrast, | ||
409 | .set_contrast = bfin_lcd_set_contrast, | ||
410 | .check_fb = bfin_lcd_check_fb, | ||
411 | }; | ||
412 | |||
413 | static struct lcd_device *lcd_dev; | ||
414 | #endif | ||
415 | |||
416 | static irqreturn_t bfin_t350mcqb_irq_error(int irq, void *dev_id) | ||
417 | { | ||
418 | /*struct bfin_t350mcqbfb_info *info = (struct bfin_t350mcqbfb_info *)dev_id;*/ | ||
419 | |||
420 | u16 status = bfin_read_PPI_STATUS(); | ||
421 | bfin_write_PPI_STATUS(0xFFFF); | ||
422 | |||
423 | if (status) { | ||
424 | bfin_t350mcqb_disable_ppi(); | ||
425 | disable_dma(CH_PPI); | ||
426 | |||
427 | /* start dma */ | ||
428 | enable_dma(CH_PPI); | ||
429 | bfin_t350mcqb_enable_ppi(); | ||
430 | bfin_write_PPI_STATUS(0xFFFF); | ||
431 | } | ||
432 | |||
433 | return IRQ_HANDLED; | ||
434 | } | ||
435 | |||
436 | static int __init bfin_t350mcqb_probe(struct platform_device *pdev) | ||
437 | { | ||
438 | struct bfin_t350mcqbfb_info *info; | ||
439 | struct fb_info *fbinfo; | ||
440 | int ret; | ||
441 | |||
442 | printk(KERN_INFO DRIVER_NAME ": %dx%d %d-bit RGB FrameBuffer initializing...\n", | ||
443 | LCD_X_RES, LCD_Y_RES, LCD_BPP); | ||
444 | |||
445 | if (request_dma(CH_PPI, "CH_PPI") < 0) { | ||
446 | printk(KERN_ERR DRIVER_NAME | ||
447 | ": couldn't request CH_PPI DMA\n"); | ||
448 | ret = -EFAULT; | ||
449 | goto out1; | ||
450 | } | ||
451 | |||
452 | fbinfo = | ||
453 | framebuffer_alloc(sizeof(struct bfin_t350mcqbfb_info), &pdev->dev); | ||
454 | if (!fbinfo) { | ||
455 | ret = -ENOMEM; | ||
456 | goto out2; | ||
457 | } | ||
458 | |||
459 | info = fbinfo->par; | ||
460 | info->fb = fbinfo; | ||
461 | info->dev = &pdev->dev; | ||
462 | |||
463 | platform_set_drvdata(pdev, fbinfo); | ||
464 | |||
465 | strcpy(fbinfo->fix.id, driver_name); | ||
466 | |||
467 | fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; | ||
468 | fbinfo->fix.type_aux = 0; | ||
469 | fbinfo->fix.xpanstep = 0; | ||
470 | fbinfo->fix.ypanstep = 0; | ||
471 | fbinfo->fix.ywrapstep = 0; | ||
472 | fbinfo->fix.accel = FB_ACCEL_NONE; | ||
473 | fbinfo->fix.visual = FB_VISUAL_TRUECOLOR; | ||
474 | |||
475 | fbinfo->var.nonstd = 0; | ||
476 | fbinfo->var.activate = FB_ACTIVATE_NOW; | ||
477 | fbinfo->var.height = -1; | ||
478 | fbinfo->var.width = -1; | ||
479 | fbinfo->var.accel_flags = 0; | ||
480 | fbinfo->var.vmode = FB_VMODE_NONINTERLACED; | ||
481 | |||
482 | fbinfo->var.xres = LCD_X_RES; | ||
483 | fbinfo->var.xres_virtual = LCD_X_RES; | ||
484 | fbinfo->var.yres = LCD_Y_RES; | ||
485 | fbinfo->var.yres_virtual = LCD_Y_RES; | ||
486 | fbinfo->var.bits_per_pixel = LCD_BPP; | ||
487 | |||
488 | fbinfo->var.red.offset = 0; | ||
489 | fbinfo->var.green.offset = 8; | ||
490 | fbinfo->var.blue.offset = 16; | ||
491 | fbinfo->var.transp.offset = 0; | ||
492 | fbinfo->var.red.length = 8; | ||
493 | fbinfo->var.green.length = 8; | ||
494 | fbinfo->var.blue.length = 8; | ||
495 | fbinfo->var.transp.length = 0; | ||
496 | fbinfo->fix.smem_len = LCD_X_RES * LCD_Y_RES * LCD_BPP / 8; | ||
497 | |||
498 | fbinfo->fix.line_length = fbinfo->var.xres_virtual * | ||
499 | fbinfo->var.bits_per_pixel / 8; | ||
500 | |||
501 | |||
502 | fbinfo->fbops = &bfin_t350mcqb_fb_ops; | ||
503 | fbinfo->flags = FBINFO_FLAG_DEFAULT; | ||
504 | |||
505 | info->fb_buffer = | ||
506 | dma_alloc_coherent(NULL, fbinfo->fix.smem_len, &info->dma_handle, | ||
507 | GFP_KERNEL); | ||
508 | |||
509 | if (NULL == info->fb_buffer) { | ||
510 | printk(KERN_ERR DRIVER_NAME | ||
511 | ": couldn't allocate dma buffer.\n"); | ||
512 | ret = -ENOMEM; | ||
513 | goto out3; | ||
514 | } | ||
515 | |||
516 | memset(info->fb_buffer, 0, fbinfo->fix.smem_len); | ||
517 | |||
518 | fbinfo->screen_base = (void *)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; | ||
519 | fbinfo->fix.smem_start = (int)info->fb_buffer + ACTIVE_VIDEO_MEM_OFFSET; | ||
520 | |||
521 | fbinfo->fbops = &bfin_t350mcqb_fb_ops; | ||
522 | |||
523 | fbinfo->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL); | ||
524 | if (!fbinfo->pseudo_palette) { | ||
525 | printk(KERN_ERR DRIVER_NAME | ||
526 | "Fail to allocate pseudo_palette\n"); | ||
527 | |||
528 | ret = -ENOMEM; | ||
529 | goto out4; | ||
530 | } | ||
531 | |||
532 | memset(fbinfo->pseudo_palette, 0, sizeof(u32) * 16); | ||
533 | |||
534 | if (fb_alloc_cmap(&fbinfo->cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) | ||
535 | < 0) { | ||
536 | printk(KERN_ERR DRIVER_NAME | ||
537 | "Fail to allocate colormap (%d entries)\n", | ||
538 | BFIN_LCD_NBR_PALETTE_ENTRIES); | ||
539 | ret = -EFAULT; | ||
540 | goto out5; | ||
541 | } | ||
542 | |||
543 | if (bfin_t350mcqb_request_ports(1)) { | ||
544 | printk(KERN_ERR DRIVER_NAME ": couldn't request gpio port.\n"); | ||
545 | ret = -EFAULT; | ||
546 | goto out6; | ||
547 | } | ||
548 | |||
549 | info->irq = platform_get_irq(pdev, 0); | ||
550 | if (info->irq < 0) { | ||
551 | ret = -EINVAL; | ||
552 | goto out7; | ||
553 | } | ||
554 | |||
555 | if (request_irq(info->irq, (void *)bfin_t350mcqb_irq_error, IRQF_DISABLED, | ||
556 | "PPI ERROR", info) < 0) { | ||
557 | printk(KERN_ERR DRIVER_NAME | ||
558 | ": unable to request PPI ERROR IRQ\n"); | ||
559 | ret = -EFAULT; | ||
560 | goto out7; | ||
561 | } | ||
562 | |||
563 | if (register_framebuffer(fbinfo) < 0) { | ||
564 | printk(KERN_ERR DRIVER_NAME | ||
565 | ": unable to register framebuffer.\n"); | ||
566 | ret = -EINVAL; | ||
567 | goto out8; | ||
568 | } | ||
569 | #ifndef NO_BL_SUPPORT | ||
570 | bl_dev = | ||
571 | backlight_device_register("bf52x-bl", NULL, NULL, | ||
572 | &bfin_lq043fb_bl_ops); | ||
573 | bl_dev->props.max_brightness = 255; | ||
574 | |||
575 | lcd_dev = lcd_device_register(DRIVER_NAME, NULL, &bfin_lcd_ops); | ||
576 | lcd_dev->props.max_contrast = 255, printk(KERN_INFO "Done.\n"); | ||
577 | #endif | ||
578 | |||
579 | return 0; | ||
580 | |||
581 | out8: | ||
582 | free_irq(info->irq, info); | ||
583 | out7: | ||
584 | bfin_t350mcqb_request_ports(0); | ||
585 | out6: | ||
586 | fb_dealloc_cmap(&fbinfo->cmap); | ||
587 | out5: | ||
588 | kfree(fbinfo->pseudo_palette); | ||
589 | out4: | ||
590 | dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, | ||
591 | info->dma_handle); | ||
592 | out3: | ||
593 | framebuffer_release(fbinfo); | ||
594 | out2: | ||
595 | free_dma(CH_PPI); | ||
596 | out1: | ||
597 | platform_set_drvdata(pdev, NULL); | ||
598 | |||
599 | return ret; | ||
600 | } | ||
601 | |||
602 | static int bfin_t350mcqb_remove(struct platform_device *pdev) | ||
603 | { | ||
604 | |||
605 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | ||
606 | struct bfin_t350mcqbfb_info *info = fbinfo->par; | ||
607 | |||
608 | free_dma(CH_PPI); | ||
609 | free_irq(info->irq, info); | ||
610 | |||
611 | if (info->fb_buffer != NULL) | ||
612 | dma_free_coherent(NULL, fbinfo->fix.smem_len, info->fb_buffer, | ||
613 | info->dma_handle); | ||
614 | |||
615 | kfree(fbinfo->pseudo_palette); | ||
616 | fb_dealloc_cmap(&fbinfo->cmap); | ||
617 | |||
618 | #ifndef NO_BL_SUPPORT | ||
619 | lcd_device_unregister(lcd_dev); | ||
620 | backlight_device_unregister(bl_dev); | ||
621 | #endif | ||
622 | |||
623 | unregister_framebuffer(fbinfo); | ||
624 | |||
625 | bfin_t350mcqb_request_ports(0); | ||
626 | |||
627 | printk(KERN_INFO DRIVER_NAME ": Unregister LCD driver.\n"); | ||
628 | |||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | #ifdef CONFIG_PM | ||
633 | static int bfin_t350mcqb_suspend(struct platform_device *pdev, pm_message_t state) | ||
634 | { | ||
635 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | ||
636 | struct bfin_t350mcqbfb_info *info = fbinfo->par; | ||
637 | |||
638 | bfin_t350mcqb_disable_ppi(); | ||
639 | disable_dma(CH_PPI); | ||
640 | bfin_write_PPI_STATUS(0xFFFF); | ||
641 | |||
642 | return 0; | ||
643 | } | ||
644 | |||
645 | static int bfin_t350mcqb_resume(struct platform_device *pdev) | ||
646 | { | ||
647 | struct fb_info *fbinfo = platform_get_drvdata(pdev); | ||
648 | struct bfin_t350mcqbfb_info *info = fbinfo->par; | ||
649 | |||
650 | enable_dma(CH_PPI); | ||
651 | bfin_t350mcqb_enable_ppi(); | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | #else | ||
656 | #define bfin_t350mcqb_suspend NULL | ||
657 | #define bfin_t350mcqb_resume NULL | ||
658 | #endif | ||
659 | |||
660 | static struct platform_driver bfin_t350mcqb_driver = { | ||
661 | .probe = bfin_t350mcqb_probe, | ||
662 | .remove = bfin_t350mcqb_remove, | ||
663 | .suspend = bfin_t350mcqb_suspend, | ||
664 | .resume = bfin_t350mcqb_resume, | ||
665 | .driver = { | ||
666 | .name = DRIVER_NAME, | ||
667 | .owner = THIS_MODULE, | ||
668 | }, | ||
669 | }; | ||
670 | |||
671 | static int __devinit bfin_t350mcqb_driver_init(void) | ||
672 | { | ||
673 | return platform_driver_register(&bfin_t350mcqb_driver); | ||
674 | } | ||
675 | |||
676 | static void __exit bfin_t350mcqb_driver_cleanup(void) | ||
677 | { | ||
678 | platform_driver_unregister(&bfin_t350mcqb_driver); | ||
679 | } | ||
680 | |||
681 | MODULE_DESCRIPTION("Blackfin TFT LCD Driver"); | ||
682 | MODULE_LICENSE("GPL"); | ||
683 | |||
684 | module_init(bfin_t350mcqb_driver_init); | ||
685 | module_exit(bfin_t350mcqb_driver_cleanup); | ||
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index 756c0ce85911..392a8be6aa76 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c | |||
@@ -403,7 +403,7 @@ static int __init hitfb_probe(struct platform_device *dev) | |||
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | 405 | ||
406 | static int __devexit hitfb_remove(struct platform_device *dev) | 406 | static int __exit hitfb_remove(struct platform_device *dev) |
407 | { | 407 | { |
408 | return unregister_framebuffer(&fb_info); | 408 | return unregister_framebuffer(&fb_info); |
409 | } | 409 | } |
@@ -439,7 +439,7 @@ static int hitfb_resume(struct platform_device *dev) | |||
439 | 439 | ||
440 | static struct platform_driver hitfb_driver = { | 440 | static struct platform_driver hitfb_driver = { |
441 | .probe = hitfb_probe, | 441 | .probe = hitfb_probe, |
442 | .remove = __devexit_p(hitfb_remove), | 442 | .remove = __exit_p(hitfb_remove), |
443 | #ifdef CONFIG_PM | 443 | #ifdef CONFIG_PM |
444 | .suspend = hitfb_suspend, | 444 | .suspend = hitfb_suspend, |
445 | .resume = hitfb_resume, | 445 | .resume = hitfb_resume, |
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c index 80cd117ca65c..01f77bcc68f9 100644 --- a/drivers/video/mbx/mbxfb.c +++ b/drivers/video/mbx/mbxfb.c | |||
@@ -889,7 +889,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev) | |||
889 | struct mbxfb_info *mfbi; | 889 | struct mbxfb_info *mfbi; |
890 | struct mbxfb_platform_data *pdata; | 890 | struct mbxfb_platform_data *pdata; |
891 | 891 | ||
892 | dev_dbg(dev, "mbxfb_probe\n"); | 892 | dev_dbg(&dev->dev, "mbxfb_probe\n"); |
893 | 893 | ||
894 | pdata = dev->dev.platform_data; | 894 | pdata = dev->dev.platform_data; |
895 | if (!pdata) { | 895 | if (!pdata) { |
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 6a3d0b574897..8c863a7f654b 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c | |||
@@ -1,16 +1,12 @@ | |||
1 | /* drivers/video/pvr2fb.c | 1 | /* |
2 | * drivers/video/pvr2fb.c | ||
2 | * | 3 | * |
3 | * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega | 4 | * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega |
4 | * Dreamcast. | 5 | * Dreamcast. |
5 | * | 6 | * |
6 | * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> | 7 | * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> |
7 | * Copyright (c) 2001, 2002, 2003, 2004, 2005 Paul Mundt <lethal@linux-sh.org> | 8 | * Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org> |
8 | * | ||
9 | * This file is part of the LinuxDC project (linuxdc.sourceforge.net). | ||
10 | * | 9 | * |
11 | */ | ||
12 | |||
13 | /* | ||
14 | * This driver is mostly based on the excellent amifb and vfb sources. It uses | 10 | * This driver is mostly based on the excellent amifb and vfb sources. It uses |
15 | * an odd scheme for converting hardware values to/from framebuffer values, | 11 | * an odd scheme for converting hardware values to/from framebuffer values, |
16 | * here are some hacked-up formulas: | 12 | * here are some hacked-up formulas: |
@@ -490,7 +486,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
490 | } else { | 486 | } else { |
491 | var->sync &= ~FB_SYNC_BROADCAST; | 487 | var->sync &= ~FB_SYNC_BROADCAST; |
492 | var->vmode &= ~FB_VMODE_INTERLACED; | 488 | var->vmode &= ~FB_VMODE_INTERLACED; |
493 | var->vmode |= pvr2_var.vmode; | 489 | var->vmode |= FB_VMODE_NONINTERLACED; |
494 | } | 490 | } |
495 | 491 | ||
496 | if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { | 492 | if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index e83dfba7e636..742b5c656d66 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
237 | 237 | ||
238 | /* check we can fit these values into the registers */ | 238 | /* check we can fit these values into the registers */ |
239 | 239 | ||
240 | if (var->hsync_len > 255 || var->vsync_len > 255) | 240 | if (var->hsync_len > 255 || var->vsync_len > 63) |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | 242 | ||
243 | if ((var->xres + var->right_margin) >= 4096) | 243 | /* hdisplay end and hsync start */ |
244 | if ((var->xres + var->right_margin) > 4096) | ||
244 | return -EINVAL; | 245 | return -EINVAL; |
245 | 246 | ||
247 | /* vdisplay end and vsync start */ | ||
246 | if ((var->yres + var->lower_margin) > 2048) | 248 | if ((var->yres + var->lower_margin) > 2048) |
247 | return -EINVAL; | 249 | return -EINVAL; |
248 | 250 | ||
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
281 | var->blue.length = var->bits_per_pixel; | 283 | var->blue.length = var->bits_per_pixel; |
282 | var->blue.offset = 0; | 284 | var->blue.offset = 0; |
283 | var->transp.length = 0; | 285 | var->transp.length = 0; |
286 | var->transp.offset = 0; | ||
284 | 287 | ||
285 | break; | 288 | break; |
286 | 289 | ||
287 | case 16: | 290 | case 16: |
288 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { | 291 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { |
289 | var->red.offset = 11; | ||
290 | var->green.offset = 5; | ||
291 | var->blue.offset = 0; | ||
292 | } else { | ||
293 | var->blue.offset = 11; | 292 | var->blue.offset = 11; |
294 | var->green.offset = 5; | 293 | var->green.offset = 5; |
295 | var->red.offset = 0; | 294 | var->red.offset = 0; |
295 | } else { | ||
296 | var->red.offset = 11; | ||
297 | var->green.offset = 5; | ||
298 | var->blue.offset = 0; | ||
296 | } | 299 | } |
300 | var->transp.offset = 0; | ||
297 | 301 | ||
298 | var->red.length = 5; | 302 | var->red.length = 5; |
299 | var->green.length = 6; | 303 | var->green.length = 6; |
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
397 | break; | 401 | break; |
398 | 402 | ||
399 | case 16: | 403 | case 16: |
400 | info->fix.visual = FB_VISUAL_DIRECTCOLOR; | 404 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
401 | break; | 405 | break; |
402 | 406 | ||
403 | case 32: | 407 | case 32: |
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info) | |||
613 | 617 | ||
614 | case 16: | 618 | case 16: |
615 | control |= SM501_DC_CRT_CONTROL_16BPP; | 619 | control |= SM501_DC_CRT_CONTROL_16BPP; |
620 | sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); | ||
616 | break; | 621 | break; |
617 | 622 | ||
618 | case 32: | 623 | case 32: |
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info) | |||
750 | 755 | ||
751 | case 16: | 756 | case 16: |
752 | control |= SM501_DC_PANEL_CONTROL_16BPP; | 757 | control |= SM501_DC_PANEL_CONTROL_16BPP; |
758 | sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); | ||
753 | break; | 759 | break; |
754 | 760 | ||
755 | case 32: | 761 | case 32: |
diff --git a/drivers/video/stifb.c b/drivers/video/stifb.c index e7c8db2eb49b..f98be301140c 100644 --- a/drivers/video/stifb.c +++ b/drivers/video/stifb.c | |||
@@ -505,16 +505,24 @@ ngleSetupAttrPlanes(struct stifb_info *fb, int BufferNumber) | |||
505 | static void | 505 | static void |
506 | rattlerSetupPlanes(struct stifb_info *fb) | 506 | rattlerSetupPlanes(struct stifb_info *fb) |
507 | { | 507 | { |
508 | int saved_id, y; | ||
509 | |||
510 | /* Write RAMDAC pixel read mask register so all overlay | ||
511 | * planes are display-enabled. (CRX24 uses Bt462 pixel | ||
512 | * read mask register for overlay planes, not image planes). | ||
513 | */ | ||
508 | CRX24_SETUP_RAMDAC(fb); | 514 | CRX24_SETUP_RAMDAC(fb); |
509 | 515 | ||
510 | /* replacement for: SETUP_FB(fb, CRX24_OVERLAY_PLANES); */ | 516 | /* change fb->id temporarily to fool SETUP_FB() */ |
511 | WRITE_WORD(0x83000300, fb, REG_14); | 517 | saved_id = fb->id; |
512 | SETUP_HW(fb); | 518 | fb->id = CRX24_OVERLAY_PLANES; |
513 | WRITE_BYTE(1, fb, REG_16b1); | 519 | SETUP_FB(fb); |
520 | fb->id = saved_id; | ||
521 | |||
522 | for (y = 0; y < fb->info.var.yres; ++y) | ||
523 | memset(fb->info.screen_base + y * fb->info.fix.line_length, | ||
524 | 0xff, fb->info.var.xres * fb->info.var.bits_per_pixel/8); | ||
514 | 525 | ||
515 | fb_memset((void*)fb->info.fix.smem_start, 0xff, | ||
516 | fb->info.var.yres*fb->info.fix.line_length); | ||
517 | |||
518 | CRX24_SET_OVLY_MASK(fb); | 526 | CRX24_SET_OVLY_MASK(fb); |
519 | SETUP_FB(fb); | 527 | SETUP_FB(fb); |
520 | } | 528 | } |
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c index 70fb4ee2b421..0a4e07d43d2d 100644 --- a/drivers/video/tridentfb.c +++ b/drivers/video/tridentfb.c | |||
@@ -564,7 +564,7 @@ static inline void write3CE(int reg, unsigned char val) | |||
564 | t_outb(val, 0x3CF); | 564 | t_outb(val, 0x3CF); |
565 | } | 565 | } |
566 | 566 | ||
567 | static inline void enable_mmio(void) | 567 | static void enable_mmio(void) |
568 | { | 568 | { |
569 | /* Goto New Mode */ | 569 | /* Goto New Mode */ |
570 | outb(0x0B, 0x3C4); | 570 | outb(0x0B, 0x3C4); |
@@ -579,6 +579,21 @@ static inline void enable_mmio(void) | |||
579 | outb(inb(0x3D5) | 0x01, 0x3D5); | 579 | outb(inb(0x3D5) | 0x01, 0x3D5); |
580 | } | 580 | } |
581 | 581 | ||
582 | static void disable_mmio(void) | ||
583 | { | ||
584 | /* Goto New Mode */ | ||
585 | t_outb(0x0B, 0x3C4); | ||
586 | t_inb(0x3C5); | ||
587 | |||
588 | /* Unprotect registers */ | ||
589 | t_outb(NewMode1, 0x3C4); | ||
590 | t_outb(0x80, 0x3C5); | ||
591 | |||
592 | /* Disable MMIO */ | ||
593 | t_outb(PCIReg, 0x3D4); | ||
594 | t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); | ||
595 | } | ||
596 | |||
582 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) | 597 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) |
583 | 598 | ||
584 | /* Return flat panel's maximum x resolution */ | 599 | /* Return flat panel's maximum x resolution */ |
@@ -730,7 +745,7 @@ static unsigned int __devinit get_memsize(void) | |||
730 | switch (tmp) { | 745 | switch (tmp) { |
731 | 746 | ||
732 | case 0x01: | 747 | case 0x01: |
733 | k = 512; | 748 | k = 512 * Kb; |
734 | break; | 749 | break; |
735 | case 0x02: | 750 | case 0x02: |
736 | k = 6 * Mb; /* XP */ | 751 | k = 6 * Mb; /* XP */ |
@@ -1239,9 +1254,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1239 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1254 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1240 | 1255 | ||
1241 | if (!default_par.io_virt) { | 1256 | if (!default_par.io_virt) { |
1242 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1243 | debug("ioremap failed\n"); | 1257 | debug("ioremap failed\n"); |
1244 | return -1; | 1258 | err = -1; |
1259 | goto out_unmap1; | ||
1245 | } | 1260 | } |
1246 | 1261 | ||
1247 | enable_mmio(); | 1262 | enable_mmio(); |
@@ -1252,25 +1267,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1252 | 1267 | ||
1253 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { | 1268 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { |
1254 | debug("request_mem_region failed!\n"); | 1269 | debug("request_mem_region failed!\n"); |
1270 | disable_mmio(); | ||
1255 | err = -1; | 1271 | err = -1; |
1256 | goto out_unmap; | 1272 | goto out_unmap1; |
1257 | } | 1273 | } |
1258 | 1274 | ||
1259 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, | 1275 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, |
1260 | tridentfb_fix.smem_len); | 1276 | tridentfb_fix.smem_len); |
1261 | 1277 | ||
1262 | if (!fb_info.screen_base) { | 1278 | if (!fb_info.screen_base) { |
1263 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1264 | debug("ioremap failed\n"); | 1279 | debug("ioremap failed\n"); |
1265 | err = -1; | 1280 | err = -1; |
1266 | goto out_unmap; | 1281 | goto out_unmap2; |
1267 | } | 1282 | } |
1268 | 1283 | ||
1269 | output("%s board found\n", pci_name(dev)); | 1284 | output("%s board found\n", pci_name(dev)); |
1270 | #if 0 | ||
1271 | output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n", | ||
1272 | tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt); | ||
1273 | #endif | ||
1274 | displaytype = get_displaytype(); | 1285 | displaytype = get_displaytype(); |
1275 | 1286 | ||
1276 | if (flatpanel) | 1287 | if (flatpanel) |
@@ -1288,9 +1299,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1288 | 1299 | ||
1289 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { | 1300 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { |
1290 | err = -EINVAL; | 1301 | err = -EINVAL; |
1291 | goto out_unmap; | 1302 | goto out_unmap2; |
1292 | } | 1303 | } |
1293 | fb_alloc_cmap(&fb_info.cmap, 256, 0); | 1304 | err = fb_alloc_cmap(&fb_info.cmap, 256, 0); |
1305 | if (err < 0) | ||
1306 | goto out_unmap2; | ||
1307 | |||
1294 | if (defaultaccel && acc) | 1308 | if (defaultaccel && acc) |
1295 | default_var.accel_flags |= FB_ACCELF_TEXT; | 1309 | default_var.accel_flags |= FB_ACCELF_TEXT; |
1296 | else | 1310 | else |
@@ -1300,19 +1314,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1300 | fb_info.device = &dev->dev; | 1314 | fb_info.device = &dev->dev; |
1301 | if (register_framebuffer(&fb_info) < 0) { | 1315 | if (register_framebuffer(&fb_info) < 0) { |
1302 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); | 1316 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); |
1317 | fb_dealloc_cmap(&fb_info.cmap); | ||
1303 | err = -EINVAL; | 1318 | err = -EINVAL; |
1304 | goto out_unmap; | 1319 | goto out_unmap2; |
1305 | } | 1320 | } |
1306 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", | 1321 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", |
1307 | fb_info.node, fb_info.fix.id, default_var.xres, | 1322 | fb_info.node, fb_info.fix.id, default_var.xres, |
1308 | default_var.yres, default_var.bits_per_pixel); | 1323 | default_var.yres, default_var.bits_per_pixel); |
1309 | return 0; | 1324 | return 0; |
1310 | 1325 | ||
1311 | out_unmap: | 1326 | out_unmap2: |
1312 | if (default_par.io_virt) | ||
1313 | iounmap(default_par.io_virt); | ||
1314 | if (fb_info.screen_base) | 1327 | if (fb_info.screen_base) |
1315 | iounmap(fb_info.screen_base); | 1328 | iounmap(fb_info.screen_base); |
1329 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1330 | disable_mmio(); | ||
1331 | out_unmap1: | ||
1332 | if (default_par.io_virt) | ||
1333 | iounmap(default_par.io_virt); | ||
1334 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1316 | return err; | 1335 | return err; |
1317 | } | 1336 | } |
1318 | 1337 | ||
@@ -1323,7 +1342,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev) | |||
1323 | iounmap(par->io_virt); | 1342 | iounmap(par->io_virt); |
1324 | iounmap(fb_info.screen_base); | 1343 | iounmap(fb_info.screen_base); |
1325 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | 1344 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); |
1326 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1345 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1327 | } | 1346 | } |
1328 | 1347 | ||
1329 | /* List of boards that we are trying to support */ | 1348 | /* List of boards that we are trying to support */ |
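The tridentfb probe rework above converts the error path to the usual reverse-order goto unwinding, with one label per acquired resource so a failure releases only what was actually obtained. A generic sketch of that pattern; acquire_*/release_* are hypothetical stand-ins for request_mem_region()/ioremap()/fb_alloc_cmap(), not tridentfb functions:

/* Hypothetical stand-ins so the sketch compiles on its own. */
static int acquire_mmio(void) { return 0; }
static int acquire_vram(void) { return 0; }
static int acquire_cmap(void) { return 0; }
static void release_vram(void) { }
static void release_mmio(void) { }

static int probe_sketch(void)
{
        int err;

        err = acquire_mmio();
        if (err)
                goto out;
        err = acquire_vram();
        if (err)
                goto out_mmio;          /* undo only what succeeded so far */
        err = acquire_cmap();
        if (err)
                goto out_vram;

        return 0;

out_vram:
        release_vram();
out_mmio:
        release_mmio();
out:
        return err;
}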
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c8a4332d1132..0b3efc31ee6d 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
@@ -152,7 +152,7 @@ static void virtballoon_changed(struct virtio_device *vdev) | |||
152 | wake_up(&vb->config_change); | 152 | wake_up(&vb->config_change); |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline int towards_target(struct virtio_balloon *vb) | 155 | static inline s64 towards_target(struct virtio_balloon *vb) |
156 | { | 156 | { |
157 | u32 v; | 157 | u32 v; |
158 | __virtio_config_val(vb->vdev, | 158 | __virtio_config_val(vb->vdev, |
@@ -176,7 +176,7 @@ static int balloon(void *_vballoon) | |||
176 | 176 | ||
177 | set_freezable(); | 177 | set_freezable(); |
178 | while (!kthread_should_stop()) { | 178 | while (!kthread_should_stop()) { |
179 | int diff; | 179 | s64 diff; |
180 | 180 | ||
181 | try_to_freeze(); | 181 | try_to_freeze(); |
182 | wait_event_interruptible(vb->config_change, | 182 | wait_event_interruptible(vb->config_change, |
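The virtio_balloon hunks above widen the page-count delta from int to s64: the configured target and the current balloon size are unsigned 32-bit values, so their difference can be negative or exceed INT_MAX and must not be truncated into a plain int. A runnable plain-C illustration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t target = 100;                  /* pages the host now wants */
        uint32_t cur = 3000000000u;             /* pages currently in the balloon */
        int64_t diff = (int64_t)target - cur;   /* -2999999900: does not fit in a 32-bit int */

        printf("%s %lld pages\n",
               diff < 0 ? "deflate by" : "inflate by",
               (long long)(diff < 0 ? -diff : diff));
        return 0;
}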
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 26f787ddd5ff..59a8f73dec73 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c | |||
@@ -177,6 +177,7 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) | |||
177 | struct virtio_pci_device *vp_dev = opaque; | 177 | struct virtio_pci_device *vp_dev = opaque; |
178 | struct virtio_pci_vq_info *info; | 178 | struct virtio_pci_vq_info *info; |
179 | irqreturn_t ret = IRQ_NONE; | 179 | irqreturn_t ret = IRQ_NONE; |
180 | unsigned long flags; | ||
180 | u8 isr; | 181 | u8 isr; |
181 | 182 | ||
182 | /* reading the ISR has the effect of also clearing it so it's very | 183 | /* reading the ISR has the effect of also clearing it so it's very |
@@ -197,12 +198,12 @@ static irqreturn_t vp_interrupt(int irq, void *opaque) | |||
197 | drv->config_changed(&vp_dev->vdev); | 198 | drv->config_changed(&vp_dev->vdev); |
198 | } | 199 | } |
199 | 200 | ||
200 | spin_lock(&vp_dev->lock); | 201 | spin_lock_irqsave(&vp_dev->lock, flags); |
201 | list_for_each_entry(info, &vp_dev->virtqueues, node) { | 202 | list_for_each_entry(info, &vp_dev->virtqueues, node) { |
202 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) | 203 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) |
203 | ret = IRQ_HANDLED; | 204 | ret = IRQ_HANDLED; |
204 | } | 205 | } |
205 | spin_unlock(&vp_dev->lock); | 206 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
206 | 207 | ||
207 | return ret; | 208 | return ret; |
208 | } | 209 | } |
@@ -214,6 +215,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
214 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 215 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
215 | struct virtio_pci_vq_info *info; | 216 | struct virtio_pci_vq_info *info; |
216 | struct virtqueue *vq; | 217 | struct virtqueue *vq; |
218 | unsigned long flags; | ||
217 | u16 num; | 219 | u16 num; |
218 | int err; | 220 | int err; |
219 | 221 | ||
@@ -255,9 +257,9 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index, | |||
255 | vq->priv = info; | 257 | vq->priv = info; |
256 | info->vq = vq; | 258 | info->vq = vq; |
257 | 259 | ||
258 | spin_lock(&vp_dev->lock); | 260 | spin_lock_irqsave(&vp_dev->lock, flags); |
259 | list_add(&info->node, &vp_dev->virtqueues); | 261 | list_add(&info->node, &vp_dev->virtqueues); |
260 | spin_unlock(&vp_dev->lock); | 262 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
261 | 263 | ||
262 | return vq; | 264 | return vq; |
263 | 265 | ||
@@ -274,10 +276,11 @@ static void vp_del_vq(struct virtqueue *vq) | |||
274 | { | 276 | { |
275 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 277 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
276 | struct virtio_pci_vq_info *info = vq->priv; | 278 | struct virtio_pci_vq_info *info = vq->priv; |
279 | unsigned long flags; | ||
277 | 280 | ||
278 | spin_lock(&vp_dev->lock); | 281 | spin_lock_irqsave(&vp_dev->lock, flags); |
279 | list_del(&info->node); | 282 | list_del(&info->node); |
280 | spin_unlock(&vp_dev->lock); | 283 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
281 | 284 | ||
282 | vring_del_virtqueue(vq); | 285 | vring_del_virtqueue(vq); |
283 | 286 | ||
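The virtio_pci hunks above switch the virtqueue-list lock to spin_lock_irqsave(): the list is also walked from vp_interrupt(), so taking the same lock in process context with interrupts enabled could deadlock against the IRQ path on the same CPU. A minimal sketch of the idiom, assuming a stand-in structure rather than struct virtio_pci_device:

#include <linux/list.h>
#include <linux/spinlock.h>

struct vq_registry {                    /* stand-in for the real per-device struct */
        spinlock_t lock;
        struct list_head queues;
};

static void vq_registry_add(struct vq_registry *r, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);     /* also fences off the local IRQ path */
        list_add(node, &r->queues);
        spin_unlock_irqrestore(&r->lock, flags);
}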
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 3a28c1382131..aa714028641e 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -232,7 +232,6 @@ static bool vring_enable_cb(struct virtqueue *_vq) | |||
232 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; | 232 | vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; |
233 | mb(); | 233 | mb(); |
234 | if (unlikely(more_used(vq))) { | 234 | if (unlikely(more_used(vq))) { |
235 | vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; | ||
236 | END_USE(vq); | 235 | END_USE(vq); |
237 | return false; | 236 | return false; |
238 | } | 237 | } |
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 688e435b4d9a..10211e493001 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | ||
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/ds1wm.h> | 22 | #include <linux/ds1wm.h> |
22 | 23 | ||
@@ -102,12 +103,12 @@ struct ds1wm_data { | |||
102 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, | 103 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, |
103 | u8 val) | 104 | u8 val) |
104 | { | 105 | { |
105 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 106 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
106 | } | 107 | } |
107 | 108 | ||
108 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) | 109 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) |
109 | { | 110 | { |
110 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 111 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
111 | } | 112 | } |
112 | 113 | ||
113 | 114 | ||
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
149 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); | 150 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); |
150 | ds1wm_data->reset_complete = NULL; | 151 | ds1wm_data->reset_complete = NULL; |
151 | if (!timeleft) { | 152 | if (!timeleft) { |
152 | dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); | 153 | dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); |
153 | return 1; | 154 | return 1; |
154 | } | 155 | } |
155 | 156 | ||
156 | /* Wait for the end of the reset. According to the specs, the time | 157 | /* Wait for the end of the reset. According to the specs, the time |
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
167 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | 168 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); |
168 | 169 | ||
169 | if (!ds1wm_data->slave_present) { | 170 | if (!ds1wm_data->slave_present) { |
170 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); | 171 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); |
171 | return 1; | 172 | return 1; |
172 | } | 173 | } |
173 | 174 | ||
174 | return 0; | 175 | return 0; |
175 | } | 176 | } |
176 | 177 | ||
177 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) | 178 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) |
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
334 | if (!pdev) | 335 | if (!pdev) |
335 | return -ENODEV; | 336 | return -ENODEV; |
336 | 337 | ||
337 | ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); | 338 | ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); |
338 | if (!ds1wm_data) | 339 | if (!ds1wm_data) |
339 | return -ENOMEM; | 340 | return -ENOMEM; |
340 | 341 | ||
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
374 | goto err1; | 375 | goto err1; |
375 | 376 | ||
376 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); | 377 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); |
377 | if (!ds1wm_data->clk) { | 378 | if (IS_ERR(ds1wm_data->clk)) { |
378 | ret = -ENOENT; | 379 | ret = PTR_ERR(ds1wm_data->clk); |
379 | goto err2; | 380 | goto err2; |
380 | } | 381 | } |
381 | 382 | ||
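The ds1wm hunk above corrects the clk_get() error check: clk_get() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the result must be tested with IS_ERR() and decoded with PTR_ERR(). A minimal sketch of the idiom (the clock id "ds1wm" is the one used in the hunk):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* clk_get() never returns NULL on failure; check with IS_ERR(). */
	static int example_get_clock(struct platform_device *pdev, struct clk **clk)
	{
		*clk = clk_get(&pdev->dev, "ds1wm");
		if (IS_ERR(*clk))
			return PTR_ERR(*clk);
		return 0;
	}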
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c index 5941ca601a3a..df72f90123df 100644 --- a/drivers/watchdog/cpu5wdt.c +++ b/drivers/watchdog/cpu5wdt.c | |||
@@ -59,9 +59,9 @@ static int ticks = 10000; | |||
59 | 59 | ||
60 | static struct { | 60 | static struct { |
61 | struct completion stop; | 61 | struct completion stop; |
62 | volatile int running; | 62 | int running; |
63 | struct timer_list timer; | 63 | struct timer_list timer; |
64 | volatile int queue; | 64 | int queue; |
65 | int default_ticks; | 65 | int default_ticks; |
66 | unsigned long inuse; | 66 | unsigned long inuse; |
67 | } cpu5wdt_device; | 67 | } cpu5wdt_device; |
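The cpu5wdt hunk above (and the identical mtx-1_wdt hunk later in this series) drops `volatile` from the `running` and `queue` fields: `volatile` gives neither atomicity nor ordering, and these fields are only touched from the timer callback and from process context already serialized through the completion and the `inuse` bit. A rough sketch of the kind of timer callback these fields serve, with the function name and re-arm interval purely illustrative:

	#include <linux/timer.h>
	#include <linux/completion.h>
	#include <linux/jiffies.h>

	/* The timer core and complete()/wait_for_completion() provide the
	 * ordering; no volatile is needed on the flags. */
	static void cpu5wdt_trigger_sketch(unsigned long unused)
	{
		if (cpu5wdt_device.running)
			ticks--;

		/* ... feed the watchdog hardware here ... */

		if (cpu5wdt_device.queue && ticks)
			mod_timer(&cpu5wdt_device.timer, jiffies + HZ / 10);
		else
			complete(&cpu5wdt_device.stop);
	}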
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index a2e174b09fe7..6483d1066b95 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -58,41 +58,6 @@ struct bios32_service_dir { | |||
58 | u8 reserved[5]; | 58 | u8 reserved[5]; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* | ||
62 | * smbios_entry_point - defines SMBIOS entry point structure | ||
63 | * | ||
64 | * anchor[4] - anchor string (_SM_) | ||
65 | * checksum - checksum of the entry point structure | ||
66 | * length - length of the entry point structure | ||
67 | * major_ver - major version (02h for revision 2.1) | ||
68 | * minor_ver - minor version (01h for revision 2.1) | ||
69 | * max_struct_size - size of the largest SMBIOS structure | ||
70 | * revision - entry point structure revision implemented | ||
71 | * formatted_area[5] - reserved | ||
72 | * intermediate_anchor[5] - intermediate anchor string (_DMI_) | ||
73 | * intermediate_checksum - intermediate checksum | ||
74 | * table_length - structure table length | ||
75 | * table_address - structure table address | ||
76 | * table_num_structs - number of SMBIOS structures present | ||
77 | * bcd_revision - BCD revision | ||
78 | */ | ||
79 | struct smbios_entry_point { | ||
80 | u8 anchor[4]; | ||
81 | u8 checksum; | ||
82 | u8 length; | ||
83 | u8 major_ver; | ||
84 | u8 minor_ver; | ||
85 | u16 max_struct_size; | ||
86 | u8 revision; | ||
87 | u8 formatted_area[5]; | ||
88 | u8 intermediate_anchor[5]; | ||
89 | u8 intermediate_checksum; | ||
90 | u16 table_length; | ||
91 | u64 table_address; | ||
92 | u16 table_num_structs; | ||
93 | u8 bcd_revision; | ||
94 | }; | ||
95 | |||
96 | /* type 212 */ | 61 | /* type 212 */ |
97 | struct smbios_cru64_info { | 62 | struct smbios_cru64_info { |
98 | u8 type; | 63 | u8 type; |
@@ -175,31 +140,13 @@ static struct pci_device_id hpwdt_devices[] = { | |||
175 | }; | 140 | }; |
176 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); | 141 | MODULE_DEVICE_TABLE(pci, hpwdt_devices); |
177 | 142 | ||
178 | /* | ||
179 | * bios_checksum | ||
180 | */ | ||
181 | static int __devinit bios_checksum(const char __iomem *ptr, int len) | ||
182 | { | ||
183 | char sum = 0; | ||
184 | int i; | ||
185 | |||
186 | /* | ||
187 | * calculate checksum of size bytes. This should add up | ||
188 | * to zero if we have a valid header. | ||
189 | */ | ||
190 | for (i = 0; i < len; i++) | ||
191 | sum += ptr[i]; | ||
192 | |||
193 | return ((sum == 0) && (len > 0)); | ||
194 | } | ||
195 | |||
196 | #ifndef CONFIG_X86_64 | 143 | #ifndef CONFIG_X86_64 |
197 | /* --32 Bit Bios------------------------------------------------------------ */ | 144 | /* --32 Bit Bios------------------------------------------------------------ */ |
198 | 145 | ||
199 | #define HPWDT_ARCH 32 | 146 | #define HPWDT_ARCH 32 |
200 | 147 | ||
201 | asmlinkage void asminline_call(struct cmn_registers *pi86Regs, | 148 | static void asminline_call(struct cmn_registers *pi86Regs, |
202 | unsigned long *pRomEntry) | 149 | unsigned long *pRomEntry) |
203 | { | 150 | { |
204 | asm("pushl %ebp \n\t" | 151 | asm("pushl %ebp \n\t" |
205 | "movl %esp, %ebp \n\t" | 152 | "movl %esp, %ebp \n\t" |
@@ -303,6 +250,24 @@ static int __devinit cru_detect(unsigned long map_entry, | |||
303 | } | 250 | } |
304 | 251 | ||
305 | /* | 252 | /* |
253 | * bios_checksum | ||
254 | */ | ||
255 | static int __devinit bios_checksum(const char __iomem *ptr, int len) | ||
256 | { | ||
257 | char sum = 0; | ||
258 | int i; | ||
259 | |||
260 | /* | ||
261 | * calculate checksum of size bytes. This should add up | ||
262 | * to zero if we have a valid header. | ||
263 | */ | ||
264 | for (i = 0; i < len; i++) | ||
265 | sum += ptr[i]; | ||
266 | |||
267 | return ((sum == 0) && (len > 0)); | ||
268 | } | ||
269 | |||
270 | /* | ||
306 | * bios32_present | 271 | * bios32_present |
307 | * | 272 | * |
308 | * Routine Description: | 273 | * Routine Description: |
@@ -368,8 +333,8 @@ static int __devinit detect_cru_service(void) | |||
368 | 333 | ||
369 | #define HPWDT_ARCH 64 | 334 | #define HPWDT_ARCH 64 |
370 | 335 | ||
371 | asmlinkage void asminline_call(struct cmn_registers *pi86Regs, | 336 | static void asminline_call(struct cmn_registers *pi86Regs, |
372 | unsigned long *pRomEntry) | 337 | unsigned long *pRomEntry) |
373 | { | 338 | { |
374 | asm("pushq %rbp \n\t" | 339 | asm("pushq %rbp \n\t" |
375 | "movq %rsp, %rbp \n\t" | 340 | "movq %rsp, %rbp \n\t" |
@@ -410,12 +375,8 @@ asmlinkage void asminline_call(struct cmn_registers *pi86Regs, | |||
410 | * dmi_find_cru | 375 | * dmi_find_cru |
411 | * | 376 | * |
412 | * Routine Description: | 377 | * Routine Description: |
413 | * This function checks wether or not a SMBIOS/DMI record is | 378 | * This function checks whether or not a SMBIOS/DMI record is |
414 | * the 64bit CRU info or not | 379 | * the 64bit CRU info or not |
415 | * | ||
416 | * Return Value: | ||
417 | * 0 : SUCCESS - if record found | ||
418 | * <0 : FAILURE - if record not found | ||
419 | */ | 380 | */ |
420 | static void __devinit dmi_find_cru(const struct dmi_header *dm) | 381 | static void __devinit dmi_find_cru(const struct dmi_header *dm) |
421 | { | 382 | { |
@@ -434,138 +395,11 @@ static void __devinit dmi_find_cru(const struct dmi_header *dm) | |||
434 | } | 395 | } |
435 | } | 396 | } |
436 | 397 | ||
437 | /* | ||
438 | * dmi_table | ||
439 | * | ||
440 | * Routine Description: | ||
441 | * Decode the SMBIOS/DMI table and check if we have a 64bit CRU record | ||
442 | * or not. | ||
443 | * | ||
444 | * We have to be cautious here. We have seen BIOSes with DMI pointers | ||
445 | * pointing to completely the wrong place for example | ||
446 | */ | ||
447 | static void __devinit dmi_table(u8 *buf, int len, int num, | ||
448 | void (*decode)(const struct dmi_header *)) | ||
449 | { | ||
450 | u8 *data = buf; | ||
451 | int i = 0; | ||
452 | |||
453 | /* | ||
454 | * Stop when we see all the items the table claimed to have | ||
455 | * OR we run off the end of the table (also happens) | ||
456 | */ | ||
457 | while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { | ||
458 | const struct dmi_header *dm = (const struct dmi_header *)data; | ||
459 | |||
460 | /* | ||
461 | * We want to know the total length (formated area and strings) | ||
462 | * before decoding to make sure we won't run off the table in | ||
463 | * dmi_decode or dmi_string | ||
464 | */ | ||
465 | data += dm->length; | ||
466 | while ((data - buf < len - 1) && (data[0] || data[1])) | ||
467 | data++; | ||
468 | if (data - buf < len - 1) | ||
469 | decode(dm); | ||
470 | data += 2; | ||
471 | i++; | ||
472 | } | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * smbios_present | ||
477 | * | ||
478 | * Routine Description: | ||
479 | * This function parses the SMBIOS entry point table to retrieve | ||
480 | * the 64 bit CRU Service. | ||
481 | * | ||
482 | * Return Value: | ||
483 | * 0 : SUCCESS | ||
484 | * <0 : FAILURE | ||
485 | */ | ||
486 | static int __devinit smbios_present(const char __iomem *p) | ||
487 | { | ||
488 | struct smbios_entry_point *eps = | ||
489 | (struct smbios_entry_point *) p; | ||
490 | int length; | ||
491 | u8 *buf; | ||
492 | |||
493 | /* check if we have indeed the SMBIOS table entry point */ | ||
494 | if ((strncmp((char *)eps->anchor, "_SM_", | ||
495 | sizeof(eps->anchor))) == 0) { | ||
496 | length = eps->length; | ||
497 | |||
498 | /* SMBIOS v2.1 implementation might use 0x1e */ | ||
499 | if ((length == 0x1e) && | ||
500 | (eps->major_ver == 2) && | ||
501 | (eps->minor_ver == 1)) | ||
502 | length = 0x1f; | ||
503 | |||
504 | /* | ||
505 | * Now we will check: | ||
506 | * - SMBIOS checksum must be 0 | ||
507 | * - intermediate anchor should be _DMI_ | ||
508 | * - intermediate checksum should be 0 | ||
509 | */ | ||
510 | if ((bios_checksum(p, length)) && | ||
511 | (strncmp((char *)eps->intermediate_anchor, "_DMI_", | ||
512 | sizeof(eps->intermediate_anchor)) == 0) && | ||
513 | (bios_checksum(p+0x10, 15))) { | ||
514 | buf = ioremap(eps->table_address, eps->table_length); | ||
515 | if (buf == NULL) | ||
516 | return -ENODEV; | ||
517 | |||
518 | |||
519 | /* Scan the DMI table for the 64 bit CRU service */ | ||
520 | dmi_table(buf, eps->table_length, | ||
521 | eps->table_num_structs, dmi_find_cru); | ||
522 | |||
523 | iounmap(buf); | ||
524 | return 0; | ||
525 | } | ||
526 | } | ||
527 | |||
528 | return -ENODEV; | ||
529 | } | ||
530 | |||
531 | static int __devinit smbios_scan_machine(void) | ||
532 | { | ||
533 | char __iomem *p, *q; | ||
534 | int rc; | ||
535 | |||
536 | if (efi_enabled) { | ||
537 | if (efi.smbios == EFI_INVALID_TABLE_ADDR) | ||
538 | return -ENODEV; | ||
539 | |||
540 | p = ioremap(efi.smbios, 32); | ||
541 | if (p == NULL) | ||
542 | return -ENOMEM; | ||
543 | |||
544 | rc = smbios_present(p); | ||
545 | iounmap(p); | ||
546 | } else { | ||
547 | /* | ||
548 | * Search from 0x0f0000 through 0x0fffff, inclusive. | ||
549 | */ | ||
550 | p = ioremap(PCI_ROM_BASE1, ROM_SIZE); | ||
551 | if (p == NULL) | ||
552 | return -ENOMEM; | ||
553 | |||
554 | for (q = p; q < p + ROM_SIZE; q += 16) { | ||
555 | rc = smbios_present(q); | ||
556 | if (!rc) { | ||
557 | break; | ||
558 | } | ||
559 | } | ||
560 | iounmap(p); | ||
561 | } | ||
562 | } | ||
563 | |||
564 | static int __devinit detect_cru_service(void) | 398 | static int __devinit detect_cru_service(void) |
565 | { | 399 | { |
566 | cru_rom_addr = NULL; | 400 | cru_rom_addr = NULL; |
567 | 401 | ||
568 | smbios_scan_machine(); /* will become dmi_walk(dmi_find_cru); */ | 402 | dmi_walk(dmi_find_cru); |
569 | 403 | ||
570 | /* if cru_rom_addr has been set then we found a CRU service */ | 404 | /* if cru_rom_addr has been set then we found a CRU service */ |
571 | return ((cru_rom_addr != NULL)? 0: -ENODEV); | 405 | return ((cru_rom_addr != NULL)? 0: -ENODEV); |
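The hpwdt hunks above delete the driver's private SMBIOS parsing (smbios_entry_point, bios_checksum's old location, dmi_table, smbios_present, smbios_scan_machine) and use the core dmi_walk() helper instead, with dmi_find_cru() as the per-record callback; asminline_call() also becomes static on both 32- and 64-bit paths. The general shape of such a callback-driven scan, as a sketch (single-argument dmi_walk() as used in the hunk; the record type 212 and the latch variable are illustrative):

	#include <linux/dmi.h>

	/* The DMI core locates and validates the SMBIOS tables; the callback
	 * only inspects each record and latches what it is looking for. */
	static void *found_record;

	static void find_type212(const struct dmi_header *dm)
	{
		if (dm->type == 212)
			found_record = (void *)dm;
	}

	static int scan_for_type212(void)
	{
		found_record = NULL;
		dmi_walk(find_type212);
		return found_record ? 0 : -ENODEV;
	}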
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c index 1b6d7d1b715d..1efcad3b6fca 100644 --- a/drivers/watchdog/it8712f_wdt.c +++ b/drivers/watchdog/it8712f_wdt.c | |||
@@ -7,7 +7,8 @@ | |||
7 | * | 7 | * |
8 | * drivers/char/watchdog/scx200_wdt.c | 8 | * drivers/char/watchdog/scx200_wdt.c |
9 | * drivers/hwmon/it87.c | 9 | * drivers/hwmon/it87.c |
10 | * IT8712F EC-LPC I/O Preliminary Specification 0.9.2.pdf | 10 | * IT8712F EC-LPC I/O Preliminary Specification 0.8.2 |
11 | * IT8712F EC-LPC I/O Preliminary Specification 0.9.3 | ||
11 | * | 12 | * |
12 | * This program is free software; you can redistribute it and/or | 13 | * This program is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU General Public License as | 14 | * modify it under the terms of the GNU General Public License as |
@@ -40,6 +41,7 @@ MODULE_DESCRIPTION("IT8712F Watchdog Driver"); | |||
40 | MODULE_LICENSE("GPL"); | 41 | MODULE_LICENSE("GPL"); |
41 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); | 42 | MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); |
42 | 43 | ||
44 | static int max_units = 255; | ||
43 | static int margin = 60; /* in seconds */ | 45 | static int margin = 60; /* in seconds */ |
44 | module_param(margin, int, 0); | 46 | module_param(margin, int, 0); |
45 | MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); | 47 | MODULE_PARM_DESC(margin, "Watchdog margin in seconds"); |
@@ -51,6 +53,7 @@ MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close"); | |||
51 | static struct semaphore it8712f_wdt_sem; | 53 | static struct semaphore it8712f_wdt_sem; |
52 | static unsigned expect_close; | 54 | static unsigned expect_close; |
53 | static spinlock_t io_lock; | 55 | static spinlock_t io_lock; |
56 | static unsigned char revision; | ||
54 | 57 | ||
55 | /* Dog Food address - We use the game port address */ | 58 | /* Dog Food address - We use the game port address */ |
56 | static unsigned short address; | 59 | static unsigned short address; |
@@ -108,6 +111,15 @@ superio_inw(int reg) | |||
108 | return val; | 111 | return val; |
109 | } | 112 | } |
110 | 113 | ||
114 | static void | ||
115 | superio_outw(int val, int reg) | ||
116 | { | ||
117 | outb(reg++, REG); | ||
118 | outb((val >> 8) & 0xff, VAL); | ||
119 | outb(reg, REG); | ||
120 | outb(val & 0xff, VAL); | ||
121 | } | ||
122 | |||
111 | static inline void | 123 | static inline void |
112 | superio_select(int ldn) | 124 | superio_select(int ldn) |
113 | { | 125 | { |
@@ -143,15 +155,33 @@ static void | |||
143 | it8712f_wdt_update_margin(void) | 155 | it8712f_wdt_update_margin(void) |
144 | { | 156 | { |
145 | int config = WDT_OUT_KRST | WDT_OUT_PWROK; | 157 | int config = WDT_OUT_KRST | WDT_OUT_PWROK; |
146 | 158 | int units = margin; | |
147 | printk(KERN_INFO NAME ": timer margin %d seconds\n", margin); | 159 | |
148 | 160 | /* Switch to minutes precision if the configured margin | |
149 | /* The timeout register only has 8bits wide */ | 161 | * value does not fit within the register width. |
150 | if (margin < 256) | 162 | */ |
151 | config |= WDT_UNIT_SEC; /* else UNIT are MINUTES */ | 163 | if (units <= max_units) { |
164 | config |= WDT_UNIT_SEC; /* else UNIT is MINUTES */ | ||
165 | printk(KERN_INFO NAME ": timer margin %d seconds\n", units); | ||
166 | } else { | ||
167 | units /= 60; | ||
168 | printk(KERN_INFO NAME ": timer margin %d minutes\n", units); | ||
169 | } | ||
152 | superio_outb(config, WDT_CONFIG); | 170 | superio_outb(config, WDT_CONFIG); |
153 | 171 | ||
154 | superio_outb((margin > 255) ? (margin / 60) : margin, WDT_TIMEOUT); | 172 | if (revision >= 0x08) |
173 | superio_outw(units, WDT_TIMEOUT); | ||
174 | else | ||
175 | superio_outb(units, WDT_TIMEOUT); | ||
176 | } | ||
177 | |||
178 | static int | ||
179 | it8712f_wdt_get_status(void) | ||
180 | { | ||
181 | if (superio_inb(WDT_CONTROL) & 0x01) | ||
182 | return WDIOF_CARDRESET; | ||
183 | else | ||
184 | return 0; | ||
155 | } | 185 | } |
156 | 186 | ||
157 | static void | 187 | static void |
@@ -234,7 +264,7 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file, | |||
234 | .firmware_version = 1, | 264 | .firmware_version = 1, |
235 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, | 265 | .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, |
236 | }; | 266 | }; |
237 | int new_margin; | 267 | int value; |
238 | 268 | ||
239 | switch (cmd) { | 269 | switch (cmd) { |
240 | default: | 270 | default: |
@@ -244,17 +274,27 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file, | |||
244 | return -EFAULT; | 274 | return -EFAULT; |
245 | return 0; | 275 | return 0; |
246 | case WDIOC_GETSTATUS: | 276 | case WDIOC_GETSTATUS: |
277 | superio_enter(); | ||
278 | superio_select(LDN_GPIO); | ||
279 | |||
280 | value = it8712f_wdt_get_status(); | ||
281 | |||
282 | superio_exit(); | ||
283 | |||
284 | return put_user(value, p); | ||
247 | case WDIOC_GETBOOTSTATUS: | 285 | case WDIOC_GETBOOTSTATUS: |
248 | return put_user(0, p); | 286 | return put_user(0, p); |
249 | case WDIOC_KEEPALIVE: | 287 | case WDIOC_KEEPALIVE: |
250 | it8712f_wdt_ping(); | 288 | it8712f_wdt_ping(); |
251 | return 0; | 289 | return 0; |
252 | case WDIOC_SETTIMEOUT: | 290 | case WDIOC_SETTIMEOUT: |
253 | if (get_user(new_margin, p)) | 291 | if (get_user(value, p)) |
254 | return -EFAULT; | 292 | return -EFAULT; |
255 | if (new_margin < 1) | 293 | if (value < 1) |
294 | return -EINVAL; | ||
295 | if (value > (max_units * 60)) | ||
256 | return -EINVAL; | 296 | return -EINVAL; |
257 | margin = new_margin; | 297 | margin = value; |
258 | superio_enter(); | 298 | superio_enter(); |
259 | superio_select(LDN_GPIO); | 299 | superio_select(LDN_GPIO); |
260 | 300 | ||
@@ -262,6 +302,7 @@ it8712f_wdt_ioctl(struct inode *inode, struct file *file, | |||
262 | 302 | ||
263 | superio_exit(); | 303 | superio_exit(); |
264 | it8712f_wdt_ping(); | 304 | it8712f_wdt_ping(); |
305 | /* Fall through */ | ||
265 | case WDIOC_GETTIMEOUT: | 306 | case WDIOC_GETTIMEOUT: |
266 | if (put_user(margin, p)) | 307 | if (put_user(margin, p)) |
267 | return -EFAULT; | 308 | return -EFAULT; |
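The two it8712f_wdt ioctl hunks above add a real WDIOC_GETSTATUS handler (reading the card-reset bit via it8712f_wdt_get_status() inside a superio_enter()/superio_exit() pair) and document that WDIOC_SETTIMEOUT deliberately falls through to WDIOC_GETTIMEOUT, so userspace reads back the margin actually programmed. A compact sketch of that set-then-report shape (margin and max_units are the driver globals from the hunks above; the helper name is illustrative):

	#include <linux/watchdog.h>
	#include <asm/uaccess.h>

	static int wdt_set_or_get_timeout(unsigned int cmd, int __user *p)
	{
		int value;

		switch (cmd) {
		case WDIOC_SETTIMEOUT:
			if (get_user(value, p))
				return -EFAULT;
			if (value < 1 || value > max_units * 60)
				return -EINVAL;
			margin = value;
			/* reprogram the hardware and ping the dog here */
			/* Fall through */
		case WDIOC_GETTIMEOUT:
			return put_user(margin, p) ? -EFAULT : 0;
		default:
			return -ENOTTY;
		}
	}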
@@ -336,9 +377,18 @@ it8712f_wdt_find(unsigned short *address) | |||
336 | } | 377 | } |
337 | 378 | ||
338 | err = 0; | 379 | err = 0; |
339 | printk(KERN_DEBUG NAME ": Found IT%04xF chip revision %d - " | 380 | revision = superio_inb(DEVREV) & 0x0f; |
381 | |||
382 | /* Later revisions have 16-bit values per datasheet 0.9.1 */ | ||
383 | if (revision >= 0x08) | ||
384 | max_units = 65535; | ||
385 | |||
386 | if (margin > (max_units * 60)) | ||
387 | margin = (max_units * 60); | ||
388 | |||
389 | printk(KERN_INFO NAME ": Found IT%04xF chip revision %d - " | ||
340 | "using DogFood address 0x%x\n", | 390 | "using DogFood address 0x%x\n", |
341 | chip_type, superio_inb(DEVREV) & 0x0f, *address); | 391 | chip_type, revision, *address); |
342 | 392 | ||
343 | exit: | 393 | exit: |
344 | superio_exit(); | 394 | superio_exit(); |
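Taken together, the remaining it8712f_wdt hunks read the chip revision once at detection time (DEVREV & 0x0f), widen max_units to 65535 on revisions 0x08 and later, clamp the margin module parameter to max_units * 60, and promote the detection message from KERN_DEBUG to KERN_INFO. The resulting margin programming, paraphrased from the hunks above as a sketch (all identifiers are the driver's own):

	static void wdt_update_margin_sketch(void)
	{
		int config = WDT_OUT_KRST | WDT_OUT_PWROK;
		int units = margin;

		if (units <= max_units)
			config |= WDT_UNIT_SEC;	/* seconds fit the register */
		else
			units /= 60;		/* fall back to minutes */

		superio_outb(config, WDT_CONFIG);

		if (revision >= 0x08)
			superio_outw(units, WDT_TIMEOUT);	/* 16-bit timeout */
		else
			superio_outb(units, WDT_TIMEOUT);
	}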
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index e6e07b4575eb..6905135a776c 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c | |||
@@ -141,7 +141,7 @@ static unsigned long next_heartbeat = 0; | |||
141 | #ifndef ZF_DEBUG | 141 | #ifndef ZF_DEBUG |
142 | # define dprintk(format, args...) | 142 | # define dprintk(format, args...) |
143 | #else | 143 | #else |
144 | # define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __FUNCTION__, __LINE__ , ## args) | 144 | # define dprintk(format, args...) printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__ , ## args) |
145 | #endif | 145 | #endif |
146 | 146 | ||
147 | 147 | ||
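This machzwd hunk, like the pcwd_usb, s3c2410_wdt and shwdt hunks below, makes the same mechanical substitution: the GCC-specific __FUNCTION__ is replaced by the C99-standard __func__ predefined identifier. A one-line illustration of the debug-print macro after the change (PFX is whatever prefix the driver defines):

	/* __func__ expands to the enclosing function name per C99 6.4.2.2;
	 * __FUNCTION__ is the older GCC-only spelling of the same thing. */
	#define dprintk(format, args...) \
		printk(KERN_DEBUG PFX ":%s:%d: " format, __func__, __LINE__, ## args)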
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 789831b3fa00..10b89f2703bd 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c | |||
@@ -59,9 +59,9 @@ static int ticks = 100 * HZ; | |||
59 | 59 | ||
60 | static struct { | 60 | static struct { |
61 | struct completion stop; | 61 | struct completion stop; |
62 | volatile int running; | 62 | int running; |
63 | struct timer_list timer; | 63 | struct timer_list timer; |
64 | volatile int queue; | 64 | int queue; |
65 | int default_ticks; | 65 | int default_ticks; |
66 | unsigned long inuse; | 66 | unsigned long inuse; |
67 | unsigned gpio; | 67 | unsigned gpio; |
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 0f3fd6c9c354..bf443d077a1e 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c | |||
@@ -179,11 +179,11 @@ static void usb_pcwd_intr_done(struct urb *urb) | |||
179 | case -ENOENT: | 179 | case -ENOENT: |
180 | case -ESHUTDOWN: | 180 | case -ESHUTDOWN: |
181 | /* this urb is terminated, clean up */ | 181 | /* this urb is terminated, clean up */ |
182 | dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status); | 182 | dbg("%s - urb shutting down with status: %d", __func__, urb->status); |
183 | return; | 183 | return; |
184 | /* -EPIPE: should clear the halt */ | 184 | /* -EPIPE: should clear the halt */ |
185 | default: /* error */ | 185 | default: /* error */ |
186 | dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status); | 186 | dbg("%s - nonzero urb status received: %d", __func__, urb->status); |
187 | goto resubmit; | 187 | goto resubmit; |
188 | } | 188 | } |
189 | 189 | ||
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 5d1c15f83d23..7645e8812156 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c | |||
@@ -144,7 +144,7 @@ static int s3c2410wdt_start(void) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n", | 146 | DBG("%s: wdt_count=0x%08x, wtcon=%08lx\n", |
147 | __FUNCTION__, wdt_count, wtcon); | 147 | __func__, wdt_count, wtcon); |
148 | 148 | ||
149 | writel(wdt_count, wdt_base + S3C2410_WTDAT); | 149 | writel(wdt_count, wdt_base + S3C2410_WTDAT); |
150 | writel(wdt_count, wdt_base + S3C2410_WTCNT); | 150 | writel(wdt_count, wdt_base + S3C2410_WTCNT); |
@@ -167,7 +167,7 @@ static int s3c2410wdt_set_heartbeat(int timeout) | |||
167 | count = timeout * freq; | 167 | count = timeout * freq; |
168 | 168 | ||
169 | DBG("%s: count=%d, timeout=%d, freq=%d\n", | 169 | DBG("%s: count=%d, timeout=%d, freq=%d\n", |
170 | __FUNCTION__, count, timeout, freq); | 170 | __func__, count, timeout, freq); |
171 | 171 | ||
172 | /* if the count is bigger than the watchdog register, | 172 | /* if the count is bigger than the watchdog register, |
173 | then work out what we need to do (and if) we can | 173 | then work out what we need to do (and if) we can |
@@ -189,7 +189,7 @@ static int s3c2410wdt_set_heartbeat(int timeout) | |||
189 | tmr_margin = timeout; | 189 | tmr_margin = timeout; |
190 | 190 | ||
191 | DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n", | 191 | DBG("%s: timeout=%d, divisor=%d, count=%d (%08x)\n", |
192 | __FUNCTION__, timeout, divisor, count, count/divisor); | 192 | __func__, timeout, divisor, count, count/divisor); |
193 | 193 | ||
194 | count /= divisor; | 194 | count /= divisor; |
195 | wdt_count = count; | 195 | wdt_count = count; |
@@ -355,7 +355,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev) | |||
355 | int ret; | 355 | int ret; |
356 | int size; | 356 | int size; |
357 | 357 | ||
358 | DBG("%s: probe=%p\n", __FUNCTION__, pdev); | 358 | DBG("%s: probe=%p\n", __func__, pdev); |
359 | 359 | ||
360 | dev = &pdev->dev; | 360 | dev = &pdev->dev; |
361 | wdt_dev = &pdev->dev; | 361 | wdt_dev = &pdev->dev; |
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index 61dde863bd40..1277f7e9cc54 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c | |||
@@ -298,7 +298,7 @@ static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma) | |||
298 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, | 298 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, |
299 | PAGE_SIZE, vma->vm_page_prot)) { | 299 | PAGE_SIZE, vma->vm_page_prot)) { |
300 | printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n", | 300 | printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n", |
301 | __FUNCTION__); | 301 | __func__); |
302 | return -EAGAIN; | 302 | return -EAGAIN; |
303 | } | 303 | } |
304 | 304 | ||