Diffstat (limited to 'drivers')
110 files changed, 3462 insertions, 1327 deletions
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
index 28a5fbc6aa1a..93d80a1c36f9 100644
--- a/drivers/acorn/char/defkeymap-l7200.c
+++ b/drivers/acorn/char/defkeymap-l7200.c
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
347 | }; | 347 | }; |
348 | 348 | ||
349 | struct kbdiacruc accent_table[MAX_DIACR] = { | 349 | struct kbdiacruc accent_table[MAX_DIACR] = { |
350 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 350 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
351 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 351 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
352 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 352 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
353 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 353 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
354 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 354 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
355 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 355 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
356 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 356 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
357 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 357 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
358 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 358 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
359 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 359 | {',', 'C', 0307}, {',', 'c', 0347}, |
360 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 360 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
361 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 361 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
362 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 362 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
363 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 363 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
364 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 364 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
365 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 365 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
366 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 366 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
367 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 367 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
368 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 368 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
369 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 369 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
370 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 370 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
371 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 371 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
372 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 372 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
373 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 373 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
374 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 374 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
375 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 375 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
376 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 376 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
377 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 377 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
378 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 378 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
379 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 379 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
380 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 380 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
381 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 381 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
382 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 382 | {'s', 's', 0337}, {'"', 'y', 0377}, |
383 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 383 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
384 | }; | 384 | }; |
385 | 385 | ||
386 | unsigned int accent_table_size = 68; | 386 | unsigned int accent_table_size = 68; |
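The hunk above swaps octal character escapes such as '\300' for plain octal integers (0300) in the last field of struct kbdiacruc, presumably because that field is a full unsigned int and a high-bit character constant can be negative on signed-char targets. A minimal standalone C sketch of the difference (not kernel code, just the arithmetic):

	#include <stdio.h>

	/* With a signed 8-bit char, '\300' is -64, so storing it in an unsigned
	 * int sign-extends to 0xffffffc0, whereas the octal constant 0300 is
	 * always 192 (0xc0). */
	int main(void)
	{
		char escaped = '\300';              /* may be negative */
		unsigned int from_escape = escaped; /* sign-extended on signed-char ABIs */
		unsigned int from_octal = 0300;     /* always 0xc0 */

		printf("0x%x 0x%x\n", from_escape, from_octal);
		return 0;
	}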
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fbc24358ada0..4fbcce758b04 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -113,7 +113,7 @@ int atapi_enabled = 1; | |||
113 | module_param(atapi_enabled, int, 0444); | 113 | module_param(atapi_enabled, int, 0444); |
114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); |
115 | 115 | ||
116 | int atapi_dmadir = 0; | 116 | static int atapi_dmadir = 0; |
117 | module_param(atapi_dmadir, int, 0444); | 117 | module_param(atapi_dmadir, int, 0444); |
118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); |
119 | 119 | ||
@@ -6567,6 +6567,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6567 | ata_lpm_enable(host); | 6567 | ata_lpm_enable(host); |
6568 | 6568 | ||
6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
6570 | if (rc == 0) | ||
6571 | host->dev->power.power_state = mesg; | ||
6570 | return rc; | 6572 | return rc; |
6571 | } | 6573 | } |
6572 | 6574 | ||
@@ -6585,6 +6587,7 @@ void ata_host_resume(struct ata_host *host) | |||
6585 | { | 6587 | { |
6586 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 6588 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, |
6587 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 6589 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6590 | host->dev->power.power_state = PMSG_ON; | ||
6588 | 6591 | ||
6589 | /* reenable link pm */ | 6592 | /* reenable link pm */ |
6590 | ata_lpm_disable(host); | 6593 | ata_lpm_disable(host); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0562b0a49f3b..8f0e8f2bc628 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, | |||
862 | struct request_queue *q = sdev->request_queue; | 862 | struct request_queue *q = sdev->request_queue; |
863 | void *buf; | 863 | void *buf; |
864 | 864 | ||
865 | /* set the min alignment */ | 865 | /* set the min alignment and padding */ |
866 | blk_queue_update_dma_alignment(sdev->request_queue, | 866 | blk_queue_update_dma_alignment(sdev->request_queue, |
867 | ATA_DMA_PAD_SZ - 1); | 867 | ATA_DMA_PAD_SZ - 1); |
868 | blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); | ||
868 | 869 | ||
869 | /* configure draining */ | 870 | /* configure draining */ |
870 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); | 871 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); |
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
1694 | u8 *rbuf; | 1695 | u8 *rbuf; |
1695 | unsigned int buflen, rc; | 1696 | unsigned int buflen, rc; |
1696 | struct scsi_cmnd *cmd = args->cmd; | 1697 | struct scsi_cmnd *cmd = args->cmd; |
1698 | unsigned long flags; | ||
1699 | |||
1700 | local_irq_save(flags); | ||
1697 | 1701 | ||
1698 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); | 1702 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); |
1699 | memset(rbuf, 0, buflen); | 1703 | memset(rbuf, 0, buflen); |
1700 | rc = actor(args, rbuf, buflen); | 1704 | rc = actor(args, rbuf, buflen); |
1701 | ata_scsi_rbuf_put(cmd, rbuf); | 1705 | ata_scsi_rbuf_put(cmd, rbuf); |
1702 | 1706 | ||
1707 | local_irq_restore(flags); | ||
1708 | |||
1703 | if (rc == 0) | 1709 | if (rc == 0) |
1704 | cmd->result = SAM_STAT_GOOD; | 1710 | cmd->result = SAM_STAT_GOOD; |
1705 | args->done(cmd); | 1711 | args->done(cmd); |
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2473 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { | 2479 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { |
2474 | u8 *buf = NULL; | 2480 | u8 *buf = NULL; |
2475 | unsigned int buflen; | 2481 | unsigned int buflen; |
2482 | unsigned long flags; | ||
2483 | |||
2484 | local_irq_save(flags); | ||
2476 | 2485 | ||
2477 | buflen = ata_scsi_rbuf_get(cmd, &buf); | 2486 | buflen = ata_scsi_rbuf_get(cmd, &buf); |
2478 | 2487 | ||
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2490 | } | 2499 | } |
2491 | 2500 | ||
2492 | ata_scsi_rbuf_put(cmd, buf); | 2501 | ata_scsi_rbuf_put(cmd, buf); |
2502 | |||
2503 | local_irq_restore(flags); | ||
2493 | } | 2504 | } |
2494 | 2505 | ||
2495 | cmd->result = SAM_STAT_GOOD; | 2506 | cmd->result = SAM_STAT_GOOD; |
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2528 | } | 2539 | } |
2529 | 2540 | ||
2530 | qc->tf.command = ATA_CMD_PACKET; | 2541 | qc->tf.command = ATA_CMD_PACKET; |
2531 | qc->nbytes = scsi_bufflen(scmd); | 2542 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2532 | 2543 | ||
2533 | /* check whether ATAPI DMA is safe */ | 2544 | /* check whether ATAPI DMA is safe */ |
2534 | if (!using_pio && ata_check_atapi_dma(qc)) | 2545 | if (!using_pio && ata_check_atapi_dma(qc)) |
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2539 | * want to set it properly, and for DMA where it is | 2550 | * want to set it properly, and for DMA where it is |
2540 | * effectively meaningless. | 2551 | * effectively meaningless. |
2541 | */ | 2552 | */ |
2542 | nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); | 2553 | nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); |
2543 | 2554 | ||
2544 | /* Most ATAPI devices which honor transfer chunk size don't | 2555 | /* Most ATAPI devices which honor transfer chunk size don't |
2545 | * behave according to the spec when odd chunk size which | 2556 | * behave according to the spec when odd chunk size which |
@@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2865 | * TODO: find out if we need to do more here to | 2876 | * TODO: find out if we need to do more here to |
2866 | * cover scatter/gather case. | 2877 | * cover scatter/gather case. |
2867 | */ | 2878 | */ |
2868 | qc->nbytes = scsi_bufflen(scmd); | 2879 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2869 | 2880 | ||
2870 | /* request result TF and be quiet about device error */ | 2881 | /* request result TF and be quiet about device error */ |
2871 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | 2882 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; |
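The hunks above pair blk_queue_dma_pad() with adding scmd->request->extra_len back into qc->nbytes, so the ATAPI byte count covers the block layer's DMA padding as well as the payload. A rough userspace sketch of that arithmetic, taking ATA_DMA_PAD_SZ as 4 (its usual libata value); the constant and the exact rounding rule are assumptions, not something this diff states:

	#include <stdio.h>

	/* Assumed pad accounting: the queue rounds transfer lengths up to a
	 * multiple of the pad size, records the difference (extra_len here),
	 * and the ATAPI path adds it back into the byte count. */
	#define ATA_DMA_PAD_SZ 4u
	#define DMA_PAD_MASK   (ATA_DMA_PAD_SZ - 1)  /* value given to blk_queue_dma_pad() */

	int main(void)
	{
		unsigned int bufflen = 510;                            /* scsi_bufflen() */
		unsigned int padded = (bufflen + DMA_PAD_MASK) & ~DMA_PAD_MASK;
		unsigned int extra_len = padded - bufflen;             /* 2 */

		printf("nbytes = %u + %u = %u\n", bufflen, extra_len, bufflen + extra_len);
		return 0;
	}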
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6036dedfe377..aa884f71a12a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -56,7 +56,6 @@ enum { | |||
56 | extern unsigned int ata_print_id; | 56 | extern unsigned int ata_print_id; |
57 | extern struct workqueue_struct *ata_aux_wq; | 57 | extern struct workqueue_struct *ata_aux_wq; |
58 | extern int atapi_enabled; | 58 | extern int atapi_enabled; |
59 | extern int atapi_dmadir; | ||
60 | extern int atapi_passthru16; | 59 | extern int atapi_passthru16; |
61 | extern int libata_fua; | 60 | extern int libata_fua; |
62 | extern int libata_noacpi; | 61 | extern int libata_noacpi; |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 69f651e0bc98..840d1c4a7850 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -45,6 +45,8 @@ | |||
45 | #include <linux/interrupt.h> | 45 | #include <linux/interrupt.h> |
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <scsi/scsi_host.h> | 47 | #include <scsi/scsi_host.h> |
48 | #include <scsi/scsi_cmnd.h> | ||
49 | #include <scsi/scsi.h> | ||
48 | #include <linux/libata.h> | 50 | #include <linux/libata.h> |
49 | 51 | ||
50 | #ifdef CONFIG_PPC_OF | 52 | #ifdef CONFIG_PPC_OF |
@@ -59,6 +61,7 @@ enum { | |||
59 | /* ap->flags bits */ | 61 | /* ap->flags bits */ |
60 | K2_FLAG_SATA_8_PORTS = (1 << 24), | 62 | K2_FLAG_SATA_8_PORTS = (1 << 24), |
61 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), | 63 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), |
64 | K2_FLAG_BAR_POS_3 = (1 << 26), | ||
62 | 65 | ||
63 | /* Taskfile registers offsets */ | 66 | /* Taskfile registers offsets */ |
64 | K2_SATA_TF_CMD_OFFSET = 0x00, | 67 | K2_SATA_TF_CMD_OFFSET = 0x00, |
@@ -88,8 +91,10 @@ enum { | |||
88 | /* Port stride */ | 91 | /* Port stride */ |
89 | K2_SATA_PORT_OFFSET = 0x100, | 92 | K2_SATA_PORT_OFFSET = 0x100, |
90 | 93 | ||
91 | board_svw4 = 0, | 94 | chip_svw4 = 0, |
92 | board_svw8 = 1, | 95 | chip_svw8 = 1, |
96 | chip_svw42 = 2, /* bar 3 */ | ||
97 | chip_svw43 = 3, /* bar 5 */ | ||
93 | }; | 98 | }; |
94 | 99 | ||
95 | static u8 k2_stat_check_status(struct ata_port *ap); | 100 | static u8 k2_stat_check_status(struct ata_port *ap); |
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap); | |||
97 | 102 | ||
98 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) | 103 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) |
99 | { | 104 | { |
105 | u8 cmnd = qc->scsicmd->cmnd[0]; | ||
106 | |||
100 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) | 107 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) |
101 | return -1; /* ATAPI DMA not supported */ | 108 | return -1; /* ATAPI DMA not supported */ |
109 | else { | ||
110 | switch (cmnd) { | ||
111 | case READ_10: | ||
112 | case READ_12: | ||
113 | case READ_16: | ||
114 | case WRITE_10: | ||
115 | case WRITE_12: | ||
116 | case WRITE_16: | ||
117 | return 0; | ||
118 | |||
119 | default: | ||
120 | return -1; | ||
121 | } | ||
102 | 122 | ||
103 | return 0; | 123 | } |
104 | } | 124 | } |
105 | 125 | ||
106 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | 126 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = { | |||
354 | }; | 374 | }; |
355 | 375 | ||
356 | static const struct ata_port_info k2_port_info[] = { | 376 | static const struct ata_port_info k2_port_info[] = { |
357 | /* board_svw4 */ | 377 | /* chip_svw4 */ |
358 | { | 378 | { |
359 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 379 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
360 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, | 380 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, |
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = { | |||
363 | .udma_mask = ATA_UDMA6, | 383 | .udma_mask = ATA_UDMA6, |
364 | .port_ops = &k2_sata_ops, | 384 | .port_ops = &k2_sata_ops, |
365 | }, | 385 | }, |
366 | /* board_svw8 */ | 386 | /* chip_svw8 */ |
367 | { | 387 | { |
368 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 388 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
369 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | | 389 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = { | |||
373 | .udma_mask = ATA_UDMA6, | 393 | .udma_mask = ATA_UDMA6, |
374 | .port_ops = &k2_sata_ops, | 394 | .port_ops = &k2_sata_ops, |
375 | }, | 395 | }, |
396 | /* chip_svw42 */ | ||
397 | { | ||
398 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
399 | ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, | ||
400 | .pio_mask = 0x1f, | ||
401 | .mwdma_mask = 0x07, | ||
402 | .udma_mask = ATA_UDMA6, | ||
403 | .port_ops = &k2_sata_ops, | ||
404 | }, | ||
405 | /* chip_svw43 */ | ||
406 | { | ||
407 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
408 | ATA_FLAG_MMIO, | ||
409 | .pio_mask = 0x1f, | ||
410 | .mwdma_mask = 0x07, | ||
411 | .udma_mask = ATA_UDMA6, | ||
412 | .port_ops = &k2_sata_ops, | ||
413 | }, | ||
376 | }; | 414 | }; |
377 | 415 | ||
378 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) | 416 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) |
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
402 | { &k2_port_info[ent->driver_data], NULL }; | 440 | { &k2_port_info[ent->driver_data], NULL }; |
403 | struct ata_host *host; | 441 | struct ata_host *host; |
404 | void __iomem *mmio_base; | 442 | void __iomem *mmio_base; |
405 | int n_ports, i, rc; | 443 | int n_ports, i, rc, bar_pos; |
406 | 444 | ||
407 | if (!printed_version++) | 445 | if (!printed_version++) |
408 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 446 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
416 | if (!host) | 454 | if (!host) |
417 | return -ENOMEM; | 455 | return -ENOMEM; |
418 | 456 | ||
457 | bar_pos = 5; | ||
458 | if (ppi[0]->flags & K2_FLAG_BAR_POS_3) | ||
459 | bar_pos = 3; | ||
419 | /* | 460 | /* |
420 | * If this driver happens to only be useful on Apple's K2, then | 461 | * If this driver happens to only be useful on Apple's K2, then |
421 | * we should check that here as it has a normal Serverworks ID | 462 | * we should check that here as it has a normal Serverworks ID |
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
428 | * Check if we have resources mapped at all (second function may | 469 | * Check if we have resources mapped at all (second function may |
429 | * have been disabled by firmware) | 470 | * have been disabled by firmware) |
430 | */ | 471 | */ |
431 | if (pci_resource_len(pdev, 5) == 0) | 472 | if (pci_resource_len(pdev, bar_pos) == 0) { |
473 | /* In IDE mode we need to pin the device to ensure that | ||
474 | pcim_release does not clear the busmaster bit in config | ||
475 | space, clearing causes busmaster DMA to fail on | ||
476 | ports 3 & 4 */ | ||
477 | pcim_pin_device(pdev); | ||
432 | return -ENODEV; | 478 | return -ENODEV; |
479 | } | ||
433 | 480 | ||
434 | /* Request and iomap PCI regions */ | 481 | /* Request and iomap PCI regions */ |
435 | rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); | 482 | rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); |
436 | if (rc == -EBUSY) | 483 | if (rc == -EBUSY) |
437 | pcim_pin_device(pdev); | 484 | pcim_pin_device(pdev); |
438 | if (rc) | 485 | if (rc) |
439 | return rc; | 486 | return rc; |
440 | host->iomap = pcim_iomap_table(pdev); | 487 | host->iomap = pcim_iomap_table(pdev); |
441 | mmio_base = host->iomap[5]; | 488 | mmio_base = host->iomap[bar_pos]; |
442 | 489 | ||
443 | /* different controllers have different number of ports - currently 4 or 8 */ | 490 | /* different controllers have different number of ports - currently 4 or 8 */ |
444 | /* All ports are on the same function. Multi-function device is no | 491 | /* All ports are on the same function. Multi-function device is no |
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
483 | * controller | 530 | * controller |
484 | * */ | 531 | * */ |
485 | static const struct pci_device_id k2_sata_pci_tbl[] = { | 532 | static const struct pci_device_id k2_sata_pci_tbl[] = { |
486 | { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, | 533 | { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, |
487 | { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, | 534 | { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 }, |
488 | { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, | 535 | { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 }, |
489 | { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, | 536 | { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, |
490 | { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, | 537 | { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, |
538 | { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, | ||
539 | { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, | ||
491 | 540 | ||
492 | { } | 541 | { } |
493 | }; | 542 | }; |
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9c0070b5bd3e..7de543d1d0b4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) | 621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
622 | { | 622 | { |
623 | /* see if we live in a "glue" directory */ | 623 | /* see if we live in a "glue" directory */ |
624 | if (!dev->class || glue_dir->kset != &dev->class->class_dirs) | 624 | if (!glue_dir || !dev->class || |
625 | glue_dir->kset != &dev->class->class_dirs) | ||
625 | return; | 626 | return; |
626 | 627 | ||
627 | kobject_put(glue_dir); | 628 | kobject_put(glue_dir); |
@@ -770,17 +771,10 @@ int device_add(struct device *dev) | |||
770 | struct class_interface *class_intf; | 771 | struct class_interface *class_intf; |
771 | int error; | 772 | int error; |
772 | 773 | ||
773 | error = pm_sleep_lock(); | ||
774 | if (error) { | ||
775 | dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__); | ||
776 | dump_stack(); | ||
777 | return error; | ||
778 | } | ||
779 | |||
780 | dev = get_device(dev); | 774 | dev = get_device(dev); |
781 | if (!dev || !strlen(dev->bus_id)) { | 775 | if (!dev || !strlen(dev->bus_id)) { |
782 | error = -EINVAL; | 776 | error = -EINVAL; |
783 | goto Error; | 777 | goto Done; |
784 | } | 778 | } |
785 | 779 | ||
786 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); | 780 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); |
@@ -843,11 +837,9 @@ int device_add(struct device *dev) | |||
843 | } | 837 | } |
844 | Done: | 838 | Done: |
845 | put_device(dev); | 839 | put_device(dev); |
846 | pm_sleep_unlock(); | ||
847 | return error; | 840 | return error; |
848 | BusError: | 841 | BusError: |
849 | device_pm_remove(dev); | 842 | device_pm_remove(dev); |
850 | dpm_sysfs_remove(dev); | ||
851 | PMError: | 843 | PMError: |
852 | if (dev->bus) | 844 | if (dev->bus) |
853 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 845 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ee9d1c8db0d6..d887d5cb5bef 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -48,7 +48,6 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | LIST_HEAD(dpm_active); | 50 | LIST_HEAD(dpm_active); |
51 | static LIST_HEAD(dpm_locked); | ||
52 | static LIST_HEAD(dpm_off); | 51 | static LIST_HEAD(dpm_off); |
53 | static LIST_HEAD(dpm_off_irq); | 52 | static LIST_HEAD(dpm_off_irq); |
54 | static LIST_HEAD(dpm_destroy); | 53 | static LIST_HEAD(dpm_destroy); |
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev) | |||
81 | */ | 80 | */ |
82 | void device_pm_remove(struct device *dev) | 81 | void device_pm_remove(struct device *dev) |
83 | { | 82 | { |
84 | /* | ||
85 | * If this function is called during a suspend, it will be blocked, | ||
86 | * because we're holding the device's semaphore at that time, which may | ||
87 | * lead to a deadlock. In that case we want to print a warning. | ||
88 | * However, it may also be called by unregister_dropped_devices() with | ||
89 | * the device's semaphore released, in which case the warning should | ||
90 | * not be printed. | ||
91 | */ | ||
92 | if (down_trylock(&dev->sem)) { | ||
93 | if (down_read_trylock(&pm_sleep_rwsem)) { | ||
94 | /* No suspend in progress, wait on dev->sem */ | ||
95 | down(&dev->sem); | ||
96 | up_read(&pm_sleep_rwsem); | ||
97 | } else { | ||
98 | /* Suspend in progress, we may deadlock */ | ||
99 | dev_warn(dev, "Suspicious %s during suspend\n", | ||
100 | __FUNCTION__); | ||
101 | dump_stack(); | ||
102 | /* The user has been warned ... */ | ||
103 | down(&dev->sem); | ||
104 | } | ||
105 | } | ||
106 | pr_debug("PM: Removing info for %s:%s\n", | 83 | pr_debug("PM: Removing info for %s:%s\n", |
107 | dev->bus ? dev->bus->name : "No Bus", | 84 | dev->bus ? dev->bus->name : "No Bus", |
108 | kobject_name(&dev->kobj)); | 85 | kobject_name(&dev->kobj)); |
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev) | |||
110 | dpm_sysfs_remove(dev); | 87 | dpm_sysfs_remove(dev); |
111 | list_del_init(&dev->power.entry); | 88 | list_del_init(&dev->power.entry); |
112 | mutex_unlock(&dpm_list_mtx); | 89 | mutex_unlock(&dpm_list_mtx); |
113 | up(&dev->sem); | ||
114 | } | 90 | } |
115 | 91 | ||
116 | /** | 92 | /** |
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev) | |||
230 | TRACE_DEVICE(dev); | 206 | TRACE_DEVICE(dev); |
231 | TRACE_RESUME(0); | 207 | TRACE_RESUME(0); |
232 | 208 | ||
209 | down(&dev->sem); | ||
210 | |||
233 | if (dev->bus && dev->bus->resume) { | 211 | if (dev->bus && dev->bus->resume) { |
234 | dev_dbg(dev,"resuming\n"); | 212 | dev_dbg(dev,"resuming\n"); |
235 | error = dev->bus->resume(dev); | 213 | error = dev->bus->resume(dev); |
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev) | |||
245 | error = dev->class->resume(dev); | 223 | error = dev->class->resume(dev); |
246 | } | 224 | } |
247 | 225 | ||
226 | up(&dev->sem); | ||
227 | |||
248 | TRACE_RESUME(error); | 228 | TRACE_RESUME(error); |
249 | return error; | 229 | return error; |
250 | } | 230 | } |
@@ -266,7 +246,7 @@ static void dpm_resume(void) | |||
266 | struct list_head *entry = dpm_off.next; | 246 | struct list_head *entry = dpm_off.next; |
267 | struct device *dev = to_device(entry); | 247 | struct device *dev = to_device(entry); |
268 | 248 | ||
269 | list_move_tail(entry, &dpm_locked); | 249 | list_move_tail(entry, &dpm_active); |
270 | mutex_unlock(&dpm_list_mtx); | 250 | mutex_unlock(&dpm_list_mtx); |
271 | resume_device(dev); | 251 | resume_device(dev); |
272 | mutex_lock(&dpm_list_mtx); | 252 | mutex_lock(&dpm_list_mtx); |
@@ -275,25 +255,6 @@ static void dpm_resume(void) | |||
275 | } | 255 | } |
276 | 256 | ||
277 | /** | 257 | /** |
278 | * unlock_all_devices - Release each device's semaphore | ||
279 | * | ||
280 | * Go through the dpm_off list. Put each device on the dpm_active | ||
281 | * list and unlock it. | ||
282 | */ | ||
283 | static void unlock_all_devices(void) | ||
284 | { | ||
285 | mutex_lock(&dpm_list_mtx); | ||
286 | while (!list_empty(&dpm_locked)) { | ||
287 | struct list_head *entry = dpm_locked.prev; | ||
288 | struct device *dev = to_device(entry); | ||
289 | |||
290 | list_move(entry, &dpm_active); | ||
291 | up(&dev->sem); | ||
292 | } | ||
293 | mutex_unlock(&dpm_list_mtx); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * unregister_dropped_devices - Unregister devices scheduled for removal | 258 | * unregister_dropped_devices - Unregister devices scheduled for removal |
298 | * | 259 | * |
299 | * Unregister all devices on the dpm_destroy list. | 260 | * Unregister all devices on the dpm_destroy list. |
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void) | |||
305 | struct list_head *entry = dpm_destroy.next; | 266 | struct list_head *entry = dpm_destroy.next; |
306 | struct device *dev = to_device(entry); | 267 | struct device *dev = to_device(entry); |
307 | 268 | ||
308 | up(&dev->sem); | ||
309 | mutex_unlock(&dpm_list_mtx); | 269 | mutex_unlock(&dpm_list_mtx); |
310 | /* This also removes the device from the list */ | 270 | /* This also removes the device from the list */ |
311 | device_unregister(dev); | 271 | device_unregister(dev); |
@@ -324,7 +284,6 @@ void device_resume(void) | |||
324 | { | 284 | { |
325 | might_sleep(); | 285 | might_sleep(); |
326 | dpm_resume(); | 286 | dpm_resume(); |
327 | unlock_all_devices(); | ||
328 | unregister_dropped_devices(); | 287 | unregister_dropped_devices(); |
329 | up_write(&pm_sleep_rwsem); | 288 | up_write(&pm_sleep_rwsem); |
330 | } | 289 | } |
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state) | |||
388 | struct list_head *entry = dpm_off.prev; | 347 | struct list_head *entry = dpm_off.prev; |
389 | struct device *dev = to_device(entry); | 348 | struct device *dev = to_device(entry); |
390 | 349 | ||
391 | list_del_init(&dev->power.entry); | ||
392 | error = suspend_device_late(dev, state); | 350 | error = suspend_device_late(dev, state); |
393 | if (error) { | 351 | if (error) { |
394 | printk(KERN_ERR "Could not power down device %s: " | 352 | printk(KERN_ERR "Could not power down device %s: " |
395 | "error %d\n", | 353 | "error %d\n", |
396 | kobject_name(&dev->kobj), error); | 354 | kobject_name(&dev->kobj), error); |
397 | if (list_empty(&dev->power.entry)) | ||
398 | list_add(&dev->power.entry, &dpm_off); | ||
399 | break; | 355 | break; |
400 | } | 356 | } |
401 | if (list_empty(&dev->power.entry)) | 357 | if (!list_empty(&dev->power.entry)) |
402 | list_add(&dev->power.entry, &dpm_off_irq); | 358 | list_move(&dev->power.entry, &dpm_off_irq); |
403 | } | 359 | } |
404 | 360 | ||
405 | if (!error) | 361 | if (!error) |
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
419 | { | 375 | { |
420 | int error = 0; | 376 | int error = 0; |
421 | 377 | ||
378 | down(&dev->sem); | ||
379 | |||
422 | if (dev->power.power_state.event) { | 380 | if (dev->power.power_state.event) { |
423 | dev_dbg(dev, "PM: suspend %d-->%d\n", | 381 | dev_dbg(dev, "PM: suspend %d-->%d\n", |
424 | dev->power.power_state.event, state.event); | 382 | dev->power.power_state.event, state.event); |
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
441 | error = dev->bus->suspend(dev, state); | 399 | error = dev->bus->suspend(dev, state); |
442 | suspend_report_result(dev->bus->suspend, error); | 400 | suspend_report_result(dev->bus->suspend, error); |
443 | } | 401 | } |
402 | |||
403 | up(&dev->sem); | ||
404 | |||
444 | return error; | 405 | return error; |
445 | } | 406 | } |
446 | 407 | ||
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state) | |||
461 | int error = 0; | 422 | int error = 0; |
462 | 423 | ||
463 | mutex_lock(&dpm_list_mtx); | 424 | mutex_lock(&dpm_list_mtx); |
464 | while (!list_empty(&dpm_locked)) { | 425 | while (!list_empty(&dpm_active)) { |
465 | struct list_head *entry = dpm_locked.prev; | 426 | struct list_head *entry = dpm_active.prev; |
466 | struct device *dev = to_device(entry); | 427 | struct device *dev = to_device(entry); |
467 | 428 | ||
468 | list_del_init(&dev->power.entry); | ||
469 | mutex_unlock(&dpm_list_mtx); | 429 | mutex_unlock(&dpm_list_mtx); |
470 | error = suspend_device(dev, state); | 430 | error = suspend_device(dev, state); |
431 | mutex_lock(&dpm_list_mtx); | ||
471 | if (error) { | 432 | if (error) { |
472 | printk(KERN_ERR "Could not suspend device %s: " | 433 | printk(KERN_ERR "Could not suspend device %s: " |
473 | "error %d%s\n", | 434 | "error %d%s\n", |
@@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state) | |||
476 | (error == -EAGAIN ? | 437 | (error == -EAGAIN ? |
477 | " (please convert to suspend_late)" : | 438 | " (please convert to suspend_late)" : |
478 | "")); | 439 | "")); |
479 | mutex_lock(&dpm_list_mtx); | ||
480 | if (list_empty(&dev->power.entry)) | ||
481 | list_add(&dev->power.entry, &dpm_locked); | ||
482 | break; | 440 | break; |
483 | } | 441 | } |
484 | mutex_lock(&dpm_list_mtx); | 442 | if (!list_empty(&dev->power.entry)) |
485 | if (list_empty(&dev->power.entry)) | 443 | list_move(&dev->power.entry, &dpm_off); |
486 | list_add(&dev->power.entry, &dpm_off); | ||
487 | } | 444 | } |
488 | mutex_unlock(&dpm_list_mtx); | 445 | mutex_unlock(&dpm_list_mtx); |
489 | 446 | ||
@@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state) | |||
491 | } | 448 | } |
492 | 449 | ||
493 | /** | 450 | /** |
494 | * lock_all_devices - Acquire every device's semaphore | ||
495 | * | ||
496 | * Go through the dpm_active list. Carefully lock each device's | ||
497 | * semaphore and put it in on the dpm_locked list. | ||
498 | */ | ||
499 | static void lock_all_devices(void) | ||
500 | { | ||
501 | mutex_lock(&dpm_list_mtx); | ||
502 | while (!list_empty(&dpm_active)) { | ||
503 | struct list_head *entry = dpm_active.next; | ||
504 | struct device *dev = to_device(entry); | ||
505 | |||
506 | /* Required locking order is dev->sem first, | ||
507 | * then dpm_list_mutex. Hence this awkward code. | ||
508 | */ | ||
509 | get_device(dev); | ||
510 | mutex_unlock(&dpm_list_mtx); | ||
511 | down(&dev->sem); | ||
512 | mutex_lock(&dpm_list_mtx); | ||
513 | |||
514 | if (list_empty(entry)) | ||
515 | up(&dev->sem); /* Device was removed */ | ||
516 | else | ||
517 | list_move_tail(entry, &dpm_locked); | ||
518 | put_device(dev); | ||
519 | } | ||
520 | mutex_unlock(&dpm_list_mtx); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * device_suspend - Save state and stop all devices in system. | 451 | * device_suspend - Save state and stop all devices in system. |
525 | * @state: new power management state | 452 | * @state: new power management state |
526 | * | 453 | * |
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state) | |||
533 | 460 | ||
534 | might_sleep(); | 461 | might_sleep(); |
535 | down_write(&pm_sleep_rwsem); | 462 | down_write(&pm_sleep_rwsem); |
536 | lock_all_devices(); | ||
537 | error = dpm_suspend(state); | 463 | error = dpm_suspend(state); |
538 | if (error) | 464 | if (error) |
539 | device_resume(); | 465 | device_resume(); |
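The net effect of the hunks above is that the separate lock_all_devices()/unlock_all_devices() passes disappear: each per-device callback now takes that device's own semaphore itself, and dpm_list_mtx is held only while picking or moving a list entry, never across the callback. A contrived userspace pthread analog of that ordering (every name here is illustrative, not the kernel API):

	#include <pthread.h>
	#include <stdio.h>

	struct fake_dev {
		pthread_mutex_t sem;        /* stands in for dev->sem */
		const char *name;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* dpm_list_mtx analog */

	static int suspend_one(struct fake_dev *dev)
	{
		pthread_mutex_lock(&dev->sem);   /* serialize with other users of this device */
		printf("suspending %s\n", dev->name);
		pthread_mutex_unlock(&dev->sem);
		return 0;
	}

	int main(void)
	{
		struct fake_dev d = { PTHREAD_MUTEX_INITIALIZER, "fake0" };

		pthread_mutex_lock(&list_lock);   /* pick the next entry off the list... */
		pthread_mutex_unlock(&list_lock); /* ...then drop the list lock for the call */
		suspend_one(&d);
		pthread_mutex_lock(&list_lock);   /* retake it only to move the list entry */
		pthread_mutex_unlock(&list_lock);
		return 0;
	}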
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index f25e7c6b2d27..40bca48abc12 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont, | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * transport_setup_device - declare a new dev for transport class association | 129 | * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. |
130 | * but don't make it visible yet. | ||
131 | * | ||
132 | * @dev: the generic device representing the entity being added | 130 | * @dev: the generic device representing the entity being added |
133 | * | 131 | * |
134 | * Usually, dev represents some component in the HBA system (either | 132 | * Usually, dev represents some component in the HBA system (either |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9715be3f2487..55bd35c0f082 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/blkpg.h> | 33 | #include <linux/blkpg.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/seq_file.h> | ||
36 | #include <linux/init.h> | 37 | #include <linux/init.h> |
37 | #include <linux/hdreg.h> | 38 | #include <linux/hdreg.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
@@ -131,7 +132,6 @@ static struct board_type products[] = { | |||
131 | /*define how many times we will try a command because of bus resets */ | 132 | /*define how many times we will try a command because of bus resets */ |
132 | #define MAX_CMD_RETRIES 3 | 133 | #define MAX_CMD_RETRIES 3 |
133 | 134 | ||
134 | #define READ_AHEAD 1024 | ||
135 | #define MAX_CTLR 32 | 135 | #define MAX_CTLR 32 |
136 | 136 | ||
137 | /* Originally cciss driver only supports 8 major numbers */ | 137 | /* Originally cciss driver only supports 8 major numbers */ |
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | |||
174 | static void fail_all_cmds(unsigned long ctlr); | 174 | static void fail_all_cmds(unsigned long ctlr); |
175 | 175 | ||
176 | #ifdef CONFIG_PROC_FS | 176 | #ifdef CONFIG_PROC_FS |
177 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | ||
178 | int length, int *eof, void *data); | ||
179 | static void cciss_procinit(int i); | 177 | static void cciss_procinit(int i); |
180 | #else | 178 | #else |
181 | static void cciss_procinit(int i) | 179 | static void cciss_procinit(int i) |
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr, | |||
240 | */ | 238 | */ |
241 | #define ENG_GIG 1000000000 | 239 | #define ENG_GIG 1000000000 |
242 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 240 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
241 | #define ENGAGE_SCSI "engage scsi" | ||
243 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 242 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
244 | "UNKNOWN" | 243 | "UNKNOWN" |
245 | }; | 244 | }; |
246 | 245 | ||
247 | static struct proc_dir_entry *proc_cciss; | 246 | static struct proc_dir_entry *proc_cciss; |
248 | 247 | ||
249 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | 248 | static void cciss_seq_show_header(struct seq_file *seq) |
250 | int length, int *eof, void *data) | ||
251 | { | 249 | { |
252 | off_t pos = 0; | 250 | ctlr_info_t *h = seq->private; |
253 | off_t len = 0; | 251 | |
254 | int size, i, ctlr; | 252 | seq_printf(seq, "%s: HP %s Controller\n" |
255 | ctlr_info_t *h = (ctlr_info_t *) data; | 253 | "Board ID: 0x%08lx\n" |
256 | drive_info_struct *drv; | 254 | "Firmware Version: %c%c%c%c\n" |
257 | unsigned long flags; | 255 | "IRQ: %d\n" |
258 | sector_t vol_sz, vol_sz_frac; | 256 | "Logical drives: %d\n" |
257 | "Current Q depth: %d\n" | ||
258 | "Current # commands on controller: %d\n" | ||
259 | "Max Q depth since init: %d\n" | ||
260 | "Max # commands on controller since init: %d\n" | ||
261 | "Max SG entries since init: %d\n", | ||
262 | h->devname, | ||
263 | h->product_name, | ||
264 | (unsigned long)h->board_id, | ||
265 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
266 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
267 | h->num_luns, | ||
268 | h->Qdepth, h->commands_outstanding, | ||
269 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
259 | 270 | ||
260 | ctlr = h->ctlr; | 271 | #ifdef CONFIG_CISS_SCSI_TAPE |
272 | cciss_seq_tape_report(seq, h->ctlr); | ||
273 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
274 | } | ||
275 | |||
276 | static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) | ||
277 | { | ||
278 | ctlr_info_t *h = seq->private; | ||
279 | unsigned ctlr = h->ctlr; | ||
280 | unsigned long flags; | ||
261 | 281 | ||
262 | /* prevent displaying bogus info during configuration | 282 | /* prevent displaying bogus info during configuration |
263 | * or deconfiguration of a logical volume | 283 | * or deconfiguration of a logical volume |
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | |||
265 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 285 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
266 | if (h->busy_configuring) { | 286 | if (h->busy_configuring) { |
267 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 287 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
268 | return -EBUSY; | 288 | return ERR_PTR(-EBUSY); |
269 | } | 289 | } |
270 | h->busy_configuring = 1; | 290 | h->busy_configuring = 1; |
271 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 291 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
272 | 292 | ||
273 | size = sprintf(buffer, "%s: HP %s Controller\n" | 293 | if (*pos == 0) |
274 | "Board ID: 0x%08lx\n" | 294 | cciss_seq_show_header(seq); |
275 | "Firmware Version: %c%c%c%c\n" | ||
276 | "IRQ: %d\n" | ||
277 | "Logical drives: %d\n" | ||
278 | "Max sectors: %d\n" | ||
279 | "Current Q depth: %d\n" | ||
280 | "Current # commands on controller: %d\n" | ||
281 | "Max Q depth since init: %d\n" | ||
282 | "Max # commands on controller since init: %d\n" | ||
283 | "Max SG entries since init: %d\n\n", | ||
284 | h->devname, | ||
285 | h->product_name, | ||
286 | (unsigned long)h->board_id, | ||
287 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
288 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
289 | h->num_luns, | ||
290 | h->cciss_max_sectors, | ||
291 | h->Qdepth, h->commands_outstanding, | ||
292 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
293 | |||
294 | pos += size; | ||
295 | len += size; | ||
296 | cciss_proc_tape_report(ctlr, buffer, &pos, &len); | ||
297 | for (i = 0; i <= h->highest_lun; i++) { | ||
298 | |||
299 | drv = &h->drv[i]; | ||
300 | if (drv->heads == 0) | ||
301 | continue; | ||
302 | 295 | ||
303 | vol_sz = drv->nr_blocks; | 296 | return pos; |
304 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | 297 | } |
305 | vol_sz_frac *= 100; | 298 | |
306 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 299 | static int cciss_seq_show(struct seq_file *seq, void *v) |
300 | { | ||
301 | sector_t vol_sz, vol_sz_frac; | ||
302 | ctlr_info_t *h = seq->private; | ||
303 | unsigned ctlr = h->ctlr; | ||
304 | loff_t *pos = v; | ||
305 | drive_info_struct *drv = &h->drv[*pos]; | ||
306 | |||
307 | if (*pos > h->highest_lun) | ||
308 | return 0; | ||
309 | |||
310 | if (drv->heads == 0) | ||
311 | return 0; | ||
312 | |||
313 | vol_sz = drv->nr_blocks; | ||
314 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | ||
315 | vol_sz_frac *= 100; | ||
316 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | ||
317 | |||
318 | if (drv->raid_level > 5) | ||
319 | drv->raid_level = RAID_UNKNOWN; | ||
320 | seq_printf(seq, "cciss/c%dd%d:" | ||
321 | "\t%4u.%02uGB\tRAID %s\n", | ||
322 | ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, | ||
323 | raid_label[drv->raid_level]); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
328 | { | ||
329 | ctlr_info_t *h = seq->private; | ||
330 | |||
331 | if (*pos > h->highest_lun) | ||
332 | return NULL; | ||
333 | *pos += 1; | ||
334 | |||
335 | return pos; | ||
336 | } | ||
337 | |||
338 | static void cciss_seq_stop(struct seq_file *seq, void *v) | ||
339 | { | ||
340 | ctlr_info_t *h = seq->private; | ||
341 | |||
342 | /* Only reset h->busy_configuring if we succeeded in setting | ||
343 | * it during cciss_seq_start. */ | ||
344 | if (v == ERR_PTR(-EBUSY)) | ||
345 | return; | ||
307 | 346 | ||
308 | if (drv->raid_level > 5) | ||
309 | drv->raid_level = RAID_UNKNOWN; | ||
310 | size = sprintf(buffer + len, "cciss/c%dd%d:" | ||
311 | "\t%4u.%02uGB\tRAID %s\n", | ||
312 | ctlr, i, (int)vol_sz, (int)vol_sz_frac, | ||
313 | raid_label[drv->raid_level]); | ||
314 | pos += size; | ||
315 | len += size; | ||
316 | } | ||
317 | |||
318 | *eof = 1; | ||
319 | *start = buffer + offset; | ||
320 | len -= offset; | ||
321 | if (len > length) | ||
322 | len = length; | ||
323 | h->busy_configuring = 0; | 347 | h->busy_configuring = 0; |
324 | return len; | ||
325 | } | 348 | } |
326 | 349 | ||
327 | static int | 350 | static struct seq_operations cciss_seq_ops = { |
328 | cciss_proc_write(struct file *file, const char __user *buffer, | 351 | .start = cciss_seq_start, |
329 | unsigned long count, void *data) | 352 | .show = cciss_seq_show, |
353 | .next = cciss_seq_next, | ||
354 | .stop = cciss_seq_stop, | ||
355 | }; | ||
356 | |||
357 | static int cciss_seq_open(struct inode *inode, struct file *file) | ||
330 | { | 358 | { |
331 | unsigned char cmd[80]; | 359 | int ret = seq_open(file, &cciss_seq_ops); |
332 | int len; | 360 | struct seq_file *seq = file->private_data; |
333 | #ifdef CONFIG_CISS_SCSI_TAPE | 361 | |
334 | ctlr_info_t *h = (ctlr_info_t *) data; | 362 | if (!ret) |
335 | int rc; | 363 | seq->private = PDE(inode)->data; |
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static ssize_t | ||
369 | cciss_proc_write(struct file *file, const char __user *buf, | ||
370 | size_t length, loff_t *ppos) | ||
371 | { | ||
372 | int err; | ||
373 | char *buffer; | ||
374 | |||
375 | #ifndef CONFIG_CISS_SCSI_TAPE | ||
376 | return -EINVAL; | ||
336 | #endif | 377 | #endif |
337 | 378 | ||
338 | if (count > sizeof(cmd) - 1) | 379 | if (!buf || length > PAGE_SIZE - 1) |
339 | return -EINVAL; | 380 | return -EINVAL; |
340 | if (copy_from_user(cmd, buffer, count)) | 381 | |
341 | return -EFAULT; | 382 | buffer = (char *)__get_free_page(GFP_KERNEL); |
342 | cmd[count] = '\0'; | 383 | if (!buffer) |
343 | len = strlen(cmd); // above 3 lines ensure safety | 384 | return -ENOMEM; |
344 | if (len && cmd[len - 1] == '\n') | 385 | |
345 | cmd[--len] = '\0'; | 386 | err = -EFAULT; |
346 | # ifdef CONFIG_CISS_SCSI_TAPE | 387 | if (copy_from_user(buffer, buf, length)) |
347 | if (strcmp("engage scsi", cmd) == 0) { | 388 | goto out; |
389 | buffer[length] = '\0'; | ||
390 | |||
391 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
392 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { | ||
393 | struct seq_file *seq = file->private_data; | ||
394 | ctlr_info_t *h = seq->private; | ||
395 | int rc; | ||
396 | |||
348 | rc = cciss_engage_scsi(h->ctlr); | 397 | rc = cciss_engage_scsi(h->ctlr); |
349 | if (rc != 0) | 398 | if (rc != 0) |
350 | return -rc; | 399 | err = -rc; |
351 | return count; | 400 | else |
352 | } | 401 | err = length; |
402 | } else | ||
403 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
404 | err = -EINVAL; | ||
353 | /* might be nice to have "disengage" too, but it's not | 405 | /* might be nice to have "disengage" too, but it's not |
354 | safely possible. (only 1 module use count, lock issues.) */ | 406 | safely possible. (only 1 module use count, lock issues.) */ |
355 | # endif | 407 | |
356 | return -EINVAL; | 408 | out: |
409 | free_page((unsigned long)buffer); | ||
410 | return err; | ||
357 | } | 411 | } |
358 | 412 | ||
359 | /* | 413 | static struct file_operations cciss_proc_fops = { |
360 | * Get us a file in /proc/cciss that says something about each controller. | 414 | .owner = THIS_MODULE, |
361 | * Create /proc/cciss if it doesn't exist yet. | 415 | .open = cciss_seq_open, |
362 | */ | 416 | .read = seq_read, |
417 | .llseek = seq_lseek, | ||
418 | .release = seq_release, | ||
419 | .write = cciss_proc_write, | ||
420 | }; | ||
421 | |||
363 | static void __devinit cciss_procinit(int i) | 422 | static void __devinit cciss_procinit(int i) |
364 | { | 423 | { |
365 | struct proc_dir_entry *pde; | 424 | struct proc_dir_entry *pde; |
366 | 425 | ||
367 | if (proc_cciss == NULL) { | 426 | if (proc_cciss == NULL) |
368 | proc_cciss = proc_mkdir("cciss", proc_root_driver); | 427 | proc_cciss = proc_mkdir("cciss", proc_root_driver); |
369 | if (!proc_cciss) | 428 | if (!proc_cciss) |
370 | return; | 429 | return; |
371 | } | 430 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | |
431 | S_IROTH, proc_cciss, | ||
432 | &cciss_proc_fops); | ||
433 | if (!pde) | ||
434 | return; | ||
372 | 435 | ||
373 | pde = create_proc_read_entry(hba[i]->devname, | 436 | pde->data = hba[i]; |
374 | S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, | ||
375 | proc_cciss, cciss_proc_get_info, hba[i]); | ||
376 | pde->write_proc = cciss_proc_write; | ||
377 | } | 437 | } |
378 | #endif /* CONFIG_PROC_FS */ | 438 | #endif /* CONFIG_PROC_FS */ |
379 | 439 | ||
@@ -1341,7 +1401,6 @@ geo_inq: | |||
1341 | disk->private_data = &h->drv[drv_index]; | 1401 | disk->private_data = &h->drv[drv_index]; |
1342 | 1402 | ||
1343 | /* Set up queue information */ | 1403 | /* Set up queue information */ |
1344 | disk->queue->backing_dev_info.ra_pages = READ_AHEAD; | ||
1345 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); | 1404 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); |
1346 | 1405 | ||
1347 | /* This is a hardware imposed limit. */ | 1406 | /* This is a hardware imposed limit. */ |
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3434 | } | 3493 | } |
3435 | drv->queue = q; | 3494 | drv->queue = q; |
3436 | 3495 | ||
3437 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
3438 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 3496 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); |
3439 | 3497 | ||
3440 | /* This is a hardware imposed limit. */ | 3498 | /* This is a hardware imposed limit. */ |
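One part of the conversion above carries the volume-size formatting into cciss_seq_show() unchanged: nr_blocks is in 512-byte sectors and ENG_GIG_FACTOR turns it into decimal gigabytes with two fractional digits. A standalone rerun of that arithmetic; the sample block count is made up, and sector_div() in the kernel performs the same divide while handing back the remainder:

	#include <stdio.h>

	#define ENG_GIG        1000000000ULL
	#define ENG_GIG_FACTOR (ENG_GIG / 512)   /* 512-byte sectors per decimal GB */

	int main(void)
	{
		unsigned long long nr_blocks = 143305920ULL;   /* made-up example volume */
		unsigned long long vol_sz = nr_blocks / ENG_GIG_FACTOR;
		unsigned long long vol_sz_frac =
			(nr_blocks % ENG_GIG_FACTOR) * 100 / ENG_GIG_FACTOR;

		/* matches the "\t%4u.%02uGB\t" format used in cciss_seq_show() */
		printf("%4llu.%02lluGB\n", vol_sz, vol_sz_frac);   /* "  73.37GB" */
		return 0;
	}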
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 55178e9973a0..45ac09300eb3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr) | |||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static void | 1406 | static void |
1407 | cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) | 1407 | cciss_seq_tape_report(struct seq_file *seq, int ctlr) |
1408 | { | 1408 | { |
1409 | unsigned long flags; | 1409 | unsigned long flags; |
1410 | int size; | ||
1411 | |||
1412 | *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline | ||
1413 | 1410 | ||
1414 | CPQ_TAPE_LOCK(ctlr, flags); | 1411 | CPQ_TAPE_LOCK(ctlr, flags); |
1415 | size = sprintf(buffer + *len, | 1412 | seq_printf(seq, |
1416 | "Sequential access devices: %d\n\n", | 1413 | "Sequential access devices: %d\n\n", |
1417 | ccissscsi[ctlr].ndevices); | 1414 | ccissscsi[ctlr].ndevices); |
1418 | CPQ_TAPE_UNLOCK(ctlr, flags); | 1415 | CPQ_TAPE_UNLOCK(ctlr, flags); |
1419 | *pos += size; *len += size; | ||
1420 | } | 1416 | } |
1421 | 1417 | ||
1418 | |||
1422 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | 1419 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
1423 | * complaining. Doing a host- or bus-reset can't do anything good here. | 1420 | * complaining. Doing a host- or bus-reset can't do anything good here. |
1424 | * Despite what it might say in scsi_error.c, there may well be commands | 1421 | * Despite what it might say in scsi_error.c, there may well be commands |
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1498 | #define cciss_scsi_setup(cntl_num) | 1495 | #define cciss_scsi_setup(cntl_num) |
1499 | #define cciss_unregister_scsi(ctlr) | 1496 | #define cciss_unregister_scsi(ctlr) |
1500 | #define cciss_register_scsi(ctlr) | 1497 | #define cciss_register_scsi(ctlr) |
1501 | #define cciss_proc_tape_report(ctlr, buffer, pos, len) | ||
1502 | 1498 | ||
1503 | #endif /* CONFIG_CISS_SCSI_TAPE */ | 1499 | #endif /* CONFIG_CISS_SCSI_TAPE */ |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 674cd66dcaba..18feb1c7c33b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd) | |||
849 | /* | 849 | /* |
850 | * speed is given as the normal factor, e.g. 4 for 4x | 850 | * speed is given as the normal factor, e.g. 4 for 4x |
851 | */ | 851 | */ |
852 | static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) | 852 | static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, |
853 | unsigned write_speed, unsigned read_speed) | ||
853 | { | 854 | { |
854 | struct packet_command cgc; | 855 | struct packet_command cgc; |
855 | struct request_sense sense; | 856 | struct request_sense sense; |
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, | |||
1776 | return pkt_generic_packet(pd, &cgc); | 1777 | return pkt_generic_packet(pd, &cgc); |
1777 | } | 1778 | } |
1778 | 1779 | ||
1779 | static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | 1780 | static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, |
1781 | long *last_written) | ||
1780 | { | 1782 | { |
1781 | disc_information di; | 1783 | disc_information di; |
1782 | track_information ti; | 1784 | track_information ti; |
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | |||
1813 | /* | 1815 | /* |
1814 | * write mode select package based on pd->settings | 1816 | * write mode select package based on pd->settings |
1815 | */ | 1817 | */ |
1816 | static int pkt_set_write_settings(struct pktcdvd_device *pd) | 1818 | static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) |
1817 | { | 1819 | { |
1818 | struct packet_command cgc; | 1820 | struct packet_command cgc; |
1819 | struct request_sense sense; | 1821 | struct request_sense sense; |
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) | |||
1972 | return 1; | 1974 | return 1; |
1973 | } | 1975 | } |
1974 | 1976 | ||
1975 | static int pkt_probe_settings(struct pktcdvd_device *pd) | 1977 | static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) |
1976 | { | 1978 | { |
1977 | struct packet_command cgc; | 1979 | struct packet_command cgc; |
1978 | unsigned char buf[12]; | 1980 | unsigned char buf[12]; |
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) | |||
2071 | /* | 2073 | /* |
2072 | * enable/disable write caching on drive | 2074 | * enable/disable write caching on drive |
2073 | */ | 2075 | */ |
2074 | static int pkt_write_caching(struct pktcdvd_device *pd, int set) | 2076 | static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, |
2077 | int set) | ||
2075 | { | 2078 | { |
2076 | struct packet_command cgc; | 2079 | struct packet_command cgc; |
2077 | struct request_sense sense; | 2080 | struct request_sense sense; |
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) | |||
2116 | /* | 2119 | /* |
2117 | * Returns drive maximum write speed | 2120 | * Returns drive maximum write speed |
2118 | */ | 2121 | */ |
2119 | static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) | 2122 | static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, |
2123 | unsigned *write_speed) | ||
2120 | { | 2124 | { |
2121 | struct packet_command cgc; | 2125 | struct packet_command cgc; |
2122 | struct request_sense sense; | 2126 | struct request_sense sense; |
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = { | |||
2177 | /* | 2181 | /* |
2178 | * reads the maximum media speed from ATIP | 2182 | * reads the maximum media speed from ATIP |
2179 | */ | 2183 | */ |
2180 | static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | 2184 | static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, |
2185 | unsigned *speed) | ||
2181 | { | 2186 | { |
2182 | struct packet_command cgc; | 2187 | struct packet_command cgc; |
2183 | struct request_sense sense; | 2188 | struct request_sense sense; |
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | |||
2249 | } | 2254 | } |
2250 | } | 2255 | } |
2251 | 2256 | ||
2252 | static int pkt_perform_opc(struct pktcdvd_device *pd) | 2257 | static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) |
2253 | { | 2258 | { |
2254 | struct packet_command cgc; | 2259 | struct packet_command cgc; |
2255 | struct request_sense sense; | 2260 | struct request_sense sense; |
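Every hunk in this file adds the same annotation. The point of noinline_for_stack (which, as an assumption, expands to the plain noinline attribute in the kernel's compiler.h) is to keep each function's large packet_command/request_sense locals in its own stack frame rather than letting the compiler inline several of them into one oversized caller frame. A userspace sketch of the attribute:

	#include <stdio.h>
	#include <string.h>

	/* Sketch only: noinline_for_stack is assumed to be a noinline attribute,
	 * so the large local buffer below stays in this function's frame instead
	 * of being folded into the caller's frame by inlining. */
	#define noinline_for_stack __attribute__((noinline))

	static noinline_for_stack int probe_settings(void)
	{
		unsigned char buf[512];          /* large local confined to this frame */

		memset(buf, 0, sizeof(buf));
		return buf[0];
	}

	int main(void)
	{
		return probe_settings();
	}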
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index db259e60289b..12f5baea439b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1152,8 +1152,8 @@ clean_up_and_return: | |||
1152 | /* This code is similar to that in open_for_data. The routine is called | 1152 | /* This code is similar to that in open_for_data. The routine is called |
1153 | whenever an audio play operation is requested. | 1153 | whenever an audio play operation is requested. |
1154 | */ | 1154 | */ |
1155 | int check_for_audio_disc(struct cdrom_device_info * cdi, | 1155 | static int check_for_audio_disc(struct cdrom_device_info * cdi, |
1156 | struct cdrom_device_ops * cdo) | 1156 | struct cdrom_device_ops * cdo) |
1157 | { | 1157 | { |
1158 | int ret; | 1158 | int ret; |
1159 | tracktype tracks; | 1159 | tracktype tracks; |
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped
index 0aa419a61767..d2208dfe3f67 100644
--- a/drivers/char/defkeymap.c_shipped
+++ b/drivers/char/defkeymap.c_shipped
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | struct kbdiacruc accent_table[MAX_DIACR] = { | 225 | struct kbdiacruc accent_table[MAX_DIACR] = { |
226 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 226 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
227 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 227 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
228 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 228 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
229 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 229 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
230 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 230 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
231 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 231 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
232 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 232 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
233 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 233 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
234 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 234 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
235 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 235 | {',', 'C', 0307}, {',', 'c', 0347}, |
236 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 236 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
237 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 237 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
238 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 238 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
239 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 239 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
240 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 240 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
241 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 241 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
242 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 242 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
243 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 243 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
244 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 244 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
245 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 245 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
246 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 246 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
247 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 247 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
248 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 248 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
249 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 249 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
250 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 250 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
251 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 251 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
252 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 252 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
253 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 253 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
254 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 254 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
255 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 255 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
256 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 256 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
257 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 257 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
258 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 258 | {'s', 's', 0337}, {'"', 'y', 0377}, |
259 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 259 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
260 | }; | 260 | }; |
261 | 261 | ||
262 | unsigned int accent_table_size = 68; | 262 | unsigned int accent_table_size = 68; |
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 85d596a3c18c..eba2883b630e 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev, | |||
1527 | msleep(10); | 1527 | msleep(10); |
1528 | 1528 | ||
1529 | portcount = inw(base + 0x2); | 1529 | portcount = inw(base + 0x2); |
1530 | if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && | 1530 | if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && |
1531 | portcount != 8 && portcount != 16)) { | 1531 | portcount != 8 && portcount != 16)) { |
1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", | 1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", |
1533 | card + 1); | 1533 | card + 1); |
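
The isicom hunk is a pure operator-precedence fix: `!` binds tighter than `&`, so the old expression negated the whole register value before masking it and therefore only tested whether the register was entirely zero, not whether bit 0 was clear. A small, self-contained illustration of the pitfall (ordinary user-space C, not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int reg = 0x6;    /* bit 0 clear, bits 1 and 2 set */

        /* buggy form: '!' binds before '&', so this really tests (reg == 0) & 1 */
        printf("buggy:   %d\n", !reg & 0x1);    /* prints 0, the failure is missed */

        /* fixed form: mask the bit first, then negate */
        printf("correct: %d\n", !(reg & 0x1));  /* prints 1, the failure is caught */
        return 0;
    }
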
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index ff35230058d3..d793e68b3e0d 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network, | |||
377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { | 377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { |
378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; | 378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; |
379 | 379 | ||
380 | if (!tty) | ||
381 | continue; | ||
382 | |||
380 | /* | 383 | /* |
381 | * If it's associated with a tty (other than the RAS channel | 384 | * If it's associated with a tty (other than the RAS channel |
382 | * when we're online), then send the data to that tty. The RAS | 385 | * when we're online), then send the data to that tty. The RAS |
383 | * channel's data is handled above - it always goes through | 386 | * channel's data is handled above - it always goes through |
384 | * ppp_generic. | 387 | * ppp_generic. |
385 | */ | 388 | */ |
386 | if (tty && channel_idx == IPW_CHANNEL_RAS | 389 | if (channel_idx == IPW_CHANNEL_RAS |
387 | && (network->ras_control_lines & | 390 | && (network->ras_control_lines & |
388 | IPW_CONTROL_LINE_DCD) != 0 | 391 | IPW_CONTROL_LINE_DCD) != 0 |
389 | && ipwireless_tty_is_modem(tty)) { | 392 | && ipwireless_tty_is_modem(tty)) { |
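
Hoisting the NULL test into an early `continue` lets the routing condition below drop its `tty &&` term and keeps each check next to the comment that explains it. A minimal sketch of the same guard-clause shape, using hypothetical types rather than the driver's:

    struct item {
        int active;
        int kind;
    };

    /* Count slots whose kind matches, skipping empty slots up front. */
    static int count_matches(struct item **items, int n, int wanted)
    {
        int i, hits = 0;

        for (i = 0; i < n; i++) {
            struct item *it = items[i];

            if (!it)
                continue;    /* guard clause: no need for "it &&" below */

            if (it->kind == wanted && it->active)
                hits++;
        }
        return hits;
    }
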
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 78b151c4d20f..5c3142b6f1fc 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1; | |||
110 | #define hpet_set_rtc_irq_bit(arg) 0 | 110 | #define hpet_set_rtc_irq_bit(arg) 0 |
111 | #define hpet_rtc_timer_init() do { } while (0) | 111 | #define hpet_rtc_timer_init() do { } while (0) |
112 | #define hpet_rtc_dropped_irq() 0 | 112 | #define hpet_rtc_dropped_irq() 0 |
113 | #define hpet_register_irq_handler(h) 0 | 113 | #define hpet_register_irq_handler(h) ({ 0; }) |
114 | #define hpet_unregister_irq_handler(h) 0 | 114 | #define hpet_unregister_irq_handler(h) ({ 0; }) |
115 | #ifdef RTC_IRQ | 115 | #ifdef RTC_IRQ |
116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | 116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) |
117 | { | 117 | { |
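
The rtc.c stubs switch from a bare `0` to a GNU statement expression: when HPET support is compiled out, a call such as `hpet_register_irq_handler(fn);` used to expand to the statement `0;`, which triggers "statement with no effect" warnings, while `({ 0; })` still yields 0 when the value is wanted but stays quiet when it is discarded. A short sketch (GCC/Clang extension, hypothetical macro and function names):

    /* Stubbed-out "function": the argument is dropped at preprocessing time. */
    #define fake_register_handler(h)    ({ 0; })

    static int demo_setup(void)
    {
        int err;

        fake_register_handler(0);          /* fine as a bare statement, no warning */
        err = fake_register_handler(0);    /* still usable as an expression */
        return err;
    }
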
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c0e08c7bca2f..5ff83df67b44 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty) | |||
2109 | sx_out(bp, CD186x_CAR, port_No(port)); | 2109 | sx_out(bp, CD186x_CAR, port_No(port)); |
2110 | spin_unlock_irqrestore(&bp->lock, flags); | 2110 | spin_unlock_irqrestore(&bp->lock, flags); |
2111 | if (I_IXOFF(tty)) { | 2111 | if (I_IXOFF(tty)) { |
2112 | spin_unlock_irqrestore(&bp->lock, flags); | ||
2113 | sx_wait_CCR(bp); | 2112 | sx_wait_CCR(bp); |
2114 | spin_lock_irqsave(&bp->lock, flags); | 2113 | spin_lock_irqsave(&bp->lock, flags); |
2115 | sx_out(bp, CD186x_CCR, CCR_SSCH2); | 2114 | sx_out(bp, CD186x_CCR, CCR_SSCH2); |
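
The specialix hunk removes a duplicated spin_unlock_irqrestore(): the lock had already been released two lines earlier, so the I_IXOFF branch was unlocking a lock it did not hold before re-acquiring it. The balanced shape the fix restores, sketched with a hypothetical lock rather than the driver's bp->lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);    /* hypothetical lock, not the driver's */

    static void demo_throttle(int soft_flow)
    {
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch shared hardware state ... */
        spin_unlock_irqrestore(&demo_lock, flags);    /* released exactly once */

        if (soft_flow) {
            /* no stray unlock here: the lock is no longer held */
            spin_lock_irqsave(&demo_lock, flags);
            /* ... queue the flow-control command ... */
            spin_unlock_irqrestore(&demo_lock, flags);
        }
    }
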
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 367be9175061..9b58b894f823 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) | |||
702 | if (is_switch) { | 702 | if (is_switch) { |
703 | set_leds(); | 703 | set_leds(); |
704 | compute_shiftstate(); | 704 | compute_shiftstate(); |
705 | notify_update(vc); | ||
705 | } | 706 | } |
706 | } | 707 | } |
707 | 708 | ||
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index dfea2bde162b..f577daedb630 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c | |||
@@ -73,8 +73,8 @@ | |||
73 | #define XHI_BUFFER_START 0 | 73 | #define XHI_BUFFER_START 0 |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * buffer_icap_get_status: Get the contents of the status register. | 76 | * buffer_icap_get_status - Get the contents of the status register. |
77 | * @parameter base_address: is the base address of the device | 77 | * @base_address: is the base address of the device |
78 | * | 78 | * |
79 | * The status register contains the ICAP status and the done bit. | 79 | * The status register contains the ICAP status and the done bit. |
80 | * | 80 | * |
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * buffer_icap_get_bram: Reads data from the storage buffer bram. | 97 | * buffer_icap_get_bram - Reads data from the storage buffer bram. |
98 | * @parameter base_address: contains the base address of the component. | 98 | * @base_address: contains the base address of the component. |
99 | * @parameter offset: The word offset from which the data should be read. | 99 | * @offset: The word offset from which the data should be read. |
100 | * | 100 | * |
101 | * A bram is used as a configuration memory cache. One frame of data can | 101 | * A bram is used as a configuration memory cache. One frame of data can |
102 | * be stored in this "storage buffer". | 102 | * be stored in this "storage buffer". |
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * buffer_icap_busy: Return true if the icap device is busy | 111 | * buffer_icap_busy - Return true if the icap device is busy |
112 | * @parameter base_address: is the base address of the device | 112 | * @base_address: is the base address of the device |
113 | * This queries the low order bit of the status register, which | 113 | * This queries the low order bit of the status register, which |
114 | * The queries the low order bit of the status register, which | 114 | * The queries the low order bit of the status register, which |
115 | * indicates whether the current configuration or readback operation | 115 | * indicates whether the current configuration or readback operation |
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * buffer_icap_busy: Return true if the icap device is not busy | 124 | * buffer_icap_busy - Return true if the icap device is not busy |
125 | * @parameter base_address: is the base address of the device | 125 | * @base_address: is the base address of the device |
126 | * | 126 | * |
127 | * This queries the low order bit of the status register, which | 127 | * This queries the low order bit of the status register, which |
128 | * indicates whether the current configuration or readback operation | 128 | * indicates whether the current configuration or readback operation |
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /** | 136 | /** |
137 | * buffer_icap_set_size: Set the size register. | 137 | * buffer_icap_set_size - Set the size register. |
138 | * @parameter base_address: is the base address of the device | 138 | * @base_address: is the base address of the device |
139 | * @parameter data: The size in bytes. | 139 | * @data: The size in bytes. |
140 | * | 140 | * |
141 | * The size register holds the number of 8 bit bytes to transfer between | 141 | * The size register holds the number of 8 bit bytes to transfer between |
142 | * bram and the icap (or icap to bram). | 142 | * bram and the icap (or icap to bram). |
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address, | |||
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * buffer_icap_mSetoffsetReg: Set the bram offset register. | 151 | * buffer_icap_set_offset - Set the bram offset register. |
152 | * @parameter base_address: contains the base address of the device. | 152 | * @base_address: contains the base address of the device. |
153 | * @parameter data: is the value to be written to the data register. | 153 | * @data: is the value to be written to the data register. |
154 | * | 154 | * |
155 | * The bram offset register holds the starting bram address to transfer | 155 | * The bram offset register holds the starting bram address to transfer |
156 | * data from during configuration or write data to during readback. | 156 | * data from during configuration or write data to during readback. |
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address, | |||
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. | 165 | * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. |
166 | * @parameter base_address: contains the base address of the device. | 166 | * @base_address: contains the base address of the device. |
167 | * @parameter data: is the value to be written to the data register. | 167 | * @data: is the value to be written to the data register. |
168 | * | 168 | * |
169 | * The RNC register determines the direction of the data transfer. It | 169 | * The RNC register determines the direction of the data transfer. It |
170 | * controls whether a configuration or readback take place. Writing to | 170 | * controls whether a configuration or readback take place. Writing to |
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address, | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /** | 180 | /** |
181 | * buffer_icap_set_bram: Write data to the storage buffer bram. | 181 | * buffer_icap_set_bram - Write data to the storage buffer bram. |
182 | * @parameter base_address: contains the base address of the component. | 182 | * @base_address: contains the base address of the component. |
183 | * @parameter offset: The word offset at which the data should be written. | 183 | * @offset: The word offset at which the data should be written. |
184 | * @parameter data: The value to be written to the bram offset. | 184 | * @data: The value to be written to the bram offset. |
185 | * | 185 | * |
186 | * A bram is used as a configuration memory cache. One frame of data can | 186 | * A bram is used as a configuration memory cache. One frame of data can |
187 | * be stored in this "storage buffer". | 187 | * be stored in this "storage buffer". |
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. | 196 | * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. |
197 | * @parameter drvdata: a pointer to the drvdata. | 197 | * @drvdata: a pointer to the drvdata. |
198 | * @parameter offset: The storage buffer start address. | 198 | * @offset: The storage buffer start address. |
199 | * @parameter count: The number of words (32 bit) to read from the | 199 | * @count: The number of words (32 bit) to read from the |
200 | * device (ICAP). | 200 | * device (ICAP). |
201 | **/ | 201 | **/ |
202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | 202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, |
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | |||
227 | }; | 227 | }; |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. | 230 | * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. |
231 | * @parameter drvdata: a pointer to the drvdata. | 231 | * @drvdata: a pointer to the drvdata. |
232 | * @parameter offset: The storage buffer start address. | 232 | * @offset: The storage buffer start address. |
233 | * @parameter count: The number of words (32 bit) to read from the | 233 | * @count: The number of words (32 bit) to read from the |
234 | * device (ICAP). | 234 | * device (ICAP). |
235 | **/ | 235 | **/ |
236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | 236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, |
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * buffer_icap_reset: Reset the logic of the icap device. | 264 | * buffer_icap_reset - Reset the logic of the icap device. |
265 | * @parameter drvdata: a pointer to the drvdata. | 265 | * @drvdata: a pointer to the drvdata. |
266 | * | 266 | * |
267 | * Writing to the status register resets the ICAP logic in an internal | 267 | * Writing to the status register resets the ICAP logic in an internal |
268 | * version of the core. For the version of the core published in EDK, | 268 | * version of the core. For the version of the core published in EDK, |
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * buffer_icap_set_configuration: Load a partial bitstream from system memory. | 277 | * buffer_icap_set_configuration - Load a partial bitstream from system memory. |
278 | * @parameter drvdata: a pointer to the drvdata. | 278 | * @drvdata: a pointer to the drvdata. |
279 | * @parameter data: Kernel address of the partial bitstream. | 279 | * @data: Kernel address of the partial bitstream. |
280 | * @parameter size: the size of the partial bitstream in 32 bit words. | 280 | * @size: the size of the partial bitstream in 32 bit words. |
281 | **/ | 281 | **/ |
282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
283 | u32 size) | 283 | u32 size) |
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | |||
333 | }; | 333 | }; |
334 | 334 | ||
335 | /** | 335 | /** |
336 | * buffer_icap_get_configuration: Read configuration data from the device. | 336 | * buffer_icap_get_configuration - Read configuration data from the device. |
337 | * @parameter drvdata: a pointer to the drvdata. | 337 | * @drvdata: a pointer to the drvdata. |
338 | * @parameter data: Address of the data representing the partial bitstream | 338 | * @data: Address of the data representing the partial bitstream |
339 | * @parameter size: the size of the partial bitstream in 32 bit words. | 339 | * @size: the size of the partial bitstream in 32 bit words. |
340 | **/ | 340 | **/ |
341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
342 | u32 size) | 342 | u32 size) |
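
All of the buffer_icap.c hunks above are kernel-doc cleanups: the summary line uses `name - description` instead of `name:`, and every argument is tagged `@arg:` rather than the non-standard `@parameter arg:`. The canonical layout, shown for a hypothetical helper that is not part of this driver:

    #include <linux/io.h>
    #include <linux/types.h>

    /**
     * demo_write_reg - Write one word to a device register.
     * @base: virtual base address of the register block
     * @offset: byte offset of the register to write
     * @value: the 32-bit value to write
     *
     * The longer description follows the argument list after a blank line.
     */
    static inline void demo_write_reg(void __iomem *base, unsigned int offset,
                                      u32 value)
    {
        iowrite32(value, base + offset);
    }
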
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 0988314694a6..6f45dbd47125 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c | |||
@@ -94,9 +94,9 @@ | |||
94 | 94 | ||
95 | 95 | ||
96 | /** | 96 | /** |
97 | * fifo_icap_fifo_write: Write data to the write FIFO. | 97 | * fifo_icap_fifo_write - Write data to the write FIFO. |
98 | * @parameter drvdata: a pointer to the drvdata. | 98 | * @drvdata: a pointer to the drvdata. |
99 | * @parameter data: the 32-bit value to be written to the FIFO. | 99 | * @data: the 32-bit value to be written to the FIFO. |
100 | * | 100 | * |
101 | * This function will silently fail if the fifo is full. | 101 | * This function will silently fail if the fifo is full. |
102 | **/ | 102 | **/ |
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * fifo_icap_fifo_read: Read data from the Read FIFO. | 111 | * fifo_icap_fifo_read - Read data from the Read FIFO. |
112 | * @parameter drvdata: a pointer to the drvdata. | 112 | * @drvdata: a pointer to the drvdata. |
113 | * | 113 | * |
114 | * This function will silently fail if the fifo is empty. | 114 | * This function will silently fail if the fifo is empty. |
115 | **/ | 115 | **/ |
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * fifo_icap_set_read_size: Set the the size register. | 124 | * fifo_icap_set_read_size - Set the size register. |
125 | * @parameter drvdata: a pointer to the drvdata. | 125 | * @drvdata: a pointer to the drvdata. |
126 | * @parameter data: the size of the following read transaction, in words. | 126 | * @data: the size of the following read transaction, in words. |
127 | **/ | 127 | **/ |
128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | 128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, |
129 | u32 data) | 129 | u32 data) |
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * fifo_icap_start_config: Initiate a configuration (write) to the device. | 135 | * fifo_icap_start_config - Initiate a configuration (write) to the device. |
136 | * @parameter drvdata: a pointer to the drvdata. | 136 | * @drvdata: a pointer to the drvdata. |
137 | **/ | 137 | **/ |
138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | 138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) |
139 | { | 139 | { |
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * fifo_icap_start_readback: Initiate a readback from the device. | 145 | * fifo_icap_start_readback - Initiate a readback from the device. |
146 | * @parameter drvdata: a pointer to the drvdata. | 146 | * @drvdata: a pointer to the drvdata. |
147 | **/ | 147 | **/ |
148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | 148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) |
149 | { | 149 | { |
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * fifo_icap_busy: Return true if the ICAP is still processing a transaction. | 155 | * fifo_icap_busy - Return true if the ICAP is still processing a transaction. |
156 | * @parameter drvdata: a pointer to the drvdata. | 156 | * @drvdata: a pointer to the drvdata. |
157 | **/ | 157 | **/ |
158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | 158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) |
159 | { | 159 | { |
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | /** | 165 | /** |
166 | * fifo_icap_write_fifo_vacancy: Query the write fifo available space. | 166 | * fifo_icap_write_fifo_vacancy - Query the write fifo available space. |
167 | * @parameter drvdata: a pointer to the drvdata. | 167 | * @drvdata: a pointer to the drvdata. |
168 | * | 168 | * |
169 | * Return the number of words that can be safely pushed into the write fifo. | 169 | * Return the number of words that can be safely pushed into the write fifo. |
170 | **/ | 170 | **/ |
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy( | |||
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * fifo_icap_read_fifo_occupancy: Query the read fifo available data. | 178 | * fifo_icap_read_fifo_occupancy - Query the read fifo available data. |
179 | * @parameter drvdata: a pointer to the drvdata. | 179 | * @drvdata: a pointer to the drvdata. |
180 | * | 180 | * |
181 | * Return the number of words that can be safely read from the read fifo. | 181 | * Return the number of words that can be safely read from the read fifo. |
182 | **/ | 182 | **/ |
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy( | |||
187 | } | 187 | } |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * fifo_icap_set_configuration: Send configuration data to the ICAP. | 190 | * fifo_icap_set_configuration - Send configuration data to the ICAP. |
191 | * @parameter drvdata: a pointer to the drvdata. | 191 | * @drvdata: a pointer to the drvdata. |
192 | * @parameter frame_buffer: a pointer to the data to be written to the | 192 | * @frame_buffer: a pointer to the data to be written to the |
193 | * ICAP device. | 193 | * ICAP device. |
194 | * @parameter num_words: the number of words (32 bit) to write to the ICAP | 194 | * @num_words: the number of words (32 bit) to write to the ICAP |
195 | * device. | 195 | * device. |
196 | 196 | ||
197 | * This function writes the given user data to the Write FIFO in | 197 | * This function writes the given user data to the Write FIFO in |
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
269 | * fifo_icap_get_configuration: Read configuration data from the device. | 269 | * fifo_icap_get_configuration - Read configuration data from the device. |
270 | * @parameter drvdata: a pointer to the drvdata. | 270 | * @drvdata: a pointer to the drvdata. |
271 | * @parameter data: Address of the data representing the partial bitstream | 271 | * @data: Address of the data representing the partial bitstream |
272 | * @parameter size: the size of the partial bitstream in 32 bit words. | 272 | * @size: the size of the partial bitstream in 32 bit words. |
273 | * | 273 | * |
274 | * This function reads the specified number of words from the ICAP device in | 274 | * This function reads the specified number of words from the ICAP device in |
275 | * the polled mode. | 275 | * the polled mode. |
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, | |||
335 | } | 335 | } |
336 | 336 | ||
337 | /** | 337 | /** |
338 | * buffer_icap_reset: Reset the logic of the icap device. | 338 | * buffer_icap_reset - Reset the logic of the icap device. |
339 | * @parameter drvdata: a pointer to the drvdata. | 339 | * @drvdata: a pointer to the drvdata. |
340 | * | 340 | * |
341 | * This function forces the software reset of the complete HWICAP device. | 341 | * This function forces the software reset of the complete HWICAP device. |
342 | * All the registers will return to the default value and the FIFO is also | 342 | * All the registers will return to the default value and the FIFO is also |
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata) | |||
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. | 363 | * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. |
364 | * @parameter drvdata: a pointer to the drvdata. | 364 | * @drvdata: a pointer to the drvdata. |
365 | */ | 365 | */ |
366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) | 366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) |
367 | { | 367 | { |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 24f6aef0fd3c..2284fa2a5a57 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -84,7 +84,7 @@ | |||
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/poll.h> | 85 | #include <linux/poll.h> |
86 | #include <linux/proc_fs.h> | 86 | #include <linux/proc_fs.h> |
87 | #include <asm/semaphore.h> | 87 | #include <linux/mutex.h> |
88 | #include <linux/sysctl.h> | 88 | #include <linux/sysctl.h> |
89 | #include <linux/version.h> | 89 | #include <linux/version.h> |
90 | #include <linux/fs.h> | 90 | #include <linux/fs.h> |
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO); | |||
119 | 119 | ||
120 | /* An array, which is set to true when the device is registered. */ | 120 | /* An array, which is set to true when the device is registered. */ |
121 | static bool probed_devices[HWICAP_DEVICES]; | 121 | static bool probed_devices[HWICAP_DEVICES]; |
122 | static struct mutex icap_sem; | ||
122 | 123 | ||
123 | static struct class *icap_class; | 124 | static struct class *icap_class; |
124 | 125 | ||
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = { | |||
199 | }; | 200 | }; |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * hwicap_command_desync: Send a DESYNC command to the ICAP port. | 203 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. |
203 | * @parameter drvdata: a pointer to the drvdata. | 204 | * @drvdata: a pointer to the drvdata. |
204 | * | 205 | * |
205 | * This command desynchronizes the ICAP After this command, a | 206 | * This command desynchronizes the ICAP After this command, a |
206 | * bitstream containing a NULL packet, followed by a SYNCH packet is | 207 | * bitstream containing a NULL packet, followed by a SYNCH packet is |
207 | * required before the ICAP will recognize commands. | 208 | * required before the ICAP will recognize commands. |
208 | */ | 209 | */ |
209 | int hwicap_command_desync(struct hwicap_drvdata *drvdata) | 210 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) |
210 | { | 211 | { |
211 | u32 buffer[4]; | 212 | u32 buffer[4]; |
212 | u32 index = 0; | 213 | u32 index = 0; |
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata) | |||
228 | } | 229 | } |
229 | 230 | ||
230 | /** | 231 | /** |
231 | * hwicap_command_capture: Send a CAPTURE command to the ICAP port. | 232 | * hwicap_get_configuration_register - Query a configuration register. |
232 | * @parameter drvdata: a pointer to the drvdata. | 233 | * @drvdata: a pointer to the drvdata. |
233 | * | 234 | * @reg: a constant which represents the configuration |
234 | * This command captures all of the flip flop states so they will be | ||
235 | * available during readback. One can use this command instead of | ||
236 | * enabling the CAPTURE block in the design. | ||
237 | */ | ||
238 | int hwicap_command_capture(struct hwicap_drvdata *drvdata) | ||
239 | { | ||
240 | u32 buffer[7]; | ||
241 | u32 index = 0; | ||
242 | |||
243 | /* | ||
244 | * Create the data to be written to the ICAP. | ||
245 | */ | ||
246 | buffer[index++] = XHI_DUMMY_PACKET; | ||
247 | buffer[index++] = XHI_SYNC_PACKET; | ||
248 | buffer[index++] = XHI_NOOP_PACKET; | ||
249 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; | ||
250 | buffer[index++] = XHI_CMD_GCAPTURE; | ||
251 | buffer[index++] = XHI_DUMMY_PACKET; | ||
252 | buffer[index++] = XHI_DUMMY_PACKET; | ||
253 | |||
254 | /* | ||
255 | * Write the data to the FIFO and intiate the transfer of data | ||
256 | * present in the FIFO to the ICAP device. | ||
257 | */ | ||
258 | return drvdata->config->set_configuration(drvdata, | ||
259 | &buffer[0], index); | ||
260 | |||
261 | } | ||
262 | |||
263 | /** | ||
264 | * hwicap_get_configuration_register: Query a configuration register. | ||
265 | * @parameter drvdata: a pointer to the drvdata. | ||
266 | * @parameter reg: a constant which represents the configuration | ||
267 | * register value to be returned. | 235 | * register value to be returned. |
268 | * Examples: XHI_IDCODE, XHI_FLR. | 236 | * Examples: XHI_IDCODE, XHI_FLR. |
269 | * @parameter RegData: returns the value of the register. | 237 | * @reg_data: returns the value of the register. |
270 | * | 238 | * |
271 | * Sends a query packet to the ICAP and then receives the response. | 239 | * Sends a query packet to the ICAP and then receives the response. |
272 | * The icap is left in Synched state. | 240 | * The icap is left in Synched state. |
273 | */ | 241 | */ |
274 | int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | 242 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, |
275 | u32 reg, u32 *RegData) | 243 | u32 reg, u32 *reg_data) |
276 | { | 244 | { |
277 | int status; | 245 | int status; |
278 | u32 buffer[6]; | 246 | u32 buffer[6]; |
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | |||
300 | /* | 268 | /* |
301 | * Read the configuration register | 269 | * Read the configuration register |
302 | */ | 270 | */ |
303 | status = drvdata->config->get_configuration(drvdata, RegData, 1); | 271 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); |
304 | if (status) | 272 | if (status) |
305 | return status; | 273 | return status; |
306 | 274 | ||
307 | return 0; | 275 | return 0; |
308 | } | 276 | } |
309 | 277 | ||
310 | int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | 278 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) |
311 | { | 279 | { |
312 | int status; | 280 | int status; |
313 | u32 idcode; | 281 | u32 idcode; |
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | |||
344 | } | 312 | } |
345 | 313 | ||
346 | static ssize_t | 314 | static ssize_t |
347 | hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | 315 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
348 | { | 316 | { |
349 | struct hwicap_drvdata *drvdata = file->private_data; | 317 | struct hwicap_drvdata *drvdata = file->private_data; |
350 | ssize_t bytes_to_read = 0; | 318 | ssize_t bytes_to_read = 0; |
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
353 | u32 bytes_remaining; | 321 | u32 bytes_remaining; |
354 | int status; | 322 | int status; |
355 | 323 | ||
356 | if (down_interruptible(&drvdata->sem)) | 324 | status = mutex_lock_interruptible(&drvdata->sem); |
357 | return -ERESTARTSYS; | 325 | if (status) |
326 | return status; | ||
358 | 327 | ||
359 | if (drvdata->read_buffer_in_use) { | 328 | if (drvdata->read_buffer_in_use) { |
360 | /* If there are leftover bytes in the buffer, just */ | 329 | /* If there are leftover bytes in the buffer, just */ |
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
370 | goto error; | 339 | goto error; |
371 | } | 340 | } |
372 | drvdata->read_buffer_in_use -= bytes_to_read; | 341 | drvdata->read_buffer_in_use -= bytes_to_read; |
373 | memcpy(drvdata->read_buffer + bytes_to_read, | 342 | memmove(drvdata->read_buffer, |
374 | drvdata->read_buffer, 4 - bytes_to_read); | 343 | drvdata->read_buffer + bytes_to_read, |
344 | 4 - bytes_to_read); | ||
375 | } else { | 345 | } else { |
376 | /* Get new data from the ICAP, and return what was requested. */ | 346 | /* Get new data from the ICAP, and return what was requested. */ |
377 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); | 347 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); |
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
414 | status = -EFAULT; | 384 | status = -EFAULT; |
415 | goto error; | 385 | goto error; |
416 | } | 386 | } |
417 | memcpy(kbuf, drvdata->read_buffer, bytes_remaining); | 387 | memcpy(drvdata->read_buffer, |
388 | kbuf, | ||
389 | bytes_remaining); | ||
418 | drvdata->read_buffer_in_use = bytes_remaining; | 390 | drvdata->read_buffer_in_use = bytes_remaining; |
419 | free_page((unsigned long)kbuf); | 391 | free_page((unsigned long)kbuf); |
420 | } | 392 | } |
421 | status = bytes_to_read; | 393 | status = bytes_to_read; |
422 | error: | 394 | error: |
423 | up(&drvdata->sem); | 395 | mutex_unlock(&drvdata->sem); |
424 | return status; | 396 | return status; |
425 | } | 397 | } |
426 | 398 | ||
427 | static ssize_t | 399 | static ssize_t |
428 | hwicap_write(struct file *file, const char *buf, | 400 | hwicap_write(struct file *file, const char __user *buf, |
429 | size_t count, loff_t *ppos) | 401 | size_t count, loff_t *ppos) |
430 | { | 402 | { |
431 | struct hwicap_drvdata *drvdata = file->private_data; | 403 | struct hwicap_drvdata *drvdata = file->private_data; |
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf, | |||
435 | ssize_t len; | 407 | ssize_t len; |
436 | ssize_t status; | 408 | ssize_t status; |
437 | 409 | ||
438 | if (down_interruptible(&drvdata->sem)) | 410 | status = mutex_lock_interruptible(&drvdata->sem); |
439 | return -ERESTARTSYS; | 411 | if (status) |
412 | return status; | ||
440 | 413 | ||
441 | left += drvdata->write_buffer_in_use; | 414 | left += drvdata->write_buffer_in_use; |
442 | 415 | ||
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf, | |||
465 | memcpy(kbuf, drvdata->write_buffer, | 438 | memcpy(kbuf, drvdata->write_buffer, |
466 | drvdata->write_buffer_in_use); | 439 | drvdata->write_buffer_in_use); |
467 | if (copy_from_user( | 440 | if (copy_from_user( |
468 | (((char *)kbuf) + (drvdata->write_buffer_in_use)), | 441 | (((char *)kbuf) + drvdata->write_buffer_in_use), |
469 | buf + written, | 442 | buf + written, |
470 | len - (drvdata->write_buffer_in_use))) { | 443 | len - (drvdata->write_buffer_in_use))) { |
471 | free_page((unsigned long)kbuf); | 444 | free_page((unsigned long)kbuf); |
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf, | |||
508 | free_page((unsigned long)kbuf); | 481 | free_page((unsigned long)kbuf); |
509 | status = written; | 482 | status = written; |
510 | error: | 483 | error: |
511 | up(&drvdata->sem); | 484 | mutex_unlock(&drvdata->sem); |
512 | return status; | 485 | return status; |
513 | } | 486 | } |
514 | 487 | ||
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
519 | 492 | ||
520 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); | 493 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); |
521 | 494 | ||
522 | if (down_interruptible(&drvdata->sem)) | 495 | status = mutex_lock_interruptible(&drvdata->sem); |
523 | return -ERESTARTSYS; | 496 | if (status) |
497 | return status; | ||
524 | 498 | ||
525 | if (drvdata->is_open) { | 499 | if (drvdata->is_open) { |
526 | status = -EBUSY; | 500 | status = -EBUSY; |
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
539 | drvdata->is_open = 1; | 513 | drvdata->is_open = 1; |
540 | 514 | ||
541 | error: | 515 | error: |
542 | up(&drvdata->sem); | 516 | mutex_unlock(&drvdata->sem); |
543 | return status; | 517 | return status; |
544 | } | 518 | } |
545 | 519 | ||
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
549 | int i; | 523 | int i; |
550 | int status = 0; | 524 | int status = 0; |
551 | 525 | ||
552 | if (down_interruptible(&drvdata->sem)) | 526 | mutex_lock(&drvdata->sem); |
553 | return -ERESTARTSYS; | ||
554 | 527 | ||
555 | if (drvdata->write_buffer_in_use) { | 528 | if (drvdata->write_buffer_in_use) { |
556 | /* Flush write buffer. */ | 529 | /* Flush write buffer. */ |
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
569 | 542 | ||
570 | error: | 543 | error: |
571 | drvdata->is_open = 0; | 544 | drvdata->is_open = 0; |
572 | up(&drvdata->sem); | 545 | mutex_unlock(&drvdata->sem); |
573 | return status; | 546 | return status; |
574 | } | 547 | } |
575 | 548 | ||
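
The drvdata->sem changes follow the usual semaphore-to-mutex conversion: init_MUTEX() becomes mutex_init(), down_interruptible() becomes mutex_lock_interruptible() (whose negative return can be passed straight back instead of a hard-coded -ERESTARTSYS), up() becomes mutex_unlock(), and the release path, which must not fail, now takes the lock unconditionally. A condensed sketch of the pattern with a hypothetical device structure:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct demo_dev {
        struct mutex lock;    /* was: struct semaphore sem */
        int is_open;
    };

    static int demo_open(struct demo_dev *dev)
    {
        int ret;

        ret = mutex_lock_interruptible(&dev->lock);
        if (ret)
            return ret;    /* propagate -EINTR instead of forcing -ERESTARTSYS */

        if (dev->is_open) {
            ret = -EBUSY;
            goto out;
        }
        dev->is_open = 1;
    out:
        mutex_unlock(&dev->lock);
        return ret;
    }
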
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
592 | 565 | ||
593 | dev_info(dev, "Xilinx icap port driver\n"); | 566 | dev_info(dev, "Xilinx icap port driver\n"); |
594 | 567 | ||
568 | mutex_lock(&icap_sem); | ||
569 | |||
595 | if (id < 0) { | 570 | if (id < 0) { |
596 | for (id = 0; id < HWICAP_DEVICES; id++) | 571 | for (id = 0; id < HWICAP_DEVICES; id++) |
597 | if (!probed_devices[id]) | 572 | if (!probed_devices[id]) |
598 | break; | 573 | break; |
599 | } | 574 | } |
600 | if (id < 0 || id >= HWICAP_DEVICES) { | 575 | if (id < 0 || id >= HWICAP_DEVICES) { |
576 | mutex_unlock(&icap_sem); | ||
601 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); | 577 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); |
602 | return -EINVAL; | 578 | return -EINVAL; |
603 | } | 579 | } |
604 | if (probed_devices[id]) { | 580 | if (probed_devices[id]) { |
581 | mutex_unlock(&icap_sem); | ||
605 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", | 582 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", |
606 | DRIVER_NAME, id); | 583 | DRIVER_NAME, id); |
607 | return -EBUSY; | 584 | return -EBUSY; |
608 | } | 585 | } |
609 | 586 | ||
610 | probed_devices[id] = 1; | 587 | probed_devices[id] = 1; |
588 | mutex_unlock(&icap_sem); | ||
611 | 589 | ||
612 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); | 590 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); |
613 | 591 | ||
614 | drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); | 592 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); |
615 | if (!drvdata) { | 593 | if (!drvdata) { |
616 | dev_err(dev, "Couldn't allocate device private record\n"); | 594 | dev_err(dev, "Couldn't allocate device private record\n"); |
617 | return -ENOMEM; | 595 | retval = -ENOMEM; |
596 | goto failed0; | ||
618 | } | 597 | } |
619 | memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata)); | ||
620 | dev_set_drvdata(dev, (void *)drvdata); | 598 | dev_set_drvdata(dev, (void *)drvdata); |
621 | 599 | ||
622 | if (!regs_res) { | 600 | if (!regs_res) { |
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
648 | drvdata->config = config; | 626 | drvdata->config = config; |
649 | drvdata->config_regs = config_regs; | 627 | drvdata->config_regs = config_regs; |
650 | 628 | ||
651 | init_MUTEX(&drvdata->sem); | 629 | mutex_init(&drvdata->sem); |
652 | drvdata->is_open = 0; | 630 | drvdata->is_open = 0; |
653 | 631 | ||
654 | dev_info(dev, "ioremap %lx to %p with size %x\n", | 632 | dev_info(dev, "ioremap %lx to %p with size %x\n", |
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
663 | goto failed3; | 641 | goto failed3; |
664 | } | 642 | } |
665 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ | 643 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ |
666 | class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); | 644 | device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id); |
667 | return 0; /* success */ | 645 | return 0; /* success */ |
668 | 646 | ||
669 | failed3: | 647 | failed3: |
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
675 | failed1: | 653 | failed1: |
676 | kfree(drvdata); | 654 | kfree(drvdata); |
677 | 655 | ||
656 | failed0: | ||
657 | mutex_lock(&icap_sem); | ||
658 | probed_devices[id] = 0; | ||
659 | mutex_unlock(&icap_sem); | ||
660 | |||
678 | return retval; | 661 | return retval; |
679 | } | 662 | } |
680 | 663 | ||
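
Two independent cleanups land in hwicap_setup(): the scan and claim of a free slot in probed_devices[] is now serialized by the new icap_sem mutex, with the slot released again under the same lock on the failed0 path, and kmalloc() plus memset() collapses into kzalloc(). A minimal sketch of the locked ID allocation, with hypothetical names and a made-up device count:

    #include <linux/mutex.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_DEVICES 4    /* made-up count for the sketch */

    static bool demo_used[DEMO_DEVICES];
    static DEFINE_MUTEX(demo_sem);

    /* Claim a device slot; id < 0 means "pick the first free one". */
    static int demo_claim_id(int id)
    {
        mutex_lock(&demo_sem);
        if (id < 0) {
            for (id = 0; id < DEMO_DEVICES; id++)
                if (!demo_used[id])
                    break;
        }
        if (id >= DEMO_DEVICES || demo_used[id]) {
            mutex_unlock(&demo_sem);
            return -EBUSY;
        }
        demo_used[id] = true;    /* marked while the lock is still held */
        mutex_unlock(&demo_sem);
        return id;
    }
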
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev) | |||
699 | if (!drvdata) | 682 | if (!drvdata) |
700 | return 0; | 683 | return 0; |
701 | 684 | ||
702 | class_device_destroy(icap_class, drvdata->devt); | 685 | device_destroy(icap_class, drvdata->devt); |
703 | cdev_del(&drvdata->cdev); | 686 | cdev_del(&drvdata->cdev); |
704 | iounmap(drvdata->base_address); | 687 | iounmap(drvdata->base_address); |
705 | release_mem_region(drvdata->mem_start, drvdata->mem_size); | 688 | release_mem_region(drvdata->mem_start, drvdata->mem_size); |
706 | kfree(drvdata); | 689 | kfree(drvdata); |
707 | dev_set_drvdata(dev, NULL); | 690 | dev_set_drvdata(dev, NULL); |
708 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
709 | 691 | ||
692 | mutex_lock(&icap_sem); | ||
693 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
694 | mutex_unlock(&icap_sem); | ||
710 | return 0; /* success */ | 695 | return 0; /* success */ |
711 | } | 696 | } |
712 | 697 | ||
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = { | |||
821 | }; | 806 | }; |
822 | 807 | ||
823 | /* Registration helpers to keep the number of #ifdefs to a minimum */ | 808 | /* Registration helpers to keep the number of #ifdefs to a minimum */ |
824 | static inline int __devinit hwicap_of_register(void) | 809 | static inline int __init hwicap_of_register(void) |
825 | { | 810 | { |
826 | pr_debug("hwicap: calling of_register_platform_driver()\n"); | 811 | pr_debug("hwicap: calling of_register_platform_driver()\n"); |
827 | return of_register_platform_driver(&hwicap_of_driver); | 812 | return of_register_platform_driver(&hwicap_of_driver); |
828 | } | 813 | } |
829 | 814 | ||
830 | static inline void __devexit hwicap_of_unregister(void) | 815 | static inline void __exit hwicap_of_unregister(void) |
831 | { | 816 | { |
832 | of_unregister_platform_driver(&hwicap_of_driver); | 817 | of_unregister_platform_driver(&hwicap_of_driver); |
833 | } | 818 | } |
834 | #else /* CONFIG_OF */ | 819 | #else /* CONFIG_OF */ |
835 | /* CONFIG_OF not enabled; do nothing helpers */ | 820 | /* CONFIG_OF not enabled; do nothing helpers */ |
836 | static inline int __devinit hwicap_of_register(void) { return 0; } | 821 | static inline int __init hwicap_of_register(void) { return 0; } |
837 | static inline void __devexit hwicap_of_unregister(void) { } | 822 | static inline void __exit hwicap_of_unregister(void) { } |
838 | #endif /* CONFIG_OF */ | 823 | #endif /* CONFIG_OF */ |
839 | 824 | ||
840 | static int __devinit hwicap_module_init(void) | 825 | static int __init hwicap_module_init(void) |
841 | { | 826 | { |
842 | dev_t devt; | 827 | dev_t devt; |
843 | int retval; | 828 | int retval; |
844 | 829 | ||
845 | icap_class = class_create(THIS_MODULE, "xilinx_config"); | 830 | icap_class = class_create(THIS_MODULE, "xilinx_config"); |
831 | mutex_init(&icap_sem); | ||
846 | 832 | ||
847 | if (xhwicap_major) { | 833 | if (xhwicap_major) { |
848 | devt = MKDEV(xhwicap_major, xhwicap_minor); | 834 | devt = MKDEV(xhwicap_major, xhwicap_minor); |
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void) | |||
883 | return retval; | 869 | return retval; |
884 | } | 870 | } |
885 | 871 | ||
886 | static void __devexit hwicap_module_cleanup(void) | 872 | static void __exit hwicap_module_cleanup(void) |
887 | { | 873 | { |
888 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); | 874 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); |
889 | 875 | ||
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index ae771cac1629..405fee7e189b 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -48,9 +48,9 @@ struct hwicap_drvdata { | |||
48 | u8 write_buffer[4]; | 48 | u8 write_buffer[4]; |
49 | u32 read_buffer_in_use; /* Always in [0,3] */ | 49 | u32 read_buffer_in_use; /* Always in [0,3] */ |
50 | u8 read_buffer[4]; | 50 | u8 read_buffer[4]; |
51 | u32 mem_start; /* phys. address of the control registers */ | 51 | resource_size_t mem_start;/* phys. address of the control registers */ |
52 | u32 mem_end; /* phys. address of the control registers */ | 52 | resource_size_t mem_end; /* phys. address of the control registers */ |
53 | u32 mem_size; | 53 | resource_size_t mem_size; |
54 | void __iomem *base_address;/* virt. address of the control registers */ | 54 | void __iomem *base_address;/* virt. address of the control registers */ |
55 | 55 | ||
56 | struct device *dev; | 56 | struct device *dev; |
@@ -61,7 +61,7 @@ struct hwicap_drvdata { | |||
61 | const struct config_registers *config_regs; | 61 | const struct config_registers *config_regs; |
62 | void *private_data; | 62 | void *private_data; |
63 | bool is_open; | 63 | bool is_open; |
64 | struct semaphore sem; | 64 | struct mutex sem; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct hwicap_driver_config { | 67 | struct hwicap_driver_config { |
@@ -164,29 +164,29 @@ struct config_registers { | |||
164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL | 164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * hwicap_type_1_read: Generates a Type 1 read packet header. | 167 | * hwicap_type_1_read - Generates a Type 1 read packet header. |
168 | * @parameter: Register is the address of the register to be read back. | 168 | * @reg: is the address of the register to be read back. |
169 | * | 169 | * |
170 | * Generates a Type 1 read packet header, which is used to indirectly | 170 | * Generates a Type 1 read packet header, which is used to indirectly |
171 | * read registers in the configuration logic. This packet must then | 171 | * read registers in the configuration logic. This packet must then |
172 | * be sent through the icap device, and a return packet received with | 172 | * be sent through the icap device, and a return packet received with |
173 | * the information. | 173 | * the information. |
174 | **/ | 174 | **/ |
175 | static inline u32 hwicap_type_1_read(u32 Register) | 175 | static inline u32 hwicap_type_1_read(u32 reg) |
176 | { | 176 | { |
177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
178 | (Register << XHI_REGISTER_SHIFT) | | 178 | (reg << XHI_REGISTER_SHIFT) | |
179 | (XHI_OP_READ << XHI_OP_SHIFT); | 179 | (XHI_OP_READ << XHI_OP_SHIFT); |
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * hwicap_type_1_write: Generates a Type 1 write packet header | 183 | * hwicap_type_1_write - Generates a Type 1 write packet header |
184 | * @parameter: Register is the address of the register to be read back. | 184 | * @reg: is the address of the register to be read back. |
185 | **/ | 185 | **/ |
186 | static inline u32 hwicap_type_1_write(u32 Register) | 186 | static inline u32 hwicap_type_1_write(u32 reg) |
187 | { | 187 | { |
188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
189 | (Register << XHI_REGISTER_SHIFT) | | 189 | (reg << XHI_REGISTER_SHIFT) | |
190 | (XHI_OP_WRITE << XHI_OP_SHIFT); | 190 | (XHI_OP_WRITE << XHI_OP_SHIFT); |
191 | } | 191 | } |
192 | 192 | ||
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a703deffb795..27340a7b19dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC |
8 | depends on !HIGHMEM64G | 8 | depends on !HIGHMEM64G |
9 | help | 9 | help |
10 | DMA engines can do asynchronous data transfers without | 10 | DMA engines can do asynchronous data transfers without |
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA | |||
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config FSL_DMA | ||
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | ||
42 | depends on PPC | ||
43 | select DMA_ENGINE | ||
44 | ---help--- | ||
45 | Enable support for the Freescale DMA engine. It currently supports | ||
46 | the MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | ||
47 | The MPC8349 and MPC8360 are also supported. | ||
48 | |||
49 | config FSL_DMA_SELFTEST | ||
50 | bool "Enable the self test for each DMA channel" | ||
51 | depends on FSL_DMA | ||
52 | default y | ||
53 | ---help--- | ||
54 | Enable the self test for each DMA channel. A self test will be | ||
55 | performed after the channel is probed to ensure the DMA works correctly. | ||
56 | |||
40 | config DMA_ENGINE | 57 | config DMA_ENGINE |
41 | bool | 58 | bool |
42 | 59 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b152cd84e123..c8036d945902 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o | |||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | ||
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c new file mode 100644 index 000000000000..cc9a68158d99 --- /dev/null +++ b/drivers/dma/fsldma.c | |||
@@ -0,0 +1,1067 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: | ||
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
9 | * | ||
10 | * Description: | ||
11 | * DMA engine driver for the Freescale MPC8540 DMA controller, which | ||
12 | * also covers the MPC8560, MPC8555, MPC8548, MPC8641, and others. | ||
13 | * Support for the MPC8349 DMA controller is also included. | ||
14 | * | ||
15 | * This is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | |||
32 | #include "fsldma.h" | ||
33 | |||
34 | static void dma_init(struct fsl_dma_chan *fsl_chan) | ||
35 | { | ||
36 | /* Reset the channel */ | ||
37 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); | ||
38 | |||
39 | switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
40 | case FSL_DMA_IP_85XX: | ||
41 | /* Set the channel to the following modes: | ||
42 | * EIE - Error interrupt enable | ||
43 | * EOSIE - End of segments interrupt enable (basic mode) | ||
44 | * EOLNIE - End of links interrupt enable | ||
45 | */ | ||
46 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | ||
47 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
48 | break; | ||
49 | case FSL_DMA_IP_83XX: | ||
50 | /* Set the channel to the following modes: | ||
51 | * EOTIE - End-of-transfer interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, | ||
54 | 32); | ||
55 | break; | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val) | ||
61 | { | ||
62 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); | ||
63 | } | ||
64 | |||
65 | static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan) | ||
66 | { | ||
67 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); | ||
68 | } | ||
69 | |||
70 | static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, | ||
71 | struct fsl_dma_ld_hw *hw, u32 count) | ||
72 | { | ||
73 | hw->count = CPU_TO_DMA(fsl_chan, count, 32); | ||
74 | } | ||
75 | |||
76 | static void set_desc_src(struct fsl_dma_chan *fsl_chan, | ||
77 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | ||
78 | { | ||
79 | u64 snoop_bits; | ||
80 | |||
81 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
82 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
83 | hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | ||
84 | } | ||
85 | |||
86 | static void set_desc_dest(struct fsl_dma_chan *fsl_chan, | ||
87 | struct fsl_dma_ld_hw *hw, dma_addr_t dest) | ||
88 | { | ||
89 | u64 snoop_bits; | ||
90 | |||
91 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
92 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
93 | hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); | ||
94 | } | ||
95 | |||
96 | static void set_desc_next(struct fsl_dma_chan *fsl_chan, | ||
97 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | ||
98 | { | ||
99 | u64 snoop_bits; | ||
100 | |||
101 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
102 | ? FSL_DMA_SNEN : 0; | ||
103 | hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | ||
104 | } | ||
105 | |||
106 | static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
107 | { | ||
108 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); | ||
109 | } | ||
110 | |||
111 | static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) | ||
112 | { | ||
113 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; | ||
114 | } | ||
115 | |||
116 | static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
117 | { | ||
118 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); | ||
119 | } | ||
120 | |||
121 | static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) | ||
122 | { | ||
123 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); | ||
124 | } | ||
125 | |||
126 | static int dma_is_idle(struct fsl_dma_chan *fsl_chan) | ||
127 | { | ||
128 | u32 sr = get_sr(fsl_chan); | ||
129 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | ||
130 | } | ||
131 | |||
132 | static void dma_start(struct fsl_dma_chan *fsl_chan) | ||
133 | { | ||
134 | u32 mr_set = 0; | ||
135 | |||
136 | if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | ||
137 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); | ||
138 | mr_set |= FSL_DMA_MR_EMP_EN; | ||
139 | } else | ||
140 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
141 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
142 | & ~FSL_DMA_MR_EMP_EN, 32); | ||
143 | |||
144 | if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | ||
145 | mr_set |= FSL_DMA_MR_EMS_EN; | ||
146 | else | ||
147 | mr_set |= FSL_DMA_MR_CS; | ||
148 | |||
149 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
150 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
151 | | mr_set, 32); | ||
152 | } | ||
153 | |||
154 | static void dma_halt(struct fsl_dma_chan *fsl_chan) | ||
155 | { | ||
156 | int i = 0; | ||
157 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
158 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, | ||
159 | 32); | ||
160 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
161 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS | ||
162 | | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); | ||
163 | |||
164 | while (!dma_is_idle(fsl_chan) && (i++ < 100)) | ||
165 | udelay(10); | ||
166 | if (i >= 100 && !dma_is_idle(fsl_chan)) | ||
167 | dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | ||
168 | } | ||
169 | |||
170 | static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | ||
171 | struct fsl_desc_sw *desc) | ||
172 | { | ||
173 | desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
174 | DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, | ||
175 | 64); | ||
176 | } | ||
177 | |||
178 | static void append_ld_queue(struct fsl_dma_chan *fsl_chan, | ||
179 | struct fsl_desc_sw *new_desc) | ||
180 | { | ||
181 | struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | ||
182 | |||
183 | if (list_empty(&fsl_chan->ld_queue)) | ||
184 | return; | ||
185 | |||
186 | /* Link the last link descriptor (the previous node's | ||
187 | * next-link pointer) to the new descriptor's physical | ||
188 | * address and enable the End-of-segment interrupt on it. | ||
189 | * | ||
190 | * For FSL_DMA_IP_83XX, the snoop enable bit also needs | ||
191 | * to be set. | ||
192 | */ | ||
193 | queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
194 | new_desc->async_tx.phys | FSL_DMA_EOSIE | | ||
195 | (((fsl_chan->feature & FSL_DMA_IP_MASK) | ||
196 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | ||
201 | * @fsl_chan : Freescale DMA channel | ||
202 | * @size : Address loop size, 0 to disable the loop | ||
203 | * | ||
204 | * Set the source address hold (loop) transfer size. The source | ||
205 | * address hold or loop transfer size limits the range the DMA reads | ||
206 | * from at the source address (SA): if the loop size is 4, the DMA | ||
207 | * will read data from SA, SA + 1, SA + 2, SA + 3, then loop back to | ||
208 | * SA, SA + 1 ... and so on. | ||
209 | */ | ||
210 | static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
211 | { | ||
212 | switch (size) { | ||
213 | case 0: | ||
214 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
215 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
216 | (~FSL_DMA_MR_SAHE), 32); | ||
217 | break; | ||
218 | case 1: | ||
219 | case 2: | ||
220 | case 4: | ||
221 | case 8: | ||
222 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
223 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
224 | FSL_DMA_MR_SAHE | (__ilog2(size) << 14), | ||
225 | 32); | ||
226 | break; | ||
227 | } | ||
228 | } | ||
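To make the SAHE encoding above concrete, here is a small standalone sketch (not part of the patch): ilog2_u32() is a local stand-in for the kernel's __ilog2(), and the FSL_DMA_MR_SAHE value and the shift by 14 are taken from the code above.

/* Userspace sketch of the source address hold (loop) encoding. */
#include <stdint.h>
#include <stdio.h>

#define FSL_DMA_MR_SAHE 0x00001000

static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

/* MR bits requesting a source-address loop of 'size' bytes
 * (size must be 1, 2, 4 or 8; 0 means no loop, i.e. no bits set).
 */
static uint32_t mr_src_loop_bits(int size)
{
        if (size == 0)
                return 0;
        return FSL_DMA_MR_SAHE | (ilog2_u32((uint32_t)size) << 14);
}

int main(void)
{
        /* loop size 4 -> __ilog2(4) == 2 -> 0x8000 | 0x1000 == 0x9000 */
        printf("loop size 4 -> MR bits 0x%08x\n",
               (unsigned int)mr_src_loop_bits(4));
        return 0;
}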
229 | |||
230 | /** | ||
231 | * fsl_chan_set_dest_loop_size - Set destination address hold transfer size | ||
232 | * @fsl_chan : Freescale DMA channel | ||
233 | * @size : Address loop size, 0 to disable the loop | ||
234 | * | ||
235 | * Set the destination address hold (loop) transfer size. The | ||
236 | * destination address hold or loop transfer size limits the range the | ||
237 | * DMA writes to at the destination address (TA): if the loop size is | ||
238 | * 4, the DMA will write data to TA, TA + 1, TA + 2, TA + 3, then loop | ||
239 | * back to TA, TA + 1 ... and so on. | ||
240 | */ | ||
241 | static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
242 | { | ||
243 | switch (size) { | ||
244 | case 0: | ||
245 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
246 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
247 | (~FSL_DMA_MR_DAHE), 32); | ||
248 | break; | ||
249 | case 1: | ||
250 | case 2: | ||
251 | case 4: | ||
252 | case 8: | ||
253 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
254 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
255 | FSL_DMA_MR_DAHE | (__ilog2(size) << 16), | ||
256 | 32); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | ||
263 | * @fsl_chan : Freescale DMA channel | ||
264 | * @size : Pause control size, 0 to disable external pause control. | ||
265 | * The maximum is 1024. | ||
266 | * | ||
267 | * The Freescale DMA channel can be controlled by the external | ||
268 | * signal DREQ#. The pause control size is how many bytes may be | ||
269 | * transferred before the channel pauses, after which a new assertion | ||
270 | * of DREQ# resumes channel operation. | ||
271 | */ | ||
272 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | ||
273 | { | ||
274 | if (size > 1024) | ||
275 | return; | ||
276 | |||
277 | if (size) { | ||
278 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
279 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
280 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
281 | 32); | ||
282 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | ||
283 | } else | ||
284 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * fsl_chan_toggle_ext_start - Toggle channel external start status | ||
289 | * @fsl_chan : Freescale DMA channel | ||
290 | * @enable : 0 is disabled, 1 is enabled. | ||
291 | * | ||
292 | * If external start is enabled, the channel can be started by an | ||
293 | * external DMA start pin, so dma_start() does not start the | ||
294 | * transfer immediately; the DMA channel waits until the | ||
295 | * control pin is asserted. | ||
296 | */ | ||
297 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | ||
298 | { | ||
299 | if (enable) | ||
300 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | ||
301 | else | ||
302 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | ||
303 | } | ||
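As a hedged illustration of how the two toggle hooks above are meant to be consumed, the sketch below shows a hypothetical caller enabling both features through the function pointers that the probe routine installs later in this file; example_enable_external_control() and the 512-byte pause size are invented for illustration, and the declarations from fsldma.h (below) are assumed to be in scope.

/* Hypothetical caller, not part of this patch. */
static void example_enable_external_control(struct dma_chan *chan)
{
        struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);

        /* Only the 85XX probe path installs these hooks, so check them. */
        if (fsl_chan->toggle_ext_start)
                fsl_chan->toggle_ext_start(fsl_chan, 1);   /* wait for the start pin */
        if (fsl_chan->toggle_ext_pause)
                fsl_chan->toggle_ext_pause(fsl_chan, 512); /* pause after 512 bytes */
}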
304 | |||
305 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
306 | { | ||
307 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | ||
308 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | ||
309 | unsigned long flags; | ||
310 | dma_cookie_t cookie; | ||
311 | |||
312 | /* cookie increment and adding to ld_queue must be atomic */ | ||
313 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
314 | |||
315 | cookie = fsl_chan->common.cookie; | ||
316 | cookie++; | ||
317 | if (cookie < 0) | ||
318 | cookie = 1; | ||
319 | desc->async_tx.cookie = cookie; | ||
320 | fsl_chan->common.cookie = desc->async_tx.cookie; | ||
321 | |||
322 | append_ld_queue(fsl_chan, desc); | ||
323 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | ||
324 | |||
325 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
326 | |||
327 | return cookie; | ||
328 | } | ||
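The cookie assignment in fsl_dma_tx_submit() can be checked in isolation. The standalone sketch below mirrors the increment-and-wrap rule; next_cookie() is an illustrative helper, and the explicit INT_MAX guard replaces the driver's post-increment 'cookie < 0' test so the userspace example stays clear of signed overflow.

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t;       /* mirrors the kernel typedef */

/* Next cookie: grows monotonically, wraps back to 1, never 0 or negative. */
static dma_cookie_t next_cookie(dma_cookie_t last)
{
        if (last >= INT_MAX)
                return 1;
        return last + 1;
}

int main(void)
{
        printf("%d -> %d\n", 41, next_cookie(41));           /* 42 */
        printf("%d -> %d\n", INT_MAX, next_cookie(INT_MAX)); /* 1 */
        return 0;
}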
329 | |||
330 | /** | ||
331 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | ||
332 | * @fsl_chan : Freescale DMA channel | ||
333 | * | ||
334 | * Return - The allocated descriptor. NULL on failure. | ||
335 | */ | ||
336 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | ||
337 | struct fsl_dma_chan *fsl_chan) | ||
338 | { | ||
339 | dma_addr_t pdesc; | ||
340 | struct fsl_desc_sw *desc_sw; | ||
341 | |||
342 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
343 | if (desc_sw) { | ||
344 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | ||
345 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
346 | &fsl_chan->common); | ||
347 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
348 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | ||
349 | desc_sw->async_tx.phys = pdesc; | ||
350 | } | ||
351 | |||
352 | return desc_sw; | ||
353 | } | ||
354 | |||
355 | |||
356 | /** | ||
357 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * @fsl_chan : Freescale DMA channel | ||
359 | * | ||
360 | * This function will create a dma pool for descriptor allocation. | ||
361 | * | ||
362 | * Return - The number of descriptors allocated. | ||
363 | */ | ||
364 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | ||
365 | { | ||
366 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
367 | LIST_HEAD(tmp_list); | ||
368 | |||
369 | /* The descriptor must be aligned to 32 bytes | ||
370 | * to meet the FSL DMA specification requirement. | ||
371 | */ | ||
372 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | ||
373 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | ||
374 | 32, 0); | ||
375 | if (!fsl_chan->desc_pool) { | ||
376 | dev_err(fsl_chan->dev, "No memory for channel %d " | ||
377 | "descriptor dma pool.\n", fsl_chan->id); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
386 | * @fsl_chan : Freescale DMA channel | ||
387 | */ | ||
388 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | ||
389 | { | ||
390 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
391 | struct fsl_desc_sw *desc, *_desc; | ||
392 | unsigned long flags; | ||
393 | |||
394 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | ||
395 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
396 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
397 | #ifdef FSL_DMA_LD_DEBUG | ||
398 | dev_dbg(fsl_chan->dev, | ||
399 | "LD %p will be released.\n", desc); | ||
400 | #endif | ||
401 | list_del(&desc->node); | ||
402 | /* free link descriptor */ | ||
403 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
404 | } | ||
405 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
406 | dma_pool_destroy(fsl_chan->desc_pool); | ||
407 | } | ||
408 | |||
409 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | ||
410 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
411 | size_t len, unsigned long flags) | ||
412 | { | ||
413 | struct fsl_dma_chan *fsl_chan; | ||
414 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | ||
415 | size_t copy; | ||
416 | LIST_HEAD(link_chain); | ||
417 | |||
418 | if (!chan) | ||
419 | return NULL; | ||
420 | |||
421 | if (!len) | ||
422 | return NULL; | ||
423 | |||
424 | fsl_chan = to_fsl_chan(chan); | ||
425 | |||
426 | do { | ||
427 | |||
428 | /* Allocate the link descriptor from DMA pool */ | ||
429 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
430 | if (!new) { | ||
431 | dev_err(fsl_chan->dev, | ||
432 | "No free memory for link descriptor\n"); | ||
433 | return NULL; | ||
434 | } | ||
435 | #ifdef FSL_DMA_LD_DEBUG | ||
436 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
437 | #endif | ||
438 | |||
439 | copy = min(len, FSL_DMA_BCR_MAX_CNT); | ||
440 | |||
441 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
442 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
443 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | ||
444 | |||
445 | if (!first) | ||
446 | first = new; | ||
447 | else | ||
448 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | ||
449 | |||
450 | new->async_tx.cookie = 0; | ||
451 | new->async_tx.ack = 1; | ||
452 | |||
453 | prev = new; | ||
454 | len -= copy; | ||
455 | dma_src += copy; | ||
456 | dma_dest += copy; | ||
457 | |||
458 | /* Insert the link descriptor to the LD ring */ | ||
459 | list_add_tail(&new->node, &first->async_tx.tx_list); | ||
460 | } while (len); | ||
461 | |||
462 | new->async_tx.ack = 0; /* client is in control of this ack */ | ||
463 | new->async_tx.cookie = -EBUSY; | ||
464 | |||
465 | /* Set End-of-link to the last link descriptor of new list*/ | ||
466 | set_ld_eol(fsl_chan, new); | ||
467 | |||
468 | return first ? &first->async_tx : NULL; | ||
469 | } | ||
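A quick way to see the effect of the do/while chunking above is to count how many link descriptors a given length needs. The sketch below is a standalone illustration; FSL_DMA_BCR_MAX_CNT matches the definition in fsldma.h further down, and num_link_descriptors() is an invented helper.

#include <stddef.h>
#include <stdio.h>

#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu

/* Count the link descriptors fsl_dma_prep_memcpy() would build for 'len'. */
static unsigned int num_link_descriptors(size_t len)
{
        unsigned int n = 0;

        while (len) {
                size_t copy = len < FSL_DMA_BCR_MAX_CNT ? len : FSL_DMA_BCR_MAX_CNT;

                len -= copy;
                n++;
        }
        return n;
}

int main(void)
{
        /* 64 MiB is just over one max-sized chunk, so it takes 2 descriptors. */
        printf("%u\n", num_link_descriptors((size_t)64 << 20));
        return 0;
}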
470 | |||
471 | /** | ||
472 | * fsl_dma_update_completed_cookie - Update the completed cookie. | ||
473 | * @fsl_chan : Freescale DMA channel | ||
474 | */ | ||
475 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | ||
476 | { | ||
477 | struct fsl_desc_sw *cur_desc, *desc; | ||
478 | dma_addr_t ld_phy; | ||
479 | |||
480 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | ||
481 | |||
482 | if (ld_phy) { | ||
483 | cur_desc = NULL; | ||
484 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | ||
485 | if (desc->async_tx.phys == ld_phy) { | ||
486 | cur_desc = desc; | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (cur_desc && cur_desc->async_tx.cookie) { | ||
491 | if (dma_is_idle(fsl_chan)) | ||
492 | fsl_chan->completed_cookie = | ||
493 | cur_desc->async_tx.cookie; | ||
494 | else | ||
495 | fsl_chan->completed_cookie = | ||
496 | cur_desc->async_tx.cookie - 1; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
503 | * @fsl_chan : Freescale DMA channel | ||
504 | * | ||
505 | * This function cleans up the ld_queue of the DMA channel: it frees | ||
506 | * the completed link descriptors and runs the callback of each | ||
507 | * completed descriptor. | ||
508 | */ | ||
509 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | ||
510 | { | ||
511 | struct fsl_desc_sw *desc, *_desc; | ||
512 | unsigned long flags; | ||
513 | |||
514 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
515 | |||
516 | fsl_dma_update_completed_cookie(fsl_chan); | ||
517 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | ||
518 | fsl_chan->completed_cookie); | ||
519 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
520 | dma_async_tx_callback callback; | ||
521 | void *callback_param; | ||
522 | |||
523 | if (dma_async_is_complete(desc->async_tx.cookie, | ||
524 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
525 | == DMA_IN_PROGRESS) | ||
526 | break; | ||
527 | |||
528 | callback = desc->async_tx.callback; | ||
529 | callback_param = desc->async_tx.callback_param; | ||
530 | |||
531 | /* Remove from ld_queue list */ | ||
532 | list_del(&desc->node); | ||
533 | |||
534 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n", | ||
535 | desc); | ||
536 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
537 | |||
538 | /* Run the link descriptor callback function */ | ||
539 | if (callback) { | ||
540 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
541 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | ||
542 | desc); | ||
543 | callback(callback_param); | ||
544 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
545 | } | ||
546 | } | ||
547 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
548 | } | ||
549 | |||
550 | /** | ||
551 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | ||
552 | * @fsl_chan : Freescale DMA channel | ||
553 | */ | ||
554 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | ||
555 | { | ||
556 | struct list_head *ld_node; | ||
557 | dma_addr_t next_dest_addr; | ||
558 | unsigned long flags; | ||
559 | |||
560 | if (!dma_is_idle(fsl_chan)) | ||
561 | return; | ||
562 | |||
563 | dma_halt(fsl_chan); | ||
564 | |||
565 | /* If there are link descriptors in the queue that | ||
566 | * have not been transferred yet, start them. | ||
567 | */ | ||
568 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
569 | |||
570 | /* Find the first untransferred descriptor */ | ||
571 | for (ld_node = fsl_chan->ld_queue.next; | ||
572 | (ld_node != &fsl_chan->ld_queue) | ||
573 | && (dma_async_is_complete( | ||
574 | to_fsl_desc(ld_node)->async_tx.cookie, | ||
575 | fsl_chan->completed_cookie, | ||
576 | fsl_chan->common.cookie) == DMA_SUCCESS); | ||
577 | ld_node = ld_node->next); | ||
578 | |||
579 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
580 | |||
581 | if (ld_node != &fsl_chan->ld_queue) { | ||
582 | /* Get the ld start address from ld_queue */ | ||
583 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | ||
584 | dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%016llx\n", | ||
585 | (u64)next_dest_addr); | ||
586 | set_cdar(fsl_chan, next_dest_addr); | ||
587 | dma_start(fsl_chan); | ||
588 | } else { | ||
589 | set_cdar(fsl_chan, 0); | ||
590 | set_ndar(fsl_chan, 0); | ||
591 | } | ||
592 | } | ||
593 | |||
594 | /** | ||
595 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | ||
596 | * @fsl_chan : Freescale DMA channel | ||
597 | */ | ||
598 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
599 | { | ||
600 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
601 | |||
602 | #ifdef FSL_DMA_LD_DEBUG | ||
603 | struct fsl_desc_sw *ld; | ||
604 | unsigned long flags; | ||
605 | |||
606 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
607 | if (list_empty(&fsl_chan->ld_queue)) { | ||
608 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
609 | return; | ||
610 | } | ||
611 | |||
612 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
613 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
614 | int i; | ||
615 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
616 | fsl_chan->id, ld->async_tx.phys); | ||
617 | for (i = 0; i < 8; i++) | ||
618 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
619 | i, *(((u32 *)&ld->hw) + i)); | ||
620 | } | ||
621 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
622 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
623 | #endif | ||
624 | |||
625 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
626 | } | ||
627 | |||
628 | static void fsl_dma_dependency_added(struct dma_chan *chan) | ||
629 | { | ||
630 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
631 | |||
632 | fsl_chan_ld_cleanup(fsl_chan); | ||
633 | } | ||
634 | |||
635 | /** | ||
636 | * fsl_dma_is_complete - Determine the DMA status | ||
637 | * @fsl_chan : Freescale DMA channel | ||
638 | */ | ||
639 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | ||
640 | dma_cookie_t cookie, | ||
641 | dma_cookie_t *done, | ||
642 | dma_cookie_t *used) | ||
643 | { | ||
644 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
645 | dma_cookie_t last_used; | ||
646 | dma_cookie_t last_complete; | ||
647 | |||
648 | fsl_chan_ld_cleanup(fsl_chan); | ||
649 | |||
650 | last_used = chan->cookie; | ||
651 | last_complete = fsl_chan->completed_cookie; | ||
652 | |||
653 | if (done) | ||
654 | *done = last_complete; | ||
655 | |||
656 | if (used) | ||
657 | *used = last_used; | ||
658 | |||
659 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
660 | } | ||
661 | |||
662 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | ||
663 | { | ||
664 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
665 | dma_addr_t stat; | ||
666 | |||
667 | stat = get_sr(fsl_chan); | ||
668 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | ||
669 | fsl_chan->id, stat); | ||
670 | set_sr(fsl_chan, stat); /* Clear the event register */ | ||
671 | |||
672 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | ||
673 | if (!stat) | ||
674 | return IRQ_NONE; | ||
675 | |||
676 | if (stat & FSL_DMA_SR_TE) | ||
677 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | ||
678 | |||
679 | /* If the link descriptor segment transfer finishes, | ||
680 | * we will recycle the used descriptor. | ||
681 | */ | ||
682 | if (stat & FSL_DMA_SR_EOSI) { | ||
683 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | ||
684 | dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, " | ||
685 | "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan), | ||
686 | (u64)get_ndar(fsl_chan)); | ||
687 | stat &= ~FSL_DMA_SR_EOSI; | ||
688 | } | ||
689 | |||
690 | /* If the current transfer is the end of transfer, | ||
691 | * clear the Channel Start bit to prepare for the | ||
692 | * next transfer. | ||
693 | */ | ||
694 | if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) { | ||
695 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | ||
696 | stat &= ~FSL_DMA_SR_EOLNI; | ||
697 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
698 | } | ||
699 | |||
700 | if (stat) | ||
701 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | ||
702 | stat); | ||
703 | |||
704 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | ||
705 | tasklet_schedule(&fsl_chan->tasklet); | ||
706 | return IRQ_HANDLED; | ||
707 | } | ||
708 | |||
709 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | ||
710 | { | ||
711 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | ||
712 | u32 gsr; | ||
713 | int ch_nr; | ||
714 | |||
715 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | ||
716 | : in_le32(fdev->reg_base); | ||
717 | ch_nr = (32 - ffs(gsr)) / 8; | ||
718 | |||
719 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | ||
720 | fdev->chan[ch_nr]) : IRQ_NONE; | ||
721 | } | ||
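The '(32 - ffs(gsr)) / 8' decode above implies that the controller-level status register carries one status byte per channel, with channel 0 in the most significant byte; that layout is inferred from the arithmetic rather than stated in the patch. The standalone sketch below just exercises the decode with POSIX ffs().

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* Map the lowest set bit of the controller status word to a channel index. */
static int gsr_to_channel(unsigned int gsr)
{
        return (32 - ffs((int)gsr)) / 8;
}

int main(void)
{
        printf("%d\n", gsr_to_channel(0x08000000)); /* bit in byte 3 -> channel 0 */
        printf("%d\n", gsr_to_channel(0x00000004)); /* bit in byte 0 -> channel 3 */
        return 0;
}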
722 | |||
723 | static void dma_do_tasklet(unsigned long data) | ||
724 | { | ||
725 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
726 | fsl_chan_ld_cleanup(fsl_chan); | ||
727 | } | ||
728 | |||
729 | static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) | ||
730 | { | ||
731 | if (fsl_chan) | ||
732 | dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); | ||
733 | } | ||
734 | |||
735 | static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | ||
736 | { | ||
737 | struct dma_chan *chan; | ||
738 | int err = 0; | ||
739 | dma_addr_t dma_dest, dma_src; | ||
740 | dma_cookie_t cookie; | ||
741 | u8 *src, *dest; | ||
742 | int i; | ||
743 | size_t test_size; | ||
744 | struct dma_async_tx_descriptor *tx1, *tx2, *tx3; | ||
745 | |||
746 | test_size = 4096; | ||
747 | |||
748 | src = kmalloc(test_size * 2, GFP_KERNEL); | ||
749 | if (!src) { | ||
750 | dev_err(fsl_chan->dev, | ||
751 | "selftest: Cannot alloc memory for test!\n"); | ||
752 | err = -ENOMEM; | ||
753 | goto out; | ||
754 | } | ||
755 | |||
756 | dest = src + test_size; | ||
757 | |||
758 | for (i = 0; i < test_size; i++) | ||
759 | src[i] = (u8) i; | ||
760 | |||
761 | chan = &fsl_chan->common; | ||
762 | |||
763 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
764 | dev_err(fsl_chan->dev, | ||
765 | "selftest: Cannot alloc resources for DMA\n"); | ||
766 | err = -ENODEV; | ||
767 | goto out; | ||
768 | } | ||
769 | |||
770 | /* TX 1 */ | ||
771 | dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, | ||
772 | DMA_TO_DEVICE); | ||
773 | dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, | ||
774 | DMA_FROM_DEVICE); | ||
775 | tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); | ||
776 | async_tx_ack(tx1); | ||
777 | |||
778 | cookie = fsl_dma_tx_submit(tx1); | ||
779 | fsl_dma_memcpy_issue_pending(chan); | ||
780 | msleep(2); | ||
781 | |||
782 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
783 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
784 | err = -ENODEV; | ||
785 | goto out; | ||
786 | } | ||
787 | |||
788 | /* Test free and re-alloc channel resources */ | ||
789 | fsl_dma_free_chan_resources(chan); | ||
790 | |||
791 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
792 | dev_err(fsl_chan->dev, | ||
793 | "selftest: Cannot alloc resources for DMA\n"); | ||
794 | err = -ENODEV; | ||
795 | goto free_resources; | ||
796 | } | ||
797 | |||
798 | /* Continue to test | ||
799 | * TX 2 | ||
800 | */ | ||
801 | dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, | ||
802 | test_size / 4, DMA_TO_DEVICE); | ||
803 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, | ||
804 | test_size / 4, DMA_FROM_DEVICE); | ||
805 | tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
806 | async_tx_ack(tx2); | ||
807 | |||
808 | /* TX 3 */ | ||
809 | dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, | ||
810 | test_size / 4, DMA_TO_DEVICE); | ||
811 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, | ||
812 | test_size / 4, DMA_FROM_DEVICE); | ||
813 | tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
814 | async_tx_ack(tx3); | ||
815 | |||
816 | /* Test submitting the prepared descriptors out of order */ | ||
817 | cookie = fsl_dma_tx_submit(tx3); | ||
818 | cookie = fsl_dma_tx_submit(tx2); | ||
819 | |||
820 | #ifdef FSL_DMA_CALLBACKTEST | ||
821 | if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) | ||
822 | dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { | ||
823 | tx3->callback = fsl_dma_callback_test; | ||
824 | tx3->callback_param = fsl_chan; | ||
825 | } | ||
826 | #endif | ||
827 | fsl_dma_memcpy_issue_pending(chan); | ||
828 | msleep(2); | ||
829 | |||
830 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
831 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
832 | err = -ENODEV; | ||
833 | goto free_resources; | ||
834 | } | ||
835 | |||
836 | err = memcmp(src, dest, test_size); | ||
837 | if (err) { | ||
838 | for (i = 0; (i < test_size) && (*(src + i) == *(dest + i)); | ||
839 | i++); | ||
840 | dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is " | ||
841 | "wrong! src 0x%x, dest 0x%x\n", | ||
842 | i, test_size, *(src + i), *(dest + i)); | ||
843 | } | ||
844 | |||
845 | free_resources: | ||
846 | fsl_dma_free_chan_resources(chan); | ||
847 | out: | ||
848 | kfree(src); | ||
849 | return err; | ||
850 | } | ||
851 | |||
852 | static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | ||
853 | const struct of_device_id *match) | ||
854 | { | ||
855 | struct fsl_dma_device *fdev; | ||
856 | struct fsl_dma_chan *new_fsl_chan; | ||
857 | int err; | ||
858 | |||
859 | fdev = dev_get_drvdata(dev->dev.parent); | ||
860 | BUG_ON(!fdev); | ||
861 | |||
862 | /* alloc channel */ | ||
863 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | ||
864 | if (!new_fsl_chan) { | ||
865 | dev_err(&dev->dev, "No free memory for allocating " | ||
866 | "dma channels!\n"); | ||
867 | err = -ENOMEM; | ||
868 | goto err; | ||
869 | } | ||
870 | |||
871 | /* get dma channel register base */ | ||
872 | err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); | ||
873 | if (err) { | ||
874 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
875 | dev->node->full_name); | ||
876 | goto err; | ||
877 | } | ||
878 | |||
879 | new_fsl_chan->feature = *(u32 *)match->data; | ||
880 | |||
881 | if (!fdev->feature) | ||
882 | fdev->feature = new_fsl_chan->feature; | ||
883 | |||
884 | /* If the DMA device's feature is different from its channels', | ||
885 | * report the bug. | ||
886 | */ | ||
887 | WARN_ON(fdev->feature != new_fsl_chan->feature); | ||
888 | |||
889 | new_fsl_chan->dev = &dev->dev; | ||
890 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | ||
891 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | ||
892 | |||
893 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
894 | if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
895 | dev_err(&dev->dev, "There is no channel %d!\n", | ||
896 | new_fsl_chan->id); | ||
897 | err = -EINVAL; | ||
898 | goto err; | ||
899 | } | ||
900 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
901 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
902 | (unsigned long)new_fsl_chan); | ||
903 | |||
904 | /* Init the channel */ | ||
905 | dma_init(new_fsl_chan); | ||
906 | |||
907 | /* Clear cdar registers */ | ||
908 | set_cdar(new_fsl_chan, 0); | ||
909 | |||
910 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
911 | case FSL_DMA_IP_85XX: | ||
912 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | ||
913 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | ||
914 | case FSL_DMA_IP_83XX: | ||
915 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | ||
916 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | ||
917 | } | ||
918 | |||
919 | spin_lock_init(&new_fsl_chan->desc_lock); | ||
920 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | ||
921 | |||
922 | new_fsl_chan->common.device = &fdev->common; | ||
923 | |||
924 | /* Add the channel to DMA device channel list */ | ||
925 | list_add_tail(&new_fsl_chan->common.device_node, | ||
926 | &fdev->common.channels); | ||
927 | fdev->common.chancnt++; | ||
928 | |||
929 | new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); | ||
930 | if (new_fsl_chan->irq != NO_IRQ) { | ||
931 | err = request_irq(new_fsl_chan->irq, | ||
932 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
933 | "fsldma-channel", new_fsl_chan); | ||
934 | if (err) { | ||
935 | dev_err(&dev->dev, "DMA channel %s request_irq error " | ||
936 | "with return %d\n", dev->node->full_name, err); | ||
937 | goto err; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | #ifdef CONFIG_FSL_DMA_SELFTEST | ||
942 | err = fsl_dma_self_test(new_fsl_chan); | ||
943 | if (err) | ||
944 | goto err; | ||
945 | #endif | ||
946 | |||
947 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
948 | match->compatible, new_fsl_chan->irq); | ||
949 | |||
950 | return 0; | ||
951 | err: | ||
952 | dma_halt(new_fsl_chan); | ||
953 | iounmap(new_fsl_chan->reg_base); | ||
954 | free_irq(new_fsl_chan->irq, new_fsl_chan); | ||
955 | list_del(&new_fsl_chan->common.device_node); | ||
956 | kfree(new_fsl_chan); | ||
957 | return err; | ||
958 | } | ||
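The channel id computation '((reg.start - 0x100) & 0xfff) >> 7' suggests that each channel owns an 0x80-byte register block starting at offset 0x100 inside the controller; that is an inference from the arithmetic, and the register addresses in the standalone sketch below are invented examples.

#include <stdio.h>

/* Derive the channel id from its register block address, as in the probe. */
static unsigned int chan_id_from_offset(unsigned long reg_start)
{
        return (unsigned int)(((reg_start - 0x100) & 0xfff) >> 7);
}

int main(void)
{
        printf("%u\n", chan_id_from_offset(0xffe21100UL)); /* -> 0 */
        printf("%u\n", chan_id_from_offset(0xffe21180UL)); /* -> 1 */
        return 0;
}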
959 | |||
960 | const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; | ||
961 | const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; | ||
962 | |||
963 | static struct of_device_id of_fsl_dma_chan_ids[] = { | ||
964 | { | ||
965 | .compatible = "fsl,mpc8540-dma-channel", | ||
966 | .data = (void *)&mpc8540_dma_ip_feature, | ||
967 | }, | ||
968 | { | ||
969 | .compatible = "fsl,mpc8349-dma-channel", | ||
970 | .data = (void *)&mpc8349_dma_ip_feature, | ||
971 | }, | ||
972 | {} | ||
973 | }; | ||
974 | |||
975 | static struct of_platform_driver of_fsl_dma_chan_driver = { | ||
976 | .name = "of-fsl-dma-channel", | ||
977 | .match_table = of_fsl_dma_chan_ids, | ||
978 | .probe = of_fsl_dma_chan_probe, | ||
979 | }; | ||
980 | |||
981 | static __init int of_fsl_dma_chan_init(void) | ||
982 | { | ||
983 | return of_register_platform_driver(&of_fsl_dma_chan_driver); | ||
984 | } | ||
985 | |||
986 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | ||
987 | const struct of_device_id *match) | ||
988 | { | ||
989 | int err; | ||
990 | unsigned int irq; | ||
991 | struct fsl_dma_device *fdev; | ||
992 | |||
993 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | ||
994 | if (!fdev) { | ||
995 | dev_err(&dev->dev, "Not enough memory for 'priv'\n"); | ||
996 | err = -ENOMEM; | ||
997 | goto err; | ||
998 | } | ||
999 | fdev->dev = &dev->dev; | ||
1000 | INIT_LIST_HEAD(&fdev->common.channels); | ||
1001 | |||
1002 | /* get DMA controller register base */ | ||
1003 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | ||
1004 | if (err) { | ||
1005 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
1006 | dev->node->full_name); | ||
1007 | goto err; | ||
1008 | } | ||
1009 | |||
1010 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | ||
1011 | "controller at 0x%08x...\n", | ||
1012 | match->compatible, fdev->reg.start); | ||
1013 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
1014 | - fdev->reg.start + 1); | ||
1015 | |||
1016 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | ||
1017 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1018 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | ||
1019 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | ||
1020 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | ||
1021 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | ||
1022 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | ||
1023 | fdev->common.device_dependency_added = fsl_dma_dependency_added; | ||
1024 | fdev->common.dev = &dev->dev; | ||
1025 | |||
1026 | irq = irq_of_parse_and_map(dev->node, 0); | ||
1027 | if (irq != NO_IRQ) { | ||
1028 | err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
1029 | "fsldma-device", fdev); | ||
1030 | if (err) { | ||
1031 | dev_err(&dev->dev, "DMA device request_irq error " | ||
1032 | "with return %d\n", err); | ||
1033 | goto err; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | dev_set_drvdata(&(dev->dev), fdev); | ||
1038 | of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); | ||
1039 | |||
1040 | dma_async_device_register(&fdev->common); | ||
1041 | return 0; | ||
1042 | |||
1043 | err: | ||
1044 | iounmap(fdev->reg_base); | ||
1045 | kfree(fdev); | ||
1046 | return err; | ||
1047 | } | ||
1048 | |||
1049 | static struct of_device_id of_fsl_dma_ids[] = { | ||
1050 | { .compatible = "fsl,mpc8540-dma", }, | ||
1051 | { .compatible = "fsl,mpc8349-dma", }, | ||
1052 | {} | ||
1053 | }; | ||
1054 | |||
1055 | static struct of_platform_driver of_fsl_dma_driver = { | ||
1056 | .name = "of-fsl-dma", | ||
1057 | .match_table = of_fsl_dma_ids, | ||
1058 | .probe = of_fsl_dma_probe, | ||
1059 | }; | ||
1060 | |||
1061 | static __init int of_fsl_dma_init(void) | ||
1062 | { | ||
1063 | return of_register_platform_driver(&of_fsl_dma_driver); | ||
1064 | } | ||
1065 | |||
1066 | subsys_initcall(of_fsl_dma_chan_init); | ||
1067 | subsys_initcall(of_fsl_dma_init); | ||
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h new file mode 100644 index 000000000000..ba78c42121ba --- /dev/null +++ b/drivers/dma/fsldma.h | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
6 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
7 | * | ||
8 | * This is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __DMA_FSLDMA_H | ||
15 | #define __DMA_FSLDMA_H | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /* Define data structures needed by the Freescale | ||
22 | * MPC8540 and MPC8349 DMA controllers. | ||
23 | */ | ||
24 | #define FSL_DMA_MR_CS 0x00000001 | ||
25 | #define FSL_DMA_MR_CC 0x00000002 | ||
26 | #define FSL_DMA_MR_CA 0x00000008 | ||
27 | #define FSL_DMA_MR_EIE 0x00000040 | ||
28 | #define FSL_DMA_MR_XFE 0x00000020 | ||
29 | #define FSL_DMA_MR_EOLNIE 0x00000100 | ||
30 | #define FSL_DMA_MR_EOLSIE 0x00000080 | ||
31 | #define FSL_DMA_MR_EOSIE 0x00000200 | ||
32 | #define FSL_DMA_MR_CDSM 0x00000010 | ||
33 | #define FSL_DMA_MR_CTM 0x00000004 | ||
34 | #define FSL_DMA_MR_EMP_EN 0x00200000 | ||
35 | #define FSL_DMA_MR_EMS_EN 0x00040000 | ||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | ||
37 | #define FSL_DMA_MR_SAHE 0x00001000 | ||
38 | |||
39 | /* Special MR definition for MPC8349 */ | ||
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | ||
41 | |||
42 | #define FSL_DMA_SR_CH 0x00000020 | ||
43 | #define FSL_DMA_SR_CB 0x00000004 | ||
44 | #define FSL_DMA_SR_TE 0x00000080 | ||
45 | #define FSL_DMA_SR_EOSI 0x00000002 | ||
46 | #define FSL_DMA_SR_EOLSI 0x00000001 | ||
47 | #define FSL_DMA_SR_EOCDI 0x00000001 | ||
48 | #define FSL_DMA_SR_EOLNI 0x00000008 | ||
49 | |||
50 | #define FSL_DMA_SATR_SBPATMU 0x20000000 | ||
51 | #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 | ||
52 | #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 | ||
53 | #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 | ||
54 | #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 | ||
55 | #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 | ||
56 | |||
57 | #define FSL_DMA_DATR_DBPATMU 0x20000000 | ||
58 | #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 | ||
59 | #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 | ||
60 | #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 | ||
61 | |||
62 | #define FSL_DMA_EOL ((u64)0x1) | ||
63 | #define FSL_DMA_SNEN ((u64)0x10) | ||
64 | #define FSL_DMA_EOSIE 0x8 | ||
65 | #define FSL_DMA_NLDA_MASK (~(u64)0x1f) | ||
66 | |||
67 | #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu | ||
68 | |||
69 | #define FSL_DMA_DGSR_TE 0x80 | ||
70 | #define FSL_DMA_DGSR_CH 0x20 | ||
71 | #define FSL_DMA_DGSR_PE 0x10 | ||
72 | #define FSL_DMA_DGSR_EOLNI 0x08 | ||
73 | #define FSL_DMA_DGSR_CB 0x04 | ||
74 | #define FSL_DMA_DGSR_EOSI 0x02 | ||
75 | #define FSL_DMA_DGSR_EOLSI 0x01 | ||
76 | |||
77 | struct fsl_dma_ld_hw { | ||
78 | u64 __bitwise src_addr; | ||
79 | u64 __bitwise dst_addr; | ||
80 | u64 __bitwise next_ln_addr; | ||
81 | u32 __bitwise count; | ||
82 | u32 __bitwise reserve; | ||
83 | } __attribute__((aligned(32))); | ||
84 | |||
85 | struct fsl_desc_sw { | ||
86 | struct fsl_dma_ld_hw hw; | ||
87 | struct list_head node; | ||
88 | struct dma_async_tx_descriptor async_tx; | ||
89 | struct list_head *ld; | ||
90 | void *priv; | ||
91 | } __attribute__((aligned(32))); | ||
92 | |||
93 | struct fsl_dma_chan_regs { | ||
94 | u32 __bitwise mr; /* 0x00 - Mode Register */ | ||
95 | u32 __bitwise sr; /* 0x04 - Status Register */ | ||
96 | u64 __bitwise cdar; /* 0x08 - Current descriptor address register */ | ||
97 | u64 __bitwise sar; /* 0x10 - Source Address Register */ | ||
98 | u64 __bitwise dar; /* 0x18 - Destination Address Register */ | ||
99 | u32 __bitwise bcr; /* 0x20 - Byte Count Register */ | ||
100 | u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */ | ||
101 | }; | ||
102 | |||
103 | struct fsl_dma_chan; | ||
104 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | ||
105 | |||
106 | struct fsl_dma_device { | ||
107 | void __iomem *reg_base; /* DGSR register base */ | ||
108 | struct resource reg; /* Resource for register */ | ||
109 | struct device *dev; | ||
110 | struct dma_device common; | ||
111 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | ||
112 | u32 feature; /* The same as DMA channels */ | ||
113 | }; | ||
114 | |||
115 | /* Define macros for fsl_dma_chan->feature property */ | ||
116 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | ||
117 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | ||
118 | |||
119 | #define FSL_DMA_IP_MASK 0x00000ff0 | ||
120 | #define FSL_DMA_IP_85XX 0x00000010 | ||
121 | #define FSL_DMA_IP_83XX 0x00000020 | ||
122 | |||
123 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | ||
124 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | ||
125 | |||
126 | struct fsl_dma_chan { | ||
127 | struct fsl_dma_chan_regs __iomem *reg_base; | ||
128 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
129 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
130 | struct list_head ld_queue; /* Link descriptors queue */ | ||
131 | struct dma_chan common; /* DMA common channel */ | ||
132 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
133 | struct device *dev; /* Channel device */ | ||
134 | struct resource reg; /* Resource for register */ | ||
135 | int irq; /* Channel IRQ */ | ||
136 | int id; /* Raw id of this channel */ | ||
137 | struct tasklet_struct tasklet; | ||
138 | u32 feature; | ||
139 | |||
140 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | ||
141 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | ||
142 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
143 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
144 | }; | ||
145 | |||
146 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | ||
147 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | ||
148 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | ||
149 | |||
150 | #ifndef __powerpc64__ | ||
151 | static u64 in_be64(const u64 __iomem *addr) | ||
152 | { | ||
153 | return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1)); | ||
154 | } | ||
155 | |||
156 | static void out_be64(u64 __iomem *addr, u64 val) | ||
157 | { | ||
158 | out_be32((u32 *)addr, val >> 32); | ||
159 | out_be32((u32 *)addr + 1, (u32)val); | ||
160 | } | ||
161 | |||
162 | /* There are no asm instructions for 64-bit byte-reversed loads and stores */ | ||
163 | static u64 in_le64(const u64 __iomem *addr) | ||
164 | { | ||
165 | return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr)); | ||
166 | } | ||
167 | |||
168 | static void out_le64(u64 __iomem *addr, u64 val) | ||
169 | { | ||
170 | out_le32((u32 *)addr + 1, val >> 32); | ||
171 | out_le32((u32 *)addr, (u32)val); | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | #define DMA_IN(fsl_chan, addr, width) \ | ||
176 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
177 | in_be##width(addr) : in_le##width(addr)) | ||
178 | #define DMA_OUT(fsl_chan, addr, val, width) \ | ||
179 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
180 | out_be##width(addr, val) : out_le##width(addr, val)) | ||
181 | |||
182 | #define DMA_TO_CPU(fsl_chan, d, width) \ | ||
183 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
184 | be##width##_to_cpu(d) : le##width##_to_cpu(d)) | ||
185 | #define CPU_TO_DMA(fsl_chan, c, width) \ | ||
186 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
187 | cpu_to_be##width(c) : cpu_to_le##width(c)) | ||
188 | |||
189 | #endif /* __DMA_FSLDMA_H */ | ||
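For the 32-bit-only build path above, the 64-bit register accessors are just two 32-bit accesses glued together, with opposite word order for the big- and little-endian variants. The standalone sketch below demonstrates the composition, with plain array reads standing in for in_be32()/in_le32().

#include <stdint.h>
#include <stdio.h>

/* in_be64() composition: the first 32-bit word is the high half. */
static uint64_t be64_from_words(const uint32_t *w)
{
        return ((uint64_t)w[0] << 32) | w[1];
}

/* in_le64() composition: the second 32-bit word is the high half. */
static uint64_t le64_from_words(const uint32_t *w)
{
        return ((uint64_t)w[1] << 32) | w[0];
}

int main(void)
{
        uint32_t words[2] = { 0x00000001, 0x00000010 };

        printf("be: 0x%016llx\n", (unsigned long long)be64_from_words(words));
        printf("le: 0x%016llx\n", (unsigned long long)le64_from_words(words));
        return 0;
}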
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c1..4017d9e7acd2 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
714 | new->len = len; | 714 | new->len = len; |
715 | new->dst = dma_dest; | 715 | new->dst = dma_dest; |
716 | new->src = dma_src; | 716 | new->src = dma_src; |
717 | new->async_tx.ack = 0; | ||
717 | return &new->async_tx; | 718 | return &new->async_tx; |
718 | } else | 719 | } else |
719 | return NULL; | 720 | return NULL; |
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
741 | new->len = len; | 742 | new->len = len; |
742 | new->dst = dma_dest; | 743 | new->dst = dma_dest; |
743 | new->src = dma_src; | 744 | new->src = dma_src; |
745 | new->async_tx.ack = 0; | ||
744 | return &new->async_tx; | 746 | return &new->async_tx; |
745 | } else | 747 | } else |
746 | return NULL; | 748 | return NULL; |
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 3e9719948a8e..a03462750b95 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/crc-itu-t.h> | 24 | #include <linux/crc-itu-t.h> |
@@ -214,17 +215,29 @@ static void | |||
214 | fw_card_bm_work(struct work_struct *work) | 215 | fw_card_bm_work(struct work_struct *work) |
215 | { | 216 | { |
216 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 217 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
217 | struct fw_device *root; | 218 | struct fw_device *root_device; |
219 | struct fw_node *root_node, *local_node; | ||
218 | struct bm_data bmd; | 220 | struct bm_data bmd; |
219 | unsigned long flags; | 221 | unsigned long flags; |
220 | int root_id, new_root_id, irm_id, gap_count, generation, grace; | 222 | int root_id, new_root_id, irm_id, gap_count, generation, grace; |
221 | int do_reset = 0; | 223 | int do_reset = 0; |
222 | 224 | ||
223 | spin_lock_irqsave(&card->lock, flags); | 225 | spin_lock_irqsave(&card->lock, flags); |
226 | local_node = card->local_node; | ||
227 | root_node = card->root_node; | ||
228 | |||
229 | if (local_node == NULL) { | ||
230 | spin_unlock_irqrestore(&card->lock, flags); | ||
231 | return; | ||
232 | } | ||
233 | fw_node_get(local_node); | ||
234 | fw_node_get(root_node); | ||
224 | 235 | ||
225 | generation = card->generation; | 236 | generation = card->generation; |
226 | root = card->root_node->data; | 237 | root_device = root_node->data; |
227 | root_id = card->root_node->node_id; | 238 | if (root_device) |
239 | fw_device_get(root_device); | ||
240 | root_id = root_node->node_id; | ||
228 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); | 241 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); |
229 | 242 | ||
230 | if (card->bm_generation + 1 == generation || | 243 | if (card->bm_generation + 1 == generation || |
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) | |||
243 | 256 | ||
244 | irm_id = card->irm_node->node_id; | 257 | irm_id = card->irm_node->node_id; |
245 | if (!card->irm_node->link_on) { | 258 | if (!card->irm_node->link_on) { |
246 | new_root_id = card->local_node->node_id; | 259 | new_root_id = local_node->node_id; |
247 | fw_notify("IRM has link off, making local node (%02x) root.\n", | 260 | fw_notify("IRM has link off, making local node (%02x) root.\n", |
248 | new_root_id); | 261 | new_root_id); |
249 | goto pick_me; | 262 | goto pick_me; |
250 | } | 263 | } |
251 | 264 | ||
252 | bmd.lock.arg = cpu_to_be32(0x3f); | 265 | bmd.lock.arg = cpu_to_be32(0x3f); |
253 | bmd.lock.data = cpu_to_be32(card->local_node->node_id); | 266 | bmd.lock.data = cpu_to_be32(local_node->node_id); |
254 | 267 | ||
255 | spin_unlock_irqrestore(&card->lock, flags); | 268 | spin_unlock_irqrestore(&card->lock, flags); |
256 | 269 | ||
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) | |||
267 | * Another bus reset happened. Just return, | 280 | * Another bus reset happened. Just return, |
268 | * the BM work has been rescheduled. | 281 | * the BM work has been rescheduled. |
269 | */ | 282 | */ |
270 | return; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) | 286 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) |
274 | /* Somebody else is BM, let them do the work. */ | 287 | /* Somebody else is BM, let them do the work. */ |
275 | return; | 288 | goto out; |
276 | 289 | ||
277 | spin_lock_irqsave(&card->lock, flags); | 290 | spin_lock_irqsave(&card->lock, flags); |
278 | if (bmd.rcode != RCODE_COMPLETE) { | 291 | if (bmd.rcode != RCODE_COMPLETE) { |
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) | |||
282 | * do a bus reset and pick the local node as | 295 | * do a bus reset and pick the local node as |
283 | * root, and thus, IRM. | 296 | * root, and thus, IRM. |
284 | */ | 297 | */ |
285 | new_root_id = card->local_node->node_id; | 298 | new_root_id = local_node->node_id; |
286 | fw_notify("BM lock failed, making local node (%02x) root.\n", | 299 | fw_notify("BM lock failed, making local node (%02x) root.\n", |
287 | new_root_id); | 300 | new_root_id); |
288 | goto pick_me; | 301 | goto pick_me; |
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) | |||
295 | */ | 308 | */ |
296 | spin_unlock_irqrestore(&card->lock, flags); | 309 | spin_unlock_irqrestore(&card->lock, flags); |
297 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); | 310 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); |
298 | return; | 311 | goto out; |
299 | } | 312 | } |
300 | 313 | ||
301 | /* | 314 | /* |
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) | |||
305 | */ | 318 | */ |
306 | card->bm_generation = generation; | 319 | card->bm_generation = generation; |
307 | 320 | ||
308 | if (root == NULL) { | 321 | if (root_device == NULL) { |
309 | /* | 322 | /* |
310 | * Either link_on is false, or we failed to read the | 323 | * Either link_on is false, or we failed to read the |
311 | * config rom. In either case, pick another root. | 324 | * config rom. In either case, pick another root. |
312 | */ | 325 | */ |
313 | new_root_id = card->local_node->node_id; | 326 | new_root_id = local_node->node_id; |
314 | } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { | 327 | } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { |
315 | /* | 328 | /* |
316 | * If we haven't probed this device yet, bail out now | 329 | * If we haven't probed this device yet, bail out now |
317 | * and let's try again once that's done. | 330 | * and let's try again once that's done. |
318 | */ | 331 | */ |
319 | spin_unlock_irqrestore(&card->lock, flags); | 332 | spin_unlock_irqrestore(&card->lock, flags); |
320 | return; | 333 | goto out; |
321 | } else if (root->config_rom[2] & BIB_CMC) { | 334 | } else if (root_device->config_rom[2] & BIB_CMC) { |
322 | /* | 335 | /* |
323 | * FIXME: I suppose we should set the cmstr bit in the | 336 | * FIXME: I suppose we should set the cmstr bit in the |
324 | * STATE_CLEAR register of this node, as described in | 337 | * STATE_CLEAR register of this node, as described in |
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) | |||
332 | * successfully read the config rom, but it's not | 345 | * successfully read the config rom, but it's not |
333 | * cycle master capable. | 346 | * cycle master capable. |
334 | */ | 347 | */ |
335 | new_root_id = card->local_node->node_id; | 348 | new_root_id = local_node->node_id; |
336 | } | 349 | } |
337 | 350 | ||
338 | pick_me: | 351 | pick_me: |
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) | |||
341 | * the typically much larger 1394b beta repeater delays though. | 354 | * the typically much larger 1394b beta repeater delays though. |
342 | */ | 355 | */ |
343 | if (!card->beta_repeaters_present && | 356 | if (!card->beta_repeaters_present && |
344 | card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 357 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
345 | gap_count = gap_count_table[card->root_node->max_hops]; | 358 | gap_count = gap_count_table[root_node->max_hops]; |
346 | else | 359 | else |
347 | gap_count = 63; | 360 | gap_count = 63; |
348 | 361 | ||
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) | |||
364 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 377 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
365 | fw_core_initiate_bus_reset(card, 1); | 378 | fw_core_initiate_bus_reset(card, 1); |
366 | } | 379 | } |
380 | out: | ||
381 | if (root_device) | ||
382 | fw_device_put(root_device); | ||
383 | fw_node_put(root_node); | ||
384 | fw_node_put(local_node); | ||
367 | } | 385 | } |
368 | 386 | ||
369 | static void | 387 | static void |
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | |||
381 | static atomic_t index = ATOMIC_INIT(-1); | 399 | static atomic_t index = ATOMIC_INIT(-1); |
382 | 400 | ||
383 | kref_init(&card->kref); | 401 | kref_init(&card->kref); |
402 | atomic_set(&card->device_count, 0); | ||
384 | card->index = atomic_inc_return(&index); | 403 | card->index = atomic_inc_return(&index); |
385 | card->driver = driver; | 404 | card->driver = driver; |
386 | card->device = device; | 405 | card->device = device; |
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) | |||
511 | card->driver = &dummy_driver; | 530 | card->driver = &dummy_driver; |
512 | 531 | ||
513 | fw_destroy_nodes(card); | 532 | fw_destroy_nodes(card); |
514 | flush_scheduled_work(); | 533 | /* |
534 | * Wait for all device workqueue jobs to finish. Otherwise the | ||
535 | * firewire-core module could be unloaded before the jobs ran. | ||
536 | */ | ||
537 | while (atomic_read(&card->device_count) > 0) | ||
538 | msleep(100); | ||
515 | 539 | ||
540 | cancel_delayed_work_sync(&card->work); | ||
516 | fw_flush_transactions(card); | 541 | fw_flush_transactions(card); |
517 | del_timer_sync(&card->flush_timer); | 542 | del_timer_sync(&card->flush_timer); |
518 | 543 | ||
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 7e73cbaa4121..46bc197a047f 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c | |||
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
109 | struct client *client; | 109 | struct client *client; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | 111 | ||
112 | device = fw_device_from_devt(inode->i_rdev); | 112 | device = fw_device_get_by_devt(inode->i_rdev); |
113 | if (device == NULL) | 113 | if (device == NULL) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 116 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
117 | if (client == NULL) | 117 | if (client == NULL) { |
118 | fw_device_put(device); | ||
118 | return -ENOMEM; | 119 | return -ENOMEM; |
120 | } | ||
119 | 121 | ||
120 | client->device = fw_device_get(device); | 122 | client->device = device; |
121 | INIT_LIST_HEAD(&client->event_list); | 123 | INIT_LIST_HEAD(&client->event_list); |
122 | INIT_LIST_HEAD(&client->resource_list); | 124 | INIT_LIST_HEAD(&client->resource_list); |
123 | spin_lock_init(&client->lock); | 125 | spin_lock_init(&client->lock); |
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) | |||
644 | struct fw_cdev_create_iso_context *request = buffer; | 646 | struct fw_cdev_create_iso_context *request = buffer; |
645 | struct fw_iso_context *context; | 647 | struct fw_iso_context *context; |
646 | 648 | ||
649 | /* We only support one context at this time. */ | ||
650 | if (client->iso_context != NULL) | ||
651 | return -EBUSY; | ||
652 | |||
647 | if (request->channel > 63) | 653 | if (request->channel > 63) |
648 | return -EINVAL; | 654 | return -EINVAL; |
649 | 655 | ||
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) | |||
790 | { | 796 | { |
791 | struct fw_cdev_start_iso *request = buffer; | 797 | struct fw_cdev_start_iso *request = buffer; |
792 | 798 | ||
793 | if (request->handle != 0) | 799 | if (client->iso_context == NULL || request->handle != 0) |
794 | return -EINVAL; | 800 | return -EINVAL; |
801 | |||
795 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 802 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
796 | if (request->tags == 0 || request->tags > 15) | 803 | if (request->tags == 0 || request->tags > 15) |
797 | return -EINVAL; | 804 | return -EINVAL; |
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) | |||
808 | { | 815 | { |
809 | struct fw_cdev_stop_iso *request = buffer; | 816 | struct fw_cdev_stop_iso *request = buffer; |
810 | 817 | ||
811 | if (request->handle != 0) | 818 | if (client->iso_context == NULL || request->handle != 0) |
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | 820 | ||
814 | return fw_iso_context_stop(client->iso_context); | 821 | return fw_iso_context_stop(client->iso_context); |
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index de9066e69adf..870125a3638e 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c | |||
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { | |||
150 | }; | 150 | }; |
151 | EXPORT_SYMBOL(fw_bus_type); | 151 | EXPORT_SYMBOL(fw_bus_type); |
152 | 152 | ||
153 | struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
165 | static void fw_device_release(struct device *dev) | 153 | static void fw_device_release(struct device *dev) |
166 | { | 154 | { |
167 | struct fw_device *device = fw_device(dev); | 155 | struct fw_device *device = fw_device(dev); |
156 | struct fw_card *card = device->card; | ||
168 | unsigned long flags; | 157 | unsigned long flags; |
169 | 158 | ||
170 | /* | 159 | /* |
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) | |||
176 | spin_unlock_irqrestore(&device->card->lock, flags); | 165 | spin_unlock_irqrestore(&device->card->lock, flags); |
177 | 166 | ||
178 | fw_node_put(device->node); | 167 | fw_node_put(device->node); |
179 | fw_card_put(device->card); | ||
180 | kfree(device->config_rom); | 168 | kfree(device->config_rom); |
181 | kfree(device); | 169 | kfree(device); |
170 | atomic_dec(&card->device_count); | ||
182 | } | 171 | } |
183 | 172 | ||
184 | int fw_device_enable_phys_dma(struct fw_device *device) | 173 | int fw_device_enable_phys_dma(struct fw_device *device) |
@@ -358,12 +347,9 @@ static ssize_t | |||
358 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 347 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) |
359 | { | 348 | { |
360 | struct fw_device *device = fw_device(dev); | 349 | struct fw_device *device = fw_device(dev); |
361 | u64 guid; | ||
362 | |||
363 | guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; | ||
364 | 350 | ||
365 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | 351 | return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", |
366 | (unsigned long long)guid); | 352 | device->config_rom[3], device->config_rom[4]); |
367 | } | 353 | } |
368 | 354 | ||
369 | static struct device_attribute fw_device_attributes[] = { | 355 | static struct device_attribute fw_device_attributes[] = { |
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); | |||
610 | static DEFINE_IDR(fw_device_idr); | 596 | static DEFINE_IDR(fw_device_idr); |
611 | int fw_cdev_major; | 597 | int fw_cdev_major; |
612 | 598 | ||
613 | struct fw_device *fw_device_from_devt(dev_t devt) | 599 | struct fw_device *fw_device_get_by_devt(dev_t devt) |
614 | { | 600 | { |
615 | struct fw_device *device; | 601 | struct fw_device *device; |
616 | 602 | ||
617 | down_read(&idr_rwsem); | 603 | down_read(&idr_rwsem); |
618 | device = idr_find(&fw_device_idr, MINOR(devt)); | 604 | device = idr_find(&fw_device_idr, MINOR(devt)); |
605 | if (device) | ||
606 | fw_device_get(device); | ||
619 | up_read(&idr_rwsem); | 607 | up_read(&idr_rwsem); |
620 | 608 | ||
621 | return device; | 609 | return device; |
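fw_device_from_devt() becomes fw_device_get_by_devt() and now takes a reference inside the idr_rwsem read section, so the pointer cannot go stale between lookup and use; every successful caller therefore owes a matching fw_device_put(). A hedged sketch of how a character-device open/release pair might consume that contract (the function names and private_data usage below are illustrative, not the actual fw-cdev.c handlers):

    static int cdev_open_sketch(struct inode *inode, struct file *file)
    {
        struct fw_device *device;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
            return -ENODEV;

        file->private_data = device;    /* hold the reference for the open file */
        return 0;
    }

    static int cdev_release_sketch(struct inode *inode, struct file *file)
    {
        struct fw_device *device = file->private_data;

        fw_device_put(device);          /* balance fw_device_get_by_devt() */
        return 0;
    }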
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) | |||
627 | container_of(work, struct fw_device, work.work); | 615 | container_of(work, struct fw_device, work.work); |
628 | int minor = MINOR(device->device.devt); | 616 | int minor = MINOR(device->device.devt); |
629 | 617 | ||
630 | down_write(&idr_rwsem); | ||
631 | idr_remove(&fw_device_idr, minor); | ||
632 | up_write(&idr_rwsem); | ||
633 | |||
634 | fw_device_cdev_remove(device); | 618 | fw_device_cdev_remove(device); |
635 | device_for_each_child(&device->device, NULL, shutdown_unit); | 619 | device_for_each_child(&device->device, NULL, shutdown_unit); |
636 | device_unregister(&device->device); | 620 | device_unregister(&device->device); |
621 | |||
622 | down_write(&idr_rwsem); | ||
623 | idr_remove(&fw_device_idr, minor); | ||
624 | up_write(&idr_rwsem); | ||
625 | fw_device_put(device); | ||
637 | } | 626 | } |
638 | 627 | ||
639 | static struct device_type fw_device_type = { | 628 | static struct device_type fw_device_type = { |
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) | |||
668 | */ | 657 | */ |
669 | 658 | ||
670 | if (read_bus_info_block(device, device->generation) < 0) { | 659 | if (read_bus_info_block(device, device->generation) < 0) { |
671 | if (device->config_rom_retries < MAX_RETRIES) { | 660 | if (device->config_rom_retries < MAX_RETRIES && |
661 | atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { | ||
672 | device->config_rom_retries++; | 662 | device->config_rom_retries++; |
673 | schedule_delayed_work(&device->work, RETRY_DELAY); | 663 | schedule_delayed_work(&device->work, RETRY_DELAY); |
674 | } else { | 664 | } else { |
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) | |||
682 | } | 672 | } |
683 | 673 | ||
684 | err = -ENOMEM; | 674 | err = -ENOMEM; |
675 | |||
676 | fw_device_get(device); | ||
685 | down_write(&idr_rwsem); | 677 | down_write(&idr_rwsem); |
686 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) | 678 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) |
687 | err = idr_get_new(&fw_device_idr, device, &minor); | 679 | err = idr_get_new(&fw_device_idr, device, &minor); |
688 | up_write(&idr_rwsem); | 680 | up_write(&idr_rwsem); |
681 | |||
689 | if (err < 0) | 682 | if (err < 0) |
690 | goto error; | 683 | goto error; |
691 | 684 | ||
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) | |||
717 | */ | 710 | */ |
718 | if (atomic_cmpxchg(&device->state, | 711 | if (atomic_cmpxchg(&device->state, |
719 | FW_DEVICE_INITIALIZING, | 712 | FW_DEVICE_INITIALIZING, |
720 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) | 713 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { |
721 | fw_device_shutdown(&device->work.work); | 714 | fw_device_shutdown(&device->work.work); |
722 | else | 715 | } else { |
723 | fw_notify("created new fw device %s " | 716 | if (device->config_rom_retries) |
724 | "(%d config rom retries, S%d00)\n", | 717 | fw_notify("created device %s: GUID %08x%08x, S%d00, " |
725 | device->device.bus_id, device->config_rom_retries, | 718 | "%d config ROM retries\n", |
726 | 1 << device->max_speed); | 719 | device->device.bus_id, |
720 | device->config_rom[3], device->config_rom[4], | ||
721 | 1 << device->max_speed, | ||
722 | device->config_rom_retries); | ||
723 | else | ||
724 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
725 | device->device.bus_id, | ||
726 | device->config_rom[3], device->config_rom[4], | ||
727 | 1 << device->max_speed); | ||
728 | } | ||
727 | 729 | ||
728 | /* | 730 | /* |
729 | * Reschedule the IRM work if we just finished reading the | 731 | * Reschedule the IRM work if we just finished reading the |
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) | |||
741 | idr_remove(&fw_device_idr, minor); | 743 | idr_remove(&fw_device_idr, minor); |
742 | up_write(&idr_rwsem); | 744 | up_write(&idr_rwsem); |
743 | error: | 745 | error: |
744 | put_device(&device->device); | 746 | fw_device_put(device); /* fw_device_idr's reference */ |
747 | |||
748 | put_device(&device->device); /* our reference */ | ||
745 | } | 749 | } |
746 | 750 | ||
747 | static int update_unit(struct device *dev, void *data) | 751 | static int update_unit(struct device *dev, void *data) |
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
791 | */ | 795 | */ |
792 | device_initialize(&device->device); | 796 | device_initialize(&device->device); |
793 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); | 797 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); |
794 | device->card = fw_card_get(card); | 798 | atomic_inc(&card->device_count); |
799 | device->card = card; | ||
795 | device->node = fw_node_get(node); | 800 | device->node = fw_node_get(node); |
796 | device->node_id = node->node_id; | 801 | device->node_id = node->node_id; |
797 | device->generation = card->generation; | 802 | device->generation = card->generation; |
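fw_node_event() and fw_device_release() now bracket a per-card atomic_t device_count instead of taking a kref on the card for every device: the counter goes up when a device node is discovered and down when the last reference to the fw_device is dropped. A minimal sketch of that counting idiom, assuming some card teardown path elsewhere waits for the counter to reach zero (the waiting side is not part of this diff):

    static void device_discovered_sketch(struct fw_card *card)
    {
        atomic_inc(&card->device_count);    /* a new fw_device points at card */
    }

    static void device_released_sketch(struct fw_card *card)
    {
        /*
         * Last put on the fw_device: card teardown may proceed once
         * device_count has dropped to zero.
         */
        atomic_dec(&card->device_count);
    }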
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 0854fe2bc110..78ecd3991b7f 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h | |||
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) | |||
76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | 76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; |
77 | } | 77 | } |
78 | 78 | ||
79 | struct fw_device *fw_device_get(struct fw_device *device); | 79 | static inline struct fw_device * |
80 | void fw_device_put(struct fw_device *device); | 80 | fw_device_get(struct fw_device *device) |
81 | { | ||
82 | get_device(&device->device); | ||
83 | |||
84 | return device; | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | fw_device_put(struct fw_device *device) | ||
89 | { | ||
90 | put_device(&device->device); | ||
91 | } | ||
92 | |||
93 | struct fw_device *fw_device_get_by_devt(dev_t devt); | ||
81 | int fw_device_enable_phys_dma(struct fw_device *device); | 94 | int fw_device_enable_phys_dma(struct fw_device *device); |
82 | 95 | ||
83 | void fw_device_cdev_update(struct fw_device *device); | 96 | void fw_device_cdev_update(struct fw_device *device); |
84 | void fw_device_cdev_remove(struct fw_device *device); | 97 | void fw_device_cdev_remove(struct fw_device *device); |
85 | 98 | ||
86 | struct fw_device *fw_device_from_devt(dev_t devt); | ||
87 | extern int fw_cdev_major; | 99 | extern int fw_cdev_major; |
88 | 100 | ||
89 | struct fw_unit { | 101 | struct fw_unit { |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 19ece9b6d742..03069a454c07 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c | |||
@@ -28,14 +28,15 @@ | |||
28 | * and many others. | 28 | * and many others. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/mod_devicetable.h> | ||
32 | #include <linux/module.h> | 37 | #include <linux/module.h> |
33 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
34 | #include <linux/mod_devicetable.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
40 | #include <linux/stringify.h> | 41 | #include <linux/stringify.h> |
41 | #include <linux/timer.h> | 42 | #include <linux/timer.h> |
@@ -47,9 +48,9 @@ | |||
47 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 49 | #include <scsi/scsi_host.h> |
49 | 50 | ||
50 | #include "fw-transaction.h" | ||
51 | #include "fw-topology.h" | ||
52 | #include "fw-device.h" | 51 | #include "fw-device.h" |
52 | #include "fw-topology.h" | ||
53 | #include "fw-transaction.h" | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * So far only bridges from Oxford Semiconductor are known to support | 56 | * So far only bridges from Oxford Semiconductor are known to support |
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
82 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 83 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
83 | * Don't use this with devices which don't have this bug. | 84 | * Don't use this with devices which don't have this bug. |
84 | * | 85 | * |
86 | * - delay inquiry | ||
87 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
88 | * | ||
85 | * - override internal blacklist | 89 | * - override internal blacklist |
86 | * Instead of adding to the built-in blacklist, use only the workarounds | 90 | * Instead of adding to the built-in blacklist, use only the workarounds |
87 | * specified in the module load parameter. | 91 | * specified in the module load parameter. |
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
91 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 95 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
92 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 96 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
93 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 97 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
98 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
99 | #define SBP2_INQUIRY_DELAY 12 | ||
94 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 100 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
95 | 101 | ||
96 | static int sbp2_param_workarounds; | 102 | static int sbp2_param_workarounds; |
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
100 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 106 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
101 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 107 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
102 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 108 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
109 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
103 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 110 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
104 | ", or a combination)"); | 111 | ", or a combination)"); |
105 | 112 | ||
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; | |||
115 | struct sbp2_logical_unit { | 122 | struct sbp2_logical_unit { |
116 | struct sbp2_target *tgt; | 123 | struct sbp2_target *tgt; |
117 | struct list_head link; | 124 | struct list_head link; |
118 | struct scsi_device *sdev; | ||
119 | struct fw_address_handler address_handler; | 125 | struct fw_address_handler address_handler; |
120 | struct list_head orb_list; | 126 | struct list_head orb_list; |
121 | 127 | ||
@@ -132,6 +138,8 @@ struct sbp2_logical_unit { | |||
132 | int generation; | 138 | int generation; |
133 | int retries; | 139 | int retries; |
134 | struct delayed_work work; | 140 | struct delayed_work work; |
141 | bool has_sdev; | ||
142 | bool blocked; | ||
135 | }; | 143 | }; |
136 | 144 | ||
137 | /* | 145 | /* |
@@ -141,16 +149,18 @@ struct sbp2_logical_unit { | |||
141 | struct sbp2_target { | 149 | struct sbp2_target { |
142 | struct kref kref; | 150 | struct kref kref; |
143 | struct fw_unit *unit; | 151 | struct fw_unit *unit; |
152 | const char *bus_id; | ||
153 | struct list_head lu_list; | ||
144 | 154 | ||
145 | u64 management_agent_address; | 155 | u64 management_agent_address; |
146 | int directory_id; | 156 | int directory_id; |
147 | int node_id; | 157 | int node_id; |
148 | int address_high; | 158 | int address_high; |
149 | 159 | unsigned int workarounds; | |
150 | unsigned workarounds; | ||
151 | struct list_head lu_list; | ||
152 | |||
153 | unsigned int mgt_orb_timeout; | 160 | unsigned int mgt_orb_timeout; |
161 | |||
162 | int dont_block; /* counter for each logical unit */ | ||
163 | int blocked; /* ditto */ | ||
154 | }; | 164 | }; |
155 | 165 | ||
156 | /* | 166 | /* |
@@ -160,7 +170,7 @@ struct sbp2_target { | |||
160 | */ | 170 | */ |
161 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | 171 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ |
162 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | 172 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ |
163 | #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ | 173 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ |
164 | #define SBP2_ORB_NULL 0x80000000 | 174 | #define SBP2_ORB_NULL 0x80000000 |
165 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 | 175 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 |
166 | 176 | ||
@@ -297,7 +307,7 @@ struct sbp2_command_orb { | |||
297 | static const struct { | 307 | static const struct { |
298 | u32 firmware_revision; | 308 | u32 firmware_revision; |
299 | u32 model; | 309 | u32 model; |
300 | unsigned workarounds; | 310 | unsigned int workarounds; |
301 | } sbp2_workarounds_table[] = { | 311 | } sbp2_workarounds_table[] = { |
302 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | 312 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { |
303 | .firmware_revision = 0x002800, | 313 | .firmware_revision = 0x002800, |
@@ -305,6 +315,11 @@ static const struct { | |||
305 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 315 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
306 | SBP2_WORKAROUND_MODE_SENSE_8, | 316 | SBP2_WORKAROUND_MODE_SENSE_8, |
307 | }, | 317 | }, |
318 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
319 | .firmware_revision = 0x002800, | ||
320 | .model = 0x000000, | ||
321 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
322 | }, | ||
308 | /* Initio bridges, actually only needed for some older ones */ { | 323 | /* Initio bridges, actually only needed for some older ones */ { |
309 | .firmware_revision = 0x000200, | 324 | .firmware_revision = 0x000200, |
310 | .model = ~0, | 325 | .model = ~0, |
@@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
501 | unsigned int timeout; | 516 | unsigned int timeout; |
502 | int retval = -ENOMEM; | 517 | int retval = -ENOMEM; |
503 | 518 | ||
519 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
520 | return 0; | ||
521 | |||
504 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 522 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
505 | if (orb == NULL) | 523 | if (orb == NULL) |
506 | return -ENOMEM; | 524 | return -ENOMEM; |
@@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
553 | 571 | ||
554 | retval = -EIO; | 572 | retval = -EIO; |
555 | if (sbp2_cancel_orbs(lu) == 0) { | 573 | if (sbp2_cancel_orbs(lu) == 0) { |
556 | fw_error("orb reply timed out, rcode=0x%02x\n", | 574 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", |
557 | orb->base.rcode); | 575 | lu->tgt->bus_id, orb->base.rcode); |
558 | goto out; | 576 | goto out; |
559 | } | 577 | } |
560 | 578 | ||
561 | if (orb->base.rcode != RCODE_COMPLETE) { | 579 | if (orb->base.rcode != RCODE_COMPLETE) { |
562 | fw_error("management write failed, rcode 0x%02x\n", | 580 | fw_error("%s: management write failed, rcode 0x%02x\n", |
563 | orb->base.rcode); | 581 | lu->tgt->bus_id, orb->base.rcode); |
564 | goto out; | 582 | goto out; |
565 | } | 583 | } |
566 | 584 | ||
567 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 585 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
568 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 586 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
569 | fw_error("error status: %d:%d\n", | 587 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, |
570 | STATUS_GET_RESPONSE(orb->status), | 588 | STATUS_GET_RESPONSE(orb->status), |
571 | STATUS_GET_SBP_STATUS(orb->status)); | 589 | STATUS_GET_SBP_STATUS(orb->status)); |
572 | goto out; | 590 | goto out; |
@@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
590 | 608 | ||
591 | static void | 609 | static void |
592 | complete_agent_reset_write(struct fw_card *card, int rcode, | 610 | complete_agent_reset_write(struct fw_card *card, int rcode, |
593 | void *payload, size_t length, void *data) | 611 | void *payload, size_t length, void *done) |
594 | { | 612 | { |
595 | struct fw_transaction *t = data; | 613 | complete(done); |
614 | } | ||
596 | 615 | ||
597 | kfree(t); | 616 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
617 | { | ||
618 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
619 | DECLARE_COMPLETION_ONSTACK(done); | ||
620 | struct fw_transaction t; | ||
621 | static u32 z; | ||
622 | |||
623 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
624 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
625 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
626 | &z, sizeof(z), complete_agent_reset_write, &done); | ||
627 | wait_for_completion(&done); | ||
628 | } | ||
629 | |||
630 | static void | ||
631 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | ||
632 | void *payload, size_t length, void *data) | ||
633 | { | ||
634 | kfree(data); | ||
598 | } | 635 | } |
599 | 636 | ||
600 | static int sbp2_agent_reset(struct sbp2_logical_unit *lu) | 637 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) |
601 | { | 638 | { |
602 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 639 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
603 | struct fw_transaction *t; | 640 | struct fw_transaction *t; |
604 | static u32 zero; | 641 | static u32 z; |
605 | 642 | ||
606 | t = kzalloc(sizeof(*t), GFP_ATOMIC); | 643 | t = kmalloc(sizeof(*t), GFP_ATOMIC); |
607 | if (t == NULL) | 644 | if (t == NULL) |
608 | return -ENOMEM; | 645 | return; |
609 | 646 | ||
610 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 647 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
611 | lu->tgt->node_id, lu->generation, device->max_speed, | 648 | lu->tgt->node_id, lu->generation, device->max_speed, |
612 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 649 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
613 | &zero, sizeof(zero), complete_agent_reset_write, t); | 650 | &z, sizeof(z), complete_agent_reset_write_no_wait, t); |
651 | } | ||
614 | 652 | ||
615 | return 0; | 653 | static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) |
654 | { | ||
655 | struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; | ||
656 | unsigned long flags; | ||
657 | |||
658 | /* serialize with comparisons of lu->generation and card->generation */ | ||
659 | spin_lock_irqsave(&card->lock, flags); | ||
660 | lu->generation = generation; | ||
661 | spin_unlock_irqrestore(&card->lock, flags); | ||
662 | } | ||
663 | |||
664 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
665 | { | ||
666 | /* | ||
667 | * We may access dont_block without taking card->lock here: | ||
668 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
669 | * are currently serialized against each other. | ||
670 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
671 | * dont_block is rather harmless, it simply misses its first chance. | ||
672 | */ | ||
673 | --lu->tgt->dont_block; | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * Blocks lu->tgt if all of the following conditions are met: | ||
678 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
679 | * logical units have been finished (indicated by dont_block == 0). | ||
680 | * - lu->generation is stale. | ||
681 | * | ||
682 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
683 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
684 | * unblock the target. | ||
685 | */ | ||
686 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
687 | { | ||
688 | struct sbp2_target *tgt = lu->tgt; | ||
689 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
690 | struct Scsi_Host *shost = | ||
691 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
692 | unsigned long flags; | ||
693 | |||
694 | spin_lock_irqsave(&card->lock, flags); | ||
695 | if (!tgt->dont_block && !lu->blocked && | ||
696 | lu->generation != card->generation) { | ||
697 | lu->blocked = true; | ||
698 | if (++tgt->blocked == 1) { | ||
699 | scsi_block_requests(shost); | ||
700 | fw_notify("blocked %s\n", lu->tgt->bus_id); | ||
701 | } | ||
702 | } | ||
703 | spin_unlock_irqrestore(&card->lock, flags); | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
708 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
709 | * card->lock protected section. On the other hand, running it inside | ||
710 | * the section might clash with shost->host_lock. | ||
711 | */ | ||
712 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
713 | { | ||
714 | struct sbp2_target *tgt = lu->tgt; | ||
715 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
716 | struct Scsi_Host *shost = | ||
717 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
718 | unsigned long flags; | ||
719 | bool unblock = false; | ||
720 | |||
721 | spin_lock_irqsave(&card->lock, flags); | ||
722 | if (lu->blocked && lu->generation == card->generation) { | ||
723 | lu->blocked = false; | ||
724 | unblock = --tgt->blocked == 0; | ||
725 | } | ||
726 | spin_unlock_irqrestore(&card->lock, flags); | ||
727 | |||
728 | if (unblock) { | ||
729 | scsi_unblock_requests(shost); | ||
730 | fw_notify("unblocked %s\n", lu->tgt->bus_id); | ||
731 | } | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * Prevents future blocking of tgt and unblocks it. | ||
736 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
737 | * card->lock protected section. On the other hand, running it inside | ||
738 | * the section might clash with shost->host_lock. | ||
739 | */ | ||
740 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
741 | { | ||
742 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
743 | struct Scsi_Host *shost = | ||
744 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
745 | unsigned long flags; | ||
746 | |||
747 | spin_lock_irqsave(&card->lock, flags); | ||
748 | ++tgt->dont_block; | ||
749 | spin_unlock_irqrestore(&card->lock, flags); | ||
750 | |||
751 | scsi_unblock_requests(shost); | ||
752 | } | ||
753 | |||
754 | static int sbp2_lun2int(u16 lun) | ||
755 | { | ||
756 | struct scsi_lun eight_bytes_lun; | ||
757 | |||
758 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
759 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
760 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
761 | |||
762 | return scsilun_to_int(&eight_bytes_lun); | ||
616 | } | 763 | } |
617 | 764 | ||
618 | static void sbp2_release_target(struct kref *kref) | 765 | static void sbp2_release_target(struct kref *kref) |
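The rewritten sbp2_agent_reset() in the hunk above turns an asynchronous quadlet write into a synchronous one by parking a completion on the stack and letting the transaction callback wake it. The same idiom, reduced to its bones and assuming a fw_send_request() with the signature used above:

    static void write_done_sketch(struct fw_card *card, int rcode,
                                  void *payload, size_t length, void *done)
    {
        complete(done);                     /* wake the waiter below */
    }

    static void sync_quadlet_write_sketch(struct fw_card *card, int node_id,
                                          int generation, int speed, u64 offset)
    {
        DECLARE_COMPLETION_ONSTACK(done);
        struct fw_transaction t;            /* lives on the stack, so we must wait */
        static u32 zero;                    /* payload must outlive the request too */

        fw_send_request(card, &t, TCODE_WRITE_QUADLET_REQUEST,
                        node_id, generation, speed, offset,
                        &zero, sizeof(zero), write_done_sketch, &done);
        wait_for_completion(&done);         /* t and done stay valid until here */
    }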
@@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref) | |||
621 | struct sbp2_logical_unit *lu, *next; | 768 | struct sbp2_logical_unit *lu, *next; |
622 | struct Scsi_Host *shost = | 769 | struct Scsi_Host *shost = |
623 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 770 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
771 | struct scsi_device *sdev; | ||
624 | struct fw_device *device = fw_device(tgt->unit->device.parent); | 772 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
625 | 773 | ||
626 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | 774 | /* prevent deadlocks */ |
627 | if (lu->sdev) | 775 | sbp2_unblock(tgt); |
628 | scsi_remove_device(lu->sdev); | ||
629 | 776 | ||
630 | if (!fw_device_is_shutdown(device)) | 777 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { |
631 | sbp2_send_management_orb(lu, tgt->node_id, | 778 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); |
632 | lu->generation, SBP2_LOGOUT_REQUEST, | 779 | if (sdev) { |
633 | lu->login_id, NULL); | 780 | scsi_remove_device(sdev); |
781 | scsi_device_put(sdev); | ||
782 | } | ||
783 | sbp2_send_management_orb(lu, tgt->node_id, lu->generation, | ||
784 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
634 | 785 | ||
635 | fw_core_remove_address_handler(&lu->address_handler); | 786 | fw_core_remove_address_handler(&lu->address_handler); |
636 | list_del(&lu->link); | 787 | list_del(&lu->link); |
637 | kfree(lu); | 788 | kfree(lu); |
638 | } | 789 | } |
639 | scsi_remove_host(shost); | 790 | scsi_remove_host(shost); |
640 | fw_notify("released %s\n", tgt->unit->device.bus_id); | 791 | fw_notify("released %s\n", tgt->bus_id); |
641 | 792 | ||
642 | put_device(&tgt->unit->device); | 793 | put_device(&tgt->unit->device); |
643 | scsi_host_put(shost); | 794 | scsi_host_put(shost); |
795 | fw_device_put(device); | ||
644 | } | 796 | } |
645 | 797 | ||
646 | static struct workqueue_struct *sbp2_wq; | 798 | static struct workqueue_struct *sbp2_wq; |
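sbp2_release_target() is the kref release callback for a target: it unblocks the SCSI host first to avoid deadlocks, removes whatever sdevs it can still look up, logs the logical units out, tears down the host, and finally drops the fw_device reference taken in sbp2_probe(). A bare-bones sketch of the surrounding kref discipline, assuming sbp2_target_put() is the usual kref_put() wrapper (its body is not part of this diff):

    static void sbp2_target_get_sketch(struct sbp2_target *tgt)
    {
        kref_get(&tgt->kref);               /* e.g. before queueing lu->work */
    }

    static void sbp2_target_put_sketch(struct sbp2_target *tgt)
    {
        /* the last put runs sbp2_release_target() and releases the Scsi_Host */
        kref_put(&tgt->kref, sbp2_release_target);
    }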
@@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work) | |||
666 | { | 818 | { |
667 | struct sbp2_logical_unit *lu = | 819 | struct sbp2_logical_unit *lu = |
668 | container_of(work, struct sbp2_logical_unit, work.work); | 820 | container_of(work, struct sbp2_logical_unit, work.work); |
669 | struct Scsi_Host *shost = | 821 | struct sbp2_target *tgt = lu->tgt; |
670 | container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); | 822 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
823 | struct Scsi_Host *shost; | ||
671 | struct scsi_device *sdev; | 824 | struct scsi_device *sdev; |
672 | struct scsi_lun eight_bytes_lun; | ||
673 | struct fw_unit *unit = lu->tgt->unit; | ||
674 | struct fw_device *device = fw_device(unit->device.parent); | ||
675 | struct sbp2_login_response response; | 825 | struct sbp2_login_response response; |
676 | int generation, node_id, local_node_id; | 826 | int generation, node_id, local_node_id; |
677 | 827 | ||
828 | if (fw_device_is_shutdown(device)) | ||
829 | goto out; | ||
830 | |||
678 | generation = device->generation; | 831 | generation = device->generation; |
679 | smp_rmb(); /* node_id must not be older than generation */ | 832 | smp_rmb(); /* node_id must not be older than generation */ |
680 | node_id = device->node_id; | 833 | node_id = device->node_id; |
681 | local_node_id = device->card->node_id; | 834 | local_node_id = device->card->node_id; |
682 | 835 | ||
836 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
837 | if (lu->has_sdev) | ||
838 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
839 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
840 | |||
683 | if (sbp2_send_management_orb(lu, node_id, generation, | 841 | if (sbp2_send_management_orb(lu, node_id, generation, |
684 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | 842 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { |
685 | if (lu->retries++ < 5) | 843 | if (lu->retries++ < 5) { |
686 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 844 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
687 | else | 845 | } else { |
688 | fw_error("failed to login to %s LUN %04x\n", | 846 | fw_error("%s: failed to login to LUN %04x\n", |
689 | unit->device.bus_id, lu->lun); | 847 | tgt->bus_id, lu->lun); |
848 | /* Let any waiting I/O fail from now on. */ | ||
849 | sbp2_unblock(lu->tgt); | ||
850 | } | ||
690 | goto out; | 851 | goto out; |
691 | } | 852 | } |
692 | 853 | ||
693 | lu->generation = generation; | 854 | tgt->node_id = node_id; |
694 | lu->tgt->node_id = node_id; | 855 | tgt->address_high = local_node_id << 16; |
695 | lu->tgt->address_high = local_node_id << 16; | 856 | sbp2_set_generation(lu, generation); |
696 | 857 | ||
697 | /* Get command block agent offset and login id. */ | 858 | /* Get command block agent offset and login id. */ |
698 | lu->command_block_agent_address = | 859 | lu->command_block_agent_address = |
@@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work) | |||
700 | response.command_block_agent.low; | 861 | response.command_block_agent.low; |
701 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); | 862 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); |
702 | 863 | ||
703 | fw_notify("logged in to %s LUN %04x (%d retries)\n", | 864 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", |
704 | unit->device.bus_id, lu->lun, lu->retries); | 865 | tgt->bus_id, lu->lun, lu->retries); |
705 | 866 | ||
706 | #if 0 | 867 | #if 0 |
707 | /* FIXME: The linux1394 sbp2 does this last step. */ | 868 | /* FIXME: The linux1394 sbp2 does this last step. */ |
@@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work) | |||
711 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 872 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); |
712 | sbp2_agent_reset(lu); | 873 | sbp2_agent_reset(lu); |
713 | 874 | ||
714 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | 875 | /* This was a re-login. */ |
715 | eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; | 876 | if (lu->has_sdev) { |
716 | eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; | 877 | sbp2_cancel_orbs(lu); |
878 | sbp2_conditionally_unblock(lu); | ||
879 | goto out; | ||
880 | } | ||
717 | 881 | ||
718 | sdev = __scsi_add_device(shost, 0, 0, | 882 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) |
719 | scsilun_to_int(&eight_bytes_lun), lu); | 883 | ssleep(SBP2_INQUIRY_DELAY); |
720 | if (IS_ERR(sdev)) { | 884 | |
721 | sbp2_send_management_orb(lu, node_id, generation, | 885 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
722 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | 886 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); |
723 | /* | 887 | /* |
724 | * Set this back to sbp2_login so we fall back and | 888 | * FIXME: We are unable to perform reconnects while in sbp2_login(). |
725 | * retry login on bus reset. | 889 | * Therefore __scsi_add_device() will get into trouble if a bus reset |
726 | */ | 890 | * happens in parallel. It will either fail or leave us with an |
727 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 891 | * unusable sdev. As a workaround we check for this and retry the |
728 | } else { | 892 | * whole login and SCSI probing. |
729 | lu->sdev = sdev; | 893 | */ |
894 | |||
895 | /* Reported error during __scsi_add_device() */ | ||
896 | if (IS_ERR(sdev)) | ||
897 | goto out_logout_login; | ||
898 | |||
899 | /* Unreported error during __scsi_add_device() */ | ||
900 | smp_rmb(); /* get current card generation */ | ||
901 | if (generation != device->card->generation) { | ||
902 | scsi_remove_device(sdev); | ||
730 | scsi_device_put(sdev); | 903 | scsi_device_put(sdev); |
904 | goto out_logout_login; | ||
731 | } | 905 | } |
906 | |||
907 | /* No error during __scsi_add_device() */ | ||
908 | lu->has_sdev = true; | ||
909 | scsi_device_put(sdev); | ||
910 | sbp2_allow_block(lu); | ||
911 | goto out; | ||
912 | |||
913 | out_logout_login: | ||
914 | smp_rmb(); /* generation may have changed */ | ||
915 | generation = device->generation; | ||
916 | smp_rmb(); /* node_id must not be older than generation */ | ||
917 | |||
918 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
919 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
920 | /* | ||
921 | * If a bus reset happened, sbp2_update will have requeued | ||
922 | * lu->work already. Reset the work from reconnect to login. | ||
923 | */ | ||
924 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
732 | out: | 925 | out: |
733 | sbp2_target_put(lu->tgt); | 926 | sbp2_target_put(tgt); |
734 | } | 927 | } |
735 | 928 | ||
736 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 929 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
@@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
751 | return -ENOMEM; | 944 | return -ENOMEM; |
752 | } | 945 | } |
753 | 946 | ||
754 | lu->tgt = tgt; | 947 | lu->tgt = tgt; |
755 | lu->sdev = NULL; | 948 | lu->lun = lun_entry & 0xffff; |
756 | lu->lun = lun_entry & 0xffff; | 949 | lu->retries = 0; |
757 | lu->retries = 0; | 950 | lu->has_sdev = false; |
951 | lu->blocked = false; | ||
952 | ++tgt->dont_block; | ||
758 | INIT_LIST_HEAD(&lu->orb_list); | 953 | INIT_LIST_HEAD(&lu->orb_list); |
759 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 954 | INIT_DELAYED_WORK(&lu->work, sbp2_login); |
760 | 955 | ||
@@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | |||
813 | if (timeout > tgt->mgt_orb_timeout) | 1008 | if (timeout > tgt->mgt_orb_timeout) |
814 | fw_notify("%s: config rom contains %ds " | 1009 | fw_notify("%s: config rom contains %ds " |
815 | "management ORB timeout, limiting " | 1010 | "management ORB timeout, limiting " |
816 | "to %ds\n", tgt->unit->device.bus_id, | 1011 | "to %ds\n", tgt->bus_id, |
817 | timeout / 1000, | 1012 | timeout / 1000, |
818 | tgt->mgt_orb_timeout / 1000); | 1013 | tgt->mgt_orb_timeout / 1000); |
819 | break; | 1014 | break; |
@@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
836 | u32 firmware_revision) | 1031 | u32 firmware_revision) |
837 | { | 1032 | { |
838 | int i; | 1033 | int i; |
839 | unsigned w = sbp2_param_workarounds; | 1034 | unsigned int w = sbp2_param_workarounds; |
840 | 1035 | ||
841 | if (w) | 1036 | if (w) |
842 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1037 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " |
843 | "if you need the workarounds parameter for %s\n", | 1038 | "if you need the workarounds parameter for %s\n", |
844 | tgt->unit->device.bus_id); | 1039 | tgt->bus_id); |
845 | 1040 | ||
846 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1041 | if (w & SBP2_WORKAROUND_OVERRIDE) |
847 | goto out; | 1042 | goto out; |
@@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
863 | if (w) | 1058 | if (w) |
864 | fw_notify("Workarounds for %s: 0x%x " | 1059 | fw_notify("Workarounds for %s: 0x%x " |
865 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1060 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
866 | tgt->unit->device.bus_id, | 1061 | tgt->bus_id, w, firmware_revision, model); |
867 | w, firmware_revision, model); | ||
868 | tgt->workarounds = w; | 1062 | tgt->workarounds = w; |
869 | } | 1063 | } |
870 | 1064 | ||
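The workaround bits from the module parameter either replace the built-in table outright (SBP2_WORKAROUND_OVERRIDE) or get OR-ed with table entries that match the device. A condensed, generic sketch of that merge; the matching test is simplified here, since the full body of sbp2_init_workarounds() is not shown in this hunk:

    static unsigned int merge_workarounds_sketch(u32 firmware_revision, u32 model)
    {
        unsigned int w = sbp2_param_workarounds;
        int i;

        if (w & SBP2_WORKAROUND_OVERRIDE)
            return w;                       /* trust the user completely */

        for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++)
            if (sbp2_workarounds_table[i].firmware_revision == firmware_revision &&
                (sbp2_workarounds_table[i].model == model ||
                 sbp2_workarounds_table[i].model == ~0))
                w |= sbp2_workarounds_table[i].workarounds;

        return w;
    }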
@@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev) | |||
888 | tgt->unit = unit; | 1082 | tgt->unit = unit; |
889 | kref_init(&tgt->kref); | 1083 | kref_init(&tgt->kref); |
890 | INIT_LIST_HEAD(&tgt->lu_list); | 1084 | INIT_LIST_HEAD(&tgt->lu_list); |
1085 | tgt->bus_id = unit->device.bus_id; | ||
891 | 1086 | ||
892 | if (fw_device_enable_phys_dma(device) < 0) | 1087 | if (fw_device_enable_phys_dma(device) < 0) |
893 | goto fail_shost_put; | 1088 | goto fail_shost_put; |
@@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev) | |||
895 | if (scsi_add_host(shost, &unit->device) < 0) | 1090 | if (scsi_add_host(shost, &unit->device) < 0) |
896 | goto fail_shost_put; | 1091 | goto fail_shost_put; |
897 | 1092 | ||
1093 | fw_device_get(device); | ||
1094 | |||
898 | /* Initialize to values that won't match anything in our table. */ | 1095 | /* Initialize to values that won't match anything in our table. */ |
899 | firmware_revision = 0xff000000; | 1096 | firmware_revision = 0xff000000; |
900 | model = 0xff000000; | 1097 | model = 0xff000000; |
@@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
938 | { | 1135 | { |
939 | struct sbp2_logical_unit *lu = | 1136 | struct sbp2_logical_unit *lu = |
940 | container_of(work, struct sbp2_logical_unit, work.work); | 1137 | container_of(work, struct sbp2_logical_unit, work.work); |
941 | struct fw_unit *unit = lu->tgt->unit; | 1138 | struct sbp2_target *tgt = lu->tgt; |
942 | struct fw_device *device = fw_device(unit->device.parent); | 1139 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
943 | int generation, node_id, local_node_id; | 1140 | int generation, node_id, local_node_id; |
944 | 1141 | ||
1142 | if (fw_device_is_shutdown(device)) | ||
1143 | goto out; | ||
1144 | |||
945 | generation = device->generation; | 1145 | generation = device->generation; |
946 | smp_rmb(); /* node_id must not be older than generation */ | 1146 | smp_rmb(); /* node_id must not be older than generation */ |
947 | node_id = device->node_id; | 1147 | node_id = device->node_id; |
@@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work) | |||
950 | if (sbp2_send_management_orb(lu, node_id, generation, | 1150 | if (sbp2_send_management_orb(lu, node_id, generation, |
951 | SBP2_RECONNECT_REQUEST, | 1151 | SBP2_RECONNECT_REQUEST, |
952 | lu->login_id, NULL) < 0) { | 1152 | lu->login_id, NULL) < 0) { |
953 | if (lu->retries++ >= 5) { | 1153 | /* |
954 | fw_error("failed to reconnect to %s\n", | 1154 | * If reconnect was impossible even though we are in the |
955 | unit->device.bus_id); | 1155 | * current generation, fall back and try to log in again. |
956 | /* Fall back and try to log in again. */ | 1156 | * |
1157 | * We could check for "Function rejected" status, but | ||
1158 | * looking at the bus generation is simpler and more general. | ||
1159 | */ | ||
1160 | smp_rmb(); /* get current card generation */ | ||
1161 | if (generation == device->card->generation || | ||
1162 | lu->retries++ >= 5) { | ||
1163 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
957 | lu->retries = 0; | 1164 | lu->retries = 0; |
958 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 1165 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
959 | } | 1166 | } |
@@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work) | |||
961 | goto out; | 1168 | goto out; |
962 | } | 1169 | } |
963 | 1170 | ||
964 | lu->generation = generation; | 1171 | tgt->node_id = node_id; |
965 | lu->tgt->node_id = node_id; | 1172 | tgt->address_high = local_node_id << 16; |
966 | lu->tgt->address_high = local_node_id << 16; | 1173 | sbp2_set_generation(lu, generation); |
967 | 1174 | ||
968 | fw_notify("reconnected to %s LUN %04x (%d retries)\n", | 1175 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", |
969 | unit->device.bus_id, lu->lun, lu->retries); | 1176 | tgt->bus_id, lu->lun, lu->retries); |
970 | 1177 | ||
971 | sbp2_agent_reset(lu); | 1178 | sbp2_agent_reset(lu); |
972 | sbp2_cancel_orbs(lu); | 1179 | sbp2_cancel_orbs(lu); |
1180 | sbp2_conditionally_unblock(lu); | ||
973 | out: | 1181 | out: |
974 | sbp2_target_put(lu->tgt); | 1182 | sbp2_target_put(tgt); |
975 | } | 1183 | } |
976 | 1184 | ||
977 | static void sbp2_update(struct fw_unit *unit) | 1185 | static void sbp2_update(struct fw_unit *unit) |
@@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit) | |||
986 | * Iteration over tgt->lu_list is therefore safe here. | 1194 | * Iteration over tgt->lu_list is therefore safe here. |
987 | */ | 1195 | */ |
988 | list_for_each_entry(lu, &tgt->lu_list, link) { | 1196 | list_for_each_entry(lu, &tgt->lu_list, link) { |
1197 | sbp2_conditionally_block(lu); | ||
989 | lu->retries = 0; | 1198 | lu->retries = 0; |
990 | sbp2_queue_work(lu, 0); | 1199 | sbp2_queue_work(lu, 0); |
991 | } | 1200 | } |
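sbp2_update() now calls sbp2_conditionally_block() before requeueing login/reconnect work, so SCSI commands stop flowing to a target whose generation just went stale and resume only once every blocked logical unit has caught up. Stripped of the SBP-2 specifics, the two counters work roughly like this (a schematic restatement of the functions introduced earlier, with the card->lock protection omitted; it is not additional driver code):

    /*
     * tgt->dont_block > 0 : some LU is still logging in / probing, never block
     * tgt->blocked        : number of LUs currently holding the host blocked
     */
    static void block_if_stale_sketch(struct sbp2_target *tgt,
                                      struct sbp2_logical_unit *lu,
                                      struct Scsi_Host *shost, int card_generation)
    {
        if (!tgt->dont_block && !lu->blocked && lu->generation != card_generation) {
            lu->blocked = true;
            if (++tgt->blocked == 1)        /* first stale LU blocks the host */
                scsi_block_requests(shost);
        }
    }

    static void unblock_if_current_sketch(struct sbp2_target *tgt,
                                          struct sbp2_logical_unit *lu,
                                          struct Scsi_Host *shost, int card_generation)
    {
        if (lu->blocked && lu->generation == card_generation) {
            lu->blocked = false;
            if (--tgt->blocked == 0)        /* last LU caught up, release the host */
                scsi_unblock_requests(shost);
        }
    }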
@@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1063 | 1272 | ||
1064 | if (status != NULL) { | 1273 | if (status != NULL) { |
1065 | if (STATUS_GET_DEAD(*status)) | 1274 | if (STATUS_GET_DEAD(*status)) |
1066 | sbp2_agent_reset(orb->lu); | 1275 | sbp2_agent_reset_no_wait(orb->lu); |
1067 | 1276 | ||
1068 | switch (STATUS_GET_RESPONSE(*status)) { | 1277 | switch (STATUS_GET_RESPONSE(*status)) { |
1069 | case SBP2_STATUS_REQUEST_COMPLETE: | 1278 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1089 | * or when sending the write (less likely). | 1298 | * or when sending the write (less likely). |
1090 | */ | 1299 | */ |
1091 | result = DID_BUS_BUSY << 16; | 1300 | result = DID_BUS_BUSY << 16; |
1301 | sbp2_conditionally_block(orb->lu); | ||
1092 | } | 1302 | } |
1093 | 1303 | ||
1094 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1304 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1197 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1407 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1198 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 1408 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
1199 | struct sbp2_command_orb *orb; | 1409 | struct sbp2_command_orb *orb; |
1200 | unsigned max_payload; | 1410 | unsigned int max_payload; |
1201 | int retval = SCSI_MLQUEUE_HOST_BUSY; | 1411 | int retval = SCSI_MLQUEUE_HOST_BUSY; |
1202 | 1412 | ||
1203 | /* | 1413 | /* |
@@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | |||
1275 | { | 1485 | { |
1276 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1486 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1277 | 1487 | ||
1488 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1489 | if (!lu) | ||
1490 | return -ENOSYS; | ||
1491 | |||
1278 | sdev->allow_restart = 1; | 1492 | sdev->allow_restart = 1; |
1279 | 1493 | ||
1280 | /* | 1494 | /* |
@@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1319 | { | 1533 | { |
1320 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1534 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1321 | 1535 | ||
1322 | fw_notify("sbp2_scsi_abort\n"); | 1536 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); |
1323 | sbp2_agent_reset(lu); | 1537 | sbp2_agent_reset(lu); |
1324 | sbp2_cancel_orbs(lu); | 1538 | sbp2_cancel_orbs(lu); |
1325 | 1539 | ||
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9aa..e47bb040197a 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card) | |||
383 | card->color++; | 383 | card->color++; |
384 | if (card->local_node != NULL) | 384 | if (card->local_node != NULL) |
385 | for_each_fw_node(card, card->local_node, report_lost_node); | 385 | for_each_fw_node(card, card->local_node, report_lost_node); |
386 | card->local_node = NULL; | ||
386 | spin_unlock_irqrestore(&card->lock, flags); | 387 | spin_unlock_irqrestore(&card->lock, flags); |
387 | } | 388 | } |
388 | 389 | ||
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b57408..09cb72870454 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 31 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
31 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 32 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; | |||
219 | struct fw_card { | 220 | struct fw_card { |
220 | const struct fw_card_driver *driver; | 221 | const struct fw_card_driver *driver; |
221 | struct device *device; | 222 | struct device *device; |
223 | atomic_t device_count; | ||
222 | struct kref kref; | 224 | struct kref kref; |
223 | 225 | ||
224 | int node_id; | 226 | int node_id; |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 310e497b5838..c8d0e8715997 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector, | |||
670 | * and attempt to recover if there are problems. Returns 0 if everything's | 670 | * and attempt to recover if there are problems. Returns 0 if everything's |
671 | * ok; nonzero if the request has been terminated. | 671 | * ok; nonzero if the request has been terminated. |
672 | */ | 672 | */ |
673 | static | 673 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, |
674 | int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | 674 | int len, int ireason, int rw) |
675 | { | 675 | { |
676 | /* | 676 | /* |
677 | * ireason == 0: the drive wants to receive data from us | 677 | * ireason == 0: the drive wants to receive data from us |
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | |||
701 | drive->name, __FUNCTION__, ireason); | 701 | drive->name, __FUNCTION__, ireason); |
702 | } | 702 | } |
703 | 703 | ||
704 | if (rq->cmd_type == REQ_TYPE_ATA_PC) | ||
705 | rq->cmd_flags |= REQ_FAILED; | ||
706 | |||
704 | cdrom_end_request(drive, 0); | 707 | cdrom_end_request(drive, 0); |
705 | return -1; | 708 | return -1; |
706 | } | 709 | } |
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1071 | /* | 1074 | /* |
1072 | * check which way to transfer data | 1075 | * check which way to transfer data |
1073 | */ | 1076 | */ |
1074 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | 1077 | if (ide_cd_check_ireason(drive, rq, len, ireason, write)) |
1075 | if (ide_cd_check_ireason(drive, len, ireason, write)) | 1078 | return ide_stopped; |
1076 | return ide_stopped; | ||
1077 | 1079 | ||
1078 | if (blk_fs_request(rq) && write == 0) { | 1080 | if (blk_fs_request(rq)) { |
1081 | if (write == 0) { | ||
1079 | int nskip; | 1082 | int nskip; |
1080 | 1083 | ||
1081 | if (ide_cd_check_transfer_size(drive, len)) { | 1084 | if (ide_cd_check_transfer_size(drive, len)) { |
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1101 | if (ireason == 0) { | 1104 | if (ireason == 0) { |
1102 | write = 1; | 1105 | write = 1; |
1103 | xferfunc = HWIF(drive)->atapi_output_bytes; | 1106 | xferfunc = HWIF(drive)->atapi_output_bytes; |
1104 | } else if (ireason == 2 || (ireason == 1 && | 1107 | } else { |
1105 | (blk_fs_request(rq) || blk_pc_request(rq)))) { | ||
1106 | write = 0; | 1108 | write = 0; |
1107 | xferfunc = HWIF(drive)->atapi_input_bytes; | 1109 | xferfunc = HWIF(drive)->atapi_input_bytes; |
1108 | } else { | ||
1109 | printk(KERN_ERR "%s: %s: The drive " | ||
1110 | "appears confused (ireason = 0x%02x). " | ||
1111 | "Trying to recover by ending request.\n", | ||
1112 | drive->name, __FUNCTION__, ireason); | ||
1113 | goto end_request; | ||
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | /* | 1112 | /* |
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1182 | else | 1178 | else |
1183 | rq->data += blen; | 1179 | rq->data += blen; |
1184 | } | 1180 | } |
1181 | if (!write && blk_sense_request(rq)) | ||
1182 | rq->sense_len += blen; | ||
1185 | } | 1183 | } |
1186 | 1184 | ||
1187 | if (write && blk_sense_request(rq)) | ||
1188 | rq->sense_len += thislen; | ||
1189 | |||
1190 | /* | 1185 | /* |
1191 | * pad, if necessary | 1186 | * pad, if necessary |
1192 | */ | 1187 | */ |
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { | |||
1931 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1926 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1932 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1927 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1933 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1928 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1929 | { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | ||
1934 | { NULL, NULL, 0 } | 1930 | { NULL, NULL, 0 } |
1935 | }; | 1931 | }; |
1936 | 1932 | ||
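The ide-cd quirks list gains the Optiarc AD-5200A, whitelisting it for audio playback. Entries match on the model string reported by the drive, an optional firmware string (NULL here appears to mean any revision), and the flags to apply, so adding another drive is a one-line change of the same shape. The model name below is a made-up placeholder, not a real entry:

    { "VENDOR EXAMPLE-DRIVE", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK },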
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 8f5bed471050..39501d130256 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive) | |||
867 | 867 | ||
868 | /* Only print cache size when it was specified */ | 868 | /* Only print cache size when it was specified */ |
869 | if (id->buf_size) | 869 | if (id->buf_size) |
870 | printk (" w/%dKiB Cache", id->buf_size/2); | 870 | printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2); |
871 | 871 | ||
872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", | 872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", |
873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); | 873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); |
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive) | |||
949 | return; | 949 | return; |
950 | } | 950 | } |
951 | 951 | ||
952 | printk("Shutdown: %s\n", drive->name); | 952 | printk(KERN_INFO "Shutdown: %s\n", drive->name); |
953 | |||
953 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); | 954 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); |
954 | } | 955 | } |
955 | 956 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d0e7b537353e..2de99e4be5c9 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -1,9 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * IDE DMA support (including IDE PCI BM-DMA). | ||
3 | * | ||
2 | * Copyright (C) 1995-1998 Mark Lord | 4 | * Copyright (C) 1995-1998 Mark Lord |
3 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 5 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
4 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz | 6 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz |
5 | * | 7 | * |
6 | * May be copied or modified under the terms of the GNU General Public License | 8 | * May be copied or modified under the terms of the GNU General Public License |
9 | * | ||
10 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
7 | */ | 11 | */ |
8 | 12 | ||
9 | /* | 13 | /* |
@@ -11,49 +15,6 @@ | |||
11 | */ | 15 | */ |
12 | 16 | ||
13 | /* | 17 | /* |
14 | * This module provides support for the bus-master IDE DMA functions | ||
15 | * of various PCI chipsets, including the Intel PIIX (i82371FB for | ||
16 | * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and | ||
17 | * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset) | ||
18 | * ("PIIX" stands for "PCI ISA IDE Xcellerator"). | ||
19 | * | ||
20 | * Pretty much the same code works for other IDE PCI bus-mastering chipsets. | ||
21 | * | ||
22 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
23 | * | ||
24 | * By default, DMA support is prepared for use, but is currently enabled only | ||
25 | * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single), | ||
26 | * or which are recognized as "good" (see table below). Drives with only mode0 | ||
27 | * or mode1 (multi/single) DMA should also work with this chipset/driver | ||
28 | * (eg. MC2112A) but are not enabled by default. | ||
29 | * | ||
30 | * Use "hdparm -i" to view modes supported by a given drive. | ||
31 | * | ||
32 | * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling | ||
33 | * DMA support, but must be (re-)compiled against this kernel version or later. | ||
34 | * | ||
35 | * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting. | ||
36 | * If problems arise, ide.c will disable DMA operation after a few retries. | ||
37 | * This error recovery mechanism works and has been extremely well exercised. | ||
38 | * | ||
39 | * IDE drives, depending on their vintage, may support several different modes | ||
40 | * of DMA operation. The boot-time modes are indicated with a "*" in | ||
41 | * the "hdparm -i" listing, and can be changed with *knowledgeable* use of | ||
42 | * the "hdparm -X" feature. There is seldom a need to do this, as drives | ||
43 | * normally power-up with their "best" PIO/DMA modes enabled. | ||
44 | * | ||
45 | * Testing has been done with a rather extensive number of drives, | ||
46 | * with Quantum & Western Digital models generally outperforming the pack, | ||
47 | * and Fujitsu & Conner (and some Seagate which are really Conner) drives | ||
48 | * showing more lackluster throughput. | ||
49 | * | ||
50 | * Keep an eye on /var/adm/messages for "DMA disabled" messages. | ||
51 | * | ||
52 | * Some people have reported trouble with Intel Zappa motherboards. | ||
53 | * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0, | ||
54 | * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe | ||
55 | * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this). | ||
56 | * | ||
57 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for | 18 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for |
58 | * fixing the problem with the BIOS on some Acer motherboards. | 19 | * fixing the problem with the BIOS on some Acer motherboards. |
59 | * | 20 | * |
@@ -65,11 +26,6 @@ | |||
65 | * | 26 | * |
66 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> | 27 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> |
67 | * for supplying a Promise UDMA board & WD UDMA drive for this work! | 28 | * for supplying a Promise UDMA board & WD UDMA drive for this work! |
68 | * | ||
69 | * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports. | ||
70 | * | ||
71 | * ATA-66/100 and recovery functions, I forgot the rest...... | ||
72 | * | ||
73 | */ | 29 | */ |
74 | 30 | ||
75 | #include <linux/module.h> | 31 | #include <linux/module.h> |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4a2cb2868226..194ecb0049eb 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
756 | 756 | ||
757 | BUG_ON(hwif->present); | 757 | BUG_ON(hwif->present); |
758 | 758 | ||
759 | if (hwif->noprobe) | 759 | if (hwif->noprobe || |
760 | (hwif->drives[0].noprobe && hwif->drives[1].noprobe)) | ||
760 | return -EACCES; | 761 | return -EACCES; |
761 | 762 | ||
762 | /* | 763 | /* |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0598ecfd5f37..43e0e0557776 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive) | |||
3765 | g->fops = &idetape_block_ops; | 3765 | g->fops = &idetape_block_ops; |
3766 | ide_register_region(g); | 3766 | ide_register_region(g); |
3767 | 3767 | ||
3768 | printk(KERN_WARNING "It is possible that this driver does not have any" | ||
3769 | " users anymore and, as a result, it will be REMOVED soon." | ||
3770 | " Please notify Bart <bzolnier@gmail.com> or Boris" | ||
3771 | " <petkovbb@gmail.com> in case you still need it.\n"); | ||
3772 | |||
3768 | return 0; | 3773 | return 0; |
3769 | 3774 | ||
3770 | out_free_tape: | 3775 | out_free_tape: |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 477833f0daf5..fa16bc30bbc9 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore) | |||
590 | hwif->extra_ports = 0; | 590 | hwif->extra_ports = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | ||
594 | * Note that we only release the standard ports, | ||
595 | * and do not even try to handle any extra ports | ||
596 | * allocated for weird IDE interface chipsets. | ||
597 | */ | ||
598 | ide_hwif_release_regions(hwif); | 593 | ide_hwif_release_regions(hwif); |
599 | 594 | ||
600 | /* copy original settings */ | 595 | /* copy original settings */ |
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1036 | drive->nice1 = (arg >> IDE_NICE_1) & 1; | 1031 | drive->nice1 = (arg >> IDE_NICE_1) & 1; |
1037 | return 0; | 1032 | return 0; |
1038 | case HDIO_DRIVE_RESET: | 1033 | case HDIO_DRIVE_RESET: |
1039 | { | 1034 | if (!capable(CAP_SYS_ADMIN)) |
1040 | unsigned long flags; | 1035 | return -EACCES; |
1041 | if (!capable(CAP_SYS_ADMIN)) return -EACCES; | 1036 | |
1042 | |||
1043 | /* | 1037 | /* |
1044 | * Abort the current command on the | 1038 | * Abort the current command on the |
1045 | * group if there is one, taking | 1039 | * group if there is one, taking |
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1058 | ide_abort(drive, "drive reset"); | 1052 | ide_abort(drive, "drive reset"); |
1059 | 1053 | ||
1060 | BUG_ON(HWGROUP(drive)->handler); | 1054 | BUG_ON(HWGROUP(drive)->handler); |
1061 | 1055 | ||
1062 | /* Ensure nothing gets queued after we | 1056 | /* Ensure nothing gets queued after we |
1063 | drop the lock. Reset will clear the busy */ | 1057 | drop the lock. Reset will clear the busy */ |
1064 | 1058 | ||
1065 | HWGROUP(drive)->busy = 1; | 1059 | HWGROUP(drive)->busy = 1; |
1066 | spin_unlock_irqrestore(&ide_lock, flags); | 1060 | spin_unlock_irqrestore(&ide_lock, flags); |
1067 | (void) ide_do_reset(drive); | 1061 | (void) ide_do_reset(drive); |
1068 | 1062 | ||
1069 | return 0; | 1063 | return 0; |
1070 | } | ||
1071 | |||
1072 | case HDIO_GET_BUSSTATE: | 1064 | case HDIO_GET_BUSSTATE: |
1073 | if (!capable(CAP_SYS_ADMIN)) | 1065 | if (!capable(CAP_SYS_ADMIN)) |
1074 | return -EACCES; | 1066 | return -EACCES; |
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s) | |||
1449 | 1441 | ||
1450 | case -1: /* "noprobe" */ | 1442 | case -1: /* "noprobe" */ |
1451 | hwif->noprobe = 1; | 1443 | hwif->noprobe = 1; |
1452 | goto done; | 1444 | goto obsolete_option; |
1453 | 1445 | ||
1454 | case 1: /* base */ | 1446 | case 1: /* base */ |
1455 | vals[1] = vals[0] + 0x206; /* default ctl */ | 1447 | vals[1] = vals[0] + 0x206; /* default ctl */ |
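Editorial note, not part of this patch: the generic_ide_ioctl() rework above keeps the CAP_SYS_ADMIN check in front of HDIO_DRIVE_RESET, so an unprivileged caller should see EACCES. A minimal, hypothetical user-space sketch (device path is an assumption):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/hdreg.h>

int main(void)
{
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Expect failure with EACCES when not running with CAP_SYS_ADMIN */
	if (ioctl(fd, HDIO_DRIVE_RESET, NULL) != 0)
		fprintf(stderr, "HDIO_DRIVE_RESET: %s\n", strerror(errno));
	close(fd);
	return 0;
}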
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index bba29df5f21d..2f4f47ad602f 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) | |||
334 | hwif->drives[1].drive_data = t2; | 334 | hwif->drives[1].drive_data = t2; |
335 | } | 335 | } |
336 | 336 | ||
337 | /* | ||
338 | * qd_unsetup: | ||
339 | * | ||
340 | * called to unsetup an ata channel : back to default values, unlinks tuning | ||
341 | */ | ||
342 | /* | ||
343 | static void __exit qd_unsetup(ide_hwif_t *hwif) | ||
344 | { | ||
345 | u8 config = hwif->config_data; | ||
346 | int base = hwif->select_data; | ||
347 | void *set_pio_mode = (void *)hwif->set_pio_mode; | ||
348 | |||
349 | if (hwif->chipset != ide_qd65xx) | ||
350 | return; | ||
351 | |||
352 | printk(KERN_NOTICE "%s: back to defaults\n", hwif->name); | ||
353 | |||
354 | hwif->selectproc = NULL; | ||
355 | hwif->set_pio_mode = NULL; | ||
356 | |||
357 | if (set_pio_mode == (void *)qd6500_set_pio_mode) { | ||
358 | // will do it for both | ||
359 | outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
360 | } else if (set_pio_mode == (void *)qd6580_set_pio_mode) { | ||
361 | if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { | ||
362 | outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
363 | outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1])); | ||
364 | } else { | ||
365 | outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
366 | } | ||
367 | } else { | ||
368 | printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n"); | ||
369 | printk(KERN_WARNING "keeping settings !\n"); | ||
370 | } | ||
371 | } | ||
372 | */ | ||
373 | |||
374 | static const struct ide_port_info qd65xx_port_info __initdata = { | 337 | static const struct ide_port_info qd65xx_port_info __initdata = { |
375 | .chipset = ide_qd65xx, | 338 | .chipset = ide_qd65xx, |
376 | .host_flags = IDE_HFLAG_IO_32BIT | | 339 | .host_flags = IDE_HFLAG_IO_32BIT | |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base) | |||
444 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", | 407 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", |
445 | config, control, QD_ID3); | 408 | config, control, QD_ID3); |
446 | 409 | ||
410 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
411 | |||
447 | if (control & QD_CONTR_SEC_DISABLED) { | 412 | if (control & QD_CONTR_SEC_DISABLED) { |
448 | /* secondary disabled */ | 413 | /* secondary disabled */ |
449 | 414 | ||
@@ -460,8 +425,6 @@ static int __init qd_probe(int base) | |||
460 | 425 | ||
461 | ide_device_add(idx, &qd65xx_port_info); | 426 | ide_device_add(idx, &qd65xx_port_info); |
462 | 427 | ||
463 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
464 | |||
465 | return 1; | 428 | return 1; |
466 | } else { | 429 | } else { |
467 | ide_hwif_t *mate; | 430 | ide_hwif_t *mate; |
@@ -487,8 +450,6 @@ static int __init qd_probe(int base) | |||
487 | 450 | ||
488 | ide_device_add(idx, &qd65xx_port_info); | 451 | ide_device_add(idx, &qd65xx_port_info); |
489 | 452 | ||
490 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
491 | |||
492 | return 0; /* no other qd65xx possible */ | 453 | return 0; /* no other qd65xx possible */ |
493 | } | 454 | } |
494 | } | 455 | } |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index bd24dad3cfc6..ec667982809c 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void) | |||
787 | /* | 787 | /* |
788 | * Try to enable the secondary interface, if not already enabled | 788 | * Try to enable the secondary interface, if not already enabled |
789 | */ | 789 | */ |
790 | if (cmd_hwif1->noprobe) { | 790 | if (cmd_hwif1->noprobe || |
791 | (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) { | ||
791 | port2 = "not probed"; | 792 | port2 = "not probed"; |
792 | } else { | 793 | } else { |
793 | b = get_cmd640_reg(CNTRL); | 794 | b = get_cmd640_reg(CNTRL); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index d0f7bb8b8adf..6357bb6269ab 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1570 | if (rev < 3) | 1570 | if (rev < 3) |
1571 | info = &hpt36x; | 1571 | info = &hpt36x; |
1572 | else { | 1572 | else { |
1573 | static const struct hpt_info *hpt37x_info[] = | 1573 | switch (min_t(u8, rev, 6)) { |
1574 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; | 1574 | case 3: info = &hpt370; break; |
1575 | 1575 | case 4: info = &hpt370a; break; | |
1576 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; | 1576 | case 5: info = &hpt372; break; |
1577 | case 6: info = &hpt372n; break; | ||
1578 | } | ||
1577 | idx++; | 1579 | idx++; |
1578 | } | 1580 | } |
1579 | break; | 1581 | break; |
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1626 | return ide_setup_pci_device(dev, &d); | 1628 | return ide_setup_pci_device(dev, &d); |
1627 | } | 1629 | } |
1628 | 1630 | ||
1629 | static const struct pci_device_id hpt366_pci_tbl[] = { | 1631 | static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { |
1630 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, | 1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, |
1631 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, | 1633 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, |
1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, | 1634 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 28e155a9e2a5..9e2b1964d71a 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
184 | * Don't use this with devices which don't have this bug. | 184 | * Don't use this with devices which don't have this bug. |
185 | * | 185 | * |
186 | * - delay inquiry | ||
187 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
188 | * | ||
186 | * - override internal blacklist | 189 | * - override internal blacklist |
187 | * Instead of adding to the built-in blacklist, use only the workarounds | 190 | * Instead of adding to the built-in blacklist, use only the workarounds |
188 | * specified in the module load parameter. | 191 | * specified in the module load parameter. |
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
195 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 198 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
196 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 199 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
197 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 200 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
201 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
198 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 202 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
199 | ", or a combination)"); | 203 | ", or a combination)"); |
200 | 204 | ||
@@ -357,6 +361,11 @@ static const struct { | |||
357 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 361 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
358 | SBP2_WORKAROUND_MODE_SENSE_8, | 362 | SBP2_WORKAROUND_MODE_SENSE_8, |
359 | }, | 363 | }, |
364 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
365 | .firmware_revision = 0x002800, | ||
366 | .model_id = 0x000000, | ||
367 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
368 | }, | ||
360 | /* Initio bridges, actually only needed for some older ones */ { | 369 | /* Initio bridges, actually only needed for some older ones */ { |
361 | .firmware_revision = 0x000200, | 370 | .firmware_revision = 0x000200, |
362 | .model_id = SBP2_ROM_VALUE_WILDCARD, | 371 | .model_id = SBP2_ROM_VALUE_WILDCARD, |
@@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu) | |||
914 | sbp2_agent_reset(lu, 1); | 923 | sbp2_agent_reset(lu, 1); |
915 | sbp2_max_speed_and_size(lu); | 924 | sbp2_max_speed_and_size(lu); |
916 | 925 | ||
926 | if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
927 | ssleep(SBP2_INQUIRY_DELAY); | ||
928 | |||
917 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); | 929 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); |
918 | if (error) { | 930 | if (error) { |
919 | SBP2_ERR("scsi_add_device failed"); | 931 | SBP2_ERR("scsi_add_device failed"); |
@@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
1962 | { | 1974 | { |
1963 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; | 1975 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; |
1964 | 1976 | ||
1977 | if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0) | ||
1978 | return -ENODEV; | ||
1979 | |||
1965 | lu->sdev = sdev; | 1980 | lu->sdev = sdev; |
1966 | sdev->allow_restart = 1; | 1981 | sdev->allow_restart = 1; |
1967 | 1982 | ||
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index d2ecb0d8a1bb..80d8e097b065 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -343,6 +343,8 @@ enum sbp2lu_state_types { | |||
343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
346 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
347 | #define SBP2_INQUIRY_DELAY 12 | ||
346 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 348 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
347 | 349 | ||
348 | #endif /* SBP2_H */ | 350 | #endif /* SBP2_H */ |
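Editorial note, not part of this patch: "workarounds" is a bitmask module parameter, so the new delay-inquiry flag combines with the existing ones. A small sketch using the constants from sbp2.h above (the modprobe value in the comment is illustrative):

#include <stdio.h>

#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_DELAY_INQUIRY	0x10

int main(void)
{
	unsigned int workarounds = SBP2_WORKAROUND_INQUIRY_36 |
				   SBP2_WORKAROUND_DELAY_INQUIRY;

	/* prints 0x12 -- what "modprobe sbp2 workarounds=0x12" would request */
	printf("workarounds=0x%x\n", workarounds);
	return 0;
}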
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index 73bfd1656f86..b8797c66676d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c | |||
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list, | |||
136 | 136 | ||
137 | /* Find largest page shift we can use to cover buffers */ | 137 | /* Find largest page shift we can use to cover buffers */ |
138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) | 138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) |
139 | if (num_phys_buf > 1) { | 139 | if ((1ULL << *shift) & mask) |
140 | if ((1ULL << *shift) & mask) | 140 | break; |
141 | break; | ||
142 | } else | ||
143 | if (1ULL << *shift >= | ||
144 | buffer_list[0].size + | ||
145 | (buffer_list[0].addr & ((1ULL << *shift) - 1))) | ||
146 | break; | ||
147 | 141 | ||
148 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); | 142 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); |
149 | buffer_list[0].addr &= ~0ull << *shift; | 143 | buffer_list[0].addr &= ~0ull << *shift; |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7f8853b44ee1..b2112f5a422f 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
567 | 567 | ||
568 | /* Init the adapter */ | 568 | /* Init the adapter */ |
569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); | 569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); |
570 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
571 | if (!nesdev->nesadapter) { | 570 | if (!nesdev->nesadapter) { |
572 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); | 571 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); |
573 | ret = -ENOMEM; | 572 | ret = -ENOMEM; |
574 | goto bail5; | 573 | goto bail5; |
575 | } | 574 | } |
575 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
576 | 576 | ||
577 | /* nesdev->base_doorbell_index = | 577 | /* nesdev->base_doorbell_index = |
578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ | 578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index fd57e8a1582f..a48b288618ec 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -285,6 +285,21 @@ struct nes_device { | |||
285 | }; | 285 | }; |
286 | 286 | ||
287 | 287 | ||
288 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | ||
289 | { | ||
290 | u32 crc_value; | ||
291 | crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad)); | ||
292 | |||
293 | /* | ||
294 | * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc | ||
295 | * state in cpu order"), behavior of crc32c changes on | ||
296 | * big-endian platforms. Our algorithm expects the previous | ||
297 | * behavior; otherwise we have RDMA connection establishment | ||
298 | * issues on big-endian. | ||
299 | */ | ||
300 | return cpu_to_le32(crc_value); | ||
301 | } | ||
302 | |||
288 | static inline void | 303 | static inline void |
289 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | 304 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) |
290 | { | 305 | { |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index bd5cfeaac203..39adb267fb15 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
370 | int ret = 0; | 370 | int ret = 0; |
371 | u32 was_timer_set; | 371 | u32 was_timer_set; |
372 | 372 | ||
373 | if (!cm_node) | ||
374 | return -EINVAL; | ||
373 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | 375 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); |
374 | if (!new_send) | 376 | if (!new_send) |
375 | return -1; | 377 | return -1; |
376 | if (!cm_node) | ||
377 | return -EINVAL; | ||
378 | 378 | ||
379 | /* new_send->timetosend = currenttime */ | 379 | /* new_send->timetosend = currenttime */ |
380 | new_send->retrycount = NES_DEFAULT_RETRYS; | 380 | new_send->retrycount = NES_DEFAULT_RETRYS; |
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
948 | 948 | ||
949 | kfree(listener); | 949 | kfree(listener); |
950 | listener = NULL; | ||
950 | ret = 0; | 951 | ret = 0; |
951 | cm_listens_destroyed++; | 952 | cm_listens_destroyed++; |
952 | } else { | 953 | } else { |
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2319 | struct iw_cm_event cm_event; | 2320 | struct iw_cm_event cm_event; |
2320 | struct nes_hw_qp_wqe *wqe; | 2321 | struct nes_hw_qp_wqe *wqe; |
2321 | struct nes_v4_quad nes_quad; | 2322 | struct nes_v4_quad nes_quad; |
2323 | u32 crc_value; | ||
2322 | int ret; | 2324 | int ret; |
2323 | 2325 | ||
2324 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 2326 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2435 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2437 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2436 | 2438 | ||
2437 | /* Produce hash key */ | 2439 | /* Produce hash key */ |
2438 | nesqp->hte_index = cpu_to_be32( | 2440 | crc_value = get_crc_value(&nes_quad); |
2439 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2441 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2440 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 2442 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2441 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 2443 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2442 | 2444 | ||
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2750 | struct iw_cm_event cm_event; | 2752 | struct iw_cm_event cm_event; |
2751 | struct nes_hw_qp_wqe *wqe; | 2753 | struct nes_hw_qp_wqe *wqe; |
2752 | struct nes_v4_quad nes_quad; | 2754 | struct nes_v4_quad nes_quad; |
2755 | u32 crc_value; | ||
2753 | int ret; | 2756 | int ret; |
2754 | 2757 | ||
2755 | /* get all our handles */ | 2758 | /* get all our handles */ |
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2827 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2830 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2828 | 2831 | ||
2829 | /* Produce hash key */ | 2832 | /* Produce hash key */ |
2830 | nesqp->hte_index = cpu_to_be32( | 2833 | crc_value = get_crc_value(&nes_quad); |
2831 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2834 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2832 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 2835 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
2833 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 2836 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
2834 | 2837 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 7c4c0fbf0abd..49e53e4c1ebe 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev) | |||
156 | 156 | ||
157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | 157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); |
158 | 158 | ||
159 | if (shared_timer->cq_count_old < cq_count) { | 159 | if (shared_timer->cq_count_old <= cq_count) |
160 | if (cq_count > shared_timer->threshold_low) | 160 | shared_timer->cq_direction_downward = 0; |
161 | shared_timer->cq_direction_downward=0; | 161 | else |
162 | } | ||
163 | if (shared_timer->cq_count_old >= cq_count) | ||
164 | shared_timer->cq_direction_downward++; | 162 | shared_timer->cq_direction_downward++; |
165 | shared_timer->cq_count_old = cq_count; | 163 | shared_timer->cq_count_old = cq_count; |
166 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { | 164 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { |
167 | if (cq_count <= shared_timer->threshold_low) { | 165 | if (cq_count <= shared_timer->threshold_low && |
166 | shared_timer->threshold_low > 4) { | ||
168 | shared_timer->threshold_low = shared_timer->threshold_low/2; | 167 | shared_timer->threshold_low = shared_timer->threshold_low/2; |
169 | shared_timer->cq_direction_downward=0; | 168 | shared_timer->cq_direction_downward=0; |
170 | nesdev->currcq_count = 0; | 169 | nesdev->currcq_count = 0; |
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev) | |||
1728 | nesdev->int_req &= ~NES_INT_TIMER; | 1727 | nesdev->int_req &= ~NES_INT_TIMER; |
1729 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1728 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1730 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1729 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1731 | nesadapter->tune_timer.timer_in_use_old = 0; | ||
1732 | } | 1730 | } |
1733 | nesdev->deepcq_count = 0; | 1731 | nesdev->deepcq_count = 0; |
1734 | return 1; | 1732 | return 1; |
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param) | |||
1867 | nesdev->int_req &= ~NES_INT_TIMER; | 1865 | nesdev->int_req &= ~NES_INT_TIMER; |
1868 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1866 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1869 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1867 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1870 | nesdev->nesadapter->tune_timer.timer_in_use_old = 0; | ||
1871 | } else { | 1868 | } else { |
1872 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | 1869 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); |
1873 | } | 1870 | } |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1e10df550c9e..b7e2844f096b 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -962,7 +962,7 @@ struct nes_arp_entry { | |||
962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 | 962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 |
963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 | 963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 |
964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 | 964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 |
965 | #define NES_NIC_CQ_DOWNWARD_TREND 8 | 965 | #define NES_NIC_CQ_DOWNWARD_TREND 16 |
966 | 966 | ||
967 | struct nes_hw_tune_timer { | 967 | struct nes_hw_tune_timer { |
968 | //u16 cq_count; | 968 | //u16 cq_count; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4dafbe16e82a..a651e9d9f0ef 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, | |||
929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); | 929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); |
930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", | 930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", |
931 | nespd->mmap_db_index, nespd->pd_id); | 931 | nespd->mmap_db_index, nespd->pd_id); |
932 | if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { | 932 | if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { |
933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); | 933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); |
934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | 934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); |
935 | kfree(nespd); | 935 | kfree(nespd); |
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
1327 | (long long unsigned int)req.user_wqe_buffers); | 1327 | (long long unsigned int)req.user_wqe_buffers); |
1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | 1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); |
1329 | kfree(nesqp->allocated_buffer); | 1329 | kfree(nesqp->allocated_buffer); |
1330 | return ERR_PTR(-ENOMEM); | 1330 | return ERR_PTR(-EFAULT); |
1331 | } | 1331 | } |
1332 | } | 1332 | } |
1333 | 1333 | ||
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1674 | } | 1674 | } |
1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", | 1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", |
1676 | (unsigned long)req.user_cq_buffer, entries); | 1676 | (unsigned long)req.user_cq_buffer, entries); |
1677 | err = 1; | ||
1677 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { | 1678 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { |
1678 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { | 1679 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { |
1679 | list_del(&nespbl->list); | 1680 | list_del(&nespbl->list); |
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1686 | if (err) { | 1687 | if (err) { |
1687 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | 1688 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); |
1688 | kfree(nescq); | 1689 | kfree(nescq); |
1689 | return ERR_PTR(err); | 1690 | return ERR_PTR(-EFAULT); |
1690 | } | 1691 | } |
1691 | 1692 | ||
1692 | pbl_entries = nespbl->pbl_size >> 3; | 1693 | pbl_entries = nespbl->pbl_size >> 3; |
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1831 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 1832 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
1832 | } | 1833 | } |
1833 | } | 1834 | } |
1834 | nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," | ||
1835 | " minor code = 0x%04X\n", | ||
1836 | nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); | ||
1837 | if (!context) | 1835 | if (!context) |
1838 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1836 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1839 | nescq->hw_cq.cq_pbase); | 1837 | nescq->hw_cq.cq_pbase); |
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 8b10d9f23bef..c5263d63aca3 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP | |||
42 | 42 | ||
43 | config INPUT_APANEL | 43 | config INPUT_APANEL |
44 | tristate "Fujitsu Lifebook Application Panel buttons" | 44 | tristate "Fujitsu Lifebook Application Panel buttons" |
45 | depends on X86 | 45 | depends on X86 && I2C && LEDS_CLASS |
46 | select I2C_I801 | ||
47 | select INPUT_POLLDEV | 46 | select INPUT_POLLDEV |
48 | select CHECK_SIGNATURE | 47 | select CHECK_SIGNATURE |
49 | help | 48 | help |
50 | Say Y here for support of the Application Panel buttons, used on | 49 | Say Y here for support of the Application Panel buttons, used on |
51 | Fujitsu Lifebook. These are attached to the mainboard through | 50 | Fujitsu Lifebook. These are attached to the mainboard through |
52 | an SMBus interface managed by the I2C Intel ICH (i801) driver. | 51 | an SMBus interface managed by the I2C Intel ICH (i801) driver, |
52 | which you should also build for this kernel. | ||
53 | 53 | ||
54 | To compile this driver as a module, choose M here: the module will | 54 | To compile this driver as a module, choose M here: the module will |
55 | be called apanel. | 55 | be called apanel. |
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 7993e01f9fc5..76043dedba5b 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c | |||
@@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
725 | 725 | ||
726 | switch (adapter->type) { | 726 | switch (adapter->type) { |
727 | case AVM_FRITZ_PCIV2: | 727 | case AVM_FRITZ_PCIV2: |
728 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
729 | "fcpcipnp", adapter); | ||
730 | break; | ||
731 | case AVM_FRITZ_PCI: | ||
732 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
733 | "fcpcipnp", adapter); | ||
734 | break; | ||
735 | case AVM_FRITZ_PNP: | ||
736 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
737 | "fcpcipnp", adapter); | ||
738 | break; | ||
739 | } | ||
740 | if (retval) | ||
741 | goto err_region; | ||
742 | |||
743 | switch (adapter->type) { | ||
744 | case AVM_FRITZ_PCIV2: | ||
745 | case AVM_FRITZ_PCI: | 728 | case AVM_FRITZ_PCI: |
746 | val = inl(adapter->io); | 729 | val = inl(adapter->io); |
747 | break; | 730 | break; |
@@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
796 | 779 | ||
797 | switch (adapter->type) { | 780 | switch (adapter->type) { |
798 | case AVM_FRITZ_PCIV2: | 781 | case AVM_FRITZ_PCIV2: |
782 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
783 | "fcpcipnp", adapter); | ||
784 | break; | ||
785 | case AVM_FRITZ_PCI: | ||
786 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
787 | "fcpcipnp", adapter); | ||
788 | break; | ||
789 | case AVM_FRITZ_PNP: | ||
790 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
791 | "fcpcipnp", adapter); | ||
792 | break; | ||
793 | } | ||
794 | if (retval) | ||
795 | goto err_region; | ||
796 | |||
797 | switch (adapter->type) { | ||
798 | case AVM_FRITZ_PCIV2: | ||
799 | fcpci2_init(adapter); | 799 | fcpci2_init(adapter); |
800 | isacsx_setup(&adapter->isac); | 800 | isacsx_setup(&adapter->isac); |
801 | break; | 801 | break; |
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index f93de4a30355..78f7660c1d0e 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c | |||
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) | |||
906 | sprintf(rs, "\r\n0-2"); | 906 | sprintf(rs, "\r\n0-2"); |
907 | isdn_tty_at_cout(rs, info); | 907 | isdn_tty_at_cout(rs, info); |
908 | } else { | 908 | } else { |
909 | if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) | 909 | if ((f->phase != ISDN_FAX_PHASE_D) || |
910 | (!(info->faxonline & 1))) | ||
910 | PARSE_ERROR1; | 911 | PARSE_ERROR1; |
911 | par = isdn_getnum(p); | 912 | par = isdn_getnum(p); |
912 | if ((par < 0) || (par > 2)) | 913 | if ((par < 0) || (par > 2)) |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 655ef9a3f4df..a335c85a736e 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1289 | } | 1289 | } |
1290 | break; | 1290 | break; |
1291 | case ISDN_CMD_CLREAZ: | 1291 | case ISDN_CMD_CLREAZ: |
1292 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1292 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1293 | return -ENODEV; | 1293 | return -ENODEV; |
1294 | if (card->leased) | 1294 | if (card->leased) |
1295 | break; | 1295 | break; |
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1333 | } | 1333 | } |
1334 | break; | 1334 | break; |
1335 | case ISDN_CMD_SETL3: | 1335 | case ISDN_CMD_SETL3: |
1336 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1336 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1337 | return -ENODEV; | 1337 | return -ENODEV; |
1338 | return 0; | 1338 | return 0; |
1339 | default: | 1339 | default: |
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) | |||
1380 | isdnloop_card *card = isdnloop_findcard(id); | 1380 | isdnloop_card *card = isdnloop_findcard(id); |
1381 | 1381 | ||
1382 | if (card) { | 1382 | if (card) { |
1383 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1383 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1384 | return -ENODEV; | 1384 | return -ENODEV; |
1385 | return (isdnloop_writecmd(buf, len, 1, card)); | 1385 | return (isdnloop_writecmd(buf, len, 1, card)); |
1386 | } | 1386 | } |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7aeceedcf7d4..831aed9c56ff 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1047,6 +1047,11 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) | 1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) |
1048 | return; | 1048 | return; |
1049 | bitmap->daemon_lastrun = jiffies; | 1049 | bitmap->daemon_lastrun = jiffies; |
1050 | if (bitmap->allclean) { | ||
1051 | bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
1052 | return; | ||
1053 | } | ||
1054 | bitmap->allclean = 1; | ||
1050 | 1055 | ||
1051 | for (j = 0; j < bitmap->chunks; j++) { | 1056 | for (j = 0; j < bitmap->chunks; j++) { |
1052 | bitmap_counter_t *bmc; | 1057 | bitmap_counter_t *bmc; |
@@ -1068,8 +1073,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1068 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); | 1073 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); |
1069 | 1074 | ||
1070 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1075 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1071 | if (need_write) | 1076 | if (need_write) { |
1072 | write_page(bitmap, page, 0); | 1077 | write_page(bitmap, page, 0); |
1078 | bitmap->allclean = 0; | ||
1079 | } | ||
1073 | continue; | 1080 | continue; |
1074 | } | 1081 | } |
1075 | 1082 | ||
@@ -1098,6 +1105,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1098 | /* | 1105 | /* |
1099 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); | 1106 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); |
1100 | */ | 1107 | */ |
1108 | if (*bmc) | ||
1109 | bitmap->allclean = 0; | ||
1110 | |||
1101 | if (*bmc == 2) { | 1111 | if (*bmc == 2) { |
1102 | *bmc=1; /* maybe clear the bit next time */ | 1112 | *bmc=1; /* maybe clear the bit next time */ |
1103 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1113 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
@@ -1132,6 +1142,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1132 | } | 1142 | } |
1133 | } | 1143 | } |
1134 | 1144 | ||
1145 | if (bitmap->allclean == 0) | ||
1146 | bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; | ||
1135 | } | 1147 | } |
1136 | 1148 | ||
1137 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1149 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
@@ -1226,6 +1238,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
1226 | sectors -= blocks; | 1238 | sectors -= blocks; |
1227 | else sectors = 0; | 1239 | else sectors = 0; |
1228 | } | 1240 | } |
1241 | bitmap->allclean = 0; | ||
1229 | return 0; | 1242 | return 0; |
1230 | } | 1243 | } |
1231 | 1244 | ||
@@ -1296,6 +1309,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | |||
1296 | } | 1309 | } |
1297 | } | 1310 | } |
1298 | spin_unlock_irq(&bitmap->lock); | 1311 | spin_unlock_irq(&bitmap->lock); |
1312 | bitmap->allclean = 0; | ||
1299 | return rv; | 1313 | return rv; |
1300 | } | 1314 | } |
1301 | 1315 | ||
@@ -1332,6 +1346,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab | |||
1332 | } | 1346 | } |
1333 | unlock: | 1347 | unlock: |
1334 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1348 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1349 | bitmap->allclean = 0; | ||
1335 | } | 1350 | } |
1336 | 1351 | ||
1337 | void bitmap_close_sync(struct bitmap *bitmap) | 1352 | void bitmap_close_sync(struct bitmap *bitmap) |
@@ -1399,7 +1414,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n | |||
1399 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1414 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
1400 | } | 1415 | } |
1401 | spin_unlock_irq(&bitmap->lock); | 1416 | spin_unlock_irq(&bitmap->lock); |
1402 | 1417 | bitmap->allclean = 0; | |
1403 | } | 1418 | } |
1404 | 1419 | ||
1405 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ | 1420 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 7da6ec244e15..827824a9f3e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; | 1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; |
1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; | 1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; |
1107 | if (rdev->sb_size & bmask) | 1107 | if (rdev->sb_size & bmask) |
1108 | rdev-> sb_size = (rdev->sb_size | bmask)+1; | 1108 | rdev->sb_size = (rdev->sb_size | bmask) + 1; |
1109 | |||
1110 | if (minor_version | ||
1111 | && rdev->data_offset < sb_offset + (rdev->sb_size/512)) | ||
1112 | return -EINVAL; | ||
1109 | 1113 | ||
1110 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) | 1114 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) |
1111 | rdev->desc_nr = -1; | 1115 | rdev->desc_nr = -1; |
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1137 | else | 1141 | else |
1138 | ret = 0; | 1142 | ret = 0; |
1139 | } | 1143 | } |
1140 | if (minor_version) | 1144 | if (minor_version) |
1141 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; | 1145 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; |
1142 | else | 1146 | else |
1143 | rdev->size = rdev->sb_offset; | 1147 | rdev->size = rdev->sb_offset; |
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
1499 | free_disk_sb(rdev); | 1503 | free_disk_sb(rdev); |
1500 | list_del_init(&rdev->same_set); | 1504 | list_del_init(&rdev->same_set); |
1501 | #ifndef MODULE | 1505 | #ifndef MODULE |
1502 | md_autodetect_dev(rdev->bdev->bd_dev); | 1506 | if (test_bit(AutoDetected, &rdev->flags)) |
1507 | md_autodetect_dev(rdev->bdev->bd_dev); | ||
1503 | #endif | 1508 | #endif |
1504 | unlock_rdev(rdev); | 1509 | unlock_rdev(rdev); |
1505 | kobject_put(&rdev->kobj); | 1510 | kobject_put(&rdev->kobj); |
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
1996 | char *e; | 2001 | char *e; |
1997 | unsigned long long size = simple_strtoull(buf, &e, 10); | 2002 | unsigned long long size = simple_strtoull(buf, &e, 10); |
1998 | unsigned long long oldsize = rdev->size; | 2003 | unsigned long long oldsize = rdev->size; |
2004 | mddev_t *my_mddev = rdev->mddev; | ||
2005 | |||
1999 | if (e==buf || (*e && *e != '\n')) | 2006 | if (e==buf || (*e && *e != '\n')) |
2000 | return -EINVAL; | 2007 | return -EINVAL; |
2001 | if (rdev->mddev->pers) | 2008 | if (my_mddev->pers) |
2002 | return -EBUSY; | 2009 | return -EBUSY; |
2003 | rdev->size = size; | 2010 | rdev->size = size; |
2004 | if (size > oldsize && rdev->mddev->external) { | 2011 | if (size > oldsize && rdev->mddev->external) { |
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2011 | int overlap = 0; | 2018 | int overlap = 0; |
2012 | struct list_head *tmp, *tmp2; | 2019 | struct list_head *tmp, *tmp2; |
2013 | 2020 | ||
2014 | mddev_unlock(rdev->mddev); | 2021 | mddev_unlock(my_mddev); |
2015 | for_each_mddev(mddev, tmp) { | 2022 | for_each_mddev(mddev, tmp) { |
2016 | mdk_rdev_t *rdev2; | 2023 | mdk_rdev_t *rdev2; |
2017 | 2024 | ||
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2031 | break; | 2038 | break; |
2032 | } | 2039 | } |
2033 | } | 2040 | } |
2034 | mddev_lock(rdev->mddev); | 2041 | mddev_lock(my_mddev); |
2035 | if (overlap) { | 2042 | if (overlap) { |
2036 | /* Someone else could have slipped in a size | 2043 | /* Someone else could have slipped in a size |
2037 | * change here, but doing so is just silly. | 2044 | * change here, but doing so is just silly. |
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2043 | return -EBUSY; | 2050 | return -EBUSY; |
2044 | } | 2051 | } |
2045 | } | 2052 | } |
2046 | if (size < rdev->mddev->size || rdev->mddev->size == 0) | 2053 | if (size < my_mddev->size || my_mddev->size == 0) |
2047 | rdev->mddev->size = size; | 2054 | my_mddev->size = size; |
2048 | return len; | 2055 | return len; |
2049 | } | 2056 | } |
2050 | 2057 | ||
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
2065 | { | 2072 | { |
2066 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2073 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2067 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2074 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2075 | mddev_t *mddev = rdev->mddev; | ||
2076 | ssize_t rv; | ||
2068 | 2077 | ||
2069 | if (!entry->show) | 2078 | if (!entry->show) |
2070 | return -EIO; | 2079 | return -EIO; |
2071 | return entry->show(rdev, page); | 2080 | |
2081 | rv = mddev ? mddev_lock(mddev) : -EBUSY; | ||
2082 | if (!rv) { | ||
2083 | if (rdev->mddev == NULL) | ||
2084 | rv = -EBUSY; | ||
2085 | else | ||
2086 | rv = entry->show(rdev, page); | ||
2087 | mddev_unlock(mddev); | ||
2088 | } | ||
2089 | return rv; | ||
2072 | } | 2090 | } |
2073 | 2091 | ||
2074 | static ssize_t | 2092 | static ssize_t |
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2077 | { | 2095 | { |
2078 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2096 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2079 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2097 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2080 | int rv; | 2098 | ssize_t rv; |
2099 | mddev_t *mddev = rdev->mddev; | ||
2081 | 2100 | ||
2082 | if (!entry->store) | 2101 | if (!entry->store) |
2083 | return -EIO; | 2102 | return -EIO; |
2084 | if (!capable(CAP_SYS_ADMIN)) | 2103 | if (!capable(CAP_SYS_ADMIN)) |
2085 | return -EACCES; | 2104 | return -EACCES; |
2086 | rv = mddev_lock(rdev->mddev); | 2105 | rv = mddev ? mddev_lock(mddev): -EBUSY; |
2087 | if (!rv) { | 2106 | if (!rv) { |
2088 | rv = entry->store(rdev, page, length); | 2107 | if (rdev->mddev == NULL) |
2108 | rv = -EBUSY; | ||
2109 | else | ||
2110 | rv = entry->store(rdev, page, length); | ||
2089 | mddev_unlock(rdev->mddev); | 2111 | mddev_unlock(rdev->mddev); |
2090 | } | 2112 | } |
2091 | return rv; | 2113 | return rv; |
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
5351 | mddev->ro = 0; | 5373 | mddev->ro = 0; |
5352 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 5374 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
5353 | md_wakeup_thread(mddev->thread); | 5375 | md_wakeup_thread(mddev->thread); |
5376 | md_wakeup_thread(mddev->sync_thread); | ||
5354 | } | 5377 | } |
5355 | atomic_inc(&mddev->writes_pending); | 5378 | atomic_inc(&mddev->writes_pending); |
5356 | if (mddev->in_sync) { | 5379 | if (mddev->in_sync) { |
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part) | |||
6021 | MD_BUG(); | 6044 | MD_BUG(); |
6022 | continue; | 6045 | continue; |
6023 | } | 6046 | } |
6047 | set_bit(AutoDetected, &rdev->flags); | ||
6024 | list_add(&rdev->same_set, &pending_raid_disks); | 6048 | list_add(&rdev->same_set, &pending_raid_disks); |
6025 | i_passed++; | 6049 | i_passed++; |
6026 | } | 6050 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5c7fef091cec..ff61b309129a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits) | |||
592 | } | 592 | } |
593 | 593 | ||
594 | 594 | ||
595 | static int flush_pending_writes(conf_t *conf) | ||
596 | { | ||
597 | /* Any writes that have been queued but are awaiting | ||
598 | * bitmap updates get flushed here. | ||
599 | * We return 1 if any requests were actually submitted. | ||
600 | */ | ||
601 | int rv = 0; | ||
602 | |||
603 | spin_lock_irq(&conf->device_lock); | ||
604 | |||
605 | if (conf->pending_bio_list.head) { | ||
606 | struct bio *bio; | ||
607 | bio = bio_list_get(&conf->pending_bio_list); | ||
608 | blk_remove_plug(conf->mddev->queue); | ||
609 | spin_unlock_irq(&conf->device_lock); | ||
610 | /* flush any pending bitmap writes to | ||
611 | * disk before proceeding w/ I/O */ | ||
612 | bitmap_unplug(conf->mddev->bitmap); | ||
613 | |||
614 | while (bio) { /* submit pending writes */ | ||
615 | struct bio *next = bio->bi_next; | ||
616 | bio->bi_next = NULL; | ||
617 | generic_make_request(bio); | ||
618 | bio = next; | ||
619 | } | ||
620 | rv = 1; | ||
621 | } else | ||
622 | spin_unlock_irq(&conf->device_lock); | ||
623 | return rv; | ||
624 | } | ||
625 | |||
595 | /* Barriers.... | 626 | /* Barriers.... |
596 | * Sometimes we need to suspend IO while we do something else, | 627 | * Sometimes we need to suspend IO while we do something else, |
597 | * either some resync/recovery, or reconfigure the array. | 628 | * either some resync/recovery, or reconfigure the array. |
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf) | |||
673 | /* stop syncio and normal IO and wait for everything to | 704 | /* stop syncio and normal IO and wait for everything to |
674 | * go quite. | 705 | * go quite. |
675 | * We increment barrier and nr_waiting, and then | 706 | * We increment barrier and nr_waiting, and then |
676 | * wait until barrier+nr_pending match nr_queued+2 | 707 | * wait until nr_pending match nr_queued+1 |
708 | * This is called in the context of one normal IO request | ||
709 | * that has failed. Thus any sync request that might be pending | ||
710 | * will be blocked by nr_pending, and we need to wait for | ||
711 | * pending IO requests to complete or be queued for re-try. | ||
712 | * Thus the number queued (nr_queued) plus this request (1) | ||
713 | * must match the number of pending IOs (nr_pending) before | ||
714 | * we continue. | ||
677 | */ | 715 | */ |
678 | spin_lock_irq(&conf->resync_lock); | 716 | spin_lock_irq(&conf->resync_lock); |
679 | conf->barrier++; | 717 | conf->barrier++; |
680 | conf->nr_waiting++; | 718 | conf->nr_waiting++; |
681 | wait_event_lock_irq(conf->wait_barrier, | 719 | wait_event_lock_irq(conf->wait_barrier, |
682 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 720 | conf->nr_pending == conf->nr_queued+1, |
683 | conf->resync_lock, | 721 | conf->resync_lock, |
684 | raid1_unplug(conf->mddev->queue)); | 722 | ({ flush_pending_writes(conf); |
723 | raid1_unplug(conf->mddev->queue); })); | ||
685 | spin_unlock_irq(&conf->resync_lock); | 724 | spin_unlock_irq(&conf->resync_lock); |
686 | } | 725 | } |
687 | static void unfreeze_array(conf_t *conf) | 726 | static void unfreeze_array(conf_t *conf) |
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
907 | blk_plug_device(mddev->queue); | 946 | blk_plug_device(mddev->queue); |
908 | spin_unlock_irqrestore(&conf->device_lock, flags); | 947 | spin_unlock_irqrestore(&conf->device_lock, flags); |
909 | 948 | ||
949 | /* In case raid1d snuck into freeze_array */ | ||
950 | wake_up(&conf->wait_barrier); | ||
951 | |||
910 | if (do_sync) | 952 | if (do_sync) |
911 | md_wakeup_thread(mddev->thread); | 953 | md_wakeup_thread(mddev->thread); |
912 | #if 0 | 954 | #if 0 |
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev) | |||
1473 | 1515 | ||
1474 | for (;;) { | 1516 | for (;;) { |
1475 | char b[BDEVNAME_SIZE]; | 1517 | char b[BDEVNAME_SIZE]; |
1476 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1477 | |||
1478 | if (conf->pending_bio_list.head) { | ||
1479 | bio = bio_list_get(&conf->pending_bio_list); | ||
1480 | blk_remove_plug(mddev->queue); | ||
1481 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1482 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1483 | bitmap_unplug(mddev->bitmap); | ||
1484 | 1518 | ||
1485 | while (bio) { /* submit pending writes */ | 1519 | unplug += flush_pending_writes(conf); |
1486 | struct bio *next = bio->bi_next; | ||
1487 | bio->bi_next = NULL; | ||
1488 | generic_make_request(bio); | ||
1489 | bio = next; | ||
1490 | } | ||
1491 | unplug = 1; | ||
1492 | 1520 | ||
1493 | continue; | 1521 | spin_lock_irqsave(&conf->device_lock, flags); |
1494 | } | 1522 | if (list_empty(head)) { |
1495 | 1523 | spin_unlock_irqrestore(&conf->device_lock, flags); | |
1496 | if (list_empty(head)) | ||
1497 | break; | 1524 | break; |
1525 | } | ||
1498 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); | 1526 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); |
1499 | list_del(head->prev); | 1527 | list_del(head->prev); |
1500 | conf->nr_queued--; | 1528 | conf->nr_queued--; |
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev) | |||
1590 | } | 1618 | } |
1591 | } | 1619 | } |
1592 | } | 1620 | } |
1593 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1594 | if (unplug) | 1621 | if (unplug) |
1595 | unplug_slaves(mddev); | 1622 | unplug_slaves(mddev); |
1596 | } | 1623 | } |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 017f58113c33..32389d2f18fc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
537 | current_distance = abs(r10_bio->devs[slot].addr - | 537 | current_distance = abs(r10_bio->devs[slot].addr - |
538 | conf->mirrors[disk].head_position); | 538 | conf->mirrors[disk].head_position); |
539 | 539 | ||
540 | /* Find the disk whose head is closest */ | 540 | /* Find the disk whose head is closest, |
541 | * or - for far > 1 - find the closest to partition beginning */ | ||
541 | 542 | ||
542 | for (nslot = slot; nslot < conf->copies; nslot++) { | 543 | for (nslot = slot; nslot < conf->copies; nslot++) { |
543 | int ndisk = r10_bio->devs[nslot].devnum; | 544 | int ndisk = r10_bio->devs[nslot].devnum; |
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
557 | slot = nslot; | 558 | slot = nslot; |
558 | break; | 559 | break; |
559 | } | 560 | } |
560 | new_distance = abs(r10_bio->devs[nslot].addr - | 561 | |
561 | conf->mirrors[ndisk].head_position); | 562 | /* for far > 1 always use the lowest address */ |
563 | if (conf->far_copies > 1) | ||
564 | new_distance = r10_bio->devs[nslot].addr; | ||
565 | else | ||
566 | new_distance = abs(r10_bio->devs[nslot].addr - | ||
567 | conf->mirrors[ndisk].head_position); | ||
562 | if (new_distance < current_distance) { | 568 | if (new_distance < current_distance) { |
563 | current_distance = new_distance; | 569 | current_distance = new_distance; |
564 | disk = ndisk; | 570 | disk = ndisk; |
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits) | |||
629 | return ret; | 635 | return ret; |
630 | } | 636 | } |
631 | 637 | ||
632 | 638 | static int flush_pending_writes(conf_t *conf) | |
639 | { | ||
640 | /* Any writes that have been queued but are awaiting | ||
641 | * bitmap updates get flushed here. | ||
642 | * We return 1 if any requests were actually submitted. | ||
643 | */ | ||
644 | int rv = 0; | ||
645 | |||
646 | spin_lock_irq(&conf->device_lock); | ||
647 | |||
648 | if (conf->pending_bio_list.head) { | ||
649 | struct bio *bio; | ||
650 | bio = bio_list_get(&conf->pending_bio_list); | ||
651 | blk_remove_plug(conf->mddev->queue); | ||
652 | spin_unlock_irq(&conf->device_lock); | ||
653 | /* flush any pending bitmap writes to disk | ||
654 | * before proceeding w/ I/O */ | ||
655 | bitmap_unplug(conf->mddev->bitmap); | ||
656 | |||
657 | while (bio) { /* submit pending writes */ | ||
658 | struct bio *next = bio->bi_next; | ||
659 | bio->bi_next = NULL; | ||
660 | generic_make_request(bio); | ||
661 | bio = next; | ||
662 | } | ||
663 | rv = 1; | ||
664 | } else | ||
665 | spin_unlock_irq(&conf->device_lock); | ||
666 | return rv; | ||
667 | } | ||
633 | /* Barriers.... | 668 | /* Barriers.... |
634 | * Sometimes we need to suspend IO while we do something else, | 669 | * Sometimes we need to suspend IO while we do something else, |
635 | * either some resync/recovery, or reconfigure the array. | 670 | * either some resync/recovery, or reconfigure the array. |
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf) | |||
712 | /* stop syncio and normal IO and wait for everything to | 747 | /* stop syncio and normal IO and wait for everything to |
713 | * go quiet. | 748 | * go quiet. |
714 | * We increment barrier and nr_waiting, and then | 749 | * We increment barrier and nr_waiting, and then |
715 | * wait until barrier+nr_pending match nr_queued+2 | 750 | * wait until nr_pending match nr_queued+1 |
751 | * This is called in the context of one normal IO request | ||
752 | * that has failed. Thus any sync request that might be pending | ||
753 | * will be blocked by nr_pending, and we need to wait for | ||
754 | * pending IO requests to complete or be queued for re-try. | ||
755 | * Thus the number queued (nr_queued) plus this request (1) | ||
756 | * must match the number of pending IOs (nr_pending) before | ||
757 | * we continue. | ||
716 | */ | 758 | */ |
717 | spin_lock_irq(&conf->resync_lock); | 759 | spin_lock_irq(&conf->resync_lock); |
718 | conf->barrier++; | 760 | conf->barrier++; |
719 | conf->nr_waiting++; | 761 | conf->nr_waiting++; |
720 | wait_event_lock_irq(conf->wait_barrier, | 762 | wait_event_lock_irq(conf->wait_barrier, |
721 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 763 | conf->nr_pending == conf->nr_queued+1, |
722 | conf->resync_lock, | 764 | conf->resync_lock, |
723 | raid10_unplug(conf->mddev->queue)); | 765 | ({ flush_pending_writes(conf); |
766 | raid10_unplug(conf->mddev->queue); })); | ||
724 | spin_unlock_irq(&conf->resync_lock); | 767 | spin_unlock_irq(&conf->resync_lock); |
725 | } | 768 | } |
726 | 769 | ||
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
892 | blk_plug_device(mddev->queue); | 935 | blk_plug_device(mddev->queue); |
893 | spin_unlock_irqrestore(&conf->device_lock, flags); | 936 | spin_unlock_irqrestore(&conf->device_lock, flags); |
894 | 937 | ||
938 | /* In case raid10d snuck in to freeze_array */ | ||
939 | wake_up(&conf->wait_barrier); | ||
940 | |||
895 | if (do_sync) | 941 | if (do_sync) |
896 | md_wakeup_thread(mddev->thread); | 942 | md_wakeup_thread(mddev->thread); |
897 | 943 | ||
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev) | |||
1464 | 1510 | ||
1465 | for (;;) { | 1511 | for (;;) { |
1466 | char b[BDEVNAME_SIZE]; | 1512 | char b[BDEVNAME_SIZE]; |
1467 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1468 | 1513 | ||
1469 | if (conf->pending_bio_list.head) { | 1514 | unplug += flush_pending_writes(conf); |
1470 | bio = bio_list_get(&conf->pending_bio_list); | ||
1471 | blk_remove_plug(mddev->queue); | ||
1472 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1473 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1474 | bitmap_unplug(mddev->bitmap); | ||
1475 | |||
1476 | while (bio) { /* submit pending writes */ | ||
1477 | struct bio *next = bio->bi_next; | ||
1478 | bio->bi_next = NULL; | ||
1479 | generic_make_request(bio); | ||
1480 | bio = next; | ||
1481 | } | ||
1482 | unplug = 1; | ||
1483 | |||
1484 | continue; | ||
1485 | } | ||
1486 | 1515 | ||
1487 | if (list_empty(head)) | 1516 | spin_lock_irqsave(&conf->device_lock, flags); |
1517 | if (list_empty(head)) { | ||
1518 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1488 | break; | 1519 | break; |
1520 | } | ||
1489 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); | 1521 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); |
1490 | list_del(head->prev); | 1522 | list_del(head->prev); |
1491 | conf->nr_queued--; | 1523 | conf->nr_queued--; |
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev) | |||
1548 | } | 1580 | } |
1549 | } | 1581 | } |
1550 | } | 1582 | } |
1551 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1552 | if (unplug) | 1583 | if (unplug) |
1553 | unplug_slaves(mddev); | 1584 | unplug_slaves(mddev); |
1554 | } | 1585 | } |
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1787 | if (j == conf->copies) { | 1818 | if (j == conf->copies) { |
1788 | /* Cannot recover, so abort the recovery */ | 1819 | /* Cannot recover, so abort the recovery */ |
1789 | put_buf(r10_bio); | 1820 | put_buf(r10_bio); |
1821 | if (rb2) | ||
1822 | atomic_dec(&rb2->remaining); | ||
1790 | r10_bio = rb2; | 1823 | r10_bio = rb2; |
1791 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) | 1824 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) |
1792 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", | 1825 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", |
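Editor's note on the atomic_dec(&rb2->remaining) added above: it fixes a leaked reference count. When recovery of one region is aborted, the count the parent request is waiting on must still be dropped, otherwise the parent never completes. A toy model of that counting, with made-up names:

#include <assert.h>
#include <stdio.h>

struct parent_req {
	int remaining;	/* outstanding child operations */
	int completed;
};

static void child_done(struct parent_req *p)
{
	if (--p->remaining == 0) {
		p->completed = 1;
		printf("parent request completed\n");
	}
}

int main(void)
{
	struct parent_req p = { .remaining = 2, .completed = 0 };

	child_done(&p);		/* first child finished normally          */
	child_done(&p);		/* second child aborted: must still drop  *
				 * its count, or p.completed never flips  */
	assert(p.completed);
	return 0;
}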
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 0c303c84b37b..6b6df8679585 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx) | |||
632 | 632 | ||
633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
634 | /** | 634 | /** |
635 | * mpt_event_register - Register protocol-specific event callback | 635 | * mpt_event_register - Register protocol-specific event callback handler. |
636 | * handler. | ||
637 | * @cb_idx: previously registered (via mpt_register) callback handle | 636 | * @cb_idx: previously registered (via mpt_register) callback handle |
638 | * @ev_cbfunc: callback function | 637 | * @ev_cbfunc: callback function |
639 | * | 638 | * |
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc) | |||
654 | 653 | ||
655 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 654 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
656 | /** | 655 | /** |
657 | * mpt_event_deregister - Deregister protocol-specific event callback | 656 | * mpt_event_deregister - Deregister protocol-specific event callback handler |
658 | * handler. | ||
659 | * @cb_idx: previously registered callback handle | 657 | * @cb_idx: previously registered callback handle |
660 | * | 658 | * |
661 | * Each protocol-specific driver should call this routine | 659 | * Each protocol-specific driver should call this routine |
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx) | |||
765 | 763 | ||
766 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 764 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
767 | /** | 765 | /** |
768 | * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) | 766 | * mpt_get_msg_frame - Obtain an MPT request frame from the pool |
769 | * allocated per MPT adapter. | ||
770 | * @cb_idx: Handle of registered MPT protocol driver | 767 | * @cb_idx: Handle of registered MPT protocol driver |
771 | * @ioc: Pointer to MPT adapter structure | 768 | * @ioc: Pointer to MPT adapter structure |
772 | * | 769 | * |
770 | * Obtain an MPT request frame from the pool (of 1024) that are | ||
771 | * allocated per MPT adapter. | ||
772 | * | ||
773 | * Returns pointer to a MPT request frame or %NULL if none are available | 773 | * Returns pointer to a MPT request frame or %NULL if none are available |
774 | * or IOC is not active. | 774 | * or IOC is not active. |
775 | */ | 775 | */ |
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc) | |||
834 | 834 | ||
835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
836 | /** | 836 | /** |
837 | * mpt_put_msg_frame - Send a protocol specific MPT request frame | 837 | * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC |
838 | * to a IOC. | ||
839 | * @cb_idx: Handle of registered MPT protocol driver | 838 | * @cb_idx: Handle of registered MPT protocol driver |
840 | * @ioc: Pointer to MPT adapter structure | 839 | * @ioc: Pointer to MPT adapter structure |
841 | * @mf: Pointer to MPT request frame | 840 | * @mf: Pointer to MPT request frame |
842 | * | 841 | * |
843 | * This routine posts a MPT request frame to the request post FIFO of a | 842 | * This routine posts an MPT request frame to the request post FIFO of a |
844 | * specific MPT adapter. | 843 | * specific MPT adapter. |
845 | */ | 844 | */ |
846 | void | 845 | void |
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) | |||
868 | } | 867 | } |
869 | 868 | ||
870 | /** | 869 | /** |
871 | * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame | 870 | * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame |
872 | * to a IOC using hi priority request queue. | ||
873 | * @cb_idx: Handle of registered MPT protocol driver | 871 | * @cb_idx: Handle of registered MPT protocol driver |
874 | * @ioc: Pointer to MPT adapter structure | 872 | * @ioc: Pointer to MPT adapter structure |
875 | * @mf: Pointer to MPT request frame | 873 | * @mf: Pointer to MPT request frame |
876 | * | 874 | * |
877 | * This routine posts a MPT request frame to the request post FIFO of a | 875 | * Send a protocol-specific MPT request frame to an IOC using |
876 | * hi-priority request queue. | ||
877 | * | ||
878 | * This routine posts an MPT request frame to the request post FIFO of a | ||
878 | * specific MPT adapter. | 879 | * specific MPT adapter. |
879 | **/ | 880 | **/ |
880 | void | 881 | void |
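Editor's note: all of the mptbase.c and mptscsih.c hunks in this series are the same kind of fix. kernel-doc expects the "name - summary" line to be a single line, the @parameter descriptions to follow it directly, and any longer prose to sit in its own paragraph. A schematic of that layout (the function and parameters here are invented, purely to show the shape):

/**
 * example_frob_widget - Frob a widget and report the result
 * @widget: widget to frob
 * @flags: behaviour flags (illustrative)
 *
 * The longer description goes here as a separate paragraph, after the
 * parameter list, instead of letting the one-line summary wrap.
 *
 * Returns 0 on success or a negative value on failure.
 */
int example_frob_widget(int widget, unsigned long flags)
{
	return (widget >= 0 && flags != 0) ? 0 : -1;
}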
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index af1de0ccee2f..0c252f60c4c1 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) | |||
1533 | * | 1533 | * |
1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). | 1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). |
1535 | * | 1535 | * |
1536 | * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC | 1536 | * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC |
1537 | * will be active. | 1537 | * will be active. |
1538 | * | 1538 | * |
1539 | * Returns 0 for SUCCESS, or %FAILED. | 1539 | * Returns 0 for SUCCESS, or %FAILED. |
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR | |||
2537 | 2537 | ||
2538 | /** | 2538 | /** |
2539 | * mptscsih_get_scsi_lookup | 2539 | * mptscsih_get_scsi_lookup |
2540 | * | ||
2541 | * retrieves scmd entry from ScsiLookup[] array list | ||
2542 | * | ||
2543 | * @ioc: Pointer to MPT_ADAPTER structure | 2540 | * @ioc: Pointer to MPT_ADAPTER structure |
2544 | * @i: index into the array | 2541 | * @i: index into the array |
2545 | * | 2542 | * |
2546 | * Returns the scsi_cmd pointer | 2543 | * retrieves scmd entry from ScsiLookup[] array list |
2547 | * | 2544 | * |
2545 | * Returns the scsi_cmd pointer | ||
2548 | **/ | 2546 | **/ |
2549 | static struct scsi_cmnd * | 2547 | static struct scsi_cmnd * |
2550 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2548 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) |
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | |||
2561 | 2559 | ||
2562 | /** | 2560 | /** |
2563 | * mptscsih_getclear_scsi_lookup | 2561 | * mptscsih_getclear_scsi_lookup |
2564 | * | ||
2565 | * retrieves and clears scmd entry from ScsiLookup[] array list | ||
2566 | * | ||
2567 | * @ioc: Pointer to MPT_ADAPTER structure | 2562 | * @ioc: Pointer to MPT_ADAPTER structure |
2568 | * @i: index into the array | 2563 | * @i: index into the array |
2569 | * | 2564 | * |
2570 | * Returns the scsi_cmd pointer | 2565 | * retrieves and clears scmd entry from ScsiLookup[] array list |
2571 | * | 2566 | * |
2567 | * Returns the scsi_cmd pointer | ||
2572 | **/ | 2568 | **/ |
2573 | static struct scsi_cmnd * | 2569 | static struct scsi_cmnd * |
2574 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2570 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) |
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index afd82966f9a0..13bac53db69a 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
@@ -48,31 +48,13 @@ struct sm501_devdata { | |||
48 | unsigned int pdev_id; | 48 | unsigned int pdev_id; |
49 | unsigned int irq; | 49 | unsigned int irq; |
50 | void __iomem *regs; | 50 | void __iomem *regs; |
51 | unsigned int rev; | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define MHZ (1000 * 1000) | 54 | #define MHZ (1000 * 1000) |
54 | 55 | ||
55 | #ifdef DEBUG | 56 | #ifdef DEBUG |
56 | static const unsigned int misc_div[] = { | 57 | static const unsigned int div_tab[] = { |
57 | [0] = 1, | ||
58 | [1] = 2, | ||
59 | [2] = 4, | ||
60 | [3] = 8, | ||
61 | [4] = 16, | ||
62 | [5] = 32, | ||
63 | [6] = 64, | ||
64 | [7] = 128, | ||
65 | [8] = 3, | ||
66 | [9] = 6, | ||
67 | [10] = 12, | ||
68 | [11] = 24, | ||
69 | [12] = 48, | ||
70 | [13] = 96, | ||
71 | [14] = 192, | ||
72 | [15] = 384, | ||
73 | }; | ||
74 | |||
75 | static const unsigned int px_div[] = { | ||
76 | [0] = 1, | 58 | [0] = 1, |
77 | [1] = 2, | 59 | [1] = 2, |
78 | [2] = 4, | 60 | [2] = 4, |
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = { | |||
101 | 83 | ||
102 | static unsigned long decode_div(unsigned long pll2, unsigned long val, | 84 | static unsigned long decode_div(unsigned long pll2, unsigned long val, |
103 | unsigned int lshft, unsigned int selbit, | 85 | unsigned int lshft, unsigned int selbit, |
104 | unsigned long mask, const unsigned int *dtab) | 86 | unsigned long mask) |
105 | { | 87 | { |
106 | if (val & selbit) | 88 | if (val & selbit) |
107 | pll2 = 288 * MHZ; | 89 | pll2 = 288 * MHZ; |
108 | 90 | ||
109 | return pll2 / dtab[(val >> lshft) & mask]; | 91 | return pll2 / div_tab[(val >> lshft) & mask]; |
110 | } | 92 | } |
111 | 93 | ||
112 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) | 94 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) |
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
141 | } | 123 | } |
142 | 124 | ||
143 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; | 125 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; |
144 | sdclk0 /= misc_div[((misct >> 8) & 0xf)]; | 126 | sdclk0 /= div_tab[((misct >> 8) & 0xf)]; |
145 | 127 | ||
146 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; | 128 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; |
147 | sdclk1 /= misc_div[((misct >> 16) & 0xf)]; | 129 | sdclk1 /= div_tab[((misct >> 16) & 0xf)]; |
148 | 130 | ||
149 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", | 131 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", |
150 | misct, pm0, pm1); | 132 | misct, pm0, pm1); |
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
158 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 140 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
159 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 141 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
160 | (pmc & 3 ) == 0 ? '*' : '-', | 142 | (pmc & 3 ) == 0 ? '*' : '-', |
161 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), | 143 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)), |
162 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), | 144 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)), |
163 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), | 145 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)), |
164 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); | 146 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15))); |
165 | 147 | ||
166 | dev_dbg(sm->dev, "PM1[%c]: " | 148 | dev_dbg(sm->dev, "PM1[%c]: " |
167 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 149 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
168 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 150 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
169 | (pmc & 3 ) == 1 ? '*' : '-', | 151 | (pmc & 3 ) == 1 ? '*' : '-', |
170 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), | 152 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)), |
171 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), | 153 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)), |
172 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), | 154 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)), |
173 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); | 155 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15))); |
174 | } | 156 | } |
175 | 157 | ||
176 | static void sm501_dump_regs(struct sm501_devdata *sm) | 158 | static void sm501_dump_regs(struct sm501_devdata *sm) |
@@ -436,46 +418,108 @@ struct sm501_clock { | |||
436 | unsigned long mclk; | 418 | unsigned long mclk; |
437 | int divider; | 419 | int divider; |
438 | int shift; | 420 | int shift; |
421 | unsigned int m, n, k; | ||
439 | }; | 422 | }; |
440 | 423 | ||
424 | /* sm501_calc_clock | ||
425 | * | ||
426 | * Calculates the nearest discrete clock frequency that | ||
427 | * can be achieved with the specified input clock. | ||
428 | * the maximum divisor is 3 or 5 | ||
429 | */ | ||
430 | |||
431 | static int sm501_calc_clock(unsigned long freq, | ||
432 | struct sm501_clock *clock, | ||
433 | int max_div, | ||
434 | unsigned long mclk, | ||
435 | long *best_diff) | ||
436 | { | ||
437 | int ret = 0; | ||
438 | int divider; | ||
439 | int shift; | ||
440 | long diff; | ||
441 | |||
442 | /* try dividers 1 and 3 for CRT and for panel, | ||
443 | try divider 5 for panel only.*/ | ||
444 | |||
445 | for (divider = 1; divider <= max_div; divider += 2) { | ||
446 | /* try all 8 shift values.*/ | ||
447 | for (shift = 0; shift < 8; shift++) { | ||
448 | /* Calculate difference to requested clock */ | ||
449 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
450 | if (diff < 0) | ||
451 | diff = -diff; | ||
452 | |||
453 | /* If it is less than the current, use it */ | ||
454 | if (diff < *best_diff) { | ||
455 | *best_diff = diff; | ||
456 | |||
457 | clock->mclk = mclk; | ||
458 | clock->divider = divider; | ||
459 | clock->shift = shift; | ||
460 | ret = 1; | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /* sm501_calc_pll | ||
469 | * | ||
470 | * Calculates the nearest discrete clock frequency that can be | ||
471 | * achieved using the programmable PLL. | ||
472 | * the maximum divisor is 3 or 5 | ||
473 | */ | ||
474 | |||
475 | static unsigned long sm501_calc_pll(unsigned long freq, | ||
476 | struct sm501_clock *clock, | ||
477 | int max_div) | ||
478 | { | ||
479 | unsigned long mclk; | ||
480 | unsigned int m, n, k; | ||
481 | long best_diff = 999999999; | ||
482 | |||
483 | /* | ||
484 | * The SM502 datasheet doesn't specify the min/max values for M and N. | ||
485 | * N = 1 at least doesn't work in practice. | ||
486 | */ | ||
487 | for (m = 2; m <= 255; m++) { | ||
488 | for (n = 2; n <= 127; n++) { | ||
489 | for (k = 0; k <= 1; k++) { | ||
490 | mclk = (24000000UL * m / n) >> k; | ||
491 | |||
492 | if (sm501_calc_clock(freq, clock, max_div, | ||
493 | mclk, &best_diff)) { | ||
494 | clock->m = m; | ||
495 | clock->n = n; | ||
496 | clock->k = k; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Return best clock. */ | ||
503 | return clock->mclk / (clock->divider << clock->shift); | ||
504 | } | ||
505 | |||
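Editor's note: to get a feel for the numbers, the programmable PLL path runs the 24 MHz reference through M, N and K before the usual divider/shift stage, so the achieved rate is ((24 MHz * M / N) >> K) / (divider << shift). The values below are just one combination the search above could land on, chosen for illustration only:

#include <stdio.h>

/* One example operating point for the SM502 programmable PLL path.
 * m/n/k/divider/shift are illustrative, not recommended settings. */
int main(void)
{
	unsigned int m = 65, n = 12, k = 1;	/* PLL multiplier, divider, post-/2  */
	int divider = 1, shift = 0;		/* the /1,/3,/5 and 2^shift stage    */

	unsigned long mclk = (24000000UL * m / n) >> k;	/* 65,000,000 Hz */
	unsigned long out  = mclk / (divider << shift);	/* still 65 MHz  */

	printf("mclk = %lu Hz, output = %lu Hz\n", mclk, out);
	return 0;
}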
441 | /* sm501_select_clock | 506 | /* sm501_select_clock |
442 | * | 507 | * |
443 | * selects nearest discrete clock frequency the SM501 can achive | 508 | * Calculates the nearest discrete clock frequency that can be |
509 | * achieved using the 288MHz and 336MHz PLLs. | ||
444 | * the maximum divisor is 3 or 5 | 510 | * the maximum divisor is 3 or 5 |
445 | */ | 511 | */ |
512 | |||
446 | static unsigned long sm501_select_clock(unsigned long freq, | 513 | static unsigned long sm501_select_clock(unsigned long freq, |
447 | struct sm501_clock *clock, | 514 | struct sm501_clock *clock, |
448 | int max_div) | 515 | int max_div) |
449 | { | 516 | { |
450 | unsigned long mclk; | 517 | unsigned long mclk; |
451 | int divider; | ||
452 | int shift; | ||
453 | long diff; | ||
454 | long best_diff = 999999999; | 518 | long best_diff = 999999999; |
455 | 519 | ||
456 | /* Try 288MHz and 336MHz clocks. */ | 520 | /* Try 288MHz and 336MHz clocks. */ |
457 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { | 521 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { |
458 | /* try dividers 1 and 3 for CRT and for panel, | 522 | sm501_calc_clock(freq, clock, max_div, mclk, &best_diff); |
459 | try divider 5 for panel only.*/ | ||
460 | |||
461 | for (divider = 1; divider <= max_div; divider += 2) { | ||
462 | /* try all 8 shift values.*/ | ||
463 | for (shift = 0; shift < 8; shift++) { | ||
464 | /* Calculate difference to requested clock */ | ||
465 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
466 | if (diff < 0) | ||
467 | diff = -diff; | ||
468 | |||
469 | /* If it is less than the current, use it */ | ||
470 | if (diff < best_diff) { | ||
471 | best_diff = diff; | ||
472 | |||
473 | clock->mclk = mclk; | ||
474 | clock->divider = divider; | ||
475 | clock->shift = shift; | ||
476 | } | ||
477 | } | ||
478 | } | ||
479 | } | 523 | } |
480 | 524 | ||
481 | /* Return best clock. */ | 525 | /* Return best clock. */ |
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev, | |||
497 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); | 541 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); |
498 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); | 542 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); |
499 | unsigned char reg; | 543 | unsigned char reg; |
544 | unsigned int pll_reg = 0; | ||
500 | unsigned long sm501_freq; /* the actual frequency achieved */ | 545 | unsigned long sm501_freq; /* the actual frequency achieved */ |
501 | 546 | ||
502 | struct sm501_clock to; | 547 | struct sm501_clock to; |
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev, | |||
511 | * requested frequency the value must be multiplied by | 556 | * requested frequency the value must be multiplied by |
512 | * 2. This clock also has an additional pre divisor */ | 557 | * 2. This clock also has an additional pre divisor */ |
513 | 558 | ||
514 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 559 | if (sm->rev >= 0xC0) { |
515 | reg=to.shift & 0x07;/* bottom 3 bits are shift */ | 560 | /* SM502 -> use the programmable PLL */ |
516 | if (to.divider == 3) | 561 | sm501_freq = (sm501_calc_pll(2 * req_freq, |
517 | reg |= 0x08; /* /3 divider required */ | 562 | &to, 5) / 2); |
518 | else if (to.divider == 5) | 563 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ |
519 | reg |= 0x10; /* /5 divider required */ | 564 | if (to.divider == 3) |
520 | if (to.mclk != 288000000) | 565 | reg |= 0x08; /* /3 divider required */ |
521 | reg |= 0x20; /* which mclk pll is source */ | 566 | else if (to.divider == 5) |
567 | reg |= 0x10; /* /5 divider required */ | ||
568 | reg |= 0x40; /* select the programmable PLL */ | ||
569 | pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; | ||
570 | } else { | ||
571 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
572 | &to, 5) / 2); | ||
573 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ | ||
574 | if (to.divider == 3) | ||
575 | reg |= 0x08; /* /3 divider required */ | ||
576 | else if (to.divider == 5) | ||
577 | reg |= 0x10; /* /5 divider required */ | ||
578 | if (to.mclk != 288000000) | ||
579 | reg |= 0x20; /* which mclk pll is source */ | ||
580 | } | ||
522 | break; | 581 | break; |
523 | 582 | ||
524 | case SM501_CLOCK_V2XCLK: | 583 | case SM501_CLOCK_V2XCLK: |
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev, | |||
579 | } | 638 | } |
580 | 639 | ||
581 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); | 640 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); |
641 | |||
642 | if (pll_reg) | ||
643 | writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL); | ||
644 | |||
582 | sm501_sync_regs(sm); | 645 | sm501_sync_regs(sm); |
583 | 646 | ||
584 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", | 647 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", |
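Editor's note on the pll_reg value written above: the expression 0x20000 | (to.k << 15) | (to.n << 8) | to.m places M in bits 7:0, N in bits 14:8 and K in bit 15, plus a fixed control bit at 17 (the meaning of bit 17 is inferred from the constant, not from a datasheet). A quick pack/unpack round-trip of that layout:

#include <assert.h>
#include <stdio.h>

/* Field layout inferred from the constant used in sm501_set_clock():
 * bits 7:0 = M, bits 14:8 = N, bit 15 = K, bit 17 = fixed control bit. */
static unsigned long pack_pll(unsigned int m, unsigned int n, unsigned int k)
{
	return 0x20000UL | ((unsigned long)k << 15) | (n << 8) | m;
}

int main(void)
{
	unsigned int m = 65, n = 12, k = 1;
	unsigned long reg = pack_pll(m, n, k);

	assert((reg & 0xff) == m);		/* M comes back out of bits 7:0 */
	assert(((reg >> 8) & 0x7f) == n);	/* N out of bits 14:8           */
	assert(((reg >> 15) & 0x1) == k);	/* K out of bit 15              */

	printf("PLL control word: 0x%05lx\n", reg);
	return 0;
}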
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock); | |||
599 | * finds the closest available frequency for a given clock | 662 | * finds the closest available frequency for a given clock |
600 | */ | 663 | */ |
601 | 664 | ||
602 | unsigned long sm501_find_clock(int clksrc, | 665 | unsigned long sm501_find_clock(struct device *dev, |
666 | int clksrc, | ||
603 | unsigned long req_freq) | 667 | unsigned long req_freq) |
604 | { | 668 | { |
669 | struct sm501_devdata *sm = dev_get_drvdata(dev); | ||
605 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ | 670 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ |
606 | struct sm501_clock to; | 671 | struct sm501_clock to; |
607 | 672 | ||
608 | switch (clksrc) { | 673 | switch (clksrc) { |
609 | case SM501_CLOCK_P2XCLK: | 674 | case SM501_CLOCK_P2XCLK: |
610 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 675 | if (sm->rev >= 0xC0) { |
676 | /* SM502 -> use the programmable PLL */ | ||
677 | sm501_freq = (sm501_calc_pll(2 * req_freq, | ||
678 | &to, 5) / 2); | ||
679 | } else { | ||
680 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
681 | &to, 5) / 2); | ||
682 | } | ||
611 | break; | 683 | break; |
612 | 684 | ||
613 | case SM501_CLOCK_V2XCLK: | 685 | case SM501_CLOCK_V2XCLK: |
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm) | |||
914 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", | 986 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", |
915 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); | 987 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); |
916 | 988 | ||
989 | sm->rev = devid & SM501_DEVICEID_REVMASK; | ||
990 | |||
917 | sm501_dump_gate(sm); | 991 | sm501_dump_gate(sm); |
918 | 992 | ||
919 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); | 993 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); |
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index bb269d0c677e..6cb781262f94 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c | |||
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status) | |||
1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) | 1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) |
1079 | return -EIO; | 1079 | return -EIO; |
1080 | 1080 | ||
1081 | return ((s & TP_HOTKEY_TABLET_MASK) != 0); | 1081 | *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); |
1082 | return 0; | ||
1082 | } | 1083 | } |
1083 | 1084 | ||
1084 | /* | 1085 | /* |
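Editor's note: the thinkpad_acpi hunk is a pure calling-convention fix. hotkey_get_tablet_mode() is expected to report the mode through its *status argument and keep the return value for 0/-errno, but it was returning the mode itself, which a caller that only tests for negative errors silently misreads. A stripped-down illustration with hypothetical names, not the driver's API:

#include <stdio.h>

#define TABLET_MASK 0x1

/* Broken shape: the caller below treats the return value as an error
 * code, so returning the mode (0 or 1) loses the information. */
static int get_tablet_mode_broken(unsigned int hw, int *status)
{
	(void)status;
	return (hw & TABLET_MASK) != 0;
}

/* Fixed shape: result goes through the out parameter, return is 0/-errno. */
static int get_tablet_mode_fixed(unsigned int hw, int *status)
{
	*status = (hw & TABLET_MASK) != 0;
	return 0;
}

int main(void)
{
	int status = -1;

	/* Caller pattern used by the driver: only check for failure. */
	if (get_tablet_mode_broken(TABLET_MASK, &status) < 0)
		return 1;
	printf("broken: status never written, still %d\n", status);	/* -1 */

	if (get_tablet_mode_fixed(TABLET_MASK, &status) < 0)
		return 1;
	printf("fixed: status = %d\n", status);				/* 1 */
	return 0;
}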
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0fbf1bbbaee9..d7a3ea88eddb 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1253 | 1253 | ||
1254 | /* Setup interrupt handlers. */ | 1254 | /* Setup interrupt handlers. */ |
1255 | for (idp = id; idp->name; idp++) { | 1255 | for (idp = id; idp->name; idp++) { |
1256 | if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) | 1256 | if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0) |
1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); | 1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); |
1258 | } | 1258 | } |
1259 | 1259 | ||
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1382 | 1382 | ||
1383 | /* Setup interrupt handlers. */ | 1383 | /* Setup interrupt handlers. */ |
1384 | for (idp = id; idp->name; idp++) { | 1384 | for (idp = id; idp->name; idp++) { |
1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) | 1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0) |
1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1387 | } | 1387 | } |
1388 | 1388 | ||
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1553 | 1553 | ||
1554 | /* Setup interrupt handlers. */ | 1554 | /* Setup interrupt handlers. */ |
1555 | for (idp = id; idp->name; idp++) { | 1555 | for (idp = id; idp->name; idp++) { |
1556 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1556 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1558 | } | 1558 | } |
1559 | 1559 | ||
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1680 | 1680 | ||
1681 | /* Setup interrupt handlers. */ | 1681 | /* Setup interrupt handlers. */ |
1682 | for (idp = id; idp->name; idp++) { | 1682 | for (idp = id; idp->name; idp++) { |
1683 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1683 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", | 1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", |
1685 | idp->name, b+idp->irq); | 1685 | idp->name, b+idp->irq); |
1686 | } | 1686 | } |
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 1d3b84b4af3f..553a9905299a 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig | |||
@@ -103,6 +103,11 @@ config IOMMU_SBA | |||
103 | depends on PCI_LBA | 103 | depends on PCI_LBA |
104 | default PCI_LBA | 104 | default PCI_LBA |
105 | 105 | ||
106 | config IOMMU_HELPER | ||
107 | bool | ||
108 | depends on IOMMU_SBA || IOMMU_CCIO | ||
109 | default y | ||
110 | |||
106 | #config PCI_EPIC | 111 | #config PCI_EPIC |
107 | # bool "EPIC/SAGA PCI support" | 112 | # bool "EPIC/SAGA PCI support" |
108 | # depends on PCI | 113 | # depends on PCI |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d08b284de196..60d338cd8009 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/iommu-helper.h> | ||
46 | 47 | ||
47 | #include <asm/byteorder.h> | 48 | #include <asm/byteorder.h> |
48 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ | 49 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ |
@@ -302,13 +303,17 @@ static int ioc_count; | |||
302 | */ | 303 | */ |
303 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ | 304 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ |
304 | for(; res_ptr < res_end; ++res_ptr) { \ | 305 | for(; res_ptr < res_end; ++res_ptr) { \ |
305 | if(0 == (*res_ptr & mask)) { \ | 306 | int ret;\ |
306 | *res_ptr |= mask; \ | 307 | unsigned int idx;\ |
307 | res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ | 308 | idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ |
308 | ioc->res_hint = res_idx + (size >> 3); \ | 309 | ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\ |
309 | goto resource_found; \ | 310 | if ((0 == (*res_ptr & mask)) && !ret) { \ |
310 | } \ | 311 | *res_ptr |= mask; \ |
311 | } | 312 | res_idx = idx;\ |
313 | ioc->res_hint = res_idx + (size >> 3); \ | ||
314 | goto resource_found; \ | ||
315 | } \ | ||
316 | } | ||
312 | 317 | ||
313 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ | 318 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ |
314 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ | 319 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ |
@@ -341,10 +346,11 @@ static int ioc_count; | |||
341 | * of available pages for the requested size. | 346 | * of available pages for the requested size. |
342 | */ | 347 | */ |
343 | static int | 348 | static int |
344 | ccio_alloc_range(struct ioc *ioc, size_t size) | 349 | ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
345 | { | 350 | { |
346 | unsigned int pages_needed = size >> IOVP_SHIFT; | 351 | unsigned int pages_needed = size >> IOVP_SHIFT; |
347 | unsigned int res_idx; | 352 | unsigned int res_idx; |
353 | unsigned long boundary_size; | ||
348 | #ifdef CCIO_SEARCH_TIME | 354 | #ifdef CCIO_SEARCH_TIME |
349 | unsigned long cr_start = mfctl(16); | 355 | unsigned long cr_start = mfctl(16); |
350 | #endif | 356 | #endif |
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size) | |||
360 | ** ggg sacrifices another 710 to the computer gods. | 366 | ** ggg sacrifices another 710 to the computer gods. |
361 | */ | 367 | */ |
362 | 368 | ||
369 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | ||
370 | boundary_size >>= IOVP_SHIFT; | ||
371 | |||
363 | if (pages_needed <= 8) { | 372 | if (pages_needed <= 8) { |
364 | /* | 373 | /* |
365 | * LAN traffic will not thrash the TLB IFF the same NIC | 374 | * LAN traffic will not thrash the TLB IFF the same NIC |
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
760 | ioc->msingle_pages += size >> IOVP_SHIFT; | 769 | ioc->msingle_pages += size >> IOVP_SHIFT; |
761 | #endif | 770 | #endif |
762 | 771 | ||
763 | idx = ccio_alloc_range(ioc, size); | 772 | idx = ccio_alloc_range(ioc, dev, size); |
764 | iovp = (dma_addr_t)MKIOVP(idx); | 773 | iovp = (dma_addr_t)MKIOVP(idx); |
765 | 774 | ||
766 | pdir_start = &(ioc->pdir_base[idx]); | 775 | pdir_start = &(ioc->pdir_base[idx]); |
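Editor's note: both the CCIO and SBA changes feed the device's DMA segment boundary, converted to IO-page units via ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT) >> IOVP_SHIFT, into iommu_is_span_boundary() so that an allocated range never straddles a boundary the device cannot cross. The helper's semantics can be modelled in a few lines of user-space C; this is a model of the check, assuming boundary_size is a power of two, not a copy of lib/iommu-helper.c:

#include <assert.h>

/* Non-zero if a span of nr pages starting at page index idx (offset by
 * 'shift' pages relative to the boundary grid) would cross a boundary
 * of boundary_size pages.  boundary_size must be a power of two. */
static int span_crosses_boundary(unsigned long idx, unsigned long nr,
				 unsigned long shift,
				 unsigned long boundary_size)
{
	unsigned long offset = (idx + shift) & (boundary_size - 1);

	return offset + nr > boundary_size;
}

int main(void)
{
	/* 4 GB boundary with 4 KB IO pages -> 0x100000 pages per segment. */
	unsigned long boundary = 0x100000;

	assert(!span_crosses_boundary(0, 16, 0, boundary));
	assert(span_crosses_boundary(boundary - 8, 16, 0, boundary));	/* straddles    */
	assert(!span_crosses_boundary(boundary, 16, 0, boundary));	/* next segment */
	return 0;
}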
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 97ba8286c596..a9c46cc2db37 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h | |||
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
96 | 96 | ||
97 | static inline unsigned int | 97 | static inline unsigned int |
98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | 98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, |
99 | struct scatterlist *startsg, int nents, | 99 | struct scatterlist *startsg, int nents, |
100 | int (*iommu_alloc_range)(struct ioc *, size_t)) | 100 | int (*iommu_alloc_range)(struct ioc *, struct device *, size_t)) |
101 | { | 101 | { |
102 | struct scatterlist *contig_sg; /* contig chunk head */ | 102 | struct scatterlist *contig_sg; /* contig chunk head */ |
103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); | 166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); |
167 | sg_dma_address(contig_sg) = | 167 | sg_dma_address(contig_sg) = |
168 | PIDE_FLAG | 168 | PIDE_FLAG |
169 | | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) | 169 | | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT) |
170 | | dma_offset; | 170 | | dma_offset; |
171 | n_mappings++; | 171 | n_mappings++; |
172 | } | 172 | } |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d06627c3f353..e834127a8505 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/iommu-helper.h> | ||
32 | 33 | ||
33 | #include <asm/byteorder.h> | 34 | #include <asm/byteorder.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
313 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) | 314 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) |
314 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | 315 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) |
315 | 316 | ||
317 | unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, | ||
318 | unsigned int bitshiftcnt) | ||
319 | { | ||
320 | return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) | ||
321 | + bitshiftcnt; | ||
322 | } | ||
316 | 323 | ||
317 | /** | 324 | /** |
318 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | 325 | * sba_search_bitmap - find free space in IO PDIR resource bitmap |
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
324 | * Cool perf optimization: search for log2(size) bits at a time. | 331 | * Cool perf optimization: search for log2(size) bits at a time. |
325 | */ | 332 | */ |
326 | static SBA_INLINE unsigned long | 333 | static SBA_INLINE unsigned long |
327 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | 334 | sba_search_bitmap(struct ioc *ioc, struct device *dev, |
335 | unsigned long bits_wanted) | ||
328 | { | 336 | { |
329 | unsigned long *res_ptr = ioc->res_hint; | 337 | unsigned long *res_ptr = ioc->res_hint; |
330 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | 338 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
331 | unsigned long pide = ~0UL; | 339 | unsigned long pide = ~0UL, tpide; |
340 | unsigned long boundary_size; | ||
341 | unsigned long shift; | ||
342 | int ret; | ||
343 | |||
344 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | ||
345 | boundary_size >>= IOVP_SHIFT; | ||
346 | |||
347 | #if defined(ZX1_SUPPORT) | ||
348 | BUG_ON(ioc->ibase & ~IOVP_MASK); | ||
349 | shift = ioc->ibase >> IOVP_SHIFT; | ||
350 | #else | ||
351 | shift = 0; | ||
352 | #endif | ||
332 | 353 | ||
333 | if (bits_wanted > (BITS_PER_LONG/2)) { | 354 | if (bits_wanted > (BITS_PER_LONG/2)) { |
334 | /* Search word at a time - no mask needed */ | 355 | /* Search word at a time - no mask needed */ |
335 | for(; res_ptr < res_end; ++res_ptr) { | 356 | for(; res_ptr < res_end; ++res_ptr) { |
336 | if (*res_ptr == 0) { | 357 | tpide = ptr_to_pide(ioc, res_ptr, 0); |
358 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
359 | shift, | ||
360 | boundary_size); | ||
361 | if ((*res_ptr == 0) && !ret) { | ||
337 | *res_ptr = RESMAP_MASK(bits_wanted); | 362 | *res_ptr = RESMAP_MASK(bits_wanted); |
338 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 363 | pide = tpide; |
339 | pide <<= 3; /* convert to bit address */ | ||
340 | break; | 364 | break; |
341 | } | 365 | } |
342 | } | 366 | } |
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
365 | { | 389 | { |
366 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | 390 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); |
367 | WARN_ON(mask == 0); | 391 | WARN_ON(mask == 0); |
368 | if(((*res_ptr) & mask) == 0) { | 392 | tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); |
393 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
394 | shift, | ||
395 | boundary_size); | ||
396 | if ((((*res_ptr) & mask) == 0) && !ret) { | ||
369 | *res_ptr |= mask; /* mark resources busy! */ | 397 | *res_ptr |= mask; /* mark resources busy! */ |
370 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 398 | pide = tpide; |
371 | pide <<= 3; /* convert to bit address */ | ||
372 | pide += bitshiftcnt; | ||
373 | break; | 399 | break; |
374 | } | 400 | } |
375 | mask >>= o; | 401 | mask >>= o; |
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
404 | * resource bit map. | 430 | * resource bit map. |
405 | */ | 431 | */ |
406 | static int | 432 | static int |
407 | sba_alloc_range(struct ioc *ioc, size_t size) | 433 | sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
408 | { | 434 | { |
409 | unsigned int pages_needed = size >> IOVP_SHIFT; | 435 | unsigned int pages_needed = size >> IOVP_SHIFT; |
410 | #ifdef SBA_COLLECT_STATS | 436 | #ifdef SBA_COLLECT_STATS |
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
412 | #endif | 438 | #endif |
413 | unsigned long pide; | 439 | unsigned long pide; |
414 | 440 | ||
415 | pide = sba_search_bitmap(ioc, pages_needed); | 441 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
416 | if (pide >= (ioc->res_size << 3)) { | 442 | if (pide >= (ioc->res_size << 3)) { |
417 | pide = sba_search_bitmap(ioc, pages_needed); | 443 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
418 | if (pide >= (ioc->res_size << 3)) | 444 | if (pide >= (ioc->res_size << 3)) |
419 | panic("%s: I/O MMU @ %p is out of mapping resources\n", | 445 | panic("%s: I/O MMU @ %p is out of mapping resources\n", |
420 | __FILE__, ioc->ioc_hpa); | 446 | __FILE__, ioc->ioc_hpa); |
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
710 | ioc->msingle_calls++; | 736 | ioc->msingle_calls++; |
711 | ioc->msingle_pages += size >> IOVP_SHIFT; | 737 | ioc->msingle_pages += size >> IOVP_SHIFT; |
712 | #endif | 738 | #endif |
713 | pide = sba_alloc_range(ioc, size); | 739 | pide = sba_alloc_range(ioc, dev, size); |
714 | iovp = (dma_addr_t) pide << IOVP_SHIFT; | 740 | iovp = (dma_addr_t) pide << IOVP_SHIFT; |
715 | 741 | ||
716 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | 742 | DBG_RUN("%s() 0x%p -> 0x%lx\n", |
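Editor's note on the new ptr_to_pide() helper in sba_iommu.c: it simply turns a position in the resource bitmap back into a PDIR entry index, i.e. the byte offset of the word into the map times eight, plus the bit offset inside that word. A tiny user-space check of that arithmetic, using plain arrays instead of struct ioc:

#include <assert.h>

/* PDIR index = (byte offset of the word into the map) * 8 + bit offset. */
static unsigned long ptr_to_pide(unsigned char *res_map,
				 unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)res_map) << 3)
		+ bitshiftcnt;
}

int main(void)
{
	unsigned long words[8];
	unsigned char *res_map = (unsigned char *)words;

	assert(ptr_to_pide(res_map, &words[0], 0) == 0);
	assert(ptr_to_pide(res_map, &words[1], 5) == sizeof(unsigned long) * 8 + 5);
	return 0;
}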
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ef5a6a245f5f..6a9403d79e0c 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus) | |||
145 | child_bus = dev->subordinate; | 145 | child_bus = dev->subordinate; |
146 | child_bus->dev.parent = child_bus->bridge; | 146 | child_bus->dev.parent = child_bus->bridge; |
147 | retval = device_register(&child_bus->dev); | 147 | retval = device_register(&child_bus->dev); |
148 | if (!retval) | 148 | if (retval) |
149 | dev_err(&dev->dev, "Error registering pci_bus," | ||
150 | " continuing...\n"); | ||
151 | else | ||
149 | retval = device_create_file(&child_bus->dev, | 152 | retval = device_create_file(&child_bus->dev, |
150 | &dev_attr_cpuaffinity); | 153 | &dev_attr_cpuaffinity); |
151 | if (retval) | 154 | if (retval) |
152 | dev_err(&dev->dev, "Error registering pci_bus" | 155 | dev_err(&dev->dev, "Error creating cpuaffinity" |
153 | " device bridge symlink," | 156 | " file, continuing...\n"); |
154 | " continuing...\n"); | ||
155 | } | 157 | } |
156 | } | 158 | } |
157 | } | 159 | } |
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index a590ef682153..4d4a64478404 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | 6 | ||
7 | unsigned int pci_do_scan_bus(struct pci_bus *bus) | 7 | unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus) |
8 | { | 8 | { |
9 | unsigned int max; | 9 | unsigned int max; |
10 | 10 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cf22f9e01e00..5e50008d1181 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle) | |||
1085 | * This function should be called per *physical slot*, | 1085 | * This function should be called per *physical slot*, |
1086 | * not per each slot object in ACPI namespace. | 1086 | * not per each slot object in ACPI namespace. |
1087 | */ | 1087 | */ |
1088 | static int enable_device(struct acpiphp_slot *slot) | 1088 | static int __ref enable_device(struct acpiphp_slot *slot) |
1089 | { | 1089 | { |
1090 | struct pci_dev *dev; | 1090 | struct pci_dev *dev; |
1091 | struct pci_bus *bus = slot->bridge->pci_bus; | 1091 | struct pci_bus *bus = slot->bridge->pci_bus; |
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 5e9be44817cb..b3515fc4cd38 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) | |||
250 | * Device configuration functions | 250 | * Device configuration functions |
251 | */ | 251 | */ |
252 | 252 | ||
253 | int cpci_configure_slot(struct slot* slot) | 253 | int __ref cpci_configure_slot(struct slot *slot) |
254 | { | 254 | { |
255 | struct pci_bus *parent; | 255 | struct pci_bus *parent; |
256 | int fn; | 256 | int fn; |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6eba9b2cfb90..698975a6a21c 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot) | |||
711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); | 711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); |
712 | if (retval) { | 712 | if (retval) { |
713 | err("%s: Write command failed!\n", __FUNCTION__); | 713 | err("%s: Write command failed!\n", __FUNCTION__); |
714 | return -1; | 714 | retval = -1; |
715 | goto out; | ||
715 | } | 716 | } |
716 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 717 | dbg("%s: SLOTCTRL %x write cmd %x\n", |
717 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 718 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
722 | * removed from the slot/adapter. | 723 | * removed from the slot/adapter. |
723 | */ | 724 | */ |
724 | msleep(1000); | 725 | msleep(1000); |
725 | 726 | out: | |
726 | if (changed) | 727 | if (changed) |
727 | pcie_unmask_bad_dllp(ctrl); | 728 | pcie_unmask_bad_dllp(ctrl); |
728 | 729 | ||
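Editor's note: the pciehp_hpc.c change is the usual single-exit-path fix. An early return was skipping the pcie_unmask_bad_dllp() cleanup, so the error path now jumps to the out: label instead. The general shape, sketched with placeholder helpers rather than the driver's functions:

#include <stdio.h>

static int do_step(int fail)	{ return fail ? -1 : 0; }
static void undo_setup(void)	{ printf("cleanup runs on every path\n"); }

/* Error and success paths both fall through 'out', so the cleanup
 * cannot be skipped by an early return. */
static int power_off(int fail)
{
	int retval;

	retval = do_step(fail);
	if (retval) {
		fprintf(stderr, "write command failed\n");
		goto out;
	}
	/* ... normal-path work ... */
out:
	undo_setup();
	return retval;
}

int main(void)
{
	power_off(0);
	power_off(1);
	return 0;
}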
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index dd50713966d1..9372a840b63d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
167 | } | 167 | } |
168 | } | 168 | } |
169 | 169 | ||
170 | static int pciehp_add_bridge(struct pci_dev *dev) | 170 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
171 | { | 171 | { |
172 | struct pci_bus *parent = dev->bus; | 172 | struct pci_bus *parent = dev->bus; |
173 | int pass, busnr, start = parent->secondary; | 173 | int pass, busnr, start = parent->secondary; |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 0a6b25ef194c..a69a21520895 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | int shpchp_configure_device(struct slot *p_slot) | 99 | int __ref shpchp_configure_device(struct slot *p_slot) |
100 | { | 100 | { |
101 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4d23b9fb551b..2db2e4bb0d1e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | void pci_read_bridge_bases(struct pci_bus *child) | 289 | void __devinit pci_read_bridge_bases(struct pci_bus *child) |
290 | { | 290 | { |
291 | struct pci_dev *dev = child->self; | 291 | struct pci_dev *dev = child->self; |
292 | u8 io_base_lo, io_limit_lo; | 292 | u8 io_base_lo, io_limit_lo; |
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) | |||
472 | * them, we proceed to assigning numbers to the remaining buses in | 472 | * them, we proceed to assigning numbers to the remaining buses in |
473 | * order to avoid overlaps between old and new bus numbers. | 473 | * order to avoid overlaps between old and new bus numbers. |
474 | */ | 474 | */ |
475 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) | 475 | int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) |
476 | { | 476 | { |
477 | struct pci_bus *child; | 477 | struct pci_bus *child; |
478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1008 | return nr; | 1008 | return nr; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | unsigned int pci_scan_child_bus(struct pci_bus *bus) | 1011 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1012 | { | 1012 | { |
1013 | unsigned int devfn, pass, max = bus->secondary; | 1013 | unsigned int devfn, pass, max = bus->secondary; |
1014 | struct pci_dev *dev; | 1014 | struct pci_dev *dev; |
@@ -1116,7 +1116,7 @@ err_out: | |||
1116 | return NULL; | 1116 | return NULL; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | struct pci_bus *pci_scan_bus_parented(struct device *parent, | 1119 | struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, |
1120 | int bus, struct pci_ops *ops, void *sysdata) | 1120 | int bus, struct pci_ops *ops, void *sysdata) |
1121 | { | 1121 | { |
1122 | struct pci_bus *b; | 1122 | struct pci_bus *b; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbad4a9f264f..e9a333d98552 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1652 | pci_write_config_byte(dev, 0x75, 0x1); | 1652 | pci_write_config_byte(dev, 0x75, 0x1); |
1653 | pci_write_config_byte(dev, 0x77, 0x0); | 1653 | pci_write_config_byte(dev, 0x77, 0x0); |
1654 | 1654 | ||
1655 | printk(KERN_INFO | 1655 | dev_info(&dev->dev, |
1656 | "PCI: VIA CX700 PCI parking/caching fixup on %s\n", | 1656 | "Disabling VIA CX700 PCI parking/caching\n"); |
1657 | pci_name(dev)); | ||
1658 | } | 1657 | } |
1659 | } | 1658 | } |
1660 | } | 1659 | } |
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2 | |||
1726 | quirk_msi_ht_cap); | 1725 | quirk_msi_ht_cap); |
1727 | 1726 | ||
1728 | 1727 | ||
1729 | /* | ||
1730 | * Force enable MSI mapping capability on HT bridges | ||
1731 | */ | ||
1732 | static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev) | ||
1733 | { | ||
1734 | int pos, ttl = 48; | ||
1735 | |||
1736 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); | ||
1737 | while (pos && ttl--) { | ||
1738 | u8 flags; | ||
1739 | |||
1740 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { | ||
1741 | printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n", | ||
1742 | pci_name(dev)); | ||
1743 | |||
1744 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | ||
1745 | flags | HT_MSI_FLAGS_ENABLE); | ||
1746 | } | ||
1747 | pos = pci_find_next_ht_capability(dev, pos, | ||
1748 | HT_CAPTYPE_MSI_MAPPING); | ||
1749 | } | ||
1750 | } | ||
1751 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1752 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1753 | quirk_msi_ht_cap_enable); | ||
1754 | |||
1755 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. | 1728 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. |
1756 | * MSI are supported if the MSI capability set in any of these mappings. | 1729 | * MSI are supported if the MSI capability set in any of these mappings. |
1757 | */ | 1730 | */ |
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) | |||
1778 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | 1751 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, |
1779 | quirk_nvidia_ck804_msi_ht_cap); | 1752 | quirk_nvidia_ck804_msi_ht_cap); |
1780 | 1753 | ||
1781 | /* | 1754 | /* Force enable MSI mapping capability on HT bridges */ |
1782 | * Force enable MSI mapping capability on HT bridges */ | 1755 | static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) |
1783 | static inline void ht_enable_msi_mapping(struct pci_dev *dev) | ||
1784 | { | 1756 | { |
1785 | int pos, ttl = 48; | 1757 | int pos, ttl = 48; |
1786 | 1758 | ||
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev) | |||
1799 | HT_CAPTYPE_MSI_MAPPING); | 1771 | HT_CAPTYPE_MSI_MAPPING); |
1800 | } | 1772 | } |
1801 | } | 1773 | } |
1774 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1775 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1776 | ht_enable_msi_mapping); | ||
1802 | 1777 | ||
1803 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | 1778 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) |
1804 | { | 1779 | { |
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | |||
1830 | 1805 | ||
1831 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, | 1806 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, |
1832 | &flags) == 0) { | 1807 | &flags) == 0) { |
1833 | dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); | 1808 | dev_info(&dev->dev, "Disabling HT MSI mapping"); |
1834 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | 1809 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, |
1835 | flags & ~HT_MSI_FLAGS_ENABLE); | 1810 | flags & ~HT_MSI_FLAGS_ENABLE); |
1836 | } | 1811 | } |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index a98b2470b9ea..bd5c0e031398 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev) | |||
242 | #endif /* 0 */ | 242 | #endif /* 0 */ |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * pci_cleanup_rom - internal routine for freeing the ROM copy created | 245 | * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy |
246 | * by pci_map_rom_copy called from remove.c | ||
247 | * @pdev: pointer to pci device struct | 246 | * @pdev: pointer to pci device struct |
248 | * | 247 | * |
249 | * Free the copied ROM if we allocated one. | 248 | * Free the copied ROM if we allocated one. |
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 5480119ff9d3..3ce9f3defc12 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO | 81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure |
82 | * device id structure | ||
83 | * @id: the RIO device id structure to match against | 82 | * @id: the RIO device id structure to match against |
84 | * @dev: the RIO device structure to match against | 83 | * @dev: the RIO device structure to match against |
85 | * | 84 | * |
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev) | |||
137 | * rio_register_driver - register a new RIO driver | 136 | * rio_register_driver - register a new RIO driver |
138 | * @rdrv: the RIO driver structure to register | 137 | * @rdrv: the RIO driver structure to register |
139 | * | 138 | * |
140 | * Adds a &struct rio_driver to the list of registered drivers | 139 | * Adds a &struct rio_driver to the list of registered drivers. |
141 | * Returns a negative value on error, otherwise 0. If no error | 140 | * Returns a negative value on error, otherwise 0. If no error |
142 | * occurred, the driver remains registered even if no device | 141 | * occurred, the driver remains registered even if no device |
143 | * was claimed during registration. | 142 | * was claimed during registration. |
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv) | |||
167 | } | 166 | } |
168 | 167 | ||
169 | /** | 168 | /** |
170 | * rio_match_bus - Tell if a RIO device structure has a matching RIO | 169 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure |
171 | * driver device id structure | ||
172 | * @dev: the standard device structure to match against | 170 | * @dev: the standard device structure to match against |
173 | * @drv: the standard driver structure containing the ids to match against | 171 | * @drv: the standard driver structure containing the ids to match against |
174 | * | 172 | * |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6402d699072b..82f5ad9c3af4 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -250,6 +250,15 @@ config RTC_DRV_TWL92330 | |||
250 | platforms. The support is integrated with the rest of | 250 | platforms. The support is integrated with the rest of |
251 | the Menelaus driver; it's not separate module. | 251 | the Menelaus driver; it's not separate module. |
252 | 252 | ||
253 | config RTC_DRV_S35390A | ||
254 | tristate "Seiko Instruments S-35390A" | ||
255 | help | ||
256 | If you say yes here you will get support for the Seiko | ||
257 | Instruments S-35390A. | ||
258 | |||
259 | This driver can also be built as a module. If so the module | ||
260 | will be called rtc-s35390a. | ||
261 | |||
253 | endif # I2C | 262 | endif # I2C |
254 | 263 | ||
255 | comment "SPI RTC drivers" | 264 | comment "SPI RTC drivers" |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ec703f34ab86..872f1218ff9f 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o | |||
45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o | 45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o |
46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o | 46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o |
47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o | 47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o |
48 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o | ||
48 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o | 49 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o |
49 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | 50 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o |
50 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 51 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c new file mode 100644 index 000000000000..e8abc90c32c5 --- /dev/null +++ b/drivers/rtc/rtc-s35390a.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Seiko Instruments S-35390A RTC Driver | ||
3 | * | ||
4 | * Copyright (c) 2007 Byron Bradley | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/rtc.h> | ||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/bitrev.h> | ||
16 | #include <linux/bcd.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #define S35390A_CMD_STATUS1 0 | ||
20 | #define S35390A_CMD_STATUS2 1 | ||
21 | #define S35390A_CMD_TIME1 2 | ||
22 | |||
23 | #define S35390A_BYTE_YEAR 0 | ||
24 | #define S35390A_BYTE_MONTH 1 | ||
25 | #define S35390A_BYTE_DAY 2 | ||
26 | #define S35390A_BYTE_WDAY 3 | ||
27 | #define S35390A_BYTE_HOURS 4 | ||
28 | #define S35390A_BYTE_MINS 5 | ||
29 | #define S35390A_BYTE_SECS 6 | ||
30 | |||
31 | #define S35390A_FLAG_POC 0x01 | ||
32 | #define S35390A_FLAG_BLD 0x02 | ||
33 | #define S35390A_FLAG_24H 0x40 | ||
34 | #define S35390A_FLAG_RESET 0x80 | ||
35 | #define S35390A_FLAG_TEST 0x01 | ||
36 | |||
37 | struct s35390a { | ||
38 | struct i2c_client *client[8]; | ||
39 | struct rtc_device *rtc; | ||
40 | int twentyfourhour; | ||
41 | }; | ||
42 | |||
43 | static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
44 | { | ||
45 | struct i2c_client *client = s35390a->client[reg]; | ||
46 | struct i2c_msg msg[] = { | ||
47 | { client->addr, 0, len, buf }, | ||
48 | }; | ||
49 | |||
50 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
51 | return -EIO; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
57 | { | ||
58 | struct i2c_client *client = s35390a->client[reg]; | ||
59 | struct i2c_msg msg[] = { | ||
60 | { client->addr, I2C_M_RD, len, buf }, | ||
61 | }; | ||
62 | |||
63 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
64 | return -EIO; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int s35390a_reset(struct s35390a *s35390a) | ||
70 | { | ||
71 | char buf[1]; | ||
72 | |||
73 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) | ||
74 | return -EIO; | ||
75 | |||
76 | if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) | ||
77 | return 0; | ||
78 | |||
79 | buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); | ||
80 | buf[0] &= 0xf0; | ||
81 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
82 | } | ||
83 | |||
84 | static int s35390a_disable_test_mode(struct s35390a *s35390a) | ||
85 | { | ||
86 | char buf[1]; | ||
87 | |||
88 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) | ||
89 | return -EIO; | ||
90 | |||
91 | if (!(buf[0] & S35390A_FLAG_TEST)) | ||
92 | return 0; | ||
93 | |||
94 | buf[0] &= ~S35390A_FLAG_TEST; | ||
95 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); | ||
96 | } | ||
97 | |||
98 | static char s35390a_hr2reg(struct s35390a *s35390a, int hour) | ||
99 | { | ||
100 | if (s35390a->twentyfourhour) | ||
101 | return BIN2BCD(hour); | ||
102 | |||
103 | if (hour < 12) | ||
104 | return BIN2BCD(hour); | ||
105 | |||
106 | return 0x40 | BIN2BCD(hour - 12); | ||
107 | } | ||
108 | |||
109 | static int s35390a_reg2hr(struct s35390a *s35390a, char reg) | ||
110 | { | ||
111 | unsigned hour; | ||
112 | |||
113 | if (s35390a->twentyfourhour) | ||
114 | return BCD2BIN(reg & 0x3f); | ||
115 | |||
116 | hour = BCD2BIN(reg & 0x3f); | ||
117 | if (reg & 0x40) | ||
118 | hour += 12; | ||
119 | |||
120 | return hour; | ||
121 | } | ||
122 | |||
123 | static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
124 | { | ||
125 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
126 | int i, err; | ||
127 | char buf[7]; | ||
128 | |||
129 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " | ||
130 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
131 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
132 | tm->tm_wday); | ||
133 | |||
134 | buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100); | ||
135 | buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1); | ||
136 | buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday); | ||
137 | buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday); | ||
138 | buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); | ||
139 | buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min); | ||
140 | buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec); | ||
141 | |||
142 | /* This chip expects the bits of each byte to be in reverse order */ | ||
143 | for (i = 0; i < 7; ++i) | ||
144 | buf[i] = bitrev8(buf[i]); | ||
145 | |||
146 | err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
147 | |||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
152 | { | ||
153 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
154 | char buf[7]; | ||
155 | int i, err; | ||
156 | |||
157 | err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | |||
161 | /* This chip returns the bits of each byte in reverse order */ | ||
162 | for (i = 0; i < 7; ++i) | ||
163 | buf[i] = bitrev8(buf[i]); | ||
164 | |||
165 | tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]); | ||
166 | tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]); | ||
167 | tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); | ||
168 | tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]); | ||
169 | tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]); | ||
170 | tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1; | ||
171 | tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100; | ||
172 | |||
173 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " | ||
174 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
175 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
176 | tm->tm_wday); | ||
177 | |||
178 | return rtc_valid_tm(tm); | ||
179 | } | ||
180 | |||
181 | static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
182 | { | ||
183 | return s35390a_get_datetime(to_i2c_client(dev), tm); | ||
184 | } | ||
185 | |||
186 | static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
187 | { | ||
188 | return s35390a_set_datetime(to_i2c_client(dev), tm); | ||
189 | } | ||
190 | |||
191 | static const struct rtc_class_ops s35390a_rtc_ops = { | ||
192 | .read_time = s35390a_rtc_read_time, | ||
193 | .set_time = s35390a_rtc_set_time, | ||
194 | }; | ||
195 | |||
196 | static struct i2c_driver s35390a_driver; | ||
197 | |||
198 | static int s35390a_probe(struct i2c_client *client) | ||
199 | { | ||
200 | int err; | ||
201 | unsigned int i; | ||
202 | struct s35390a *s35390a; | ||
203 | struct rtc_time tm; | ||
204 | char buf[1]; | ||
205 | |||
206 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
207 | err = -ENODEV; | ||
208 | goto exit; | ||
209 | } | ||
210 | |||
211 | s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); | ||
212 | if (!s35390a) { | ||
213 | err = -ENOMEM; | ||
214 | goto exit; | ||
215 | } | ||
216 | |||
217 | s35390a->client[0] = client; | ||
218 | i2c_set_clientdata(client, s35390a); | ||
219 | |||
220 | /* This chip uses multiple addresses, use dummy devices for them */ | ||
221 | for (i = 1; i < 8; ++i) { | ||
222 | s35390a->client[i] = i2c_new_dummy(client->adapter, | ||
223 | client->addr + i, "rtc-s35390a"); | ||
224 | if (!s35390a->client[i]) { | ||
225 | dev_err(&client->dev, "Address %02x unavailable\n", | ||
226 | client->addr + i); | ||
227 | err = -EBUSY; | ||
228 | goto exit_dummy; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | err = s35390a_reset(s35390a); | ||
233 | if (err < 0) { | ||
234 | dev_err(&client->dev, "error resetting chip\n"); | ||
235 | goto exit_dummy; | ||
236 | } | ||
237 | |||
238 | err = s35390a_disable_test_mode(s35390a); | ||
239 | if (err < 0) { | ||
240 | dev_err(&client->dev, "error disabling test mode\n"); | ||
241 | goto exit_dummy; | ||
242 | } | ||
243 | |||
244 | err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
245 | if (err < 0) { | ||
246 | dev_err(&client->dev, "error checking 12/24 hour mode\n"); | ||
247 | goto exit_dummy; | ||
248 | } | ||
249 | if (buf[0] & S35390A_FLAG_24H) | ||
250 | s35390a->twentyfourhour = 1; | ||
251 | else | ||
252 | s35390a->twentyfourhour = 0; | ||
253 | |||
254 | if (s35390a_get_datetime(client, &tm) < 0) | ||
255 | dev_warn(&client->dev, "clock needs to be set\n"); | ||
256 | |||
257 | s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, | ||
258 | &client->dev, &s35390a_rtc_ops, THIS_MODULE); | ||
259 | |||
260 | if (IS_ERR(s35390a->rtc)) { | ||
261 | err = PTR_ERR(s35390a->rtc); | ||
262 | goto exit_dummy; | ||
263 | } | ||
264 | return 0; | ||
265 | |||
266 | exit_dummy: | ||
267 | for (i = 1; i < 8; ++i) | ||
268 | if (s35390a->client[i]) | ||
269 | i2c_unregister_device(s35390a->client[i]); | ||
270 | kfree(s35390a); | ||
271 | i2c_set_clientdata(client, NULL); | ||
272 | |||
273 | exit: | ||
274 | return err; | ||
275 | } | ||
276 | |||
277 | static int s35390a_remove(struct i2c_client *client) | ||
278 | { | ||
279 | unsigned int i; | ||
280 | |||
281 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
282 | for (i = 1; i < 8; ++i) | ||
283 | if (s35390a->client[i]) | ||
284 | i2c_unregister_device(s35390a->client[i]); | ||
285 | |||
286 | rtc_device_unregister(s35390a->rtc); | ||
287 | kfree(s35390a); | ||
288 | i2c_set_clientdata(client, NULL); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static struct i2c_driver s35390a_driver = { | ||
294 | .driver = { | ||
295 | .name = "rtc-s35390a", | ||
296 | }, | ||
297 | .probe = s35390a_probe, | ||
298 | .remove = s35390a_remove, | ||
299 | }; | ||
300 | |||
301 | static int __init s35390a_rtc_init(void) | ||
302 | { | ||
303 | return i2c_add_driver(&s35390a_driver); | ||
304 | } | ||
305 | |||
306 | static void __exit s35390a_rtc_exit(void) | ||
307 | { | ||
308 | i2c_del_driver(&s35390a_driver); | ||
309 | } | ||
310 | |||
311 | MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); | ||
312 | MODULE_DESCRIPTION("S35390A RTC driver"); | ||
313 | MODULE_LICENSE("GPL"); | ||
314 | |||
315 | module_init(s35390a_rtc_init); | ||
316 | module_exit(s35390a_rtc_exit); | ||
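
The time path in this new driver undoes two encodings on every byte: the S-35390A shifts data out LSB first, so each byte arrives bit-reversed relative to the register layout (hence the bitrev8() pass), and the payload itself is BCD (hence BCD2BIN()/BIN2BCD()). A stand-alone sketch of the decode, with rev8() and bcd2bin() standing in for the kernel helpers and the sample value invented:

    #include <stdio.h>

    /* mirror of the kernel's bitrev8(): reverse the bits of one byte */
    static unsigned char rev8(unsigned char b)
    {
        b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
        b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
        b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
        return b;
    }

    /* mirror of BCD2BIN(): 0x59 -> 59 */
    static int bcd2bin(unsigned char v)
    {
        return (v >> 4) * 10 + (v & 0x0F);
    }

    int main(void)
    {
        unsigned char on_wire = rev8(0x59);     /* what the chip sends for 59 seconds */

        printf("seconds = %d\n", bcd2bin(rev8(on_wire)));   /* prints 59 */
        return 0;
    }

The 12-hour handling sits on top of this: after the bit reversal, bit 0x40 of the hour byte marks PM, which is why s35390a_reg2hr() masks with 0x3f before the BCD conversion and then adds 12.
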
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 389346cda6c8..07c7f31081bc 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = { | |||
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kbdiacruc accent_table[MAX_DIACR] = { | 153 | struct kbdiacruc accent_table[MAX_DIACR] = { |
154 | {'^', 'c', '\003'}, {'^', 'd', '\004'}, | 154 | {'^', 'c', 0003}, {'^', 'd', 0004}, |
155 | {'^', 'z', '\032'}, {'^', '\012', '\000'}, | 155 | {'^', 'z', 0032}, {'^', 0012, 0000}, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | unsigned int accent_table_size = 4; | 158 | unsigned int accent_table_size = 4; |
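
The accent_table here stores its results in struct kbdiacruc, whose result field is an unsigned int intended to hold a code point, so the entries are now written as plain octal integers rather than character escapes. For values below 0200 nothing changes numerically, but the integer spelling also sidesteps a classic C pitfall with larger Latin-1 entries: a character constant such as '\300' is negative wherever plain char is signed and widens to a huge unsigned value on assignment. A small hosted-C illustration (the '\300' value is just an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int from_escape = '\300';  /* -64 where char is signed -> 4294967232 */
        unsigned int from_octal  = 0300;    /* always 192 */

        printf("%u vs %u\n", from_escape, from_octal);
        return 0;
    }
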
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fecba05b4e77..e5c6f6af8765 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
757 | "Notifying upper driver of completion " | 757 | "Notifying upper driver of completion " |
758 | "(result %x)\n", cmd->result)); | 758 | "(result %x)\n", cmd->result)); |
759 | 759 | ||
760 | good_bytes = scsi_bufflen(cmd); | 760 | good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; |
761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { | 761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
762 | drv = scsi_cmd_to_driver(cmd); | 762 | drv = scsi_cmd_to_driver(cmd); |
763 | if (drv->done) | 763 | if (drv->done) |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1dc165ad17fb..e67c14e31bab 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | /** | 1579 | /** |
1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the | 1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the target. |
1581 | * target. | ||
1582 | * @parent: host to scan | 1581 | * @parent: host to scan |
1583 | * @channel: channel to scan | 1582 | * @channel: channel to scan |
1584 | * @id: target id to scan | 1583 | * @id: target id to scan |
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6f09cbd7fc48..97c68d021d28 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c | |||
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = { | |||
91 | /* Archtek America Corp. */ | 91 | /* Archtek America Corp. */ |
92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ | 92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ |
93 | { "GVC000F", 0 }, | 93 | { "GVC000F", 0 }, |
94 | /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ | ||
95 | { "GVC0303", 0 }, | ||
94 | /* Hayes */ | 96 | /* Hayes */ |
95 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ | 97 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ |
96 | { "HAY0001", 0 }, | 98 | { "HAY0001", 0 }, |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b82595cf13e8..cf627cd1b4c8 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -686,7 +686,7 @@ config UART0_RTS_PIN | |||
686 | 686 | ||
687 | config SERIAL_BFIN_UART1 | 687 | config SERIAL_BFIN_UART1 |
688 | bool "Enable UART1" | 688 | bool "Enable UART1" |
689 | depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) | 689 | depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561) |
690 | help | 690 | help |
691 | Enable UART1 | 691 | Enable UART1 |
692 | 692 | ||
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS | |||
699 | 699 | ||
700 | config UART1_CTS_PIN | 700 | config UART1_CTS_PIN |
701 | int "UART1 CTS pin" | 701 | int "UART1 CTS pin" |
702 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 702 | depends on BFIN_UART1_CTSRTS && !BF54x |
703 | default -1 | 703 | default -1 |
704 | help | 704 | help |
705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
706 | 706 | ||
707 | config UART1_RTS_PIN | 707 | config UART1_RTS_PIN |
708 | int "UART1 RTS pin" | 708 | int "UART1 RTS pin" |
709 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 709 | depends on BFIN_UART1_CTSRTS && !BF54x |
710 | default -1 | 710 | default -1 |
711 | help | 711 | help |
712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index ac2a3ef28d55..0aa345b9a38b 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -1,30 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/serial/bfin_5xx.c | 2 | * Blackfin On-Chip Serial Driver |
3 | * Based on: Based on drivers/serial/sa1100.c | ||
4 | * Author: Aubrey Li <aubrey.li@analog.com> | ||
5 | * | 3 | * |
6 | * Created: | 4 | * Copyright 2006-2007 Analog Devices Inc. |
7 | * Description: Driver for blackfin 5xx serial ports | ||
8 | * | 5 | * |
9 | * Modified: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
10 | * Copyright 2006 Analog Devices Inc. | ||
11 | * | 7 | * |
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 8 | * Licensed under the GPL-2 or later. |
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | 9 | */ |
29 | 10 | ||
30 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 11 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
@@ -67,14 +48,12 @@ | |||
67 | #define DMA_RX_XCOUNT 512 | 48 | #define DMA_RX_XCOUNT 512 |
68 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) | 49 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) |
69 | 50 | ||
70 | #define DMA_RX_FLUSH_JIFFIES 5 | 51 | #define DMA_RX_FLUSH_JIFFIES (HZ / 50) |
71 | 52 | ||
72 | #ifdef CONFIG_SERIAL_BFIN_DMA | 53 | #ifdef CONFIG_SERIAL_BFIN_DMA |
73 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); | 54 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); |
74 | #else | 55 | #else |
75 | static void bfin_serial_do_work(struct work_struct *work); | ||
76 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); | 56 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); |
77 | static void local_put_char(struct bfin_serial_port *uart, char ch); | ||
78 | #endif | 57 | #endif |
79 | 58 | ||
80 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | 59 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); |
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | |||
85 | static void bfin_serial_stop_tx(struct uart_port *port) | 64 | static void bfin_serial_stop_tx(struct uart_port *port) |
86 | { | 65 | { |
87 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 66 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
67 | struct circ_buf *xmit = &uart->port.info->xmit; | ||
68 | #if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) | ||
69 | unsigned short ier; | ||
70 | #endif | ||
88 | 71 | ||
89 | while (!(UART_GET_LSR(uart) & TEMT)) | 72 | while (!(UART_GET_LSR(uart) & TEMT)) |
90 | continue; | 73 | cpu_relax(); |
91 | 74 | ||
92 | #ifdef CONFIG_SERIAL_BFIN_DMA | 75 | #ifdef CONFIG_SERIAL_BFIN_DMA |
93 | disable_dma(uart->tx_dma_channel); | 76 | disable_dma(uart->tx_dma_channel); |
77 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); | ||
78 | uart->port.icount.tx += uart->tx_count; | ||
79 | uart->tx_count = 0; | ||
80 | uart->tx_done = 1; | ||
94 | #else | 81 | #else |
95 | #ifdef CONFIG_BF54x | 82 | #ifdef CONFIG_BF54x |
96 | /* Waiting for Transmission Finished */ | ||
97 | while (!(UART_GET_LSR(uart) & TFI)) | ||
98 | continue; | ||
99 | /* Clear TFI bit */ | 83 | /* Clear TFI bit */ |
100 | UART_PUT_LSR(uart, TFI); | 84 | UART_PUT_LSR(uart, TFI); |
101 | UART_CLEAR_IER(uart, ETBEI); | 85 | UART_CLEAR_IER(uart, ETBEI); |
102 | #else | 86 | #else |
103 | unsigned short ier; | ||
104 | |||
105 | ier = UART_GET_IER(uart); | 87 | ier = UART_GET_IER(uart); |
106 | ier &= ~ETBEI; | 88 | ier &= ~ETBEI; |
107 | UART_PUT_IER(uart, ier); | 89 | UART_PUT_IER(uart, ier); |
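
The DMA branch of stop_tx now settles the bookkeeping for data it had already handed to the DMA engine: the circular-buffer tail is advanced by tx_count, icount.tx is credited, and tx_done is set so that start_tx may queue the next transfer. The tail update relies on UART_XMIT_SIZE being a power of two (it is defined as PAGE_SIZE), so the mask is a cheap modulo; a tiny sketch of the wrap-around, assuming a 4 KiB page:

    #include <stdio.h>

    #define UART_XMIT_SIZE 4096     /* power of two: (x & (SIZE - 1)) == x % SIZE */

    int main(void)
    {
        unsigned int tail = 4090, tx_count = 10;

        tail = (tail + tx_count) & (UART_XMIT_SIZE - 1);
        printf("tail = %u\n", tail);    /* 4100 % 4096 = 4 */
        return 0;
    }
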
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port) | |||
117 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 99 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
118 | 100 | ||
119 | #ifdef CONFIG_SERIAL_BFIN_DMA | 101 | #ifdef CONFIG_SERIAL_BFIN_DMA |
120 | bfin_serial_dma_tx_chars(uart); | 102 | if (uart->tx_done) |
103 | bfin_serial_dma_tx_chars(uart); | ||
121 | #else | 104 | #else |
122 | #ifdef CONFIG_BF54x | 105 | #ifdef CONFIG_BF54x |
123 | UART_SET_IER(uart, ETBEI); | 106 | UART_SET_IER(uart, ETBEI); |
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void) | |||
209 | } | 192 | } |
210 | #endif | 193 | #endif |
211 | 194 | ||
212 | #ifdef CONFIG_SERIAL_BFIN_PIO | 195 | #if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO) |
213 | static void local_put_char(struct bfin_serial_port *uart, char ch) | 196 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) |
214 | { | 197 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) |
215 | unsigned short status; | 198 | #else |
216 | int flags = 0; | 199 | # define UART_GET_ANOMALY_THRESHOLD(uart) 0 |
217 | 200 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) | |
218 | spin_lock_irqsave(&uart->port.lock, flags); | 201 | #endif |
219 | |||
220 | do { | ||
221 | status = UART_GET_LSR(uart); | ||
222 | } while (!(status & THRE)); | ||
223 | |||
224 | UART_PUT_CHAR(uart, ch); | ||
225 | SSYNC(); | ||
226 | |||
227 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
228 | } | ||
229 | 202 | ||
203 | #ifdef CONFIG_SERIAL_BFIN_PIO | ||
230 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | 204 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) |
231 | { | 205 | { |
232 | struct tty_struct *tty = uart->port.info->tty; | 206 | struct tty_struct *tty = uart->port.info->tty; |
233 | unsigned int status, ch, flg; | 207 | unsigned int status, ch, flg; |
234 | static int in_break = 0; | 208 | static struct timeval anomaly_start = { .tv_sec = 0 }; |
235 | #ifdef CONFIG_KGDB_UART | 209 | #ifdef CONFIG_KGDB_UART |
236 | struct pt_regs *regs = get_irq_regs(); | 210 | struct pt_regs *regs = get_irq_regs(); |
237 | #endif | 211 | #endif |
238 | 212 | ||
239 | status = UART_GET_LSR(uart); | 213 | status = UART_GET_LSR(uart); |
214 | UART_CLEAR_LSR(uart); | ||
215 | |||
240 | ch = UART_GET_CHAR(uart); | 216 | ch = UART_GET_CHAR(uart); |
241 | uart->port.icount.rx++; | 217 | uart->port.icount.rx++; |
242 | 218 | ||
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
262 | #endif | 238 | #endif |
263 | 239 | ||
264 | if (ANOMALY_05000230) { | 240 | if (ANOMALY_05000230) { |
265 | /* The BF533 family of processors have a nice misbehavior where | 241 | /* The BF533 (and BF561) family of processors have a nice anomaly |
266 | * they continuously generate characters for a "single" break. | 242 | * where they continuously generate characters for a "single" break. |
267 | * We have to basically ignore this flood until the "next" valid | 243 | * We have to basically ignore this flood until the "next" valid |
268 | * character comes across. All other Blackfin families operate | 244 | * character comes across. Due to the nature of the flood, it is |
269 | * properly though. | 245 | * not possible to reliably catch bytes that are sent too quickly |
246 | * after this break. So application code talking to the Blackfin | ||
247 | * which sends a break signal must allow at least 1.5 character | ||
248 | * times after the end of the break for things to stabilize. This | ||
249 | * timeout was picked as it must absolutely be larger than 1 | ||
250 | * character time +/- some percent. So 1.5 sounds good. All other | ||
251 | * Blackfin families operate properly. Woo. | ||
270 | * Note: While Anomaly 05000230 does not directly address this, | 252 | * Note: While Anomaly 05000230 does not directly address this, |
271 | * the changes that went in for it also fixed this issue. | 253 | * the changes that went in for it also fixed this issue. |
254 | * That anomaly was fixed in 0.5+ silicon. I like bunnies. | ||
272 | */ | 255 | */ |
273 | if (in_break) { | 256 | if (anomaly_start.tv_sec) { |
274 | if (ch != 0) { | 257 | struct timeval curr; |
275 | in_break = 0; | 258 | suseconds_t usecs; |
276 | ch = UART_GET_CHAR(uart); | 259 | |
277 | if (bfin_revid() < 5) | 260 | if ((~ch & (~ch + 1)) & 0xff) |
278 | return; | 261 | goto known_good_char; |
279 | } else | 262 | |
280 | return; | 263 | do_gettimeofday(&curr); |
264 | if (curr.tv_sec - anomaly_start.tv_sec > 1) | ||
265 | goto known_good_char; | ||
266 | |||
267 | usecs = 0; | ||
268 | if (curr.tv_sec != anomaly_start.tv_sec) | ||
269 | usecs += USEC_PER_SEC; | ||
270 | usecs += curr.tv_usec - anomaly_start.tv_usec; | ||
271 | |||
272 | if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) | ||
273 | goto known_good_char; | ||
274 | |||
275 | if (ch) | ||
276 | anomaly_start.tv_sec = 0; | ||
277 | else | ||
278 | anomaly_start = curr; | ||
279 | |||
280 | return; | ||
281 | |||
282 | known_good_char: | ||
283 | anomaly_start.tv_sec = 0; | ||
281 | } | 284 | } |
282 | } | 285 | } |
283 | 286 | ||
284 | if (status & BI) { | 287 | if (status & BI) { |
285 | if (ANOMALY_05000230) | 288 | if (ANOMALY_05000230) |
286 | in_break = 1; | 289 | if (bfin_revid() < 5) |
290 | do_gettimeofday(&anomaly_start); | ||
287 | uart->port.icount.brk++; | 291 | uart->port.icount.brk++; |
288 | if (uart_handle_break(&uart->port)) | 292 | if (uart_handle_break(&uart->port)) |
289 | goto ignore_char; | 293 | goto ignore_char; |
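
The rewritten anomaly handling stops ignoring the break flood as soon as a byte arrives that a break could not have produced. Since the UART shifts LSB first, a break that ends partway through a frame can only be read back as a byte whose low bits are all 0 and whose high bits are all 1 (0x00, 0x80, 0xc0, ..., 0xfe, 0xff); the test (~ch & (~ch + 1)) & 0xff clears the trailing ones of ~ch, so it is non-zero exactly when ch has a 0 bit above a 1 bit inside the low byte, i.e. when the byte is a "known good" character. A short self-check of that property (hosted C, purely illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int ch;

        for (ch = 0; ch < 256; ch++) {
            int maybe_break = !((~ch & (~ch + 1)) & 0xff);

            if (maybe_break)
                printf("0x%02x could be break residue\n", ch);
        }
        /* prints exactly 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff */
        return 0;
    }
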
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
324 | UART_PUT_CHAR(uart, uart->port.x_char); | 328 | UART_PUT_CHAR(uart, uart->port.x_char); |
325 | uart->port.icount.tx++; | 329 | uart->port.icount.tx++; |
326 | uart->port.x_char = 0; | 330 | uart->port.x_char = 0; |
327 | return; | ||
328 | } | 331 | } |
329 | /* | 332 | /* |
330 | * Check the modem control lines before | 333 | * Check the modem control lines before |
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
337 | return; | 340 | return; |
338 | } | 341 | } |
339 | 342 | ||
340 | local_put_char(uart, xmit->buf[xmit->tail]); | 343 | while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { |
341 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 344 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); |
342 | uart->port.icount.tx++; | 345 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
346 | uart->port.icount.tx++; | ||
347 | SSYNC(); | ||
348 | } | ||
343 | 349 | ||
344 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 350 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
345 | uart_write_wakeup(&uart->port); | 351 | uart_write_wakeup(&uart->port); |
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) | |||
352 | { | 358 | { |
353 | struct bfin_serial_port *uart = dev_id; | 359 | struct bfin_serial_port *uart = dev_id; |
354 | 360 | ||
355 | #ifdef CONFIG_BF54x | ||
356 | unsigned short status; | ||
357 | spin_lock(&uart->port.lock); | ||
358 | status = UART_GET_LSR(uart); | ||
359 | while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) { | ||
360 | bfin_serial_rx_chars(uart); | ||
361 | status = UART_GET_LSR(uart); | ||
362 | } | ||
363 | spin_unlock(&uart->port.lock); | ||
364 | #else | ||
365 | spin_lock(&uart->port.lock); | 361 | spin_lock(&uart->port.lock); |
366 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) | 362 | while (UART_GET_LSR(uart) & DR) |
367 | bfin_serial_rx_chars(uart); | 363 | bfin_serial_rx_chars(uart); |
368 | spin_unlock(&uart->port.lock); | 364 | spin_unlock(&uart->port.lock); |
369 | #endif | 365 | |
370 | return IRQ_HANDLED; | 366 | return IRQ_HANDLED; |
371 | } | 367 | } |
372 | 368 | ||
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) | |||
374 | { | 370 | { |
375 | struct bfin_serial_port *uart = dev_id; | 371 | struct bfin_serial_port *uart = dev_id; |
376 | 372 | ||
377 | #ifdef CONFIG_BF54x | ||
378 | unsigned short status; | ||
379 | spin_lock(&uart->port.lock); | 373 | spin_lock(&uart->port.lock); |
380 | status = UART_GET_LSR(uart); | 374 | if (UART_GET_LSR(uart) & THRE) |
381 | while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) { | ||
382 | bfin_serial_tx_chars(uart); | 375 | bfin_serial_tx_chars(uart); |
383 | status = UART_GET_LSR(uart); | ||
384 | } | ||
385 | spin_unlock(&uart->port.lock); | 376 | spin_unlock(&uart->port.lock); |
386 | #else | 377 | |
387 | spin_lock(&uart->port.lock); | ||
388 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY) | ||
389 | bfin_serial_tx_chars(uart); | ||
390 | spin_unlock(&uart->port.lock); | ||
391 | #endif | ||
392 | return IRQ_HANDLED; | 378 | return IRQ_HANDLED; |
393 | } | 379 | } |
380 | #endif | ||
394 | 381 | ||
395 | 382 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | |
396 | static void bfin_serial_do_work(struct work_struct *work) | 383 | static void bfin_serial_do_work(struct work_struct *work) |
397 | { | 384 | { |
398 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); | 385 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); |
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
406 | { | 393 | { |
407 | struct circ_buf *xmit = &uart->port.info->xmit; | 394 | struct circ_buf *xmit = &uart->port.info->xmit; |
408 | unsigned short ier; | 395 | unsigned short ier; |
409 | int flags = 0; | ||
410 | |||
411 | if (!uart->tx_done) | ||
412 | return; | ||
413 | 396 | ||
414 | uart->tx_done = 0; | 397 | uart->tx_done = 0; |
415 | 398 | ||
399 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
400 | uart->tx_count = 0; | ||
401 | uart->tx_done = 1; | ||
402 | return; | ||
403 | } | ||
404 | |||
416 | if (uart->port.x_char) { | 405 | if (uart->port.x_char) { |
417 | UART_PUT_CHAR(uart, uart->port.x_char); | 406 | UART_PUT_CHAR(uart, uart->port.x_char); |
418 | uart->port.icount.tx++; | 407 | uart->port.icount.tx++; |
419 | uart->port.x_char = 0; | 408 | uart->port.x_char = 0; |
420 | uart->tx_done = 1; | ||
421 | return; | ||
422 | } | 409 | } |
410 | |||
423 | /* | 411 | /* |
424 | * Check the modem control lines before | 412 | * Check the modem control lines before |
425 | * transmitting anything. | 413 | * transmitting anything. |
426 | */ | 414 | */ |
427 | bfin_serial_mctrl_check(uart); | 415 | bfin_serial_mctrl_check(uart); |
428 | 416 | ||
429 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
430 | bfin_serial_stop_tx(&uart->port); | ||
431 | uart->tx_done = 1; | ||
432 | return; | ||
433 | } | ||
434 | |||
435 | spin_lock_irqsave(&uart->port.lock, flags); | ||
436 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); | 417 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); |
437 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) | 418 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) |
438 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; | 419 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; |
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
448 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); | 429 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); |
449 | set_dma_x_modify(uart->tx_dma_channel, 1); | 430 | set_dma_x_modify(uart->tx_dma_channel, 1); |
450 | enable_dma(uart->tx_dma_channel); | 431 | enable_dma(uart->tx_dma_channel); |
432 | |||
451 | #ifdef CONFIG_BF54x | 433 | #ifdef CONFIG_BF54x |
452 | UART_SET_IER(uart, ETBEI); | 434 | UART_SET_IER(uart, ETBEI); |
453 | #else | 435 | #else |
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
455 | ier |= ETBEI; | 437 | ier |= ETBEI; |
456 | UART_PUT_IER(uart, ier); | 438 | UART_PUT_IER(uart, ier); |
457 | #endif | 439 | #endif |
458 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
459 | } | 440 | } |
460 | 441 | ||
461 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | 442 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) |
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
464 | int i, flg, status; | 445 | int i, flg, status; |
465 | 446 | ||
466 | status = UART_GET_LSR(uart); | 447 | status = UART_GET_LSR(uart); |
467 | uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; | 448 | UART_CLEAR_LSR(uart); |
449 | |||
450 | uart->port.icount.rx += | ||
451 | CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, | ||
452 | UART_XMIT_SIZE); | ||
468 | 453 | ||
469 | if (status & BI) { | 454 | if (status & BI) { |
470 | uart->port.icount.brk++; | 455 | uart->port.icount.brk++; |
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
490 | else | 475 | else |
491 | flg = TTY_NORMAL; | 476 | flg = TTY_NORMAL; |
492 | 477 | ||
493 | for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { | 478 | for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) { |
494 | if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) | 479 | if (i >= UART_XMIT_SIZE) |
495 | goto dma_ignore_char; | 480 | i = 0; |
496 | uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); | 481 | if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) |
482 | uart_insert_char(&uart->port, status, OE, | ||
483 | uart->rx_dma_buf.buf[i], flg); | ||
497 | } | 484 | } |
498 | 485 | ||
499 | dma_ignore_char: | 486 | dma_ignore_char: |
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
503 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | 490 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) |
504 | { | 491 | { |
505 | int x_pos, pos; | 492 | int x_pos, pos; |
506 | int flags = 0; | ||
507 | |||
508 | bfin_serial_dma_tx_chars(uart); | ||
509 | 493 | ||
510 | spin_lock_irqsave(&uart->port.lock, flags); | 494 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); |
511 | x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); | 495 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); |
496 | uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; | ||
497 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) | ||
498 | uart->rx_dma_nrows = 0; | ||
499 | x_pos = DMA_RX_XCOUNT - x_pos; | ||
512 | if (x_pos == DMA_RX_XCOUNT) | 500 | if (x_pos == DMA_RX_XCOUNT) |
513 | x_pos = 0; | 501 | x_pos = 0; |
514 | 502 | ||
515 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; | 503 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; |
516 | 504 | if (pos != uart->rx_dma_buf.tail) { | |
517 | if (pos>uart->rx_dma_buf.tail) { | 505 | uart->rx_dma_buf.head = pos; |
518 | uart->rx_dma_buf.tail = pos; | ||
519 | bfin_serial_dma_rx_chars(uart); | 506 | bfin_serial_dma_rx_chars(uart); |
520 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail; | 507 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
521 | } | 508 | } |
522 | spin_unlock_irqrestore(&uart->port.lock, flags); | 509 | |
523 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; | 510 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; |
524 | add_timer(&(uart->rx_dma_timer)); | 511 | add_timer(&(uart->rx_dma_timer)); |
525 | } | 512 | } |
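
The timeout handler now derives the DMA write position itself instead of calling into the tx path: the current-count registers report how much of the 2D transfer is still outstanding, so subtracting them from DMA_RX_YCOUNT/DMA_RX_XCOUNT gives the rows and bytes already written, and rows * DMA_RX_XCOUNT + x_pos is the head of the valid data in rx_dma_buf. The same arithmetic in isolation (register reads are stubbed with made-up values; DMA_RX_YCOUNT assumes a 4 KiB page):

    #include <stdio.h>

    #define DMA_RX_XCOUNT 512
    #define DMA_RX_YCOUNT 8         /* PAGE_SIZE / DMA_RX_XCOUNT */

    /* stand-ins for get_dma_curr_ycount()/get_dma_curr_xcount() */
    static int curr_ycount = 6;     /* 6 rows still outstanding -> 2 rows filled */
    static int curr_xcount = 500;   /* 500 bytes left in this row -> 12 filled  */

    int main(void)
    {
        int rows  = DMA_RX_YCOUNT - curr_ycount;
        int x_pos = DMA_RX_XCOUNT - curr_xcount;

        if (rows == DMA_RX_YCOUNT)      /* counter just reloaded: wrapped to start */
            rows = 0;
        if (x_pos == DMA_RX_XCOUNT)
            x_pos = 0;

        printf("pos = %d\n", rows * DMA_RX_XCOUNT + x_pos);     /* 2*512 + 12 = 1036 */
        return 0;
    }
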
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
532 | 519 | ||
533 | spin_lock(&uart->port.lock); | 520 | spin_lock(&uart->port.lock); |
534 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { | 521 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { |
535 | clear_dma_irqstat(uart->tx_dma_channel); | ||
536 | disable_dma(uart->tx_dma_channel); | 522 | disable_dma(uart->tx_dma_channel); |
523 | clear_dma_irqstat(uart->tx_dma_channel); | ||
537 | #ifdef CONFIG_BF54x | 524 | #ifdef CONFIG_BF54x |
538 | UART_CLEAR_IER(uart, ETBEI); | 525 | UART_CLEAR_IER(uart, ETBEI); |
539 | #else | 526 | #else |
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
541 | ier &= ~ETBEI; | 528 | ier &= ~ETBEI; |
542 | UART_PUT_IER(uart, ier); | 529 | UART_PUT_IER(uart, ier); |
543 | #endif | 530 | #endif |
544 | xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); | 531 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); |
545 | uart->port.icount.tx+=uart->tx_count; | 532 | uart->port.icount.tx += uart->tx_count; |
546 | 533 | ||
547 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 534 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
548 | uart_write_wakeup(&uart->port); | 535 | uart_write_wakeup(&uart->port); |
549 | 536 | ||
550 | if (uart_circ_empty(xmit)) | 537 | bfin_serial_dma_tx_chars(uart); |
551 | bfin_serial_stop_tx(&uart->port); | ||
552 | uart->tx_done = 1; | ||
553 | } | 538 | } |
554 | 539 | ||
555 | spin_unlock(&uart->port.lock); | 540 | spin_unlock(&uart->port.lock); |
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
561 | struct bfin_serial_port *uart = dev_id; | 546 | struct bfin_serial_port *uart = dev_id; |
562 | unsigned short irqstat; | 547 | unsigned short irqstat; |
563 | 548 | ||
564 | uart->rx_dma_nrows++; | ||
565 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { | ||
566 | uart->rx_dma_nrows = 0; | ||
567 | uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; | ||
568 | bfin_serial_dma_rx_chars(uart); | ||
569 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; | ||
570 | } | ||
571 | spin_lock(&uart->port.lock); | 549 | spin_lock(&uart->port.lock); |
572 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 550 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
573 | clear_dma_irqstat(uart->rx_dma_channel); | 551 | clear_dma_irqstat(uart->rx_dma_channel); |
574 | |||
575 | spin_unlock(&uart->port.lock); | 552 | spin_unlock(&uart->port.lock); |
553 | |||
554 | del_timer(&(uart->rx_dma_timer)); | ||
555 | uart->rx_dma_timer.expires = jiffies; | ||
556 | add_timer(&(uart->rx_dma_timer)); | ||
557 | |||
576 | return IRQ_HANDLED; | 558 | return IRQ_HANDLED; |
577 | } | 559 | } |
578 | #endif | 560 | #endif |
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port) | |||
599 | if (uart->cts_pin < 0) | 581 | if (uart->cts_pin < 0) |
600 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; | 582 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; |
601 | 583 | ||
584 | # ifdef BF54x | ||
585 | if (UART_GET_MSR(uart) & CTS) | ||
586 | # else | ||
602 | if (gpio_get_value(uart->cts_pin)) | 587 | if (gpio_get_value(uart->cts_pin)) |
588 | # endif | ||
603 | return TIOCM_DSR | TIOCM_CAR; | 589 | return TIOCM_DSR | TIOCM_CAR; |
604 | else | 590 | else |
605 | #endif | 591 | #endif |
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
614 | return; | 600 | return; |
615 | 601 | ||
616 | if (mctrl & TIOCM_RTS) | 602 | if (mctrl & TIOCM_RTS) |
603 | # ifdef BF54x | ||
604 | UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS); | ||
605 | # else | ||
617 | gpio_set_value(uart->rts_pin, 0); | 606 | gpio_set_value(uart->rts_pin, 0); |
607 | # endif | ||
618 | else | 608 | else |
609 | # ifdef BF54x | ||
610 | UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS); | ||
611 | # else | ||
619 | gpio_set_value(uart->rts_pin, 1); | 612 | gpio_set_value(uart->rts_pin, 1); |
613 | # endif | ||
620 | #endif | 614 | #endif |
621 | } | 615 | } |
622 | 616 | ||
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) | |||
627 | { | 621 | { |
628 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 622 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
629 | unsigned int status; | 623 | unsigned int status; |
630 | # ifdef CONFIG_SERIAL_BFIN_DMA | ||
631 | struct uart_info *info = uart->port.info; | 624 | struct uart_info *info = uart->port.info; |
632 | struct tty_struct *tty = info->tty; | 625 | struct tty_struct *tty = info->tty; |
633 | 626 | ||
634 | status = bfin_serial_get_mctrl(&uart->port); | 627 | status = bfin_serial_get_mctrl(&uart->port); |
628 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
635 | if (!(status & TIOCM_CTS)) { | 629 | if (!(status & TIOCM_CTS)) { |
636 | tty->hw_stopped = 1; | 630 | tty->hw_stopped = 1; |
631 | schedule_work(&uart->cts_workqueue); | ||
637 | } else { | 632 | } else { |
638 | tty->hw_stopped = 0; | 633 | tty->hw_stopped = 0; |
639 | } | 634 | } |
640 | # else | ||
641 | status = bfin_serial_get_mctrl(&uart->port); | ||
642 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
643 | if (!(status & TIOCM_CTS)) | ||
644 | schedule_work(&uart->cts_workqueue); | ||
645 | # endif | ||
646 | #endif | 635 | #endif |
647 | } | 636 | } |
648 | 637 | ||
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port) | |||
743 | disable_dma(uart->rx_dma_channel); | 732 | disable_dma(uart->rx_dma_channel); |
744 | free_dma(uart->rx_dma_channel); | 733 | free_dma(uart->rx_dma_channel); |
745 | del_timer(&(uart->rx_dma_timer)); | 734 | del_timer(&(uart->rx_dma_timer)); |
735 | dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); | ||
746 | #else | 736 | #else |
747 | #ifdef CONFIG_KGDB_UART | 737 | #ifdef CONFIG_KGDB_UART |
748 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | 738 | if (uart->port.line != CONFIG_KGDB_UART_PORT) |
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
814 | quot = uart_get_divisor(port, baud); | 804 | quot = uart_get_divisor(port, baud); |
815 | spin_lock_irqsave(&uart->port.lock, flags); | 805 | spin_lock_irqsave(&uart->port.lock, flags); |
816 | 806 | ||
807 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); | ||
808 | |||
817 | do { | 809 | do { |
818 | lsr = UART_GET_LSR(uart); | 810 | lsr = UART_GET_LSR(uart); |
819 | } while (!(lsr & TEMT)); | 811 | } while (!(lsr & TEMT)); |
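
The threshold programmed here is the "1.5 character times" promised by the receive-path comment, expressed in microseconds: USEC_PER_SEC / baud is one bit time, a frame is ten bits (start, eight data, stop), so fifteen bit times is one and a half frames. Worked through for one rate (57600 is only an example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int baud = 57600;
        /* one bit lasts USEC_PER_SEC / baud; 15 bit times ~ 1.5 ten-bit frames */
        unsigned int threshold_us = 1000000 / baud * 15;

        printf("%u us\n", threshold_us);    /* 17 * 15 = 255 us */
        return 0;
    }
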
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void) | |||
956 | bfin_serial_ports[i].rx_dma_channel = | 948 | bfin_serial_ports[i].rx_dma_channel = |
957 | bfin_serial_resource[i].uart_rx_dma_channel; | 949 | bfin_serial_resource[i].uart_rx_dma_channel; |
958 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); | 950 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); |
959 | #else | ||
960 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
961 | #endif | 951 | #endif |
962 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 952 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
953 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
963 | bfin_serial_ports[i].cts_pin = | 954 | bfin_serial_ports[i].cts_pin = |
964 | bfin_serial_resource[i].uart_cts_pin; | 955 | bfin_serial_resource[i].uart_cts_pin; |
965 | bfin_serial_ports[i].rts_pin = | 956 | bfin_serial_ports[i].rts_pin = |
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 348ee2c19b58..c2bb11c02bde 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c | |||
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up) | |||
421 | up->port.icount.tx++; | 421 | up->port.icount.tx++; |
422 | if (uart_circ_empty(xmit)) | 422 | if (uart_circ_empty(xmit)) |
423 | break; | 423 | break; |
424 | while (!serial_in(up, UART_LSR) & UART_LSR_THRE); | 424 | while (!(serial_in(up, UART_LSR) & UART_LSR_THRE)); |
425 | 425 | ||
426 | } while (--count > 0); | 426 | } while (--count > 0); |
427 | 427 | ||
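
The one-character fix above addresses an operator-precedence bug: ! binds tighter than &, so the old condition evaluated (!serial_in(...)) & UART_LSR_THRE, which is 0 as soon as the LSR reads non-zero, and the busy-wait never actually waited for the transmit-holding-register-empty bit. A minimal demonstration (hosted C, register value made up; 0x20 is the usual UART_LSR_THRE):

    #include <stdio.h>

    #define UART_LSR_THRE 0x20

    int main(void)
    {
        unsigned int lsr = 0x01;    /* THRE not yet set, but LSR is non-zero */

        printf("old: %d\n", !lsr & UART_LSR_THRE);      /* 0 -> loop exits too early */
        printf("new: %d\n", !(lsr & UART_LSR_THRE));    /* 1 -> keeps waiting        */
        return 0;
    }
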
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 9ce12cb2cebc..a8c116b80bff 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/console.h> | 42 | #include <linux/console.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/serial_sci.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_CPU_FREQ | 46 | #ifdef CONFIG_CPU_FREQ |
46 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
@@ -54,7 +55,6 @@ | |||
54 | #include <asm/kgdb.h> | 55 | #include <asm/kgdb.h> |
55 | #endif | 56 | #endif |
56 | 57 | ||
57 | #include <asm/sci.h> | ||
58 | #include "sh-sci.h" | 58 | #include "sh-sci.h" |
59 | 59 | ||
60 | struct sci_port { | 60 | struct sci_port { |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 9cfcfd8dad5e..617efb1640b1 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Core maple bus functionality | 2 | * Core maple bus functionality |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Adrian McMenamin | 4 | * Copyright (C) 2007, 2008 Adrian McMenamin |
5 | * | 5 | * |
6 | * Based on 2.4 code by: | 6 | * Based on 2.4 code by: |
7 | * | 7 | * |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -54,7 +53,7 @@ static struct device maple_bus; | |||
54 | static int subdevice_map[MAPLE_PORTS]; | 53 | static int subdevice_map[MAPLE_PORTS]; |
55 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; | 54 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; |
56 | static unsigned long maple_pnp_time; | 55 | static unsigned long maple_pnp_time; |
57 | static int started, scanning, liststatus, realscan; | 56 | static int started, scanning, liststatus, fullscan; |
58 | static struct kmem_cache *maple_queue_cache; | 57 | static struct kmem_cache *maple_queue_cache; |
59 | 58 | ||
60 | struct maple_device_specify { | 59 | struct maple_device_specify { |
@@ -62,6 +61,9 @@ struct maple_device_specify { | |||
62 | int unit; | 61 | int unit; |
63 | }; | 62 | }; |
64 | 63 | ||
64 | static bool checked[4]; | ||
65 | static struct maple_device *baseunits[4]; | ||
66 | |||
65 | /** | 67 | /** |
66 | * maple_driver_register - register a device driver | 68 | * maple_driver_register - register a device driver |
67 | * automatically makes the driver bus a maple bus | 69 | * automatically makes the driver bus a maple bus |
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
309 | else | 311 | else |
310 | break; | 312 | break; |
311 | 313 | ||
312 | if (realscan) { | 314 | printk(KERN_INFO "Maple device detected: %s\n", |
313 | printk(KERN_INFO "Maple device detected: %s\n", | 315 | mdev->product_name); |
314 | mdev->product_name); | 316 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); |
315 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); | ||
316 | } | ||
317 | 317 | ||
318 | function = be32_to_cpu(mdev->devinfo.function); | 318 | function = be32_to_cpu(mdev->devinfo.function); |
319 | 319 | ||
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
323 | mdev->driver = &maple_dummy_driver; | 323 | mdev->driver = &maple_dummy_driver; |
324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); | 324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); |
325 | } else { | 325 | } else { |
326 | if (realscan) | 326 | printk(KERN_INFO |
327 | printk(KERN_INFO | 327 | "Maple bus at (%d, %d): Function 0x%lX\n", |
328 | "Maple bus at (%d, %d): Function 0x%lX\n", | 328 | mdev->port, mdev->unit, function); |
329 | mdev->port, mdev->unit, function); | ||
330 | 329 | ||
331 | matched = | 330 | matched = |
332 | bus_for_each_drv(&maple_bus_type, NULL, mdev, | 331 | bus_for_each_drv(&maple_bus_type, NULL, mdev, |
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
334 | 333 | ||
335 | if (matched == 0) { | 334 | if (matched == 0) { |
336 | /* Driver does not exist yet */ | 335 | /* Driver does not exist yet */ |
337 | if (realscan) | 336 | printk(KERN_INFO |
338 | printk(KERN_INFO | 337 | "No maple driver found.\n"); |
339 | "No maple driver found.\n"); | ||
340 | mdev->driver = &maple_dummy_driver; | 338 | mdev->driver = &maple_dummy_driver; |
341 | } | 339 | } |
342 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, | 340 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, |
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev, | |||
472 | maple_detach_driver(mdev); | 470 | maple_detach_driver(mdev); |
473 | return; | 471 | return; |
474 | } | 472 | } |
475 | if (!started) { | 473 | if (!started || !fullscan) { |
476 | printk(KERN_INFO "No maple devices attached to port %d\n", | 474 | if (checked[mdev->port] == false) { |
477 | mdev->port); | 475 | checked[mdev->port] = true; |
476 | printk(KERN_INFO "No maple devices attached" | ||
477 | " to port %d\n", mdev->port); | ||
478 | } | ||
478 | return; | 479 | return; |
479 | } | 480 | } |
480 | maple_clean_submap(mdev); | 481 | maple_clean_submap(mdev); |
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
485 | char *recvbuf) | 486 | char *recvbuf) |
486 | { | 487 | { |
487 | char submask; | 488 | char submask; |
488 | if ((!started) || (scanning == 2)) { | 489 | if (!started || (scanning == 2) || !fullscan) { |
489 | maple_attach_driver(mdev); | 490 | if ((mdev->unit == 0) && (checked[mdev->port] == false)) { |
491 | checked[mdev->port] = true; | ||
492 | maple_attach_driver(mdev); | ||
493 | } else { | ||
494 | if (mdev->unit != 0) | ||
495 | maple_attach_driver(mdev); | ||
496 | } | ||
490 | return; | 497 | return; |
491 | } | 498 | } |
492 | if (mdev->unit == 0) { | 499 | if (mdev->unit == 0) { |
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work) | |||
505 | struct maple_device *dev; | 512 | struct maple_device *dev; |
506 | char *recvbuf; | 513 | char *recvbuf; |
507 | enum maple_code code; | 514 | enum maple_code code; |
515 | int i; | ||
508 | 516 | ||
509 | if (!maple_dma_done()) | 517 | if (!maple_dma_done()) |
510 | return; | 518 | return; |
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work) | |||
557 | } else | 565 | } else |
558 | scanning = 0; | 566 | scanning = 0; |
559 | 567 | ||
568 | if (!fullscan) { | ||
569 | fullscan = 1; | ||
570 | for (i = 0; i < MAPLE_PORTS; i++) { | ||
571 | if (checked[i] == false) { | ||
572 | fullscan = 0; | ||
573 | dev = baseunits[i]; | ||
574 | dev->mq->command = | ||
575 | MAPLE_COMMAND_DEVINFO; | ||
576 | dev->mq->length = 0; | ||
577 | maple_add_packet(dev->mq); | ||
578 | } | ||
579 | } | ||
580 | } | ||
560 | if (started == 0) | 581 | if (started == 0) |
561 | started = 1; | 582 | started = 1; |
562 | } | 583 | } |
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void) | |||
694 | 715 | ||
695 | /* setup maple ports */ | 716 | /* setup maple ports */ |
696 | for (i = 0; i < MAPLE_PORTS; i++) { | 717 | for (i = 0; i < MAPLE_PORTS; i++) { |
718 | checked[i] = false; | ||
697 | mdev[i] = maple_alloc_dev(i, 0); | 719 | mdev[i] = maple_alloc_dev(i, 0); |
720 | baseunits[i] = mdev[i]; | ||
698 | if (!mdev[i]) { | 721 | if (!mdev[i]) { |
699 | while (i-- > 0) | 722 | while (i-- > 0) |
700 | maple_free_dev(mdev[i]); | 723 | maple_free_dev(mdev[i]); |
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void) | |||
703 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; | 726 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; |
704 | mdev[i]->mq->length = 0; | 727 | mdev[i]->mq->length = 0; |
705 | maple_add_packet(mdev[i]->mq); | 728 | maple_add_packet(mdev[i]->mq); |
706 | /* delay aids hardware detection */ | ||
707 | mdelay(5); | ||
708 | subdevice_map[i] = 0; | 729 | subdevice_map[i] = 0; |
709 | } | 730 | } |
710 | 731 | ||
711 | realscan = 1; | ||
712 | /* setup maplebus hardware */ | 732 | /* setup maplebus hardware */ |
713 | maplebus_dma_reset(); | 733 | maplebus_dma_reset(); |
714 | /* initial detection */ | 734 | /* initial detection */ |
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 253ed5682a6d..a86315a0c5b8 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi { | |||
42 | 42 | ||
43 | /* driver internal data */ | 43 | /* driver internal data */ |
44 | struct mpc52xx_psc __iomem *psc; | 44 | struct mpc52xx_psc __iomem *psc; |
45 | struct mpc52xx_psc_fifo __iomem *fifo; | ||
45 | unsigned int irq; | 46 | unsigned int irq; |
46 | u8 bits_per_word; | 47 | u8 bits_per_word; |
47 | u8 busy; | 48 | u8 busy; |
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
139 | { | 140 | { |
140 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); | 141 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); |
141 | struct mpc52xx_psc __iomem *psc = mps->psc; | 142 | struct mpc52xx_psc __iomem *psc = mps->psc; |
143 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
142 | unsigned rb = 0; /* number of bytes received */ | 144 | unsigned rb = 0; /* number of bytes received */ |
143 | unsigned sb = 0; /* number of bytes sent */ | 145 | unsigned sb = 0; /* number of bytes sent */ |
144 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; | 146 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; |
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
190 | out_8(&psc->mode, 0); | 192 | out_8(&psc->mode, 0); |
191 | } else { | 193 | } else { |
192 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 194 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
193 | out_be16(&psc->rfalarm, rfalarm); | 195 | out_be16(&fifo->rfalarm, rfalarm); |
194 | } | 196 | } |
195 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); | 197 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); |
196 | wait_for_completion(&mps->done); | 198 | wait_for_completion(&mps->done); |
197 | recv_at_once = in_be16(&psc->rfnum); | 199 | recv_at_once = in_be16(&fifo->rfnum); |
198 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); | 200 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); |
199 | 201 | ||
200 | send_at_once = recv_at_once; | 202 | send_at_once = recv_at_once; |
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) | |||
331 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | 333 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) |
332 | { | 334 | { |
333 | struct mpc52xx_psc __iomem *psc = mps->psc; | 335 | struct mpc52xx_psc __iomem *psc = mps->psc; |
336 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
334 | u32 mclken_div; | 337 | u32 mclken_div; |
335 | int ret = 0; | 338 | int ret = 0; |
336 | 339 | ||
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
346 | /* Disable interrupts, interrupts are based on alarm level */ | 349 | /* Disable interrupts, interrupts are based on alarm level */ |
347 | out_be16(&psc->mpc52xx_psc_imr, 0); | 350 | out_be16(&psc->mpc52xx_psc_imr, 0); |
348 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); | 351 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); |
349 | out_8(&psc->rfcntl, 0); | 352 | out_8(&fifo->rfcntl, 0); |
350 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 353 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
351 | 354 | ||
352 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ | 355 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ |
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
419 | ret = -EFAULT; | 422 | ret = -EFAULT; |
420 | goto free_master; | 423 | goto free_master; |
421 | } | 424 | } |
425 | /* On the 5200, fifo regs are immediately ajacent to the psc regs */ | ||
426 | mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); | ||
422 | 427 | ||
423 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", | 428 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", |
424 | mps); | 429 | mps); |
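
The new fifo pointer is computed rather than separately mapped: per the added comment, the MPC5200 places the FIFO registers immediately after the PSC register block, so offsetting the already-ioremapped psc base by sizeof(struct mpc52xx_psc) lands on them, and rfnum/rfalarm/rfcntl are then accessed through the structure that actually declares them. A stripped-down illustration of the pointer arithmetic (the layouts below are invented; only the "directly after" relation matters):

    #include <stdio.h>

    struct psc_regs  { unsigned char mode, command, reserved[0x9e]; };     /* 0xa0 bytes */
    struct fifo_regs { unsigned short rfnum, rfalarm; unsigned char rfcntl; };

    int main(void)
    {
        static unsigned char regspace[0x200];   /* stand-in for the ioremapped window */
        struct psc_regs  *psc  = (struct psc_regs *)regspace;
        struct fifo_regs *fifo =
            (struct fifo_regs *)((unsigned char *)psc + sizeof(*psc));

        printf("psc at +0x000, fifo at +0x%03x\n",
               (unsigned int)((unsigned char *)fifo - regspace));   /* +0x0a0 */
        return 0;
    }
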
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 5c33cdb9cac7..a2b0aa48b8ea 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS | |||
87 | If you are unsure about this, say N here. | 87 | If you are unsure about this, say N here. |
88 | 88 | ||
89 | config USB_SUSPEND | 89 | config USB_SUSPEND |
90 | bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" | 90 | bool "USB selective suspend/resume and wakeup" |
91 | depends on USB && PM && EXPERIMENTAL | 91 | depends on USB && PM |
92 | help | 92 | help |
93 | If you say Y here, you can use driver calls or the sysfs | 93 | If you say Y here, you can use driver calls or the sysfs |
94 | "power/state" file to suspend or resume individual USB | 94 | "power/level" file to suspend or resume individual USB |
95 | peripherals. | 95 | peripherals and to enable or disable autosuspend (see |
96 | Documentation/usb/power-management.txt for more details). | ||
96 | 97 | ||
97 | Also, USB "remote wakeup" signaling is supported, whereby some | 98 | Also, USB "remote wakeup" signaling is supported, whereby some |
98 | USB devices (like keyboards and network adapters) can wake up | 99 | USB devices (like keyboards and network adapters) can wake up |
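The updated help text points at the per-device sysfs "power/level" file for selective suspend. As a rough illustration only (the device path and the accepted strings are assumptions based on the help text and the referenced documentation, not verified against this tree), a userspace program asking one device to autosuspend could look like:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Path and value are illustrative; the real device ID is system
	 * dependent and the accepted strings come from the documentation
	 * cited in the Kconfig help. */
	const char *path = "/sys/bus/usb/devices/1-1/power/level";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "auto", strlen("auto")) < 0)
		perror("write");
	close(fd);
	return 0;
}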
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f90ab5e94c58..d9d1eb19f2a1 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -28,35 +28,38 @@ | |||
28 | * devices is broken... | 28 | * devices is broken... |
29 | */ | 29 | */ |
30 | static const struct usb_device_id usb_quirk_list[] = { | 30 | static const struct usb_device_id usb_quirk_list[] = { |
31 | /* Action Semiconductor flash disk */ | ||
32 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255}, | ||
33 | |||
34 | /* CBM - Flash disk */ | 31 | /* CBM - Flash disk */ |
35 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 32 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
33 | |||
36 | /* HP 5300/5370C scanner */ | 34 | /* HP 5300/5370C scanner */ |
37 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, | 35 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = |
36 | USB_QUIRK_STRING_FETCH_255 }, | ||
38 | 37 | ||
39 | /* Creative SB Audigy 2 NX */ | 38 | /* Creative SB Audigy 2 NX */ |
40 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 39 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
41 | 40 | ||
41 | /* Philips PSC805 audio device */ | ||
42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
43 | |||
42 | /* Roland SC-8820 */ | 44 | /* Roland SC-8820 */ |
43 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
44 | 46 | ||
45 | /* Edirol SD-20 */ | 47 | /* Edirol SD-20 */ |
46 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, | 48 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
47 | 49 | ||
48 | /* INTEL VALUE SSD */ | ||
49 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
50 | |||
51 | /* M-Systems Flash Disk Pioneers */ | 50 | /* M-Systems Flash Disk Pioneers */ |
52 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 52 | ||
54 | /* Philips PSC805 audio device */ | 53 | /* Action Semiconductor flash disk */ |
55 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 54 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
55 | USB_QUIRK_STRING_FETCH_255 }, | ||
56 | 56 | ||
57 | /* SKYMEDI USB_DRIVE */ | 57 | /* SKYMEDI USB_DRIVE */ |
58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, | 58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 59 | ||
60 | /* INTEL VALUE SSD */ | ||
61 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
62 | |||
60 | { } /* terminating entry must be last */ | 63 | { } /* terminating entry must be last */ |
61 | }; | 64 | }; |
62 | 65 | ||
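The quirk list above is a vendor/product table terminated by an empty entry; this hunk only reorders it by vendor ID and rewraps long initializers. A small self-contained sketch of the lookup pattern such a table supports (the struct and quirk flags below are simplified stand-ins, not the kernel's usb_device_id):

#include <stdio.h>
#include <stdint.h>

#define QUIRK_RESET_RESUME     0x01
#define QUIRK_STRING_FETCH_255 0x02

struct quirk_entry {
	uint16_t vendor;
	uint16_t product;
	uint32_t quirks;
};

static const struct quirk_entry quirk_list[] = {
	{ 0x0204, 0x6025, QUIRK_RESET_RESUME },      /* CBM flash disk */
	{ 0x03f0, 0x0701, QUIRK_STRING_FETCH_255 },  /* HP 5300/5370C */
	{ 0x10d6, 0x2200, QUIRK_STRING_FETCH_255 },  /* Action Semi flash disk */
	{ 0, 0, 0 }                                  /* terminating entry */
};

static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
{
	const struct quirk_entry *e;

	for (e = quirk_list; e->vendor || e->product || e->quirks; e++)
		if (e->vendor == vendor && e->product == product)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("quirks for 10d6:2200 = %#x\n",
	       (unsigned)lookup_quirks(0x10d6, 0x2200));
	return 0;
}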
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e984060c984..1f0db51190cc 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, | |||
99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); | 99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * usb_altnum_to_altsetting - get the altsetting structure with a given | 102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. |
103 | * alternate setting number. | ||
104 | * @intf: the interface containing the altsetting in question | 103 | * @intf: the interface containing the altsetting in question |
105 | * @altnum: the desired alternate setting number | 104 | * @altnum: the desired alternate setting number |
106 | * | 105 | * |
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void) | |||
234 | * singlethreaded. Its job doesn't justify running on more | 233 | * singlethreaded. Its job doesn't justify running on more |
235 | * than one CPU. | 234 | * than one CPU. |
236 | */ | 235 | */ |
237 | ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); | 236 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); |
238 | if (!ksuspend_usb_wq) | 237 | if (!ksuspend_usb_wq) |
239 | return -ENOMEM; | 238 | return -ENOMEM; |
240 | return 0; | 239 | return 0; |
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf); | |||
442 | */ | 441 | */ |
443 | 442 | ||
444 | /** | 443 | /** |
445 | * usb_lock_device_for_reset - cautiously acquire the lock for a | 444 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure |
446 | * usb device structure | ||
447 | * @udev: device that's being locked | 445 | * @udev: device that's being locked |
448 | * @iface: interface bound to the driver making the request (optional) | 446 | * @iface: interface bound to the driver making the request (optional) |
449 | * | 447 | * |
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4f6bfa100f2a..2c32bd08ee7d 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
@@ -92,7 +92,6 @@ struct printer_dev { | |||
92 | u8 *current_rx_buf; | 92 | u8 *current_rx_buf; |
93 | u8 printer_status; | 93 | u8 printer_status; |
94 | u8 reset_printer; | 94 | u8 reset_printer; |
95 | struct class_device *printer_class_dev; | ||
96 | struct cdev printer_cdev; | 95 | struct cdev printer_cdev; |
97 | struct device *pdev; | 96 | struct device *pdev; |
98 | u8 printer_cdev_open; | 97 | u8 printer_cdev_open; |
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 4402d6f042d9..096c41cc40d1 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c | |||
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0"; | |||
103 | #error "Can't configure both IXP and PXA" | 103 | #error "Can't configure both IXP and PXA" |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* IXP doesn't yet support <linux/clk.h> */ | ||
107 | #define clk_get(dev,name) NULL | ||
108 | #define clk_enable(clk) do { } while (0) | ||
109 | #define clk_disable(clk) do { } while (0) | ||
110 | #define clk_put(clk) do { } while (0) | ||
111 | |||
106 | #endif | 112 | #endif |
107 | 113 | ||
108 | #include "pxa2xx_udc.h" | 114 | #include "pxa2xx_udc.h" |
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *); | |||
934 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not | 940 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not |
935 | * in active use. | 941 | * in active use. |
936 | */ | 942 | */ |
937 | static int pullup(struct pxa2xx_udc *udc, int is_active) | 943 | static int pullup(struct pxa2xx_udc *udc) |
938 | { | 944 | { |
939 | is_active = is_active && udc->vbus && udc->pullup; | 945 | int is_active = udc->vbus && udc->pullup && !udc->suspended; |
940 | DMSG("%s\n", is_active ? "active" : "inactive"); | 946 | DMSG("%s\n", is_active ? "active" : "inactive"); |
941 | if (is_active) | 947 | if (is_active) { |
942 | udc_enable(udc); | 948 | if (!udc->active) { |
943 | else { | 949 | udc->active = 1; |
944 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | 950 | /* Enable clock for USB device */ |
945 | DMSG("disconnect %s\n", udc->driver | 951 | clk_enable(udc->clk); |
946 | ? udc->driver->driver.name | 952 | udc_enable(udc); |
947 | : "(no driver)"); | ||
948 | stop_activity(udc, udc->driver); | ||
949 | } | 953 | } |
950 | udc_disable(udc); | 954 | } else { |
955 | if (udc->active) { | ||
956 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | ||
957 | DMSG("disconnect %s\n", udc->driver | ||
958 | ? udc->driver->driver.name | ||
959 | : "(no driver)"); | ||
960 | stop_activity(udc, udc->driver); | ||
961 | } | ||
962 | udc_disable(udc); | ||
963 | /* Disable clock for USB device */ | ||
964 | clk_disable(udc->clk); | ||
965 | udc->active = 0; | ||
966 | } | ||
967 | |||
951 | } | 968 | } |
952 | return 0; | 969 | return 0; |
953 | } | 970 | } |
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) | |||
958 | struct pxa2xx_udc *udc; | 975 | struct pxa2xx_udc *udc; |
959 | 976 | ||
960 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); | 977 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); |
961 | udc->vbus = is_active = (is_active != 0); | 978 | udc->vbus = (is_active != 0); |
962 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); | 979 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); |
963 | pullup(udc, is_active); | 980 | pullup(udc); |
964 | return 0; | 981 | return 0; |
965 | } | 982 | } |
966 | 983 | ||
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) | |||
975 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 992 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
976 | return -EOPNOTSUPP; | 993 | return -EOPNOTSUPP; |
977 | 994 | ||
978 | is_active = (is_active != 0); | 995 | udc->pullup = (is_active != 0); |
979 | udc->pullup = is_active; | 996 | pullup(udc); |
980 | pullup(udc, is_active); | ||
981 | return 0; | 997 | return 0; |
982 | } | 998 | } |
983 | 999 | ||
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = { | |||
997 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | 1013 | #ifdef CONFIG_USB_GADGET_DEBUG_FS |
998 | 1014 | ||
999 | static int | 1015 | static int |
1000 | udc_seq_show(struct seq_file *m, void *d) | 1016 | udc_seq_show(struct seq_file *m, void *_d) |
1001 | { | 1017 | { |
1002 | struct pxa2xx_udc *dev = m->private; | 1018 | struct pxa2xx_udc *dev = m->private; |
1003 | unsigned long flags; | 1019 | unsigned long flags; |
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev) | |||
1146 | 1162 | ||
1147 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1163 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1148 | 1164 | ||
1149 | #ifdef CONFIG_ARCH_PXA | ||
1150 | /* Disable clock for USB device */ | ||
1151 | clk_disable(dev->clk); | ||
1152 | #endif | ||
1153 | |||
1154 | ep0_idle (dev); | 1165 | ep0_idle (dev); |
1155 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 1166 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
1156 | } | 1167 | } |
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev) | |||
1191 | { | 1202 | { |
1192 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1203 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1193 | 1204 | ||
1194 | #ifdef CONFIG_ARCH_PXA | ||
1195 | /* Enable clock for USB device */ | ||
1196 | clk_enable(dev->clk); | ||
1197 | #endif | ||
1198 | |||
1199 | /* try to clear these bits before we enable the udc */ | 1205 | /* try to clear these bits before we enable the udc */ |
1200 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); | 1206 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); |
1201 | 1207 | ||
@@ -1286,7 +1292,7 @@ fail: | |||
1286 | * for set_configuration as well as eventual disconnect. | 1292 | * for set_configuration as well as eventual disconnect. |
1287 | */ | 1293 | */ |
1288 | DMSG("registered gadget driver '%s'\n", driver->driver.name); | 1294 | DMSG("registered gadget driver '%s'\n", driver->driver.name); |
1289 | pullup(dev, 1); | 1295 | pullup(dev); |
1290 | dump_state(dev); | 1296 | dump_state(dev); |
1291 | return 0; | 1297 | return 0; |
1292 | } | 1298 | } |
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1329 | return -EINVAL; | 1335 | return -EINVAL; |
1330 | 1336 | ||
1331 | local_irq_disable(); | 1337 | local_irq_disable(); |
1332 | pullup(dev, 0); | 1338 | dev->pullup = 0; |
1339 | pullup(dev); | ||
1333 | stop_activity(dev, driver); | 1340 | stop_activity(dev, driver); |
1334 | local_irq_enable(); | 1341 | local_irq_enable(); |
1335 | 1342 | ||
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev) | |||
2131 | if (irq < 0) | 2138 | if (irq < 0) |
2132 | return -ENODEV; | 2139 | return -ENODEV; |
2133 | 2140 | ||
2134 | #ifdef CONFIG_ARCH_PXA | ||
2135 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); | 2141 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); |
2136 | if (IS_ERR(dev->clk)) { | 2142 | if (IS_ERR(dev->clk)) { |
2137 | retval = PTR_ERR(dev->clk); | 2143 | retval = PTR_ERR(dev->clk); |
2138 | goto err_clk; | 2144 | goto err_clk; |
2139 | } | 2145 | } |
2140 | #endif | ||
2141 | 2146 | ||
2142 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, | 2147 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, |
2143 | dev->has_cfr ? "" : " (!cfr)", | 2148 | dev->has_cfr ? "" : " (!cfr)", |
@@ -2250,10 +2255,8 @@ lubbock_fail0: | |||
2250 | if (dev->mach->gpio_vbus) | 2255 | if (dev->mach->gpio_vbus) |
2251 | gpio_free(dev->mach->gpio_vbus); | 2256 | gpio_free(dev->mach->gpio_vbus); |
2252 | err_gpio_vbus: | 2257 | err_gpio_vbus: |
2253 | #ifdef CONFIG_ARCH_PXA | ||
2254 | clk_put(dev->clk); | 2258 | clk_put(dev->clk); |
2255 | err_clk: | 2259 | err_clk: |
2256 | #endif | ||
2257 | return retval; | 2260 | return retval; |
2258 | } | 2261 | } |
2259 | 2262 | ||
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2269 | if (dev->driver) | 2272 | if (dev->driver) |
2270 | return -EBUSY; | 2273 | return -EBUSY; |
2271 | 2274 | ||
2272 | udc_disable(dev); | 2275 | dev->pullup = 0; |
2276 | pullup(dev); | ||
2277 | |||
2273 | remove_debug_files(dev); | 2278 | remove_debug_files(dev); |
2274 | 2279 | ||
2275 | if (dev->got_irq) { | 2280 | if (dev->got_irq) { |
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2289 | if (dev->mach->gpio_pullup) | 2294 | if (dev->mach->gpio_pullup) |
2290 | gpio_free(dev->mach->gpio_pullup); | 2295 | gpio_free(dev->mach->gpio_pullup); |
2291 | 2296 | ||
2292 | #ifdef CONFIG_ARCH_PXA | ||
2293 | clk_put(dev->clk); | 2297 | clk_put(dev->clk); |
2294 | #endif | ||
2295 | 2298 | ||
2296 | platform_set_drvdata(pdev, NULL); | 2299 | platform_set_drvdata(pdev, NULL); |
2297 | the_controller = NULL; | 2300 | the_controller = NULL; |
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2317 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | 2320 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) |
2318 | { | 2321 | { |
2319 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2322 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2323 | unsigned long flags; | ||
2320 | 2324 | ||
2321 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 2325 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
2322 | WARN("USB host won't detect disconnect!\n"); | 2326 | WARN("USB host won't detect disconnect!\n"); |
2323 | pullup(udc, 0); | 2327 | udc->suspended = 1; |
2328 | |||
2329 | local_irq_save(flags); | ||
2330 | pullup(udc); | ||
2331 | local_irq_restore(flags); | ||
2324 | 2332 | ||
2325 | return 0; | 2333 | return 0; |
2326 | } | 2334 | } |
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | |||
2328 | static int pxa2xx_udc_resume(struct platform_device *dev) | 2336 | static int pxa2xx_udc_resume(struct platform_device *dev) |
2329 | { | 2337 | { |
2330 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2338 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2339 | unsigned long flags; | ||
2331 | 2340 | ||
2332 | pullup(udc, 1); | 2341 | udc->suspended = 0; |
2342 | local_irq_save(flags); | ||
2343 | pullup(udc); | ||
2344 | local_irq_restore(flags); | ||
2333 | 2345 | ||
2334 | return 0; | 2346 | return 0; |
2335 | } | 2347 | } |
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index b67e3ff5e4eb..e2c19e88c875 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h | |||
@@ -119,7 +119,9 @@ struct pxa2xx_udc { | |||
119 | has_cfr : 1, | 119 | has_cfr : 1, |
120 | req_pending : 1, | 120 | req_pending : 1, |
121 | req_std : 1, | 121 | req_std : 1, |
122 | req_config : 1; | 122 | req_config : 1, |
123 | suspended : 1, | ||
124 | active : 1; | ||
123 | 125 | ||
124 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) | 126 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) |
125 | struct timer_list timer; | 127 | struct timer_list timer; |
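With the new suspended and active bits, pullup() in the pxa2xx UDC becomes a small state machine: the D+ pull-up and the 48 MHz clock are on exactly when VBUS is present, the gadget driver wants the pull-up, and the controller is not suspended, and the clock is only toggled on a state change. A compact stand-alone sketch of that logic (the struct and the printed actions are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct udc_state {
	bool vbus;       /* VBUS sensed */
	bool pullup;     /* gadget driver wants D+ pulled up */
	bool suspended;  /* platform suspend in progress */
	bool active;     /* clock enabled and UDC running */
};

static void update_pullup(struct udc_state *udc)
{
	bool want_active = udc->vbus && udc->pullup && !udc->suspended;

	if (want_active && !udc->active) {
		puts("enable clock, enable UDC");
		udc->active = true;
	} else if (!want_active && udc->active) {
		puts("stop activity, disable UDC, disable clock");
		udc->active = false;
	}
}

int main(void)
{
	struct udc_state udc = { .pullup = true };

	udc.vbus = true;       update_pullup(&udc); /* cable plugged in */
	udc.suspended = true;  update_pullup(&udc); /* system suspend */
	udc.suspended = false; update_pullup(&udc); /* resume */
	return 0;
}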
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 776a97f33914..2e49de820b14 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
319 | if (likely (last->urb != urb)) { | 319 | if (likely (last->urb != urb)) { |
320 | ehci_urb_done(ehci, last->urb, last_status); | 320 | ehci_urb_done(ehci, last->urb, last_status); |
321 | count++; | 321 | count++; |
322 | last_status = -EINPROGRESS; | ||
322 | } | 323 | } |
323 | ehci_qtd_free (ehci, last); | 324 | ehci_qtd_free (ehci, last); |
324 | last = NULL; | 325 | last = NULL; |
325 | last_status = -EINPROGRESS; | ||
326 | } | 326 | } |
327 | 327 | ||
328 | /* ignore urbs submitted during completions we reported */ | 328 | /* ignore urbs submitted during completions we reported */ |
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0130fd8571e4..d7071c855758 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
911 | buf[0] = 0; | 911 | buf[0] = 0; |
912 | 912 | ||
913 | for (i = 0; i < ports; i++) { | 913 | for (i = 0; i < ports; i++) { |
914 | u32 status = isp116x->rhport[i] = | 914 | u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); |
915 | isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); | ||
916 | 915 | ||
917 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | 916 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
918 | | RH_PS_OCIC | RH_PS_PRSC)) { | 917 | | RH_PS_OCIC | RH_PS_PRSC)) { |
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1031 | DBG("GetPortStatus\n"); | 1030 | DBG("GetPortStatus\n"); |
1032 | if (!wIndex || wIndex > ports) | 1031 | if (!wIndex || wIndex > ports) |
1033 | goto error; | 1032 | goto error; |
1034 | tmp = isp116x->rhport[--wIndex]; | 1033 | spin_lock_irqsave(&isp116x->lock, flags); |
1034 | tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1); | ||
1035 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1035 | *(__le32 *) buf = cpu_to_le32(tmp); | 1036 | *(__le32 *) buf = cpu_to_le32(tmp); |
1036 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); | 1037 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); |
1037 | break; | 1038 | break; |
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1080 | spin_lock_irqsave(&isp116x->lock, flags); | 1081 | spin_lock_irqsave(&isp116x->lock, flags); |
1081 | isp116x_write_reg32(isp116x, wIndex | 1082 | isp116x_write_reg32(isp116x, wIndex |
1082 | ? HCRHPORT2 : HCRHPORT1, tmp); | 1083 | ? HCRHPORT2 : HCRHPORT1, tmp); |
1083 | isp116x->rhport[wIndex] = | ||
1084 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1085 | spin_unlock_irqrestore(&isp116x->lock, flags); | 1084 | spin_unlock_irqrestore(&isp116x->lock, flags); |
1086 | break; | 1085 | break; |
1087 | case SetPortFeature: | 1086 | case SetPortFeature: |
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1095 | spin_lock_irqsave(&isp116x->lock, flags); | 1094 | spin_lock_irqsave(&isp116x->lock, flags); |
1096 | isp116x_write_reg32(isp116x, wIndex | 1095 | isp116x_write_reg32(isp116x, wIndex |
1097 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); | 1096 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); |
1097 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1098 | break; | 1098 | break; |
1099 | case USB_PORT_FEAT_POWER: | 1099 | case USB_PORT_FEAT_POWER: |
1100 | DBG("USB_PORT_FEAT_POWER\n"); | 1100 | DBG("USB_PORT_FEAT_POWER\n"); |
1101 | spin_lock_irqsave(&isp116x->lock, flags); | 1101 | spin_lock_irqsave(&isp116x->lock, flags); |
1102 | isp116x_write_reg32(isp116x, wIndex | 1102 | isp116x_write_reg32(isp116x, wIndex |
1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); | 1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); |
1104 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1104 | break; | 1105 | break; |
1105 | case USB_PORT_FEAT_RESET: | 1106 | case USB_PORT_FEAT_RESET: |
1106 | DBG("USB_PORT_FEAT_RESET\n"); | 1107 | DBG("USB_PORT_FEAT_RESET\n"); |
1107 | root_port_reset(isp116x, wIndex); | 1108 | root_port_reset(isp116x, wIndex); |
1108 | spin_lock_irqsave(&isp116x->lock, flags); | ||
1109 | break; | 1109 | break; |
1110 | default: | 1110 | default: |
1111 | goto error; | 1111 | goto error; |
1112 | } | 1112 | } |
1113 | isp116x->rhport[wIndex] = | ||
1114 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1115 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1116 | break; | 1113 | break; |
1117 | 1114 | ||
1118 | default: | 1115 | default: |
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index b91e2edd9c5c..595b90a99848 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h | |||
@@ -270,7 +270,6 @@ struct isp116x { | |||
270 | u32 rhdesca; | 270 | u32 rhdesca; |
271 | u32 rhdescb; | 271 | u32 rhdescb; |
272 | u32 rhstatus; | 272 | u32 rhstatus; |
273 | u32 rhport[2]; | ||
274 | 273 | ||
275 | /* async schedule: control, bulk */ | 274 | /* async schedule: control, bulk */ |
276 | struct list_head async; | 275 | struct list_head async; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76db2fef4657..91dc433dbcf1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | static int ftdi_jtag_probe (struct usb_serial *serial); | 94 | static int ftdi_jtag_probe (struct usb_serial *serial); |
95 | static int ftdi_mtxorb_hack_setup (struct usb_serial *serial); | ||
95 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); | 96 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); |
96 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); | 97 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); |
97 | 98 | ||
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = { | |||
99 | .probe = ftdi_jtag_probe, | 100 | .probe = ftdi_jtag_probe, |
100 | }; | 101 | }; |
101 | 102 | ||
103 | static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { | ||
104 | .probe = ftdi_mtxorb_hack_setup, | ||
105 | }; | ||
106 | |||
102 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { | 107 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { |
103 | .port_probe = ftdi_USB_UIRT_setup, | 108 | .port_probe = ftdi_USB_UIRT_setup, |
104 | }; | 109 | }; |
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = { | |||
161 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, | 166 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, |
162 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, | 167 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, |
163 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, | 168 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, |
169 | { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), | ||
170 | .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, | ||
164 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, | 171 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, |
165 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, | 172 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, |
166 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, | 173 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, |
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = { | |||
274 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 281 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
275 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 282 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
276 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 283 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
284 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | ||
277 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 285 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
278 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 286 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
279 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 287 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
@@ -1088,6 +1096,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1088 | return 0; | 1096 | return 0; |
1089 | } | 1097 | } |
1090 | 1098 | ||
1099 | /* | ||
1100 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. | ||
1101 | * We have to correct it if we want to read from it. | ||
1102 | */ | ||
1103 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) | ||
1104 | { | ||
1105 | struct usb_host_endpoint *ep = serial->dev->ep_in[1]; | ||
1106 | struct usb_endpoint_descriptor *ep_desc = &ep->desc; | ||
1107 | |||
1108 | if (ep->enabled && ep_desc->wMaxPacketSize == 0) { | ||
1109 | ep_desc->wMaxPacketSize = 0x40; | ||
1110 | info("Fixing invalid wMaxPacketSize on read pipe"); | ||
1111 | } | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1091 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect | 1116 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect |
1092 | * it is called when the usb device is disconnected | 1117 | * it is called when the usb device is disconnected |
1093 | * | 1118 | * |
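The new ftdi_mtxorb_hack_setup() patches a descriptor field in place: if the read endpoint advertises a wMaxPacketSize of 0, which would make reads impossible, it is forced to 64 bytes. The check-and-patch pattern in isolation (descriptor struct simplified; in the kernel the field is a little-endian 16-bit value, which is ignored here for brevity):

#include <stdint.h>
#include <stdio.h>

/* Simplified endpoint descriptor stand-in. */
struct ep_desc {
	uint16_t wMaxPacketSize;
};

static void fix_read_pipe(struct ep_desc *desc)
{
	if (desc->wMaxPacketSize == 0) {
		desc->wMaxPacketSize = 0x40;
		printf("Fixing invalid wMaxPacketSize on read pipe\n");
	}
}

int main(void)
{
	struct ep_desc broken = { .wMaxPacketSize = 0 };

	fix_read_pipe(&broken);
	printf("wMaxPacketSize is now %u\n", (unsigned)broken.wMaxPacketSize);
	return 0;
}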
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 6eee2ab914ec..e1eb742abcd5 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -102,6 +102,13 @@ | |||
102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
104 | 104 | ||
105 | /* | ||
106 | * The following are the values for the Matrix Orbital VK204-25-USB | ||
107 | * display, which use the FT232RL. | ||
108 | */ | ||
109 | #define MTXORB_VK_VID 0x1b3d | ||
110 | #define MTXORB_VK_PID 0x0158 | ||
111 | |||
105 | /* Interbiometrics USB I/O Board */ | 112 | /* Interbiometrics USB I/O Board */ |
106 | /* Developed for Interbiometrics by Rudolf Gugler */ | 113 | /* Developed for Interbiometrics by Rudolf Gugler */ |
107 | #define INTERBIOMETRICS_VID 0x1209 | 114 | #define INTERBIOMETRICS_VID 0x1209 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 869ecd374cb4..aeeb9cb20999 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -110,11 +110,20 @@ | |||
110 | 110 | ||
111 | /* vendor id and device id defines */ | 111 | /* vendor id and device id defines */ |
112 | 112 | ||
113 | /* The native mos7840/7820 component */ | ||
113 | #define USB_VENDOR_ID_MOSCHIP 0x9710 | 114 | #define USB_VENDOR_ID_MOSCHIP 0x9710 |
114 | #define MOSCHIP_DEVICE_ID_7840 0x7840 | 115 | #define MOSCHIP_DEVICE_ID_7840 0x7840 |
115 | #define MOSCHIP_DEVICE_ID_7820 0x7820 | 116 | #define MOSCHIP_DEVICE_ID_7820 0x7820 |
117 | /* The native component can have its vendor/device IDs overridden | ||
118 | * in vendor-specific implementations. Such devices can be handled | ||
119 | * by making a change here, in moschip_port_id_table, and in | ||
120 | * moschip_id_table_combined | ||
121 | */ | ||
122 | #define USB_VENDOR_ID_BANDB 0x0856 | ||
123 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
124 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
116 | 125 | ||
117 | /* Interrupt Rotinue Defines */ | 126 | /* Interrupt Routine Defines */ |
118 | 127 | ||
119 | #define SERIAL_IIR_RLS 0x06 | 128 | #define SERIAL_IIR_RLS 0x06 |
120 | #define SERIAL_IIR_MS 0x00 | 129 | #define SERIAL_IIR_MS 0x00 |
@@ -159,12 +168,16 @@ | |||
159 | static struct usb_device_id moschip_port_id_table[] = { | 168 | static struct usb_device_id moschip_port_id_table[] = { |
160 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 169 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
161 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 170 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
171 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
172 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
162 | {} /* terminating entry */ | 173 | {} /* terminating entry */ |
163 | }; | 174 | }; |
164 | 175 | ||
165 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | 176 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { |
166 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 177 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
167 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 178 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
168 | {} /* terminating entry */ | 181 | {} /* terminating entry */ |
169 | }; | 182 | }; |
170 | 183 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af2674c57414..828a4377ec6a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -120,6 +120,9 @@ static int option_send_setup(struct usb_serial_port *port); | |||
120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 | 120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 |
121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 | 121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 |
122 | 122 | ||
123 | #define AXESSTEL_VENDOR_ID 0x1726 | ||
124 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | ||
125 | |||
123 | #define BANDRICH_VENDOR_ID 0x1A8D | 126 | #define BANDRICH_VENDOR_ID 0x1A8D |
124 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 127 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
125 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 128 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -192,6 +195,7 @@ static struct usb_device_id option_ids[] = { | |||
192 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ | 195 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ |
193 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, | 196 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, |
194 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 197 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
198 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | ||
195 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 199 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
196 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 200 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
197 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 201 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 958f5b17847c..b9b8ede61fb3 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c | |||
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
170 | 170 | ||
171 | if (!sg) | 171 | if (!sg) |
172 | sg = scsi_sglist(srb); | 172 | sg = scsi_sglist(srb); |
173 | buflen = min(buflen, scsi_bufflen(srb)); | ||
174 | 173 | ||
175 | /* This loop handles a single s-g list entry, which may | 174 | /* This loop handles a single s-g list entry, which may |
176 | * include multiple pages. Find the initial page structure | 175 | * include multiple pages. Find the initial page structure |
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer, | |||
232 | unsigned int offset = 0; | 231 | unsigned int offset = 0; |
233 | struct scatterlist *sg = NULL; | 232 | struct scatterlist *sg = NULL; |
234 | 233 | ||
234 | buflen = min(buflen, scsi_bufflen(srb)); | ||
235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, | 235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, |
236 | TO_XFER_BUF); | 236 | TO_XFER_BUF); |
237 | if (buflen < scsi_bufflen(srb)) | 237 | if (buflen < scsi_bufflen(srb)) |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index e83dfba7e636..742b5c656d66 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
237 | 237 | ||
238 | /* check we can fit these values into the registers */ | 238 | /* check we can fit these values into the registers */ |
239 | 239 | ||
240 | if (var->hsync_len > 255 || var->vsync_len > 255) | 240 | if (var->hsync_len > 255 || var->vsync_len > 63) |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | 242 | ||
243 | if ((var->xres + var->right_margin) >= 4096) | 243 | /* hdisplay end and hsync start */ |
244 | if ((var->xres + var->right_margin) > 4096) | ||
244 | return -EINVAL; | 245 | return -EINVAL; |
245 | 246 | ||
247 | /* vdisplay end and vsync start */ | ||
246 | if ((var->yres + var->lower_margin) > 2048) | 248 | if ((var->yres + var->lower_margin) > 2048) |
247 | return -EINVAL; | 249 | return -EINVAL; |
248 | 250 | ||
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
281 | var->blue.length = var->bits_per_pixel; | 283 | var->blue.length = var->bits_per_pixel; |
282 | var->blue.offset = 0; | 284 | var->blue.offset = 0; |
283 | var->transp.length = 0; | 285 | var->transp.length = 0; |
286 | var->transp.offset = 0; | ||
284 | 287 | ||
285 | break; | 288 | break; |
286 | 289 | ||
287 | case 16: | 290 | case 16: |
288 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { | 291 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { |
289 | var->red.offset = 11; | ||
290 | var->green.offset = 5; | ||
291 | var->blue.offset = 0; | ||
292 | } else { | ||
293 | var->blue.offset = 11; | 292 | var->blue.offset = 11; |
294 | var->green.offset = 5; | 293 | var->green.offset = 5; |
295 | var->red.offset = 0; | 294 | var->red.offset = 0; |
295 | } else { | ||
296 | var->red.offset = 11; | ||
297 | var->green.offset = 5; | ||
298 | var->blue.offset = 0; | ||
296 | } | 299 | } |
300 | var->transp.offset = 0; | ||
297 | 301 | ||
298 | var->red.length = 5; | 302 | var->red.length = 5; |
299 | var->green.length = 6; | 303 | var->green.length = 6; |
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
397 | break; | 401 | break; |
398 | 402 | ||
399 | case 16: | 403 | case 16: |
400 | info->fix.visual = FB_VISUAL_DIRECTCOLOR; | 404 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
401 | break; | 405 | break; |
402 | 406 | ||
403 | case 32: | 407 | case 32: |
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info) | |||
613 | 617 | ||
614 | case 16: | 618 | case 16: |
615 | control |= SM501_DC_CRT_CONTROL_16BPP; | 619 | control |= SM501_DC_CRT_CONTROL_16BPP; |
620 | sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); | ||
616 | break; | 621 | break; |
617 | 622 | ||
618 | case 32: | 623 | case 32: |
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info) | |||
750 | 755 | ||
751 | case 16: | 756 | case 16: |
752 | control |= SM501_DC_PANEL_CONTROL_16BPP; | 757 | control |= SM501_DC_PANEL_CONTROL_16BPP; |
758 | sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); | ||
753 | break; | 759 | break; |
754 | 760 | ||
755 | case 32: | 761 | case 32: |
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c index 70fb4ee2b421..919ce75db9e2 100644 --- a/drivers/video/tridentfb.c +++ b/drivers/video/tridentfb.c | |||
@@ -564,19 +564,46 @@ static inline void write3CE(int reg, unsigned char val) | |||
564 | t_outb(val, 0x3CF); | 564 | t_outb(val, 0x3CF); |
565 | } | 565 | } |
566 | 566 | ||
567 | static inline void enable_mmio(void) | 567 | static void enable_mmio(void) |
568 | { | 568 | { |
569 | unsigned char tmp; | ||
570 | |||
569 | /* Goto New Mode */ | 571 | /* Goto New Mode */ |
570 | outb(0x0B, 0x3C4); | 572 | outb(0x0B, 0x3C4); |
571 | inb(0x3C5); | 573 | inb(0x3C5); |
572 | 574 | ||
573 | /* Unprotect registers */ | 575 | /* Unprotect registers */ |
574 | outb(NewMode1, 0x3C4); | 576 | outb(NewMode1, 0x3C4); |
577 | tmp = inb(0x3C5); | ||
575 | outb(0x80, 0x3C5); | 578 | outb(0x80, 0x3C5); |
576 | 579 | ||
577 | /* Enable MMIO */ | 580 | /* Enable MMIO */ |
578 | outb(PCIReg, 0x3D4); | 581 | outb(PCIReg, 0x3D4); |
579 | outb(inb(0x3D5) | 0x01, 0x3D5); | 582 | outb(inb(0x3D5) | 0x01, 0x3D5); |
583 | |||
584 | t_outb(NewMode1, 0x3C4); | ||
585 | t_outb(tmp, 0x3C5); | ||
586 | } | ||
587 | |||
588 | static void disable_mmio(void) | ||
589 | { | ||
590 | unsigned char tmp; | ||
591 | |||
592 | /* Goto New Mode */ | ||
593 | t_outb(0x0B, 0x3C4); | ||
594 | t_inb(0x3C5); | ||
595 | |||
596 | /* Unprotect registers */ | ||
597 | t_outb(NewMode1, 0x3C4); | ||
598 | tmp = t_inb(0x3C5); | ||
599 | t_outb(0x80, 0x3C5); | ||
600 | |||
601 | /* Disable MMIO */ | ||
602 | t_outb(PCIReg, 0x3D4); | ||
603 | t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); | ||
604 | |||
605 | outb(NewMode1, 0x3C4); | ||
606 | outb(tmp, 0x3C5); | ||
580 | } | 607 | } |
581 | 608 | ||
582 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) | 609 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) |
@@ -1239,9 +1266,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1239 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1266 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1240 | 1267 | ||
1241 | if (!default_par.io_virt) { | 1268 | if (!default_par.io_virt) { |
1242 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1243 | debug("ioremap failed\n"); | 1269 | debug("ioremap failed\n"); |
1244 | return -1; | 1270 | err = -1; |
1271 | goto out_unmap1; | ||
1245 | } | 1272 | } |
1246 | 1273 | ||
1247 | enable_mmio(); | 1274 | enable_mmio(); |
@@ -1252,25 +1279,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1252 | 1279 | ||
1253 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { | 1280 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { |
1254 | debug("request_mem_region failed!\n"); | 1281 | debug("request_mem_region failed!\n"); |
1282 | disable_mmio(); | ||
1255 | err = -1; | 1283 | err = -1; |
1256 | goto out_unmap; | 1284 | goto out_unmap1; |
1257 | } | 1285 | } |
1258 | 1286 | ||
1259 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, | 1287 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, |
1260 | tridentfb_fix.smem_len); | 1288 | tridentfb_fix.smem_len); |
1261 | 1289 | ||
1262 | if (!fb_info.screen_base) { | 1290 | if (!fb_info.screen_base) { |
1263 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1264 | debug("ioremap failed\n"); | 1291 | debug("ioremap failed\n"); |
1265 | err = -1; | 1292 | err = -1; |
1266 | goto out_unmap; | 1293 | goto out_unmap2; |
1267 | } | 1294 | } |
1268 | 1295 | ||
1269 | output("%s board found\n", pci_name(dev)); | 1296 | output("%s board found\n", pci_name(dev)); |
1270 | #if 0 | ||
1271 | output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n", | ||
1272 | tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt); | ||
1273 | #endif | ||
1274 | displaytype = get_displaytype(); | 1297 | displaytype = get_displaytype(); |
1275 | 1298 | ||
1276 | if (flatpanel) | 1299 | if (flatpanel) |
@@ -1288,9 +1311,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1288 | 1311 | ||
1289 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { | 1312 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { |
1290 | err = -EINVAL; | 1313 | err = -EINVAL; |
1291 | goto out_unmap; | 1314 | goto out_unmap2; |
1292 | } | 1315 | } |
1293 | fb_alloc_cmap(&fb_info.cmap, 256, 0); | 1316 | err = fb_alloc_cmap(&fb_info.cmap, 256, 0); |
1317 | if (err < 0) | ||
1318 | goto out_unmap2; | ||
1319 | |||
1294 | if (defaultaccel && acc) | 1320 | if (defaultaccel && acc) |
1295 | default_var.accel_flags |= FB_ACCELF_TEXT; | 1321 | default_var.accel_flags |= FB_ACCELF_TEXT; |
1296 | else | 1322 | else |
@@ -1300,19 +1326,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1300 | fb_info.device = &dev->dev; | 1326 | fb_info.device = &dev->dev; |
1301 | if (register_framebuffer(&fb_info) < 0) { | 1327 | if (register_framebuffer(&fb_info) < 0) { |
1302 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); | 1328 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); |
1329 | fb_dealloc_cmap(&fb_info.cmap); | ||
1303 | err = -EINVAL; | 1330 | err = -EINVAL; |
1304 | goto out_unmap; | 1331 | goto out_unmap2; |
1305 | } | 1332 | } |
1306 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", | 1333 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", |
1307 | fb_info.node, fb_info.fix.id, default_var.xres, | 1334 | fb_info.node, fb_info.fix.id, default_var.xres, |
1308 | default_var.yres, default_var.bits_per_pixel); | 1335 | default_var.yres, default_var.bits_per_pixel); |
1309 | return 0; | 1336 | return 0; |
1310 | 1337 | ||
1311 | out_unmap: | 1338 | out_unmap2: |
1312 | if (default_par.io_virt) | ||
1313 | iounmap(default_par.io_virt); | ||
1314 | if (fb_info.screen_base) | 1339 | if (fb_info.screen_base) |
1315 | iounmap(fb_info.screen_base); | 1340 | iounmap(fb_info.screen_base); |
1341 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1342 | disable_mmio(); | ||
1343 | out_unmap1: | ||
1344 | if (default_par.io_virt) | ||
1345 | iounmap(default_par.io_virt); | ||
1346 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1316 | return err; | 1347 | return err; |
1317 | } | 1348 | } |
1318 | 1349 | ||
@@ -1323,7 +1354,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev) | |||
1323 | iounmap(par->io_virt); | 1354 | iounmap(par->io_virt); |
1324 | iounmap(fb_info.screen_base); | 1355 | iounmap(fb_info.screen_base); |
1325 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | 1356 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); |
1326 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1357 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1327 | } | 1358 | } |
1328 | 1359 | ||
1329 | /* List of boards that we are trying to support */ | 1360 | /* List of boards that we are trying to support */ |
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 688e435b4d9a..10211e493001 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | ||
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/ds1wm.h> | 22 | #include <linux/ds1wm.h> |
22 | 23 | ||
@@ -102,12 +103,12 @@ struct ds1wm_data { | |||
102 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, | 103 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, |
103 | u8 val) | 104 | u8 val) |
104 | { | 105 | { |
105 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 106 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
106 | } | 107 | } |
107 | 108 | ||
108 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) | 109 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) |
109 | { | 110 | { |
110 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 111 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
111 | } | 112 | } |
112 | 113 | ||
113 | 114 | ||
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
149 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); | 150 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); |
150 | ds1wm_data->reset_complete = NULL; | 151 | ds1wm_data->reset_complete = NULL; |
151 | if (!timeleft) { | 152 | if (!timeleft) { |
152 | dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); | 153 | dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); |
153 | return 1; | 154 | return 1; |
154 | } | 155 | } |
155 | 156 | ||
156 | /* Wait for the end of the reset. According to the specs, the time | 157 | /* Wait for the end of the reset. According to the specs, the time |
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
167 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | 168 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); |
168 | 169 | ||
169 | if (!ds1wm_data->slave_present) { | 170 | if (!ds1wm_data->slave_present) { |
170 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); | 171 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); |
171 | return 1; | 172 | return 1; |
172 | } | 173 | } |
173 | 174 | ||
174 | return 0; | 175 | return 0; |
175 | } | 176 | } |
176 | 177 | ||
177 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) | 178 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) |
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
334 | if (!pdev) | 335 | if (!pdev) |
335 | return -ENODEV; | 336 | return -ENODEV; |
336 | 337 | ||
337 | ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); | 338 | ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); |
338 | if (!ds1wm_data) | 339 | if (!ds1wm_data) |
339 | return -ENOMEM; | 340 | return -ENOMEM; |
340 | 341 | ||
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
374 | goto err1; | 375 | goto err1; |
375 | 376 | ||
376 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); | 377 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); |
377 | if (!ds1wm_data->clk) { | 378 | if (IS_ERR(ds1wm_data->clk)) { |
378 | ret = -ENOENT; | 379 | ret = PTR_ERR(ds1wm_data->clk); |
379 | goto err2; | 380 | goto err2; |
380 | } | 381 | } |
381 | 382 | ||
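The ds1wm change above switches the clk_get() check from a NULL test to IS_ERR()/PTR_ERR(), since clk_get() reports failure through an error-encoded pointer rather than NULL. A self-contained illustration of that error-pointer convention, re-implemented here in plain C for clarity (this is not the kernel's linux/err.h, and fake_clk_get() is a made-up stand-in):

#include <stdio.h>
#include <errno.h>

/* Minimal re-implementation of the error-pointer idea: small negative
 * errno values are folded into the top of the address space. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for clk_get(): fails with -ENOENT when the clock is unknown. */
static void *fake_clk_get(const char *name)
{
	static int dummy_clk;

	if (name && name[0] == 'd')  /* pretend only "ds1wm" exists */
		return &dummy_clk;
	return ERR_PTR(-ENOENT);
}

int main(void)
{
	void *clk = fake_clk_get("bogus");

	if (IS_ERR(clk))
		printf("clk_get failed: %ld\n", PTR_ERR(clk));
	else
		printf("got clock %p\n", clk);
	return 0;
}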