Diffstat (limited to 'drivers')
216 files changed, 6878 insertions, 3185 deletions
diff --git a/drivers/acorn/char/defkeymap-l7200.c b/drivers/acorn/char/defkeymap-l7200.c
index 28a5fbc6aa1a..93d80a1c36f9 100644
--- a/drivers/acorn/char/defkeymap-l7200.c
+++ b/drivers/acorn/char/defkeymap-l7200.c
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
347 | }; | 347 | }; |
348 | 348 | ||
349 | struct kbdiacruc accent_table[MAX_DIACR] = { | 349 | struct kbdiacruc accent_table[MAX_DIACR] = { |
350 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 350 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
351 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 351 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
352 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 352 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
353 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 353 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
354 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 354 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
355 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 355 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
356 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 356 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
357 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 357 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
358 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 358 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
359 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 359 | {',', 'C', 0307}, {',', 'c', 0347}, |
360 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 360 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
361 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 361 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
362 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 362 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
363 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 363 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
364 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 364 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
365 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 365 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
366 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 366 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
367 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 367 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
368 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 368 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
369 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 369 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
370 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 370 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
371 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 371 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
372 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 372 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
373 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 373 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
374 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 374 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
375 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 375 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
376 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 376 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
377 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 377 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
378 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 378 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
379 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 379 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
380 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 380 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
381 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 381 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
382 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 382 | {'s', 's', 0337}, {'"', 'y', 0377}, |
383 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 383 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
384 | }; | 384 | }; |
385 | 385 | ||
386 | unsigned int accent_table_size = 68; | 386 | unsigned int accent_table_size = 68; |
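The only change above is replacing character-constant escapes such as '\300' with plain octal integers (0300). The accent_table is an array of struct kbdiacruc, whose result field is an unsigned int, and a character constant like '\300' has an implementation-defined (usually negative) value on targets where plain char is signed, so it sign-extends when stored; the bare octal constant always yields the intended 0300 = 192. A small standalone illustration (ordinary user-space C, not kernel code; the struct name is made up):

    #include <stdio.h>

    struct kbdiacruc_like { unsigned int diacr, base, result; };

    int main(void)
    {
            /* '\300' is a character constant: with signed 8-bit char its value
             * is -64, which sign-extends when converted to unsigned int. */
            struct kbdiacruc_like as_char  = { '`', 'A', '\300' };
            struct kbdiacruc_like as_octal = { '`', 'A', 0300  };

            printf("%#x vs %#x\n", as_char.result, as_octal.result);
            /* on a signed-char target this typically prints: 0xffffffc0 vs 0xc0 */
            return 0;
    }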
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1db93b619074..8a49835bd0f8 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -186,6 +186,7 @@ enum { | |||
186 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ | 186 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ |
187 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ | 187 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ |
188 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ | 188 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ |
189 | AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ | ||
189 | 190 | ||
190 | /* ap->flags bits */ | 191 | /* ap->flags bits */ |
191 | 192 | ||
@@ -255,6 +256,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap); | |||
255 | static void ahci_p5wdh_error_handler(struct ata_port *ap); | 256 | static void ahci_p5wdh_error_handler(struct ata_port *ap); |
256 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); | 257 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); |
257 | static int ahci_port_resume(struct ata_port *ap); | 258 | static int ahci_port_resume(struct ata_port *ap); |
259 | static void ahci_dev_config(struct ata_device *dev); | ||
258 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); | 260 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); |
259 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, | 261 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, |
260 | u32 opts); | 262 | u32 opts); |
@@ -294,6 +296,8 @@ static const struct ata_port_operations ahci_ops = { | |||
294 | .check_altstatus = ahci_check_status, | 296 | .check_altstatus = ahci_check_status, |
295 | .dev_select = ata_noop_dev_select, | 297 | .dev_select = ata_noop_dev_select, |
296 | 298 | ||
299 | .dev_config = ahci_dev_config, | ||
300 | |||
297 | .tf_read = ahci_tf_read, | 301 | .tf_read = ahci_tf_read, |
298 | 302 | ||
299 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | 303 | .qc_defer = sata_pmp_qc_defer_cmd_switch, |
@@ -425,7 +429,7 @@ static const struct ata_port_info ahci_port_info[] = { | |||
425 | /* board_ahci_sb600 */ | 429 | /* board_ahci_sb600 */ |
426 | { | 430 | { |
427 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | | 431 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | |
428 | AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), | 432 | AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), |
429 | .flags = AHCI_FLAG_COMMON, | 433 | .flags = AHCI_FLAG_COMMON, |
430 | .link_flags = AHCI_LFLAG_COMMON, | 434 | .link_flags = AHCI_LFLAG_COMMON, |
431 | .pio_mask = 0x1f, /* pio0-4 */ | 435 | .pio_mask = 0x1f, /* pio0-4 */ |
@@ -1176,6 +1180,14 @@ static void ahci_init_controller(struct ata_host *host) | |||
1176 | VPRINTK("HOST_CTL 0x%x\n", tmp); | 1180 | VPRINTK("HOST_CTL 0x%x\n", tmp); |
1177 | } | 1181 | } |
1178 | 1182 | ||
1183 | static void ahci_dev_config(struct ata_device *dev) | ||
1184 | { | ||
1185 | struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; | ||
1186 | |||
1187 | if (hpriv->flags & AHCI_HFLAG_SECT255) | ||
1188 | dev->max_sectors = 255; | ||
1189 | } | ||
1190 | |||
1179 | static unsigned int ahci_dev_classify(struct ata_port *ap) | 1191 | static unsigned int ahci_dev_classify(struct ata_port *ap) |
1180 | { | 1192 | { |
1181 | void __iomem *port_mmio = ahci_port_base(ap); | 1193 | void __iomem *port_mmio = ahci_port_base(ap); |
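For board_ahci_sb600 the port-info hunk swaps AHCI_HFLAG_32BIT_ONLY for the new AHCI_HFLAG_SECT255, and the new ->dev_config hook caps dev->max_sectors at 255 whenever that host flag is set; libata applies dev->max_sectors as the per-command transfer cap when it configures the SCSI device, so no single command ever asks for 256 sectors. The likely point of the 255 limit (an inference, not stated in the hunk) is that an LBA28 taskfile encodes a 256-sector transfer with a sector-count byte of 0, a special case this controller apparently mishandles:

    /* Illustration only, not driver code: the LBA28 sector-count field is one
     * byte, and a full 256-sector transfer has to be encoded as 0.  Capping
     * max_sectors at 255 means that encoding is never generated. */
    static unsigned char lba28_nsect_field(unsigned int nsect)  /* valid: 1..256 */
    {
            return nsect & 0xff;            /* 255 -> 0xff, 256 -> 0x00 */
    }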
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fbc24358ada0..4fbcce758b04 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -113,7 +113,7 @@ int atapi_enabled = 1; | |||
113 | module_param(atapi_enabled, int, 0444); | 113 | module_param(atapi_enabled, int, 0444); |
114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 114 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); |
115 | 115 | ||
116 | int atapi_dmadir = 0; | 116 | static int atapi_dmadir = 0; |
117 | module_param(atapi_dmadir, int, 0444); | 117 | module_param(atapi_dmadir, int, 0444); |
118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | 118 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); |
119 | 119 | ||
@@ -6567,6 +6567,8 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) | |||
6567 | ata_lpm_enable(host); | 6567 | ata_lpm_enable(host); |
6568 | 6568 | ||
6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); | 6569 | rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); |
6570 | if (rc == 0) | ||
6571 | host->dev->power.power_state = mesg; | ||
6570 | return rc; | 6572 | return rc; |
6571 | } | 6573 | } |
6572 | 6574 | ||
@@ -6585,6 +6587,7 @@ void ata_host_resume(struct ata_host *host) | |||
6585 | { | 6587 | { |
6586 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, | 6588 | ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, |
6587 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); | 6589 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); |
6590 | host->dev->power.power_state = PMSG_ON; | ||
6588 | 6591 | ||
6589 | /* reenable link pm */ | 6592 | /* reenable link pm */ |
6590 | ata_lpm_disable(host); | 6593 | ata_lpm_disable(host); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0562b0a49f3b..8f0e8f2bc628 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, | |||
862 | struct request_queue *q = sdev->request_queue; | 862 | struct request_queue *q = sdev->request_queue; |
863 | void *buf; | 863 | void *buf; |
864 | 864 | ||
865 | /* set the min alignment */ | 865 | /* set the min alignment and padding */ |
866 | blk_queue_update_dma_alignment(sdev->request_queue, | 866 | blk_queue_update_dma_alignment(sdev->request_queue, |
867 | ATA_DMA_PAD_SZ - 1); | 867 | ATA_DMA_PAD_SZ - 1); |
868 | blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); | ||
868 | 869 | ||
869 | /* configure draining */ | 870 | /* configure draining */ |
870 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); | 871 | buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); |
@@ -1694,12 +1695,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
1694 | u8 *rbuf; | 1695 | u8 *rbuf; |
1695 | unsigned int buflen, rc; | 1696 | unsigned int buflen, rc; |
1696 | struct scsi_cmnd *cmd = args->cmd; | 1697 | struct scsi_cmnd *cmd = args->cmd; |
1698 | unsigned long flags; | ||
1699 | |||
1700 | local_irq_save(flags); | ||
1697 | 1701 | ||
1698 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); | 1702 | buflen = ata_scsi_rbuf_get(cmd, &rbuf); |
1699 | memset(rbuf, 0, buflen); | 1703 | memset(rbuf, 0, buflen); |
1700 | rc = actor(args, rbuf, buflen); | 1704 | rc = actor(args, rbuf, buflen); |
1701 | ata_scsi_rbuf_put(cmd, rbuf); | 1705 | ata_scsi_rbuf_put(cmd, rbuf); |
1702 | 1706 | ||
1707 | local_irq_restore(flags); | ||
1708 | |||
1703 | if (rc == 0) | 1709 | if (rc == 0) |
1704 | cmd->result = SAM_STAT_GOOD; | 1710 | cmd->result = SAM_STAT_GOOD; |
1705 | args->done(cmd); | 1711 | args->done(cmd); |
@@ -2473,6 +2479,9 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2473 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { | 2479 | if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { |
2474 | u8 *buf = NULL; | 2480 | u8 *buf = NULL; |
2475 | unsigned int buflen; | 2481 | unsigned int buflen; |
2482 | unsigned long flags; | ||
2483 | |||
2484 | local_irq_save(flags); | ||
2476 | 2485 | ||
2477 | buflen = ata_scsi_rbuf_get(cmd, &buf); | 2486 | buflen = ata_scsi_rbuf_get(cmd, &buf); |
2478 | 2487 | ||
@@ -2490,6 +2499,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) | |||
2490 | } | 2499 | } |
2491 | 2500 | ||
2492 | ata_scsi_rbuf_put(cmd, buf); | 2501 | ata_scsi_rbuf_put(cmd, buf); |
2502 | |||
2503 | local_irq_restore(flags); | ||
2493 | } | 2504 | } |
2494 | 2505 | ||
2495 | cmd->result = SAM_STAT_GOOD; | 2506 | cmd->result = SAM_STAT_GOOD; |
@@ -2528,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2528 | } | 2539 | } |
2529 | 2540 | ||
2530 | qc->tf.command = ATA_CMD_PACKET; | 2541 | qc->tf.command = ATA_CMD_PACKET; |
2531 | qc->nbytes = scsi_bufflen(scmd); | 2542 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2532 | 2543 | ||
2533 | /* check whether ATAPI DMA is safe */ | 2544 | /* check whether ATAPI DMA is safe */ |
2534 | if (!using_pio && ata_check_atapi_dma(qc)) | 2545 | if (!using_pio && ata_check_atapi_dma(qc)) |
@@ -2539,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) | |||
2539 | * want to set it properly, and for DMA where it is | 2550 | * want to set it properly, and for DMA where it is |
2540 | * effectively meaningless. | 2551 | * effectively meaningless. |
2541 | */ | 2552 | */ |
2542 | nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); | 2553 | nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); |
2543 | 2554 | ||
2544 | /* Most ATAPI devices which honor transfer chunk size don't | 2555 | /* Most ATAPI devices which honor transfer chunk size don't |
2545 | * behave according to the spec when odd chunk size which | 2556 | * behave according to the spec when odd chunk size which |
@@ -2865,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) | |||
2865 | * TODO: find out if we need to do more here to | 2876 | * TODO: find out if we need to do more here to |
2866 | * cover scatter/gather case. | 2877 | * cover scatter/gather case. |
2867 | */ | 2878 | */ |
2868 | qc->nbytes = scsi_bufflen(scmd); | 2879 | qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; |
2869 | 2880 | ||
2870 | /* request result TF and be quiet about device error */ | 2881 | /* request result TF and be quiet about device error */ |
2871 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; | 2882 | qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; |
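Two themes in this file. First, the request queue now gets blk_queue_dma_pad(q, ATA_DMA_PAD_SZ - 1) alongside the existing DMA-alignment setting, and both atapi_xlat() and ata_scsi_pass_thru() size qc->nbytes as scsi_bufflen() plus scmd->request->extra_len, so the pad/drain bytes the block layer appends are included in the length programmed into the device (the per-command byte-count calculation also switches from the old raw_data_len field to data_len). Second, ata_scsi_rbuf_fill() and atapi_qc_complete() now bracket ata_scsi_rbuf_get()/put() with local_irq_save()/restore(); the usual reason for that pattern (an assumption about the helpers' internals, not visible in this hunk) is that the response buffer is mapped through an interrupt-class kmap_atomic slot that must not be re-entered by an interrupt on the same CPU. A small sketch of what the pad setting asks for, with ATA_DMA_PAD_SZ being 4 in libata.h:

    /* Sketch: blk_queue_dma_pad(q, ATA_DMA_PAD_SZ - 1) makes the block layer
     * round ATAPI transfers up to a multiple of ATA_DMA_PAD_SZ; the extra
     * bytes are reported in rq->extra_len, which is why qc->nbytes above now
     * adds scmd->request->extra_len. */
    #define ATA_DMA_PAD_SZ 4

    static unsigned int dma_padded_len(unsigned int len)
    {
            unsigned int mask = ATA_DMA_PAD_SZ - 1;

            return (len + mask) & ~mask;    /* e.g. 510 -> 512, 512 -> 512 */
    }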
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6036dedfe377..aa884f71a12a 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -56,7 +56,6 @@ enum { | |||
56 | extern unsigned int ata_print_id; | 56 | extern unsigned int ata_print_id; |
57 | extern struct workqueue_struct *ata_aux_wq; | 57 | extern struct workqueue_struct *ata_aux_wq; |
58 | extern int atapi_enabled; | 58 | extern int atapi_enabled; |
59 | extern int atapi_dmadir; | ||
60 | extern int atapi_passthru16; | 59 | extern int atapi_passthru16; |
61 | extern int libata_fua; | 60 | extern int libata_fua; |
62 | extern int libata_noacpi; | 61 | extern int libata_noacpi; |
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0713872cf65c..a742efa0da2b 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/libata.h> | 27 | #include <linux/libata.h> |
28 | 28 | ||
29 | #define DRV_NAME "pata_hpt366" | 29 | #define DRV_NAME "pata_hpt366" |
30 | #define DRV_VERSION "0.6.1" | 30 | #define DRV_VERSION "0.6.2" |
31 | 31 | ||
32 | struct hpt_clock { | 32 | struct hpt_clock { |
33 | u8 xfer_speed; | 33 | u8 xfer_speed; |
@@ -180,9 +180,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
181 | mask &= ~ATA_MASK_UDMA; | 181 | mask &= ~ATA_MASK_UDMA; |
182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) | 182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) |
183 | mask &= ~(0x07 << ATA_SHIFT_UDMA); | 183 | mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) | 184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) |
185 | mask &= ~(0x0F << ATA_SHIFT_UDMA); | 185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); |
186 | } | 186 | } |
187 | return ata_pci_default_filter(adev, mask); | 187 | return ata_pci_default_filter(adev, mask); |
188 | } | 188 | } |
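The filter fixes here (and the matching ones in pata_hpt37x.c and pata_serverworks.c below) are about which end of the UDMA mask gets cleared. In the libata transfer-mode mask, bit N above ATA_SHIFT_UDMA stands for UDMA mode N, so a drive blacklisted as broken at UDMA3 and faster should lose modes 3 and up; the old expressions cleared the low bits instead, removing the slow, safe modes and leaving the problematic fast ones selectable. Roughly (the ATA_SHIFT_UDMA value below is only for the illustration):

    /* Bit N above ATA_SHIFT_UDMA <=> UDMA mode N. */
    #define ATA_SHIFT_UDMA 16                        /* placeholder value */

    unsigned long mask = 0x7fUL << ATA_SHIFT_UDMA;   /* controller: UDMA0..6 */

    mask &= ~(0xF8UL << ATA_SHIFT_UDMA);  /* new: drop UDMA3..7, keep UDMA0..2 */
    /* old: mask &= ~(0x07 << ATA_SHIFT_UDMA) dropped UDMA0..2 instead,
     * leaving the modes the drive is blacklisted for still enabled. */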
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 68eb34929cec..9a10878b2ad8 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/libata.h> | 24 | #include <linux/libata.h> |
25 | 25 | ||
26 | #define DRV_NAME "pata_hpt37x" | 26 | #define DRV_NAME "pata_hpt37x" |
27 | #define DRV_VERSION "0.6.9" | 27 | #define DRV_VERSION "0.6.11" |
28 | 28 | ||
29 | struct hpt_clock { | 29 | struct hpt_clock { |
30 | u8 xfer_speed; | 30 | u8 xfer_speed; |
@@ -281,7 +281,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) | |||
281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
282 | mask &= ~ATA_MASK_UDMA; | 282 | mask &= ~ATA_MASK_UDMA; |
283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
284 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 284 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
285 | } | 285 | } |
286 | return ata_pci_default_filter(adev, mask); | 286 | return ata_pci_default_filter(adev, mask); |
287 | } | 287 | } |
@@ -297,7 +297,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) | |||
297 | { | 297 | { |
298 | if (adev->class == ATA_DEV_ATA) { | 298 | if (adev->class == ATA_DEV_ATA) { |
299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
300 | mask &= ~ (0x1F << ATA_SHIFT_UDMA); | 300 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
301 | } | 301 | } |
302 | return ata_pci_default_filter(adev, mask); | 302 | return ata_pci_default_filter(adev, mask); |
303 | } | 303 | } |
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 9c523fbf529e..a589c0fa0dbb 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -226,7 +226,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo | |||
226 | 226 | ||
227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { | 227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { |
228 | if (!strcmp(p, model_num)) | 228 | if (!strcmp(p, model_num)) |
229 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 229 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
230 | } | 230 | } |
231 | return ata_pci_default_filter(adev, mask); | 231 | return ata_pci_default_filter(adev, mask); |
232 | } | 232 | } |
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 69f651e0bc98..840d1c4a7850 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -45,6 +45,8 @@ | |||
45 | #include <linux/interrupt.h> | 45 | #include <linux/interrupt.h> |
46 | #include <linux/device.h> | 46 | #include <linux/device.h> |
47 | #include <scsi/scsi_host.h> | 47 | #include <scsi/scsi_host.h> |
48 | #include <scsi/scsi_cmnd.h> | ||
49 | #include <scsi/scsi.h> | ||
48 | #include <linux/libata.h> | 50 | #include <linux/libata.h> |
49 | 51 | ||
50 | #ifdef CONFIG_PPC_OF | 52 | #ifdef CONFIG_PPC_OF |
@@ -59,6 +61,7 @@ enum { | |||
59 | /* ap->flags bits */ | 61 | /* ap->flags bits */ |
60 | K2_FLAG_SATA_8_PORTS = (1 << 24), | 62 | K2_FLAG_SATA_8_PORTS = (1 << 24), |
61 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), | 63 | K2_FLAG_NO_ATAPI_DMA = (1 << 25), |
64 | K2_FLAG_BAR_POS_3 = (1 << 26), | ||
62 | 65 | ||
63 | /* Taskfile registers offsets */ | 66 | /* Taskfile registers offsets */ |
64 | K2_SATA_TF_CMD_OFFSET = 0x00, | 67 | K2_SATA_TF_CMD_OFFSET = 0x00, |
@@ -88,8 +91,10 @@ enum { | |||
88 | /* Port stride */ | 91 | /* Port stride */ |
89 | K2_SATA_PORT_OFFSET = 0x100, | 92 | K2_SATA_PORT_OFFSET = 0x100, |
90 | 93 | ||
91 | board_svw4 = 0, | 94 | chip_svw4 = 0, |
92 | board_svw8 = 1, | 95 | chip_svw8 = 1, |
96 | chip_svw42 = 2, /* bar 3 */ | ||
97 | chip_svw43 = 3, /* bar 5 */ | ||
93 | }; | 98 | }; |
94 | 99 | ||
95 | static u8 k2_stat_check_status(struct ata_port *ap); | 100 | static u8 k2_stat_check_status(struct ata_port *ap); |
@@ -97,10 +102,25 @@ static u8 k2_stat_check_status(struct ata_port *ap); | |||
97 | 102 | ||
98 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) | 103 | static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) |
99 | { | 104 | { |
105 | u8 cmnd = qc->scsicmd->cmnd[0]; | ||
106 | |||
100 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) | 107 | if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) |
101 | return -1; /* ATAPI DMA not supported */ | 108 | return -1; /* ATAPI DMA not supported */ |
109 | else { | ||
110 | switch (cmnd) { | ||
111 | case READ_10: | ||
112 | case READ_12: | ||
113 | case READ_16: | ||
114 | case WRITE_10: | ||
115 | case WRITE_12: | ||
116 | case WRITE_16: | ||
117 | return 0; | ||
118 | |||
119 | default: | ||
120 | return -1; | ||
121 | } | ||
102 | 122 | ||
103 | return 0; | 123 | } |
104 | } | 124 | } |
105 | 125 | ||
106 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) | 126 | static int k2_sata_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val) |
@@ -354,7 +374,7 @@ static const struct ata_port_operations k2_sata_ops = { | |||
354 | }; | 374 | }; |
355 | 375 | ||
356 | static const struct ata_port_info k2_port_info[] = { | 376 | static const struct ata_port_info k2_port_info[] = { |
357 | /* board_svw4 */ | 377 | /* chip_svw4 */ |
358 | { | 378 | { |
359 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 379 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
360 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, | 380 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, |
@@ -363,7 +383,7 @@ static const struct ata_port_info k2_port_info[] = { | |||
363 | .udma_mask = ATA_UDMA6, | 383 | .udma_mask = ATA_UDMA6, |
364 | .port_ops = &k2_sata_ops, | 384 | .port_ops = &k2_sata_ops, |
365 | }, | 385 | }, |
366 | /* board_svw8 */ | 386 | /* chip_svw8 */ |
367 | { | 387 | { |
368 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 388 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
369 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | | 389 | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | |
@@ -373,6 +393,24 @@ static const struct ata_port_info k2_port_info[] = { | |||
373 | .udma_mask = ATA_UDMA6, | 393 | .udma_mask = ATA_UDMA6, |
374 | .port_ops = &k2_sata_ops, | 394 | .port_ops = &k2_sata_ops, |
375 | }, | 395 | }, |
396 | /* chip_svw42 */ | ||
397 | { | ||
398 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
399 | ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, | ||
400 | .pio_mask = 0x1f, | ||
401 | .mwdma_mask = 0x07, | ||
402 | .udma_mask = ATA_UDMA6, | ||
403 | .port_ops = &k2_sata_ops, | ||
404 | }, | ||
405 | /* chip_svw43 */ | ||
406 | { | ||
407 | .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | ||
408 | ATA_FLAG_MMIO, | ||
409 | .pio_mask = 0x1f, | ||
410 | .mwdma_mask = 0x07, | ||
411 | .udma_mask = ATA_UDMA6, | ||
412 | .port_ops = &k2_sata_ops, | ||
413 | }, | ||
376 | }; | 414 | }; |
377 | 415 | ||
378 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) | 416 | static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) |
@@ -402,7 +440,7 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
402 | { &k2_port_info[ent->driver_data], NULL }; | 440 | { &k2_port_info[ent->driver_data], NULL }; |
403 | struct ata_host *host; | 441 | struct ata_host *host; |
404 | void __iomem *mmio_base; | 442 | void __iomem *mmio_base; |
405 | int n_ports, i, rc; | 443 | int n_ports, i, rc, bar_pos; |
406 | 444 | ||
407 | if (!printed_version++) | 445 | if (!printed_version++) |
408 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); | 446 | dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); |
@@ -416,6 +454,9 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
416 | if (!host) | 454 | if (!host) |
417 | return -ENOMEM; | 455 | return -ENOMEM; |
418 | 456 | ||
457 | bar_pos = 5; | ||
458 | if (ppi[0]->flags & K2_FLAG_BAR_POS_3) | ||
459 | bar_pos = 3; | ||
419 | /* | 460 | /* |
420 | * If this driver happens to only be useful on Apple's K2, then | 461 | * If this driver happens to only be useful on Apple's K2, then |
421 | * we should check that here as it has a normal Serverworks ID | 462 | * we should check that here as it has a normal Serverworks ID |
@@ -428,17 +469,23 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
428 | * Check if we have resources mapped at all (second function may | 469 | * Check if we have resources mapped at all (second function may |
429 | * have been disabled by firmware) | 470 | * have been disabled by firmware) |
430 | */ | 471 | */ |
431 | if (pci_resource_len(pdev, 5) == 0) | 472 | if (pci_resource_len(pdev, bar_pos) == 0) { |
473 | /* In IDE mode we need to pin the device to ensure that | ||
474 | pcim_release does not clear the busmaster bit in config | ||
475 | space, clearing causes busmaster DMA to fail on | ||
476 | ports 3 & 4 */ | ||
477 | pcim_pin_device(pdev); | ||
432 | return -ENODEV; | 478 | return -ENODEV; |
479 | } | ||
433 | 480 | ||
434 | /* Request and iomap PCI regions */ | 481 | /* Request and iomap PCI regions */ |
435 | rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME); | 482 | rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); |
436 | if (rc == -EBUSY) | 483 | if (rc == -EBUSY) |
437 | pcim_pin_device(pdev); | 484 | pcim_pin_device(pdev); |
438 | if (rc) | 485 | if (rc) |
439 | return rc; | 486 | return rc; |
440 | host->iomap = pcim_iomap_table(pdev); | 487 | host->iomap = pcim_iomap_table(pdev); |
441 | mmio_base = host->iomap[5]; | 488 | mmio_base = host->iomap[bar_pos]; |
442 | 489 | ||
443 | /* different controllers have different number of ports - currently 4 or 8 */ | 490 | /* different controllers have different number of ports - currently 4 or 8 */ |
444 | /* All ports are on the same function. Multi-function device is no | 491 | /* All ports are on the same function. Multi-function device is no |
@@ -483,11 +530,13 @@ static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *en | |||
483 | * controller | 530 | * controller |
484 | * */ | 531 | * */ |
485 | static const struct pci_device_id k2_sata_pci_tbl[] = { | 532 | static const struct pci_device_id k2_sata_pci_tbl[] = { |
486 | { PCI_VDEVICE(SERVERWORKS, 0x0240), board_svw4 }, | 533 | { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, |
487 | { PCI_VDEVICE(SERVERWORKS, 0x0241), board_svw4 }, | 534 | { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw4 }, |
488 | { PCI_VDEVICE(SERVERWORKS, 0x0242), board_svw8 }, | 535 | { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw8 }, |
489 | { PCI_VDEVICE(SERVERWORKS, 0x024a), board_svw4 }, | 536 | { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, |
490 | { PCI_VDEVICE(SERVERWORKS, 0x024b), board_svw4 }, | 537 | { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, |
538 | { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, | ||
539 | { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, | ||
491 | 540 | ||
492 | { } | 541 | { } |
493 | }; | 542 | }; |
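Three changes in this driver. ATAPI DMA is now allowed only for plain READ/WRITE (10/12/16) packet commands, with everything else forced to PIO (in ->check_atapi_dma, returning 0 permits DMA and any non-zero value means use PIO). Two new PCI IDs, 0x0410 and 0x0411, are added as chip_svw42/chip_svw43; svw42 keeps its registers behind BAR 3 rather than BAR 5, hence the bar_pos plumbing. And when the BAR turns out to be unassigned, the device is pinned so managed-resource teardown does not clear the bus-master bit that the IDE-mode ports still need (per the new comment). The command whitelist could equally be written table-driven; the following is just an illustrative re-expression, not the driver code:

    static const u8 svw_dma_ok_cmds[] = {
            READ_10, READ_12, READ_16,
            WRITE_10, WRITE_12, WRITE_16,
    };

    static int k2_sata_check_atapi_dma_alt(struct ata_queued_cmd *qc)
    {
            int i;

            if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA)
                    return -1;              /* ATAPI DMA not supported at all */

            for (i = 0; i < ARRAY_SIZE(svw_dma_ok_cmds); i++)
                    if (qc->scsicmd->cmnd[0] == svw_dma_ok_cmds[i])
                            return 0;       /* straightforward read/write: DMA is fine */

            return -1;                      /* anything else goes through PIO */
    }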
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9c0070b5bd3e..7de543d1d0b4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -621,7 +621,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) | 621 | static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) |
622 | { | 622 | { |
623 | /* see if we live in a "glue" directory */ | 623 | /* see if we live in a "glue" directory */ |
624 | if (!dev->class || glue_dir->kset != &dev->class->class_dirs) | 624 | if (!glue_dir || !dev->class || |
625 | glue_dir->kset != &dev->class->class_dirs) | ||
625 | return; | 626 | return; |
626 | 627 | ||
627 | kobject_put(glue_dir); | 628 | kobject_put(glue_dir); |
@@ -770,17 +771,10 @@ int device_add(struct device *dev) | |||
770 | struct class_interface *class_intf; | 771 | struct class_interface *class_intf; |
771 | int error; | 772 | int error; |
772 | 773 | ||
773 | error = pm_sleep_lock(); | ||
774 | if (error) { | ||
775 | dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__); | ||
776 | dump_stack(); | ||
777 | return error; | ||
778 | } | ||
779 | |||
780 | dev = get_device(dev); | 774 | dev = get_device(dev); |
781 | if (!dev || !strlen(dev->bus_id)) { | 775 | if (!dev || !strlen(dev->bus_id)) { |
782 | error = -EINVAL; | 776 | error = -EINVAL; |
783 | goto Error; | 777 | goto Done; |
784 | } | 778 | } |
785 | 779 | ||
786 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); | 780 | pr_debug("device: '%s': %s\n", dev->bus_id, __FUNCTION__); |
@@ -843,11 +837,9 @@ int device_add(struct device *dev) | |||
843 | } | 837 | } |
844 | Done: | 838 | Done: |
845 | put_device(dev); | 839 | put_device(dev); |
846 | pm_sleep_unlock(); | ||
847 | return error; | 840 | return error; |
848 | BusError: | 841 | BusError: |
849 | device_pm_remove(dev); | 842 | device_pm_remove(dev); |
850 | dpm_sysfs_remove(dev); | ||
851 | PMError: | 843 | PMError: |
852 | if (dev->bus) | 844 | if (dev->bus) |
853 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, | 845 | blocking_notifier_call_chain(&dev->bus->p->bus_notifier, |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ee9d1c8db0d6..d887d5cb5bef 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -48,7 +48,6 @@ | |||
48 | */ | 48 | */ |
49 | 49 | ||
50 | LIST_HEAD(dpm_active); | 50 | LIST_HEAD(dpm_active); |
51 | static LIST_HEAD(dpm_locked); | ||
52 | static LIST_HEAD(dpm_off); | 51 | static LIST_HEAD(dpm_off); |
53 | static LIST_HEAD(dpm_off_irq); | 52 | static LIST_HEAD(dpm_off_irq); |
54 | static LIST_HEAD(dpm_destroy); | 53 | static LIST_HEAD(dpm_destroy); |
@@ -81,28 +80,6 @@ void device_pm_add(struct device *dev) | |||
81 | */ | 80 | */ |
82 | void device_pm_remove(struct device *dev) | 81 | void device_pm_remove(struct device *dev) |
83 | { | 82 | { |
84 | /* | ||
85 | * If this function is called during a suspend, it will be blocked, | ||
86 | * because we're holding the device's semaphore at that time, which may | ||
87 | * lead to a deadlock. In that case we want to print a warning. | ||
88 | * However, it may also be called by unregister_dropped_devices() with | ||
89 | * the device's semaphore released, in which case the warning should | ||
90 | * not be printed. | ||
91 | */ | ||
92 | if (down_trylock(&dev->sem)) { | ||
93 | if (down_read_trylock(&pm_sleep_rwsem)) { | ||
94 | /* No suspend in progress, wait on dev->sem */ | ||
95 | down(&dev->sem); | ||
96 | up_read(&pm_sleep_rwsem); | ||
97 | } else { | ||
98 | /* Suspend in progress, we may deadlock */ | ||
99 | dev_warn(dev, "Suspicious %s during suspend\n", | ||
100 | __FUNCTION__); | ||
101 | dump_stack(); | ||
102 | /* The user has been warned ... */ | ||
103 | down(&dev->sem); | ||
104 | } | ||
105 | } | ||
106 | pr_debug("PM: Removing info for %s:%s\n", | 83 | pr_debug("PM: Removing info for %s:%s\n", |
107 | dev->bus ? dev->bus->name : "No Bus", | 84 | dev->bus ? dev->bus->name : "No Bus", |
108 | kobject_name(&dev->kobj)); | 85 | kobject_name(&dev->kobj)); |
@@ -110,7 +87,6 @@ void device_pm_remove(struct device *dev) | |||
110 | dpm_sysfs_remove(dev); | 87 | dpm_sysfs_remove(dev); |
111 | list_del_init(&dev->power.entry); | 88 | list_del_init(&dev->power.entry); |
112 | mutex_unlock(&dpm_list_mtx); | 89 | mutex_unlock(&dpm_list_mtx); |
113 | up(&dev->sem); | ||
114 | } | 90 | } |
115 | 91 | ||
116 | /** | 92 | /** |
@@ -230,6 +206,8 @@ static int resume_device(struct device *dev) | |||
230 | TRACE_DEVICE(dev); | 206 | TRACE_DEVICE(dev); |
231 | TRACE_RESUME(0); | 207 | TRACE_RESUME(0); |
232 | 208 | ||
209 | down(&dev->sem); | ||
210 | |||
233 | if (dev->bus && dev->bus->resume) { | 211 | if (dev->bus && dev->bus->resume) { |
234 | dev_dbg(dev,"resuming\n"); | 212 | dev_dbg(dev,"resuming\n"); |
235 | error = dev->bus->resume(dev); | 213 | error = dev->bus->resume(dev); |
@@ -245,6 +223,8 @@ static int resume_device(struct device *dev) | |||
245 | error = dev->class->resume(dev); | 223 | error = dev->class->resume(dev); |
246 | } | 224 | } |
247 | 225 | ||
226 | up(&dev->sem); | ||
227 | |||
248 | TRACE_RESUME(error); | 228 | TRACE_RESUME(error); |
249 | return error; | 229 | return error; |
250 | } | 230 | } |
@@ -266,7 +246,7 @@ static void dpm_resume(void) | |||
266 | struct list_head *entry = dpm_off.next; | 246 | struct list_head *entry = dpm_off.next; |
267 | struct device *dev = to_device(entry); | 247 | struct device *dev = to_device(entry); |
268 | 248 | ||
269 | list_move_tail(entry, &dpm_locked); | 249 | list_move_tail(entry, &dpm_active); |
270 | mutex_unlock(&dpm_list_mtx); | 250 | mutex_unlock(&dpm_list_mtx); |
271 | resume_device(dev); | 251 | resume_device(dev); |
272 | mutex_lock(&dpm_list_mtx); | 252 | mutex_lock(&dpm_list_mtx); |
@@ -275,25 +255,6 @@ static void dpm_resume(void) | |||
275 | } | 255 | } |
276 | 256 | ||
277 | /** | 257 | /** |
278 | * unlock_all_devices - Release each device's semaphore | ||
279 | * | ||
280 | * Go through the dpm_off list. Put each device on the dpm_active | ||
281 | * list and unlock it. | ||
282 | */ | ||
283 | static void unlock_all_devices(void) | ||
284 | { | ||
285 | mutex_lock(&dpm_list_mtx); | ||
286 | while (!list_empty(&dpm_locked)) { | ||
287 | struct list_head *entry = dpm_locked.prev; | ||
288 | struct device *dev = to_device(entry); | ||
289 | |||
290 | list_move(entry, &dpm_active); | ||
291 | up(&dev->sem); | ||
292 | } | ||
293 | mutex_unlock(&dpm_list_mtx); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * unregister_dropped_devices - Unregister devices scheduled for removal | 258 | * unregister_dropped_devices - Unregister devices scheduled for removal |
298 | * | 259 | * |
299 | * Unregister all devices on the dpm_destroy list. | 260 | * Unregister all devices on the dpm_destroy list. |
@@ -305,7 +266,6 @@ static void unregister_dropped_devices(void) | |||
305 | struct list_head *entry = dpm_destroy.next; | 266 | struct list_head *entry = dpm_destroy.next; |
306 | struct device *dev = to_device(entry); | 267 | struct device *dev = to_device(entry); |
307 | 268 | ||
308 | up(&dev->sem); | ||
309 | mutex_unlock(&dpm_list_mtx); | 269 | mutex_unlock(&dpm_list_mtx); |
310 | /* This also removes the device from the list */ | 270 | /* This also removes the device from the list */ |
311 | device_unregister(dev); | 271 | device_unregister(dev); |
@@ -324,7 +284,6 @@ void device_resume(void) | |||
324 | { | 284 | { |
325 | might_sleep(); | 285 | might_sleep(); |
326 | dpm_resume(); | 286 | dpm_resume(); |
327 | unlock_all_devices(); | ||
328 | unregister_dropped_devices(); | 287 | unregister_dropped_devices(); |
329 | up_write(&pm_sleep_rwsem); | 288 | up_write(&pm_sleep_rwsem); |
330 | } | 289 | } |
@@ -388,18 +347,15 @@ int device_power_down(pm_message_t state) | |||
388 | struct list_head *entry = dpm_off.prev; | 347 | struct list_head *entry = dpm_off.prev; |
389 | struct device *dev = to_device(entry); | 348 | struct device *dev = to_device(entry); |
390 | 349 | ||
391 | list_del_init(&dev->power.entry); | ||
392 | error = suspend_device_late(dev, state); | 350 | error = suspend_device_late(dev, state); |
393 | if (error) { | 351 | if (error) { |
394 | printk(KERN_ERR "Could not power down device %s: " | 352 | printk(KERN_ERR "Could not power down device %s: " |
395 | "error %d\n", | 353 | "error %d\n", |
396 | kobject_name(&dev->kobj), error); | 354 | kobject_name(&dev->kobj), error); |
397 | if (list_empty(&dev->power.entry)) | ||
398 | list_add(&dev->power.entry, &dpm_off); | ||
399 | break; | 355 | break; |
400 | } | 356 | } |
401 | if (list_empty(&dev->power.entry)) | 357 | if (!list_empty(&dev->power.entry)) |
402 | list_add(&dev->power.entry, &dpm_off_irq); | 358 | list_move(&dev->power.entry, &dpm_off_irq); |
403 | } | 359 | } |
404 | 360 | ||
405 | if (!error) | 361 | if (!error) |
@@ -419,6 +375,8 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
419 | { | 375 | { |
420 | int error = 0; | 376 | int error = 0; |
421 | 377 | ||
378 | down(&dev->sem); | ||
379 | |||
422 | if (dev->power.power_state.event) { | 380 | if (dev->power.power_state.event) { |
423 | dev_dbg(dev, "PM: suspend %d-->%d\n", | 381 | dev_dbg(dev, "PM: suspend %d-->%d\n", |
424 | dev->power.power_state.event, state.event); | 382 | dev->power.power_state.event, state.event); |
@@ -441,6 +399,9 @@ static int suspend_device(struct device *dev, pm_message_t state) | |||
441 | error = dev->bus->suspend(dev, state); | 399 | error = dev->bus->suspend(dev, state); |
442 | suspend_report_result(dev->bus->suspend, error); | 400 | suspend_report_result(dev->bus->suspend, error); |
443 | } | 401 | } |
402 | |||
403 | up(&dev->sem); | ||
404 | |||
444 | return error; | 405 | return error; |
445 | } | 406 | } |
446 | 407 | ||
@@ -461,13 +422,13 @@ static int dpm_suspend(pm_message_t state) | |||
461 | int error = 0; | 422 | int error = 0; |
462 | 423 | ||
463 | mutex_lock(&dpm_list_mtx); | 424 | mutex_lock(&dpm_list_mtx); |
464 | while (!list_empty(&dpm_locked)) { | 425 | while (!list_empty(&dpm_active)) { |
465 | struct list_head *entry = dpm_locked.prev; | 426 | struct list_head *entry = dpm_active.prev; |
466 | struct device *dev = to_device(entry); | 427 | struct device *dev = to_device(entry); |
467 | 428 | ||
468 | list_del_init(&dev->power.entry); | ||
469 | mutex_unlock(&dpm_list_mtx); | 429 | mutex_unlock(&dpm_list_mtx); |
470 | error = suspend_device(dev, state); | 430 | error = suspend_device(dev, state); |
431 | mutex_lock(&dpm_list_mtx); | ||
471 | if (error) { | 432 | if (error) { |
472 | printk(KERN_ERR "Could not suspend device %s: " | 433 | printk(KERN_ERR "Could not suspend device %s: " |
473 | "error %d%s\n", | 434 | "error %d%s\n", |
@@ -476,14 +437,10 @@ static int dpm_suspend(pm_message_t state) | |||
476 | (error == -EAGAIN ? | 437 | (error == -EAGAIN ? |
477 | " (please convert to suspend_late)" : | 438 | " (please convert to suspend_late)" : |
478 | "")); | 439 | "")); |
479 | mutex_lock(&dpm_list_mtx); | ||
480 | if (list_empty(&dev->power.entry)) | ||
481 | list_add(&dev->power.entry, &dpm_locked); | ||
482 | break; | 440 | break; |
483 | } | 441 | } |
484 | mutex_lock(&dpm_list_mtx); | 442 | if (!list_empty(&dev->power.entry)) |
485 | if (list_empty(&dev->power.entry)) | 443 | list_move(&dev->power.entry, &dpm_off); |
486 | list_add(&dev->power.entry, &dpm_off); | ||
487 | } | 444 | } |
488 | mutex_unlock(&dpm_list_mtx); | 445 | mutex_unlock(&dpm_list_mtx); |
489 | 446 | ||
@@ -491,36 +448,6 @@ static int dpm_suspend(pm_message_t state) | |||
491 | } | 448 | } |
492 | 449 | ||
493 | /** | 450 | /** |
494 | * lock_all_devices - Acquire every device's semaphore | ||
495 | * | ||
496 | * Go through the dpm_active list. Carefully lock each device's | ||
497 | * semaphore and put it in on the dpm_locked list. | ||
498 | */ | ||
499 | static void lock_all_devices(void) | ||
500 | { | ||
501 | mutex_lock(&dpm_list_mtx); | ||
502 | while (!list_empty(&dpm_active)) { | ||
503 | struct list_head *entry = dpm_active.next; | ||
504 | struct device *dev = to_device(entry); | ||
505 | |||
506 | /* Required locking order is dev->sem first, | ||
507 | * then dpm_list_mutex. Hence this awkward code. | ||
508 | */ | ||
509 | get_device(dev); | ||
510 | mutex_unlock(&dpm_list_mtx); | ||
511 | down(&dev->sem); | ||
512 | mutex_lock(&dpm_list_mtx); | ||
513 | |||
514 | if (list_empty(entry)) | ||
515 | up(&dev->sem); /* Device was removed */ | ||
516 | else | ||
517 | list_move_tail(entry, &dpm_locked); | ||
518 | put_device(dev); | ||
519 | } | ||
520 | mutex_unlock(&dpm_list_mtx); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * device_suspend - Save state and stop all devices in system. | 451 | * device_suspend - Save state and stop all devices in system. |
525 | * @state: new power management state | 452 | * @state: new power management state |
526 | * | 453 | * |
@@ -533,7 +460,6 @@ int device_suspend(pm_message_t state) | |||
533 | 460 | ||
534 | might_sleep(); | 461 | might_sleep(); |
535 | down_write(&pm_sleep_rwsem); | 462 | down_write(&pm_sleep_rwsem); |
536 | lock_all_devices(); | ||
537 | error = dpm_suspend(state); | 463 | error = dpm_suspend(state); |
538 | if (error) | 464 | if (error) |
539 | device_resume(); | 465 | device_resume(); |
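The driver-core changes in core.c and power/main.c belong together. device_add() no longer takes pm_sleep_lock(), the dpm_locked list and the lock_all_devices()/unlock_all_devices() phases are gone, and each device's dev->sem is instead taken only inside suspend_device()/resume_device(), for the duration of that one callback. device_pm_remove() likewise drops its dev->sem juggling (and since it already calls dpm_sysfs_remove() itself, the extra dpm_sysfs_remove() in device_add()'s BusError unwind path goes away). The list handling also changes from delete-then-re-add to list_move(), guarded by a !list_empty() check so a device unregistered mid-suspend is not put back on a PM list. Distilled shape of the new suspend walk (a sketch of the flow above, not the verbatim code):

    mutex_lock(&dpm_list_mtx);
    while (!list_empty(&dpm_active)) {
            struct device *dev = to_device(dpm_active.prev);

            mutex_unlock(&dpm_list_mtx);          /* never call drivers under the list lock */
            error = suspend_device(dev, state);   /* takes and drops dev->sem internally */
            mutex_lock(&dpm_list_mtx);
            if (error)
                    break;
            if (!list_empty(&dev->power.entry))   /* device may have gone away meanwhile */
                    list_move(&dev->power.entry, &dpm_off);
    }
    mutex_unlock(&dpm_list_mtx);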
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index f25e7c6b2d27..40bca48abc12 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont, | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * transport_setup_device - declare a new dev for transport class association | 129 | * transport_setup_device - declare a new dev for transport class association but don't make it visible yet. |
130 | * but don't make it visible yet. | ||
131 | * | ||
132 | * @dev: the generic device representing the entity being added | 130 | * @dev: the generic device representing the entity being added |
133 | * | 131 | * |
134 | * Usually, dev represents some component in the HBA system (either | 132 | * Usually, dev represents some component in the HBA system (either |
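This one is purely a kernel-doc fix: the tooling takes the function summary from the single line after "transport_setup_device -", so wrapping it onto a second line breaks the extracted short description; the change folds it back into one line. The expected shape of such a comment, generically:

    /**
     * foo_setup_device - one-line summary, however long, kept on this line
     * @dev: the generic device being described
     *
     * The longer, free-form description starts after the blank line and may
     * span as many lines as needed.
     */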
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9715be3f2487..55bd35c0f082 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/blkpg.h> | 33 | #include <linux/blkpg.h> |
34 | #include <linux/timer.h> | 34 | #include <linux/timer.h> |
35 | #include <linux/proc_fs.h> | 35 | #include <linux/proc_fs.h> |
36 | #include <linux/seq_file.h> | ||
36 | #include <linux/init.h> | 37 | #include <linux/init.h> |
37 | #include <linux/hdreg.h> | 38 | #include <linux/hdreg.h> |
38 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
@@ -131,7 +132,6 @@ static struct board_type products[] = { | |||
131 | /*define how many times we will try a command because of bus resets */ | 132 | /*define how many times we will try a command because of bus resets */ |
132 | #define MAX_CMD_RETRIES 3 | 133 | #define MAX_CMD_RETRIES 3 |
133 | 134 | ||
134 | #define READ_AHEAD 1024 | ||
135 | #define MAX_CTLR 32 | 135 | #define MAX_CTLR 32 |
136 | 136 | ||
137 | /* Originally cciss driver only supports 8 major numbers */ | 137 | /* Originally cciss driver only supports 8 major numbers */ |
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, | |||
174 | static void fail_all_cmds(unsigned long ctlr); | 174 | static void fail_all_cmds(unsigned long ctlr); |
175 | 175 | ||
176 | #ifdef CONFIG_PROC_FS | 176 | #ifdef CONFIG_PROC_FS |
177 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | ||
178 | int length, int *eof, void *data); | ||
179 | static void cciss_procinit(int i); | 177 | static void cciss_procinit(int i); |
180 | #else | 178 | #else |
181 | static void cciss_procinit(int i) | 179 | static void cciss_procinit(int i) |
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr, | |||
240 | */ | 238 | */ |
241 | #define ENG_GIG 1000000000 | 239 | #define ENG_GIG 1000000000 |
242 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 240 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
241 | #define ENGAGE_SCSI "engage scsi" | ||
243 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 242 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
244 | "UNKNOWN" | 243 | "UNKNOWN" |
245 | }; | 244 | }; |
246 | 245 | ||
247 | static struct proc_dir_entry *proc_cciss; | 246 | static struct proc_dir_entry *proc_cciss; |
248 | 247 | ||
249 | static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | 248 | static void cciss_seq_show_header(struct seq_file *seq) |
250 | int length, int *eof, void *data) | ||
251 | { | 249 | { |
252 | off_t pos = 0; | 250 | ctlr_info_t *h = seq->private; |
253 | off_t len = 0; | 251 | |
254 | int size, i, ctlr; | 252 | seq_printf(seq, "%s: HP %s Controller\n" |
255 | ctlr_info_t *h = (ctlr_info_t *) data; | 253 | "Board ID: 0x%08lx\n" |
256 | drive_info_struct *drv; | 254 | "Firmware Version: %c%c%c%c\n" |
257 | unsigned long flags; | 255 | "IRQ: %d\n" |
258 | sector_t vol_sz, vol_sz_frac; | 256 | "Logical drives: %d\n" |
257 | "Current Q depth: %d\n" | ||
258 | "Current # commands on controller: %d\n" | ||
259 | "Max Q depth since init: %d\n" | ||
260 | "Max # commands on controller since init: %d\n" | ||
261 | "Max SG entries since init: %d\n", | ||
262 | h->devname, | ||
263 | h->product_name, | ||
264 | (unsigned long)h->board_id, | ||
265 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
266 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
267 | h->num_luns, | ||
268 | h->Qdepth, h->commands_outstanding, | ||
269 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
259 | 270 | ||
260 | ctlr = h->ctlr; | 271 | #ifdef CONFIG_CISS_SCSI_TAPE |
272 | cciss_seq_tape_report(seq, h->ctlr); | ||
273 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
274 | } | ||
275 | |||
276 | static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) | ||
277 | { | ||
278 | ctlr_info_t *h = seq->private; | ||
279 | unsigned ctlr = h->ctlr; | ||
280 | unsigned long flags; | ||
261 | 281 | ||
262 | /* prevent displaying bogus info during configuration | 282 | /* prevent displaying bogus info during configuration |
263 | * or deconfiguration of a logical volume | 283 | * or deconfiguration of a logical volume |
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset, | |||
265 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 285 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
266 | if (h->busy_configuring) { | 286 | if (h->busy_configuring) { |
267 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 287 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
268 | return -EBUSY; | 288 | return ERR_PTR(-EBUSY); |
269 | } | 289 | } |
270 | h->busy_configuring = 1; | 290 | h->busy_configuring = 1; |
271 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 291 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
272 | 292 | ||
273 | size = sprintf(buffer, "%s: HP %s Controller\n" | 293 | if (*pos == 0) |
274 | "Board ID: 0x%08lx\n" | 294 | cciss_seq_show_header(seq); |
275 | "Firmware Version: %c%c%c%c\n" | ||
276 | "IRQ: %d\n" | ||
277 | "Logical drives: %d\n" | ||
278 | "Max sectors: %d\n" | ||
279 | "Current Q depth: %d\n" | ||
280 | "Current # commands on controller: %d\n" | ||
281 | "Max Q depth since init: %d\n" | ||
282 | "Max # commands on controller since init: %d\n" | ||
283 | "Max SG entries since init: %d\n\n", | ||
284 | h->devname, | ||
285 | h->product_name, | ||
286 | (unsigned long)h->board_id, | ||
287 | h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], | ||
288 | h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], | ||
289 | h->num_luns, | ||
290 | h->cciss_max_sectors, | ||
291 | h->Qdepth, h->commands_outstanding, | ||
292 | h->maxQsinceinit, h->max_outstanding, h->maxSG); | ||
293 | |||
294 | pos += size; | ||
295 | len += size; | ||
296 | cciss_proc_tape_report(ctlr, buffer, &pos, &len); | ||
297 | for (i = 0; i <= h->highest_lun; i++) { | ||
298 | |||
299 | drv = &h->drv[i]; | ||
300 | if (drv->heads == 0) | ||
301 | continue; | ||
302 | 295 | ||
303 | vol_sz = drv->nr_blocks; | 296 | return pos; |
304 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | 297 | } |
305 | vol_sz_frac *= 100; | 298 | |
306 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 299 | static int cciss_seq_show(struct seq_file *seq, void *v) |
300 | { | ||
301 | sector_t vol_sz, vol_sz_frac; | ||
302 | ctlr_info_t *h = seq->private; | ||
303 | unsigned ctlr = h->ctlr; | ||
304 | loff_t *pos = v; | ||
305 | drive_info_struct *drv = &h->drv[*pos]; | ||
306 | |||
307 | if (*pos > h->highest_lun) | ||
308 | return 0; | ||
309 | |||
310 | if (drv->heads == 0) | ||
311 | return 0; | ||
312 | |||
313 | vol_sz = drv->nr_blocks; | ||
314 | vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); | ||
315 | vol_sz_frac *= 100; | ||
316 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | ||
317 | |||
318 | if (drv->raid_level > 5) | ||
319 | drv->raid_level = RAID_UNKNOWN; | ||
320 | seq_printf(seq, "cciss/c%dd%d:" | ||
321 | "\t%4u.%02uGB\tRAID %s\n", | ||
322 | ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, | ||
323 | raid_label[drv->raid_level]); | ||
324 | return 0; | ||
325 | } | ||
326 | |||
327 | static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
328 | { | ||
329 | ctlr_info_t *h = seq->private; | ||
330 | |||
331 | if (*pos > h->highest_lun) | ||
332 | return NULL; | ||
333 | *pos += 1; | ||
334 | |||
335 | return pos; | ||
336 | } | ||
337 | |||
338 | static void cciss_seq_stop(struct seq_file *seq, void *v) | ||
339 | { | ||
340 | ctlr_info_t *h = seq->private; | ||
341 | |||
342 | /* Only reset h->busy_configuring if we succeeded in setting | ||
343 | * it during cciss_seq_start. */ | ||
344 | if (v == ERR_PTR(-EBUSY)) | ||
345 | return; | ||
307 | 346 | ||
308 | if (drv->raid_level > 5) | ||
309 | drv->raid_level = RAID_UNKNOWN; | ||
310 | size = sprintf(buffer + len, "cciss/c%dd%d:" | ||
311 | "\t%4u.%02uGB\tRAID %s\n", | ||
312 | ctlr, i, (int)vol_sz, (int)vol_sz_frac, | ||
313 | raid_label[drv->raid_level]); | ||
314 | pos += size; | ||
315 | len += size; | ||
316 | } | ||
317 | |||
318 | *eof = 1; | ||
319 | *start = buffer + offset; | ||
320 | len -= offset; | ||
321 | if (len > length) | ||
322 | len = length; | ||
323 | h->busy_configuring = 0; | 347 | h->busy_configuring = 0; |
324 | return len; | ||
325 | } | 348 | } |
326 | 349 | ||
327 | static int | 350 | static struct seq_operations cciss_seq_ops = { |
328 | cciss_proc_write(struct file *file, const char __user *buffer, | 351 | .start = cciss_seq_start, |
329 | unsigned long count, void *data) | 352 | .show = cciss_seq_show, |
353 | .next = cciss_seq_next, | ||
354 | .stop = cciss_seq_stop, | ||
355 | }; | ||
356 | |||
357 | static int cciss_seq_open(struct inode *inode, struct file *file) | ||
330 | { | 358 | { |
331 | unsigned char cmd[80]; | 359 | int ret = seq_open(file, &cciss_seq_ops); |
332 | int len; | 360 | struct seq_file *seq = file->private_data; |
333 | #ifdef CONFIG_CISS_SCSI_TAPE | 361 | |
334 | ctlr_info_t *h = (ctlr_info_t *) data; | 362 | if (!ret) |
335 | int rc; | 363 | seq->private = PDE(inode)->data; |
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static ssize_t | ||
369 | cciss_proc_write(struct file *file, const char __user *buf, | ||
370 | size_t length, loff_t *ppos) | ||
371 | { | ||
372 | int err; | ||
373 | char *buffer; | ||
374 | |||
375 | #ifndef CONFIG_CISS_SCSI_TAPE | ||
376 | return -EINVAL; | ||
336 | #endif | 377 | #endif |
337 | 378 | ||
338 | if (count > sizeof(cmd) - 1) | 379 | if (!buf || length > PAGE_SIZE - 1) |
339 | return -EINVAL; | 380 | return -EINVAL; |
340 | if (copy_from_user(cmd, buffer, count)) | 381 | |
341 | return -EFAULT; | 382 | buffer = (char *)__get_free_page(GFP_KERNEL); |
342 | cmd[count] = '\0'; | 383 | if (!buffer) |
343 | len = strlen(cmd); // above 3 lines ensure safety | 384 | return -ENOMEM; |
344 | if (len && cmd[len - 1] == '\n') | 385 | |
345 | cmd[--len] = '\0'; | 386 | err = -EFAULT; |
346 | # ifdef CONFIG_CISS_SCSI_TAPE | 387 | if (copy_from_user(buffer, buf, length)) |
347 | if (strcmp("engage scsi", cmd) == 0) { | 388 | goto out; |
389 | buffer[length] = '\0'; | ||
390 | |||
391 | #ifdef CONFIG_CISS_SCSI_TAPE | ||
392 | if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { | ||
393 | struct seq_file *seq = file->private_data; | ||
394 | ctlr_info_t *h = seq->private; | ||
395 | int rc; | ||
396 | |||
348 | rc = cciss_engage_scsi(h->ctlr); | 397 | rc = cciss_engage_scsi(h->ctlr); |
349 | if (rc != 0) | 398 | if (rc != 0) |
350 | return -rc; | 399 | err = -rc; |
351 | return count; | 400 | else |
352 | } | 401 | err = length; |
402 | } else | ||
403 | #endif /* CONFIG_CISS_SCSI_TAPE */ | ||
404 | err = -EINVAL; | ||
353 | /* might be nice to have "disengage" too, but it's not | 405 | /* might be nice to have "disengage" too, but it's not |
354 | safely possible. (only 1 module use count, lock issues.) */ | 406 | safely possible. (only 1 module use count, lock issues.) */ |
355 | # endif | 407 | |
356 | return -EINVAL; | 408 | out: |
409 | free_page((unsigned long)buffer); | ||
410 | return err; | ||
357 | } | 411 | } |
358 | 412 | ||
359 | /* | 413 | static struct file_operations cciss_proc_fops = { |
360 | * Get us a file in /proc/cciss that says something about each controller. | 414 | .owner = THIS_MODULE, |
361 | * Create /proc/cciss if it doesn't exist yet. | 415 | .open = cciss_seq_open, |
362 | */ | 416 | .read = seq_read, |
417 | .llseek = seq_lseek, | ||
418 | .release = seq_release, | ||
419 | .write = cciss_proc_write, | ||
420 | }; | ||
421 | |||
363 | static void __devinit cciss_procinit(int i) | 422 | static void __devinit cciss_procinit(int i) |
364 | { | 423 | { |
365 | struct proc_dir_entry *pde; | 424 | struct proc_dir_entry *pde; |
366 | 425 | ||
367 | if (proc_cciss == NULL) { | 426 | if (proc_cciss == NULL) |
368 | proc_cciss = proc_mkdir("cciss", proc_root_driver); | 427 | proc_cciss = proc_mkdir("cciss", proc_root_driver); |
369 | if (!proc_cciss) | 428 | if (!proc_cciss) |
370 | return; | 429 | return; |
371 | } | 430 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | |
431 | S_IROTH, proc_cciss, | ||
432 | &cciss_proc_fops); | ||
433 | if (!pde) | ||
434 | return; | ||
372 | 435 | ||
373 | pde = create_proc_read_entry(hba[i]->devname, | 436 | pde->data = hba[i]; |
374 | S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, | ||
375 | proc_cciss, cciss_proc_get_info, hba[i]); | ||
376 | pde->write_proc = cciss_proc_write; | ||
377 | } | 437 | } |
378 | #endif /* CONFIG_PROC_FS */ | 438 | #endif /* CONFIG_PROC_FS */ |
379 | 439 | ||
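The old read_proc/write_proc pair is replaced by a seq_file iterator plus regular file_operations: proc_create() registers the entry, the controller pointer is stashed in pde->data, cciss_seq_open() copies it into seq->private via PDE(inode), and start/next/show/stop walk the logical drives with *pos as the index (the header is printed when *pos == 0, and busy_configuring is set in start and cleared in stop unless start bailed out with -EBUSY). A stripped-down version of the same pattern, using the same 2.6.25-era proc API but generic, hypothetical names:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    struct ex_ctlr {                        /* hypothetical per-controller state */
            const char *name;
            int nr_drives;
    };

    static void *ex_seq_start(struct seq_file *seq, loff_t *pos)
    {
            struct ex_ctlr *h = seq->private;

            return *pos >= h->nr_drives ? NULL : pos;
    }

    static void *ex_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            struct ex_ctlr *h = seq->private;

            ++*pos;
            return *pos >= h->nr_drives ? NULL : pos;
    }

    static int ex_seq_show(struct seq_file *seq, void *v)
    {
            struct ex_ctlr *h = seq->private;
            loff_t i = *(loff_t *)v;

            if (i == 0)                     /* header goes out with the first record */
                    seq_printf(seq, "%s: %d drives\n", h->name, h->nr_drives);
            seq_printf(seq, "drive %lld\n", (long long)i);
            return 0;
    }

    static void ex_seq_stop(struct seq_file *seq, void *v)
    {
    }

    static struct seq_operations ex_seq_ops = {
            .start = ex_seq_start, .next = ex_seq_next,
            .show  = ex_seq_show,  .stop = ex_seq_stop,
    };

    static int ex_proc_open(struct inode *inode, struct file *file)
    {
            int ret = seq_open(file, &ex_seq_ops);
            struct seq_file *seq = file->private_data;

            if (!ret)                       /* hand the controller to the iterator */
                    seq->private = PDE(inode)->data;
            return ret;
    }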
@@ -1341,7 +1401,6 @@ geo_inq: | |||
1341 | disk->private_data = &h->drv[drv_index]; | 1401 | disk->private_data = &h->drv[drv_index]; |
1342 | 1402 | ||
1343 | /* Set up queue information */ | 1403 | /* Set up queue information */ |
1344 | disk->queue->backing_dev_info.ra_pages = READ_AHEAD; | ||
1345 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); | 1404 | blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); |
1346 | 1405 | ||
1347 | /* This is a hardware imposed limit. */ | 1406 | /* This is a hardware imposed limit. */ |
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3434 | } | 3493 | } |
3435 | drv->queue = q; | 3494 | drv->queue = q; |
3436 | 3495 | ||
3437 | q->backing_dev_info.ra_pages = READ_AHEAD; | ||
3438 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); | 3496 | blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); |
3439 | 3497 | ||
3440 | /* This is a hardware imposed limit. */ | 3498 | /* This is a hardware imposed limit. */ |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 55178e9973a0..45ac09300eb3 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr) | |||
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static void | 1406 | static void |
1407 | cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) | 1407 | cciss_seq_tape_report(struct seq_file *seq, int ctlr) |
1408 | { | 1408 | { |
1409 | unsigned long flags; | 1409 | unsigned long flags; |
1410 | int size; | ||
1411 | |||
1412 | *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline | ||
1413 | 1410 | ||
1414 | CPQ_TAPE_LOCK(ctlr, flags); | 1411 | CPQ_TAPE_LOCK(ctlr, flags); |
1415 | size = sprintf(buffer + *len, | 1412 | seq_printf(seq, |
1416 | "Sequential access devices: %d\n\n", | 1413 | "Sequential access devices: %d\n\n", |
1417 | ccissscsi[ctlr].ndevices); | 1414 | ccissscsi[ctlr].ndevices); |
1418 | CPQ_TAPE_UNLOCK(ctlr, flags); | 1415 | CPQ_TAPE_UNLOCK(ctlr, flags); |
1419 | *pos += size; *len += size; | ||
1420 | } | 1416 | } |
1421 | 1417 | ||
1418 | |||
1422 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | 1419 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
1423 | * complaining. Doing a host- or bus-reset can't do anything good here. | 1420 | * complaining. Doing a host- or bus-reset can't do anything good here. |
1424 | * Despite what it might say in scsi_error.c, there may well be commands | 1421 | * Despite what it might say in scsi_error.c, there may well be commands |
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
1498 | #define cciss_scsi_setup(cntl_num) | 1495 | #define cciss_scsi_setup(cntl_num) |
1499 | #define cciss_unregister_scsi(ctlr) | 1496 | #define cciss_unregister_scsi(ctlr) |
1500 | #define cciss_register_scsi(ctlr) | 1497 | #define cciss_register_scsi(ctlr) |
1501 | #define cciss_proc_tape_report(ctlr, buffer, pos, len) | ||
1502 | 1498 | ||
1503 | #endif /* CONFIG_CISS_SCSI_TAPE */ | 1499 | #endif /* CONFIG_CISS_SCSI_TAPE */ |
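The two cciss hunks above are one conversion: the block driver now registers its /proc entry with proc_create() and hangs the per-controller pointer off pde->data, and the SCSI tape report writes through seq_printf() instead of sprintf()-ing into a caller-supplied buffer while hand-maintaining *pos and *len. A minimal sketch of the resulting pattern — my_ctlr, my_show and friends are made-up names, not the cciss ones:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct my_ctlr {
        char name[16];
        int ndevices;
};

static int my_show(struct seq_file *seq, void *v)
{
        struct my_ctlr *h = seq->private;       /* set from pde->data at open time */

        seq_printf(seq, "Sequential access devices: %d\n\n", h->ndevices);
        return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static void my_register_proc(struct my_ctlr *h, struct proc_dir_entry *parent)
{
        struct proc_dir_entry *pde;

        pde = proc_create(h->name, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
                          parent, &my_proc_fops);
        if (pde)
                pde->data = h;
}

With proc_create() the write side goes through the same file_operations (.write), which is presumably where the real driver's cciss_proc_write now lives.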
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 674cd66dcaba..18feb1c7c33b 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -849,7 +849,8 @@ static int pkt_flush_cache(struct pktcdvd_device *pd) | |||
849 | /* | 849 | /* |
850 | * speed is given as the normal factor, e.g. 4 for 4x | 850 | * speed is given as the normal factor, e.g. 4 for 4x |
851 | */ | 851 | */ |
852 | static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) | 852 | static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, |
853 | unsigned write_speed, unsigned read_speed) | ||
853 | { | 854 | { |
854 | struct packet_command cgc; | 855 | struct packet_command cgc; |
855 | struct request_sense sense; | 856 | struct request_sense sense; |
@@ -1776,7 +1777,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, | |||
1776 | return pkt_generic_packet(pd, &cgc); | 1777 | return pkt_generic_packet(pd, &cgc); |
1777 | } | 1778 | } |
1778 | 1779 | ||
1779 | static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | 1780 | static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, |
1781 | long *last_written) | ||
1780 | { | 1782 | { |
1781 | disc_information di; | 1783 | disc_information di; |
1782 | track_information ti; | 1784 | track_information ti; |
@@ -1813,7 +1815,7 @@ static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written) | |||
1813 | /* | 1815 | /* |
1814 | * write mode select package based on pd->settings | 1816 | * write mode select package based on pd->settings |
1815 | */ | 1817 | */ |
1816 | static int pkt_set_write_settings(struct pktcdvd_device *pd) | 1818 | static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) |
1817 | { | 1819 | { |
1818 | struct packet_command cgc; | 1820 | struct packet_command cgc; |
1819 | struct request_sense sense; | 1821 | struct request_sense sense; |
@@ -1972,7 +1974,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) | |||
1972 | return 1; | 1974 | return 1; |
1973 | } | 1975 | } |
1974 | 1976 | ||
1975 | static int pkt_probe_settings(struct pktcdvd_device *pd) | 1977 | static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) |
1976 | { | 1978 | { |
1977 | struct packet_command cgc; | 1979 | struct packet_command cgc; |
1978 | unsigned char buf[12]; | 1980 | unsigned char buf[12]; |
@@ -2071,7 +2073,8 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) | |||
2071 | /* | 2073 | /* |
2072 | * enable/disable write caching on drive | 2074 | * enable/disable write caching on drive |
2073 | */ | 2075 | */ |
2074 | static int pkt_write_caching(struct pktcdvd_device *pd, int set) | 2076 | static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, |
2077 | int set) | ||
2075 | { | 2078 | { |
2076 | struct packet_command cgc; | 2079 | struct packet_command cgc; |
2077 | struct request_sense sense; | 2080 | struct request_sense sense; |
@@ -2116,7 +2119,8 @@ static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) | |||
2116 | /* | 2119 | /* |
2117 | * Returns drive maximum write speed | 2120 | * Returns drive maximum write speed |
2118 | */ | 2121 | */ |
2119 | static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) | 2122 | static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, |
2123 | unsigned *write_speed) | ||
2120 | { | 2124 | { |
2121 | struct packet_command cgc; | 2125 | struct packet_command cgc; |
2122 | struct request_sense sense; | 2126 | struct request_sense sense; |
@@ -2177,7 +2181,8 @@ static char us_clv_to_speed[16] = { | |||
2177 | /* | 2181 | /* |
2178 | * reads the maximum media speed from ATIP | 2182 | * reads the maximum media speed from ATIP |
2179 | */ | 2183 | */ |
2180 | static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | 2184 | static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, |
2185 | unsigned *speed) | ||
2181 | { | 2186 | { |
2182 | struct packet_command cgc; | 2187 | struct packet_command cgc; |
2183 | struct request_sense sense; | 2188 | struct request_sense sense; |
@@ -2249,7 +2254,7 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) | |||
2249 | } | 2254 | } |
2250 | } | 2255 | } |
2251 | 2256 | ||
2252 | static int pkt_perform_opc(struct pktcdvd_device *pd) | 2257 | static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) |
2253 | { | 2258 | { |
2254 | struct packet_command cgc; | 2259 | struct packet_command cgc; |
2255 | struct request_sense sense; | 2260 | struct request_sense sense; |
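All the pkt_* helpers above pick up noinline_for_stack, which is simply a noinline whose name documents why it is there: each helper keeps a struct packet_command and a struct request_sense on its stack, and if gcc inlined several of them into one caller those buffers would all be live in a single, much deeper frame. A hedged illustration with invented names (my_dev, do_io) rather than the pktcdvd ones:

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_dev;
int do_io(struct my_dev *dev, void *buf, size_t len);   /* hypothetical */

/* Marking the helpers noinline_for_stack keeps the two 512-byte buffers
 * in separate short-lived frames instead of letting the compiler fold
 * both into setup()'s frame. */
static noinline_for_stack int probe_a(struct my_dev *dev)
{
        unsigned char buf[512];

        memset(buf, 0, sizeof(buf));
        return do_io(dev, buf, sizeof(buf));
}

static noinline_for_stack int probe_b(struct my_dev *dev)
{
        unsigned char buf[512];

        memset(buf, 0, sizeof(buf));
        return do_io(dev, buf, sizeof(buf));
}

static int setup(struct my_dev *dev)
{
        int ret = probe_a(dev);

        return ret ? ret : probe_b(dev);
}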
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index 372c7ef633da..f16c94cbf488 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c | |||
@@ -116,6 +116,7 @@ static struct usb_device_id blacklist_ids[] = { | |||
116 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, | 116 | { USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 }, |
117 | 117 | ||
118 | /* Broadcom BCM2045 */ | 118 | /* Broadcom BCM2045 */ |
119 | { USB_DEVICE(0x0a5c, 0x2039), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | ||
119 | { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, | 120 | { USB_DEVICE(0x0a5c, 0x2101), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, |
120 | 121 | ||
121 | /* IBM/Lenovo ThinkPad with Broadcom chip */ | 122 | /* IBM/Lenovo ThinkPad with Broadcom chip */ |
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index db259e60289b..12f5baea439b 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c | |||
@@ -1152,8 +1152,8 @@ clean_up_and_return: | |||
1152 | /* This code is similar to that in open_for_data. The routine is called | 1152 | /* This code is similar to that in open_for_data. The routine is called |
1153 | whenever an audio play operation is requested. | 1153 | whenever an audio play operation is requested. |
1154 | */ | 1154 | */ |
1155 | int check_for_audio_disc(struct cdrom_device_info * cdi, | 1155 | static int check_for_audio_disc(struct cdrom_device_info * cdi, |
1156 | struct cdrom_device_ops * cdo) | 1156 | struct cdrom_device_ops * cdo) |
1157 | { | 1157 | { |
1158 | int ret; | 1158 | int ret; |
1159 | tracktype tracks; | 1159 | tracktype tracks; |
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/char/defkeymap.c_shipped index 0aa419a61767..d2208dfe3f67 100644 --- a/drivers/char/defkeymap.c_shipped +++ b/drivers/char/defkeymap.c_shipped | |||
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = { | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | struct kbdiacruc accent_table[MAX_DIACR] = { | 225 | struct kbdiacruc accent_table[MAX_DIACR] = { |
226 | {'`', 'A', '\300'}, {'`', 'a', '\340'}, | 226 | {'`', 'A', 0300}, {'`', 'a', 0340}, |
227 | {'\'', 'A', '\301'}, {'\'', 'a', '\341'}, | 227 | {'\'', 'A', 0301}, {'\'', 'a', 0341}, |
228 | {'^', 'A', '\302'}, {'^', 'a', '\342'}, | 228 | {'^', 'A', 0302}, {'^', 'a', 0342}, |
229 | {'~', 'A', '\303'}, {'~', 'a', '\343'}, | 229 | {'~', 'A', 0303}, {'~', 'a', 0343}, |
230 | {'"', 'A', '\304'}, {'"', 'a', '\344'}, | 230 | {'"', 'A', 0304}, {'"', 'a', 0344}, |
231 | {'O', 'A', '\305'}, {'o', 'a', '\345'}, | 231 | {'O', 'A', 0305}, {'o', 'a', 0345}, |
232 | {'0', 'A', '\305'}, {'0', 'a', '\345'}, | 232 | {'0', 'A', 0305}, {'0', 'a', 0345}, |
233 | {'A', 'A', '\305'}, {'a', 'a', '\345'}, | 233 | {'A', 'A', 0305}, {'a', 'a', 0345}, |
234 | {'A', 'E', '\306'}, {'a', 'e', '\346'}, | 234 | {'A', 'E', 0306}, {'a', 'e', 0346}, |
235 | {',', 'C', '\307'}, {',', 'c', '\347'}, | 235 | {',', 'C', 0307}, {',', 'c', 0347}, |
236 | {'`', 'E', '\310'}, {'`', 'e', '\350'}, | 236 | {'`', 'E', 0310}, {'`', 'e', 0350}, |
237 | {'\'', 'E', '\311'}, {'\'', 'e', '\351'}, | 237 | {'\'', 'E', 0311}, {'\'', 'e', 0351}, |
238 | {'^', 'E', '\312'}, {'^', 'e', '\352'}, | 238 | {'^', 'E', 0312}, {'^', 'e', 0352}, |
239 | {'"', 'E', '\313'}, {'"', 'e', '\353'}, | 239 | {'"', 'E', 0313}, {'"', 'e', 0353}, |
240 | {'`', 'I', '\314'}, {'`', 'i', '\354'}, | 240 | {'`', 'I', 0314}, {'`', 'i', 0354}, |
241 | {'\'', 'I', '\315'}, {'\'', 'i', '\355'}, | 241 | {'\'', 'I', 0315}, {'\'', 'i', 0355}, |
242 | {'^', 'I', '\316'}, {'^', 'i', '\356'}, | 242 | {'^', 'I', 0316}, {'^', 'i', 0356}, |
243 | {'"', 'I', '\317'}, {'"', 'i', '\357'}, | 243 | {'"', 'I', 0317}, {'"', 'i', 0357}, |
244 | {'-', 'D', '\320'}, {'-', 'd', '\360'}, | 244 | {'-', 'D', 0320}, {'-', 'd', 0360}, |
245 | {'~', 'N', '\321'}, {'~', 'n', '\361'}, | 245 | {'~', 'N', 0321}, {'~', 'n', 0361}, |
246 | {'`', 'O', '\322'}, {'`', 'o', '\362'}, | 246 | {'`', 'O', 0322}, {'`', 'o', 0362}, |
247 | {'\'', 'O', '\323'}, {'\'', 'o', '\363'}, | 247 | {'\'', 'O', 0323}, {'\'', 'o', 0363}, |
248 | {'^', 'O', '\324'}, {'^', 'o', '\364'}, | 248 | {'^', 'O', 0324}, {'^', 'o', 0364}, |
249 | {'~', 'O', '\325'}, {'~', 'o', '\365'}, | 249 | {'~', 'O', 0325}, {'~', 'o', 0365}, |
250 | {'"', 'O', '\326'}, {'"', 'o', '\366'}, | 250 | {'"', 'O', 0326}, {'"', 'o', 0366}, |
251 | {'/', 'O', '\330'}, {'/', 'o', '\370'}, | 251 | {'/', 'O', 0330}, {'/', 'o', 0370}, |
252 | {'`', 'U', '\331'}, {'`', 'u', '\371'}, | 252 | {'`', 'U', 0331}, {'`', 'u', 0371}, |
253 | {'\'', 'U', '\332'}, {'\'', 'u', '\372'}, | 253 | {'\'', 'U', 0332}, {'\'', 'u', 0372}, |
254 | {'^', 'U', '\333'}, {'^', 'u', '\373'}, | 254 | {'^', 'U', 0333}, {'^', 'u', 0373}, |
255 | {'"', 'U', '\334'}, {'"', 'u', '\374'}, | 255 | {'"', 'U', 0334}, {'"', 'u', 0374}, |
256 | {'\'', 'Y', '\335'}, {'\'', 'y', '\375'}, | 256 | {'\'', 'Y', 0335}, {'\'', 'y', 0375}, |
257 | {'T', 'H', '\336'}, {'t', 'h', '\376'}, | 257 | {'T', 'H', 0336}, {'t', 'h', 0376}, |
258 | {'s', 's', '\337'}, {'"', 'y', '\377'}, | 258 | {'s', 's', 0337}, {'"', 'y', 0377}, |
259 | {'s', 'z', '\337'}, {'i', 'j', '\377'}, | 259 | {'s', 'z', 0337}, {'i', 'j', 0377}, |
260 | }; | 260 | }; |
261 | 261 | ||
262 | unsigned int accent_table_size = 68; | 262 | unsigned int accent_table_size = 68; |
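The accent_table rewrite replaces character-escape literals such as '\300' with plain octal constants. The table is now struct kbdiacruc, whose result field is an unsigned int; on targets where plain char is signed, '\300' is the negative character -64 and converts to a huge unsigned value, whereas 0300 is simply 192. A tiny stand-alone C check of that difference (the behavior shown assumes a signed 8-bit char, as gcc defaults to on x86):

#include <stdio.h>

int main(void)
{
        unsigned int from_escape = '\300';      /* -64 sign-extended: 0xffffffc0 */
        unsigned int from_octal  = 0300;        /* always 0x000000c0 (192)       */

        printf("%#x %#x\n", from_escape, from_octal);
        return 0;
}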
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index c01e26d9ee5e..f3fe62067344 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -2484,6 +2484,7 @@ static int __init espserial_init(void) | |||
2484 | return 0; | 2484 | return 0; |
2485 | } | 2485 | } |
2486 | 2486 | ||
2487 | spin_lock_init(&info->lock); | ||
2487 | /* rx_trigger, tx_trigger are needed by autoconfig */ | 2488 | /* rx_trigger, tx_trigger are needed by autoconfig */ |
2488 | info->config.rx_trigger = rx_trigger; | 2489 | info->config.rx_trigger = rx_trigger; |
2489 | info->config.tx_trigger = tx_trigger; | 2490 | info->config.tx_trigger = tx_trigger; |
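The one-liner in esp.c initializes info->lock before the autoconfig code that follows, rather than leaving the spinlock to be set up later: anything that may take the lock must see it initialized first, and lockdep complains about a lock that was never spin_lock_init()'d. The general shape, on a hypothetical port structure:

#include <linux/spinlock.h>

struct my_port {
        spinlock_t lock;
        int rx_trigger;
        int tx_trigger;
};

int my_autoconfig(struct my_port *info);        /* hypothetical; may acquire info->lock */

static int my_port_init(struct my_port *info)
{
        spin_lock_init(&info->lock);    /* before anything that can take the lock */
        info->rx_trigger = 768;
        info->tx_trigger = 768;

        return my_autoconfig(info);
}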
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 85d596a3c18c..eba2883b630e 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -1527,7 +1527,7 @@ static int __devinit reset_card(struct pci_dev *pdev, | |||
1527 | msleep(10); | 1527 | msleep(10); |
1528 | 1528 | ||
1529 | portcount = inw(base + 0x2); | 1529 | portcount = inw(base + 0x2); |
1530 | if (!inw(base + 0xe) & 0x1 || (portcount != 0 && portcount != 4 && | 1530 | if (!(inw(base + 0xe) & 0x1) || (portcount != 0 && portcount != 4 && |
1531 | portcount != 8 && portcount != 16)) { | 1531 | portcount != 8 && portcount != 16)) { |
1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", | 1532 | dev_err(&pdev->dev, "ISILoad:PCI Card%d reset failure.\n", |
1533 | card + 1); | 1533 | card + 1); |
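The isicom change is a pure operator-precedence fix: ! binds tighter than &, so the old test !inw(base + 0xe) & 0x1 computed (!inw(...)) & 0x1 and could only fire when the whole status word read as zero, not whenever the low "done" bit was clear. A two-line demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int status = 0x0002;   /* some bit set, but the 0x1 "done" bit clear */

        printf("buggy: %d\n", !status & 0x1);   /* (!status) & 1 == 0: failure missed    */
        printf("fixed: %d\n", !(status & 0x1)); /* 1: failure correctly detected         */
        return 0;
}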
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index ff35230058d3..d793e68b3e0d 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -377,13 +377,16 @@ void ipwireless_network_packet_received(struct ipw_network *network, | |||
377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { | 377 | for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) { |
378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; | 378 | struct ipw_tty *tty = network->associated_ttys[channel_idx][i]; |
379 | 379 | ||
380 | if (!tty) | ||
381 | continue; | ||
382 | |||
380 | /* | 383 | /* |
381 | * If it's associated with a tty (other than the RAS channel | 384 | * If it's associated with a tty (other than the RAS channel |
382 | * when we're online), then send the data to that tty. The RAS | 385 | * when we're online), then send the data to that tty. The RAS |
383 | * channel's data is handled above - it always goes through | 386 | * channel's data is handled above - it always goes through |
384 | * ppp_generic. | 387 | * ppp_generic. |
385 | */ | 388 | */ |
386 | if (tty && channel_idx == IPW_CHANNEL_RAS | 389 | if (channel_idx == IPW_CHANNEL_RAS |
387 | && (network->ras_control_lines & | 390 | && (network->ras_control_lines & |
388 | IPW_CONTROL_LINE_DCD) != 0 | 391 | IPW_CONTROL_LINE_DCD) != 0 |
389 | && ipwireless_tty_is_modem(tty)) { | 392 | && ipwireless_tty_is_modem(tty)) { |
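In the ipwireless loop the NULL test moves out of the big compound condition and becomes an early continue, presumably because the rest of the loop body (outside this hunk) also dereferences tty, so guarding only the first branch with "tty &&" was not enough. The same refactor in generic form, with invented names:

#include <linux/types.h>

struct my_item;
bool wants_special_handling(struct my_item *item);      /* hypothetical helpers */
void handle_special(struct my_item *item);
void handle_normal(struct my_item *item);

static void scan_table(struct my_item **table, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                struct my_item *item = table[i];

                if (!item)
                        continue;       /* skip empty slots before any dereference */

                if (wants_special_handling(item))
                        handle_special(item);
                else
                        handle_normal(item);    /* also touches item, so the early check matters */
        }
}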
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 78b151c4d20f..5c3142b6f1fc 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -110,8 +110,8 @@ static int rtc_has_irq = 1; | |||
110 | #define hpet_set_rtc_irq_bit(arg) 0 | 110 | #define hpet_set_rtc_irq_bit(arg) 0 |
111 | #define hpet_rtc_timer_init() do { } while (0) | 111 | #define hpet_rtc_timer_init() do { } while (0) |
112 | #define hpet_rtc_dropped_irq() 0 | 112 | #define hpet_rtc_dropped_irq() 0 |
113 | #define hpet_register_irq_handler(h) 0 | 113 | #define hpet_register_irq_handler(h) ({ 0; }) |
114 | #define hpet_unregister_irq_handler(h) 0 | 114 | #define hpet_unregister_irq_handler(h) ({ 0; }) |
115 | #ifdef RTC_IRQ | 115 | #ifdef RTC_IRQ |
116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) | 116 | static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) |
117 | { | 117 | { |
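The rtc.c stubs turn a bare 0 into the GCC statement expression ({ 0; }). Both forms yield 0 in expression context, but when a caller throws the result away the plain "0;" form typically draws a "statement with no effect" warning, while the statement-expression form does not. A sketch of the idiom on a made-up optional feature (my_feature_enable, my_dev are assumptions):

struct my_dev;

/* Stub used when the optional feature is compiled out: must be callable
 * both where the return value is checked and as a bare statement. */
#define my_feature_enable(dev)          ({ 0; })

static int probe_thing(struct my_dev *dev)
{
        int err;

        err = my_feature_enable(dev);   /* expression context: acts like a call returning 0 */
        if (err)
                return err;

        my_feature_enable(dev);         /* statement context: no "statement with no effect" warning */
        return 0;
}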
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c0e08c7bca2f..5ff83df67b44 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2109,7 +2109,6 @@ static void sx_throttle(struct tty_struct * tty) | |||
2109 | sx_out(bp, CD186x_CAR, port_No(port)); | 2109 | sx_out(bp, CD186x_CAR, port_No(port)); |
2110 | spin_unlock_irqrestore(&bp->lock, flags); | 2110 | spin_unlock_irqrestore(&bp->lock, flags); |
2111 | if (I_IXOFF(tty)) { | 2111 | if (I_IXOFF(tty)) { |
2112 | spin_unlock_irqrestore(&bp->lock, flags); | ||
2113 | sx_wait_CCR(bp); | 2112 | sx_wait_CCR(bp); |
2114 | spin_lock_irqsave(&bp->lock, flags); | 2113 | spin_lock_irqsave(&bp->lock, flags); |
2115 | sx_out(bp, CD186x_CCR, CCR_SSCH2); | 2114 | sx_out(bp, CD186x_CCR, CCR_SSCH2); |
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 367be9175061..9b58b894f823 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -702,6 +702,7 @@ void redraw_screen(struct vc_data *vc, int is_switch) | |||
702 | if (is_switch) { | 702 | if (is_switch) { |
703 | set_leds(); | 703 | set_leds(); |
704 | compute_shiftstate(); | 704 | compute_shiftstate(); |
705 | notify_update(vc); | ||
705 | } | 706 | } |
706 | } | 707 | } |
707 | 708 | ||
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c index dfea2bde162b..f577daedb630 100644 --- a/drivers/char/xilinx_hwicap/buffer_icap.c +++ b/drivers/char/xilinx_hwicap/buffer_icap.c | |||
@@ -73,8 +73,8 @@ | |||
73 | #define XHI_BUFFER_START 0 | 73 | #define XHI_BUFFER_START 0 |
74 | 74 | ||
75 | /** | 75 | /** |
76 | * buffer_icap_get_status: Get the contents of the status register. | 76 | * buffer_icap_get_status - Get the contents of the status register. |
77 | * @parameter base_address: is the base address of the device | 77 | * @base_address: is the base address of the device |
78 | * | 78 | * |
79 | * The status register contains the ICAP status and the done bit. | 79 | * The status register contains the ICAP status and the done bit. |
80 | * | 80 | * |
@@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * buffer_icap_get_bram: Reads data from the storage buffer bram. | 97 | * buffer_icap_get_bram - Reads data from the storage buffer bram. |
98 | * @parameter base_address: contains the base address of the component. | 98 | * @base_address: contains the base address of the component. |
99 | * @parameter offset: The word offset from which the data should be read. | 99 | * @offset: The word offset from which the data should be read. |
100 | * | 100 | * |
101 | * A bram is used as a configuration memory cache. One frame of data can | 101 | * A bram is used as a configuration memory cache. One frame of data can |
102 | * be stored in this "storage buffer". | 102 | * be stored in this "storage buffer". |
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * buffer_icap_busy: Return true if the icap device is busy | 111 | * buffer_icap_busy - Return true if the icap device is busy |
112 | * @parameter base_address: is the base address of the device | 112 | * @base_address: is the base address of the device |
113 | * | 113 | * |
114 | * The queries the low order bit of the status register, which | 114 | * The queries the low order bit of the status register, which |
115 | * indicates whether the current configuration or readback operation | 115 | * indicates whether the current configuration or readback operation |
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * buffer_icap_busy: Return true if the icap device is not busy | 124 | * buffer_icap_busy - Return true if the icap device is not busy |
125 | * @parameter base_address: is the base address of the device | 125 | * @base_address: is the base address of the device |
126 | * | 126 | * |
127 | * The queries the low order bit of the status register, which | 127 | * The queries the low order bit of the status register, which |
128 | * indicates whether the current configuration or readback operation | 128 | * indicates whether the current configuration or readback operation |
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | /** | 136 | /** |
137 | * buffer_icap_set_size: Set the size register. | 137 | * buffer_icap_set_size - Set the size register. |
138 | * @parameter base_address: is the base address of the device | 138 | * @base_address: is the base address of the device |
139 | * @parameter data: The size in bytes. | 139 | * @data: The size in bytes. |
140 | * | 140 | * |
141 | * The size register holds the number of 8 bit bytes to transfer between | 141 | * The size register holds the number of 8 bit bytes to transfer between |
142 | * bram and the icap (or icap to bram). | 142 | * bram and the icap (or icap to bram). |
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address, | |||
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * buffer_icap_mSetoffsetReg: Set the bram offset register. | 151 | * buffer_icap_set_offset - Set the bram offset register. |
152 | * @parameter base_address: contains the base address of the device. | 152 | * @base_address: contains the base address of the device. |
153 | * @parameter data: is the value to be written to the data register. | 153 | * @data: is the value to be written to the data register. |
154 | * | 154 | * |
155 | * The bram offset register holds the starting bram address to transfer | 155 | * The bram offset register holds the starting bram address to transfer |
156 | * data from during configuration or write data to during readback. | 156 | * data from during configuration or write data to during readback. |
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address, | |||
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register. | 165 | * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register. |
166 | * @parameter base_address: contains the base address of the device. | 166 | * @base_address: contains the base address of the device. |
167 | * @parameter data: is the value to be written to the data register. | 167 | * @data: is the value to be written to the data register. |
168 | * | 168 | * |
169 | * The RNC register determines the direction of the data transfer. It | 169 | * The RNC register determines the direction of the data transfer. It |
170 | * controls whether a configuration or readback take place. Writing to | 170 | * controls whether a configuration or readback take place. Writing to |
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address, | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /** | 180 | /** |
181 | * buffer_icap_set_bram: Write data to the storage buffer bram. | 181 | * buffer_icap_set_bram - Write data to the storage buffer bram. |
182 | * @parameter base_address: contains the base address of the component. | 182 | * @base_address: contains the base address of the component. |
183 | * @parameter offset: The word offset at which the data should be written. | 183 | * @offset: The word offset at which the data should be written. |
184 | * @parameter data: The value to be written to the bram offset. | 184 | * @data: The value to be written to the bram offset. |
185 | * | 185 | * |
186 | * A bram is used as a configuration memory cache. One frame of data can | 186 | * A bram is used as a configuration memory cache. One frame of data can |
187 | * be stored in this "storage buffer". | 187 | * be stored in this "storage buffer". |
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address, | |||
193 | } | 193 | } |
194 | 194 | ||
195 | /** | 195 | /** |
196 | * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer. | 196 | * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer. |
197 | * @parameter drvdata: a pointer to the drvdata. | 197 | * @drvdata: a pointer to the drvdata. |
198 | * @parameter offset: The storage buffer start address. | 198 | * @offset: The storage buffer start address. |
199 | * @parameter count: The number of words (32 bit) to read from the | 199 | * @count: The number of words (32 bit) to read from the |
200 | * device (ICAP). | 200 | * device (ICAP). |
201 | **/ | 201 | **/ |
202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | 202 | static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, |
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata, | |||
227 | }; | 227 | }; |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer. | 230 | * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer. |
231 | * @parameter drvdata: a pointer to the drvdata. | 231 | * @drvdata: a pointer to the drvdata. |
232 | * @parameter offset: The storage buffer start address. | 232 | * @offset: The storage buffer start address. |
233 | * @parameter count: The number of words (32 bit) to read from the | 233 | * @count: The number of words (32 bit) to read from the |
234 | * device (ICAP). | 234 | * device (ICAP). |
235 | **/ | 235 | **/ |
236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | 236 | static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, |
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata, | |||
261 | }; | 261 | }; |
262 | 262 | ||
263 | /** | 263 | /** |
264 | * buffer_icap_reset: Reset the logic of the icap device. | 264 | * buffer_icap_reset - Reset the logic of the icap device. |
265 | * @parameter drvdata: a pointer to the drvdata. | 265 | * @drvdata: a pointer to the drvdata. |
266 | * | 266 | * |
267 | * Writing to the status register resets the ICAP logic in an internal | 267 | * Writing to the status register resets the ICAP logic in an internal |
268 | * version of the core. For the version of the core published in EDK, | 268 | * version of the core. For the version of the core published in EDK, |
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /** | 276 | /** |
277 | * buffer_icap_set_configuration: Load a partial bitstream from system memory. | 277 | * buffer_icap_set_configuration - Load a partial bitstream from system memory. |
278 | * @parameter drvdata: a pointer to the drvdata. | 278 | * @drvdata: a pointer to the drvdata. |
279 | * @parameter data: Kernel address of the partial bitstream. | 279 | * @data: Kernel address of the partial bitstream. |
280 | * @parameter size: the size of the partial bitstream in 32 bit words. | 280 | * @size: the size of the partial bitstream in 32 bit words. |
281 | **/ | 281 | **/ |
282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 282 | int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
283 | u32 size) | 283 | u32 size) |
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data, | |||
333 | }; | 333 | }; |
334 | 334 | ||
335 | /** | 335 | /** |
336 | * buffer_icap_get_configuration: Read configuration data from the device. | 336 | * buffer_icap_get_configuration - Read configuration data from the device. |
337 | * @parameter drvdata: a pointer to the drvdata. | 337 | * @drvdata: a pointer to the drvdata. |
338 | * @parameter data: Address of the data representing the partial bitstream | 338 | * @data: Address of the data representing the partial bitstream |
339 | * @parameter size: the size of the partial bitstream in 32 bit words. | 339 | * @size: the size of the partial bitstream in 32 bit words. |
340 | **/ | 340 | **/ |
341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, | 341 | int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data, |
342 | u32 size) | 342 | u32 size) |
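The comment churn through buffer_icap.c (and fifo_icap.c just below) is kernel-doc syntax cleanup: the first line must read "function_name - short description" and each argument gets an "@name:" line, with no "@parameter" prefix, so scripts/kernel-doc can parse the headers. A minimal conforming header on an invented helper:

/**
 * my_icap_read_word - Read one 32-bit word from the device.
 * @base_address: base address of the memory-mapped registers.
 * @offset: byte offset of the register to read.
 *
 * Returns the raw register contents; the caller interprets the bits.
 **/
static inline u32 my_icap_read_word(void __iomem *base_address, u32 offset)
{
        return ioread32(base_address + offset);
}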
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c index 0988314694a6..6f45dbd47125 100644 --- a/drivers/char/xilinx_hwicap/fifo_icap.c +++ b/drivers/char/xilinx_hwicap/fifo_icap.c | |||
@@ -94,9 +94,9 @@ | |||
94 | 94 | ||
95 | 95 | ||
96 | /** | 96 | /** |
97 | * fifo_icap_fifo_write: Write data to the write FIFO. | 97 | * fifo_icap_fifo_write - Write data to the write FIFO. |
98 | * @parameter drvdata: a pointer to the drvdata. | 98 | * @drvdata: a pointer to the drvdata. |
99 | * @parameter data: the 32-bit value to be written to the FIFO. | 99 | * @data: the 32-bit value to be written to the FIFO. |
100 | * | 100 | * |
101 | * This function will silently fail if the fifo is full. | 101 | * This function will silently fail if the fifo is full. |
102 | **/ | 102 | **/ |
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata, | |||
108 | } | 108 | } |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * fifo_icap_fifo_read: Read data from the Read FIFO. | 111 | * fifo_icap_fifo_read - Read data from the Read FIFO. |
112 | * @parameter drvdata: a pointer to the drvdata. | 112 | * @drvdata: a pointer to the drvdata. |
113 | * | 113 | * |
114 | * This function will silently fail if the fifo is empty. | 114 | * This function will silently fail if the fifo is empty. |
115 | **/ | 115 | **/ |
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata) | |||
121 | } | 121 | } |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * fifo_icap_set_read_size: Set the the size register. | 124 | * fifo_icap_set_read_size - Set the the size register. |
125 | * @parameter drvdata: a pointer to the drvdata. | 125 | * @drvdata: a pointer to the drvdata. |
126 | * @parameter data: the size of the following read transaction, in words. | 126 | * @data: the size of the following read transaction, in words. |
127 | **/ | 127 | **/ |
128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | 128 | static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, |
129 | u32 data) | 129 | u32 data) |
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata, | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /** | 134 | /** |
135 | * fifo_icap_start_config: Initiate a configuration (write) to the device. | 135 | * fifo_icap_start_config - Initiate a configuration (write) to the device. |
136 | * @parameter drvdata: a pointer to the drvdata. | 136 | * @drvdata: a pointer to the drvdata. |
137 | **/ | 137 | **/ |
138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | 138 | static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) |
139 | { | 139 | { |
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata) | |||
142 | } | 142 | } |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * fifo_icap_start_readback: Initiate a readback from the device. | 145 | * fifo_icap_start_readback - Initiate a readback from the device. |
146 | * @parameter drvdata: a pointer to the drvdata. | 146 | * @drvdata: a pointer to the drvdata. |
147 | **/ | 147 | **/ |
148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | 148 | static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) |
149 | { | 149 | { |
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * fifo_icap_busy: Return true if the ICAP is still processing a transaction. | 155 | * fifo_icap_busy - Return true if the ICAP is still processing a transaction. |
156 | * @parameter drvdata: a pointer to the drvdata. | 156 | * @drvdata: a pointer to the drvdata. |
157 | **/ | 157 | **/ |
158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | 158 | static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) |
159 | { | 159 | { |
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata) | |||
163 | } | 163 | } |
164 | 164 | ||
165 | /** | 165 | /** |
166 | * fifo_icap_write_fifo_vacancy: Query the write fifo available space. | 166 | * fifo_icap_write_fifo_vacancy - Query the write fifo available space. |
167 | * @parameter drvdata: a pointer to the drvdata. | 167 | * @drvdata: a pointer to the drvdata. |
168 | * | 168 | * |
169 | * Return the number of words that can be safely pushed into the write fifo. | 169 | * Return the number of words that can be safely pushed into the write fifo. |
170 | **/ | 170 | **/ |
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy( | |||
175 | } | 175 | } |
176 | 176 | ||
177 | /** | 177 | /** |
178 | * fifo_icap_read_fifo_occupancy: Query the read fifo available data. | 178 | * fifo_icap_read_fifo_occupancy - Query the read fifo available data. |
179 | * @parameter drvdata: a pointer to the drvdata. | 179 | * @drvdata: a pointer to the drvdata. |
180 | * | 180 | * |
181 | * Return the number of words that can be safely read from the read fifo. | 181 | * Return the number of words that can be safely read from the read fifo. |
182 | **/ | 182 | **/ |
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy( | |||
187 | } | 187 | } |
188 | 188 | ||
189 | /** | 189 | /** |
190 | * fifo_icap_set_configuration: Send configuration data to the ICAP. | 190 | * fifo_icap_set_configuration - Send configuration data to the ICAP. |
191 | * @parameter drvdata: a pointer to the drvdata. | 191 | * @drvdata: a pointer to the drvdata. |
192 | * @parameter frame_buffer: a pointer to the data to be written to the | 192 | * @frame_buffer: a pointer to the data to be written to the |
193 | * ICAP device. | 193 | * ICAP device. |
194 | * @parameter num_words: the number of words (32 bit) to write to the ICAP | 194 | * @num_words: the number of words (32 bit) to write to the ICAP |
195 | * device. | 195 | * device. |
196 | 196 | ||
197 | * This function writes the given user data to the Write FIFO in | 197 | * This function writes the given user data to the Write FIFO in |
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata, | |||
266 | } | 266 | } |
267 | 267 | ||
268 | /** | 268 | /** |
269 | * fifo_icap_get_configuration: Read configuration data from the device. | 269 | * fifo_icap_get_configuration - Read configuration data from the device. |
270 | * @parameter drvdata: a pointer to the drvdata. | 270 | * @drvdata: a pointer to the drvdata. |
271 | * @parameter data: Address of the data representing the partial bitstream | 271 | * @data: Address of the data representing the partial bitstream |
272 | * @parameter size: the size of the partial bitstream in 32 bit words. | 272 | * @size: the size of the partial bitstream in 32 bit words. |
273 | * | 273 | * |
274 | * This function reads the specified number of words from the ICAP device in | 274 | * This function reads the specified number of words from the ICAP device in |
275 | * the polled mode. | 275 | * the polled mode. |
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata, | |||
335 | } | 335 | } |
336 | 336 | ||
337 | /** | 337 | /** |
338 | * buffer_icap_reset: Reset the logic of the icap device. | 338 | * buffer_icap_reset - Reset the logic of the icap device. |
339 | * @parameter drvdata: a pointer to the drvdata. | 339 | * @drvdata: a pointer to the drvdata. |
340 | * | 340 | * |
341 | * This function forces the software reset of the complete HWICAP device. | 341 | * This function forces the software reset of the complete HWICAP device. |
342 | * All the registers will return to the default value and the FIFO is also | 342 | * All the registers will return to the default value and the FIFO is also |
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata) | |||
360 | } | 360 | } |
361 | 361 | ||
362 | /** | 362 | /** |
363 | * fifo_icap_flush_fifo: This function flushes the FIFOs in the device. | 363 | * fifo_icap_flush_fifo - This function flushes the FIFOs in the device. |
364 | * @parameter drvdata: a pointer to the drvdata. | 364 | * @drvdata: a pointer to the drvdata. |
365 | */ | 365 | */ |
366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) | 366 | void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata) |
367 | { | 367 | { |
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index 24f6aef0fd3c..2284fa2a5a57 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c | |||
@@ -84,7 +84,7 @@ | |||
84 | #include <linux/init.h> | 84 | #include <linux/init.h> |
85 | #include <linux/poll.h> | 85 | #include <linux/poll.h> |
86 | #include <linux/proc_fs.h> | 86 | #include <linux/proc_fs.h> |
87 | #include <asm/semaphore.h> | 87 | #include <linux/mutex.h> |
88 | #include <linux/sysctl.h> | 88 | #include <linux/sysctl.h> |
89 | #include <linux/version.h> | 89 | #include <linux/version.h> |
90 | #include <linux/fs.h> | 90 | #include <linux/fs.h> |
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO); | |||
119 | 119 | ||
120 | /* An array, which is set to true when the device is registered. */ | 120 | /* An array, which is set to true when the device is registered. */ |
121 | static bool probed_devices[HWICAP_DEVICES]; | 121 | static bool probed_devices[HWICAP_DEVICES]; |
122 | static struct mutex icap_sem; | ||
122 | 123 | ||
123 | static struct class *icap_class; | 124 | static struct class *icap_class; |
124 | 125 | ||
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = { | |||
199 | }; | 200 | }; |
200 | 201 | ||
201 | /** | 202 | /** |
202 | * hwicap_command_desync: Send a DESYNC command to the ICAP port. | 203 | * hwicap_command_desync - Send a DESYNC command to the ICAP port. |
203 | * @parameter drvdata: a pointer to the drvdata. | 204 | * @drvdata: a pointer to the drvdata. |
204 | * | 205 | * |
205 | * This command desynchronizes the ICAP After this command, a | 206 | * This command desynchronizes the ICAP After this command, a |
206 | * bitstream containing a NULL packet, followed by a SYNCH packet is | 207 | * bitstream containing a NULL packet, followed by a SYNCH packet is |
207 | * required before the ICAP will recognize commands. | 208 | * required before the ICAP will recognize commands. |
208 | */ | 209 | */ |
209 | int hwicap_command_desync(struct hwicap_drvdata *drvdata) | 210 | static int hwicap_command_desync(struct hwicap_drvdata *drvdata) |
210 | { | 211 | { |
211 | u32 buffer[4]; | 212 | u32 buffer[4]; |
212 | u32 index = 0; | 213 | u32 index = 0; |
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata) | |||
228 | } | 229 | } |
229 | 230 | ||
230 | /** | 231 | /** |
231 | * hwicap_command_capture: Send a CAPTURE command to the ICAP port. | 232 | * hwicap_get_configuration_register - Query a configuration register. |
232 | * @parameter drvdata: a pointer to the drvdata. | 233 | * @drvdata: a pointer to the drvdata. |
233 | * | 234 | * @reg: a constant which represents the configuration |
234 | * This command captures all of the flip flop states so they will be | ||
235 | * available during readback. One can use this command instead of | ||
236 | * enabling the CAPTURE block in the design. | ||
237 | */ | ||
238 | int hwicap_command_capture(struct hwicap_drvdata *drvdata) | ||
239 | { | ||
240 | u32 buffer[7]; | ||
241 | u32 index = 0; | ||
242 | |||
243 | /* | ||
244 | * Create the data to be written to the ICAP. | ||
245 | */ | ||
246 | buffer[index++] = XHI_DUMMY_PACKET; | ||
247 | buffer[index++] = XHI_SYNC_PACKET; | ||
248 | buffer[index++] = XHI_NOOP_PACKET; | ||
249 | buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1; | ||
250 | buffer[index++] = XHI_CMD_GCAPTURE; | ||
251 | buffer[index++] = XHI_DUMMY_PACKET; | ||
252 | buffer[index++] = XHI_DUMMY_PACKET; | ||
253 | |||
254 | /* | ||
255 | * Write the data to the FIFO and intiate the transfer of data | ||
256 | * present in the FIFO to the ICAP device. | ||
257 | */ | ||
258 | return drvdata->config->set_configuration(drvdata, | ||
259 | &buffer[0], index); | ||
260 | |||
261 | } | ||
262 | |||
263 | /** | ||
264 | * hwicap_get_configuration_register: Query a configuration register. | ||
265 | * @parameter drvdata: a pointer to the drvdata. | ||
266 | * @parameter reg: a constant which represents the configuration | ||
267 | * register value to be returned. | 235 | * register value to be returned. |
268 | * Examples: XHI_IDCODE, XHI_FLR. | 236 | * Examples: XHI_IDCODE, XHI_FLR. |
269 | * @parameter RegData: returns the value of the register. | 237 | * @reg_data: returns the value of the register. |
270 | * | 238 | * |
271 | * Sends a query packet to the ICAP and then receives the response. | 239 | * Sends a query packet to the ICAP and then receives the response. |
272 | * The icap is left in Synched state. | 240 | * The icap is left in Synched state. |
273 | */ | 241 | */ |
274 | int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | 242 | static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, |
275 | u32 reg, u32 *RegData) | 243 | u32 reg, u32 *reg_data) |
276 | { | 244 | { |
277 | int status; | 245 | int status; |
278 | u32 buffer[6]; | 246 | u32 buffer[6]; |
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata, | |||
300 | /* | 268 | /* |
301 | * Read the configuration register | 269 | * Read the configuration register |
302 | */ | 270 | */ |
303 | status = drvdata->config->get_configuration(drvdata, RegData, 1); | 271 | status = drvdata->config->get_configuration(drvdata, reg_data, 1); |
304 | if (status) | 272 | if (status) |
305 | return status; | 273 | return status; |
306 | 274 | ||
307 | return 0; | 275 | return 0; |
308 | } | 276 | } |
309 | 277 | ||
310 | int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | 278 | static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) |
311 | { | 279 | { |
312 | int status; | 280 | int status; |
313 | u32 idcode; | 281 | u32 idcode; |
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata) | |||
344 | } | 312 | } |
345 | 313 | ||
346 | static ssize_t | 314 | static ssize_t |
347 | hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | 315 | hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
348 | { | 316 | { |
349 | struct hwicap_drvdata *drvdata = file->private_data; | 317 | struct hwicap_drvdata *drvdata = file->private_data; |
350 | ssize_t bytes_to_read = 0; | 318 | ssize_t bytes_to_read = 0; |
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
353 | u32 bytes_remaining; | 321 | u32 bytes_remaining; |
354 | int status; | 322 | int status; |
355 | 323 | ||
356 | if (down_interruptible(&drvdata->sem)) | 324 | status = mutex_lock_interruptible(&drvdata->sem); |
357 | return -ERESTARTSYS; | 325 | if (status) |
326 | return status; | ||
358 | 327 | ||
359 | if (drvdata->read_buffer_in_use) { | 328 | if (drvdata->read_buffer_in_use) { |
360 | /* If there are leftover bytes in the buffer, just */ | 329 | /* If there are leftover bytes in the buffer, just */ |
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
370 | goto error; | 339 | goto error; |
371 | } | 340 | } |
372 | drvdata->read_buffer_in_use -= bytes_to_read; | 341 | drvdata->read_buffer_in_use -= bytes_to_read; |
373 | memcpy(drvdata->read_buffer + bytes_to_read, | 342 | memmove(drvdata->read_buffer, |
374 | drvdata->read_buffer, 4 - bytes_to_read); | 343 | drvdata->read_buffer + bytes_to_read, |
344 | 4 - bytes_to_read); | ||
375 | } else { | 345 | } else { |
376 | /* Get new data from the ICAP, and return was was requested. */ | 346 | /* Get new data from the ICAP, and return was was requested. */ |
377 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); | 347 | kbuf = (u32 *) get_zeroed_page(GFP_KERNEL); |
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos) | |||
414 | status = -EFAULT; | 384 | status = -EFAULT; |
415 | goto error; | 385 | goto error; |
416 | } | 386 | } |
417 | memcpy(kbuf, drvdata->read_buffer, bytes_remaining); | 387 | memcpy(drvdata->read_buffer, |
388 | kbuf, | ||
389 | bytes_remaining); | ||
418 | drvdata->read_buffer_in_use = bytes_remaining; | 390 | drvdata->read_buffer_in_use = bytes_remaining; |
419 | free_page((unsigned long)kbuf); | 391 | free_page((unsigned long)kbuf); |
420 | } | 392 | } |
421 | status = bytes_to_read; | 393 | status = bytes_to_read; |
422 | error: | 394 | error: |
423 | up(&drvdata->sem); | 395 | mutex_unlock(&drvdata->sem); |
424 | return status; | 396 | return status; |
425 | } | 397 | } |
426 | 398 | ||
427 | static ssize_t | 399 | static ssize_t |
428 | hwicap_write(struct file *file, const char *buf, | 400 | hwicap_write(struct file *file, const char __user *buf, |
429 | size_t count, loff_t *ppos) | 401 | size_t count, loff_t *ppos) |
430 | { | 402 | { |
431 | struct hwicap_drvdata *drvdata = file->private_data; | 403 | struct hwicap_drvdata *drvdata = file->private_data; |
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf, | |||
435 | ssize_t len; | 407 | ssize_t len; |
436 | ssize_t status; | 408 | ssize_t status; |
437 | 409 | ||
438 | if (down_interruptible(&drvdata->sem)) | 410 | status = mutex_lock_interruptible(&drvdata->sem); |
439 | return -ERESTARTSYS; | 411 | if (status) |
412 | return status; | ||
440 | 413 | ||
441 | left += drvdata->write_buffer_in_use; | 414 | left += drvdata->write_buffer_in_use; |
442 | 415 | ||
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf, | |||
465 | memcpy(kbuf, drvdata->write_buffer, | 438 | memcpy(kbuf, drvdata->write_buffer, |
466 | drvdata->write_buffer_in_use); | 439 | drvdata->write_buffer_in_use); |
467 | if (copy_from_user( | 440 | if (copy_from_user( |
468 | (((char *)kbuf) + (drvdata->write_buffer_in_use)), | 441 | (((char *)kbuf) + drvdata->write_buffer_in_use), |
469 | buf + written, | 442 | buf + written, |
470 | len - (drvdata->write_buffer_in_use))) { | 443 | len - (drvdata->write_buffer_in_use))) { |
471 | free_page((unsigned long)kbuf); | 444 | free_page((unsigned long)kbuf); |
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf, | |||
508 | free_page((unsigned long)kbuf); | 481 | free_page((unsigned long)kbuf); |
509 | status = written; | 482 | status = written; |
510 | error: | 483 | error: |
511 | up(&drvdata->sem); | 484 | mutex_unlock(&drvdata->sem); |
512 | return status; | 485 | return status; |
513 | } | 486 | } |
514 | 487 | ||
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
519 | 492 | ||
520 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); | 493 | drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); |
521 | 494 | ||
522 | if (down_interruptible(&drvdata->sem)) | 495 | status = mutex_lock_interruptible(&drvdata->sem); |
523 | return -ERESTARTSYS; | 496 | if (status) |
497 | return status; | ||
524 | 498 | ||
525 | if (drvdata->is_open) { | 499 | if (drvdata->is_open) { |
526 | status = -EBUSY; | 500 | status = -EBUSY; |
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file) | |||
539 | drvdata->is_open = 1; | 513 | drvdata->is_open = 1; |
540 | 514 | ||
541 | error: | 515 | error: |
542 | up(&drvdata->sem); | 516 | mutex_unlock(&drvdata->sem); |
543 | return status; | 517 | return status; |
544 | } | 518 | } |
545 | 519 | ||
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
549 | int i; | 523 | int i; |
550 | int status = 0; | 524 | int status = 0; |
551 | 525 | ||
552 | if (down_interruptible(&drvdata->sem)) | 526 | mutex_lock(&drvdata->sem); |
553 | return -ERESTARTSYS; | ||
554 | 527 | ||
555 | if (drvdata->write_buffer_in_use) { | 528 | if (drvdata->write_buffer_in_use) { |
556 | /* Flush write buffer. */ | 529 | /* Flush write buffer. */ |
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file) | |||
569 | 542 | ||
570 | error: | 543 | error: |
571 | drvdata->is_open = 0; | 544 | drvdata->is_open = 0; |
572 | up(&drvdata->sem); | 545 | mutex_unlock(&drvdata->sem); |
573 | return status; | 546 | return status; |
574 | } | 547 | } |
575 | 548 | ||
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
592 | 565 | ||
593 | dev_info(dev, "Xilinx icap port driver\n"); | 566 | dev_info(dev, "Xilinx icap port driver\n"); |
594 | 567 | ||
568 | mutex_lock(&icap_sem); | ||
569 | |||
595 | if (id < 0) { | 570 | if (id < 0) { |
596 | for (id = 0; id < HWICAP_DEVICES; id++) | 571 | for (id = 0; id < HWICAP_DEVICES; id++) |
597 | if (!probed_devices[id]) | 572 | if (!probed_devices[id]) |
598 | break; | 573 | break; |
599 | } | 574 | } |
600 | if (id < 0 || id >= HWICAP_DEVICES) { | 575 | if (id < 0 || id >= HWICAP_DEVICES) { |
576 | mutex_unlock(&icap_sem); | ||
601 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); | 577 | dev_err(dev, "%s%i too large\n", DRIVER_NAME, id); |
602 | return -EINVAL; | 578 | return -EINVAL; |
603 | } | 579 | } |
604 | if (probed_devices[id]) { | 580 | if (probed_devices[id]) { |
581 | mutex_unlock(&icap_sem); | ||
605 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", | 582 | dev_err(dev, "cannot assign to %s%i; it is already in use\n", |
606 | DRIVER_NAME, id); | 583 | DRIVER_NAME, id); |
607 | return -EBUSY; | 584 | return -EBUSY; |
608 | } | 585 | } |
609 | 586 | ||
610 | probed_devices[id] = 1; | 587 | probed_devices[id] = 1; |
588 | mutex_unlock(&icap_sem); | ||
611 | 589 | ||
612 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); | 590 | devt = MKDEV(xhwicap_major, xhwicap_minor + id); |
613 | 591 | ||
614 | drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); | 592 | drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL); |
615 | if (!drvdata) { | 593 | if (!drvdata) { |
616 | dev_err(dev, "Couldn't allocate device private record\n"); | 594 | dev_err(dev, "Couldn't allocate device private record\n"); |
617 | return -ENOMEM; | 595 | retval = -ENOMEM; |
596 | goto failed0; | ||
618 | } | 597 | } |
619 | memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata)); | ||
620 | dev_set_drvdata(dev, (void *)drvdata); | 598 | dev_set_drvdata(dev, (void *)drvdata); |
621 | 599 | ||
622 | if (!regs_res) { | 600 | if (!regs_res) { |
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
648 | drvdata->config = config; | 626 | drvdata->config = config; |
649 | drvdata->config_regs = config_regs; | 627 | drvdata->config_regs = config_regs; |
650 | 628 | ||
651 | init_MUTEX(&drvdata->sem); | 629 | mutex_init(&drvdata->sem); |
652 | drvdata->is_open = 0; | 630 | drvdata->is_open = 0; |
653 | 631 | ||
654 | dev_info(dev, "ioremap %lx to %p with size %x\n", | 632 | dev_info(dev, "ioremap %lx to %p with size %x\n", |
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
663 | goto failed3; | 641 | goto failed3; |
664 | } | 642 | } |
665 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ | 643 | /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */ |
666 | class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME); | 644 | device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id); |
667 | return 0; /* success */ | 645 | return 0; /* success */ |
668 | 646 | ||
669 | failed3: | 647 | failed3: |
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id, | |||
675 | failed1: | 653 | failed1: |
676 | kfree(drvdata); | 654 | kfree(drvdata); |
677 | 655 | ||
656 | failed0: | ||
657 | mutex_lock(&icap_sem); | ||
658 | probed_devices[id] = 0; | ||
659 | mutex_unlock(&icap_sem); | ||
660 | |||
678 | return retval; | 661 | return retval; |
679 | } | 662 | } |
680 | 663 | ||
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev) | |||
699 | if (!drvdata) | 682 | if (!drvdata) |
700 | return 0; | 683 | return 0; |
701 | 684 | ||
702 | class_device_destroy(icap_class, drvdata->devt); | 685 | device_destroy(icap_class, drvdata->devt); |
703 | cdev_del(&drvdata->cdev); | 686 | cdev_del(&drvdata->cdev); |
704 | iounmap(drvdata->base_address); | 687 | iounmap(drvdata->base_address); |
705 | release_mem_region(drvdata->mem_start, drvdata->mem_size); | 688 | release_mem_region(drvdata->mem_start, drvdata->mem_size); |
706 | kfree(drvdata); | 689 | kfree(drvdata); |
707 | dev_set_drvdata(dev, NULL); | 690 | dev_set_drvdata(dev, NULL); |
708 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
709 | 691 | ||
692 | mutex_lock(&icap_sem); | ||
693 | probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0; | ||
694 | mutex_unlock(&icap_sem); | ||
710 | return 0; /* success */ | 695 | return 0; /* success */ |
711 | } | 696 | } |
712 | 697 | ||
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = { | |||
821 | }; | 806 | }; |
822 | 807 | ||
823 | /* Registration helpers to keep the number of #ifdefs to a minimum */ | 808 | /* Registration helpers to keep the number of #ifdefs to a minimum */ |
824 | static inline int __devinit hwicap_of_register(void) | 809 | static inline int __init hwicap_of_register(void) |
825 | { | 810 | { |
826 | pr_debug("hwicap: calling of_register_platform_driver()\n"); | 811 | pr_debug("hwicap: calling of_register_platform_driver()\n"); |
827 | return of_register_platform_driver(&hwicap_of_driver); | 812 | return of_register_platform_driver(&hwicap_of_driver); |
828 | } | 813 | } |
829 | 814 | ||
830 | static inline void __devexit hwicap_of_unregister(void) | 815 | static inline void __exit hwicap_of_unregister(void) |
831 | { | 816 | { |
832 | of_unregister_platform_driver(&hwicap_of_driver); | 817 | of_unregister_platform_driver(&hwicap_of_driver); |
833 | } | 818 | } |
834 | #else /* CONFIG_OF */ | 819 | #else /* CONFIG_OF */ |
835 | /* CONFIG_OF not enabled; do nothing helpers */ | 820 | /* CONFIG_OF not enabled; do nothing helpers */ |
836 | static inline int __devinit hwicap_of_register(void) { return 0; } | 821 | static inline int __init hwicap_of_register(void) { return 0; } |
837 | static inline void __devexit hwicap_of_unregister(void) { } | 822 | static inline void __exit hwicap_of_unregister(void) { } |
838 | #endif /* CONFIG_OF */ | 823 | #endif /* CONFIG_OF */ |
839 | 824 | ||
840 | static int __devinit hwicap_module_init(void) | 825 | static int __init hwicap_module_init(void) |
841 | { | 826 | { |
842 | dev_t devt; | 827 | dev_t devt; |
843 | int retval; | 828 | int retval; |
844 | 829 | ||
845 | icap_class = class_create(THIS_MODULE, "xilinx_config"); | 830 | icap_class = class_create(THIS_MODULE, "xilinx_config"); |
831 | mutex_init(&icap_sem); | ||
846 | 832 | ||
847 | if (xhwicap_major) { | 833 | if (xhwicap_major) { |
848 | devt = MKDEV(xhwicap_major, xhwicap_minor); | 834 | devt = MKDEV(xhwicap_major, xhwicap_minor); |
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void) | |||
883 | return retval; | 869 | return retval; |
884 | } | 870 | } |
885 | 871 | ||
886 | static void __devexit hwicap_module_cleanup(void) | 872 | static void __exit hwicap_module_cleanup(void) |
887 | { | 873 | { |
888 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); | 874 | dev_t devt = MKDEV(xhwicap_major, xhwicap_minor); |
889 | 875 | ||
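The bulk of the xilinx_hwicap changes swap the old semaphore (init_MUTEX/down_interruptible/up) for a struct mutex, add the __user annotations, and return whatever mutex_lock_interruptible() gives back (-EINTR) where the code used to hard-code -ERESTARTSYS, a small user-visible change. The core of the locking conversion, reduced to a hypothetical device:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct my_dev {
        struct mutex lock;      /* was: struct semaphore sem */
        bool is_open;
};

static void my_dev_init(struct my_dev *d)
{
        mutex_init(&d->lock);   /* was: init_MUTEX(&d->sem) */
        d->is_open = false;
}

static int my_dev_open(struct my_dev *d)
{
        int ret = mutex_lock_interruptible(&d->lock);   /* was: down_interruptible() */

        if (ret)
                return ret;     /* -EINTR if a signal arrived */

        if (d->is_open) {
                ret = -EBUSY;
                goto out;
        }
        d->is_open = true;
out:
        mutex_unlock(&d->lock); /* was: up(&d->sem) */
        return ret;
}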
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h index ae771cac1629..405fee7e189b 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h | |||
@@ -48,9 +48,9 @@ struct hwicap_drvdata { | |||
48 | u8 write_buffer[4]; | 48 | u8 write_buffer[4]; |
49 | u32 read_buffer_in_use; /* Always in [0,3] */ | 49 | u32 read_buffer_in_use; /* Always in [0,3] */ |
50 | u8 read_buffer[4]; | 50 | u8 read_buffer[4]; |
51 | u32 mem_start; /* phys. address of the control registers */ | 51 | resource_size_t mem_start;/* phys. address of the control registers */ |
52 | u32 mem_end; /* phys. address of the control registers */ | 52 | resource_size_t mem_end; /* phys. address of the control registers */ |
53 | u32 mem_size; | 53 | resource_size_t mem_size; |
54 | void __iomem *base_address;/* virt. address of the control registers */ | 54 | void __iomem *base_address;/* virt. address of the control registers */ |
55 | 55 | ||
56 | struct device *dev; | 56 | struct device *dev; |
@@ -61,7 +61,7 @@ struct hwicap_drvdata { | |||
61 | const struct config_registers *config_regs; | 61 | const struct config_registers *config_regs; |
62 | void *private_data; | 62 | void *private_data; |
63 | bool is_open; | 63 | bool is_open; |
64 | struct semaphore sem; | 64 | struct mutex sem; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct hwicap_driver_config { | 67 | struct hwicap_driver_config { |
@@ -164,29 +164,29 @@ struct config_registers { | |||
164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL | 164 | #define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL |
165 | 165 | ||
166 | /** | 166 | /** |
167 | * hwicap_type_1_read: Generates a Type 1 read packet header. | 167 | * hwicap_type_1_read - Generates a Type 1 read packet header. |
168 | * @parameter: Register is the address of the register to be read back. | 168 | * @reg: is the address of the register to be read back. |
169 | * | 169 | * |
170 | * Generates a Type 1 read packet header, which is used to indirectly | 170 | * Generates a Type 1 read packet header, which is used to indirectly |
171 | * read registers in the configuration logic. This packet must then | 171 | * read registers in the configuration logic. This packet must then |
172 | * be sent through the icap device, and a return packet received with | 172 | * be sent through the icap device, and a return packet received with |
173 | * the information. | 173 | * the information. |
174 | **/ | 174 | **/ |
175 | static inline u32 hwicap_type_1_read(u32 Register) | 175 | static inline u32 hwicap_type_1_read(u32 reg) |
176 | { | 176 | { |
177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 177 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
178 | (Register << XHI_REGISTER_SHIFT) | | 178 | (reg << XHI_REGISTER_SHIFT) | |
179 | (XHI_OP_READ << XHI_OP_SHIFT); | 179 | (XHI_OP_READ << XHI_OP_SHIFT); |
180 | } | 180 | } |
181 | 181 | ||
182 | /** | 182 | /** |
183 | * hwicap_type_1_write: Generates a Type 1 write packet header | 183 | * hwicap_type_1_write - Generates a Type 1 write packet header |
184 | * @parameter: Register is the address of the register to be read back. | 184 | * @reg: is the address of the register to be written. |
185 | **/ | 185 | **/ |
186 | static inline u32 hwicap_type_1_write(u32 Register) | 186 | static inline u32 hwicap_type_1_write(u32 reg) |
187 | { | 187 | { |
188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | | 188 | return (XHI_TYPE_1 << XHI_TYPE_SHIFT) | |
189 | (Register << XHI_REGISTER_SHIFT) | | 189 | (reg << XHI_REGISTER_SHIFT) | |
190 | (XHI_OP_WRITE << XHI_OP_SHIFT); | 190 | (XHI_OP_WRITE << XHI_OP_SHIFT); |
191 | } | 191 | } |
192 | 192 | ||
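The two helpers above pack a packet type, a register address, and an opcode into one 32-bit Type 1 header that is then written through the ICAP device. The XHI_* shift constants are defined elsewhere in this header, so the following standalone sketch uses assumed field positions purely to illustrate the packing; the real values may differ.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed field positions for illustration only; the driver's real
     * XHI_TYPE_SHIFT/XHI_REGISTER_SHIFT/XHI_OP_SHIFT may differ. */
    #define DEMO_TYPE_1          1u
    #define DEMO_TYPE_SHIFT      29
    #define DEMO_REGISTER_SHIFT  13
    #define DEMO_OP_SHIFT        27
    #define DEMO_OP_READ         1u

    static uint32_t demo_type_1_read(uint32_t reg)
    {
            return (DEMO_TYPE_1 << DEMO_TYPE_SHIFT) |
                   (reg << DEMO_REGISTER_SHIFT) |
                   (DEMO_OP_READ << DEMO_OP_SHIFT);
    }

    int main(void)
    {
            /* Read back register 7 (an arbitrary example index). */
            printf("type 1 read header: 0x%08x\n", demo_type_1_read(7));
            return 0;
    }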
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index fea2d3ed9cbd..85e2ba7fcfba 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -47,7 +47,7 @@ static LIST_HEAD(notify_list); | |||
47 | 47 | ||
48 | static struct cn_dev cdev; | 48 | static struct cn_dev cdev; |
49 | 49 | ||
50 | int cn_already_initialized = 0; | 50 | static int cn_already_initialized; |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * msg->seq and msg->ack are used to determine message genealogy. | 53 | * msg->seq and msg->ack are used to determine message genealogy. |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 89a29cd93783..35a26a3e5f68 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -671,13 +671,13 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
671 | { | 671 | { |
672 | struct cpufreq_policy * policy = to_policy(kobj); | 672 | struct cpufreq_policy * policy = to_policy(kobj); |
673 | struct freq_attr * fattr = to_attr(attr); | 673 | struct freq_attr * fattr = to_attr(attr); |
674 | ssize_t ret; | 674 | ssize_t ret = -EINVAL; |
675 | policy = cpufreq_cpu_get(policy->cpu); | 675 | policy = cpufreq_cpu_get(policy->cpu); |
676 | if (!policy) | 676 | if (!policy) |
677 | return -EINVAL; | 677 | goto no_policy; |
678 | 678 | ||
679 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 679 | if (lock_policy_rwsem_read(policy->cpu) < 0) |
680 | return -EINVAL; | 680 | goto fail; |
681 | 681 | ||
682 | if (fattr->show) | 682 | if (fattr->show) |
683 | ret = fattr->show(policy, buf); | 683 | ret = fattr->show(policy, buf); |
@@ -685,8 +685,9 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
685 | ret = -EIO; | 685 | ret = -EIO; |
686 | 686 | ||
687 | unlock_policy_rwsem_read(policy->cpu); | 687 | unlock_policy_rwsem_read(policy->cpu); |
688 | 688 | fail: | |
689 | cpufreq_cpu_put(policy); | 689 | cpufreq_cpu_put(policy); |
690 | no_policy: | ||
690 | return ret; | 691 | return ret; |
691 | } | 692 | } |
692 | 693 | ||
@@ -695,13 +696,13 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
695 | { | 696 | { |
696 | struct cpufreq_policy * policy = to_policy(kobj); | 697 | struct cpufreq_policy * policy = to_policy(kobj); |
697 | struct freq_attr * fattr = to_attr(attr); | 698 | struct freq_attr * fattr = to_attr(attr); |
698 | ssize_t ret; | 699 | ssize_t ret = -EINVAL; |
699 | policy = cpufreq_cpu_get(policy->cpu); | 700 | policy = cpufreq_cpu_get(policy->cpu); |
700 | if (!policy) | 701 | if (!policy) |
701 | return -EINVAL; | 702 | goto no_policy; |
702 | 703 | ||
703 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 704 | if (lock_policy_rwsem_write(policy->cpu) < 0) |
704 | return -EINVAL; | 705 | goto fail; |
705 | 706 | ||
706 | if (fattr->store) | 707 | if (fattr->store) |
707 | ret = fattr->store(policy, buf, count); | 708 | ret = fattr->store(policy, buf, count); |
@@ -709,8 +710,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
709 | ret = -EIO; | 710 | ret = -EIO; |
710 | 711 | ||
711 | unlock_policy_rwsem_write(policy->cpu); | 712 | unlock_policy_rwsem_write(policy->cpu); |
712 | 713 | fail: | |
713 | cpufreq_cpu_put(policy); | 714 | cpufreq_cpu_put(policy); |
715 | no_policy: | ||
714 | return ret; | 716 | return ret; |
715 | } | 717 | } |
716 | 718 | ||
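Note on the change above: the early returns in show()/store() used to leak the reference taken by cpufreq_cpu_get() whenever the policy rwsem could not be taken; routing every exit through the fail/no_policy labels keeps get and put paired on all paths. A minimal standalone sketch of the same cleanup pattern (the res_* helpers are placeholders, not cpufreq APIs):

    #include <stdio.h>

    static int refs;

    static int res_get(void)     { refs++; return 1; } /* stands in for cpufreq_cpu_get() */
    static void res_put(void)    { refs--; }           /* stands in for cpufreq_cpu_put() */
    static int res_lock(void)    { return 0; }         /* 0 on success */
    static void res_unlock(void) { }
    static int do_show(void)     { return 7; }         /* the real work */

    static int show_like(void)
    {
            int ret = -1;                 /* default error, as ret = -EINVAL above */

            if (!res_get())
                    goto no_policy;       /* nothing acquired yet, plain return */

            if (res_lock() < 0)
                    goto fail;            /* drop the reference, skip the unlock */

            ret = do_show();
            res_unlock();
    fail:
            res_put();                    /* always paired with res_get() */
    no_policy:
            return ret;
    }

    int main(void)
    {
            printf("ret = %d, leaked refs = %d\n", show_like(), refs);
            return 0;
    }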
@@ -1775,7 +1777,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1775 | return NOTIFY_OK; | 1777 | return NOTIFY_OK; |
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = | 1780 | static struct notifier_block __refdata cpufreq_cpu_notifier = |
1779 | { | 1781 | { |
1780 | .notifier_call = cpufreq_cpu_callback, | 1782 | .notifier_call = cpufreq_cpu_callback, |
1781 | }; | 1783 | }; |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1b8312b02006..070421a5480e 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -323,7 +323,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
323 | return NOTIFY_OK; | 323 | return NOTIFY_OK; |
324 | } | 324 | } |
325 | 325 | ||
326 | static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata = | 326 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = |
327 | { | 327 | { |
328 | .notifier_call = cpufreq_stat_cpu_callback, | 328 | .notifier_call = cpufreq_stat_cpu_callback, |
329 | }; | 329 | }; |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index a703deffb795..27340a7b19dd 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 7 | depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC |
8 | depends on !HIGHMEM64G | 8 | depends on !HIGHMEM64G |
9 | help | 9 | help |
10 | DMA engines can do asynchronous data transfers without | 10 | DMA engines can do asynchronous data transfers without |
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA | |||
37 | help | 37 | help |
38 | Enable support for the Intel(R) IOP Series RAID engines. | 38 | Enable support for the Intel(R) IOP Series RAID engines. |
39 | 39 | ||
40 | config FSL_DMA | ||
41 | bool "Freescale MPC85xx/MPC83xx DMA support" | ||
42 | depends on PPC | ||
43 | select DMA_ENGINE | ||
44 | ---help--- | ||
45 | Enable support for the Freescale DMA engine. It supports the | ||
46 | MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. | ||
47 | The MPC8349 and MPC8360 are also supported. | ||
48 | |||
49 | config FSL_DMA_SELFTEST | ||
50 | bool "Enable the self test for each DMA channel" | ||
51 | depends on FSL_DMA | ||
52 | default y | ||
53 | ---help--- | ||
54 | Enable the self test for each DMA channel. A self test will be | ||
55 | performed after the channel is probed to ensure the DMA engine works correctly. | ||
56 | |||
40 | config DMA_ENGINE | 57 | config DMA_ENGINE |
41 | bool | 58 | bool |
42 | 59 | ||
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b152cd84e123..c8036d945902 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o | |||
3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o | 3 | obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o |
4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o | 4 | ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o |
5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 5 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
6 | obj-$(CONFIG_FSL_DMA) += fsldma.o | ||
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c new file mode 100644 index 000000000000..cc9a68158d99 --- /dev/null +++ b/drivers/dma/fsldma.c | |||
@@ -0,0 +1,1067 @@ | |||
1 | /* | ||
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | ||
3 | * | ||
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
5 | * | ||
6 | * Author: | ||
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
9 | * | ||
10 | * Description: | ||
11 | * DMA engine driver for Freescale MPC8540 DMA controller, which is | ||
12 | * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. | ||
13 | * The support for MPC8349 DMA contorller is also added. | ||
14 | * | ||
15 | * This is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/init.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/dmaengine.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/dma-mapping.h> | ||
29 | #include <linux/dmapool.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | |||
32 | #include "fsldma.h" | ||
33 | |||
34 | static void dma_init(struct fsl_dma_chan *fsl_chan) | ||
35 | { | ||
36 | /* Reset the channel */ | ||
37 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); | ||
38 | |||
39 | switch (fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
40 | case FSL_DMA_IP_85XX: | ||
41 | /* Set the channel to the following modes: | ||
42 | * EIE - Error interrupt enable | ||
43 | * EOSIE - End of segments interrupt enable (basic mode) | ||
44 | * EOLNIE - End of links interrupt enable | ||
45 | */ | ||
46 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | ||
47 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | ||
48 | break; | ||
49 | case FSL_DMA_IP_83XX: | ||
50 | /* Set the channel to the following modes: | ||
51 | * EOTIE - End-of-transfer interrupt enable | ||
52 | */ | ||
53 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE, | ||
54 | 32); | ||
55 | break; | ||
56 | } | ||
57 | |||
58 | } | ||
59 | |||
60 | static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val) | ||
61 | { | ||
62 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); | ||
63 | } | ||
64 | |||
65 | static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan) | ||
66 | { | ||
67 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); | ||
68 | } | ||
69 | |||
70 | static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, | ||
71 | struct fsl_dma_ld_hw *hw, u32 count) | ||
72 | { | ||
73 | hw->count = CPU_TO_DMA(fsl_chan, count, 32); | ||
74 | } | ||
75 | |||
76 | static void set_desc_src(struct fsl_dma_chan *fsl_chan, | ||
77 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | ||
78 | { | ||
79 | u64 snoop_bits; | ||
80 | |||
81 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
82 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | ||
83 | hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); | ||
84 | } | ||
85 | |||
86 | static void set_desc_dest(struct fsl_dma_chan *fsl_chan, | ||
87 | struct fsl_dma_ld_hw *hw, dma_addr_t dest) | ||
88 | { | ||
89 | u64 snoop_bits; | ||
90 | |||
91 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | ||
92 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | ||
93 | hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); | ||
94 | } | ||
95 | |||
96 | static void set_desc_next(struct fsl_dma_chan *fsl_chan, | ||
97 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | ||
98 | { | ||
99 | u64 snoop_bits; | ||
100 | |||
101 | snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | ||
102 | ? FSL_DMA_SNEN : 0; | ||
103 | hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); | ||
104 | } | ||
105 | |||
106 | static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
107 | { | ||
108 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); | ||
109 | } | ||
110 | |||
111 | static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) | ||
112 | { | ||
113 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; | ||
114 | } | ||
115 | |||
116 | static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) | ||
117 | { | ||
118 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); | ||
119 | } | ||
120 | |||
121 | static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) | ||
122 | { | ||
123 | return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); | ||
124 | } | ||
125 | |||
126 | static int dma_is_idle(struct fsl_dma_chan *fsl_chan) | ||
127 | { | ||
128 | u32 sr = get_sr(fsl_chan); | ||
129 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | ||
130 | } | ||
131 | |||
132 | static void dma_start(struct fsl_dma_chan *fsl_chan) | ||
133 | { | ||
134 | u32 mr_set = 0; | ||
135 | |||
136 | if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | ||
137 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); | ||
138 | mr_set |= FSL_DMA_MR_EMP_EN; | ||
139 | } else | ||
140 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
141 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
142 | & ~FSL_DMA_MR_EMP_EN, 32); | ||
143 | |||
144 | if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) | ||
145 | mr_set |= FSL_DMA_MR_EMS_EN; | ||
146 | else | ||
147 | mr_set |= FSL_DMA_MR_CS; | ||
148 | |||
149 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
150 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
151 | | mr_set, 32); | ||
152 | } | ||
153 | |||
154 | static void dma_halt(struct fsl_dma_chan *fsl_chan) | ||
155 | { | ||
156 | int i = 0; | ||
157 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
158 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, | ||
159 | 32); | ||
160 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
161 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS | ||
162 | | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); | ||
163 | |||
164 | while (!dma_is_idle(fsl_chan) && (i++ < 100)) | ||
165 | udelay(10); | ||
166 | if (i >= 100 && !dma_is_idle(fsl_chan)) | ||
167 | dev_err(fsl_chan->dev, "DMA halt timeout!\n"); | ||
168 | } | ||
169 | |||
170 | static void set_ld_eol(struct fsl_dma_chan *fsl_chan, | ||
171 | struct fsl_desc_sw *desc) | ||
172 | { | ||
173 | desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
174 | DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL, | ||
175 | 64); | ||
176 | } | ||
177 | |||
178 | static void append_ld_queue(struct fsl_dma_chan *fsl_chan, | ||
179 | struct fsl_desc_sw *new_desc) | ||
180 | { | ||
181 | struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); | ||
182 | |||
183 | if (list_empty(&fsl_chan->ld_queue)) | ||
184 | return; | ||
185 | |||
186 | /* Link to the new descriptor physical address and | ||
187 | * Enable End-of-segment interrupt for | ||
188 | * the last link descriptor. | ||
189 | * (the previous node's next link descriptor) | ||
190 | * | ||
191 | * For FSL_DMA_IP_83xx, the snoop enable bit needs to be set. | ||
192 | */ | ||
193 | queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, | ||
194 | new_desc->async_tx.phys | FSL_DMA_EOSIE | | ||
195 | (((fsl_chan->feature & FSL_DMA_IP_MASK) | ||
196 | == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | ||
201 | * @fsl_chan : Freescale DMA channel | ||
202 | * @size : Address loop size, 0 to disable the loop | ||
203 | * | ||
204 | * Sets the source address hold transfer size. The source address | ||
205 | * hold or loop transfer size controls how the DMA reads from the | ||
206 | * source address (SA): if the loop size is 4, the DMA will | ||
207 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, | ||
208 | * SA + 1 ... and so on. | ||
209 | */ | ||
210 | static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
211 | { | ||
212 | switch (size) { | ||
213 | case 0: | ||
214 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
215 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
216 | (~FSL_DMA_MR_SAHE), 32); | ||
217 | break; | ||
218 | case 1: | ||
219 | case 2: | ||
220 | case 4: | ||
221 | case 8: | ||
222 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
223 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
224 | FSL_DMA_MR_SAHE | (__ilog2(size) << 14), | ||
225 | 32); | ||
226 | break; | ||
227 | } | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * fsl_chan_set_dest_loop_size - Set destination address hold transfer size | ||
232 | * @fsl_chan : Freescale DMA channel | ||
233 | * @size : Address loop size, 0 to disable the loop | ||
234 | * | ||
235 | * Sets the destination address hold transfer size. The destination | ||
236 | * address hold or loop transfer size controls how the DMA writes to | ||
237 | * the destination address (TA): if the loop size is 4, the DMA will | ||
238 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, | ||
239 | * TA + 1 ... and so on. | ||
240 | */ | ||
241 | static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) | ||
242 | { | ||
243 | switch (size) { | ||
244 | case 0: | ||
245 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
246 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & | ||
247 | (~FSL_DMA_MR_DAHE), 32); | ||
248 | break; | ||
249 | case 1: | ||
250 | case 2: | ||
251 | case 4: | ||
252 | case 8: | ||
253 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
254 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | | ||
255 | FSL_DMA_MR_DAHE | (__ilog2(size) << 16), | ||
256 | 32); | ||
257 | break; | ||
258 | } | ||
259 | } | ||
260 | |||
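The two loop-size helpers above encode __ilog2(size) into the mode register next to the corresponding hold-enable bit: the source field is shifted by 14 and paired with FSL_DMA_MR_SAHE, the destination field by 16 with FSL_DMA_MR_DAHE (both constants appear in fsldma.h below). A small worked example of that bit arithmetic, computed outside the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define FSL_DMA_MR_SAHE 0x00001000u     /* from fsldma.h in this patch */
    #define FSL_DMA_MR_DAHE 0x00002000u     /* from fsldma.h in this patch */

    static unsigned int ilog2u(unsigned int v)   /* stand-in for the kernel's __ilog2() */
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int size = 8;    /* address loop size of 8 bytes */
            uint32_t mr = 0;          /* pretend the mode register starts out clear */

            mr |= FSL_DMA_MR_SAHE | (ilog2u(size) << 14);   /* source hold */
            mr |= FSL_DMA_MR_DAHE | (ilog2u(size) << 16);   /* destination hold */
            printf("mode register bits: 0x%08x\n", mr);     /* 0x0003f000 */
            return 0;
    }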
261 | /** | ||
262 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | ||
263 | * @fsl_chan : Freescale DMA channel | ||
264 | * @size : Pause control size, 0 to disable external pause control. | ||
265 | * The maximum is 1024. | ||
266 | * | ||
267 | * The Freescale DMA channel can be controlled by the external | ||
268 | * signal DREQ#. The pause control size is how many bytes are allowed | ||
269 | * to transfer before pausing the channel, after which a new assertion | ||
270 | * of DREQ# resumes channel operation. | ||
271 | */ | ||
272 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | ||
273 | { | ||
274 | if (size > 1024) | ||
275 | return; | ||
276 | |||
277 | if (size) { | ||
278 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | ||
279 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | ||
280 | | ((__ilog2(size) << 24) & 0x0f000000), | ||
281 | 32); | ||
282 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | ||
283 | } else | ||
284 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * fsl_chan_toggle_ext_start - Toggle channel external start status | ||
289 | * @fsl_chan : Freescale DMA channel | ||
290 | * @enable : 0 is disabled, 1 is enabled. | ||
291 | * | ||
292 | * If the external start is enabled, the channel can be started by an | ||
293 | * external DMA start pin, so dma_start() does not start the | ||
294 | * transfer immediately. The DMA channel will wait until the | ||
295 | * control pin is asserted. | ||
296 | */ | ||
297 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | ||
298 | { | ||
299 | if (enable) | ||
300 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | ||
301 | else | ||
302 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | ||
303 | } | ||
304 | |||
305 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
306 | { | ||
307 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | ||
308 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | ||
309 | unsigned long flags; | ||
310 | dma_cookie_t cookie; | ||
311 | |||
312 | /* cookie increment and adding to ld_queue must be atomic */ | ||
313 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
314 | |||
315 | cookie = fsl_chan->common.cookie; | ||
316 | cookie++; | ||
317 | if (cookie < 0) | ||
318 | cookie = 1; | ||
319 | desc->async_tx.cookie = cookie; | ||
320 | fsl_chan->common.cookie = desc->async_tx.cookie; | ||
321 | |||
322 | append_ld_queue(fsl_chan, desc); | ||
323 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | ||
324 | |||
325 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
326 | |||
327 | return cookie; | ||
328 | } | ||
329 | |||
330 | /** | ||
331 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | ||
332 | * @fsl_chan : Freescale DMA channel | ||
333 | * | ||
334 | * Return - The descriptor allocated. NULL for failed. | ||
335 | */ | ||
336 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | ||
337 | struct fsl_dma_chan *fsl_chan) | ||
338 | { | ||
339 | dma_addr_t pdesc; | ||
340 | struct fsl_desc_sw *desc_sw; | ||
341 | |||
342 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | ||
343 | if (desc_sw) { | ||
344 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | ||
345 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | ||
346 | &fsl_chan->common); | ||
347 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | ||
348 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | ||
349 | desc_sw->async_tx.phys = pdesc; | ||
350 | } | ||
351 | |||
352 | return desc_sw; | ||
353 | } | ||
354 | |||
355 | |||
356 | /** | ||
357 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | ||
358 | * @fsl_chan : Freescale DMA channel | ||
359 | * | ||
360 | * This function will create a dma pool for descriptor allocation. | ||
361 | * | ||
362 | * Return - The number of descriptors allocated. | ||
363 | */ | ||
364 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) | ||
365 | { | ||
366 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
367 | LIST_HEAD(tmp_list); | ||
368 | |||
369 | /* The descriptor must be aligned to 32 bytes | ||
370 | * to meet the FSL DMA specification requirement. | ||
371 | */ | ||
372 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | ||
373 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | ||
374 | 32, 0); | ||
375 | if (!fsl_chan->desc_pool) { | ||
376 | dev_err(fsl_chan->dev, "No memory for channel %d " | ||
377 | "descriptor dma pool.\n", fsl_chan->id); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | return 1; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * fsl_dma_free_chan_resources - Free all resources of the channel. | ||
386 | * @fsl_chan : Freescale DMA channel | ||
387 | */ | ||
388 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | ||
389 | { | ||
390 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
391 | struct fsl_desc_sw *desc, *_desc; | ||
392 | unsigned long flags; | ||
393 | |||
394 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | ||
395 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
396 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
397 | #ifdef FSL_DMA_LD_DEBUG | ||
398 | dev_dbg(fsl_chan->dev, | ||
399 | "LD %p will be released.\n", desc); | ||
400 | #endif | ||
401 | list_del(&desc->node); | ||
402 | /* free link descriptor */ | ||
403 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
404 | } | ||
405 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
406 | dma_pool_destroy(fsl_chan->desc_pool); | ||
407 | } | ||
408 | |||
409 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | ||
410 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | ||
411 | size_t len, unsigned long flags) | ||
412 | { | ||
413 | struct fsl_dma_chan *fsl_chan; | ||
414 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | ||
415 | size_t copy; | ||
416 | LIST_HEAD(link_chain); | ||
417 | |||
418 | if (!chan) | ||
419 | return NULL; | ||
420 | |||
421 | if (!len) | ||
422 | return NULL; | ||
423 | |||
424 | fsl_chan = to_fsl_chan(chan); | ||
425 | |||
426 | do { | ||
427 | |||
428 | /* Allocate the link descriptor from DMA pool */ | ||
429 | new = fsl_dma_alloc_descriptor(fsl_chan); | ||
430 | if (!new) { | ||
431 | dev_err(fsl_chan->dev, | ||
432 | "No free memory for link descriptor\n"); | ||
433 | return NULL; | ||
434 | } | ||
435 | #ifdef FSL_DMA_LD_DEBUG | ||
436 | dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); | ||
437 | #endif | ||
438 | |||
439 | copy = min(len, FSL_DMA_BCR_MAX_CNT); | ||
440 | |||
441 | set_desc_cnt(fsl_chan, &new->hw, copy); | ||
442 | set_desc_src(fsl_chan, &new->hw, dma_src); | ||
443 | set_desc_dest(fsl_chan, &new->hw, dma_dest); | ||
444 | |||
445 | if (!first) | ||
446 | first = new; | ||
447 | else | ||
448 | set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); | ||
449 | |||
450 | new->async_tx.cookie = 0; | ||
451 | new->async_tx.ack = 1; | ||
452 | |||
453 | prev = new; | ||
454 | len -= copy; | ||
455 | dma_src += copy; | ||
456 | dma_dest += copy; | ||
457 | |||
458 | /* Insert the link descriptor to the LD ring */ | ||
459 | list_add_tail(&new->node, &first->async_tx.tx_list); | ||
460 | } while (len); | ||
461 | |||
462 | new->async_tx.ack = 0; /* client is in control of this ack */ | ||
463 | new->async_tx.cookie = -EBUSY; | ||
464 | |||
465 | /* Set End-of-link to the last link descriptor of new list*/ | ||
466 | set_ld_eol(fsl_chan, new); | ||
467 | |||
468 | return first ? &first->async_tx : NULL; | ||
469 | } | ||
470 | |||
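fsl_dma_prep_memcpy() above chops a request into link descriptors of at most FSL_DMA_BCR_MAX_CNT bytes and leaves only the final descriptor for the client to acknowledge. For orientation, here is a hedged sketch of how a kernel client could drive such a channel through the generic dmaengine callbacks that of_fsl_dma_probe() wires up later in this file; the function name, buffer handling, and the polling wait are illustrative, not part of the driver:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    static int example_memcpy(struct dma_chan *chan, struct device *dev,
                              void *dst, void *src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;
            dma_addr_t dma_src, dma_dst;
            dma_cookie_t cookie;

            dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
            dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

            /* The driver may split this into several link descriptors internally. */
            tx = chan->device->device_prep_dma_memcpy(chan, dma_dst, dma_src,
                                                      len, 0);
            if (!tx) {
                    dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
                    dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
                    return -ENOMEM;
            }
            async_tx_ack(tx);

            cookie = tx->tx_submit(tx);
            chan->device->device_issue_pending(chan);

            /* Simple polling wait; a real client would use a completion callback. */
            while (chan->device->device_is_tx_complete(chan, cookie, NULL, NULL)
                            != DMA_SUCCESS)
                    msleep(1);

            dma_unmap_single(dev, dma_src, len, DMA_TO_DEVICE);
            dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
            return 0;
    }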
471 | /** | ||
472 | * fsl_dma_update_completed_cookie - Update the completed cookie. | ||
473 | * @fsl_chan : Freescale DMA channel | ||
474 | */ | ||
475 | static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) | ||
476 | { | ||
477 | struct fsl_desc_sw *cur_desc, *desc; | ||
478 | dma_addr_t ld_phy; | ||
479 | |||
480 | ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; | ||
481 | |||
482 | if (ld_phy) { | ||
483 | cur_desc = NULL; | ||
484 | list_for_each_entry(desc, &fsl_chan->ld_queue, node) | ||
485 | if (desc->async_tx.phys == ld_phy) { | ||
486 | cur_desc = desc; | ||
487 | break; | ||
488 | } | ||
489 | |||
490 | if (cur_desc && cur_desc->async_tx.cookie) { | ||
491 | if (dma_is_idle(fsl_chan)) | ||
492 | fsl_chan->completed_cookie = | ||
493 | cur_desc->async_tx.cookie; | ||
494 | else | ||
495 | fsl_chan->completed_cookie = | ||
496 | cur_desc->async_tx.cookie - 1; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | |||
501 | /** | ||
502 | * fsl_chan_ld_cleanup - Clean up link descriptors | ||
503 | * @fsl_chan : Freescale DMA channel | ||
504 | * | ||
505 | * This function cleans up the ld_queue of the DMA channel: completed | ||
506 | * link descriptors are removed from the queue, their completion | ||
507 | * callbacks are run, and they are freed back to the descriptor pool. | ||
508 | */ | ||
509 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | ||
510 | { | ||
511 | struct fsl_desc_sw *desc, *_desc; | ||
512 | unsigned long flags; | ||
513 | |||
514 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
515 | |||
516 | fsl_dma_update_completed_cookie(fsl_chan); | ||
517 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", | ||
518 | fsl_chan->completed_cookie); | ||
519 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | ||
520 | dma_async_tx_callback callback; | ||
521 | void *callback_param; | ||
522 | |||
523 | if (dma_async_is_complete(desc->async_tx.cookie, | ||
524 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | ||
525 | == DMA_IN_PROGRESS) | ||
526 | break; | ||
527 | |||
528 | callback = desc->async_tx.callback; | ||
529 | callback_param = desc->async_tx.callback_param; | ||
530 | |||
531 | /* Remove from ld_queue list */ | ||
532 | list_del(&desc->node); | ||
533 | |||
534 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n", | ||
535 | desc); | ||
536 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | ||
537 | |||
538 | /* Run the link descriptor callback function */ | ||
539 | if (callback) { | ||
540 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
541 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | ||
542 | desc); | ||
543 | callback(callback_param); | ||
544 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
545 | } | ||
546 | } | ||
547 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
548 | } | ||
549 | |||
550 | /** | ||
551 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | ||
552 | * @fsl_chan : Freescale DMA channel | ||
553 | */ | ||
554 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | ||
555 | { | ||
556 | struct list_head *ld_node; | ||
557 | dma_addr_t next_dest_addr; | ||
558 | unsigned long flags; | ||
559 | |||
560 | if (!dma_is_idle(fsl_chan)) | ||
561 | return; | ||
562 | |||
563 | dma_halt(fsl_chan); | ||
564 | |||
565 | /* If there are link descriptors in the queue that have | ||
566 | * not been transferred, we need to start them. | ||
567 | */ | ||
568 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
569 | |||
570 | /* Find the first untransferred descriptor */ | ||
571 | for (ld_node = fsl_chan->ld_queue.next; | ||
572 | (ld_node != &fsl_chan->ld_queue) | ||
573 | && (dma_async_is_complete( | ||
574 | to_fsl_desc(ld_node)->async_tx.cookie, | ||
575 | fsl_chan->completed_cookie, | ||
576 | fsl_chan->common.cookie) == DMA_SUCCESS); | ||
577 | ld_node = ld_node->next); | ||
578 | |||
579 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
580 | |||
581 | if (ld_node != &fsl_chan->ld_queue) { | ||
582 | /* Get the ld start address from ld_queue */ | ||
583 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | ||
584 | dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%016llx\n", | ||
585 | (u64)next_dest_addr); | ||
586 | set_cdar(fsl_chan, next_dest_addr); | ||
587 | dma_start(fsl_chan); | ||
588 | } else { | ||
589 | set_cdar(fsl_chan, 0); | ||
590 | set_ndar(fsl_chan, 0); | ||
591 | } | ||
592 | } | ||
593 | |||
594 | /** | ||
595 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | ||
596 | * @fsl_chan : Freescale DMA channel | ||
597 | */ | ||
598 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | ||
599 | { | ||
600 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
601 | |||
602 | #ifdef FSL_DMA_LD_DEBUG | ||
603 | struct fsl_desc_sw *ld; | ||
604 | unsigned long flags; | ||
605 | |||
606 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | ||
607 | if (list_empty(&fsl_chan->ld_queue)) { | ||
608 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
609 | return; | ||
610 | } | ||
611 | |||
612 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | ||
613 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | ||
614 | int i; | ||
615 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | ||
616 | fsl_chan->id, ld->async_tx.phys); | ||
617 | for (i = 0; i < 8; i++) | ||
618 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | ||
619 | i, *(((u32 *)&ld->hw) + i)); | ||
620 | } | ||
621 | dev_dbg(fsl_chan->dev, "----------------\n"); | ||
622 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | ||
623 | #endif | ||
624 | |||
625 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
626 | } | ||
627 | |||
628 | static void fsl_dma_dependency_added(struct dma_chan *chan) | ||
629 | { | ||
630 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
631 | |||
632 | fsl_chan_ld_cleanup(fsl_chan); | ||
633 | } | ||
634 | |||
635 | /** | ||
636 | * fsl_dma_is_complete - Determine the DMA status | ||
637 | * @fsl_chan : Freescale DMA channel | ||
638 | */ | ||
639 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | ||
640 | dma_cookie_t cookie, | ||
641 | dma_cookie_t *done, | ||
642 | dma_cookie_t *used) | ||
643 | { | ||
644 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | ||
645 | dma_cookie_t last_used; | ||
646 | dma_cookie_t last_complete; | ||
647 | |||
648 | fsl_chan_ld_cleanup(fsl_chan); | ||
649 | |||
650 | last_used = chan->cookie; | ||
651 | last_complete = fsl_chan->completed_cookie; | ||
652 | |||
653 | if (done) | ||
654 | *done = last_complete; | ||
655 | |||
656 | if (used) | ||
657 | *used = last_used; | ||
658 | |||
659 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
660 | } | ||
661 | |||
662 | static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) | ||
663 | { | ||
664 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
665 | dma_addr_t stat; | ||
666 | |||
667 | stat = get_sr(fsl_chan); | ||
668 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | ||
669 | fsl_chan->id, stat); | ||
670 | set_sr(fsl_chan, stat); /* Clear the event register */ | ||
671 | |||
672 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | ||
673 | if (!stat) | ||
674 | return IRQ_NONE; | ||
675 | |||
676 | if (stat & FSL_DMA_SR_TE) | ||
677 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | ||
678 | |||
679 | /* If the link descriptor segment transfer finishes, | ||
680 | * we will recycle the used descriptor. | ||
681 | */ | ||
682 | if (stat & FSL_DMA_SR_EOSI) { | ||
683 | dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); | ||
684 | dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, " | ||
685 | "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan), | ||
686 | (u64)get_ndar(fsl_chan)); | ||
687 | stat &= ~FSL_DMA_SR_EOSI; | ||
688 | } | ||
689 | |||
690 | /* If the current transfer is the end-of-transfer, | ||
691 | * we should clear the Channel Start bit to | ||
692 | * prepare for the next transfer. | ||
693 | */ | ||
694 | if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) { | ||
695 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | ||
696 | stat &= ~FSL_DMA_SR_EOLNI; | ||
697 | fsl_chan_xfer_ld_queue(fsl_chan); | ||
698 | } | ||
699 | |||
700 | if (stat) | ||
701 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | ||
702 | stat); | ||
703 | |||
704 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | ||
705 | tasklet_schedule(&fsl_chan->tasklet); | ||
706 | return IRQ_HANDLED; | ||
707 | } | ||
708 | |||
709 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | ||
710 | { | ||
711 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | ||
712 | u32 gsr; | ||
713 | int ch_nr; | ||
714 | |||
715 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | ||
716 | : in_le32(fdev->reg_base); | ||
717 | ch_nr = (32 - ffs(gsr)) / 8; | ||
718 | |||
719 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | ||
720 | fdev->chan[ch_nr]) : IRQ_NONE; | ||
721 | } | ||
722 | |||
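The controller-level handler above derives the interrupting channel from the status word with ch_nr = (32 - ffs(gsr)) / 8, which implies each channel's status occupies one byte, channel 0 in the most significant byte. A tiny worked example of that arithmetic (the sample status value is made up):

    #include <stdio.h>
    #include <strings.h>     /* ffs() */

    int main(void)
    {
            unsigned int gsr = 0x00002000;          /* e.g. channel 2 reporting 0x20 */
            int ch_nr = (32 - ffs(gsr)) / 8;

            printf("interrupting channel: %d\n", ch_nr);    /* prints 2 */
            return 0;
    }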
723 | static void dma_do_tasklet(unsigned long data) | ||
724 | { | ||
725 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | ||
726 | fsl_chan_ld_cleanup(fsl_chan); | ||
727 | } | ||
728 | |||
729 | static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan) | ||
730 | { | ||
731 | if (fsl_chan) | ||
732 | dev_info(fsl_chan->dev, "selftest: callback is ok!\n"); | ||
733 | } | ||
734 | |||
735 | static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan) | ||
736 | { | ||
737 | struct dma_chan *chan; | ||
738 | int err = 0; | ||
739 | dma_addr_t dma_dest, dma_src; | ||
740 | dma_cookie_t cookie; | ||
741 | u8 *src, *dest; | ||
742 | int i; | ||
743 | size_t test_size; | ||
744 | struct dma_async_tx_descriptor *tx1, *tx2, *tx3; | ||
745 | |||
746 | test_size = 4096; | ||
747 | |||
748 | src = kmalloc(test_size * 2, GFP_KERNEL); | ||
749 | if (!src) { | ||
750 | dev_err(fsl_chan->dev, | ||
751 | "selftest: Cannot alloc memory for test!\n"); | ||
752 | err = -ENOMEM; | ||
753 | goto out; | ||
754 | } | ||
755 | |||
756 | dest = src + test_size; | ||
757 | |||
758 | for (i = 0; i < test_size; i++) | ||
759 | src[i] = (u8) i; | ||
760 | |||
761 | chan = &fsl_chan->common; | ||
762 | |||
763 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
764 | dev_err(fsl_chan->dev, | ||
765 | "selftest: Cannot alloc resources for DMA\n"); | ||
766 | err = -ENODEV; | ||
767 | goto out; | ||
768 | } | ||
769 | |||
770 | /* TX 1 */ | ||
771 | dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2, | ||
772 | DMA_TO_DEVICE); | ||
773 | dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2, | ||
774 | DMA_FROM_DEVICE); | ||
775 | tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0); | ||
776 | async_tx_ack(tx1); | ||
777 | |||
778 | cookie = fsl_dma_tx_submit(tx1); | ||
779 | fsl_dma_memcpy_issue_pending(chan); | ||
780 | msleep(2); | ||
781 | |||
782 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
783 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
784 | err = -ENODEV; | ||
785 | goto out; | ||
786 | } | ||
787 | |||
788 | /* Test free and re-alloc channel resources */ | ||
789 | fsl_dma_free_chan_resources(chan); | ||
790 | |||
791 | if (fsl_dma_alloc_chan_resources(chan) < 1) { | ||
792 | dev_err(fsl_chan->dev, | ||
793 | "selftest: Cannot alloc resources for DMA\n"); | ||
794 | err = -ENODEV; | ||
795 | goto free_resources; | ||
796 | } | ||
797 | |||
798 | /* Continue to test | ||
799 | * TX 2 | ||
800 | */ | ||
801 | dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2, | ||
802 | test_size / 4, DMA_TO_DEVICE); | ||
803 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2, | ||
804 | test_size / 4, DMA_FROM_DEVICE); | ||
805 | tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
806 | async_tx_ack(tx2); | ||
807 | |||
808 | /* TX 3 */ | ||
809 | dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4, | ||
810 | test_size / 4, DMA_TO_DEVICE); | ||
811 | dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4, | ||
812 | test_size / 4, DMA_FROM_DEVICE); | ||
813 | tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0); | ||
814 | async_tx_ack(tx3); | ||
815 | |||
816 | /* Test submitting the prepared tx descriptors in swapped order */ | ||
817 | cookie = fsl_dma_tx_submit(tx3); | ||
818 | cookie = fsl_dma_tx_submit(tx2); | ||
819 | |||
820 | #ifdef FSL_DMA_CALLBACKTEST | ||
821 | if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *) | ||
822 | dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) { | ||
823 | tx3->callback = fsl_dma_callback_test; | ||
824 | tx3->callback_param = fsl_chan; | ||
825 | } | ||
826 | #endif | ||
827 | fsl_dma_memcpy_issue_pending(chan); | ||
828 | msleep(2); | ||
829 | |||
830 | if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) { | ||
831 | dev_err(fsl_chan->dev, "selftest: Time out!\n"); | ||
832 | err = -ENODEV; | ||
833 | goto free_resources; | ||
834 | } | ||
835 | |||
836 | err = memcmp(src, dest, test_size); | ||
837 | if (err) { | ||
838 | for (i = 0; (*(src + i) == *(dest + i)) && (i < test_size); | ||
839 | i++); | ||
840 | dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is " | ||
841 | "error! src 0x%x, dest 0x%x\n", | ||
842 | i, test_size, *(src + i), *(dest + i)); | ||
843 | } | ||
844 | |||
845 | free_resources: | ||
846 | fsl_dma_free_chan_resources(chan); | ||
847 | out: | ||
848 | kfree(src); | ||
849 | return err; | ||
850 | } | ||
851 | |||
852 | static int __devinit of_fsl_dma_chan_probe(struct of_device *dev, | ||
853 | const struct of_device_id *match) | ||
854 | { | ||
855 | struct fsl_dma_device *fdev; | ||
856 | struct fsl_dma_chan *new_fsl_chan; | ||
857 | int err; | ||
858 | |||
859 | fdev = dev_get_drvdata(dev->dev.parent); | ||
860 | BUG_ON(!fdev); | ||
861 | |||
862 | /* alloc channel */ | ||
863 | new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); | ||
864 | if (!new_fsl_chan) { | ||
865 | dev_err(&dev->dev, "No free memory for allocating " | ||
866 | "dma channels!\n"); | ||
867 | err = -ENOMEM; | ||
868 | goto err; | ||
869 | } | ||
870 | |||
871 | /* get dma channel register base */ | ||
872 | err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg); | ||
873 | if (err) { | ||
874 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
875 | dev->node->full_name); | ||
876 | goto err; | ||
877 | } | ||
878 | |||
879 | new_fsl_chan->feature = *(u32 *)match->data; | ||
880 | |||
881 | if (!fdev->feature) | ||
882 | fdev->feature = new_fsl_chan->feature; | ||
883 | |||
884 | /* If the DMA device's feature is different from its channels', | ||
885 | * report the bug. | ||
886 | */ | ||
887 | WARN_ON(fdev->feature != new_fsl_chan->feature); | ||
888 | |||
889 | new_fsl_chan->dev = &dev->dev; | ||
890 | new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, | ||
891 | new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); | ||
892 | |||
893 | new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; | ||
894 | if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) { | ||
895 | dev_err(&dev->dev, "There is no %d channel!\n", | ||
896 | new_fsl_chan->id); | ||
897 | err = -EINVAL; | ||
898 | goto err; | ||
899 | } | ||
900 | fdev->chan[new_fsl_chan->id] = new_fsl_chan; | ||
901 | tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, | ||
902 | (unsigned long)new_fsl_chan); | ||
903 | |||
904 | /* Init the channel */ | ||
905 | dma_init(new_fsl_chan); | ||
906 | |||
907 | /* Clear cdar registers */ | ||
908 | set_cdar(new_fsl_chan, 0); | ||
909 | |||
910 | switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { | ||
911 | case FSL_DMA_IP_85XX: | ||
912 | new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; | ||
913 | new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | ||
914 | case FSL_DMA_IP_83XX: | ||
915 | new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; | ||
916 | new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; | ||
917 | } | ||
918 | |||
919 | spin_lock_init(&new_fsl_chan->desc_lock); | ||
920 | INIT_LIST_HEAD(&new_fsl_chan->ld_queue); | ||
921 | |||
922 | new_fsl_chan->common.device = &fdev->common; | ||
923 | |||
924 | /* Add the channel to DMA device channel list */ | ||
925 | list_add_tail(&new_fsl_chan->common.device_node, | ||
926 | &fdev->common.channels); | ||
927 | fdev->common.chancnt++; | ||
928 | |||
929 | new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0); | ||
930 | if (new_fsl_chan->irq != NO_IRQ) { | ||
931 | err = request_irq(new_fsl_chan->irq, | ||
932 | &fsl_dma_chan_do_interrupt, IRQF_SHARED, | ||
933 | "fsldma-channel", new_fsl_chan); | ||
934 | if (err) { | ||
935 | dev_err(&dev->dev, "DMA channel %s request_irq error " | ||
936 | "with return %d\n", dev->node->full_name, err); | ||
937 | goto err; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | #ifdef CONFIG_FSL_DMA_SELFTEST | ||
942 | err = fsl_dma_self_test(new_fsl_chan); | ||
943 | if (err) | ||
944 | goto err; | ||
945 | #endif | ||
946 | |||
947 | dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, | ||
948 | match->compatible, new_fsl_chan->irq); | ||
949 | |||
950 | return 0; | ||
951 | err: | ||
952 | dma_halt(new_fsl_chan); | ||
953 | iounmap(new_fsl_chan->reg_base); | ||
954 | free_irq(new_fsl_chan->irq, new_fsl_chan); | ||
955 | list_del(&new_fsl_chan->common.device_node); | ||
956 | kfree(new_fsl_chan); | ||
957 | return err; | ||
958 | } | ||
959 | |||
960 | const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN; | ||
961 | const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN; | ||
962 | |||
963 | static struct of_device_id of_fsl_dma_chan_ids[] = { | ||
964 | { | ||
965 | .compatible = "fsl,mpc8540-dma-channel", | ||
966 | .data = (void *)&mpc8540_dma_ip_feature, | ||
967 | }, | ||
968 | { | ||
969 | .compatible = "fsl,mpc8349-dma-channel", | ||
970 | .data = (void *)&mpc8349_dma_ip_feature, | ||
971 | }, | ||
972 | {} | ||
973 | }; | ||
974 | |||
975 | static struct of_platform_driver of_fsl_dma_chan_driver = { | ||
976 | .name = "of-fsl-dma-channel", | ||
977 | .match_table = of_fsl_dma_chan_ids, | ||
978 | .probe = of_fsl_dma_chan_probe, | ||
979 | }; | ||
980 | |||
981 | static __init int of_fsl_dma_chan_init(void) | ||
982 | { | ||
983 | return of_register_platform_driver(&of_fsl_dma_chan_driver); | ||
984 | } | ||
985 | |||
986 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | ||
987 | const struct of_device_id *match) | ||
988 | { | ||
989 | int err; | ||
990 | unsigned int irq; | ||
991 | struct fsl_dma_device *fdev; | ||
992 | |||
993 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | ||
994 | if (!fdev) { | ||
995 | dev_err(&dev->dev, "Not enough memory for 'priv'\n"); | ||
996 | err = -ENOMEM; | ||
997 | goto err; | ||
998 | } | ||
999 | fdev->dev = &dev->dev; | ||
1000 | INIT_LIST_HEAD(&fdev->common.channels); | ||
1001 | |||
1002 | /* get DMA controller register base */ | ||
1003 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | ||
1004 | if (err) { | ||
1005 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | ||
1006 | dev->node->full_name); | ||
1007 | goto err; | ||
1008 | } | ||
1009 | |||
1010 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | ||
1011 | "controller at 0x%08x...\n", | ||
1012 | match->compatible, fdev->reg.start); | ||
1013 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end | ||
1014 | - fdev->reg.start + 1); | ||
1015 | |||
1016 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | ||
1017 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | ||
1018 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | ||
1019 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | ||
1020 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | ||
1021 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | ||
1022 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | ||
1023 | fdev->common.device_dependency_added = fsl_dma_dependency_added; | ||
1024 | fdev->common.dev = &dev->dev; | ||
1025 | |||
1026 | irq = irq_of_parse_and_map(dev->node, 0); | ||
1027 | if (irq != NO_IRQ) { | ||
1028 | err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED, | ||
1029 | "fsldma-device", fdev); | ||
1030 | if (err) { | ||
1031 | dev_err(&dev->dev, "DMA device request_irq error " | ||
1032 | "with return %d\n", err); | ||
1033 | goto err; | ||
1034 | } | ||
1035 | } | ||
1036 | |||
1037 | dev_set_drvdata(&(dev->dev), fdev); | ||
1038 | of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev); | ||
1039 | |||
1040 | dma_async_device_register(&fdev->common); | ||
1041 | return 0; | ||
1042 | |||
1043 | err: | ||
1044 | iounmap(fdev->reg_base); | ||
1045 | kfree(fdev); | ||
1046 | return err; | ||
1047 | } | ||
1048 | |||
1049 | static struct of_device_id of_fsl_dma_ids[] = { | ||
1050 | { .compatible = "fsl,mpc8540-dma", }, | ||
1051 | { .compatible = "fsl,mpc8349-dma", }, | ||
1052 | {} | ||
1053 | }; | ||
1054 | |||
1055 | static struct of_platform_driver of_fsl_dma_driver = { | ||
1056 | .name = "of-fsl-dma", | ||
1057 | .match_table = of_fsl_dma_ids, | ||
1058 | .probe = of_fsl_dma_probe, | ||
1059 | }; | ||
1060 | |||
1061 | static __init int of_fsl_dma_init(void) | ||
1062 | { | ||
1063 | return of_register_platform_driver(&of_fsl_dma_driver); | ||
1064 | } | ||
1065 | |||
1066 | subsys_initcall(of_fsl_dma_chan_init); | ||
1067 | subsys_initcall(of_fsl_dma_init); | ||
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h new file mode 100644 index 000000000000..ba78c42121ba --- /dev/null +++ b/drivers/dma/fsldma.h | |||
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: | ||
5 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | ||
6 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | ||
7 | * | ||
8 | * This is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | #ifndef __DMA_FSLDMA_H | ||
15 | #define __DMA_FSLDMA_H | ||
16 | |||
17 | #include <linux/device.h> | ||
18 | #include <linux/dmapool.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /* Define data structures needed by Freescale | ||
22 | * MPC8540 and MPC8349 DMA controllers. | ||
23 | */ | ||
24 | #define FSL_DMA_MR_CS 0x00000001 | ||
25 | #define FSL_DMA_MR_CC 0x00000002 | ||
26 | #define FSL_DMA_MR_CA 0x00000008 | ||
27 | #define FSL_DMA_MR_EIE 0x00000040 | ||
28 | #define FSL_DMA_MR_XFE 0x00000020 | ||
29 | #define FSL_DMA_MR_EOLNIE 0x00000100 | ||
30 | #define FSL_DMA_MR_EOLSIE 0x00000080 | ||
31 | #define FSL_DMA_MR_EOSIE 0x00000200 | ||
32 | #define FSL_DMA_MR_CDSM 0x00000010 | ||
33 | #define FSL_DMA_MR_CTM 0x00000004 | ||
34 | #define FSL_DMA_MR_EMP_EN 0x00200000 | ||
35 | #define FSL_DMA_MR_EMS_EN 0x00040000 | ||
36 | #define FSL_DMA_MR_DAHE 0x00002000 | ||
37 | #define FSL_DMA_MR_SAHE 0x00001000 | ||
38 | |||
39 | /* Special MR definition for MPC8349 */ | ||
40 | #define FSL_DMA_MR_EOTIE 0x00000080 | ||
41 | |||
42 | #define FSL_DMA_SR_CH 0x00000020 | ||
43 | #define FSL_DMA_SR_CB 0x00000004 | ||
44 | #define FSL_DMA_SR_TE 0x00000080 | ||
45 | #define FSL_DMA_SR_EOSI 0x00000002 | ||
46 | #define FSL_DMA_SR_EOLSI 0x00000001 | ||
47 | #define FSL_DMA_SR_EOCDI 0x00000001 | ||
48 | #define FSL_DMA_SR_EOLNI 0x00000008 | ||
49 | |||
50 | #define FSL_DMA_SATR_SBPATMU 0x20000000 | ||
51 | #define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000 | ||
52 | #define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000 | ||
53 | #define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000 | ||
54 | #define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000 | ||
55 | #define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000 | ||
56 | |||
57 | #define FSL_DMA_DATR_DBPATMU 0x20000000 | ||
58 | #define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000 | ||
59 | #define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000 | ||
60 | #define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000 | ||
61 | |||
62 | #define FSL_DMA_EOL ((u64)0x1) | ||
63 | #define FSL_DMA_SNEN ((u64)0x10) | ||
64 | #define FSL_DMA_EOSIE 0x8 | ||
65 | #define FSL_DMA_NLDA_MASK (~(u64)0x1f) | ||
66 | |||
67 | #define FSL_DMA_BCR_MAX_CNT 0x03ffffffu | ||
68 | |||
69 | #define FSL_DMA_DGSR_TE 0x80 | ||
70 | #define FSL_DMA_DGSR_CH 0x20 | ||
71 | #define FSL_DMA_DGSR_PE 0x10 | ||
72 | #define FSL_DMA_DGSR_EOLNI 0x08 | ||
73 | #define FSL_DMA_DGSR_CB 0x04 | ||
74 | #define FSL_DMA_DGSR_EOSI 0x02 | ||
75 | #define FSL_DMA_DGSR_EOLSI 0x01 | ||
76 | |||
77 | struct fsl_dma_ld_hw { | ||
78 | u64 __bitwise src_addr; | ||
79 | u64 __bitwise dst_addr; | ||
80 | u64 __bitwise next_ln_addr; | ||
81 | u32 __bitwise count; | ||
82 | u32 __bitwise reserve; | ||
83 | } __attribute__((aligned(32))); | ||
84 | |||
85 | struct fsl_desc_sw { | ||
86 | struct fsl_dma_ld_hw hw; | ||
87 | struct list_head node; | ||
88 | struct dma_async_tx_descriptor async_tx; | ||
89 | struct list_head *ld; | ||
90 | void *priv; | ||
91 | } __attribute__((aligned(32))); | ||
92 | |||
93 | struct fsl_dma_chan_regs { | ||
94 | u32 __bitwise mr; /* 0x00 - Mode Register */ | ||
95 | u32 __bitwise sr; /* 0x04 - Status Register */ | ||
96 | u64 __bitwise cdar; /* 0x08 - Current descriptor address register */ | ||
97 | u64 __bitwise sar; /* 0x10 - Source Address Register */ | ||
98 | u64 __bitwise dar; /* 0x18 - Destination Address Register */ | ||
99 | u32 __bitwise bcr; /* 0x20 - Byte Count Register */ | ||
100 | u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */ | ||
101 | }; | ||
102 | |||
103 | struct fsl_dma_chan; | ||
104 | #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 | ||
105 | |||
106 | struct fsl_dma_device { | ||
107 | void __iomem *reg_base; /* DGSR register base */ | ||
108 | struct resource reg; /* Resource for register */ | ||
109 | struct device *dev; | ||
110 | struct dma_device common; | ||
111 | struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; | ||
112 | u32 feature; /* The same as DMA channels */ | ||
113 | }; | ||
114 | |||
115 | /* Define macros for fsl_dma_chan->feature property */ | ||
116 | #define FSL_DMA_LITTLE_ENDIAN 0x00000000 | ||
117 | #define FSL_DMA_BIG_ENDIAN 0x00000001 | ||
118 | |||
119 | #define FSL_DMA_IP_MASK 0x00000ff0 | ||
120 | #define FSL_DMA_IP_85XX 0x00000010 | ||
121 | #define FSL_DMA_IP_83XX 0x00000020 | ||
122 | |||
123 | #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 | ||
124 | #define FSL_DMA_CHAN_START_EXT 0x00002000 | ||
125 | |||
126 | struct fsl_dma_chan { | ||
127 | struct fsl_dma_chan_regs __iomem *reg_base; | ||
128 | dma_cookie_t completed_cookie; /* The maximum cookie completed */ | ||
129 | spinlock_t desc_lock; /* Descriptor operation lock */ | ||
130 | struct list_head ld_queue; /* Link descriptors queue */ | ||
131 | struct dma_chan common; /* DMA common channel */ | ||
132 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
133 | struct device *dev; /* Channel device */ | ||
134 | struct resource reg; /* Resource for register */ | ||
135 | int irq; /* Channel IRQ */ | ||
136 | int id; /* Raw id of this channel */ | ||
137 | struct tasklet_struct tasklet; | ||
138 | u32 feature; | ||
139 | |||
140 | void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); | ||
141 | void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); | ||
142 | void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
143 | void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); | ||
144 | }; | ||
145 | |||
146 | #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) | ||
147 | #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) | ||
148 | #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) | ||
149 | |||
150 | #ifndef __powerpc64__ | ||
151 | static u64 in_be64(const u64 __iomem *addr) | ||
152 | { | ||
153 | return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1)); | ||
154 | } | ||
155 | |||
156 | static void out_be64(u64 __iomem *addr, u64 val) | ||
157 | { | ||
158 | out_be32((u32 *)addr, val >> 32); | ||
159 | out_be32((u32 *)addr + 1, (u32)val); | ||
160 | } | ||
161 | |||
162 | /* There are no asm instructions for 64-bit byte-reversed loads and stores */ | ||
163 | static u64 in_le64(const u64 __iomem *addr) | ||
164 | { | ||
165 | return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr)); | ||
166 | } | ||
167 | |||
168 | static void out_le64(u64 __iomem *addr, u64 val) | ||
169 | { | ||
170 | out_le32((u32 *)addr + 1, val >> 32); | ||
171 | out_le32((u32 *)addr, (u32)val); | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | #define DMA_IN(fsl_chan, addr, width) \ | ||
176 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
177 | in_be##width(addr) : in_le##width(addr)) | ||
178 | #define DMA_OUT(fsl_chan, addr, val, width) \ | ||
179 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
180 | out_be##width(addr, val) : out_le##width(addr, val)) | ||
181 | |||
182 | #define DMA_TO_CPU(fsl_chan, d, width) \ | ||
183 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
184 | be##width##_to_cpu(d) : le##width##_to_cpu(d)) | ||
185 | #define CPU_TO_DMA(fsl_chan, c, width) \ | ||
186 | (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \ | ||
187 | cpu_to_be##width(c) : cpu_to_le##width(c)) | ||
188 | |||
189 | #endif /* __DMA_FSLDMA_H */ | ||
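The accessor macros at the end of the header select big- or little-endian I/O helpers per channel at run time from the FSL_DMA_BIG_ENDIAN feature bit and paste the access width into the helper name. A standalone sketch of the same construction, with stub accessors standing in for the kernel's out_be32()/out_le32():

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_BIG_ENDIAN 0x00000001u     /* mirrors FSL_DMA_BIG_ENDIAN */

    /* Simplified stand-ins for the kernel's out_be32()/out_le32(). */
    static void out_be32_demo(uint32_t *addr, uint32_t val)
    {
            (void)addr;
            printf("out_be32: 0x%08x\n", val);
    }

    static void out_le32_demo(uint32_t *addr, uint32_t val)
    {
            (void)addr;
            printf("out_le32: 0x%08x\n", val);
    }

    /* Same shape as DMA_OUT(): the per-channel feature bit picks the accessor
     * at run time, and the access width is pasted into the helper's name. */
    #define DMA_OUT_DEMO(feature, addr, val, width)          \
            (((feature) & DEMO_BIG_ENDIAN) ?                 \
                    out_be##width##_demo(addr, val) :        \
                    out_le##width##_demo(addr, val))

    int main(void)
    {
            uint32_t mr = 0;

            DMA_OUT_DEMO(DEMO_BIG_ENDIAN, &mr, 0x00000200, 32);  /* 85xx-style channel */
            DMA_OUT_DEMO(0, &mr, 0x00000080, 32);                /* 83xx-style channel */
            return 0;
    }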
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index dff38accc5c1..4017d9e7acd2 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c | |||
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( | |||
714 | new->len = len; | 714 | new->len = len; |
715 | new->dst = dma_dest; | 715 | new->dst = dma_dest; |
716 | new->src = dma_src; | 716 | new->src = dma_src; |
717 | new->async_tx.ack = 0; | ||
717 | return &new->async_tx; | 718 | return &new->async_tx; |
718 | } else | 719 | } else |
719 | return NULL; | 720 | return NULL; |
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( | |||
741 | new->len = len; | 742 | new->len = len; |
742 | new->dst = dma_dest; | 743 | new->dst = dma_dest; |
743 | new->src = dma_src; | 744 | new->src = dma_src; |
745 | new->async_tx.ack = 0; | ||
744 | return &new->async_tx; | 746 | return &new->async_tx; |
745 | } else | 747 | } else |
746 | return NULL; | 748 | return NULL; |
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c index 3e9719948a8e..a03462750b95 100644 --- a/drivers/firewire/fw-card.c +++ b/drivers/firewire/fw-card.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/device.h> | 22 | #include <linux/device.h> |
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/crc-itu-t.h> | 24 | #include <linux/crc-itu-t.h> |
@@ -214,17 +215,29 @@ static void | |||
214 | fw_card_bm_work(struct work_struct *work) | 215 | fw_card_bm_work(struct work_struct *work) |
215 | { | 216 | { |
216 | struct fw_card *card = container_of(work, struct fw_card, work.work); | 217 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
217 | struct fw_device *root; | 218 | struct fw_device *root_device; |
219 | struct fw_node *root_node, *local_node; | ||
218 | struct bm_data bmd; | 220 | struct bm_data bmd; |
219 | unsigned long flags; | 221 | unsigned long flags; |
220 | int root_id, new_root_id, irm_id, gap_count, generation, grace; | 222 | int root_id, new_root_id, irm_id, gap_count, generation, grace; |
221 | int do_reset = 0; | 223 | int do_reset = 0; |
222 | 224 | ||
223 | spin_lock_irqsave(&card->lock, flags); | 225 | spin_lock_irqsave(&card->lock, flags); |
226 | local_node = card->local_node; | ||
227 | root_node = card->root_node; | ||
228 | |||
229 | if (local_node == NULL) { | ||
230 | spin_unlock_irqrestore(&card->lock, flags); | ||
231 | return; | ||
232 | } | ||
233 | fw_node_get(local_node); | ||
234 | fw_node_get(root_node); | ||
224 | 235 | ||
225 | generation = card->generation; | 236 | generation = card->generation; |
226 | root = card->root_node->data; | 237 | root_device = root_node->data; |
227 | root_id = card->root_node->node_id; | 238 | if (root_device) |
239 | fw_device_get(root_device); | ||
240 | root_id = root_node->node_id; | ||
228 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); | 241 | grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10)); |
229 | 242 | ||
230 | if (card->bm_generation + 1 == generation || | 243 | if (card->bm_generation + 1 == generation || |
@@ -243,14 +256,14 @@ fw_card_bm_work(struct work_struct *work) | |||
243 | 256 | ||
244 | irm_id = card->irm_node->node_id; | 257 | irm_id = card->irm_node->node_id; |
245 | if (!card->irm_node->link_on) { | 258 | if (!card->irm_node->link_on) { |
246 | new_root_id = card->local_node->node_id; | 259 | new_root_id = local_node->node_id; |
247 | fw_notify("IRM has link off, making local node (%02x) root.\n", | 260 | fw_notify("IRM has link off, making local node (%02x) root.\n", |
248 | new_root_id); | 261 | new_root_id); |
249 | goto pick_me; | 262 | goto pick_me; |
250 | } | 263 | } |
251 | 264 | ||
252 | bmd.lock.arg = cpu_to_be32(0x3f); | 265 | bmd.lock.arg = cpu_to_be32(0x3f); |
253 | bmd.lock.data = cpu_to_be32(card->local_node->node_id); | 266 | bmd.lock.data = cpu_to_be32(local_node->node_id); |
254 | 267 | ||
255 | spin_unlock_irqrestore(&card->lock, flags); | 268 | spin_unlock_irqrestore(&card->lock, flags); |
256 | 269 | ||
@@ -267,12 +280,12 @@ fw_card_bm_work(struct work_struct *work) | |||
267 | * Another bus reset happened. Just return, | 280 | * Another bus reset happened. Just return, |
268 | * the BM work has been rescheduled. | 281 | * the BM work has been rescheduled. |
269 | */ | 282 | */ |
270 | return; | 283 | goto out; |
271 | } | 284 | } |
272 | 285 | ||
273 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) | 286 | if (bmd.rcode == RCODE_COMPLETE && bmd.old != 0x3f) |
274 | /* Somebody else is BM, let them do the work. */ | 287 | /* Somebody else is BM, let them do the work. */ |
275 | return; | 288 | goto out; |
276 | 289 | ||
277 | spin_lock_irqsave(&card->lock, flags); | 290 | spin_lock_irqsave(&card->lock, flags); |
278 | if (bmd.rcode != RCODE_COMPLETE) { | 291 | if (bmd.rcode != RCODE_COMPLETE) { |
@@ -282,7 +295,7 @@ fw_card_bm_work(struct work_struct *work) | |||
282 | * do a bus reset and pick the local node as | 295 | * do a bus reset and pick the local node as |
283 | * root, and thus, IRM. | 296 | * root, and thus, IRM. |
284 | */ | 297 | */ |
285 | new_root_id = card->local_node->node_id; | 298 | new_root_id = local_node->node_id; |
286 | fw_notify("BM lock failed, making local node (%02x) root.\n", | 299 | fw_notify("BM lock failed, making local node (%02x) root.\n", |
287 | new_root_id); | 300 | new_root_id); |
288 | goto pick_me; | 301 | goto pick_me; |
@@ -295,7 +308,7 @@ fw_card_bm_work(struct work_struct *work) | |||
295 | */ | 308 | */ |
296 | spin_unlock_irqrestore(&card->lock, flags); | 309 | spin_unlock_irqrestore(&card->lock, flags); |
297 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); | 310 | schedule_delayed_work(&card->work, DIV_ROUND_UP(HZ, 10)); |
298 | return; | 311 | goto out; |
299 | } | 312 | } |
300 | 313 | ||
301 | /* | 314 | /* |
@@ -305,20 +318,20 @@ fw_card_bm_work(struct work_struct *work) | |||
305 | */ | 318 | */ |
306 | card->bm_generation = generation; | 319 | card->bm_generation = generation; |
307 | 320 | ||
308 | if (root == NULL) { | 321 | if (root_device == NULL) { |
309 | /* | 322 | /* |
310 | * Either link_on is false, or we failed to read the | 323 | * Either link_on is false, or we failed to read the |
311 | * config rom. In either case, pick another root. | 324 | * config rom. In either case, pick another root. |
312 | */ | 325 | */ |
313 | new_root_id = card->local_node->node_id; | 326 | new_root_id = local_node->node_id; |
314 | } else if (atomic_read(&root->state) != FW_DEVICE_RUNNING) { | 327 | } else if (atomic_read(&root_device->state) != FW_DEVICE_RUNNING) { |
315 | /* | 328 | /* |
316 | * If we haven't probed this device yet, bail out now | 329 | * If we haven't probed this device yet, bail out now |
317 | * and let's try again once that's done. | 330 | * and let's try again once that's done. |
318 | */ | 331 | */ |
319 | spin_unlock_irqrestore(&card->lock, flags); | 332 | spin_unlock_irqrestore(&card->lock, flags); |
320 | return; | 333 | goto out; |
321 | } else if (root->config_rom[2] & BIB_CMC) { | 334 | } else if (root_device->config_rom[2] & BIB_CMC) { |
322 | /* | 335 | /* |
323 | * FIXME: I suppose we should set the cmstr bit in the | 336 | * FIXME: I suppose we should set the cmstr bit in the |
324 | * STATE_CLEAR register of this node, as described in | 337 | * STATE_CLEAR register of this node, as described in |
@@ -332,7 +345,7 @@ fw_card_bm_work(struct work_struct *work) | |||
332 | * successfully read the config rom, but it's not | 345 | * successfully read the config rom, but it's not |
333 | * cycle master capable. | 346 | * cycle master capable. |
334 | */ | 347 | */ |
335 | new_root_id = card->local_node->node_id; | 348 | new_root_id = local_node->node_id; |
336 | } | 349 | } |
337 | 350 | ||
338 | pick_me: | 351 | pick_me: |
@@ -341,8 +354,8 @@ fw_card_bm_work(struct work_struct *work) | |||
341 | * the typically much larger 1394b beta repeater delays though. | 354 | * the typically much larger 1394b beta repeater delays though. |
342 | */ | 355 | */ |
343 | if (!card->beta_repeaters_present && | 356 | if (!card->beta_repeaters_present && |
344 | card->root_node->max_hops < ARRAY_SIZE(gap_count_table)) | 357 | root_node->max_hops < ARRAY_SIZE(gap_count_table)) |
345 | gap_count = gap_count_table[card->root_node->max_hops]; | 358 | gap_count = gap_count_table[root_node->max_hops]; |
346 | else | 359 | else |
347 | gap_count = 63; | 360 | gap_count = 63; |
348 | 361 | ||
@@ -364,6 +377,11 @@ fw_card_bm_work(struct work_struct *work) | |||
364 | fw_send_phy_config(card, new_root_id, generation, gap_count); | 377 | fw_send_phy_config(card, new_root_id, generation, gap_count); |
365 | fw_core_initiate_bus_reset(card, 1); | 378 | fw_core_initiate_bus_reset(card, 1); |
366 | } | 379 | } |
380 | out: | ||
381 | if (root_device) | ||
382 | fw_device_put(root_device); | ||
383 | fw_node_put(root_node); | ||
384 | fw_node_put(local_node); | ||
367 | } | 385 | } |
368 | 386 | ||
369 | static void | 387 | static void |
@@ -381,6 +399,7 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver, | |||
381 | static atomic_t index = ATOMIC_INIT(-1); | 399 | static atomic_t index = ATOMIC_INIT(-1); |
382 | 400 | ||
383 | kref_init(&card->kref); | 401 | kref_init(&card->kref); |
402 | atomic_set(&card->device_count, 0); | ||
384 | card->index = atomic_inc_return(&index); | 403 | card->index = atomic_inc_return(&index); |
385 | card->driver = driver; | 404 | card->driver = driver; |
386 | card->device = device; | 405 | card->device = device; |
@@ -511,8 +530,14 @@ fw_core_remove_card(struct fw_card *card) | |||
511 | card->driver = &dummy_driver; | 530 | card->driver = &dummy_driver; |
512 | 531 | ||
513 | fw_destroy_nodes(card); | 532 | fw_destroy_nodes(card); |
514 | flush_scheduled_work(); | 533 | /* |
534 | * Wait for all device workqueue jobs to finish. Otherwise the | ||
535 | * firewire-core module could be unloaded before the jobs ran. | ||
536 | */ | ||
537 | while (atomic_read(&card->device_count) > 0) | ||
538 | msleep(100); | ||
515 | 539 | ||
540 | cancel_delayed_work_sync(&card->work); | ||
516 | fw_flush_transactions(card); | 541 | fw_flush_transactions(card); |
517 | del_timer_sync(&card->flush_timer); | 542 | del_timer_sync(&card->flush_timer); |
518 | 543 | ||
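The fw_card_bm_work() changes above follow a single pattern: pin the topology objects with counted references while card->lock is held, turn every early return into a goto out, and drop the references in one place. A stripped-down sketch of that shape, assuming only the fw_node_get()/fw_node_put() helpers already used in this file:

    static void bm_work_skeleton(struct fw_card *card)
    {
            struct fw_node *local, *root;
            unsigned long flags;

            spin_lock_irqsave(&card->lock, flags);
            local = card->local_node;
            root = card->root_node;
            if (local == NULL) {                    /* card is being shut down */
                    spin_unlock_irqrestore(&card->lock, flags);
                    return;
            }
            fw_node_get(local);                     /* keep both nodes valid     */
            fw_node_get(root);                      /* after the lock is dropped */
            spin_unlock_irqrestore(&card->lock, flags);

            /* ... work that sleeps or retakes the lock ... */

            fw_node_put(root);                      /* one exit path releases    */
            fw_node_put(local);                     /* everything taken above    */
    }

The device_count loop added to fw_core_remove_card() serves the same goal at module-unload time: it waits for all fw_device release callbacks before the core can go away.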
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c index 7e73cbaa4121..46bc197a047f 100644 --- a/drivers/firewire/fw-cdev.c +++ b/drivers/firewire/fw-cdev.c | |||
@@ -109,15 +109,17 @@ static int fw_device_op_open(struct inode *inode, struct file *file) | |||
109 | struct client *client; | 109 | struct client *client; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | 111 | ||
112 | device = fw_device_from_devt(inode->i_rdev); | 112 | device = fw_device_get_by_devt(inode->i_rdev); |
113 | if (device == NULL) | 113 | if (device == NULL) |
114 | return -ENODEV; | 114 | return -ENODEV; |
115 | 115 | ||
116 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 116 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
117 | if (client == NULL) | 117 | if (client == NULL) { |
118 | fw_device_put(device); | ||
118 | return -ENOMEM; | 119 | return -ENOMEM; |
120 | } | ||
119 | 121 | ||
120 | client->device = fw_device_get(device); | 122 | client->device = device; |
121 | INIT_LIST_HEAD(&client->event_list); | 123 | INIT_LIST_HEAD(&client->event_list); |
122 | INIT_LIST_HEAD(&client->resource_list); | 124 | INIT_LIST_HEAD(&client->resource_list); |
123 | spin_lock_init(&client->lock); | 125 | spin_lock_init(&client->lock); |
@@ -644,6 +646,10 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) | |||
644 | struct fw_cdev_create_iso_context *request = buffer; | 646 | struct fw_cdev_create_iso_context *request = buffer; |
645 | struct fw_iso_context *context; | 647 | struct fw_iso_context *context; |
646 | 648 | ||
649 | /* We only support one context at this time. */ | ||
650 | if (client->iso_context != NULL) | ||
651 | return -EBUSY; | ||
652 | |||
647 | if (request->channel > 63) | 653 | if (request->channel > 63) |
648 | return -EINVAL; | 654 | return -EINVAL; |
649 | 655 | ||
@@ -790,8 +796,9 @@ static int ioctl_start_iso(struct client *client, void *buffer) | |||
790 | { | 796 | { |
791 | struct fw_cdev_start_iso *request = buffer; | 797 | struct fw_cdev_start_iso *request = buffer; |
792 | 798 | ||
793 | if (request->handle != 0) | 799 | if (client->iso_context == NULL || request->handle != 0) |
794 | return -EINVAL; | 800 | return -EINVAL; |
801 | |||
795 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 802 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
796 | if (request->tags == 0 || request->tags > 15) | 803 | if (request->tags == 0 || request->tags > 15) |
797 | return -EINVAL; | 804 | return -EINVAL; |
@@ -808,7 +815,7 @@ static int ioctl_stop_iso(struct client *client, void *buffer) | |||
808 | { | 815 | { |
809 | struct fw_cdev_stop_iso *request = buffer; | 816 | struct fw_cdev_stop_iso *request = buffer; |
810 | 817 | ||
811 | if (request->handle != 0) | 818 | if (client->iso_context == NULL || request->handle != 0) |
812 | return -EINVAL; | 819 | return -EINVAL; |
813 | 820 | ||
814 | return fw_iso_context_stop(client->iso_context); | 821 | return fw_iso_context_stop(client->iso_context); |
diff --git a/drivers/firewire/fw-device.c b/drivers/firewire/fw-device.c index de9066e69adf..870125a3638e 100644 --- a/drivers/firewire/fw-device.c +++ b/drivers/firewire/fw-device.c | |||
@@ -150,21 +150,10 @@ struct bus_type fw_bus_type = { | |||
150 | }; | 150 | }; |
151 | EXPORT_SYMBOL(fw_bus_type); | 151 | EXPORT_SYMBOL(fw_bus_type); |
152 | 152 | ||
153 | struct fw_device *fw_device_get(struct fw_device *device) | ||
154 | { | ||
155 | get_device(&device->device); | ||
156 | |||
157 | return device; | ||
158 | } | ||
159 | |||
160 | void fw_device_put(struct fw_device *device) | ||
161 | { | ||
162 | put_device(&device->device); | ||
163 | } | ||
164 | |||
165 | static void fw_device_release(struct device *dev) | 153 | static void fw_device_release(struct device *dev) |
166 | { | 154 | { |
167 | struct fw_device *device = fw_device(dev); | 155 | struct fw_device *device = fw_device(dev); |
156 | struct fw_card *card = device->card; | ||
168 | unsigned long flags; | 157 | unsigned long flags; |
169 | 158 | ||
170 | /* | 159 | /* |
@@ -176,9 +165,9 @@ static void fw_device_release(struct device *dev) | |||
176 | spin_unlock_irqrestore(&device->card->lock, flags); | 165 | spin_unlock_irqrestore(&device->card->lock, flags); |
177 | 166 | ||
178 | fw_node_put(device->node); | 167 | fw_node_put(device->node); |
179 | fw_card_put(device->card); | ||
180 | kfree(device->config_rom); | 168 | kfree(device->config_rom); |
181 | kfree(device); | 169 | kfree(device); |
170 | atomic_dec(&card->device_count); | ||
182 | } | 171 | } |
183 | 172 | ||
184 | int fw_device_enable_phys_dma(struct fw_device *device) | 173 | int fw_device_enable_phys_dma(struct fw_device *device) |
@@ -358,12 +347,9 @@ static ssize_t | |||
358 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) | 347 | guid_show(struct device *dev, struct device_attribute *attr, char *buf) |
359 | { | 348 | { |
360 | struct fw_device *device = fw_device(dev); | 349 | struct fw_device *device = fw_device(dev); |
361 | u64 guid; | ||
362 | |||
363 | guid = ((u64)device->config_rom[3] << 32) | device->config_rom[4]; | ||
364 | 350 | ||
365 | return snprintf(buf, PAGE_SIZE, "0x%016llx\n", | 351 | return snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", |
366 | (unsigned long long)guid); | 352 | device->config_rom[3], device->config_rom[4]); |
367 | } | 353 | } |
368 | 354 | ||
369 | static struct device_attribute fw_device_attributes[] = { | 355 | static struct device_attribute fw_device_attributes[] = { |
@@ -610,12 +596,14 @@ static DECLARE_RWSEM(idr_rwsem); | |||
610 | static DEFINE_IDR(fw_device_idr); | 596 | static DEFINE_IDR(fw_device_idr); |
611 | int fw_cdev_major; | 597 | int fw_cdev_major; |
612 | 598 | ||
613 | struct fw_device *fw_device_from_devt(dev_t devt) | 599 | struct fw_device *fw_device_get_by_devt(dev_t devt) |
614 | { | 600 | { |
615 | struct fw_device *device; | 601 | struct fw_device *device; |
616 | 602 | ||
617 | down_read(&idr_rwsem); | 603 | down_read(&idr_rwsem); |
618 | device = idr_find(&fw_device_idr, MINOR(devt)); | 604 | device = idr_find(&fw_device_idr, MINOR(devt)); |
605 | if (device) | ||
606 | fw_device_get(device); | ||
619 | up_read(&idr_rwsem); | 607 | up_read(&idr_rwsem); |
620 | 608 | ||
621 | return device; | 609 | return device; |
@@ -627,13 +615,14 @@ static void fw_device_shutdown(struct work_struct *work) | |||
627 | container_of(work, struct fw_device, work.work); | 615 | container_of(work, struct fw_device, work.work); |
628 | int minor = MINOR(device->device.devt); | 616 | int minor = MINOR(device->device.devt); |
629 | 617 | ||
630 | down_write(&idr_rwsem); | ||
631 | idr_remove(&fw_device_idr, minor); | ||
632 | up_write(&idr_rwsem); | ||
633 | |||
634 | fw_device_cdev_remove(device); | 618 | fw_device_cdev_remove(device); |
635 | device_for_each_child(&device->device, NULL, shutdown_unit); | 619 | device_for_each_child(&device->device, NULL, shutdown_unit); |
636 | device_unregister(&device->device); | 620 | device_unregister(&device->device); |
621 | |||
622 | down_write(&idr_rwsem); | ||
623 | idr_remove(&fw_device_idr, minor); | ||
624 | up_write(&idr_rwsem); | ||
625 | fw_device_put(device); | ||
637 | } | 626 | } |
638 | 627 | ||
639 | static struct device_type fw_device_type = { | 628 | static struct device_type fw_device_type = { |
@@ -668,7 +657,8 @@ static void fw_device_init(struct work_struct *work) | |||
668 | */ | 657 | */ |
669 | 658 | ||
670 | if (read_bus_info_block(device, device->generation) < 0) { | 659 | if (read_bus_info_block(device, device->generation) < 0) { |
671 | if (device->config_rom_retries < MAX_RETRIES) { | 660 | if (device->config_rom_retries < MAX_RETRIES && |
661 | atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { | ||
672 | device->config_rom_retries++; | 662 | device->config_rom_retries++; |
673 | schedule_delayed_work(&device->work, RETRY_DELAY); | 663 | schedule_delayed_work(&device->work, RETRY_DELAY); |
674 | } else { | 664 | } else { |
@@ -682,10 +672,13 @@ static void fw_device_init(struct work_struct *work) | |||
682 | } | 672 | } |
683 | 673 | ||
684 | err = -ENOMEM; | 674 | err = -ENOMEM; |
675 | |||
676 | fw_device_get(device); | ||
685 | down_write(&idr_rwsem); | 677 | down_write(&idr_rwsem); |
686 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) | 678 | if (idr_pre_get(&fw_device_idr, GFP_KERNEL)) |
687 | err = idr_get_new(&fw_device_idr, device, &minor); | 679 | err = idr_get_new(&fw_device_idr, device, &minor); |
688 | up_write(&idr_rwsem); | 680 | up_write(&idr_rwsem); |
681 | |||
689 | if (err < 0) | 682 | if (err < 0) |
690 | goto error; | 683 | goto error; |
691 | 684 | ||
@@ -717,13 +710,22 @@ static void fw_device_init(struct work_struct *work) | |||
717 | */ | 710 | */ |
718 | if (atomic_cmpxchg(&device->state, | 711 | if (atomic_cmpxchg(&device->state, |
719 | FW_DEVICE_INITIALIZING, | 712 | FW_DEVICE_INITIALIZING, |
720 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) | 713 | FW_DEVICE_RUNNING) == FW_DEVICE_SHUTDOWN) { |
721 | fw_device_shutdown(&device->work.work); | 714 | fw_device_shutdown(&device->work.work); |
722 | else | 715 | } else { |
723 | fw_notify("created new fw device %s " | 716 | if (device->config_rom_retries) |
724 | "(%d config rom retries, S%d00)\n", | 717 | fw_notify("created device %s: GUID %08x%08x, S%d00, " |
725 | device->device.bus_id, device->config_rom_retries, | 718 | "%d config ROM retries\n", |
726 | 1 << device->max_speed); | 719 | device->device.bus_id, |
720 | device->config_rom[3], device->config_rom[4], | ||
721 | 1 << device->max_speed, | ||
722 | device->config_rom_retries); | ||
723 | else | ||
724 | fw_notify("created device %s: GUID %08x%08x, S%d00\n", | ||
725 | device->device.bus_id, | ||
726 | device->config_rom[3], device->config_rom[4], | ||
727 | 1 << device->max_speed); | ||
728 | } | ||
727 | 729 | ||
728 | /* | 730 | /* |
729 | * Reschedule the IRM work if we just finished reading the | 731 | * Reschedule the IRM work if we just finished reading the |
@@ -741,7 +743,9 @@ static void fw_device_init(struct work_struct *work) | |||
741 | idr_remove(&fw_device_idr, minor); | 743 | idr_remove(&fw_device_idr, minor); |
742 | up_write(&idr_rwsem); | 744 | up_write(&idr_rwsem); |
743 | error: | 745 | error: |
744 | put_device(&device->device); | 746 | fw_device_put(device); /* fw_device_idr's reference */ |
747 | |||
748 | put_device(&device->device); /* our reference */ | ||
745 | } | 749 | } |
746 | 750 | ||
747 | static int update_unit(struct device *dev, void *data) | 751 | static int update_unit(struct device *dev, void *data) |
@@ -791,7 +795,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) | |||
791 | */ | 795 | */ |
792 | device_initialize(&device->device); | 796 | device_initialize(&device->device); |
793 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); | 797 | atomic_set(&device->state, FW_DEVICE_INITIALIZING); |
794 | device->card = fw_card_get(card); | 798 | atomic_inc(&card->device_count); |
799 | device->card = card; | ||
795 | device->node = fw_node_get(node); | 800 | device->node = fw_node_get(node); |
796 | device->node_id = node->node_id; | 801 | device->node_id = node->node_id; |
797 | device->generation = card->generation; | 802 | device->generation = card->generation; |
diff --git a/drivers/firewire/fw-device.h b/drivers/firewire/fw-device.h index 0854fe2bc110..78ecd3991b7f 100644 --- a/drivers/firewire/fw-device.h +++ b/drivers/firewire/fw-device.h | |||
@@ -76,14 +76,26 @@ fw_device_is_shutdown(struct fw_device *device) | |||
76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; | 76 | return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN; |
77 | } | 77 | } |
78 | 78 | ||
79 | struct fw_device *fw_device_get(struct fw_device *device); | 79 | static inline struct fw_device * |
80 | void fw_device_put(struct fw_device *device); | 80 | fw_device_get(struct fw_device *device) |
81 | { | ||
82 | get_device(&device->device); | ||
83 | |||
84 | return device; | ||
85 | } | ||
86 | |||
87 | static inline void | ||
88 | fw_device_put(struct fw_device *device) | ||
89 | { | ||
90 | put_device(&device->device); | ||
91 | } | ||
92 | |||
93 | struct fw_device *fw_device_get_by_devt(dev_t devt); | ||
81 | int fw_device_enable_phys_dma(struct fw_device *device); | 94 | int fw_device_enable_phys_dma(struct fw_device *device); |
82 | 95 | ||
83 | void fw_device_cdev_update(struct fw_device *device); | 96 | void fw_device_cdev_update(struct fw_device *device); |
84 | void fw_device_cdev_remove(struct fw_device *device); | 97 | void fw_device_cdev_remove(struct fw_device *device); |
85 | 98 | ||
86 | struct fw_device *fw_device_from_devt(dev_t devt); | ||
87 | extern int fw_cdev_major; | 99 | extern int fw_cdev_major; |
88 | 100 | ||
89 | struct fw_unit { | 101 | struct fw_unit { |
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c index 19ece9b6d742..03069a454c07 100644 --- a/drivers/firewire/fw-sbp2.c +++ b/drivers/firewire/fw-sbp2.c | |||
@@ -28,14 +28,15 @@ | |||
28 | * and many others. | 28 | * and many others. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <linux/blkdev.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/device.h> | ||
34 | #include <linux/dma-mapping.h> | ||
31 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/mod_devicetable.h> | ||
32 | #include <linux/module.h> | 37 | #include <linux/module.h> |
33 | #include <linux/moduleparam.h> | 38 | #include <linux/moduleparam.h> |
34 | #include <linux/mod_devicetable.h> | ||
35 | #include <linux/device.h> | ||
36 | #include <linux/scatterlist.h> | 39 | #include <linux/scatterlist.h> |
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/string.h> | 40 | #include <linux/string.h> |
40 | #include <linux/stringify.h> | 41 | #include <linux/stringify.h> |
41 | #include <linux/timer.h> | 42 | #include <linux/timer.h> |
@@ -47,9 +48,9 @@ | |||
47 | #include <scsi/scsi_device.h> | 48 | #include <scsi/scsi_device.h> |
48 | #include <scsi/scsi_host.h> | 49 | #include <scsi/scsi_host.h> |
49 | 50 | ||
50 | #include "fw-transaction.h" | ||
51 | #include "fw-topology.h" | ||
52 | #include "fw-device.h" | 51 | #include "fw-device.h" |
52 | #include "fw-topology.h" | ||
53 | #include "fw-transaction.h" | ||
53 | 54 | ||
54 | /* | 55 | /* |
55 | * So far only bridges from Oxford Semiconductor are known to support | 56 | * So far only bridges from Oxford Semiconductor are known to support |
@@ -82,6 +83,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
82 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 83 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
83 | * Don't use this with devices which don't have this bug. | 84 | * Don't use this with devices which don't have this bug. |
84 | * | 85 | * |
86 | * - delay inquiry | ||
87 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
88 | * | ||
85 | * - override internal blacklist | 89 | * - override internal blacklist |
86 | * Instead of adding to the built-in blacklist, use only the workarounds | 90 | * Instead of adding to the built-in blacklist, use only the workarounds |
87 | * specified in the module load parameter. | 91 | * specified in the module load parameter. |
@@ -91,6 +95,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
91 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 95 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
92 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 96 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
93 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 97 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
98 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
99 | #define SBP2_INQUIRY_DELAY 12 | ||
94 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 100 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
95 | 101 | ||
96 | static int sbp2_param_workarounds; | 102 | static int sbp2_param_workarounds; |
@@ -100,6 +106,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
100 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 106 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
101 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 107 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
102 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 108 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
109 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
103 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 110 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
104 | ", or a combination)"); | 111 | ", or a combination)"); |
105 | 112 | ||
@@ -115,7 +122,6 @@ static const char sbp2_driver_name[] = "sbp2"; | |||
115 | struct sbp2_logical_unit { | 122 | struct sbp2_logical_unit { |
116 | struct sbp2_target *tgt; | 123 | struct sbp2_target *tgt; |
117 | struct list_head link; | 124 | struct list_head link; |
118 | struct scsi_device *sdev; | ||
119 | struct fw_address_handler address_handler; | 125 | struct fw_address_handler address_handler; |
120 | struct list_head orb_list; | 126 | struct list_head orb_list; |
121 | 127 | ||
@@ -132,6 +138,8 @@ struct sbp2_logical_unit { | |||
132 | int generation; | 138 | int generation; |
133 | int retries; | 139 | int retries; |
134 | struct delayed_work work; | 140 | struct delayed_work work; |
141 | bool has_sdev; | ||
142 | bool blocked; | ||
135 | }; | 143 | }; |
136 | 144 | ||
137 | /* | 145 | /* |
@@ -141,16 +149,18 @@ struct sbp2_logical_unit { | |||
141 | struct sbp2_target { | 149 | struct sbp2_target { |
142 | struct kref kref; | 150 | struct kref kref; |
143 | struct fw_unit *unit; | 151 | struct fw_unit *unit; |
152 | const char *bus_id; | ||
153 | struct list_head lu_list; | ||
144 | 154 | ||
145 | u64 management_agent_address; | 155 | u64 management_agent_address; |
146 | int directory_id; | 156 | int directory_id; |
147 | int node_id; | 157 | int node_id; |
148 | int address_high; | 158 | int address_high; |
149 | 159 | unsigned int workarounds; | |
150 | unsigned workarounds; | ||
151 | struct list_head lu_list; | ||
152 | |||
153 | unsigned int mgt_orb_timeout; | 160 | unsigned int mgt_orb_timeout; |
161 | |||
162 | int dont_block; /* counter for each logical unit */ | ||
163 | int blocked; /* ditto */ | ||
154 | }; | 164 | }; |
155 | 165 | ||
156 | /* | 166 | /* |
@@ -160,7 +170,7 @@ struct sbp2_target { | |||
160 | */ | 170 | */ |
161 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ | 171 | #define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */ |
162 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ | 172 | #define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */ |
163 | #define SBP2_ORB_TIMEOUT 2000 /* Timeout in ms */ | 173 | #define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */ |
164 | #define SBP2_ORB_NULL 0x80000000 | 174 | #define SBP2_ORB_NULL 0x80000000 |
165 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 | 175 | #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 |
166 | 176 | ||
@@ -297,7 +307,7 @@ struct sbp2_command_orb { | |||
297 | static const struct { | 307 | static const struct { |
298 | u32 firmware_revision; | 308 | u32 firmware_revision; |
299 | u32 model; | 309 | u32 model; |
300 | unsigned workarounds; | 310 | unsigned int workarounds; |
301 | } sbp2_workarounds_table[] = { | 311 | } sbp2_workarounds_table[] = { |
302 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { | 312 | /* DViCO Momobay CX-1 with TSB42AA9 bridge */ { |
303 | .firmware_revision = 0x002800, | 313 | .firmware_revision = 0x002800, |
@@ -305,6 +315,11 @@ static const struct { | |||
305 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 315 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
306 | SBP2_WORKAROUND_MODE_SENSE_8, | 316 | SBP2_WORKAROUND_MODE_SENSE_8, |
307 | }, | 317 | }, |
318 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
319 | .firmware_revision = 0x002800, | ||
320 | .model = 0x000000, | ||
321 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
322 | }, | ||
308 | /* Initio bridges, actually only needed for some older ones */ { | 323 | /* Initio bridges, actually only needed for some older ones */ { |
309 | .firmware_revision = 0x000200, | 324 | .firmware_revision = 0x000200, |
310 | .model = ~0, | 325 | .model = ~0, |
@@ -501,6 +516,9 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
501 | unsigned int timeout; | 516 | unsigned int timeout; |
502 | int retval = -ENOMEM; | 517 | int retval = -ENOMEM; |
503 | 518 | ||
519 | if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device)) | ||
520 | return 0; | ||
521 | |||
504 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); | 522 | orb = kzalloc(sizeof(*orb), GFP_ATOMIC); |
505 | if (orb == NULL) | 523 | if (orb == NULL) |
506 | return -ENOMEM; | 524 | return -ENOMEM; |
@@ -553,20 +571,20 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
553 | 571 | ||
554 | retval = -EIO; | 572 | retval = -EIO; |
555 | if (sbp2_cancel_orbs(lu) == 0) { | 573 | if (sbp2_cancel_orbs(lu) == 0) { |
556 | fw_error("orb reply timed out, rcode=0x%02x\n", | 574 | fw_error("%s: orb reply timed out, rcode=0x%02x\n", |
557 | orb->base.rcode); | 575 | lu->tgt->bus_id, orb->base.rcode); |
558 | goto out; | 576 | goto out; |
559 | } | 577 | } |
560 | 578 | ||
561 | if (orb->base.rcode != RCODE_COMPLETE) { | 579 | if (orb->base.rcode != RCODE_COMPLETE) { |
562 | fw_error("management write failed, rcode 0x%02x\n", | 580 | fw_error("%s: management write failed, rcode 0x%02x\n", |
563 | orb->base.rcode); | 581 | lu->tgt->bus_id, orb->base.rcode); |
564 | goto out; | 582 | goto out; |
565 | } | 583 | } |
566 | 584 | ||
567 | if (STATUS_GET_RESPONSE(orb->status) != 0 || | 585 | if (STATUS_GET_RESPONSE(orb->status) != 0 || |
568 | STATUS_GET_SBP_STATUS(orb->status) != 0) { | 586 | STATUS_GET_SBP_STATUS(orb->status) != 0) { |
569 | fw_error("error status: %d:%d\n", | 587 | fw_error("%s: error status: %d:%d\n", lu->tgt->bus_id, |
570 | STATUS_GET_RESPONSE(orb->status), | 588 | STATUS_GET_RESPONSE(orb->status), |
571 | STATUS_GET_SBP_STATUS(orb->status)); | 589 | STATUS_GET_SBP_STATUS(orb->status)); |
572 | goto out; | 590 | goto out; |
@@ -590,29 +608,158 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id, | |||
590 | 608 | ||
591 | static void | 609 | static void |
592 | complete_agent_reset_write(struct fw_card *card, int rcode, | 610 | complete_agent_reset_write(struct fw_card *card, int rcode, |
593 | void *payload, size_t length, void *data) | 611 | void *payload, size_t length, void *done) |
594 | { | 612 | { |
595 | struct fw_transaction *t = data; | 613 | complete(done); |
614 | } | ||
596 | 615 | ||
597 | kfree(t); | 616 | static void sbp2_agent_reset(struct sbp2_logical_unit *lu) |
617 | { | ||
618 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | ||
619 | DECLARE_COMPLETION_ONSTACK(done); | ||
620 | struct fw_transaction t; | ||
621 | static u32 z; | ||
622 | |||
623 | fw_send_request(device->card, &t, TCODE_WRITE_QUADLET_REQUEST, | ||
624 | lu->tgt->node_id, lu->generation, device->max_speed, | ||
625 | lu->command_block_agent_address + SBP2_AGENT_RESET, | ||
626 | &z, sizeof(z), complete_agent_reset_write, &done); | ||
627 | wait_for_completion(&done); | ||
628 | } | ||
629 | |||
630 | static void | ||
631 | complete_agent_reset_write_no_wait(struct fw_card *card, int rcode, | ||
632 | void *payload, size_t length, void *data) | ||
633 | { | ||
634 | kfree(data); | ||
598 | } | 635 | } |
599 | 636 | ||
600 | static int sbp2_agent_reset(struct sbp2_logical_unit *lu) | 637 | static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu) |
601 | { | 638 | { |
602 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 639 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
603 | struct fw_transaction *t; | 640 | struct fw_transaction *t; |
604 | static u32 zero; | 641 | static u32 z; |
605 | 642 | ||
606 | t = kzalloc(sizeof(*t), GFP_ATOMIC); | 643 | t = kmalloc(sizeof(*t), GFP_ATOMIC); |
607 | if (t == NULL) | 644 | if (t == NULL) |
608 | return -ENOMEM; | 645 | return; |
609 | 646 | ||
610 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, | 647 | fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST, |
611 | lu->tgt->node_id, lu->generation, device->max_speed, | 648 | lu->tgt->node_id, lu->generation, device->max_speed, |
612 | lu->command_block_agent_address + SBP2_AGENT_RESET, | 649 | lu->command_block_agent_address + SBP2_AGENT_RESET, |
613 | &zero, sizeof(zero), complete_agent_reset_write, t); | 650 | &z, sizeof(z), complete_agent_reset_write_no_wait, t); |
651 | } | ||
614 | 652 | ||
615 | return 0; | 653 | static void sbp2_set_generation(struct sbp2_logical_unit *lu, int generation) |
654 | { | ||
655 | struct fw_card *card = fw_device(lu->tgt->unit->device.parent)->card; | ||
656 | unsigned long flags; | ||
657 | |||
658 | /* serialize with comparisons of lu->generation and card->generation */ | ||
659 | spin_lock_irqsave(&card->lock, flags); | ||
660 | lu->generation = generation; | ||
661 | spin_unlock_irqrestore(&card->lock, flags); | ||
662 | } | ||
663 | |||
664 | static inline void sbp2_allow_block(struct sbp2_logical_unit *lu) | ||
665 | { | ||
666 | /* | ||
667 | * We may access dont_block without taking card->lock here: | ||
668 | * All callers of sbp2_allow_block() and all callers of sbp2_unblock() | ||
669 | * are currently serialized against each other. | ||
670 | * And a wrong result in sbp2_conditionally_block()'s access of | ||
671 | * dont_block is rather harmless, it simply misses its first chance. | ||
672 | */ | ||
673 | --lu->tgt->dont_block; | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * Blocks lu->tgt if all of the following conditions are met: | ||
678 | * - Login, INQUIRY, and high-level SCSI setup of all of the target's | ||
679 | * logical units have been finished (indicated by dont_block == 0). | ||
680 | * - lu->generation is stale. | ||
681 | * | ||
682 | * Note, scsi_block_requests() must be called while holding card->lock, | ||
683 | * otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to | ||
684 | * unblock the target. | ||
685 | */ | ||
686 | static void sbp2_conditionally_block(struct sbp2_logical_unit *lu) | ||
687 | { | ||
688 | struct sbp2_target *tgt = lu->tgt; | ||
689 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
690 | struct Scsi_Host *shost = | ||
691 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
692 | unsigned long flags; | ||
693 | |||
694 | spin_lock_irqsave(&card->lock, flags); | ||
695 | if (!tgt->dont_block && !lu->blocked && | ||
696 | lu->generation != card->generation) { | ||
697 | lu->blocked = true; | ||
698 | if (++tgt->blocked == 1) { | ||
699 | scsi_block_requests(shost); | ||
700 | fw_notify("blocked %s\n", lu->tgt->bus_id); | ||
701 | } | ||
702 | } | ||
703 | spin_unlock_irqrestore(&card->lock, flags); | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * Unblocks lu->tgt as soon as all its logical units can be unblocked. | ||
708 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
709 | * card->lock protected section. On the other hand, running it inside | ||
710 | * the section might clash with shost->host_lock. | ||
711 | */ | ||
712 | static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu) | ||
713 | { | ||
714 | struct sbp2_target *tgt = lu->tgt; | ||
715 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
716 | struct Scsi_Host *shost = | ||
717 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
718 | unsigned long flags; | ||
719 | bool unblock = false; | ||
720 | |||
721 | spin_lock_irqsave(&card->lock, flags); | ||
722 | if (lu->blocked && lu->generation == card->generation) { | ||
723 | lu->blocked = false; | ||
724 | unblock = --tgt->blocked == 0; | ||
725 | } | ||
726 | spin_unlock_irqrestore(&card->lock, flags); | ||
727 | |||
728 | if (unblock) { | ||
729 | scsi_unblock_requests(shost); | ||
730 | fw_notify("unblocked %s\n", lu->tgt->bus_id); | ||
731 | } | ||
732 | } | ||
733 | |||
734 | /* | ||
735 | * Prevents future blocking of tgt and unblocks it. | ||
736 | * Note, it is harmless to run scsi_unblock_requests() outside the | ||
737 | * card->lock protected section. On the other hand, running it inside | ||
738 | * the section might clash with shost->host_lock. | ||
739 | */ | ||
740 | static void sbp2_unblock(struct sbp2_target *tgt) | ||
741 | { | ||
742 | struct fw_card *card = fw_device(tgt->unit->device.parent)->card; | ||
743 | struct Scsi_Host *shost = | ||
744 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | ||
745 | unsigned long flags; | ||
746 | |||
747 | spin_lock_irqsave(&card->lock, flags); | ||
748 | ++tgt->dont_block; | ||
749 | spin_unlock_irqrestore(&card->lock, flags); | ||
750 | |||
751 | scsi_unblock_requests(shost); | ||
752 | } | ||
753 | |||
754 | static int sbp2_lun2int(u16 lun) | ||
755 | { | ||
756 | struct scsi_lun eight_bytes_lun; | ||
757 | |||
758 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | ||
759 | eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff; | ||
760 | eight_bytes_lun.scsi_lun[1] = lun & 0xff; | ||
761 | |||
762 | return scsilun_to_int(&eight_bytes_lun); | ||
616 | } | 763 | } |
617 | 764 | ||
618 | static void sbp2_release_target(struct kref *kref) | 765 | static void sbp2_release_target(struct kref *kref) |
@@ -621,26 +768,31 @@ static void sbp2_release_target(struct kref *kref) | |||
621 | struct sbp2_logical_unit *lu, *next; | 768 | struct sbp2_logical_unit *lu, *next; |
622 | struct Scsi_Host *shost = | 769 | struct Scsi_Host *shost = |
623 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); | 770 | container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
771 | struct scsi_device *sdev; | ||
624 | struct fw_device *device = fw_device(tgt->unit->device.parent); | 772 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
625 | 773 | ||
626 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { | 774 | /* prevent deadlocks */ |
627 | if (lu->sdev) | 775 | sbp2_unblock(tgt); |
628 | scsi_remove_device(lu->sdev); | ||
629 | 776 | ||
630 | if (!fw_device_is_shutdown(device)) | 777 | list_for_each_entry_safe(lu, next, &tgt->lu_list, link) { |
631 | sbp2_send_management_orb(lu, tgt->node_id, | 778 | sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun)); |
632 | lu->generation, SBP2_LOGOUT_REQUEST, | 779 | if (sdev) { |
633 | lu->login_id, NULL); | 780 | scsi_remove_device(sdev); |
781 | scsi_device_put(sdev); | ||
782 | } | ||
783 | sbp2_send_management_orb(lu, tgt->node_id, lu->generation, | ||
784 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
634 | 785 | ||
635 | fw_core_remove_address_handler(&lu->address_handler); | 786 | fw_core_remove_address_handler(&lu->address_handler); |
636 | list_del(&lu->link); | 787 | list_del(&lu->link); |
637 | kfree(lu); | 788 | kfree(lu); |
638 | } | 789 | } |
639 | scsi_remove_host(shost); | 790 | scsi_remove_host(shost); |
640 | fw_notify("released %s\n", tgt->unit->device.bus_id); | 791 | fw_notify("released %s\n", tgt->bus_id); |
641 | 792 | ||
642 | put_device(&tgt->unit->device); | 793 | put_device(&tgt->unit->device); |
643 | scsi_host_put(shost); | 794 | scsi_host_put(shost); |
795 | fw_device_put(device); | ||
644 | } | 796 | } |
645 | 797 | ||
646 | static struct workqueue_struct *sbp2_wq; | 798 | static struct workqueue_struct *sbp2_wq; |
@@ -666,33 +818,42 @@ static void sbp2_login(struct work_struct *work) | |||
666 | { | 818 | { |
667 | struct sbp2_logical_unit *lu = | 819 | struct sbp2_logical_unit *lu = |
668 | container_of(work, struct sbp2_logical_unit, work.work); | 820 | container_of(work, struct sbp2_logical_unit, work.work); |
669 | struct Scsi_Host *shost = | 821 | struct sbp2_target *tgt = lu->tgt; |
670 | container_of((void *)lu->tgt, struct Scsi_Host, hostdata[0]); | 822 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
823 | struct Scsi_Host *shost; | ||
671 | struct scsi_device *sdev; | 824 | struct scsi_device *sdev; |
672 | struct scsi_lun eight_bytes_lun; | ||
673 | struct fw_unit *unit = lu->tgt->unit; | ||
674 | struct fw_device *device = fw_device(unit->device.parent); | ||
675 | struct sbp2_login_response response; | 825 | struct sbp2_login_response response; |
676 | int generation, node_id, local_node_id; | 826 | int generation, node_id, local_node_id; |
677 | 827 | ||
828 | if (fw_device_is_shutdown(device)) | ||
829 | goto out; | ||
830 | |||
678 | generation = device->generation; | 831 | generation = device->generation; |
679 | smp_rmb(); /* node_id must not be older than generation */ | 832 | smp_rmb(); /* node_id must not be older than generation */ |
680 | node_id = device->node_id; | 833 | node_id = device->node_id; |
681 | local_node_id = device->card->node_id; | 834 | local_node_id = device->card->node_id; |
682 | 835 | ||
836 | /* If this is a re-login attempt, log out, or we might be rejected. */ | ||
837 | if (lu->has_sdev) | ||
838 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
839 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
840 | |||
683 | if (sbp2_send_management_orb(lu, node_id, generation, | 841 | if (sbp2_send_management_orb(lu, node_id, generation, |
684 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { | 842 | SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) { |
685 | if (lu->retries++ < 5) | 843 | if (lu->retries++ < 5) { |
686 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); | 844 | sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); |
687 | else | 845 | } else { |
688 | fw_error("failed to login to %s LUN %04x\n", | 846 | fw_error("%s: failed to login to LUN %04x\n", |
689 | unit->device.bus_id, lu->lun); | 847 | tgt->bus_id, lu->lun); |
848 | /* Let any waiting I/O fail from now on. */ | ||
849 | sbp2_unblock(lu->tgt); | ||
850 | } | ||
690 | goto out; | 851 | goto out; |
691 | } | 852 | } |
692 | 853 | ||
693 | lu->generation = generation; | 854 | tgt->node_id = node_id; |
694 | lu->tgt->node_id = node_id; | 855 | tgt->address_high = local_node_id << 16; |
695 | lu->tgt->address_high = local_node_id << 16; | 856 | sbp2_set_generation(lu, generation); |
696 | 857 | ||
697 | /* Get command block agent offset and login id. */ | 858 | /* Get command block agent offset and login id. */ |
698 | lu->command_block_agent_address = | 859 | lu->command_block_agent_address = |
@@ -700,8 +861,8 @@ static void sbp2_login(struct work_struct *work) | |||
700 | response.command_block_agent.low; | 861 | response.command_block_agent.low; |
701 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); | 862 | lu->login_id = LOGIN_RESPONSE_GET_LOGIN_ID(response); |
702 | 863 | ||
703 | fw_notify("logged in to %s LUN %04x (%d retries)\n", | 864 | fw_notify("%s: logged in to LUN %04x (%d retries)\n", |
704 | unit->device.bus_id, lu->lun, lu->retries); | 865 | tgt->bus_id, lu->lun, lu->retries); |
705 | 866 | ||
706 | #if 0 | 867 | #if 0 |
707 | /* FIXME: The linux1394 sbp2 does this last step. */ | 868 | /* FIXME: The linux1394 sbp2 does this last step. */ |
@@ -711,26 +872,58 @@ static void sbp2_login(struct work_struct *work) | |||
711 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); | 872 | PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); |
712 | sbp2_agent_reset(lu); | 873 | sbp2_agent_reset(lu); |
713 | 874 | ||
714 | memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun)); | 875 | /* This was a re-login. */ |
715 | eight_bytes_lun.scsi_lun[0] = (lu->lun >> 8) & 0xff; | 876 | if (lu->has_sdev) { |
716 | eight_bytes_lun.scsi_lun[1] = lu->lun & 0xff; | 877 | sbp2_cancel_orbs(lu); |
878 | sbp2_conditionally_unblock(lu); | ||
879 | goto out; | ||
880 | } | ||
717 | 881 | ||
718 | sdev = __scsi_add_device(shost, 0, 0, | 882 | if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) |
719 | scsilun_to_int(&eight_bytes_lun), lu); | 883 | ssleep(SBP2_INQUIRY_DELAY); |
720 | if (IS_ERR(sdev)) { | 884 | |
721 | sbp2_send_management_orb(lu, node_id, generation, | 885 | shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]); |
722 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | 886 | sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu); |
723 | /* | 887 | /* |
724 | * Set this back to sbp2_login so we fall back and | 888 | * FIXME: We are unable to perform reconnects while in sbp2_login(). |
725 | * retry login on bus reset. | 889 | * Therefore __scsi_add_device() will get into trouble if a bus reset |
726 | */ | 890 | * happens in parallel. It will either fail or leave us with an |
727 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 891 | * unusable sdev. As a workaround we check for this and retry the |
728 | } else { | 892 | * whole login and SCSI probing. |
729 | lu->sdev = sdev; | 893 | */ |
894 | |||
895 | /* Reported error during __scsi_add_device() */ | ||
896 | if (IS_ERR(sdev)) | ||
897 | goto out_logout_login; | ||
898 | |||
899 | /* Unreported error during __scsi_add_device() */ | ||
900 | smp_rmb(); /* get current card generation */ | ||
901 | if (generation != device->card->generation) { | ||
902 | scsi_remove_device(sdev); | ||
730 | scsi_device_put(sdev); | 903 | scsi_device_put(sdev); |
904 | goto out_logout_login; | ||
731 | } | 905 | } |
906 | |||
907 | /* No error during __scsi_add_device() */ | ||
908 | lu->has_sdev = true; | ||
909 | scsi_device_put(sdev); | ||
910 | sbp2_allow_block(lu); | ||
911 | goto out; | ||
912 | |||
913 | out_logout_login: | ||
914 | smp_rmb(); /* generation may have changed */ | ||
915 | generation = device->generation; | ||
916 | smp_rmb(); /* node_id must not be older than generation */ | ||
917 | |||
918 | sbp2_send_management_orb(lu, device->node_id, generation, | ||
919 | SBP2_LOGOUT_REQUEST, lu->login_id, NULL); | ||
920 | /* | ||
921 | * If a bus reset happened, sbp2_update will have requeued | ||
922 | * lu->work already. Reset the work from reconnect to login. | ||
923 | */ | ||
924 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | ||
732 | out: | 925 | out: |
733 | sbp2_target_put(lu->tgt); | 926 | sbp2_target_put(tgt); |
734 | } | 927 | } |
735 | 928 | ||
736 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | 929 | static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) |
@@ -751,10 +944,12 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) | |||
751 | return -ENOMEM; | 944 | return -ENOMEM; |
752 | } | 945 | } |
753 | 946 | ||
754 | lu->tgt = tgt; | 947 | lu->tgt = tgt; |
755 | lu->sdev = NULL; | 948 | lu->lun = lun_entry & 0xffff; |
756 | lu->lun = lun_entry & 0xffff; | 949 | lu->retries = 0; |
757 | lu->retries = 0; | 950 | lu->has_sdev = false; |
951 | lu->blocked = false; | ||
952 | ++tgt->dont_block; | ||
758 | INIT_LIST_HEAD(&lu->orb_list); | 953 | INIT_LIST_HEAD(&lu->orb_list); |
759 | INIT_DELAYED_WORK(&lu->work, sbp2_login); | 954 | INIT_DELAYED_WORK(&lu->work, sbp2_login); |
760 | 955 | ||
@@ -813,7 +1008,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, | |||
813 | if (timeout > tgt->mgt_orb_timeout) | 1008 | if (timeout > tgt->mgt_orb_timeout) |
814 | fw_notify("%s: config rom contains %ds " | 1009 | fw_notify("%s: config rom contains %ds " |
815 | "management ORB timeout, limiting " | 1010 | "management ORB timeout, limiting " |
816 | "to %ds\n", tgt->unit->device.bus_id, | 1011 | "to %ds\n", tgt->bus_id, |
817 | timeout / 1000, | 1012 | timeout / 1000, |
818 | tgt->mgt_orb_timeout / 1000); | 1013 | tgt->mgt_orb_timeout / 1000); |
819 | break; | 1014 | break; |
@@ -836,12 +1031,12 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
836 | u32 firmware_revision) | 1031 | u32 firmware_revision) |
837 | { | 1032 | { |
838 | int i; | 1033 | int i; |
839 | unsigned w = sbp2_param_workarounds; | 1034 | unsigned int w = sbp2_param_workarounds; |
840 | 1035 | ||
841 | if (w) | 1036 | if (w) |
842 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " | 1037 | fw_notify("Please notify linux1394-devel@lists.sourceforge.net " |
843 | "if you need the workarounds parameter for %s\n", | 1038 | "if you need the workarounds parameter for %s\n", |
844 | tgt->unit->device.bus_id); | 1039 | tgt->bus_id); |
845 | 1040 | ||
846 | if (w & SBP2_WORKAROUND_OVERRIDE) | 1041 | if (w & SBP2_WORKAROUND_OVERRIDE) |
847 | goto out; | 1042 | goto out; |
@@ -863,8 +1058,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model, | |||
863 | if (w) | 1058 | if (w) |
864 | fw_notify("Workarounds for %s: 0x%x " | 1059 | fw_notify("Workarounds for %s: 0x%x " |
865 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", | 1060 | "(firmware_revision 0x%06x, model_id 0x%06x)\n", |
866 | tgt->unit->device.bus_id, | 1061 | tgt->bus_id, w, firmware_revision, model); |
867 | w, firmware_revision, model); | ||
868 | tgt->workarounds = w; | 1062 | tgt->workarounds = w; |
869 | } | 1063 | } |
870 | 1064 | ||
@@ -888,6 +1082,7 @@ static int sbp2_probe(struct device *dev) | |||
888 | tgt->unit = unit; | 1082 | tgt->unit = unit; |
889 | kref_init(&tgt->kref); | 1083 | kref_init(&tgt->kref); |
890 | INIT_LIST_HEAD(&tgt->lu_list); | 1084 | INIT_LIST_HEAD(&tgt->lu_list); |
1085 | tgt->bus_id = unit->device.bus_id; | ||
891 | 1086 | ||
892 | if (fw_device_enable_phys_dma(device) < 0) | 1087 | if (fw_device_enable_phys_dma(device) < 0) |
893 | goto fail_shost_put; | 1088 | goto fail_shost_put; |
@@ -895,6 +1090,8 @@ static int sbp2_probe(struct device *dev) | |||
895 | if (scsi_add_host(shost, &unit->device) < 0) | 1090 | if (scsi_add_host(shost, &unit->device) < 0) |
896 | goto fail_shost_put; | 1091 | goto fail_shost_put; |
897 | 1092 | ||
1093 | fw_device_get(device); | ||
1094 | |||
898 | /* Initialize to values that won't match anything in our table. */ | 1095 | /* Initialize to values that won't match anything in our table. */ |
899 | firmware_revision = 0xff000000; | 1096 | firmware_revision = 0xff000000; |
900 | model = 0xff000000; | 1097 | model = 0xff000000; |
@@ -938,10 +1135,13 @@ static void sbp2_reconnect(struct work_struct *work) | |||
938 | { | 1135 | { |
939 | struct sbp2_logical_unit *lu = | 1136 | struct sbp2_logical_unit *lu = |
940 | container_of(work, struct sbp2_logical_unit, work.work); | 1137 | container_of(work, struct sbp2_logical_unit, work.work); |
941 | struct fw_unit *unit = lu->tgt->unit; | 1138 | struct sbp2_target *tgt = lu->tgt; |
942 | struct fw_device *device = fw_device(unit->device.parent); | 1139 | struct fw_device *device = fw_device(tgt->unit->device.parent); |
943 | int generation, node_id, local_node_id; | 1140 | int generation, node_id, local_node_id; |
944 | 1141 | ||
1142 | if (fw_device_is_shutdown(device)) | ||
1143 | goto out; | ||
1144 | |||
945 | generation = device->generation; | 1145 | generation = device->generation; |
946 | smp_rmb(); /* node_id must not be older than generation */ | 1146 | smp_rmb(); /* node_id must not be older than generation */ |
947 | node_id = device->node_id; | 1147 | node_id = device->node_id; |
@@ -950,10 +1150,17 @@ static void sbp2_reconnect(struct work_struct *work) | |||
950 | if (sbp2_send_management_orb(lu, node_id, generation, | 1150 | if (sbp2_send_management_orb(lu, node_id, generation, |
951 | SBP2_RECONNECT_REQUEST, | 1151 | SBP2_RECONNECT_REQUEST, |
952 | lu->login_id, NULL) < 0) { | 1152 | lu->login_id, NULL) < 0) { |
953 | if (lu->retries++ >= 5) { | 1153 | /* |
954 | fw_error("failed to reconnect to %s\n", | 1154 | * If reconnect was impossible even though we are in the |
955 | unit->device.bus_id); | 1155 | * current generation, fall back and try to log in again. |
956 | /* Fall back and try to log in again. */ | 1156 | * |
1157 | * We could check for "Function rejected" status, but | ||
1158 | * checking the bus generation is simpler and more general. | ||
1159 | */ | ||
1160 | smp_rmb(); /* get current card generation */ | ||
1161 | if (generation == device->card->generation || | ||
1162 | lu->retries++ >= 5) { | ||
1163 | fw_error("%s: failed to reconnect\n", tgt->bus_id); | ||
957 | lu->retries = 0; | 1164 | lu->retries = 0; |
958 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); | 1165 | PREPARE_DELAYED_WORK(&lu->work, sbp2_login); |
959 | } | 1166 | } |
@@ -961,17 +1168,18 @@ static void sbp2_reconnect(struct work_struct *work) | |||
961 | goto out; | 1168 | goto out; |
962 | } | 1169 | } |
963 | 1170 | ||
964 | lu->generation = generation; | 1171 | tgt->node_id = node_id; |
965 | lu->tgt->node_id = node_id; | 1172 | tgt->address_high = local_node_id << 16; |
966 | lu->tgt->address_high = local_node_id << 16; | 1173 | sbp2_set_generation(lu, generation); |
967 | 1174 | ||
968 | fw_notify("reconnected to %s LUN %04x (%d retries)\n", | 1175 | fw_notify("%s: reconnected to LUN %04x (%d retries)\n", |
969 | unit->device.bus_id, lu->lun, lu->retries); | 1176 | tgt->bus_id, lu->lun, lu->retries); |
970 | 1177 | ||
971 | sbp2_agent_reset(lu); | 1178 | sbp2_agent_reset(lu); |
972 | sbp2_cancel_orbs(lu); | 1179 | sbp2_cancel_orbs(lu); |
1180 | sbp2_conditionally_unblock(lu); | ||
973 | out: | 1181 | out: |
974 | sbp2_target_put(lu->tgt); | 1182 | sbp2_target_put(tgt); |
975 | } | 1183 | } |
976 | 1184 | ||
977 | static void sbp2_update(struct fw_unit *unit) | 1185 | static void sbp2_update(struct fw_unit *unit) |
@@ -986,6 +1194,7 @@ static void sbp2_update(struct fw_unit *unit) | |||
986 | * Iteration over tgt->lu_list is therefore safe here. | 1194 | * Iteration over tgt->lu_list is therefore safe here. |
987 | */ | 1195 | */ |
988 | list_for_each_entry(lu, &tgt->lu_list, link) { | 1196 | list_for_each_entry(lu, &tgt->lu_list, link) { |
1197 | sbp2_conditionally_block(lu); | ||
989 | lu->retries = 0; | 1198 | lu->retries = 0; |
990 | sbp2_queue_work(lu, 0); | 1199 | sbp2_queue_work(lu, 0); |
991 | } | 1200 | } |
@@ -1063,7 +1272,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1063 | 1272 | ||
1064 | if (status != NULL) { | 1273 | if (status != NULL) { |
1065 | if (STATUS_GET_DEAD(*status)) | 1274 | if (STATUS_GET_DEAD(*status)) |
1066 | sbp2_agent_reset(orb->lu); | 1275 | sbp2_agent_reset_no_wait(orb->lu); |
1067 | 1276 | ||
1068 | switch (STATUS_GET_RESPONSE(*status)) { | 1277 | switch (STATUS_GET_RESPONSE(*status)) { |
1069 | case SBP2_STATUS_REQUEST_COMPLETE: | 1278 | case SBP2_STATUS_REQUEST_COMPLETE: |
@@ -1089,6 +1298,7 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status) | |||
1089 | * or when sending the write (less likely). | 1298 | * or when sending the write (less likely). |
1090 | */ | 1299 | */ |
1091 | result = DID_BUS_BUSY << 16; | 1300 | result = DID_BUS_BUSY << 16; |
1301 | sbp2_conditionally_block(orb->lu); | ||
1092 | } | 1302 | } |
1093 | 1303 | ||
1094 | dma_unmap_single(device->card->device, orb->base.request_bus, | 1304 | dma_unmap_single(device->card->device, orb->base.request_bus, |
@@ -1197,7 +1407,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) | |||
1197 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1407 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1198 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); | 1408 | struct fw_device *device = fw_device(lu->tgt->unit->device.parent); |
1199 | struct sbp2_command_orb *orb; | 1409 | struct sbp2_command_orb *orb; |
1200 | unsigned max_payload; | 1410 | unsigned int max_payload; |
1201 | int retval = SCSI_MLQUEUE_HOST_BUSY; | 1411 | int retval = SCSI_MLQUEUE_HOST_BUSY; |
1202 | 1412 | ||
1203 | /* | 1413 | /* |
@@ -1275,6 +1485,10 @@ static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) | |||
1275 | { | 1485 | { |
1276 | struct sbp2_logical_unit *lu = sdev->hostdata; | 1486 | struct sbp2_logical_unit *lu = sdev->hostdata; |
1277 | 1487 | ||
1488 | /* (Re-)Adding logical units via the SCSI stack is not supported. */ | ||
1489 | if (!lu) | ||
1490 | return -ENOSYS; | ||
1491 | |||
1278 | sdev->allow_restart = 1; | 1492 | sdev->allow_restart = 1; |
1279 | 1493 | ||
1280 | /* | 1494 | /* |
@@ -1319,7 +1533,7 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd) | |||
1319 | { | 1533 | { |
1320 | struct sbp2_logical_unit *lu = cmd->device->hostdata; | 1534 | struct sbp2_logical_unit *lu = cmd->device->hostdata; |
1321 | 1535 | ||
1322 | fw_notify("sbp2_scsi_abort\n"); | 1536 | fw_notify("%s: sbp2_scsi_abort\n", lu->tgt->bus_id); |
1323 | sbp2_agent_reset(lu); | 1537 | sbp2_agent_reset(lu); |
1324 | sbp2_cancel_orbs(lu); | 1538 | sbp2_cancel_orbs(lu); |
1325 | 1539 | ||
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c index 172c1867e9aa..e47bb040197a 100644 --- a/drivers/firewire/fw-topology.c +++ b/drivers/firewire/fw-topology.c | |||
@@ -383,6 +383,7 @@ void fw_destroy_nodes(struct fw_card *card) | |||
383 | card->color++; | 383 | card->color++; |
384 | if (card->local_node != NULL) | 384 | if (card->local_node != NULL) |
385 | for_each_fw_node(card, card->local_node, report_lost_node); | 385 | for_each_fw_node(card, card->local_node, report_lost_node); |
386 | card->local_node = NULL; | ||
386 | spin_unlock_irqrestore(&card->lock, flags); | 387 | spin_unlock_irqrestore(&card->lock, flags); |
387 | } | 388 | } |
388 | 389 | ||
diff --git a/drivers/firewire/fw-transaction.h b/drivers/firewire/fw-transaction.h index fa7967b57408..09cb72870454 100644 --- a/drivers/firewire/fw-transaction.h +++ b/drivers/firewire/fw-transaction.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
28 | #include <linux/firewire-constants.h> | 28 | #include <linux/firewire-constants.h> |
29 | #include <asm/atomic.h> | ||
29 | 30 | ||
30 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) | 31 | #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4) |
31 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) | 32 | #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0) |
@@ -219,6 +220,7 @@ extern struct bus_type fw_bus_type; | |||
219 | struct fw_card { | 220 | struct fw_card { |
220 | const struct fw_card_driver *driver; | 221 | const struct fw_card_driver *driver; |
221 | struct device *device; | 222 | struct device *device; |
223 | atomic_t device_count; | ||
222 | struct kref kref; | 224 | struct kref kref; |
223 | 225 | ||
224 | int node_id; | 226 | int node_id; |
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index df752e690e47..eed6d8e1b5c7 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -50,7 +50,7 @@ menuconfig IDE | |||
50 | To compile this driver as a module, choose M here: the | 50 | To compile this driver as a module, choose M here: the |
51 | module will be called ide. | 51 | module will be called ide. |
52 | 52 | ||
53 | For further information, please read <file:Documentation/ide.txt>. | 53 | For further information, please read <file:Documentation/ide/ide.txt>. |
54 | 54 | ||
55 | If unsure, say Y. | 55 | If unsure, say Y. |
56 | 56 | ||
@@ -77,7 +77,7 @@ config BLK_DEV_IDE | |||
77 | Useful information about large (>540 MB) IDE disks, multiple | 77 | Useful information about large (>540 MB) IDE disks, multiple |
78 | interfaces, what to do if ATA/IDE devices are not automatically | 78 | interfaces, what to do if ATA/IDE devices are not automatically |
79 | detected, sound card ATA/IDE ports, module support, and other | 79 | detected, sound card ATA/IDE ports, module support, and other |
80 | topics, is contained in <file:Documentation/ide.txt>. For detailed | 80 | topics, is contained in <file:Documentation/ide/ide.txt>. For detailed |
81 | information about hard drives, consult the Disk-HOWTO and the | 81 | information about hard drives, consult the Disk-HOWTO and the |
82 | Multi-Disk-HOWTO, available from | 82 | Multi-Disk-HOWTO, available from |
83 | <http://www.tldp.org/docs.html#howto>. | 83 | <http://www.tldp.org/docs.html#howto>. |
@@ -87,7 +87,7 @@ config BLK_DEV_IDE | |||
87 | <ftp://ibiblio.org/pub/Linux/system/hardware/>. | 87 | <ftp://ibiblio.org/pub/Linux/system/hardware/>. |
88 | 88 | ||
89 | To compile this driver as a module, choose M here and read | 89 | To compile this driver as a module, choose M here and read |
90 | <file:Documentation/ide.txt>. The module will be called ide-mod. | 90 | <file:Documentation/ide/ide.txt>. The module will be called ide-mod. |
91 | Do not compile this driver as a module if your root file system (the | 91 | Do not compile this driver as a module if your root file system (the |
92 | one containing the directory /) is located on an IDE device. | 92 | one containing the directory /) is located on an IDE device. |
93 | 93 | ||
@@ -98,7 +98,7 @@ config BLK_DEV_IDE | |||
98 | 98 | ||
99 | if BLK_DEV_IDE | 99 | if BLK_DEV_IDE |
100 | 100 | ||
101 | comment "Please see Documentation/ide.txt for help/info on IDE drives" | 101 | comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" |
102 | 102 | ||
103 | config BLK_DEV_IDE_SATA | 103 | config BLK_DEV_IDE_SATA |
104 | bool "Support for SATA (deprecated; conflicts with libata SATA driver)" | 104 | bool "Support for SATA (deprecated; conflicts with libata SATA driver)" |
@@ -235,8 +235,8 @@ config BLK_DEV_IDETAPE | |||
235 | along with other IDE devices, as "hdb" or "hdc", or something | 235 | along with other IDE devices, as "hdb" or "hdc", or something |
236 | similar, and will be mapped to a character device such as "ht0" | 236 | similar, and will be mapped to a character device such as "ht0" |
237 | (check the boot messages with dmesg). Be sure to consult the | 237 | (check the boot messages with dmesg). Be sure to consult the |
238 | <file:drivers/ide/ide-tape.c> and <file:Documentation/ide.txt> files | 238 | <file:drivers/ide/ide-tape.c> and <file:Documentation/ide/ide.txt> |
239 | for usage information. | 239 | files for usage information. |
240 | 240 | ||
241 | To compile this driver as a module, choose M here: the | 241 | To compile this driver as a module, choose M here: the |
242 | module will be called ide-tape. | 242 | module will be called ide-tape. |
@@ -358,7 +358,7 @@ config BLK_DEV_CMD640 | |||
358 | 358 | ||
359 | The CMD640 chip is also used on add-in cards by Acculogic, and on | 359 | The CMD640 chip is also used on add-in cards by Acculogic, and on |
360 | the "CSA-6400E PCI to IDE controller" that some people have. For | 360 | the "CSA-6400E PCI to IDE controller" that some people have. For |
361 | details, read <file:Documentation/ide.txt>. | 361 | details, read <file:Documentation/ide/ide.txt>. |
362 | 362 | ||
363 | config BLK_DEV_CMD640_ENHANCED | 363 | config BLK_DEV_CMD640_ENHANCED |
364 | bool "CMD640 enhanced support" | 364 | bool "CMD640 enhanced support" |
@@ -366,7 +366,7 @@ config BLK_DEV_CMD640_ENHANCED | |||
366 | help | 366 | help |
367 | This option includes support for setting/autotuning PIO modes and | 367 | This option includes support for setting/autotuning PIO modes and |
368 | prefetch on CMD640 IDE interfaces. For details, read | 368 | prefetch on CMD640 IDE interfaces. For details, read |
369 | <file:Documentation/ide.txt>. If you have a CMD640 IDE interface | 369 | <file:Documentation/ide/ide.txt>. If you have a CMD640 IDE interface |
370 | and your BIOS does not already do this for you, then say Y here. | 370 | and your BIOS does not already do this for you, then say Y here. |
371 | Otherwise say N. | 371 | Otherwise say N. |
372 | 372 | ||
@@ -1069,9 +1069,9 @@ config BLK_DEV_ALI14XX | |||
1069 | This driver is enabled at runtime using the "ali14xx.probe" kernel | 1069 | This driver is enabled at runtime using the "ali14xx.probe" kernel |
1070 | boot parameter. It enables support for the secondary IDE interface | 1070 | boot parameter. It enables support for the secondary IDE interface |
1071 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster | 1071 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster |
1072 | I/O speeds to be set as well. See the files | 1072 | I/O speeds to be set as well. |
1073 | <file:Documentation/ide.txt> and <file:drivers/ide/legacy/ali14xx.c> | 1073 | See the files <file:Documentation/ide/ide.txt> and |
1074 | for more info. | 1074 | <file:drivers/ide/legacy/ali14xx.c> for more info. |
1075 | 1075 | ||
1076 | config BLK_DEV_DTC2278 | 1076 | config BLK_DEV_DTC2278 |
1077 | tristate "DTC-2278 support" | 1077 | tristate "DTC-2278 support" |
@@ -1079,7 +1079,7 @@ config BLK_DEV_DTC2278 | |||
1079 | This driver is enabled at runtime using the "dtc2278.probe" kernel | 1079 | This driver is enabled at runtime using the "dtc2278.probe" kernel |
1080 | boot parameter. It enables support for the secondary IDE interface | 1080 | boot parameter. It enables support for the secondary IDE interface |
1081 | of the DTC-2278 card, and permits faster I/O speeds to be set as | 1081 | of the DTC-2278 card, and permits faster I/O speeds to be set as |
1082 | well. See the <file:Documentation/ide.txt> and | 1082 | well. See the <file:Documentation/ide/ide.txt> and |
1083 | <file:drivers/ide/legacy/dtc2278.c> files for more info. | 1083 | <file:drivers/ide/legacy/dtc2278.c> files for more info. |
1084 | 1084 | ||
1085 | config BLK_DEV_HT6560B | 1085 | config BLK_DEV_HT6560B |
@@ -1088,7 +1088,7 @@ config BLK_DEV_HT6560B | |||
1088 | This driver is enabled at runtime using the "ht6560b.probe" kernel | 1088 | This driver is enabled at runtime using the "ht6560b.probe" kernel |
1089 | boot parameter. It enables support for the secondary IDE interface | 1089 | boot parameter. It enables support for the secondary IDE interface |
1090 | of the Holtek card, and permits faster I/O speeds to be set as well. | 1090 | of the Holtek card, and permits faster I/O speeds to be set as well. |
1091 | See the <file:Documentation/ide.txt> and | 1091 | See the <file:Documentation/ide/ide.txt> and |
1092 | <file:drivers/ide/legacy/ht6560b.c> files for more info. | 1092 | <file:drivers/ide/legacy/ht6560b.c> files for more info. |
1093 | 1093 | ||
1094 | config BLK_DEV_QD65XX | 1094 | config BLK_DEV_QD65XX |
@@ -1096,7 +1096,7 @@ config BLK_DEV_QD65XX | |||
1096 | help | 1096 | help |
1097 | This driver is enabled at runtime using the "qd65xx.probe" kernel | 1097 | This driver is enabled at runtime using the "qd65xx.probe" kernel |
1098 | boot parameter. It permits faster I/O speeds to be set. See the | 1098 | boot parameter. It permits faster I/O speeds to be set. See the |
1099 | <file:Documentation/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> | 1099 | <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> |
1100 | for more info. | 1100 | for more info. |
1101 | 1101 | ||
1102 | config BLK_DEV_UMC8672 | 1102 | config BLK_DEV_UMC8672 |
@@ -1105,7 +1105,7 @@ config BLK_DEV_UMC8672 | |||
1105 | This driver is enabled at runtime using the "umc8672.probe" kernel | 1105 | This driver is enabled at runtime using the "umc8672.probe" kernel |
1106 | boot parameter. It enables support for the secondary IDE interface | 1106 | boot parameter. It enables support for the secondary IDE interface |
1107 | of the UMC-8672, and permits faster I/O speeds to be set as well. | 1107 | of the UMC-8672, and permits faster I/O speeds to be set as well. |
1108 | See the files <file:Documentation/ide.txt> and | 1108 | See the files <file:Documentation/ide/ide.txt> and |
1109 | <file:drivers/ide/legacy/umc8672.c> for more info. | 1109 | <file:drivers/ide/legacy/umc8672.c> for more info. |
1110 | 1110 | ||
1111 | endif | 1111 | endif |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 310e497b5838..c8d0e8715997 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -670,8 +670,8 @@ static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector, | |||
670 | * and attempt to recover if there are problems. Returns 0 if everything's | 670 | * and attempt to recover if there are problems. Returns 0 if everything's |
671 | * ok; nonzero if the request has been terminated. | 671 | * ok; nonzero if the request has been terminated. |
672 | */ | 672 | */ |
673 | static | 673 | static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq, |
674 | int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | 674 | int len, int ireason, int rw) |
675 | { | 675 | { |
676 | /* | 676 | /* |
677 | * ireason == 0: the drive wants to receive data from us | 677 | * ireason == 0: the drive wants to receive data from us |
@@ -701,6 +701,9 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw) | |||
701 | drive->name, __FUNCTION__, ireason); | 701 | drive->name, __FUNCTION__, ireason); |
702 | } | 702 | } |
703 | 703 | ||
704 | if (rq->cmd_type == REQ_TYPE_ATA_PC) | ||
705 | rq->cmd_flags |= REQ_FAILED; | ||
706 | |||
704 | cdrom_end_request(drive, 0); | 707 | cdrom_end_request(drive, 0); |
705 | return -1; | 708 | return -1; |
706 | } | 709 | } |
@@ -1071,11 +1074,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1071 | /* | 1074 | /* |
1072 | * check which way to transfer data | 1075 | * check which way to transfer data |
1073 | */ | 1076 | */ |
1074 | if (blk_fs_request(rq) || blk_pc_request(rq)) { | 1077 | if (ide_cd_check_ireason(drive, rq, len, ireason, write)) |
1075 | if (ide_cd_check_ireason(drive, len, ireason, write)) | 1078 | return ide_stopped; |
1076 | return ide_stopped; | ||
1077 | 1079 | ||
1078 | if (blk_fs_request(rq) && write == 0) { | 1080 | if (blk_fs_request(rq)) { |
1081 | if (write == 0) { | ||
1079 | int nskip; | 1082 | int nskip; |
1080 | 1083 | ||
1081 | if (ide_cd_check_transfer_size(drive, len)) { | 1084 | if (ide_cd_check_transfer_size(drive, len)) { |
@@ -1101,16 +1104,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1101 | if (ireason == 0) { | 1104 | if (ireason == 0) { |
1102 | write = 1; | 1105 | write = 1; |
1103 | xferfunc = HWIF(drive)->atapi_output_bytes; | 1106 | xferfunc = HWIF(drive)->atapi_output_bytes; |
1104 | } else if (ireason == 2 || (ireason == 1 && | 1107 | } else { |
1105 | (blk_fs_request(rq) || blk_pc_request(rq)))) { | ||
1106 | write = 0; | 1108 | write = 0; |
1107 | xferfunc = HWIF(drive)->atapi_input_bytes; | 1109 | xferfunc = HWIF(drive)->atapi_input_bytes; |
1108 | } else { | ||
1109 | printk(KERN_ERR "%s: %s: The drive " | ||
1110 | "appears confused (ireason = 0x%02x). " | ||
1111 | "Trying to recover by ending request.\n", | ||
1112 | drive->name, __FUNCTION__, ireason); | ||
1113 | goto end_request; | ||
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | /* | 1112 | /* |
@@ -1182,11 +1178,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
1182 | else | 1178 | else |
1183 | rq->data += blen; | 1179 | rq->data += blen; |
1184 | } | 1180 | } |
1181 | if (!write && blk_sense_request(rq)) | ||
1182 | rq->sense_len += blen; | ||
1185 | } | 1183 | } |
1186 | 1184 | ||
1187 | if (write && blk_sense_request(rq)) | ||
1188 | rq->sense_len += thislen; | ||
1189 | |||
1190 | /* | 1185 | /* |
1191 | * pad, if necessary | 1186 | * pad, if necessary |
1192 | */ | 1187 | */ |
@@ -1931,6 +1926,7 @@ static const struct cd_list_entry ide_cd_quirks_list[] = { | |||
1931 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1926 | { "MATSHITADVD-ROM SR-8186", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1932 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1927 | { "MATSHITADVD-ROM SR-8176", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1933 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | 1928 | { "MATSHITADVD-ROM SR-8174", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, |
1929 | { "Optiarc DVD RW AD-5200A", NULL, IDE_CD_FLAG_PLAY_AUDIO_OK }, | ||
1934 | { NULL, NULL, 0 } | 1930 | { NULL, NULL, 0 } |
1935 | }; | 1931 | }; |
1936 | 1932 | ||
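The ide-cd rework above keeps the ATAPI interrupt-reason convention spelled out in the driver's own comment: ireason 0 means the drive expects data from the host, ireason 2 means it has data for the host, and anything else is treated as a confused drive. A standalone sketch of just that direction decision follows; check_direction() and the enum names are made up for the example and are not part of the driver.

#include <stdio.h>

enum xfer_dir { HOST_TO_DRIVE, DRIVE_TO_HOST, DIR_ERROR };

/* Decode the low two interrupt-reason bits: 0 -> host writes to the
 * drive, 2 -> host reads from the drive, otherwise flag an error so the
 * caller can end the request. */
static enum xfer_dir check_direction(int ireason)
{
	switch (ireason & 3) {
	case 0:
		return HOST_TO_DRIVE;
	case 2:
		return DRIVE_TO_HOST;
	default:
		return DIR_ERROR;
	}
}

int main(void)
{
	for (int ireason = 0; ireason < 4; ireason++)
		printf("ireason=%d -> %d\n", ireason,
		       check_direction(ireason));
	return 0;
}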
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index b68284de4e85..6d147ce6782f 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
@@ -457,6 +457,10 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, | |||
457 | layer. the packet must be complete, as we do not | 457 | layer. the packet must be complete, as we do not |
458 | touch it at all. */ | 458 | touch it at all. */ |
459 | ide_cd_init_rq(drive, &req); | 459 | ide_cd_init_rq(drive, &req); |
460 | |||
461 | if (cgc->data_direction == CGC_DATA_WRITE) | ||
462 | req.cmd_flags |= REQ_RW; | ||
463 | |||
460 | memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE); | 464 | memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE); |
461 | if (cgc->sense) | 465 | if (cgc->sense) |
462 | memset(cgc->sense, 0, sizeof(struct request_sense)); | 466 | memset(cgc->sense, 0, sizeof(struct request_sense)); |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 8f5bed471050..39501d130256 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -867,7 +867,7 @@ static void idedisk_setup (ide_drive_t *drive) | |||
867 | 867 | ||
868 | /* Only print cache size when it was specified */ | 868 | /* Only print cache size when it was specified */ |
869 | if (id->buf_size) | 869 | if (id->buf_size) |
870 | printk (" w/%dKiB Cache", id->buf_size/2); | 870 | printk(KERN_CONT " w/%dKiB Cache", id->buf_size / 2); |
871 | 871 | ||
872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", | 872 | printk(KERN_CONT ", CHS=%d/%d/%d\n", |
873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); | 873 | drive->bios_cyl, drive->bios_head, drive->bios_sect); |
@@ -949,7 +949,8 @@ static void ide_device_shutdown(ide_drive_t *drive) | |||
949 | return; | 949 | return; |
950 | } | 950 | } |
951 | 951 | ||
952 | printk("Shutdown: %s\n", drive->name); | 952 | printk(KERN_INFO "Shutdown: %s\n", drive->name); |
953 | |||
953 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); | 954 | drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND); |
954 | } | 955 | } |
955 | 956 | ||
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index d0e7b537353e..d61e5788d310 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c | |||
@@ -1,9 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * IDE DMA support (including IDE PCI BM-DMA). | ||
3 | * | ||
2 | * Copyright (C) 1995-1998 Mark Lord | 4 | * Copyright (C) 1995-1998 Mark Lord |
3 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> | 5 | * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> |
4 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz | 6 | * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz |
5 | * | 7 | * |
6 | * May be copied or modified under the terms of the GNU General Public License | 8 | * May be copied or modified under the terms of the GNU General Public License |
9 | * | ||
10 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
7 | */ | 11 | */ |
8 | 12 | ||
9 | /* | 13 | /* |
@@ -11,49 +15,6 @@ | |||
11 | */ | 15 | */ |
12 | 16 | ||
13 | /* | 17 | /* |
14 | * This module provides support for the bus-master IDE DMA functions | ||
15 | * of various PCI chipsets, including the Intel PIIX (i82371FB for | ||
16 | * the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and | ||
17 | * 440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset) | ||
18 | * ("PIIX" stands for "PCI ISA IDE Xcellerator"). | ||
19 | * | ||
20 | * Pretty much the same code works for other IDE PCI bus-mastering chipsets. | ||
21 | * | ||
22 | * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies). | ||
23 | * | ||
24 | * By default, DMA support is prepared for use, but is currently enabled only | ||
25 | * for drives which already have DMA enabled (UltraDMA or mode 2 multi/single), | ||
26 | * or which are recognized as "good" (see table below). Drives with only mode0 | ||
27 | * or mode1 (multi/single) DMA should also work with this chipset/driver | ||
28 | * (eg. MC2112A) but are not enabled by default. | ||
29 | * | ||
30 | * Use "hdparm -i" to view modes supported by a given drive. | ||
31 | * | ||
32 | * The hdparm-3.5 (or later) utility can be used for manually enabling/disabling | ||
33 | * DMA support, but must be (re-)compiled against this kernel version or later. | ||
34 | * | ||
35 | * To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting. | ||
36 | * If problems arise, ide.c will disable DMA operation after a few retries. | ||
37 | * This error recovery mechanism works and has been extremely well exercised. | ||
38 | * | ||
39 | * IDE drives, depending on their vintage, may support several different modes | ||
40 | * of DMA operation. The boot-time modes are indicated with a "*" in | ||
41 | * the "hdparm -i" listing, and can be changed with *knowledgeable* use of | ||
42 | * the "hdparm -X" feature. There is seldom a need to do this, as drives | ||
43 | * normally power-up with their "best" PIO/DMA modes enabled. | ||
44 | * | ||
45 | * Testing has been done with a rather extensive number of drives, | ||
46 | * with Quantum & Western Digital models generally outperforming the pack, | ||
47 | * and Fujitsu & Conner (and some Seagate which are really Conner) drives | ||
48 | * showing more lackluster throughput. | ||
49 | * | ||
50 | * Keep an eye on /var/adm/messages for "DMA disabled" messages. | ||
51 | * | ||
52 | * Some people have reported trouble with Intel Zappa motherboards. | ||
53 | * This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0, | ||
54 | * available from ftp://ftp.intel.com/pub/bios/10004bs0.exe | ||
55 | * (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this). | ||
56 | * | ||
57 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for | 18 | * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for |
58 | * fixing the problem with the BIOS on some Acer motherboards. | 19 | * fixing the problem with the BIOS on some Acer motherboards. |
59 | * | 20 | * |
@@ -65,11 +26,6 @@ | |||
65 | * | 26 | * |
66 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> | 27 | * Most importantly, thanks to Robert Bringman <rob@mars.trion.com> |
67 | * for supplying a Promise UDMA board & WD UDMA drive for this work! | 28 | * for supplying a Promise UDMA board & WD UDMA drive for this work! |
68 | * | ||
69 | * And, yes, Intel Zappa boards really *do* use both PIIX IDE ports. | ||
70 | * | ||
71 | * ATA-66/100 and recovery functions, I forgot the rest...... | ||
72 | * | ||
73 | */ | 29 | */ |
74 | 30 | ||
75 | #include <linux/module.h> | 31 | #include <linux/module.h> |
@@ -757,7 +713,7 @@ static int ide_tune_dma(ide_drive_t *drive) | |||
757 | } | 713 | } |
758 | 714 | ||
759 | if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) | 715 | if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) |
760 | return 0; | 716 | return 1; |
761 | 717 | ||
762 | if (ide_set_dma_mode(drive, speed)) | 718 | if (ide_set_dma_mode(drive, speed)) |
763 | return 0; | 719 | return 0; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4a2cb2868226..194ecb0049eb 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
@@ -756,7 +756,8 @@ static int ide_probe_port(ide_hwif_t *hwif) | |||
756 | 756 | ||
757 | BUG_ON(hwif->present); | 757 | BUG_ON(hwif->present); |
758 | 758 | ||
759 | if (hwif->noprobe) | 759 | if (hwif->noprobe || |
760 | (hwif->drives[0].noprobe && hwif->drives[1].noprobe)) | ||
760 | return -EACCES; | 761 | return -EACCES; |
761 | 762 | ||
762 | /* | 763 | /* |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 0598ecfd5f37..43e0e0557776 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -3765,6 +3765,11 @@ static int ide_tape_probe(ide_drive_t *drive) | |||
3765 | g->fops = &idetape_block_ops; | 3765 | g->fops = &idetape_block_ops; |
3766 | ide_register_region(g); | 3766 | ide_register_region(g); |
3767 | 3767 | ||
3768 | printk(KERN_WARNING "It is possible that this driver does not have any" | ||
3769 | " users anymore and, as a result, it will be REMOVED soon." | ||
3770 | " Please notify Bart <bzolnier@gmail.com> or Boris" | ||
3771 | " <petkovbb@gmail.com> in case you still need it.\n"); | ||
3772 | |||
3768 | return 0; | 3773 | return 0; |
3769 | 3774 | ||
3770 | out_free_tape: | 3775 | out_free_tape: |
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c index 477833f0daf5..9976f9d627d4 100644 --- a/drivers/ide/ide.c +++ b/drivers/ide/ide.c | |||
@@ -590,11 +590,6 @@ void ide_unregister(unsigned int index, int init_default, int restore) | |||
590 | hwif->extra_ports = 0; | 590 | hwif->extra_ports = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | /* | ||
594 | * Note that we only release the standard ports, | ||
595 | * and do not even try to handle any extra ports | ||
596 | * allocated for weird IDE interface chipsets. | ||
597 | */ | ||
598 | ide_hwif_release_regions(hwif); | 593 | ide_hwif_release_regions(hwif); |
599 | 594 | ||
600 | /* copy original settings */ | 595 | /* copy original settings */ |
@@ -672,7 +667,6 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *), | |||
672 | 667 | ||
673 | do { | 668 | do { |
674 | hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]); | 669 | hwif = ide_deprecated_find_port(hw->io_ports[IDE_DATA_OFFSET]); |
675 | index = hwif->index; | ||
676 | if (hwif) | 670 | if (hwif) |
677 | goto found; | 671 | goto found; |
678 | for (index = 0; index < MAX_HWIFS; index++) | 672 | for (index = 0; index < MAX_HWIFS; index++) |
@@ -680,6 +674,7 @@ int ide_register_hw(hw_regs_t *hw, void (*quirkproc)(ide_drive_t *), | |||
680 | } while (retry--); | 674 | } while (retry--); |
681 | return -1; | 675 | return -1; |
682 | found: | 676 | found: |
677 | index = hwif->index; | ||
683 | if (hwif->present) | 678 | if (hwif->present) |
684 | ide_unregister(index, 0, 1); | 679 | ide_unregister(index, 0, 1); |
685 | else if (!hwif->hold) | 680 | else if (!hwif->hold) |
@@ -1036,10 +1031,9 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1036 | drive->nice1 = (arg >> IDE_NICE_1) & 1; | 1031 | drive->nice1 = (arg >> IDE_NICE_1) & 1; |
1037 | return 0; | 1032 | return 0; |
1038 | case HDIO_DRIVE_RESET: | 1033 | case HDIO_DRIVE_RESET: |
1039 | { | 1034 | if (!capable(CAP_SYS_ADMIN)) |
1040 | unsigned long flags; | 1035 | return -EACCES; |
1041 | if (!capable(CAP_SYS_ADMIN)) return -EACCES; | 1036 | |
1042 | |||
1043 | /* | 1037 | /* |
1044 | * Abort the current command on the | 1038 | * Abort the current command on the |
1045 | * group if there is one, taking | 1039 | * group if there is one, taking |
@@ -1058,17 +1052,15 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device | |||
1058 | ide_abort(drive, "drive reset"); | 1052 | ide_abort(drive, "drive reset"); |
1059 | 1053 | ||
1060 | BUG_ON(HWGROUP(drive)->handler); | 1054 | BUG_ON(HWGROUP(drive)->handler); |
1061 | 1055 | ||
1062 | /* Ensure nothing gets queued after we | 1056 | /* Ensure nothing gets queued after we |
1063 | drop the lock. Reset will clear the busy */ | 1057 | drop the lock. Reset will clear the busy */ |
1064 | 1058 | ||
1065 | HWGROUP(drive)->busy = 1; | 1059 | HWGROUP(drive)->busy = 1; |
1066 | spin_unlock_irqrestore(&ide_lock, flags); | 1060 | spin_unlock_irqrestore(&ide_lock, flags); |
1067 | (void) ide_do_reset(drive); | 1061 | (void) ide_do_reset(drive); |
1068 | 1062 | ||
1069 | return 0; | 1063 | return 0; |
1070 | } | ||
1071 | |||
1072 | case HDIO_GET_BUSSTATE: | 1064 | case HDIO_GET_BUSSTATE: |
1073 | if (!capable(CAP_SYS_ADMIN)) | 1065 | if (!capable(CAP_SYS_ADMIN)) |
1074 | return -EACCES; | 1066 | return -EACCES; |
@@ -1188,7 +1180,7 @@ static int __initdata is_chipset_set[MAX_HWIFS]; | |||
1188 | * ide_setup() gets called VERY EARLY during initialization, | 1180 | * ide_setup() gets called VERY EARLY during initialization, |
1189 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". | 1181 | * to handle kernel "command line" strings beginning with "hdx=" or "ide". |
1190 | * | 1182 | * |
1191 | * Remember to update Documentation/ide.txt if you change something here. | 1183 | * Remember to update Documentation/ide/ide.txt if you change something here. |
1192 | */ | 1184 | */ |
1193 | static int __init ide_setup(char *s) | 1185 | static int __init ide_setup(char *s) |
1194 | { | 1186 | { |
@@ -1449,7 +1441,7 @@ static int __init ide_setup(char *s) | |||
1449 | 1441 | ||
1450 | case -1: /* "noprobe" */ | 1442 | case -1: /* "noprobe" */ |
1451 | hwif->noprobe = 1; | 1443 | hwif->noprobe = 1; |
1452 | goto done; | 1444 | goto obsolete_option; |
1453 | 1445 | ||
1454 | case 1: /* base */ | 1446 | case 1: /* base */ |
1455 | vals[1] = vals[0] + 0x206; /* default ctl */ | 1447 | vals[1] = vals[0] + 0x206; /* default ctl */ |
diff --git a/drivers/ide/legacy/qd65xx.c b/drivers/ide/legacy/qd65xx.c index bba29df5f21d..2f4f47ad602f 100644 --- a/drivers/ide/legacy/qd65xx.c +++ b/drivers/ide/legacy/qd65xx.c | |||
@@ -334,43 +334,6 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif) | |||
334 | hwif->drives[1].drive_data = t2; | 334 | hwif->drives[1].drive_data = t2; |
335 | } | 335 | } |
336 | 336 | ||
337 | /* | ||
338 | * qd_unsetup: | ||
339 | * | ||
340 | * called to unsetup an ata channel : back to default values, unlinks tuning | ||
341 | */ | ||
342 | /* | ||
343 | static void __exit qd_unsetup(ide_hwif_t *hwif) | ||
344 | { | ||
345 | u8 config = hwif->config_data; | ||
346 | int base = hwif->select_data; | ||
347 | void *set_pio_mode = (void *)hwif->set_pio_mode; | ||
348 | |||
349 | if (hwif->chipset != ide_qd65xx) | ||
350 | return; | ||
351 | |||
352 | printk(KERN_NOTICE "%s: back to defaults\n", hwif->name); | ||
353 | |||
354 | hwif->selectproc = NULL; | ||
355 | hwif->set_pio_mode = NULL; | ||
356 | |||
357 | if (set_pio_mode == (void *)qd6500_set_pio_mode) { | ||
358 | // will do it for both | ||
359 | outb(QD6500_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
360 | } else if (set_pio_mode == (void *)qd6580_set_pio_mode) { | ||
361 | if (QD_CONTROL(hwif) & QD_CONTR_SEC_DISABLED) { | ||
362 | outb(QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
363 | outb(QD6580_DEF_DATA2, QD_TIMREG(&hwif->drives[1])); | ||
364 | } else { | ||
365 | outb(hwif->channel ? QD6580_DEF_DATA2 : QD6580_DEF_DATA, QD_TIMREG(&hwif->drives[0])); | ||
366 | } | ||
367 | } else { | ||
368 | printk(KERN_WARNING "Unknown qd65xx tuning fonction !\n"); | ||
369 | printk(KERN_WARNING "keeping settings !\n"); | ||
370 | } | ||
371 | } | ||
372 | */ | ||
373 | |||
374 | static const struct ide_port_info qd65xx_port_info __initdata = { | 337 | static const struct ide_port_info qd65xx_port_info __initdata = { |
375 | .chipset = ide_qd65xx, | 338 | .chipset = ide_qd65xx, |
376 | .host_flags = IDE_HFLAG_IO_32BIT | | 339 | .host_flags = IDE_HFLAG_IO_32BIT | |
@@ -444,6 +407,8 @@ static int __init qd_probe(int base) | |||
444 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", | 407 | printk(KERN_DEBUG "qd6580: config=%#x, control=%#x, ID3=%u\n", |
445 | config, control, QD_ID3); | 408 | config, control, QD_ID3); |
446 | 409 | ||
410 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
411 | |||
447 | if (control & QD_CONTR_SEC_DISABLED) { | 412 | if (control & QD_CONTR_SEC_DISABLED) { |
448 | /* secondary disabled */ | 413 | /* secondary disabled */ |
449 | 414 | ||
@@ -460,8 +425,6 @@ static int __init qd_probe(int base) | |||
460 | 425 | ||
461 | ide_device_add(idx, &qd65xx_port_info); | 426 | ide_device_add(idx, &qd65xx_port_info); |
462 | 427 | ||
463 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
464 | |||
465 | return 1; | 428 | return 1; |
466 | } else { | 429 | } else { |
467 | ide_hwif_t *mate; | 430 | ide_hwif_t *mate; |
@@ -487,8 +450,6 @@ static int __init qd_probe(int base) | |||
487 | 450 | ||
488 | ide_device_add(idx, &qd65xx_port_info); | 451 | ide_device_add(idx, &qd65xx_port_info); |
489 | 452 | ||
490 | outb(QD_DEF_CONTR, QD_CONTROL_PORT); | ||
491 | |||
492 | return 0; /* no other qd65xx possible */ | 453 | return 0; /* no other qd65xx possible */ |
493 | } | 454 | } |
494 | } | 455 | } |
diff --git a/drivers/ide/pci/cmd640.c b/drivers/ide/pci/cmd640.c index bd24dad3cfc6..ec667982809c 100644 --- a/drivers/ide/pci/cmd640.c +++ b/drivers/ide/pci/cmd640.c | |||
@@ -787,7 +787,8 @@ static int __init cmd640x_init(void) | |||
787 | /* | 787 | /* |
788 | * Try to enable the secondary interface, if not already enabled | 788 | * Try to enable the secondary interface, if not already enabled |
789 | */ | 789 | */ |
790 | if (cmd_hwif1->noprobe) { | 790 | if (cmd_hwif1->noprobe || |
791 | (cmd_hwif1->drives[0].noprobe && cmd_hwif1->drives[1].noprobe)) { | ||
791 | port2 = "not probed"; | 792 | port2 = "not probed"; |
792 | } else { | 793 | } else { |
793 | b = get_cmd640_reg(CNTRL); | 794 | b = get_cmd640_reg(CNTRL); |
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c index d0f7bb8b8adf..6357bb6269ab 100644 --- a/drivers/ide/pci/hpt366.c +++ b/drivers/ide/pci/hpt366.c | |||
@@ -1570,10 +1570,12 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1570 | if (rev < 3) | 1570 | if (rev < 3) |
1571 | info = &hpt36x; | 1571 | info = &hpt36x; |
1572 | else { | 1572 | else { |
1573 | static const struct hpt_info *hpt37x_info[] = | 1573 | switch (min_t(u8, rev, 6)) { |
1574 | { &hpt370, &hpt370a, &hpt372, &hpt372n }; | 1574 | case 3: info = &hpt370; break; |
1575 | 1575 | case 4: info = &hpt370a; break; | |
1576 | info = hpt37x_info[min_t(u8, rev, 6) - 3]; | 1576 | case 5: info = &hpt372; break; |
1577 | case 6: info = &hpt372n; break; | ||
1578 | } | ||
1577 | idx++; | 1579 | idx++; |
1578 | } | 1580 | } |
1579 | break; | 1581 | break; |
@@ -1626,7 +1628,7 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic | |||
1626 | return ide_setup_pci_device(dev, &d); | 1628 | return ide_setup_pci_device(dev, &d); |
1627 | } | 1629 | } |
1628 | 1630 | ||
1629 | static const struct pci_device_id hpt366_pci_tbl[] = { | 1631 | static const struct pci_device_id hpt366_pci_tbl[] __devinitconst = { |
1630 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, | 1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), 0 }, |
1631 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, | 1633 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), 1 }, |
1632 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, | 1634 | { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), 2 }, |
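The hpt366 change above swaps an array lookup indexed by (rev - 3) for an explicit switch, after clamping the revision with min_t(u8, rev, 6) so future silicon cannot index past the known table. A small sketch of that clamp-then-dispatch pattern is below; hpt_name() and the returned strings are invented for the example.

#include <stdio.h>

/* Clamp the reported chip revision before dispatching on it, so an
 * unknown newer revision degrades to the closest known one instead of
 * reading past the end of a table. */
static const char *hpt_name(unsigned int rev)
{
	if (rev < 3)
		return "hpt36x";
	if (rev > 6)
		rev = 6;

	switch (rev) {
	case 3: return "hpt370";
	case 4: return "hpt370a";
	case 5: return "hpt372";
	default: return "hpt372n";
	}
}

int main(void)
{
	for (unsigned int rev = 2; rev <= 9; rev++)
		printf("rev %u -> %s\n", rev, hpt_name(rev));
	return 0;
}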
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 28e155a9e2a5..9e2b1964d71a 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -183,6 +183,9 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device " | |||
183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | 183 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. |
184 | * Don't use this with devices which don't have this bug. | 184 | * Don't use this with devices which don't have this bug. |
185 | * | 185 | * |
186 | * - delay inquiry | ||
187 | * Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry. | ||
188 | * | ||
186 | * - override internal blacklist | 189 | * - override internal blacklist |
187 | * Instead of adding to the built-in blacklist, use only the workarounds | 190 | * Instead of adding to the built-in blacklist, use only the workarounds |
188 | * specified in the module load parameter. | 191 | * specified in the module load parameter. |
@@ -195,6 +198,7 @@ MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | |||
195 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | 198 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) |
196 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | 199 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) |
197 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | 200 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) |
201 | ", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY) | ||
198 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | 202 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) |
199 | ", or a combination)"); | 203 | ", or a combination)"); |
200 | 204 | ||
@@ -357,6 +361,11 @@ static const struct { | |||
357 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | 361 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | |
358 | SBP2_WORKAROUND_MODE_SENSE_8, | 362 | SBP2_WORKAROUND_MODE_SENSE_8, |
359 | }, | 363 | }, |
364 | /* DViCO Momobay FX-3A with TSB42AA9A bridge */ { | ||
365 | .firmware_revision = 0x002800, | ||
366 | .model_id = 0x000000, | ||
367 | .workarounds = SBP2_WORKAROUND_DELAY_INQUIRY, | ||
368 | }, | ||
360 | /* Initio bridges, actually only needed for some older ones */ { | 369 | /* Initio bridges, actually only needed for some older ones */ { |
361 | .firmware_revision = 0x000200, | 370 | .firmware_revision = 0x000200, |
362 | .model_id = SBP2_ROM_VALUE_WILDCARD, | 371 | .model_id = SBP2_ROM_VALUE_WILDCARD, |
@@ -914,6 +923,9 @@ static int sbp2_start_device(struct sbp2_lu *lu) | |||
914 | sbp2_agent_reset(lu, 1); | 923 | sbp2_agent_reset(lu, 1); |
915 | sbp2_max_speed_and_size(lu); | 924 | sbp2_max_speed_and_size(lu); |
916 | 925 | ||
926 | if (lu->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY) | ||
927 | ssleep(SBP2_INQUIRY_DELAY); | ||
928 | |||
917 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); | 929 | error = scsi_add_device(lu->shost, 0, lu->ud->id, 0); |
918 | if (error) { | 930 | if (error) { |
919 | SBP2_ERR("scsi_add_device failed"); | 931 | SBP2_ERR("scsi_add_device failed"); |
@@ -1962,6 +1974,9 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
1962 | { | 1974 | { |
1963 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; | 1975 | struct sbp2_lu *lu = (struct sbp2_lu *)sdev->host->hostdata[0]; |
1964 | 1976 | ||
1977 | if (sdev->lun != 0 || sdev->id != lu->ud->id || sdev->channel != 0) | ||
1978 | return -ENODEV; | ||
1979 | |||
1965 | lu->sdev = sdev; | 1980 | lu->sdev = sdev; |
1966 | sdev->allow_restart = 1; | 1981 | sdev->allow_restart = 1; |
1967 | 1982 | ||
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index d2ecb0d8a1bb..80d8e097b065 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -343,6 +343,8 @@ enum sbp2lu_state_types { | |||
343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | 343 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 |
344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | 344 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 |
345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | 345 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 |
346 | #define SBP2_WORKAROUND_DELAY_INQUIRY 0x10 | ||
347 | #define SBP2_INQUIRY_DELAY 12 | ||
346 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | 348 | #define SBP2_WORKAROUND_OVERRIDE 0x100 |
347 | 349 | ||
348 | #endif /* SBP2_H */ | 350 | #endif /* SBP2_H */ |
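The new SBP2_WORKAROUND_DELAY_INQUIRY bit and SBP2_INQUIRY_DELAY constant follow the driver's existing pattern of per-device quirk flags ORed into a single workarounds mask and tested right before the affected step. A userspace sketch of that flag test is below; the flag values mirror the header, but toy_device and apply_workarounds() are illustrative only.

#include <stdio.h>

#define WORKAROUND_INQUIRY_36     0x2
#define WORKAROUND_FIX_CAPACITY   0x8
#define WORKAROUND_DELAY_INQUIRY  0x10
#define INQUIRY_DELAY_SECONDS     12

struct toy_device {
	const char *name;
	unsigned int workarounds;   /* OR of the flags above */
};

static void apply_workarounds(const struct toy_device *dev)
{
	if (dev->workarounds & WORKAROUND_DELAY_INQUIRY)
		printf("%s: wait %d s after login before INQUIRY\n",
		       dev->name, INQUIRY_DELAY_SECONDS);
	if (dev->workarounds & WORKAROUND_FIX_CAPACITY)
		printf("%s: report one sector less than the drive says\n",
		       dev->name);
}

int main(void)
{
	struct toy_device fx3a = {
		.name = "bridge with slow firmware",
		.workarounds = WORKAROUND_DELAY_INQUIRY,
	};

	apply_workarounds(&fx3a);
	return 0;
}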
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c index 73bfd1656f86..b8797c66676d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_mem.c +++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c | |||
@@ -136,14 +136,8 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list, | |||
136 | 136 | ||
137 | /* Find largest page shift we can use to cover buffers */ | 137 | /* Find largest page shift we can use to cover buffers */ |
138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) | 138 | for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) |
139 | if (num_phys_buf > 1) { | 139 | if ((1ULL << *shift) & mask) |
140 | if ((1ULL << *shift) & mask) | 140 | break; |
141 | break; | ||
142 | } else | ||
143 | if (1ULL << *shift >= | ||
144 | buffer_list[0].size + | ||
145 | (buffer_list[0].addr & ((1ULL << *shift) - 1))) | ||
146 | break; | ||
147 | 141 | ||
148 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); | 142 | buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); |
149 | buffer_list[0].addr &= ~0ull << *shift; | 143 | buffer_list[0].addr &= ~0ull << *shift; |
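The cxgb3 hunk above drops the single-buffer special case and always derives the page shift from the combined mask: the first bit set at or above PAGE_SHIFT bounds the largest page size that still covers every buffer boundary. A standalone sketch, assuming the mask is simply the OR of all buffer addresses and sizes, is shown here; largest_page_shift() and TOY_PAGE_SHIFT are names for the example only.

#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12   /* 4 KiB base pages */

/* Scan upward from the base page shift and stop at the first bit set in
 * the OR of all addresses and sizes; any larger page would straddle a
 * buffer boundary. */
static int largest_page_shift(const uint64_t *addr, const uint64_t *size,
			      int n)
{
	uint64_t mask = 0;
	int shift;

	for (int i = 0; i < n; i++)
		mask |= addr[i] | size[i];

	for (shift = TOY_PAGE_SHIFT; shift < 27; shift++)
		if ((1ULL << shift) & mask)
			break;
	return shift;
}

int main(void)
{
	uint64_t addr[] = { 0x10000, 0x20000 };   /* 64 KiB aligned */
	uint64_t size[] = { 0x10000, 0x10000 };

	/* prints 16, i.e. 64 KiB pages cover both buffers */
	printf("usable page shift: %d\n", largest_page_shift(addr, size, 2));
	return 0;
}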
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 7f8853b44ee1..b2112f5a422f 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -567,12 +567,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
567 | 567 | ||
568 | /* Init the adapter */ | 568 | /* Init the adapter */ |
569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); | 569 | nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev); |
570 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
571 | if (!nesdev->nesadapter) { | 570 | if (!nesdev->nesadapter) { |
572 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); | 571 | printk(KERN_ERR PFX "Unable to initialize adapter.\n"); |
573 | ret = -ENOMEM; | 572 | ret = -ENOMEM; |
574 | goto bail5; | 573 | goto bail5; |
575 | } | 574 | } |
575 | nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval; | ||
576 | 576 | ||
577 | /* nesdev->base_doorbell_index = | 577 | /* nesdev->base_doorbell_index = |
578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ | 578 | nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */ |
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index fd57e8a1582f..a48b288618ec 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -285,6 +285,21 @@ struct nes_device { | |||
285 | }; | 285 | }; |
286 | 286 | ||
287 | 287 | ||
288 | static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) | ||
289 | { | ||
290 | u32 crc_value; | ||
291 | crc_value = crc32c(~0, (void *)nes_quad, sizeof (struct nes_v4_quad)); | ||
292 | |||
293 | /* | ||
294 | * With commit ef19454b ("[LIB] crc32c: Keep intermediate crc | ||
295 | * state in cpu order"), behavior of crc32c changes on | ||
296 | * big-endian platforms. Our algorithm expects the previous | ||
297 | * behavior; otherwise we have RDMA connection establishment | ||
298 | * issue on big-endian. | ||
299 | */ | ||
300 | return cpu_to_le32(crc_value); | ||
301 | } | ||
302 | |||
288 | static inline void | 303 | static inline void |
289 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) | 304 | set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) |
290 | { | 305 | { |
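The new get_crc_value() helper exists because the in-kernel crc32c started keeping its intermediate state in CPU order, so on big-endian hosts the raw result no longer matches what the connection hash expects; pinning the value to little-endian order restores the old behaviour. Below is a self-contained software CRC32C (reflected Castagnoli polynomial) plus that byte-order fixup; it illustrates the idea only and is not the kernel's crc32c implementation.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Bitwise CRC32C, reflected polynomial 0x82F63B78, seeded with ~0 the
 * way the driver seeds crc32c(). */
static uint32_t crc32c_sw(uint32_t crc, const void *data, size_t len)
{
	const unsigned char *p = data;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return crc;
}

/* Emit the CRC in little-endian byte order on any host, which is what
 * cpu_to_le32() achieves in the driver. */
static void put_le32(unsigned char out[4], uint32_t v)
{
	out[0] = v & 0xff;
	out[1] = (v >> 8) & 0xff;
	out[2] = (v >> 16) & 0xff;
	out[3] = (v >> 24) & 0xff;
}

int main(void)
{
	const char quad[] = "10.0.0.1:5000->10.0.0.2:6000";
	unsigned char key[4];

	put_le32(key, crc32c_sw(~0u, quad, strlen(quad)));
	printf("hash key bytes: %02x %02x %02x %02x\n",
	       key[0], key[1], key[2], key[3]);
	return 0;
}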
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index bd5cfeaac203..39adb267fb15 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -370,11 +370,11 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
370 | int ret = 0; | 370 | int ret = 0; |
371 | u32 was_timer_set; | 371 | u32 was_timer_set; |
372 | 372 | ||
373 | if (!cm_node) | ||
374 | return -EINVAL; | ||
373 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); | 375 | new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); |
374 | if (!new_send) | 376 | if (!new_send) |
375 | return -1; | 377 | return -1; |
376 | if (!cm_node) | ||
377 | return -EINVAL; | ||
378 | 378 | ||
379 | /* new_send->timetosend = currenttime */ | 379 | /* new_send->timetosend = currenttime */ |
380 | new_send->retrycount = NES_DEFAULT_RETRYS; | 380 | new_send->retrycount = NES_DEFAULT_RETRYS; |
@@ -947,6 +947,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, | |||
947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); | 947 | nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); |
948 | 948 | ||
949 | kfree(listener); | 949 | kfree(listener); |
950 | listener = NULL; | ||
950 | ret = 0; | 951 | ret = 0; |
951 | cm_listens_destroyed++; | 952 | cm_listens_destroyed++; |
952 | } else { | 953 | } else { |
@@ -2319,6 +2320,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2319 | struct iw_cm_event cm_event; | 2320 | struct iw_cm_event cm_event; |
2320 | struct nes_hw_qp_wqe *wqe; | 2321 | struct nes_hw_qp_wqe *wqe; |
2321 | struct nes_v4_quad nes_quad; | 2322 | struct nes_v4_quad nes_quad; |
2323 | u32 crc_value; | ||
2322 | int ret; | 2324 | int ret; |
2323 | 2325 | ||
2324 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); | 2326 | ibqp = nes_get_qp(cm_id->device, conn_param->qpn); |
@@ -2435,8 +2437,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
2435 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2437 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2436 | 2438 | ||
2437 | /* Produce hash key */ | 2439 | /* Produce hash key */ |
2438 | nesqp->hte_index = cpu_to_be32( | 2440 | crc_value = get_crc_value(&nes_quad); |
2439 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2441 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2440 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", | 2442 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", |
2441 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); | 2443 | nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); |
2442 | 2444 | ||
@@ -2750,6 +2752,7 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2750 | struct iw_cm_event cm_event; | 2752 | struct iw_cm_event cm_event; |
2751 | struct nes_hw_qp_wqe *wqe; | 2753 | struct nes_hw_qp_wqe *wqe; |
2752 | struct nes_v4_quad nes_quad; | 2754 | struct nes_v4_quad nes_quad; |
2755 | u32 crc_value; | ||
2753 | int ret; | 2756 | int ret; |
2754 | 2757 | ||
2755 | /* get all our handles */ | 2758 | /* get all our handles */ |
@@ -2827,8 +2830,8 @@ void cm_event_connected(struct nes_cm_event *event) | |||
2827 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; | 2830 | nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; |
2828 | 2831 | ||
2829 | /* Produce hash key */ | 2832 | /* Produce hash key */ |
2830 | nesqp->hte_index = cpu_to_be32( | 2833 | crc_value = get_crc_value(&nes_quad); |
2831 | crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff); | 2834 | nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); |
2832 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", | 2835 | nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n", |
2833 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); | 2836 | nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask); |
2834 | 2837 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 7c4c0fbf0abd..49e53e4c1ebe 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -156,15 +156,14 @@ static void nes_nic_tune_timer(struct nes_device *nesdev) | |||
156 | 156 | ||
157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); | 157 | spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags); |
158 | 158 | ||
159 | if (shared_timer->cq_count_old < cq_count) { | 159 | if (shared_timer->cq_count_old <= cq_count) |
160 | if (cq_count > shared_timer->threshold_low) | 160 | shared_timer->cq_direction_downward = 0; |
161 | shared_timer->cq_direction_downward=0; | 161 | else |
162 | } | ||
163 | if (shared_timer->cq_count_old >= cq_count) | ||
164 | shared_timer->cq_direction_downward++; | 162 | shared_timer->cq_direction_downward++; |
165 | shared_timer->cq_count_old = cq_count; | 163 | shared_timer->cq_count_old = cq_count; |
166 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { | 164 | if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) { |
167 | if (cq_count <= shared_timer->threshold_low) { | 165 | if (cq_count <= shared_timer->threshold_low && |
166 | shared_timer->threshold_low > 4) { | ||
168 | shared_timer->threshold_low = shared_timer->threshold_low/2; | 167 | shared_timer->threshold_low = shared_timer->threshold_low/2; |
169 | shared_timer->cq_direction_downward=0; | 168 | shared_timer->cq_direction_downward=0; |
170 | nesdev->currcq_count = 0; | 169 | nesdev->currcq_count = 0; |
@@ -1728,7 +1727,6 @@ int nes_napi_isr(struct nes_device *nesdev) | |||
1728 | nesdev->int_req &= ~NES_INT_TIMER; | 1727 | nesdev->int_req &= ~NES_INT_TIMER; |
1729 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1728 | nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1730 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1729 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1731 | nesadapter->tune_timer.timer_in_use_old = 0; | ||
1732 | } | 1730 | } |
1733 | nesdev->deepcq_count = 0; | 1731 | nesdev->deepcq_count = 0; |
1734 | return 1; | 1732 | return 1; |
@@ -1867,7 +1865,6 @@ void nes_dpc(unsigned long param) | |||
1867 | nesdev->int_req &= ~NES_INT_TIMER; | 1865 | nesdev->int_req &= ~NES_INT_TIMER; |
1868 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); | 1866 | nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req)); |
1869 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); | 1867 | nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req); |
1870 | nesdev->nesadapter->tune_timer.timer_in_use_old = 0; | ||
1871 | } else { | 1868 | } else { |
1872 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); | 1869 | nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req)); |
1873 | } | 1870 | } |
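The nes_nic_tune_timer() change above makes the downward-trend detection stricter: the trend counter now resets whenever the completion count stops falling, threshold_low is never halved below 4, and the header change just below raises NES_NIC_CQ_DOWNWARD_TREND to 16. A simplified model of that adaptation loop follows; toy_timer and tune() are illustrative only, and the real driver adjusts several other thresholds as well.

#include <stdio.h>

#define DOWNWARD_TREND 16

struct toy_timer {
	unsigned int cq_count_old;
	unsigned int downward;
	unsigned int threshold_low;
};

/* Halve the low threshold only after a sustained drop in completions,
 * and keep a floor of 4 so the timer can adapt back up later. */
static void tune(struct toy_timer *t, unsigned int cq_count)
{
	if (t->cq_count_old <= cq_count)
		t->downward = 0;
	else
		t->downward++;
	t->cq_count_old = cq_count;

	if (t->downward > DOWNWARD_TREND &&
	    cq_count <= t->threshold_low && t->threshold_low > 4) {
		t->threshold_low /= 2;
		t->downward = 0;
	}
}

int main(void)
{
	struct toy_timer t = {
		.cq_count_old = 64, .downward = 0, .threshold_low = 16,
	};
	unsigned int load = 40;

	for (int i = 0; i < 60; i++) {
		tune(&t, load);
		if (load > 1)
			load--;   /* completions keep dropping */
	}
	printf("threshold_low ends at %u\n", t.threshold_low);
	return 0;
}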
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 1e10df550c9e..b7e2844f096b 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -962,7 +962,7 @@ struct nes_arp_entry { | |||
962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 | 962 | #define DEFAULT_JUMBO_NES_QL_LOW 12 |
963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 | 963 | #define DEFAULT_JUMBO_NES_QL_TARGET 40 |
964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 | 964 | #define DEFAULT_JUMBO_NES_QL_HIGH 128 |
965 | #define NES_NIC_CQ_DOWNWARD_TREND 8 | 965 | #define NES_NIC_CQ_DOWNWARD_TREND 16 |
966 | 966 | ||
967 | struct nes_hw_tune_timer { | 967 | struct nes_hw_tune_timer { |
968 | //u16 cq_count; | 968 | //u16 cq_count; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4dafbe16e82a..a651e9d9f0ef 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -929,7 +929,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev, | |||
929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); | 929 | NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db); |
930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", | 930 | nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n", |
931 | nespd->mmap_db_index, nespd->pd_id); | 931 | nespd->mmap_db_index, nespd->pd_id); |
932 | if (nespd->mmap_db_index > NES_MAX_USER_DB_REGIONS) { | 932 | if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) { |
933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); | 933 | nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n"); |
934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); | 934 | nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num); |
935 | kfree(nespd); | 935 | kfree(nespd); |
@@ -1327,7 +1327,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd, | |||
1327 | (long long unsigned int)req.user_wqe_buffers); | 1327 | (long long unsigned int)req.user_wqe_buffers); |
1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); | 1328 | nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num); |
1329 | kfree(nesqp->allocated_buffer); | 1329 | kfree(nesqp->allocated_buffer); |
1330 | return ERR_PTR(-ENOMEM); | 1330 | return ERR_PTR(-EFAULT); |
1331 | } | 1331 | } |
1332 | } | 1332 | } |
1333 | 1333 | ||
@@ -1674,6 +1674,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1674 | } | 1674 | } |
1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", | 1675 | nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n", |
1676 | (unsigned long)req.user_cq_buffer, entries); | 1676 | (unsigned long)req.user_cq_buffer, entries); |
1677 | err = 1; | ||
1677 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { | 1678 | list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) { |
1678 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { | 1679 | if (nespbl->user_base == (unsigned long )req.user_cq_buffer) { |
1679 | list_del(&nespbl->list); | 1680 | list_del(&nespbl->list); |
@@ -1686,7 +1687,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1686 | if (err) { | 1687 | if (err) { |
1687 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); | 1688 | nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num); |
1688 | kfree(nescq); | 1689 | kfree(nescq); |
1689 | return ERR_PTR(err); | 1690 | return ERR_PTR(-EFAULT); |
1690 | } | 1691 | } |
1691 | 1692 | ||
1692 | pbl_entries = nespbl->pbl_size >> 3; | 1693 | pbl_entries = nespbl->pbl_size >> 3; |
@@ -1831,9 +1832,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries, | |||
1831 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); | 1832 | spin_unlock_irqrestore(&nesdev->cqp.lock, flags); |
1832 | } | 1833 | } |
1833 | } | 1834 | } |
1834 | nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X," | ||
1835 | " minor code = 0x%04X\n", | ||
1836 | nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code); | ||
1837 | if (!context) | 1835 | if (!context) |
1838 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, | 1836 | pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem, |
1839 | nescq->hw_cq.cq_pbase); | 1837 | nescq->hw_cq.cq_pbase); |
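Two small but real fixes sit in the nes_verbs.c hunks above: the doorbell-index bound check becomes >=, closing an off-by-one, and nes_create_cq() presets err to 1 before scanning the registered-buffer list so a buffer that is never found reliably reaches the -EFAULT path. The found-flag idiom behind the second fix is sketched below; toy_pbl and find_buffer() are invented for the example.

#include <stdio.h>
#include <stddef.h>

struct toy_pbl {
	unsigned long user_base;
	struct toy_pbl *next;
};

/* Assume "not found" up front; only a successful match clears the
 * error. This mirrors setting err = 1 before the list walk so an empty
 * or non-matching list cannot fall through as success. */
static struct toy_pbl *find_buffer(struct toy_pbl *head, unsigned long base,
				   int *err)
{
	*err = 1;
	for (struct toy_pbl *p = head; p; p = p->next) {
		if (p->user_base == base) {
			*err = 0;
			return p;
		}
	}
	return NULL;
}

int main(void)
{
	struct toy_pbl b = { .user_base = 0x1000, .next = NULL };
	struct toy_pbl a = { .user_base = 0x2000, .next = &b };
	int err;

	find_buffer(&a, 0x3000, &err);
	printf("lookup of 0x3000: err=%d, caller would return -EFAULT\n", err);
	return 0;
}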
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 8b10d9f23bef..c5263d63aca3 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -42,14 +42,14 @@ config INPUT_M68K_BEEP | |||
42 | 42 | ||
43 | config INPUT_APANEL | 43 | config INPUT_APANEL |
44 | tristate "Fujitsu Lifebook Application Panel buttons" | 44 | tristate "Fujitsu Lifebook Application Panel buttons" |
45 | depends on X86 | 45 | depends on X86 && I2C && LEDS_CLASS |
46 | select I2C_I801 | ||
47 | select INPUT_POLLDEV | 46 | select INPUT_POLLDEV |
48 | select CHECK_SIGNATURE | 47 | select CHECK_SIGNATURE |
49 | help | 48 | help |
50 | Say Y here for support of the Application Panel buttons, used on | 49 | Say Y here for support of the Application Panel buttons, used on |
51 | Fujitsu Lifebook. These are attached to the mainboard through | 50 | Fujitsu Lifebook. These are attached to the mainboard through |
52 | an SMBus interface managed by the I2C Intel ICH (i801) driver. | 51 | an SMBus interface managed by the I2C Intel ICH (i801) driver, |
52 | which you should also build for this kernel. | ||
53 | 53 | ||
54 | To compile this driver as a module, choose M here: the module will | 54 | To compile this driver as a module, choose M here: the module will |
55 | be called apanel. | 55 | be called apanel. |
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c index aacedec4986f..827c32c16795 100644 --- a/drivers/isdn/gigaset/common.c +++ b/drivers/isdn/gigaset/common.c | |||
@@ -637,7 +637,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | |||
637 | err("maximum number of devices exceeded"); | 637 | err("maximum number of devices exceeded"); |
638 | return NULL; | 638 | return NULL; |
639 | } | 639 | } |
640 | mutex_init(&cs->mutex); | ||
641 | 640 | ||
642 | gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); | 641 | gig_dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); |
643 | cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); | 642 | cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); |
@@ -898,8 +897,10 @@ int gigaset_shutdown(struct cardstate *cs) | |||
898 | { | 897 | { |
899 | mutex_lock(&cs->mutex); | 898 | mutex_lock(&cs->mutex); |
900 | 899 | ||
901 | if (!(cs->flags & VALID_MINOR)) | 900 | if (!(cs->flags & VALID_MINOR)) { |
901 | mutex_unlock(&cs->mutex); | ||
902 | return -1; | 902 | return -1; |
903 | } | ||
903 | 904 | ||
904 | cs->waiting = 1; | 905 | cs->waiting = 1; |
905 | 906 | ||
@@ -1086,6 +1087,7 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | |||
1086 | drv->cs[i].driver = drv; | 1087 | drv->cs[i].driver = drv; |
1087 | drv->cs[i].ops = drv->ops; | 1088 | drv->cs[i].ops = drv->ops; |
1088 | drv->cs[i].minor_index = i; | 1089 | drv->cs[i].minor_index = i; |
1090 | mutex_init(&drv->cs[i].mutex); | ||
1089 | } | 1091 | } |
1090 | 1092 | ||
1091 | gigaset_if_initdriver(drv, procname, devname); | 1093 | gigaset_if_initdriver(drv, procname, devname); |
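The gigaset changes above pair two related fixes: gigaset_shutdown() now drops cs->mutex on its early-return path, since bailing out with the mutex still held would deadlock the next caller, and mutex_init() moves from gigaset_initcs() into the per-slot loop of gigaset_initdriver(), presumably so the mutex is always initialized before any code path can try to take it. A minimal sketch of the unlock-on-early-return pattern, with a pthread mutex standing in for the kernel mutex and a made-up state flag:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int valid_minor;                         /* made-up state flag */

static int shutdown_sketch(void)
{
        pthread_mutex_lock(&lock);
        if (!valid_minor) {
                pthread_mutex_unlock(&lock);    /* the fix: never return locked */
                return -1;
        }
        /* ... normal shutdown work under the lock ... */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        printf("first call:  %d\n", shutdown_sketch());
        /* without the unlock above, this second call would block forever */
        printf("second call: %d\n", shutdown_sketch());
        return 0;
}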
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 7993e01f9fc5..76043dedba5b 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c | |||
@@ -725,23 +725,6 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
725 | 725 | ||
726 | switch (adapter->type) { | 726 | switch (adapter->type) { |
727 | case AVM_FRITZ_PCIV2: | 727 | case AVM_FRITZ_PCIV2: |
728 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
729 | "fcpcipnp", adapter); | ||
730 | break; | ||
731 | case AVM_FRITZ_PCI: | ||
732 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
733 | "fcpcipnp", adapter); | ||
734 | break; | ||
735 | case AVM_FRITZ_PNP: | ||
736 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
737 | "fcpcipnp", adapter); | ||
738 | break; | ||
739 | } | ||
740 | if (retval) | ||
741 | goto err_region; | ||
742 | |||
743 | switch (adapter->type) { | ||
744 | case AVM_FRITZ_PCIV2: | ||
745 | case AVM_FRITZ_PCI: | 728 | case AVM_FRITZ_PCI: |
746 | val = inl(adapter->io); | 729 | val = inl(adapter->io); |
747 | break; | 730 | break; |
@@ -796,6 +779,23 @@ static int __devinit fcpcipnp_setup(struct fritz_adapter *adapter) | |||
796 | 779 | ||
797 | switch (adapter->type) { | 780 | switch (adapter->type) { |
798 | case AVM_FRITZ_PCIV2: | 781 | case AVM_FRITZ_PCIV2: |
782 | retval = request_irq(adapter->irq, fcpci2_irq, IRQF_SHARED, | ||
783 | "fcpcipnp", adapter); | ||
784 | break; | ||
785 | case AVM_FRITZ_PCI: | ||
786 | retval = request_irq(adapter->irq, fcpci_irq, IRQF_SHARED, | ||
787 | "fcpcipnp", adapter); | ||
788 | break; | ||
789 | case AVM_FRITZ_PNP: | ||
790 | retval = request_irq(adapter->irq, fcpci_irq, 0, | ||
791 | "fcpcipnp", adapter); | ||
792 | break; | ||
793 | } | ||
794 | if (retval) | ||
795 | goto err_region; | ||
796 | |||
797 | switch (adapter->type) { | ||
798 | case AVM_FRITZ_PCIV2: | ||
799 | fcpci2_init(adapter); | 799 | fcpci2_init(adapter); |
800 | isacsx_setup(&adapter->isac); | 800 | isacsx_setup(&adapter->isac); |
801 | break; | 801 | break; |
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c index f93de4a30355..78f7660c1d0e 100644 --- a/drivers/isdn/i4l/isdn_ttyfax.c +++ b/drivers/isdn/i4l/isdn_ttyfax.c | |||
@@ -906,7 +906,8 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info) | |||
906 | sprintf(rs, "\r\n0-2"); | 906 | sprintf(rs, "\r\n0-2"); |
907 | isdn_tty_at_cout(rs, info); | 907 | isdn_tty_at_cout(rs, info); |
908 | } else { | 908 | } else { |
909 | if ((f->phase != ISDN_FAX_PHASE_D) || (!info->faxonline & 1)) | 909 | if ((f->phase != ISDN_FAX_PHASE_D) || |
910 | (!(info->faxonline & 1))) | ||
910 | PARSE_ERROR1; | 911 | PARSE_ERROR1; |
911 | par = isdn_getnum(p); | 912 | par = isdn_getnum(p); |
912 | if ((par < 0) || (par > 2)) | 913 | if ((par < 0) || (par > 2)) |
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 655ef9a3f4df..a335c85a736e 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c | |||
@@ -1289,7 +1289,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1289 | } | 1289 | } |
1290 | break; | 1290 | break; |
1291 | case ISDN_CMD_CLREAZ: | 1291 | case ISDN_CMD_CLREAZ: |
1292 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1292 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1293 | return -ENODEV; | 1293 | return -ENODEV; |
1294 | if (card->leased) | 1294 | if (card->leased) |
1295 | break; | 1295 | break; |
@@ -1333,7 +1333,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card) | |||
1333 | } | 1333 | } |
1334 | break; | 1334 | break; |
1335 | case ISDN_CMD_SETL3: | 1335 | case ISDN_CMD_SETL3: |
1336 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1336 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1337 | return -ENODEV; | 1337 | return -ENODEV; |
1338 | return 0; | 1338 | return 0; |
1339 | default: | 1339 | default: |
@@ -1380,7 +1380,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel) | |||
1380 | isdnloop_card *card = isdnloop_findcard(id); | 1380 | isdnloop_card *card = isdnloop_findcard(id); |
1381 | 1381 | ||
1382 | if (card) { | 1382 | if (card) { |
1383 | if (!card->flags & ISDNLOOP_FLAGS_RUNNING) | 1383 | if (!(card->flags & ISDNLOOP_FLAGS_RUNNING)) |
1384 | return -ENODEV; | 1384 | return -ENODEV; |
1385 | return (isdnloop_writecmd(buf, len, 1, card)); | 1385 | return (isdnloop_writecmd(buf, len, 1, card)); |
1386 | } | 1386 | } |
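The isdn_ttyfax.c and isdnloop.c hunks above fix the same C precedence bug: logical NOT binds tighter than bitwise AND, so !card->flags & ISDNLOOP_FLAGS_RUNNING negates the whole flags word first and then masks the 0/1 result, which does not test the RUNNING bit at all. A standalone sketch of the difference, using a made-up flag value rather than the driver's:

#include <stdio.h>

#define RUNNING 0x04    /* hypothetical flag bit, not the driver's value */

int main(void)
{
        unsigned int flags = 0;         /* device not running */

        /* buggy form: !flags (1 here) is masked with 0x04 and yields 0,
         * so a "not running" guard written this way never fires */
        printf("!flags & RUNNING   = %d\n", !flags & RUNNING);

        /* intended form: test the RUNNING bit first, then negate */
        printf("!(flags & RUNNING) = %d\n", !(flags & RUNNING));
        return 0;
}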
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7aeceedcf7d4..831aed9c56ff 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -1047,6 +1047,11 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) | 1047 | if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) |
1048 | return; | 1048 | return; |
1049 | bitmap->daemon_lastrun = jiffies; | 1049 | bitmap->daemon_lastrun = jiffies; |
1050 | if (bitmap->allclean) { | ||
1051 | bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; | ||
1052 | return; | ||
1053 | } | ||
1054 | bitmap->allclean = 1; | ||
1050 | 1055 | ||
1051 | for (j = 0; j < bitmap->chunks; j++) { | 1056 | for (j = 0; j < bitmap->chunks; j++) { |
1052 | bitmap_counter_t *bmc; | 1057 | bitmap_counter_t *bmc; |
@@ -1068,8 +1073,10 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1068 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); | 1073 | clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); |
1069 | 1074 | ||
1070 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1075 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1071 | if (need_write) | 1076 | if (need_write) { |
1072 | write_page(bitmap, page, 0); | 1077 | write_page(bitmap, page, 0); |
1078 | bitmap->allclean = 0; | ||
1079 | } | ||
1073 | continue; | 1080 | continue; |
1074 | } | 1081 | } |
1075 | 1082 | ||
@@ -1098,6 +1105,9 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1098 | /* | 1105 | /* |
1099 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); | 1106 | if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc); |
1100 | */ | 1107 | */ |
1108 | if (*bmc) | ||
1109 | bitmap->allclean = 0; | ||
1110 | |||
1101 | if (*bmc == 2) { | 1111 | if (*bmc == 2) { |
1102 | *bmc=1; /* maybe clear the bit next time */ | 1112 | *bmc=1; /* maybe clear the bit next time */ |
1103 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1113 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
@@ -1132,6 +1142,8 @@ void bitmap_daemon_work(struct bitmap *bitmap) | |||
1132 | } | 1142 | } |
1133 | } | 1143 | } |
1134 | 1144 | ||
1145 | if (bitmap->allclean == 0) | ||
1146 | bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ; | ||
1135 | } | 1147 | } |
1136 | 1148 | ||
1137 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, | 1149 | static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, |
@@ -1226,6 +1238,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect | |||
1226 | sectors -= blocks; | 1238 | sectors -= blocks; |
1227 | else sectors = 0; | 1239 | else sectors = 0; |
1228 | } | 1240 | } |
1241 | bitmap->allclean = 0; | ||
1229 | return 0; | 1242 | return 0; |
1230 | } | 1243 | } |
1231 | 1244 | ||
@@ -1296,6 +1309,7 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, | |||
1296 | } | 1309 | } |
1297 | } | 1310 | } |
1298 | spin_unlock_irq(&bitmap->lock); | 1311 | spin_unlock_irq(&bitmap->lock); |
1312 | bitmap->allclean = 0; | ||
1299 | return rv; | 1313 | return rv; |
1300 | } | 1314 | } |
1301 | 1315 | ||
@@ -1332,6 +1346,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab | |||
1332 | } | 1346 | } |
1333 | unlock: | 1347 | unlock: |
1334 | spin_unlock_irqrestore(&bitmap->lock, flags); | 1348 | spin_unlock_irqrestore(&bitmap->lock, flags); |
1349 | bitmap->allclean = 0; | ||
1335 | } | 1350 | } |
1336 | 1351 | ||
1337 | void bitmap_close_sync(struct bitmap *bitmap) | 1352 | void bitmap_close_sync(struct bitmap *bitmap) |
@@ -1399,7 +1414,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n | |||
1399 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); | 1414 | set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN); |
1400 | } | 1415 | } |
1401 | spin_unlock_irq(&bitmap->lock); | 1416 | spin_unlock_irq(&bitmap->lock); |
1402 | 1417 | bitmap->allclean = 0; | |
1403 | } | 1418 | } |
1404 | 1419 | ||
1405 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ | 1420 | /* dirty the memory and file bits for bitmap chunks "s" to "e" */ |
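The bitmap.c hunks above introduce an allclean flag so that an idle array stops waking the md daemon: each daemon pass starts by assuming the bitmap is clean, any write, sync or non-zero counter clears the flag, and once a pass completes with allclean still set the thread timeout is pushed out to MAX_SCHEDULE_TIMEOUT. A simplified sketch of that pattern, using plain C types rather than the kernel structures:

#include <stdio.h>
#include <limits.h>

#define MAX_SCHEDULE_TIMEOUT LONG_MAX   /* stand-in for the kernel constant */

struct bitmap_sketch {
        int allclean;           /* nothing changed since the last pass */
        long thread_timeout;    /* how soon the daemon wakes up again */
        long daemon_sleep;      /* configured interval */
};

/* any write or resync activity marks the bitmap dirty again
 * (this mirrors what the hunks add to bitmap_startwrite() and friends) */
static void note_activity(struct bitmap_sketch *b)
{
        b->allclean = 0;
}

/* one daemon pass: go quiescent while idle, otherwise rearm the timer */
static void daemon_pass(struct bitmap_sketch *b, int dirty_chunks)
{
        if (b->allclean) {
                b->thread_timeout = MAX_SCHEDULE_TIMEOUT;
                return;
        }
        b->allclean = 1;                /* assume clean until proven otherwise */
        if (dirty_chunks)
                b->allclean = 0;        /* still draining dirty counters */
        if (!b->allclean)
                b->thread_timeout = b->daemon_sleep;
}

int main(void)
{
        struct bitmap_sketch b = { .allclean = 0, .daemon_sleep = 5 };

        daemon_pass(&b, 1);     /* dirty: timer stays at daemon_sleep */
        daemon_pass(&b, 0);     /* nothing left: next pass may idle */
        daemon_pass(&b, 0);     /* idle: timeout pushed to MAX_SCHEDULE_TIMEOUT */
        note_activity(&b);      /* a new write re-arms the daemon */
        printf("timeout=%ld allclean=%d\n", b.thread_timeout, b.allclean);
        return 0;
}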
diff --git a/drivers/md/md.c b/drivers/md/md.c index 7da6ec244e15..827824a9f3e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -1105,7 +1105,11 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; | 1105 | rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; |
1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; | 1106 | bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; |
1107 | if (rdev->sb_size & bmask) | 1107 | if (rdev->sb_size & bmask) |
1108 | rdev-> sb_size = (rdev->sb_size | bmask)+1; | 1108 | rdev->sb_size = (rdev->sb_size | bmask) + 1; |
1109 | |||
1110 | if (minor_version | ||
1111 | && rdev->data_offset < sb_offset + (rdev->sb_size/512)) | ||
1112 | return -EINVAL; | ||
1109 | 1113 | ||
1110 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) | 1114 | if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) |
1111 | rdev->desc_nr = -1; | 1115 | rdev->desc_nr = -1; |
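The new check in super_1_load() rejects a version-1 superblock whose data area would overlap the superblock itself: sb_size is first rounded up to the device's hardware sector size, so data_offset must begin at least sb_size/512 sectors past sb_offset. A small worked example with assumed numbers (the 4 KiB-sector device is hypothetical, not taken from the patch):

#include <stdio.h>

int main(void)
{
        /* illustrative numbers only: 4 KiB hardware sectors,
         * a superblock needing 1288 bytes (max_dev * 2 + 256) */
        unsigned long sb_offset   = 8;          /* sectors, assumed */
        unsigned long data_offset = 10;         /* sectors, assumed */
        unsigned long sb_size     = 1288;       /* bytes */
        unsigned long bmask       = 4096 - 1;

        if (sb_size & bmask)
                sb_size = (sb_size | bmask) + 1;        /* rounds up to 4096 */

        /* data must start past the rounded superblock: 8 + 4096/512 = 16 */
        if (data_offset < sb_offset + sb_size / 512)
                printf("rejected: data_offset %lu overlaps the superblock\n",
                       data_offset);
        return 0;
}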
@@ -1137,7 +1141,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) | |||
1137 | else | 1141 | else |
1138 | ret = 0; | 1142 | ret = 0; |
1139 | } | 1143 | } |
1140 | if (minor_version) | 1144 | if (minor_version) |
1141 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; | 1145 | rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; |
1142 | else | 1146 | else |
1143 | rdev->size = rdev->sb_offset; | 1147 | rdev->size = rdev->sb_offset; |
@@ -1499,7 +1503,8 @@ static void export_rdev(mdk_rdev_t * rdev) | |||
1499 | free_disk_sb(rdev); | 1503 | free_disk_sb(rdev); |
1500 | list_del_init(&rdev->same_set); | 1504 | list_del_init(&rdev->same_set); |
1501 | #ifndef MODULE | 1505 | #ifndef MODULE |
1502 | md_autodetect_dev(rdev->bdev->bd_dev); | 1506 | if (test_bit(AutoDetected, &rdev->flags)) |
1507 | md_autodetect_dev(rdev->bdev->bd_dev); | ||
1503 | #endif | 1508 | #endif |
1504 | unlock_rdev(rdev); | 1509 | unlock_rdev(rdev); |
1505 | kobject_put(&rdev->kobj); | 1510 | kobject_put(&rdev->kobj); |
@@ -1996,9 +2001,11 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
1996 | char *e; | 2001 | char *e; |
1997 | unsigned long long size = simple_strtoull(buf, &e, 10); | 2002 | unsigned long long size = simple_strtoull(buf, &e, 10); |
1998 | unsigned long long oldsize = rdev->size; | 2003 | unsigned long long oldsize = rdev->size; |
2004 | mddev_t *my_mddev = rdev->mddev; | ||
2005 | |||
1999 | if (e==buf || (*e && *e != '\n')) | 2006 | if (e==buf || (*e && *e != '\n')) |
2000 | return -EINVAL; | 2007 | return -EINVAL; |
2001 | if (rdev->mddev->pers) | 2008 | if (my_mddev->pers) |
2002 | return -EBUSY; | 2009 | return -EBUSY; |
2003 | rdev->size = size; | 2010 | rdev->size = size; |
2004 | if (size > oldsize && rdev->mddev->external) { | 2011 | if (size > oldsize && rdev->mddev->external) { |
@@ -2011,7 +2018,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2011 | int overlap = 0; | 2018 | int overlap = 0; |
2012 | struct list_head *tmp, *tmp2; | 2019 | struct list_head *tmp, *tmp2; |
2013 | 2020 | ||
2014 | mddev_unlock(rdev->mddev); | 2021 | mddev_unlock(my_mddev); |
2015 | for_each_mddev(mddev, tmp) { | 2022 | for_each_mddev(mddev, tmp) { |
2016 | mdk_rdev_t *rdev2; | 2023 | mdk_rdev_t *rdev2; |
2017 | 2024 | ||
@@ -2031,7 +2038,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2031 | break; | 2038 | break; |
2032 | } | 2039 | } |
2033 | } | 2040 | } |
2034 | mddev_lock(rdev->mddev); | 2041 | mddev_lock(my_mddev); |
2035 | if (overlap) { | 2042 | if (overlap) { |
2036 | /* Someone else could have slipped in a size | 2043 | /* Someone else could have slipped in a size |
2037 | * change here, but doing so is just silly. | 2044 | * change here, but doing so is just silly. |
@@ -2043,8 +2050,8 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
2043 | return -EBUSY; | 2050 | return -EBUSY; |
2044 | } | 2051 | } |
2045 | } | 2052 | } |
2046 | if (size < rdev->mddev->size || rdev->mddev->size == 0) | 2053 | if (size < my_mddev->size || my_mddev->size == 0) |
2047 | rdev->mddev->size = size; | 2054 | my_mddev->size = size; |
2048 | return len; | 2055 | return len; |
2049 | } | 2056 | } |
2050 | 2057 | ||
@@ -2065,10 +2072,21 @@ rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | |||
2065 | { | 2072 | { |
2066 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2073 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2067 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2074 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2075 | mddev_t *mddev = rdev->mddev; | ||
2076 | ssize_t rv; | ||
2068 | 2077 | ||
2069 | if (!entry->show) | 2078 | if (!entry->show) |
2070 | return -EIO; | 2079 | return -EIO; |
2071 | return entry->show(rdev, page); | 2080 | |
2081 | rv = mddev ? mddev_lock(mddev) : -EBUSY; | ||
2082 | if (!rv) { | ||
2083 | if (rdev->mddev == NULL) | ||
2084 | rv = -EBUSY; | ||
2085 | else | ||
2086 | rv = entry->show(rdev, page); | ||
2087 | mddev_unlock(mddev); | ||
2088 | } | ||
2089 | return rv; | ||
2072 | } | 2090 | } |
2073 | 2091 | ||
2074 | static ssize_t | 2092 | static ssize_t |
@@ -2077,15 +2095,19 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
2077 | { | 2095 | { |
2078 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); | 2096 | struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); |
2079 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); | 2097 | mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); |
2080 | int rv; | 2098 | ssize_t rv; |
2099 | mddev_t *mddev = rdev->mddev; | ||
2081 | 2100 | ||
2082 | if (!entry->store) | 2101 | if (!entry->store) |
2083 | return -EIO; | 2102 | return -EIO; |
2084 | if (!capable(CAP_SYS_ADMIN)) | 2103 | if (!capable(CAP_SYS_ADMIN)) |
2085 | return -EACCES; | 2104 | return -EACCES; |
2086 | rv = mddev_lock(rdev->mddev); | 2105 | rv = mddev ? mddev_lock(mddev): -EBUSY; |
2087 | if (!rv) { | 2106 | if (!rv) { |
2088 | rv = entry->store(rdev, page, length); | 2107 | if (rdev->mddev == NULL) |
2108 | rv = -EBUSY; | ||
2109 | else | ||
2110 | rv = entry->store(rdev, page, length); | ||
2089 | mddev_unlock(rdev->mddev); | 2111 | mddev_unlock(rdev->mddev); |
2090 | } | 2112 | } |
2091 | return rv; | 2113 | return rv; |
@@ -5351,6 +5373,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
5351 | mddev->ro = 0; | 5373 | mddev->ro = 0; |
5352 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | 5374 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
5353 | md_wakeup_thread(mddev->thread); | 5375 | md_wakeup_thread(mddev->thread); |
5376 | md_wakeup_thread(mddev->sync_thread); | ||
5354 | } | 5377 | } |
5355 | atomic_inc(&mddev->writes_pending); | 5378 | atomic_inc(&mddev->writes_pending); |
5356 | if (mddev->in_sync) { | 5379 | if (mddev->in_sync) { |
@@ -6021,6 +6044,7 @@ static void autostart_arrays(int part) | |||
6021 | MD_BUG(); | 6044 | MD_BUG(); |
6022 | continue; | 6045 | continue; |
6023 | } | 6046 | } |
6047 | set_bit(AutoDetected, &rdev->flags); | ||
6024 | list_add(&rdev->same_set, &pending_raid_disks); | 6048 | list_add(&rdev->same_set, &pending_raid_disks); |
6025 | i_passed++; | 6049 | i_passed++; |
6026 | } | 6050 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 5c7fef091cec..ff61b309129a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -592,6 +592,37 @@ static int raid1_congested(void *data, int bits) | |||
592 | } | 592 | } |
593 | 593 | ||
594 | 594 | ||
595 | static int flush_pending_writes(conf_t *conf) | ||
596 | { | ||
597 | /* Any writes that have been queued but are awaiting | ||
598 | * bitmap updates get flushed here. | ||
599 | * We return 1 if any requests were actually submitted. | ||
600 | */ | ||
601 | int rv = 0; | ||
602 | |||
603 | spin_lock_irq(&conf->device_lock); | ||
604 | |||
605 | if (conf->pending_bio_list.head) { | ||
606 | struct bio *bio; | ||
607 | bio = bio_list_get(&conf->pending_bio_list); | ||
608 | blk_remove_plug(conf->mddev->queue); | ||
609 | spin_unlock_irq(&conf->device_lock); | ||
610 | /* flush any pending bitmap writes to | ||
611 | * disk before proceeding w/ I/O */ | ||
612 | bitmap_unplug(conf->mddev->bitmap); | ||
613 | |||
614 | while (bio) { /* submit pending writes */ | ||
615 | struct bio *next = bio->bi_next; | ||
616 | bio->bi_next = NULL; | ||
617 | generic_make_request(bio); | ||
618 | bio = next; | ||
619 | } | ||
620 | rv = 1; | ||
621 | } else | ||
622 | spin_unlock_irq(&conf->device_lock); | ||
623 | return rv; | ||
624 | } | ||
625 | |||
595 | /* Barriers.... | 626 | /* Barriers.... |
596 | * Sometimes we need to suspend IO while we do something else, | 627 | * Sometimes we need to suspend IO while we do something else, |
597 | * either some resync/recovery, or reconfigure the array. | 628 | * either some resync/recovery, or reconfigure the array. |
@@ -673,15 +704,23 @@ static void freeze_array(conf_t *conf) | |||
673 | /* stop syncio and normal IO and wait for everything to | 704 | /* stop syncio and normal IO and wait for everything to |
674 | * go quiet. | 705 | * go quiet. |
675 | * We increment barrier and nr_waiting, and then | 706 | * We increment barrier and nr_waiting, and then |
676 | * wait until barrier+nr_pending match nr_queued+2 | 707 | * wait until nr_pending match nr_queued+1 |
708 | * This is called in the context of one normal IO request | ||
709 | * that has failed. Thus any sync request that might be pending | ||
710 | * will be blocked by nr_pending, and we need to wait for | ||
711 | * pending IO requests to complete or be queued for re-try. | ||
712 | * Thus the number queued (nr_queued) plus this request (1) | ||
713 | * must match the number of pending IOs (nr_pending) before | ||
714 | * we continue. | ||
677 | */ | 715 | */ |
678 | spin_lock_irq(&conf->resync_lock); | 716 | spin_lock_irq(&conf->resync_lock); |
679 | conf->barrier++; | 717 | conf->barrier++; |
680 | conf->nr_waiting++; | 718 | conf->nr_waiting++; |
681 | wait_event_lock_irq(conf->wait_barrier, | 719 | wait_event_lock_irq(conf->wait_barrier, |
682 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 720 | conf->nr_pending == conf->nr_queued+1, |
683 | conf->resync_lock, | 721 | conf->resync_lock, |
684 | raid1_unplug(conf->mddev->queue)); | 722 | ({ flush_pending_writes(conf); |
723 | raid1_unplug(conf->mddev->queue); })); | ||
685 | spin_unlock_irq(&conf->resync_lock); | 724 | spin_unlock_irq(&conf->resync_lock); |
686 | } | 725 | } |
687 | static void unfreeze_array(conf_t *conf) | 726 | static void unfreeze_array(conf_t *conf) |
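The reworked freeze_array() comment above spells out the accounting: the function runs on behalf of one failed request, so it waits until every other pending request has either completed or been parked on the retry queue, i.e. nr_pending == nr_queued + 1, and flush_pending_writes() is now invoked from inside the wait so writes parked behind a bitmap update cannot stall that convergence. A toy model of the counter arithmetic, with invented numbers:

#include <stdio.h>

/* Toy model of the freeze_array() accounting; the numbers are invented. */
int main(void)
{
        int nr_pending = 5;     /* in-flight requests, including the failed one */
        int nr_queued  = 0;     /* requests parked on the retry list */

        nr_pending -= 2;        /* two requests complete normally */
        nr_queued  += 2;        /* two others are queued for retry
                                 * (they remain counted in nr_pending) */

        /* only the caller's own failed request is now unaccounted for */
        if (nr_pending == nr_queued + 1)
                printf("frozen: %d pending, %d queued\n",
                       nr_pending, nr_queued);
        return 0;
}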
@@ -907,6 +946,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
907 | blk_plug_device(mddev->queue); | 946 | blk_plug_device(mddev->queue); |
908 | spin_unlock_irqrestore(&conf->device_lock, flags); | 947 | spin_unlock_irqrestore(&conf->device_lock, flags); |
909 | 948 | ||
949 | /* In case raid1d snuck into freeze_array */ | ||
950 | wake_up(&conf->wait_barrier); | ||
951 | |||
910 | if (do_sync) | 952 | if (do_sync) |
911 | md_wakeup_thread(mddev->thread); | 953 | md_wakeup_thread(mddev->thread); |
912 | #if 0 | 954 | #if 0 |
@@ -1473,28 +1515,14 @@ static void raid1d(mddev_t *mddev) | |||
1473 | 1515 | ||
1474 | for (;;) { | 1516 | for (;;) { |
1475 | char b[BDEVNAME_SIZE]; | 1517 | char b[BDEVNAME_SIZE]; |
1476 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1477 | |||
1478 | if (conf->pending_bio_list.head) { | ||
1479 | bio = bio_list_get(&conf->pending_bio_list); | ||
1480 | blk_remove_plug(mddev->queue); | ||
1481 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1482 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1483 | bitmap_unplug(mddev->bitmap); | ||
1484 | 1518 | ||
1485 | while (bio) { /* submit pending writes */ | 1519 | unplug += flush_pending_writes(conf); |
1486 | struct bio *next = bio->bi_next; | ||
1487 | bio->bi_next = NULL; | ||
1488 | generic_make_request(bio); | ||
1489 | bio = next; | ||
1490 | } | ||
1491 | unplug = 1; | ||
1492 | 1520 | ||
1493 | continue; | 1521 | spin_lock_irqsave(&conf->device_lock, flags); |
1494 | } | 1522 | if (list_empty(head)) { |
1495 | 1523 | spin_unlock_irqrestore(&conf->device_lock, flags); | |
1496 | if (list_empty(head)) | ||
1497 | break; | 1524 | break; |
1525 | } | ||
1498 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); | 1526 | r1_bio = list_entry(head->prev, r1bio_t, retry_list); |
1499 | list_del(head->prev); | 1527 | list_del(head->prev); |
1500 | conf->nr_queued--; | 1528 | conf->nr_queued--; |
@@ -1590,7 +1618,6 @@ static void raid1d(mddev_t *mddev) | |||
1590 | } | 1618 | } |
1591 | } | 1619 | } |
1592 | } | 1620 | } |
1593 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1594 | if (unplug) | 1621 | if (unplug) |
1595 | unplug_slaves(mddev); | 1622 | unplug_slaves(mddev); |
1596 | } | 1623 | } |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 017f58113c33..32389d2f18fc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -537,7 +537,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
537 | current_distance = abs(r10_bio->devs[slot].addr - | 537 | current_distance = abs(r10_bio->devs[slot].addr - |
538 | conf->mirrors[disk].head_position); | 538 | conf->mirrors[disk].head_position); |
539 | 539 | ||
540 | /* Find the disk whose head is closest */ | 540 | /* Find the disk whose head is closest, |
541 | * or - for far > 1 - find the closest to partition beginning */ | ||
541 | 542 | ||
542 | for (nslot = slot; nslot < conf->copies; nslot++) { | 543 | for (nslot = slot; nslot < conf->copies; nslot++) { |
543 | int ndisk = r10_bio->devs[nslot].devnum; | 544 | int ndisk = r10_bio->devs[nslot].devnum; |
@@ -557,8 +558,13 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) | |||
557 | slot = nslot; | 558 | slot = nslot; |
558 | break; | 559 | break; |
559 | } | 560 | } |
560 | new_distance = abs(r10_bio->devs[nslot].addr - | 561 | |
561 | conf->mirrors[ndisk].head_position); | 562 | /* for far > 1 always use the lowest address */ |
563 | if (conf->far_copies > 1) | ||
564 | new_distance = r10_bio->devs[nslot].addr; | ||
565 | else | ||
566 | new_distance = abs(r10_bio->devs[nslot].addr - | ||
567 | conf->mirrors[ndisk].head_position); | ||
562 | if (new_distance < current_distance) { | 568 | if (new_distance < current_distance) { |
563 | current_distance = new_distance; | 569 | current_distance = new_distance; |
564 | disk = ndisk; | 570 | disk = ndisk; |
@@ -629,7 +635,36 @@ static int raid10_congested(void *data, int bits) | |||
629 | return ret; | 635 | return ret; |
630 | } | 636 | } |
631 | 637 | ||
632 | 638 | static int flush_pending_writes(conf_t *conf) | |
639 | { | ||
640 | /* Any writes that have been queued but are awaiting | ||
641 | * bitmap updates get flushed here. | ||
642 | * We return 1 if any requests were actually submitted. | ||
643 | */ | ||
644 | int rv = 0; | ||
645 | |||
646 | spin_lock_irq(&conf->device_lock); | ||
647 | |||
648 | if (conf->pending_bio_list.head) { | ||
649 | struct bio *bio; | ||
650 | bio = bio_list_get(&conf->pending_bio_list); | ||
651 | blk_remove_plug(conf->mddev->queue); | ||
652 | spin_unlock_irq(&conf->device_lock); | ||
653 | /* flush any pending bitmap writes to disk | ||
654 | * before proceeding w/ I/O */ | ||
655 | bitmap_unplug(conf->mddev->bitmap); | ||
656 | |||
657 | while (bio) { /* submit pending writes */ | ||
658 | struct bio *next = bio->bi_next; | ||
659 | bio->bi_next = NULL; | ||
660 | generic_make_request(bio); | ||
661 | bio = next; | ||
662 | } | ||
663 | rv = 1; | ||
664 | } else | ||
665 | spin_unlock_irq(&conf->device_lock); | ||
666 | return rv; | ||
667 | } | ||
633 | /* Barriers.... | 668 | /* Barriers.... |
634 | * Sometimes we need to suspend IO while we do something else, | 669 | * Sometimes we need to suspend IO while we do something else, |
635 | * either some resync/recovery, or reconfigure the array. | 670 | * either some resync/recovery, or reconfigure the array. |
@@ -712,15 +747,23 @@ static void freeze_array(conf_t *conf) | |||
712 | /* stop syncio and normal IO and wait for everything to | 747 | /* stop syncio and normal IO and wait for everything to |
713 | * go quiet. | 748 | * go quiet. |
714 | * We increment barrier and nr_waiting, and then | 749 | * We increment barrier and nr_waiting, and then |
715 | * wait until barrier+nr_pending match nr_queued+2 | 750 | * wait until nr_pending match nr_queued+1 |
751 | * This is called in the context of one normal IO request | ||
752 | * that has failed. Thus any sync request that might be pending | ||
753 | * will be blocked by nr_pending, and we need to wait for | ||
754 | * pending IO requests to complete or be queued for re-try. | ||
755 | * Thus the number queued (nr_queued) plus this request (1) | ||
756 | * must match the number of pending IOs (nr_pending) before | ||
757 | * we continue. | ||
716 | */ | 758 | */ |
717 | spin_lock_irq(&conf->resync_lock); | 759 | spin_lock_irq(&conf->resync_lock); |
718 | conf->barrier++; | 760 | conf->barrier++; |
719 | conf->nr_waiting++; | 761 | conf->nr_waiting++; |
720 | wait_event_lock_irq(conf->wait_barrier, | 762 | wait_event_lock_irq(conf->wait_barrier, |
721 | conf->barrier+conf->nr_pending == conf->nr_queued+2, | 763 | conf->nr_pending == conf->nr_queued+1, |
722 | conf->resync_lock, | 764 | conf->resync_lock, |
723 | raid10_unplug(conf->mddev->queue)); | 765 | ({ flush_pending_writes(conf); |
766 | raid10_unplug(conf->mddev->queue); })); | ||
724 | spin_unlock_irq(&conf->resync_lock); | 767 | spin_unlock_irq(&conf->resync_lock); |
725 | } | 768 | } |
726 | 769 | ||
@@ -892,6 +935,9 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
892 | blk_plug_device(mddev->queue); | 935 | blk_plug_device(mddev->queue); |
893 | spin_unlock_irqrestore(&conf->device_lock, flags); | 936 | spin_unlock_irqrestore(&conf->device_lock, flags); |
894 | 937 | ||
938 | /* In case raid10d snuck in to freeze_array */ | ||
939 | wake_up(&conf->wait_barrier); | ||
940 | |||
895 | if (do_sync) | 941 | if (do_sync) |
896 | md_wakeup_thread(mddev->thread); | 942 | md_wakeup_thread(mddev->thread); |
897 | 943 | ||
@@ -1464,28 +1510,14 @@ static void raid10d(mddev_t *mddev) | |||
1464 | 1510 | ||
1465 | for (;;) { | 1511 | for (;;) { |
1466 | char b[BDEVNAME_SIZE]; | 1512 | char b[BDEVNAME_SIZE]; |
1467 | spin_lock_irqsave(&conf->device_lock, flags); | ||
1468 | 1513 | ||
1469 | if (conf->pending_bio_list.head) { | 1514 | unplug += flush_pending_writes(conf); |
1470 | bio = bio_list_get(&conf->pending_bio_list); | ||
1471 | blk_remove_plug(mddev->queue); | ||
1472 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1473 | /* flush any pending bitmap writes to disk before proceeding w/ I/O */ | ||
1474 | bitmap_unplug(mddev->bitmap); | ||
1475 | |||
1476 | while (bio) { /* submit pending writes */ | ||
1477 | struct bio *next = bio->bi_next; | ||
1478 | bio->bi_next = NULL; | ||
1479 | generic_make_request(bio); | ||
1480 | bio = next; | ||
1481 | } | ||
1482 | unplug = 1; | ||
1483 | |||
1484 | continue; | ||
1485 | } | ||
1486 | 1515 | ||
1487 | if (list_empty(head)) | 1516 | spin_lock_irqsave(&conf->device_lock, flags); |
1517 | if (list_empty(head)) { | ||
1518 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1488 | break; | 1519 | break; |
1520 | } | ||
1489 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); | 1521 | r10_bio = list_entry(head->prev, r10bio_t, retry_list); |
1490 | list_del(head->prev); | 1522 | list_del(head->prev); |
1491 | conf->nr_queued--; | 1523 | conf->nr_queued--; |
@@ -1548,7 +1580,6 @@ static void raid10d(mddev_t *mddev) | |||
1548 | } | 1580 | } |
1549 | } | 1581 | } |
1550 | } | 1582 | } |
1551 | spin_unlock_irqrestore(&conf->device_lock, flags); | ||
1552 | if (unplug) | 1583 | if (unplug) |
1553 | unplug_slaves(mddev); | 1584 | unplug_slaves(mddev); |
1554 | } | 1585 | } |
@@ -1787,6 +1818,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
1787 | if (j == conf->copies) { | 1818 | if (j == conf->copies) { |
1788 | /* Cannot recover, so abort the recovery */ | 1819 | /* Cannot recover, so abort the recovery */ |
1789 | put_buf(r10_bio); | 1820 | put_buf(r10_bio); |
1821 | if (rb2) | ||
1822 | atomic_dec(&rb2->remaining); | ||
1790 | r10_bio = rb2; | 1823 | r10_bio = rb2; |
1791 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) | 1824 | if (!test_and_set_bit(MD_RECOVERY_ERR, &mddev->recovery)) |
1792 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", | 1825 | printk(KERN_INFO "raid10: %s: insufficient working devices for recovery.\n", |
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 0c303c84b37b..6b6df8679585 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx) | |||
632 | 632 | ||
633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 633 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
634 | /** | 634 | /** |
635 | * mpt_event_register - Register protocol-specific event callback | 635 | * mpt_event_register - Register protocol-specific event callback handler. |
636 | * handler. | ||
637 | * @cb_idx: previously registered (via mpt_register) callback handle | 636 | * @cb_idx: previously registered (via mpt_register) callback handle |
638 | * @ev_cbfunc: callback function | 637 | * @ev_cbfunc: callback function |
639 | * | 638 | * |
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc) | |||
654 | 653 | ||
655 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 654 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
656 | /** | 655 | /** |
657 | * mpt_event_deregister - Deregister protocol-specific event callback | 656 | * mpt_event_deregister - Deregister protocol-specific event callback handler |
658 | * handler. | ||
659 | * @cb_idx: previously registered callback handle | 657 | * @cb_idx: previously registered callback handle |
660 | * | 658 | * |
661 | * Each protocol-specific driver should call this routine | 659 | * Each protocol-specific driver should call this routine |
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx) | |||
765 | 763 | ||
766 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 764 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
767 | /** | 765 | /** |
768 | * mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024) | 766 | * mpt_get_msg_frame - Obtain an MPT request frame from the pool |
769 | * allocated per MPT adapter. | ||
770 | * @cb_idx: Handle of registered MPT protocol driver | 767 | * @cb_idx: Handle of registered MPT protocol driver |
771 | * @ioc: Pointer to MPT adapter structure | 768 | * @ioc: Pointer to MPT adapter structure |
772 | * | 769 | * |
770 | * Obtain an MPT request frame from the pool (of 1024) that are | ||
771 | * allocated per MPT adapter. | ||
772 | * | ||
773 | * Returns pointer to a MPT request frame or %NULL if none are available | 773 | * Returns pointer to a MPT request frame or %NULL if none are available |
774 | * or IOC is not active. | 774 | * or IOC is not active. |
775 | */ | 775 | */ |
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc) | |||
834 | 834 | ||
835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 835 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
836 | /** | 836 | /** |
837 | * mpt_put_msg_frame - Send a protocol specific MPT request frame | 837 | * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC |
838 | * to a IOC. | ||
839 | * @cb_idx: Handle of registered MPT protocol driver | 838 | * @cb_idx: Handle of registered MPT protocol driver |
840 | * @ioc: Pointer to MPT adapter structure | 839 | * @ioc: Pointer to MPT adapter structure |
841 | * @mf: Pointer to MPT request frame | 840 | * @mf: Pointer to MPT request frame |
842 | * | 841 | * |
843 | * This routine posts a MPT request frame to the request post FIFO of a | 842 | * This routine posts an MPT request frame to the request post FIFO of a |
844 | * specific MPT adapter. | 843 | * specific MPT adapter. |
845 | */ | 844 | */ |
846 | void | 845 | void |
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) | |||
868 | } | 867 | } |
869 | 868 | ||
870 | /** | 869 | /** |
871 | * mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame | 870 | * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame |
872 | * to a IOC using hi priority request queue. | ||
873 | * @cb_idx: Handle of registered MPT protocol driver | 871 | * @cb_idx: Handle of registered MPT protocol driver |
874 | * @ioc: Pointer to MPT adapter structure | 872 | * @ioc: Pointer to MPT adapter structure |
875 | * @mf: Pointer to MPT request frame | 873 | * @mf: Pointer to MPT request frame |
876 | * | 874 | * |
877 | * This routine posts a MPT request frame to the request post FIFO of a | 875 | * Send a protocol-specific MPT request frame to an IOC using |
876 | * hi-priority request queue. | ||
877 | * | ||
878 | * This routine posts an MPT request frame to the request post FIFO of a | ||
878 | * specific MPT adapter. | 879 | * specific MPT adapter. |
879 | **/ | 880 | **/ |
880 | void | 881 | void |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f77b329f6923..78734e25edd5 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1701,6 +1701,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, | |||
1701 | if (error) | 1701 | if (error) |
1702 | goto out_free_consistent; | 1702 | goto out_free_consistent; |
1703 | 1703 | ||
1704 | if (!buffer->NumPhys) { | ||
1705 | error = -ENODEV; | ||
1706 | goto out_free_consistent; | ||
1707 | } | ||
1708 | |||
1704 | /* save config data */ | 1709 | /* save config data */ |
1705 | port_info->num_phys = buffer->NumPhys; | 1710 | port_info->num_phys = buffer->NumPhys; |
1706 | port_info->phy_info = kcalloc(port_info->num_phys, | 1711 | port_info->phy_info = kcalloc(port_info->num_phys, |
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index af1de0ccee2f..0c252f60c4c1 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) | |||
1533 | * | 1533 | * |
1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). | 1534 | * Remark: Currently invoked from a non-interrupt thread (_bh). |
1535 | * | 1535 | * |
1536 | * Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC | 1536 | * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC |
1537 | * will be active. | 1537 | * will be active. |
1538 | * | 1538 | * |
1539 | * Returns 0 for SUCCESS, or %FAILED. | 1539 | * Returns 0 for SUCCESS, or %FAILED. |
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR | |||
2537 | 2537 | ||
2538 | /** | 2538 | /** |
2539 | * mptscsih_get_scsi_lookup | 2539 | * mptscsih_get_scsi_lookup |
2540 | * | ||
2541 | * retrieves scmd entry from ScsiLookup[] array list | ||
2542 | * | ||
2543 | * @ioc: Pointer to MPT_ADAPTER structure | 2540 | * @ioc: Pointer to MPT_ADAPTER structure |
2544 | * @i: index into the array | 2541 | * @i: index into the array |
2545 | * | 2542 | * |
2546 | * Returns the scsi_cmd pointer | 2543 | * retrieves scmd entry from ScsiLookup[] array list |
2547 | * | 2544 | * |
2545 | * Returns the scsi_cmd pointer | ||
2548 | **/ | 2546 | **/ |
2549 | static struct scsi_cmnd * | 2547 | static struct scsi_cmnd * |
2550 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2548 | mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) |
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) | |||
2561 | 2559 | ||
2562 | /** | 2560 | /** |
2563 | * mptscsih_getclear_scsi_lookup | 2561 | * mptscsih_getclear_scsi_lookup |
2564 | * | ||
2565 | * retrieves and clears scmd entry from ScsiLookup[] array list | ||
2566 | * | ||
2567 | * @ioc: Pointer to MPT_ADAPTER structure | 2562 | * @ioc: Pointer to MPT_ADAPTER structure |
2568 | * @i: index into the array | 2563 | * @i: index into the array |
2569 | * | 2564 | * |
2570 | * Returns the scsi_cmd pointer | 2565 | * retrieves and clears scmd entry from ScsiLookup[] array list |
2571 | * | 2566 | * |
2567 | * Returns the scsi_cmd pointer | ||
2572 | **/ | 2568 | **/ |
2573 | static struct scsi_cmnd * | 2569 | static struct scsi_cmnd * |
2574 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) | 2570 | mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) |
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index afd82966f9a0..13bac53db69a 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
@@ -48,31 +48,13 @@ struct sm501_devdata { | |||
48 | unsigned int pdev_id; | 48 | unsigned int pdev_id; |
49 | unsigned int irq; | 49 | unsigned int irq; |
50 | void __iomem *regs; | 50 | void __iomem *regs; |
51 | unsigned int rev; | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | #define MHZ (1000 * 1000) | 54 | #define MHZ (1000 * 1000) |
54 | 55 | ||
55 | #ifdef DEBUG | 56 | #ifdef DEBUG |
56 | static const unsigned int misc_div[] = { | 57 | static const unsigned int div_tab[] = { |
57 | [0] = 1, | ||
58 | [1] = 2, | ||
59 | [2] = 4, | ||
60 | [3] = 8, | ||
61 | [4] = 16, | ||
62 | [5] = 32, | ||
63 | [6] = 64, | ||
64 | [7] = 128, | ||
65 | [8] = 3, | ||
66 | [9] = 6, | ||
67 | [10] = 12, | ||
68 | [11] = 24, | ||
69 | [12] = 48, | ||
70 | [13] = 96, | ||
71 | [14] = 192, | ||
72 | [15] = 384, | ||
73 | }; | ||
74 | |||
75 | static const unsigned int px_div[] = { | ||
76 | [0] = 1, | 58 | [0] = 1, |
77 | [1] = 2, | 59 | [1] = 2, |
78 | [2] = 4, | 60 | [2] = 4, |
@@ -101,12 +83,12 @@ static const unsigned int px_div[] = { | |||
101 | 83 | ||
102 | static unsigned long decode_div(unsigned long pll2, unsigned long val, | 84 | static unsigned long decode_div(unsigned long pll2, unsigned long val, |
103 | unsigned int lshft, unsigned int selbit, | 85 | unsigned int lshft, unsigned int selbit, |
104 | unsigned long mask, const unsigned int *dtab) | 86 | unsigned long mask) |
105 | { | 87 | { |
106 | if (val & selbit) | 88 | if (val & selbit) |
107 | pll2 = 288 * MHZ; | 89 | pll2 = 288 * MHZ; |
108 | 90 | ||
109 | return pll2 / dtab[(val >> lshft) & mask]; | 91 | return pll2 / div_tab[(val >> lshft) & mask]; |
110 | } | 92 | } |
111 | 93 | ||
112 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) | 94 | #define fmt_freq(x) ((x) / MHZ), ((x) % MHZ), (x) |
@@ -141,10 +123,10 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
141 | } | 123 | } |
142 | 124 | ||
143 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; | 125 | sdclk0 = (misct & (1<<12)) ? pll2 : 288 * MHZ; |
144 | sdclk0 /= misc_div[((misct >> 8) & 0xf)]; | 126 | sdclk0 /= div_tab[((misct >> 8) & 0xf)]; |
145 | 127 | ||
146 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; | 128 | sdclk1 = (misct & (1<<20)) ? pll2 : 288 * MHZ; |
147 | sdclk1 /= misc_div[((misct >> 16) & 0xf)]; | 129 | sdclk1 /= div_tab[((misct >> 16) & 0xf)]; |
148 | 130 | ||
149 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", | 131 | dev_dbg(sm->dev, "MISCT=%08lx, PM0=%08lx, PM1=%08lx\n", |
150 | misct, pm0, pm1); | 132 | misct, pm0, pm1); |
@@ -158,19 +140,19 @@ static void sm501_dump_clk(struct sm501_devdata *sm) | |||
158 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 140 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
159 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 141 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
160 | (pmc & 3 ) == 0 ? '*' : '-', | 142 | (pmc & 3 ) == 0 ? '*' : '-', |
161 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31, px_div)), | 143 | fmt_freq(decode_div(pll2, pm0, 24, 1<<29, 31)), |
162 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15, misc_div)), | 144 | fmt_freq(decode_div(pll2, pm0, 16, 1<<20, 15)), |
163 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15, misc_div)), | 145 | fmt_freq(decode_div(pll2, pm0, 8, 1<<12, 15)), |
164 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15, misc_div))); | 146 | fmt_freq(decode_div(pll2, pm0, 0, 1<<4, 15))); |
165 | 147 | ||
166 | dev_dbg(sm->dev, "PM1[%c]: " | 148 | dev_dbg(sm->dev, "PM1[%c]: " |
167 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " | 149 | "P2 %ld.%ld MHz (%ld), V2 %ld.%ld (%ld), " |
168 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", | 150 | "M %ld.%ld (%ld), MX1 %ld.%ld (%ld)\n", |
169 | (pmc & 3 ) == 1 ? '*' : '-', | 151 | (pmc & 3 ) == 1 ? '*' : '-', |
170 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31, px_div)), | 152 | fmt_freq(decode_div(pll2, pm1, 24, 1<<29, 31)), |
171 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15, misc_div)), | 153 | fmt_freq(decode_div(pll2, pm1, 16, 1<<20, 15)), |
172 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15, misc_div)), | 154 | fmt_freq(decode_div(pll2, pm1, 8, 1<<12, 15)), |
173 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15, misc_div))); | 155 | fmt_freq(decode_div(pll2, pm1, 0, 1<<4, 15))); |
174 | } | 156 | } |
175 | 157 | ||
176 | static void sm501_dump_regs(struct sm501_devdata *sm) | 158 | static void sm501_dump_regs(struct sm501_devdata *sm) |
@@ -436,46 +418,108 @@ struct sm501_clock { | |||
436 | unsigned long mclk; | 418 | unsigned long mclk; |
437 | int divider; | 419 | int divider; |
438 | int shift; | 420 | int shift; |
421 | unsigned int m, n, k; | ||
439 | }; | 422 | }; |
440 | 423 | ||
424 | /* sm501_calc_clock | ||
425 | * | ||
426 | * Calculates the nearest discrete clock frequency that | ||
427 | * can be achieved with the specified input clock. | ||
428 | * the maximum divisor is 3 or 5 | ||
429 | */ | ||
430 | |||
431 | static int sm501_calc_clock(unsigned long freq, | ||
432 | struct sm501_clock *clock, | ||
433 | int max_div, | ||
434 | unsigned long mclk, | ||
435 | long *best_diff) | ||
436 | { | ||
437 | int ret = 0; | ||
438 | int divider; | ||
439 | int shift; | ||
440 | long diff; | ||
441 | |||
442 | /* try dividers 1 and 3 for CRT and for panel, | ||
443 | try divider 5 for panel only.*/ | ||
444 | |||
445 | for (divider = 1; divider <= max_div; divider += 2) { | ||
446 | /* try all 8 shift values.*/ | ||
447 | for (shift = 0; shift < 8; shift++) { | ||
448 | /* Calculate difference to requested clock */ | ||
449 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
450 | if (diff < 0) | ||
451 | diff = -diff; | ||
452 | |||
453 | /* If it is less than the current, use it */ | ||
454 | if (diff < *best_diff) { | ||
455 | *best_diff = diff; | ||
456 | |||
457 | clock->mclk = mclk; | ||
458 | clock->divider = divider; | ||
459 | clock->shift = shift; | ||
460 | ret = 1; | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | return ret; | ||
466 | } | ||
467 | |||
468 | /* sm501_calc_pll | ||
469 | * | ||
470 | * Calculates the nearest discrete clock frequency that can be | ||
471 | * achieved using the programmable PLL. | ||
472 | * the maximum divisor is 3 or 5 | ||
473 | */ | ||
474 | |||
475 | static unsigned long sm501_calc_pll(unsigned long freq, | ||
476 | struct sm501_clock *clock, | ||
477 | int max_div) | ||
478 | { | ||
479 | unsigned long mclk; | ||
480 | unsigned int m, n, k; | ||
481 | long best_diff = 999999999; | ||
482 | |||
483 | /* | ||
484 | * The SM502 datasheet doesn't specify the min/max values for M and N. | ||
485 | * N = 1 at least doesn't work in practice. | ||
486 | */ | ||
487 | for (m = 2; m <= 255; m++) { | ||
488 | for (n = 2; n <= 127; n++) { | ||
489 | for (k = 0; k <= 1; k++) { | ||
490 | mclk = (24000000UL * m / n) >> k; | ||
491 | |||
492 | if (sm501_calc_clock(freq, clock, max_div, | ||
493 | mclk, &best_diff)) { | ||
494 | clock->m = m; | ||
495 | clock->n = n; | ||
496 | clock->k = k; | ||
497 | } | ||
498 | } | ||
499 | } | ||
500 | } | ||
501 | |||
502 | /* Return best clock. */ | ||
503 | return clock->mclk / (clock->divider << clock->shift); | ||
504 | } | ||
505 | |||
441 | /* sm501_select_clock | 506 | /* sm501_select_clock |
442 | * | 507 | * |
443 | * selects nearest discrete clock frequency the SM501 can achive | 508 | * Calculates the nearest discrete clock frequency that can be |
509 | * achieved using the 288MHz and 336MHz PLLs. | ||
444 | * the maximum divisor is 3 or 5 | 510 | * the maximum divisor is 3 or 5 |
445 | */ | 511 | */ |
512 | |||
446 | static unsigned long sm501_select_clock(unsigned long freq, | 513 | static unsigned long sm501_select_clock(unsigned long freq, |
447 | struct sm501_clock *clock, | 514 | struct sm501_clock *clock, |
448 | int max_div) | 515 | int max_div) |
449 | { | 516 | { |
450 | unsigned long mclk; | 517 | unsigned long mclk; |
451 | int divider; | ||
452 | int shift; | ||
453 | long diff; | ||
454 | long best_diff = 999999999; | 518 | long best_diff = 999999999; |
455 | 519 | ||
456 | /* Try 288MHz and 336MHz clocks. */ | 520 | /* Try 288MHz and 336MHz clocks. */ |
457 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { | 521 | for (mclk = 288000000; mclk <= 336000000; mclk += 48000000) { |
458 | /* try dividers 1 and 3 for CRT and for panel, | 522 | sm501_calc_clock(freq, clock, max_div, mclk, &best_diff); |
459 | try divider 5 for panel only.*/ | ||
460 | |||
461 | for (divider = 1; divider <= max_div; divider += 2) { | ||
462 | /* try all 8 shift values.*/ | ||
463 | for (shift = 0; shift < 8; shift++) { | ||
464 | /* Calculate difference to requested clock */ | ||
465 | diff = sm501fb_round_div(mclk, divider << shift) - freq; | ||
466 | if (diff < 0) | ||
467 | diff = -diff; | ||
468 | |||
469 | /* If it is less than the current, use it */ | ||
470 | if (diff < best_diff) { | ||
471 | best_diff = diff; | ||
472 | |||
473 | clock->mclk = mclk; | ||
474 | clock->divider = divider; | ||
475 | clock->shift = shift; | ||
476 | } | ||
477 | } | ||
478 | } | ||
479 | } | 523 | } |
480 | 524 | ||
481 | /* Return best clock. */ | 525 | /* Return best clock. */ |
@@ -497,6 +541,7 @@ unsigned long sm501_set_clock(struct device *dev, | |||
497 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); | 541 | unsigned long gate = readl(sm->regs + SM501_CURRENT_GATE); |
498 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); | 542 | unsigned long clock = readl(sm->regs + SM501_CURRENT_CLOCK); |
499 | unsigned char reg; | 543 | unsigned char reg; |
544 | unsigned int pll_reg = 0; | ||
500 | unsigned long sm501_freq; /* the actual frequency achieved */ | 545 | unsigned long sm501_freq; /* the actual frequency achieved */ |
501 | 546 | ||
502 | struct sm501_clock to; | 547 | struct sm501_clock to; |
@@ -511,14 +556,28 @@ unsigned long sm501_set_clock(struct device *dev, | |||
511 | * requested frequency the value must be multiplied by | 556 | * requested frequency the value must be multiplied by |
512 | * 2. This clock also has an additional pre divisor */ | 557 | * 2. This clock also has an additional pre divisor */ |
513 | 558 | ||
514 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 559 | if (sm->rev >= 0xC0) { |
515 | reg=to.shift & 0x07;/* bottom 3 bits are shift */ | 560 | /* SM502 -> use the programmable PLL */ |
516 | if (to.divider == 3) | 561 | sm501_freq = (sm501_calc_pll(2 * req_freq, |
517 | reg |= 0x08; /* /3 divider required */ | 562 | &to, 5) / 2); |
518 | else if (to.divider == 5) | 563 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ |
519 | reg |= 0x10; /* /5 divider required */ | 564 | if (to.divider == 3) |
520 | if (to.mclk != 288000000) | 565 | reg |= 0x08; /* /3 divider required */ |
521 | reg |= 0x20; /* which mclk pll is source */ | 566 | else if (to.divider == 5) |
567 | reg |= 0x10; /* /5 divider required */ | ||
568 | reg |= 0x40; /* select the programmable PLL */ | ||
569 | pll_reg = 0x20000 | (to.k << 15) | (to.n << 8) | to.m; | ||
570 | } else { | ||
571 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
572 | &to, 5) / 2); | ||
573 | reg = to.shift & 0x07;/* bottom 3 bits are shift */ | ||
574 | if (to.divider == 3) | ||
575 | reg |= 0x08; /* /3 divider required */ | ||
576 | else if (to.divider == 5) | ||
577 | reg |= 0x10; /* /5 divider required */ | ||
578 | if (to.mclk != 288000000) | ||
579 | reg |= 0x20; /* which mclk pll is source */ | ||
580 | } | ||
522 | break; | 581 | break; |
523 | 582 | ||
524 | case SM501_CLOCK_V2XCLK: | 583 | case SM501_CLOCK_V2XCLK: |
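On an SM502 (device revision 0xC0 or later) the chosen M, N and K are packed into the programmable PLL control register; judging only from the shifts used in the hunk above, M sits in bits 7:0, N in bits 14:8, K in bit 15, with bit 17 acting as an enable. The same illustrative values encoded that way:

#include <stdio.h>

int main(void)
{
        unsigned int m = 65, n = 12, k = 0;     /* same illustrative values */
        unsigned int pll_reg;

        /* field layout inferred from the shifts in sm501_set_clock() */
        pll_reg = 0x20000 | (k << 15) | (n << 8) | m;

        printf("pll_reg = 0x%05x\n", pll_reg);  /* 0x20c41 */
        return 0;
}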
@@ -579,6 +638,10 @@ unsigned long sm501_set_clock(struct device *dev, | |||
579 | } | 638 | } |
580 | 639 | ||
581 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); | 640 | writel(mode, sm->regs + SM501_POWER_MODE_CONTROL); |
641 | |||
642 | if (pll_reg) | ||
643 | writel(pll_reg, sm->regs + SM501_PROGRAMMABLE_PLL_CONTROL); | ||
644 | |||
582 | sm501_sync_regs(sm); | 645 | sm501_sync_regs(sm); |
583 | 646 | ||
584 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", | 647 | dev_info(sm->dev, "gate %08lx, clock %08lx, mode %08lx\n", |
@@ -599,15 +662,24 @@ EXPORT_SYMBOL_GPL(sm501_set_clock); | |||
599 | * finds the closest available frequency for a given clock | 662 | * finds the closest available frequency for a given clock |
600 | */ | 663 | */ |
601 | 664 | ||
602 | unsigned long sm501_find_clock(int clksrc, | 665 | unsigned long sm501_find_clock(struct device *dev, |
666 | int clksrc, | ||
603 | unsigned long req_freq) | 667 | unsigned long req_freq) |
604 | { | 668 | { |
669 | struct sm501_devdata *sm = dev_get_drvdata(dev); | ||
605 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ | 670 | unsigned long sm501_freq; /* the frequency achievable by the 501 */ |
606 | struct sm501_clock to; | 671 | struct sm501_clock to; |
607 | 672 | ||
608 | switch (clksrc) { | 673 | switch (clksrc) { |
609 | case SM501_CLOCK_P2XCLK: | 674 | case SM501_CLOCK_P2XCLK: |
610 | sm501_freq = (sm501_select_clock(2 * req_freq, &to, 5) / 2); | 675 | if (sm->rev >= 0xC0) { |
676 | /* SM502 -> use the programmable PLL */ | ||
677 | sm501_freq = (sm501_calc_pll(2 * req_freq, | ||
678 | &to, 5) / 2); | ||
679 | } else { | ||
680 | sm501_freq = (sm501_select_clock(2 * req_freq, | ||
681 | &to, 5) / 2); | ||
682 | } | ||
611 | break; | 683 | break; |
612 | 684 | ||
613 | case SM501_CLOCK_V2XCLK: | 685 | case SM501_CLOCK_V2XCLK: |
@@ -914,6 +986,8 @@ static int sm501_init_dev(struct sm501_devdata *sm) | |||
914 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", | 986 | dev_info(sm->dev, "SM501 At %p: Version %08lx, %ld Mb, IRQ %d\n", |
915 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); | 987 | sm->regs, devid, (unsigned long)mem_avail >> 20, sm->irq); |
916 | 988 | ||
989 | sm->rev = devid & SM501_DEVICEID_REVMASK; | ||
990 | |||
917 | sm501_dump_gate(sm); | 991 | sm501_dump_gate(sm); |
918 | 992 | ||
919 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); | 993 | ret = device_create_file(sm->dev, &dev_attr_dbg_regs); |
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c index bb269d0c677e..6cb781262f94 100644 --- a/drivers/misc/thinkpad_acpi.c +++ b/drivers/misc/thinkpad_acpi.c | |||
@@ -1078,7 +1078,8 @@ static int hotkey_get_tablet_mode(int *status) | |||
1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) | 1078 | if (!acpi_evalf(hkey_handle, &s, "MHKG", "d")) |
1079 | return -EIO; | 1079 | return -EIO; |
1080 | 1080 | ||
1081 | return ((s & TP_HOTKEY_TABLET_MASK) != 0); | 1081 | *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); |
1082 | return 0; | ||
1082 | } | 1083 | } |
1083 | 1084 | ||
1084 | /* | 1085 | /* |
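The thinkpad_acpi fix above restores the usual kernel convention for hotkey_get_tablet_mode(): return 0 or a negative errno, and report the tablet-mode bit through the *status argument; the old code returned the bit itself and never filled in *status. A small sketch of that convention, with a hypothetical mask standing in for TP_HOTKEY_TABLET_MASK:

#include <stdio.h>

/* return 0/-errno, and hand the answer back through the pointer argument */
static int get_tablet_mode(int *status, unsigned int hw_bits)
{
        /* 0x0080 is a hypothetical mask, not the driver's TP_HOTKEY_TABLET_MASK */
        *status = (hw_bits & 0x0080) != 0;
        return 0;
}

int main(void)
{
        int tablet;

        if (!get_tablet_mode(&tablet, 0x0080))
                printf("tablet mode: %d\n", tablet);
        return 0;
}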
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 6ac81e35355c..275960462970 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -1000,8 +1000,8 @@ static int __init ubi_init(void) | |||
1000 | mutex_unlock(&ubi_devices_mutex); | 1000 | mutex_unlock(&ubi_devices_mutex); |
1001 | if (err < 0) { | 1001 | if (err < 0) { |
1002 | put_mtd_device(mtd); | 1002 | put_mtd_device(mtd); |
1003 | printk(KERN_ERR "UBI error: cannot attach %s\n", | 1003 | printk(KERN_ERR "UBI error: cannot attach mtd%d\n", |
1004 | p->name); | 1004 | mtd->index); |
1005 | goto out_detach; | 1005 | goto out_detach; |
1006 | } | 1006 | } |
1007 | } | 1007 | } |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 457710615261..a548c1d28fa8 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -217,11 +217,11 @@ struct ubi_volume { | |||
217 | void *upd_buf; | 217 | void *upd_buf; |
218 | 218 | ||
219 | int *eba_tbl; | 219 | int *eba_tbl; |
220 | int checked:1; | 220 | unsigned int checked:1; |
221 | int corrupted:1; | 221 | unsigned int corrupted:1; |
222 | int upd_marker:1; | 222 | unsigned int upd_marker:1; |
223 | int updating:1; | 223 | unsigned int updating:1; |
224 | int changing_leb:1; | 224 | unsigned int changing_leb:1; |
225 | 225 | ||
226 | #ifdef CONFIG_MTD_UBI_GLUEBI | 226 | #ifdef CONFIG_MTD_UBI_GLUEBI |
227 | /* | 227 | /* |
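The ubi.h change above matters because, on the compilers the kernel targets, a plain int bitfield is signed, and a signed 1-bit field can only hold 0 and -1; storing 1 and later comparing against 1 therefore misbehaves, while an unsigned 1-bit field holds 0 and 1 as expected. A standalone demonstration (not UBI code):

#include <stdio.h>

struct flags_signed   { int checked:1; };
struct flags_unsigned { unsigned int checked:1; };

int main(void)
{
        struct flags_signed   s = { .checked = 1 };     /* stored as -1 on most ABIs */
        struct flags_unsigned u = { .checked = 1 };

        printf("signed   checked == 1? %s\n", s.checked == 1 ? "yes" : "no");
        printf("unsigned checked == 1? %s\n", u.checked == 1 ? "yes" : "no");
        return 0;
}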
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index a3ca2257e601..5be58d85c639 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -376,7 +376,9 @@ out_sysfs: | |||
376 | get_device(&vol->dev); | 376 | get_device(&vol->dev); |
377 | volume_sysfs_close(vol); | 377 | volume_sysfs_close(vol); |
378 | out_gluebi: | 378 | out_gluebi: |
379 | ubi_destroy_gluebi(vol); | 379 | if (ubi_destroy_gluebi(vol)) |
380 | dbg_err("cannot destroy gluebi for volume %d:%d", | ||
381 | ubi->ubi_num, vol_id); | ||
380 | out_cdev: | 382 | out_cdev: |
381 | cdev_del(&vol->cdev); | 383 | cdev_del(&vol->cdev); |
382 | out_mapping: | 384 | out_mapping: |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 56fc3fbce838..af36b12be278 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -519,6 +519,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
519 | if (ubi->autoresize_vol_id != -1) { | 519 | if (ubi->autoresize_vol_id != -1) { |
520 | ubi_err("more then one auto-resize volume (%d " | 520 | ubi_err("more then one auto-resize volume (%d " |
521 | "and %d)", ubi->autoresize_vol_id, i); | 521 | "and %d)", ubi->autoresize_vol_id, i); |
522 | kfree(vol); | ||
522 | return -EINVAL; | 523 | return -EINVAL; |
523 | } | 524 | } |
524 | 525 | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f337800076c0..a0f0e605d630 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -90,6 +90,11 @@ config MACVLAN | |||
90 | This allows one to create virtual interfaces that map packets to | 90 | This allows one to create virtual interfaces that map packets to |
91 | or from specific MAC addresses to a particular interface. | 91 | or from specific MAC addresses to a particular interface. |
92 | 92 | ||
93 | Macvlan devices can be added using the "ip" command from the | ||
94 | iproute2 package starting with the iproute2-2.6.23 release: | ||
95 | |||
96 | "ip link add link <real dev> [ address MAC ] [ NAME ] type macvlan" | ||
97 | |||
93 | To compile this driver as a module, choose M here: the module | 98 | To compile this driver as a module, choose M here: the module |
94 | will be called macvlan. | 99 | will be called macvlan. |
95 | 100 | ||
@@ -2363,6 +2368,7 @@ config GELIC_NET | |||
2363 | config GELIC_WIRELESS | 2368 | config GELIC_WIRELESS |
2364 | bool "PS3 Wireless support" | 2369 | bool "PS3 Wireless support" |
2365 | depends on GELIC_NET | 2370 | depends on GELIC_NET |
2371 | select WIRELESS_EXT | ||
2366 | help | 2372 | help |
2367 | This option adds the support for the wireless feature of PS3. | 2373 | This option adds the support for the wireless feature of PS3. |
2368 | If you have the wireless-less model of PS3 or have no plan to | 2374 | If you have the wireless-less model of PS3 or have no plan to |
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c index afc7f34b1dcf..8af142ccf373 100644 --- a/drivers/net/bnx2x.c +++ b/drivers/net/bnx2x.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x.c: Broadcom Everest network driver. | 1 | /* bnx2x.c: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -10,13 +10,13 @@ | |||
10 | * Based on code from Michael Chan's bnx2 driver | 10 | * Based on code from Michael Chan's bnx2 driver |
11 | * UDP CSUM errata workaround by Arik Gendelman | 11 | * UDP CSUM errata workaround by Arik Gendelman |
12 | * Slowpath rework by Vladislav Zolotarov | 12 | * Slowpath rework by Vladislav Zolotarov |
13 | * Statistics and Link managment by Yitchak Gertner | 13 | * Statistics and Link management by Yitchak Gertner |
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | /* define this to make the driver freeze on error | 17 | /* define this to make the driver freeze on error |
18 | * to allow getting debug info | 18 | * to allow getting debug info |
19 | * (you will need to reboot afterwords) | 19 | * (you will need to reboot afterwards) |
20 | */ | 20 | */ |
21 | /*#define BNX2X_STOP_ON_ERROR*/ | 21 | /*#define BNX2X_STOP_ON_ERROR*/ |
22 | 22 | ||
@@ -63,22 +63,21 @@ | |||
63 | #include "bnx2x.h" | 63 | #include "bnx2x.h" |
64 | #include "bnx2x_init.h" | 64 | #include "bnx2x_init.h" |
65 | 65 | ||
66 | #define DRV_MODULE_VERSION "0.40.15" | 66 | #define DRV_MODULE_VERSION "1.40.22" |
67 | #define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $" | 67 | #define DRV_MODULE_RELDATE "2007/11/27" |
68 | #define BNX2X_BC_VER 0x040009 | 68 | #define BNX2X_BC_VER 0x040200 |
69 | 69 | ||
70 | /* Time in jiffies before concluding the transmitter is hung. */ | 70 | /* Time in jiffies before concluding the transmitter is hung. */ |
71 | #define TX_TIMEOUT (5*HZ) | 71 | #define TX_TIMEOUT (5*HZ) |
72 | 72 | ||
73 | static char version[] __devinitdata = | 73 | static char version[] __devinitdata = |
74 | "Broadcom NetXtreme II 577xx 10Gigabit Ethernet Driver " | 74 | "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver " |
75 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | 75 | DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; |
76 | 76 | ||
77 | MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); | 77 | MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>"); |
78 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); | 78 | MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver"); |
79 | MODULE_LICENSE("GPL"); | 79 | MODULE_LICENSE("GPL"); |
80 | MODULE_VERSION(DRV_MODULE_VERSION); | 80 | MODULE_VERSION(DRV_MODULE_VERSION); |
81 | MODULE_INFO(cvs_version, "$Revision: #356 $"); | ||
82 | 81 | ||
83 | static int use_inta; | 82 | static int use_inta; |
84 | static int poll; | 83 | static int poll; |
@@ -94,8 +93,8 @@ module_param(debug, int, 0); | |||
94 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); | 93 | MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X"); |
95 | MODULE_PARM_DESC(poll, "use polling (for debug)"); | 94 | MODULE_PARM_DESC(poll, "use polling (for debug)"); |
96 | MODULE_PARM_DESC(onefunc, "enable only first function"); | 95 | MODULE_PARM_DESC(onefunc, "enable only first function"); |
97 | MODULE_PARM_DESC(nomcp, "ignore managment CPU (Implies onefunc)"); | 96 | MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)"); |
98 | MODULE_PARM_DESC(debug, "defualt debug msglevel"); | 97 | MODULE_PARM_DESC(debug, "default debug msglevel"); |
99 | 98 | ||
100 | #ifdef BNX2X_MULTI | 99 | #ifdef BNX2X_MULTI |
101 | module_param(use_multi, int, 0); | 100 | module_param(use_multi, int, 0); |
@@ -298,8 +297,7 @@ static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32) | |||
298 | 297 | ||
299 | static int bnx2x_mc_assert(struct bnx2x *bp) | 298 | static int bnx2x_mc_assert(struct bnx2x *bp) |
300 | { | 299 | { |
301 | int i, j; | 300 | int i, j, rc = 0; |
302 | int rc = 0; | ||
303 | char last_idx; | 301 | char last_idx; |
304 | const char storm[] = {"XTCU"}; | 302 | const char storm[] = {"XTCU"}; |
305 | const u32 intmem_base[] = { | 303 | const u32 intmem_base[] = { |
@@ -313,8 +311,9 @@ static int bnx2x_mc_assert(struct bnx2x *bp) | |||
313 | for (i = 0; i < 4; i++) { | 311 | for (i = 0; i < 4; i++) { |
314 | last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + | 312 | last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET + |
315 | intmem_base[i]); | 313 | intmem_base[i]); |
316 | BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", | 314 | if (last_idx) |
317 | storm[i], last_idx); | 315 | BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n", |
316 | storm[i], last_idx); | ||
318 | 317 | ||
319 | /* print the asserts */ | 318 | /* print the asserts */ |
320 | for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { | 319 | for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) { |
@@ -330,7 +329,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) | |||
330 | intmem_base[i]); | 329 | intmem_base[i]); |
331 | 330 | ||
332 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { | 331 | if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { |
333 | BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x =" | 332 | BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x =" |
334 | " 0x%08x 0x%08x 0x%08x 0x%08x\n", | 333 | " 0x%08x 0x%08x 0x%08x 0x%08x\n", |
335 | storm[i], j, row3, row2, row1, row0); | 334 | storm[i], j, row3, row2, row1, row0); |
336 | rc++; | 335 | rc++; |
@@ -341,6 +340,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp) | |||
341 | } | 340 | } |
342 | return rc; | 341 | return rc; |
343 | } | 342 | } |
343 | |||
344 | static void bnx2x_fw_dump(struct bnx2x *bp) | 344 | static void bnx2x_fw_dump(struct bnx2x *bp) |
345 | { | 345 | { |
346 | u32 mark, offset; | 346 | u32 mark, offset; |
@@ -348,21 +348,22 @@ static void bnx2x_fw_dump(struct bnx2x *bp) | |||
348 | int word; | 348 | int word; |
349 | 349 | ||
350 | mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); | 350 | mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); |
351 | printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark); | 351 | mark = ((mark + 0x3) & ~0x3); |
352 | printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark); | ||
352 | 353 | ||
353 | for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { | 354 | for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { |
354 | for (word = 0; word < 8; word++) | 355 | for (word = 0; word < 8; word++) |
355 | data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + | 356 | data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + |
356 | offset + 4*word)); | 357 | offset + 4*word)); |
357 | data[8] = 0x0; | 358 | data[8] = 0x0; |
358 | printk(KERN_ERR PFX "%s", (char *)data); | 359 | printk(KERN_CONT "%s", (char *)data); |
359 | } | 360 | } |
360 | for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { | 361 | for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { |
361 | for (word = 0; word < 8; word++) | 362 | for (word = 0; word < 8; word++) |
362 | data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + | 363 | data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + |
363 | offset + 4*word)); | 364 | offset + 4*word)); |
364 | data[8] = 0x0; | 365 | data[8] = 0x0; |
365 | printk(KERN_ERR PFX "%s", (char *)data); | 366 | printk(KERN_CONT "%s", (char *)data); |
366 | } | 367 | } |
367 | printk("\n" KERN_ERR PFX "end of fw dump\n"); | 368 | printk("\n" KERN_ERR PFX "end of fw dump\n"); |
368 | } | 369 | } |
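Two things change in bnx2x_fw_dump(): the scratchpad mark read from the chip is rounded up to a 4-byte boundary before being used as a register offset, and the continuation pieces of the dump are printed with KERN_CONT instead of starting a fresh KERN_ERR record each time. The rounding idiom can be checked standalone (the sample values below are arbitrary, not taken from real hardware):

#include <stdio.h>
#include <stdint.h>

/* (x + 0x3) & ~0x3 rounds x up to the next multiple of 4; values that
 * are already aligned are left unchanged. */
static uint32_t align4(uint32_t x)
{
	return (x + 0x3) & ~(uint32_t)0x3;
}

int main(void)
{
	const uint32_t samples[] = { 0xf104, 0xf105, 0xf106, 0xf107, 0xf108 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%x -> 0x%x\n", samples[i], align4(samples[i]));
	return 0;
}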
@@ -427,10 +428,10 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
427 | } | 428 | } |
428 | } | 429 | } |
429 | 430 | ||
430 | BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)" | 431 | BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)" |
431 | " def_x_idx(%u) def_att_idx(%u) attn_state(%u)" | 432 | " def_t_idx(%u) def_att_idx(%u) attn_state(%u)" |
432 | " spq_prod_idx(%u)\n", | 433 | " spq_prod_idx(%u)\n", |
433 | bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx, | 434 | bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, |
434 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); | 435 | bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); |
435 | 436 | ||
436 | 437 | ||
@@ -441,7 +442,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp) | |||
441 | DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n"); | 442 | DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n"); |
442 | } | 443 | } |
443 | 444 | ||
444 | static void bnx2x_enable_int(struct bnx2x *bp) | 445 | static void bnx2x_int_enable(struct bnx2x *bp) |
445 | { | 446 | { |
446 | int port = bp->port; | 447 | int port = bp->port; |
447 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 448 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
@@ -454,18 +455,26 @@ static void bnx2x_enable_int(struct bnx2x *bp) | |||
454 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); | 455 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); |
455 | } else { | 456 | } else { |
456 | val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | | 457 | val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | |
458 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | | ||
457 | HC_CONFIG_0_REG_INT_LINE_EN_0 | | 459 | HC_CONFIG_0_REG_INT_LINE_EN_0 | |
458 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); | 460 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); |
461 | |||
462 | /* Errata A0.158 workaround */ | ||
463 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", | ||
464 | val, port, addr, msix); | ||
465 | |||
466 | REG_WR(bp, addr, val); | ||
467 | |||
459 | val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; | 468 | val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; |
460 | } | 469 | } |
461 | 470 | ||
462 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n", | 471 | DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n", |
463 | val, port, addr, msix); | 472 | val, port, addr, msix); |
464 | 473 | ||
465 | REG_WR(bp, addr, val); | 474 | REG_WR(bp, addr, val); |
466 | } | 475 | } |
467 | 476 | ||
468 | static void bnx2x_disable_int(struct bnx2x *bp) | 477 | static void bnx2x_int_disable(struct bnx2x *bp) |
469 | { | 478 | { |
470 | int port = bp->port; | 479 | int port = bp->port; |
471 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; | 480 | u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; |
@@ -484,15 +493,15 @@ static void bnx2x_disable_int(struct bnx2x *bp) | |||
484 | BNX2X_ERR("BUG! proper val not read from IGU!\n"); | 493 | BNX2X_ERR("BUG! proper val not read from IGU!\n"); |
485 | } | 494 | } |
486 | 495 | ||
487 | static void bnx2x_disable_int_sync(struct bnx2x *bp) | 496 | static void bnx2x_int_disable_sync(struct bnx2x *bp) |
488 | { | 497 | { |
489 | 498 | ||
490 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; | 499 | int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; |
491 | int i; | 500 | int i; |
492 | 501 | ||
493 | atomic_inc(&bp->intr_sem); | 502 | atomic_inc(&bp->intr_sem); |
494 | /* prevent the HW from sending interrupts*/ | 503 | /* prevent the HW from sending interrupts */ |
495 | bnx2x_disable_int(bp); | 504 | bnx2x_int_disable(bp); |
496 | 505 | ||
497 | /* make sure all ISRs are done */ | 506 | /* make sure all ISRs are done */ |
498 | if (msix) { | 507 | if (msix) { |
@@ -775,6 +784,7 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
775 | mb(); /* force bnx2x_wait_ramrod to see the change */ | 784 | mb(); /* force bnx2x_wait_ramrod to see the change */ |
776 | return; | 785 | return; |
777 | } | 786 | } |
787 | |||
778 | switch (command | bp->state) { | 788 | switch (command | bp->state) { |
779 | case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT): | 789 | case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT): |
780 | DP(NETIF_MSG_IFUP, "got setup ramrod\n"); | 790 | DP(NETIF_MSG_IFUP, "got setup ramrod\n"); |
@@ -787,20 +797,20 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp, | |||
787 | fp->state = BNX2X_FP_STATE_HALTED; | 797 | fp->state = BNX2X_FP_STATE_HALTED; |
788 | break; | 798 | break; |
789 | 799 | ||
790 | case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE): | ||
791 | DP(NETIF_MSG_IFDOWN, "got delete ramrod\n"); | ||
792 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | ||
793 | break; | ||
794 | |||
795 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): | 800 | case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT): |
796 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid); | 801 | DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", |
797 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED; | 802 | cid); |
803 | bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; | ||
798 | break; | 804 | break; |
799 | 805 | ||
800 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): | 806 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): |
801 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); | 807 | DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); |
802 | break; | 808 | break; |
803 | 809 | ||
810 | case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): | ||
811 | DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n"); | ||
812 | break; | ||
813 | |||
804 | default: | 814 | default: |
805 | BNX2X_ERR("unexpected ramrod (%d) state is %x\n", | 815 | BNX2X_ERR("unexpected ramrod (%d) state is %x\n", |
806 | command, bp->state); | 816 | command, bp->state); |
@@ -1179,12 +1189,175 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) | |||
1179 | return val; | 1189 | return val; |
1180 | } | 1190 | } |
1181 | 1191 | ||
1192 | static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource) | ||
1193 | { | ||
1194 | u32 cnt; | ||
1195 | u32 lock_status; | ||
1196 | u32 resource_bit = (1 << resource); | ||
1197 | u8 func = bp->port; | ||
1198 | |||
1199 | /* Validating that the resource is within range */ | ||
1200 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | ||
1201 | DP(NETIF_MSG_HW, | ||
1202 | "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", | ||
1203 | resource, HW_LOCK_MAX_RESOURCE_VALUE); | ||
1204 | return -EINVAL; | ||
1205 | } | ||
1206 | |||
1207 | /* Validating that the resource is not already taken */ | ||
1208 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
1209 | if (lock_status & resource_bit) { | ||
1210 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", | ||
1211 | lock_status, resource_bit); | ||
1212 | return -EEXIST; | ||
1213 | } | ||
1214 | |||
1215 | /* Try for 1 second every 5ms */ | ||
1216 | for (cnt = 0; cnt < 200; cnt++) { | ||
1217 | /* Try to acquire the lock */ | ||
1218 | REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4, | ||
1219 | resource_bit); | ||
1220 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
1221 | if (lock_status & resource_bit) | ||
1222 | return 0; | ||
1223 | |||
1224 | msleep(5); | ||
1225 | } | ||
1226 | DP(NETIF_MSG_HW, "Timeout\n"); | ||
1227 | return -EAGAIN; | ||
1228 | } | ||
1229 | |||
1230 | static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource) | ||
1231 | { | ||
1232 | u32 lock_status; | ||
1233 | u32 resource_bit = (1 << resource); | ||
1234 | u8 func = bp->port; | ||
1235 | |||
1236 | /* Validating that the resource is within range */ | ||
1237 | if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { | ||
1238 | DP(NETIF_MSG_HW, | ||
1239 | "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", | ||
1240 | resource, HW_LOCK_MAX_RESOURCE_VALUE); | ||
1241 | return -EINVAL; | ||
1242 | } | ||
1243 | |||
1244 | /* Validating that the resource is currently taken */ | ||
1245 | lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8); | ||
1246 | if (!(lock_status & resource_bit)) { | ||
1247 | DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n", | ||
1248 | lock_status, resource_bit); | ||
1249 | return -EFAULT; | ||
1250 | } | ||
1251 | |||
1252 | REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit); | ||
1253 | return 0; | ||
1254 | } | ||
1255 | |||
1256 | static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode) | ||
1257 | { | ||
1258 | /* The GPIO should be swapped if swap register is set and active */ | ||
1259 | int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) && | ||
1260 | REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port; | ||
1261 | int gpio_shift = gpio_num + | ||
1262 | (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0); | ||
1263 | u32 gpio_mask = (1 << gpio_shift); | ||
1264 | u32 gpio_reg; | ||
1265 | |||
1266 | if (gpio_num > MISC_REGISTERS_GPIO_3) { | ||
1267 | BNX2X_ERR("Invalid GPIO %d\n", gpio_num); | ||
1268 | return -EINVAL; | ||
1269 | } | ||
1270 | |||
1271 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO); | ||
1272 | /* read GPIO and mask except the float bits */ | ||
1273 | gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); | ||
1274 | |||
1275 | switch (mode) { | ||
1276 | case MISC_REGISTERS_GPIO_OUTPUT_LOW: | ||
1277 | DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n", | ||
1278 | gpio_num, gpio_shift); | ||
1279 | /* clear FLOAT and set CLR */ | ||
1280 | gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); | ||
1281 | gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); | ||
1282 | break; | ||
1283 | |||
1284 | case MISC_REGISTERS_GPIO_OUTPUT_HIGH: | ||
1285 | DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n", | ||
1286 | gpio_num, gpio_shift); | ||
1287 | /* clear FLOAT and set SET */ | ||
1288 | gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); | ||
1289 | gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); | ||
1290 | break; | ||
1291 | |||
1292 | case MISC_REGISTERS_GPIO_INPUT_HI_Z : | ||
1293 | DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n", | ||
1294 | gpio_num, gpio_shift); | ||
1295 | /* set FLOAT */ | ||
1296 | gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); | ||
1297 | break; | ||
1298 | |||
1299 | default: | ||
1300 | break; | ||
1301 | } | ||
1302 | |||
1303 | REG_WR(bp, MISC_REG_GPIO, gpio_reg); | ||
1304 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO); | ||
1305 | |||
1306 | return 0; | ||
1307 | } | ||
1308 | |||
1309 | static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) | ||
1310 | { | ||
1311 | u32 spio_mask = (1 << spio_num); | ||
1312 | u32 spio_reg; | ||
1313 | |||
1314 | if ((spio_num < MISC_REGISTERS_SPIO_4) || | ||
1315 | (spio_num > MISC_REGISTERS_SPIO_7)) { | ||
1316 | BNX2X_ERR("Invalid SPIO %d\n", spio_num); | ||
1317 | return -EINVAL; | ||
1318 | } | ||
1319 | |||
1320 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); | ||
1321 | /* read SPIO and mask except the float bits */ | ||
1322 | spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); | ||
1323 | |||
1324 | switch (mode) { | ||
1325 | case MISC_REGISTERS_SPIO_OUTPUT_LOW : | ||
1326 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num); | ||
1327 | /* clear FLOAT and set CLR */ | ||
1328 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); | ||
1329 | spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); | ||
1330 | break; | ||
1331 | |||
1332 | case MISC_REGISTERS_SPIO_OUTPUT_HIGH : | ||
1333 | DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num); | ||
1334 | /* clear FLOAT and set SET */ | ||
1335 | spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); | ||
1336 | spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); | ||
1337 | break; | ||
1338 | |||
1339 | case MISC_REGISTERS_SPIO_INPUT_HI_Z: | ||
1340 | DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num); | ||
1341 | /* set FLOAT */ | ||
1342 | spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); | ||
1343 | break; | ||
1344 | |||
1345 | default: | ||
1346 | break; | ||
1347 | } | ||
1348 | |||
1349 | REG_WR(bp, MISC_REG_SPIO, spio_reg); | ||
1350 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO); | ||
1351 | |||
1352 | return 0; | ||
1353 | } | ||
1354 | |||
1182 | static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val) | 1355 | static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val) |
1183 | { | 1356 | { |
1184 | int rc; | ||
1185 | u32 tmp, i; | ||
1186 | int port = bp->port; | 1357 | int port = bp->port; |
1187 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 1358 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
1359 | u32 tmp; | ||
1360 | int i, rc; | ||
1188 | 1361 | ||
1189 | /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n", | 1362 | /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n", |
1190 | bp->phy_addr, reg, val); */ | 1363 | bp->phy_addr, reg, val); */ |
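The new bnx2x_hw_lock()/bnx2x_hw_unlock() pair added in the hunk above takes a hardware resource lock by writing the resource bit to a per-function DRIVER_CONTROL register and polling it back, retrying every 5 ms for up to a second before giving up with -EAGAIN. A userspace sketch of that acquire-with-timeout shape follows; the fake register and reg_read/reg_write helpers stand in for REG_RD/REG_WR and are not the real register map:

#include <errno.h>
#include <stdio.h>

/* Fake "hardware" register standing in for MISC_REG_DRIVER_CONTROL_1;
 * in the driver the write is a request and the read-back checks
 * whether the arbiter actually granted the bit. */
static unsigned int lock_reg;

static unsigned int reg_read(void)              { return lock_reg; }
static void reg_write_request(unsigned int bit) { lock_reg |= bit; } /* always granted here */

static int hw_lock(unsigned int resource)
{
	unsigned int resource_bit = 1u << resource;

	if (reg_read() & resource_bit)
		return -EEXIST;			/* already held */

	for (int cnt = 0; cnt < 200; cnt++) {	/* ~1 s at 5 ms per try */
		reg_write_request(resource_bit);
		if (reg_read() & resource_bit)
			return 0;
		/* msleep(5) in the driver; omitted in this sketch */
	}
	return -EAGAIN;				/* arbiter never granted it */
}

int main(void)
{
	printf("first take: %d\n", hw_lock(3));		/* 0 */
	printf("second take: %d\n", hw_lock(3));	/* -EEXIST */
	return 0;
}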
@@ -1236,8 +1409,8 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val) | |||
1236 | { | 1409 | { |
1237 | int port = bp->port; | 1410 | int port = bp->port; |
1238 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 1411 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
1239 | u32 val, i; | 1412 | u32 val; |
1240 | int rc; | 1413 | int i, rc; |
1241 | 1414 | ||
1242 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 1415 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { |
1243 | 1416 | ||
@@ -1286,58 +1459,54 @@ static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val) | |||
1286 | return rc; | 1459 | return rc; |
1287 | } | 1460 | } |
1288 | 1461 | ||
1289 | static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val) | 1462 | static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl, |
1463 | u32 phy_addr, u32 reg, u32 addr, u32 val) | ||
1290 | { | 1464 | { |
1291 | int rc = 0; | 1465 | u32 tmp; |
1292 | u32 tmp, i; | 1466 | int i, rc = 0; |
1293 | int port = bp->port; | ||
1294 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | ||
1295 | |||
1296 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | ||
1297 | |||
1298 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | ||
1299 | tmp &= ~EMAC_MDIO_MODE_AUTO_POLL; | ||
1300 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); | ||
1301 | REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | ||
1302 | udelay(40); | ||
1303 | } | ||
1304 | 1467 | ||
1305 | /* set clause 45 mode */ | 1468 | /* set clause 45 mode, slow down the MDIO clock to 2.5MHz |
1306 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1469 | * (a value of 49==0x31) and make sure that the AUTO poll is off |
1307 | tmp |= EMAC_MDIO_MODE_CLAUSE_45; | 1470 | */ |
1308 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); | 1471 | tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1472 | tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); | ||
1473 | tmp |= (EMAC_MDIO_MODE_CLAUSE_45 | | ||
1474 | (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); | ||
1475 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); | ||
1476 | REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1477 | udelay(40); | ||
1309 | 1478 | ||
1310 | /* address */ | 1479 | /* address */ |
1311 | tmp = ((bp->phy_addr << 21) | (reg << 16) | addr | | 1480 | tmp = ((phy_addr << 21) | (reg << 16) | addr | |
1312 | EMAC_MDIO_COMM_COMMAND_ADDRESS | | 1481 | EMAC_MDIO_COMM_COMMAND_ADDRESS | |
1313 | EMAC_MDIO_COMM_START_BUSY); | 1482 | EMAC_MDIO_COMM_START_BUSY); |
1314 | EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); | 1483 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); |
1315 | 1484 | ||
1316 | for (i = 0; i < 50; i++) { | 1485 | for (i = 0; i < 50; i++) { |
1317 | udelay(10); | 1486 | udelay(10); |
1318 | 1487 | ||
1319 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); | 1488 | tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); |
1320 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { | 1489 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { |
1321 | udelay(5); | 1490 | udelay(5); |
1322 | break; | 1491 | break; |
1323 | } | 1492 | } |
1324 | } | 1493 | } |
1325 | |||
1326 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { | 1494 | if (tmp & EMAC_MDIO_COMM_START_BUSY) { |
1327 | BNX2X_ERR("write phy register failed\n"); | 1495 | BNX2X_ERR("write phy register failed\n"); |
1328 | 1496 | ||
1329 | rc = -EBUSY; | 1497 | rc = -EBUSY; |
1498 | |||
1330 | } else { | 1499 | } else { |
1331 | /* data */ | 1500 | /* data */ |
1332 | tmp = ((bp->phy_addr << 21) | (reg << 16) | val | | 1501 | tmp = ((phy_addr << 21) | (reg << 16) | val | |
1333 | EMAC_MDIO_COMM_COMMAND_WRITE_45 | | 1502 | EMAC_MDIO_COMM_COMMAND_WRITE_45 | |
1334 | EMAC_MDIO_COMM_START_BUSY); | 1503 | EMAC_MDIO_COMM_START_BUSY); |
1335 | EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp); | 1504 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp); |
1336 | 1505 | ||
1337 | for (i = 0; i < 50; i++) { | 1506 | for (i = 0; i < 50; i++) { |
1338 | udelay(10); | 1507 | udelay(10); |
1339 | 1508 | ||
1340 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); | 1509 | tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); |
1341 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { | 1510 | if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { |
1342 | udelay(5); | 1511 | udelay(5); |
1343 | break; | 1512 | break; |
@@ -1351,75 +1520,78 @@ static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val) | |||
1351 | } | 1520 | } |
1352 | } | 1521 | } |
1353 | 1522 | ||
1354 | /* unset clause 45 mode */ | 1523 | /* unset clause 45 mode, set the MDIO clock to a faster value |
1355 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1524 | * (0x13 => 6.25Mhz) and restore the AUTO poll if needed |
1356 | tmp &= ~EMAC_MDIO_MODE_CLAUSE_45; | 1525 | */ |
1357 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); | 1526 | tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1358 | 1527 | tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT); | |
1359 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 1528 | tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); |
1360 | 1529 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) | |
1361 | tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | ||
1362 | tmp |= EMAC_MDIO_MODE_AUTO_POLL; | 1530 | tmp |= EMAC_MDIO_MODE_AUTO_POLL; |
1363 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp); | 1531 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp); |
1364 | } | ||
1365 | 1532 | ||
1366 | return rc; | 1533 | return rc; |
1367 | } | 1534 | } |
1368 | 1535 | ||
1369 | static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr, | 1536 | static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg, |
1370 | u32 *ret_val) | 1537 | u32 addr, u32 val) |
1371 | { | 1538 | { |
1372 | int port = bp->port; | 1539 | u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
1373 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | ||
1374 | u32 val, i; | ||
1375 | int rc = 0; | ||
1376 | 1540 | ||
1377 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 1541 | return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr, |
1542 | reg, addr, val); | ||
1543 | } | ||
1378 | 1544 | ||
1379 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1545 | static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl, |
1380 | val &= ~EMAC_MDIO_MODE_AUTO_POLL; | 1546 | u32 phy_addr, u32 reg, u32 addr, |
1381 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); | 1547 | u32 *ret_val) |
1382 | REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1548 | { |
1383 | udelay(40); | 1549 | u32 val; |
1384 | } | 1550 | int i, rc = 0; |
1385 | 1551 | ||
1386 | /* set clause 45 mode */ | 1552 | /* set clause 45 mode, slow down the MDIO clock to 2.5MHz |
1387 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1553 | * (a value of 49==0x31) and make sure that the AUTO poll is off |
1388 | val |= EMAC_MDIO_MODE_CLAUSE_45; | 1554 | */ |
1389 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); | 1555 | val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1556 | val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT); | ||
1557 | val |= (EMAC_MDIO_MODE_CLAUSE_45 | | ||
1558 | (49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); | ||
1559 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); | ||
1560 | REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); | ||
1561 | udelay(40); | ||
1390 | 1562 | ||
1391 | /* address */ | 1563 | /* address */ |
1392 | val = ((bp->phy_addr << 21) | (reg << 16) | addr | | 1564 | val = ((phy_addr << 21) | (reg << 16) | addr | |
1393 | EMAC_MDIO_COMM_COMMAND_ADDRESS | | 1565 | EMAC_MDIO_COMM_COMMAND_ADDRESS | |
1394 | EMAC_MDIO_COMM_START_BUSY); | 1566 | EMAC_MDIO_COMM_START_BUSY); |
1395 | EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); | 1567 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); |
1396 | 1568 | ||
1397 | for (i = 0; i < 50; i++) { | 1569 | for (i = 0; i < 50; i++) { |
1398 | udelay(10); | 1570 | udelay(10); |
1399 | 1571 | ||
1400 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); | 1572 | val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); |
1401 | if (!(val & EMAC_MDIO_COMM_START_BUSY)) { | 1573 | if (!(val & EMAC_MDIO_COMM_START_BUSY)) { |
1402 | udelay(5); | 1574 | udelay(5); |
1403 | break; | 1575 | break; |
1404 | } | 1576 | } |
1405 | } | 1577 | } |
1406 | |||
1407 | if (val & EMAC_MDIO_COMM_START_BUSY) { | 1578 | if (val & EMAC_MDIO_COMM_START_BUSY) { |
1408 | BNX2X_ERR("read phy register failed\n"); | 1579 | BNX2X_ERR("read phy register failed\n"); |
1409 | 1580 | ||
1410 | *ret_val = 0; | 1581 | *ret_val = 0; |
1411 | rc = -EBUSY; | 1582 | rc = -EBUSY; |
1583 | |||
1412 | } else { | 1584 | } else { |
1413 | /* data */ | 1585 | /* data */ |
1414 | val = ((bp->phy_addr << 21) | (reg << 16) | | 1586 | val = ((phy_addr << 21) | (reg << 16) | |
1415 | EMAC_MDIO_COMM_COMMAND_READ_45 | | 1587 | EMAC_MDIO_COMM_COMMAND_READ_45 | |
1416 | EMAC_MDIO_COMM_START_BUSY); | 1588 | EMAC_MDIO_COMM_START_BUSY); |
1417 | EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val); | 1589 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val); |
1418 | 1590 | ||
1419 | for (i = 0; i < 50; i++) { | 1591 | for (i = 0; i < 50; i++) { |
1420 | udelay(10); | 1592 | udelay(10); |
1421 | 1593 | ||
1422 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM); | 1594 | val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); |
1423 | if (!(val & EMAC_MDIO_COMM_START_BUSY)) { | 1595 | if (!(val & EMAC_MDIO_COMM_START_BUSY)) { |
1424 | val &= EMAC_MDIO_COMM_DATA; | 1596 | val &= EMAC_MDIO_COMM_DATA; |
1425 | break; | 1597 | break; |
@@ -1436,31 +1608,39 @@ static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr, | |||
1436 | *ret_val = val; | 1608 | *ret_val = val; |
1437 | } | 1609 | } |
1438 | 1610 | ||
1439 | /* unset clause 45 mode */ | 1611 | /* unset clause 45 mode, set the MDIO clock to a faster value |
1440 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | 1612 | * (0x13 => 6.25Mhz) and restore the AUTO poll if needed |
1441 | val &= ~EMAC_MDIO_MODE_CLAUSE_45; | 1613 | */ |
1442 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); | 1614 | val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); |
1443 | 1615 | val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT); | |
1444 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) { | 1616 | val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); |
1445 | 1617 | if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) | |
1446 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE); | ||
1447 | val |= EMAC_MDIO_MODE_AUTO_POLL; | 1618 | val |= EMAC_MDIO_MODE_AUTO_POLL; |
1448 | EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val); | 1619 | REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); |
1449 | } | ||
1450 | 1620 | ||
1451 | return rc; | 1621 | return rc; |
1452 | } | 1622 | } |
1453 | 1623 | ||
1454 | static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val) | 1624 | static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg, |
1625 | u32 addr, u32 *ret_val) | ||
1626 | { | ||
1627 | u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | ||
1628 | |||
1629 | return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr, | ||
1630 | reg, addr, ret_val); | ||
1631 | } | ||
1632 | |||
1633 | static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg, | ||
1634 | u32 addr, u32 val) | ||
1455 | { | 1635 | { |
1456 | int i; | 1636 | int i; |
1457 | u32 rd_val; | 1637 | u32 rd_val; |
1458 | 1638 | ||
1459 | might_sleep(); | 1639 | might_sleep(); |
1460 | for (i = 0; i < 10; i++) { | 1640 | for (i = 0; i < 10; i++) { |
1461 | bnx2x_mdio45_write(bp, reg, addr, val); | 1641 | bnx2x_mdio45_write(bp, phy_addr, reg, addr, val); |
1462 | msleep(5); | 1642 | msleep(5); |
1463 | bnx2x_mdio45_read(bp, reg, addr, &rd_val); | 1643 | bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val); |
1464 | /* if the read value is not the same as the value we wrote, | 1644 | /* if the read value is not the same as the value we wrote, |
1465 | we should write it again */ | 1645 | we should write it again */ |
1466 | if (rd_val == val) | 1646 | if (rd_val == val) |
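bnx2x_mdio45_vwrite(), reworked above to take the PHY address explicitly, keeps its verify loop: write the register, sleep, read it back, and retry (up to 10 times) until the readback matches. A small sketch of that write-and-verify shape against a deliberately flaky fake register; the "sticks on the third attempt" behaviour is invented purely to exercise the loop and is not a claim about the real PHY:

#include <stdio.h>

static unsigned int fake_reg;
static int writes_seen;

/* Pretend the first two writes get lost, which is the situation a
 * verify loop exists to paper over. */
static void reg_write(unsigned int val)
{
	if (++writes_seen >= 3)
		fake_reg = val;
}

static unsigned int reg_read(void)
{
	return fake_reg;
}

static int vwrite(unsigned int val)
{
	for (int i = 0; i < 10; i++) {
		reg_write(val);
		/* the driver sleeps 5 ms here before reading back */
		if (reg_read() == val)
			return 0;	/* readback matches: done */
	}
	return -1;			/* value never stuck */
}

int main(void)
{
	int rc = vwrite(0x1234);

	printf("vwrite rc %d after %d write attempts\n", rc, writes_seen);
	return 0;
}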
@@ -1471,18 +1651,81 @@ static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val) | |||
1471 | } | 1651 | } |
1472 | 1652 | ||
1473 | /* | 1653 | /* |
1474 | * link managment | 1654 | * link management |
1475 | */ | 1655 | */ |
1476 | 1656 | ||
1657 | static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result) | ||
1658 | { | ||
1659 | switch (pause_result) { /* ASYM P ASYM P */ | ||
1660 | case 0xb: /* 1 0 1 1 */ | ||
1661 | bp->flow_ctrl = FLOW_CTRL_TX; | ||
1662 | break; | ||
1663 | |||
1664 | case 0xe: /* 1 1 1 0 */ | ||
1665 | bp->flow_ctrl = FLOW_CTRL_RX; | ||
1666 | break; | ||
1667 | |||
1668 | case 0x5: /* 0 1 0 1 */ | ||
1669 | case 0x7: /* 0 1 1 1 */ | ||
1670 | case 0xd: /* 1 1 0 1 */ | ||
1671 | case 0xf: /* 1 1 1 1 */ | ||
1672 | bp->flow_ctrl = FLOW_CTRL_BOTH; | ||
1673 | break; | ||
1674 | |||
1675 | default: | ||
1676 | break; | ||
1677 | } | ||
1678 | } | ||
1679 | |||
1680 | static u8 bnx2x_ext_phy_resove_fc(struct bnx2x *bp) | ||
1681 | { | ||
1682 | u32 ext_phy_addr; | ||
1683 | u32 ld_pause; /* local */ | ||
1684 | u32 lp_pause; /* link partner */ | ||
1685 | u32 an_complete; /* AN complete */ | ||
1686 | u32 pause_result; | ||
1687 | u8 ret = 0; | ||
1688 | |||
1689 | ext_phy_addr = ((bp->ext_phy_config & | ||
1690 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
1691 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
1692 | |||
1693 | /* read twice */ | ||
1694 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
1695 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
1696 | EXT_PHY_KR_STATUS, &an_complete); | ||
1697 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
1698 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
1699 | EXT_PHY_KR_STATUS, &an_complete); | ||
1700 | |||
1701 | if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) { | ||
1702 | ret = 1; | ||
1703 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
1704 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
1705 | EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause); | ||
1706 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
1707 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
1708 | EXT_PHY_KR_LP_AUTO_NEG, &lp_pause); | ||
1709 | pause_result = (ld_pause & | ||
1710 | EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8; | ||
1711 | pause_result |= (lp_pause & | ||
1712 | EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10; | ||
1713 | DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", | ||
1714 | pause_result); | ||
1715 | bnx2x_pause_resolve(bp, pause_result); | ||
1716 | } | ||
1717 | return ret; | ||
1718 | } | ||
1719 | |||
1477 | static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status) | 1720 | static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status) |
1478 | { | 1721 | { |
1479 | u32 ld_pause; /* local driver */ | 1722 | u32 ld_pause; /* local driver */ |
1480 | u32 lp_pause; /* link partner */ | 1723 | u32 lp_pause; /* link partner */ |
1481 | u32 pause_result; | 1724 | u32 pause_result; |
1482 | 1725 | ||
1483 | bp->flow_ctrl = 0; | 1726 | bp->flow_ctrl = 0; |
1484 | 1727 | ||
1485 | /* reolve from gp_status in case of AN complete and not sgmii */ | 1728 | /* resolve from gp_status in case of AN complete and not sgmii */ |
1486 | if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) && | 1729 | if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) && |
1487 | (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && | 1730 | (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && |
1488 | (!(bp->phy_flags & PHY_SGMII_FLAG)) && | 1731 | (!(bp->phy_flags & PHY_SGMII_FLAG)) && |
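bnx2x_pause_resolve(), factored out in the hunk above, interprets a 4-bit value built from the local and link-partner PAUSE/ASM_DIR advertisement bits (the in-code comment labels the nibble "ASYM P ASYM P"; judging by the shifts used, the local bits sit in the high half). The switch follows the usual 802.3 flow-control resolution: 0xb gives TX-only, 0xe gives RX-only, and 0x5/0x7/0xd/0xf (local and partner both advertising symmetric pause) give both directions. A tiny standalone version of that table, with flag values invented for the sketch rather than the driver's FLOW_CTRL_* constants:

#include <stdio.h>

enum { FC_NONE = 0, FC_TX = 1, FC_RX = 2, FC_BOTH = FC_TX | FC_RX };

/* pause_result nibble, high to low: local ASM_DIR, local PAUSE,
 * partner ASM_DIR, partner PAUSE (layout inferred from the driver). */
static int pause_resolve(unsigned int pause_result)
{
	switch (pause_result) {
	case 0xb:			/* 1 0 1 1 */
		return FC_TX;
	case 0xe:			/* 1 1 1 0 */
		return FC_RX;
	case 0x5: case 0x7:
	case 0xd: case 0xf:		/* both sides advertise symmetric pause */
		return FC_BOTH;
	default:
		return FC_NONE;
	}
}

int main(void)
{
	for (unsigned int r = 0; r <= 0xf; r++)
		printf("0x%x -> %d\n", r, pause_resolve(r));
	return 0;
}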
@@ -1499,45 +1742,57 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status) | |||
1499 | pause_result |= (lp_pause & | 1742 | pause_result |= (lp_pause & |
1500 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; | 1743 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; |
1501 | DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); | 1744 | DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); |
1745 | bnx2x_pause_resolve(bp, pause_result); | ||
1746 | } else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) || | ||
1747 | !(bnx2x_ext_phy_resove_fc(bp))) { | ||
1748 | /* forced speed */ | ||
1749 | if (bp->req_autoneg & AUTONEG_FLOW_CTRL) { | ||
1750 | switch (bp->req_flow_ctrl) { | ||
1751 | case FLOW_CTRL_AUTO: | ||
1752 | if (bp->dev->mtu <= 4500) | ||
1753 | bp->flow_ctrl = FLOW_CTRL_BOTH; | ||
1754 | else | ||
1755 | bp->flow_ctrl = FLOW_CTRL_TX; | ||
1756 | break; | ||
1502 | 1757 | ||
1503 | switch (pause_result) { /* ASYM P ASYM P */ | 1758 | case FLOW_CTRL_TX: |
1504 | case 0xb: /* 1 0 1 1 */ | 1759 | bp->flow_ctrl = FLOW_CTRL_TX; |
1505 | bp->flow_ctrl = FLOW_CTRL_TX; | 1760 | break; |
1506 | break; | ||
1507 | |||
1508 | case 0xe: /* 1 1 1 0 */ | ||
1509 | bp->flow_ctrl = FLOW_CTRL_RX; | ||
1510 | break; | ||
1511 | 1761 | ||
1512 | case 0x5: /* 0 1 0 1 */ | 1762 | case FLOW_CTRL_RX: |
1513 | case 0x7: /* 0 1 1 1 */ | 1763 | if (bp->dev->mtu <= 4500) |
1514 | case 0xd: /* 1 1 0 1 */ | 1764 | bp->flow_ctrl = FLOW_CTRL_RX; |
1515 | case 0xf: /* 1 1 1 1 */ | 1765 | break; |
1516 | bp->flow_ctrl = FLOW_CTRL_BOTH; | ||
1517 | break; | ||
1518 | 1766 | ||
1519 | default: | 1767 | case FLOW_CTRL_BOTH: |
1520 | break; | 1768 | if (bp->dev->mtu <= 4500) |
1521 | } | 1769 | bp->flow_ctrl = FLOW_CTRL_BOTH; |
1770 | else | ||
1771 | bp->flow_ctrl = FLOW_CTRL_TX; | ||
1772 | break; | ||
1522 | 1773 | ||
1523 | } else { /* forced mode */ | 1774 | case FLOW_CTRL_NONE: |
1524 | switch (bp->req_flow_ctrl) { | 1775 | default: |
1525 | case FLOW_CTRL_AUTO: | 1776 | break; |
1526 | if (bp->dev->mtu <= 4500) | 1777 | } |
1527 | bp->flow_ctrl = FLOW_CTRL_BOTH; | 1778 | } else { /* forced mode */ |
1528 | else | 1779 | switch (bp->req_flow_ctrl) { |
1529 | bp->flow_ctrl = FLOW_CTRL_TX; | 1780 | case FLOW_CTRL_AUTO: |
1530 | break; | 1781 | DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while" |
1782 | " req_autoneg 0x%x\n", | ||
1783 | bp->req_flow_ctrl, bp->req_autoneg); | ||
1784 | break; | ||
1531 | 1785 | ||
1532 | case FLOW_CTRL_TX: | 1786 | case FLOW_CTRL_TX: |
1533 | case FLOW_CTRL_RX: | 1787 | case FLOW_CTRL_RX: |
1534 | case FLOW_CTRL_BOTH: | 1788 | case FLOW_CTRL_BOTH: |
1535 | bp->flow_ctrl = bp->req_flow_ctrl; | 1789 | bp->flow_ctrl = bp->req_flow_ctrl; |
1536 | break; | 1790 | break; |
1537 | 1791 | ||
1538 | case FLOW_CTRL_NONE: | 1792 | case FLOW_CTRL_NONE: |
1539 | default: | 1793 | default: |
1540 | break; | 1794 | break; |
1795 | } | ||
1541 | } | 1796 | } |
1542 | } | 1797 | } |
1543 | DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl); | 1798 | DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl); |
@@ -1548,9 +1803,9 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status) | |||
1548 | bp->link_status = 0; | 1803 | bp->link_status = 0; |
1549 | 1804 | ||
1550 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { | 1805 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { |
1551 | DP(NETIF_MSG_LINK, "link up\n"); | 1806 | DP(NETIF_MSG_LINK, "phy link up\n"); |
1552 | 1807 | ||
1553 | bp->link_up = 1; | 1808 | bp->phy_link_up = 1; |
1554 | bp->link_status |= LINK_STATUS_LINK_UP; | 1809 | bp->link_status |= LINK_STATUS_LINK_UP; |
1555 | 1810 | ||
1556 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) | 1811 | if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS) |
@@ -1659,20 +1914,20 @@ static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status) | |||
1659 | bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; | 1914 | bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED; |
1660 | 1915 | ||
1661 | } else { /* link_down */ | 1916 | } else { /* link_down */ |
1662 | DP(NETIF_MSG_LINK, "link down\n"); | 1917 | DP(NETIF_MSG_LINK, "phy link down\n"); |
1663 | 1918 | ||
1664 | bp->link_up = 0; | 1919 | bp->phy_link_up = 0; |
1665 | 1920 | ||
1666 | bp->line_speed = 0; | 1921 | bp->line_speed = 0; |
1667 | bp->duplex = DUPLEX_FULL; | 1922 | bp->duplex = DUPLEX_FULL; |
1668 | bp->flow_ctrl = 0; | 1923 | bp->flow_ctrl = 0; |
1669 | } | 1924 | } |
1670 | 1925 | ||
1671 | DP(NETIF_MSG_LINK, "gp_status 0x%x link_up %d\n" | 1926 | DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %d\n" |
1672 | DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x" | 1927 | DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x" |
1673 | " link_status 0x%x\n", | 1928 | " link_status 0x%x\n", |
1674 | gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl, | 1929 | gp_status, bp->phy_link_up, bp->line_speed, bp->duplex, |
1675 | bp->link_status); | 1930 | bp->flow_ctrl, bp->link_status); |
1676 | } | 1931 | } |
1677 | 1932 | ||
1678 | static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g) | 1933 | static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g) |
@@ -1680,40 +1935,40 @@ static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g) | |||
1680 | int port = bp->port; | 1935 | int port = bp->port; |
1681 | 1936 | ||
1682 | /* first reset all status | 1937 | /* first reset all status |
1683 | * we asume only one line will be change at a time */ | 1938 | * we assume only one line will be change at a time */ |
1684 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 1939 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
1685 | (NIG_XGXS0_LINK_STATUS | | 1940 | (NIG_STATUS_XGXS0_LINK10G | |
1686 | NIG_SERDES0_LINK_STATUS | | 1941 | NIG_STATUS_XGXS0_LINK_STATUS | |
1687 | NIG_STATUS_INTERRUPT_XGXS0_LINK10G)); | 1942 | NIG_STATUS_SERDES0_LINK_STATUS)); |
1688 | if (bp->link_up) { | 1943 | if (bp->phy_link_up) { |
1689 | if (is_10g) { | 1944 | if (is_10g) { |
1690 | /* Disable the 10G link interrupt | 1945 | /* Disable the 10G link interrupt |
1691 | * by writing 1 to the status register | 1946 | * by writing 1 to the status register |
1692 | */ | 1947 | */ |
1693 | DP(NETIF_MSG_LINK, "10G XGXS link up\n"); | 1948 | DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); |
1694 | bnx2x_bits_en(bp, | 1949 | bnx2x_bits_en(bp, |
1695 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 1950 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
1696 | NIG_STATUS_INTERRUPT_XGXS0_LINK10G); | 1951 | NIG_STATUS_XGXS0_LINK10G); |
1697 | 1952 | ||
1698 | } else if (bp->phy_flags & PHY_XGXS_FLAG) { | 1953 | } else if (bp->phy_flags & PHY_XGXS_FLAG) { |
1699 | /* Disable the link interrupt | 1954 | /* Disable the link interrupt |
1700 | * by writing 1 to the relevant lane | 1955 | * by writing 1 to the relevant lane |
1701 | * in the status register | 1956 | * in the status register |
1702 | */ | 1957 | */ |
1703 | DP(NETIF_MSG_LINK, "1G XGXS link up\n"); | 1958 | DP(NETIF_MSG_LINK, "1G XGXS phy link up\n"); |
1704 | bnx2x_bits_en(bp, | 1959 | bnx2x_bits_en(bp, |
1705 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 1960 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
1706 | ((1 << bp->ser_lane) << | 1961 | ((1 << bp->ser_lane) << |
1707 | NIG_XGXS0_LINK_STATUS_SIZE)); | 1962 | NIG_STATUS_XGXS0_LINK_STATUS_SIZE)); |
1708 | 1963 | ||
1709 | } else { /* SerDes */ | 1964 | } else { /* SerDes */ |
1710 | DP(NETIF_MSG_LINK, "SerDes link up\n"); | 1965 | DP(NETIF_MSG_LINK, "SerDes phy link up\n"); |
1711 | /* Disable the link interrupt | 1966 | /* Disable the link interrupt |
1712 | * by writing 1 to the status register | 1967 | * by writing 1 to the status register |
1713 | */ | 1968 | */ |
1714 | bnx2x_bits_en(bp, | 1969 | bnx2x_bits_en(bp, |
1715 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 1970 | NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
1716 | NIG_SERDES0_LINK_STATUS); | 1971 | NIG_STATUS_SERDES0_LINK_STATUS); |
1717 | } | 1972 | } |
1718 | 1973 | ||
1719 | } else { /* link_down */ | 1974 | } else { /* link_down */ |
@@ -1724,91 +1979,182 @@ static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp) | |||
1724 | { | 1979 | { |
1725 | u32 ext_phy_type; | 1980 | u32 ext_phy_type; |
1726 | u32 ext_phy_addr; | 1981 | u32 ext_phy_addr; |
1727 | u32 local_phy; | 1982 | u32 val1 = 0, val2; |
1728 | u32 val = 0; | ||
1729 | u32 rx_sd, pcs_status; | 1983 | u32 rx_sd, pcs_status; |
1730 | 1984 | ||
1731 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 1985 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
1732 | local_phy = bp->phy_addr; | ||
1733 | ext_phy_addr = ((bp->ext_phy_config & | 1986 | ext_phy_addr = ((bp->ext_phy_config & |
1734 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | 1987 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> |
1735 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | 1988 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); |
1736 | bp->phy_addr = (u8)ext_phy_addr; | ||
1737 | 1989 | ||
1738 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); | 1990 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); |
1739 | switch (ext_phy_type) { | 1991 | switch (ext_phy_type) { |
1740 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | 1992 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: |
1741 | DP(NETIF_MSG_LINK, "XGXS Direct\n"); | 1993 | DP(NETIF_MSG_LINK, "XGXS Direct\n"); |
1742 | val = 1; | 1994 | val1 = 1; |
1743 | break; | 1995 | break; |
1744 | 1996 | ||
1745 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 1997 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
1746 | DP(NETIF_MSG_LINK, "XGXS 8705\n"); | 1998 | DP(NETIF_MSG_LINK, "XGXS 8705\n"); |
1747 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, | 1999 | bnx2x_mdio45_read(bp, ext_phy_addr, |
1748 | EXT_PHY_OPT_LASI_STATUS, &val); | 2000 | EXT_PHY_OPT_WIS_DEVAD, |
1749 | DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); | 2001 | EXT_PHY_OPT_LASI_STATUS, &val1); |
1750 | 2002 | DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); | |
1751 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD, | 2003 | |
1752 | EXT_PHY_OPT_LASI_STATUS, &val); | 2004 | bnx2x_mdio45_read(bp, ext_phy_addr, |
1753 | DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val); | 2005 | EXT_PHY_OPT_WIS_DEVAD, |
1754 | 2006 | EXT_PHY_OPT_LASI_STATUS, &val1); | |
1755 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 2007 | DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1); |
2008 | |||
2009 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2010 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
1756 | EXT_PHY_OPT_PMD_RX_SD, &rx_sd); | 2011 | EXT_PHY_OPT_PMD_RX_SD, &rx_sd); |
1757 | val = (rx_sd & 0x1); | 2012 | DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd); |
2013 | val1 = (rx_sd & 0x1); | ||
1758 | break; | 2014 | break; |
1759 | 2015 | ||
1760 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 2016 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
1761 | DP(NETIF_MSG_LINK, "XGXS 8706\n"); | 2017 | DP(NETIF_MSG_LINK, "XGXS 8706\n"); |
1762 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 2018 | bnx2x_mdio45_read(bp, ext_phy_addr, |
1763 | EXT_PHY_OPT_LASI_STATUS, &val); | 2019 | EXT_PHY_OPT_PMA_PMD_DEVAD, |
1764 | DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); | 2020 | EXT_PHY_OPT_LASI_STATUS, &val1); |
1765 | 2021 | DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1); | |
1766 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 2022 | |
1767 | EXT_PHY_OPT_LASI_STATUS, &val); | 2023 | bnx2x_mdio45_read(bp, ext_phy_addr, |
1768 | DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val); | 2024 | EXT_PHY_OPT_PMA_PMD_DEVAD, |
1769 | 2025 | EXT_PHY_OPT_LASI_STATUS, &val1); | |
1770 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 2026 | DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1); |
2027 | |||
2028 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2029 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
1771 | EXT_PHY_OPT_PMD_RX_SD, &rx_sd); | 2030 | EXT_PHY_OPT_PMD_RX_SD, &rx_sd); |
1772 | bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD, | 2031 | bnx2x_mdio45_read(bp, ext_phy_addr, |
1773 | EXT_PHY_OPT_PCS_STATUS, &pcs_status); | 2032 | EXT_PHY_OPT_PCS_DEVAD, |
2033 | EXT_PHY_OPT_PCS_STATUS, &pcs_status); | ||
2034 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2035 | EXT_PHY_AUTO_NEG_DEVAD, | ||
2036 | EXT_PHY_OPT_AN_LINK_STATUS, &val2); | ||
2037 | |||
1774 | DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x" | 2038 | DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x" |
1775 | " pcs_status 0x%x\n", rx_sd, pcs_status); | 2039 | " pcs_status 0x%x 1Gbps link_status 0x%x 0x%x\n", |
1776 | /* link is up if both bit 0 of pmd_rx and | 2040 | rx_sd, pcs_status, val2, (val2 & (1<<1))); |
1777 | * bit 0 of pcs_status are set | 2041 | /* link is up if both bit 0 of pmd_rx_sd and |
2042 | * bit 0 of pcs_status are set, or if the autoneg bit | ||
2043 | 1 is set | ||
1778 | */ | 2044 | */ |
1779 | val = (rx_sd & pcs_status); | 2045 | val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); |
2046 | break; | ||
2047 | |||
2048 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
2049 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
2050 | |||
2051 | /* clear the interrupt LASI status register */ | ||
2052 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2053 | ext_phy_addr, | ||
2054 | EXT_PHY_KR_PCS_DEVAD, | ||
2055 | EXT_PHY_KR_LASI_STATUS, &val2); | ||
2056 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2057 | ext_phy_addr, | ||
2058 | EXT_PHY_KR_PCS_DEVAD, | ||
2059 | EXT_PHY_KR_LASI_STATUS, &val1); | ||
2060 | DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n", | ||
2061 | val2, val1); | ||
2062 | /* Check the LASI */ | ||
2063 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2064 | ext_phy_addr, | ||
2065 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
2066 | 0x9003, &val2); | ||
2067 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2068 | ext_phy_addr, | ||
2069 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
2070 | 0x9003, &val1); | ||
2071 | DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n", | ||
2072 | val2, val1); | ||
2073 | /* Check the link status */ | ||
2074 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2075 | ext_phy_addr, | ||
2076 | EXT_PHY_KR_PCS_DEVAD, | ||
2077 | EXT_PHY_KR_PCS_STATUS, &val2); | ||
2078 | DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2); | ||
2079 | /* Check the link status on 1.1.2 */ | ||
2080 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2081 | ext_phy_addr, | ||
2082 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2083 | EXT_PHY_KR_STATUS, &val2); | ||
2084 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
2085 | ext_phy_addr, | ||
2086 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2087 | EXT_PHY_KR_STATUS, &val1); | ||
2088 | DP(NETIF_MSG_LINK, | ||
2089 | "KR PMA status 0x%x->0x%x\n", val2, val1); | ||
2090 | val1 = ((val1 & 4) == 4); | ||
2091 | /* If 1G was requested assume the link is up */ | ||
2092 | if (!(bp->req_autoneg & AUTONEG_SPEED) && | ||
2093 | (bp->req_line_speed == SPEED_1000)) | ||
2094 | val1 = 1; | ||
2095 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
2096 | break; | ||
2097 | |||
2098 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
2099 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2100 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2101 | EXT_PHY_OPT_LASI_STATUS, &val2); | ||
2102 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2103 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2104 | EXT_PHY_OPT_LASI_STATUS, &val1); | ||
2105 | DP(NETIF_MSG_LINK, | ||
2106 | "10G-base-T LASI status 0x%x->0x%x\n", val2, val1); | ||
2107 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2108 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2109 | EXT_PHY_KR_STATUS, &val2); | ||
2110 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2111 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2112 | EXT_PHY_KR_STATUS, &val1); | ||
2113 | DP(NETIF_MSG_LINK, | ||
2114 | "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); | ||
2115 | val1 = ((val1 & 4) == 4); | ||
2116 | /* if link is up | ||
2117 | * print the AN outcome of the SFX7101 PHY | ||
2118 | */ | ||
2119 | if (val1) { | ||
2120 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
2121 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
2122 | 0x21, &val2); | ||
2123 | DP(NETIF_MSG_LINK, | ||
2124 | "SFX7101 AN status 0x%x->%s\n", val2, | ||
2125 | (val2 & (1<<14)) ? "Master" : "Slave"); | ||
2126 | } | ||
1780 | break; | 2127 | break; |
1781 | 2128 | ||
1782 | default: | 2129 | default: |
1783 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", | 2130 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", |
1784 | bp->ext_phy_config); | 2131 | bp->ext_phy_config); |
1785 | val = 0; | 2132 | val1 = 0; |
1786 | break; | 2133 | break; |
1787 | } | 2134 | } |
1788 | bp->phy_addr = local_phy; | ||
1789 | 2135 | ||
1790 | } else { /* SerDes */ | 2136 | } else { /* SerDes */ |
1791 | ext_phy_type = SERDES_EXT_PHY_TYPE(bp); | 2137 | ext_phy_type = SERDES_EXT_PHY_TYPE(bp); |
1792 | switch (ext_phy_type) { | 2138 | switch (ext_phy_type) { |
1793 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: | 2139 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT: |
1794 | DP(NETIF_MSG_LINK, "SerDes Direct\n"); | 2140 | DP(NETIF_MSG_LINK, "SerDes Direct\n"); |
1795 | val = 1; | 2141 | val1 = 1; |
1796 | break; | 2142 | break; |
1797 | 2143 | ||
1798 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: | 2144 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: |
1799 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); | 2145 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); |
1800 | val = 1; | 2146 | val1 = 1; |
1801 | break; | 2147 | break; |
1802 | 2148 | ||
1803 | default: | 2149 | default: |
1804 | DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", | 2150 | DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n", |
1805 | bp->ext_phy_config); | 2151 | bp->ext_phy_config); |
1806 | val = 0; | 2152 | val1 = 0; |
1807 | break; | 2153 | break; |
1808 | } | 2154 | } |
1809 | } | 2155 | } |
1810 | 2156 | ||
1811 | return val; | 2157 | return val1; |
1812 | } | 2158 | } |
1813 | 2159 | ||
1814 | static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) | 2160 | static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) |
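One pattern worth noting in the enlarged bnx2x_ext_phy_is_link_up(): the LASI and status registers are read twice back to back (the "read twice" comment and the "0x%x->0x%x" debug prints). That is the usual way of handling latched status bits, where the first read returns and clears the latched history and the second read reflects the current state. A toy model of why the second read is the one to trust; the latched register and its bit layout here are invented, not the real PHY registers:

#include <stdio.h>

/* Toy latched ("sticky") status register: an event bit stays set
 * until read, and reading clears it. */
static unsigned int live_state;	/* what the link looks like right now */
static unsigned int latched;	/* sticky copy of every event seen */

static unsigned int read_status(void)
{
	unsigned int val = live_state | latched;

	latched = 0;			/* reading clears the latch */
	return val;
}

int main(void)
{
	/* the link bounced in the past but is currently down */
	live_state = 0;
	latched = 0x1;

	unsigned int first  = read_status();	/* 0x1: stale, latched event */
	unsigned int second = read_status();	/* 0x0: current state */

	printf("first read 0x%x, second read 0x%x\n", first, second);
	return 0;
}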
@@ -1819,7 +2165,7 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) | |||
1819 | u32 wb_write[2]; | 2165 | u32 wb_write[2]; |
1820 | u32 val; | 2166 | u32 val; |
1821 | 2167 | ||
1822 | DP(NETIF_MSG_LINK, "enableing BigMAC\n"); | 2168 | DP(NETIF_MSG_LINK, "enabling BigMAC\n"); |
1823 | /* reset and unreset the BigMac */ | 2169 | /* reset and unreset the BigMac */ |
1824 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 2170 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
1825 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | 2171 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); |
@@ -1933,6 +2279,35 @@ static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb) | |||
1933 | bp->stats_state = STATS_STATE_ENABLE; | 2279 | bp->stats_state = STATS_STATE_ENABLE; |
1934 | } | 2280 | } |
1935 | 2281 | ||
2282 | static void bnx2x_bmac_rx_disable(struct bnx2x *bp) | ||
2283 | { | ||
2284 | int port = bp->port; | ||
2285 | u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : | ||
2286 | NIG_REG_INGRESS_BMAC0_MEM; | ||
2287 | u32 wb_write[2]; | ||
2288 | |||
2289 | /* Only if the bmac is out of reset */ | ||
2290 | if (REG_RD(bp, MISC_REG_RESET_REG_2) & | ||
2291 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) { | ||
2292 | /* Clear Rx Enable bit in BMAC_CONTROL register */ | ||
2293 | #ifdef BNX2X_DMAE_RD | ||
2294 | bnx2x_read_dmae(bp, bmac_addr + | ||
2295 | BIGMAC_REGISTER_BMAC_CONTROL, 2); | ||
2296 | wb_write[0] = *bnx2x_sp(bp, wb_data[0]); | ||
2297 | wb_write[1] = *bnx2x_sp(bp, wb_data[1]); | ||
2298 | #else | ||
2299 | wb_write[0] = REG_RD(bp, | ||
2300 | bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL); | ||
2301 | wb_write[1] = REG_RD(bp, | ||
2302 | bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4); | ||
2303 | #endif | ||
2304 | wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE; | ||
2305 | REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, | ||
2306 | wb_write, 2); | ||
2307 | msleep(1); | ||
2308 | } | ||
2309 | } | ||
2310 | |||
1936 | static void bnx2x_emac_enable(struct bnx2x *bp) | 2311 | static void bnx2x_emac_enable(struct bnx2x *bp) |
1937 | { | 2312 | { |
1938 | int port = bp->port; | 2313 | int port = bp->port; |
@@ -1940,7 +2315,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp) | |||
1940 | u32 val; | 2315 | u32 val; |
1941 | int timeout; | 2316 | int timeout; |
1942 | 2317 | ||
1943 | DP(NETIF_MSG_LINK, "enableing EMAC\n"); | 2318 | DP(NETIF_MSG_LINK, "enabling EMAC\n"); |
1944 | /* reset and unreset the emac core */ | 2319 | /* reset and unreset the emac core */ |
1945 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 2320 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
1946 | (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); | 2321 | (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); |
@@ -2033,7 +2408,7 @@ static void bnx2x_emac_enable(struct bnx2x *bp) | |||
2033 | EMAC_TX_MODE_EXT_PAUSE_EN); | 2408 | EMAC_TX_MODE_EXT_PAUSE_EN); |
2034 | } | 2409 | } |
2035 | 2410 | ||
2036 | /* KEEP_VLAN_TAG, promiscous */ | 2411 | /* KEEP_VLAN_TAG, promiscuous */ |
2037 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); | 2412 | val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); |
2038 | val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; | 2413 | val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; |
2039 | EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); | 2414 | EMAC_WR(EMAC_REG_EMAC_RX_MODE, val); |
@@ -2161,7 +2536,6 @@ static void bnx2x_pbf_update(struct bnx2x *bp) | |||
2161 | u32 count = 1000; | 2536 | u32 count = 1000; |
2162 | u32 pause = 0; | 2537 | u32 pause = 0; |
2163 | 2538 | ||
2164 | |||
2165 | /* disable port */ | 2539 | /* disable port */ |
2166 | REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); | 2540 | REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); |
2167 | 2541 | ||
@@ -2232,7 +2606,7 @@ static void bnx2x_pbf_update(struct bnx2x *bp) | |||
2232 | static void bnx2x_update_mng(struct bnx2x *bp) | 2606 | static void bnx2x_update_mng(struct bnx2x *bp) |
2233 | { | 2607 | { |
2234 | if (!nomcp) | 2608 | if (!nomcp) |
2235 | SHMEM_WR(bp, drv_fw_mb[bp->port].link_status, | 2609 | SHMEM_WR(bp, port_mb[bp->port].link_status, |
2236 | bp->link_status); | 2610 | bp->link_status); |
2237 | } | 2611 | } |
2238 | 2612 | ||
@@ -2294,19 +2668,19 @@ static void bnx2x_link_down(struct bnx2x *bp) | |||
2294 | DP(BNX2X_MSG_STATS, "stats_state - STOP\n"); | 2668 | DP(BNX2X_MSG_STATS, "stats_state - STOP\n"); |
2295 | } | 2669 | } |
2296 | 2670 | ||
2297 | /* indicate link down */ | 2671 | /* indicate no mac active */ |
2298 | bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG); | 2672 | bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG); |
2299 | 2673 | ||
2300 | /* reset BigMac */ | 2674 | /* update shared memory */ |
2301 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | 2675 | bnx2x_update_mng(bp); |
2302 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | ||
2303 | 2676 | ||
2304 | /* ignore drain flag interrupt */ | ||
2305 | /* activate nig drain */ | 2677 | /* activate nig drain */ |
2306 | NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | 2678 | NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); |
2307 | 2679 | ||
2308 | /* update shared memory */ | 2680 | /* reset BigMac */ |
2309 | bnx2x_update_mng(bp); | 2681 | bnx2x_bmac_rx_disable(bp); |
2682 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, | ||
2683 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); | ||
2310 | 2684 | ||
2311 | /* indicate link down */ | 2685 | /* indicate link down */ |
2312 | bnx2x_link_report(bp); | 2686 | bnx2x_link_report(bp); |
@@ -2317,14 +2691,15 @@ static void bnx2x_init_mac_stats(struct bnx2x *bp); | |||
2317 | /* This function is called upon link interrupt */ | 2691 | /* This function is called upon link interrupt */ |
2318 | static void bnx2x_link_update(struct bnx2x *bp) | 2692 | static void bnx2x_link_update(struct bnx2x *bp) |
2319 | { | 2693 | { |
2320 | u32 gp_status; | ||
2321 | int port = bp->port; | 2694 | int port = bp->port; |
2322 | int i; | 2695 | int i; |
2696 | u32 gp_status; | ||
2323 | int link_10g; | 2697 | int link_10g; |
2324 | 2698 | ||
2325 | DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x," | 2699 | DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x," |
2326 | " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x," | 2700 | " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x," |
2327 | " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG), | 2701 | " 10G %x, XGXS_LINK %x\n", port, |
2702 | (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes", | ||
2328 | REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4), | 2703 | REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4), |
2329 | REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask, | 2704 | REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask, |
2330 | REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), | 2705 | REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), |
@@ -2336,7 +2711,7 @@ static void bnx2x_link_update(struct bnx2x *bp) | |||
2336 | might_sleep(); | 2711 | might_sleep(); |
2337 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS); | 2712 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS); |
2338 | /* avoid fast toggling */ | 2713 | /* avoid fast toggling */ |
2339 | for (i = 0 ; i < 10 ; i++) { | 2714 | for (i = 0; i < 10; i++) { |
2340 | msleep(10); | 2715 | msleep(10); |
2341 | bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1, | 2716 | bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1, |
2342 | &gp_status); | 2717 | &gp_status); |
@@ -2351,7 +2726,8 @@ static void bnx2x_link_update(struct bnx2x *bp) | |||
2351 | bnx2x_link_int_ack(bp, link_10g); | 2726 | bnx2x_link_int_ack(bp, link_10g); |
2352 | 2727 | ||
2353 | /* link is up only if both local phy and external phy are up */ | 2728 | /* link is up only if both local phy and external phy are up */ |
2354 | if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) { | 2729 | bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp)); |
2730 | if (bp->link_up) { | ||
2355 | if (link_10g) { | 2731 | if (link_10g) { |
2356 | bnx2x_bmac_enable(bp, 0); | 2732 | bnx2x_bmac_enable(bp, 0); |
2357 | bnx2x_leds_set(bp, SPEED_10000); | 2733 | bnx2x_leds_set(bp, SPEED_10000); |
@@ -2427,7 +2803,9 @@ static void bnx2x_reset_unicore(struct bnx2x *bp) | |||
2427 | } | 2803 | } |
2428 | } | 2804 | } |
2429 | 2805 | ||
2430 | BNX2X_ERR("BUG! unicore is still in reset!\n"); | 2806 | BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n", |
2807 | (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes", | ||
2808 | bp->phy_addr); | ||
2431 | } | 2809 | } |
2432 | 2810 | ||
2433 | static void bnx2x_set_swap_lanes(struct bnx2x *bp) | 2811 | static void bnx2x_set_swap_lanes(struct bnx2x *bp) |
@@ -2475,12 +2853,12 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp) | |||
2475 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT); | 2853 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT); |
2476 | 2854 | ||
2477 | bnx2x_mdio22_write(bp, | 2855 | bnx2x_mdio22_write(bp, |
2478 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, | 2856 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, |
2479 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); | 2857 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); |
2480 | 2858 | ||
2481 | bnx2x_mdio22_read(bp, | 2859 | bnx2x_mdio22_read(bp, |
2482 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, | 2860 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, |
2483 | &control2); | 2861 | &control2); |
2484 | 2862 | ||
2485 | if (bp->autoneg & AUTONEG_PARALLEL) { | 2863 | if (bp->autoneg & AUTONEG_PARALLEL) { |
2486 | control2 |= | 2864 | control2 |= |
@@ -2490,8 +2868,14 @@ static void bnx2x_set_parallel_detection(struct bnx2x *bp) | |||
2490 | ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; | 2868 | ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; |
2491 | } | 2869 | } |
2492 | bnx2x_mdio22_write(bp, | 2870 | bnx2x_mdio22_write(bp, |
2493 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, | 2871 | MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, |
2494 | control2); | 2872 | control2); |
2873 | |||
2874 | /* Disable parallel detection of HiG */ | ||
2875 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2); | ||
2876 | bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, | ||
2877 | MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | | ||
2878 | MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); | ||
2495 | } | 2879 | } |
2496 | } | 2880 | } |
2497 | 2881 | ||
@@ -2625,7 +3009,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp) | |||
2625 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G); | 3009 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G); |
2626 | 3010 | ||
2627 | /* set extended capabilities */ | 3011 | /* set extended capabilities */ |
2628 | if (bp->advertising & ADVERTISED_2500baseT_Full) | 3012 | if (bp->advertising & ADVERTISED_2500baseX_Full) |
2629 | val |= MDIO_OVER_1G_UP1_2_5G; | 3013 | val |= MDIO_OVER_1G_UP1_2_5G; |
2630 | if (bp->advertising & ADVERTISED_10000baseT_Full) | 3014 | if (bp->advertising & ADVERTISED_10000baseT_Full) |
2631 | val |= MDIO_OVER_1G_UP1_10G; | 3015 | val |= MDIO_OVER_1G_UP1_10G; |
@@ -2641,20 +3025,91 @@ static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp) | |||
2641 | /* for AN, we are always publishing full duplex */ | 3025 | /* for AN, we are always publishing full duplex */ |
2642 | an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; | 3026 | an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; |
2643 | 3027 | ||
2644 | /* set pause */ | 3028 | /* resolve pause mode and advertisement |
2645 | switch (bp->pause_mode) { | 3029 | * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ |
2646 | case PAUSE_SYMMETRIC: | 3030 | if (bp->req_autoneg & AUTONEG_FLOW_CTRL) { |
2647 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC; | 3031 | switch (bp->req_flow_ctrl) { |
2648 | break; | 3032 | case FLOW_CTRL_AUTO: |
2649 | case PAUSE_ASYMMETRIC: | 3033 | if (bp->dev->mtu <= 4500) { |
2650 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | 3034 | an_adv |= |
2651 | break; | 3035 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; |
2652 | case PAUSE_BOTH: | 3036 | bp->advertising |= (ADVERTISED_Pause | |
2653 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | 3037 | ADVERTISED_Asym_Pause); |
2654 | break; | 3038 | } else { |
2655 | case PAUSE_NONE: | 3039 | an_adv |= |
2656 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | 3040 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; |
2657 | break; | 3041 | bp->advertising |= ADVERTISED_Asym_Pause; |
3042 | } | ||
3043 | break; | ||
3044 | |||
3045 | case FLOW_CTRL_TX: | ||
3046 | an_adv |= | ||
3047 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
3048 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
3049 | break; | ||
3050 | |||
3051 | case FLOW_CTRL_RX: | ||
3052 | if (bp->dev->mtu <= 4500) { | ||
3053 | an_adv |= | ||
3054 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
3055 | bp->advertising |= (ADVERTISED_Pause | | ||
3056 | ADVERTISED_Asym_Pause); | ||
3057 | } else { | ||
3058 | an_adv |= | ||
3059 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
3060 | bp->advertising &= ~(ADVERTISED_Pause | | ||
3061 | ADVERTISED_Asym_Pause); | ||
3062 | } | ||
3063 | break; | ||
3064 | |||
3065 | case FLOW_CTRL_BOTH: | ||
3066 | if (bp->dev->mtu <= 4500) { | ||
3067 | an_adv |= | ||
3068 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
3069 | bp->advertising |= (ADVERTISED_Pause | | ||
3070 | ADVERTISED_Asym_Pause); | ||
3071 | } else { | ||
3072 | an_adv |= | ||
3073 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
3074 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
3075 | } | ||
3076 | break; | ||
3077 | |||
3078 | case FLOW_CTRL_NONE: | ||
3079 | default: | ||
3080 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
3081 | bp->advertising &= ~(ADVERTISED_Pause | | ||
3082 | ADVERTISED_Asym_Pause); | ||
3083 | break; | ||
3084 | } | ||
3085 | } else { /* forced mode */ | ||
3086 | switch (bp->req_flow_ctrl) { | ||
3087 | case FLOW_CTRL_AUTO: | ||
3088 | DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while" | ||
3089 | " req_autoneg 0x%x\n", | ||
3090 | bp->req_flow_ctrl, bp->req_autoneg); | ||
3091 | break; | ||
3092 | |||
3093 | case FLOW_CTRL_TX: | ||
3094 | an_adv |= | ||
3095 | MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; | ||
3096 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
3097 | break; | ||
3098 | |||
3099 | case FLOW_CTRL_RX: | ||
3100 | case FLOW_CTRL_BOTH: | ||
3101 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; | ||
3102 | bp->advertising |= (ADVERTISED_Pause | | ||
3103 | ADVERTISED_Asym_Pause); | ||
3104 | break; | ||
3105 | |||
3106 | case FLOW_CTRL_NONE: | ||
3107 | default: | ||
3108 | an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; | ||
3109 | bp->advertising &= ~(ADVERTISED_Pause | | ||
3110 | ADVERTISED_Asym_Pause); | ||
3111 | break; | ||
3112 | } | ||
2658 | } | 3113 | } |
2659 | 3114 | ||
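The pause handling added above resolves the clause 37 advertisement from the requested flow-control mode, following the pairing rules referenced from Table 28B-3 of 802.3, with the extra constraint that Rx pause is only advertised while the MTU is small enough (4500 or less) for the MAC to honor it. A compact sketch of the autoneg branch as a pure decision function, using illustrative constants in place of the MDIO_COMBO_IEEE0_* defines:

```c
#include <stdio.h>

/* Hypothetical stand-ins for the MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_* values */
enum { ADV_PAUSE_NONE = 0x0, ADV_PAUSE_ASYMMETRIC = 0x2, ADV_PAUSE_BOTH = 0x3 };
enum { FC_NONE, FC_TX, FC_RX, FC_BOTH, FC_AUTO };

/* Resolve the pause advertisement for autoneg, following the logic above:
 * Rx pause is only advertised when the MTU allows it (<= 4500 here). */
static unsigned int resolve_pause_adv(int req_flow_ctrl, unsigned int mtu)
{
	int rx_ok = (mtu <= 4500);

	switch (req_flow_ctrl) {
	case FC_AUTO:
	case FC_BOTH:
		return rx_ok ? ADV_PAUSE_BOTH : ADV_PAUSE_ASYMMETRIC;
	case FC_TX:
		return ADV_PAUSE_ASYMMETRIC;
	case FC_RX:
		return rx_ok ? ADV_PAUSE_BOTH : ADV_PAUSE_NONE;
	case FC_NONE:
	default:
		return ADV_PAUSE_NONE;
	}
}

int main(void)
{
	printf("BOTH @ mtu 1500 -> 0x%x\n", resolve_pause_adv(FC_BOTH, 1500));
	printf("BOTH @ mtu 9000 -> 0x%x\n", resolve_pause_adv(FC_BOTH, 9000));
	printf("RX   @ mtu 9000 -> 0x%x\n", resolve_pause_adv(FC_RX, 9000));
	return 0;
}
```

The forced-flow-control branch in the hunk differs only in that FLOW_CTRL_RX and FLOW_CTRL_BOTH always advertise both bits, since no negotiation will trim them.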
2660 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0); | 3115 | MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0); |
@@ -2752,47 +3207,162 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x *bp) | |||
2752 | static void bnx2x_link_int_enable(struct bnx2x *bp) | 3207 | static void bnx2x_link_int_enable(struct bnx2x *bp) |
2753 | { | 3208 | { |
2754 | int port = bp->port; | 3209 | int port = bp->port; |
3210 | u32 ext_phy_type; | ||
3211 | u32 mask; | ||
2755 | 3212 | ||
2756 | /* setting the status to report on link up | 3213 | /* setting the status to report on link up |
2757 | for either XGXS or SerDes */ | 3214 | for either XGXS or SerDes */ |
2758 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, | 3215 | bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, |
2759 | (NIG_XGXS0_LINK_STATUS | | 3216 | (NIG_STATUS_XGXS0_LINK10G | |
2760 | NIG_STATUS_INTERRUPT_XGXS0_LINK10G | | 3217 | NIG_STATUS_XGXS0_LINK_STATUS | |
2761 | NIG_SERDES0_LINK_STATUS)); | 3218 | NIG_STATUS_SERDES0_LINK_STATUS)); |
2762 | 3219 | ||
2763 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 3220 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
2764 | /* TBD - | 3221 | mask = (NIG_MASK_XGXS0_LINK10G | |
2765 | * in force mode (not AN) we can enable just the relevant | 3222 | NIG_MASK_XGXS0_LINK_STATUS); |
2766 | * interrupt | 3223 | DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n"); |
2767 | * Even in AN we might enable only one according to the AN | 3224 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); |
2768 | * speed mask | 3225 | if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && |
2769 | */ | 3226 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && |
2770 | bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | 3227 | (ext_phy_type != |
2771 | (NIG_MASK_XGXS0_LINK_STATUS | | 3228 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) { |
2772 | NIG_MASK_XGXS0_LINK10G)); | 3229 | mask |= NIG_MASK_MI_INT; |
2773 | DP(NETIF_MSG_LINK, "enable XGXS interrupt\n"); | 3230 | DP(NETIF_MSG_LINK, "enabled external phy int\n"); |
3231 | } | ||
2774 | 3232 | ||
2775 | } else { /* SerDes */ | 3233 | } else { /* SerDes */ |
2776 | bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | 3234 | mask = NIG_MASK_SERDES0_LINK_STATUS; |
2777 | NIG_MASK_SERDES0_LINK_STATUS); | 3235 | DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n"); |
2778 | DP(NETIF_MSG_LINK, "enable SerDes interrupt\n"); | 3236 | ext_phy_type = SERDES_EXT_PHY_TYPE(bp); |
3237 | if ((ext_phy_type != | ||
3238 | PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && | ||
3239 | (ext_phy_type != | ||
3240 | PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) { | ||
3241 | mask |= NIG_MASK_MI_INT; | ||
3242 | DP(NETIF_MSG_LINK, "enabled external phy int\n"); | ||
3243 | } | ||
2779 | } | 3244 | } |
3245 | bnx2x_bits_en(bp, | ||
3246 | NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | ||
3247 | mask); | ||
3248 | DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x," | ||
3249 | " int_mask 0x%x, MI_INT %x, SERDES_LINK %x," | ||
3250 | " 10G %x, XGXS_LINK %x\n", port, | ||
3251 | (bp->phy_flags & PHY_XGXS_FLAG)? "XGXS":"SerDes", | ||
3252 | REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4), | ||
3253 | REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), | ||
3254 | REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18), | ||
3255 | REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c), | ||
3256 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), | ||
3257 | REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68) | ||
3258 | ); | ||
3259 | } | ||
3260 | |||
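The reworked bnx2x_link_int_enable() builds the NIG interrupt mask in two steps: the link-status bits for the active interface (XGXS or SerDes) are always enabled, and the MI interrupt is added only when a real external PHY is configured. A small sketch of that mask construction, with hypothetical bit values standing in for the NIG_MASK_* defines:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the NIG_MASK_* interrupt bits */
#define MASK_XGXS0_LINK10G       (1u << 0)
#define MASK_XGXS0_LINK_STATUS   (1u << 1)
#define MASK_SERDES0_LINK_STATUS (1u << 2)
#define MASK_MI_INT              (1u << 3)

static uint32_t build_link_int_mask(bool is_xgxs, bool has_ext_phy)
{
	uint32_t mask;

	if (is_xgxs)
		mask = MASK_XGXS0_LINK10G | MASK_XGXS0_LINK_STATUS;
	else
		mask = MASK_SERDES0_LINK_STATUS;

	/* direct / not-connected / failed PHYs have no MI interrupt to route */
	if (has_ext_phy)
		mask |= MASK_MI_INT;

	return mask;
}

int main(void)
{
	printf("XGXS + external PHY: 0x%x\n", build_link_int_mask(true, true));
	printf("SerDes direct:       0x%x\n", build_link_int_mask(false, false));
	return 0;
}
```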
3261 | static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp) | ||
3262 | { | ||
3263 | u32 ext_phy_addr = ((bp->ext_phy_config & | ||
3264 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
3265 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
3266 | u32 fw_ver1, fw_ver2; | ||
3267 | |||
3268 | /* Need to wait 200ms after reset */ | ||
3269 | msleep(200); | ||
3270 | /* Boot port from external ROM | ||
3271 | * Set ser_boot_ctl bit in the MISC_CTRL1 register | ||
3272 | */ | ||
3273 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3274 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3275 | EXT_PHY_KR_MISC_CTRL1, 0x0001); | ||
3276 | |||
3277 | /* Reset internal microprocessor */ | ||
3278 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3279 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL, | ||
3280 | EXT_PHY_KR_ROM_RESET_INTERNAL_MP); | ||
3281 | /* set micro reset = 0 */ | ||
3282 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3283 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL, | ||
3284 | EXT_PHY_KR_ROM_MICRO_RESET); | ||
3285 | /* Reset internal microprocessor */ | ||
3286 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3287 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL, | ||
3288 | EXT_PHY_KR_ROM_RESET_INTERNAL_MP); | ||
3289 | /* wait for 100ms for code download via SPI port */ | ||
3290 | msleep(100); | ||
3291 | |||
3292 | /* Clear ser_boot_ctl bit */ | ||
3293 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3294 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3295 | EXT_PHY_KR_MISC_CTRL1, 0x0000); | ||
3296 | /* Wait 100ms */ | ||
3297 | msleep(100); | ||
3298 | |||
3299 | /* Print the PHY FW version */ | ||
3300 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3301 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3302 | 0xca19, &fw_ver1); | ||
3303 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3304 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3305 | 0xca1a, &fw_ver2); | ||
3306 | DP(NETIF_MSG_LINK, | ||
3307 | "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2); | ||
3308 | } | ||
3309 | |||
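bnx2x_bcm8072_external_rom_boot() above is a fixed write-and-wait sequence over clause 45 MDIO: assert ser_boot_ctl, bounce the internal microcontroller, give the SPI download 100 ms, release ser_boot_ctl, then read back the firmware version words. A sketch of driving such a sequence from a table, with a hypothetical MDIO write callback and symbolic step names rather than the real EXT_PHY_KR_* registers and values:

```c
#include <stddef.h>
#include <stdio.h>

struct phy_boot_step {
	const char *what;       /* description of the MDIO write */
	unsigned int delay_ms;  /* sleep after the write (msleep in the driver) */
};

typedef void (*mdio_write_fn)(void *ctx, const struct phy_boot_step *step);

/* Symbolic version of the 8072 boot handshake shown above */
static const struct phy_boot_step boot_steps[] = {
	{ "set ser_boot_ctl in MISC_CTRL1",       0   },
	{ "reset internal microprocessor",        0   },
	{ "release micro reset",                  0   },
	{ "reset again, let SPI download run",    100 },
	{ "clear ser_boot_ctl",                   100 },
};

static void run_boot_sequence(mdio_write_fn write, void *ctx)
{
	size_t i;

	for (i = 0; i < sizeof(boot_steps) / sizeof(boot_steps[0]); i++) {
		write(ctx, &boot_steps[i]);
		/* the driver sleeps delay_ms here before the next step */
	}
}

static void print_step(void *ctx, const struct phy_boot_step *step)
{
	(void)ctx;
	printf("MDIO write: %s (then wait %u ms)\n", step->what, step->delay_ms);
}

int main(void)
{
	run_boot_sequence(print_step, NULL);
	return 0;
}
```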
3310 | static void bnx2x_bcm8072_force_10G(struct bnx2x *bp) | ||
3311 | { | ||
3312 | u32 ext_phy_addr = ((bp->ext_phy_config & | ||
3313 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
3314 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
3315 | |||
3316 | /* Force KR or KX */ | ||
3317 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3318 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL, | ||
3319 | 0x2040); | ||
3320 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3321 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2, | ||
3322 | 0x000b); | ||
3323 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3324 | EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL, | ||
3325 | 0x0000); | ||
3326 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr, | ||
3327 | EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL, | ||
3328 | 0x0000); | ||
2780 | } | 3329 | } |
2781 | 3330 | ||
2782 | static void bnx2x_ext_phy_init(struct bnx2x *bp) | 3331 | static void bnx2x_ext_phy_init(struct bnx2x *bp) |
2783 | { | 3332 | { |
2784 | int port = bp->port; | ||
2785 | u32 ext_phy_type; | 3333 | u32 ext_phy_type; |
2786 | u32 ext_phy_addr; | 3334 | u32 ext_phy_addr; |
2787 | u32 local_phy; | 3335 | u32 cnt; |
3336 | u32 ctrl; | ||
3337 | u32 val = 0; | ||
2788 | 3338 | ||
2789 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 3339 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
2790 | local_phy = bp->phy_addr; | ||
2791 | ext_phy_addr = ((bp->ext_phy_config & | 3340 | ext_phy_addr = ((bp->ext_phy_config & |
2792 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | 3341 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> |
2793 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | 3342 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); |
2794 | 3343 | ||
2795 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); | 3344 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); |
3345 | /* Make sure that the soft reset is off (except for the 8072: | ||
3346 | * due to the lock, it will be done inside the specific | ||
3347 | * handling) | ||
3348 | */ | ||
3349 | if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && | ||
3350 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && | ||
3351 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) && | ||
3352 | (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) { | ||
3353 | /* Wait for soft reset to get cleared up to 1 sec */ | ||
3354 | for (cnt = 0; cnt < 1000; cnt++) { | ||
3355 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
3356 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3357 | EXT_PHY_OPT_CNTL, &ctrl); | ||
3358 | if (!(ctrl & (1<<15))) | ||
3359 | break; | ||
3360 | msleep(1); | ||
3361 | } | ||
3362 | DP(NETIF_MSG_LINK, | ||
3363 | "control reg 0x%x (after %d ms)\n", ctrl, cnt); | ||
3364 | } | ||
3365 | |||
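The "wait up to 1 sec" loop above is the usual bounded poll for a self-clearing reset bit (bit 15 of the PMA/PMD control register). A generic sketch of that pattern; the register reader is a hypothetical callback, not one of the driver's MDIO helpers:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SOFT_RESET_BIT  (1u << 15)   /* bit 15, as polled above */

/* Poll reader() up to max_ms times, sleeping ~1 ms per iteration in the
 * real driver; returns true once the bit is clear, false on timeout. */
static bool wait_bit_clear(uint32_t (*reader)(void *), void *ctx,
			   uint32_t bit, unsigned int max_ms)
{
	unsigned int i;

	for (i = 0; i < max_ms; i++) {
		if (!(reader(ctx) & bit))
			return true;
		/* msleep(1) in kernel context; omitted in this sketch */
	}
	return false;
}

/* Toy register that "clears" its reset bit after a few reads */
static uint32_t fake_ctrl_read(void *ctx)
{
	int *reads_left = ctx;
	return (*reads_left)-- > 0 ? SOFT_RESET_BIT : 0;
}

int main(void)
{
	int reads_left = 3;
	bool ok = wait_bit_clear(fake_ctrl_read, &reads_left, SOFT_RESET_BIT, 1000);

	printf("soft reset cleared: %s\n", ok ? "yes" : "timed out");
	return 0;
}
```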
2796 | switch (ext_phy_type) { | 3366 | switch (ext_phy_type) { |
2797 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | 3367 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: |
2798 | DP(NETIF_MSG_LINK, "XGXS Direct\n"); | 3368 | DP(NETIF_MSG_LINK, "XGXS Direct\n"); |
@@ -2800,49 +3370,235 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp) | |||
2800 | 3370 | ||
2801 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 3371 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
2802 | DP(NETIF_MSG_LINK, "XGXS 8705\n"); | 3372 | DP(NETIF_MSG_LINK, "XGXS 8705\n"); |
2803 | bnx2x_bits_en(bp, | ||
2804 | NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | ||
2805 | NIG_MASK_MI_INT); | ||
2806 | DP(NETIF_MSG_LINK, "enabled extenal phy int\n"); | ||
2807 | 3373 | ||
2808 | bp->phy_addr = ext_phy_type; | 3374 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, |
2809 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 3375 | EXT_PHY_OPT_PMA_PMD_DEVAD, |
2810 | EXT_PHY_OPT_PMD_MISC_CNTL, | 3376 | EXT_PHY_OPT_PMD_MISC_CNTL, |
2811 | 0x8288); | 3377 | 0x8288); |
2812 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 3378 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, |
3379 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2813 | EXT_PHY_OPT_PHY_IDENTIFIER, | 3380 | EXT_PHY_OPT_PHY_IDENTIFIER, |
2814 | 0x7fbf); | 3381 | 0x7fbf); |
2815 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 3382 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, |
3383 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2816 | EXT_PHY_OPT_CMU_PLL_BYPASS, | 3384 | EXT_PHY_OPT_CMU_PLL_BYPASS, |
2817 | 0x0100); | 3385 | 0x0100); |
2818 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD, | 3386 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, |
3387 | EXT_PHY_OPT_WIS_DEVAD, | ||
2819 | EXT_PHY_OPT_LASI_CNTL, 0x1); | 3388 | EXT_PHY_OPT_LASI_CNTL, 0x1); |
2820 | break; | 3389 | break; |
2821 | 3390 | ||
2822 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 3391 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
2823 | DP(NETIF_MSG_LINK, "XGXS 8706\n"); | 3392 | DP(NETIF_MSG_LINK, "XGXS 8706\n"); |
2824 | bnx2x_bits_en(bp, | 3393 | |
2825 | NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | 3394 | if (!(bp->req_autoneg & AUTONEG_SPEED)) { |
2826 | NIG_MASK_MI_INT); | 3395 | /* Force speed */ |
2827 | DP(NETIF_MSG_LINK, "enabled extenal phy int\n"); | 3396 | if (bp->req_line_speed == SPEED_10000) { |
2828 | 3397 | DP(NETIF_MSG_LINK, | |
2829 | bp->phy_addr = ext_phy_type; | 3398 | "XGXS 8706 force 10Gbps\n"); |
2830 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 3399 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, |
2831 | EXT_PHY_OPT_PMD_DIGITAL_CNT, | 3400 | EXT_PHY_OPT_PMA_PMD_DEVAD, |
2832 | 0x400); | 3401 | EXT_PHY_OPT_PMD_DIGITAL_CNT, |
2833 | bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | 3402 | 0x400); |
3403 | } else { | ||
3404 | /* Force 1Gbps */ | ||
3405 | DP(NETIF_MSG_LINK, | ||
3406 | "XGXS 8706 force 1Gbps\n"); | ||
3407 | |||
3408 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3409 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3410 | EXT_PHY_OPT_CNTL, | ||
3411 | 0x0040); | ||
3412 | |||
3413 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3414 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3415 | EXT_PHY_OPT_CNTL2, | ||
3416 | 0x000D); | ||
3417 | } | ||
3418 | |||
3419 | /* Enable LASI */ | ||
3420 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3421 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3422 | EXT_PHY_OPT_LASI_CNTL, | ||
3423 | 0x1); | ||
3424 | } else { | ||
3425 | /* AUTONEG */ | ||
3426 | /* Allow CL37 through CL73 */ | ||
3427 | DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n"); | ||
3428 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3429 | EXT_PHY_AUTO_NEG_DEVAD, | ||
3430 | EXT_PHY_OPT_AN_CL37_CL73, | ||
3431 | 0x040c); | ||
3432 | |||
3433 | /* Enable Full-Duplex advertisement on CL37 */ | ||
3434 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3435 | EXT_PHY_AUTO_NEG_DEVAD, | ||
3436 | EXT_PHY_OPT_AN_CL37_FD, | ||
3437 | 0x0020); | ||
3438 | /* Enable CL37 AN */ | ||
3439 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3440 | EXT_PHY_AUTO_NEG_DEVAD, | ||
3441 | EXT_PHY_OPT_AN_CL37_AN, | ||
3442 | 0x1000); | ||
3443 | /* Advertise 10G/1G support */ | ||
3444 | if (bp->advertising & | ||
3445 | ADVERTISED_1000baseT_Full) | ||
3446 | val = (1<<5); | ||
3447 | if (bp->advertising & | ||
3448 | ADVERTISED_10000baseT_Full) | ||
3449 | val |= (1<<7); | ||
3450 | |||
3451 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3452 | EXT_PHY_AUTO_NEG_DEVAD, | ||
3453 | EXT_PHY_OPT_AN_ADV, val); | ||
3454 | /* Enable LASI */ | ||
3455 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3456 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3457 | EXT_PHY_OPT_LASI_CNTL, | ||
3458 | 0x1); | ||
3459 | |||
3460 | /* Enable clause 73 AN */ | ||
3461 | bnx2x_mdio45_write(bp, ext_phy_addr, | ||
3462 | EXT_PHY_AUTO_NEG_DEVAD, | ||
3463 | EXT_PHY_OPT_CNTL, | ||
3464 | 0x1200); | ||
3465 | } | ||
3466 | break; | ||
3467 | |||
3468 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
3469 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
3470 | /* Wait for soft reset to get cleared up to 1 sec */ | ||
3471 | for (cnt = 0; cnt < 1000; cnt++) { | ||
3472 | bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, | ||
3473 | ext_phy_addr, | ||
3474 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3475 | EXT_PHY_OPT_CNTL, &ctrl); | ||
3476 | if (!(ctrl & (1<<15))) | ||
3477 | break; | ||
3478 | msleep(1); | ||
3479 | } | ||
3480 | DP(NETIF_MSG_LINK, | ||
3481 | "8072 control reg 0x%x (after %d ms)\n", | ||
3482 | ctrl, cnt); | ||
3483 | |||
3484 | bnx2x_bcm8072_external_rom_boot(bp); | ||
3485 | DP(NETIF_MSG_LINK, "Finshed loading 8072 KR ROM\n"); | ||
3486 | |||
3487 | /* enable LASI */ | ||
3488 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3489 | ext_phy_addr, | ||
3490 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3491 | 0x9000, 0x0400); | ||
3492 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3493 | ext_phy_addr, | ||
3494 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3495 | EXT_PHY_KR_LASI_CNTL, 0x0004); | ||
3496 | |||
3497 | /* If this is forced speed, set to KR or KX | ||
3498 | * (all others are not supported) | ||
3499 | */ | ||
3500 | if (!(bp->req_autoneg & AUTONEG_SPEED)) { | ||
3501 | if (bp->req_line_speed == SPEED_10000) { | ||
3502 | bnx2x_bcm8072_force_10G(bp); | ||
3503 | DP(NETIF_MSG_LINK, | ||
3504 | "Forced speed 10G on 8072\n"); | ||
3505 | /* unlock */ | ||
3506 | bnx2x_hw_unlock(bp, | ||
3507 | HW_LOCK_RESOURCE_8072_MDIO); | ||
3508 | break; | ||
3509 | } else | ||
3510 | val = (1<<5); | ||
3511 | } else { | ||
3512 | |||
3513 | /* Advertise 10G/1G support */ | ||
3514 | if (bp->advertising & | ||
3515 | ADVERTISED_1000baseT_Full) | ||
3516 | val = (1<<5); | ||
3517 | if (bp->advertising & | ||
3518 | ADVERTISED_10000baseT_Full) | ||
3519 | val |= (1<<7); | ||
3520 | } | ||
3521 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3522 | ext_phy_addr, | ||
3523 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3524 | 0x11, val); | ||
3525 | /* Add support for CL37 ( passive mode ) I */ | ||
3526 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3527 | ext_phy_addr, | ||
3528 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3529 | 0x8370, 0x040c); | ||
3530 | /* Add support for CL37 ( passive mode ) II */ | ||
3531 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3532 | ext_phy_addr, | ||
3533 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3534 | 0xffe4, 0x20); | ||
3535 | /* Add support for CL37 ( passive mode ) III */ | ||
3536 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3537 | ext_phy_addr, | ||
3538 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3539 | 0xffe0, 0x1000); | ||
3540 | /* Restart autoneg */ | ||
3541 | msleep(500); | ||
3542 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3543 | ext_phy_addr, | ||
3544 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3545 | EXT_PHY_KR_CTRL, 0x1200); | ||
3546 | DP(NETIF_MSG_LINK, "8072 Autoneg Restart: " | ||
3547 | "1G %ssupported 10G %ssupported\n", | ||
3548 | (val & (1<<5)) ? "" : "not ", | ||
3549 | (val & (1<<7)) ? "" : "not "); | ||
3550 | |||
3551 | /* unlock */ | ||
3552 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
3553 | break; | ||
3554 | |||
3555 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
3556 | DP(NETIF_MSG_LINK, | ||
3557 | "Setting the SFX7101 LASI indication\n"); | ||
3558 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3559 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2834 | EXT_PHY_OPT_LASI_CNTL, 0x1); | 3560 | EXT_PHY_OPT_LASI_CNTL, 0x1); |
3561 | DP(NETIF_MSG_LINK, | ||
3562 | "Setting the SFX7101 LED to blink on traffic\n"); | ||
3563 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3564 | EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
3565 | 0xC007, (1<<3)); | ||
3566 | |||
3567 | /* read modify write pause advertising */ | ||
3568 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
3569 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3570 | EXT_PHY_KR_AUTO_NEG_ADVERT, &val); | ||
3571 | val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH; | ||
3572 | /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */ | ||
3573 | if (bp->advertising & ADVERTISED_Pause) | ||
3574 | val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE; | ||
3575 | |||
3576 | if (bp->advertising & ADVERTISED_Asym_Pause) { | ||
3577 | val |= | ||
3578 | EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC; | ||
3579 | } | ||
3580 | DP(NETIF_MSG_LINK, "SFX7101 AN advertize 0x%x\n", val); | ||
3581 | bnx2x_mdio45_vwrite(bp, ext_phy_addr, | ||
3582 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3583 | EXT_PHY_KR_AUTO_NEG_ADVERT, val); | ||
3584 | /* Restart autoneg */ | ||
3585 | bnx2x_mdio45_read(bp, ext_phy_addr, | ||
3586 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3587 | EXT_PHY_KR_CTRL, &val); | ||
3588 | val |= 0x200; | ||
3589 | bnx2x_mdio45_write(bp, ext_phy_addr, | ||
3590 | EXT_PHY_KR_AUTO_NEG_DEVAD, | ||
3591 | EXT_PHY_KR_CTRL, val); | ||
2835 | break; | 3592 | break; |
2836 | 3593 | ||
2837 | default: | 3594 | default: |
2838 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", | 3595 | BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n", |
2839 | bp->ext_phy_config); | 3596 | bp->ext_phy_config); |
2840 | break; | 3597 | break; |
2841 | } | 3598 | } |
2842 | bp->phy_addr = local_phy; | ||
2843 | 3599 | ||
2844 | } else { /* SerDes */ | 3600 | } else { /* SerDes */ |
2845 | /* ext_phy_addr = ((bp->ext_phy_config & | 3601 | /* ext_phy_addr = ((bp->ext_phy_config & |
2846 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> | 3602 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >> |
2847 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); | 3603 | PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT); |
2848 | */ | 3604 | */ |
@@ -2854,10 +3610,6 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp) | |||
2854 | 3610 | ||
2855 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: | 3611 | case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482: |
2856 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); | 3612 | DP(NETIF_MSG_LINK, "SerDes 5482\n"); |
2857 | bnx2x_bits_en(bp, | ||
2858 | NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | ||
2859 | NIG_MASK_MI_INT); | ||
2860 | DP(NETIF_MSG_LINK, "enabled extenal phy int\n"); | ||
2861 | break; | 3613 | break; |
2862 | 3614 | ||
2863 | default: | 3615 | default: |
@@ -2871,8 +3623,22 @@ static void bnx2x_ext_phy_init(struct bnx2x *bp) | |||
2871 | static void bnx2x_ext_phy_reset(struct bnx2x *bp) | 3623 | static void bnx2x_ext_phy_reset(struct bnx2x *bp) |
2872 | { | 3624 | { |
2873 | u32 ext_phy_type; | 3625 | u32 ext_phy_type; |
2874 | u32 ext_phy_addr; | 3626 | u32 ext_phy_addr = ((bp->ext_phy_config & |
2875 | u32 local_phy; | 3627 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> |
3628 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
3629 | u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK); | ||
3630 | |||
3631 | /* The PHY reset is controlled by GPIO 1 | ||
3632 | * Give it 1ms of reset pulse | ||
3633 | */ | ||
3634 | if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) && | ||
3635 | (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) { | ||
3636 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | ||
3637 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
3638 | msleep(1); | ||
3639 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | ||
3640 | MISC_REGISTERS_GPIO_OUTPUT_HIGH); | ||
3641 | } | ||
2876 | 3642 | ||
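The reset path above pulses the external PHY reset line through GPIO 1: drive it low, hold it for 1 ms, then drive it high again, except on the two board types whose PHY is not wired that way. A sketch of the pulse helper with a hypothetical gpio_set callback standing in for bnx2x_set_gpio():

```c
#include <stdio.h>

enum gpio_level { GPIO_OUTPUT_LOW = 0, GPIO_OUTPUT_HIGH = 1 };

typedef void (*gpio_set_fn)(void *ctx, int gpio, enum gpio_level level);

/* Assert reset (active low), hold, then release; the hold is msleep(1)
 * in the driver and is only represented by a comment here. */
static void phy_reset_pulse(gpio_set_fn gpio_set, void *ctx, int gpio)
{
	gpio_set(ctx, gpio, GPIO_OUTPUT_LOW);
	/* msleep(1): give the PHY its minimum reset pulse width */
	gpio_set(ctx, gpio, GPIO_OUTPUT_HIGH);
}

static void print_gpio(void *ctx, int gpio, enum gpio_level level)
{
	(void)ctx;
	printf("GPIO %d -> %s\n", gpio, level == GPIO_OUTPUT_LOW ? "low" : "high");
}

int main(void)
{
	phy_reset_pulse(print_gpio, NULL, 1);   /* GPIO 1 drives PHY reset */
	return 0;
}
```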
2877 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 3643 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
2878 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); | 3644 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); |
@@ -2883,15 +3649,24 @@ static void bnx2x_ext_phy_reset(struct bnx2x *bp) | |||
2883 | 3649 | ||
2884 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 3650 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
2885 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 3651 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
2886 | DP(NETIF_MSG_LINK, "XGXS 8705/6\n"); | 3652 | DP(NETIF_MSG_LINK, "XGXS 8705/8706\n"); |
2887 | local_phy = bp->phy_addr; | 3653 | bnx2x_mdio45_write(bp, ext_phy_addr, |
2888 | ext_phy_addr = ((bp->ext_phy_config & | 3654 | EXT_PHY_OPT_PMA_PMD_DEVAD, |
2889 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> | ||
2890 | PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT); | ||
2891 | bp->phy_addr = (u8)ext_phy_addr; | ||
2892 | bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD, | ||
2893 | EXT_PHY_OPT_CNTL, 0xa040); | 3655 | EXT_PHY_OPT_CNTL, 0xa040); |
2894 | bp->phy_addr = local_phy; | 3656 | break; |
3657 | |||
3658 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
3659 | DP(NETIF_MSG_LINK, "XGXS 8072\n"); | ||
3660 | bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
3661 | bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, | ||
3662 | ext_phy_addr, | ||
3663 | EXT_PHY_KR_PMA_PMD_DEVAD, | ||
3664 | 0, 1<<15); | ||
3665 | bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO); | ||
3666 | break; | ||
3667 | |||
3668 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
3669 | DP(NETIF_MSG_LINK, "XGXS SFX7101\n"); | ||
2895 | break; | 3670 | break; |
2896 | 3671 | ||
2897 | default: | 3672 | default: |
@@ -2930,6 +3705,7 @@ static void bnx2x_link_initialize(struct bnx2x *bp) | |||
2930 | NIG_MASK_SERDES0_LINK_STATUS | | 3705 | NIG_MASK_SERDES0_LINK_STATUS | |
2931 | NIG_MASK_MI_INT)); | 3706 | NIG_MASK_MI_INT)); |
2932 | 3707 | ||
3708 | /* Activate the external PHY */ | ||
2933 | bnx2x_ext_phy_reset(bp); | 3709 | bnx2x_ext_phy_reset(bp); |
2934 | 3710 | ||
2935 | bnx2x_set_aer_mmd(bp); | 3711 | bnx2x_set_aer_mmd(bp); |
@@ -2994,13 +3770,13 @@ static void bnx2x_link_initialize(struct bnx2x *bp) | |||
2994 | /* AN enabled */ | 3770 | /* AN enabled */ |
2995 | bnx2x_set_brcm_cl37_advertisment(bp); | 3771 | bnx2x_set_brcm_cl37_advertisment(bp); |
2996 | 3772 | ||
2997 | /* program duplex & pause advertisment (for aneg) */ | 3773 | /* program duplex & pause advertisement (for aneg) */ |
2998 | bnx2x_set_ieee_aneg_advertisment(bp); | 3774 | bnx2x_set_ieee_aneg_advertisment(bp); |
2999 | 3775 | ||
3000 | /* enable autoneg */ | 3776 | /* enable autoneg */ |
3001 | bnx2x_set_autoneg(bp); | 3777 | bnx2x_set_autoneg(bp); |
3002 | 3778 | ||
3003 | /* enalbe and restart AN */ | 3779 | /* enable and restart AN */ |
3004 | bnx2x_restart_autoneg(bp); | 3780 | bnx2x_restart_autoneg(bp); |
3005 | } | 3781 | } |
3006 | 3782 | ||
@@ -3010,11 +3786,11 @@ static void bnx2x_link_initialize(struct bnx2x *bp) | |||
3010 | bnx2x_initialize_sgmii_process(bp); | 3786 | bnx2x_initialize_sgmii_process(bp); |
3011 | } | 3787 | } |
3012 | 3788 | ||
3013 | /* enable the interrupt */ | ||
3014 | bnx2x_link_int_enable(bp); | ||
3015 | |||
3016 | /* init ext phy and enable link state int */ | 3789 | /* init ext phy and enable link state int */ |
3017 | bnx2x_ext_phy_init(bp); | 3790 | bnx2x_ext_phy_init(bp); |
3791 | |||
3792 | /* enable the interrupt */ | ||
3793 | bnx2x_link_int_enable(bp); | ||
3018 | } | 3794 | } |
3019 | 3795 | ||
3020 | static void bnx2x_phy_deassert(struct bnx2x *bp) | 3796 | static void bnx2x_phy_deassert(struct bnx2x *bp) |
@@ -3073,6 +3849,11 @@ static int bnx2x_phy_init(struct bnx2x *bp) | |||
3073 | static void bnx2x_link_reset(struct bnx2x *bp) | 3849 | static void bnx2x_link_reset(struct bnx2x *bp) |
3074 | { | 3850 | { |
3075 | int port = bp->port; | 3851 | int port = bp->port; |
3852 | u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK); | ||
3853 | |||
3854 | /* update shared memory */ | ||
3855 | bp->link_status = 0; | ||
3856 | bnx2x_update_mng(bp); | ||
3076 | 3857 | ||
3077 | /* disable attentions */ | 3858 | /* disable attentions */ |
3078 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, | 3859 | bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, |
@@ -3081,21 +3862,45 @@ static void bnx2x_link_reset(struct bnx2x *bp) | |||
3081 | NIG_MASK_SERDES0_LINK_STATUS | | 3862 | NIG_MASK_SERDES0_LINK_STATUS | |
3082 | NIG_MASK_MI_INT)); | 3863 | NIG_MASK_MI_INT)); |
3083 | 3864 | ||
3084 | bnx2x_ext_phy_reset(bp); | 3865 | /* activate nig drain */ |
3866 | NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | ||
3867 | |||
3868 | /* disable nig egress interface */ | ||
3869 | NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0); | ||
3870 | NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); | ||
3871 | |||
3872 | /* Stop BigMac rx */ | ||
3873 | bnx2x_bmac_rx_disable(bp); | ||
3874 | |||
3875 | /* disable emac */ | ||
3876 | NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0); | ||
3877 | |||
3878 | msleep(10); | ||
3879 | |||
3880 | /* The PHY reset is controlled by GPIO 1 | ||
3881 | * Hold it as output low | ||
3882 | */ | ||
3883 | if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) && | ||
3884 | (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) { | ||
3885 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | ||
3886 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
3887 | DP(NETIF_MSG_LINK, "reset external PHY\n"); | ||
3888 | } | ||
3085 | 3889 | ||
3086 | /* reset the SerDes/XGXS */ | 3890 | /* reset the SerDes/XGXS */ |
3087 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, | 3891 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, |
3088 | (0x1ff << (port*16))); | 3892 | (0x1ff << (port*16))); |
3089 | 3893 | ||
3090 | /* reset EMAC / BMAC and disable NIG interfaces */ | 3894 | /* reset BigMac */ |
3091 | NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0); | 3895 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, |
3092 | NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0); | 3896 | (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); |
3093 | 3897 | ||
3094 | NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0); | 3898 | /* disable nig ingress interface */ |
3899 | NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0); | ||
3095 | NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0); | 3900 | NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0); |
3096 | NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); | ||
3097 | 3901 | ||
3098 | NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); | 3902 | /* set link down */ |
3903 | bp->link_up = 0; | ||
3099 | } | 3904 | } |
3100 | 3905 | ||
3101 | #ifdef BNX2X_XGXS_LB | 3906 | #ifdef BNX2X_XGXS_LB |
@@ -3158,7 +3963,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
3158 | int port = bp->port; | 3963 | int port = bp->port; |
3159 | 3964 | ||
3160 | DP(NETIF_MSG_TIMER, | 3965 | DP(NETIF_MSG_TIMER, |
3161 | "spe (%x:%x) command %x hw_cid %x data (%x:%x) left %x\n", | 3966 | "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", |
3162 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + | 3967 | (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + |
3163 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, | 3968 | (void *)bp->spq_prod_bd - (void *)bp->spq), command, |
3164 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); | 3969 | HW_CID(bp, cid), data_hi, data_lo, bp->spq_left); |
@@ -3176,6 +3981,7 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, | |||
3176 | bnx2x_panic(); | 3981 | bnx2x_panic(); |
3177 | return -EBUSY; | 3982 | return -EBUSY; |
3178 | } | 3983 | } |
3984 | |||
3179 | /* CID needs port number to be encoded in it */ | 3985 | /* CID needs port number to be encoded in it */ |
3180 | bp->spq_prod_bd->hdr.conn_and_cmd_data = | 3986 | bp->spq_prod_bd->hdr.conn_and_cmd_data = |
3181 | cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | | 3987 | cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | |
@@ -3282,8 +4088,8 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
3282 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; | 4088 | u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8; |
3283 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : | 4089 | u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : |
3284 | MISC_REG_AEU_MASK_ATTN_FUNC_0; | 4090 | MISC_REG_AEU_MASK_ATTN_FUNC_0; |
3285 | u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : | 4091 | u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : |
3286 | NIG_REG_MASK_INTERRUPT_PORT0; | 4092 | NIG_REG_MASK_INTERRUPT_PORT0; |
3287 | 4093 | ||
3288 | if (~bp->aeu_mask & (asserted & 0xff)) | 4094 | if (~bp->aeu_mask & (asserted & 0xff)) |
3289 | BNX2X_ERR("IGU ERROR\n"); | 4095 | BNX2X_ERR("IGU ERROR\n"); |
@@ -3301,15 +4107,11 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
3301 | 4107 | ||
3302 | if (asserted & ATTN_HARD_WIRED_MASK) { | 4108 | if (asserted & ATTN_HARD_WIRED_MASK) { |
3303 | if (asserted & ATTN_NIG_FOR_FUNC) { | 4109 | if (asserted & ATTN_NIG_FOR_FUNC) { |
3304 | u32 nig_status_port; | ||
3305 | u32 nig_int_addr = port ? | ||
3306 | NIG_REG_STATUS_INTERRUPT_PORT1 : | ||
3307 | NIG_REG_STATUS_INTERRUPT_PORT0; | ||
3308 | 4110 | ||
3309 | bp->nig_mask = REG_RD(bp, nig_mask_addr); | 4111 | /* save nig interrupt mask */ |
3310 | REG_WR(bp, nig_mask_addr, 0); | 4112 | bp->nig_mask = REG_RD(bp, nig_int_mask_addr); |
4113 | REG_WR(bp, nig_int_mask_addr, 0); | ||
3311 | 4114 | ||
3312 | nig_status_port = REG_RD(bp, nig_int_addr); | ||
3313 | bnx2x_link_update(bp); | 4115 | bnx2x_link_update(bp); |
3314 | 4116 | ||
3315 | /* handle unicore attn? */ | 4117 | /* handle unicore attn? */ |
@@ -3362,15 +4164,132 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted) | |||
3362 | 4164 | ||
3363 | /* now set back the mask */ | 4165 | /* now set back the mask */ |
3364 | if (asserted & ATTN_NIG_FOR_FUNC) | 4166 | if (asserted & ATTN_NIG_FOR_FUNC) |
3365 | REG_WR(bp, nig_mask_addr, bp->nig_mask); | 4167 | REG_WR(bp, nig_int_mask_addr, bp->nig_mask); |
3366 | } | 4168 | } |
3367 | 4169 | ||
3368 | static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | 4170 | static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) |
3369 | { | 4171 | { |
3370 | int port = bp->port; | 4172 | int port = bp->port; |
3371 | int index; | 4173 | int reg_offset; |
4174 | u32 val; | ||
4175 | |||
4176 | if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { | ||
4177 | |||
4178 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | ||
4179 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
4180 | |||
4181 | val = REG_RD(bp, reg_offset); | ||
4182 | val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; | ||
4183 | REG_WR(bp, reg_offset, val); | ||
4184 | |||
4185 | BNX2X_ERR("SPIO5 hw attention\n"); | ||
4186 | |||
4187 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | ||
4188 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | ||
4189 | /* Fan failure attention */ | ||
4190 | |||
4191 | /* The PHY reset is controlled by GPIO 1 */ | ||
4192 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, | ||
4193 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
4194 | /* Low power mode is controlled by GPIO 2 */ | ||
4195 | bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, | ||
4196 | MISC_REGISTERS_GPIO_OUTPUT_LOW); | ||
4197 | /* mark the failure */ | ||
4198 | bp->ext_phy_config &= | ||
4199 | ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; | ||
4200 | bp->ext_phy_config |= | ||
4201 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; | ||
4202 | SHMEM_WR(bp, | ||
4203 | dev_info.port_hw_config[port]. | ||
4204 | external_phy_config, | ||
4205 | bp->ext_phy_config); | ||
4206 | /* log the failure */ | ||
4207 | printk(KERN_ERR PFX "Fan Failure on Network" | ||
4208 | " Controller %s has caused the driver to" | ||
4209 | " shutdown the card to prevent permanent" | ||
4210 | " damage. Please contact Dell Support for" | ||
4211 | " assistance\n", bp->dev->name); | ||
4212 | break; | ||
4213 | |||
4214 | default: | ||
4215 | break; | ||
4216 | } | ||
4217 | } | ||
4218 | } | ||
4219 | |||
4220 | static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) | ||
4221 | { | ||
4222 | u32 val; | ||
4223 | |||
4224 | if (attn & BNX2X_DOORQ_ASSERT) { | ||
4225 | |||
4226 | val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); | ||
4227 | BNX2X_ERR("DB hw attention 0x%x\n", val); | ||
4228 | /* DORQ discard attention */ | ||
4229 | if (val & 0x2) | ||
4230 | BNX2X_ERR("FATAL error from DORQ\n"); | ||
4231 | } | ||
4232 | } | ||
4233 | |||
4234 | static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) | ||
4235 | { | ||
4236 | u32 val; | ||
4237 | |||
4238 | if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { | ||
4239 | |||
4240 | val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); | ||
4241 | BNX2X_ERR("CFC hw attention 0x%x\n", val); | ||
4242 | /* CFC error attention */ | ||
4243 | if (val & 0x2) | ||
4244 | BNX2X_ERR("FATAL error from CFC\n"); | ||
4245 | } | ||
4246 | |||
4247 | if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { | ||
4248 | |||
4249 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); | ||
4250 | BNX2X_ERR("PXP hw attention 0x%x\n", val); | ||
4251 | /* RQ_USDMDP_FIFO_OVERFLOW */ | ||
4252 | if (val & 0x18000) | ||
4253 | BNX2X_ERR("FATAL error from PXP\n"); | ||
4254 | } | ||
4255 | } | ||
4256 | |||
4257 | static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) | ||
4258 | { | ||
4259 | if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { | ||
4260 | |||
4261 | if (attn & BNX2X_MC_ASSERT_BITS) { | ||
4262 | |||
4263 | BNX2X_ERR("MC assert!\n"); | ||
4264 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0); | ||
4265 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0); | ||
4266 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0); | ||
4267 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0); | ||
4268 | bnx2x_panic(); | ||
4269 | |||
4270 | } else if (attn & BNX2X_MCP_ASSERT) { | ||
4271 | |||
4272 | BNX2X_ERR("MCP assert!\n"); | ||
4273 | REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0); | ||
4274 | bnx2x_mc_assert(bp); | ||
4275 | |||
4276 | } else | ||
4277 | BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn); | ||
4278 | } | ||
4279 | |||
4280 | if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { | ||
4281 | |||
4282 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); | ||
4283 | BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn); | ||
4284 | } | ||
4285 | } | ||
4286 | |||
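Splitting the deassertion path into bnx2x_attn_int_deasserted0..3 means each helper only sees the bits that are both asserted and routed to its AEU register (attn.sig[i] & group_mask.sig[i]), so the decode reduces to simple bit tests. A toy sketch of that masking-and-dispatch shape, with hypothetical bit names:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical attention bits for one of the four AEU registers */
#define ATTN_DORQ_DISCARD   (1u << 2)
#define ATTN_CFC_ERROR      (1u << 5)

static void handle_attn_group1(uint32_t attn)
{
	if (attn & ATTN_DORQ_DISCARD)
		printf("FATAL error from DORQ\n");
	if (attn & ATTN_CFC_ERROR)
		printf("FATAL error from CFC\n");
}

int main(void)
{
	/* Only bits that are asserted AND enabled for this group are passed in */
	uint32_t asserted   = ATTN_DORQ_DISCARD | ATTN_CFC_ERROR;
	uint32_t group_mask = ATTN_DORQ_DISCARD;        /* CFC routed elsewhere */

	handle_attn_group1(asserted & group_mask);
	return 0;
}
```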
4287 | static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | ||
4288 | { | ||
3372 | struct attn_route attn; | 4289 | struct attn_route attn; |
3373 | struct attn_route group_mask; | 4290 | struct attn_route group_mask; |
4291 | int port = bp->port; | ||
4292 | int index; | ||
3374 | u32 reg_addr; | 4293 | u32 reg_addr; |
3375 | u32 val; | 4294 | u32 val; |
3376 | 4295 | ||
@@ -3391,64 +4310,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3391 | DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, | 4310 | DP(NETIF_MSG_HW, "group[%d]: %llx\n", index, |
3392 | (unsigned long long)group_mask.sig[0]); | 4311 | (unsigned long long)group_mask.sig[0]); |
3393 | 4312 | ||
3394 | if (attn.sig[3] & group_mask.sig[3] & | 4313 | bnx2x_attn_int_deasserted3(bp, |
3395 | EVEREST_GEN_ATTN_IN_USE_MASK) { | 4314 | attn.sig[3] & group_mask.sig[3]); |
3396 | 4315 | bnx2x_attn_int_deasserted1(bp, | |
3397 | if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) { | 4316 | attn.sig[1] & group_mask.sig[1]); |
3398 | 4317 | bnx2x_attn_int_deasserted2(bp, | |
3399 | BNX2X_ERR("MC assert!\n"); | 4318 | attn.sig[2] & group_mask.sig[2]); |
3400 | bnx2x_panic(); | 4319 | bnx2x_attn_int_deasserted0(bp, |
3401 | 4320 | attn.sig[0] & group_mask.sig[0]); | |
3402 | } else if (attn.sig[3] & BNX2X_MCP_ASSERT) { | ||
3403 | |||
3404 | BNX2X_ERR("MCP assert!\n"); | ||
3405 | REG_WR(bp, | ||
3406 | MISC_REG_AEU_GENERAL_ATTN_11, 0); | ||
3407 | bnx2x_mc_assert(bp); | ||
3408 | |||
3409 | } else { | ||
3410 | BNX2X_ERR("UNKOWEN HW ASSERT!\n"); | ||
3411 | } | ||
3412 | } | ||
3413 | |||
3414 | if (attn.sig[1] & group_mask.sig[1] & | ||
3415 | BNX2X_DOORQ_ASSERT) { | ||
3416 | |||
3417 | val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR); | ||
3418 | BNX2X_ERR("DB hw attention 0x%x\n", val); | ||
3419 | /* DORQ discard attention */ | ||
3420 | if (val & 0x2) | ||
3421 | BNX2X_ERR("FATAL error from DORQ\n"); | ||
3422 | } | ||
3423 | |||
3424 | if (attn.sig[2] & group_mask.sig[2] & | ||
3425 | AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { | ||
3426 | |||
3427 | val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR); | ||
3428 | BNX2X_ERR("CFC hw attention 0x%x\n", val); | ||
3429 | /* CFC error attention */ | ||
3430 | if (val & 0x2) | ||
3431 | BNX2X_ERR("FATAL error from CFC\n"); | ||
3432 | } | ||
3433 | |||
3434 | if (attn.sig[2] & group_mask.sig[2] & | ||
3435 | AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { | ||
3436 | |||
3437 | val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0); | ||
3438 | BNX2X_ERR("PXP hw attention 0x%x\n", val); | ||
3439 | /* RQ_USDMDP_FIFO_OVERFLOW */ | ||
3440 | if (val & 0x18000) | ||
3441 | BNX2X_ERR("FATAL error from PXP\n"); | ||
3442 | } | ||
3443 | |||
3444 | if (attn.sig[3] & group_mask.sig[3] & | ||
3445 | EVEREST_LATCHED_ATTN_IN_USE_MASK) { | ||
3446 | |||
3447 | REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, | ||
3448 | 0x7ff); | ||
3449 | DP(NETIF_MSG_HW, "got latched bits 0x%x\n", | ||
3450 | attn.sig[3]); | ||
3451 | } | ||
3452 | 4321 | ||
3453 | if ((attn.sig[0] & group_mask.sig[0] & | 4322 | if ((attn.sig[0] & group_mask.sig[0] & |
3454 | HW_INTERRUT_ASSERT_SET_0) || | 4323 | HW_INTERRUT_ASSERT_SET_0) || |
@@ -3456,7 +4325,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3456 | HW_INTERRUT_ASSERT_SET_1) || | 4325 | HW_INTERRUT_ASSERT_SET_1) || |
3457 | (attn.sig[2] & group_mask.sig[2] & | 4326 | (attn.sig[2] & group_mask.sig[2] & |
3458 | HW_INTERRUT_ASSERT_SET_2)) | 4327 | HW_INTERRUT_ASSERT_SET_2)) |
3459 | BNX2X_ERR("FATAL HW block attention\n"); | 4328 | BNX2X_ERR("FATAL HW block attention" |
4329 | " set0 0x%x set1 0x%x" | ||
4330 | " set2 0x%x\n", | ||
4331 | (attn.sig[0] & group_mask.sig[0] & | ||
4332 | HW_INTERRUT_ASSERT_SET_0), | ||
4333 | (attn.sig[1] & group_mask.sig[1] & | ||
4334 | HW_INTERRUT_ASSERT_SET_1), | ||
4335 | (attn.sig[2] & group_mask.sig[2] & | ||
4336 | HW_INTERRUT_ASSERT_SET_2)); | ||
3460 | 4337 | ||
3461 | if ((attn.sig[0] & group_mask.sig[0] & | 4338 | if ((attn.sig[0] & group_mask.sig[0] & |
3462 | HW_PRTY_ASSERT_SET_0) || | 4339 | HW_PRTY_ASSERT_SET_0) || |
@@ -3464,7 +4341,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) | |||
3464 | HW_PRTY_ASSERT_SET_1) || | 4341 | HW_PRTY_ASSERT_SET_1) || |
3465 | (attn.sig[2] & group_mask.sig[2] & | 4342 | (attn.sig[2] & group_mask.sig[2] & |
3466 | HW_PRTY_ASSERT_SET_2)) | 4343 | HW_PRTY_ASSERT_SET_2)) |
3467 | BNX2X_ERR("FATAL HW block parity atention\n"); | 4344 | BNX2X_ERR("FATAL HW block parity attention\n"); |
3468 | } | 4345 | } |
3469 | } | 4346 | } |
3470 | 4347 | ||
@@ -3529,7 +4406,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3529 | 4406 | ||
3530 | /* Return here if interrupt is disabled */ | 4407 | /* Return here if interrupt is disabled */ |
3531 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 4408 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
3532 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | 4409 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); |
3533 | return; | 4410 | return; |
3534 | } | 4411 | } |
3535 | 4412 | ||
@@ -3539,12 +4416,11 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3539 | 4416 | ||
3540 | DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); | 4417 | DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); |
3541 | 4418 | ||
3542 | if (status & 0x1) { | 4419 | /* HW attentions */ |
3543 | /* HW attentions */ | 4420 | if (status & 0x1) |
3544 | bnx2x_attn_int(bp); | 4421 | bnx2x_attn_int(bp); |
3545 | } | ||
3546 | 4422 | ||
3547 | /* CStorm events: query_stats, cfc delete ramrods */ | 4423 | /* CStorm events: query_stats, port delete ramrod */ |
3548 | if (status & 0x2) | 4424 | if (status & 0x2) |
3549 | bp->stat_pending = 0; | 4425 | bp->stat_pending = 0; |
3550 | 4426 | ||
@@ -3558,6 +4434,7 @@ static void bnx2x_sp_task(struct work_struct *work) | |||
3558 | IGU_INT_NOP, 1); | 4434 | IGU_INT_NOP, 1); |
3559 | bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), | 4435 | bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), |
3560 | IGU_INT_ENABLE, 1); | 4436 | IGU_INT_ENABLE, 1); |
4437 | |||
3561 | } | 4438 | } |
3562 | 4439 | ||
3563 | static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | 4440 | static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) |
@@ -3567,11 +4444,11 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) | |||
3567 | 4444 | ||
3568 | /* Return here if interrupt is disabled */ | 4445 | /* Return here if interrupt is disabled */ |
3569 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { | 4446 | if (unlikely(atomic_read(&bp->intr_sem) != 0)) { |
3570 | DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); | 4447 | DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n"); |
3571 | return IRQ_HANDLED; | 4448 | return IRQ_HANDLED; |
3572 | } | 4449 | } |
3573 | 4450 | ||
3574 | bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0); | 4451 | bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0); |
3575 | 4452 | ||
3576 | #ifdef BNX2X_STOP_ON_ERROR | 4453 | #ifdef BNX2X_STOP_ON_ERROR |
3577 | if (unlikely(bp->panic)) | 4454 | if (unlikely(bp->panic)) |
@@ -3906,7 +4783,7 @@ static void bnx2x_stop_stats(struct bnx2x *bp) | |||
3906 | 4783 | ||
3907 | while (bp->stats_state != STATS_STATE_DISABLE) { | 4784 | while (bp->stats_state != STATS_STATE_DISABLE) { |
3908 | if (!timeout) { | 4785 | if (!timeout) { |
3909 | BNX2X_ERR("timeout wating for stats stop\n"); | 4786 | BNX2X_ERR("timeout waiting for stats stop\n"); |
3910 | break; | 4787 | break; |
3911 | } | 4788 | } |
3912 | timeout--; | 4789 | timeout--; |
@@ -4173,39 +5050,37 @@ static void bnx2x_update_net_stats(struct bnx2x *bp) | |||
4173 | 5050 | ||
4174 | nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); | 5051 | nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); |
4175 | 5052 | ||
4176 | nstats->tx_bytes = | 5053 | nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); |
4177 | bnx2x_hilo(&estats->total_bytes_transmitted_hi); | ||
4178 | 5054 | ||
4179 | nstats->rx_dropped = estats->checksum_discard + | 5055 | nstats->rx_dropped = estats->checksum_discard + estats->mac_discard; |
4180 | estats->mac_discard; | ||
4181 | nstats->tx_dropped = 0; | 5056 | nstats->tx_dropped = 0; |
4182 | 5057 | ||
4183 | nstats->multicast = | 5058 | nstats->multicast = |
4184 | bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi); | 5059 | bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi); |
4185 | 5060 | ||
4186 | nstats->collisions = | 5061 | nstats->collisions = estats->single_collision_transmit_frames + |
4187 | estats->single_collision_transmit_frames + | 5062 | estats->multiple_collision_transmit_frames + |
4188 | estats->multiple_collision_transmit_frames + | 5063 | estats->late_collision_frames + |
4189 | estats->late_collision_frames + | 5064 | estats->excessive_collision_frames; |
4190 | estats->excessive_collision_frames; | ||
4191 | 5065 | ||
4192 | nstats->rx_length_errors = estats->runt_packets_received + | 5066 | nstats->rx_length_errors = estats->runt_packets_received + |
4193 | estats->jabber_packets_received; | 5067 | estats->jabber_packets_received; |
4194 | nstats->rx_over_errors = estats->no_buff_discard; | 5068 | nstats->rx_over_errors = estats->brb_discard + |
5069 | estats->brb_truncate_discard; | ||
4195 | nstats->rx_crc_errors = estats->crc_receive_errors; | 5070 | nstats->rx_crc_errors = estats->crc_receive_errors; |
4196 | nstats->rx_frame_errors = estats->alignment_errors; | 5071 | nstats->rx_frame_errors = estats->alignment_errors; |
4197 | nstats->rx_fifo_errors = estats->brb_discard + | 5072 | nstats->rx_fifo_errors = estats->no_buff_discard; |
4198 | estats->brb_truncate_discard; | ||
4199 | nstats->rx_missed_errors = estats->xxoverflow_discard; | 5073 | nstats->rx_missed_errors = estats->xxoverflow_discard; |
4200 | 5074 | ||
4201 | nstats->rx_errors = nstats->rx_length_errors + | 5075 | nstats->rx_errors = nstats->rx_length_errors + |
4202 | nstats->rx_over_errors + | 5076 | nstats->rx_over_errors + |
4203 | nstats->rx_crc_errors + | 5077 | nstats->rx_crc_errors + |
4204 | nstats->rx_frame_errors + | 5078 | nstats->rx_frame_errors + |
4205 | nstats->rx_fifo_errors; | 5079 | nstats->rx_fifo_errors + |
5080 | nstats->rx_missed_errors; | ||
4206 | 5081 | ||
4207 | nstats->tx_aborted_errors = estats->late_collision_frames + | 5082 | nstats->tx_aborted_errors = estats->late_collision_frames + |
4208 | estats->excessive_collision_frames; | 5083 | estats->excessive_collision_frames; |
4209 | nstats->tx_carrier_errors = estats->false_carrier_detections; | 5084 | nstats->tx_carrier_errors = estats->false_carrier_detections; |
4210 | nstats->tx_fifo_errors = 0; | 5085 | nstats->tx_fifo_errors = 0; |
4211 | nstats->tx_heartbeat_errors = 0; | 5086 | nstats->tx_heartbeat_errors = 0; |
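
Note on the statistics hunk above: the chip keeps each counter as a split pair of 32-bit words (a *_hi/*_lo layout in the eth_stats area) that bnx2x_hilo() folds back into one 64-bit value before it is copied into the netdev counters. A minimal user-space sketch of that hi/lo combine; the struct and field names here are hypothetical, only the idiom matches the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: a 64-bit counter stored as two consecutive
 * 32-bit words, high word first, as in the split stats layout. */
struct split_counter {
	uint32_t hi;
	uint32_t lo;
};

/* Fold the hi/lo pair back into one 64-bit value. */
static uint64_t hilo(const struct split_counter *c)
{
	return ((uint64_t)c->hi << 32) | c->lo;
}

int main(void)
{
	struct split_counter bytes_received = { .hi = 0x1, .lo = 0x80000000u };

	printf("combined: %llu\n",
	       (unsigned long long)hilo(&bytes_received));
	return 0;
}
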
@@ -4334,7 +5209,7 @@ static void bnx2x_timer(unsigned long data) | |||
4334 | return; | 5209 | return; |
4335 | 5210 | ||
4336 | if (atomic_read(&bp->intr_sem) != 0) | 5211 | if (atomic_read(&bp->intr_sem) != 0) |
4337 | goto bnx2x_restart_timer; | 5212 | goto timer_restart; |
4338 | 5213 | ||
4339 | if (poll) { | 5214 | if (poll) { |
4340 | struct bnx2x_fastpath *fp = &bp->fp[0]; | 5215 | struct bnx2x_fastpath *fp = &bp->fp[0]; |
@@ -4344,7 +5219,7 @@ static void bnx2x_timer(unsigned long data) | |||
4344 | rc = bnx2x_rx_int(fp, 1000); | 5219 | rc = bnx2x_rx_int(fp, 1000); |
4345 | } | 5220 | } |
4346 | 5221 | ||
4347 | if (!nomcp && (bp->bc_ver >= 0x040003)) { | 5222 | if (!nomcp) { |
4348 | int port = bp->port; | 5223 | int port = bp->port; |
4349 | u32 drv_pulse; | 5224 | u32 drv_pulse; |
4350 | u32 mcp_pulse; | 5225 | u32 mcp_pulse; |
@@ -4353,9 +5228,9 @@ static void bnx2x_timer(unsigned long data) | |||
4353 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; | 5228 | bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; |
4354 | /* TBD - add SYSTEM_TIME */ | 5229 | /* TBD - add SYSTEM_TIME */ |
4355 | drv_pulse = bp->fw_drv_pulse_wr_seq; | 5230 | drv_pulse = bp->fw_drv_pulse_wr_seq; |
4356 | SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse); | 5231 | SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse); |
4357 | 5232 | ||
4358 | mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) & | 5233 | mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) & |
4359 | MCP_PULSE_SEQ_MASK); | 5234 | MCP_PULSE_SEQ_MASK); |
4360 | /* The delta between driver pulse and mcp response | 5235 | /* The delta between driver pulse and mcp response |
4361 | * should be 1 (before mcp response) or 0 (after mcp response) | 5236 | * should be 1 (before mcp response) or 0 (after mcp response) |
@@ -4369,11 +5244,11 @@ static void bnx2x_timer(unsigned long data) | |||
4369 | } | 5244 | } |
4370 | 5245 | ||
4371 | if (bp->stats_state == STATS_STATE_DISABLE) | 5246 | if (bp->stats_state == STATS_STATE_DISABLE) |
4372 | goto bnx2x_restart_timer; | 5247 | goto timer_restart; |
4373 | 5248 | ||
4374 | bnx2x_update_stats(bp); | 5249 | bnx2x_update_stats(bp); |
4375 | 5250 | ||
4376 | bnx2x_restart_timer: | 5251 | timer_restart: |
4377 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 5252 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
4378 | } | 5253 | } |
4379 | 5254 | ||
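
Note on the timer hunks above: the drv_pulse/mcp_pulse exchange is a keep-alive in which the driver bumps a masked sequence number in shared memory and the management firmware echoes the last value it processed; as the comment says, the masked delta should stay at 0 or 1. A standalone sketch of a wrap-safe delta check of that shape; the mask width is an assumption, the real driver uses its own DRV_PULSE_SEQ_MASK/MCP_PULSE_SEQ_MASK:

#include <stdint.h>
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff /* hypothetical width, for illustration only */

/* Returns 1 if the firmware echo lags the driver pulse by more than one step. */
static int pulse_out_of_sync(uint16_t drv_pulse, uint16_t mcp_pulse)
{
	uint16_t delta = (drv_pulse - mcp_pulse) & PULSE_SEQ_MASK;

	return delta > 1;
}

int main(void)
{
	printf("%d\n", pulse_out_of_sync(0x0000, 0x7fff)); /* wrapped by one: still in sync */
	printf("%d\n", pulse_out_of_sync(0x0005, 0x0001)); /* lagging by four: flagged */
	return 0;
}
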
@@ -4438,6 +5313,9 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4438 | atten_status_block); | 5313 | atten_status_block); |
4439 | def_sb->atten_status_block.status_block_id = id; | 5314 | def_sb->atten_status_block.status_block_id = id; |
4440 | 5315 | ||
5316 | bp->def_att_idx = 0; | ||
5317 | bp->attn_state = 0; | ||
5318 | |||
4441 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 5319 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4442 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 5320 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4443 | 5321 | ||
@@ -4472,6 +5350,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4472 | u_def_status_block); | 5350 | u_def_status_block); |
4473 | def_sb->u_def_status_block.status_block_id = id; | 5351 | def_sb->u_def_status_block.status_block_id = id; |
4474 | 5352 | ||
5353 | bp->def_u_idx = 0; | ||
5354 | |||
4475 | REG_WR(bp, BAR_USTRORM_INTMEM + | 5355 | REG_WR(bp, BAR_USTRORM_INTMEM + |
4476 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 5356 | USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); |
4477 | REG_WR(bp, BAR_USTRORM_INTMEM + | 5357 | REG_WR(bp, BAR_USTRORM_INTMEM + |
@@ -4489,6 +5369,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4489 | c_def_status_block); | 5369 | c_def_status_block); |
4490 | def_sb->c_def_status_block.status_block_id = id; | 5370 | def_sb->c_def_status_block.status_block_id = id; |
4491 | 5371 | ||
5372 | bp->def_c_idx = 0; | ||
5373 | |||
4492 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 5374 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
4493 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 5375 | CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); |
4494 | REG_WR(bp, BAR_CSTRORM_INTMEM + | 5376 | REG_WR(bp, BAR_CSTRORM_INTMEM + |
@@ -4506,6 +5388,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4506 | t_def_status_block); | 5388 | t_def_status_block); |
4507 | def_sb->t_def_status_block.status_block_id = id; | 5389 | def_sb->t_def_status_block.status_block_id = id; |
4508 | 5390 | ||
5391 | bp->def_t_idx = 0; | ||
5392 | |||
4509 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 5393 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
4510 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 5394 | TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); |
4511 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 5395 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
@@ -4523,6 +5407,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4523 | x_def_status_block); | 5407 | x_def_status_block); |
4524 | def_sb->x_def_status_block.status_block_id = id; | 5408 | def_sb->x_def_status_block.status_block_id = id; |
4525 | 5409 | ||
5410 | bp->def_x_idx = 0; | ||
5411 | |||
4526 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 5412 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
4527 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); | 5413 | XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section)); |
4528 | REG_WR(bp, BAR_XSTRORM_INTMEM + | 5414 | REG_WR(bp, BAR_XSTRORM_INTMEM + |
@@ -4535,6 +5421,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp, | |||
4535 | REG_WR16(bp, BAR_XSTRORM_INTMEM + | 5421 | REG_WR16(bp, BAR_XSTRORM_INTMEM + |
4536 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); | 5422 | XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1); |
4537 | 5423 | ||
5424 | bp->stat_pending = 0; | ||
5425 | |||
4538 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 5426 | bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
4539 | } | 5427 | } |
4540 | 5428 | ||
@@ -4626,7 +5514,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4626 | fp->rx_bd_prod = fp->rx_comp_prod = ring_prod; | 5514 | fp->rx_bd_prod = fp->rx_comp_prod = ring_prod; |
4627 | fp->rx_pkt = fp->rx_calls = 0; | 5515 | fp->rx_pkt = fp->rx_calls = 0; |
4628 | 5516 | ||
4629 | /* Warning! this will genrate an interrupt (to the TSTORM) */ | 5517 | /* Warning! this will generate an interrupt (to the TSTORM) */ |
4630 | /* must only be done when chip is initialized */ | 5518 | /* must only be done when chip is initialized */ |
4631 | REG_WR(bp, BAR_TSTRORM_INTMEM + | 5519 | REG_WR(bp, BAR_TSTRORM_INTMEM + |
4632 | TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); | 5520 | TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod); |
@@ -4678,7 +5566,6 @@ static void bnx2x_init_sp_ring(struct bnx2x *bp) | |||
4678 | 5566 | ||
4679 | bp->spq_left = MAX_SPQ_PENDING; | 5567 | bp->spq_left = MAX_SPQ_PENDING; |
4680 | bp->spq_prod_idx = 0; | 5568 | bp->spq_prod_idx = 0; |
4681 | bp->dsb_sp_prod_idx = 0; | ||
4682 | bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; | 5569 | bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; |
4683 | bp->spq_prod_bd = bp->spq; | 5570 | bp->spq_prod_bd = bp->spq; |
4684 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; | 5571 | bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT; |
@@ -4755,6 +5642,42 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) | |||
4755 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); | 5642 | REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); |
4756 | } | 5643 | } |
4757 | 5644 | ||
5645 | static void bnx2x_set_client_config(struct bnx2x *bp) | ||
5646 | { | ||
5647 | #ifdef BCM_VLAN | ||
5648 | int mode = bp->rx_mode; | ||
5649 | #endif | ||
5650 | int i, port = bp->port; | ||
5651 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
5652 | |||
5653 | tstorm_client.mtu = bp->dev->mtu; | ||
5654 | tstorm_client.statistics_counter_id = 0; | ||
5655 | tstorm_client.config_flags = | ||
5656 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | ||
5657 | #ifdef BCM_VLAN | ||
5658 | if (mode && bp->vlgrp) { | ||
5659 | tstorm_client.config_flags |= | ||
5660 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; | ||
5661 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | ||
5662 | } | ||
5663 | #endif | ||
5664 | if (mode != BNX2X_RX_MODE_PROMISC) | ||
5665 | tstorm_client.drop_flags = | ||
5666 | TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR; | ||
5667 | |||
5668 | for_each_queue(bp, i) { | ||
5669 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
5670 | TSTORM_CLIENT_CONFIG_OFFSET(port, i), | ||
5671 | ((u32 *)&tstorm_client)[0]); | ||
5672 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
5673 | TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4, | ||
5674 | ((u32 *)&tstorm_client)[1]); | ||
5675 | } | ||
5676 | |||
5677 | /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n", | ||
5678 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */ | ||
5679 | } | ||
5680 | |||
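
Note on the new bnx2x_set_client_config() above: it pushes an 8-byte firmware structure into TSTORM internal memory one 32-bit word at a time (the ((u32 *)&tstorm_client)[0]/[1] writes). A user-space sketch of that word-wise copy, with a hypothetical config layout and a plain array standing in for the REG_WR target; memcpy() is used here instead of the raw cast to sidestep aliasing questions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical 8-byte config block, mirroring the "write it as two u32s" idiom. */
struct client_config {
	uint16_t mtu;
	uint8_t  stats_counter_id;
	uint8_t  config_flags;
	uint16_t drop_flags;
	uint16_t reserved;
};

/* Stand-in for device internal memory. */
static uint32_t fake_intmem[2];

static void write_config(const struct client_config *cfg)
{
	uint32_t words[2];

	memcpy(words, cfg, sizeof(words));
	fake_intmem[0] = words[0];
	fake_intmem[1] = words[1];
}

int main(void)
{
	struct client_config cfg = { .mtu = 1500, .config_flags = 0x1 };

	write_config(&cfg);
	printf("0x%08x 0x%08x\n", (unsigned)fake_intmem[0], (unsigned)fake_intmem[1]);
	return 0;
}
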
4758 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | 5681 | static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) |
4759 | { | 5682 | { |
4760 | int mode = bp->rx_mode; | 5683 | int mode = bp->rx_mode; |
@@ -4794,41 +5717,9 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp) | |||
4794 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, | 5717 | /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i, |
4795 | ((u32 *)&tstorm_mac_filter)[i]); */ | 5718 | ((u32 *)&tstorm_mac_filter)[i]); */ |
4796 | } | 5719 | } |
4797 | } | ||
4798 | 5720 | ||
4799 | static void bnx2x_set_client_config(struct bnx2x *bp, int client_id) | 5721 | if (mode != BNX2X_RX_MODE_NONE) |
4800 | { | 5722 | bnx2x_set_client_config(bp); |
4801 | #ifdef BCM_VLAN | ||
4802 | int mode = bp->rx_mode; | ||
4803 | #endif | ||
4804 | int port = bp->port; | ||
4805 | struct tstorm_eth_client_config tstorm_client = {0}; | ||
4806 | |||
4807 | tstorm_client.mtu = bp->dev->mtu; | ||
4808 | tstorm_client.statistics_counter_id = 0; | ||
4809 | tstorm_client.config_flags = | ||
4810 | TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE; | ||
4811 | #ifdef BCM_VLAN | ||
4812 | if (mode && bp->vlgrp) { | ||
4813 | tstorm_client.config_flags |= | ||
4814 | TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE; | ||
4815 | DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); | ||
4816 | } | ||
4817 | #endif | ||
4818 | tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR | | ||
4819 | TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR | | ||
4820 | TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR | | ||
4821 | TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR); | ||
4822 | |||
4823 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
4824 | TSTORM_CLIENT_CONFIG_OFFSET(port, client_id), | ||
4825 | ((u32 *)&tstorm_client)[0]); | ||
4826 | REG_WR(bp, BAR_TSTRORM_INTMEM + | ||
4827 | TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4, | ||
4828 | ((u32 *)&tstorm_client)[1]); | ||
4829 | |||
4830 | /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n", | ||
4831 | ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */ | ||
4832 | } | 5723 | } |
4833 | 5724 | ||
4834 | static void bnx2x_init_internal(struct bnx2x *bp) | 5725 | static void bnx2x_init_internal(struct bnx2x *bp) |
@@ -4836,7 +5727,6 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
4836 | int port = bp->port; | 5727 | int port = bp->port; |
4837 | struct tstorm_eth_function_common_config tstorm_config = {0}; | 5728 | struct tstorm_eth_function_common_config tstorm_config = {0}; |
4838 | struct stats_indication_flags stats_flags = {0}; | 5729 | struct stats_indication_flags stats_flags = {0}; |
4839 | int i; | ||
4840 | 5730 | ||
4841 | if (is_multi(bp)) { | 5731 | if (is_multi(bp)) { |
4842 | tstorm_config.config_flags = MULTI_FLAGS; | 5732 | tstorm_config.config_flags = MULTI_FLAGS; |
@@ -4850,13 +5740,9 @@ static void bnx2x_init_internal(struct bnx2x *bp) | |||
4850 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", | 5740 | /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n", |
4851 | (*(u32 *)&tstorm_config)); */ | 5741 | (*(u32 *)&tstorm_config)); */ |
4852 | 5742 | ||
4853 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx untill link is up */ | 5743 | bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ |
4854 | bnx2x_set_storm_rx_mode(bp); | 5744 | bnx2x_set_storm_rx_mode(bp); |
4855 | 5745 | ||
4856 | for_each_queue(bp, i) | ||
4857 | bnx2x_set_client_config(bp, i); | ||
4858 | |||
4859 | |||
4860 | stats_flags.collect_eth = cpu_to_le32(1); | 5746 | stats_flags.collect_eth = cpu_to_le32(1); |
4861 | 5747 | ||
4862 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), | 5748 | REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), |
@@ -4902,7 +5788,7 @@ static void bnx2x_nic_init(struct bnx2x *bp) | |||
4902 | bnx2x_init_internal(bp); | 5788 | bnx2x_init_internal(bp); |
4903 | bnx2x_init_stats(bp); | 5789 | bnx2x_init_stats(bp); |
4904 | bnx2x_init_ind_table(bp); | 5790 | bnx2x_init_ind_table(bp); |
4905 | bnx2x_enable_int(bp); | 5791 | bnx2x_int_enable(bp); |
4906 | 5792 | ||
4907 | } | 5793 | } |
4908 | 5794 | ||
@@ -5265,8 +6151,10 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5265 | if (mode & 0x1) { /* init common */ | 6151 | if (mode & 0x1) { /* init common */ |
5266 | DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", | 6152 | DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n", |
5267 | func, mode); | 6153 | func, mode); |
5268 | REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff); | 6154 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, |
5269 | REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc); | 6155 | 0xffffffff); |
6156 | REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, | ||
6157 | 0xfffc); | ||
5270 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); | 6158 | bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END); |
5271 | 6159 | ||
5272 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); | 6160 | REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100); |
@@ -5359,7 +6247,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5359 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); | 6247 | REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8); |
5360 | #endif | 6248 | #endif |
5361 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); | 6249 | bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END); |
5362 | /* softrest pulse */ | 6250 | /* soft reset pulse */ |
5363 | REG_WR(bp, QM_REG_SOFT_RESET, 1); | 6251 | REG_WR(bp, QM_REG_SOFT_RESET, 1); |
5364 | REG_WR(bp, QM_REG_SOFT_RESET, 0); | 6252 | REG_WR(bp, QM_REG_SOFT_RESET, 0); |
5365 | 6253 | ||
@@ -5413,7 +6301,7 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5413 | REG_WR(bp, SRC_REG_SOFT_RST, 1); | 6301 | REG_WR(bp, SRC_REG_SOFT_RST, 1); |
5414 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { | 6302 | for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { |
5415 | REG_WR(bp, i, 0xc0cac01a); | 6303 | REG_WR(bp, i, 0xc0cac01a); |
5416 | /* TODO: repleace with something meaningfull */ | 6304 | /* TODO: replace with something meaningful */ |
5417 | } | 6305 | } |
5418 | /* SRCH COMMON comes here */ | 6306 | /* SRCH COMMON comes here */ |
5419 | REG_WR(bp, SRC_REG_SOFT_RST, 0); | 6307 | REG_WR(bp, SRC_REG_SOFT_RST, 0); |
@@ -5486,6 +6374,28 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5486 | enable_blocks_attention(bp); | 6374 | enable_blocks_attention(bp); |
5487 | /* enable_blocks_parity(bp); */ | 6375 | /* enable_blocks_parity(bp); */ |
5488 | 6376 | ||
6377 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | ||
6378 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | ||
6379 | /* Fan failure is indicated by SPIO 5 */ | ||
6380 | bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, | ||
6381 | MISC_REGISTERS_SPIO_INPUT_HI_Z); | ||
6382 | |||
6383 | /* set to active low mode */ | ||
6384 | val = REG_RD(bp, MISC_REG_SPIO_INT); | ||
6385 | val |= ((1 << MISC_REGISTERS_SPIO_5) << | ||
6386 | MISC_REGISTERS_SPIO_INT_OLD_SET_POS); | ||
6387 | REG_WR(bp, MISC_REG_SPIO_INT, val); | ||
6388 | |||
6389 | /* enable interrupt to signal the IGU */ | ||
6390 | val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); | ||
6391 | val |= (1 << MISC_REGISTERS_SPIO_5); | ||
6392 | REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); | ||
6393 | break; | ||
6394 | |||
6395 | default: | ||
6396 | break; | ||
6397 | } | ||
6398 | |||
5489 | } /* end of common init */ | 6399 | } /* end of common init */ |
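
Note on the board-specific block added in common init above: routing the fan-failure pin (SPIO 5 on that board) into the attention logic is a read-modify-write of two GRC registers, selecting active-low sensing and then enabling the event towards the IGU. A sketch of just that read-modify-write idiom; the bit positions and register stand-ins below are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SPIO_5_BIT        (1u << 5)   /* hypothetical bit position */
#define SPIO_INT_OLD_SET  16          /* hypothetical "active low" field offset */

static uint32_t spio_int_reg;      /* stands in for MISC_REG_SPIO_INT */
static uint32_t spio_event_en_reg; /* stands in for MISC_REG_SPIO_EVENT_EN */

static void enable_fan_failure_attn(void)
{
	uint32_t val;

	/* select active-low sensing for SPIO 5 */
	val = spio_int_reg;
	val |= SPIO_5_BIT << SPIO_INT_OLD_SET;
	spio_int_reg = val;

	/* let the pin raise an interrupt towards the interrupt unit */
	val = spio_event_en_reg;
	val |= SPIO_5_BIT;
	spio_event_en_reg = val;
}

int main(void)
{
	enable_fan_failure_attn();
	printf("int 0x%08x event_en 0x%08x\n",
	       (unsigned)spio_int_reg, (unsigned)spio_event_en_reg);
	return 0;
}
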
5490 | 6400 | ||
5491 | /* per port init */ | 6401 | /* per port init */ |
@@ -5645,9 +6555,21 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5645 | /* Port MCP comes here */ | 6555 | /* Port MCP comes here */ |
5646 | /* Port DMAE comes here */ | 6556 | /* Port DMAE comes here */ |
5647 | 6557 | ||
6558 | switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) { | ||
6559 | case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G: | ||
6560 | /* add SPIO 5 to group 0 */ | ||
6561 | val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | ||
6562 | val |= AEU_INPUTS_ATTN_BITS_SPIO5; | ||
6563 | REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val); | ||
6564 | break; | ||
6565 | |||
6566 | default: | ||
6567 | break; | ||
6568 | } | ||
6569 | |||
5648 | bnx2x_link_reset(bp); | 6570 | bnx2x_link_reset(bp); |
5649 | 6571 | ||
5650 | /* Reset pciex errors for debug */ | 6572 | /* Reset PCIE errors for debug */ |
5651 | REG_WR(bp, 0x2114, 0xffffffff); | 6573 | REG_WR(bp, 0x2114, 0xffffffff); |
5652 | REG_WR(bp, 0x2120, 0xffffffff); | 6574 | REG_WR(bp, 0x2120, 0xffffffff); |
5653 | REG_WR(bp, 0x2814, 0xffffffff); | 6575 | REG_WR(bp, 0x2814, 0xffffffff); |
@@ -5669,9 +6591,9 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5669 | port = bp->port; | 6591 | port = bp->port; |
5670 | 6592 | ||
5671 | bp->fw_drv_pulse_wr_seq = | 6593 | bp->fw_drv_pulse_wr_seq = |
5672 | (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) & | 6594 | (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) & |
5673 | DRV_PULSE_SEQ_MASK); | 6595 | DRV_PULSE_SEQ_MASK); |
5674 | bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param); | 6596 | bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param); |
5675 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", | 6597 | DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n", |
5676 | bp->fw_drv_pulse_wr_seq, bp->fw_mb); | 6598 | bp->fw_drv_pulse_wr_seq, bp->fw_mb); |
5677 | } else { | 6599 | } else { |
@@ -5681,16 +6603,15 @@ static int bnx2x_function_init(struct bnx2x *bp, int mode) | |||
5681 | return 0; | 6603 | return 0; |
5682 | } | 6604 | } |
5683 | 6605 | ||
5684 | 6606 | /* send the MCP a request, block until there is a reply */ | |
5685 | /* send the MCP a request, block untill there is a reply */ | ||
5686 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | 6607 | static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) |
5687 | { | 6608 | { |
5688 | u32 rc = 0; | ||
5689 | u32 seq = ++bp->fw_seq; | ||
5690 | int port = bp->port; | 6609 | int port = bp->port; |
6610 | u32 seq = ++bp->fw_seq; | ||
6611 | u32 rc = 0; | ||
5691 | 6612 | ||
5692 | SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq); | 6613 | SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq)); |
5693 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq); | 6614 | DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); |
5694 | 6615 | ||
5695 | /* let the FW do its magic ... */ | 6616 | /* let the FW do its magic ... */ |
5696 | msleep(100); /* TBD */ | 6617 | msleep(100); /* TBD */ |
@@ -5698,19 +6619,20 @@ static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command) | |||
5698 | if (CHIP_REV_IS_SLOW(bp)) | 6619 | if (CHIP_REV_IS_SLOW(bp)) |
5699 | msleep(900); | 6620 | msleep(900); |
5700 | 6621 | ||
5701 | rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header); | 6622 | rc = SHMEM_RD(bp, func_mb[port].fw_mb_header); |
5702 | |||
5703 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); | 6623 | DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq); |
5704 | 6624 | ||
5705 | /* is this a reply to our command? */ | 6625 | /* is this a reply to our command? */ |
5706 | if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { | 6626 | if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { |
5707 | rc &= FW_MSG_CODE_MASK; | 6627 | rc &= FW_MSG_CODE_MASK; |
6628 | |||
5708 | } else { | 6629 | } else { |
5709 | /* FW BUG! */ | 6630 | /* FW BUG! */ |
5710 | BNX2X_ERR("FW failed to respond!\n"); | 6631 | BNX2X_ERR("FW failed to respond!\n"); |
5711 | bnx2x_fw_dump(bp); | 6632 | bnx2x_fw_dump(bp); |
5712 | rc = 0; | 6633 | rc = 0; |
5713 | } | 6634 | } |
6635 | |||
5714 | return rc; | 6636 | return rc; |
5715 | } | 6637 | } |
5716 | 6638 | ||
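
Note on bnx2x_fw_command() above: it is a simple mailbox handshake, writing (command | seq) into shared memory, waiting, and accepting the reply only if the firmware echoed the same sequence number; otherwise the reply is treated as missing. A compilable user-space sketch of that check, with hypothetical mask values and a stub standing in for the firmware:

#include <stdint.h>
#include <stdio.h>

#define SEQ_NUMBER_MASK 0x0000ffffu /* hypothetical split between seq and code */
#define MSG_CODE_MASK   0xffff0000u

static uint32_t mailbox; /* stands in for the shared-memory drv/fw mailbox */

/* Firmware stub: echo the sequence and report a fixed response code. */
static void fake_firmware(void)
{
	mailbox = 0x00010000u | (mailbox & SEQ_NUMBER_MASK);
}

static uint32_t fw_command(uint32_t command, uint32_t *seq)
{
	uint32_t reply;

	*seq = (*seq + 1) & SEQ_NUMBER_MASK;
	mailbox = command | *seq;

	fake_firmware(); /* the real driver sleeps here and lets the MCP answer */

	reply = mailbox;
	if ((reply & SEQ_NUMBER_MASK) == *seq)
		return reply & MSG_CODE_MASK;

	return 0; /* no (or stale) reply: treat as failure */
}

int main(void)
{
	uint32_t seq = 0;

	printf("code 0x%08x\n", (unsigned)fw_command(0x00020000u, &seq));
	return 0;
}
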
@@ -5869,7 +6791,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp) | |||
5869 | for (i = 0; i < 16*1024; i += 64) | 6791 | for (i = 0; i < 16*1024; i += 64) |
5870 | * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; | 6792 | * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; |
5871 | 6793 | ||
5872 | /* now sixup the last line in the block to point to the next block */ | 6794 | /* now fixup the last line in the block to point to the next block */ |
5873 | *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping; | 6795 | *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping; |
5874 | 6796 | ||
5875 | /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ | 6797 | /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ |
@@ -5950,22 +6872,19 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp) | |||
5950 | int i; | 6872 | int i; |
5951 | 6873 | ||
5952 | free_irq(bp->msix_table[0].vector, bp->dev); | 6874 | free_irq(bp->msix_table[0].vector, bp->dev); |
5953 | DP(NETIF_MSG_IFDOWN, "rleased sp irq (%d)\n", | 6875 | DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", |
5954 | bp->msix_table[0].vector); | 6876 | bp->msix_table[0].vector); |
5955 | 6877 | ||
5956 | for_each_queue(bp, i) { | 6878 | for_each_queue(bp, i) { |
5957 | DP(NETIF_MSG_IFDOWN, "about to rlease fp #%d->%d irq " | 6879 | DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " |
5958 | "state(%x)\n", i, bp->msix_table[i + 1].vector, | 6880 | "state(%x)\n", i, bp->msix_table[i + 1].vector, |
5959 | bnx2x_fp(bp, i, state)); | 6881 | bnx2x_fp(bp, i, state)); |
5960 | 6882 | ||
5961 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) { | 6883 | if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) |
5962 | 6884 | BNX2X_ERR("IRQ of fp #%d being freed while " | |
5963 | free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); | 6885 | "state != closed\n", i); |
5964 | bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED; | ||
5965 | |||
5966 | } else | ||
5967 | DP(NETIF_MSG_IFDOWN, "irq not freed\n"); | ||
5968 | 6886 | ||
6887 | free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]); | ||
5969 | } | 6888 | } |
5970 | 6889 | ||
5971 | } | 6890 | } |
@@ -5995,7 +6914,7 @@ static int bnx2x_enable_msix(struct bnx2x *bp) | |||
5995 | 6914 | ||
5996 | if (pci_enable_msix(bp->pdev, &bp->msix_table[0], | 6915 | if (pci_enable_msix(bp->pdev, &bp->msix_table[0], |
5997 | bp->num_queues + 1)){ | 6916 | bp->num_queues + 1)){ |
5998 | BNX2X_ERR("failed to enable msix\n"); | 6917 | BNX2X_LOG("failed to enable MSI-X\n"); |
5999 | return -1; | 6918 | return -1; |
6000 | 6919 | ||
6001 | } | 6920 | } |
@@ -6010,11 +6929,8 @@ static int bnx2x_enable_msix(struct bnx2x *bp) | |||
6010 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) | 6929 | static int bnx2x_req_msix_irqs(struct bnx2x *bp) |
6011 | { | 6930 | { |
6012 | 6931 | ||
6013 | |||
6014 | int i, rc; | 6932 | int i, rc; |
6015 | 6933 | ||
6016 | DP(NETIF_MSG_IFUP, "about to request sp irq\n"); | ||
6017 | |||
6018 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, | 6934 | rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0, |
6019 | bp->dev->name, bp->dev); | 6935 | bp->dev->name, bp->dev); |
6020 | 6936 | ||
@@ -6029,7 +6945,8 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp) | |||
6029 | bp->dev->name, &bp->fp[i]); | 6945 | bp->dev->name, &bp->fp[i]); |
6030 | 6946 | ||
6031 | if (rc) { | 6947 | if (rc) { |
6032 | BNX2X_ERR("request fp #%d irq failed\n", i); | 6948 | BNX2X_ERR("request fp #%d irq failed " |
6949 | "rc %d\n", i, rc); | ||
6033 | bnx2x_free_msix_irqs(bp); | 6950 | bnx2x_free_msix_irqs(bp); |
6034 | return -EBUSY; | 6951 | return -EBUSY; |
6035 | } | 6952 | } |
@@ -6109,8 +7026,8 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
6109 | /* can take a while if any port is running */ | 7026 | /* can take a while if any port is running */ |
6110 | int timeout = 500; | 7027 | int timeout = 500; |
6111 | 7028 | ||
6112 | /* DP("waiting for state to become %d on IDX [%d]\n", | 7029 | DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n", |
6113 | state, sb_idx); */ | 7030 | poll ? "polling" : "waiting", state, idx); |
6114 | 7031 | ||
6115 | might_sleep(); | 7032 | might_sleep(); |
6116 | 7033 | ||
@@ -6128,7 +7045,7 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
6128 | 7045 | ||
6129 | mb(); /* state is changed by bnx2x_sp_event()*/ | 7046 | mb(); /* state is changed by bnx2x_sp_event()*/ |
6130 | 7047 | ||
6131 | if (*state_p != state) | 7048 | if (*state_p == state) |
6132 | return 0; | 7049 | return 0; |
6133 | 7050 | ||
6134 | timeout--; | 7051 | timeout--; |
@@ -6136,17 +7053,17 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx, | |||
6136 | 7053 | ||
6137 | } | 7054 | } |
6138 | 7055 | ||
6139 | |||
6140 | /* timeout! */ | 7056 | /* timeout! */ |
6141 | BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx); | 7057 | BNX2X_ERR("timeout %s for state %x on IDX [%d]\n", |
6142 | return -EBUSY; | 7058 | poll ? "polling" : "waiting", state, idx); |
6143 | 7059 | ||
7060 | return -EBUSY; | ||
6144 | } | 7061 | } |
6145 | 7062 | ||
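
Note on the bnx2x_wait_ramrod() hunk above: besides the nicer messages, it fixes the completion test so the function returns success when the state *matches* the expected value (the old code had the comparison inverted). A minimal user-space sketch of the same poll-with-timeout shape, with a trivial stand-in for msleep() and a state variable that the interrupt path would normally update:

#include <stdio.h>

static volatile int fp_state;

/* Pretend the completion arrives on the third poll. */
static void fake_sleep_1ms(void)
{
	static int ticks;

	if (++ticks == 3)
		fp_state = 1; /* e.g. "open" */
}

static int wait_for_state(volatile int *state_p, int state, int timeout)
{
	while (timeout) {
		if (*state_p == state)   /* the corrected comparison */
			return 0;
		timeout--;
		fake_sleep_1ms();
	}
	return -1; /* timeout */
}

int main(void)
{
	printf("rc %d\n", wait_for_state(&fp_state, 1, 500));
	return 0;
}
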
6146 | static int bnx2x_setup_leading(struct bnx2x *bp) | 7063 | static int bnx2x_setup_leading(struct bnx2x *bp) |
6147 | { | 7064 | { |
6148 | 7065 | ||
6149 | /* reset IGU staae */ | 7066 | /* reset IGU state */ |
6150 | bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 7067 | bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
6151 | 7068 | ||
6152 | /* SETUP ramrod */ | 7069 | /* SETUP ramrod */ |
@@ -6162,12 +7079,13 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index) | |||
6162 | /* reset IGU state */ | 7079 | /* reset IGU state */ |
6163 | bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); | 7080 | bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0); |
6164 | 7081 | ||
7082 | /* SETUP ramrod */ | ||
6165 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; | 7083 | bp->fp[index].state = BNX2X_FP_STATE_OPENING; |
6166 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0); | 7084 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0); |
6167 | 7085 | ||
6168 | /* Wait for completion */ | 7086 | /* Wait for completion */ |
6169 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, | 7087 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index, |
6170 | &(bp->fp[index].state), 1); | 7088 | &(bp->fp[index].state), 0); |
6171 | 7089 | ||
6172 | } | 7090 | } |
6173 | 7091 | ||
@@ -6177,8 +7095,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev); | |||
6177 | 7095 | ||
6178 | static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | 7096 | static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) |
6179 | { | 7097 | { |
6180 | int rc; | 7098 | u32 load_code; |
6181 | int i = 0; | 7099 | int i; |
6182 | 7100 | ||
6183 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; | 7101 | bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; |
6184 | 7102 | ||
@@ -6188,26 +7106,28 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6188 | initialized, otherwise - not. | 7106 | initialized, otherwise - not. |
6189 | */ | 7107 | */ |
6190 | if (!nomcp) { | 7108 | if (!nomcp) { |
6191 | rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); | 7109 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ); |
6192 | if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) { | 7110 | if (!load_code) { |
7111 | BNX2X_ERR("MCP response failure, unloading\n"); | ||
7112 | return -EBUSY; | ||
7113 | } | ||
7114 | if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) { | ||
7115 | BNX2X_ERR("MCP refused load request, unloading\n"); | ||
6193 | return -EBUSY; /* other port in diagnostic mode */ | 7116 | return -EBUSY; /* other port in diagnostic mode */ |
6194 | } | 7117 | } |
6195 | } else { | 7118 | } else { |
6196 | rc = FW_MSG_CODE_DRV_LOAD_COMMON; | 7119 | load_code = FW_MSG_CODE_DRV_LOAD_COMMON; |
6197 | } | 7120 | } |
6198 | 7121 | ||
6199 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues); | ||
6200 | |||
6201 | /* if we can't use msix we only need one fp, | 7122 | /* if we can't use msix we only need one fp, |
6202 | * so try to enable msix with the requested number of fp's | 7123 | * so try to enable msix with the requested number of fp's |
6203 | * and fallback to inta with one fp | 7124 | * and fallback to inta with one fp |
6204 | */ | 7125 | */ |
6205 | if (req_irq) { | 7126 | if (req_irq) { |
6206 | |||
6207 | if (use_inta) { | 7127 | if (use_inta) { |
6208 | bp->num_queues = 1; | 7128 | bp->num_queues = 1; |
6209 | } else { | 7129 | } else { |
6210 | if (use_multi > 1 && use_multi <= 16) | 7130 | if ((use_multi > 1) && (use_multi <= 16)) |
6211 | /* user requested number */ | 7131 | /* user requested number */ |
6212 | bp->num_queues = use_multi; | 7132 | bp->num_queues = use_multi; |
6213 | else if (use_multi == 1) | 7133 | else if (use_multi == 1) |
@@ -6216,15 +7136,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6216 | bp->num_queues = 1; | 7136 | bp->num_queues = 1; |
6217 | 7137 | ||
6218 | if (bnx2x_enable_msix(bp)) { | 7138 | if (bnx2x_enable_msix(bp)) { |
6219 | /* faild to enable msix */ | 7139 | /* failed to enable msix */ |
6220 | bp->num_queues = 1; | 7140 | bp->num_queues = 1; |
6221 | if (use_multi) | 7141 | if (use_multi) |
6222 | BNX2X_ERR("Muti requested but failed" | 7142 | BNX2X_ERR("Multi requested but failed" |
6223 | " to enable MSI-X\n"); | 7143 | " to enable MSI-X\n"); |
6224 | } | 7144 | } |
6225 | } | 7145 | } |
6226 | } | 7146 | } |
6227 | 7147 | ||
7148 | DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues); | ||
7149 | |||
6228 | if (bnx2x_alloc_mem(bp)) | 7150 | if (bnx2x_alloc_mem(bp)) |
6229 | return -ENOMEM; | 7151 | return -ENOMEM; |
6230 | 7152 | ||
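
Note on the load path above: it first decides how many queues it wants (driven by the use_inta/use_multi module parameters) and then tries MSI-X, falling back to a single queue with a legacy interrupt if vector allocation fails. A user-space sketch of just that decision logic; the stubbed MSI-X call and the "per-CPU default" count are assumptions for illustration:

#include <stdio.h>

static int use_inta;      /* module-parameter stand-ins */
static int use_multi = 4;

/* Stub: pretend the platform only offers a legacy interrupt. */
static int try_enable_msix(int nvecs)
{
	(void)nvecs;
	return -1;
}

static int pick_num_queues(void)
{
	int num_queues;

	if (use_inta)
		return 1;

	if (use_multi > 1 && use_multi <= 16)
		num_queues = use_multi;       /* user-requested count */
	else if (use_multi == 1)
		num_queues = 2;               /* hypothetical per-CPU default */
	else
		num_queues = 1;

	if (try_enable_msix(num_queues + 1))
		num_queues = 1;               /* MSI-X failed: fall back to INTA */

	return num_queues;
}

int main(void)
{
	printf("num_queues %d\n", pick_num_queues());
	return 0;
}
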
@@ -6232,13 +7154,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6232 | if (bp->flags & USING_MSIX_FLAG) { | 7154 | if (bp->flags & USING_MSIX_FLAG) { |
6233 | if (bnx2x_req_msix_irqs(bp)) { | 7155 | if (bnx2x_req_msix_irqs(bp)) { |
6234 | pci_disable_msix(bp->pdev); | 7156 | pci_disable_msix(bp->pdev); |
6235 | goto out_error; | 7157 | goto load_error; |
6236 | } | 7158 | } |
6237 | 7159 | ||
6238 | } else { | 7160 | } else { |
6239 | if (bnx2x_req_irq(bp)) { | 7161 | if (bnx2x_req_irq(bp)) { |
6240 | BNX2X_ERR("IRQ request failed, aborting\n"); | 7162 | BNX2X_ERR("IRQ request failed, aborting\n"); |
6241 | goto out_error; | 7163 | goto load_error; |
6242 | } | 7164 | } |
6243 | } | 7165 | } |
6244 | } | 7166 | } |
@@ -6249,31 +7171,25 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6249 | 7171 | ||
6250 | 7172 | ||
6251 | /* Initialize HW */ | 7173 | /* Initialize HW */ |
6252 | if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) { | 7174 | if (bnx2x_function_init(bp, |
7175 | (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) { | ||
6253 | BNX2X_ERR("HW init failed, aborting\n"); | 7176 | BNX2X_ERR("HW init failed, aborting\n"); |
6254 | goto out_error; | 7177 | goto load_error; |
6255 | } | 7178 | } |
6256 | 7179 | ||
6257 | 7180 | ||
6258 | atomic_set(&bp->intr_sem, 0); | 7181 | atomic_set(&bp->intr_sem, 0); |
6259 | 7182 | ||
6260 | /* Reenable SP tasklet */ | ||
6261 | /*if (bp->sp_task_en) { */ | ||
6262 | /* tasklet_enable(&bp->sp_task);*/ | ||
6263 | /*} else { */ | ||
6264 | /* bp->sp_task_en = 1; */ | ||
6265 | /*} */ | ||
6266 | 7183 | ||
6267 | /* Setup NIC internals and enable interrupts */ | 7184 | /* Setup NIC internals and enable interrupts */ |
6268 | bnx2x_nic_init(bp); | 7185 | bnx2x_nic_init(bp); |
6269 | 7186 | ||
6270 | /* Send LOAD_DONE command to MCP */ | 7187 | /* Send LOAD_DONE command to MCP */ |
6271 | if (!nomcp) { | 7188 | if (!nomcp) { |
6272 | rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); | 7189 | load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE); |
6273 | DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc); | 7190 | if (!load_code) { |
6274 | if (!rc) { | ||
6275 | BNX2X_ERR("MCP response failure, unloading\n"); | 7191 | BNX2X_ERR("MCP response failure, unloading\n"); |
6276 | goto int_disable; | 7192 | goto load_int_disable; |
6277 | } | 7193 | } |
6278 | } | 7194 | } |
6279 | 7195 | ||
@@ -6285,11 +7201,11 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6285 | napi_enable(&bnx2x_fp(bp, i, napi)); | 7201 | napi_enable(&bnx2x_fp(bp, i, napi)); |
6286 | 7202 | ||
6287 | if (bnx2x_setup_leading(bp)) | 7203 | if (bnx2x_setup_leading(bp)) |
6288 | goto stop_netif; | 7204 | goto load_stop_netif; |
6289 | 7205 | ||
6290 | for_each_nondefault_queue(bp, i) | 7206 | for_each_nondefault_queue(bp, i) |
6291 | if (bnx2x_setup_multi(bp, i)) | 7207 | if (bnx2x_setup_multi(bp, i)) |
6292 | goto stop_netif; | 7208 | goto load_stop_netif; |
6293 | 7209 | ||
6294 | bnx2x_set_mac_addr(bp); | 7210 | bnx2x_set_mac_addr(bp); |
6295 | 7211 | ||
@@ -6313,42 +7229,24 @@ static int bnx2x_nic_load(struct bnx2x *bp, int req_irq) | |||
6313 | 7229 | ||
6314 | return 0; | 7230 | return 0; |
6315 | 7231 | ||
6316 | stop_netif: | 7232 | load_stop_netif: |
6317 | for_each_queue(bp, i) | 7233 | for_each_queue(bp, i) |
6318 | napi_disable(&bnx2x_fp(bp, i, napi)); | 7234 | napi_disable(&bnx2x_fp(bp, i, napi)); |
6319 | 7235 | ||
6320 | int_disable: | 7236 | load_int_disable: |
6321 | bnx2x_disable_int_sync(bp); | 7237 | bnx2x_int_disable_sync(bp); |
6322 | 7238 | ||
6323 | bnx2x_free_skbs(bp); | 7239 | bnx2x_free_skbs(bp); |
6324 | bnx2x_free_irq(bp); | 7240 | bnx2x_free_irq(bp); |
6325 | 7241 | ||
6326 | out_error: | 7242 | load_error: |
6327 | bnx2x_free_mem(bp); | 7243 | bnx2x_free_mem(bp); |
6328 | 7244 | ||
6329 | /* TBD we really need to reset the chip | 7245 | /* TBD we really need to reset the chip |
6330 | if we want to recover from this */ | 7246 | if we want to recover from this */ |
6331 | return rc; | 7247 | return -EBUSY; |
6332 | } | 7248 | } |
6333 | 7249 | ||
6334 | static void bnx2x_netif_stop(struct bnx2x *bp) | ||
6335 | { | ||
6336 | int i; | ||
6337 | |||
6338 | bp->rx_mode = BNX2X_RX_MODE_NONE; | ||
6339 | bnx2x_set_storm_rx_mode(bp); | ||
6340 | |||
6341 | bnx2x_disable_int_sync(bp); | ||
6342 | bnx2x_link_reset(bp); | ||
6343 | |||
6344 | for_each_queue(bp, i) | ||
6345 | napi_disable(&bnx2x_fp(bp, i, napi)); | ||
6346 | |||
6347 | if (netif_running(bp->dev)) { | ||
6348 | netif_tx_disable(bp->dev); | ||
6349 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ | ||
6350 | } | ||
6351 | } | ||
6352 | 7250 | ||
6353 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) | 7251 | static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code) |
6354 | { | 7252 | { |
@@ -6401,20 +7299,20 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index) | |||
6401 | 7299 | ||
6402 | int rc; | 7300 | int rc; |
6403 | 7301 | ||
6404 | /* halt the connnection */ | 7302 | /* halt the connection */ |
6405 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; | 7303 | bp->fp[index].state = BNX2X_FP_STATE_HALTING; |
6406 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); | 7304 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0); |
6407 | 7305 | ||
6408 | 7306 | ||
6409 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, | 7307 | rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index, |
6410 | &(bp->fp[index].state), 1); | 7308 | &(bp->fp[index].state), 1); |
6411 | if (rc) /* timout */ | 7309 | if (rc) /* timeout */ |
6412 | return rc; | 7310 | return rc; |
6413 | 7311 | ||
6414 | /* delete cfc entry */ | 7312 | /* delete cfc entry */ |
6415 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); | 7313 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1); |
6416 | 7314 | ||
6417 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index, | 7315 | return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index, |
6418 | &(bp->fp[index].state), 1); | 7316 | &(bp->fp[index].state), 1); |
6419 | 7317 | ||
6420 | } | 7318 | } |
@@ -6422,8 +7320,8 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index) | |||
6422 | 7320 | ||
6423 | static void bnx2x_stop_leading(struct bnx2x *bp) | 7321 | static void bnx2x_stop_leading(struct bnx2x *bp) |
6424 | { | 7322 | { |
6425 | 7323 | u16 dsb_sp_prod_idx; | |
6426 | /* if the other port is hadling traffic, | 7324 | /* if the other port is handling traffic, |
6427 | this can take a lot of time */ | 7325 | this can take a lot of time */ |
6428 | int timeout = 500; | 7326 | int timeout = 500; |
6429 | 7327 | ||
@@ -6437,52 +7335,71 @@ static void bnx2x_stop_leading(struct bnx2x *bp) | |||
6437 | &(bp->fp[0].state), 1)) | 7335 | &(bp->fp[0].state), 1)) |
6438 | return; | 7336 | return; |
6439 | 7337 | ||
6440 | bp->dsb_sp_prod_idx = *bp->dsb_sp_prod; | 7338 | dsb_sp_prod_idx = *bp->dsb_sp_prod; |
6441 | 7339 | ||
6442 | /* Send CFC_DELETE ramrod */ | 7340 | /* Send PORT_DELETE ramrod */ |
6443 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); | 7341 | bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1); |
6444 | 7342 | ||
6445 | /* | 7343 | /* Wait for completion to arrive on default status block |
6446 | Wait for completion. | ||
6447 | we are going to reset the chip anyway | 7344 | we are going to reset the chip anyway |
6448 | so there is not much to do if this times out | 7345 | so there is not much to do if this times out |
6449 | */ | 7346 | */ |
6450 | while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) { | 7347 | while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) { |
6451 | timeout--; | 7348 | timeout--; |
6452 | msleep(1); | 7349 | msleep(1); |
6453 | } | 7350 | } |
6454 | 7351 | if (!timeout) { | |
7352 | DP(NETIF_MSG_IFDOWN, "timeout polling for completion " | ||
7353 | "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n", | ||
7354 | *bp->dsb_sp_prod, dsb_sp_prod_idx); | ||
7355 | } | ||
7356 | bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD; | ||
7357 | bp->fp[0].state = BNX2X_FP_STATE_CLOSED; | ||
6455 | } | 7358 | } |
6456 | 7359 | ||
6457 | static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq) | 7360 | |
7361 | static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq) | ||
6458 | { | 7362 | { |
6459 | u32 reset_code = 0; | 7363 | u32 reset_code = 0; |
6460 | int rc; | 7364 | int i, timeout; |
6461 | int i; | ||
6462 | 7365 | ||
6463 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; | 7366 | bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; |
6464 | 7367 | ||
6465 | /* Calling flush_scheduled_work() may deadlock because | 7368 | del_timer_sync(&bp->timer); |
6466 | * linkwatch_event() may be on the workqueue and it will try to get | ||
6467 | * the rtnl_lock which we are holding. | ||
6468 | */ | ||
6469 | 7369 | ||
6470 | while (bp->in_reset_task) | 7370 | bp->rx_mode = BNX2X_RX_MODE_NONE; |
6471 | msleep(1); | 7371 | bnx2x_set_storm_rx_mode(bp); |
6472 | 7372 | ||
6473 | /* Delete the timer: do it before disabling interrupts, as it | 7373 | if (netif_running(bp->dev)) { |
6474 | may be stil STAT_QUERY ramrod pending after stopping the timer */ | 7374 | netif_tx_disable(bp->dev); |
6475 | del_timer_sync(&bp->timer); | 7375 | bp->dev->trans_start = jiffies; /* prevent tx timeout */ |
7376 | } | ||
7377 | |||
7378 | /* Wait until all fast path tasks complete */ | ||
7379 | for_each_queue(bp, i) { | ||
7380 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
7381 | |||
7382 | timeout = 1000; | ||
7383 | while (bnx2x_has_work(fp) && (timeout--)) | ||
7384 | msleep(1); | ||
7385 | if (!timeout) | ||
7386 | BNX2X_ERR("timeout waiting for queue[%d]\n", i); | ||
7387 | } | ||
6476 | 7388 | ||
6477 | /* Wait until stat ramrod returns and all SP tasks complete */ | 7389 | /* Wait until stat ramrod returns and all SP tasks complete */ |
6478 | while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING)) | 7390 | timeout = 1000; |
7391 | while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) && | ||
7392 | (timeout--)) | ||
6479 | msleep(1); | 7393 | msleep(1); |
6480 | 7394 | ||
6481 | /* Stop fast path, disable MAC, disable interrupts, disable napi */ | 7395 | for_each_queue(bp, i) |
6482 | bnx2x_netif_stop(bp); | 7396 | napi_disable(&bnx2x_fp(bp, i, napi)); |
7397 | /* Disable interrupts after Tx and Rx are disabled on stack level */ | ||
7398 | bnx2x_int_disable_sync(bp); | ||
6483 | 7399 | ||
6484 | if (bp->flags & NO_WOL_FLAG) | 7400 | if (bp->flags & NO_WOL_FLAG) |
6485 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; | 7401 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; |
7402 | |||
6486 | else if (bp->wol) { | 7403 | else if (bp->wol) { |
6487 | u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; | 7404 | u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1; |
6488 | u8 *mac_addr = bp->dev->dev_addr; | 7405 | u8 *mac_addr = bp->dev->dev_addr; |
@@ -6499,28 +7416,37 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq) | |||
6499 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); | 7416 | EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val); |
6500 | 7417 | ||
6501 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7418 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7419 | |||
6502 | } else | 7420 | } else |
6503 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; | 7421 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; |
6504 | 7422 | ||
7423 | /* Close multi and leading connections */ | ||
6505 | for_each_nondefault_queue(bp, i) | 7424 | for_each_nondefault_queue(bp, i) |
6506 | if (bnx2x_stop_multi(bp, i)) | 7425 | if (bnx2x_stop_multi(bp, i)) |
6507 | goto error; | 7426 | goto unload_error; |
6508 | |||
6509 | 7427 | ||
6510 | bnx2x_stop_leading(bp); | 7428 | bnx2x_stop_leading(bp); |
7429 | if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) || | ||
7430 | (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) { | ||
7431 | DP(NETIF_MSG_IFDOWN, "failed to close leading properly!" | ||
7432 | "state 0x%x fp[0].state 0x%x", | ||
7433 | bp->state, bp->fp[0].state); | ||
7434 | } | ||
7435 | |||
7436 | unload_error: | ||
7437 | bnx2x_link_reset(bp); | ||
6511 | 7438 | ||
6512 | error: | ||
6513 | if (!nomcp) | 7439 | if (!nomcp) |
6514 | rc = bnx2x_fw_command(bp, reset_code); | 7440 | reset_code = bnx2x_fw_command(bp, reset_code); |
6515 | else | 7441 | else |
6516 | rc = FW_MSG_CODE_DRV_UNLOAD_COMMON; | 7442 | reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON; |
6517 | 7443 | ||
6518 | /* Release IRQs */ | 7444 | /* Release IRQs */ |
6519 | if (fre_irq) | 7445 | if (free_irq) |
6520 | bnx2x_free_irq(bp); | 7446 | bnx2x_free_irq(bp); |
6521 | 7447 | ||
6522 | /* Reset the chip */ | 7448 | /* Reset the chip */ |
6523 | bnx2x_reset_chip(bp, rc); | 7449 | bnx2x_reset_chip(bp, reset_code); |
6524 | 7450 | ||
6525 | /* Report UNLOAD_DONE to MCP */ | 7451 | /* Report UNLOAD_DONE to MCP */ |
6526 | if (!nomcp) | 7452 | if (!nomcp) |
@@ -6531,8 +7457,7 @@ error: | |||
6531 | bnx2x_free_mem(bp); | 7457 | bnx2x_free_mem(bp); |
6532 | 7458 | ||
6533 | bp->state = BNX2X_STATE_CLOSED; | 7459 | bp->state = BNX2X_STATE_CLOSED; |
6534 | /* Set link down */ | 7460 | |
6535 | bp->link_up = 0; | ||
6536 | netif_carrier_off(bp->dev); | 7461 | netif_carrier_off(bp->dev); |
6537 | 7462 | ||
6538 | return 0; | 7463 | return 0; |
@@ -6568,7 +7493,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
6568 | SUPPORTED_100baseT_Half | | 7493 | SUPPORTED_100baseT_Half | |
6569 | SUPPORTED_100baseT_Full | | 7494 | SUPPORTED_100baseT_Full | |
6570 | SUPPORTED_1000baseT_Full | | 7495 | SUPPORTED_1000baseT_Full | |
6571 | SUPPORTED_2500baseT_Full | | 7496 | SUPPORTED_2500baseX_Full | |
6572 | SUPPORTED_TP | SUPPORTED_FIBRE | | 7497 | SUPPORTED_TP | SUPPORTED_FIBRE | |
6573 | SUPPORTED_Autoneg | | 7498 | SUPPORTED_Autoneg | |
6574 | SUPPORTED_Pause | | 7499 | SUPPORTED_Pause | |
@@ -6581,10 +7506,10 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
6581 | 7506 | ||
6582 | bp->phy_flags |= PHY_SGMII_FLAG; | 7507 | bp->phy_flags |= PHY_SGMII_FLAG; |
6583 | 7508 | ||
6584 | bp->supported |= (/* SUPPORTED_10baseT_Half | | 7509 | bp->supported |= (SUPPORTED_10baseT_Half | |
6585 | SUPPORTED_10baseT_Full | | 7510 | SUPPORTED_10baseT_Full | |
6586 | SUPPORTED_100baseT_Half | | 7511 | SUPPORTED_100baseT_Half | |
6587 | SUPPORTED_100baseT_Full |*/ | 7512 | SUPPORTED_100baseT_Full | |
6588 | SUPPORTED_1000baseT_Full | | 7513 | SUPPORTED_1000baseT_Full | |
6589 | SUPPORTED_TP | SUPPORTED_FIBRE | | 7514 | SUPPORTED_TP | SUPPORTED_FIBRE | |
6590 | SUPPORTED_Autoneg | | 7515 | SUPPORTED_Autoneg | |
@@ -6620,7 +7545,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
6620 | SUPPORTED_100baseT_Half | | 7545 | SUPPORTED_100baseT_Half | |
6621 | SUPPORTED_100baseT_Full | | 7546 | SUPPORTED_100baseT_Full | |
6622 | SUPPORTED_1000baseT_Full | | 7547 | SUPPORTED_1000baseT_Full | |
6623 | SUPPORTED_2500baseT_Full | | 7548 | SUPPORTED_2500baseX_Full | |
6624 | SUPPORTED_10000baseT_Full | | 7549 | SUPPORTED_10000baseT_Full | |
6625 | SUPPORTED_TP | SUPPORTED_FIBRE | | 7550 | SUPPORTED_TP | SUPPORTED_FIBRE | |
6626 | SUPPORTED_Autoneg | | 7551 | SUPPORTED_Autoneg | |
@@ -6629,12 +7554,46 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
6629 | break; | 7554 | break; |
6630 | 7555 | ||
6631 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | 7556 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: |
7557 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n", | ||
7558 | ext_phy_type); | ||
7559 | |||
7560 | bp->supported |= (SUPPORTED_10000baseT_Full | | ||
7561 | SUPPORTED_FIBRE | | ||
7562 | SUPPORTED_Pause | | ||
7563 | SUPPORTED_Asym_Pause); | ||
7564 | break; | ||
7565 | |||
6632 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | 7566 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: |
6633 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n", | 7567 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n", |
7568 | ext_phy_type); | ||
7569 | |||
7570 | bp->supported |= (SUPPORTED_10000baseT_Full | | ||
7571 | SUPPORTED_1000baseT_Full | | ||
7572 | SUPPORTED_Autoneg | | ||
7573 | SUPPORTED_FIBRE | | ||
7574 | SUPPORTED_Pause | | ||
7575 | SUPPORTED_Asym_Pause); | ||
7576 | break; | ||
7577 | |||
7578 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
7579 | BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n", | ||
6634 | ext_phy_type); | 7580 | ext_phy_type); |
6635 | 7581 | ||
6636 | bp->supported |= (SUPPORTED_10000baseT_Full | | 7582 | bp->supported |= (SUPPORTED_10000baseT_Full | |
7583 | SUPPORTED_1000baseT_Full | | ||
6637 | SUPPORTED_FIBRE | | 7584 | SUPPORTED_FIBRE | |
7585 | SUPPORTED_Autoneg | | ||
7586 | SUPPORTED_Pause | | ||
7587 | SUPPORTED_Asym_Pause); | ||
7588 | break; | ||
7589 | |||
7590 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
7591 | BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n", | ||
7592 | ext_phy_type); | ||
7593 | |||
7594 | bp->supported |= (SUPPORTED_10000baseT_Full | | ||
7595 | SUPPORTED_TP | | ||
7596 | SUPPORTED_Autoneg | | ||
6638 | SUPPORTED_Pause | | 7597 | SUPPORTED_Pause | |
6639 | SUPPORTED_Asym_Pause); | 7598 | SUPPORTED_Asym_Pause); |
6640 | break; | 7599 | break; |
@@ -6691,7 +7650,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) | |||
6691 | SUPPORTED_1000baseT_Full); | 7650 | SUPPORTED_1000baseT_Full); |
6692 | 7651 | ||
6693 | if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) | 7652 | if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) |
6694 | bp->supported &= ~SUPPORTED_2500baseT_Full; | 7653 | bp->supported &= ~SUPPORTED_2500baseX_Full; |
6695 | 7654 | ||
6696 | if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) | 7655 | if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) |
6697 | bp->supported &= ~SUPPORTED_10000baseT_Full; | 7656 | bp->supported &= ~SUPPORTED_10000baseT_Full; |
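
Note on the hunk above: after the PHY-specific switch builds the raw "supported" mask, it is pruned against the NVRAM speed capability mask, so a board can drop speeds its wiring cannot carry. A small sketch of that pruning with hypothetical bit definitions (the real code uses the ethtool SUPPORTED_* and PORT_HW_CFG_SPEED_CAPABILITY_* constants):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical "supported" bits. */
#define SUP_1000_FULL   (1u << 0)
#define SUP_2500_FULL   (1u << 1)
#define SUP_10000_FULL  (1u << 2)

/* Hypothetical NVRAM capability bits. */
#define CAP_1G    (1u << 0)
#define CAP_2_5G  (1u << 1)
#define CAP_10G   (1u << 2)

static uint32_t prune_supported(uint32_t supported, uint32_t speed_cap_mask)
{
	if (!(speed_cap_mask & CAP_2_5G))
		supported &= ~SUP_2500_FULL;
	if (!(speed_cap_mask & CAP_10G))
		supported &= ~SUP_10000_FULL;
	return supported;
}

int main(void)
{
	uint32_t sup = SUP_1000_FULL | SUP_2500_FULL | SUP_10000_FULL;

	printf("0x%x\n", (unsigned)prune_supported(sup, CAP_1G | CAP_10G));
	return 0;
}
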
@@ -6711,13 +7670,8 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6711 | bp->req_line_speed = 0; | 7670 | bp->req_line_speed = 0; |
6712 | bp->advertising = bp->supported; | 7671 | bp->advertising = bp->supported; |
6713 | } else { | 7672 | } else { |
6714 | u32 ext_phy_type; | 7673 | if (XGXS_EXT_PHY_TYPE(bp) == |
6715 | 7674 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) { | |
6716 | ext_phy_type = XGXS_EXT_PHY_TYPE(bp); | ||
6717 | if ((ext_phy_type == | ||
6718 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || | ||
6719 | (ext_phy_type == | ||
6720 | PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) { | ||
6721 | /* force 10G, no AN */ | 7675 | /* force 10G, no AN */ |
6722 | bp->req_line_speed = SPEED_10000; | 7676 | bp->req_line_speed = SPEED_10000; |
6723 | bp->advertising = | 7677 | bp->advertising = |
@@ -6734,8 +7688,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6734 | break; | 7688 | break; |
6735 | 7689 | ||
6736 | case PORT_FEATURE_LINK_SPEED_10M_FULL: | 7690 | case PORT_FEATURE_LINK_SPEED_10M_FULL: |
6737 | if (bp->speed_cap_mask & | 7691 | if (bp->supported & SUPPORTED_10baseT_Full) { |
6738 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) { | ||
6739 | bp->req_line_speed = SPEED_10; | 7692 | bp->req_line_speed = SPEED_10; |
6740 | bp->advertising = (ADVERTISED_10baseT_Full | | 7693 | bp->advertising = (ADVERTISED_10baseT_Full | |
6741 | ADVERTISED_TP); | 7694 | ADVERTISED_TP); |
@@ -6749,8 +7702,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6749 | break; | 7702 | break; |
6750 | 7703 | ||
6751 | case PORT_FEATURE_LINK_SPEED_10M_HALF: | 7704 | case PORT_FEATURE_LINK_SPEED_10M_HALF: |
6752 | if (bp->speed_cap_mask & | 7705 | if (bp->supported & SUPPORTED_10baseT_Half) { |
6753 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) { | ||
6754 | bp->req_line_speed = SPEED_10; | 7706 | bp->req_line_speed = SPEED_10; |
6755 | bp->req_duplex = DUPLEX_HALF; | 7707 | bp->req_duplex = DUPLEX_HALF; |
6756 | bp->advertising = (ADVERTISED_10baseT_Half | | 7708 | bp->advertising = (ADVERTISED_10baseT_Half | |
@@ -6765,8 +7717,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6765 | break; | 7717 | break; |
6766 | 7718 | ||
6767 | case PORT_FEATURE_LINK_SPEED_100M_FULL: | 7719 | case PORT_FEATURE_LINK_SPEED_100M_FULL: |
6768 | if (bp->speed_cap_mask & | 7720 | if (bp->supported & SUPPORTED_100baseT_Full) { |
6769 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) { | ||
6770 | bp->req_line_speed = SPEED_100; | 7721 | bp->req_line_speed = SPEED_100; |
6771 | bp->advertising = (ADVERTISED_100baseT_Full | | 7722 | bp->advertising = (ADVERTISED_100baseT_Full | |
6772 | ADVERTISED_TP); | 7723 | ADVERTISED_TP); |
@@ -6780,8 +7731,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6780 | break; | 7731 | break; |
6781 | 7732 | ||
6782 | case PORT_FEATURE_LINK_SPEED_100M_HALF: | 7733 | case PORT_FEATURE_LINK_SPEED_100M_HALF: |
6783 | if (bp->speed_cap_mask & | 7734 | if (bp->supported & SUPPORTED_100baseT_Half) { |
6784 | PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) { | ||
6785 | bp->req_line_speed = SPEED_100; | 7735 | bp->req_line_speed = SPEED_100; |
6786 | bp->req_duplex = DUPLEX_HALF; | 7736 | bp->req_duplex = DUPLEX_HALF; |
6787 | bp->advertising = (ADVERTISED_100baseT_Half | | 7737 | bp->advertising = (ADVERTISED_100baseT_Half | |
@@ -6796,8 +7746,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6796 | break; | 7746 | break; |
6797 | 7747 | ||
6798 | case PORT_FEATURE_LINK_SPEED_1G: | 7748 | case PORT_FEATURE_LINK_SPEED_1G: |
6799 | if (bp->speed_cap_mask & | 7749 | if (bp->supported & SUPPORTED_1000baseT_Full) { |
6800 | PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) { | ||
6801 | bp->req_line_speed = SPEED_1000; | 7750 | bp->req_line_speed = SPEED_1000; |
6802 | bp->advertising = (ADVERTISED_1000baseT_Full | | 7751 | bp->advertising = (ADVERTISED_1000baseT_Full | |
6803 | ADVERTISED_TP); | 7752 | ADVERTISED_TP); |
@@ -6811,10 +7760,9 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6811 | break; | 7760 | break; |
6812 | 7761 | ||
6813 | case PORT_FEATURE_LINK_SPEED_2_5G: | 7762 | case PORT_FEATURE_LINK_SPEED_2_5G: |
6814 | if (bp->speed_cap_mask & | 7763 | if (bp->supported & SUPPORTED_2500baseX_Full) { |
6815 | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) { | ||
6816 | bp->req_line_speed = SPEED_2500; | 7764 | bp->req_line_speed = SPEED_2500; |
6817 | bp->advertising = (ADVERTISED_2500baseT_Full | | 7765 | bp->advertising = (ADVERTISED_2500baseX_Full | |
6818 | ADVERTISED_TP); | 7766 | ADVERTISED_TP); |
6819 | } else { | 7767 | } else { |
6820 | BNX2X_ERR("NVRAM config error. " | 7768 | BNX2X_ERR("NVRAM config error. " |
@@ -6828,15 +7776,7 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6828 | case PORT_FEATURE_LINK_SPEED_10G_CX4: | 7776 | case PORT_FEATURE_LINK_SPEED_10G_CX4: |
6829 | case PORT_FEATURE_LINK_SPEED_10G_KX4: | 7777 | case PORT_FEATURE_LINK_SPEED_10G_KX4: |
6830 | case PORT_FEATURE_LINK_SPEED_10G_KR: | 7778 | case PORT_FEATURE_LINK_SPEED_10G_KR: |
6831 | if (!(bp->phy_flags & PHY_XGXS_FLAG)) { | 7779 | if (bp->supported & SUPPORTED_10000baseT_Full) { |
6832 | BNX2X_ERR("NVRAM config error. " | ||
6833 | "Invalid link_config 0x%x" | ||
6834 | " phy_flags 0x%x\n", | ||
6835 | bp->link_config, bp->phy_flags); | ||
6836 | return; | ||
6837 | } | ||
6838 | if (bp->speed_cap_mask & | ||
6839 | PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { | ||
6840 | bp->req_line_speed = SPEED_10000; | 7780 | bp->req_line_speed = SPEED_10000; |
6841 | bp->advertising = (ADVERTISED_10000baseT_Full | | 7781 | bp->advertising = (ADVERTISED_10000baseT_Full | |
6842 | ADVERTISED_FIBRE); | 7782 | ADVERTISED_FIBRE); |
@@ -6863,43 +7803,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) | |||
6863 | 7803 | ||
6864 | bp->req_flow_ctrl = (bp->link_config & | 7804 | bp->req_flow_ctrl = (bp->link_config & |
6865 | PORT_FEATURE_FLOW_CONTROL_MASK); | 7805 | PORT_FEATURE_FLOW_CONTROL_MASK); |
6866 | /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */ | 7806 | if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) && |
6867 | switch (bp->req_flow_ctrl) { | 7807 | (bp->supported & SUPPORTED_Autoneg)) |
6868 | case FLOW_CTRL_AUTO: | ||
6869 | bp->req_autoneg |= AUTONEG_FLOW_CTRL; | 7808 | bp->req_autoneg |= AUTONEG_FLOW_CTRL; |
6870 | if (bp->dev->mtu <= 4500) { | ||
6871 | bp->pause_mode = PAUSE_BOTH; | ||
6872 | bp->advertising |= (ADVERTISED_Pause | | ||
6873 | ADVERTISED_Asym_Pause); | ||
6874 | } else { | ||
6875 | bp->pause_mode = PAUSE_ASYMMETRIC; | ||
6876 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
6877 | } | ||
6878 | break; | ||
6879 | |||
6880 | case FLOW_CTRL_TX: | ||
6881 | bp->pause_mode = PAUSE_ASYMMETRIC; | ||
6882 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
6883 | break; | ||
6884 | |||
6885 | case FLOW_CTRL_RX: | ||
6886 | case FLOW_CTRL_BOTH: | ||
6887 | bp->pause_mode = PAUSE_BOTH; | ||
6888 | bp->advertising |= (ADVERTISED_Pause | | ||
6889 | ADVERTISED_Asym_Pause); | ||
6890 | break; | ||
6891 | 7809 | ||
6892 | case FLOW_CTRL_NONE: | 7810 | BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x" |
6893 | default: | 7811 | " advertising 0x%x\n", |
6894 | bp->pause_mode = PAUSE_NONE; | 7812 | bp->req_autoneg, bp->req_flow_ctrl, bp->advertising); |
6895 | bp->advertising &= ~(ADVERTISED_Pause | | ||
6896 | ADVERTISED_Asym_Pause); | ||
6897 | break; | ||
6898 | } | ||
6899 | BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x\n" | ||
6900 | KERN_INFO " pause_mode %d advertising 0x%x\n", | ||
6901 | bp->req_autoneg, bp->req_flow_ctrl, | ||
6902 | bp->pause_mode, bp->advertising); | ||
6903 | } | 7813 | } |
6904 | 7814 | ||
6905 | static void bnx2x_get_hwinfo(struct bnx2x *bp) | 7815 | static void bnx2x_get_hwinfo(struct bnx2x *bp) |
@@ -6933,15 +7843,15 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6933 | val = SHMEM_RD(bp, validity_map[port]); | 7843 | val = SHMEM_RD(bp, validity_map[port]); |
6934 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | 7844 | if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) |
6935 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) | 7845 | != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) |
6936 | BNX2X_ERR("MCP validity signature bad\n"); | 7846 | BNX2X_ERR("BAD MCP validity signature\n"); |
6937 | 7847 | ||
6938 | bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) & | 7848 | bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) & |
6939 | DRV_MSG_SEQ_NUMBER_MASK); | 7849 | DRV_MSG_SEQ_NUMBER_MASK); |
6940 | 7850 | ||
6941 | bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); | 7851 | bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); |
6942 | 7852 | bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board); | |
6943 | bp->serdes_config = | 7853 | bp->serdes_config = |
6944 | SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config); | 7854 | SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config); |
6945 | bp->lane_config = | 7855 | bp->lane_config = |
6946 | SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); | 7856 | SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config); |
6947 | bp->ext_phy_config = | 7857 | bp->ext_phy_config = |
@@ -6954,13 +7864,13 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
6954 | bp->link_config = | 7864 | bp->link_config = |
6955 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); | 7865 | SHMEM_RD(bp, dev_info.port_feature_config[port].link_config); |
6956 | 7866 | ||
6957 | BNX2X_DEV_INFO("hw_config (%08x) serdes_config (%08x)\n" | 7867 | BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n" |
6958 | KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n" | 7868 | KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n" |
6959 | KERN_INFO " speed_cap_mask (%08x) link_config (%08x)" | 7869 | KERN_INFO " speed_cap_mask (%08x) link_config (%08x)" |
6960 | " fw_seq (%08x)\n", | 7870 | " fw_seq (%08x)\n", |
6961 | bp->hw_config, bp->serdes_config, bp->lane_config, | 7871 | bp->hw_config, bp->board, bp->serdes_config, |
6962 | bp->ext_phy_config, bp->speed_cap_mask, | 7872 | bp->lane_config, bp->ext_phy_config, |
6963 | bp->link_config, bp->fw_seq); | 7873 | bp->speed_cap_mask, bp->link_config, bp->fw_seq); |
6964 | 7874 | ||
6965 | switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK); | 7875 | switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK); |
6966 | bnx2x_link_settings_supported(bp, switch_cfg); | 7876 | bnx2x_link_settings_supported(bp, switch_cfg); |
@@ -7014,14 +7924,8 @@ static void bnx2x_get_hwinfo(struct bnx2x *bp) | |||
7014 | return; | 7924 | return; |
7015 | 7925 | ||
7016 | set_mac: /* only supposed to happen on emulation/FPGA */ | 7926 | set_mac: /* only supposed to happen on emulation/FPGA */ |
7017 | BNX2X_ERR("warning constant MAC workaround active\n"); | 7927 | BNX2X_ERR("warning random MAC workaround active\n"); |
7018 | bp->dev->dev_addr[0] = 0; | 7928 | random_ether_addr(bp->dev->dev_addr); |
7019 | bp->dev->dev_addr[1] = 0x50; | ||
7020 | bp->dev->dev_addr[2] = 0xc2; | ||
7021 | bp->dev->dev_addr[3] = 0x2c; | ||
7022 | bp->dev->dev_addr[4] = 0x71; | ||
7023 | bp->dev->dev_addr[5] = port ? 0x0d : 0x0e; | ||
7024 | |||
7025 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); | 7929 | memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6); |
7026 | 7930 | ||
7027 | } | 7931 | } |
@@ -7048,19 +7952,34 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7048 | } | 7952 | } |
7049 | 7953 | ||
7050 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 7954 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
7051 | cmd->port = PORT_FIBRE; | 7955 | u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp); |
7052 | } else { | 7956 | |
7957 | switch (ext_phy_type) { | ||
7958 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: | ||
7959 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705: | ||
7960 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706: | ||
7961 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072: | ||
7962 | cmd->port = PORT_FIBRE; | ||
7963 | break; | ||
7964 | |||
7965 | case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: | ||
7966 | cmd->port = PORT_TP; | ||
7967 | break; | ||
7968 | |||
7969 | default: | ||
7970 | DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n", | ||
7971 | bp->ext_phy_config); | ||
7972 | } | ||
7973 | } else | ||
7053 | cmd->port = PORT_TP; | 7974 | cmd->port = PORT_TP; |
7054 | } | ||
7055 | 7975 | ||
7056 | cmd->phy_address = bp->phy_addr; | 7976 | cmd->phy_address = bp->phy_addr; |
7057 | cmd->transceiver = XCVR_INTERNAL; | 7977 | cmd->transceiver = XCVR_INTERNAL; |
7058 | 7978 | ||
7059 | if (bp->req_autoneg & AUTONEG_SPEED) { | 7979 | if (bp->req_autoneg & AUTONEG_SPEED) |
7060 | cmd->autoneg = AUTONEG_ENABLE; | 7980 | cmd->autoneg = AUTONEG_ENABLE; |
7061 | } else { | 7981 | else |
7062 | cmd->autoneg = AUTONEG_DISABLE; | 7982 | cmd->autoneg = AUTONEG_DISABLE; |
7063 | } | ||
7064 | 7983 | ||
7065 | cmd->maxtxpkt = 0; | 7984 | cmd->maxtxpkt = 0; |
7066 | cmd->maxrxpkt = 0; | 7985 | cmd->maxrxpkt = 0; |
@@ -7091,8 +8010,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7091 | 8010 | ||
7092 | switch (cmd->port) { | 8011 | switch (cmd->port) { |
7093 | case PORT_TP: | 8012 | case PORT_TP: |
7094 | if (!(bp->supported & SUPPORTED_TP)) | 8013 | if (!(bp->supported & SUPPORTED_TP)) { |
8014 | DP(NETIF_MSG_LINK, "TP not supported\n"); | ||
7095 | return -EINVAL; | 8015 | return -EINVAL; |
8016 | } | ||
7096 | 8017 | ||
7097 | if (bp->phy_flags & PHY_XGXS_FLAG) { | 8018 | if (bp->phy_flags & PHY_XGXS_FLAG) { |
7098 | bnx2x_link_reset(bp); | 8019 | bnx2x_link_reset(bp); |
@@ -7102,8 +8023,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7102 | break; | 8023 | break; |
7103 | 8024 | ||
7104 | case PORT_FIBRE: | 8025 | case PORT_FIBRE: |
7105 | if (!(bp->supported & SUPPORTED_FIBRE)) | 8026 | if (!(bp->supported & SUPPORTED_FIBRE)) { |
8027 | DP(NETIF_MSG_LINK, "FIBRE not supported\n"); | ||
7106 | return -EINVAL; | 8028 | return -EINVAL; |
8029 | } | ||
7107 | 8030 | ||
7108 | if (!(bp->phy_flags & PHY_XGXS_FLAG)) { | 8031 | if (!(bp->phy_flags & PHY_XGXS_FLAG)) { |
7109 | bnx2x_link_reset(bp); | 8032 | bnx2x_link_reset(bp); |
@@ -7113,12 +8036,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7113 | break; | 8036 | break; |
7114 | 8037 | ||
7115 | default: | 8038 | default: |
8039 | DP(NETIF_MSG_LINK, "Unknown port type\n"); | ||
7116 | return -EINVAL; | 8040 | return -EINVAL; |
7117 | } | 8041 | } |
7118 | 8042 | ||
7119 | if (cmd->autoneg == AUTONEG_ENABLE) { | 8043 | if (cmd->autoneg == AUTONEG_ENABLE) { |
7120 | if (!(bp->supported & SUPPORTED_Autoneg)) | 8044 | if (!(bp->supported & SUPPORTED_Autoneg)) { |
8045 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); | ||
7121 | return -EINVAL; | 8046 | return -EINVAL; |
8047 | } | ||
7122 | 8048 | ||
7123 | /* advertise the requested speed and duplex if supported */ | 8049 | /* advertise the requested speed and duplex if supported */ |
7124 | cmd->advertising &= bp->supported; | 8050 | cmd->advertising &= bp->supported; |
@@ -7133,14 +8059,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7133 | switch (cmd->speed) { | 8059 | switch (cmd->speed) { |
7134 | case SPEED_10: | 8060 | case SPEED_10: |
7135 | if (cmd->duplex == DUPLEX_FULL) { | 8061 | if (cmd->duplex == DUPLEX_FULL) { |
7136 | if (!(bp->supported & SUPPORTED_10baseT_Full)) | 8062 | if (!(bp->supported & |
8063 | SUPPORTED_10baseT_Full)) { | ||
8064 | DP(NETIF_MSG_LINK, | ||
8065 | "10M full not supported\n"); | ||
7137 | return -EINVAL; | 8066 | return -EINVAL; |
8067 | } | ||
7138 | 8068 | ||
7139 | advertising = (ADVERTISED_10baseT_Full | | 8069 | advertising = (ADVERTISED_10baseT_Full | |
7140 | ADVERTISED_TP); | 8070 | ADVERTISED_TP); |
7141 | } else { | 8071 | } else { |
7142 | if (!(bp->supported & SUPPORTED_10baseT_Half)) | 8072 | if (!(bp->supported & |
8073 | SUPPORTED_10baseT_Half)) { | ||
8074 | DP(NETIF_MSG_LINK, | ||
8075 | "10M half not supported\n"); | ||
7143 | return -EINVAL; | 8076 | return -EINVAL; |
8077 | } | ||
7144 | 8078 | ||
7145 | advertising = (ADVERTISED_10baseT_Half | | 8079 | advertising = (ADVERTISED_10baseT_Half | |
7146 | ADVERTISED_TP); | 8080 | ADVERTISED_TP); |
@@ -7150,15 +8084,21 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7150 | case SPEED_100: | 8084 | case SPEED_100: |
7151 | if (cmd->duplex == DUPLEX_FULL) { | 8085 | if (cmd->duplex == DUPLEX_FULL) { |
7152 | if (!(bp->supported & | 8086 | if (!(bp->supported & |
7153 | SUPPORTED_100baseT_Full)) | 8087 | SUPPORTED_100baseT_Full)) { |
8088 | DP(NETIF_MSG_LINK, | ||
8089 | "100M full not supported\n"); | ||
7154 | return -EINVAL; | 8090 | return -EINVAL; |
8091 | } | ||
7155 | 8092 | ||
7156 | advertising = (ADVERTISED_100baseT_Full | | 8093 | advertising = (ADVERTISED_100baseT_Full | |
7157 | ADVERTISED_TP); | 8094 | ADVERTISED_TP); |
7158 | } else { | 8095 | } else { |
7159 | if (!(bp->supported & | 8096 | if (!(bp->supported & |
7160 | SUPPORTED_100baseT_Half)) | 8097 | SUPPORTED_100baseT_Half)) { |
8098 | DP(NETIF_MSG_LINK, | ||
8099 | "100M half not supported\n"); | ||
7161 | return -EINVAL; | 8100 | return -EINVAL; |
8101 | } | ||
7162 | 8102 | ||
7163 | advertising = (ADVERTISED_100baseT_Half | | 8103 | advertising = (ADVERTISED_100baseT_Half | |
7164 | ADVERTISED_TP); | 8104 | ADVERTISED_TP); |
@@ -7166,39 +8106,54 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7166 | break; | 8106 | break; |
7167 | 8107 | ||
7168 | case SPEED_1000: | 8108 | case SPEED_1000: |
7169 | if (cmd->duplex != DUPLEX_FULL) | 8109 | if (cmd->duplex != DUPLEX_FULL) { |
8110 | DP(NETIF_MSG_LINK, "1G half not supported\n"); | ||
7170 | return -EINVAL; | 8111 | return -EINVAL; |
8112 | } | ||
7171 | 8113 | ||
7172 | if (!(bp->supported & SUPPORTED_1000baseT_Full)) | 8114 | if (!(bp->supported & SUPPORTED_1000baseT_Full)) { |
8115 | DP(NETIF_MSG_LINK, "1G full not supported\n"); | ||
7173 | return -EINVAL; | 8116 | return -EINVAL; |
8117 | } | ||
7174 | 8118 | ||
7175 | advertising = (ADVERTISED_1000baseT_Full | | 8119 | advertising = (ADVERTISED_1000baseT_Full | |
7176 | ADVERTISED_TP); | 8120 | ADVERTISED_TP); |
7177 | break; | 8121 | break; |
7178 | 8122 | ||
7179 | case SPEED_2500: | 8123 | case SPEED_2500: |
7180 | if (cmd->duplex != DUPLEX_FULL) | 8124 | if (cmd->duplex != DUPLEX_FULL) { |
8125 | DP(NETIF_MSG_LINK, | ||
8126 | "2.5G half not supported\n"); | ||
7181 | return -EINVAL; | 8127 | return -EINVAL; |
8128 | } | ||
7182 | 8129 | ||
7183 | if (!(bp->supported & SUPPORTED_2500baseT_Full)) | 8130 | if (!(bp->supported & SUPPORTED_2500baseX_Full)) { |
8131 | DP(NETIF_MSG_LINK, | ||
8132 | "2.5G full not supported\n"); | ||
7184 | return -EINVAL; | 8133 | return -EINVAL; |
8134 | } | ||
7185 | 8135 | ||
7186 | advertising = (ADVERTISED_2500baseT_Full | | 8136 | advertising = (ADVERTISED_2500baseX_Full | |
7187 | ADVERTISED_TP); | 8137 | ADVERTISED_TP); |
7188 | break; | 8138 | break; |
7189 | 8139 | ||
7190 | case SPEED_10000: | 8140 | case SPEED_10000: |
7191 | if (cmd->duplex != DUPLEX_FULL) | 8141 | if (cmd->duplex != DUPLEX_FULL) { |
8142 | DP(NETIF_MSG_LINK, "10G half not supported\n"); | ||
7192 | return -EINVAL; | 8143 | return -EINVAL; |
8144 | } | ||
7193 | 8145 | ||
7194 | if (!(bp->supported & SUPPORTED_10000baseT_Full)) | 8146 | if (!(bp->supported & SUPPORTED_10000baseT_Full)) { |
8147 | DP(NETIF_MSG_LINK, "10G full not supported\n"); | ||
7195 | return -EINVAL; | 8148 | return -EINVAL; |
8149 | } | ||
7196 | 8150 | ||
7197 | advertising = (ADVERTISED_10000baseT_Full | | 8151 | advertising = (ADVERTISED_10000baseT_Full | |
7198 | ADVERTISED_FIBRE); | 8152 | ADVERTISED_FIBRE); |
7199 | break; | 8153 | break; |
7200 | 8154 | ||
7201 | default: | 8155 | default: |
8156 | DP(NETIF_MSG_LINK, "Unsupported speed\n"); | ||
7202 | return -EINVAL; | 8157 | return -EINVAL; |
7203 | } | 8158 | } |
7204 | 8159 | ||
@@ -7398,8 +8353,7 @@ static void bnx2x_disable_nvram_access(struct bnx2x *bp) | |||
7398 | static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, | 8353 | static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val, |
7399 | u32 cmd_flags) | 8354 | u32 cmd_flags) |
7400 | { | 8355 | { |
7401 | int rc; | 8356 | int count, i, rc; |
7402 | int count, i; | ||
7403 | u32 val; | 8357 | u32 val; |
7404 | 8358 | ||
7405 | /* build the command word */ | 8359 | /* build the command word */ |
@@ -7452,13 +8406,13 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf, | |||
7452 | 8406 | ||
7453 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 8407 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
7454 | DP(NETIF_MSG_NVM, | 8408 | DP(NETIF_MSG_NVM, |
7455 | "Invalid paramter: offset 0x%x buf_size 0x%x\n", | 8409 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
7456 | offset, buf_size); | 8410 | offset, buf_size); |
7457 | return -EINVAL; | 8411 | return -EINVAL; |
7458 | } | 8412 | } |
7459 | 8413 | ||
7460 | if (offset + buf_size > bp->flash_size) { | 8414 | if (offset + buf_size > bp->flash_size) { |
7461 | DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" | 8415 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
7462 | " buf_size (0x%x) > flash_size (0x%x)\n", | 8416 | " buf_size (0x%x) > flash_size (0x%x)\n", |
7463 | offset, buf_size, bp->flash_size); | 8417 | offset, buf_size, bp->flash_size); |
7464 | return -EINVAL; | 8418 | return -EINVAL; |
@@ -7519,8 +8473,7 @@ static int bnx2x_get_eeprom(struct net_device *dev, | |||
7519 | static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, | 8473 | static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, |
7520 | u32 cmd_flags) | 8474 | u32 cmd_flags) |
7521 | { | 8475 | { |
7522 | int rc; | 8476 | int count, i, rc; |
7523 | int count, i; | ||
7524 | 8477 | ||
7525 | /* build the command word */ | 8478 | /* build the command word */ |
7526 | cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; | 8479 | cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR; |
@@ -7557,7 +8510,7 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, | |||
7557 | return rc; | 8510 | return rc; |
7558 | } | 8511 | } |
7559 | 8512 | ||
7560 | #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) | 8513 | #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) |
7561 | 8514 | ||
7562 | static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | 8515 | static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, |
7563 | int buf_size) | 8516 | int buf_size) |
@@ -7568,7 +8521,7 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
7568 | u32 val; | 8521 | u32 val; |
7569 | 8522 | ||
7570 | if (offset + buf_size > bp->flash_size) { | 8523 | if (offset + buf_size > bp->flash_size) { |
7571 | DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" | 8524 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
7572 | " buf_size (0x%x) > flash_size (0x%x)\n", | 8525 | " buf_size (0x%x) > flash_size (0x%x)\n", |
7573 | offset, buf_size, bp->flash_size); | 8526 | offset, buf_size, bp->flash_size); |
7574 | return -EINVAL; | 8527 | return -EINVAL; |
@@ -7621,13 +8574,13 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf, | |||
7621 | 8574 | ||
7622 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { | 8575 | if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { |
7623 | DP(NETIF_MSG_NVM, | 8576 | DP(NETIF_MSG_NVM, |
7624 | "Invalid paramter: offset 0x%x buf_size 0x%x\n", | 8577 | "Invalid parameter: offset 0x%x buf_size 0x%x\n", |
7625 | offset, buf_size); | 8578 | offset, buf_size); |
7626 | return -EINVAL; | 8579 | return -EINVAL; |
7627 | } | 8580 | } |
7628 | 8581 | ||
7629 | if (offset + buf_size > bp->flash_size) { | 8582 | if (offset + buf_size > bp->flash_size) { |
7630 | DP(NETIF_MSG_NVM, "Invalid paramter: offset (0x%x) +" | 8583 | DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +" |
7631 | " buf_size (0x%x) > flash_size (0x%x)\n", | 8584 | " buf_size (0x%x) > flash_size (0x%x)\n", |
7632 | offset, buf_size, bp->flash_size); | 8585 | offset, buf_size, bp->flash_size); |
7633 | return -EINVAL; | 8586 | return -EINVAL; |
@@ -7788,52 +8741,29 @@ static int bnx2x_set_pauseparam(struct net_device *dev, | |||
7788 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", | 8741 | DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", |
7789 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); | 8742 | epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); |
7790 | 8743 | ||
7791 | bp->req_flow_ctrl = FLOW_CTRL_AUTO; | ||
7792 | if (epause->autoneg) { | 8744 | if (epause->autoneg) { |
7793 | bp->req_autoneg |= AUTONEG_FLOW_CTRL; | 8745 | if (!(bp->supported & SUPPORTED_Autoneg)) { |
7794 | if (bp->dev->mtu <= 4500) { | 8746 | DP(NETIF_MSG_LINK, "Autoneg not supported\n"); |
7795 | bp->pause_mode = PAUSE_BOTH; | 8747 | return -EINVAL; |
7796 | bp->advertising |= (ADVERTISED_Pause | | ||
7797 | ADVERTISED_Asym_Pause); | ||
7798 | } else { | ||
7799 | bp->pause_mode = PAUSE_ASYMMETRIC; | ||
7800 | bp->advertising |= ADVERTISED_Asym_Pause; | ||
7801 | } | 8748 | } |
7802 | 8749 | ||
7803 | } else { | 8750 | bp->req_autoneg |= AUTONEG_FLOW_CTRL; |
8751 | } else | ||
7804 | bp->req_autoneg &= ~AUTONEG_FLOW_CTRL; | 8752 | bp->req_autoneg &= ~AUTONEG_FLOW_CTRL; |
7805 | 8753 | ||
7806 | if (epause->rx_pause) | 8754 | bp->req_flow_ctrl = FLOW_CTRL_AUTO; |
7807 | bp->req_flow_ctrl |= FLOW_CTRL_RX; | ||
7808 | if (epause->tx_pause) | ||
7809 | bp->req_flow_ctrl |= FLOW_CTRL_TX; | ||
7810 | |||
7811 | switch (bp->req_flow_ctrl) { | ||
7812 | case FLOW_CTRL_AUTO: | ||
7813 | bp->req_flow_ctrl = FLOW_CTRL_NONE; | ||
7814 | bp->pause_mode = PAUSE_NONE; | ||
7815 | bp->advertising &= ~(ADVERTISED_Pause | | ||
7816 | ADVERTISED_Asym_Pause); | ||
7817 | break; | ||
7818 | 8755 | ||
7819 | case FLOW_CTRL_TX: | 8756 | if (epause->rx_pause) |
7820 | bp->pause_mode = PAUSE_ASYMMETRIC; | 8757 | bp->req_flow_ctrl |= FLOW_CTRL_RX; |
7821 | bp->advertising |= ADVERTISED_Asym_Pause; | 8758 | if (epause->tx_pause) |
7822 | break; | 8759 | bp->req_flow_ctrl |= FLOW_CTRL_TX; |
7823 | 8760 | ||
7824 | case FLOW_CTRL_RX: | 8761 | if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) && |
7825 | case FLOW_CTRL_BOTH: | 8762 | (bp->req_flow_ctrl == FLOW_CTRL_AUTO)) |
7826 | bp->pause_mode = PAUSE_BOTH; | 8763 | bp->req_flow_ctrl = FLOW_CTRL_NONE; |
7827 | bp->advertising |= (ADVERTISED_Pause | | ||
7828 | ADVERTISED_Asym_Pause); | ||
7829 | break; | ||
7830 | } | ||
7831 | } | ||
7832 | 8764 | ||
7833 | DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n" | 8765 | DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n", |
7834 | DP_LEVEL " pause_mode %d advertising 0x%x\n", | 8766 | bp->req_autoneg, bp->req_flow_ctrl); |
7835 | bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode, | ||
7836 | bp->advertising); | ||
7837 | 8767 | ||
7838 | bnx2x_stop_stats(bp); | 8768 | bnx2x_stop_stats(bp); |
7839 | bnx2x_link_initialize(bp); | 8769 | bnx2x_link_initialize(bp); |
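The set_pauseparam rework above reduces to a small mapping: the rx/tx pause booleans from ethtool are OR-ed into req_flow_ctrl, and a request that is still "auto" while flow-control autonegotiation is not requested collapses to "none". A minimal user-space sketch of that mapping, assuming illustrative flag values rather than the driver's PORT_FEATURE_FLOW_CONTROL_* encoding:

    #include <stdio.h>

    /* Illustrative values only; the real driver aliases these to
     * PORT_FEATURE_FLOW_CONTROL_* constants. */
    #define FLOW_CTRL_AUTO  0
    #define FLOW_CTRL_TX    1
    #define FLOW_CTRL_RX    2
    #define FLOW_CTRL_NONE  4

    static int map_flow_ctrl(int autoneg_flow_ctrl, int rx_pause, int tx_pause)
    {
            int req = FLOW_CTRL_AUTO;

            if (rx_pause)
                    req |= FLOW_CTRL_RX;
            if (tx_pause)
                    req |= FLOW_CTRL_TX;

            /* Without flow-control autoneg, "auto" degenerates to "none". */
            if (!autoneg_flow_ctrl && req == FLOW_CTRL_AUTO)
                    req = FLOW_CTRL_NONE;
            return req;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   map_flow_ctrl(0, 0, 0),   /* -> FLOW_CTRL_NONE  */
                   map_flow_ctrl(1, 1, 0),   /* -> FLOW_CTRL_RX    */
                   map_flow_ctrl(0, 1, 1));  /* -> FLOW_CTRL_RX|TX */
            return 0;
    }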
@@ -7906,81 +8836,87 @@ static void bnx2x_self_test(struct net_device *dev, | |||
7906 | static struct { | 8836 | static struct { |
7907 | char string[ETH_GSTRING_LEN]; | 8837 | char string[ETH_GSTRING_LEN]; |
7908 | } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = { | 8838 | } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = { |
7909 | { "rx_bytes"}, /* 0 */ | 8839 | { "rx_bytes"}, |
7910 | { "rx_error_bytes"}, /* 1 */ | 8840 | { "rx_error_bytes"}, |
7911 | { "tx_bytes"}, /* 2 */ | 8841 | { "tx_bytes"}, |
7912 | { "tx_error_bytes"}, /* 3 */ | 8842 | { "tx_error_bytes"}, |
7913 | { "rx_ucast_packets"}, /* 4 */ | 8843 | { "rx_ucast_packets"}, |
7914 | { "rx_mcast_packets"}, /* 5 */ | 8844 | { "rx_mcast_packets"}, |
7915 | { "rx_bcast_packets"}, /* 6 */ | 8845 | { "rx_bcast_packets"}, |
7916 | { "tx_ucast_packets"}, /* 7 */ | 8846 | { "tx_ucast_packets"}, |
7917 | { "tx_mcast_packets"}, /* 8 */ | 8847 | { "tx_mcast_packets"}, |
7918 | { "tx_bcast_packets"}, /* 9 */ | 8848 | { "tx_bcast_packets"}, |
7919 | { "tx_mac_errors"}, /* 10 */ | 8849 | { "tx_mac_errors"}, /* 10 */ |
7920 | { "tx_carrier_errors"}, /* 11 */ | 8850 | { "tx_carrier_errors"}, |
7921 | { "rx_crc_errors"}, /* 12 */ | 8851 | { "rx_crc_errors"}, |
7922 | { "rx_align_errors"}, /* 13 */ | 8852 | { "rx_align_errors"}, |
7923 | { "tx_single_collisions"}, /* 14 */ | 8853 | { "tx_single_collisions"}, |
7924 | { "tx_multi_collisions"}, /* 15 */ | 8854 | { "tx_multi_collisions"}, |
7925 | { "tx_deferred"}, /* 16 */ | 8855 | { "tx_deferred"}, |
7926 | { "tx_excess_collisions"}, /* 17 */ | 8856 | { "tx_excess_collisions"}, |
7927 | { "tx_late_collisions"}, /* 18 */ | 8857 | { "tx_late_collisions"}, |
7928 | { "tx_total_collisions"}, /* 19 */ | 8858 | { "tx_total_collisions"}, |
7929 | { "rx_fragments"}, /* 20 */ | 8859 | { "rx_fragments"}, /* 20 */ |
7930 | { "rx_jabbers"}, /* 21 */ | 8860 | { "rx_jabbers"}, |
7931 | { "rx_undersize_packets"}, /* 22 */ | 8861 | { "rx_undersize_packets"}, |
7932 | { "rx_oversize_packets"}, /* 23 */ | 8862 | { "rx_oversize_packets"}, |
7933 | { "rx_xon_frames"}, /* 24 */ | 8863 | { "rx_xon_frames"}, |
7934 | { "rx_xoff_frames"}, /* 25 */ | 8864 | { "rx_xoff_frames"}, |
7935 | { "tx_xon_frames"}, /* 26 */ | 8865 | { "tx_xon_frames"}, |
7936 | { "tx_xoff_frames"}, /* 27 */ | 8866 | { "tx_xoff_frames"}, |
7937 | { "rx_mac_ctrl_frames"}, /* 28 */ | 8867 | { "rx_mac_ctrl_frames"}, |
7938 | { "rx_filtered_packets"}, /* 29 */ | 8868 | { "rx_filtered_packets"}, |
7939 | { "rx_discards"}, /* 30 */ | 8869 | { "rx_discards"}, /* 30 */ |
8870 | { "brb_discard"}, | ||
8871 | { "brb_truncate"}, | ||
8872 | { "xxoverflow"} | ||
7940 | }; | 8873 | }; |
7941 | 8874 | ||
7942 | #define STATS_OFFSET32(offset_name) \ | 8875 | #define STATS_OFFSET32(offset_name) \ |
7943 | (offsetof(struct bnx2x_eth_stats, offset_name) / 4) | 8876 | (offsetof(struct bnx2x_eth_stats, offset_name) / 4) |
7944 | 8877 | ||
7945 | static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = { | 8878 | static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = { |
7946 | STATS_OFFSET32(total_bytes_received_hi), /* 0 */ | 8879 | STATS_OFFSET32(total_bytes_received_hi), |
7947 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */ | 8880 | STATS_OFFSET32(stat_IfHCInBadOctets_hi), |
7948 | STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */ | 8881 | STATS_OFFSET32(total_bytes_transmitted_hi), |
7949 | STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */ | 8882 | STATS_OFFSET32(stat_IfHCOutBadOctets_hi), |
7950 | STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */ | 8883 | STATS_OFFSET32(total_unicast_packets_received_hi), |
7951 | STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */ | 8884 | STATS_OFFSET32(total_multicast_packets_received_hi), |
7952 | STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */ | 8885 | STATS_OFFSET32(total_broadcast_packets_received_hi), |
7953 | STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */ | 8886 | STATS_OFFSET32(total_unicast_packets_transmitted_hi), |
7954 | STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */ | 8887 | STATS_OFFSET32(total_multicast_packets_transmitted_hi), |
7955 | STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */ | 8888 | STATS_OFFSET32(total_broadcast_packets_transmitted_hi), |
7956 | STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */ | 8889 | STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */ |
7957 | STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */ | 8890 | STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), |
7958 | STATS_OFFSET32(crc_receive_errors), /* 12 */ | 8891 | STATS_OFFSET32(crc_receive_errors), |
7959 | STATS_OFFSET32(alignment_errors), /* 13 */ | 8892 | STATS_OFFSET32(alignment_errors), |
7960 | STATS_OFFSET32(single_collision_transmit_frames), /* 14 */ | 8893 | STATS_OFFSET32(single_collision_transmit_frames), |
7961 | STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */ | 8894 | STATS_OFFSET32(multiple_collision_transmit_frames), |
7962 | STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */ | 8895 | STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), |
7963 | STATS_OFFSET32(excessive_collision_frames), /* 17 */ | 8896 | STATS_OFFSET32(excessive_collision_frames), |
7964 | STATS_OFFSET32(late_collision_frames), /* 18 */ | 8897 | STATS_OFFSET32(late_collision_frames), |
7965 | STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */ | 8898 | STATS_OFFSET32(number_of_bugs_found_in_stats_spec), |
7966 | STATS_OFFSET32(runt_packets_received), /* 20 */ | 8899 | STATS_OFFSET32(runt_packets_received), /* 20 */ |
7967 | STATS_OFFSET32(jabber_packets_received), /* 21 */ | 8900 | STATS_OFFSET32(jabber_packets_received), |
7968 | STATS_OFFSET32(error_runt_packets_received), /* 22 */ | 8901 | STATS_OFFSET32(error_runt_packets_received), |
7969 | STATS_OFFSET32(error_jabber_packets_received), /* 23 */ | 8902 | STATS_OFFSET32(error_jabber_packets_received), |
7970 | STATS_OFFSET32(pause_xon_frames_received), /* 24 */ | 8903 | STATS_OFFSET32(pause_xon_frames_received), |
7971 | STATS_OFFSET32(pause_xoff_frames_received), /* 25 */ | 8904 | STATS_OFFSET32(pause_xoff_frames_received), |
7972 | STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */ | 8905 | STATS_OFFSET32(pause_xon_frames_transmitted), |
7973 | STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */ | 8906 | STATS_OFFSET32(pause_xoff_frames_transmitted), |
7974 | STATS_OFFSET32(control_frames_received), /* 28 */ | 8907 | STATS_OFFSET32(control_frames_received), |
7975 | STATS_OFFSET32(mac_filter_discard), /* 29 */ | 8908 | STATS_OFFSET32(mac_filter_discard), |
7976 | STATS_OFFSET32(no_buff_discard), /* 30 */ | 8909 | STATS_OFFSET32(no_buff_discard), /* 30 */ |
8910 | STATS_OFFSET32(brb_discard), | ||
8911 | STATS_OFFSET32(brb_truncate_discard), | ||
8912 | STATS_OFFSET32(xxoverflow_discard) | ||
7977 | }; | 8913 | }; |
7978 | 8914 | ||
7979 | static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = { | 8915 | static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = { |
7980 | 8, 0, 8, 0, 8, 8, 8, 8, 8, 8, | 8916 | 8, 0, 8, 0, 8, 8, 8, 8, 8, 8, |
7981 | 4, 0, 4, 4, 4, 4, 4, 4, 4, 4, | 8917 | 4, 0, 4, 4, 4, 4, 4, 4, 4, 4, |
7982 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, | 8918 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
7983 | 4, | 8919 | 4, 4, 4, 4 |
7984 | }; | 8920 | }; |
7985 | 8921 | ||
7986 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) | 8922 | static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) |
@@ -8138,9 +9074,7 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state) | |||
8138 | * net_device service functions | 9074 | * net_device service functions |
8139 | */ | 9075 | */ |
8140 | 9076 | ||
8141 | /* Called with rtnl_lock from vlan functions and also netif_tx_lock | 9077 | /* called with netif_tx_lock from set_multicast */ |
8142 | * from set_multicast. | ||
8143 | */ | ||
8144 | static void bnx2x_set_rx_mode(struct net_device *dev) | 9078 | static void bnx2x_set_rx_mode(struct net_device *dev) |
8145 | { | 9079 | { |
8146 | struct bnx2x *bp = netdev_priv(dev); | 9080 | struct bnx2x *bp = netdev_priv(dev); |
@@ -8314,7 +9248,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8314 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); | 9248 | ETH_TX_BD_ETH_ADDR_TYPE_SHIFT); |
8315 | tx_bd->general_data |= 1; /* header nbd */ | 9249 | tx_bd->general_data |= 1; /* header nbd */ |
8316 | 9250 | ||
8317 | /* remeber the first bd of the packet */ | 9251 | /* remember the first bd of the packet */ |
8318 | tx_buf->first_bd = bd_prod; | 9252 | tx_buf->first_bd = bd_prod; |
8319 | 9253 | ||
8320 | DP(NETIF_MSG_TX_QUEUED, | 9254 | DP(NETIF_MSG_TX_QUEUED, |
@@ -8334,7 +9268,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8334 | 9268 | ||
8335 | /* for now NS flag is not used in Linux */ | 9269 | /* for now NS flag is not used in Linux */ |
8336 | pbd->global_data = (len | | 9270 | pbd->global_data = (len | |
8337 | ((skb->protocol == ETH_P_8021Q) << | 9271 | ((skb->protocol == ntohs(ETH_P_8021Q)) << |
8338 | ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); | 9272 | ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT)); |
8339 | pbd->ip_hlen = ip_hdrlen(skb) / 2; | 9273 | pbd->ip_hlen = ip_hdrlen(skb) / 2; |
8340 | pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen); | 9274 | pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen); |
@@ -8343,7 +9277,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8343 | 9277 | ||
8344 | tx_bd->bd_flags.as_bitfield |= | 9278 | tx_bd->bd_flags.as_bitfield |= |
8345 | ETH_TX_BD_FLAGS_TCP_CSUM; | 9279 | ETH_TX_BD_FLAGS_TCP_CSUM; |
8346 | pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF; | 9280 | pbd->tcp_flags = pbd_tcp_flags(skb); |
8347 | pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2); | 9281 | pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2); |
8348 | pbd->tcp_pseudo_csum = swab16(th->check); | 9282 | pbd->tcp_pseudo_csum = swab16(th->check); |
8349 | 9283 | ||
@@ -8387,7 +9321,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8387 | 9321 | ||
8388 | if (skb_shinfo(skb)->gso_size && | 9322 | if (skb_shinfo(skb)->gso_size && |
8389 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { | 9323 | (skb->len > (bp->dev->mtu + ETH_HLEN))) { |
8390 | int hlen = 2 * le32_to_cpu(pbd->total_hlen); | 9324 | int hlen = 2 * le16_to_cpu(pbd->total_hlen); |
8391 | 9325 | ||
8392 | DP(NETIF_MSG_TX_QUEUED, | 9326 | DP(NETIF_MSG_TX_QUEUED, |
8393 | "TSO packet len %d hlen %d total len %d tso size %d\n", | 9327 | "TSO packet len %d hlen %d total len %d tso size %d\n", |
@@ -8427,7 +9361,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8427 | tx_bd->vlan = cpu_to_le16(pkt_prod); | 9361 | tx_bd->vlan = cpu_to_le16(pkt_prod); |
8428 | /* this marks the bd | 9362 | /* this marks the bd |
8429 | * as one that has no individual mapping | 9363 | * as one that has no individual mapping |
8430 | * the FW ignors this flag in a bd not maked start | 9364 | * the FW ignores this flag in a bd not marked start |
8431 | */ | 9365 | */ |
8432 | tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO; | 9366 | tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO; |
8433 | DP(NETIF_MSG_TX_QUEUED, | 9367 | DP(NETIF_MSG_TX_QUEUED, |
@@ -8504,9 +9438,11 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8504 | 9438 | ||
8505 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod); | 9439 | DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod); |
8506 | 9440 | ||
8507 | fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd); | 9441 | fp->hw_tx_prods->bds_prod = |
9442 | cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd); | ||
8508 | mb(); /* FW restriction: must not reorder writing nbd and packets */ | 9443 | mb(); /* FW restriction: must not reorder writing nbd and packets */ |
8509 | fp->hw_tx_prods->packets_prod += cpu_to_le32(1); | 9444 | fp->hw_tx_prods->packets_prod = |
9445 | cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1); | ||
8510 | DOORBELL(bp, fp_index, 0); | 9446 | DOORBELL(bp, fp_index, 0); |
8511 | 9447 | ||
8512 | mmiowb(); | 9448 | mmiowb(); |
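The producer updates just above are rewritten because the old code added a cpu_to_le16()/cpu_to_le32() value directly into a little-endian field, which only happens to work on little-endian hosts; the new code converts to CPU order, adds, and converts back. A minimal sketch of the difference, assuming stand-in byte-swap helpers rather than the kernel's __le16 accessors:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers: identity on little-endian,
     * a byte swap on big-endian hosts. */
    static uint16_t cpu_to_le16(uint16_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return (uint16_t)((v << 8) | (v >> 8));
    #else
            return v;
    #endif
    }
    static uint16_t le16_to_cpu(uint16_t v) { return cpu_to_le16(v); } /* symmetric */

    int main(void)
    {
            uint16_t bds_prod = cpu_to_le16(0x00ff);  /* device-visible LE field */
            uint16_t nbd = 3;

            /* Old pattern: arithmetic on the byte-swapped representation. */
            uint16_t wrong = bds_prod + cpu_to_le16(nbd);

            /* New pattern: convert to CPU order, add, convert back. */
            uint16_t right = cpu_to_le16(le16_to_cpu(bds_prod) + nbd);

            /* Identical on little-endian hosts, diverge on big-endian ones. */
            printf("wrong 0x%04x right 0x%04x\n", wrong, right);
            return 0;
    }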
@@ -8525,11 +9461,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
8525 | return NETDEV_TX_OK; | 9461 | return NETDEV_TX_OK; |
8526 | } | 9462 | } |
8527 | 9463 | ||
8528 | static struct net_device_stats *bnx2x_get_stats(struct net_device *dev) | ||
8529 | { | ||
8530 | return &dev->stats; | ||
8531 | } | ||
8532 | |||
8533 | /* Called with rtnl_lock */ | 9464 | /* Called with rtnl_lock */ |
8534 | static int bnx2x_open(struct net_device *dev) | 9465 | static int bnx2x_open(struct net_device *dev) |
8535 | { | 9466 | { |
@@ -8543,16 +9474,13 @@ static int bnx2x_open(struct net_device *dev) | |||
8543 | /* Called with rtnl_lock */ | 9474 | /* Called with rtnl_lock */ |
8544 | static int bnx2x_close(struct net_device *dev) | 9475 | static int bnx2x_close(struct net_device *dev) |
8545 | { | 9476 | { |
8546 | int rc; | ||
8547 | struct bnx2x *bp = netdev_priv(dev); | 9477 | struct bnx2x *bp = netdev_priv(dev); |
8548 | 9478 | ||
8549 | /* Unload the driver, release IRQs */ | 9479 | /* Unload the driver, release IRQs */ |
8550 | rc = bnx2x_nic_unload(bp, 1); | 9480 | bnx2x_nic_unload(bp, 1); |
8551 | if (rc) { | 9481 | |
8552 | BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc); | 9482 | if (!CHIP_REV_IS_SLOW(bp)) |
8553 | return rc; | 9483 | bnx2x_set_power_state(bp, PCI_D3hot); |
8554 | } | ||
8555 | bnx2x_set_power_state(bp, PCI_D3hot); | ||
8556 | 9484 | ||
8557 | return 0; | 9485 | return 0; |
8558 | } | 9486 | } |
@@ -8584,7 +9512,7 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
8584 | case SIOCGMIIPHY: | 9512 | case SIOCGMIIPHY: |
8585 | data->phy_id = bp->phy_addr; | 9513 | data->phy_id = bp->phy_addr; |
8586 | 9514 | ||
8587 | /* fallthru */ | 9515 | /* fallthrough */ |
8588 | case SIOCGMIIREG: { | 9516 | case SIOCGMIIREG: { |
8589 | u32 mii_regval; | 9517 | u32 mii_regval; |
8590 | 9518 | ||
@@ -8633,7 +9561,7 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu) | |||
8633 | return -EINVAL; | 9561 | return -EINVAL; |
8634 | 9562 | ||
8635 | /* This does not race with packet allocation | 9563 | /* This does not race with packet allocation |
8636 | * because the actuall alloc size is | 9564 | * because the actual alloc size is |
8637 | * only updated as part of load | 9565 | * only updated as part of load |
8638 | */ | 9566 | */ |
8639 | dev->mtu = new_mtu; | 9567 | dev->mtu = new_mtu; |
@@ -8666,7 +9594,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev, | |||
8666 | 9594 | ||
8667 | bp->vlgrp = vlgrp; | 9595 | bp->vlgrp = vlgrp; |
8668 | if (netif_running(dev)) | 9596 | if (netif_running(dev)) |
8669 | bnx2x_set_rx_mode(dev); | 9597 | bnx2x_set_client_config(bp); |
8670 | } | 9598 | } |
8671 | #endif | 9599 | #endif |
8672 | 9600 | ||
@@ -8695,14 +9623,18 @@ static void bnx2x_reset_task(struct work_struct *work) | |||
8695 | if (!netif_running(bp->dev)) | 9623 | if (!netif_running(bp->dev)) |
8696 | return; | 9624 | return; |
8697 | 9625 | ||
8698 | bp->in_reset_task = 1; | 9626 | rtnl_lock(); |
8699 | 9627 | ||
8700 | bnx2x_netif_stop(bp); | 9628 | if (bp->state != BNX2X_STATE_OPEN) { |
9629 | DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state); | ||
9630 | goto reset_task_exit; | ||
9631 | } | ||
8701 | 9632 | ||
8702 | bnx2x_nic_unload(bp, 0); | 9633 | bnx2x_nic_unload(bp, 0); |
8703 | bnx2x_nic_load(bp, 0); | 9634 | bnx2x_nic_load(bp, 0); |
8704 | 9635 | ||
8705 | bp->in_reset_task = 0; | 9636 | reset_task_exit: |
9637 | rtnl_unlock(); | ||
8706 | } | 9638 | } |
8707 | 9639 | ||
8708 | static int __devinit bnx2x_init_board(struct pci_dev *pdev, | 9640 | static int __devinit bnx2x_init_board(struct pci_dev *pdev, |
@@ -8783,8 +9715,6 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
8783 | 9715 | ||
8784 | spin_lock_init(&bp->phy_lock); | 9716 | spin_lock_init(&bp->phy_lock); |
8785 | 9717 | ||
8786 | bp->in_reset_task = 0; | ||
8787 | |||
8788 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); | 9718 | INIT_WORK(&bp->reset_task, bnx2x_reset_task); |
8789 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); | 9719 | INIT_WORK(&bp->sp_task, bnx2x_sp_task); |
8790 | 9720 | ||
@@ -8813,7 +9743,7 @@ static int __devinit bnx2x_init_board(struct pci_dev *pdev, | |||
8813 | bnx2x_get_hwinfo(bp); | 9743 | bnx2x_get_hwinfo(bp); |
8814 | 9744 | ||
8815 | if (CHIP_REV(bp) == CHIP_REV_FPGA) { | 9745 | if (CHIP_REV(bp) == CHIP_REV_FPGA) { |
8816 | printk(KERN_ERR PFX "FPGA detacted. MCP disabled," | 9746 | printk(KERN_ERR PFX "FPGA detected. MCP disabled," |
8817 | " will only init first device\n"); | 9747 | " will only init first device\n"); |
8818 | onefunc = 1; | 9748 | onefunc = 1; |
8819 | nomcp = 1; | 9749 | nomcp = 1; |
@@ -8882,14 +9812,32 @@ err_out: | |||
8882 | return rc; | 9812 | return rc; |
8883 | } | 9813 | } |
8884 | 9814 | ||
9815 | static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp) | ||
9816 | { | ||
9817 | u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); | ||
9818 | |||
9819 | val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT; | ||
9820 | return val; | ||
9821 | } | ||
9822 | |||
9823 | /* return value of 1=2.5GHz 2=5GHz */ | ||
9824 | static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp) | ||
9825 | { | ||
9826 | u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); | ||
9827 | |||
9828 | val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT; | ||
9829 | return val; | ||
9830 | } | ||
9831 | |||
8885 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, | 9832 | static int __devinit bnx2x_init_one(struct pci_dev *pdev, |
8886 | const struct pci_device_id *ent) | 9833 | const struct pci_device_id *ent) |
8887 | { | 9834 | { |
8888 | static int version_printed; | 9835 | static int version_printed; |
8889 | struct net_device *dev = NULL; | 9836 | struct net_device *dev = NULL; |
8890 | struct bnx2x *bp; | 9837 | struct bnx2x *bp; |
8891 | int rc, i; | 9838 | int rc; |
8892 | int port = PCI_FUNC(pdev->devfn); | 9839 | int port = PCI_FUNC(pdev->devfn); |
9840 | DECLARE_MAC_BUF(mac); | ||
8893 | 9841 | ||
8894 | if (version_printed++ == 0) | 9842 | if (version_printed++ == 0) |
8895 | printk(KERN_INFO "%s", version); | 9843 | printk(KERN_INFO "%s", version); |
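The new bnx2x_get_pcie_width()/bnx2x_get_pcie_speed() helpers simply mask and shift fields out of the PCICFG link-control word read via REG_RD(); the masks and shifts are the PCICFG_LINK_WIDTH/PCICFG_LINK_SPEED definitions added to bnx2x.h further down. A standalone sketch of the same extraction, with a made-up register value standing in for the hardware read:

    #include <stdint.h>
    #include <stdio.h>

    /* Field definitions as added to bnx2x.h */
    #define PCICFG_LINK_WIDTH        0x1f00000
    #define PCICFG_LINK_WIDTH_SHIFT  20
    #define PCICFG_LINK_SPEED        0xf0000
    #define PCICFG_LINK_SPEED_SHIFT  16

    int main(void)
    {
            /* Hypothetical link-control readout: x8 link, speed code 1 (2.5GHz) */
            uint32_t val = (8u << PCICFG_LINK_WIDTH_SHIFT) |
                           (1u << PCICFG_LINK_SPEED_SHIFT);

            unsigned width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
            unsigned speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;

            /* Mirrors the probe printk: speed code 2 means 5GHz (Gen2). */
            printf("PCI-E x%u %s\n", width, (speed == 2) ? "5GHz (Gen2)" : "2.5GHz");
            return 0;
    }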
@@ -8906,6 +9854,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8906 | 9854 | ||
8907 | if (port && onefunc) { | 9855 | if (port && onefunc) { |
8908 | printk(KERN_ERR PFX "second function disabled. exiting\n"); | 9856 | printk(KERN_ERR PFX "second function disabled. exiting\n"); |
9857 | free_netdev(dev); | ||
8909 | return 0; | 9858 | return 0; |
8910 | } | 9859 | } |
8911 | 9860 | ||
@@ -8918,7 +9867,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8918 | dev->hard_start_xmit = bnx2x_start_xmit; | 9867 | dev->hard_start_xmit = bnx2x_start_xmit; |
8919 | dev->watchdog_timeo = TX_TIMEOUT; | 9868 | dev->watchdog_timeo = TX_TIMEOUT; |
8920 | 9869 | ||
8921 | dev->get_stats = bnx2x_get_stats; | ||
8922 | dev->ethtool_ops = &bnx2x_ethtool_ops; | 9870 | dev->ethtool_ops = &bnx2x_ethtool_ops; |
8923 | dev->open = bnx2x_open; | 9871 | dev->open = bnx2x_open; |
8924 | dev->stop = bnx2x_close; | 9872 | dev->stop = bnx2x_close; |
@@ -8944,7 +9892,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8944 | 9892 | ||
8945 | rc = register_netdev(dev); | 9893 | rc = register_netdev(dev); |
8946 | if (rc) { | 9894 | if (rc) { |
8947 | printk(KERN_ERR PFX "Cannot register net device\n"); | 9895 | dev_err(&pdev->dev, "Cannot register net device\n"); |
8948 | if (bp->regview) | 9896 | if (bp->regview) |
8949 | iounmap(bp->regview); | 9897 | iounmap(bp->regview); |
8950 | if (bp->doorbells) | 9898 | if (bp->doorbells) |
@@ -8959,32 +9907,30 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, | |||
8959 | pci_set_drvdata(pdev, dev); | 9907 | pci_set_drvdata(pdev, dev); |
8960 | 9908 | ||
8961 | bp->name = board_info[ent->driver_data].name; | 9909 | bp->name = board_info[ent->driver_data].name; |
8962 | printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz " | 9910 | printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx," |
8963 | "found at mem %lx, IRQ %d, ", | 9911 | " IRQ %d, ", dev->name, bp->name, |
8964 | dev->name, bp->name, | ||
8965 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', | 9912 | ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', |
8966 | ((CHIP_ID(bp) & 0x0ff0) >> 4), | 9913 | ((CHIP_ID(bp) & 0x0ff0) >> 4), |
8967 | ((bp->flags & PCIX_FLAG) ? "-X" : ""), | 9914 | bnx2x_get_pcie_width(bp), |
8968 | ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"), | 9915 | (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", |
8969 | bp->bus_speed_mhz, | 9916 | dev->base_addr, bp->pdev->irq); |
8970 | dev->base_addr, | 9917 | printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); |
8971 | bp->pdev->irq); | ||
8972 | |||
8973 | printk("node addr "); | ||
8974 | for (i = 0; i < 6; i++) | ||
8975 | printk("%2.2x", dev->dev_addr[i]); | ||
8976 | printk("\n"); | ||
8977 | |||
8978 | return 0; | 9918 | return 0; |
8979 | } | 9919 | } |
8980 | 9920 | ||
8981 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | 9921 | static void __devexit bnx2x_remove_one(struct pci_dev *pdev) |
8982 | { | 9922 | { |
8983 | struct net_device *dev = pci_get_drvdata(pdev); | 9923 | struct net_device *dev = pci_get_drvdata(pdev); |
8984 | struct bnx2x *bp = netdev_priv(dev); | 9924 | struct bnx2x *bp; |
9925 | |||
9926 | if (!dev) { | ||
9927 | /* we get here if init_one() fails */ | ||
9928 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | ||
9929 | return; | ||
9930 | } | ||
9931 | |||
9932 | bp = netdev_priv(dev); | ||
8985 | 9933 | ||
8986 | flush_scheduled_work(); | ||
8987 | /*tasklet_kill(&bp->sp_task);*/ | ||
8988 | unregister_netdev(dev); | 9934 | unregister_netdev(dev); |
8989 | 9935 | ||
8990 | if (bp->regview) | 9936 | if (bp->regview) |
@@ -9002,34 +9948,43 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) | |||
9002 | static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) | 9948 | static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) |
9003 | { | 9949 | { |
9004 | struct net_device *dev = pci_get_drvdata(pdev); | 9950 | struct net_device *dev = pci_get_drvdata(pdev); |
9005 | struct bnx2x *bp = netdev_priv(dev); | 9951 | struct bnx2x *bp; |
9006 | int rc; | 9952 | |
9953 | if (!dev) | ||
9954 | return 0; | ||
9007 | 9955 | ||
9008 | if (!netif_running(dev)) | 9956 | if (!netif_running(dev)) |
9009 | return 0; | 9957 | return 0; |
9010 | 9958 | ||
9011 | rc = bnx2x_nic_unload(bp, 0); | 9959 | bp = netdev_priv(dev); |
9012 | if (!rc) | 9960 | |
9013 | return rc; | 9961 | bnx2x_nic_unload(bp, 0); |
9014 | 9962 | ||
9015 | netif_device_detach(dev); | 9963 | netif_device_detach(dev); |
9016 | pci_save_state(pdev); | ||
9017 | 9964 | ||
9965 | pci_save_state(pdev); | ||
9018 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); | 9966 | bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); |
9967 | |||
9019 | return 0; | 9968 | return 0; |
9020 | } | 9969 | } |
9021 | 9970 | ||
9022 | static int bnx2x_resume(struct pci_dev *pdev) | 9971 | static int bnx2x_resume(struct pci_dev *pdev) |
9023 | { | 9972 | { |
9024 | struct net_device *dev = pci_get_drvdata(pdev); | 9973 | struct net_device *dev = pci_get_drvdata(pdev); |
9025 | struct bnx2x *bp = netdev_priv(dev); | 9974 | struct bnx2x *bp; |
9026 | int rc; | 9975 | int rc; |
9027 | 9976 | ||
9977 | if (!dev) { | ||
9978 | printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n"); | ||
9979 | return -ENODEV; | ||
9980 | } | ||
9981 | |||
9028 | if (!netif_running(dev)) | 9982 | if (!netif_running(dev)) |
9029 | return 0; | 9983 | return 0; |
9030 | 9984 | ||
9031 | pci_restore_state(pdev); | 9985 | bp = netdev_priv(dev); |
9032 | 9986 | ||
9987 | pci_restore_state(pdev); | ||
9033 | bnx2x_set_power_state(bp, PCI_D0); | 9988 | bnx2x_set_power_state(bp, PCI_D0); |
9034 | netif_device_attach(dev); | 9989 | netif_device_attach(dev); |
9035 | 9990 | ||
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h index 4f7ae6f77452..4f0c0d31e7c1 100644 --- a/drivers/net/bnx2x.h +++ b/drivers/net/bnx2x.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x.h: Broadcom Everest network driver. | 1 | /* bnx2x.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -24,6 +24,8 @@ | |||
24 | #define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */ | 24 | #define BNX2X_MSG_STATS 0x20000 /* was: NETIF_MSG_TIMER */ |
25 | #define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */ | 25 | #define NETIF_MSG_NVM 0x40000 /* was: NETIF_MSG_HW */ |
26 | #define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */ | 26 | #define NETIF_MSG_DMAE 0x80000 /* was: NETIF_MSG_HW */ |
27 | #define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */ | ||
28 | #define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */ | ||
27 | 29 | ||
28 | #define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ | 30 | #define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */ |
29 | 31 | ||
@@ -40,6 +42,12 @@ | |||
40 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ | 42 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ |
41 | } while (0) | 43 | } while (0) |
42 | 44 | ||
45 | /* for logging (never masked) */ | ||
46 | #define BNX2X_LOG(__fmt, __args...) do { \ | ||
47 | printk(KERN_NOTICE "[%s:%d(%s)]" __fmt, __FUNCTION__, \ | ||
48 | __LINE__, bp->dev?(bp->dev->name):"?", ##__args); \ | ||
49 | } while (0) | ||
50 | |||
43 | /* before we have a dev->name use dev_info() */ | 51 | /* before we have a dev->name use dev_info() */ |
44 | #define BNX2X_DEV_INFO(__fmt, __args...) do { \ | 52 | #define BNX2X_DEV_INFO(__fmt, __args...) do { \ |
45 | if (bp->msglevel & NETIF_MSG_PROBE) \ | 53 | if (bp->msglevel & NETIF_MSG_PROBE) \ |
@@ -423,8 +431,6 @@ struct bnx2x_fastpath { | |||
423 | #define BNX2X_FP_STATE_OPEN 0xa0000 | 431 | #define BNX2X_FP_STATE_OPEN 0xa0000 |
424 | #define BNX2X_FP_STATE_HALTING 0xb0000 | 432 | #define BNX2X_FP_STATE_HALTING 0xb0000 |
425 | #define BNX2X_FP_STATE_HALTED 0xc0000 | 433 | #define BNX2X_FP_STATE_HALTED 0xc0000 |
426 | #define BNX2X_FP_STATE_DELETED 0xd0000 | ||
427 | #define BNX2X_FP_STATE_CLOSE_IRQ 0xe0000 | ||
428 | 434 | ||
429 | int index; | 435 | int index; |
430 | 436 | ||
@@ -505,7 +511,6 @@ struct bnx2x { | |||
505 | struct eth_spe *spq; | 511 | struct eth_spe *spq; |
506 | dma_addr_t spq_mapping; | 512 | dma_addr_t spq_mapping; |
507 | u16 spq_prod_idx; | 513 | u16 spq_prod_idx; |
508 | u16 dsb_sp_prod_idx; | ||
509 | struct eth_spe *spq_prod_bd; | 514 | struct eth_spe *spq_prod_bd; |
510 | struct eth_spe *spq_last_bd; | 515 | struct eth_spe *spq_last_bd; |
511 | u16 *dsb_sp_prod; | 516 | u16 *dsb_sp_prod; |
@@ -517,7 +522,7 @@ struct bnx2x { | |||
517 | */ | 522 | */ |
518 | u8 stat_pending; | 523 | u8 stat_pending; |
519 | 524 | ||
520 | /* End of fileds used in the performance code paths */ | 525 | /* End of fields used in the performance code paths */ |
521 | 526 | ||
522 | int panic; | 527 | int panic; |
523 | int msglevel; | 528 | int msglevel; |
@@ -540,8 +545,6 @@ struct bnx2x { | |||
540 | spinlock_t phy_lock; | 545 | spinlock_t phy_lock; |
541 | 546 | ||
542 | struct work_struct reset_task; | 547 | struct work_struct reset_task; |
543 | u16 in_reset_task; | ||
544 | |||
545 | struct work_struct sp_task; | 548 | struct work_struct sp_task; |
546 | 549 | ||
547 | struct timer_list timer; | 550 | struct timer_list timer; |
@@ -555,7 +558,6 @@ struct bnx2x { | |||
555 | #define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) | 558 | #define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) |
556 | 559 | ||
557 | #define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) | 560 | #define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) |
558 | #define CHIP_NUM_5710 0x57100000 | ||
559 | 561 | ||
560 | #define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) | 562 | #define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) |
561 | #define CHIP_REV_Ax 0x00000000 | 563 | #define CHIP_REV_Ax 0x00000000 |
@@ -574,7 +576,8 @@ struct bnx2x { | |||
574 | u32 fw_mb; | 576 | u32 fw_mb; |
575 | 577 | ||
576 | u32 hw_config; | 578 | u32 hw_config; |
577 | u32 serdes_config; | 579 | u32 board; |
580 | u32 serdes_config; | ||
578 | u32 lane_config; | 581 | u32 lane_config; |
579 | u32 ext_phy_config; | 582 | u32 ext_phy_config; |
580 | #define XGXS_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \ | 583 | #define XGXS_EXT_PHY_TYPE(bp) (bp->ext_phy_config & \ |
@@ -595,11 +598,11 @@ struct bnx2x { | |||
595 | u8 tx_lane_swap; | 598 | u8 tx_lane_swap; |
596 | 599 | ||
597 | u8 link_up; | 600 | u8 link_up; |
601 | u8 phy_link_up; | ||
598 | 602 | ||
599 | u32 supported; | 603 | u32 supported; |
600 | /* link settings - missing defines */ | 604 | /* link settings - missing defines */ |
601 | #define SUPPORTED_2500baseT_Full (1 << 15) | 605 | #define SUPPORTED_2500baseT_Full (1 << 15) |
602 | #define SUPPORTED_CX4 (1 << 16) | ||
603 | 606 | ||
604 | u32 phy_flags; | 607 | u32 phy_flags; |
605 | /*#define PHY_SERDES_FLAG 0x1*/ | 608 | /*#define PHY_SERDES_FLAG 0x1*/ |
@@ -644,16 +647,9 @@ struct bnx2x { | |||
644 | #define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH | 647 | #define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH |
645 | #define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE | 648 | #define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE |
646 | 649 | ||
647 | u32 pause_mode; | ||
648 | #define PAUSE_NONE 0 | ||
649 | #define PAUSE_SYMMETRIC 1 | ||
650 | #define PAUSE_ASYMMETRIC 2 | ||
651 | #define PAUSE_BOTH 3 | ||
652 | |||
653 | u32 advertising; | 650 | u32 advertising; |
654 | /* link settings - missing defines */ | 651 | /* link settings - missing defines */ |
655 | #define ADVERTISED_2500baseT_Full (1 << 15) | 652 | #define ADVERTISED_2500baseT_Full (1 << 15) |
656 | #define ADVERTISED_CX4 (1 << 16) | ||
657 | 653 | ||
658 | u32 link_status; | 654 | u32 link_status; |
659 | u32 line_speed; | 655 | u32 line_speed; |
@@ -667,6 +663,8 @@ struct bnx2x { | |||
667 | #define NVRAM_TIMEOUT_COUNT 30000 | 663 | #define NVRAM_TIMEOUT_COUNT 30000 |
668 | #define NVRAM_PAGE_SIZE 256 | 664 | #define NVRAM_PAGE_SIZE 256 |
669 | 665 | ||
666 | u8 wol; | ||
667 | |||
670 | int rx_ring_size; | 668 | int rx_ring_size; |
671 | 669 | ||
672 | u16 tx_quick_cons_trip_int; | 670 | u16 tx_quick_cons_trip_int; |
@@ -718,9 +716,6 @@ struct bnx2x { | |||
718 | #endif | 716 | #endif |
719 | 717 | ||
720 | char *name; | 718 | char *name; |
721 | u16 bus_speed_mhz; | ||
722 | u8 wol; | ||
723 | u8 pad; | ||
724 | 719 | ||
725 | /* used to synchronize stats collecting */ | 720 | /* used to synchronize stats collecting */ |
726 | int stats_state; | 721 | int stats_state; |
@@ -856,8 +851,8 @@ struct bnx2x { | |||
856 | #define MAX_SPQ_PENDING 8 | 851 | #define MAX_SPQ_PENDING 8 |
857 | 852 | ||
858 | 853 | ||
859 | #define BNX2X_NUM_STATS 31 | 854 | #define BNX2X_NUM_STATS 34 |
860 | #define BNX2X_NUM_TESTS 2 | 855 | #define BNX2X_NUM_TESTS 1 |
861 | 856 | ||
862 | 857 | ||
863 | #define DPM_TRIGER_TYPE 0x40 | 858 | #define DPM_TRIGER_TYPE 0x40 |
@@ -867,6 +862,15 @@ struct bnx2x { | |||
867 | DPM_TRIGER_TYPE); \ | 862 | DPM_TRIGER_TYPE); \ |
868 | } while (0) | 863 | } while (0) |
869 | 864 | ||
865 | /* PCIE link and speed */ | ||
866 | #define PCICFG_LINK_WIDTH 0x1f00000 | ||
867 | #define PCICFG_LINK_WIDTH_SHIFT 20 | ||
868 | #define PCICFG_LINK_SPEED 0xf0000 | ||
869 | #define PCICFG_LINK_SPEED_SHIFT 16 | ||
870 | |||
871 | #define BMAC_CONTROL_RX_ENABLE 2 | ||
872 | |||
873 | #define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff) | ||
870 | 874 | ||
871 | /* stuff added to make the code fit 80Col */ | 875 | /* stuff added to make the code fit 80Col */ |
872 | 876 | ||
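The new pbd_tcp_flags() macro hands the firmware the raw TCP flag byte: tcp_flag_word() is the 32-bit TCP header word carrying data-offset, flags and window, so after ntohl() the eight flag bits (CWR..FIN) sit in bits 23..16. A user-space sketch of the same extraction from a raw header buffer, assuming the standard 20-byte header layout rather than the kernel's struct tcphdr:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Extract the 8 TCP flag bits (CWR..FIN) from a raw TCP header,
     * mirroring pbd_tcp_flags(): (ntohl(flag word) >> 16) & 0xff. */
    static uint8_t tcp_flags_from_header(const uint8_t *tcp_hdr)
    {
            uint32_t flag_word;

            /* Bytes 12..15 of the TCP header: doff/reserved, flags, window */
            memcpy(&flag_word, tcp_hdr + 12, sizeof(flag_word));
            return (ntohl(flag_word) >> 16) & 0xff;
    }

    int main(void)
    {
            uint8_t hdr[20] = {0};

            hdr[12] = 0x50;  /* data offset = 5 (20-byte header) */
            hdr[13] = 0x12;  /* SYN|ACK */
            printf("flags 0x%02x\n", tcp_flags_from_header(hdr));  /* 0x12 */
            return 0;
    }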
@@ -939,13 +943,13 @@ struct bnx2x { | |||
939 | #define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD | 943 | #define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD |
940 | #define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD | 944 | #define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD |
941 | 945 | ||
942 | #define NIG_STATUS_INTERRUPT_XGXS0_LINK10G \ | 946 | #define NIG_STATUS_XGXS0_LINK10G \ |
943 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G | 947 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G |
944 | #define NIG_XGXS0_LINK_STATUS \ | 948 | #define NIG_STATUS_XGXS0_LINK_STATUS \ |
945 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS | 949 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS |
946 | #define NIG_XGXS0_LINK_STATUS_SIZE \ | 950 | #define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \ |
947 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE | 951 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE |
948 | #define NIG_SERDES0_LINK_STATUS \ | 952 | #define NIG_STATUS_SERDES0_LINK_STATUS \ |
949 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS | 953 | NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS |
950 | #define NIG_MASK_MI_INT \ | 954 | #define NIG_MASK_MI_INT \ |
951 | NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT | 955 | NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT |
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h index 62a6eb81025a..3b968904ca65 100644 --- a/drivers/net/bnx2x_fw_defs.h +++ b/drivers/net/bnx2x_fw_defs.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x_fw_defs.h: Broadcom Everest network driver. | 1 | /* bnx2x_fw_defs.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h index 6fd959c34d1f..b21075ccb52e 100644 --- a/drivers/net/bnx2x_hsi.h +++ b/drivers/net/bnx2x_hsi.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x_hsi.h: Broadcom Everest network driver. | 1 | /* bnx2x_hsi.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -8,169 +8,9 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | 10 | ||
11 | #define FUNC_0 0 | 11 | #define PORT_0 0 |
12 | #define FUNC_1 1 | 12 | #define PORT_1 1 |
13 | #define FUNC_MAX 2 | 13 | #define PORT_MAX 2 |
14 | |||
15 | |||
16 | /* This value (in milliseconds) determines the frequency of the driver | ||
17 | * issuing the PULSE message code. The firmware monitors this periodic | ||
18 | * pulse to determine when to switch to an OS-absent mode. */ | ||
19 | #define DRV_PULSE_PERIOD_MS 250 | ||
20 | |||
21 | /* This value (in milliseconds) determines how long the driver should | ||
22 | * wait for an acknowledgement from the firmware before timing out. Once | ||
23 | * the firmware has timed out, the driver will assume there is no firmware | ||
24 | * running and there won't be any firmware-driver synchronization during a | ||
25 | * driver reset. */ | ||
26 | #define FW_ACK_TIME_OUT_MS 5000 | ||
27 | |||
28 | #define FW_ACK_POLL_TIME_MS 1 | ||
29 | |||
30 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) | ||
31 | |||
32 | /* LED Blink rate that will achieve ~15.9Hz */ | ||
33 | #define LED_BLINK_RATE_VAL 480 | ||
34 | |||
35 | /**************************************************************************** | ||
36 | * Driver <-> FW Mailbox * | ||
37 | ****************************************************************************/ | ||
38 | struct drv_fw_mb { | ||
39 | u32 drv_mb_header; | ||
40 | #define DRV_MSG_CODE_MASK 0xffff0000 | ||
41 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 | ||
42 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 | ||
43 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 | ||
44 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 | ||
45 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 | ||
46 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 | ||
47 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 | ||
48 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 | ||
49 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 | ||
50 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 | ||
51 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 | ||
52 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 | ||
53 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 | ||
54 | |||
55 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
56 | |||
57 | u32 drv_mb_param; | ||
58 | |||
59 | u32 fw_mb_header; | ||
60 | #define FW_MSG_CODE_MASK 0xffff0000 | ||
61 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x11000000 | ||
62 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x12000000 | ||
63 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x13000000 | ||
64 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x14000000 | ||
65 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x21000000 | ||
66 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x22000000 | ||
67 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x23000000 | ||
68 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50000000 | ||
69 | #define FW_MSG_CODE_DIAG_REFUSE 0x51000000 | ||
70 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70000000 | ||
71 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x71000000 | ||
72 | #define FW_MSG_CODE_GET_KEY_DONE 0x80000000 | ||
73 | #define FW_MSG_CODE_NO_KEY 0x8f000000 | ||
74 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x8f800000 | ||
75 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90000000 | ||
76 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x91000000 | ||
77 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x92000000 | ||
78 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x93000000 | ||
79 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x94000000 | ||
80 | |||
81 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
82 | |||
83 | u32 fw_mb_param; | ||
84 | |||
85 | u32 link_status; | ||
86 | /* Driver should update this field on any link change event */ | ||
87 | |||
88 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 | ||
89 | #define LINK_STATUS_LINK_UP 0x00000001 | ||
90 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E | ||
91 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) | ||
92 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) | ||
93 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) | ||
94 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) | ||
95 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) | ||
96 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) | ||
97 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) | ||
98 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) | ||
99 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) | ||
100 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) | ||
101 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) | ||
102 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) | ||
103 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) | ||
104 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) | ||
105 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1) | ||
106 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1) | ||
107 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1) | ||
108 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1) | ||
109 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1) | ||
110 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1) | ||
111 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1) | ||
112 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1) | ||
113 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1) | ||
114 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1) | ||
115 | |||
116 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 | ||
117 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 | ||
118 | |||
119 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 | ||
120 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 | ||
121 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 | ||
122 | |||
123 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | ||
124 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 | ||
125 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 | ||
126 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | ||
127 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 | ||
128 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 | ||
129 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 | ||
130 | |||
131 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 | ||
132 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 | ||
133 | |||
134 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 | ||
135 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 | ||
136 | |||
137 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 | ||
138 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | ||
139 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) | ||
140 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) | ||
141 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | ||
142 | |||
143 | #define LINK_STATUS_SERDES_LINK 0x00100000 | ||
144 | |||
145 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 | ||
146 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 | ||
147 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 | ||
148 | #define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000 | ||
149 | #define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000 | ||
150 | #define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000 | ||
151 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 | ||
152 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 | ||
153 | |||
154 | u32 drv_pulse_mb; | ||
155 | #define DRV_PULSE_SEQ_MASK 0x00007fff | ||
156 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 | ||
157 | /* The system time is in the format of | ||
158 | * (year-2001)*12*32 + month*32 + day. */ | ||
159 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | ||
160 | /* Indicate to the firmware not to go into the | ||
161 | * OS-absent when it is not getting driver pulse. | ||
162 | * This is used for debugging as well for PXE(MBA). */ | ||
163 | |||
164 | u32 mcp_pulse_mb; | ||
165 | #define MCP_PULSE_SEQ_MASK 0x00007fff | ||
166 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 | ||
167 | /* Indicates to the driver not to assert due to lack | ||
168 | * of MCP response */ | ||
169 | #define MCP_EVENT_MASK 0xffff0000 | ||
170 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 | ||
171 | |||
172 | }; | ||
173 | |||
174 | 14 | ||
175 | /**************************************************************************** | 15 | /**************************************************************************** |
176 | * Shared HW configuration * | 16 | * Shared HW configuration * |
@@ -249,7 +89,7 @@ struct shared_hw_cfg { /* NVRAM Offset */ | |||
249 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 | 89 | #define SHARED_HW_CFG_SMBUS_TIMING_100KHZ 0x00000000 |
250 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 | 90 | #define SHARED_HW_CFG_SMBUS_TIMING_400KHZ 0x00001000 |
251 | 91 | ||
252 | #define SHARED_HW_CFG_HIDE_FUNC1 0x00002000 | 92 | #define SHARED_HW_CFG_HIDE_PORT1 0x00002000 |
253 | 93 | ||
254 | u32 power_dissipated; /* 0x11c */ | 94 | u32 power_dissipated; /* 0x11c */ |
255 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 | 95 | #define SHARED_HW_CFG_POWER_DIS_CMN_MASK 0xff000000 |
@@ -290,6 +130,8 @@ struct shared_hw_cfg { /* NVRAM Offset */ | |||
290 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1015G 0x00000006 | 130 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1015G 0x00000006 |
291 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1020G 0x00000007 | 131 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1020G 0x00000007 |
292 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008 | 132 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G 0x00000008 |
133 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G 0x00000009 | ||
134 | #define SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G 0x0000000a | ||
293 | 135 | ||
294 | #define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000 | 136 | #define SHARED_HW_CFG_BOARD_VER_MASK 0xffff0000 |
295 | #define SHARED_HW_CFG_BOARD_VER_SHIFT 16 | 137 | #define SHARED_HW_CFG_BOARD_VER_SHIFT 16 |
@@ -304,13 +146,12 @@ struct shared_hw_cfg { /* NVRAM Offset */ | |||
304 | 146 | ||
305 | }; | 147 | }; |
306 | 148 | ||
149 | |||
307 | /**************************************************************************** | 150 | /**************************************************************************** |
308 | * Port HW configuration * | 151 | * Port HW configuration * |
309 | ****************************************************************************/ | 152 | ****************************************************************************/ |
310 | struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */ | 153 | struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ |
311 | 154 | ||
312 | /* Fields below are port specific (in anticipation of dual port | ||
313 | devices */ | ||
314 | u32 pci_id; | 155 | u32 pci_id; |
315 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 | 156 | #define PORT_HW_CFG_PCI_VENDOR_ID_MASK 0xffff0000 |
316 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff | 157 | #define PORT_HW_CFG_PCI_DEVICE_ID_MASK 0x0000ffff |
@@ -420,6 +261,8 @@ struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */ | |||
420 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 | 261 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706 0x00000500 |
421 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8276 0x00000600 | 262 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8276 0x00000600 |
422 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 | 263 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481 0x00000700 |
264 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 | ||
265 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 | ||
423 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 | 266 | #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 |
424 | 267 | ||
425 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff | 268 | #define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK 0x000000ff |
@@ -462,11 +305,13 @@ struct port_hw_cfg { /* function 0: 0x12c-0x2bb, function 1: 0x2bc-0x44b */ | |||
462 | 305 | ||
463 | }; | 306 | }; |
464 | 307 | ||
308 | |||
465 | /**************************************************************************** | 309 | /**************************************************************************** |
466 | * Shared Feature configuration * | 310 | * Shared Feature configuration * |
467 | ****************************************************************************/ | 311 | ****************************************************************************/ |
468 | struct shared_feat_cfg { /* NVRAM Offset */ | 312 | struct shared_feat_cfg { /* NVRAM Offset */ |
469 | u32 bmc_common; /* 0x450 */ | 313 | |
314 | u32 config; /* 0x450 */ | ||
470 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 | 315 | #define SHARED_FEATURE_BMC_ECHO_MODE_EN 0x00000001 |
471 | 316 | ||
472 | }; | 317 | }; |
@@ -475,7 +320,8 @@ struct shared_feat_cfg { /* NVRAM Offset */ | |||
475 | /**************************************************************************** | 320 | /**************************************************************************** |
476 | * Port Feature configuration * | 321 | * Port Feature configuration * |
477 | ****************************************************************************/ | 322 | ****************************************************************************/ |
478 | struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */ | 323 | struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */ |
324 | |||
479 | u32 config; | 325 | u32 config; |
480 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f | 326 | #define PORT_FEATURE_BAR1_SIZE_MASK 0x0000000f |
481 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 | 327 | #define PORT_FEATURE_BAR1_SIZE_SHIFT 0 |
@@ -609,8 +455,7 @@ struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */ | |||
609 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe | 455 | #define PORT_FEATURE_SMBUS_ADDR_MASK 0x000000fe |
610 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 | 456 | #define PORT_FEATURE_SMBUS_ADDR_SHIFT 1 |
611 | 457 | ||
612 | u32 iscsib_boot_cfg; | 458 | u32 reserved1; |
613 | #define PORT_FEATURE_ISCSIB_SKIP_TARGET_BOOT 0x00000001 | ||
614 | 459 | ||
615 | u32 link_config; /* Used as HW defaults for the driver */ | 460 | u32 link_config; /* Used as HW defaults for the driver */ |
616 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 | 461 | #define PORT_FEATURE_CONNECTED_SWITCH_MASK 0x03000000 |
@@ -657,20 +502,201 @@ struct port_feat_cfg { /* function 0: 0x454-0x4c7, function 1: 0x4c8-0x53b */ | |||
657 | }; | 502 | }; |
658 | 503 | ||
659 | 504 | ||
505 | /***************************************************************************** | ||
506 | * Device Information * | ||
507 | *****************************************************************************/ | ||
508 | struct dev_info { /* size */ | ||
509 | |||
510 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ | ||
511 | |||
512 | struct shared_hw_cfg shared_hw_config; /* 40 */ | ||
513 | |||
514 | struct port_hw_cfg port_hw_config[PORT_MAX]; /* 400*2=800 */ | ||
515 | |||
516 | struct shared_feat_cfg shared_feature_config; /* 4 */ | ||
517 | |||
518 | struct port_feat_cfg port_feature_config[PORT_MAX]; /* 116*2=232 */ | ||
519 | |||
520 | }; | ||
521 | |||
522 | |||
523 | #define FUNC_0 0 | ||
524 | #define FUNC_1 1 | ||
525 | #define E1_FUNC_MAX 2 | ||
526 | #define FUNC_MAX E1_FUNC_MAX | ||
527 | |||
528 | |||
529 | /* This value (in milliseconds) determines the frequency of the driver | ||
530 | * issuing the PULSE message code. The firmware monitors this periodic | ||
531 | * pulse to determine when to switch to an OS-absent mode. */ | ||
532 | #define DRV_PULSE_PERIOD_MS 250 | ||
533 | |||
534 | /* This value (in milliseconds) determines how long the driver should | ||
535 | * wait for an acknowledgement from the firmware before timing out. Once | ||
536 | * the firmware has timed out, the driver will assume there is no firmware | ||
537 | * running and there won't be any firmware-driver synchronization during a | ||
538 | * driver reset. */ | ||
539 | #define FW_ACK_TIME_OUT_MS 5000 | ||
540 | |||
541 | #define FW_ACK_POLL_TIME_MS 1 | ||
542 | |||
543 | #define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) | ||
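The two timeout constants above define a 5000 ms acknowledgement budget consumed in 1 ms polls, so FW_ACK_NUM_OF_POLL works out to 5000 iterations. A minimal sketch of how a driver might spend that budget while waiting for the firmware to echo a sequence number; SHMEM_RD, struct bnx2x and the func_mb path are assumptions for illustration, not the driver's actual helpers:

/* Illustrative only: poll the firmware mailbox once per FW_ACK_POLL_TIME_MS
 * until the expected sequence number is echoed, giving up after
 * FW_ACK_TIME_OUT_MS in total. */
static u32 example_wait_for_fw_ack(struct bnx2x *bp, u32 seq)
{
	u32 rc = 0;
	int cnt;

	for (cnt = 0; cnt < FW_ACK_NUM_OF_POLL; cnt++) {
		msleep(FW_ACK_POLL_TIME_MS);
		rc = SHMEM_RD(bp, func_mb[0].fw_mb_header);	/* assumed accessor */
		if ((rc & FW_MSG_SEQ_NUMBER_MASK) == seq)
			return rc;
	}
	return 0;	/* timed out: assume no firmware is running */
}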
544 | |||
545 | /* LED Blink rate that will achieve ~15.9Hz */ | ||
546 | #define LED_BLINK_RATE_VAL 480 | ||
547 | |||
660 | /**************************************************************************** | 548 | /**************************************************************************** |
661 | * Device Information * | 549 | * Driver <-> FW Mailbox * |
662 | ****************************************************************************/ | 550 | ****************************************************************************/ |
663 | struct dev_info { /* size */ | 551 | struct drv_port_mb { |
552 | |||
553 | u32 link_status; | ||
554 | /* Driver should update this field on any link change event */ | ||
555 | |||
556 | #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 | ||
557 | #define LINK_STATUS_LINK_UP 0x00000001 | ||
558 | #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E | ||
559 | #define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE (0<<1) | ||
560 | #define LINK_STATUS_SPEED_AND_DUPLEX_10THD (1<<1) | ||
561 | #define LINK_STATUS_SPEED_AND_DUPLEX_10TFD (2<<1) | ||
562 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD (3<<1) | ||
563 | #define LINK_STATUS_SPEED_AND_DUPLEX_100T4 (4<<1) | ||
564 | #define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD (5<<1) | ||
565 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (6<<1) | ||
566 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (7<<1) | ||
567 | #define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD (7<<1) | ||
568 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500THD (8<<1) | ||
569 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD (9<<1) | ||
570 | #define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD (9<<1) | ||
571 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD (10<<1) | ||
572 | #define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD (10<<1) | ||
573 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GTFD (11<<1) | ||
574 | #define LINK_STATUS_SPEED_AND_DUPLEX_12GXFD (11<<1) | ||
575 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD (12<<1) | ||
576 | #define LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD (12<<1) | ||
577 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GTFD (13<<1) | ||
578 | #define LINK_STATUS_SPEED_AND_DUPLEX_13GXFD (13<<1) | ||
579 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GTFD (14<<1) | ||
580 | #define LINK_STATUS_SPEED_AND_DUPLEX_15GXFD (14<<1) | ||
581 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GTFD (15<<1) | ||
582 | #define LINK_STATUS_SPEED_AND_DUPLEX_16GXFD (15<<1) | ||
583 | |||
584 | #define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK 0x00000020 | ||
585 | #define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 | ||
586 | |||
587 | #define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 | ||
588 | #define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK 0x00000080 | ||
589 | #define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 | ||
590 | |||
591 | #define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 | ||
592 | #define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 | ||
593 | #define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE 0x00000800 | ||
594 | #define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE 0x00001000 | ||
595 | #define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE 0x00002000 | ||
596 | #define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE 0x00004000 | ||
597 | #define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE 0x00008000 | ||
598 | |||
599 | #define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK 0x00010000 | ||
600 | #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00010000 | ||
601 | |||
602 | #define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK 0x00020000 | ||
603 | #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00020000 | ||
604 | |||
605 | #define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 | ||
606 | #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0<<18) | ||
607 | #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1<<18) | ||
608 | #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2<<18) | ||
609 | #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3<<18) | ||
610 | |||
611 | #define LINK_STATUS_SERDES_LINK 0x00100000 | ||
612 | |||
613 | #define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE 0x00200000 | ||
614 | #define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE 0x00400000 | ||
615 | #define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE 0x00800000 | ||
616 | #define LINK_STATUS_LINK_PARTNER_12GXFD_CAPABLE 0x01000000 | ||
617 | #define LINK_STATUS_LINK_PARTNER_12_5GXFD_CAPABLE 0x02000000 | ||
618 | #define LINK_STATUS_LINK_PARTNER_13GXFD_CAPABLE 0x04000000 | ||
619 | #define LINK_STATUS_LINK_PARTNER_15GXFD_CAPABLE 0x08000000 | ||
620 | #define LINK_STATUS_LINK_PARTNER_16GXFD_CAPABLE 0x10000000 | ||
664 | 621 | ||
665 | u32 bc_rev; /* 8 bits each: major, minor, build */ /* 4 */ | 622 | u32 reserved[3]; |
666 | 623 | ||
667 | struct shared_hw_cfg shared_hw_config; /* 40 */ | 624 | }; |
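The speed/duplex field above packs an enumerated value into bits [4:1] of link_status, which is why every definition is shifted left by one. A minimal sketch of decoding it into a link speed in Mbps (the helper name and the subset of speeds handled are illustrative only):

/* Illustrative decode of the LINK_STATUS speed/duplex field. */
static int example_link_speed_mbps(u32 link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return 0;

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_10THD:
	case LINK_STATUS_SPEED_AND_DUPLEX_10TFD:
		return 10;
	case LINK_STATUS_SPEED_AND_DUPLEX_100TXHD:
	case LINK_STATUS_SPEED_AND_DUPLEX_100TXFD:
		return 100;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:	/* same encoding as 1000XFD */
		return 1000;
	case LINK_STATUS_SPEED_AND_DUPLEX_10GTFD:	/* same encoding as 10GXFD */
		return 10000;
	default:
		return -1;	/* remaining speeds omitted from this sketch */
	}
}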
625 | |||
626 | |||
627 | struct drv_func_mb { | ||
628 | |||
629 | u32 drv_mb_header; | ||
630 | #define DRV_MSG_CODE_MASK 0xffff0000 | ||
631 | #define DRV_MSG_CODE_LOAD_REQ 0x10000000 | ||
632 | #define DRV_MSG_CODE_LOAD_DONE 0x11000000 | ||
633 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN 0x20000000 | ||
634 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS 0x20010000 | ||
635 | #define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP 0x20020000 | ||
636 | #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 | ||
637 | #define DRV_MSG_CODE_DIAG_ENTER_REQ 0x50000000 | ||
638 | #define DRV_MSG_CODE_DIAG_EXIT_REQ 0x60000000 | ||
639 | #define DRV_MSG_CODE_VALIDATE_KEY 0x70000000 | ||
640 | #define DRV_MSG_CODE_GET_CURR_KEY 0x80000000 | ||
641 | #define DRV_MSG_CODE_GET_UPGRADE_KEY 0x81000000 | ||
642 | #define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 | ||
643 | #define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 | ||
644 | |||
645 | #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
646 | |||
647 | u32 drv_mb_param; | ||
648 | |||
649 | u32 fw_mb_header; | ||
650 | #define FW_MSG_CODE_MASK 0xffff0000 | ||
651 | #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 | ||
652 | #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 | ||
653 | #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 | ||
654 | #define FW_MSG_CODE_DRV_LOAD_REFUSED 0x10200000 | ||
655 | #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 | ||
656 | #define FW_MSG_CODE_DRV_UNLOAD_COMMON 0x20100000 | ||
657 | #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20110000 | ||
658 | #define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20120000 | ||
659 | #define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 | ||
660 | #define FW_MSG_CODE_DIAG_ENTER_DONE 0x50100000 | ||
661 | #define FW_MSG_CODE_DIAG_REFUSE 0x50200000 | ||
662 | #define FW_MSG_CODE_DIAG_EXIT_DONE 0x60100000 | ||
663 | #define FW_MSG_CODE_VALIDATE_KEY_SUCCESS 0x70100000 | ||
664 | #define FW_MSG_CODE_VALIDATE_KEY_FAILURE 0x70200000 | ||
665 | #define FW_MSG_CODE_GET_KEY_DONE 0x80100000 | ||
666 | #define FW_MSG_CODE_NO_KEY 0x80f00000 | ||
667 | #define FW_MSG_CODE_LIC_INFO_NOT_READY 0x80f80000 | ||
668 | #define FW_MSG_CODE_L2B_PRAM_LOADED 0x90100000 | ||
669 | #define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE 0x90210000 | ||
670 | #define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE 0x90220000 | ||
671 | #define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE 0x90230000 | ||
672 | #define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE 0x90240000 | ||
673 | |||
674 | #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff | ||
675 | |||
676 | u32 fw_mb_param; | ||
677 | |||
678 | u32 drv_pulse_mb; | ||
679 | #define DRV_PULSE_SEQ_MASK 0x00007fff | ||
680 | #define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 | ||
681 | /* The system time is in the format of | ||
682 | * (year-2001)*12*32 + month*32 + day. */ | ||
683 | #define DRV_PULSE_ALWAYS_ALIVE 0x00008000 | ||
684 | /* Indicate to the firmware not to go into the | ||
685 | * OS-absent when it is not getting driver pulse. | ||
686 | * This is used for debugging as well for PXE(MBA). */ | ||
668 | 687 | ||
669 | struct port_hw_cfg port_hw_config[FUNC_MAX]; /* 400*2=800 */ | 688 | u32 mcp_pulse_mb; |
689 | #define MCP_PULSE_SEQ_MASK 0x00007fff | ||
690 | #define MCP_PULSE_ALWAYS_ALIVE 0x00008000 | ||
691 | /* Indicates to the driver not to assert due to lack | ||
692 | * of MCP response */ | ||
693 | #define MCP_EVENT_MASK 0xffff0000 | ||
694 | #define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 | ||
670 | 695 | ||
671 | struct shared_feat_cfg shared_feature_config; /* 4 */ | 696 | u32 iscsi_boot_signature; |
697 | u32 iscsi_boot_block_offset; | ||
672 | 698 | ||
673 | struct port_feat_cfg port_feature_config[FUNC_MAX];/* 116*2=232 */ | 699 | u32 reserved[3]; |
674 | 700 | ||
675 | }; | 701 | }; |
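The system-time format documented for drv_pulse_mb above is a simple packed date: for example 14 March 2008 encodes as (2008-2001)*12*32 + 3*32 + 14 = 2798. A sketch of composing the pulse word from that encoding and a sequence counter (illustrative only; 1-based month and day are assumed):

/* Illustrative: build a drv_pulse_mb value from a date and a sequence
 * counter, per the (year-2001)*12*32 + month*32 + day encoding. */
static u32 example_drv_pulse(u16 year, u8 month, u8 day, u16 seq)
{
	u32 system_time = (year - 2001) * 12 * 32 + month * 32 + day;

	return ((system_time << 16) & DRV_PULSE_SYSTEM_TIME_MASK) |
	       (seq & DRV_PULSE_SEQ_MASK);
}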
676 | 702 | ||
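Taken together, drv_mb_header and fw_mb_header implement a request/acknowledge handshake: the driver posts a DRV_MSG_CODE_* value OR'd with an incrementing sequence number, then waits (for instance with a poll loop like the one sketched after FW_ACK_NUM_OF_POLL above) until the firmware echoes that sequence number alongside an FW_MSG_CODE_* response. A hedged sketch of the two ends of that exchange; SHMEM_WR and the func_mb path are assumptions for illustration:

/* Illustrative: post a load request on this function's mailbox. */
static void example_post_load_req(struct bnx2x *bp, u32 seq)
{
	SHMEM_WR(bp, func_mb[0].drv_mb_header,
		 DRV_MSG_CODE_LOAD_REQ | (seq & DRV_MSG_SEQ_NUMBER_MASK));
}

/* Illustrative: a LOAD_COMMON reply would mean this instance is the first
 * one up and should also initialise the chip-wide resources; LOAD_PORT and
 * LOAD_FUNCTION would mean another instance already has. */
static int example_must_init_common(u32 fw_resp)
{
	return (fw_resp & FW_MSG_CODE_MASK) == FW_MSG_CODE_DRV_LOAD_COMMON;
}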
@@ -678,9 +704,8 @@ struct dev_info { /* size */ | |||
678 | /**************************************************************************** | 704 | /**************************************************************************** |
679 | * Management firmware state * | 705 | * Management firmware state * |
680 | ****************************************************************************/ | 706 | ****************************************************************************/ |
681 | /* Allocate 320 bytes for management firmware: still not known exactly | 707 | /* Allocate 440 bytes for management firmware */ |
682 | * how much IMD needs. */ | 708 | #define MGMTFW_STATE_WORD_SIZE 110 |
683 | #define MGMTFW_STATE_WORD_SIZE 80 | ||
684 | 709 | ||
685 | struct mgmtfw_state { | 710 | struct mgmtfw_state { |
686 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; | 711 | u32 opaque[MGMTFW_STATE_WORD_SIZE]; |
@@ -691,31 +716,40 @@ struct mgmtfw_state { | |||
691 | * Shared Memory Region * | 716 | * Shared Memory Region * |
692 | ****************************************************************************/ | 717 | ****************************************************************************/ |
693 | struct shmem_region { /* SharedMem Offset (size) */ | 718 | struct shmem_region { /* SharedMem Offset (size) */ |
694 | u32 validity_map[FUNC_MAX]; /* 0x0 (4 * 2 = 0x8) */ | 719 | |
695 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00000001 | 720 | u32 validity_map[PORT_MAX]; /* 0x0 (4*2 = 0x8) */ |
696 | #define SHR_MEM_VALIDITY_MB 0x00000002 | 721 | #define SHR_MEM_FORMAT_REV_ID ('A'<<24) |
697 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00000004 | 722 | #define SHR_MEM_FORMAT_REV_MASK 0xff000000 |
723 | /* validity bits */ | ||
724 | #define SHR_MEM_VALIDITY_PCI_CFG 0x00100000 | ||
725 | #define SHR_MEM_VALIDITY_MB 0x00200000 | ||
726 | #define SHR_MEM_VALIDITY_DEV_INFO 0x00400000 | ||
727 | #define SHR_MEM_VALIDITY_RESERVED 0x00000007 | ||
698 | /* One licensing bit should be set */ | 728 | /* One licensing bit should be set */ |
699 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 | 729 | #define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 |
700 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 | 730 | #define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 |
701 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 | 731 | #define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 |
702 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 | 732 | #define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 |
733 | /* Active MFW */ | ||
734 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 | ||
735 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI 0x00000040 | ||
736 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP 0x00000080 | ||
737 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI 0x000000c0 | ||
738 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 | ||
739 | #define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 | ||
703 | 740 | ||
704 | struct drv_fw_mb drv_fw_mb[FUNC_MAX]; /* 0x8 (28 * 2 = 0x38) */ | 741 | struct dev_info dev_info; /* 0x8 (0x438) */ |
705 | |||
706 | struct dev_info dev_info; /* 0x40 (0x438) */ | ||
707 | 742 | ||
708 | #ifdef _LICENSE_H | 743 | u8 reserved[52*PORT_MAX]; |
709 | license_key_t drv_lic_key[FUNC_MAX]; /* 0x478 (52 * 2 = 0x68) */ | ||
710 | #else /* Linux! */ | ||
711 | u8 reserved[52*FUNC_MAX]; | ||
712 | #endif | ||
713 | 744 | ||
714 | /* FW information (for internal FW use) */ | 745 | /* FW information (for internal FW use) */ |
715 | u32 fw_info_fio_offset; /* 0x4e0 (0x4) */ | 746 | u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ |
716 | struct mgmtfw_state mgmtfw_state; /* 0x4e4 (0x140) */ | 747 | struct mgmtfw_state mgmtfw_state; /* 0x4ac (0x1b8) */ |
748 | |||
749 | struct drv_port_mb port_mb[PORT_MAX]; /* 0x664 (16*2=0x20) */ | ||
750 | struct drv_func_mb func_mb[FUNC_MAX]; /* 0x684 (44*2=0x58) */ | ||
717 | 751 | ||
718 | }; /* 0x624 */ | 752 | }; /* 0x6dc */ |
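With this layout, validity_map[port] carries both a format revision in its top byte and per-block validity bits, so a driver should check both before trusting the rest of shared memory. A minimal sketch, assuming a hypothetical SHMEM_RD accessor:

/* Illustrative sanity check of the shared memory region for one port. */
static int example_shmem_valid(struct bnx2x *bp, int port)
{
	u32 val = SHMEM_RD(bp, validity_map[port]);	/* assumed accessor */

	/* the format revision byte must read back as 'A' */
	if ((val & SHR_MEM_FORMAT_REV_MASK) != SHR_MEM_FORMAT_REV_ID)
		return 0;

	/* both the mailboxes and the device info block must be marked valid */
	return (val & (SHR_MEM_VALIDITY_MB | SHR_MEM_VALIDITY_DEV_INFO)) ==
	       (SHR_MEM_VALIDITY_MB | SHR_MEM_VALIDITY_DEV_INFO);
}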
719 | 753 | ||
720 | 754 | ||
721 | #define BCM_5710_FW_MAJOR_VERSION 4 | 755 | #define BCM_5710_FW_MAJOR_VERSION 4 |
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h index 04f93bff2ef4..dcaecc53bdb1 100644 --- a/drivers/net/bnx2x_init.h +++ b/drivers/net/bnx2x_init.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x_init.h: Broadcom Everest network driver. | 1 | /* bnx2x_init.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -409,7 +409,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp) | |||
409 | 409 | ||
410 | pci_read_config_word(bp->pdev, | 410 | pci_read_config_word(bp->pdev, |
411 | bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); | 411 | bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val); |
412 | DP(NETIF_MSG_HW, "read 0x%x from devctl\n", val); | 412 | DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val); |
413 | w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); | 413 | w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5); |
414 | r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); | 414 | r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12); |
415 | 415 | ||
@@ -472,10 +472,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp) | |||
472 | REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); | 472 | REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val); |
473 | 473 | ||
474 | REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); | 474 | REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order); |
475 | REG_WR(bp, PXP2_REG_RQ_WR_MBS0 + 8, w_order); | 475 | REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order); |
476 | REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); | 476 | REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order); |
477 | REG_WR(bp, PXP2_REG_RQ_RD_MBS0 + 8, r_order); | 477 | REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order); |
478 | 478 | ||
479 | if (r_order == MAX_RD_ORD) | ||
480 | REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00); | ||
481 | |||
482 | REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order)); | ||
479 | REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16); | 483 | REG_WR(bp, PXP2_REG_WR_DMAE_TH, (128 << w_order)/16); |
480 | } | 484 | } |
481 | 485 | ||
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h index 86055297ab02..5a1aa0b55044 100644 --- a/drivers/net/bnx2x_reg.h +++ b/drivers/net/bnx2x_reg.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* bnx2x_reg.h: Broadcom Everest network driver. | 1 | /* bnx2x_reg.h: Broadcom Everest network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2007 Broadcom Corporation | 3 | * Copyright (c) 2007-2008 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -24,6 +24,8 @@ | |||
24 | #define BRB1_REG_BRB1_INT_STS 0x6011c | 24 | #define BRB1_REG_BRB1_INT_STS 0x6011c |
25 | /* [RW 4] Parity mask register #0 read/write */ | 25 | /* [RW 4] Parity mask register #0 read/write */ |
26 | #define BRB1_REG_BRB1_PRTY_MASK 0x60138 | 26 | #define BRB1_REG_BRB1_PRTY_MASK 0x60138 |
27 | /* [R 4] Parity register #0 read */ | ||
28 | #define BRB1_REG_BRB1_PRTY_STS 0x6012c | ||
27 | /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At | 29 | /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At |
28 | address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address | 30 | address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address |
29 | BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ | 31 | BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. */ |
@@ -281,6 +283,8 @@ | |||
281 | #define CDU_REG_CDU_INT_STS 0x101030 | 283 | #define CDU_REG_CDU_INT_STS 0x101030 |
282 | /* [RW 5] Parity mask register #0 read/write */ | 284 | /* [RW 5] Parity mask register #0 read/write */ |
283 | #define CDU_REG_CDU_PRTY_MASK 0x10104c | 285 | #define CDU_REG_CDU_PRTY_MASK 0x10104c |
286 | /* [R 5] Parity register #0 read */ | ||
287 | #define CDU_REG_CDU_PRTY_STS 0x101040 | ||
284 | /* [RC 32] logging of error data in case of a CDU load error: | 288 | /* [RC 32] logging of error data in case of a CDU load error: |
285 | {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error; | 289 | {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0]; active_error; |
286 | type_error; actual_active; actual_compressed_context}; */ | 290 | type_error; actual_active; actual_compressed_context}; */ |
@@ -308,6 +312,8 @@ | |||
308 | #define CFC_REG_CFC_INT_STS_CLR 0x104100 | 312 | #define CFC_REG_CFC_INT_STS_CLR 0x104100 |
309 | /* [RW 4] Parity mask register #0 read/write */ | 313 | /* [RW 4] Parity mask register #0 read/write */ |
310 | #define CFC_REG_CFC_PRTY_MASK 0x104118 | 314 | #define CFC_REG_CFC_PRTY_MASK 0x104118 |
315 | /* [R 4] Parity register #0 read */ | ||
316 | #define CFC_REG_CFC_PRTY_STS 0x10410c | ||
311 | /* [RW 21] CID cam access (21:1 - Data; Valid - 0) */ | 317 | /* [RW 21] CID cam access (21:1 - Data; Valid - 0) */ |
312 | #define CFC_REG_CID_CAM 0x104800 | 318 | #define CFC_REG_CID_CAM 0x104800 |
313 | #define CFC_REG_CONTROL0 0x104028 | 319 | #define CFC_REG_CONTROL0 0x104028 |
@@ -354,6 +360,8 @@ | |||
354 | #define CSDM_REG_CSDM_INT_MASK_1 0xc22ac | 360 | #define CSDM_REG_CSDM_INT_MASK_1 0xc22ac |
355 | /* [RW 11] Parity mask register #0 read/write */ | 361 | /* [RW 11] Parity mask register #0 read/write */ |
356 | #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc | 362 | #define CSDM_REG_CSDM_PRTY_MASK 0xc22bc |
363 | /* [R 11] Parity register #0 read */ | ||
364 | #define CSDM_REG_CSDM_PRTY_STS 0xc22b0 | ||
357 | #define CSDM_REG_ENABLE_IN1 0xc2238 | 365 | #define CSDM_REG_ENABLE_IN1 0xc2238 |
358 | #define CSDM_REG_ENABLE_IN2 0xc223c | 366 | #define CSDM_REG_ENABLE_IN2 0xc223c |
359 | #define CSDM_REG_ENABLE_OUT1 0xc2240 | 367 | #define CSDM_REG_ENABLE_OUT1 0xc2240 |
@@ -438,6 +446,9 @@ | |||
438 | /* [RW 32] Parity mask register #0 read/write */ | 446 | /* [RW 32] Parity mask register #0 read/write */ |
439 | #define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 | 447 | #define CSEM_REG_CSEM_PRTY_MASK_0 0x200130 |
440 | #define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 | 448 | #define CSEM_REG_CSEM_PRTY_MASK_1 0x200140 |
449 | /* [R 32] Parity register #0 read */ | ||
450 | #define CSEM_REG_CSEM_PRTY_STS_0 0x200124 | ||
451 | #define CSEM_REG_CSEM_PRTY_STS_1 0x200134 | ||
441 | #define CSEM_REG_ENABLE_IN 0x2000a4 | 452 | #define CSEM_REG_ENABLE_IN 0x2000a4 |
442 | #define CSEM_REG_ENABLE_OUT 0x2000a8 | 453 | #define CSEM_REG_ENABLE_OUT 0x2000a8 |
443 | /* [RW 32] This address space contains all registers and memories that are | 454 | /* [RW 32] This address space contains all registers and memories that are |
@@ -526,6 +537,8 @@ | |||
526 | #define CSEM_REG_TS_9_AS 0x20005c | 537 | #define CSEM_REG_TS_9_AS 0x20005c |
527 | /* [RW 1] Parity mask register #0 read/write */ | 538 | /* [RW 1] Parity mask register #0 read/write */ |
528 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 | 539 | #define DBG_REG_DBG_PRTY_MASK 0xc0a8 |
540 | /* [R 1] Parity register #0 read */ | ||
541 | #define DBG_REG_DBG_PRTY_STS 0xc09c | ||
529 | /* [RW 2] debug only: These bits indicate the credit for PCI request type 4 | 542 | /* [RW 2] debug only: These bits indicate the credit for PCI request type 4 |
530 | interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are | 543 | interface; MUST be configured AFTER pci_ext_buffer_strt_addr_lsb/msb are |
531 | configured */ | 544 | configured */ |
@@ -543,6 +556,8 @@ | |||
543 | #define DMAE_REG_DMAE_INT_MASK 0x102054 | 556 | #define DMAE_REG_DMAE_INT_MASK 0x102054 |
544 | /* [RW 4] Parity mask register #0 read/write */ | 557 | /* [RW 4] Parity mask register #0 read/write */ |
545 | #define DMAE_REG_DMAE_PRTY_MASK 0x102064 | 558 | #define DMAE_REG_DMAE_PRTY_MASK 0x102064 |
559 | /* [R 4] Parity register #0 read */ | ||
560 | #define DMAE_REG_DMAE_PRTY_STS 0x102058 | ||
546 | /* [RW 1] Command 0 go. */ | 561 | /* [RW 1] Command 0 go. */ |
547 | #define DMAE_REG_GO_C0 0x102080 | 562 | #define DMAE_REG_GO_C0 0x102080 |
548 | /* [RW 1] Command 1 go. */ | 563 | /* [RW 1] Command 1 go. */ |
@@ -623,6 +638,8 @@ | |||
623 | #define DORQ_REG_DORQ_INT_STS_CLR 0x170178 | 638 | #define DORQ_REG_DORQ_INT_STS_CLR 0x170178 |
624 | /* [RW 2] Parity mask register #0 read/write */ | 639 | /* [RW 2] Parity mask register #0 read/write */ |
625 | #define DORQ_REG_DORQ_PRTY_MASK 0x170190 | 640 | #define DORQ_REG_DORQ_PRTY_MASK 0x170190 |
641 | /* [R 2] Parity register #0 read */ | ||
642 | #define DORQ_REG_DORQ_PRTY_STS 0x170184 | ||
626 | /* [RW 8] The address to write the DPM CID to STORM. */ | 643 | /* [RW 8] The address to write the DPM CID to STORM. */ |
627 | #define DORQ_REG_DPM_CID_ADDR 0x170044 | 644 | #define DORQ_REG_DPM_CID_ADDR 0x170044 |
628 | /* [RW 5] The DPM mode CID extraction offset. */ | 645 | /* [RW 5] The DPM mode CID extraction offset. */ |
@@ -692,6 +709,8 @@ | |||
692 | #define HC_REG_CONFIG_1 0x108004 | 709 | #define HC_REG_CONFIG_1 0x108004 |
693 | /* [RW 3] Parity mask register #0 read/write */ | 710 | /* [RW 3] Parity mask register #0 read/write */ |
694 | #define HC_REG_HC_PRTY_MASK 0x1080a0 | 711 | #define HC_REG_HC_PRTY_MASK 0x1080a0 |
712 | /* [R 3] Parity register #0 read */ | ||
713 | #define HC_REG_HC_PRTY_STS 0x108094 | ||
695 | /* [RW 17] status block interrupt mask; one in each bit means unmask; zero | 714 | /* [RW 17] status block interrupt mask; one in each bit means unmask; zero |
696 | in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1... | 715 | in each bit means mask; bit 0 - default SB; bit 1 - SB_0; bit 2 - SB_1... |
697 | bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */ | 716 | bit 16- SB_15; addr 0 - port 0; addr 1 - port 1 */ |
@@ -1127,6 +1146,7 @@ | |||
1127 | #define MISC_REG_AEU_GENERAL_ATTN_17 0xa044 | 1146 | #define MISC_REG_AEU_GENERAL_ATTN_17 0xa044 |
1128 | #define MISC_REG_AEU_GENERAL_ATTN_18 0xa048 | 1147 | #define MISC_REG_AEU_GENERAL_ATTN_18 0xa048 |
1129 | #define MISC_REG_AEU_GENERAL_ATTN_19 0xa04c | 1148 | #define MISC_REG_AEU_GENERAL_ATTN_19 0xa04c |
1149 | #define MISC_REG_AEU_GENERAL_ATTN_10 0xa028 | ||
1130 | #define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c | 1150 | #define MISC_REG_AEU_GENERAL_ATTN_11 0xa02c |
1131 | #define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 | 1151 | #define MISC_REG_AEU_GENERAL_ATTN_2 0xa008 |
1132 | #define MISC_REG_AEU_GENERAL_ATTN_20 0xa050 | 1152 | #define MISC_REG_AEU_GENERAL_ATTN_20 0xa050 |
@@ -1135,6 +1155,9 @@ | |||
1135 | #define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 | 1155 | #define MISC_REG_AEU_GENERAL_ATTN_4 0xa010 |
1136 | #define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 | 1156 | #define MISC_REG_AEU_GENERAL_ATTN_5 0xa014 |
1137 | #define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 | 1157 | #define MISC_REG_AEU_GENERAL_ATTN_6 0xa018 |
1158 | #define MISC_REG_AEU_GENERAL_ATTN_7 0xa01c | ||
1159 | #define MISC_REG_AEU_GENERAL_ATTN_8 0xa020 | ||
1160 | #define MISC_REG_AEU_GENERAL_ATTN_9 0xa024 | ||
1138 | /* [RW 32] first 32b for inverting the input for function 0; for each bit: | 1161 | /* [RW 32] first 32b for inverting the input for function 0; for each bit: |
1139 | 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for | 1162 | 0= do not invert; 1= invert; mapped as follows: [0] NIG attention for |
1140 | function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; | 1163 | function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp; |
@@ -1183,6 +1206,40 @@ | |||
1183 | starts at 0x0 for the A0 tape-out and increments by one for each | 1206 | starts at 0x0 for the A0 tape-out and increments by one for each |
1184 | all-layer tape-out. */ | 1207 | all-layer tape-out. */ |
1185 | #define MISC_REG_CHIP_REV 0xa40c | 1208 | #define MISC_REG_CHIP_REV 0xa40c |
1209 | /* [RW 32] The following driver registers(1..6) represent 6 drivers and 32 | ||
1210 | clients. Each client can be controlled by one driver only. One in each | ||
1211 | bit represents that this driver controls the appropriate client (Ex: bit 5 | ||
1212 | is set means this driver controls client number 5). addr1 = set; addr0 = | ||
1213 | clear; read from both addresses will give the same result = status. write | ||
1214 | to address 1 will set a request to control all the clients that their | ||
1215 | appropriate bit (in the write command) is set. if the client is free (the | ||
1216 | appropriate bit in all the other drivers is clear) one will be written to | ||
1217 | that driver register; if the client isn't free the bit will remain zero. | ||
1218 | if the appropriate bit is set (the driver requests to gain control of a | ||
1219 | client it already controls) the ~MISC_REGISTERS_INT_STS.GENERIC_SW | ||
1220 | interrupt will be asserted. write to address 0 will set a request to | ||
1221 | free all the clients that their appropriate bit (in the write command) is | ||
1222 | set. if the appropriate bit is clear (the driver requests to free a client | ||
1223 | it doesn't control) the ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will | ||
1224 | be asserted. */ | ||
1225 | #define MISC_REG_DRIVER_CONTROL_1 0xa510 | ||
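In effect DRIVER_CONTROL_1 is a per-client lock with separate set and clear addresses. A hedged sketch of claiming one client bit, assuming (from the "addr1 = set" wording above) that the set address is the register address plus 4 and that a REG_RD counterpart to the REG_WR helper exists:

/* Illustrative client-lock acquire: request the bit at the set address,
 * then read back to see whether this driver actually won it. */
static int example_acquire_client(struct bnx2x *bp, u32 client_bit)
{
	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, client_bit);	/* assumed set address */
	return (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & client_bit) != 0;
}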
1226 | /* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of | ||
1227 | these bits is written as a '1'; the corresponding GPIO bit will turn off | ||
1228 | its drivers and become an input. This is the reset state of all GPIO | ||
1229 | pins. The read value of these bits will be a '1' if that last command | ||
1230 | (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff). | ||
1231 | [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written | ||
1232 | as a '1'; the corresponding GPIO bit will drive low. The read value of | ||
1233 | these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for | ||
1234 | this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET port | ||
1235 | 0; When any of these bits is written as a '1'; the corresponding GPIO | ||
1236 | bit will drive high (if it has that capability). The read value of these | ||
1237 | bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this | ||
1238 | bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE port 0; | ||
1239 | RO; These bits indicate the read value of each of the eight GPIO pins. | ||
1240 | This is the result value of the pin; not the drive value. Writing these | ||
1241 | bits will have no effect. */ | ||
1242 | #define MISC_REG_GPIO 0xa490 | ||
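Driving a GPIO pin is therefore a matter of writing a '1' into the right command nibble rather than writing a plain value. A sketch of driving a port-0 pin high, using the [11-8] SET field described above (illustrative only):

/* Illustrative: drive GPIO pin 'pin' (0..3) of port 0 high.  Only the SET
 * command bit is written; per the description, '0' bits issue no command. */
static void example_gpio_set_high(struct bnx2x *bp, int pin)
{
	REG_WR(bp, MISC_REG_GPIO, 1 << (8 + pin));
}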
1186 | /* [RW 1] Setting this bit enables a timer in the GRC block to timeout any | 1243 | /* [RW 1] Setting this bit enables a timer in the GRC block to timeout any |
1187 | access that does not finish within | 1244 | access that does not finish within |
1188 | ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is | 1245 | ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is |
@@ -1223,6 +1280,8 @@ | |||
1223 | #define MISC_REG_MISC_INT_MASK 0xa388 | 1280 | #define MISC_REG_MISC_INT_MASK 0xa388 |
1224 | /* [RW 1] Parity mask register #0 read/write */ | 1281 | /* [RW 1] Parity mask register #0 read/write */ |
1225 | #define MISC_REG_MISC_PRTY_MASK 0xa398 | 1282 | #define MISC_REG_MISC_PRTY_MASK 0xa398 |
1283 | /* [R 1] Parity register #0 read */ | ||
1284 | #define MISC_REG_MISC_PRTY_STS 0xa38c | ||
1226 | /* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. | 1285 | /* [RW 32] 32 LSB of storm PLL first register; reset val = 0x 071d2911. |
1227 | inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 | 1286 | inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1 |
1228 | divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 | 1287 | divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1 |
@@ -1264,6 +1323,55 @@ | |||
1264 | /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is | 1323 | /* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is |
1265 | shared with the driver resides */ | 1324 | shared with the driver resides */ |
1266 | #define MISC_REG_SHARED_MEM_ADDR 0xa2b4 | 1325 | #define MISC_REG_SHARED_MEM_ADDR 0xa2b4 |
1326 | /* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1'; | ||
1327 | the corresponding SPIO bit will turn off its drivers and become an | ||
1328 | input. This is the reset state of all SPIO pins. The read value of these | ||
1329 | bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this | ||
1330 | bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits | ||
1331 | is written as a '1'; the corresponding SPIO bit will drive low. The read | ||
1332 | value of these bits will be a '1' if that last command (#SET; #CLR; or | ||
1333 | #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of | ||
1334 | these bits is written as a '1'; the corresponding SPIO bit will drive | ||
1335 | high (if it has that capability). The read value of these bits will be a | ||
1336 | '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET. | ||
1337 | (reset value 0). [7-0] VALUE RO; These bits indicate the read value of | ||
1338 | each of the eight SPIO pins. This is the result value of the pin; not the | ||
1339 | drive value. Writing these bits will have no effect. Each 8-bit field | ||
1340 | is divided as follows: [0] VAUX Enable; when pulsed low; enables supply | ||
1341 | from VAUX. (This is an output pin only; the FLOAT field is not applicable | ||
1342 | for this pin); [1] VAUX Disable; when pulsed low; disables supply from | ||
1343 | VAUX. (This is an output pin only; FLOAT field is not applicable for this | ||
1344 | pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to | ||
1345 | select VAUX supply. (This is an output pin only; it is not controlled by | ||
1346 | the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT | ||
1347 | field is not applicable for this pin; only the VALUE fields is relevant - | ||
1348 | it reflects the output value); [3] reserved; [4] spio_4; [5] spio_5; [6] | ||
1349 | Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP | ||
1350 | device ID select; read by UMP firmware. */ | ||
1351 | #define MISC_REG_SPIO 0xa4fc | ||
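The VALUE byte in bits [7:0] is read-only and reflects the pins themselves, so sampling a strap such as the UMP device-ID selects on spio_6/spio_7 is just a shifted read. A sketch (assuming a REG_RD helper):

/* Illustrative: sample the raw input level of SPIO pin 'pin' (0..7). */
static int example_spio_value(struct bnx2x *bp, int pin)
{
	return (REG_RD(bp, MISC_REG_SPIO) >> pin) & 1;	/* VALUE field */
}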
1352 | /* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MC; | ||
1353 | according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5; | ||
1354 | [7:6] reserved */ | ||
1355 | #define MISC_REG_SPIO_EVENT_EN 0xa2b8 | ||
1356 | /* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the | ||
1357 | corresponding bit in the #OLD_VALUE register. This will acknowledge an | ||
1358 | interrupt on the falling edge of corresponding SPIO input (reset value | ||
1359 | 0). [23-16] OLD_SET Writing a '1' to these bits sets the corresponding bit | ||
1360 | in the #OLD_VALUE register. This will acknowledge an interrupt on the | ||
1361 | rising edge of corresponding SPIO input (reset value 0). [15-8] OLD_VALUE | ||
1362 | RO; These bits indicate the old value of the SPIO input value. When the | ||
1363 | ~INT_STATE bit is set; this bit indicates the OLD value of the pin such | ||
1364 | that if ~INT_STATE is set and this bit is '0'; then the interrupt is due | ||
1365 | to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the | ||
1366 | interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE | ||
1367 | RO; These bits indicate the current SPIO interrupt state for each SPIO | ||
1368 | pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR | ||
1369 | command bit is written. This bit is set when the SPIO input does not | ||
1370 | match the current value in #OLD_VALUE (reset value 0). */ | ||
1371 | #define MISC_REG_SPIO_INT 0xa500 | ||
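Acknowledging an SPIO interrupt therefore means bringing OLD_VALUE back in line with the pin: OLD_SET (bits [23:16]) after a rising edge, OLD_CLR (bits [31:24]) after a falling edge. A sketch with the bit positions taken from the description above (illustrative only):

/* Illustrative: acknowledge a pending interrupt on SPIO pin 'pin' by
 * updating OLD_VALUE to match the current edge direction. */
static void example_spio_int_ack(struct bnx2x *bp, int pin)
{
	u32 spio_int = REG_RD(bp, MISC_REG_SPIO_INT);

	if (!(spio_int & (1 << pin)))		/* INT_STATE not pending */
		return;

	if (spio_int & (1 << (8 + pin)))	/* OLD_VALUE was high: falling edge */
		REG_WR(bp, MISC_REG_SPIO_INT, 1 << (24 + pin));	/* OLD_CLR */
	else					/* OLD_VALUE was low: rising edge */
		REG_WR(bp, MISC_REG_SPIO_INT, 1 << (16 + pin));	/* OLD_SET */
}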
1372 | /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are | ||
1373 | loaded; 0-prepare; 1-unprepare */ | ||
1374 | #define MISC_REG_UNPREPARED 0xa424 | ||
1267 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) | 1375 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) |
1268 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) | 1376 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) |
1269 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) | 1377 | #define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) |
@@ -1392,6 +1500,9 @@ | |||
1392 | #define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 | 1500 | #define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC 0x10044 |
1393 | /* [RW 1] Input enable for RX PBF LP IF */ | 1501 | /* [RW 1] Input enable for RX PBF LP IF */ |
1394 | #define NIG_REG_PBF_LB_IN_EN 0x100b4 | 1502 | #define NIG_REG_PBF_LB_IN_EN 0x100b4 |
1503 | /* [RW 1] Value of this register will be transmitted to port swap when | ||
1504 | ~nig_registers_strap_override.strap_override =1 */ | ||
1505 | #define NIG_REG_PORT_SWAP 0x10394 | ||
1395 | /* [RW 1] output enable for RX parser descriptor IF */ | 1506 | /* [RW 1] output enable for RX parser descriptor IF */ |
1396 | #define NIG_REG_PRS_EOP_OUT_EN 0x10104 | 1507 | #define NIG_REG_PRS_EOP_OUT_EN 0x10104 |
1397 | /* [RW 1] Input enable for RX parser request IF */ | 1508 | /* [RW 1] Input enable for RX parser request IF */ |
@@ -1410,6 +1521,10 @@ | |||
1410 | #define NIG_REG_STAT2_BRB_OCTET 0x107e0 | 1521 | #define NIG_REG_STAT2_BRB_OCTET 0x107e0 |
1411 | #define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 | 1522 | #define NIG_REG_STATUS_INTERRUPT_PORT0 0x10328 |
1412 | #define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c | 1523 | #define NIG_REG_STATUS_INTERRUPT_PORT1 0x1032c |
1524 | /* [RW 1] port swap mux selection. If this register is equal to 0 then port | ||
1525 | swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then | ||
1526 | port swap is equal to ~nig_registers_port_swap.port_swap */ | ||
1527 | #define NIG_REG_STRAP_OVERRIDE 0x10398 | ||
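These two registers form a small mux: with STRAP_OVERRIDE left at 0 the port swap follows the SPIO strap, and with 1 it follows whatever was written to PORT_SWAP. A sketch of forcing the swap on (illustrative only):

/* Illustrative: ignore the strap and force port swap on. */
static void example_force_port_swap(struct bnx2x *bp)
{
	REG_WR(bp, NIG_REG_PORT_SWAP, 1);
	REG_WR(bp, NIG_REG_STRAP_OVERRIDE, 1);
}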
1413 | /* [RW 1] output enable for RX_XCM0 IF */ | 1528 | /* [RW 1] output enable for RX_XCM0 IF */ |
1414 | #define NIG_REG_XCM0_OUT_EN 0x100f0 | 1529 | #define NIG_REG_XCM0_OUT_EN 0x100f0 |
1415 | /* [RW 1] output enable for RX_XCM1 IF */ | 1530 | /* [RW 1] output enable for RX_XCM1 IF */ |
@@ -1499,6 +1614,8 @@ | |||
1499 | #define PB_REG_PB_INT_STS 0x1c | 1614 | #define PB_REG_PB_INT_STS 0x1c |
1500 | /* [RW 4] Parity mask register #0 read/write */ | 1615 | /* [RW 4] Parity mask register #0 read/write */ |
1501 | #define PB_REG_PB_PRTY_MASK 0x38 | 1616 | #define PB_REG_PB_PRTY_MASK 0x38 |
1617 | /* [R 4] Parity register #0 read */ | ||
1618 | #define PB_REG_PB_PRTY_STS 0x2c | ||
1502 | #define PRS_REG_A_PRSU_20 0x40134 | 1619 | #define PRS_REG_A_PRSU_20 0x40134 |
1503 | /* [R 8] debug only: CFC load request current credit. Transaction based. */ | 1620 | /* [R 8] debug only: CFC load request current credit. Transaction based. */ |
1504 | #define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 | 1621 | #define PRS_REG_CFC_LD_CURRENT_CREDIT 0x40164 |
@@ -1590,6 +1707,8 @@ | |||
1590 | #define PRS_REG_PRS_INT_STS 0x40188 | 1707 | #define PRS_REG_PRS_INT_STS 0x40188 |
1591 | /* [RW 8] Parity mask register #0 read/write */ | 1708 | /* [RW 8] Parity mask register #0 read/write */ |
1592 | #define PRS_REG_PRS_PRTY_MASK 0x401a4 | 1709 | #define PRS_REG_PRS_PRTY_MASK 0x401a4 |
1710 | /* [R 8] Parity register #0 read */ | ||
1711 | #define PRS_REG_PRS_PRTY_STS 0x40198 | ||
1593 | /* [RW 8] Context region for pure acknowledge packets. Used in CFC load | 1712 | /* [RW 8] Context region for pure acknowledge packets. Used in CFC load |
1594 | request message */ | 1713 | request message */ |
1595 | #define PRS_REG_PURE_REGIONS 0x40024 | 1714 | #define PRS_REG_PURE_REGIONS 0x40024 |
@@ -1718,6 +1837,9 @@ | |||
1718 | /* [RW 32] Parity mask register #0 read/write */ | 1837 | /* [RW 32] Parity mask register #0 read/write */ |
1719 | #define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 | 1838 | #define PXP2_REG_PXP2_PRTY_MASK_0 0x120588 |
1720 | #define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 | 1839 | #define PXP2_REG_PXP2_PRTY_MASK_1 0x120598 |
1840 | /* [R 32] Parity register #0 read */ | ||
1841 | #define PXP2_REG_PXP2_PRTY_STS_0 0x12057c | ||
1842 | #define PXP2_REG_PXP2_PRTY_STS_1 0x12058c | ||
1721 | /* [R 1] Debug only: The 'almost full' indication from each fifo (gives | 1843 | /* [R 1] Debug only: The 'almost full' indication from each fifo (gives |
1722 | indication about backpressure) */ | 1844 | indication about backpressure) */ |
1723 | #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 | 1845 | #define PXP2_REG_RD_ALMOST_FULL_0 0x120424 |
@@ -1911,6 +2033,8 @@ | |||
1911 | #define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 | 2033 | #define PXP2_REG_RQ_HC_ENDIAN_M 0x1201a8 |
1912 | /* [WB 53] Onchip address table */ | 2034 | /* [WB 53] Onchip address table */ |
1913 | #define PXP2_REG_RQ_ONCHIP_AT 0x122000 | 2035 | #define PXP2_REG_RQ_ONCHIP_AT 0x122000 |
2036 | /* [RW 13] Pending read limiter threshold; in Dwords */ | ||
2037 | #define PXP2_REG_RQ_PDR_LIMIT 0x12033c | ||
1914 | /* [RW 2] Endian mode for qm */ | 2038 | /* [RW 2] Endian mode for qm */ |
1915 | #define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 | 2039 | #define PXP2_REG_RQ_QM_ENDIAN_M 0x120194 |
1916 | /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; | 2040 | /* [RW 3] page size in L2P table for QM module; -4k; -8k; -16k; -32k; -64k; |
@@ -1921,6 +2045,9 @@ | |||
1921 | /* [RW 3] Max burst size field for read requests port 0; 000 - 128B; | 2045 | /* [RW 3] Max burst size field for read requests port 0; 000 - 128B; |
1922 | 001: 256B; 010: 512B; 011: 1K; 100: 2K; 101: 4K */ | 2046 | 001: 256B; 010: 512B; 011: 1K; 100: 2K; 101: 4K */ |
1923 | #define PXP2_REG_RQ_RD_MBS0 0x120160 | 2047 | #define PXP2_REG_RQ_RD_MBS0 0x120160 |
2048 | /* [RW 3] Max burst size field for read requests port 1; 000 - 128B; | ||
2049 | 001: 256B; 010: 512B; 011: 1K; 100: 2K; 101: 4K */ | ||
2050 | #define PXP2_REG_RQ_RD_MBS1 0x120168 | ||
1924 | /* [RW 2] Endian mode for src */ | 2051 | /* [RW 2] Endian mode for src */ |
1925 | #define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c | 2052 | #define PXP2_REG_RQ_SRC_ENDIAN_M 0x12019c |
1926 | /* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; | 2053 | /* [RW 3] page size in L2P table for SRC module; -4k; -8k; -16k; -32k; -64k; |
@@ -2000,10 +2127,17 @@ | |||
2000 | /* [RW 3] Max burst size field for write requests port 0; 000 - 128B; | 2127 | /* [RW 3] Max burst size field for write requests port 0; 000 - 128B; |
2001 | 001:256B; 010: 512B; */ | 2128 | 001:256B; 010: 512B; */ |
2002 | #define PXP2_REG_RQ_WR_MBS0 0x12015c | 2129 | #define PXP2_REG_RQ_WR_MBS0 0x12015c |
2130 | /* [RW 3] Max burst size field for write requests port 1; 000 - 128B; | ||
2131 | 001:256B; 010: 512B; */ | ||
2132 | #define PXP2_REG_RQ_WR_MBS1 0x120164 | ||
2003 | /* [RW 10] if Number of entries in dmae fifo will be higher than this | 2133 | /* [RW 10] if Number of entries in dmae fifo will be higher than this |
2004 | threshold then has_payload indication will be asserted; the default value | 2134 | threshold then has_payload indication will be asserted; the default value |
2005 | should be equal to > write MBS size! */ | 2135 | should be equal to > write MBS size! */ |
2006 | #define PXP2_REG_WR_DMAE_TH 0x120368 | 2136 | #define PXP2_REG_WR_DMAE_TH 0x120368 |
2137 | /* [RW 10] if Number of entries in usdmdp fifo will be higher than this | ||
2138 | threshold then has_payload indication will be asserted; the default value | ||
2139 | should be equal to > write MBS size! */ | ||
2140 | #define PXP2_REG_WR_USDMDP_TH 0x120348 | ||
2007 | /* [R 1] debug only: Indication if PSWHST arbiter is idle */ | 2141 | /* [R 1] debug only: Indication if PSWHST arbiter is idle */ |
2008 | #define PXP_REG_HST_ARB_IS_IDLE 0x103004 | 2142 | #define PXP_REG_HST_ARB_IS_IDLE 0x103004 |
2009 | /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means | 2143 | /* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means |
@@ -2021,6 +2155,8 @@ | |||
2021 | #define PXP_REG_PXP_INT_STS_CLR_0 0x10306c | 2155 | #define PXP_REG_PXP_INT_STS_CLR_0 0x10306c |
2022 | /* [RW 26] Parity mask register #0 read/write */ | 2156 | /* [RW 26] Parity mask register #0 read/write */ |
2023 | #define PXP_REG_PXP_PRTY_MASK 0x103094 | 2157 | #define PXP_REG_PXP_PRTY_MASK 0x103094 |
2158 | /* [R 26] Parity register #0 read */ | ||
2159 | #define PXP_REG_PXP_PRTY_STS 0x103088 | ||
2024 | /* [RW 4] The activity counter initial increment value sent in the load | 2160 | /* [RW 4] The activity counter initial increment value sent in the load |
2025 | request */ | 2161 | request */ |
2026 | #define QM_REG_ACTCTRINITVAL_0 0x168040 | 2162 | #define QM_REG_ACTCTRINITVAL_0 0x168040 |
@@ -2127,6 +2263,8 @@ | |||
2127 | #define QM_REG_QM_INT_STS 0x168438 | 2263 | #define QM_REG_QM_INT_STS 0x168438 |
2128 | /* [RW 9] Parity mask register #0 read/write */ | 2264 | /* [RW 9] Parity mask register #0 read/write */ |
2129 | #define QM_REG_QM_PRTY_MASK 0x168454 | 2265 | #define QM_REG_QM_PRTY_MASK 0x168454 |
2266 | /* [R 9] Parity register #0 read */ | ||
2267 | #define QM_REG_QM_PRTY_STS 0x168448 | ||
2130 | /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ | 2268 | /* [R 32] Current queues in pipeline: Queues from 32 to 63 */ |
2131 | #define QM_REG_QSTATUS_HIGH 0x16802c | 2269 | #define QM_REG_QSTATUS_HIGH 0x16802c |
2132 | /* [R 32] Current queues in pipeline: Queues from 0 to 31 */ | 2270 | /* [R 32] Current queues in pipeline: Queues from 0 to 31 */ |
@@ -2410,6 +2548,8 @@ | |||
2410 | #define SRC_REG_SRC_INT_STS 0x404ac | 2548 | #define SRC_REG_SRC_INT_STS 0x404ac |
2411 | /* [RW 3] Parity mask register #0 read/write */ | 2549 | /* [RW 3] Parity mask register #0 read/write */ |
2412 | #define SRC_REG_SRC_PRTY_MASK 0x404c8 | 2550 | #define SRC_REG_SRC_PRTY_MASK 0x404c8 |
2551 | /* [R 3] Parity register #0 read */ | ||
2552 | #define SRC_REG_SRC_PRTY_STS 0x404bc | ||
2413 | /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ | 2553 | /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ |
2414 | #define TCM_REG_CAM_OCCUP 0x5017c | 2554 | #define TCM_REG_CAM_OCCUP 0x5017c |
2415 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is | 2555 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is |
@@ -2730,6 +2870,8 @@ | |||
2730 | #define TSDM_REG_TSDM_INT_MASK_1 0x422ac | 2870 | #define TSDM_REG_TSDM_INT_MASK_1 0x422ac |
2731 | /* [RW 11] Parity mask register #0 read/write */ | 2871 | /* [RW 11] Parity mask register #0 read/write */ |
2732 | #define TSDM_REG_TSDM_PRTY_MASK 0x422bc | 2872 | #define TSDM_REG_TSDM_PRTY_MASK 0x422bc |
2873 | /* [R 11] Parity register #0 read */ | ||
2874 | #define TSDM_REG_TSDM_PRTY_STS 0x422b0 | ||
2733 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 2875 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
2734 | #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 | 2876 | #define TSEM_REG_ARB_CYCLE_SIZE 0x180034 |
2735 | /* [RW 3] The source that is associated with arbitration element 0. Source | 2877 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -2854,6 +2996,9 @@ | |||
2854 | /* [RW 32] Parity mask register #0 read/write */ | 2996 | /* [RW 32] Parity mask register #0 read/write */ |
2855 | #define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 | 2997 | #define TSEM_REG_TSEM_PRTY_MASK_0 0x180120 |
2856 | #define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 | 2998 | #define TSEM_REG_TSEM_PRTY_MASK_1 0x180130 |
2999 | /* [R 32] Parity register #0 read */ | ||
3000 | #define TSEM_REG_TSEM_PRTY_STS_0 0x180114 | ||
3001 | #define TSEM_REG_TSEM_PRTY_STS_1 0x180124 | ||
2857 | /* [R 5] Used to read the XX protection CAM occupancy counter. */ | 3002 | /* [R 5] Used to read the XX protection CAM occupancy counter. */ |
2858 | #define UCM_REG_CAM_OCCUP 0xe0170 | 3003 | #define UCM_REG_CAM_OCCUP 0xe0170 |
2859 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is | 3004 | /* [RW 1] CDU AG read Interface enable. If 0 - the request input is |
@@ -3155,6 +3300,8 @@ | |||
3155 | #define USDM_REG_USDM_INT_MASK_1 0xc42b0 | 3300 | #define USDM_REG_USDM_INT_MASK_1 0xc42b0 |
3156 | /* [RW 11] Parity mask register #0 read/write */ | 3301 | /* [RW 11] Parity mask register #0 read/write */ |
3157 | #define USDM_REG_USDM_PRTY_MASK 0xc42c0 | 3302 | #define USDM_REG_USDM_PRTY_MASK 0xc42c0 |
3303 | /* [R 11] Parity register #0 read */ | ||
3304 | #define USDM_REG_USDM_PRTY_STS 0xc42b4 | ||
3158 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 3305 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
3159 | #define USEM_REG_ARB_CYCLE_SIZE 0x300034 | 3306 | #define USEM_REG_ARB_CYCLE_SIZE 0x300034 |
3160 | /* [RW 3] The source that is associated with arbitration element 0. Source | 3307 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -3279,6 +3426,9 @@ | |||
3279 | /* [RW 32] Parity mask register #0 read/write */ | 3426 | /* [RW 32] Parity mask register #0 read/write */ |
3280 | #define USEM_REG_USEM_PRTY_MASK_0 0x300130 | 3427 | #define USEM_REG_USEM_PRTY_MASK_0 0x300130 |
3281 | #define USEM_REG_USEM_PRTY_MASK_1 0x300140 | 3428 | #define USEM_REG_USEM_PRTY_MASK_1 0x300140 |
3429 | /* [R 32] Parity register #0 read */ | ||
3430 | #define USEM_REG_USEM_PRTY_STS_0 0x300124 | ||
3431 | #define USEM_REG_USEM_PRTY_STS_1 0x300134 | ||
3282 | /* [RW 2] The queue index for registration on Aux1 counter flag. */ | 3432 | /* [RW 2] The queue index for registration on Aux1 counter flag. */ |
3283 | #define XCM_REG_AUX1_Q 0x20134 | 3433 | #define XCM_REG_AUX1_Q 0x20134 |
3284 | /* [RW 2] Per each decision rule the queue index to register to. */ | 3434 | /* [RW 2] Per each decision rule the queue index to register to. */ |
@@ -3684,6 +3834,8 @@ | |||
3684 | #define XSDM_REG_XSDM_INT_MASK_1 0x1662ac | 3834 | #define XSDM_REG_XSDM_INT_MASK_1 0x1662ac |
3685 | /* [RW 11] Parity mask register #0 read/write */ | 3835 | /* [RW 11] Parity mask register #0 read/write */ |
3686 | #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc | 3836 | #define XSDM_REG_XSDM_PRTY_MASK 0x1662bc |
3837 | /* [R 11] Parity register #0 read */ | ||
3838 | #define XSDM_REG_XSDM_PRTY_STS 0x1662b0 | ||
3687 | /* [RW 5] The number of time_slots in the arbitration cycle */ | 3839 | /* [RW 5] The number of time_slots in the arbitration cycle */ |
3688 | #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 | 3840 | #define XSEM_REG_ARB_CYCLE_SIZE 0x280034 |
3689 | /* [RW 3] The source that is associated with arbitration element 0. Source | 3841 | /* [RW 3] The source that is associated with arbitration element 0. Source |
@@ -3808,6 +3960,9 @@ | |||
3808 | /* [RW 32] Parity mask register #0 read/write */ | 3960 | /* [RW 32] Parity mask register #0 read/write */ |
3809 | #define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 | 3961 | #define XSEM_REG_XSEM_PRTY_MASK_0 0x280130 |
3810 | #define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 | 3962 | #define XSEM_REG_XSEM_PRTY_MASK_1 0x280140 |
3963 | /* [R 32] Parity register #0 read */ | ||
3964 | #define XSEM_REG_XSEM_PRTY_STS_0 0x280124 | ||
3965 | #define XSEM_REG_XSEM_PRTY_STS_1 0x280134 | ||
3811 | #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) | 3966 | #define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) |
3812 | #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) | 3967 | #define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) |
3813 | #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) | 3968 | #define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) |
@@ -3847,6 +4002,8 @@ | |||
3847 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) | 4002 | #define EMAC_MDIO_COMM_START_BUSY (1L<<29) |
3848 | #define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) | 4003 | #define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) |
3849 | #define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) | 4004 | #define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) |
4005 | #define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) | ||
4006 | #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 | ||
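EMAC_MDIO_MODE_CLOCK_CNT is the 6-bit MDIO clock divider field at bits 21:16 of the MDIO mode register, and the new _BITSHIFT constant names its bit position. A hedged sketch of folding a divider value into that field; emac_rd()/emac_wr() and EMAC_REG_EMAC_MDIO_MODE are assumed helpers/offsets, not part of this hunk:

    /* Sketch only: emac_rd()/emac_wr() are hypothetical MMIO accessors. */
    static void emac_set_mdio_clock_cnt(void __iomem *emac, u32 div)
    {
        u32 mode = emac_rd(emac, EMAC_REG_EMAC_MDIO_MODE);

        mode &= ~EMAC_MDIO_MODE_CLOCK_CNT;                        /* clear bits 21:16   */
        mode |= (div << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT) &
                EMAC_MDIO_MODE_CLOCK_CNT;                         /* insert new divider */
        emac_wr(emac, EMAC_REG_EMAC_MDIO_MODE, mode);
    }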
3850 | #define EMAC_MODE_25G_MODE (1L<<5) | 4007 | #define EMAC_MODE_25G_MODE (1L<<5) |
3851 | #define EMAC_MODE_ACPI_RCVD (1L<<20) | 4008 | #define EMAC_MODE_ACPI_RCVD (1L<<20) |
3852 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) | 4009 | #define EMAC_MODE_HALF_DUPLEX (1L<<1) |
@@ -3874,6 +4031,17 @@ | |||
3874 | #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) | 4031 | #define EMAC_RX_MTU_SIZE_JUMBO_ENA (1L<<31) |
3875 | #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) | 4032 | #define EMAC_TX_MODE_EXT_PAUSE_EN (1L<<3) |
3876 | #define EMAC_TX_MODE_RESET (1L<<0) | 4033 | #define EMAC_TX_MODE_RESET (1L<<0) |
4034 | #define MISC_REGISTERS_GPIO_1 1 | ||
4035 | #define MISC_REGISTERS_GPIO_2 2 | ||
4036 | #define MISC_REGISTERS_GPIO_3 3 | ||
4037 | #define MISC_REGISTERS_GPIO_CLR_POS 16 | ||
4038 | #define MISC_REGISTERS_GPIO_FLOAT (0xffL<<24) | ||
4039 | #define MISC_REGISTERS_GPIO_FLOAT_POS 24 | ||
4040 | #define MISC_REGISTERS_GPIO_INPUT_HI_Z 2 | ||
4041 | #define MISC_REGISTERS_GPIO_OUTPUT_HIGH 1 | ||
4042 | #define MISC_REGISTERS_GPIO_OUTPUT_LOW 0 | ||
4043 | #define MISC_REGISTERS_GPIO_PORT_SHIFT 4 | ||
4044 | #define MISC_REGISTERS_GPIO_SET_POS 8 | ||
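The MISC_REGISTERS_GPIO_* constants describe one GPIO control register with separate value, set, clear and float fields at the *_POS offsets; MISC_REGISTERS_GPIO_PORT_SHIFT moves a pin number into the second port's range. A rough sketch of driving a pin as output-high under those assumptions; reg_rd()/reg_wr() and MISC_REG_GPIO are placeholders rather than code from this patch:

    /* Sketch only: reg_rd()/reg_wr() and MISC_REG_GPIO are placeholders. */
    static void gpio_output_high(void __iomem *bar, int gpio_num, int port)
    {
        /* port 1 pins sit MISC_REGISTERS_GPIO_PORT_SHIFT positions above port 0 */
        int shift = gpio_num + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio = reg_rd(bar, MISC_REG_GPIO);

        gpio &= ~(1 << (shift + MISC_REGISTERS_GPIO_FLOAT_POS)); /* stop floating the pin */
        gpio |=  (1 << (shift + MISC_REGISTERS_GPIO_SET_POS));   /* latch the output high */
        reg_wr(bar, MISC_REG_GPIO, gpio);
    }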
3877 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 | 4045 | #define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 |
3878 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 | 4046 | #define MISC_REGISTERS_RESET_REG_1_SET 0x584 |
3879 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 | 4047 | #define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 |
@@ -3891,6 +4059,25 @@ | |||
3891 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) | 4059 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW (0x1<<4) |
3892 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) | 4060 | #define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8) |
3893 | #define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 | 4061 | #define MISC_REGISTERS_RESET_REG_3_SET 0x5a4 |
4062 | #define MISC_REGISTERS_SPIO_4 4 | ||
4063 | #define MISC_REGISTERS_SPIO_5 5 | ||
4064 | #define MISC_REGISTERS_SPIO_7 7 | ||
4065 | #define MISC_REGISTERS_SPIO_CLR_POS 16 | ||
4066 | #define MISC_REGISTERS_SPIO_FLOAT (0xffL<<24) | ||
4067 | #define GRC_MISC_REGISTERS_SPIO_FLOAT7 0x80000000 | ||
4068 | #define GRC_MISC_REGISTERS_SPIO_FLOAT6 0x40000000 | ||
4069 | #define GRC_MISC_REGISTERS_SPIO_FLOAT5 0x20000000 | ||
4070 | #define GRC_MISC_REGISTERS_SPIO_FLOAT4 0x10000000 | ||
4071 | #define MISC_REGISTERS_SPIO_FLOAT_POS 24 | ||
4072 | #define MISC_REGISTERS_SPIO_INPUT_HI_Z 2 | ||
4073 | #define MISC_REGISTERS_SPIO_INT_OLD_SET_POS 16 | ||
4074 | #define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 | ||
4075 | #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 | ||
4076 | #define MISC_REGISTERS_SPIO_SET_POS 8 | ||
4077 | #define HW_LOCK_MAX_RESOURCE_VALUE 31 | ||
4078 | #define HW_LOCK_RESOURCE_8072_MDIO 0 | ||
4079 | #define HW_LOCK_RESOURCE_GPIO 1 | ||
4080 | #define HW_LOCK_RESOURCE_SPIO 2 | ||
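HW_LOCK_RESOURCE_* are resource IDs (0..HW_LOCK_MAX_RESOURCE_VALUE) for a hardware lock that serializes GPIO/SPIO/MDIO access between the two PCI functions. A hedged usage sketch; acquire_hw_lock()/release_hw_lock() and set_spio() are placeholder names, not functions added by this patch:

    /* Sketch only: the lock and SPIO helpers are placeholders. */
    static int set_spio_locked(struct bnx2x *bp, int spio_num, u32 mode)
    {
        int rc = acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        if (rc)
            return rc;                       /* the other function holds the lock */

        rc = set_spio(bp, spio_num, mode);   /* e.g. MISC_REGISTERS_SPIO_OUTPUT_LOW */

        release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        return rc;
    }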
3894 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) | 4081 | #define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (1<<18) |
3895 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) | 4082 | #define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (1<<31) |
3896 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) | 4083 | #define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT (1<<9) |
@@ -3918,6 +4105,7 @@ | |||
3918 | #define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3) | 4105 | #define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT (1<<3) |
3919 | #define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2) | 4106 | #define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR (1<<2) |
3920 | #define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22) | 4107 | #define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR (1<<22) |
4108 | #define AEU_INPUTS_ATTN_BITS_SPIO5 (1<<15) | ||
3921 | #define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27) | 4109 | #define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT (1<<27) |
3922 | #define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5) | 4110 | #define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT (1<<5) |
3923 | #define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25) | 4111 | #define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT (1<<25) |
@@ -4206,6 +4394,9 @@ | |||
4206 | #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 | 4394 | #define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE 0x4000 |
4207 | #define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 | 4395 | #define MDIO_XGXS_BLOCK2_TX_LN_SWAP 0x11 |
4208 | #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 | 4396 | #define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE 0x8000 |
4397 | #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G 0x14 | ||
4398 | #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS 0x0001 | ||
4399 | #define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS 0x0010 | ||
4209 | #define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 | 4400 | #define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 0x15 |
4210 | 4401 | ||
4211 | #define MDIO_REG_BANK_GP_STATUS 0x8120 | 4402 | #define MDIO_REG_BANK_GP_STATUS 0x8120 |
@@ -4362,11 +4553,13 @@ | |||
4362 | #define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 | 4553 | #define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE 0x0001 |
4363 | 4554 | ||
4364 | 4555 | ||
4556 | #define EXT_PHY_AUTO_NEG_DEVAD 0x7 | ||
4365 | #define EXT_PHY_OPT_PMA_PMD_DEVAD 0x1 | 4557 | #define EXT_PHY_OPT_PMA_PMD_DEVAD 0x1 |
4366 | #define EXT_PHY_OPT_WIS_DEVAD 0x2 | 4558 | #define EXT_PHY_OPT_WIS_DEVAD 0x2 |
4367 | #define EXT_PHY_OPT_PCS_DEVAD 0x3 | 4559 | #define EXT_PHY_OPT_PCS_DEVAD 0x3 |
4368 | #define EXT_PHY_OPT_PHY_XS_DEVAD 0x4 | 4560 | #define EXT_PHY_OPT_PHY_XS_DEVAD 0x4 |
4369 | #define EXT_PHY_OPT_CNTL 0x0 | 4561 | #define EXT_PHY_OPT_CNTL 0x0 |
4562 | #define EXT_PHY_OPT_CNTL2 0x7 | ||
4370 | #define EXT_PHY_OPT_PMD_RX_SD 0xa | 4563 | #define EXT_PHY_OPT_PMD_RX_SD 0xa |
4371 | #define EXT_PHY_OPT_PMD_MISC_CNTL 0xca0a | 4564 | #define EXT_PHY_OPT_PMD_MISC_CNTL 0xca0a |
4372 | #define EXT_PHY_OPT_PHY_IDENTIFIER 0xc800 | 4565 | #define EXT_PHY_OPT_PHY_IDENTIFIER 0xc800 |
@@ -4378,11 +4571,24 @@ | |||
4378 | #define EXT_PHY_OPT_LASI_STATUS 0x9005 | 4571 | #define EXT_PHY_OPT_LASI_STATUS 0x9005 |
4379 | #define EXT_PHY_OPT_PCS_STATUS 0x0020 | 4572 | #define EXT_PHY_OPT_PCS_STATUS 0x0020 |
4380 | #define EXT_PHY_OPT_XGXS_LANE_STATUS 0x0018 | 4573 | #define EXT_PHY_OPT_XGXS_LANE_STATUS 0x0018 |
4574 | #define EXT_PHY_OPT_AN_LINK_STATUS 0x8304 | ||
4575 | #define EXT_PHY_OPT_AN_CL37_CL73 0x8370 | ||
4576 | #define EXT_PHY_OPT_AN_CL37_FD 0xffe4 | ||
4577 | #define EXT_PHY_OPT_AN_CL37_AN 0xffe0 | ||
4578 | #define EXT_PHY_OPT_AN_ADV 0x11 | ||
4381 | 4579 | ||
4382 | #define EXT_PHY_KR_PMA_PMD_DEVAD 0x1 | 4580 | #define EXT_PHY_KR_PMA_PMD_DEVAD 0x1 |
4383 | #define EXT_PHY_KR_PCS_DEVAD 0x3 | 4581 | #define EXT_PHY_KR_PCS_DEVAD 0x3 |
4384 | #define EXT_PHY_KR_AUTO_NEG_DEVAD 0x7 | 4582 | #define EXT_PHY_KR_AUTO_NEG_DEVAD 0x7 |
4385 | #define EXT_PHY_KR_CTRL 0x0000 | 4583 | #define EXT_PHY_KR_CTRL 0x0000 |
4584 | #define EXT_PHY_KR_STATUS 0x0001 | ||
4585 | #define EXT_PHY_KR_AUTO_NEG_COMPLETE 0x0020 | ||
4586 | #define EXT_PHY_KR_AUTO_NEG_ADVERT 0x0010 | ||
4587 | #define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE 0x0400 | ||
4588 | #define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC 0x0800 | ||
4589 | #define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH 0x0C00 | ||
4590 | #define EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK 0x0C00 | ||
4591 | #define EXT_PHY_KR_LP_AUTO_NEG 0x0013 | ||
4386 | #define EXT_PHY_KR_CTRL2 0x0007 | 4592 | #define EXT_PHY_KR_CTRL2 0x0007 |
4387 | #define EXT_PHY_KR_PCS_STATUS 0x0020 | 4593 | #define EXT_PHY_KR_PCS_STATUS 0x0020 |
4388 | #define EXT_PHY_KR_PMD_CTRL 0x0096 | 4594 | #define EXT_PHY_KR_PMD_CTRL 0x0096 |
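The new EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE* values are the pause bits of the clause-73 auto-negotiation advertisement register (device EXT_PHY_KR_AUTO_NEG_DEVAD, register 0x0010): _PAUSE is 0x0400, _PAUSE_ASYMMETRIC is 0x0800, and _PAUSE_BOTH/_PAUSE_MASK cover the pair. A hedged sketch of updating the advertisement; mdio45_read()/mdio45_write() and struct link_params are assumed helpers and types, not part of this patch:

    /* Sketch only: mdio45_read()/mdio45_write() are assumed clause-45 helpers. */
    static void kr_advertise_pause(struct link_params *params, u16 pause_bits)
    {
        u16 adv;

        mdio45_read(params, EXT_PHY_KR_AUTO_NEG_DEVAD,
                    EXT_PHY_KR_AUTO_NEG_ADVERT, &adv);
        adv &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK;            /* drop old pause bits */
        adv |= pause_bits & EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK;
        mdio45_write(params, EXT_PHY_KR_AUTO_NEG_DEVAD,
                     EXT_PHY_KR_AUTO_NEG_ADVERT, adv);            /* e.g. ..._PAUSE_BOTH */
    }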
@@ -4391,4 +4597,8 @@ | |||
4391 | #define EXT_PHY_KR_MISC_CTRL1 0xca85 | 4597 | #define EXT_PHY_KR_MISC_CTRL1 0xca85 |
4392 | #define EXT_PHY_KR_GEN_CTRL 0xca10 | 4598 | #define EXT_PHY_KR_GEN_CTRL 0xca10 |
4393 | #define EXT_PHY_KR_ROM_CODE 0xca19 | 4599 | #define EXT_PHY_KR_ROM_CODE 0xca19 |
4600 | #define EXT_PHY_KR_ROM_RESET_INTERNAL_MP 0x0188 | ||
4601 | #define EXT_PHY_KR_ROM_MICRO_RESET 0x018a | ||
4602 | |||
4603 | #define EXT_PHY_SFX7101_XGXS_TEST1 0xc00a | ||
4394 | 4604 | ||
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index 571750975137..348371fda597 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c | |||
@@ -172,30 +172,30 @@ static char version[] __initdata = | |||
172 | them to system IRQ numbers. This mapping is card specific and is set to | 172 | them to system IRQ numbers. This mapping is card specific and is set to |
173 | the configuration of the Cirrus Eval board for this chip. */ | 173 | the configuration of the Cirrus Eval board for this chip. */ |
174 | #ifdef CONFIG_ARCH_CLPS7500 | 174 | #ifdef CONFIG_ARCH_CLPS7500 |
175 | static unsigned int netcard_portlist[] __initdata = | 175 | static unsigned int netcard_portlist[] __used __initdata = |
176 | { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; | 176 | { 0x80090303, 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; |
177 | static unsigned int cs8900_irq_map[] = {12,0,0,0}; | 177 | static unsigned int cs8900_irq_map[] = {12,0,0,0}; |
178 | #elif defined(CONFIG_SH_HICOSH4) | 178 | #elif defined(CONFIG_SH_HICOSH4) |
179 | static unsigned int netcard_portlist[] __initdata = | 179 | static unsigned int netcard_portlist[] __used __initdata = |
180 | { 0x0300, 0}; | 180 | { 0x0300, 0}; |
181 | static unsigned int cs8900_irq_map[] = {1,0,0,0}; | 181 | static unsigned int cs8900_irq_map[] = {1,0,0,0}; |
182 | #elif defined(CONFIG_MACH_IXDP2351) | 182 | #elif defined(CONFIG_MACH_IXDP2351) |
183 | static unsigned int netcard_portlist[] __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; | 183 | static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0}; |
184 | static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; | 184 | static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; |
185 | #include <asm/irq.h> | 185 | #include <asm/irq.h> |
186 | #elif defined(CONFIG_ARCH_IXDP2X01) | 186 | #elif defined(CONFIG_ARCH_IXDP2X01) |
187 | #include <asm/irq.h> | 187 | #include <asm/irq.h> |
188 | static unsigned int netcard_portlist[] __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; | 188 | static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; |
189 | static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; | 189 | static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; |
190 | #elif defined(CONFIG_ARCH_PNX010X) | 190 | #elif defined(CONFIG_ARCH_PNX010X) |
191 | #include <asm/irq.h> | 191 | #include <asm/irq.h> |
192 | #include <asm/arch/gpio.h> | 192 | #include <asm/arch/gpio.h> |
193 | #define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ | 193 | #define CIRRUS_DEFAULT_BASE IO_ADDRESS(EXT_STATIC2_s0_BASE + 0x200000) /* = Physical address 0x48200000 */ |
194 | #define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ | 194 | #define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ |
195 | static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0}; | 195 | static unsigned int netcard_portlist[] __used __initdata = {CIRRUS_DEFAULT_BASE, 0}; |
196 | static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0}; | 196 | static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0}; |
197 | #else | 197 | #else |
198 | static unsigned int netcard_portlist[] __initdata = | 198 | static unsigned int netcard_portlist[] __used __initdata = |
199 | { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; | 199 | { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; |
200 | static unsigned int cs8900_irq_map[] = {10,11,12,5}; | 200 | static unsigned int cs8900_irq_map[] = {10,11,12,5}; |
201 | #endif | 201 | #endif |
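The only change in this file is adding __used to every netcard_portlist definition. On configurations where the probing code that walks the table is compiled out, the __initdata array can look unreferenced and may be reported as defined but not used, or discarded; __used (which expands to __attribute__((used))) forces the symbol to be kept. A generic illustration of the pattern, not taken from this driver:

    /* Illustration only; in kernel code <linux/init.h> and <linux/compiler.h>
     * provide __initdata and __used. */
    static unsigned int probe_addresses[] __used __initdata = {
        0x300, 0x320, 0x340, 0   /* zero-terminated list of I/O bases to probe */
    };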
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 3beace55b58d..7fe20310eb5f 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c | |||
@@ -438,7 +438,7 @@ static void e1000_release_nvm_82571(struct e1000_hw *hw) | |||
438 | * For non-82573 silicon, write data to EEPROM at offset using SPI interface. | 438 | * For non-82573 silicon, write data to EEPROM at offset using SPI interface. |
439 | * | 439 | * |
440 | * If e1000e_update_nvm_checksum is not called after this function, the | 440 | * If e1000e_update_nvm_checksum is not called after this function, the |
441 | * EEPROM will most likley contain an invalid checksum. | 441 | * EEPROM will most likely contain an invalid checksum. |
442 | **/ | 442 | **/ |
443 | static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, | 443 | static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, |
444 | u16 *data) | 444 | u16 *data) |
@@ -547,7 +547,7 @@ static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) | |||
547 | * poll for completion. | 547 | * poll for completion. |
548 | * | 548 | * |
549 | * If e1000e_update_nvm_checksum is not called after this function, the | 549 | * If e1000e_update_nvm_checksum is not called after this function, the |
550 | * EEPROM will most likley contain an invalid checksum. | 550 | * EEPROM will most likely contain an invalid checksum. |
551 | **/ | 551 | **/ |
552 | static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, | 552 | static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, |
553 | u16 words, u16 *data) | 553 | u16 words, u16 *data) |
@@ -1053,7 +1053,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) | |||
1053 | /* If SerDes loopback mode is entered, there is no form | 1053 | /* If SerDes loopback mode is entered, there is no form |
1054 | * of reset to take the adapter out of that mode. So we | 1054 | * of reset to take the adapter out of that mode. So we |
1055 | * have to explicitly take the adapter out of loopback | 1055 | * have to explicitly take the adapter out of loopback |
1056 | * mode. This prevents drivers from twidling their thumbs | 1056 | * mode. This prevents drivers from twiddling their thumbs |
1057 | * if another tool failed to take it out of loopback mode. | 1057 | * if another tool failed to take it out of loopback mode. |
1058 | */ | 1058 | */ |
1059 | ew32(SCTL, | 1059 | ew32(SCTL, |
@@ -1098,7 +1098,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) | |||
1098 | * e1000e_get_laa_state_82571 - Get locally administered address state | 1098 | * e1000e_get_laa_state_82571 - Get locally administered address state |
1099 | * @hw: pointer to the HW structure | 1099 | * @hw: pointer to the HW structure |
1100 | * | 1100 | * |
1101 | * Retrieve and return the current locally administed address state. | 1101 | * Retrieve and return the current locally administered address state. |
1102 | **/ | 1102 | **/ |
1103 | bool e1000e_get_laa_state_82571(struct e1000_hw *hw) | 1103 | bool e1000e_get_laa_state_82571(struct e1000_hw *hw) |
1104 | { | 1104 | { |
@@ -1113,7 +1113,7 @@ bool e1000e_get_laa_state_82571(struct e1000_hw *hw) | |||
1113 | * @hw: pointer to the HW structure | 1113 | * @hw: pointer to the HW structure |
1114 | * @state: enable/disable locally administered address | 1114 | * @state: enable/disable locally administered address |
1115 | * | 1115 | * |
1116 | * Enable/Disable the current locally administed address state. | 1116 | * Enable/Disable the current locally administered address state. |
1117 | **/ | 1117 | **/ |
1118 | void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) | 1118 | void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) |
1119 | { | 1119 | { |
@@ -1281,16 +1281,6 @@ static struct e1000_phy_operations e82_phy_ops_m88 = { | |||
1281 | 1281 | ||
1282 | static struct e1000_nvm_operations e82571_nvm_ops = { | 1282 | static struct e1000_nvm_operations e82571_nvm_ops = { |
1283 | .acquire_nvm = e1000_acquire_nvm_82571, | 1283 | .acquire_nvm = e1000_acquire_nvm_82571, |
1284 | .read_nvm = e1000e_read_nvm_spi, | ||
1285 | .release_nvm = e1000_release_nvm_82571, | ||
1286 | .update_nvm = e1000_update_nvm_checksum_82571, | ||
1287 | .valid_led_default = e1000_valid_led_default_82571, | ||
1288 | .validate_nvm = e1000_validate_nvm_checksum_82571, | ||
1289 | .write_nvm = e1000_write_nvm_82571, | ||
1290 | }; | ||
1291 | |||
1292 | static struct e1000_nvm_operations e82573_nvm_ops = { | ||
1293 | .acquire_nvm = e1000_acquire_nvm_82571, | ||
1294 | .read_nvm = e1000e_read_nvm_eerd, | 1284 | .read_nvm = e1000e_read_nvm_eerd, |
1295 | .release_nvm = e1000_release_nvm_82571, | 1285 | .release_nvm = e1000_release_nvm_82571, |
1296 | .update_nvm = e1000_update_nvm_checksum_82571, | 1286 | .update_nvm = e1000_update_nvm_checksum_82571, |
@@ -1355,6 +1345,6 @@ struct e1000_info e1000_82573_info = { | |||
1355 | .get_invariants = e1000_get_invariants_82571, | 1345 | .get_invariants = e1000_get_invariants_82571, |
1356 | .mac_ops = &e82571_mac_ops, | 1346 | .mac_ops = &e82571_mac_ops, |
1357 | .phy_ops = &e82_phy_ops_m88, | 1347 | .phy_ops = &e82_phy_ops_m88, |
1358 | .nvm_ops = &e82573_nvm_ops, | 1348 | .nvm_ops = &e82571_nvm_ops, |
1359 | }; | 1349 | }; |
1360 | 1350 | ||
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 6232c3e96689..a4f511f549f7 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h | |||
@@ -66,7 +66,7 @@ | |||
66 | #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ | 66 | #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ |
67 | 67 | ||
68 | /* Extended Device Control */ | 68 | /* Extended Device Control */ |
69 | #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ | 69 | #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ |
70 | #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ | 70 | #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ |
71 | #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ | 71 | #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ |
72 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 | 72 | #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 |
@@ -75,12 +75,12 @@ | |||
75 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ | 75 | #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ |
76 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ | 76 | #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ |
77 | 77 | ||
78 | /* Receive Decriptor bit definitions */ | 78 | /* Receive Descriptor bit definitions */ |
79 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ | 79 | #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ |
80 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ | 80 | #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ |
81 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ | 81 | #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ |
82 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ | 82 | #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ |
83 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ | 83 | #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ |
84 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ | 84 | #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ |
85 | #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ | 85 | #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ |
86 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ | 86 | #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ |
@@ -223,7 +223,7 @@ | |||
223 | #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ | 223 | #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ |
224 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ | 224 | #define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ |
225 | 225 | ||
226 | /* Constants used to intrepret the masked PCI-X bus speed. */ | 226 | /* Constants used to interpret the masked PCI-X bus speed. */ |
227 | 227 | ||
228 | #define HALF_DUPLEX 1 | 228 | #define HALF_DUPLEX 1 |
229 | #define FULL_DUPLEX 2 | 229 | #define FULL_DUPLEX 2 |
@@ -517,7 +517,7 @@ | |||
517 | /* PHY 1000 MII Register/Bit Definitions */ | 517 | /* PHY 1000 MII Register/Bit Definitions */ |
518 | /* PHY Registers defined by IEEE */ | 518 | /* PHY Registers defined by IEEE */ |
519 | #define PHY_CONTROL 0x00 /* Control Register */ | 519 | #define PHY_CONTROL 0x00 /* Control Register */ |
520 | #define PHY_STATUS 0x01 /* Status Regiser */ | 520 | #define PHY_STATUS 0x01 /* Status Register */ |
521 | #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ | 521 | #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ |
522 | #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ | 522 | #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ |
523 | #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ | 523 | #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ |
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 8b88c226e858..327c0620da31 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -42,8 +42,7 @@ | |||
42 | struct e1000_info; | 42 | struct e1000_info; |
43 | 43 | ||
44 | #define ndev_printk(level, netdev, format, arg...) \ | 44 | #define ndev_printk(level, netdev, format, arg...) \ |
45 | printk(level "%s: %s: " format, (netdev)->dev.parent->bus_id, \ | 45 | printk(level "%s: " format, (netdev)->name, ## arg) |
46 | (netdev)->name, ## arg) | ||
47 | 46 | ||
48 | #ifdef DEBUG | 47 | #ifdef DEBUG |
49 | #define ndev_dbg(netdev, format, arg...) \ | 48 | #define ndev_dbg(netdev, format, arg...) \ |
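After this change ndev_printk prefixes messages with just the interface name instead of the parent device bus id plus the name. The wrappers built on top of it (such as ndev_dbg below) are unchanged; an illustrative call, with made-up message text:

    /* Illustrative only. */
    ndev_printk(KERN_INFO, netdev, "link is up, %d Mbps\n", speed);
    /* now prints "eth0: link is up, 1000 Mbps"
     * instead of "0000:00:19.0: eth0: link is up, 1000 Mbps" */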
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 3c5862f97dbf..916025b30fc3 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h | |||
@@ -184,7 +184,7 @@ enum e1e_registers { | |||
184 | E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ | 184 | E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ |
185 | E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ | 185 | E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ |
186 | E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ | 186 | E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ |
187 | E1000_RFCTL = 0x05008, /* Receive Filter Control*/ | 187 | E1000_RFCTL = 0x05008, /* Receive Filter Control */ |
188 | E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ | 188 | E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ |
189 | E1000_RA = 0x05400, /* Receive Address - RW Array */ | 189 | E1000_RA = 0x05400, /* Receive Address - RW Array */ |
190 | E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ | 190 | E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ |
@@ -202,7 +202,7 @@ enum e1e_registers { | |||
202 | E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ | 202 | E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */ |
203 | E1000_SWSM = 0x05B50, /* SW Semaphore */ | 203 | E1000_SWSM = 0x05B50, /* SW Semaphore */ |
204 | E1000_FWSM = 0x05B54, /* FW Semaphore */ | 204 | E1000_FWSM = 0x05B54, /* FW Semaphore */ |
205 | E1000_HICR = 0x08F00, /* Host Inteface Control */ | 205 | E1000_HICR = 0x08F00, /* Host Interface Control */ |
206 | }; | 206 | }; |
207 | 207 | ||
208 | /* RSS registers */ | 208 | /* RSS registers */ |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 8f8139de1f48..0ae39550768d 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -671,7 +671,7 @@ static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw) | |||
671 | * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY | 671 | * e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY |
672 | * @hw: pointer to the HW structure | 672 | * @hw: pointer to the HW structure |
673 | * | 673 | * |
674 | * Polarity is determined on the polarity reveral feature being enabled. | 674 | * Polarity is determined on the polarity reversal feature being enabled. |
675 | * This function is only called by other family-specific | 675 | * This function is only called by other family-specific |
676 | * routines. | 676 | * routines. |
677 | **/ | 677 | **/ |
@@ -947,7 +947,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) | |||
947 | /* Either we should have a hardware SPI cycle in progress | 947 | /* Either we should have a hardware SPI cycle in progress |
948 | * bit to check against, in order to start a new cycle or | 948 | * bit to check against, in order to start a new cycle or |
949 | * FDONE bit should be changed in the hardware so that it | 949 | * FDONE bit should be changed in the hardware so that it |
950 | * is 1 after harware reset, which can then be used as an | 950 | * is 1 after hardware reset, which can then be used as an |
951 | * indication whether a cycle is in progress or has been | 951 | * indication whether a cycle is in progress or has been |
952 | * completed. | 952 | * completed. |
953 | */ | 953 | */ |
@@ -1155,7 +1155,7 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, | |||
1155 | * which writes the checksum to the shadow ram. The changes in the shadow | 1155 | * which writes the checksum to the shadow ram. The changes in the shadow |
1156 | * ram are then committed to the EEPROM by processing each bank at a time | 1156 | * ram are then committed to the EEPROM by processing each bank at a time |
1157 | * checking for the modified bit and writing only the pending changes. | 1157 | * checking for the modified bit and writing only the pending changes. |
1158 | * After a succesful commit, the shadow ram is cleared and is ready for | 1158 | * After a successful commit, the shadow ram is cleared and is ready for |
1159 | * future writes. | 1159 | * future writes. |
1160 | **/ | 1160 | **/ |
1161 | static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | 1161 | static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) |
@@ -1680,7 +1680,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
1680 | * - initialize LED identification | 1680 | * - initialize LED identification |
1681 | * - setup receive address registers | 1681 | * - setup receive address registers |
1682 | * - setup flow control | 1682 | * - setup flow control |
1683 | * - setup transmit discriptors | 1683 | * - setup transmit descriptors |
1684 | * - clear statistics | 1684 | * - clear statistics |
1685 | **/ | 1685 | **/ |
1686 | static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) | 1686 | static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) |
@@ -1961,7 +1961,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
1961 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 1961 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
1962 | ew32(PHY_CTRL, phy_ctrl); | 1962 | ew32(PHY_CTRL, phy_ctrl); |
1963 | 1963 | ||
1964 | /* Call gig speed drop workaround on Giga disable before accessing | 1964 | /* Call gig speed drop workaround on Gig disable before accessing |
1965 | * any PHY registers */ | 1965 | * any PHY registers */ |
1966 | e1000e_gig_downshift_workaround_ich8lan(hw); | 1966 | e1000e_gig_downshift_workaround_ich8lan(hw); |
1967 | 1967 | ||
@@ -1972,7 +1972,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) | |||
1972 | /** | 1972 | /** |
1973 | * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state | 1973 | * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state |
1974 | * @hw: pointer to the HW structure | 1974 | * @hw: pointer to the HW structure |
1975 | * @state: boolean value used to set the current Kumaran workaround state | 1975 | * @state: boolean value used to set the current Kumeran workaround state |
1976 | * | 1976 | * |
1977 | * If ICH8, set the current Kumeran workaround state (enabled - TRUE | 1977 | * If ICH8, set the current Kumeran workaround state (enabled - TRUE |
1978 | * /disabled - FALSE). | 1978 | * /disabled - FALSE). |
@@ -2017,7 +2017,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) | |||
2017 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); | 2017 | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); |
2018 | ew32(PHY_CTRL, reg); | 2018 | ew32(PHY_CTRL, reg); |
2019 | 2019 | ||
2020 | /* Call gig speed drop workaround on Giga disable before | 2020 | /* Call gig speed drop workaround on Gig disable before |
2021 | * accessing any PHY registers */ | 2021 | * accessing any PHY registers */ |
2022 | if (hw->mac.type == e1000_ich8lan) | 2022 | if (hw->mac.type == e1000_ich8lan) |
2023 | e1000e_gig_downshift_workaround_ich8lan(hw); | 2023 | e1000e_gig_downshift_workaround_ich8lan(hw); |
@@ -2045,7 +2045,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) | |||
2045 | * @hw: pointer to the HW structure | 2045 | * @hw: pointer to the HW structure |
2046 | * | 2046 | * |
2047 | * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), | 2047 | * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), |
2048 | * LPLU, Giga disable, MDIC PHY reset): | 2048 | * LPLU, Gig disable, MDIC PHY reset): |
2049 | * 1) Set Kumeran Near-end loopback | 2049 | * 1) Set Kumeran Near-end loopback |
2050 | * 2) Clear Kumeran Near-end loopback | 2050 | * 2) Clear Kumeran Near-end loopback |
2051 | * Should only be called for ICH8[m] devices with IGP_3 Phy. | 2051 | * Should only be called for ICH8[m] devices with IGP_3 Phy. |
@@ -2089,10 +2089,10 @@ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) | |||
2089 | } | 2089 | } |
2090 | 2090 | ||
2091 | /** | 2091 | /** |
2092 | * e1000_led_on_ich8lan - Turn LED's on | 2092 | * e1000_led_on_ich8lan - Turn LEDs on |
2093 | * @hw: pointer to the HW structure | 2093 | * @hw: pointer to the HW structure |
2094 | * | 2094 | * |
2095 | * Turn on the LED's. | 2095 | * Turn on the LEDs. |
2096 | **/ | 2096 | **/ |
2097 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) | 2097 | static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) |
2098 | { | 2098 | { |
@@ -2105,10 +2105,10 @@ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) | |||
2105 | } | 2105 | } |
2106 | 2106 | ||
2107 | /** | 2107 | /** |
2108 | * e1000_led_off_ich8lan - Turn LED's off | 2108 | * e1000_led_off_ich8lan - Turn LEDs off |
2109 | * @hw: pointer to the HW structure | 2109 | * @hw: pointer to the HW structure |
2110 | * | 2110 | * |
2111 | * Turn off the LED's. | 2111 | * Turn off the LEDs. |
2112 | **/ | 2112 | **/ |
2113 | static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) | 2113 | static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) |
2114 | { | 2114 | { |
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 16f35fadb74b..95f75a43c9f9 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c | |||
@@ -589,9 +589,6 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) | |||
589 | s32 ret_val; | 589 | s32 ret_val; |
590 | u16 nvm_data; | 590 | u16 nvm_data; |
591 | 591 | ||
592 | if (mac->fc != e1000_fc_default) | ||
593 | return 0; | ||
594 | |||
595 | /* Read and store word 0x0F of the EEPROM. This word contains bits | 592 | /* Read and store word 0x0F of the EEPROM. This word contains bits |
596 | * that determine the hardware's default PAUSE (flow control) mode, | 593 | * that determine the hardware's default PAUSE (flow control) mode, |
597 | * a bit that determines whether the HW defaults to enabling or | 594 | * a bit that determines whether the HW defaults to enabling or |
@@ -1107,34 +1104,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1107 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { | 1104 | (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { |
1108 | mac->fc = e1000_fc_rx_pause; | 1105 | mac->fc = e1000_fc_rx_pause; |
1109 | hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); | 1106 | hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); |
1110 | } | 1107 | } else { |
1111 | /* Per the IEEE spec, at this point flow control should be | 1108 | /* |
1112 | * disabled. However, we want to consider that we could | 1109 | * Per the IEEE spec, at this point flow control |
1113 | * be connected to a legacy switch that doesn't advertise | 1110 | * should be disabled. |
1114 | * desired flow control, but can be forced on the link | 1111 | */ |
1115 | * partner. So if we advertised no flow control, that is | ||
1116 | * what we will resolve to. If we advertised some kind of | ||
1117 | * receive capability (Rx Pause Only or Full Flow Control) | ||
1118 | * and the link partner advertised none, we will configure | ||
1119 | * ourselves to enable Rx Flow Control only. We can do | ||
1120 | * this safely for two reasons: If the link partner really | ||
1121 | * didn't want flow control enabled, and we enable Rx, no | ||
1122 | * harm done since we won't be receiving any PAUSE frames | ||
1123 | * anyway. If the intent on the link partner was to have | ||
1124 | * flow control enabled, then by us enabling RX only, we | ||
1125 | * can at least receive pause frames and process them. | ||
1126 | * This is a good idea because in most cases, since we are | ||
1127 | * predominantly a server NIC, more times than not we will | ||
1128 | * be asked to delay transmission of packets than asking | ||
1129 | * our link partner to pause transmission of frames. | ||
1130 | */ | ||
1131 | else if ((mac->original_fc == e1000_fc_none) || | ||
1132 | (mac->original_fc == e1000_fc_tx_pause)) { | ||
1133 | mac->fc = e1000_fc_none; | 1112 | mac->fc = e1000_fc_none; |
1134 | hw_dbg(hw, "Flow Control = NONE.\r\n"); | 1113 | hw_dbg(hw, "Flow Control = NONE.\r\n"); |
1135 | } else { | ||
1136 | mac->fc = e1000_fc_rx_pause; | ||
1137 | hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); | ||
1138 | } | 1114 | } |
1139 | 1115 | ||
1140 | /* Now we need to do one last check... If we auto- | 1116 | /* Now we need to do one last check... If we auto- |
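With the long fallback comment and its two extra branches removed, the auto-negotiated pause resolution reduces to: full flow control when both sides advertise symmetric PAUSE, TX-only or RX-only pause when exactly one asymmetric combination matches, and otherwise none; the old behaviour of falling back to RX pause based on the originally requested mode is gone. A rough standalone rendering of that truth table, assuming the NWAY_AR_*/NWAY_LPAR_* ability bits from the e1000e defines (a sketch, not the driver's function):

    /* Sketch of IEEE 802.3 pause resolution from local/partner ability bits. */
    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode resolve_fc(u16 local_adv, u16 lp_adv)
    {
        if ((local_adv & NWAY_AR_PAUSE) && (lp_adv & NWAY_LPAR_PAUSE))
            return FC_FULL;                     /* both sides symmetric */
        if (!(local_adv & NWAY_AR_PAUSE) && (local_adv & NWAY_AR_ASM_DIR) &&
            (lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
            return FC_TX_PAUSE;                 /* we may send PAUSE, partner honours it */
        if ((local_adv & NWAY_AR_PAUSE) && (local_adv & NWAY_AR_ASM_DIR) &&
            !(lp_adv & NWAY_LPAR_PAUSE) && (lp_adv & NWAY_LPAR_ASM_DIR))
            return FC_RX_PAUSE;                 /* partner may send PAUSE, we honour it */
        return FC_NONE;                         /* nothing usable was negotiated */
    }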
@@ -1164,7 +1140,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) | |||
1164 | } | 1140 | } |
1165 | 1141 | ||
1166 | /** | 1142 | /** |
1167 | * e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex | 1143 | * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex |
1168 | * @hw: pointer to the HW structure | 1144 | * @hw: pointer to the HW structure |
1169 | * @speed: stores the current speed | 1145 | * @speed: stores the current speed |
1170 | * @duplex: stores the current duplex | 1146 | * @duplex: stores the current duplex |
@@ -1200,7 +1176,7 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup | |||
1200 | } | 1176 | } |
1201 | 1177 | ||
1202 | /** | 1178 | /** |
1203 | * e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex | 1179 | * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex |
1204 | * @hw: pointer to the HW structure | 1180 | * @hw: pointer to the HW structure |
1205 | * @speed: stores the current speed | 1181 | * @speed: stores the current speed |
1206 | * @duplex: stores the current duplex | 1182 | * @duplex: stores the current duplex |
@@ -1410,7 +1386,7 @@ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) | |||
1410 | * e1000e_blink_led - Blink LED | 1386 | * e1000e_blink_led - Blink LED |
1411 | * @hw: pointer to the HW structure | 1387 | * @hw: pointer to the HW structure |
1412 | * | 1388 | * |
1413 | * Blink the led's which are set to be on. | 1389 | * Blink the LEDs which are set to be on. |
1414 | **/ | 1390 | **/ |
1415 | s32 e1000e_blink_led(struct e1000_hw *hw) | 1391 | s32 e1000e_blink_led(struct e1000_hw *hw) |
1416 | { | 1392 | { |
@@ -1515,7 +1491,7 @@ void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) | |||
1515 | * @hw: pointer to the HW structure | 1491 | * @hw: pointer to the HW structure |
1516 | * | 1492 | * |
1517 | * Returns 0 if successful, else returns -10 | 1493 | * Returns 0 if successful, else returns -10 |
1518 | * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not casued | 1494 | * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused |
1519 | * the master requests to be disabled. | 1495 | * the master requests to be disabled. |
1520 | * | 1496 | * |
1521 | * Disables PCI-Express master access and verifies there are no pending | 1497 | * Disables PCI-Express master access and verifies there are no pending |
@@ -1876,7 +1852,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) | |||
1876 | } | 1852 | } |
1877 | 1853 | ||
1878 | /** | 1854 | /** |
1879 | * e1000e_read_nvm_spi - Read EEPROM's using SPI | 1855 | * e1000e_read_nvm_spi - Reads EEPROM using SPI |
1880 | * @hw: pointer to the HW structure | 1856 | * @hw: pointer to the HW structure |
1881 | * @offset: offset of word in the EEPROM to read | 1857 | * @offset: offset of word in the EEPROM to read |
1882 | * @words: number of words to read | 1858 | * @words: number of words to read |
@@ -1980,7 +1956,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | |||
1980 | * Writes data to EEPROM at offset using SPI interface. | 1956 | * Writes data to EEPROM at offset using SPI interface. |
1981 | * | 1957 | * |
1982 | * If e1000e_update_nvm_checksum is not called after this function , the | 1958 | * If e1000e_update_nvm_checksum is not called after this function , the |
1983 | * EEPROM will most likley contain an invalid checksum. | 1959 | * EEPROM will most likely contain an invalid checksum. |
1984 | **/ | 1960 | **/ |
1985 | s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) | 1961 | s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) |
1986 | { | 1962 | { |
@@ -2222,7 +2198,7 @@ static u8 e1000_calculate_checksum(u8 *buffer, u32 length) | |||
2222 | * | 2198 | * |
2223 | * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND | 2199 | * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND |
2224 | * | 2200 | * |
2225 | * This function checks whether the HOST IF is enabled for command operaton | 2201 | * This function checks whether the HOST IF is enabled for command operation |
2226 | * and also checks whether the previous command is completed. It busy waits | 2202 | * and also checks whether the previous command is completed. It busy waits |
2227 | * in case of previous command is not completed. | 2203 | * in case of previous command is not completed. |
2228 | **/ | 2204 | **/ |
@@ -2254,7 +2230,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) | |||
2254 | } | 2230 | } |
2255 | 2231 | ||
2256 | /** | 2232 | /** |
2257 | * e1000e_check_mng_mode - check managament mode | 2233 | * e1000e_check_mng_mode - check management mode |
2258 | * @hw: pointer to the HW structure | 2234 | * @hw: pointer to the HW structure |
2259 | * | 2235 | * |
2260 | * Reads the firmware semaphore register and returns true (>0) if | 2236 | * Reads the firmware semaphore register and returns true (>0) if |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3031d6d16247..fc5c63f4f578 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -1006,7 +1006,7 @@ static void e1000_irq_enable(struct e1000_adapter *adapter) | |||
1006 | * e1000_get_hw_control - get control of the h/w from f/w | 1006 | * e1000_get_hw_control - get control of the h/w from f/w |
1007 | * @adapter: address of board private structure | 1007 | * @adapter: address of board private structure |
1008 | * | 1008 | * |
1009 | * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit. | 1009 | * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
1010 | * For ASF and Pass Through versions of f/w this means that | 1010 | * For ASF and Pass Through versions of f/w this means that |
1011 | * the driver is loaded. For AMT version (only with 82573) | 1011 | * the driver is loaded. For AMT version (only with 82573) |
1012 | * of the f/w this means that the network i/f is open. | 1012 | * of the f/w this means that the network i/f is open. |
@@ -1032,7 +1032,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter) | |||
1032 | * e1000_release_hw_control - release control of the h/w to f/w | 1032 | * e1000_release_hw_control - release control of the h/w to f/w |
1033 | * @adapter: address of board private structure | 1033 | * @adapter: address of board private structure |
1034 | * | 1034 | * |
1035 | * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. | 1035 | * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. |
1036 | * For ASF and Pass Through versions of f/w this means that the | 1036 | * For ASF and Pass Through versions of f/w this means that the |
1037 | * driver is no longer loaded. For AMT version (only with 82573) i | 1037 | * driver is no longer loaded. For AMT version (only with 82573) i |
1038 | * of the f/w this means that the network i/f is closed. | 1038 | * of the f/w this means that the network i/f is closed. |
@@ -1241,6 +1241,11 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter) | |||
1241 | 1241 | ||
1242 | /** | 1242 | /** |
1243 | * e1000_update_itr - update the dynamic ITR value based on statistics | 1243 | * e1000_update_itr - update the dynamic ITR value based on statistics |
1244 | * @adapter: pointer to adapter | ||
1245 | * @itr_setting: current adapter->itr | ||
1246 | * @packets: the number of packets during this measurement interval | ||
1247 | * @bytes: the number of bytes during this measurement interval | ||
1248 | * | ||
1244 | * Stores a new ITR value based on packets and byte | 1249 | * Stores a new ITR value based on packets and byte |
1245 | * counts during the last interrupt. The advantage of per interrupt | 1250 | * counts during the last interrupt. The advantage of per interrupt |
1246 | * computation is faster updates and more accurate ITR for the current | 1251 | * computation is faster updates and more accurate ITR for the current |
@@ -1250,10 +1255,6 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter) | |||
1250 | * while increasing bulk throughput. | 1255 | * while increasing bulk throughput. |
1251 | * this functionality is controlled by the InterruptThrottleRate module | 1256 | * this functionality is controlled by the InterruptThrottleRate module |
1252 | * parameter (see e1000_param.c) | 1257 | * parameter (see e1000_param.c) |
1253 | * @adapter: pointer to adapter | ||
1254 | * @itr_setting: current adapter->itr | ||
1255 | * @packets: the number of packets during this measurement interval | ||
1256 | * @bytes: the number of bytes during this measurement interval | ||
1257 | **/ | 1258 | **/ |
1258 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, | 1259 | static unsigned int e1000_update_itr(struct e1000_adapter *adapter, |
1259 | u16 itr_setting, int packets, | 1260 | u16 itr_setting, int packets, |
@@ -1366,6 +1367,7 @@ set_itr_now: | |||
1366 | /** | 1367 | /** |
1367 | * e1000_clean - NAPI Rx polling callback | 1368 | * e1000_clean - NAPI Rx polling callback |
1368 | * @adapter: board private structure | 1369 | * @adapter: board private structure |
1370 | * @budget: amount of packets driver is allowed to process this poll | ||
1369 | **/ | 1371 | **/ |
1370 | static int e1000_clean(struct napi_struct *napi, int budget) | 1372 | static int e1000_clean(struct napi_struct *napi, int budget) |
1371 | { | 1373 | { |
@@ -2000,7 +2002,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter) | |||
2000 | e1000_check_reset_block(hw)) | 2002 | e1000_check_reset_block(hw)) |
2001 | return; | 2003 | return; |
2002 | 2004 | ||
2003 | /* managebility (AMT) is enabled */ | 2005 | /* manageability (AMT) is enabled */ |
2004 | if (er32(MANC) & E1000_MANC_SMBUS_EN) | 2006 | if (er32(MANC) & E1000_MANC_SMBUS_EN) |
2005 | return; | 2007 | return; |
2006 | 2008 | ||
@@ -3488,7 +3490,6 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3488 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) | 3490 | static void e1000e_disable_l1aspm(struct pci_dev *pdev) |
3489 | { | 3491 | { |
3490 | int pos; | 3492 | int pos; |
3491 | u32 cap; | ||
3492 | u16 val; | 3493 | u16 val; |
3493 | 3494 | ||
3494 | /* | 3495 | /* |
@@ -3503,7 +3504,6 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev) | |||
3503 | * active. | 3504 | * active. |
3504 | */ | 3505 | */ |
3505 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); | 3506 | pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); |
3506 | pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &cap); | ||
3507 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); | 3507 | pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); |
3508 | if (val & 0x2) { | 3508 | if (val & 0x2) { |
3509 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); | 3509 | dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); |
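The deleted pci_read_config_dword() of LNKCAP was dead code; only the Link Control word is needed to see whether ASPM L1 (bit 1) is enabled before clearing it. A reduced sketch of the remaining sequence using the standard PCI config-space accessors (illustrative, not the exact function body):

    /* Illustrative sketch of turning off L1 ASPM on a PCIe function. */
    static void disable_l1_aspm(struct pci_dev *pdev)
    {
        int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        u16 lnkctl;

        if (!pos)
            return;                             /* not a PCI Express device */

        pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);
        if (lnkctl & 0x2) {                     /* ASPM L1 currently enabled */
            dev_warn(&pdev->dev, "Disabling L1 ASPM\n");
            pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, lnkctl & ~0x2);
        }
    }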
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index fc6fee112f1c..dab3c468a768 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c | |||
@@ -121,7 +121,7 @@ s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) | |||
121 | * @offset: register offset to be read | 121 | * @offset: register offset to be read |
122 | * @data: pointer to the read data | 122 | * @data: pointer to the read data |
123 | * | 123 | * |
124 | * Reads the MDI control regsiter in the PHY at offset and stores the | 124 | * Reads the MDI control register in the PHY at offset and stores the |
125 | * information read to data. | 125 | * information read to data. |
126 | **/ | 126 | **/ |
127 | static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) | 127 | static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) |
@@ -1172,7 +1172,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) | |||
1172 | } | 1172 | } |
1173 | 1173 | ||
1174 | /** | 1174 | /** |
1175 | * e1000e_check_downshift - Checks whether a downshift in speed occured | 1175 | * e1000e_check_downshift - Checks whether a downshift in speed occurred |
1176 | * @hw: pointer to the HW structure | 1176 | * @hw: pointer to the HW structure |
1177 | * | 1177 | * |
1178 | * Success returns 0, Failure returns 1 | 1178 | * Success returns 0, Failure returns 1 |
@@ -1388,8 +1388,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) | |||
1388 | * | 1388 | * |
1389 | * The automatic gain control (agc) normalizes the amplitude of the | 1389 | * The automatic gain control (agc) normalizes the amplitude of the |
1390 | * received signal, adjusting for the attenuation produced by the | 1390 | * received signal, adjusting for the attenuation produced by the |
1391 | * cable. By reading the AGC registers, which reperesent the | 1391 | * cable. By reading the AGC registers, which represent the |
1392 | * cobination of course and fine gain value, the value can be put | 1392 | * combination of course and fine gain value, the value can be put |
1393 | * into a lookup table to obtain the approximate cable length | 1393 | * into a lookup table to obtain the approximate cable length |
1394 | * for each channel. | 1394 | * for each channel. |
1395 | **/ | 1395 | **/ |
@@ -1619,7 +1619,7 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw) | |||
1619 | * Verify the reset block is not blocking us from resetting. Acquire | 1619 | * Verify the reset block is not blocking us from resetting. Acquire |
1620 | * semaphore (if necessary) and read/set/write the device control reset | 1620 | * semaphore (if necessary) and read/set/write the device control reset |
1621 | * bit in the PHY. Wait the appropriate delay time for the device to | 1621 | * bit in the PHY. Wait the appropriate delay time for the device to |
1622 | * reset and relase the semaphore (if necessary). | 1622 | * reset and release the semaphore (if necessary). |
1623 | **/ | 1623 | **/ |
1624 | s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) | 1624 | s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) |
1625 | { | 1625 | { |
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 88fb53eba715..7c4ead35cfa2 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "ehea" | 42 | #define DRV_NAME "ehea" |
43 | #define DRV_VERSION "EHEA_0083" | 43 | #define DRV_VERSION "EHEA_0087" |
44 | 44 | ||
45 | /* eHEA capability flags */ | 45 | /* eHEA capability flags */ |
46 | #define DLPAR_PORT_ADD_REM 1 | 46 | #define DLPAR_PORT_ADD_REM 1 |
@@ -386,6 +386,13 @@ struct ehea_port_res { | |||
386 | 386 | ||
387 | 387 | ||
388 | #define EHEA_MAX_PORTS 16 | 388 | #define EHEA_MAX_PORTS 16 |
389 | |||
390 | #define EHEA_NUM_PORTRES_FW_HANDLES 6 /* QP handle, SendCQ handle, | ||
391 | RecvCQ handle, EQ handle, | ||
392 | SendMR handle, RecvMR handle */ | ||
393 | #define EHEA_NUM_PORT_FW_HANDLES 1 /* EQ handle */ | ||
394 | #define EHEA_NUM_ADAPTER_FW_HANDLES 2 /* MR handle, NEQ handle */ | ||
395 | |||
389 | struct ehea_adapter { | 396 | struct ehea_adapter { |
390 | u64 handle; | 397 | u64 handle; |
391 | struct of_device *ofdev; | 398 | struct of_device *ofdev; |
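The three EHEA_NUM_*_FW_HANDLES constants added above size the kdump snapshot array: each adapter contributes 2 handles (MR, NEQ), each active port 1 (its event queue), and each port resource 6 (QP, send/recv CQ, EQ, send/recv MR). A minimal stand-alone sketch of that sizing arithmetic; the topology counts below are invented for illustration:

#include <stdio.h>

#define EHEA_NUM_PORTRES_FW_HANDLES 6  /* QP, SendCQ, RecvCQ, EQ, SendMR, RecvMR */
#define EHEA_NUM_PORT_FW_HANDLES    1  /* EQ */
#define EHEA_NUM_ADAPTER_FW_HANDLES 2  /* MR, NEQ */

int main(void)
{
    /* Hypothetical topology: 1 adapter, 2 active ports, 6 port resources total. */
    int num_adapters = 1, num_ports = 2, num_portres = 6;

    int num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
                         num_ports    * EHEA_NUM_PORT_FW_HANDLES +
                         num_portres  * EHEA_NUM_PORTRES_FW_HANDLES;

    printf("snapshot needs room for %d firmware handles\n", num_fw_handles);
    return 0;
}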
@@ -405,6 +412,31 @@ struct ehea_mc_list { | |||
405 | u64 macaddr; | 412 | u64 macaddr; |
406 | }; | 413 | }; |
407 | 414 | ||
415 | /* kdump support */ | ||
416 | struct ehea_fw_handle_entry { | ||
417 | u64 adh; /* Adapter Handle */ | ||
418 | u64 fwh; /* Firmware Handle */ | ||
419 | }; | ||
420 | |||
421 | struct ehea_fw_handle_array { | ||
422 | struct ehea_fw_handle_entry *arr; | ||
423 | int num_entries; | ||
424 | struct semaphore lock; | ||
425 | }; | ||
426 | |||
427 | struct ehea_bcmc_reg_entry { | ||
428 | u64 adh; /* Adapter Handle */ | ||
429 | u32 port_id; /* Logical Port Id */ | ||
430 | u8 reg_type; /* Registration Type */ | ||
431 | u64 macaddr; | ||
432 | }; | ||
433 | |||
434 | struct ehea_bcmc_reg_array { | ||
435 | struct ehea_bcmc_reg_entry *arr; | ||
436 | int num_entries; | ||
437 | struct semaphore lock; | ||
438 | }; | ||
439 | |||
408 | #define EHEA_PORT_UP 1 | 440 | #define EHEA_PORT_UP 1 |
409 | #define EHEA_PORT_DOWN 0 | 441 | #define EHEA_PORT_DOWN 0 |
410 | #define EHEA_PHY_LINK_UP 1 | 442 | #define EHEA_PHY_LINK_UP 1 |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index c051c7e09b9a..21af674b764e 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/if_ether.h> | 35 | #include <linux/if_ether.h> |
36 | #include <linux/notifier.h> | 36 | #include <linux/notifier.h> |
37 | #include <linux/reboot.h> | 37 | #include <linux/reboot.h> |
38 | #include <asm/kexec.h> | ||
38 | 39 | ||
39 | #include <net/ip.h> | 40 | #include <net/ip.h> |
40 | 41 | ||
@@ -98,8 +99,10 @@ static int port_name_cnt; | |||
98 | static LIST_HEAD(adapter_list); | 99 | static LIST_HEAD(adapter_list); |
99 | u64 ehea_driver_flags; | 100 | u64 ehea_driver_flags; |
100 | struct work_struct ehea_rereg_mr_task; | 101 | struct work_struct ehea_rereg_mr_task; |
101 | |||
102 | struct semaphore dlpar_mem_lock; | 102 | struct semaphore dlpar_mem_lock; |
103 | struct ehea_fw_handle_array ehea_fw_handles; | ||
104 | struct ehea_bcmc_reg_array ehea_bcmc_regs; | ||
105 | |||
103 | 106 | ||
104 | static int __devinit ehea_probe_adapter(struct of_device *dev, | 107 | static int __devinit ehea_probe_adapter(struct of_device *dev, |
105 | const struct of_device_id *id); | 108 | const struct of_device_id *id); |
@@ -132,6 +135,160 @@ void ehea_dump(void *adr, int len, char *msg) | |||
132 | } | 135 | } |
133 | } | 136 | } |
134 | 137 | ||
138 | static void ehea_update_firmware_handles(void) | ||
139 | { | ||
140 | struct ehea_fw_handle_entry *arr = NULL; | ||
141 | struct ehea_adapter *adapter; | ||
142 | int num_adapters = 0; | ||
143 | int num_ports = 0; | ||
144 | int num_portres = 0; | ||
145 | int i = 0; | ||
146 | int num_fw_handles, k, l; | ||
147 | |||
148 | /* Determine number of handles */ | ||
149 | list_for_each_entry(adapter, &adapter_list, list) { | ||
150 | num_adapters++; | ||
151 | |||
152 | for (k = 0; k < EHEA_MAX_PORTS; k++) { | ||
153 | struct ehea_port *port = adapter->port[k]; | ||
154 | |||
155 | if (!port || (port->state != EHEA_PORT_UP)) | ||
156 | continue; | ||
157 | |||
158 | num_ports++; | ||
159 | num_portres += port->num_def_qps + port->num_add_tx_qps; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES + | ||
164 | num_ports * EHEA_NUM_PORT_FW_HANDLES + | ||
165 | num_portres * EHEA_NUM_PORTRES_FW_HANDLES; | ||
166 | |||
167 | if (num_fw_handles) { | ||
168 | arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL); | ||
169 | if (!arr) | ||
170 | return; /* Keep the existing array */ | ||
171 | } else | ||
172 | goto out_update; | ||
173 | |||
174 | list_for_each_entry(adapter, &adapter_list, list) { | ||
175 | for (k = 0; k < EHEA_MAX_PORTS; k++) { | ||
176 | struct ehea_port *port = adapter->port[k]; | ||
177 | |||
178 | if (!port || (port->state != EHEA_PORT_UP)) | ||
179 | continue; | ||
180 | |||
181 | for (l = 0; | ||
182 | l < port->num_def_qps + port->num_add_tx_qps; | ||
183 | l++) { | ||
184 | struct ehea_port_res *pr = &port->port_res[l]; | ||
185 | |||
186 | arr[i].adh = adapter->handle; | ||
187 | arr[i++].fwh = pr->qp->fw_handle; | ||
188 | arr[i].adh = adapter->handle; | ||
189 | arr[i++].fwh = pr->send_cq->fw_handle; | ||
190 | arr[i].adh = adapter->handle; | ||
191 | arr[i++].fwh = pr->recv_cq->fw_handle; | ||
192 | arr[i].adh = adapter->handle; | ||
193 | arr[i++].fwh = pr->eq->fw_handle; | ||
194 | arr[i].adh = adapter->handle; | ||
195 | arr[i++].fwh = pr->send_mr.handle; | ||
196 | arr[i].adh = adapter->handle; | ||
197 | arr[i++].fwh = pr->recv_mr.handle; | ||
198 | } | ||
199 | arr[i].adh = adapter->handle; | ||
200 | arr[i++].fwh = port->qp_eq->fw_handle; | ||
201 | } | ||
202 | |||
203 | arr[i].adh = adapter->handle; | ||
204 | arr[i++].fwh = adapter->neq->fw_handle; | ||
205 | |||
206 | if (adapter->mr.handle) { | ||
207 | arr[i].adh = adapter->handle; | ||
208 | arr[i++].fwh = adapter->mr.handle; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | out_update: | ||
213 | kfree(ehea_fw_handles.arr); | ||
214 | ehea_fw_handles.arr = arr; | ||
215 | ehea_fw_handles.num_entries = i; | ||
216 | } | ||
217 | |||
218 | static void ehea_update_bcmc_registrations(void) | ||
219 | { | ||
220 | struct ehea_bcmc_reg_entry *arr = NULL; | ||
221 | struct ehea_adapter *adapter; | ||
222 | struct ehea_mc_list *mc_entry; | ||
223 | int num_registrations = 0; | ||
224 | int i = 0; | ||
225 | int k; | ||
226 | |||
227 | /* Determine number of registrations */ | ||
228 | list_for_each_entry(adapter, &adapter_list, list) | ||
229 | for (k = 0; k < EHEA_MAX_PORTS; k++) { | ||
230 | struct ehea_port *port = adapter->port[k]; | ||
231 | |||
232 | if (!port || (port->state != EHEA_PORT_UP)) | ||
233 | continue; | ||
234 | |||
235 | num_registrations += 2; /* Broadcast registrations */ | ||
236 | |||
237 | list_for_each_entry(mc_entry, &port->mc_list->list,list) | ||
238 | num_registrations += 2; | ||
239 | } | ||
240 | |||
241 | if (num_registrations) { | ||
242 | arr = kzalloc(num_registrations * sizeof(*arr), GFP_KERNEL); | ||
243 | if (!arr) | ||
244 | return; /* Keep the existing array */ | ||
245 | } else | ||
246 | goto out_update; | ||
247 | |||
248 | list_for_each_entry(adapter, &adapter_list, list) { | ||
249 | for (k = 0; k < EHEA_MAX_PORTS; k++) { | ||
250 | struct ehea_port *port = adapter->port[k]; | ||
251 | |||
252 | if (!port || (port->state != EHEA_PORT_UP)) | ||
253 | continue; | ||
254 | |||
255 | arr[i].adh = adapter->handle; | ||
256 | arr[i].port_id = port->logical_port_id; | ||
257 | arr[i].reg_type = EHEA_BCMC_BROADCAST | | ||
258 | EHEA_BCMC_UNTAGGED; | ||
259 | arr[i++].macaddr = port->mac_addr; | ||
260 | |||
261 | arr[i].adh = adapter->handle; | ||
262 | arr[i].port_id = port->logical_port_id; | ||
263 | arr[i].reg_type = EHEA_BCMC_BROADCAST | | ||
264 | EHEA_BCMC_VLANID_ALL; | ||
265 | arr[i++].macaddr = port->mac_addr; | ||
266 | |||
267 | list_for_each_entry(mc_entry, | ||
268 | &port->mc_list->list, list) { | ||
269 | arr[i].adh = adapter->handle; | ||
270 | arr[i].port_id = port->logical_port_id; | ||
271 | arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | | ||
272 | EHEA_BCMC_MULTICAST | | ||
273 | EHEA_BCMC_UNTAGGED; | ||
274 | arr[i++].macaddr = mc_entry->macaddr; | ||
275 | |||
276 | arr[i].adh = adapter->handle; | ||
277 | arr[i].port_id = port->logical_port_id; | ||
278 | arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | | ||
279 | EHEA_BCMC_MULTICAST | | ||
280 | EHEA_BCMC_VLANID_ALL; | ||
281 | arr[i++].macaddr = mc_entry->macaddr; | ||
282 | } | ||
283 | } | ||
284 | } | ||
285 | |||
286 | out_update: | ||
287 | kfree(ehea_bcmc_regs.arr); | ||
288 | ehea_bcmc_regs.arr = arr; | ||
289 | ehea_bcmc_regs.num_entries = i; | ||
290 | } | ||
291 | |||
135 | static struct net_device_stats *ehea_get_stats(struct net_device *dev) | 292 | static struct net_device_stats *ehea_get_stats(struct net_device *dev) |
136 | { | 293 | { |
137 | struct ehea_port *port = netdev_priv(dev); | 294 | struct ehea_port *port = netdev_priv(dev); |
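Both ehea_update_firmware_handles() and ehea_update_bcmc_registrations() above follow the same two-pass pattern: walk the adapter list once to count entries, allocate a fresh array, walk again to fill it, then swap it in place of the old snapshot, keeping the old one if the allocation fails. A compilable userspace sketch of that pattern, mirroring the (adh, fwh) entries above and standing in a memcpy for the second walk:

#include <stdlib.h>
#include <string.h>

struct entry { unsigned long long adh, fwh; };  /* snapshot entry, as in the hunk */

static struct entry *snapshot;
static int snapshot_entries;

/* Rebuild the snapshot from 'src'; on allocation failure keep the old one. */
static void update_snapshot(const struct entry *src, int count)
{
    struct entry *arr = NULL;

    if (count) {
        arr = calloc(count, sizeof(*arr));
        if (!arr)
            return;                     /* keep the existing array */
        memcpy(arr, src, count * sizeof(*arr));
    }

    free(snapshot);                     /* swap in the new snapshot */
    snapshot = arr;
    snapshot_entries = count;
}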
@@ -1601,19 +1758,25 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa) | |||
1601 | 1758 | ||
1602 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); | 1759 | memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); |
1603 | 1760 | ||
1761 | down(&ehea_bcmc_regs.lock); | ||
1762 | |||
1604 | /* Deregister old MAC in pHYP */ | 1763 | /* Deregister old MAC in pHYP */ |
1605 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | 1764 | ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); |
1606 | if (ret) | 1765 | if (ret) |
1607 | goto out_free; | 1766 | goto out_upregs; |
1608 | 1767 | ||
1609 | port->mac_addr = cb0->port_mac_addr << 16; | 1768 | port->mac_addr = cb0->port_mac_addr << 16; |
1610 | 1769 | ||
1611 | /* Register new MAC in pHYP */ | 1770 | /* Register new MAC in pHYP */ |
1612 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | 1771 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); |
1613 | if (ret) | 1772 | if (ret) |
1614 | goto out_free; | 1773 | goto out_upregs; |
1615 | 1774 | ||
1616 | ret = 0; | 1775 | ret = 0; |
1776 | |||
1777 | out_upregs: | ||
1778 | ehea_update_bcmc_registrations(); | ||
1779 | up(&ehea_bcmc_regs.lock); | ||
1617 | out_free: | 1780 | out_free: |
1618 | kfree(cb0); | 1781 | kfree(cb0); |
1619 | out: | 1782 | out: |
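The error paths in ehea_set_mac_addr() now jump to out_upregs instead of out_free, so the bcmc registration snapshot is refreshed and the semaphore released on every exit. The shape of that lock-bracket-with-goto pattern, sketched with a POSIX mutex standing in for the kernel semaphore and stub functions for the hcalls:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t regs_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int fail) { return fail ? -1 : 0; }        /* stand-in for the hcalls */
static void update_registrations(void) { puts("snapshot refreshed"); }

static int set_mac_addr_sketch(int fail_dereg, int fail_reg)
{
    int ret;

    pthread_mutex_lock(&regs_lock);

    ret = do_step(fail_dereg);            /* deregister old MAC */
    if (ret)
        goto out_upregs;

    ret = do_step(fail_reg);              /* register new MAC */
    if (ret)
        goto out_upregs;

    ret = 0;
out_upregs:
    update_registrations();               /* always runs before unlocking */
    pthread_mutex_unlock(&regs_lock);
    return ret;
}

int main(void) { return set_mac_addr_sketch(0, 0); }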
@@ -1775,9 +1938,11 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
1775 | } | 1938 | } |
1776 | ehea_promiscuous(dev, 0); | 1939 | ehea_promiscuous(dev, 0); |
1777 | 1940 | ||
1941 | down(&ehea_bcmc_regs.lock); | ||
1942 | |||
1778 | if (dev->flags & IFF_ALLMULTI) { | 1943 | if (dev->flags & IFF_ALLMULTI) { |
1779 | ehea_allmulti(dev, 1); | 1944 | ehea_allmulti(dev, 1); |
1780 | return; | 1945 | goto out; |
1781 | } | 1946 | } |
1782 | ehea_allmulti(dev, 0); | 1947 | ehea_allmulti(dev, 0); |
1783 | 1948 | ||
@@ -1803,6 +1968,8 @@ static void ehea_set_multicast_list(struct net_device *dev) | |||
1803 | 1968 | ||
1804 | } | 1969 | } |
1805 | out: | 1970 | out: |
1971 | ehea_update_bcmc_registrations(); | ||
1972 | up(&ehea_bcmc_regs.lock); | ||
1806 | return; | 1973 | return; |
1807 | } | 1974 | } |
1808 | 1975 | ||
@@ -2285,6 +2452,8 @@ static int ehea_up(struct net_device *dev) | |||
2285 | if (port->state == EHEA_PORT_UP) | 2452 | if (port->state == EHEA_PORT_UP) |
2286 | return 0; | 2453 | return 0; |
2287 | 2454 | ||
2455 | down(&ehea_fw_handles.lock); | ||
2456 | |||
2288 | ret = ehea_port_res_setup(port, port->num_def_qps, | 2457 | ret = ehea_port_res_setup(port, port->num_def_qps, |
2289 | port->num_add_tx_qps); | 2458 | port->num_add_tx_qps); |
2290 | if (ret) { | 2459 | if (ret) { |
@@ -2321,8 +2490,17 @@ static int ehea_up(struct net_device *dev) | |||
2321 | } | 2490 | } |
2322 | } | 2491 | } |
2323 | 2492 | ||
2324 | ret = 0; | 2493 | down(&ehea_bcmc_regs.lock); |
2494 | |||
2495 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | ||
2496 | if (ret) { | ||
2497 | ret = -EIO; | ||
2498 | goto out_free_irqs; | ||
2499 | } | ||
2500 | |||
2325 | port->state = EHEA_PORT_UP; | 2501 | port->state = EHEA_PORT_UP; |
2502 | |||
2503 | ret = 0; | ||
2326 | goto out; | 2504 | goto out; |
2327 | 2505 | ||
2328 | out_free_irqs: | 2506 | out_free_irqs: |
@@ -2334,6 +2512,12 @@ out: | |||
2334 | if (ret) | 2512 | if (ret) |
2335 | ehea_info("Failed starting %s. ret=%i", dev->name, ret); | 2513 | ehea_info("Failed starting %s. ret=%i", dev->name, ret); |
2336 | 2514 | ||
2515 | ehea_update_bcmc_registrations(); | ||
2516 | up(&ehea_bcmc_regs.lock); | ||
2517 | |||
2518 | ehea_update_firmware_handles(); | ||
2519 | up(&ehea_fw_handles.lock); | ||
2520 | |||
2337 | return ret; | 2521 | return ret; |
2338 | } | 2522 | } |
2339 | 2523 | ||
@@ -2382,16 +2566,27 @@ static int ehea_down(struct net_device *dev) | |||
2382 | if (port->state == EHEA_PORT_DOWN) | 2566 | if (port->state == EHEA_PORT_DOWN) |
2383 | return 0; | 2567 | return 0; |
2384 | 2568 | ||
2569 | down(&ehea_bcmc_regs.lock); | ||
2385 | ehea_drop_multicast_list(dev); | 2570 | ehea_drop_multicast_list(dev); |
2571 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
2572 | |||
2386 | ehea_free_interrupts(dev); | 2573 | ehea_free_interrupts(dev); |
2387 | 2574 | ||
2575 | down(&ehea_fw_handles.lock); | ||
2576 | |||
2388 | port->state = EHEA_PORT_DOWN; | 2577 | port->state = EHEA_PORT_DOWN; |
2389 | 2578 | ||
2579 | ehea_update_bcmc_registrations(); | ||
2580 | up(&ehea_bcmc_regs.lock); | ||
2581 | |||
2390 | ret = ehea_clean_all_portres(port); | 2582 | ret = ehea_clean_all_portres(port); |
2391 | if (ret) | 2583 | if (ret) |
2392 | ehea_info("Failed freeing resources for %s. ret=%i", | 2584 | ehea_info("Failed freeing resources for %s. ret=%i", |
2393 | dev->name, ret); | 2585 | dev->name, ret); |
2394 | 2586 | ||
2587 | ehea_update_firmware_handles(); | ||
2588 | up(&ehea_fw_handles.lock); | ||
2589 | |||
2395 | return ret; | 2590 | return ret; |
2396 | } | 2591 | } |
2397 | 2592 | ||
@@ -2920,19 +3115,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
2920 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | 3115 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; |
2921 | 3116 | ||
2922 | INIT_WORK(&port->reset_task, ehea_reset_port); | 3117 | INIT_WORK(&port->reset_task, ehea_reset_port); |
2923 | |||
2924 | ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); | ||
2925 | if (ret) { | ||
2926 | ret = -EIO; | ||
2927 | goto out_unreg_port; | ||
2928 | } | ||
2929 | |||
2930 | ehea_set_ethtool_ops(dev); | 3118 | ehea_set_ethtool_ops(dev); |
2931 | 3119 | ||
2932 | ret = register_netdev(dev); | 3120 | ret = register_netdev(dev); |
2933 | if (ret) { | 3121 | if (ret) { |
2934 | ehea_error("register_netdev failed. ret=%d", ret); | 3122 | ehea_error("register_netdev failed. ret=%d", ret); |
2935 | goto out_dereg_bc; | 3123 | goto out_unreg_port; |
2936 | } | 3124 | } |
2937 | 3125 | ||
2938 | port->lro_max_aggr = lro_max_aggr; | 3126 | port->lro_max_aggr = lro_max_aggr; |
@@ -2949,9 +3137,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | |||
2949 | 3137 | ||
2950 | return port; | 3138 | return port; |
2951 | 3139 | ||
2952 | out_dereg_bc: | ||
2953 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
2954 | |||
2955 | out_unreg_port: | 3140 | out_unreg_port: |
2956 | ehea_unregister_port(port); | 3141 | ehea_unregister_port(port); |
2957 | 3142 | ||
@@ -2971,7 +3156,6 @@ static void ehea_shutdown_single_port(struct ehea_port *port) | |||
2971 | { | 3156 | { |
2972 | unregister_netdev(port->netdev); | 3157 | unregister_netdev(port->netdev); |
2973 | ehea_unregister_port(port); | 3158 | ehea_unregister_port(port); |
2974 | ehea_broadcast_reg_helper(port, H_DEREG_BCMC); | ||
2975 | kfree(port->mc_list); | 3159 | kfree(port->mc_list); |
2976 | free_netdev(port->netdev); | 3160 | free_netdev(port->netdev); |
2977 | port->adapter->active_ports--; | 3161 | port->adapter->active_ports--; |
@@ -3014,7 +3198,6 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) | |||
3014 | 3198 | ||
3015 | i++; | 3199 | i++; |
3016 | }; | 3200 | }; |
3017 | |||
3018 | return 0; | 3201 | return 0; |
3019 | } | 3202 | } |
3020 | 3203 | ||
@@ -3159,6 +3342,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev, | |||
3159 | ehea_error("Invalid ibmebus device probed"); | 3342 | ehea_error("Invalid ibmebus device probed"); |
3160 | return -EINVAL; | 3343 | return -EINVAL; |
3161 | } | 3344 | } |
3345 | down(&ehea_fw_handles.lock); | ||
3162 | 3346 | ||
3163 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); | 3347 | adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); |
3164 | if (!adapter) { | 3348 | if (!adapter) { |
@@ -3239,7 +3423,10 @@ out_kill_eq: | |||
3239 | 3423 | ||
3240 | out_free_ad: | 3424 | out_free_ad: |
3241 | kfree(adapter); | 3425 | kfree(adapter); |
3426 | |||
3242 | out: | 3427 | out: |
3428 | ehea_update_firmware_handles(); | ||
3429 | up(&ehea_fw_handles.lock); | ||
3243 | return ret; | 3430 | return ret; |
3244 | } | 3431 | } |
3245 | 3432 | ||
@@ -3258,18 +3445,41 @@ static int __devexit ehea_remove(struct of_device *dev) | |||
3258 | 3445 | ||
3259 | flush_scheduled_work(); | 3446 | flush_scheduled_work(); |
3260 | 3447 | ||
3448 | down(&ehea_fw_handles.lock); | ||
3449 | |||
3261 | ibmebus_free_irq(adapter->neq->attr.ist1, adapter); | 3450 | ibmebus_free_irq(adapter->neq->attr.ist1, adapter); |
3262 | tasklet_kill(&adapter->neq_tasklet); | 3451 | tasklet_kill(&adapter->neq_tasklet); |
3263 | 3452 | ||
3264 | ehea_destroy_eq(adapter->neq); | 3453 | ehea_destroy_eq(adapter->neq); |
3265 | ehea_remove_adapter_mr(adapter); | 3454 | ehea_remove_adapter_mr(adapter); |
3266 | list_del(&adapter->list); | 3455 | list_del(&adapter->list); |
3267 | |||
3268 | kfree(adapter); | 3456 | kfree(adapter); |
3269 | 3457 | ||
3458 | ehea_update_firmware_handles(); | ||
3459 | up(&ehea_fw_handles.lock); | ||
3460 | |||
3270 | return 0; | 3461 | return 0; |
3271 | } | 3462 | } |
3272 | 3463 | ||
3464 | void ehea_crash_handler(void) | ||
3465 | { | ||
3466 | int i; | ||
3467 | |||
3468 | if (ehea_fw_handles.arr) | ||
3469 | for (i = 0; i < ehea_fw_handles.num_entries; i++) | ||
3470 | ehea_h_free_resource(ehea_fw_handles.arr[i].adh, | ||
3471 | ehea_fw_handles.arr[i].fwh, | ||
3472 | FORCE_FREE); | ||
3473 | |||
3474 | if (ehea_bcmc_regs.arr) | ||
3475 | for (i = 0; i < ehea_bcmc_regs.num_entries; i++) | ||
3476 | ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, | ||
3477 | ehea_bcmc_regs.arr[i].port_id, | ||
3478 | ehea_bcmc_regs.arr[i].reg_type, | ||
3479 | ehea_bcmc_regs.arr[i].macaddr, | ||
3480 | 0, H_DEREG_BCMC); | ||
3481 | } | ||
3482 | |||
3273 | static int ehea_reboot_notifier(struct notifier_block *nb, | 3483 | static int ehea_reboot_notifier(struct notifier_block *nb, |
3274 | unsigned long action, void *unused) | 3484 | unsigned long action, void *unused) |
3275 | { | 3485 | { |
@@ -3330,7 +3540,12 @@ int __init ehea_module_init(void) | |||
3330 | 3540 | ||
3331 | 3541 | ||
3332 | INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); | 3542 | INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); |
3543 | memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); | ||
3544 | memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); | ||
3545 | |||
3333 | sema_init(&dlpar_mem_lock, 1); | 3546 | sema_init(&dlpar_mem_lock, 1); |
3547 | sema_init(&ehea_fw_handles.lock, 1); | ||
3548 | sema_init(&ehea_bcmc_regs.lock, 1); | ||
3334 | 3549 | ||
3335 | ret = check_module_parm(); | 3550 | ret = check_module_parm(); |
3336 | if (ret) | 3551 | if (ret) |
@@ -3340,12 +3555,18 @@ int __init ehea_module_init(void) | |||
3340 | if (ret) | 3555 | if (ret) |
3341 | goto out; | 3556 | goto out; |
3342 | 3557 | ||
3343 | register_reboot_notifier(&ehea_reboot_nb); | 3558 | ret = register_reboot_notifier(&ehea_reboot_nb); |
3559 | if (ret) | ||
3560 | ehea_info("failed registering reboot notifier"); | ||
3561 | |||
3562 | ret = crash_shutdown_register(&ehea_crash_handler); | ||
3563 | if (ret) | ||
3564 | ehea_info("failed registering crash handler"); | ||
3344 | 3565 | ||
3345 | ret = ibmebus_register_driver(&ehea_driver); | 3566 | ret = ibmebus_register_driver(&ehea_driver); |
3346 | if (ret) { | 3567 | if (ret) { |
3347 | ehea_error("failed registering eHEA device driver on ebus"); | 3568 | ehea_error("failed registering eHEA device driver on ebus"); |
3348 | goto out; | 3569 | goto out2; |
3349 | } | 3570 | } |
3350 | 3571 | ||
3351 | ret = driver_create_file(&ehea_driver.driver, | 3572 | ret = driver_create_file(&ehea_driver.driver, |
@@ -3353,21 +3574,33 @@ int __init ehea_module_init(void) | |||
3353 | if (ret) { | 3574 | if (ret) { |
3354 | ehea_error("failed to register capabilities attribute, ret=%d", | 3575 | ehea_error("failed to register capabilities attribute, ret=%d", |
3355 | ret); | 3576 | ret); |
3356 | unregister_reboot_notifier(&ehea_reboot_nb); | 3577 | goto out3; |
3357 | ibmebus_unregister_driver(&ehea_driver); | ||
3358 | goto out; | ||
3359 | } | 3578 | } |
3360 | 3579 | ||
3580 | return ret; | ||
3581 | |||
3582 | out3: | ||
3583 | ibmebus_unregister_driver(&ehea_driver); | ||
3584 | out2: | ||
3585 | unregister_reboot_notifier(&ehea_reboot_nb); | ||
3586 | crash_shutdown_unregister(&ehea_crash_handler); | ||
3361 | out: | 3587 | out: |
3362 | return ret; | 3588 | return ret; |
3363 | } | 3589 | } |
3364 | 3590 | ||
3365 | static void __exit ehea_module_exit(void) | 3591 | static void __exit ehea_module_exit(void) |
3366 | { | 3592 | { |
3593 | int ret; | ||
3594 | |||
3367 | flush_scheduled_work(); | 3595 | flush_scheduled_work(); |
3368 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); | 3596 | driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); |
3369 | ibmebus_unregister_driver(&ehea_driver); | 3597 | ibmebus_unregister_driver(&ehea_driver); |
3370 | unregister_reboot_notifier(&ehea_reboot_nb); | 3598 | unregister_reboot_notifier(&ehea_reboot_nb); |
3599 | ret = crash_shutdown_unregister(&ehea_crash_handler); | ||
3600 | if (ret) | ||
3601 | ehea_info("failed unregistering crash handler"); | ||
3602 | kfree(ehea_fw_handles.arr); | ||
3603 | kfree(ehea_bcmc_regs.arr); | ||
3371 | ehea_destroy_busmap(); | 3604 | ehea_destroy_busmap(); |
3372 | } | 3605 | } |
3373 | 3606 | ||
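ehea_module_init() now unwinds in strict reverse order: if creating the capabilities attribute fails it unregisters the ebus driver (out3), then the reboot notifier and crash handler (out2). A generic sketch of that goto-ladder pattern, with hypothetical step names:

#include <stdio.h>

static int step(const char *name, int fail)
{
    printf("%s\n", name);
    return fail ? -1 : 0;
}

static int module_init_sketch(void)
{
    int ret;

    ret = step("register notifiers", 0);
    if (ret)
        goto out;
    ret = step("register bus driver", 0);
    if (ret)
        goto out2;
    ret = step("create sysfs attribute", 1);   /* simulate a failure */
    if (ret)
        goto out3;
    return 0;

out3:
    step("unregister bus driver", 0);
out2:
    step("unregister notifiers", 0);
out:
    return ret;
}

int main(void) { return module_init_sketch() ? 1 : 0; }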
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 0fbf1bbbaee9..d7a3ea88eddb 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1253 | 1253 | ||
1254 | /* Setup interrupt handlers. */ | 1254 | /* Setup interrupt handlers. */ |
1255 | for (idp = id; idp->name; idp++) { | 1255 | for (idp = id; idp->name; idp++) { |
1256 | if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0) | 1256 | if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0) |
1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); | 1257 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq); |
1258 | } | 1258 | } |
1259 | 1259 | ||
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1382 | 1382 | ||
1383 | /* Setup interrupt handlers. */ | 1383 | /* Setup interrupt handlers. */ |
1384 | for (idp = id; idp->name; idp++) { | 1384 | for (idp = id; idp->name; idp++) { |
1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0) | 1385 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0) |
1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1386 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1387 | } | 1387 | } |
1388 | 1388 | ||
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1553 | 1553 | ||
1554 | /* Setup interrupt handlers. */ | 1554 | /* Setup interrupt handlers. */ |
1555 | for (idp = id; idp->name; idp++) { | 1555 | for (idp = id; idp->name; idp++) { |
1556 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1556 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); | 1557 | printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq); |
1558 | } | 1558 | } |
1559 | 1559 | ||
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) | |||
1680 | 1680 | ||
1681 | /* Setup interrupt handlers. */ | 1681 | /* Setup interrupt handlers. */ |
1682 | for (idp = id; idp->name; idp++) { | 1682 | for (idp = id; idp->name; idp++) { |
1683 | if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0) | 1683 | if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0) |
1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", | 1684 | printk("FEC: Could not allocate %s IRQ(%d)!\n", |
1685 | idp->name, b+idp->irq); | 1685 | idp->name, b+idp->irq); |
1686 | } | 1686 | } |
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 42d94edeee26..af869cf9ae7d 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c | |||
@@ -946,16 +946,11 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
946 | { | 946 | { |
947 | struct fs_enet_private *fep = netdev_priv(dev); | 947 | struct fs_enet_private *fep = netdev_priv(dev); |
948 | struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; | 948 | struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; |
949 | unsigned long flags; | ||
950 | int rc; | ||
951 | 949 | ||
952 | if (!netif_running(dev)) | 950 | if (!netif_running(dev)) |
953 | return -EINVAL; | 951 | return -EINVAL; |
954 | 952 | ||
955 | spin_lock_irqsave(&fep->lock, flags); | 953 | return phy_mii_ioctl(fep->phydev, mii, cmd); |
956 | rc = phy_mii_ioctl(fep->phydev, mii, cmd); | ||
957 | spin_unlock_irqrestore(&fep->lock, flags); | ||
958 | return rc; | ||
959 | } | 954 | } |
960 | 955 | ||
961 | extern int fs_mii_connect(struct net_device *dev); | 956 | extern int fs_mii_connect(struct net_device *dev); |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 4244fc282f21..718cf77e345a 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -605,7 +605,7 @@ void stop_gfar(struct net_device *dev) | |||
605 | 605 | ||
606 | free_skb_resources(priv); | 606 | free_skb_resources(priv); |
607 | 607 | ||
608 | dma_free_coherent(NULL, | 608 | dma_free_coherent(&dev->dev, |
609 | sizeof(struct txbd8)*priv->tx_ring_size | 609 | sizeof(struct txbd8)*priv->tx_ring_size |
610 | + sizeof(struct rxbd8)*priv->rx_ring_size, | 610 | + sizeof(struct rxbd8)*priv->rx_ring_size, |
611 | priv->tx_bd_base, | 611 | priv->tx_bd_base, |
@@ -626,7 +626,7 @@ static void free_skb_resources(struct gfar_private *priv) | |||
626 | for (i = 0; i < priv->tx_ring_size; i++) { | 626 | for (i = 0; i < priv->tx_ring_size; i++) { |
627 | 627 | ||
628 | if (priv->tx_skbuff[i]) { | 628 | if (priv->tx_skbuff[i]) { |
629 | dma_unmap_single(NULL, txbdp->bufPtr, | 629 | dma_unmap_single(&priv->dev->dev, txbdp->bufPtr, |
630 | txbdp->length, | 630 | txbdp->length, |
631 | DMA_TO_DEVICE); | 631 | DMA_TO_DEVICE); |
632 | dev_kfree_skb_any(priv->tx_skbuff[i]); | 632 | dev_kfree_skb_any(priv->tx_skbuff[i]); |
@@ -643,7 +643,7 @@ static void free_skb_resources(struct gfar_private *priv) | |||
643 | if(priv->rx_skbuff != NULL) { | 643 | if(priv->rx_skbuff != NULL) { |
644 | for (i = 0; i < priv->rx_ring_size; i++) { | 644 | for (i = 0; i < priv->rx_ring_size; i++) { |
645 | if (priv->rx_skbuff[i]) { | 645 | if (priv->rx_skbuff[i]) { |
646 | dma_unmap_single(NULL, rxbdp->bufPtr, | 646 | dma_unmap_single(&priv->dev->dev, rxbdp->bufPtr, |
647 | priv->rx_buffer_size, | 647 | priv->rx_buffer_size, |
648 | DMA_FROM_DEVICE); | 648 | DMA_FROM_DEVICE); |
649 | 649 | ||
@@ -708,7 +708,7 @@ int startup_gfar(struct net_device *dev) | |||
708 | gfar_write(®s->imask, IMASK_INIT_CLEAR); | 708 | gfar_write(®s->imask, IMASK_INIT_CLEAR); |
709 | 709 | ||
710 | /* Allocate memory for the buffer descriptors */ | 710 | /* Allocate memory for the buffer descriptors */ |
711 | vaddr = (unsigned long) dma_alloc_coherent(NULL, | 711 | vaddr = (unsigned long) dma_alloc_coherent(&dev->dev, |
712 | sizeof (struct txbd8) * priv->tx_ring_size + | 712 | sizeof (struct txbd8) * priv->tx_ring_size + |
713 | sizeof (struct rxbd8) * priv->rx_ring_size, | 713 | sizeof (struct rxbd8) * priv->rx_ring_size, |
714 | &addr, GFP_KERNEL); | 714 | &addr, GFP_KERNEL); |
@@ -919,7 +919,7 @@ err_irq_fail: | |||
919 | rx_skb_fail: | 919 | rx_skb_fail: |
920 | free_skb_resources(priv); | 920 | free_skb_resources(priv); |
921 | tx_skb_fail: | 921 | tx_skb_fail: |
922 | dma_free_coherent(NULL, | 922 | dma_free_coherent(&dev->dev, |
923 | sizeof(struct txbd8)*priv->tx_ring_size | 923 | sizeof(struct txbd8)*priv->tx_ring_size |
924 | + sizeof(struct rxbd8)*priv->rx_ring_size, | 924 | + sizeof(struct rxbd8)*priv->rx_ring_size, |
925 | priv->tx_bd_base, | 925 | priv->tx_bd_base, |
@@ -1053,7 +1053,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1053 | 1053 | ||
1054 | /* Set buffer length and pointer */ | 1054 | /* Set buffer length and pointer */ |
1055 | txbdp->length = skb->len; | 1055 | txbdp->length = skb->len; |
1056 | txbdp->bufPtr = dma_map_single(NULL, skb->data, | 1056 | txbdp->bufPtr = dma_map_single(&dev->dev, skb->data, |
1057 | skb->len, DMA_TO_DEVICE); | 1057 | skb->len, DMA_TO_DEVICE); |
1058 | 1058 | ||
1059 | /* Save the skb pointer so we can free it later */ | 1059 | /* Save the skb pointer so we can free it later */ |
@@ -1332,7 +1332,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) | |||
1332 | */ | 1332 | */ |
1333 | skb_reserve(skb, alignamount); | 1333 | skb_reserve(skb, alignamount); |
1334 | 1334 | ||
1335 | bdp->bufPtr = dma_map_single(NULL, skb->data, | 1335 | bdp->bufPtr = dma_map_single(&dev->dev, skb->data, |
1336 | priv->rx_buffer_size, DMA_FROM_DEVICE); | 1336 | priv->rx_buffer_size, DMA_FROM_DEVICE); |
1337 | 1337 | ||
1338 | bdp->length = 0; | 1338 | bdp->length = 0; |
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index bff280eff5e3..6a1f23092099 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c | |||
@@ -439,7 +439,7 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
439 | err = igb_request_msix(adapter); | 439 | err = igb_request_msix(adapter); |
440 | if (!err) { | 440 | if (!err) { |
441 | /* enable IAM, auto-mask, | 441 | /* enable IAM, auto-mask, |
442 | * DO NOT USE EIAME or IAME in legacy mode */ | 442 | * DO NOT USE EIAM or IAM in legacy mode */ |
443 | wr32(E1000_IAM, IMS_ENABLE_MASK); | 443 | wr32(E1000_IAM, IMS_ENABLE_MASK); |
444 | goto request_done; | 444 | goto request_done; |
445 | } | 445 | } |
@@ -465,14 +465,9 @@ static int igb_request_irq(struct igb_adapter *adapter) | |||
465 | err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, | 465 | err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, |
466 | netdev->name, netdev); | 466 | netdev->name, netdev); |
467 | 467 | ||
468 | if (err) { | 468 | if (err) |
469 | dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", | 469 | dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", |
470 | err); | 470 | err); |
471 | goto request_done; | ||
472 | } | ||
473 | |||
474 | /* enable IAM, auto-mask */ | ||
475 | wr32(E1000_IAM, IMS_ENABLE_MASK); | ||
476 | 471 | ||
477 | request_done: | 472 | request_done: |
478 | return err; | 473 | return err; |
@@ -821,7 +816,8 @@ void igb_reset(struct igb_adapter *adapter) | |||
821 | wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); | 816 | wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); |
822 | 817 | ||
823 | igb_reset_adaptive(&adapter->hw); | 818 | igb_reset_adaptive(&adapter->hw); |
824 | adapter->hw.phy.ops.get_phy_info(&adapter->hw); | 819 | if (adapter->hw.phy.ops.get_phy_info) |
820 | adapter->hw.phy.ops.get_phy_info(&adapter->hw); | ||
825 | } | 821 | } |
826 | 822 | ||
827 | /** | 823 | /** |
@@ -2057,7 +2053,8 @@ static void igb_set_multi(struct net_device *netdev) | |||
2057 | static void igb_update_phy_info(unsigned long data) | 2053 | static void igb_update_phy_info(unsigned long data) |
2058 | { | 2054 | { |
2059 | struct igb_adapter *adapter = (struct igb_adapter *) data; | 2055 | struct igb_adapter *adapter = (struct igb_adapter *) data; |
2060 | adapter->hw.phy.ops.get_phy_info(&adapter->hw); | 2056 | if (adapter->hw.phy.ops.get_phy_info) |
2057 | adapter->hw.phy.ops.get_phy_info(&adapter->hw); | ||
2061 | } | 2058 | } |
2062 | 2059 | ||
2063 | /** | 2060 | /** |
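The two get_phy_info hunks above replace an unconditional call through hw.phy.ops.get_phy_info with a guarded one, since not every PHY implementation fills in that operation. The guarded-callback idiom, sketched generically:

#include <stdio.h>

struct phy_ops {
    int (*get_phy_info)(void *hw);    /* optional: may legitimately be NULL */
};

static int example_get_info(void *hw) { (void)hw; puts("phy info read"); return 0; }

int main(void)
{
    struct phy_ops with    = { .get_phy_info = example_get_info };
    struct phy_ops without = { .get_phy_info = NULL };

    if (with.get_phy_info)
        with.get_phy_info(NULL);
    if (without.get_phy_info)         /* skipped: nothing to call */
        without.get_phy_info(NULL);
    return 0;
}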
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 53a9fd086f96..75f3a68ee354 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c | |||
@@ -67,6 +67,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = { | |||
67 | {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)}, | 67 | {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)}, |
68 | {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)}, | 68 | {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)}, |
69 | {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)}, | 69 | {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)}, |
70 | {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, | ||
70 | {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)}, | 71 | {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)}, |
71 | {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)}, | 72 | {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)}, |
72 | {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)}, | 73 | {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)}, |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 81bf005ff280..1d210ed46130 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -148,7 +148,7 @@ static void macb_handle_link_change(struct net_device *dev) | |||
148 | 148 | ||
149 | if (phydev->duplex) | 149 | if (phydev->duplex) |
150 | reg |= MACB_BIT(FD); | 150 | reg |= MACB_BIT(FD); |
151 | if (phydev->speed) | 151 | if (phydev->speed == SPEED_100) |
152 | reg |= MACB_BIT(SPD); | 152 | reg |= MACB_BIT(SPD); |
153 | 153 | ||
154 | macb_writel(bp, NCFGR, reg); | 154 | macb_writel(bp, NCFGR, reg); |
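The macb change replaces a truthiness test on phydev->speed with an explicit comparison against SPEED_100: the speed field holds a megabit value (10, 100, ...), so "if (phydev->speed)" was true for every link speed and would set the 100 Mb/s bit even on a 10 Mb/s link. A tiny sketch of the difference, using the SPEED_* values as defined in the ethtool headers:

#include <stdio.h>

#define SPEED_10   10
#define SPEED_100  100

int main(void)
{
    int speed = SPEED_10;

    printf("old test sets SPD bit: %s\n", speed ? "yes (wrong)" : "no");
    printf("new test sets SPD bit: %s\n", speed == SPEED_100 ? "yes" : "no");
    return 0;
}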
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 6323988dfa1d..fd8158a86f64 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c | |||
@@ -590,6 +590,13 @@ static int pcnet_config(struct pcmcia_device *link) | |||
590 | dev->if_port = 0; | 590 | dev->if_port = 0; |
591 | } | 591 | } |
592 | 592 | ||
593 | if ((link->conf.ConfigBase == 0x03c0) | ||
594 | && (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { | ||
595 | printk(KERN_INFO "pcnet_cs: this is an AX88190 card!\n"); | ||
596 | printk(KERN_INFO "pcnet_cs: use axnet_cs instead.\n"); | ||
597 | goto failed; | ||
598 | } | ||
599 | |||
593 | local_hw_info = get_hwinfo(link); | 600 | local_hw_info = get_hwinfo(link); |
594 | if (local_hw_info == NULL) | 601 | if (local_hw_info == NULL) |
595 | local_hw_info = get_prom(link); | 602 | local_hw_info = get_prom(link); |
@@ -1567,12 +1574,11 @@ static struct pcmcia_device_id pcnet_ids[] = { | |||
1567 | PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), | 1574 | PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0145), |
1568 | PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), | 1575 | PCMCIA_DEVICE_MANF_CARD(0x0149, 0x0230), |
1569 | PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), | 1576 | PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), |
1570 | /* PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), conflict with axnet_cs */ | 1577 | PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), |
1571 | PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), | 1578 | PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), |
1572 | PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), | 1579 | PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328), |
1573 | PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), | 1580 | PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), |
1574 | PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), | 1581 | PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), |
1575 | /* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), conflict with axnet_cs */ | ||
1576 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), | 1582 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), |
1577 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), | 1583 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0307), |
1578 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), | 1584 | PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6e9f619c491f..963630c65ca9 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -49,13 +49,13 @@ int mdiobus_register(struct mii_bus *bus) | |||
49 | int i; | 49 | int i; |
50 | int err = 0; | 50 | int err = 0; |
51 | 51 | ||
52 | mutex_init(&bus->mdio_lock); | ||
53 | |||
54 | if (NULL == bus || NULL == bus->name || | 52 | if (NULL == bus || NULL == bus->name || |
55 | NULL == bus->read || | 53 | NULL == bus->read || |
56 | NULL == bus->write) | 54 | NULL == bus->write) |
57 | return -EINVAL; | 55 | return -EINVAL; |
58 | 56 | ||
57 | mutex_init(&bus->mdio_lock); | ||
58 | |||
59 | if (bus->reset) | 59 | if (bus->reset) |
60 | bus->reset(bus); | 60 | bus->reset(bus); |
61 | 61 | ||
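The mdio_bus change is purely an ordering fix: mutex_init(&bus->mdio_lock) dereferences bus, so it must come after the NULL checks rather than before them. The same validate-then-initialize ordering, in a self-contained form with a pthread mutex as the lock:

#include <pthread.h>
#include <stddef.h>

struct bus {
    const char *name;
    pthread_mutex_t lock;
};

static int bus_register(struct bus *bus)
{
    if (bus == NULL || bus->name == NULL)   /* validate first ... */
        return -1;

    pthread_mutex_init(&bus->lock, NULL);   /* ... only then touch the object */
    return 0;
}

int main(void)
{
    struct bus b = { .name = "mdio0" };

    return (bus_register(NULL) == -1 && bus_register(&b) == 0) ? 0 : 1;
}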
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index e0b072d9fdb7..86e5dba079fe 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -455,6 +455,7 @@ static void pppol2tp_recv_dequeue(struct pppol2tp_session *session) | |||
455 | skb_queue_len(&session->reorder_q)); | 455 | skb_queue_len(&session->reorder_q)); |
456 | __skb_unlink(skb, &session->reorder_q); | 456 | __skb_unlink(skb, &session->reorder_q); |
457 | kfree_skb(skb); | 457 | kfree_skb(skb); |
458 | sock_put(session->sock); | ||
458 | continue; | 459 | continue; |
459 | } | 460 | } |
460 | 461 | ||
@@ -1110,6 +1111,8 @@ static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel) | |||
1110 | for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { | 1111 | for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { |
1111 | again: | 1112 | again: |
1112 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { | 1113 | hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { |
1114 | struct sk_buff *skb; | ||
1115 | |||
1113 | session = hlist_entry(walk, struct pppol2tp_session, hlist); | 1116 | session = hlist_entry(walk, struct pppol2tp_session, hlist); |
1114 | 1117 | ||
1115 | sk = session->sock; | 1118 | sk = session->sock; |
@@ -1138,7 +1141,10 @@ again: | |||
1138 | /* Purge any queued data */ | 1141 | /* Purge any queued data */ |
1139 | skb_queue_purge(&sk->sk_receive_queue); | 1142 | skb_queue_purge(&sk->sk_receive_queue); |
1140 | skb_queue_purge(&sk->sk_write_queue); | 1143 | skb_queue_purge(&sk->sk_write_queue); |
1141 | skb_queue_purge(&session->reorder_q); | 1144 | while ((skb = skb_dequeue(&session->reorder_q))) { |
1145 | kfree_skb(skb); | ||
1146 | sock_put(sk); | ||
1147 | } | ||
1142 | 1148 | ||
1143 | release_sock(sk); | 1149 | release_sock(sk); |
1144 | sock_put(sk); | 1150 | sock_put(sk); |
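The pppol2tp hunks pair every skb sitting in reorder_q with a socket reference: dropping a stale packet now does a sock_put(), and tunnel teardown dequeues and frees entries one at a time so each held reference is released, where a bare skb_queue_purge() would have leaked them. A small model of "each queued item pins a reference on its owner", with invented types:

#include <stdio.h>
#include <stdlib.h>

struct owner { int refcount; };
struct item  { struct owner *owner; struct item *next; };

static void owner_put(struct owner *o) { o->refcount--; }

/* Drain the queue, dropping one owner reference per freed item. */
static void purge(struct item **head)
{
    struct item *it;

    while ((it = *head)) {
        *head = it->next;
        owner_put(it->owner);
        free(it);
    }
}

int main(void)
{
    struct owner sk = { .refcount = 1 };
    struct item *q = NULL;
    int i;

    for (i = 0; i < 3; i++) {             /* queue three items, each takes a ref */
        struct item *it = malloc(sizeof(*it));
        it->owner = &sk; it->next = q; q = it;
        sk.refcount++;
    }
    purge(&q);
    printf("refcount back to %d\n", sk.refcount);   /* 1 again */
    return 0;
}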
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c index 750d2a99cb4f..daf5abab9534 100644 --- a/drivers/net/ps3_gelic_wireless.c +++ b/drivers/net/ps3_gelic_wireless.c | |||
@@ -2690,6 +2690,7 @@ int gelic_wl_driver_probe(struct gelic_card *card) | |||
2690 | return -ENOMEM; | 2690 | return -ENOMEM; |
2691 | 2691 | ||
2692 | /* setup net_device structure */ | 2692 | /* setup net_device structure */ |
2693 | SET_NETDEV_DEV(netdev, &card->dev->core); | ||
2693 | gelic_wl_setup_netdev_ops(netdev); | 2694 | gelic_wl_setup_netdev_ops(netdev); |
2694 | 2695 | ||
2695 | /* setup some of net_device and register it */ | 2696 | /* setup some of net_device and register it */ |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 202fdf356621..20745fd4e973 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -1633,13 +1633,18 @@ static inline void sis190_init_rxfilter(struct net_device *dev) | |||
1633 | static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, | 1633 | static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, |
1634 | struct net_device *dev) | 1634 | struct net_device *dev) |
1635 | { | 1635 | { |
1636 | u8 from; | 1636 | int rc; |
1637 | |||
1638 | rc = sis190_get_mac_addr_from_eeprom(pdev, dev); | ||
1639 | if (rc < 0) { | ||
1640 | u8 reg; | ||
1637 | 1641 | ||
1638 | pci_read_config_byte(pdev, 0x73, &from); | 1642 | pci_read_config_byte(pdev, 0x73, ®); |
1639 | 1643 | ||
1640 | return (from & 0x00000001) ? | 1644 | if (reg & 0x00000001) |
1641 | sis190_get_mac_addr_from_apc(pdev, dev) : | 1645 | rc = sis190_get_mac_addr_from_apc(pdev, dev); |
1642 | sis190_get_mac_addr_from_eeprom(pdev, dev); | 1646 | } |
1647 | return rc; | ||
1643 | } | 1648 | } |
1644 | 1649 | ||
1645 | static void sis190_set_speed_auto(struct net_device *dev) | 1650 | static void sis190_set_speed_auto(struct net_device *dev) |
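The sis190 change inverts the MAC address lookup order: try the EEPROM first, and only if that read fails consult config-space register 0x73 to decide whether to fall back to the APC. The fallback shape, with stand-in source functions and a hard-coded config byte:

#include <stdio.h>

static int read_mac_from_eeprom(void) { return -1; }   /* pretend the EEPROM read fails */
static int read_mac_from_apc(void)    { return 0; }

int main(void)
{
    int rc = read_mac_from_eeprom();

    if (rc < 0) {
        unsigned char reg = 0x01;             /* stand-in for PCI config byte 0x73 */

        if (reg & 0x01)
            rc = read_mac_from_apc();         /* APC holds the address */
    }
    printf("mac lookup %s\n", rc < 0 ? "failed" : "ok");
    return 0;
}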
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 9a6295909e43..54c662690f65 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -572,8 +572,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
572 | default: | 572 | default: |
573 | /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ | 573 | /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */ |
574 | ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; | 574 | ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL; |
575 | |||
575 | /* turn off the Rx LED (LED_RX) */ | 576 | /* turn off the Rx LED (LED_RX) */ |
576 | ledover &= ~PHY_M_LED_MO_RX; | 577 | ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); |
577 | } | 578 | } |
578 | 579 | ||
579 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && | 580 | if (hw->chip_id == CHIP_ID_YUKON_EC_U && |
@@ -602,7 +603,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
602 | 603 | ||
603 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { | 604 | if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { |
604 | /* turn on 100 Mbps LED (LED_LINK100) */ | 605 | /* turn on 100 Mbps LED (LED_LINK100) */ |
605 | ledover |= PHY_M_LED_MO_100; | 606 | ledover |= PHY_M_LED_MO_100(MO_LED_ON); |
606 | } | 607 | } |
607 | 608 | ||
608 | if (ledover) | 609 | if (ledover) |
@@ -3322,82 +3323,80 @@ static void sky2_set_multicast(struct net_device *dev) | |||
3322 | /* Can have one global because blinking is controlled by | 3323 | /* Can have one global because blinking is controlled by |
3323 | * ethtool and that is always under RTNL mutex | 3324 | * ethtool and that is always under RTNL mutex |
3324 | */ | 3325 | */ |
3325 | static void sky2_led(struct sky2_hw *hw, unsigned port, int on) | 3326 | static void sky2_led(struct sky2_port *sky2, enum led_mode mode) |
3326 | { | 3327 | { |
3327 | u16 pg; | 3328 | struct sky2_hw *hw = sky2->hw; |
3329 | unsigned port = sky2->port; | ||
3328 | 3330 | ||
3329 | switch (hw->chip_id) { | 3331 | spin_lock_bh(&sky2->phy_lock); |
3330 | case CHIP_ID_YUKON_XL: | 3332 | if (hw->chip_id == CHIP_ID_YUKON_EC_U || |
3333 | hw->chip_id == CHIP_ID_YUKON_EX || | ||
3334 | hw->chip_id == CHIP_ID_YUKON_SUPR) { | ||
3335 | u16 pg; | ||
3331 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 3336 | pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
3332 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | 3337 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); |
3333 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
3334 | on ? (PHY_M_LEDC_LOS_CTRL(1) | | ||
3335 | PHY_M_LEDC_INIT_CTRL(7) | | ||
3336 | PHY_M_LEDC_STA1_CTRL(7) | | ||
3337 | PHY_M_LEDC_STA0_CTRL(7)) | ||
3338 | : 0); | ||
3339 | 3338 | ||
3340 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | 3339 | switch (mode) { |
3341 | break; | 3340 | case MO_LED_OFF: |
3341 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
3342 | PHY_M_LEDC_LOS_CTRL(8) | | ||
3343 | PHY_M_LEDC_INIT_CTRL(8) | | ||
3344 | PHY_M_LEDC_STA1_CTRL(8) | | ||
3345 | PHY_M_LEDC_STA0_CTRL(8)); | ||
3346 | break; | ||
3347 | case MO_LED_ON: | ||
3348 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
3349 | PHY_M_LEDC_LOS_CTRL(9) | | ||
3350 | PHY_M_LEDC_INIT_CTRL(9) | | ||
3351 | PHY_M_LEDC_STA1_CTRL(9) | | ||
3352 | PHY_M_LEDC_STA0_CTRL(9)); | ||
3353 | break; | ||
3354 | case MO_LED_BLINK: | ||
3355 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
3356 | PHY_M_LEDC_LOS_CTRL(0xa) | | ||
3357 | PHY_M_LEDC_INIT_CTRL(0xa) | | ||
3358 | PHY_M_LEDC_STA1_CTRL(0xa) | | ||
3359 | PHY_M_LEDC_STA0_CTRL(0xa)); | ||
3360 | break; | ||
3361 | case MO_LED_NORM: | ||
3362 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, | ||
3363 | PHY_M_LEDC_LOS_CTRL(1) | | ||
3364 | PHY_M_LEDC_INIT_CTRL(8) | | ||
3365 | PHY_M_LEDC_STA1_CTRL(7) | | ||
3366 | PHY_M_LEDC_STA0_CTRL(7)); | ||
3367 | } | ||
3342 | 3368 | ||
3343 | default: | 3369 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); |
3344 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); | 3370 | } else |
3345 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, | 3371 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, |
3346 | on ? PHY_M_LED_ALL : 0); | 3372 | PHY_M_LED_MO_DUP(mode) | |
3347 | } | 3373 | PHY_M_LED_MO_10(mode) | |
3374 | PHY_M_LED_MO_100(mode) | | ||
3375 | PHY_M_LED_MO_1000(mode) | | ||
3376 | PHY_M_LED_MO_RX(mode) | | ||
3377 | PHY_M_LED_MO_TX(mode)); | ||
3378 | |||
3379 | spin_unlock_bh(&sky2->phy_lock); | ||
3348 | } | 3380 | } |
3349 | 3381 | ||
3350 | /* blink LED's for finding board */ | 3382 | /* blink LED's for finding board */ |
3351 | static int sky2_phys_id(struct net_device *dev, u32 data) | 3383 | static int sky2_phys_id(struct net_device *dev, u32 data) |
3352 | { | 3384 | { |
3353 | struct sky2_port *sky2 = netdev_priv(dev); | 3385 | struct sky2_port *sky2 = netdev_priv(dev); |
3354 | struct sky2_hw *hw = sky2->hw; | 3386 | unsigned int i; |
3355 | unsigned port = sky2->port; | ||
3356 | u16 ledctrl, ledover = 0; | ||
3357 | long ms; | ||
3358 | int interrupted; | ||
3359 | int onoff = 1; | ||
3360 | 3387 | ||
3361 | if (!data || data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)) | 3388 | if (data == 0) |
3362 | ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT); | 3389 | data = UINT_MAX; |
3363 | else | ||
3364 | ms = data * 1000; | ||
3365 | |||
3366 | /* save initial values */ | ||
3367 | spin_lock_bh(&sky2->phy_lock); | ||
3368 | if (hw->chip_id == CHIP_ID_YUKON_XL) { | ||
3369 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | ||
3370 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | ||
3371 | ledctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); | ||
3372 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | ||
3373 | } else { | ||
3374 | ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL); | ||
3375 | ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER); | ||
3376 | } | ||
3377 | |||
3378 | interrupted = 0; | ||
3379 | while (!interrupted && ms > 0) { | ||
3380 | sky2_led(hw, port, onoff); | ||
3381 | onoff = !onoff; | ||
3382 | |||
3383 | spin_unlock_bh(&sky2->phy_lock); | ||
3384 | interrupted = msleep_interruptible(250); | ||
3385 | spin_lock_bh(&sky2->phy_lock); | ||
3386 | |||
3387 | ms -= 250; | ||
3388 | } | ||
3389 | 3390 | ||
3390 | /* resume regularly scheduled programming */ | 3391 | for (i = 0; i < data; i++) { |
3391 | if (hw->chip_id == CHIP_ID_YUKON_XL) { | 3392 | sky2_led(sky2, MO_LED_ON); |
3392 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 3393 | if (msleep_interruptible(500)) |
3393 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | 3394 | break; |
3394 | gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ledctrl); | 3395 | sky2_led(sky2, MO_LED_OFF); |
3395 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg); | 3396 | if (msleep_interruptible(500)) |
3396 | } else { | 3397 | break; |
3397 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | ||
3398 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); | ||
3399 | } | 3398 | } |
3400 | spin_unlock_bh(&sky2->phy_lock); | 3399 | sky2_led(sky2, MO_LED_NORM); |
3401 | 3400 | ||
3402 | return 0; | 3401 | return 0; |
3403 | } | 3402 | } |
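The rewritten sky2_phys_id() no longer saves and restores raw LED register contents; it alternates MO_LED_ON and MO_LED_OFF once per second for the requested number of seconds (0 meaning "until interrupted") and then drops back to MO_LED_NORM. A sketch of that loop structure, with sleeps and prints and with the interruptible-sleep early exit omitted:

#include <stdio.h>
#include <unistd.h>
#include <limits.h>

enum led_mode { MO_LED_NORM, MO_LED_BLINK, MO_LED_OFF, MO_LED_ON };

static void set_led(enum led_mode mode) { printf("LED mode %d\n", mode); }

static int phys_id_sketch(unsigned int seconds)
{
    unsigned int i;

    if (seconds == 0)
        seconds = UINT_MAX;              /* blink until the user interrupts */

    for (i = 0; i < seconds; i++) {
        set_led(MO_LED_ON);
        usleep(500 * 1000);
        set_led(MO_LED_OFF);
        usleep(500 * 1000);
    }
    set_led(MO_LED_NORM);                /* resume normal operation */
    return 0;
}

int main(void) { return phys_id_sketch(2); }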
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 5ab5c1c7c5aa..7bb3ba9bcbd8 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -1318,18 +1318,21 @@ enum { | |||
1318 | BLINK_670MS = 4,/* 670 ms */ | 1318 | BLINK_670MS = 4,/* 670 ms */ |
1319 | }; | 1319 | }; |
1320 | 1320 | ||
1321 | /**** PHY_MARV_LED_OVER 16 bit r/w LED control */ | 1321 | /***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ |
1322 | enum { | 1322 | #define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ |
1323 | PHY_M_LED_MO_DUP = 3<<10,/* Bit 11..10: Duplex */ | 1323 | |
1324 | PHY_M_LED_MO_10 = 3<<8, /* Bit 9.. 8: Link 10 */ | 1324 | #define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ |
1325 | PHY_M_LED_MO_100 = 3<<6, /* Bit 7.. 6: Link 100 */ | 1325 | #define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ |
1326 | PHY_M_LED_MO_1000 = 3<<4, /* Bit 5.. 4: Link 1000 */ | 1326 | #define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ |
1327 | PHY_M_LED_MO_RX = 3<<2, /* Bit 3.. 2: Rx */ | 1327 | #define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ |
1328 | PHY_M_LED_MO_TX = 3<<0, /* Bit 1.. 0: Tx */ | 1328 | #define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ |
1329 | 1329 | #define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ | |
1330 | PHY_M_LED_ALL = PHY_M_LED_MO_DUP | PHY_M_LED_MO_10 | 1330 | |
1331 | | PHY_M_LED_MO_100 | PHY_M_LED_MO_1000 | 1331 | enum led_mode { |
1332 | | PHY_M_LED_MO_RX, | 1332 | MO_LED_NORM = 0, |
1333 | MO_LED_BLINK = 1, | ||
1334 | MO_LED_OFF = 2, | ||
1335 | MO_LED_ON = 3, | ||
1333 | }; | 1336 | }; |
1334 | 1337 | ||
1335 | /***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ | 1338 | /***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ |
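The sky2.h hunk turns the fixed PHY_M_LED_MO_* masks into parameterised macros: the manual LED override register packs one 2-bit mode field per LED (Tx at bits 1:0, Rx at 3:2, up to duplex at 11:10), and the new macros shift an enum led_mode value into the right field. A compilable sketch of how a full override word is built from one mode, as the non-PHY-control branch of sky2_led() does:

#include <stdio.h>

enum led_mode { MO_LED_NORM = 0, MO_LED_BLINK = 1, MO_LED_OFF = 2, MO_LED_ON = 3 };

#define PHY_M_LED_MO_DUP(x)   ((x) << 10)   /* Bit 11..10: Duplex */
#define PHY_M_LED_MO_10(x)    ((x) << 8)    /* Bit  9.. 8: Link 10 */
#define PHY_M_LED_MO_100(x)   ((x) << 6)    /* Bit  7.. 6: Link 100 */
#define PHY_M_LED_MO_1000(x)  ((x) << 4)    /* Bit  5.. 4: Link 1000 */
#define PHY_M_LED_MO_RX(x)    ((x) << 2)    /* Bit  3.. 2: Rx */
#define PHY_M_LED_MO_TX(x)    ((x) << 0)    /* Bit  1.. 0: Tx */

int main(void)
{
    /* Force every LED off. */
    unsigned int ledover = PHY_M_LED_MO_DUP(MO_LED_OFF)  | PHY_M_LED_MO_10(MO_LED_OFF) |
                           PHY_M_LED_MO_100(MO_LED_OFF)  | PHY_M_LED_MO_1000(MO_LED_OFF) |
                           PHY_M_LED_MO_RX(MO_LED_OFF)   | PHY_M_LED_MO_TX(MO_LED_OFF);

    printf("override word: 0x%04x\n", ledover);   /* prints 0x0aaa */
    return 0;
}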
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 3af5b92b48c8..0166407d7061 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -1400,7 +1400,7 @@ static void TLan_SetMulticastList( struct net_device *dev ) | |||
1400 | * | 1400 | * |
1401 | **************************************************************/ | 1401 | **************************************************************/ |
1402 | 1402 | ||
1403 | u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int ) | 1403 | static u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int ) |
1404 | { | 1404 | { |
1405 | /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */ | 1405 | /* printk( "TLAN: Invalid interrupt on %s.\n", dev->name ); */ |
1406 | return 0; | 1406 | return 0; |
@@ -1432,7 +1432,7 @@ u32 TLan_HandleInvalid( struct net_device *dev, u16 host_int ) | |||
1432 | * | 1432 | * |
1433 | **************************************************************/ | 1433 | **************************************************************/ |
1434 | 1434 | ||
1435 | u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) | 1435 | static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) |
1436 | { | 1436 | { |
1437 | TLanPrivateInfo *priv = netdev_priv(dev); | 1437 | TLanPrivateInfo *priv = netdev_priv(dev); |
1438 | int eoc = 0; | 1438 | int eoc = 0; |
@@ -1518,7 +1518,7 @@ u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) | |||
1518 | * | 1518 | * |
1519 | **************************************************************/ | 1519 | **************************************************************/ |
1520 | 1520 | ||
1521 | u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) | 1521 | static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) |
1522 | { | 1522 | { |
1523 | TLan_ReadAndClearStats( dev, TLAN_RECORD ); | 1523 | TLan_ReadAndClearStats( dev, TLAN_RECORD ); |
1524 | 1524 | ||
@@ -1554,7 +1554,7 @@ u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) | |||
1554 | * | 1554 | * |
1555 | **************************************************************/ | 1555 | **************************************************************/ |
1556 | 1556 | ||
1557 | u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | 1557 | static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) |
1558 | { | 1558 | { |
1559 | TLanPrivateInfo *priv = netdev_priv(dev); | 1559 | TLanPrivateInfo *priv = netdev_priv(dev); |
1560 | u32 ack = 0; | 1560 | u32 ack = 0; |
@@ -1689,7 +1689,7 @@ u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) | |||
1689 | * | 1689 | * |
1690 | **************************************************************/ | 1690 | **************************************************************/ |
1691 | 1691 | ||
1692 | u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) | 1692 | static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) |
1693 | { | 1693 | { |
1694 | printk( "TLAN: Test interrupt on %s.\n", dev->name ); | 1694 | printk( "TLAN: Test interrupt on %s.\n", dev->name ); |
1695 | return 1; | 1695 | return 1; |
@@ -1719,7 +1719,7 @@ u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) | |||
1719 | * | 1719 | * |
1720 | **************************************************************/ | 1720 | **************************************************************/ |
1721 | 1721 | ||
1722 | u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) | 1722 | static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) |
1723 | { | 1723 | { |
1724 | TLanPrivateInfo *priv = netdev_priv(dev); | 1724 | TLanPrivateInfo *priv = netdev_priv(dev); |
1725 | TLanList *head_list; | 1725 | TLanList *head_list; |
@@ -1767,7 +1767,7 @@ u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) | |||
1767 | * | 1767 | * |
1768 | **************************************************************/ | 1768 | **************************************************************/ |
1769 | 1769 | ||
1770 | u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) | 1770 | static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) |
1771 | { | 1771 | { |
1772 | TLanPrivateInfo *priv = netdev_priv(dev); | 1772 | TLanPrivateInfo *priv = netdev_priv(dev); |
1773 | u32 ack; | 1773 | u32 ack; |
@@ -1842,7 +1842,7 @@ u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) | |||
1842 | * | 1842 | * |
1843 | **************************************************************/ | 1843 | **************************************************************/ |
1844 | 1844 | ||
1845 | u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) | 1845 | static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) |
1846 | { | 1846 | { |
1847 | TLanPrivateInfo *priv = netdev_priv(dev); | 1847 | TLanPrivateInfo *priv = netdev_priv(dev); |
1848 | dma_addr_t head_list_phys; | 1848 | dma_addr_t head_list_phys; |
@@ -1902,7 +1902,7 @@ u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) | |||
1902 | * | 1902 | * |
1903 | **************************************************************/ | 1903 | **************************************************************/ |
1904 | 1904 | ||
1905 | void TLan_Timer( unsigned long data ) | 1905 | static void TLan_Timer( unsigned long data ) |
1906 | { | 1906 | { |
1907 | struct net_device *dev = (struct net_device *) data; | 1907 | struct net_device *dev = (struct net_device *) data; |
1908 | TLanPrivateInfo *priv = netdev_priv(dev); | 1908 | TLanPrivateInfo *priv = netdev_priv(dev); |
@@ -1983,7 +1983,7 @@ void TLan_Timer( unsigned long data ) | |||
1983 | * | 1983 | * |
1984 | **************************************************************/ | 1984 | **************************************************************/ |
1985 | 1985 | ||
1986 | void TLan_ResetLists( struct net_device *dev ) | 1986 | static void TLan_ResetLists( struct net_device *dev ) |
1987 | { | 1987 | { |
1988 | TLanPrivateInfo *priv = netdev_priv(dev); | 1988 | TLanPrivateInfo *priv = netdev_priv(dev); |
1989 | int i; | 1989 | int i; |
@@ -2043,7 +2043,7 @@ void TLan_ResetLists( struct net_device *dev ) | |||
2043 | } /* TLan_ResetLists */ | 2043 | } /* TLan_ResetLists */ |
2044 | 2044 | ||
2045 | 2045 | ||
2046 | void TLan_FreeLists( struct net_device *dev ) | 2046 | static void TLan_FreeLists( struct net_device *dev ) |
2047 | { | 2047 | { |
2048 | TLanPrivateInfo *priv = netdev_priv(dev); | 2048 | TLanPrivateInfo *priv = netdev_priv(dev); |
2049 | int i; | 2049 | int i; |
@@ -2092,7 +2092,7 @@ void TLan_FreeLists( struct net_device *dev ) | |||
2092 | * | 2092 | * |
2093 | **************************************************************/ | 2093 | **************************************************************/ |
2094 | 2094 | ||
2095 | void TLan_PrintDio( u16 io_base ) | 2095 | static void TLan_PrintDio( u16 io_base ) |
2096 | { | 2096 | { |
2097 | u32 data0, data1; | 2097 | u32 data0, data1; |
2098 | int i; | 2098 | int i; |
@@ -2127,7 +2127,7 @@ void TLan_PrintDio( u16 io_base ) | |||
2127 | * | 2127 | * |
2128 | **************************************************************/ | 2128 | **************************************************************/ |
2129 | 2129 | ||
2130 | void TLan_PrintList( TLanList *list, char *type, int num) | 2130 | static void TLan_PrintList( TLanList *list, char *type, int num) |
2131 | { | 2131 | { |
2132 | int i; | 2132 | int i; |
2133 | 2133 | ||
@@ -2163,7 +2163,7 @@ void TLan_PrintList( TLanList *list, char *type, int num) | |||
2163 | * | 2163 | * |
2164 | **************************************************************/ | 2164 | **************************************************************/ |
2165 | 2165 | ||
2166 | void TLan_ReadAndClearStats( struct net_device *dev, int record ) | 2166 | static void TLan_ReadAndClearStats( struct net_device *dev, int record ) |
2167 | { | 2167 | { |
2168 | TLanPrivateInfo *priv = netdev_priv(dev); | 2168 | TLanPrivateInfo *priv = netdev_priv(dev); |
2169 | u32 tx_good, tx_under; | 2169 | u32 tx_good, tx_under; |
@@ -2238,7 +2238,7 @@ void TLan_ReadAndClearStats( struct net_device *dev, int record ) | |||
2238 | * | 2238 | * |
2239 | **************************************************************/ | 2239 | **************************************************************/ |
2240 | 2240 | ||
2241 | void | 2241 | static void |
2242 | TLan_ResetAdapter( struct net_device *dev ) | 2242 | TLan_ResetAdapter( struct net_device *dev ) |
2243 | { | 2243 | { |
2244 | TLanPrivateInfo *priv = netdev_priv(dev); | 2244 | TLanPrivateInfo *priv = netdev_priv(dev); |
@@ -2324,7 +2324,7 @@ TLan_ResetAdapter( struct net_device *dev ) | |||
2324 | 2324 | ||
2325 | 2325 | ||
2326 | 2326 | ||
2327 | void | 2327 | static void |
2328 | TLan_FinishReset( struct net_device *dev ) | 2328 | TLan_FinishReset( struct net_device *dev ) |
2329 | { | 2329 | { |
2330 | TLanPrivateInfo *priv = netdev_priv(dev); | 2330 | TLanPrivateInfo *priv = netdev_priv(dev); |
@@ -2448,7 +2448,7 @@ TLan_FinishReset( struct net_device *dev ) | |||
2448 | * | 2448 | * |
2449 | **************************************************************/ | 2449 | **************************************************************/ |
2450 | 2450 | ||
2451 | void TLan_SetMac( struct net_device *dev, int areg, char *mac ) | 2451 | static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) |
2452 | { | 2452 | { |
2453 | int i; | 2453 | int i; |
2454 | 2454 | ||
@@ -2490,7 +2490,7 @@ void TLan_SetMac( struct net_device *dev, int areg, char *mac ) | |||
2490 | * | 2490 | * |
2491 | ********************************************************************/ | 2491 | ********************************************************************/ |
2492 | 2492 | ||
2493 | void TLan_PhyPrint( struct net_device *dev ) | 2493 | static void TLan_PhyPrint( struct net_device *dev ) |
2494 | { | 2494 | { |
2495 | TLanPrivateInfo *priv = netdev_priv(dev); | 2495 | TLanPrivateInfo *priv = netdev_priv(dev); |
2496 | u16 i, data0, data1, data2, data3, phy; | 2496 | u16 i, data0, data1, data2, data3, phy; |
@@ -2539,7 +2539,7 @@ void TLan_PhyPrint( struct net_device *dev ) | |||
2539 | * | 2539 | * |
2540 | ********************************************************************/ | 2540 | ********************************************************************/ |
2541 | 2541 | ||
2542 | void TLan_PhyDetect( struct net_device *dev ) | 2542 | static void TLan_PhyDetect( struct net_device *dev ) |
2543 | { | 2543 | { |
2544 | TLanPrivateInfo *priv = netdev_priv(dev); | 2544 | TLanPrivateInfo *priv = netdev_priv(dev); |
2545 | u16 control; | 2545 | u16 control; |
@@ -2586,7 +2586,7 @@ void TLan_PhyDetect( struct net_device *dev ) | |||
2586 | 2586 | ||
2587 | 2587 | ||
2588 | 2588 | ||
2589 | void TLan_PhyPowerDown( struct net_device *dev ) | 2589 | static void TLan_PhyPowerDown( struct net_device *dev ) |
2590 | { | 2590 | { |
2591 | TLanPrivateInfo *priv = netdev_priv(dev); | 2591 | TLanPrivateInfo *priv = netdev_priv(dev); |
2592 | u16 value; | 2592 | u16 value; |
@@ -2611,7 +2611,7 @@ void TLan_PhyPowerDown( struct net_device *dev ) | |||
2611 | 2611 | ||
2612 | 2612 | ||
2613 | 2613 | ||
2614 | void TLan_PhyPowerUp( struct net_device *dev ) | 2614 | static void TLan_PhyPowerUp( struct net_device *dev ) |
2615 | { | 2615 | { |
2616 | TLanPrivateInfo *priv = netdev_priv(dev); | 2616 | TLanPrivateInfo *priv = netdev_priv(dev); |
2617 | u16 value; | 2617 | u16 value; |
@@ -2632,7 +2632,7 @@ void TLan_PhyPowerUp( struct net_device *dev ) | |||
2632 | 2632 | ||
2633 | 2633 | ||
2634 | 2634 | ||
2635 | void TLan_PhyReset( struct net_device *dev ) | 2635 | static void TLan_PhyReset( struct net_device *dev ) |
2636 | { | 2636 | { |
2637 | TLanPrivateInfo *priv = netdev_priv(dev); | 2637 | TLanPrivateInfo *priv = netdev_priv(dev); |
2638 | u16 phy; | 2638 | u16 phy; |
@@ -2660,7 +2660,7 @@ void TLan_PhyReset( struct net_device *dev ) | |||
2660 | 2660 | ||
2661 | 2661 | ||
2662 | 2662 | ||
2663 | void TLan_PhyStartLink( struct net_device *dev ) | 2663 | static void TLan_PhyStartLink( struct net_device *dev ) |
2664 | { | 2664 | { |
2665 | TLanPrivateInfo *priv = netdev_priv(dev); | 2665 | TLanPrivateInfo *priv = netdev_priv(dev); |
2666 | u16 ability; | 2666 | u16 ability; |
@@ -2747,7 +2747,7 @@ void TLan_PhyStartLink( struct net_device *dev ) | |||
2747 | 2747 | ||
2748 | 2748 | ||
2749 | 2749 | ||
2750 | void TLan_PhyFinishAutoNeg( struct net_device *dev ) | 2750 | static void TLan_PhyFinishAutoNeg( struct net_device *dev ) |
2751 | { | 2751 | { |
2752 | TLanPrivateInfo *priv = netdev_priv(dev); | 2752 | TLanPrivateInfo *priv = netdev_priv(dev); |
2753 | u16 an_adv; | 2753 | u16 an_adv; |
@@ -2903,7 +2903,7 @@ void TLan_PhyMonitor( struct net_device *dev ) | |||
2903 | * | 2903 | * |
2904 | **************************************************************/ | 2904 | **************************************************************/ |
2905 | 2905 | ||
2906 | int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) | 2906 | static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) |
2907 | { | 2907 | { |
2908 | u8 nack; | 2908 | u8 nack; |
2909 | u16 sio, tmp; | 2909 | u16 sio, tmp; |
@@ -2993,7 +2993,7 @@ int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) | |||
2993 | * | 2993 | * |
2994 | **************************************************************/ | 2994 | **************************************************************/ |
2995 | 2995 | ||
2996 | void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) | 2996 | static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) |
2997 | { | 2997 | { |
2998 | u16 sio; | 2998 | u16 sio; |
2999 | u32 i; | 2999 | u32 i; |
@@ -3035,7 +3035,7 @@ void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) | |||
3035 | * | 3035 | * |
3036 | **************************************************************/ | 3036 | **************************************************************/ |
3037 | 3037 | ||
3038 | void TLan_MiiSync( u16 base_port ) | 3038 | static void TLan_MiiSync( u16 base_port ) |
3039 | { | 3039 | { |
3040 | int i; | 3040 | int i; |
3041 | u16 sio; | 3041 | u16 sio; |
@@ -3074,7 +3074,7 @@ void TLan_MiiSync( u16 base_port ) | |||
3074 | * | 3074 | * |
3075 | **************************************************************/ | 3075 | **************************************************************/ |
3076 | 3076 | ||
3077 | void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) | 3077 | static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) |
3078 | { | 3078 | { |
3079 | u16 sio; | 3079 | u16 sio; |
3080 | int minten; | 3080 | int minten; |
@@ -3144,7 +3144,7 @@ void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) | |||
3144 | * | 3144 | * |
3145 | **************************************************************/ | 3145 | **************************************************************/ |
3146 | 3146 | ||
3147 | void TLan_EeSendStart( u16 io_base ) | 3147 | static void TLan_EeSendStart( u16 io_base ) |
3148 | { | 3148 | { |
3149 | u16 sio; | 3149 | u16 sio; |
3150 | 3150 | ||
@@ -3184,7 +3184,7 @@ void TLan_EeSendStart( u16 io_base ) | |||
3184 | * | 3184 | * |
3185 | **************************************************************/ | 3185 | **************************************************************/ |
3186 | 3186 | ||
3187 | int TLan_EeSendByte( u16 io_base, u8 data, int stop ) | 3187 | static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) |
3188 | { | 3188 | { |
3189 | int err; | 3189 | int err; |
3190 | u8 place; | 3190 | u8 place; |
@@ -3245,7 +3245,7 @@ int TLan_EeSendByte( u16 io_base, u8 data, int stop ) | |||
3245 | * | 3245 | * |
3246 | **************************************************************/ | 3246 | **************************************************************/ |
3247 | 3247 | ||
3248 | void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) | 3248 | static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) |
3249 | { | 3249 | { |
3250 | u8 place; | 3250 | u8 place; |
3251 | u16 sio; | 3251 | u16 sio; |
@@ -3303,7 +3303,7 @@ void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) | |||
3303 | * | 3303 | * |
3304 | **************************************************************/ | 3304 | **************************************************************/ |
3305 | 3305 | ||
3306 | int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) | 3306 | static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) |
3307 | { | 3307 | { |
3308 | int err; | 3308 | int err; |
3309 | TLanPrivateInfo *priv = netdev_priv(dev); | 3309 | TLanPrivateInfo *priv = netdev_priv(dev); |
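The long run of TLan driver hunks above is one mechanical change: every helper that is only called from within tlan.c gains the static qualifier, so its symbol stays file-local instead of leaking into the kernel's global namespace where it could clash with another driver. In miniature (the function name is illustrative, not one of the driver's):

    /* 'static' confines the symbol to this translation unit. */
    static void example_helper(void)
    {
            /* ... file-local implementation ... */
    }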
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index a7afeea156bd..a59c1f224aa8 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
@@ -482,9 +482,11 @@ static void uli526x_init(struct net_device *dev) | |||
482 | struct uli526x_board_info *db = netdev_priv(dev); | 482 | struct uli526x_board_info *db = netdev_priv(dev); |
483 | unsigned long ioaddr = db->ioaddr; | 483 | unsigned long ioaddr = db->ioaddr; |
484 | u8 phy_tmp; | 484 | u8 phy_tmp; |
485 | u8 timeout; | ||
485 | u16 phy_value; | 486 | u16 phy_value; |
486 | u16 phy_reg_reset; | 487 | u16 phy_reg_reset; |
487 | 488 | ||
489 | |||
488 | ULI526X_DBUG(0, "uli526x_init()", 0); | 490 | ULI526X_DBUG(0, "uli526x_init()", 0); |
489 | 491 | ||
490 | /* Reset M526x MAC controller */ | 492 | /* Reset M526x MAC controller */ |
@@ -509,11 +511,19 @@ static void uli526x_init(struct net_device *dev) | |||
509 | /* Parser SROM and media mode */ | 511 | /* Parser SROM and media mode */ |
510 | db->media_mode = uli526x_media_mode; | 512 | db->media_mode = uli526x_media_mode; |
511 | 513 | ||
512 | /* Phyxcer capability setting */ | 514 | /* phyxcer capability setting */ |
513 | phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); | 515 | phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); |
514 | phy_reg_reset = (phy_reg_reset | 0x8000); | 516 | phy_reg_reset = (phy_reg_reset | 0x8000); |
515 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); | 517 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); |
518 | |||
519 | /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management | ||
520 | * functions") or phy data sheet for details on phy reset | ||
521 | */ | ||
516 | udelay(500); | 522 | udelay(500); |
523 | timeout = 10; | ||
524 | while (timeout-- && | ||
525 | phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) | ||
526 | udelay(100); | ||
517 | 527 | ||
518 | /* Process Phyxcer Media Mode */ | 528 | /* Process Phyxcer Media Mode */ |
519 | uli526x_set_phyxcer(db); | 529 | uli526x_set_phyxcer(db); |
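The uli526x hunk above replaces a bare udelay(500) after the PHY soft reset with a bounded poll: bit 15 of MII register 0 (BMCR) is the self-clearing reset bit, so the loop simply waits for the PHY to clear it, giving up after ten 100 us polls. The same pattern in isolation, with a hypothetical mii_read() standing in for the driver's phy_read() helper:

    /* Wait for a self-clearing PHY reset bit rather than trusting one fixed delay.
     * mii_read() is a placeholder for the driver's own MII read routine. */
    static void wait_phy_reset_done(void)
    {
            int timeout = 10;

            /* BMCR (MII register 0) bit 15 clears itself once the reset completes */
            while (timeout-- && (mii_read(0) & 0x8000))
                    udelay(100);            /* bounded: at most ~1 ms of polling */
    }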
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 038c1ef94d2e..7b816a032957 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -663,7 +663,11 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file, | |||
663 | case SIOCSIFHWADDR: | 663 | case SIOCSIFHWADDR: |
664 | { | 664 | { |
665 | /* try to set the actual net device's hw address */ | 665 | /* try to set the actual net device's hw address */ |
666 | int ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); | 666 | int ret; |
667 | |||
668 | rtnl_lock(); | ||
669 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); | ||
670 | rtnl_unlock(); | ||
667 | 671 | ||
668 | if (ret == 0) { | 672 | if (ret == 0) { |
669 | /** Set the character device's hardware address. This is used when | 673 | /** Set the character device's hardware address. This is used when |
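The tun.c change encodes a locking rule rather than new behaviour: dev_set_mac_address() is expected to be called with the RTNL mutex held, which the original ioctl path did not do. A minimal sketch of the corrected pattern, assuming the 2.6-era two-argument signature:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    /* dev_set_mac_address() must run under the RTNL mutex. */
    static int set_mac_under_rtnl(struct net_device *dev, struct sockaddr *sa)
    {
            int ret;

            rtnl_lock();
            ret = dev_set_mac_address(dev, sa);
            rtnl_unlock();

            return ret;
    }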
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 7c851b1e6daa..8c9d6ae2bb31 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -1893,7 +1893,7 @@ static void rhine_shutdown (struct pci_dev *pdev) | |||
1893 | 1893 | ||
1894 | /* Make sure we use pattern 0, 1 and not 4, 5 */ | 1894 | /* Make sure we use pattern 0, 1 and not 4, 5 */ |
1895 | if (rp->quirks & rq6patterns) | 1895 | if (rp->quirks & rq6patterns) |
1896 | iowrite8(0x04, ioaddr + 0xA7); | 1896 | iowrite8(0x04, ioaddr + WOLcgClr); |
1897 | 1897 | ||
1898 | if (rp->wolopts & WAKE_MAGIC) { | 1898 | if (rp->wolopts & WAKE_MAGIC) { |
1899 | iowrite8(WOLmagic, ioaddr + WOLcrSet); | 1899 | iowrite8(WOLmagic, ioaddr + WOLcrSet); |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fdc23678117b..19fd4cb0ddf8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -361,6 +361,7 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
361 | netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); | 361 | netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); |
362 | vi->dev = dev; | 362 | vi->dev = dev; |
363 | vi->vdev = vdev; | 363 | vi->vdev = vdev; |
364 | vdev->priv = vi; | ||
364 | 365 | ||
365 | /* We expect two virtqueues, receive then send. */ | 366 | /* We expect two virtqueues, receive then send. */ |
366 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); | 367 | vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); |
@@ -395,7 +396,6 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
395 | } | 396 | } |
396 | 397 | ||
397 | pr_debug("virtnet: registered device %s\n", dev->name); | 398 | pr_debug("virtnet: registered device %s\n", dev->name); |
398 | vdev->priv = vi; | ||
399 | return 0; | 399 | return 0; |
400 | 400 | ||
401 | unregister: | 401 | unregister: |
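The virtio_net hunk is purely an ordering fix: find_vq() wires up the receive/transmit callbacks, and those callbacks recover the driver state through vdev->priv, so the pointer must be published before the first virtqueue is set up rather than just before probe returns. The corrected ordering, trimmed from the probe path:

    vi->dev  = dev;
    vi->vdev = vdev;
    vdev->priv = vi;        /* publish state before any callback can run */

    /* skb_recv_done() dereferences vdev->priv, so the line above must come first */
    vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);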
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index 1a2141dabdc7..8bc4bc4c330e 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig | |||
@@ -32,6 +32,7 @@ config B43_PCI_AUTOSELECT | |||
32 | bool | 32 | bool |
33 | depends on B43 && SSB_PCIHOST_POSSIBLE | 33 | depends on B43 && SSB_PCIHOST_POSSIBLE |
34 | select SSB_PCIHOST | 34 | select SSB_PCIHOST |
35 | select SSB_B43_PCI_BRIDGE | ||
35 | default y | 36 | default y |
36 | 37 | ||
37 | # Auto-select SSB PCICORE driver, if possible | 38 | # Auto-select SSB PCICORE driver, if possible |
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig index 6745579ba96d..13c65faf0247 100644 --- a/drivers/net/wireless/b43legacy/Kconfig +++ b/drivers/net/wireless/b43legacy/Kconfig | |||
@@ -25,6 +25,7 @@ config B43LEGACY_PCI_AUTOSELECT | |||
25 | bool | 25 | bool |
26 | depends on B43LEGACY && SSB_PCIHOST_POSSIBLE | 26 | depends on B43LEGACY && SSB_PCIHOST_POSSIBLE |
27 | select SSB_PCIHOST | 27 | select SSB_PCIHOST |
28 | select SSB_B43_PCI_BRIDGE | ||
28 | default y | 29 | default y |
29 | 30 | ||
30 | # Auto-select SSB PCICORE driver, if possible | 31 | # Auto-select SSB PCICORE driver, if possible |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index c39de422e220..5f3f34e1dbfd 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -3829,7 +3829,7 @@ static void b43legacy_print_driverinfo(void) | |||
3829 | #ifdef CONFIG_B43LEGACY_DMA | 3829 | #ifdef CONFIG_B43LEGACY_DMA |
3830 | feat_dma = "D"; | 3830 | feat_dma = "D"; |
3831 | #endif | 3831 | #endif |
3832 | printk(KERN_INFO "Broadcom 43xx driver loaded " | 3832 | printk(KERN_INFO "Broadcom 43xx-legacy driver loaded " |
3833 | "[ Features: %s%s%s%s%s, Firmware-ID: " | 3833 | "[ Features: %s%s%s%s%s, Firmware-ID: " |
3834 | B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", | 3834 | B43legacy_SUPPORTED_FIRMWARE_ID " ]\n", |
3835 | feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma); | 3835 | feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma); |
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig index 0159701e8456..afb8f4305c24 100644 --- a/drivers/net/wireless/bcm43xx/Kconfig +++ b/drivers/net/wireless/bcm43xx/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config BCM43XX | 1 | config BCM43XX |
2 | tristate "Broadcom BCM43xx wireless support (DEPRECATED)" | 2 | tristate "Broadcom BCM43xx wireless support (DEPRECATED)" |
3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && EXPERIMENTAL | 3 | depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && (!SSB_B43_PCI_BRIDGE || SSB != y) && EXPERIMENTAL |
4 | select WIRELESS_EXT | 4 | select WIRELESS_EXT |
5 | select FW_LOADER | 5 | select FW_LOADER |
6 | select HW_RANDOM | 6 | select HW_RANDOM |
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index eab020338fde..b3c1acbcc655 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
@@ -1040,7 +1040,6 @@ int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, | |||
1040 | lbs_deb_leave(LBS_DEB_CMD); | 1040 | lbs_deb_leave(LBS_DEB_CMD); |
1041 | return ret; | 1041 | return ret; |
1042 | } | 1042 | } |
1043 | EXPORT_SYMBOL_GPL(lbs_mesh_access); | ||
1044 | 1043 | ||
1045 | int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan) | 1044 | int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan) |
1046 | { | 1045 | { |
@@ -1576,7 +1575,6 @@ done: | |||
1576 | lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); | 1575 | lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); |
1577 | return ret; | 1576 | return ret; |
1578 | } | 1577 | } |
1579 | EXPORT_SYMBOL_GPL(lbs_prepare_and_send_command); | ||
1580 | 1578 | ||
1581 | /** | 1579 | /** |
1582 | * @brief This function allocates the command buffer and link | 1580 | * @brief This function allocates the command buffer and link |
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index 159216a91903..bdc6a1cc2103 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c | |||
@@ -562,9 +562,7 @@ int lbs_process_rx_command(struct lbs_private *priv) | |||
562 | } | 562 | } |
563 | 563 | ||
564 | resp = (void *)priv->upld_buf; | 564 | resp = (void *)priv->upld_buf; |
565 | 565 | curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); | |
566 | curcmd = le16_to_cpu(resp->command); | ||
567 | |||
568 | respcmd = le16_to_cpu(resp->command); | 566 | respcmd = le16_to_cpu(resp->command); |
569 | result = le16_to_cpu(resp->result); | 567 | result = le16_to_cpu(resp->result); |
570 | 568 | ||
@@ -572,9 +570,9 @@ int lbs_process_rx_command(struct lbs_private *priv) | |||
572 | respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies); | 570 | respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies); |
573 | lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len); | 571 | lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len); |
574 | 572 | ||
575 | if (resp->seqnum != resp->seqnum) { | 573 | if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { |
576 | lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", | 574 | lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", |
577 | le16_to_cpu(resp->seqnum), le16_to_cpu(resp->seqnum)); | 575 | le16_to_cpu(resp->seqnum), le16_to_cpu(priv->cur_cmd->cmdbuf->seqnum)); |
578 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 576 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
579 | ret = -1; | 577 | ret = -1; |
580 | goto done; | 578 | goto done; |
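The cmdresp.c hunk closes two copy-and-paste slips: curcmd is now read from the command the driver actually issued (priv->cur_cmd->cmdbuf->command) instead of from the response, and the sequence-number check no longer compares resp->seqnum with itself, which made it a no-op. The intended shape of the check:

    /* Compare the response against the outstanding request; a value compared
     * with itself can never flag a mismatch. */
    if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
            /* reject the stray response */
    }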
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h index aaacd9bd6bd2..4e22341b4f3d 100644 --- a/drivers/net/wireless/libertas/decl.h +++ b/drivers/net/wireless/libertas/decl.h | |||
@@ -69,7 +69,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev); | |||
69 | int lbs_remove_card(struct lbs_private *priv); | 69 | int lbs_remove_card(struct lbs_private *priv); |
70 | int lbs_start_card(struct lbs_private *priv); | 70 | int lbs_start_card(struct lbs_private *priv); |
71 | int lbs_stop_card(struct lbs_private *priv); | 71 | int lbs_stop_card(struct lbs_private *priv); |
72 | int lbs_reset_device(struct lbs_private *priv); | ||
73 | void lbs_host_to_card_done(struct lbs_private *priv); | 72 | void lbs_host_to_card_done(struct lbs_private *priv); |
74 | 73 | ||
75 | int lbs_update_channel(struct lbs_private *priv); | 74 | int lbs_update_channel(struct lbs_private *priv); |
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 84fb49ca0fae..4d4e2f3b66ac 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -1351,8 +1351,6 @@ done: | |||
1351 | lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); | 1351 | lbs_deb_leave_args(LBS_DEB_MESH, "ret %d", ret); |
1352 | return ret; | 1352 | return ret; |
1353 | } | 1353 | } |
1354 | EXPORT_SYMBOL_GPL(lbs_add_mesh); | ||
1355 | |||
1356 | 1354 | ||
1357 | static void lbs_remove_mesh(struct lbs_private *priv) | 1355 | static void lbs_remove_mesh(struct lbs_private *priv) |
1358 | { | 1356 | { |
@@ -1372,7 +1370,6 @@ static void lbs_remove_mesh(struct lbs_private *priv) | |||
1372 | free_netdev(mesh_dev); | 1370 | free_netdev(mesh_dev); |
1373 | lbs_deb_leave(LBS_DEB_MESH); | 1371 | lbs_deb_leave(LBS_DEB_MESH); |
1374 | } | 1372 | } |
1375 | EXPORT_SYMBOL_GPL(lbs_remove_mesh); | ||
1376 | 1373 | ||
1377 | /** | 1374 | /** |
1378 | * @brief This function finds the CFP in | 1375 | * @brief This function finds the CFP in |
@@ -1458,20 +1455,6 @@ void lbs_interrupt(struct lbs_private *priv) | |||
1458 | } | 1455 | } |
1459 | EXPORT_SYMBOL_GPL(lbs_interrupt); | 1456 | EXPORT_SYMBOL_GPL(lbs_interrupt); |
1460 | 1457 | ||
1461 | int lbs_reset_device(struct lbs_private *priv) | ||
1462 | { | ||
1463 | int ret; | ||
1464 | |||
1465 | lbs_deb_enter(LBS_DEB_MAIN); | ||
1466 | ret = lbs_prepare_and_send_command(priv, CMD_802_11_RESET, | ||
1467 | CMD_ACT_HALT, 0, 0, NULL); | ||
1468 | msleep_interruptible(10); | ||
1469 | |||
1470 | lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret); | ||
1471 | return ret; | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(lbs_reset_device); | ||
1474 | |||
1475 | static int __init lbs_init_module(void) | 1458 | static int __init lbs_init_module(void) |
1476 | { | 1459 | { |
1477 | lbs_deb_enter(LBS_DEB_MAIN); | 1460 | lbs_deb_enter(LBS_DEB_MAIN); |
diff --git a/drivers/net/wireless/p54common.c b/drivers/net/wireless/p54common.c index 5cda49aff3a8..d191e055a788 100644 --- a/drivers/net/wireless/p54common.c +++ b/drivers/net/wireless/p54common.c | |||
@@ -166,18 +166,23 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
166 | struct p54_common *priv = dev->priv; | 166 | struct p54_common *priv = dev->priv; |
167 | struct eeprom_pda_wrap *wrap = NULL; | 167 | struct eeprom_pda_wrap *wrap = NULL; |
168 | struct pda_entry *entry; | 168 | struct pda_entry *entry; |
169 | int i = 0; | ||
170 | unsigned int data_len, entry_len; | 169 | unsigned int data_len, entry_len; |
171 | void *tmp; | 170 | void *tmp; |
172 | int err; | 171 | int err; |
172 | u8 *end = (u8 *)eeprom + len; | ||
173 | 173 | ||
174 | wrap = (struct eeprom_pda_wrap *) eeprom; | 174 | wrap = (struct eeprom_pda_wrap *) eeprom; |
175 | entry = (void *)wrap->data + wrap->len; | 175 | entry = (void *)wrap->data + le16_to_cpu(wrap->len); |
176 | i += 2; | 176 | |
177 | i += le16_to_cpu(entry->len)*2; | 177 | /* verify that at least the entry length/code fits */ |
178 | while (i < len) { | 178 | while ((u8 *)entry <= end - sizeof(*entry)) { |
179 | entry_len = le16_to_cpu(entry->len); | 179 | entry_len = le16_to_cpu(entry->len); |
180 | data_len = ((entry_len - 1) << 1); | 180 | data_len = ((entry_len - 1) << 1); |
181 | |||
182 | /* abort if entry exceeds whole structure */ | ||
183 | if ((u8 *)entry + sizeof(*entry) + data_len > end) | ||
184 | break; | ||
185 | |||
181 | switch (le16_to_cpu(entry->code)) { | 186 | switch (le16_to_cpu(entry->code)) { |
182 | case PDR_MAC_ADDRESS: | 187 | case PDR_MAC_ADDRESS: |
183 | SET_IEEE80211_PERM_ADDR(dev, entry->data); | 188 | SET_IEEE80211_PERM_ADDR(dev, entry->data); |
@@ -249,13 +254,12 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) | |||
249 | priv->version = *(u8 *)(entry->data + 1); | 254 | priv->version = *(u8 *)(entry->data + 1); |
250 | break; | 255 | break; |
251 | case PDR_END: | 256 | case PDR_END: |
252 | i = len; | 257 | /* make it overrun */ |
258 | entry_len = len; | ||
253 | break; | 259 | break; |
254 | } | 260 | } |
255 | 261 | ||
256 | entry = (void *)entry + (entry_len + 1)*2; | 262 | entry = (void *)entry + (entry_len + 1)*2; |
257 | i += 2; | ||
258 | i += entry_len*2; | ||
259 | } | 263 | } |
260 | 264 | ||
261 | if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { | 265 | if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) { |
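The p54common.c rewrite drops the fragile byte counter i in favour of pointer bounds checks: an entry header is only dereferenced when it fits before end, and its payload is only parsed when the declared length also fits. The same defensive walk over variable-length records, shown as standalone C (a generic length/code TLV layout, not the exact p54 PDA format):

    #include <stdint.h>
    #include <stddef.h>

    struct tlv {
            uint16_t len;     /* length in 16-bit words, including the code word */
            uint16_t code;
            uint8_t  data[];  /* (len - 1) * 2 bytes of payload */
    };

    /* Walk a buffer of TLVs without ever reading past buf + buflen. */
    void parse_tlvs(const uint8_t *buf, size_t buflen)
    {
            const uint8_t *end = buf + buflen;
            const struct tlv *entry = (const void *)buf;

            while ((const uint8_t *)entry + sizeof(*entry) <= end) {
                    size_t data_len;

                    if (entry->len < 1)
                            break;          /* malformed length field */
                    data_len = ((size_t)entry->len - 1) * 2;

                    /* abort if the declared payload would run past the buffer */
                    if ((const uint8_t *)entry + sizeof(*entry) + data_len > end)
                            break;

                    /* ... dispatch on entry->code here ... */

                    entry = (const void *)((const uint8_t *)entry
                                           + sizeof(*entry) + data_len);
            }
    }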
diff --git a/drivers/net/wireless/p54common.h b/drivers/net/wireless/p54common.h index a721334e20d9..b67ff34e26fe 100644 --- a/drivers/net/wireless/p54common.h +++ b/drivers/net/wireless/p54common.h | |||
@@ -53,10 +53,10 @@ struct pda_entry { | |||
53 | } __attribute__ ((packed)); | 53 | } __attribute__ ((packed)); |
54 | 54 | ||
55 | struct eeprom_pda_wrap { | 55 | struct eeprom_pda_wrap { |
56 | u32 magic; | 56 | __le32 magic; |
57 | u16 pad; | 57 | __le16 pad; |
58 | u16 len; | 58 | __le16 len; |
59 | u32 arm_opcode; | 59 | __le32 arm_opcode; |
60 | u8 data[0]; | 60 | u8 data[0]; |
61 | } __attribute__ ((packed)); | 61 | } __attribute__ ((packed)); |
62 | 62 | ||
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 8ce2ddf8024f..10b776c1adc5 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -228,9 +228,9 @@ struct NDIS_WLAN_BSSID_EX { | |||
228 | struct NDIS_802_11_SSID Ssid; | 228 | struct NDIS_802_11_SSID Ssid; |
229 | __le32 Privacy; | 229 | __le32 Privacy; |
230 | __le32 Rssi; | 230 | __le32 Rssi; |
231 | enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse; | 231 | __le32 NetworkTypeInUse; |
232 | struct NDIS_802_11_CONFIGURATION Configuration; | 232 | struct NDIS_802_11_CONFIGURATION Configuration; |
233 | enum NDIS_802_11_NETWORK_INFRASTRUCTURE InfrastructureMode; | 233 | __le32 InfrastructureMode; |
234 | u8 SupportedRates[NDIS_802_11_LENGTH_RATES_EX]; | 234 | u8 SupportedRates[NDIS_802_11_LENGTH_RATES_EX]; |
235 | __le32 IELength; | 235 | __le32 IELength; |
236 | u8 IEs[0]; | 236 | u8 IEs[0]; |
@@ -260,7 +260,7 @@ struct NDIS_802_11_KEY { | |||
260 | __le32 KeyLength; | 260 | __le32 KeyLength; |
261 | u8 Bssid[6]; | 261 | u8 Bssid[6]; |
262 | u8 Padding[6]; | 262 | u8 Padding[6]; |
263 | __le64 KeyRSC; | 263 | u8 KeyRSC[8]; |
264 | u8 KeyMaterial[32]; | 264 | u8 KeyMaterial[32]; |
265 | } __attribute__((packed)); | 265 | } __attribute__((packed)); |
266 | 266 | ||
@@ -279,11 +279,11 @@ struct RNDIS_CONFIG_PARAMETER_INFOBUFFER { | |||
279 | } __attribute__((packed)); | 279 | } __attribute__((packed)); |
280 | 280 | ||
281 | /* these have to match what is in wpa_supplicant */ | 281 | /* these have to match what is in wpa_supplicant */ |
282 | enum { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP } wpa_alg; | 282 | enum wpa_alg { WPA_ALG_NONE, WPA_ALG_WEP, WPA_ALG_TKIP, WPA_ALG_CCMP }; |
283 | enum { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, CIPHER_WEP104 } | 283 | enum wpa_cipher { CIPHER_NONE, CIPHER_WEP40, CIPHER_TKIP, CIPHER_CCMP, |
284 | wpa_cipher; | 284 | CIPHER_WEP104 }; |
285 | enum { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE, KEY_MGMT_802_1X_NO_WPA, | 285 | enum wpa_key_mgmt { KEY_MGMT_802_1X, KEY_MGMT_PSK, KEY_MGMT_NONE, |
286 | KEY_MGMT_WPA_NONE } wpa_key_mgmt; | 286 | KEY_MGMT_802_1X_NO_WPA, KEY_MGMT_WPA_NONE }; |
287 | 287 | ||
288 | /* | 288 | /* |
289 | * private data | 289 | * private data |
@@ -1508,7 +1508,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev, | |||
1508 | struct usbnet *usbdev = dev->priv; | 1508 | struct usbnet *usbdev = dev->priv; |
1509 | struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); | 1509 | struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); |
1510 | struct NDIS_802_11_KEY ndis_key; | 1510 | struct NDIS_802_11_KEY ndis_key; |
1511 | int i, keyidx, ret; | 1511 | int keyidx, ret; |
1512 | u8 *addr; | 1512 | u8 *addr; |
1513 | 1513 | ||
1514 | keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX; | 1514 | keyidx = wrqu->encoding.flags & IW_ENCODE_INDEX; |
@@ -1543,9 +1543,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev, | |||
1543 | ndis_key.KeyIndex = cpu_to_le32(keyidx); | 1543 | ndis_key.KeyIndex = cpu_to_le32(keyidx); |
1544 | 1544 | ||
1545 | if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { | 1545 | if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { |
1546 | for (i = 0; i < 6; i++) | 1546 | memcpy(ndis_key.KeyRSC, ext->rx_seq, 6); |
1547 | ndis_key.KeyRSC |= | ||
1548 | cpu_to_le64(ext->rx_seq[i] << (i * 8)); | ||
1549 | ndis_key.KeyIndex |= cpu_to_le32(1 << 29); | 1547 | ndis_key.KeyIndex |= cpu_to_le32(1 << 29); |
1550 | } | 1548 | } |
1551 | 1549 | ||
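The rndis_wlan structure fixes are about wire layout: the enum fields become fixed-width __le32 (an enum's size is compiler-dependent), and KeyRSC becomes a plain 8-byte array so the 6-byte receive sequence counter can be copied in directly. The removed loop shifted u8 values left by up to 40 bits in int arithmetic and OR'd the result into the field, losing the upper bytes; copying bytes avoids both the shift overflow and any endianness juggling:

    #include <stdint.h>
    #include <string.h>

    struct example_key {
            uint8_t key_rsc[8];     /* wire format: 8 little-endian counter bytes */
    };

    /* Place a 6-byte RX sequence counter into the 8-byte wire field;
     * the two high bytes stay zero. */
    void set_key_rsc(struct example_key *k, const uint8_t rx_seq[6])
    {
            memset(k->key_rsc, 0, sizeof(k->key_rsc));
            memcpy(k->key_rsc, rx_seq, 6);
    }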
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index d6cba138c7ab..c69f85ed7669 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c | |||
@@ -960,8 +960,12 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, | |||
960 | rt2400pci_disable_radio(rt2x00dev); | 960 | rt2400pci_disable_radio(rt2x00dev); |
961 | break; | 961 | break; |
962 | case STATE_RADIO_RX_ON: | 962 | case STATE_RADIO_RX_ON: |
963 | case STATE_RADIO_RX_ON_LINK: | ||
964 | rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | ||
965 | break; | ||
963 | case STATE_RADIO_RX_OFF: | 966 | case STATE_RADIO_RX_OFF: |
964 | rt2400pci_toggle_rx(rt2x00dev, state); | 967 | case STATE_RADIO_RX_OFF_LINK: |
968 | rt2400pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | ||
965 | break; | 969 | break; |
966 | case STATE_DEEP_SLEEP: | 970 | case STATE_DEEP_SLEEP: |
967 | case STATE_SLEEP: | 971 | case STATE_SLEEP: |
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index e874fdcae204..91e87b53374f 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c | |||
@@ -1112,8 +1112,12 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, | |||
1112 | rt2500pci_disable_radio(rt2x00dev); | 1112 | rt2500pci_disable_radio(rt2x00dev); |
1113 | break; | 1113 | break; |
1114 | case STATE_RADIO_RX_ON: | 1114 | case STATE_RADIO_RX_ON: |
1115 | case STATE_RADIO_RX_ON_LINK: | ||
1116 | rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | ||
1117 | break; | ||
1115 | case STATE_RADIO_RX_OFF: | 1118 | case STATE_RADIO_RX_OFF: |
1116 | rt2500pci_toggle_rx(rt2x00dev, state); | 1119 | case STATE_RADIO_RX_OFF_LINK: |
1120 | rt2500pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | ||
1117 | break; | 1121 | break; |
1118 | case STATE_DEEP_SLEEP: | 1122 | case STATE_DEEP_SLEEP: |
1119 | case STATE_SLEEP: | 1123 | case STATE_SLEEP: |
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 4ca9730e5e92..638c3d243108 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c | |||
@@ -1001,8 +1001,12 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, | |||
1001 | rt2500usb_disable_radio(rt2x00dev); | 1001 | rt2500usb_disable_radio(rt2x00dev); |
1002 | break; | 1002 | break; |
1003 | case STATE_RADIO_RX_ON: | 1003 | case STATE_RADIO_RX_ON: |
1004 | case STATE_RADIO_RX_ON_LINK: | ||
1005 | rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | ||
1006 | break; | ||
1004 | case STATE_RADIO_RX_OFF: | 1007 | case STATE_RADIO_RX_OFF: |
1005 | rt2500usb_toggle_rx(rt2x00dev, state); | 1008 | case STATE_RADIO_RX_OFF_LINK: |
1009 | rt2500usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | ||
1006 | break; | 1010 | break; |
1007 | case STATE_DEEP_SLEEP: | 1011 | case STATE_DEEP_SLEEP: |
1008 | case STATE_SLEEP: | 1012 | case STATE_SLEEP: |
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c index 72cfe00c1ed7..07adc576db49 100644 --- a/drivers/net/wireless/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/rt2x00/rt2x00config.c | |||
@@ -97,12 +97,16 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, | |||
97 | libconf.ant.rx = rx; | 97 | libconf.ant.rx = rx; |
98 | libconf.ant.tx = tx; | 98 | libconf.ant.tx = tx; |
99 | 99 | ||
100 | if (rx == rt2x00dev->link.ant.active.rx && | ||
101 | tx == rt2x00dev->link.ant.active.tx) | ||
102 | return; | ||
103 | |||
100 | /* | 104 | /* |
101 | * Antenna setup changes require the RX to be disabled, | 105 | * Antenna setup changes require the RX to be disabled, |
102 | * else the changes will be ignored by the device. | 106 | * else the changes will be ignored by the device. |
103 | */ | 107 | */ |
104 | if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) | 108 | if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) |
105 | rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | 109 | rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); |
106 | 110 | ||
107 | /* | 111 | /* |
108 | * Write new antenna setup to device and reset the link tuner. | 112 | * Write new antenna setup to device and reset the link tuner. |
@@ -116,7 +120,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, | |||
116 | rt2x00dev->link.ant.active.tx = libconf.ant.tx; | 120 | rt2x00dev->link.ant.active.tx = libconf.ant.tx; |
117 | 121 | ||
118 | if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) | 122 | if (test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) |
119 | rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | 123 | rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); |
120 | } | 124 | } |
121 | 125 | ||
122 | void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, | 126 | void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, |
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index c4be2ac4d7a4..0d51f478bcdf 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c | |||
@@ -61,11 +61,33 @@ EXPORT_SYMBOL_GPL(rt2x00lib_get_ring); | |||
61 | /* | 61 | /* |
62 | * Link tuning handlers | 62 | * Link tuning handlers |
63 | */ | 63 | */ |
64 | static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) | 64 | void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) |
65 | { | 65 | { |
66 | if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) | ||
67 | return; | ||
68 | |||
69 | /* | ||
70 | * Reset link information. | ||
71 | * Both the currently active vgc level as well as | ||
72 | * the link tuner counter should be reset. Resetting | ||
73 | * the counter is important for devices where the | ||
74 | * device should only perform link tuning during the | ||
75 | * first minute after being enabled. | ||
76 | */ | ||
66 | rt2x00dev->link.count = 0; | 77 | rt2x00dev->link.count = 0; |
67 | rt2x00dev->link.vgc_level = 0; | 78 | rt2x00dev->link.vgc_level = 0; |
68 | 79 | ||
80 | /* | ||
81 | * Reset the link tuner. | ||
82 | */ | ||
83 | rt2x00dev->ops->lib->reset_tuner(rt2x00dev); | ||
84 | } | ||
85 | |||
86 | static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) | ||
87 | { | ||
88 | /* | ||
89 | * Clear all (possibly) pre-existing quality statistics. | ||
90 | */ | ||
69 | memset(&rt2x00dev->link.qual, 0, sizeof(rt2x00dev->link.qual)); | 91 | memset(&rt2x00dev->link.qual, 0, sizeof(rt2x00dev->link.qual)); |
70 | 92 | ||
71 | /* | 93 | /* |
@@ -79,10 +101,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev) | |||
79 | rt2x00dev->link.qual.rx_percentage = 50; | 101 | rt2x00dev->link.qual.rx_percentage = 50; |
80 | rt2x00dev->link.qual.tx_percentage = 50; | 102 | rt2x00dev->link.qual.tx_percentage = 50; |
81 | 103 | ||
82 | /* | 104 | rt2x00lib_reset_link_tuner(rt2x00dev); |
83 | * Reset the link tuner. | ||
84 | */ | ||
85 | rt2x00dev->ops->lib->reset_tuner(rt2x00dev); | ||
86 | 105 | ||
87 | queue_delayed_work(rt2x00dev->hw->workqueue, | 106 | queue_delayed_work(rt2x00dev->hw->workqueue, |
88 | &rt2x00dev->link.work, LINK_TUNE_INTERVAL); | 107 | &rt2x00dev->link.work, LINK_TUNE_INTERVAL); |
@@ -93,15 +112,6 @@ static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev) | |||
93 | cancel_delayed_work_sync(&rt2x00dev->link.work); | 112 | cancel_delayed_work_sync(&rt2x00dev->link.work); |
94 | } | 113 | } |
95 | 114 | ||
96 | void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) | ||
97 | { | ||
98 | if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) | ||
99 | return; | ||
100 | |||
101 | rt2x00lib_stop_link_tuner(rt2x00dev); | ||
102 | rt2x00lib_start_link_tuner(rt2x00dev); | ||
103 | } | ||
104 | |||
105 | /* | 115 | /* |
106 | * Ring initialization | 116 | * Ring initialization |
107 | */ | 117 | */ |
@@ -260,19 +270,11 @@ static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev) | |||
260 | if (sample_a == sample_b) | 270 | if (sample_a == sample_b) |
261 | return; | 271 | return; |
262 | 272 | ||
263 | if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) { | 273 | if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) |
264 | if (sample_a > sample_b && rx == ANTENNA_B) | 274 | rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B; |
265 | rx = ANTENNA_A; | ||
266 | else if (rx == ANTENNA_A) | ||
267 | rx = ANTENNA_B; | ||
268 | } | ||
269 | 275 | ||
270 | if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) { | 276 | if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) |
271 | if (sample_a > sample_b && tx == ANTENNA_B) | 277 | tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B; |
272 | tx = ANTENNA_A; | ||
273 | else if (tx == ANTENNA_A) | ||
274 | tx = ANTENNA_B; | ||
275 | } | ||
276 | 278 | ||
277 | rt2x00lib_config_antenna(rt2x00dev, rx, tx); | 279 | rt2x00lib_config_antenna(rt2x00dev, rx, tx); |
278 | } | 280 | } |
@@ -293,7 +295,7 @@ static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev) | |||
293 | * sample the rssi from the other antenna to make a valid | 295 | * sample the rssi from the other antenna to make a valid |
294 | * comparison between the 2 antennas. | 296 | * comparison between the 2 antennas. |
295 | */ | 297 | */ |
296 | if ((rssi_curr - rssi_old) > -5 || (rssi_curr - rssi_old) < 5) | 298 | if (abs(rssi_curr - rssi_old) < 5) |
297 | return; | 299 | return; |
298 | 300 | ||
299 | rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE; | 301 | rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE; |
@@ -319,15 +321,15 @@ static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev) | |||
319 | rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY; | 321 | rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY; |
320 | 322 | ||
321 | if (rt2x00dev->hw->conf.antenna_sel_rx == 0 && | 323 | if (rt2x00dev->hw->conf.antenna_sel_rx == 0 && |
322 | rt2x00dev->default_ant.rx != ANTENNA_SW_DIVERSITY) | 324 | rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) |
323 | rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY; | 325 | rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY; |
324 | if (rt2x00dev->hw->conf.antenna_sel_tx == 0 && | 326 | if (rt2x00dev->hw->conf.antenna_sel_tx == 0 && |
325 | rt2x00dev->default_ant.tx != ANTENNA_SW_DIVERSITY) | 327 | rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) |
326 | rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY; | 328 | rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY; |
327 | 329 | ||
328 | if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) && | 330 | if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) && |
329 | !(rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)) { | 331 | !(rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)) { |
330 | rt2x00dev->link.ant.flags &= ~ANTENNA_MODE_SAMPLE; | 332 | rt2x00dev->link.ant.flags = 0; |
331 | return; | 333 | return; |
332 | } | 334 | } |
333 | 335 | ||
@@ -441,17 +443,18 @@ static void rt2x00lib_link_tuner(struct work_struct *work) | |||
441 | rt2x00dev->ops->lib->link_tuner(rt2x00dev); | 443 | rt2x00dev->ops->lib->link_tuner(rt2x00dev); |
442 | 444 | ||
443 | /* | 445 | /* |
444 | * Evaluate antenna setup. | ||
445 | */ | ||
446 | rt2x00lib_evaluate_antenna(rt2x00dev); | ||
447 | |||
448 | /* | ||
449 | * Precalculate a portion of the link signal which is | 446 | * Precalculate a portion of the link signal which is |
450 | * in based on the tx/rx success/failure counters. | 447 | * in based on the tx/rx success/failure counters. |
451 | */ | 448 | */ |
452 | rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual); | 449 | rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual); |
453 | 450 | ||
454 | /* | 451 | /* |
452 | * Evaluate antenna setup, make this the last step since this could | ||
453 | * possibly reset some statistics. | ||
454 | */ | ||
455 | rt2x00lib_evaluate_antenna(rt2x00dev); | ||
456 | |||
457 | /* | ||
455 | * Increase tuner counter, and reschedule the next link tuner run. | 458 | * Increase tuner counter, and reschedule the next link tuner run. |
456 | */ | 459 | */ |
457 | rt2x00dev->link.count++; | 460 | rt2x00dev->link.count++; |
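Several inverted conditions in rt2x00dev.c are straightened out above: software diversity is now enabled when the default antenna is ANTENNA_SW_DIVERSITY rather than when it is not, the A/B choice collapses to a ternary on the two sample counts, and the RSSI guard uses abs(), since the old test delta > -5 || delta < 5 holds for every possible delta and therefore always returned early. The intended guard, in isolation:

    /* Only consider resampling the antennas once the signal has moved by 5 dB or more. */
    if (abs(rssi_curr - rssi_old) < 5)
            return;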
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h index 838421216da0..b1915dc7dda1 100644 --- a/drivers/net/wireless/rt2x00/rt2x00reg.h +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h | |||
@@ -85,6 +85,8 @@ enum dev_state { | |||
85 | STATE_RADIO_OFF, | 85 | STATE_RADIO_OFF, |
86 | STATE_RADIO_RX_ON, | 86 | STATE_RADIO_RX_ON, |
87 | STATE_RADIO_RX_OFF, | 87 | STATE_RADIO_RX_OFF, |
88 | STATE_RADIO_RX_ON_LINK, | ||
89 | STATE_RADIO_RX_OFF_LINK, | ||
88 | STATE_RADIO_IRQ_ON, | 90 | STATE_RADIO_IRQ_ON, |
89 | STATE_RADIO_IRQ_OFF, | 91 | STATE_RADIO_IRQ_OFF, |
90 | }; | 92 | }; |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index b31f0c26c32b..e808db98f2f5 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
@@ -1482,8 +1482,12 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, | |||
1482 | rt61pci_disable_radio(rt2x00dev); | 1482 | rt61pci_disable_radio(rt2x00dev); |
1483 | break; | 1483 | break; |
1484 | case STATE_RADIO_RX_ON: | 1484 | case STATE_RADIO_RX_ON: |
1485 | case STATE_RADIO_RX_ON_LINK: | ||
1486 | rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | ||
1487 | break; | ||
1485 | case STATE_RADIO_RX_OFF: | 1488 | case STATE_RADIO_RX_OFF: |
1486 | rt61pci_toggle_rx(rt2x00dev, state); | 1489 | case STATE_RADIO_RX_OFF_LINK: |
1490 | rt61pci_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | ||
1487 | break; | 1491 | break; |
1488 | case STATE_DEEP_SLEEP: | 1492 | case STATE_DEEP_SLEEP: |
1489 | case STATE_SLEEP: | 1493 | case STATE_SLEEP: |
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 4d576ab3e7f9..4fac2d414d84 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c | |||
@@ -1208,8 +1208,12 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, | |||
1208 | rt73usb_disable_radio(rt2x00dev); | 1208 | rt73usb_disable_radio(rt2x00dev); |
1209 | break; | 1209 | break; |
1210 | case STATE_RADIO_RX_ON: | 1210 | case STATE_RADIO_RX_ON: |
1211 | case STATE_RADIO_RX_ON_LINK: | ||
1212 | rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); | ||
1213 | break; | ||
1211 | case STATE_RADIO_RX_OFF: | 1214 | case STATE_RADIO_RX_OFF: |
1212 | rt73usb_toggle_rx(rt2x00dev, state); | 1215 | case STATE_RADIO_RX_OFF_LINK: |
1216 | rt73usb_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); | ||
1213 | break; | 1217 | break; |
1214 | case STATE_DEEP_SLEEP: | 1218 | case STATE_DEEP_SLEEP: |
1215 | case STATE_SLEEP: | 1219 | case STATE_SLEEP: |
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig index 1d3b84b4af3f..553a9905299a 100644 --- a/drivers/parisc/Kconfig +++ b/drivers/parisc/Kconfig | |||
@@ -103,6 +103,11 @@ config IOMMU_SBA | |||
103 | depends on PCI_LBA | 103 | depends on PCI_LBA |
104 | default PCI_LBA | 104 | default PCI_LBA |
105 | 105 | ||
106 | config IOMMU_HELPER | ||
107 | bool | ||
108 | depends on IOMMU_SBA || IOMMU_CCIO | ||
109 | default y | ||
110 | |||
106 | #config PCI_EPIC | 111 | #config PCI_EPIC |
107 | # bool "EPIC/SAGA PCI support" | 112 | # bool "EPIC/SAGA PCI support" |
108 | # depends on PCI | 113 | # depends on PCI |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index d08b284de196..62db3c3fe4dc 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/proc_fs.h> | 43 | #include <linux/proc_fs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/iommu-helper.h> | ||
46 | 47 | ||
47 | #include <asm/byteorder.h> | 48 | #include <asm/byteorder.h> |
48 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ | 49 | #include <asm/cache.h> /* for L1_CACHE_BYTES */ |
@@ -302,13 +303,17 @@ static int ioc_count; | |||
302 | */ | 303 | */ |
303 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ | 304 | #define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size) \ |
304 | for(; res_ptr < res_end; ++res_ptr) { \ | 305 | for(; res_ptr < res_end; ++res_ptr) { \ |
305 | if(0 == (*res_ptr & mask)) { \ | 306 | int ret;\ |
306 | *res_ptr |= mask; \ | 307 | unsigned int idx;\ |
307 | res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ | 308 | idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \ |
308 | ioc->res_hint = res_idx + (size >> 3); \ | 309 | ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\ |
309 | goto resource_found; \ | 310 | if ((0 == (*res_ptr & mask)) && !ret) { \ |
310 | } \ | 311 | *res_ptr |= mask; \ |
311 | } | 312 | res_idx = idx;\ |
313 | ioc->res_hint = res_idx + (size >> 3); \ | ||
314 | goto resource_found; \ | ||
315 | } \ | ||
316 | } | ||
312 | 317 | ||
313 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ | 318 | #define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size) \ |
314 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ | 319 | u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \ |
@@ -341,10 +346,11 @@ static int ioc_count; | |||
341 | * of available pages for the requested size. | 346 | * of available pages for the requested size. |
342 | */ | 347 | */ |
343 | static int | 348 | static int |
344 | ccio_alloc_range(struct ioc *ioc, size_t size) | 349 | ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
345 | { | 350 | { |
346 | unsigned int pages_needed = size >> IOVP_SHIFT; | 351 | unsigned int pages_needed = size >> IOVP_SHIFT; |
347 | unsigned int res_idx; | 352 | unsigned int res_idx; |
353 | unsigned long boundary_size; | ||
348 | #ifdef CCIO_SEARCH_TIME | 354 | #ifdef CCIO_SEARCH_TIME |
349 | unsigned long cr_start = mfctl(16); | 355 | unsigned long cr_start = mfctl(16); |
350 | #endif | 356 | #endif |
@@ -360,6 +366,9 @@ ccio_alloc_range(struct ioc *ioc, size_t size) | |||
360 | ** ggg sacrifices another 710 to the computer gods. | 366 | ** ggg sacrifices another 710 to the computer gods. |
361 | */ | 367 | */ |
362 | 368 | ||
369 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, | ||
370 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; | ||
371 | |||
363 | if (pages_needed <= 8) { | 372 | if (pages_needed <= 8) { |
364 | /* | 373 | /* |
365 | * LAN traffic will not thrash the TLB IFF the same NIC | 374 | * LAN traffic will not thrash the TLB IFF the same NIC |
@@ -760,7 +769,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, | |||
760 | ioc->msingle_pages += size >> IOVP_SHIFT; | 769 | ioc->msingle_pages += size >> IOVP_SHIFT; |
761 | #endif | 770 | #endif |
762 | 771 | ||
763 | idx = ccio_alloc_range(ioc, size); | 772 | idx = ccio_alloc_range(ioc, dev, size); |
764 | iovp = (dma_addr_t)MKIOVP(idx); | 773 | iovp = (dma_addr_t)MKIOVP(idx); |
765 | 774 | ||
766 | pdir_start = &(ioc->pdir_base[idx]); | 775 | pdir_start = &(ioc->pdir_base[idx]); |
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index 97ba8286c596..a9c46cc2db37 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h | |||
@@ -96,8 +96,8 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, | |||
96 | 96 | ||
97 | static inline unsigned int | 97 | static inline unsigned int |
98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | 98 | iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, |
99 | struct scatterlist *startsg, int nents, | 99 | struct scatterlist *startsg, int nents, |
100 | int (*iommu_alloc_range)(struct ioc *, size_t)) | 100 | int (*iommu_alloc_range)(struct ioc *, struct device *, size_t)) |
101 | { | 101 | { |
102 | struct scatterlist *contig_sg; /* contig chunk head */ | 102 | struct scatterlist *contig_sg; /* contig chunk head */ |
103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 103 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
@@ -166,7 +166,7 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); | 166 | dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE); |
167 | sg_dma_address(contig_sg) = | 167 | sg_dma_address(contig_sg) = |
168 | PIDE_FLAG | 168 | PIDE_FLAG |
169 | | (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT) | 169 | | (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT) |
170 | | dma_offset; | 170 | | dma_offset; |
171 | n_mappings++; | 171 | n_mappings++; |
172 | } | 172 | } |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index d06627c3f353..bdbe780e21c5 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/string.h> | 29 | #include <linux/string.h> |
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/scatterlist.h> | 31 | #include <linux/scatterlist.h> |
32 | #include <linux/iommu-helper.h> | ||
32 | 33 | ||
33 | #include <asm/byteorder.h> | 34 | #include <asm/byteorder.h> |
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
@@ -313,6 +314,12 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
313 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) | 314 | #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n))) |
314 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | 315 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) |
315 | 316 | ||
317 | unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, | ||
318 | unsigned int bitshiftcnt) | ||
319 | { | ||
320 | return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) | ||
321 | + bitshiftcnt; | ||
322 | } | ||
316 | 323 | ||
317 | /** | 324 | /** |
318 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | 325 | * sba_search_bitmap - find free space in IO PDIR resource bitmap |
@@ -324,19 +331,36 @@ sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |||
324 | * Cool perf optimization: search for log2(size) bits at a time. | 331 | * Cool perf optimization: search for log2(size) bits at a time. |
325 | */ | 332 | */ |
326 | static SBA_INLINE unsigned long | 333 | static SBA_INLINE unsigned long |
327 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | 334 | sba_search_bitmap(struct ioc *ioc, struct device *dev, |
335 | unsigned long bits_wanted) | ||
328 | { | 336 | { |
329 | unsigned long *res_ptr = ioc->res_hint; | 337 | unsigned long *res_ptr = ioc->res_hint; |
330 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); | 338 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
331 | unsigned long pide = ~0UL; | 339 | unsigned long pide = ~0UL, tpide; |
340 | unsigned long boundary_size; | ||
341 | unsigned long shift; | ||
342 | int ret; | ||
343 | |||
344 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, | ||
345 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; | ||
346 | |||
347 | #if defined(ZX1_SUPPORT) | ||
348 | BUG_ON(ioc->ibase & ~IOVP_MASK); | ||
349 | shift = ioc->ibase >> IOVP_SHIFT; | ||
350 | #else | ||
351 | shift = 0; | ||
352 | #endif | ||
332 | 353 | ||
333 | if (bits_wanted > (BITS_PER_LONG/2)) { | 354 | if (bits_wanted > (BITS_PER_LONG/2)) { |
334 | /* Search word at a time - no mask needed */ | 355 | /* Search word at a time - no mask needed */ |
335 | for(; res_ptr < res_end; ++res_ptr) { | 356 | for(; res_ptr < res_end; ++res_ptr) { |
336 | if (*res_ptr == 0) { | 357 | tpide = ptr_to_pide(ioc, res_ptr, 0); |
358 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
359 | shift, | ||
360 | boundary_size); | ||
361 | if ((*res_ptr == 0) && !ret) { | ||
337 | *res_ptr = RESMAP_MASK(bits_wanted); | 362 | *res_ptr = RESMAP_MASK(bits_wanted); |
338 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 363 | pide = tpide; |
339 | pide <<= 3; /* convert to bit address */ | ||
340 | break; | 364 | break; |
341 | } | 365 | } |
342 | } | 366 | } |
@@ -365,11 +389,13 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
365 | { | 389 | { |
366 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | 390 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); |
367 | WARN_ON(mask == 0); | 391 | WARN_ON(mask == 0); |
368 | if(((*res_ptr) & mask) == 0) { | 392 | tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); |
393 | ret = iommu_is_span_boundary(tpide, bits_wanted, | ||
394 | shift, | ||
395 | boundary_size); | ||
396 | if ((((*res_ptr) & mask) == 0) && !ret) { | ||
369 | *res_ptr |= mask; /* mark resources busy! */ | 397 | *res_ptr |= mask; /* mark resources busy! */ |
370 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | 398 | pide = tpide; |
371 | pide <<= 3; /* convert to bit address */ | ||
372 | pide += bitshiftcnt; | ||
373 | break; | 399 | break; |
374 | } | 400 | } |
375 | mask >>= o; | 401 | mask >>= o; |
@@ -404,7 +430,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted) | |||
404 | * resource bit map. | 430 | * resource bit map. |
405 | */ | 431 | */ |
406 | static int | 432 | static int |
407 | sba_alloc_range(struct ioc *ioc, size_t size) | 433 | sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) |
408 | { | 434 | { |
409 | unsigned int pages_needed = size >> IOVP_SHIFT; | 435 | unsigned int pages_needed = size >> IOVP_SHIFT; |
410 | #ifdef SBA_COLLECT_STATS | 436 | #ifdef SBA_COLLECT_STATS |
@@ -412,9 +438,9 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
412 | #endif | 438 | #endif |
413 | unsigned long pide; | 439 | unsigned long pide; |
414 | 440 | ||
415 | pide = sba_search_bitmap(ioc, pages_needed); | 441 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
416 | if (pide >= (ioc->res_size << 3)) { | 442 | if (pide >= (ioc->res_size << 3)) { |
417 | pide = sba_search_bitmap(ioc, pages_needed); | 443 | pide = sba_search_bitmap(ioc, dev, pages_needed); |
418 | if (pide >= (ioc->res_size << 3)) | 444 | if (pide >= (ioc->res_size << 3)) |
419 | panic("%s: I/O MMU @ %p is out of mapping resources\n", | 445 | panic("%s: I/O MMU @ %p is out of mapping resources\n", |
420 | __FILE__, ioc->ioc_hpa); | 446 | __FILE__, ioc->ioc_hpa); |
@@ -710,7 +736,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, | |||
710 | ioc->msingle_calls++; | 736 | ioc->msingle_calls++; |
711 | ioc->msingle_pages += size >> IOVP_SHIFT; | 737 | ioc->msingle_pages += size >> IOVP_SHIFT; |
712 | #endif | 738 | #endif |
713 | pide = sba_alloc_range(ioc, size); | 739 | pide = sba_alloc_range(ioc, dev, size); |
714 | iovp = (dma_addr_t) pide << IOVP_SHIFT; | 740 | iovp = (dma_addr_t) pide << IOVP_SHIFT; |
715 | 741 | ||
716 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | 742 | DBG_RUN("%s() 0x%p -> 0x%lx\n", |
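Note on the sba_iommu hunks above: sba_search_bitmap() now rejects a candidate allocation when the resulting pide range would cross a DMA segment boundary, by converting the resource-map position to a pide (ptr_to_pide) and checking it with iommu_is_span_boundary(). A minimal sketch of that boundary test, modelled on the generic lib/iommu-helper.c logic and assuming boundary_size is a power of two; this is illustrative, not quoted from this driver:

	/*
	 * Sketch: would a mapping of 'nr' IO pages starting at bit index
	 * 'index' straddle a boundary_size-aligned boundary?
	 */
	static int spans_boundary(unsigned int index, unsigned int nr,
				  unsigned long shift, unsigned long boundary_size)
	{
		/* boundary_size is assumed to be a power of two */
		shift = (shift + index) & (boundary_size - 1);
		return shift + nr > boundary_size;
	}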
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index ef5a6a245f5f..6a9403d79e0c 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
@@ -145,13 +145,15 @@ void pci_bus_add_devices(struct pci_bus *bus) | |||
145 | child_bus = dev->subordinate; | 145 | child_bus = dev->subordinate; |
146 | child_bus->dev.parent = child_bus->bridge; | 146 | child_bus->dev.parent = child_bus->bridge; |
147 | retval = device_register(&child_bus->dev); | 147 | retval = device_register(&child_bus->dev); |
148 | if (!retval) | 148 | if (retval) |
149 | dev_err(&dev->dev, "Error registering pci_bus," | ||
150 | " continuing...\n"); | ||
151 | else | ||
149 | retval = device_create_file(&child_bus->dev, | 152 | retval = device_create_file(&child_bus->dev, |
150 | &dev_attr_cpuaffinity); | 153 | &dev_attr_cpuaffinity); |
151 | if (retval) | 154 | if (retval) |
152 | dev_err(&dev->dev, "Error registering pci_bus" | 155 | dev_err(&dev->dev, "Error creating cpuaffinity" |
153 | " device bridge symlink," | 156 | " file, continuing...\n"); |
154 | " continuing...\n"); | ||
155 | } | 157 | } |
156 | } | 158 | } |
157 | } | 159 | } |
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index a590ef682153..4d4a64478404 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c | |||
@@ -4,7 +4,7 @@ | |||
4 | #include "pci.h" | 4 | #include "pci.h" |
5 | 5 | ||
6 | 6 | ||
7 | unsigned int pci_do_scan_bus(struct pci_bus *bus) | 7 | unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus) |
8 | { | 8 | { |
9 | unsigned int max; | 9 | unsigned int max; |
10 | 10 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cf22f9e01e00..5e50008d1181 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
@@ -1085,7 +1085,7 @@ static int acpiphp_bus_trim(acpi_handle handle) | |||
1085 | * This function should be called per *physical slot*, | 1085 | * This function should be called per *physical slot*, |
1086 | * not per each slot object in ACPI namespace. | 1086 | * not per each slot object in ACPI namespace. |
1087 | */ | 1087 | */ |
1088 | static int enable_device(struct acpiphp_slot *slot) | 1088 | static int __ref enable_device(struct acpiphp_slot *slot) |
1089 | { | 1089 | { |
1090 | struct pci_dev *dev; | 1090 | struct pci_dev *dev; |
1091 | struct pci_bus *bus = slot->bridge->pci_bus; | 1091 | struct pci_bus *bus = slot->bridge->pci_bus; |
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 5e9be44817cb..b3515fc4cd38 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c | |||
@@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) | |||
250 | * Device configuration functions | 250 | * Device configuration functions |
251 | */ | 251 | */ |
252 | 252 | ||
253 | int cpci_configure_slot(struct slot* slot) | 253 | int __ref cpci_configure_slot(struct slot *slot) |
254 | { | 254 | { |
255 | struct pci_bus *parent; | 255 | struct pci_bus *parent; |
256 | int fn; | 256 | int fn; |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6eba9b2cfb90..698975a6a21c 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
@@ -711,7 +711,8 @@ static int hpc_power_off_slot(struct slot * slot) | |||
711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); | 711 | retval = pcie_write_cmd(slot, slot_cmd, cmd_mask); |
712 | if (retval) { | 712 | if (retval) { |
713 | err("%s: Write command failed!\n", __FUNCTION__); | 713 | err("%s: Write command failed!\n", __FUNCTION__); |
714 | return -1; | 714 | retval = -1; |
715 | goto out; | ||
715 | } | 716 | } |
716 | dbg("%s: SLOTCTRL %x write cmd %x\n", | 717 | dbg("%s: SLOTCTRL %x write cmd %x\n", |
717 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); | 718 | __FUNCTION__, ctrl->cap_base + SLOTCTRL, slot_cmd); |
@@ -722,7 +723,7 @@ static int hpc_power_off_slot(struct slot * slot) | |||
722 | * removed from the slot/adapter. | 723 | * removed from the slot/adapter. |
723 | */ | 724 | */ |
724 | msleep(1000); | 725 | msleep(1000); |
725 | 726 | out: | |
726 | if (changed) | 727 | if (changed) |
727 | pcie_unmask_bad_dllp(ctrl); | 728 | pcie_unmask_bad_dllp(ctrl); |
728 | 729 | ||
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index dd50713966d1..9372a840b63d 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
@@ -167,7 +167,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
167 | } | 167 | } |
168 | } | 168 | } |
169 | 169 | ||
170 | static int pciehp_add_bridge(struct pci_dev *dev) | 170 | static int __ref pciehp_add_bridge(struct pci_dev *dev) |
171 | { | 171 | { |
172 | struct pci_bus *parent = dev->bus; | 172 | struct pci_bus *parent = dev->bus; |
173 | int pass, busnr, start = parent->secondary; | 173 | int pass, busnr, start = parent->secondary; |
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 0a6b25ef194c..a69a21520895 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c | |||
@@ -96,7 +96,7 @@ static void program_fw_provided_values(struct pci_dev *dev) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | int shpchp_configure_device(struct slot *p_slot) | 99 | int __ref shpchp_configure_device(struct slot *p_slot) |
100 | { | 100 | { |
101 | struct pci_dev *dev; | 101 | struct pci_dev *dev; |
102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; | 102 | struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 4d23b9fb551b..2db2e4bb0d1e 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -286,7 +286,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
286 | } | 286 | } |
287 | } | 287 | } |
288 | 288 | ||
289 | void pci_read_bridge_bases(struct pci_bus *child) | 289 | void __devinit pci_read_bridge_bases(struct pci_bus *child) |
290 | { | 290 | { |
291 | struct pci_dev *dev = child->self; | 291 | struct pci_dev *dev = child->self; |
292 | u8 io_base_lo, io_limit_lo; | 292 | u8 io_base_lo, io_limit_lo; |
@@ -472,7 +472,7 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) | |||
472 | * them, we proceed to assigning numbers to the remaining buses in | 472 | * them, we proceed to assigning numbers to the remaining buses in |
473 | * order to avoid overlaps between old and new bus numbers. | 473 | * order to avoid overlaps between old and new bus numbers. |
474 | */ | 474 | */ |
475 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass) | 475 | int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) |
476 | { | 476 | { |
477 | struct pci_bus *child; | 477 | struct pci_bus *child; |
478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); | 478 | int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); |
@@ -1008,7 +1008,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) | |||
1008 | return nr; | 1008 | return nr; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | unsigned int pci_scan_child_bus(struct pci_bus *bus) | 1011 | unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) |
1012 | { | 1012 | { |
1013 | unsigned int devfn, pass, max = bus->secondary; | 1013 | unsigned int devfn, pass, max = bus->secondary; |
1014 | struct pci_dev *dev; | 1014 | struct pci_dev *dev; |
@@ -1116,7 +1116,7 @@ err_out: | |||
1116 | return NULL; | 1116 | return NULL; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | struct pci_bus *pci_scan_bus_parented(struct device *parent, | 1119 | struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, |
1120 | int bus, struct pci_ops *ops, void *sysdata) | 1120 | int bus, struct pci_ops *ops, void *sysdata) |
1121 | { | 1121 | { |
1122 | struct pci_bus *b; | 1122 | struct pci_bus *b; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index bbad4a9f264f..e9a333d98552 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -1652,9 +1652,8 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) | |||
1652 | pci_write_config_byte(dev, 0x75, 0x1); | 1652 | pci_write_config_byte(dev, 0x75, 0x1); |
1653 | pci_write_config_byte(dev, 0x77, 0x0); | 1653 | pci_write_config_byte(dev, 0x77, 0x0); |
1654 | 1654 | ||
1655 | printk(KERN_INFO | 1655 | dev_info(&dev->dev, |
1656 | "PCI: VIA CX700 PCI parking/caching fixup on %s\n", | 1656 | "Disabling VIA CX700 PCI parking/caching\n"); |
1657 | pci_name(dev)); | ||
1658 | } | 1657 | } |
1659 | } | 1658 | } |
1660 | } | 1659 | } |
@@ -1726,32 +1725,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2 | |||
1726 | quirk_msi_ht_cap); | 1725 | quirk_msi_ht_cap); |
1727 | 1726 | ||
1728 | 1727 | ||
1729 | /* | ||
1730 | * Force enable MSI mapping capability on HT bridges | ||
1731 | */ | ||
1732 | static void __devinit quirk_msi_ht_cap_enable(struct pci_dev *dev) | ||
1733 | { | ||
1734 | int pos, ttl = 48; | ||
1735 | |||
1736 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); | ||
1737 | while (pos && ttl--) { | ||
1738 | u8 flags; | ||
1739 | |||
1740 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { | ||
1741 | printk(KERN_INFO "PCI: Enabling HT MSI Mapping on %s\n", | ||
1742 | pci_name(dev)); | ||
1743 | |||
1744 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | ||
1745 | flags | HT_MSI_FLAGS_ENABLE); | ||
1746 | } | ||
1747 | pos = pci_find_next_ht_capability(dev, pos, | ||
1748 | HT_CAPTYPE_MSI_MAPPING); | ||
1749 | } | ||
1750 | } | ||
1751 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1752 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1753 | quirk_msi_ht_cap_enable); | ||
1754 | |||
1755 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. | 1728 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. |
1756 | * MSI are supported if the MSI capability set in any of these mappings. | 1729 | * MSI are supported if the MSI capability set in any of these mappings. |
1757 | */ | 1730 | */ |
@@ -1778,9 +1751,8 @@ static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) | |||
1778 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | 1751 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, |
1779 | quirk_nvidia_ck804_msi_ht_cap); | 1752 | quirk_nvidia_ck804_msi_ht_cap); |
1780 | 1753 | ||
1781 | /* | 1754 | /* Force enable MSI mapping capability on HT bridges */ |
1782 | * Force enable MSI mapping capability on HT bridges */ | 1755 | static void __devinit ht_enable_msi_mapping(struct pci_dev *dev) |
1783 | static inline void ht_enable_msi_mapping(struct pci_dev *dev) | ||
1784 | { | 1756 | { |
1785 | int pos, ttl = 48; | 1757 | int pos, ttl = 48; |
1786 | 1758 | ||
@@ -1799,6 +1771,9 @@ static inline void ht_enable_msi_mapping(struct pci_dev *dev) | |||
1799 | HT_CAPTYPE_MSI_MAPPING); | 1771 | HT_CAPTYPE_MSI_MAPPING); |
1800 | } | 1772 | } |
1801 | } | 1773 | } |
1774 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, | ||
1775 | PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, | ||
1776 | ht_enable_msi_mapping); | ||
1802 | 1777 | ||
1803 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | 1778 | static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) |
1804 | { | 1779 | { |
@@ -1830,7 +1805,7 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev) | |||
1830 | 1805 | ||
1831 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, | 1806 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, |
1832 | &flags) == 0) { | 1807 | &flags) == 0) { |
1833 | dev_info(&dev->dev, "Quirk disabling HT MSI mapping"); | 1808 | dev_info(&dev->dev, "Disabling HT MSI mapping"); |
1834 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, | 1809 | pci_write_config_byte(dev, pos + HT_MSI_FLAGS, |
1835 | flags & ~HT_MSI_FLAGS_ENABLE); | 1810 | flags & ~HT_MSI_FLAGS_ENABLE); |
1836 | } | 1811 | } |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index a98b2470b9ea..bd5c0e031398 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev) | |||
242 | #endif /* 0 */ | 242 | #endif /* 0 */ |
243 | 243 | ||
244 | /** | 244 | /** |
245 | * pci_cleanup_rom - internal routine for freeing the ROM copy created | 245 | * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy |
246 | * by pci_map_rom_copy called from remove.c | ||
247 | * @pdev: pointer to pci device struct | 246 | * @pdev: pointer to pci device struct |
248 | * | 247 | * |
249 | * Free the copied ROM if we allocated one. | 248 | * Free the copied ROM if we allocated one. |
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index 5480119ff9d3..3ce9f3defc12 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c | |||
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /** | 80 | /** |
81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO | 81 | * rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure |
82 | * device id structure | ||
83 | * @id: the RIO device id structure to match against | 82 | * @id: the RIO device id structure to match against |
84 | * @dev: the RIO device structure to match against | 83 | * @dev: the RIO device structure to match against |
85 | * | 84 | * |
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev) | |||
137 | * rio_register_driver - register a new RIO driver | 136 | * rio_register_driver - register a new RIO driver |
138 | * @rdrv: the RIO driver structure to register | 137 | * @rdrv: the RIO driver structure to register |
139 | * | 138 | * |
140 | * Adds a &struct rio_driver to the list of registered drivers | 139 | * Adds a &struct rio_driver to the list of registered drivers. |
141 | * Returns a negative value on error, otherwise 0. If no error | 140 | * Returns a negative value on error, otherwise 0. If no error |
142 | * occurred, the driver remains registered even if no device | 141 | * occurred, the driver remains registered even if no device |
143 | * was claimed during registration. | 142 | * was claimed during registration. |
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv) | |||
167 | } | 166 | } |
168 | 167 | ||
169 | /** | 168 | /** |
170 | * rio_match_bus - Tell if a RIO device structure has a matching RIO | 169 | * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure |
171 | * driver device id structure | ||
172 | * @dev: the standard device structure to match against | 170 | * @dev: the standard device structure to match against |
173 | * @drv: the standard driver structure containing the ids to match against | 171 | * @drv: the standard driver structure containing the ids to match against |
174 | * | 172 | * |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6402d699072b..82f5ad9c3af4 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -250,6 +250,15 @@ config RTC_DRV_TWL92330 | |||
250 | platforms. The support is integrated with the rest of | 250 | platforms. The support is integrated with the rest of |
251 | the Menelaus driver; it's not separate module. | 251 | the Menelaus driver; it's not separate module. |
252 | 252 | ||
253 | config RTC_DRV_S35390A | ||
254 | tristate "Seiko Instruments S-35390A" | ||
255 | help | ||
256 | If you say yes here you will get support for the Seiko | ||
257 | Instruments S-35390A. | ||
258 | |||
259 | This driver can also be built as a module. If so, the module | ||
260 | will be called rtc-s35390a. | ||
261 | |||
253 | endif # I2C | 262 | endif # I2C |
254 | 263 | ||
255 | comment "SPI RTC drivers" | 264 | comment "SPI RTC drivers" |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ec703f34ab86..872f1218ff9f 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -45,6 +45,7 @@ obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o | |||
45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o | 45 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o |
46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o | 46 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o |
47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o | 47 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o |
48 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o | ||
48 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o | 49 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o |
49 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | 50 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o |
50 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 51 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c new file mode 100644 index 000000000000..e8abc90c32c5 --- /dev/null +++ b/drivers/rtc/rtc-s35390a.c | |||
@@ -0,0 +1,316 @@ | |||
1 | /* | ||
2 | * Seiko Instruments S-35390A RTC Driver | ||
3 | * | ||
4 | * Copyright (c) 2007 Byron Bradley | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/rtc.h> | ||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/bitrev.h> | ||
16 | #include <linux/bcd.h> | ||
17 | #include <linux/slab.h> | ||
18 | |||
19 | #define S35390A_CMD_STATUS1 0 | ||
20 | #define S35390A_CMD_STATUS2 1 | ||
21 | #define S35390A_CMD_TIME1 2 | ||
22 | |||
23 | #define S35390A_BYTE_YEAR 0 | ||
24 | #define S35390A_BYTE_MONTH 1 | ||
25 | #define S35390A_BYTE_DAY 2 | ||
26 | #define S35390A_BYTE_WDAY 3 | ||
27 | #define S35390A_BYTE_HOURS 4 | ||
28 | #define S35390A_BYTE_MINS 5 | ||
29 | #define S35390A_BYTE_SECS 6 | ||
30 | |||
31 | #define S35390A_FLAG_POC 0x01 | ||
32 | #define S35390A_FLAG_BLD 0x02 | ||
33 | #define S35390A_FLAG_24H 0x40 | ||
34 | #define S35390A_FLAG_RESET 0x80 | ||
35 | #define S35390A_FLAG_TEST 0x01 | ||
36 | |||
37 | struct s35390a { | ||
38 | struct i2c_client *client[8]; | ||
39 | struct rtc_device *rtc; | ||
40 | int twentyfourhour; | ||
41 | }; | ||
42 | |||
43 | static int s35390a_set_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
44 | { | ||
45 | struct i2c_client *client = s35390a->client[reg]; | ||
46 | struct i2c_msg msg[] = { | ||
47 | { client->addr, 0, len, buf }, | ||
48 | }; | ||
49 | |||
50 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
51 | return -EIO; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len) | ||
57 | { | ||
58 | struct i2c_client *client = s35390a->client[reg]; | ||
59 | struct i2c_msg msg[] = { | ||
60 | { client->addr, I2C_M_RD, len, buf }, | ||
61 | }; | ||
62 | |||
63 | if ((i2c_transfer(client->adapter, msg, 1)) != 1) | ||
64 | return -EIO; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int s35390a_reset(struct s35390a *s35390a) | ||
70 | { | ||
71 | char buf[1]; | ||
72 | |||
73 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0) | ||
74 | return -EIO; | ||
75 | |||
76 | if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD))) | ||
77 | return 0; | ||
78 | |||
79 | buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H); | ||
80 | buf[0] &= 0xf0; | ||
81 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
82 | } | ||
83 | |||
84 | static int s35390a_disable_test_mode(struct s35390a *s35390a) | ||
85 | { | ||
86 | char buf[1]; | ||
87 | |||
88 | if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)) < 0) | ||
89 | return -EIO; | ||
90 | |||
91 | if (!(buf[0] & S35390A_FLAG_TEST)) | ||
92 | return 0; | ||
93 | |||
94 | buf[0] &= ~S35390A_FLAG_TEST; | ||
95 | return s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, buf, sizeof(buf)); | ||
96 | } | ||
97 | |||
98 | static char s35390a_hr2reg(struct s35390a *s35390a, int hour) | ||
99 | { | ||
100 | if (s35390a->twentyfourhour) | ||
101 | return BIN2BCD(hour); | ||
102 | |||
103 | if (hour < 12) | ||
104 | return BIN2BCD(hour); | ||
105 | |||
106 | return 0x40 | BIN2BCD(hour - 12); | ||
107 | } | ||
108 | |||
109 | static int s35390a_reg2hr(struct s35390a *s35390a, char reg) | ||
110 | { | ||
111 | unsigned hour; | ||
112 | |||
113 | if (s35390a->twentyfourhour) | ||
114 | return BCD2BIN(reg & 0x3f); | ||
115 | |||
116 | hour = BCD2BIN(reg & 0x3f); | ||
117 | if (reg & 0x40) | ||
118 | hour += 12; | ||
119 | |||
120 | return hour; | ||
121 | } | ||
122 | |||
123 | static int s35390a_set_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
124 | { | ||
125 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
126 | int i, err; | ||
127 | char buf[7]; | ||
128 | |||
129 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d mday=%d, " | ||
130 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
131 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
132 | tm->tm_wday); | ||
133 | |||
134 | buf[S35390A_BYTE_YEAR] = BIN2BCD(tm->tm_year - 100); | ||
135 | buf[S35390A_BYTE_MONTH] = BIN2BCD(tm->tm_mon + 1); | ||
136 | buf[S35390A_BYTE_DAY] = BIN2BCD(tm->tm_mday); | ||
137 | buf[S35390A_BYTE_WDAY] = BIN2BCD(tm->tm_wday); | ||
138 | buf[S35390A_BYTE_HOURS] = s35390a_hr2reg(s35390a, tm->tm_hour); | ||
139 | buf[S35390A_BYTE_MINS] = BIN2BCD(tm->tm_min); | ||
140 | buf[S35390A_BYTE_SECS] = BIN2BCD(tm->tm_sec); | ||
141 | |||
142 | /* This chip expects the bits of each byte to be in reverse order */ | ||
143 | for (i = 0; i < 7; ++i) | ||
144 | buf[i] = bitrev8(buf[i]); | ||
145 | |||
146 | err = s35390a_set_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
147 | |||
148 | return err; | ||
149 | } | ||
150 | |||
151 | static int s35390a_get_datetime(struct i2c_client *client, struct rtc_time *tm) | ||
152 | { | ||
153 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
154 | char buf[7]; | ||
155 | int i, err; | ||
156 | |||
157 | err = s35390a_get_reg(s35390a, S35390A_CMD_TIME1, buf, sizeof(buf)); | ||
158 | if (err < 0) | ||
159 | return err; | ||
160 | |||
161 | /* This chip returns the bits of each byte in reverse order */ | ||
162 | for (i = 0; i < 7; ++i) | ||
163 | buf[i] = bitrev8(buf[i]); | ||
164 | |||
165 | tm->tm_sec = BCD2BIN(buf[S35390A_BYTE_SECS]); | ||
166 | tm->tm_min = BCD2BIN(buf[S35390A_BYTE_MINS]); | ||
167 | tm->tm_hour = s35390a_reg2hr(s35390a, buf[S35390A_BYTE_HOURS]); | ||
168 | tm->tm_wday = BCD2BIN(buf[S35390A_BYTE_WDAY]); | ||
169 | tm->tm_mday = BCD2BIN(buf[S35390A_BYTE_DAY]); | ||
170 | tm->tm_mon = BCD2BIN(buf[S35390A_BYTE_MONTH]) - 1; | ||
171 | tm->tm_year = BCD2BIN(buf[S35390A_BYTE_YEAR]) + 100; | ||
172 | |||
173 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, mday=%d, " | ||
174 | "mon=%d, year=%d, wday=%d\n", __func__, tm->tm_sec, | ||
175 | tm->tm_min, tm->tm_hour, tm->tm_mday, tm->tm_mon, tm->tm_year, | ||
176 | tm->tm_wday); | ||
177 | |||
178 | return rtc_valid_tm(tm); | ||
179 | } | ||
180 | |||
181 | static int s35390a_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
182 | { | ||
183 | return s35390a_get_datetime(to_i2c_client(dev), tm); | ||
184 | } | ||
185 | |||
186 | static int s35390a_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
187 | { | ||
188 | return s35390a_set_datetime(to_i2c_client(dev), tm); | ||
189 | } | ||
190 | |||
191 | static const struct rtc_class_ops s35390a_rtc_ops = { | ||
192 | .read_time = s35390a_rtc_read_time, | ||
193 | .set_time = s35390a_rtc_set_time, | ||
194 | }; | ||
195 | |||
196 | static struct i2c_driver s35390a_driver; | ||
197 | |||
198 | static int s35390a_probe(struct i2c_client *client) | ||
199 | { | ||
200 | int err; | ||
201 | unsigned int i; | ||
202 | struct s35390a *s35390a; | ||
203 | struct rtc_time tm; | ||
204 | char buf[1]; | ||
205 | |||
206 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { | ||
207 | err = -ENODEV; | ||
208 | goto exit; | ||
209 | } | ||
210 | |||
211 | s35390a = kzalloc(sizeof(struct s35390a), GFP_KERNEL); | ||
212 | if (!s35390a) { | ||
213 | err = -ENOMEM; | ||
214 | goto exit; | ||
215 | } | ||
216 | |||
217 | s35390a->client[0] = client; | ||
218 | i2c_set_clientdata(client, s35390a); | ||
219 | |||
220 | /* This chip uses multiple addresses, use dummy devices for them */ | ||
221 | for (i = 1; i < 8; ++i) { | ||
222 | s35390a->client[i] = i2c_new_dummy(client->adapter, | ||
223 | client->addr + i, "rtc-s35390a"); | ||
224 | if (!s35390a->client[i]) { | ||
225 | dev_err(&client->dev, "Address %02x unavailable\n", | ||
226 | client->addr + i); | ||
227 | err = -EBUSY; | ||
228 | goto exit_dummy; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | err = s35390a_reset(s35390a); | ||
233 | if (err < 0) { | ||
234 | dev_err(&client->dev, "error resetting chip\n"); | ||
235 | goto exit_dummy; | ||
236 | } | ||
237 | |||
238 | err = s35390a_disable_test_mode(s35390a); | ||
239 | if (err < 0) { | ||
240 | dev_err(&client->dev, "error disabling test mode\n"); | ||
241 | goto exit_dummy; | ||
242 | } | ||
243 | |||
244 | err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)); | ||
245 | if (err < 0) { | ||
246 | dev_err(&client->dev, "error checking 12/24 hour mode\n"); | ||
247 | goto exit_dummy; | ||
248 | } | ||
249 | if (buf[0] & S35390A_FLAG_24H) | ||
250 | s35390a->twentyfourhour = 1; | ||
251 | else | ||
252 | s35390a->twentyfourhour = 0; | ||
253 | |||
254 | if (s35390a_get_datetime(client, &tm) < 0) | ||
255 | dev_warn(&client->dev, "clock needs to be set\n"); | ||
256 | |||
257 | s35390a->rtc = rtc_device_register(s35390a_driver.driver.name, | ||
258 | &client->dev, &s35390a_rtc_ops, THIS_MODULE); | ||
259 | |||
260 | if (IS_ERR(s35390a->rtc)) { | ||
261 | err = PTR_ERR(s35390a->rtc); | ||
262 | goto exit_dummy; | ||
263 | } | ||
264 | return 0; | ||
265 | |||
266 | exit_dummy: | ||
267 | for (i = 1; i < 8; ++i) | ||
268 | if (s35390a->client[i]) | ||
269 | i2c_unregister_device(s35390a->client[i]); | ||
270 | kfree(s35390a); | ||
271 | i2c_set_clientdata(client, NULL); | ||
272 | |||
273 | exit: | ||
274 | return err; | ||
275 | } | ||
276 | |||
277 | static int s35390a_remove(struct i2c_client *client) | ||
278 | { | ||
279 | unsigned int i; | ||
280 | |||
281 | struct s35390a *s35390a = i2c_get_clientdata(client); | ||
282 | for (i = 1; i < 8; ++i) | ||
283 | if (s35390a->client[i]) | ||
284 | i2c_unregister_device(s35390a->client[i]); | ||
285 | |||
286 | rtc_device_unregister(s35390a->rtc); | ||
287 | kfree(s35390a); | ||
288 | i2c_set_clientdata(client, NULL); | ||
289 | |||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | static struct i2c_driver s35390a_driver = { | ||
294 | .driver = { | ||
295 | .name = "rtc-s35390a", | ||
296 | }, | ||
297 | .probe = s35390a_probe, | ||
298 | .remove = s35390a_remove, | ||
299 | }; | ||
300 | |||
301 | static int __init s35390a_rtc_init(void) | ||
302 | { | ||
303 | return i2c_add_driver(&s35390a_driver); | ||
304 | } | ||
305 | |||
306 | static void __exit s35390a_rtc_exit(void) | ||
307 | { | ||
308 | i2c_del_driver(&s35390a_driver); | ||
309 | } | ||
310 | |||
311 | MODULE_AUTHOR("Byron Bradley <byron.bbradley@gmail.com>"); | ||
312 | MODULE_DESCRIPTION("S35390A RTC driver"); | ||
313 | MODULE_LICENSE("GPL"); | ||
314 | |||
315 | module_init(s35390a_rtc_init); | ||
316 | module_exit(s35390a_rtc_exit); | ||
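Note on the new rtc-s35390a driver above: the S-35390A transmits each register byte least-significant bit first, so every BCD-coded time field is passed through bitrev8() before it is written and after it is read. A minimal sketch of how one field would be encoded, using the same BIN2BCD()/bitrev8() helpers; the value is an illustrative example, not taken from the driver:

	/* Illustrative encoding of hour 14 in 24-hour mode (example value only). */
	u8 hour = 14;
	u8 bcd  = BIN2BCD(hour);   /* 0x14: tens digit in the high nibble */
	u8 wire = bitrev8(bcd);    /* 0x28: bit-reversed for the LSB-first chip */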
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index f69714a0e9e7..b19db20a0bef 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2310,10 +2310,8 @@ static int | |||
2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) | 2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) |
2311 | { | 2311 | { |
2312 | 2312 | ||
2313 | /* check failed CCW */ | 2313 | if (cqr1->startdev != cqr2->startdev) |
2314 | if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) { | 2314 | return 0; |
2315 | // return 0; /* CCW doesn't match */ | ||
2316 | } | ||
2317 | 2315 | ||
2318 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) | 2316 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) |
2319 | return 0; | 2317 | return 0; |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 28a86f070048..556063e8f7a9 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -62,8 +62,10 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
62 | return 0; | 62 | return 0; |
63 | if (device->block) | 63 | if (device->block) |
64 | block = device->block; | 64 | block = device->block; |
65 | else | 65 | else { |
66 | dasd_put_device(device); | ||
66 | return 0; | 67 | return 0; |
68 | } | ||
67 | /* Print device number. */ | 69 | /* Print device number. */ |
68 | seq_printf(m, "%s", device->cdev->dev.bus_id); | 70 | seq_printf(m, "%s", device->cdev->dev.bus_id); |
69 | /* Print discipline string. */ | 71 | /* Print discipline string. */ |
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 389346cda6c8..07c7f31081bc 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = { | |||
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kbdiacruc accent_table[MAX_DIACR] = { | 153 | struct kbdiacruc accent_table[MAX_DIACR] = { |
154 | {'^', 'c', '\003'}, {'^', 'd', '\004'}, | 154 | {'^', 'c', 0003}, {'^', 'd', 0004}, |
155 | {'^', 'z', '\032'}, {'^', '\012', '\000'}, | 155 | {'^', 'z', 0032}, {'^', 0012, 0000}, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | unsigned int accent_table_size = 4; | 158 | unsigned int accent_table_size = 4; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 92f527201792..f7b258dfd52c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -367,7 +367,7 @@ sclp_vt220_timeout(unsigned long data) | |||
367 | sclp_vt220_emit_current(); | 367 | sclp_vt220_emit_current(); |
368 | } | 368 | } |
369 | 369 | ||
370 | #define BUFFER_MAX_DELAY HZ/2 | 370 | #define BUFFER_MAX_DELAY HZ/20 |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * Internal implementation of the write function. Write COUNT bytes of data | 373 | * Internal implementation of the write function. Write COUNT bytes of data |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index d0c6fd3b1c19..7b0b81901297 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -490,10 +490,12 @@ static int ap_device_probe(struct device *dev) | |||
490 | int rc; | 490 | int rc; |
491 | 491 | ||
492 | ap_dev->drv = ap_drv; | 492 | ap_dev->drv = ap_drv; |
493 | spin_lock_bh(&ap_device_lock); | ||
494 | list_add(&ap_dev->list, &ap_device_list); | ||
495 | spin_unlock_bh(&ap_device_lock); | ||
496 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 493 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
494 | if (!rc) { | ||
495 | spin_lock_bh(&ap_device_lock); | ||
496 | list_add(&ap_dev->list, &ap_device_list); | ||
497 | spin_unlock_bh(&ap_device_lock); | ||
498 | } | ||
497 | return rc; | 499 | return rc; |
498 | } | 500 | } |
499 | 501 | ||
@@ -532,11 +534,11 @@ static int ap_device_remove(struct device *dev) | |||
532 | 534 | ||
533 | ap_flush_queue(ap_dev); | 535 | ap_flush_queue(ap_dev); |
534 | del_timer_sync(&ap_dev->timeout); | 536 | del_timer_sync(&ap_dev->timeout); |
535 | if (ap_drv->remove) | ||
536 | ap_drv->remove(ap_dev); | ||
537 | spin_lock_bh(&ap_device_lock); | 537 | spin_lock_bh(&ap_device_lock); |
538 | list_del_init(&ap_dev->list); | 538 | list_del_init(&ap_dev->list); |
539 | spin_unlock_bh(&ap_device_lock); | 539 | spin_unlock_bh(&ap_device_lock); |
540 | if (ap_drv->remove) | ||
541 | ap_drv->remove(ap_dev); | ||
540 | spin_lock_bh(&ap_dev->lock); | 542 | spin_lock_bh(&ap_dev->lock); |
541 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); | 543 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); |
542 | spin_unlock_bh(&ap_dev->lock); | 544 | spin_unlock_bh(&ap_dev->lock); |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index c3076217871e..d8a5c229c5a7 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -1851,8 +1851,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) | |||
1851 | } | 1851 | } |
1852 | } | 1852 | } |
1853 | /* See how many write buffers are required to hold this data */ | 1853 | /* See how many write buffers are required to hold this data */ |
1854 | numBuffers= ( skb->len + privptr->p_env->write_size - 1) / | 1854 | numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size); |
1855 | ( privptr->p_env->write_size); | ||
1856 | 1855 | ||
1857 | /* If that number of buffers isn't available, give up for now */ | 1856 | /* If that number of buffers isn't available, give up for now */ |
1858 | if (privptr->write_free_count < numBuffers || | 1857 | if (privptr->write_free_count < numBuffers || |
@@ -2114,8 +2113,7 @@ init_ccw_bk(struct net_device *dev) | |||
2114 | */ | 2113 | */ |
2115 | ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; | 2114 | ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; |
2116 | ccw_pages_required= | 2115 | ccw_pages_required= |
2117 | (ccw_blocks_required+ccw_blocks_perpage -1) / | 2116 | DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage); |
2118 | ccw_blocks_perpage; | ||
2119 | 2117 | ||
2120 | #ifdef DEBUGMSG | 2118 | #ifdef DEBUGMSG |
2121 | printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", | 2119 | printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", |
@@ -2131,30 +2129,29 @@ init_ccw_bk(struct net_device *dev) | |||
2131 | * provide good performance. With packing buffers support 32k | 2129 | * provide good performance. With packing buffers support 32k |
2132 | * buffers are used. | 2130 | * buffers are used. |
2133 | */ | 2131 | */ |
2134 | if (privptr->p_env->read_size < PAGE_SIZE) { | 2132 | if (privptr->p_env->read_size < PAGE_SIZE) { |
2135 | claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size; | 2133 | claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size; |
2136 | claw_read_pages= (privptr->p_env->read_buffers + | 2134 | claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers, |
2137 | claw_reads_perpage -1) / claw_reads_perpage; | 2135 | claw_reads_perpage); |
2138 | } | 2136 | } |
2139 | else { /* > or equal */ | 2137 | else { /* > or equal */ |
2140 | privptr->p_buff_pages_perread= | 2138 | privptr->p_buff_pages_perread = |
2141 | (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE; | 2139 | DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); |
2142 | claw_read_pages= | 2140 | claw_read_pages = privptr->p_env->read_buffers * |
2143 | privptr->p_env->read_buffers * privptr->p_buff_pages_perread; | 2141 | privptr->p_buff_pages_perread; |
2144 | } | 2142 | } |
2145 | if (privptr->p_env->write_size < PAGE_SIZE) { | 2143 | if (privptr->p_env->write_size < PAGE_SIZE) { |
2146 | claw_writes_perpage= | 2144 | claw_writes_perpage = |
2147 | PAGE_SIZE / privptr->p_env->write_size; | 2145 | PAGE_SIZE / privptr->p_env->write_size; |
2148 | claw_write_pages= | 2146 | claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers, |
2149 | (privptr->p_env->write_buffers + claw_writes_perpage -1) / | 2147 | claw_writes_perpage); |
2150 | claw_writes_perpage; | ||
2151 | 2148 | ||
2152 | } | 2149 | } |
2153 | else { /* > or equal */ | 2150 | else { /* > or equal */ |
2154 | privptr->p_buff_pages_perwrite= | 2151 | privptr->p_buff_pages_perwrite = |
2155 | (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE; | 2152 | DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE); |
2156 | claw_write_pages= | 2153 | claw_write_pages = privptr->p_env->write_buffers * |
2157 | privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite; | 2154 | privptr->p_buff_pages_perwrite; |
2158 | } | 2155 | } |
2159 | #ifdef DEBUGMSG | 2156 | #ifdef DEBUGMSG |
2160 | if (privptr->p_env->read_size < PAGE_SIZE) { | 2157 | if (privptr->p_env->read_size < PAGE_SIZE) { |
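Note on the claw.c hunks above: the open-coded round-up divisions are replaced with the kernel's DIV_ROUND_UP() macro from <linux/kernel.h>, which is equivalent to the removed expressions:

	/* Reference definition from <linux/kernel.h>. */
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Example: 10 read buffers at 4 per page -> DIV_ROUND_UP(10, 4) == 3 pages. */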
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index 32f513b1b78a..eb8efdcefe48 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h | |||
@@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun); | |||
102 | int asd_clear_aca(struct domain_device *, u8 *lun); | 102 | int asd_clear_aca(struct domain_device *, u8 *lun); |
103 | int asd_clear_task_set(struct domain_device *, u8 *lun); | 103 | int asd_clear_task_set(struct domain_device *, u8 *lun); |
104 | int asd_lu_reset(struct domain_device *, u8 *lun); | 104 | int asd_lu_reset(struct domain_device *, u8 *lun); |
105 | int asd_I_T_nexus_reset(struct domain_device *dev); | ||
105 | int asd_query_task(struct sas_task *); | 106 | int asd_query_task(struct sas_task *); |
106 | 107 | ||
107 | /* ---------- Adapter and Port management ---------- */ | 108 | /* ---------- Adapter and Port management ---------- */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h index 150f6706d23f..abc757559c1a 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.h +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h | |||
@@ -140,7 +140,7 @@ struct asd_ascb { | |||
140 | 140 | ||
141 | /* internally generated command */ | 141 | /* internally generated command */ |
142 | struct timer_list timer; | 142 | struct timer_list timer; |
143 | struct completion completion; | 143 | struct completion *completion; |
144 | u8 tag_valid:1; | 144 | u8 tag_valid:1; |
145 | __be16 tag; /* error recovery only */ | 145 | __be16 tag; /* error recovery only */ |
146 | 146 | ||
@@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, | |||
294 | ascb->timer.function = NULL; | 294 | ascb->timer.function = NULL; |
295 | init_timer(&ascb->timer); | 295 | init_timer(&ascb->timer); |
296 | ascb->tc_index = -1; | 296 | ascb->tc_index = -1; |
297 | init_completion(&ascb->completion); | ||
298 | } | 297 | } |
299 | 298 | ||
300 | /* Must be called with the tc_index_lock held! | 299 | /* Must be called with the tc_index_lock held! |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 5d761eb67442..88d1e731b65e 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { | |||
1003 | .lldd_abort_task_set = asd_abort_task_set, | 1003 | .lldd_abort_task_set = asd_abort_task_set, |
1004 | .lldd_clear_aca = asd_clear_aca, | 1004 | .lldd_clear_aca = asd_clear_aca, |
1005 | .lldd_clear_task_set = asd_clear_task_set, | 1005 | .lldd_clear_task_set = asd_clear_task_set, |
1006 | .lldd_I_T_nexus_reset = NULL, | 1006 | .lldd_I_T_nexus_reset = asd_I_T_nexus_reset, |
1007 | .lldd_lu_reset = asd_lu_reset, | 1007 | .lldd_lu_reset = asd_lu_reset, |
1008 | .lldd_query_task = asd_query_task, | 1008 | .lldd_query_task = asd_query_task, |
1009 | 1009 | ||
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 965d4bb999d9..008df9ab92a5 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c | |||
@@ -343,11 +343,13 @@ Again: | |||
343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
344 | task->task_state_flags |= SAS_TASK_STATE_DONE; | 344 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { | 345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { |
346 | struct completion *completion = ascb->completion; | ||
346 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 347 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
347 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " | 348 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " |
348 | "stat 0x%x but aborted by upper layer!\n", | 349 | "stat 0x%x but aborted by upper layer!\n", |
349 | task, opcode, ts->resp, ts->stat); | 350 | task, opcode, ts->resp, ts->stat); |
350 | complete(&ascb->completion); | 351 | if (completion) |
352 | complete(completion); | ||
351 | } else { | 353 | } else { |
352 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 354 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
353 | task->lldd_task = NULL; | 355 | task->lldd_task = NULL; |
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 144f5ad20453..b9ac8f703a1d 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c | |||
@@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb, | |||
53 | return res; | 53 | return res; |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline void asd_timedout_common(unsigned long data) | 56 | /* ---------- CLEAR NEXUS ---------- */ |
57 | { | ||
58 | struct asd_ascb *ascb = (void *) data; | ||
59 | struct asd_seq_data *seq = &ascb->ha->seq; | ||
60 | unsigned long flags; | ||
61 | 57 | ||
62 | spin_lock_irqsave(&seq->pend_q_lock, flags); | 58 | struct tasklet_completion_status { |
63 | seq->pending--; | 59 | int dl_opcode; |
64 | list_del_init(&ascb->list); | 60 | int tmf_state; |
65 | spin_unlock_irqrestore(&seq->pend_q_lock, flags); | 61 | u8 tag_valid:1; |
66 | } | 62 | __be16 tag; |
63 | }; | ||
64 | |||
65 | #define DECLARE_TCS(tcs) \ | ||
66 | struct tasklet_completion_status tcs = { \ | ||
67 | .dl_opcode = 0, \ | ||
68 | .tmf_state = 0, \ | ||
69 | .tag_valid = 0, \ | ||
70 | .tag = 0, \ | ||
71 | } | ||
67 | 72 | ||
68 | /* ---------- CLEAR NEXUS ---------- */ | ||
69 | 73 | ||
70 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, | 74 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, |
71 | struct done_list_struct *dl) | 75 | struct done_list_struct *dl) |
72 | { | 76 | { |
77 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
73 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 78 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
74 | if (!del_timer(&ascb->timer)) { | 79 | if (!del_timer(&ascb->timer)) { |
75 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); | 80 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); |
76 | return; | 81 | return; |
77 | } | 82 | } |
78 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); | 83 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); |
79 | ascb->uldd_task = (void *) (unsigned long) dl->opcode; | 84 | tcs->dl_opcode = dl->opcode; |
80 | complete(&ascb->completion); | 85 | complete(ascb->completion); |
86 | asd_ascb_free(ascb); | ||
81 | } | 87 | } |
82 | 88 | ||
83 | static void asd_clear_nexus_timedout(unsigned long data) | 89 | static void asd_clear_nexus_timedout(unsigned long data) |
84 | { | 90 | { |
85 | struct asd_ascb *ascb = (void *) data; | 91 | struct asd_ascb *ascb = (void *)data; |
92 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
86 | 93 | ||
87 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 94 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
88 | asd_timedout_common(data); | 95 | tcs->dl_opcode = TMF_RESP_FUNC_FAILED; |
89 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 96 | complete(ascb->completion); |
90 | complete(&ascb->completion); | ||
91 | } | 97 | } |
92 | 98 | ||
93 | #define CLEAR_NEXUS_PRE \ | 99 | #define CLEAR_NEXUS_PRE \ |
100 | struct asd_ascb *ascb; \ | ||
101 | struct scb *scb; \ | ||
102 | int res; \ | ||
103 | DECLARE_COMPLETION_ONSTACK(completion); \ | ||
104 | DECLARE_TCS(tcs); \ | ||
105 | \ | ||
94 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ | 106 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ |
95 | res = 1; \ | 107 | res = 1; \ |
96 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ | 108 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ |
97 | if (!ascb) \ | 109 | if (!ascb) \ |
98 | return -ENOMEM; \ | 110 | return -ENOMEM; \ |
99 | \ | 111 | \ |
112 | ascb->completion = &completion; \ | ||
113 | ascb->uldd_task = &tcs; \ | ||
100 | scb = ascb->scb; \ | 114 | scb = ascb->scb; \ |
101 | scb->header.opcode = CLEAR_NEXUS | 115 | scb->header.opcode = CLEAR_NEXUS |
102 | 116 | ||
@@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data) | |||
107 | if (res) \ | 121 | if (res) \ |
108 | goto out_err; \ | 122 | goto out_err; \ |
109 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ | 123 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ |
110 | wait_for_completion(&ascb->completion); \ | 124 | wait_for_completion(&completion); \ |
111 | res = (int) (unsigned long) ascb->uldd_task; \ | 125 | res = tcs.dl_opcode; \ |
112 | if (res == TC_NO_ERROR) \ | 126 | if (res == TC_NO_ERROR) \ |
113 | res = TMF_RESP_FUNC_COMPLETE; \ | 127 | res = TMF_RESP_FUNC_COMPLETE; \ |
128 | return res; \ | ||
114 | out_err: \ | 129 | out_err: \ |
115 | asd_ascb_free(ascb); \ | 130 | asd_ascb_free(ascb); \ |
116 | return res | 131 | return res |
@@ -118,9 +133,6 @@ out_err: \ | |||
118 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | 133 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) |
119 | { | 134 | { |
120 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; | 135 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; |
121 | struct asd_ascb *ascb; | ||
122 | struct scb *scb; | ||
123 | int res; | ||
124 | 136 | ||
125 | CLEAR_NEXUS_PRE; | 137 | CLEAR_NEXUS_PRE; |
126 | scb->clear_nexus.nexus = NEXUS_ADAPTER; | 138 | scb->clear_nexus.nexus = NEXUS_ADAPTER; |
@@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | |||
130 | int asd_clear_nexus_port(struct asd_sas_port *port) | 142 | int asd_clear_nexus_port(struct asd_sas_port *port) |
131 | { | 143 | { |
132 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; | 144 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; |
133 | struct asd_ascb *ascb; | ||
134 | struct scb *scb; | ||
135 | int res; | ||
136 | 145 | ||
137 | CLEAR_NEXUS_PRE; | 146 | CLEAR_NEXUS_PRE; |
138 | scb->clear_nexus.nexus = NEXUS_PORT; | 147 | scb->clear_nexus.nexus = NEXUS_PORT; |
@@ -140,29 +149,73 @@ int asd_clear_nexus_port(struct asd_sas_port *port) | |||
140 | CLEAR_NEXUS_POST; | 149 | CLEAR_NEXUS_POST; |
141 | } | 150 | } |
142 | 151 | ||
143 | #if 0 | 152 | enum clear_nexus_phase { |
144 | static int asd_clear_nexus_I_T(struct domain_device *dev) | 153 | NEXUS_PHASE_PRE, |
154 | NEXUS_PHASE_POST, | ||
155 | NEXUS_PHASE_RESUME, | ||
156 | }; | ||
157 | |||
158 | static int asd_clear_nexus_I_T(struct domain_device *dev, | ||
159 | enum clear_nexus_phase phase) | ||
145 | { | 160 | { |
146 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 161 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
147 | struct asd_ascb *ascb; | ||
148 | struct scb *scb; | ||
149 | int res; | ||
150 | 162 | ||
151 | CLEAR_NEXUS_PRE; | 163 | CLEAR_NEXUS_PRE; |
152 | scb->clear_nexus.nexus = NEXUS_I_T; | 164 | scb->clear_nexus.nexus = NEXUS_I_T; |
153 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; | 165 | switch (phase) { |
166 | case NEXUS_PHASE_PRE: | ||
167 | scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX; | ||
168 | break; | ||
169 | case NEXUS_PHASE_POST: | ||
170 | scb->clear_nexus.flags = SEND_Q | NOTINQ; | ||
171 | break; | ||
172 | case NEXUS_PHASE_RESUME: | ||
173 | scb->clear_nexus.flags = RESUME_TX; | ||
174 | } | ||
154 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) | 175 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) |
155 | dev->lldd_dev); | 176 | dev->lldd_dev); |
156 | CLEAR_NEXUS_POST; | 177 | CLEAR_NEXUS_POST; |
157 | } | 178 | } |
158 | #endif | 179 | |
180 | int asd_I_T_nexus_reset(struct domain_device *dev) | ||
181 | { | ||
182 | int res, tmp_res, i; | ||
183 | struct sas_phy *phy = sas_find_local_phy(dev); | ||
184 | /* Standard mandates link reset for ATA (type 0) and | ||
185 | * hard reset for SSP (type 1) */ | ||
186 | int reset_type = (dev->dev_type == SATA_DEV || | ||
187 | (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; | ||
188 | |||
189 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); | ||
190 | /* send a hard reset */ | ||
191 | ASD_DPRINTK("sending %s reset to %s\n", | ||
192 | reset_type ? "hard" : "soft", phy->dev.bus_id); | ||
193 | res = sas_phy_reset(phy, reset_type); | ||
194 | if (res == TMF_RESP_FUNC_COMPLETE) { | ||
195 | /* wait for the maximum settle time */ | ||
196 | msleep(500); | ||
197 | /* clear all outstanding commands (keep nexus suspended) */ | ||
198 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST); | ||
199 | } | ||
200 | for (i = 0 ; i < 3; i++) { | ||
201 | tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME); | ||
202 | if (tmp_res == TC_RESUME) | ||
203 | return res; | ||
204 | msleep(500); | ||
205 | } | ||
206 | |||
207 | /* This is a bit of a problem: the sequencer is still suspended | ||
208 | * and is refusing to resume. Hope it will resume on a bigger hammer | ||
209 | * or the disk is lost */ | ||
210 | dev_printk(KERN_ERR, &phy->dev, | ||
211 | "Failed to resume nexus after reset 0x%x\n", tmp_res); | ||
212 | |||
213 | return TMF_RESP_FUNC_FAILED; | ||
214 | } | ||
159 | 215 | ||
160 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) | 216 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) |
161 | { | 217 | { |
162 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 218 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
163 | struct asd_ascb *ascb; | ||
164 | struct scb *scb; | ||
165 | int res; | ||
166 | 219 | ||
167 | CLEAR_NEXUS_PRE; | 220 | CLEAR_NEXUS_PRE; |
168 | scb->clear_nexus.nexus = NEXUS_I_T_L; | 221 | scb->clear_nexus.nexus = NEXUS_I_T_L; |
@@ -177,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task) | |||
177 | { | 230 | { |
178 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 231 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
179 | struct asd_ascb *tascb = task->lldd_task; | 232 | struct asd_ascb *tascb = task->lldd_task; |
180 | struct asd_ascb *ascb; | ||
181 | struct scb *scb; | ||
182 | int res; | ||
183 | 233 | ||
184 | CLEAR_NEXUS_PRE; | 234 | CLEAR_NEXUS_PRE; |
185 | scb->clear_nexus.nexus = NEXUS_TAG; | 235 | scb->clear_nexus.nexus = NEXUS_TAG; |
@@ -195,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
195 | { | 245 | { |
196 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 246 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
197 | struct asd_ascb *tascb = task->lldd_task; | 247 | struct asd_ascb *tascb = task->lldd_task; |
198 | struct asd_ascb *ascb; | ||
199 | struct scb *scb; | ||
200 | int res; | ||
201 | 248 | ||
202 | CLEAR_NEXUS_PRE; | 249 | CLEAR_NEXUS_PRE; |
203 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; | 250 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; |
@@ -213,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
213 | static void asd_tmf_timedout(unsigned long data) | 260 | static void asd_tmf_timedout(unsigned long data) |
214 | { | 261 | { |
215 | struct asd_ascb *ascb = (void *) data; | 262 | struct asd_ascb *ascb = (void *) data; |
263 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
216 | 264 | ||
217 | ASD_DPRINTK("tmf timed out\n"); | 265 | ASD_DPRINTK("tmf timed out\n"); |
218 | asd_timedout_common(data); | 266 | tcs->tmf_state = TMF_RESP_FUNC_FAILED; |
219 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 267 | complete(ascb->completion); |
220 | complete(&ascb->completion); | ||
221 | } | 268 | } |
222 | 269 | ||
223 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | 270 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, |
@@ -269,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | |||
269 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, | 316 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, |
270 | struct done_list_struct *dl) | 317 | struct done_list_struct *dl) |
271 | { | 318 | { |
319 | struct tasklet_completion_status *tcs; | ||
320 | |||
272 | if (!del_timer(&ascb->timer)) | 321 | if (!del_timer(&ascb->timer)) |
273 | return; | 322 | return; |
274 | 323 | ||
324 | tcs = ascb->uldd_task; | ||
275 | ASD_DPRINTK("tmf tasklet complete\n"); | 325 | ASD_DPRINTK("tmf tasklet complete\n"); |
276 | 326 | ||
277 | if (dl->opcode == TC_SSP_RESP) | 327 | tcs->dl_opcode = dl->opcode; |
278 | ascb->uldd_task = (void *) (unsigned long) | 328 | |
279 | asd_get_tmf_resp_tasklet(ascb, dl); | 329 | if (dl->opcode == TC_SSP_RESP) { |
280 | else | 330 | tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl); |
281 | ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; | 331 | tcs->tag_valid = ascb->tag_valid; |
332 | tcs->tag = ascb->tag; | ||
333 | } | ||
282 | 334 | ||
283 | complete(&ascb->completion); | 335 | complete(ascb->completion); |
336 | asd_ascb_free(ascb); | ||
284 | } | 337 | } |
285 | 338 | ||
286 | static inline int asd_clear_nexus(struct sas_task *task) | 339 | static inline int asd_clear_nexus(struct sas_task *task) |
@@ -288,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task) | |||
288 | int res = TMF_RESP_FUNC_FAILED; | 341 | int res = TMF_RESP_FUNC_FAILED; |
289 | int leftover; | 342 | int leftover; |
290 | struct asd_ascb *tascb = task->lldd_task; | 343 | struct asd_ascb *tascb = task->lldd_task; |
344 | DECLARE_COMPLETION_ONSTACK(completion); | ||
291 | unsigned long flags; | 345 | unsigned long flags; |
292 | 346 | ||
347 | tascb->completion = &completion; | ||
348 | |||
293 | ASD_DPRINTK("task not done, clearing nexus\n"); | 349 | ASD_DPRINTK("task not done, clearing nexus\n"); |
294 | if (tascb->tag_valid) | 350 | if (tascb->tag_valid) |
295 | res = asd_clear_nexus_tag(task); | 351 | res = asd_clear_nexus_tag(task); |
296 | else | 352 | else |
297 | res = asd_clear_nexus_index(task); | 353 | res = asd_clear_nexus_index(task); |
298 | leftover = wait_for_completion_timeout(&tascb->completion, | 354 | leftover = wait_for_completion_timeout(&completion, |
299 | AIC94XX_SCB_TIMEOUT); | 355 | AIC94XX_SCB_TIMEOUT); |
356 | tascb->completion = NULL; | ||
300 | ASD_DPRINTK("came back from clear nexus\n"); | 357 | ASD_DPRINTK("came back from clear nexus\n"); |
301 | spin_lock_irqsave(&task->task_state_lock, flags); | 358 | spin_lock_irqsave(&task->task_state_lock, flags); |
302 | if (leftover < 1) | 359 | if (leftover < 1) |
@@ -350,6 +407,11 @@ int asd_abort_task(struct sas_task *task) | |||
350 | struct asd_ascb *ascb = NULL; | 407 | struct asd_ascb *ascb = NULL; |
351 | struct scb *scb; | 408 | struct scb *scb; |
352 | int leftover; | 409 | int leftover; |
410 | DECLARE_TCS(tcs); | ||
411 | DECLARE_COMPLETION_ONSTACK(completion); | ||
412 | DECLARE_COMPLETION_ONSTACK(tascb_completion); | ||
413 | |||
414 | tascb->completion = &tascb_completion; | ||
353 | 415 | ||
354 | spin_lock_irqsave(&task->task_state_lock, flags); | 416 | spin_lock_irqsave(&task->task_state_lock, flags); |
355 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 417 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -363,8 +425,10 @@ int asd_abort_task(struct sas_task *task) | |||
363 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 425 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
364 | if (!ascb) | 426 | if (!ascb) |
365 | return -ENOMEM; | 427 | return -ENOMEM; |
366 | scb = ascb->scb; | ||
367 | 428 | ||
429 | ascb->uldd_task = &tcs; | ||
430 | ascb->completion = &completion; | ||
431 | scb = ascb->scb; | ||
368 | scb->header.opcode = SCB_ABORT_TASK; | 432 | scb->header.opcode = SCB_ABORT_TASK; |
369 | 433 | ||
370 | switch (task->task_proto) { | 434 | switch (task->task_proto) { |
@@ -406,13 +470,12 @@ int asd_abort_task(struct sas_task *task) | |||
406 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, | 470 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, |
407 | asd_tmf_timedout); | 471 | asd_tmf_timedout); |
408 | if (res) | 472 | if (res) |
409 | goto out; | 473 | goto out_free; |
410 | wait_for_completion(&ascb->completion); | 474 | wait_for_completion(&completion); |
411 | ASD_DPRINTK("tmf came back\n"); | 475 | ASD_DPRINTK("tmf came back\n"); |
412 | 476 | ||
413 | res = (int) (unsigned long) ascb->uldd_task; | 477 | tascb->tag = tcs.tag; |
414 | tascb->tag = ascb->tag; | 478 | tascb->tag_valid = tcs.tag_valid; |
415 | tascb->tag_valid = ascb->tag_valid; | ||
416 | 479 | ||
417 | spin_lock_irqsave(&task->task_state_lock, flags); | 480 | spin_lock_irqsave(&task->task_state_lock, flags); |
418 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 481 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -423,63 +486,68 @@ int asd_abort_task(struct sas_task *task) | |||
423 | } | 486 | } |
424 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 487 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
425 | 488 | ||
426 | switch (res) { | 489 | if (tcs.dl_opcode == TC_SSP_RESP) { |
427 | /* The task to be aborted has been sent to the device. | 490 | /* The task to be aborted has been sent to the device. |
428 | * We got a Response IU for the ABORT TASK TMF. */ | 491 | * We got a Response IU for the ABORT TASK TMF. */ |
429 | case TC_NO_ERROR + 0xFF00: | 492 | if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE) |
430 | case TMF_RESP_FUNC_COMPLETE: | 493 | res = asd_clear_nexus(task); |
431 | case TMF_RESP_FUNC_FAILED: | 494 | else |
432 | res = asd_clear_nexus(task); | 495 | res = tcs.tmf_state; |
433 | break; | 496 | } else if (tcs.dl_opcode == TC_NO_ERROR && |
434 | case TMF_RESP_INVALID_FRAME: | 497 | tcs.tmf_state == TMF_RESP_FUNC_FAILED) { |
435 | case TMF_RESP_OVERLAPPED_TAG: | 498 | /* timeout */ |
436 | case TMF_RESP_FUNC_ESUPP: | ||
437 | case TMF_RESP_NO_LUN: | ||
438 | goto out_done; break; | ||
439 | } | ||
440 | /* In the following we assume that the managing layer | ||
441 | * will _never_ make a mistake, when issuing ABORT TASK. | ||
442 | */ | ||
443 | switch (res) { | ||
444 | default: | ||
445 | res = asd_clear_nexus(task); | ||
446 | /* fallthrough */ | ||
447 | case TC_NO_ERROR + 0xFF00: | ||
448 | case TMF_RESP_FUNC_COMPLETE: | ||
449 | break; | ||
450 | /* The task hasn't been sent to the device xor we never got | ||
451 | * a (sane) Response IU for the ABORT TASK TMF. | ||
452 | */ | ||
453 | case TF_NAK_RECV + 0xFF00: | ||
454 | res = TMF_RESP_INVALID_FRAME; | ||
455 | break; | ||
456 | case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */ | ||
457 | res = TMF_RESP_FUNC_FAILED; | 499 | res = TMF_RESP_FUNC_FAILED; |
458 | leftover = wait_for_completion_timeout(&tascb->completion, | 500 | } else { |
459 | AIC94XX_SCB_TIMEOUT); | 501 | /* In the following we assume that the managing layer |
460 | spin_lock_irqsave(&task->task_state_lock, flags); | 502 | * will _never_ make a mistake, when issuing ABORT |
461 | if (leftover < 1) | 503 | * TASK. |
504 | */ | ||
505 | switch (tcs.dl_opcode) { | ||
506 | default: | ||
507 | res = asd_clear_nexus(task); | ||
508 | /* fallthrough */ | ||
509 | case TC_NO_ERROR: | ||
510 | break; | ||
511 | /* The task hasn't been sent to the device xor | ||
512 | * we never got a (sane) Response IU for the | ||
513 | * ABORT TASK TMF. | ||
514 | */ | ||
515 | case TF_NAK_RECV: | ||
516 | res = TMF_RESP_INVALID_FRAME; | ||
517 | break; | ||
518 | case TF_TMF_TASK_DONE: /* done but not reported yet */ | ||
462 | res = TMF_RESP_FUNC_FAILED; | 519 | res = TMF_RESP_FUNC_FAILED; |
463 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | 520 | leftover = |
521 | wait_for_completion_timeout(&tascb_completion, | ||
522 | AIC94XX_SCB_TIMEOUT); | ||
523 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
524 | if (leftover < 1) | ||
525 | res = TMF_RESP_FUNC_FAILED; | ||
526 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | ||
527 | res = TMF_RESP_FUNC_COMPLETE; | ||
528 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
529 | break; | ||
530 | case TF_TMF_NO_TAG: | ||
531 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ | ||
532 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ | ||
464 | res = TMF_RESP_FUNC_COMPLETE; | 533 | res = TMF_RESP_FUNC_COMPLETE; |
465 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 534 | break; |
466 | goto out_done; | 535 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
467 | case TF_TMF_NO_TAG + 0xFF00: | 536 | res = TMF_RESP_FUNC_ESUPP; |
468 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 537 | break; |
469 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 538 | } |
470 | res = TMF_RESP_FUNC_COMPLETE; | ||
471 | goto out_done; | ||
472 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | ||
473 | res = TMF_RESP_FUNC_ESUPP; | ||
474 | goto out; | ||
475 | } | 539 | } |
476 | out_done: | 540 | out_done: |
541 | tascb->completion = NULL; | ||
477 | if (res == TMF_RESP_FUNC_COMPLETE) { | 542 | if (res == TMF_RESP_FUNC_COMPLETE) { |
478 | task->lldd_task = NULL; | 543 | task->lldd_task = NULL; |
479 | mb(); | 544 | mb(); |
480 | asd_ascb_free(tascb); | 545 | asd_ascb_free(tascb); |
481 | } | 546 | } |
482 | out: | 547 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
548 | return res; | ||
549 | |||
550 | out_free: | ||
483 | asd_ascb_free(ascb); | 551 | asd_ascb_free(ascb); |
484 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); | 552 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
485 | return res; | 553 | return res; |
@@ -507,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
507 | struct asd_ascb *ascb; | 575 | struct asd_ascb *ascb; |
508 | int res = 1; | 576 | int res = 1; |
509 | struct scb *scb; | 577 | struct scb *scb; |
578 | DECLARE_COMPLETION_ONSTACK(completion); | ||
579 | DECLARE_TCS(tcs); | ||
510 | 580 | ||
511 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) | 581 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) |
512 | return TMF_RESP_FUNC_ESUPP; | 582 | return TMF_RESP_FUNC_ESUPP; |
@@ -514,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
514 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 584 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
515 | if (!ascb) | 585 | if (!ascb) |
516 | return -ENOMEM; | 586 | return -ENOMEM; |
587 | |||
588 | ascb->completion = &completion; | ||
589 | ascb->uldd_task = &tcs; | ||
517 | scb = ascb->scb; | 590 | scb = ascb->scb; |
518 | 591 | ||
519 | if (tmf == TMF_QUERY_TASK) | 592 | if (tmf == TMF_QUERY_TASK) |
@@ -546,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
546 | asd_tmf_timedout); | 619 | asd_tmf_timedout); |
547 | if (res) | 620 | if (res) |
548 | goto out_err; | 621 | goto out_err; |
549 | wait_for_completion(&ascb->completion); | 622 | wait_for_completion(&completion); |
550 | res = (int) (unsigned long) ascb->uldd_task; | ||
551 | 623 | ||
552 | switch (res) { | 624 | switch (tcs.dl_opcode) { |
553 | case TC_NO_ERROR + 0xFF00: | 625 | case TC_NO_ERROR: |
554 | res = TMF_RESP_FUNC_COMPLETE; | 626 | res = TMF_RESP_FUNC_COMPLETE; |
555 | break; | 627 | break; |
556 | case TF_NAK_RECV + 0xFF00: | 628 | case TF_NAK_RECV: |
557 | res = TMF_RESP_INVALID_FRAME; | 629 | res = TMF_RESP_INVALID_FRAME; |
558 | break; | 630 | break; |
559 | case TF_TMF_TASK_DONE + 0xFF00: | 631 | case TF_TMF_TASK_DONE: |
560 | res = TMF_RESP_FUNC_FAILED; | 632 | res = TMF_RESP_FUNC_FAILED; |
561 | break; | 633 | break; |
562 | case TF_TMF_NO_TAG + 0xFF00: | 634 | case TF_TMF_NO_TAG: |
563 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 635 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ |
564 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 636 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ |
565 | res = TMF_RESP_FUNC_COMPLETE; | 637 | res = TMF_RESP_FUNC_COMPLETE; |
566 | break; | 638 | break; |
567 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | 639 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
568 | res = TMF_RESP_FUNC_ESUPP; | 640 | res = TMF_RESP_FUNC_ESUPP; |
569 | break; | 641 | break; |
570 | default: | 642 | default: |
571 | /* Allow TMF response codes to propagate upwards */ | 643 | /* Allow TMF response codes to propagate upwards */ |
644 | res = tcs.dl_opcode; | ||
572 | break; | 645 | break; |
573 | } | 646 | } |
647 | return res; | ||
574 | out_err: | 648 | out_err: |
575 | asd_ascb_free(ascb); | 649 | asd_ascb_free(ascb); |
576 | return res; | 650 | return res; |
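The aic94xx TMF hunks above all follow one pattern: the long-lived completion embedded in the ascb is replaced by a DECLARE_COMPLETION_ONSTACK() in the caller, and the result travels in a small on-stack status structure (DECLARE_TCS()) instead of being encoded into ascb->uldd_task with a 0xFF00 offset. A minimal sketch of that pattern, assuming the macro and the tcs/ascb field names shown in the hunks (this is not the driver's actual helper):

#include <linux/completion.h>

static int tmf_issue_and_wait(struct asd_ascb *ascb)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	DECLARE_TCS(tcs);                 /* dl_opcode, tmf_state, tag, tag_valid */

	ascb->completion = &completion;   /* tasklet does complete(ascb->completion) */
	ascb->uldd_task = &tcs;           /* tasklet fills in the status fields */

	/* ... asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, asd_tmf_timedout) ... */

	wait_for_completion(&completion);
	return tcs.dl_opcode;             /* plain DL opcode, no 0xFF00 offset */
}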
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 57786502e3ec..0393707bdfce 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -48,7 +48,7 @@ struct class_device_attribute; | |||
48 | /*The limit of outstanding scsi command that firmware can handle*/ | 48 | /*The limit of outstanding scsi command that firmware can handle*/ |
49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 |
50 | #define ARCMSR_MAX_FREECCB_NUM 320 | 50 | #define ARCMSR_MAX_FREECCB_NUM 320 |
51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" | 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" |
52 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
53 | #define ARCMSR_MAX_XFER_SECTORS 512 | 53 | #define ARCMSR_MAX_XFER_SECTORS 512 |
54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 6d67f5c0eb8e..27ebd336409b 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application, | |||
160 | static void gdth_clear_events(void); | 160 | static void gdth_clear_events(void); |
161 | 161 | ||
162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
163 | char *buffer, ushort count, int to_buffer); | 163 | char *buffer, ushort count); |
164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); |
165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); | 165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); |
166 | 166 | ||
@@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
182 | unsigned int cmd, unsigned long arg); | 182 | unsigned int cmd, unsigned long arg); |
183 | 183 | ||
184 | static void gdth_flush(gdth_ha_str *ha); | 184 | static void gdth_flush(gdth_ha_str *ha); |
185 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf); | ||
186 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 185 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); |
187 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 186 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
188 | struct gdth_cmndinfo *cmndinfo); | 187 | struct gdth_cmndinfo *cmndinfo); |
@@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd, | |||
417 | #include "gdth_proc.h" | 416 | #include "gdth_proc.h" |
418 | #include "gdth_proc.c" | 417 | #include "gdth_proc.c" |
419 | 418 | ||
420 | /* notifier block to get a notify on system shutdown/halt/reboot */ | ||
421 | static struct notifier_block gdth_notifier = { | ||
422 | gdth_halt, NULL, 0 | ||
423 | }; | ||
424 | static int notifier_disabled = 0; | ||
425 | |||
426 | static gdth_ha_str *gdth_find_ha(int hanum) | 419 | static gdth_ha_str *gdth_find_ha(int hanum) |
427 | { | 420 | { |
428 | gdth_ha_str *ha; | 421 | gdth_ha_str *ha; |
@@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) | |||
445 | for (i=0; i<GDTH_MAXCMDS; ++i) { | 438 | for (i=0; i<GDTH_MAXCMDS; ++i) { |
446 | if (ha->cmndinfo[i].index == 0) { | 439 | if (ha->cmndinfo[i].index == 0) { |
447 | priv = &ha->cmndinfo[i]; | 440 | priv = &ha->cmndinfo[i]; |
448 | priv->index = i+1; | ||
449 | memset(priv, 0, sizeof(*priv)); | 441 | memset(priv, 0, sizeof(*priv)); |
442 | priv->index = i+1; | ||
450 | break; | 443 | break; |
451 | } | 444 | } |
452 | } | 445 | } |
@@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
493 | gdth_ha_str *ha = shost_priv(sdev->host); | 486 | gdth_ha_str *ha = shost_priv(sdev->host); |
494 | Scsi_Cmnd *scp; | 487 | Scsi_Cmnd *scp; |
495 | struct gdth_cmndinfo cmndinfo; | 488 | struct gdth_cmndinfo cmndinfo; |
496 | struct scatterlist one_sg; | ||
497 | DECLARE_COMPLETION_ONSTACK(wait); | 489 | DECLARE_COMPLETION_ONSTACK(wait); |
498 | int rval; | 490 | int rval; |
499 | 491 | ||
@@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
507 | /* use request field to save the ptr. to completion struct. */ | 499 | /* use request field to save the ptr. to completion struct. */ |
508 | scp->request = (struct request *)&wait; | 500 | scp->request = (struct request *)&wait; |
509 | scp->timeout_per_command = timeout*HZ; | 501 | scp->timeout_per_command = timeout*HZ; |
510 | sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd)); | ||
511 | gdth_set_sglist(scp, &one_sg); | ||
512 | gdth_set_sg_count(scp, 1); | ||
513 | gdth_set_bufflen(scp, sizeof(*gdtcmd)); | ||
514 | scp->cmd_len = 12; | 502 | scp->cmd_len = 12; |
515 | memcpy(scp->cmnd, cmnd, 12); | 503 | memcpy(scp->cmnd, cmnd, 12); |
516 | cmndinfo.priority = IOCTL_PRI; | 504 | cmndinfo.priority = IOCTL_PRI; |
505 | cmndinfo.internal_cmd_str = gdtcmd; | ||
517 | cmndinfo.internal_command = 1; | 506 | cmndinfo.internal_command = 1; |
518 | 507 | ||
519 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); | 508 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); |
@@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha) | |||
2355 | * buffers, kmap_atomic() as needed. | 2344 | * buffers, kmap_atomic() as needed. |
2356 | */ | 2345 | */ |
2357 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2346 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
2358 | char *buffer, ushort count, int to_buffer) | 2347 | char *buffer, ushort count) |
2359 | { | 2348 | { |
2360 | ushort cpcount,i, max_sg = gdth_sg_count(scp); | 2349 | ushort cpcount,i, max_sg = gdth_sg_count(scp); |
2361 | ushort cpsum,cpnow; | 2350 | ushort cpsum,cpnow; |
@@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2381 | } | 2370 | } |
2382 | local_irq_save(flags); | 2371 | local_irq_save(flags); |
2383 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; | 2372 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; |
2384 | if (to_buffer) | 2373 | memcpy(address, buffer, cpnow); |
2385 | memcpy(buffer, address, cpnow); | ||
2386 | else | ||
2387 | memcpy(address, buffer, cpnow); | ||
2388 | flush_dcache_page(sg_page(sl)); | 2374 | flush_dcache_page(sg_page(sl)); |
2389 | kunmap_atomic(address, KM_BIO_SRC_IRQ); | 2375 | kunmap_atomic(address, KM_BIO_SRC_IRQ); |
2390 | local_irq_restore(flags); | 2376 | local_irq_restore(flags); |
@@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2438 | strcpy(inq.vendor,ha->oem_name); | 2424 | strcpy(inq.vendor,ha->oem_name); |
2439 | sprintf(inq.product,"Host Drive #%02d",t); | 2425 | sprintf(inq.product,"Host Drive #%02d",t); |
2440 | strcpy(inq.revision," "); | 2426 | strcpy(inq.revision," "); |
2441 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0); | 2427 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data)); |
2442 | break; | 2428 | break; |
2443 | 2429 | ||
2444 | case REQUEST_SENSE: | 2430 | case REQUEST_SENSE: |
@@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2448 | sd.key = NO_SENSE; | 2434 | sd.key = NO_SENSE; |
2449 | sd.info = 0; | 2435 | sd.info = 0; |
2450 | sd.add_length= 0; | 2436 | sd.add_length= 0; |
2451 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0); | 2437 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data)); |
2452 | break; | 2438 | break; |
2453 | 2439 | ||
2454 | case MODE_SENSE: | 2440 | case MODE_SENSE: |
@@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2460 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; | 2446 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; |
2461 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; | 2447 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; |
2462 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); | 2448 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); |
2463 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0); | 2449 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data)); |
2464 | break; | 2450 | break; |
2465 | 2451 | ||
2466 | case READ_CAPACITY: | 2452 | case READ_CAPACITY: |
@@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2470 | else | 2456 | else |
2471 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); | 2457 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); |
2472 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); | 2458 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); |
2473 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0); | 2459 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); |
2474 | break; | 2460 | break; |
2475 | 2461 | ||
2476 | case SERVICE_ACTION_IN: | 2462 | case SERVICE_ACTION_IN: |
@@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2482 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); | 2468 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); |
2483 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); | 2469 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); |
2484 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, | 2470 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, |
2485 | sizeof(gdth_rdcap16_data), 0); | 2471 | sizeof(gdth_rdcap16_data)); |
2486 | } else { | 2472 | } else { |
2487 | scp->result = DID_ABORT << 16; | 2473 | scp->result = DID_ABORT << 16; |
2488 | } | 2474 | } |
@@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2852 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2838 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) |
2853 | { | 2839 | { |
2854 | register gdth_cmd_str *cmdp; | 2840 | register gdth_cmd_str *cmdp; |
2841 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
2855 | int cmd_index; | 2842 | int cmd_index; |
2856 | 2843 | ||
2857 | cmdp= ha->pccb; | 2844 | cmdp= ha->pccb; |
@@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2860 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) | 2847 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) |
2861 | return 0; | 2848 | return 0; |
2862 | 2849 | ||
2863 | gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1); | 2850 | *cmdp = *cmndinfo->internal_cmd_str; |
2864 | cmdp->RequestBuffer = scp; | 2851 | cmdp->RequestBuffer = scp; |
2865 | 2852 | ||
2866 | /* search free command index */ | 2853 | /* search free command index */ |
@@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data) | |||
3794 | gdth_ha_str *ha; | 3781 | gdth_ha_str *ha; |
3795 | ulong flags; | 3782 | ulong flags; |
3796 | 3783 | ||
3784 | BUG_ON(list_empty(&gdth_instances)); | ||
3785 | |||
3797 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); | 3786 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); |
3798 | spin_lock_irqsave(&ha->smp_lock, flags); | 3787 | spin_lock_irqsave(&ha->smp_lock, flags); |
3799 | 3788 | ||
@@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha) | |||
4669 | } | 4658 | } |
4670 | } | 4659 | } |
4671 | 4660 | ||
4672 | /* shutdown routine */ | ||
4673 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
4674 | { | ||
4675 | gdth_ha_str *ha; | ||
4676 | #ifndef __alpha__ | ||
4677 | gdth_cmd_str gdtcmd; | ||
4678 | char cmnd[MAX_COMMAND_SIZE]; | ||
4679 | #endif | ||
4680 | |||
4681 | if (notifier_disabled) | ||
4682 | return NOTIFY_OK; | ||
4683 | |||
4684 | TRACE2(("gdth_halt() event %d\n",(int)event)); | ||
4685 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
4686 | return NOTIFY_DONE; | ||
4687 | |||
4688 | notifier_disabled = 1; | ||
4689 | printk("GDT-HA: Flushing all host drives .. "); | ||
4690 | list_for_each_entry(ha, &gdth_instances, list) { | ||
4691 | gdth_flush(ha); | ||
4692 | |||
4693 | #ifndef __alpha__ | ||
4694 | /* controller reset */ | ||
4695 | memset(cmnd, 0xff, MAX_COMMAND_SIZE); | ||
4696 | gdtcmd.BoardNode = LOCALBOARD; | ||
4697 | gdtcmd.Service = CACHESERVICE; | ||
4698 | gdtcmd.OpCode = GDT_RESET; | ||
4699 | TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum)); | ||
4700 | gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL); | ||
4701 | #endif | ||
4702 | } | ||
4703 | printk("Done.\n"); | ||
4704 | |||
4705 | #ifdef GDTH_STATISTICS | ||
4706 | del_timer(&gdth_timer); | ||
4707 | #endif | ||
4708 | return NOTIFY_OK; | ||
4709 | } | ||
4710 | |||
4711 | /* configure lun */ | 4661 | /* configure lun */ |
4712 | static int gdth_slave_configure(struct scsi_device *sdev) | 4662 | static int gdth_slave_configure(struct scsi_device *sdev) |
4713 | { | 4663 | { |
@@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5142 | 5092 | ||
5143 | scsi_remove_host(shp); | 5093 | scsi_remove_host(shp); |
5144 | 5094 | ||
5095 | gdth_flush(ha); | ||
5096 | |||
5145 | if (ha->sdev) { | 5097 | if (ha->sdev) { |
5146 | scsi_free_host_dev(ha->sdev); | 5098 | scsi_free_host_dev(ha->sdev); |
5147 | ha->sdev = NULL; | 5099 | ha->sdev = NULL; |
5148 | } | 5100 | } |
5149 | 5101 | ||
5150 | gdth_flush(ha); | ||
5151 | |||
5152 | if (shp->irq) | 5102 | if (shp->irq) |
5153 | free_irq(shp->irq,ha); | 5103 | free_irq(shp->irq,ha); |
5154 | 5104 | ||
@@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5174 | scsi_host_put(shp); | 5124 | scsi_host_put(shp); |
5175 | } | 5125 | } |
5176 | 5126 | ||
5127 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
5128 | { | ||
5129 | gdth_ha_str *ha; | ||
5130 | |||
5131 | TRACE2(("gdth_halt() event %d\n", (int)event)); | ||
5132 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
5133 | return NOTIFY_DONE; | ||
5134 | |||
5135 | list_for_each_entry(ha, &gdth_instances, list) | ||
5136 | gdth_flush(ha); | ||
5137 | |||
5138 | return NOTIFY_OK; | ||
5139 | } | ||
5140 | |||
5141 | static struct notifier_block gdth_notifier = { | ||
5142 | gdth_halt, NULL, 0 | ||
5143 | }; | ||
5144 | |||
5177 | static int __init gdth_init(void) | 5145 | static int __init gdth_init(void) |
5178 | { | 5146 | { |
5179 | if (disable) { | 5147 | if (disable) { |
@@ -5236,7 +5204,6 @@ static int __init gdth_init(void) | |||
5236 | add_timer(&gdth_timer); | 5204 | add_timer(&gdth_timer); |
5237 | #endif | 5205 | #endif |
5238 | major = register_chrdev(0,"gdth", &gdth_fops); | 5206 | major = register_chrdev(0,"gdth", &gdth_fops); |
5239 | notifier_disabled = 0; | ||
5240 | register_reboot_notifier(&gdth_notifier); | 5207 | register_reboot_notifier(&gdth_notifier); |
5241 | gdth_polling = FALSE; | 5208 | gdth_polling = FALSE; |
5242 | return 0; | 5209 | return 0; |
@@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void) | |||
5246 | { | 5213 | { |
5247 | gdth_ha_str *ha; | 5214 | gdth_ha_str *ha; |
5248 | 5215 | ||
5249 | list_for_each_entry(ha, &gdth_instances, list) | 5216 | unregister_chrdev(major, "gdth"); |
5250 | gdth_remove_one(ha); | 5217 | unregister_reboot_notifier(&gdth_notifier); |
5251 | 5218 | ||
5252 | #ifdef GDTH_STATISTICS | 5219 | #ifdef GDTH_STATISTICS |
5253 | del_timer(&gdth_timer); | 5220 | del_timer_sync(&gdth_timer); |
5254 | #endif | 5221 | #endif |
5255 | unregister_chrdev(major,"gdth"); | 5222 | |
5256 | unregister_reboot_notifier(&gdth_notifier); | 5223 | list_for_each_entry(ha, &gdth_instances, list) |
5224 | gdth_remove_one(ha); | ||
5257 | } | 5225 | } |
5258 | 5226 | ||
5259 | module_init(gdth_init); | 5227 | module_init(gdth_init); |
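Two of the gdth hunks are pure ordering fixes: gdth_exit() now unregisters the chrdev and reboot notifier and kills the timer with del_timer_sync() before tearing down the instances, and gdth_get_cmndinfo() no longer wipes the slot index it has just claimed. The second bug is easy to see in isolation; a stand-alone sketch of the corrected order, with the structure reduced to the two fields that matter here:

#include <string.h>

struct cmndinfo { int index; int internal_command; };

static void claim_slot(struct cmndinfo *priv, int i)
{
	memset(priv, 0, sizeof(*priv));  /* clear the whole entry first ...          */
	priv->index = i + 1;             /* ... then mark the slot as allocated;     */
	                                 /* the reverse order zeroes the index again */
}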
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 1434c6b0297c..26e4e92515e0 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -915,6 +915,7 @@ typedef struct { | |||
915 | struct gdth_cmndinfo { /* per-command private info */ | 915 | struct gdth_cmndinfo { /* per-command private info */ |
916 | int index; | 916 | int index; |
917 | int internal_command; /* don't call scsi_done */ | 917 | int internal_command; /* don't call scsi_done */ |
918 | gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */ | ||
918 | dma_addr_t sense_paddr; /* sense dma-addr */ | 919 | dma_addr_t sense_paddr; /* sense dma-addr */ |
919 | unchar priority; | 920 | unchar priority; |
920 | int timeout; | 921 | int timeout; |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index bd62131b97a1..e5881e92d0fb 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, | |||
290 | int err = 0; | 290 | int err = 0; |
291 | 291 | ||
292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], | 292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], |
293 | cmd->usg_sg); | 293 | scsi_sg_count(sc)); |
294 | 294 | ||
295 | if (scsi_sg_count(sc)) | 295 | if (scsi_sg_count(sc)) |
296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); | 296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); |
@@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
838 | if (!shost) | 838 | if (!shost) |
839 | goto free_vport; | 839 | goto free_vport; |
840 | shost->transportt = ibmvstgt_transport_template; | 840 | shost->transportt = ibmvstgt_transport_template; |
841 | err = scsi_tgt_alloc_queue(shost); | ||
842 | if (err) | ||
843 | goto put_host; | ||
844 | 841 | ||
845 | target = host_to_srp_target(shost); | 842 | target = host_to_srp_target(shost); |
846 | target->shost = shost; | 843 | target->shost = shost; |
@@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
872 | if (err) | 869 | if (err) |
873 | goto destroy_queue; | 870 | goto destroy_queue; |
874 | 871 | ||
872 | err = scsi_tgt_alloc_queue(shost); | ||
873 | if (err) | ||
874 | goto destroy_queue; | ||
875 | |||
875 | return 0; | 876 | return 0; |
876 | destroy_queue: | 877 | destroy_queue: |
877 | crq_queue_destroy(target); | 878 | crq_queue_destroy(target); |
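The ibmvstgt probe hunk moves scsi_tgt_alloc_queue() from right after host allocation to after the CRQ queue has been created, so a failure can unwind through the existing destroy_queue label and only tear down what was actually set up. A generic sketch of that goto-unwind ordering (setup_a/setup_b/teardown_a are placeholders, not the driver's functions):

static int probe_example(void)
{
	int err;

	err = setup_a();         /* e.g. crq_queue_create()     */
	if (err)
		return err;

	err = setup_b();         /* e.g. scsi_tgt_alloc_queue() */
	if (err)
		goto undo_a;     /* undo only what succeeded    */

	return 0;

undo_a:
	teardown_a();
	return err;
}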
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 59f8445eab0d..bdd7de7da39a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; | 1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | if (!is_power_of_2(cmds_max) || | 1711 | if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET || |
1712 | cmds_max >= ISCSI_MGMT_ITT_OFFSET) { | 1712 | cmds_max < 2) { |
1713 | if (cmds_max != 0) | 1713 | if (cmds_max != 0) |
1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " | 1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " |
1715 | "can_queue must be a power of 2 and between " | 1715 | "can_queue must be a power of 2 and between " |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 7cd05b599a12..b0e5ac372a32 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap) | |||
236 | struct domain_device *dev = ap->private_data; | 236 | struct domain_device *dev = ap->private_data; |
237 | struct sas_internal *i = | 237 | struct sas_internal *i = |
238 | to_sas_internal(dev->port->ha->core.shost->transportt); | 238 | to_sas_internal(dev->port->ha->core.shost->transportt); |
239 | int res = 0; | 239 | int res = TMF_RESP_FUNC_FAILED; |
240 | 240 | ||
241 | if (i->dft->lldd_I_T_nexus_reset) | 241 | if (i->dft->lldd_I_T_nexus_reset) |
242 | res = i->dft->lldd_I_T_nexus_reset(dev); | 242 | res = i->dft->lldd_I_T_nexus_reset(dev); |
243 | 243 | ||
244 | if (res) | 244 | if (res != TMF_RESP_FUNC_COMPLETE) |
245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); | 245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); |
246 | 246 | ||
247 | switch (dev->sata_dev.command_set) { | 247 | switch (dev->sata_dev.command_set) { |
@@ -656,21 +656,6 @@ out: | |||
656 | return res; | 656 | return res; |
657 | } | 657 | } |
658 | 658 | ||
659 | static void sas_sata_propagate_sas_addr(struct domain_device *dev) | ||
660 | { | ||
661 | unsigned long flags; | ||
662 | struct asd_sas_port *port = dev->port; | ||
663 | struct asd_sas_phy *phy; | ||
664 | |||
665 | BUG_ON(dev->parent); | ||
666 | |||
667 | memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
668 | spin_lock_irqsave(&port->phy_list_lock, flags); | ||
669 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
670 | memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
671 | spin_unlock_irqrestore(&port->phy_list_lock, flags); | ||
672 | } | ||
673 | |||
674 | #define ATA_IDENTIFY_DEV 0xEC | 659 | #define ATA_IDENTIFY_DEV 0xEC |
675 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 | 660 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 |
676 | #define ATA_SET_FEATURES 0xEF | 661 | #define ATA_SET_FEATURES 0xEF |
@@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev) | |||
728 | goto out_err; | 713 | goto out_err; |
729 | } | 714 | } |
730 | cont1: | 715 | cont1: |
731 | /* Get WWN */ | ||
732 | if (dev->port->oob_mode != SATA_OOB_MODE) { | ||
733 | memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr, | ||
734 | SAS_ADDR_SIZE); | ||
735 | } else if (dev->sata_dev.command_set == ATA_COMMAND_SET && | ||
736 | (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000) | ||
737 | == 0x5000) { | ||
738 | int i; | ||
739 | |||
740 | for (i = 0; i < 4; i++) { | ||
741 | dev->sas_addr[2*i] = | ||
742 | (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8; | ||
743 | dev->sas_addr[2*i+1] = | ||
744 | le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF; | ||
745 | } | ||
746 | } | ||
747 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); | ||
748 | if (!dev->parent) | ||
749 | sas_sata_propagate_sas_addr(dev); | ||
750 | |||
751 | /* XXX Hint: register this SATA device with SATL. | 716 | /* XXX Hint: register this SATA device with SATL. |
752 | When this returns, dev->sata_dev->lu is alive and | 717 | When this returns, dev->sata_dev->lu is alive and |
753 | present. | 718 | present. |
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e1e2d085c920..39ae68a3b0ef 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
92 | if (!port->phy) | 92 | if (!port->phy) |
93 | port->phy = phy->phy; | 93 | port->phy = phy->phy; |
94 | 94 | ||
95 | SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id, | ||
96 | port->id, port->phy_mask); | ||
97 | |||
98 | if (*(u64 *)port->attached_sas_addr == 0) { | 95 | if (*(u64 *)port->attached_sas_addr == 0) { |
99 | port->class = phy->class; | 96 | port->class = phy->class; |
100 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, | 97 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, |
@@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
115 | } | 112 | } |
116 | sas_port_add_phy(port->port, phy->phy); | 113 | sas_port_add_phy(port->port, phy->phy); |
117 | 114 | ||
115 | SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n", | ||
116 | phy->phy->dev.bus_id,port->port->dev.bus_id, | ||
117 | port->phy_mask, | ||
118 | SAS_ADDR(port->attached_sas_addr)); | ||
119 | |||
118 | if (port->port_dev) | 120 | if (port->port_dev) |
119 | port->port_dev->pathways = port->num_phys; | 121 | port->port_dev->pathways = port->num_phys; |
120 | 122 | ||
@@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work) | |||
255 | static void sas_init_port(struct asd_sas_port *port, | 257 | static void sas_init_port(struct asd_sas_port *port, |
256 | struct sas_ha_struct *sas_ha, int i) | 258 | struct sas_ha_struct *sas_ha, int i) |
257 | { | 259 | { |
260 | memset(port, 0, sizeof(*port)); | ||
258 | port->id = i; | 261 | port->id = i; |
259 | INIT_LIST_HEAD(&port->dev_list); | 262 | INIT_LIST_HEAD(&port->dev_list); |
260 | spin_lock_init(&port->phy_list_lock); | 263 | spin_lock_init(&port->phy_list_lock); |
261 | INIT_LIST_HEAD(&port->phy_list); | 264 | INIT_LIST_HEAD(&port->phy_list); |
262 | port->num_phys = 0; | ||
263 | port->phy_mask = 0; | ||
264 | port->ha = sas_ha; | 265 | port->ha = sas_ha; |
265 | 266 | ||
266 | spin_lock_init(&port->dev_list_lock); | 267 | spin_lock_init(&port->dev_list_lock); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 704ea06a6e50..1f8241563c6c 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -434,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev) | |||
434 | } | 434 | } |
435 | 435 | ||
436 | /* Find the sas_phy that's attached to this device */ | 436 | /* Find the sas_phy that's attached to this device */ |
437 | static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | 437 | struct sas_phy *sas_find_local_phy(struct domain_device *dev) |
438 | { | 438 | { |
439 | struct domain_device *pdev = dev->parent; | 439 | struct domain_device *pdev = dev->parent; |
440 | struct ex_phy *exphy = NULL; | 440 | struct ex_phy *exphy = NULL; |
@@ -456,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | |||
456 | BUG_ON(!exphy); | 456 | BUG_ON(!exphy); |
457 | return exphy->phy; | 457 | return exphy->phy; |
458 | } | 458 | } |
459 | EXPORT_SYMBOL_GPL(sas_find_local_phy); | ||
459 | 460 | ||
460 | /* Attempt to send a LUN reset message to a device */ | 461 | /* Attempt to send a LUN reset message to a device */ |
461 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | 462 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) |
@@ -482,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
482 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | 483 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) |
483 | { | 484 | { |
484 | struct domain_device *dev = cmd_to_domain_dev(cmd); | 485 | struct domain_device *dev = cmd_to_domain_dev(cmd); |
485 | struct sas_phy *phy = find_local_sas_phy(dev); | 486 | struct sas_phy *phy = sas_find_local_phy(dev); |
486 | int res; | 487 | int res; |
487 | 488 | ||
488 | res = sas_phy_reset(phy, 1); | 489 | res = sas_phy_reset(phy, 1); |
@@ -497,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | |||
497 | } | 498 | } |
498 | 499 | ||
499 | /* Try to reset a device */ | 500 | /* Try to reset a device */ |
500 | static int try_to_reset_cmd_device(struct Scsi_Host *shost, | 501 | static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) |
501 | struct scsi_cmnd *cmd) | ||
502 | { | 502 | { |
503 | int res; | 503 | int res; |
504 | struct Scsi_Host *shost = cmd->device->host; | ||
504 | 505 | ||
505 | if (!shost->hostt->eh_device_reset_handler) | 506 | if (!shost->hostt->eh_device_reset_handler) |
506 | goto try_bus_reset; | 507 | goto try_bus_reset; |
@@ -540,6 +541,12 @@ Again: | |||
540 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; | 541 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; |
541 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 542 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
542 | 543 | ||
544 | if (need_reset) { | ||
545 | SAS_DPRINTK("%s: task 0x%p requests reset\n", | ||
546 | __FUNCTION__, task); | ||
547 | goto reset; | ||
548 | } | ||
549 | |||
543 | SAS_DPRINTK("trying to find task 0x%p\n", task); | 550 | SAS_DPRINTK("trying to find task 0x%p\n", task); |
544 | res = sas_scsi_find_task(task); | 551 | res = sas_scsi_find_task(task); |
545 | 552 | ||
@@ -550,18 +557,15 @@ Again: | |||
550 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, | 557 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, |
551 | task); | 558 | task); |
552 | sas_eh_finish_cmd(cmd); | 559 | sas_eh_finish_cmd(cmd); |
553 | if (need_reset) | ||
554 | try_to_reset_cmd_device(shost, cmd); | ||
555 | continue; | 560 | continue; |
556 | case TASK_IS_ABORTED: | 561 | case TASK_IS_ABORTED: |
557 | SAS_DPRINTK("%s: task 0x%p is aborted\n", | 562 | SAS_DPRINTK("%s: task 0x%p is aborted\n", |
558 | __FUNCTION__, task); | 563 | __FUNCTION__, task); |
559 | sas_eh_finish_cmd(cmd); | 564 | sas_eh_finish_cmd(cmd); |
560 | if (need_reset) | ||
561 | try_to_reset_cmd_device(shost, cmd); | ||
562 | continue; | 565 | continue; |
563 | case TASK_IS_AT_LU: | 566 | case TASK_IS_AT_LU: |
564 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); | 567 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); |
568 | reset: | ||
565 | tmf_resp = sas_recover_lu(task->dev, cmd); | 569 | tmf_resp = sas_recover_lu(task->dev, cmd); |
566 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 570 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
567 | SAS_DPRINTK("dev %016llx LU %x is " | 571 | SAS_DPRINTK("dev %016llx LU %x is " |
@@ -569,8 +573,6 @@ Again: | |||
569 | SAS_ADDR(task->dev), | 573 | SAS_ADDR(task->dev), |
570 | cmd->device->lun); | 574 | cmd->device->lun); |
571 | sas_eh_finish_cmd(cmd); | 575 | sas_eh_finish_cmd(cmd); |
572 | if (need_reset) | ||
573 | try_to_reset_cmd_device(shost, cmd); | ||
574 | sas_scsi_clear_queue_lu(work_q, cmd); | 576 | sas_scsi_clear_queue_lu(work_q, cmd); |
575 | goto Again; | 577 | goto Again; |
576 | } | 578 | } |
@@ -581,15 +583,15 @@ Again: | |||
581 | task); | 583 | task); |
582 | tmf_resp = sas_recover_I_T(task->dev); | 584 | tmf_resp = sas_recover_I_T(task->dev); |
583 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 585 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
586 | struct domain_device *dev = task->dev; | ||
584 | SAS_DPRINTK("I_T %016llx recovered\n", | 587 | SAS_DPRINTK("I_T %016llx recovered\n", |
585 | SAS_ADDR(task->dev->sas_addr)); | 588 | SAS_ADDR(task->dev->sas_addr)); |
586 | sas_eh_finish_cmd(cmd); | 589 | sas_eh_finish_cmd(cmd); |
587 | if (need_reset) | 590 | sas_scsi_clear_queue_I_T(work_q, dev); |
588 | try_to_reset_cmd_device(shost, cmd); | ||
589 | sas_scsi_clear_queue_I_T(work_q, task->dev); | ||
590 | goto Again; | 591 | goto Again; |
591 | } | 592 | } |
592 | /* Hammer time :-) */ | 593 | /* Hammer time :-) */ |
594 | try_to_reset_cmd_device(cmd); | ||
593 | if (i->dft->lldd_clear_nexus_port) { | 595 | if (i->dft->lldd_clear_nexus_port) { |
594 | struct asd_sas_port *port = task->dev->port; | 596 | struct asd_sas_port *port = task->dev->port; |
595 | SAS_DPRINTK("clearing nexus for port:%d\n", | 597 | SAS_DPRINTK("clearing nexus for port:%d\n", |
@@ -599,8 +601,6 @@ Again: | |||
599 | SAS_DPRINTK("clear nexus port:%d " | 601 | SAS_DPRINTK("clear nexus port:%d " |
600 | "succeeded\n", port->id); | 602 | "succeeded\n", port->id); |
601 | sas_eh_finish_cmd(cmd); | 603 | sas_eh_finish_cmd(cmd); |
602 | if (need_reset) | ||
603 | try_to_reset_cmd_device(shost, cmd); | ||
604 | sas_scsi_clear_queue_port(work_q, | 604 | sas_scsi_clear_queue_port(work_q, |
605 | port); | 605 | port); |
606 | goto Again; | 606 | goto Again; |
@@ -613,8 +613,6 @@ Again: | |||
613 | SAS_DPRINTK("clear nexus ha " | 613 | SAS_DPRINTK("clear nexus ha " |
614 | "succeeded\n"); | 614 | "succeeded\n"); |
615 | sas_eh_finish_cmd(cmd); | 615 | sas_eh_finish_cmd(cmd); |
616 | if (need_reset) | ||
617 | try_to_reset_cmd_device(shost, cmd); | ||
618 | goto clear_q; | 616 | goto clear_q; |
619 | } | 617 | } |
620 | } | 618 | } |
@@ -628,8 +626,6 @@ Again: | |||
628 | cmd->device->lun); | 626 | cmd->device->lun); |
629 | 627 | ||
630 | sas_eh_finish_cmd(cmd); | 628 | sas_eh_finish_cmd(cmd); |
631 | if (need_reset) | ||
632 | try_to_reset_cmd_device(shost, cmd); | ||
633 | goto clear_q; | 629 | goto clear_q; |
634 | } | 630 | } |
635 | } | 631 | } |
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c index d4a6ac3c9c47..5ec0665b3a3d 100644 --- a/drivers/scsi/mvsas.c +++ b/drivers/scsi/mvsas.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "mvsas" | 42 | #define DRV_NAME "mvsas" |
43 | #define DRV_VERSION "0.5" | 43 | #define DRV_VERSION "0.5.1" |
44 | #define _MV_DUMP 0 | 44 | #define _MV_DUMP 0 |
45 | #define MVS_DISABLE_NVRAM | 45 | #define MVS_DISABLE_NVRAM |
46 | #define MVS_DISABLE_MSI | 46 | #define MVS_DISABLE_MSI |
@@ -1005,7 +1005,7 @@ err_out: | |||
1005 | return rc; | 1005 | return rc; |
1006 | #else | 1006 | #else |
1007 | /* FIXME , For SAS target mode */ | 1007 | /* FIXME , For SAS target mode */ |
1008 | memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8); | 1008 | memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); |
1009 | return 0; | 1009 | return 0; |
1010 | #endif | 1010 | #endif |
1011 | } | 1011 | } |
@@ -1330,7 +1330,7 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | |||
1330 | 1330 | ||
1331 | mvs_hba_cq_dump(mvi); | 1331 | mvs_hba_cq_dump(mvi); |
1332 | 1332 | ||
1333 | if (unlikely(rx_desc & RXQ_DONE)) | 1333 | if (likely(rx_desc & RXQ_DONE)) |
1334 | mvs_slot_complete(mvi, rx_desc); | 1334 | mvs_slot_complete(mvi, rx_desc); |
1335 | if (rx_desc & RXQ_ATTN) { | 1335 | if (rx_desc & RXQ_ATTN) { |
1336 | attn = true; | 1336 | attn = true; |
@@ -2720,9 +2720,8 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi) | |||
2720 | msleep(100); | 2720 | msleep(100); |
2721 | /* init and reset phys */ | 2721 | /* init and reset phys */ |
2722 | for (i = 0; i < mvi->chip->n_phy; i++) { | 2722 | for (i = 0; i < mvi->chip->n_phy; i++) { |
2723 | /* FIXME: is this the correct dword order? */ | 2723 | u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); |
2724 | u32 lo = *((u32 *)&mvi->sas_addr[0]); | 2724 | u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); |
2725 | u32 hi = *((u32 *)&mvi->sas_addr[4]); | ||
2726 | 2725 | ||
2727 | mvs_detect_porttype(mvi, i); | 2726 | mvs_detect_porttype(mvi, i); |
2728 | 2727 | ||
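The mvs_hw_init hunk resolves the old FIXME about dword order: the 8-byte SAS address is stored big-endian, so the high 32 bits of the WWN come from sas_addr[0..3] and the low 32 bits from sas_addr[4..7], each converted with be32_to_cpu(). A stand-alone sketch of the same split, using open-coded shifts instead of the kernel byte-order helpers:

#include <stdint.h>

static void sas_addr_to_regs(const uint8_t addr[8], uint32_t *hi, uint32_t *lo)
{
	/* bytes 0..3 are the most significant half of the address */
	*hi = ((uint32_t)addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	/* bytes 4..7 are the least significant half */
	*lo = ((uint32_t)addr[4] << 24) | (addr[5] << 16) | (addr[6] << 8) | addr[7];
}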
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 0cd614a0fa73..fad6cb5cba28 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf) | |||
124 | } | 124 | } |
125 | req_len += sgpnt->length; | 125 | req_len += sgpnt->length; |
126 | } | 126 | } |
127 | scsi_set_resid(cmd, req_len - act_len); | 127 | scsi_set_resid(cmd, buflen - act_len); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
@@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = { | |||
427 | .cmd_per_lun = 1, | 427 | .cmd_per_lun = 1, |
428 | .emulated = 1, /* only sg driver uses this */ | 428 | .emulated = 1, /* only sg driver uses this */ |
429 | .max_sectors = PS3ROM_MAX_SECTORS, | 429 | .max_sectors = PS3ROM_MAX_SECTORS, |
430 | .use_clustering = ENABLE_CLUSTERING, | 430 | .use_clustering = DISABLE_CLUSTERING, |
431 | .module = THIS_MODULE, | 431 | .module = THIS_MODULE, |
432 | }; | 432 | }; |
433 | 433 | ||
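The fill_from_dev_buffer hunk reports the residual against the command's full transfer length (buflen) rather than against the portion of the scatterlist that was walked: if the midlayer asked for buflen bytes and only act_len were filled in, the driver now calls scsi_set_resid(cmd, buflen - act_len). For example, a 512-byte request satisfied with 96 bytes of data leaves a residual of 512 - 96 = 416 bytes.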
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 6226d88479f5..c1808763d40e 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
39 | ms_pkt->entry_count = 1; | 39 | ms_pkt->entry_count = 1; |
40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); | 40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); |
41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
42 | ms_pkt->timeout = __constant_cpu_to_le16(25); | 42 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
75 | ct_pkt->entry_type = CT_IOCB_TYPE; | 75 | ct_pkt->entry_type = CT_IOCB_TYPE; |
76 | ct_pkt->entry_count = 1; | 76 | ct_pkt->entry_count = 1; |
77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); | 77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); |
78 | ct_pkt->timeout = __constant_cpu_to_le16(25); | 78 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1144 | ms_pkt->entry_count = 1; | 1144 | ms_pkt->entry_count = 1; |
1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); | 1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); |
1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
1147 | ms_pkt->timeout = __constant_cpu_to_le16(59); | 1147 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1181 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1181 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1182 | ct_pkt->entry_count = 1; | 1182 | ct_pkt->entry_count = 1; |
1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1184 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1184 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1761 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1761 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1762 | ct_pkt->entry_count = 1; | 1762 | ct_pkt->entry_count = 1; |
1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1764 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1764 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d5c7853e7eba..364be7d06875 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) | |||
1733 | ha->login_timeout = nv->login_timeout; | 1733 | ha->login_timeout = nv->login_timeout; |
1734 | icb->login_timeout = nv->login_timeout; | 1734 | icb->login_timeout = nv->login_timeout; |
1735 | 1735 | ||
1736 | /* Set minimum RATOV to 200 tenths of a second. */ | 1736 | /* Set minimum RATOV to 100 tenths of a second. */ |
1737 | ha->r_a_tov = 200; | 1737 | ha->r_a_tov = 100; |
1738 | 1738 | ||
1739 | ha->loop_reset_delay = nv->reset_delay; | 1739 | ha->loop_reset_delay = nv->reset_delay; |
1740 | 1740 | ||
@@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) | |||
3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); | 3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); |
3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); | 3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); |
3647 | 3647 | ||
3648 | /* Set minimum RATOV to 200 tenths of a second. */ | 3648 | /* Set minimum RATOV to 100 tenths of a second. */ |
3649 | ha->r_a_tov = 200; | 3649 | ha->r_a_tov = 100; |
3650 | 3650 | ||
3651 | ha->loop_reset_delay = nv->reset_delay; | 3651 | ha->loop_reset_delay = nv->reset_delay; |
3652 | 3652 | ||
@@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) | |||
4022 | return; | 4022 | return; |
4023 | 4023 | ||
4024 | ret = qla2x00_stop_firmware(ha); | 4024 | ret = qla2x00_stop_firmware(ha); |
4025 | for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { | 4025 | for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && |
4026 | retries ; retries--) { | ||
4026 | qla2x00_reset_chip(ha); | 4027 | qla2x00_reset_chip(ha); |
4027 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) | 4028 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) |
4028 | continue; | 4029 | continue; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14e6f22944b7..f0337036c7bb 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
958 | } | 958 | } |
959 | } | 959 | } |
960 | 960 | ||
961 | /* Check for overrun. */ | ||
962 | if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && | ||
963 | scsi_status & SS_RESIDUAL_OVER) | ||
964 | comp_status = CS_DATA_OVERRUN; | ||
965 | |||
961 | /* | 966 | /* |
962 | * Based on Host and scsi status generate status code for Linux | 967 | * Based on Host and scsi status generate status code for Linux |
963 | */ | 968 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 99d29fff836d..bb103580e1ba 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport) | |||
2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; | 2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; |
2207 | tsk->p.tsk.entry_count = 1; | 2207 | tsk->p.tsk.entry_count = 1; |
2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); | 2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); |
2209 | tsk->p.tsk.timeout = __constant_cpu_to_le16(25); | 2209 | tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); | 2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); |
2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; | 2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; |
2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; | 2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c5742cc15abb..ea08a129fee9 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.00-k8" | 10 | #define QLA2XXX_VERSION "8.02.00-k9" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 10b3b9a620f3..109c5f5985ec 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, | |||
1299 | ddb_entry->fw_ddb_device_state = state; | 1299 | ddb_entry->fw_ddb_device_state = state; |
1300 | /* Device is back online. */ | 1300 | /* Device is back online. */ |
1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { |
1302 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1302 | atomic_set(&ddb_entry->port_down_timer, | 1303 | atomic_set(&ddb_entry->port_down_timer, |
1303 | ha->port_down_retry_count); | 1304 | ha->port_down_retry_count); |
1304 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1306 | atomic_set(&ddb_entry->relogin_timer, 0); | 1306 | atomic_set(&ddb_entry->relogin_timer, 0); |
1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index c3c59d763037..8b92f348f02c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); | |||
75 | static int qla4xxx_slave_alloc(struct scsi_device *device); | 75 | static int qla4xxx_slave_alloc(struct scsi_device *device); |
76 | static int qla4xxx_slave_configure(struct scsi_device *device); | 76 | static int qla4xxx_slave_configure(struct scsi_device *device); |
77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); | 77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); |
78 | static void qla4xxx_scan_start(struct Scsi_Host *shost); | ||
78 | 79 | ||
79 | static struct scsi_host_template qla4xxx_driver_template = { | 80 | static struct scsi_host_template qla4xxx_driver_template = { |
80 | .module = THIS_MODULE, | 81 | .module = THIS_MODULE, |
@@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
90 | .slave_destroy = qla4xxx_slave_destroy, | 91 | .slave_destroy = qla4xxx_slave_destroy, |
91 | 92 | ||
92 | .scan_finished = iscsi_scan_finished, | 93 | .scan_finished = iscsi_scan_finished, |
94 | .scan_start = qla4xxx_scan_start, | ||
93 | 95 | ||
94 | .this_id = -1, | 96 | .this_id = -1, |
95 | .cmd_per_lun = 3, | 97 | .cmd_per_lun = 3, |
@@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha) | |||
299 | return ddb_entry; | 301 | return ddb_entry; |
300 | } | 302 | } |
301 | 303 | ||
304 | static void qla4xxx_scan_start(struct Scsi_Host *shost) | ||
305 | { | ||
306 | struct scsi_qla_host *ha = shost_priv(shost); | ||
307 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
308 | |||
310 | /* finish setup of sessions that were already set up in firmware */ | ||
310 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
311 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
312 | qla4xxx_add_sess(ddb_entry); | ||
313 | } | ||
314 | } | ||
315 | |||
302 | /* | 316 | /* |
303 | * Timer routines | 317 | * Timer routines |
304 | */ | 318 | */ |
@@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha) | |||
864 | * qla4xxx_recover_adapter - recovers adapter after a fatal error | 878 | * qla4xxx_recover_adapter - recovers adapter after a fatal error |
865 | * @ha: Pointer to host adapter structure. | 879 | * @ha: Pointer to host adapter structure. |
866 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list | 880 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list |
867 | * after adapter recovery has completed. | 881 | * |
868 | * 0=preserve ddb list, 1=destroy and rebuild ddb list | 882 | * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild |
883 | * ddb list. | ||
869 | **/ | 884 | **/ |
870 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | 885 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, |
871 | uint8_t renew_ddb_list) | 886 | uint8_t renew_ddb_list) |
@@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
874 | 889 | ||
875 | /* Stall incoming I/O until we are done */ | 890 | /* Stall incoming I/O until we are done */ |
876 | clear_bit(AF_ONLINE, &ha->flags); | 891 | clear_bit(AF_ONLINE, &ha->flags); |
892 | |||
877 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, | 893 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, |
878 | __func__)); | 894 | __func__)); |
879 | 895 | ||
@@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1176 | int ret = -ENODEV, status; | 1192 | int ret = -ENODEV, status; |
1177 | struct Scsi_Host *host; | 1193 | struct Scsi_Host *host; |
1178 | struct scsi_qla_host *ha; | 1194 | struct scsi_qla_host *ha; |
1179 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
1180 | uint8_t init_retry_count = 0; | 1195 | uint8_t init_retry_count = 0; |
1181 | char buf[34]; | 1196 | char buf[34]; |
1182 | 1197 | ||
@@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1295 | if (ret) | 1310 | if (ret) |
1296 | goto probe_failed; | 1311 | goto probe_failed; |
1297 | 1312 | ||
1298 | /* Update transport device information for all devices. */ | ||
1299 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
1300 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
1301 | if (qla4xxx_add_sess(ddb_entry)) | ||
1302 | goto remove_host; | ||
1303 | } | ||
1304 | |||
1305 | printk(KERN_INFO | 1313 | printk(KERN_INFO |
1306 | " QLogic iSCSI HBA Driver version: %s\n" | 1314 | " QLogic iSCSI HBA Driver version: %s\n" |
1307 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", | 1315 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", |
@@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1311 | scsi_scan_host(host); | 1319 | scsi_scan_host(host); |
1312 | return 0; | 1320 | return 0; |
1313 | 1321 | ||
1314 | remove_host: | ||
1315 | qla4xxx_free_ddb_list(ha); | ||
1316 | scsi_remove_host(host); | ||
1317 | |||
1318 | probe_failed: | 1322 | probe_failed: |
1319 | qla4xxx_free_adapter(ha); | 1323 | qla4xxx_free_adapter(ha); |
1320 | scsi_host_put(ha->host); | 1324 | scsi_host_put(ha->host); |
@@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1600 | return FAILED; | 1604 | return FAILED; |
1601 | } | 1605 | } |
1602 | 1606 | ||
1603 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) { | 1607 | /* make sure the dpc thread is stopped while we reset the hba */ |
1608 | clear_bit(AF_ONLINE, &ha->flags); | ||
1609 | flush_workqueue(ha->dpc_thread); | ||
1610 | |||
1611 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) | ||
1604 | return_status = SUCCESS; | 1612 | return_status = SUCCESS; |
1605 | } | ||
1606 | 1613 | ||
1607 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", | 1614 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", |
1608 | return_status == FAILED ? "FAILED" : "SUCCEDED"); | 1615 | return_status == FAILED ? "FAILED" : "SUCCEDED"); |
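The ql4_os.c hunks above defer registration of firmware-discovered sessions from probe time to a new .scan_start callback, pairing it with the existing iscsi_scan_finished hook so session setup runs as part of the SCSI midlayer's asynchronous scan, and they flush the DPC workqueue before a host reset so the recovery path cannot race it. A rough sketch (not part of the patch, simplified from the midlayer of this era) of how scsi_scan_host() ends up driving the two template hooks:

    #include <linux/delay.h>
    #include <linux/jiffies.h>
    #include <scsi/scsi_host.h>

    /* Simplified illustration only; the real midlayer runs this from an
     * async scan context rather than a plain helper like this. */
    static void scan_host_sketch(struct Scsi_Host *shost)
    {
            unsigned long start = jiffies;

            if (shost->hostt->scan_start)
                    shost->hostt->scan_start(shost);  /* qla4xxx_scan_start() registers sessions */

            /* poll the transport until it reports the scan as finished */
            while (!shost->hostt->scan_finished(shost, jiffies - start))
                    msleep(10);
    }
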
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index fecba05b4e77..e5c6f6af8765 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -757,7 +757,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) | |||
757 | "Notifying upper driver of completion " | 757 | "Notifying upper driver of completion " |
758 | "(result %x)\n", cmd->result)); | 758 | "(result %x)\n", cmd->result)); |
759 | 759 | ||
760 | good_bytes = scsi_bufflen(cmd); | 760 | good_bytes = scsi_bufflen(cmd) + cmd->request->extra_len; |
761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { | 761 | if (cmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { |
762 | drv = scsi_cmd_to_driver(cmd); | 762 | drv = scsi_cmd_to_driver(cmd); |
763 | if (drv->done) | 763 | if (drv->done) |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 1dc165ad17fb..e67c14e31bab 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, | |||
1577 | } | 1577 | } |
1578 | 1578 | ||
1579 | /** | 1579 | /** |
1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the | 1580 | * scsi_scan_target - scan a target id, possibly including all LUNs on the target. |
1581 | * target. | ||
1582 | * @parent: host to scan | 1581 | * @parent: host to scan |
1583 | * @channel: channel to scan | 1582 | * @channel: channel to scan |
1584 | * @id: target id to scan | 1583 | * @id: target id to scan |
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 3677fbb30b72..a0f308bd145b 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost, | |||
103 | if (!cmd) | 103 | if (!cmd) |
104 | goto release_rq; | 104 | goto release_rq; |
105 | 105 | ||
106 | memset(cmd, 0, sizeof(*cmd)); | ||
107 | cmd->sc_data_direction = data_dir; | 106 | cmd->sc_data_direction = data_dir; |
108 | cmd->jiffies_at_alloc = jiffies; | 107 | cmd->jiffies_at_alloc = jiffies; |
109 | cmd->request = rq; | 108 | cmd->request = rq; |
@@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, | |||
382 | scsi_release_buffers(cmd); | 381 | scsi_release_buffers(cmd); |
383 | goto unmap_rq; | 382 | goto unmap_rq; |
384 | } | 383 | } |
384 | /* | ||
385 | * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the | ||
386 | * length for us. | ||
387 | */ | ||
388 | cmd->sdb.length = rq->data_len; | ||
385 | 389 | ||
386 | return 0; | 390 | return 0; |
387 | 391 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9981682d5302..ca7bb6f63bde 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #define ISCSI_SESSION_ATTRS 19 | 33 | #define ISCSI_SESSION_ATTRS 19 |
34 | #define ISCSI_CONN_ATTRS 13 | 34 | #define ISCSI_CONN_ATTRS 13 |
35 | #define ISCSI_HOST_ATTRS 4 | 35 | #define ISCSI_HOST_ATTRS 4 |
36 | #define ISCSI_TRANSPORT_VERSION "2.0-868" | 36 | #define ISCSI_TRANSPORT_VERSION "2.0-869" |
37 | 37 | ||
38 | struct iscsi_internal { | 38 | struct iscsi_internal { |
39 | int daemon_pid; | 39 | int daemon_pid; |
@@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work) | |||
373 | scsi_target_unblock(&session->dev); | 373 | scsi_target_unblock(&session->dev); |
374 | } | 374 | } |
375 | 375 | ||
376 | static void __iscsi_unblock_session(struct iscsi_cls_session *session) | 376 | static void __iscsi_unblock_session(struct work_struct *work) |
377 | { | ||
378 | if (!cancel_delayed_work(&session->recovery_work)) | ||
379 | flush_workqueue(iscsi_eh_timer_workq); | ||
380 | scsi_target_unblock(&session->dev); | ||
381 | } | ||
382 | |||
383 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
384 | { | 377 | { |
378 | struct iscsi_cls_session *session = | ||
379 | container_of(work, struct iscsi_cls_session, | ||
380 | unblock_work); | ||
385 | struct Scsi_Host *shost = iscsi_session_to_shost(session); | 381 | struct Scsi_Host *shost = iscsi_session_to_shost(session); |
386 | struct iscsi_host *ihost = shost->shost_data; | 382 | struct iscsi_host *ihost = shost->shost_data; |
387 | unsigned long flags; | 383 | unsigned long flags; |
388 | 384 | ||
385 | /* | ||
386 | * The recovery and unblock work get run from the same workqueue, | ||
387 | * so try to cancel it if it was going to run after this unblock. | ||
388 | */ | ||
389 | cancel_delayed_work(&session->recovery_work); | ||
389 | spin_lock_irqsave(&session->lock, flags); | 390 | spin_lock_irqsave(&session->lock, flags); |
390 | session->state = ISCSI_SESSION_LOGGED_IN; | 391 | session->state = ISCSI_SESSION_LOGGED_IN; |
391 | spin_unlock_irqrestore(&session->lock, flags); | 392 | spin_unlock_irqrestore(&session->lock, flags); |
392 | 393 | /* start IO */ | |
393 | __iscsi_unblock_session(session); | 394 | scsi_target_unblock(&session->dev); |
394 | /* | 395 | /* |
395 | * Only do kernel scanning if the driver is properly hooked into | 396 | * Only do kernel scanning if the driver is properly hooked into |
396 | * the async scanning code (drivers like iscsi_tcp do login and | 397 | * the async scanning code (drivers like iscsi_tcp do login and |
@@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session) | |||
401 | atomic_inc(&ihost->nr_scans); | 402 | atomic_inc(&ihost->nr_scans); |
402 | } | 403 | } |
403 | } | 404 | } |
405 | |||
406 | /** | ||
407 | * iscsi_unblock_session - set a session as logged in and start IO. | ||
408 | * @session: iscsi session | ||
409 | * | ||
410 | * Mark a session as ready to accept IO. | ||
411 | */ | ||
412 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
413 | { | ||
414 | queue_work(iscsi_eh_timer_workq, &session->unblock_work); | ||
415 | /* | ||
416 | * make sure all the events have completed before tell the driver | ||
417 | * it is safe | ||
418 | */ | ||
419 | flush_workqueue(iscsi_eh_timer_workq); | ||
420 | } | ||
404 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); | 421 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); |
405 | 422 | ||
406 | void iscsi_block_session(struct iscsi_cls_session *session) | 423 | static void __iscsi_block_session(struct work_struct *work) |
407 | { | 424 | { |
425 | struct iscsi_cls_session *session = | ||
426 | container_of(work, struct iscsi_cls_session, | ||
427 | block_work); | ||
408 | unsigned long flags; | 428 | unsigned long flags; |
409 | 429 | ||
410 | spin_lock_irqsave(&session->lock, flags); | 430 | spin_lock_irqsave(&session->lock, flags); |
411 | session->state = ISCSI_SESSION_FAILED; | 431 | session->state = ISCSI_SESSION_FAILED; |
412 | spin_unlock_irqrestore(&session->lock, flags); | 432 | spin_unlock_irqrestore(&session->lock, flags); |
413 | |||
414 | scsi_target_block(&session->dev); | 433 | scsi_target_block(&session->dev); |
415 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, | 434 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, |
416 | session->recovery_tmo * HZ); | 435 | session->recovery_tmo * HZ); |
417 | } | 436 | } |
437 | |||
438 | void iscsi_block_session(struct iscsi_cls_session *session) | ||
439 | { | ||
440 | queue_work(iscsi_eh_timer_workq, &session->block_work); | ||
441 | } | ||
418 | EXPORT_SYMBOL_GPL(iscsi_block_session); | 442 | EXPORT_SYMBOL_GPL(iscsi_block_session); |
419 | 443 | ||
420 | static void __iscsi_unbind_session(struct work_struct *work) | 444 | static void __iscsi_unbind_session(struct work_struct *work) |
@@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, | |||
463 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); | 487 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); |
464 | INIT_LIST_HEAD(&session->host_list); | 488 | INIT_LIST_HEAD(&session->host_list); |
465 | INIT_LIST_HEAD(&session->sess_list); | 489 | INIT_LIST_HEAD(&session->sess_list); |
490 | INIT_WORK(&session->unblock_work, __iscsi_unblock_session); | ||
491 | INIT_WORK(&session->block_work, __iscsi_block_session); | ||
466 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); | 492 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); |
467 | INIT_WORK(&session->scan_work, iscsi_scan_session); | 493 | INIT_WORK(&session->scan_work, iscsi_scan_session); |
468 | spin_lock_init(&session->lock); | 494 | spin_lock_init(&session->lock); |
@@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session) | |||
575 | list_del(&session->sess_list); | 601 | list_del(&session->sess_list); |
576 | spin_unlock_irqrestore(&sesslock, flags); | 602 | spin_unlock_irqrestore(&sesslock, flags); |
577 | 603 | ||
604 | /* make sure there are no blocks/unblocks queued */ | ||
605 | flush_workqueue(iscsi_eh_timer_workq); | ||
606 | /* make sure the timedout callout is not running */ | ||
607 | if (!cancel_delayed_work(&session->recovery_work)) | ||
608 | flush_workqueue(iscsi_eh_timer_workq); | ||
578 | /* | 609 | /* |
579 | * If we are blocked let commands flow again. The lld or iscsi | 610 | * If we are blocked let commands flow again. The lld or iscsi |
580 | * layer should set up the queuecommand to fail commands. | 611 | * layer should set up the queuecommand to fail commands. |
612 | * We assume that LLD will not be calling block/unblock while | ||
613 | * removing the session. | ||
581 | */ | 614 | */ |
582 | spin_lock_irqsave(&session->lock, flags); | 615 | spin_lock_irqsave(&session->lock, flags); |
583 | session->state = ISCSI_SESSION_FREE; | 616 | session->state = ISCSI_SESSION_FREE; |
584 | spin_unlock_irqrestore(&session->lock, flags); | 617 | spin_unlock_irqrestore(&session->lock, flags); |
585 | __iscsi_unblock_session(session); | ||
586 | __iscsi_unbind_session(&session->unbind_work); | ||
587 | 618 | ||
588 | /* flush running scans */ | 619 | scsi_target_unblock(&session->dev); |
620 | /* flush running scans then delete devices */ | ||
589 | flush_workqueue(ihost->scan_workq); | 621 | flush_workqueue(ihost->scan_workq); |
590 | /* | 622 | __iscsi_unbind_session(&session->unbind_work); |
591 | * If the session dropped while removing devices then we need to make | ||
592 | * sure it is not blocked | ||
593 | */ | ||
594 | if (!cancel_delayed_work(&session->recovery_work)) | ||
595 | flush_workqueue(iscsi_eh_timer_workq); | ||
596 | 623 | ||
597 | /* hw iscsi may not have removed all connections from session */ | 624 | /* hw iscsi may not have removed all connections from session */ |
598 | err = device_for_each_child(&session->dev, NULL, | 625 | err = device_for_each_child(&session->dev, NULL, |
@@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu); | |||
802 | 829 | ||
803 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | 830 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) |
804 | { | 831 | { |
805 | struct iscsi_cls_session *session = iscsi_conn_to_session(conn); | ||
806 | struct nlmsghdr *nlh; | 832 | struct nlmsghdr *nlh; |
807 | struct sk_buff *skb; | 833 | struct sk_buff *skb; |
808 | struct iscsi_uevent *ev; | 834 | struct iscsi_uevent *ev; |
809 | struct iscsi_internal *priv; | 835 | struct iscsi_internal *priv; |
810 | int len = NLMSG_SPACE(sizeof(*ev)); | 836 | int len = NLMSG_SPACE(sizeof(*ev)); |
811 | unsigned long flags; | ||
812 | 837 | ||
813 | priv = iscsi_if_transport_lookup(conn->transport); | 838 | priv = iscsi_if_transport_lookup(conn->transport); |
814 | if (!priv) | 839 | if (!priv) |
815 | return; | 840 | return; |
816 | 841 | ||
817 | spin_lock_irqsave(&session->lock, flags); | ||
818 | if (session->state == ISCSI_SESSION_LOGGED_IN) | ||
819 | session->state = ISCSI_SESSION_FAILED; | ||
820 | spin_unlock_irqrestore(&session->lock, flags); | ||
821 | |||
822 | skb = alloc_skb(len, GFP_ATOMIC); | 842 | skb = alloc_skb(len, GFP_ATOMIC); |
823 | if (!skb) { | 843 | if (!skb) { |
824 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " | 844 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " |
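The transport-class rework above turns session block/unblock into work items on iscsi_eh_timer_workq, the same queue that runs recovery_work, so block, unblock and recovery-timeout handling are serialized against each other; iscsi_unblock_session() then flushes the queue so the caller knows IO has been restarted by the time it returns. A minimal sketch of the pattern (illustrative only; the field and queue names follow the patch):

    #include <linux/workqueue.h>

    /* Sketch: funnel state changes through one workqueue so they cannot
     * interleave, then flush when the caller must observe the result. */
    static struct workqueue_struct *eh_timer_wq;  /* stands in for iscsi_eh_timer_workq */

    struct session_sketch {
            struct work_struct   block_work;
            struct work_struct   unblock_work;
            struct delayed_work  recovery_work;   /* queued on the same wq */
    };

    static void session_sketch_init(struct session_sketch *s,
                                    work_func_t block_fn, work_func_t unblock_fn)
    {
            INIT_WORK(&s->block_work, block_fn);
            INIT_WORK(&s->unblock_work, unblock_fn);
    }

    static void session_sketch_unblock(struct session_sketch *s)
    {
            queue_work(eh_timer_wq, &s->unblock_work);
            /* wait for the unblock (and anything queued ahead of it) to run */
            flush_workqueue(eh_timer_wq);
    }

    static void session_sketch_block(struct session_sketch *s)
    {
            queue_work(eh_timer_wq, &s->block_work);   /* fire and forget */
    }
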
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6f09cbd7fc48..97c68d021d28 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c | |||
@@ -91,6 +91,8 @@ static const struct pnp_device_id pnp_dev_table[] = { | |||
91 | /* Archtek America Corp. */ | 91 | /* Archtek America Corp. */ |
92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ | 92 | /* Archtek SmartLink Modem 3334BT Plug & Play */ |
93 | { "GVC000F", 0 }, | 93 | { "GVC000F", 0 }, |
94 | /* Archtek SmartLink Modem 3334BRV 33.6K Data Fax Voice */ | ||
95 | { "GVC0303", 0 }, | ||
94 | /* Hayes */ | 96 | /* Hayes */ |
95 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ | 97 | /* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */ |
96 | { "HAY0001", 0 }, | 98 | { "HAY0001", 0 }, |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index b82595cf13e8..cf627cd1b4c8 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -686,7 +686,7 @@ config UART0_RTS_PIN | |||
686 | 686 | ||
687 | config SERIAL_BFIN_UART1 | 687 | config SERIAL_BFIN_UART1 |
688 | bool "Enable UART1" | 688 | bool "Enable UART1" |
689 | depends on SERIAL_BFIN && (BF534 || BF536 || BF537 || BF54x) | 689 | depends on SERIAL_BFIN && (!BF531 && !BF532 && !BF533 && !BF561) |
690 | help | 690 | help |
691 | Enable UART1 | 691 | Enable UART1 |
692 | 692 | ||
@@ -699,14 +699,14 @@ config BFIN_UART1_CTSRTS | |||
699 | 699 | ||
700 | config UART1_CTS_PIN | 700 | config UART1_CTS_PIN |
701 | int "UART1 CTS pin" | 701 | int "UART1 CTS pin" |
702 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 702 | depends on BFIN_UART1_CTSRTS && !BF54x |
703 | default -1 | 703 | default -1 |
704 | help | 704 | help |
705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 705 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
706 | 706 | ||
707 | config UART1_RTS_PIN | 707 | config UART1_RTS_PIN |
708 | int "UART1 RTS pin" | 708 | int "UART1 RTS pin" |
709 | depends on BFIN_UART1_CTSRTS && (BF53x || BF561) | 709 | depends on BFIN_UART1_CTSRTS && !BF54x |
710 | default -1 | 710 | default -1 |
711 | help | 711 | help |
712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. | 712 | Refer to ./include/asm-blackfin/gpio.h to see the GPIO map. |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index ac2a3ef28d55..0aa345b9a38b 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
@@ -1,30 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * File: drivers/serial/bfin_5xx.c | 2 | * Blackfin On-Chip Serial Driver |
3 | * Based on: Based on drivers/serial/sa1100.c | ||
4 | * Author: Aubrey Li <aubrey.li@analog.com> | ||
5 | * | 3 | * |
6 | * Created: | 4 | * Copyright 2006-2007 Analog Devices Inc. |
7 | * Description: Driver for blackfin 5xx serial ports | ||
8 | * | 5 | * |
9 | * Modified: | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
10 | * Copyright 2006 Analog Devices Inc. | ||
11 | * | 7 | * |
12 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 8 | * Licensed under the GPL-2 or later. |
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with this program; if not, see the file COPYING, or write | ||
26 | * to the Free Software Foundation, Inc., | ||
27 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
28 | */ | 9 | */ |
29 | 10 | ||
30 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 11 | #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
@@ -67,14 +48,12 @@ | |||
67 | #define DMA_RX_XCOUNT 512 | 48 | #define DMA_RX_XCOUNT 512 |
68 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) | 49 | #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) |
69 | 50 | ||
70 | #define DMA_RX_FLUSH_JIFFIES 5 | 51 | #define DMA_RX_FLUSH_JIFFIES (HZ / 50) |
71 | 52 | ||
72 | #ifdef CONFIG_SERIAL_BFIN_DMA | 53 | #ifdef CONFIG_SERIAL_BFIN_DMA |
73 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); | 54 | static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); |
74 | #else | 55 | #else |
75 | static void bfin_serial_do_work(struct work_struct *work); | ||
76 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); | 56 | static void bfin_serial_tx_chars(struct bfin_serial_port *uart); |
77 | static void local_put_char(struct bfin_serial_port *uart, char ch); | ||
78 | #endif | 57 | #endif |
79 | 58 | ||
80 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | 59 | static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); |
@@ -85,23 +64,26 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart); | |||
85 | static void bfin_serial_stop_tx(struct uart_port *port) | 64 | static void bfin_serial_stop_tx(struct uart_port *port) |
86 | { | 65 | { |
87 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 66 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
67 | struct circ_buf *xmit = &uart->port.info->xmit; | ||
68 | #if !defined(CONFIG_BF54x) && !defined(CONFIG_SERIAL_BFIN_DMA) | ||
69 | unsigned short ier; | ||
70 | #endif | ||
88 | 71 | ||
89 | while (!(UART_GET_LSR(uart) & TEMT)) | 72 | while (!(UART_GET_LSR(uart) & TEMT)) |
90 | continue; | 73 | cpu_relax(); |
91 | 74 | ||
92 | #ifdef CONFIG_SERIAL_BFIN_DMA | 75 | #ifdef CONFIG_SERIAL_BFIN_DMA |
93 | disable_dma(uart->tx_dma_channel); | 76 | disable_dma(uart->tx_dma_channel); |
77 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); | ||
78 | uart->port.icount.tx += uart->tx_count; | ||
79 | uart->tx_count = 0; | ||
80 | uart->tx_done = 1; | ||
94 | #else | 81 | #else |
95 | #ifdef CONFIG_BF54x | 82 | #ifdef CONFIG_BF54x |
96 | /* Waiting for Transmission Finished */ | ||
97 | while (!(UART_GET_LSR(uart) & TFI)) | ||
98 | continue; | ||
99 | /* Clear TFI bit */ | 83 | /* Clear TFI bit */ |
100 | UART_PUT_LSR(uart, TFI); | 84 | UART_PUT_LSR(uart, TFI); |
101 | UART_CLEAR_IER(uart, ETBEI); | 85 | UART_CLEAR_IER(uart, ETBEI); |
102 | #else | 86 | #else |
103 | unsigned short ier; | ||
104 | |||
105 | ier = UART_GET_IER(uart); | 87 | ier = UART_GET_IER(uart); |
106 | ier &= ~ETBEI; | 88 | ier &= ~ETBEI; |
107 | UART_PUT_IER(uart, ier); | 89 | UART_PUT_IER(uart, ier); |
@@ -117,7 +99,8 @@ static void bfin_serial_start_tx(struct uart_port *port) | |||
117 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; | 99 | struct bfin_serial_port *uart = (struct bfin_serial_port *)port; |
118 | 100 | ||
119 | #ifdef CONFIG_SERIAL_BFIN_DMA | 101 | #ifdef CONFIG_SERIAL_BFIN_DMA |
120 | bfin_serial_dma_tx_chars(uart); | 102 | if (uart->tx_done) |
103 | bfin_serial_dma_tx_chars(uart); | ||
121 | #else | 104 | #else |
122 | #ifdef CONFIG_BF54x | 105 | #ifdef CONFIG_BF54x |
123 | UART_SET_IER(uart, ETBEI); | 106 | UART_SET_IER(uart, ETBEI); |
@@ -209,34 +192,27 @@ int kgdb_get_debug_char(void) | |||
209 | } | 192 | } |
210 | #endif | 193 | #endif |
211 | 194 | ||
212 | #ifdef CONFIG_SERIAL_BFIN_PIO | 195 | #if ANOMALY_05000230 && defined(CONFIG_SERIAL_BFIN_PIO) |
213 | static void local_put_char(struct bfin_serial_port *uart, char ch) | 196 | # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) |
214 | { | 197 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) |
215 | unsigned short status; | 198 | #else |
216 | int flags = 0; | 199 | # define UART_GET_ANOMALY_THRESHOLD(uart) 0 |
217 | 200 | # define UART_SET_ANOMALY_THRESHOLD(uart, v) | |
218 | spin_lock_irqsave(&uart->port.lock, flags); | 201 | #endif |
219 | |||
220 | do { | ||
221 | status = UART_GET_LSR(uart); | ||
222 | } while (!(status & THRE)); | ||
223 | |||
224 | UART_PUT_CHAR(uart, ch); | ||
225 | SSYNC(); | ||
226 | |||
227 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
228 | } | ||
229 | 202 | ||
203 | #ifdef CONFIG_SERIAL_BFIN_PIO | ||
230 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | 204 | static void bfin_serial_rx_chars(struct bfin_serial_port *uart) |
231 | { | 205 | { |
232 | struct tty_struct *tty = uart->port.info->tty; | 206 | struct tty_struct *tty = uart->port.info->tty; |
233 | unsigned int status, ch, flg; | 207 | unsigned int status, ch, flg; |
234 | static int in_break = 0; | 208 | static struct timeval anomaly_start = { .tv_sec = 0 }; |
235 | #ifdef CONFIG_KGDB_UART | 209 | #ifdef CONFIG_KGDB_UART |
236 | struct pt_regs *regs = get_irq_regs(); | 210 | struct pt_regs *regs = get_irq_regs(); |
237 | #endif | 211 | #endif |
238 | 212 | ||
239 | status = UART_GET_LSR(uart); | 213 | status = UART_GET_LSR(uart); |
214 | UART_CLEAR_LSR(uart); | ||
215 | |||
240 | ch = UART_GET_CHAR(uart); | 216 | ch = UART_GET_CHAR(uart); |
241 | uart->port.icount.rx++; | 217 | uart->port.icount.rx++; |
242 | 218 | ||
@@ -262,28 +238,56 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) | |||
262 | #endif | 238 | #endif |
263 | 239 | ||
264 | if (ANOMALY_05000230) { | 240 | if (ANOMALY_05000230) { |
265 | /* The BF533 family of processors have a nice misbehavior where | 241 | /* The BF533 (and BF561) family of processors have a nice anomaly |
266 | * they continuously generate characters for a "single" break. | 242 | * where they continuously generate characters for a "single" break. |
267 | * We have to basically ignore this flood until the "next" valid | 243 | * We have to basically ignore this flood until the "next" valid |
268 | * character comes across. All other Blackfin families operate | 244 | * character comes across. Due to the nature of the flood, it is |
269 | * properly though. | 245 | * not possible to reliably catch bytes that are sent too quickly |
246 | * after this break. So application code talking to the Blackfin | ||
247 | * which sends a break signal must allow at least 1.5 character | ||
248 | * times after the end of the break for things to stabilize. This | ||
249 | * timeout was picked as it must absolutely be larger than 1 | ||
250 | * character time +/- some percent. So 1.5 sounds good. All other | ||
251 | * Blackfin families operate properly. Woo. | ||
270 | * Note: While Anomaly 05000230 does not directly address this, | 252 | * Note: While Anomaly 05000230 does not directly address this, |
271 | * the changes that went in for it also fixed this issue. | 253 | * the changes that went in for it also fixed this issue. |
254 | * That anomaly was fixed in 0.5+ silicon. I like bunnies. | ||
272 | */ | 255 | */ |
273 | if (in_break) { | 256 | if (anomaly_start.tv_sec) { |
274 | if (ch != 0) { | 257 | struct timeval curr; |
275 | in_break = 0; | 258 | suseconds_t usecs; |
276 | ch = UART_GET_CHAR(uart); | 259 | |
277 | if (bfin_revid() < 5) | 260 | if ((~ch & (~ch + 1)) & 0xff) |
278 | return; | 261 | goto known_good_char; |
279 | } else | 262 | |
280 | return; | 263 | do_gettimeofday(&curr); |
264 | if (curr.tv_sec - anomaly_start.tv_sec > 1) | ||
265 | goto known_good_char; | ||
266 | |||
267 | usecs = 0; | ||
268 | if (curr.tv_sec != anomaly_start.tv_sec) | ||
269 | usecs += USEC_PER_SEC; | ||
270 | usecs += curr.tv_usec - anomaly_start.tv_usec; | ||
271 | |||
272 | if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) | ||
273 | goto known_good_char; | ||
274 | |||
275 | if (ch) | ||
276 | anomaly_start.tv_sec = 0; | ||
277 | else | ||
278 | anomaly_start = curr; | ||
279 | |||
280 | return; | ||
281 | |||
282 | known_good_char: | ||
283 | anomaly_start.tv_sec = 0; | ||
281 | } | 284 | } |
282 | } | 285 | } |
283 | 286 | ||
284 | if (status & BI) { | 287 | if (status & BI) { |
285 | if (ANOMALY_05000230) | 288 | if (ANOMALY_05000230) |
286 | in_break = 1; | 289 | if (bfin_revid() < 5) |
290 | do_gettimeofday(&anomaly_start); | ||
287 | uart->port.icount.brk++; | 291 | uart->port.icount.brk++; |
288 | if (uart_handle_break(&uart->port)) | 292 | if (uart_handle_break(&uart->port)) |
289 | goto ignore_char; | 293 | goto ignore_char; |
@@ -324,7 +328,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
324 | UART_PUT_CHAR(uart, uart->port.x_char); | 328 | UART_PUT_CHAR(uart, uart->port.x_char); |
325 | uart->port.icount.tx++; | 329 | uart->port.icount.tx++; |
326 | uart->port.x_char = 0; | 330 | uart->port.x_char = 0; |
327 | return; | ||
328 | } | 331 | } |
329 | /* | 332 | /* |
330 | * Check the modem control lines before | 333 | * Check the modem control lines before |
@@ -337,9 +340,12 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) | |||
337 | return; | 340 | return; |
338 | } | 341 | } |
339 | 342 | ||
340 | local_put_char(uart, xmit->buf[xmit->tail]); | 343 | while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { |
341 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 344 | UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); |
342 | uart->port.icount.tx++; | 345 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
346 | uart->port.icount.tx++; | ||
347 | SSYNC(); | ||
348 | } | ||
343 | 349 | ||
344 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 350 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
345 | uart_write_wakeup(&uart->port); | 351 | uart_write_wakeup(&uart->port); |
@@ -352,21 +358,11 @@ static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) | |||
352 | { | 358 | { |
353 | struct bfin_serial_port *uart = dev_id; | 359 | struct bfin_serial_port *uart = dev_id; |
354 | 360 | ||
355 | #ifdef CONFIG_BF54x | ||
356 | unsigned short status; | ||
357 | spin_lock(&uart->port.lock); | ||
358 | status = UART_GET_LSR(uart); | ||
359 | while ((UART_GET_IER(uart) & ERBFI) && (status & DR)) { | ||
360 | bfin_serial_rx_chars(uart); | ||
361 | status = UART_GET_LSR(uart); | ||
362 | } | ||
363 | spin_unlock(&uart->port.lock); | ||
364 | #else | ||
365 | spin_lock(&uart->port.lock); | 361 | spin_lock(&uart->port.lock); |
366 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_RX_READY) | 362 | while (UART_GET_LSR(uart) & DR) |
367 | bfin_serial_rx_chars(uart); | 363 | bfin_serial_rx_chars(uart); |
368 | spin_unlock(&uart->port.lock); | 364 | spin_unlock(&uart->port.lock); |
369 | #endif | 365 | |
370 | return IRQ_HANDLED; | 366 | return IRQ_HANDLED; |
371 | } | 367 | } |
372 | 368 | ||
@@ -374,25 +370,16 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) | |||
374 | { | 370 | { |
375 | struct bfin_serial_port *uart = dev_id; | 371 | struct bfin_serial_port *uart = dev_id; |
376 | 372 | ||
377 | #ifdef CONFIG_BF54x | ||
378 | unsigned short status; | ||
379 | spin_lock(&uart->port.lock); | 373 | spin_lock(&uart->port.lock); |
380 | status = UART_GET_LSR(uart); | 374 | if (UART_GET_LSR(uart) & THRE) |
381 | while ((UART_GET_IER(uart) & ETBEI) && (status & THRE)) { | ||
382 | bfin_serial_tx_chars(uart); | 375 | bfin_serial_tx_chars(uart); |
383 | status = UART_GET_LSR(uart); | ||
384 | } | ||
385 | spin_unlock(&uart->port.lock); | 376 | spin_unlock(&uart->port.lock); |
386 | #else | 377 | |
387 | spin_lock(&uart->port.lock); | ||
388 | while ((UART_GET_IIR(uart) & IIR_STATUS) == IIR_TX_READY) | ||
389 | bfin_serial_tx_chars(uart); | ||
390 | spin_unlock(&uart->port.lock); | ||
391 | #endif | ||
392 | return IRQ_HANDLED; | 378 | return IRQ_HANDLED; |
393 | } | 379 | } |
380 | #endif | ||
394 | 381 | ||
395 | 382 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | |
396 | static void bfin_serial_do_work(struct work_struct *work) | 383 | static void bfin_serial_do_work(struct work_struct *work) |
397 | { | 384 | { |
398 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); | 385 | struct bfin_serial_port *uart = container_of(work, struct bfin_serial_port, cts_workqueue); |
@@ -406,33 +393,27 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
406 | { | 393 | { |
407 | struct circ_buf *xmit = &uart->port.info->xmit; | 394 | struct circ_buf *xmit = &uart->port.info->xmit; |
408 | unsigned short ier; | 395 | unsigned short ier; |
409 | int flags = 0; | ||
410 | |||
411 | if (!uart->tx_done) | ||
412 | return; | ||
413 | 396 | ||
414 | uart->tx_done = 0; | 397 | uart->tx_done = 0; |
415 | 398 | ||
399 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
400 | uart->tx_count = 0; | ||
401 | uart->tx_done = 1; | ||
402 | return; | ||
403 | } | ||
404 | |||
416 | if (uart->port.x_char) { | 405 | if (uart->port.x_char) { |
417 | UART_PUT_CHAR(uart, uart->port.x_char); | 406 | UART_PUT_CHAR(uart, uart->port.x_char); |
418 | uart->port.icount.tx++; | 407 | uart->port.icount.tx++; |
419 | uart->port.x_char = 0; | 408 | uart->port.x_char = 0; |
420 | uart->tx_done = 1; | ||
421 | return; | ||
422 | } | 409 | } |
410 | |||
423 | /* | 411 | /* |
424 | * Check the modem control lines before | 412 | * Check the modem control lines before |
425 | * transmitting anything. | 413 | * transmitting anything. |
426 | */ | 414 | */ |
427 | bfin_serial_mctrl_check(uart); | 415 | bfin_serial_mctrl_check(uart); |
428 | 416 | ||
429 | if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { | ||
430 | bfin_serial_stop_tx(&uart->port); | ||
431 | uart->tx_done = 1; | ||
432 | return; | ||
433 | } | ||
434 | |||
435 | spin_lock_irqsave(&uart->port.lock, flags); | ||
436 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); | 417 | uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); |
437 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) | 418 | if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) |
438 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; | 419 | uart->tx_count = UART_XMIT_SIZE - xmit->tail; |
@@ -448,6 +429,7 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
448 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); | 429 | set_dma_x_count(uart->tx_dma_channel, uart->tx_count); |
449 | set_dma_x_modify(uart->tx_dma_channel, 1); | 430 | set_dma_x_modify(uart->tx_dma_channel, 1); |
450 | enable_dma(uart->tx_dma_channel); | 431 | enable_dma(uart->tx_dma_channel); |
432 | |||
451 | #ifdef CONFIG_BF54x | 433 | #ifdef CONFIG_BF54x |
452 | UART_SET_IER(uart, ETBEI); | 434 | UART_SET_IER(uart, ETBEI); |
453 | #else | 435 | #else |
@@ -455,7 +437,6 @@ static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) | |||
455 | ier |= ETBEI; | 437 | ier |= ETBEI; |
456 | UART_PUT_IER(uart, ier); | 438 | UART_PUT_IER(uart, ier); |
457 | #endif | 439 | #endif |
458 | spin_unlock_irqrestore(&uart->port.lock, flags); | ||
459 | } | 440 | } |
460 | 441 | ||
461 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | 442 | static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) |
@@ -464,7 +445,11 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
464 | int i, flg, status; | 445 | int i, flg, status; |
465 | 446 | ||
466 | status = UART_GET_LSR(uart); | 447 | status = UART_GET_LSR(uart); |
467 | uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE);; | 448 | UART_CLEAR_LSR(uart); |
449 | |||
450 | uart->port.icount.rx += | ||
451 | CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, | ||
452 | UART_XMIT_SIZE); | ||
468 | 453 | ||
469 | if (status & BI) { | 454 | if (status & BI) { |
470 | uart->port.icount.brk++; | 455 | uart->port.icount.brk++; |
@@ -490,10 +475,12 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
490 | else | 475 | else |
491 | flg = TTY_NORMAL; | 476 | flg = TTY_NORMAL; |
492 | 477 | ||
493 | for (i = uart->rx_dma_buf.head; i < uart->rx_dma_buf.tail; i++) { | 478 | for (i = uart->rx_dma_buf.tail; i != uart->rx_dma_buf.head; i++) { |
494 | if (uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) | 479 | if (i >= UART_XMIT_SIZE) |
495 | goto dma_ignore_char; | 480 | i = 0; |
496 | uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); | 481 | if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) |
482 | uart_insert_char(&uart->port, status, OE, | ||
483 | uart->rx_dma_buf.buf[i], flg); | ||
497 | } | 484 | } |
498 | 485 | ||
499 | dma_ignore_char: | 486 | dma_ignore_char: |
@@ -503,23 +490,23 @@ static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) | |||
503 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) | 490 | void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) |
504 | { | 491 | { |
505 | int x_pos, pos; | 492 | int x_pos, pos; |
506 | int flags = 0; | ||
507 | |||
508 | bfin_serial_dma_tx_chars(uart); | ||
509 | 493 | ||
510 | spin_lock_irqsave(&uart->port.lock, flags); | 494 | uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); |
511 | x_pos = DMA_RX_XCOUNT - get_dma_curr_xcount(uart->rx_dma_channel); | 495 | x_pos = get_dma_curr_xcount(uart->rx_dma_channel); |
496 | uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; | ||
497 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) | ||
498 | uart->rx_dma_nrows = 0; | ||
499 | x_pos = DMA_RX_XCOUNT - x_pos; | ||
512 | if (x_pos == DMA_RX_XCOUNT) | 500 | if (x_pos == DMA_RX_XCOUNT) |
513 | x_pos = 0; | 501 | x_pos = 0; |
514 | 502 | ||
515 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; | 503 | pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; |
516 | 504 | if (pos != uart->rx_dma_buf.tail) { | |
517 | if (pos>uart->rx_dma_buf.tail) { | 505 | uart->rx_dma_buf.head = pos; |
518 | uart->rx_dma_buf.tail = pos; | ||
519 | bfin_serial_dma_rx_chars(uart); | 506 | bfin_serial_dma_rx_chars(uart); |
520 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail; | 507 | uart->rx_dma_buf.tail = uart->rx_dma_buf.head; |
521 | } | 508 | } |
522 | spin_unlock_irqrestore(&uart->port.lock, flags); | 509 | |
523 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; | 510 | uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; |
524 | add_timer(&(uart->rx_dma_timer)); | 511 | add_timer(&(uart->rx_dma_timer)); |
525 | } | 512 | } |
@@ -532,8 +519,8 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
532 | 519 | ||
533 | spin_lock(&uart->port.lock); | 520 | spin_lock(&uart->port.lock); |
534 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { | 521 | if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { |
535 | clear_dma_irqstat(uart->tx_dma_channel); | ||
536 | disable_dma(uart->tx_dma_channel); | 522 | disable_dma(uart->tx_dma_channel); |
523 | clear_dma_irqstat(uart->tx_dma_channel); | ||
537 | #ifdef CONFIG_BF54x | 524 | #ifdef CONFIG_BF54x |
538 | UART_CLEAR_IER(uart, ETBEI); | 525 | UART_CLEAR_IER(uart, ETBEI); |
539 | #else | 526 | #else |
@@ -541,15 +528,13 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) | |||
541 | ier &= ~ETBEI; | 528 | ier &= ~ETBEI; |
542 | UART_PUT_IER(uart, ier); | 529 | UART_PUT_IER(uart, ier); |
543 | #endif | 530 | #endif |
544 | xmit->tail = (xmit->tail+uart->tx_count) &(UART_XMIT_SIZE -1); | 531 | xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); |
545 | uart->port.icount.tx+=uart->tx_count; | 532 | uart->port.icount.tx += uart->tx_count; |
546 | 533 | ||
547 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 534 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
548 | uart_write_wakeup(&uart->port); | 535 | uart_write_wakeup(&uart->port); |
549 | 536 | ||
550 | if (uart_circ_empty(xmit)) | 537 | bfin_serial_dma_tx_chars(uart); |
551 | bfin_serial_stop_tx(&uart->port); | ||
552 | uart->tx_done = 1; | ||
553 | } | 538 | } |
554 | 539 | ||
555 | spin_unlock(&uart->port.lock); | 540 | spin_unlock(&uart->port.lock); |
@@ -561,18 +546,15 @@ static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) | |||
561 | struct bfin_serial_port *uart = dev_id; | 546 | struct bfin_serial_port *uart = dev_id; |
562 | unsigned short irqstat; | 547 | unsigned short irqstat; |
563 | 548 | ||
564 | uart->rx_dma_nrows++; | ||
565 | if (uart->rx_dma_nrows == DMA_RX_YCOUNT) { | ||
566 | uart->rx_dma_nrows = 0; | ||
567 | uart->rx_dma_buf.tail = DMA_RX_XCOUNT*DMA_RX_YCOUNT; | ||
568 | bfin_serial_dma_rx_chars(uart); | ||
569 | uart->rx_dma_buf.head = uart->rx_dma_buf.tail = 0; | ||
570 | } | ||
571 | spin_lock(&uart->port.lock); | 549 | spin_lock(&uart->port.lock); |
572 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); | 550 | irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); |
573 | clear_dma_irqstat(uart->rx_dma_channel); | 551 | clear_dma_irqstat(uart->rx_dma_channel); |
574 | |||
575 | spin_unlock(&uart->port.lock); | 552 | spin_unlock(&uart->port.lock); |
553 | |||
554 | del_timer(&(uart->rx_dma_timer)); | ||
555 | uart->rx_dma_timer.expires = jiffies; | ||
556 | add_timer(&(uart->rx_dma_timer)); | ||
557 | |||
576 | return IRQ_HANDLED; | 558 | return IRQ_HANDLED; |
577 | } | 559 | } |
578 | #endif | 560 | #endif |
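After this hunk the RX DMA interrupt no longer drains the buffer itself; it only clears the DMA interrupt status and re-arms rx_dma_timer to expire immediately, so all draining happens in the poll handler, which otherwise runs every DMA_RX_FLUSH_JIFFIES, now a fixed 20 ms (HZ / 50) instead of 5 jiffies that varied with HZ. A functionally equivalent sketch of the interrupt side, with mod_timer() standing in for the del_timer()/add_timer() pair used in the patch:

    #include <linux/interrupt.h>
    #include <linux/timer.h>

    /* Sketch only: defer all RX draining to the flush timer. */
    static irqreturn_t rx_dma_isr_sketch(int irq, void *dev_id)
    {
            struct bfin_serial_port *uart = dev_id;

            spin_lock(&uart->port.lock);
            clear_dma_irqstat(uart->rx_dma_channel);
            spin_unlock(&uart->port.lock);

            /* make the flush timer fire now; its handler drains the DMA ring */
            mod_timer(&uart->rx_dma_timer, jiffies);
            return IRQ_HANDLED;
    }
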
@@ -599,7 +581,11 @@ static unsigned int bfin_serial_get_mctrl(struct uart_port *port) | |||
599 | if (uart->cts_pin < 0) | 581 | if (uart->cts_pin < 0) |
600 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; | 582 | return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; |
601 | 583 | ||
584 | # ifdef BF54x | ||
585 | if (UART_GET_MSR(uart) & CTS) | ||
586 | # else | ||
602 | if (gpio_get_value(uart->cts_pin)) | 587 | if (gpio_get_value(uart->cts_pin)) |
588 | # endif | ||
603 | return TIOCM_DSR | TIOCM_CAR; | 589 | return TIOCM_DSR | TIOCM_CAR; |
604 | else | 590 | else |
605 | #endif | 591 | #endif |
@@ -614,9 +600,17 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
614 | return; | 600 | return; |
615 | 601 | ||
616 | if (mctrl & TIOCM_RTS) | 602 | if (mctrl & TIOCM_RTS) |
603 | # ifdef BF54x | ||
604 | UART_PUT_MCR(uart, UART_GET_MCR(uart) & ~MRTS); | ||
605 | # else | ||
617 | gpio_set_value(uart->rts_pin, 0); | 606 | gpio_set_value(uart->rts_pin, 0); |
607 | # endif | ||
618 | else | 608 | else |
609 | # ifdef BF54x | ||
610 | UART_PUT_MCR(uart, UART_GET_MCR(uart) | MRTS); | ||
611 | # else | ||
619 | gpio_set_value(uart->rts_pin, 1); | 612 | gpio_set_value(uart->rts_pin, 1); |
613 | # endif | ||
620 | #endif | 614 | #endif |
621 | } | 615 | } |
622 | 616 | ||
@@ -627,22 +621,17 @@ static void bfin_serial_mctrl_check(struct bfin_serial_port *uart) | |||
627 | { | 621 | { |
628 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 622 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
629 | unsigned int status; | 623 | unsigned int status; |
630 | # ifdef CONFIG_SERIAL_BFIN_DMA | ||
631 | struct uart_info *info = uart->port.info; | 624 | struct uart_info *info = uart->port.info; |
632 | struct tty_struct *tty = info->tty; | 625 | struct tty_struct *tty = info->tty; |
633 | 626 | ||
634 | status = bfin_serial_get_mctrl(&uart->port); | 627 | status = bfin_serial_get_mctrl(&uart->port); |
628 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
635 | if (!(status & TIOCM_CTS)) { | 629 | if (!(status & TIOCM_CTS)) { |
636 | tty->hw_stopped = 1; | 630 | tty->hw_stopped = 1; |
631 | schedule_work(&uart->cts_workqueue); | ||
637 | } else { | 632 | } else { |
638 | tty->hw_stopped = 0; | 633 | tty->hw_stopped = 0; |
639 | } | 634 | } |
640 | # else | ||
641 | status = bfin_serial_get_mctrl(&uart->port); | ||
642 | uart_handle_cts_change(&uart->port, status & TIOCM_CTS); | ||
643 | if (!(status & TIOCM_CTS)) | ||
644 | schedule_work(&uart->cts_workqueue); | ||
645 | # endif | ||
646 | #endif | 635 | #endif |
647 | } | 636 | } |
648 | 637 | ||
@@ -743,6 +732,7 @@ static void bfin_serial_shutdown(struct uart_port *port) | |||
743 | disable_dma(uart->rx_dma_channel); | 732 | disable_dma(uart->rx_dma_channel); |
744 | free_dma(uart->rx_dma_channel); | 733 | free_dma(uart->rx_dma_channel); |
745 | del_timer(&(uart->rx_dma_timer)); | 734 | del_timer(&(uart->rx_dma_timer)); |
735 | dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); | ||
746 | #else | 736 | #else |
747 | #ifdef CONFIG_KGDB_UART | 737 | #ifdef CONFIG_KGDB_UART |
748 | if (uart->port.line != CONFIG_KGDB_UART_PORT) | 738 | if (uart->port.line != CONFIG_KGDB_UART_PORT) |
@@ -814,6 +804,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
814 | quot = uart_get_divisor(port, baud); | 804 | quot = uart_get_divisor(port, baud); |
815 | spin_lock_irqsave(&uart->port.lock, flags); | 805 | spin_lock_irqsave(&uart->port.lock, flags); |
816 | 806 | ||
807 | UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); | ||
808 | |||
817 | do { | 809 | do { |
818 | lsr = UART_GET_LSR(uart); | 810 | lsr = UART_GET_LSR(uart); |
819 | } while (!(lsr & TEMT)); | 811 | } while (!(lsr & TEMT)); |
@@ -956,10 +948,9 @@ static void __init bfin_serial_init_ports(void) | |||
956 | bfin_serial_ports[i].rx_dma_channel = | 948 | bfin_serial_ports[i].rx_dma_channel = |
957 | bfin_serial_resource[i].uart_rx_dma_channel; | 949 | bfin_serial_resource[i].uart_rx_dma_channel; |
958 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); | 950 | init_timer(&(bfin_serial_ports[i].rx_dma_timer)); |
959 | #else | ||
960 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
961 | #endif | 951 | #endif |
962 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS | 952 | #ifdef CONFIG_SERIAL_BFIN_CTSRTS |
953 | INIT_WORK(&bfin_serial_ports[i].cts_workqueue, bfin_serial_do_work); | ||
963 | bfin_serial_ports[i].cts_pin = | 954 | bfin_serial_ports[i].cts_pin = |
964 | bfin_serial_resource[i].uart_cts_pin; | 955 | bfin_serial_resource[i].uart_cts_pin; |
965 | bfin_serial_ports[i].rts_pin = | 956 | bfin_serial_ports[i].rts_pin = |
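The break-flood workaround above stops waiting for the next non-zero character and instead times out: bfin_serial_set_termios() stores USEC_PER_SEC / baud * 15 as the per-port anomaly threshold, about 1.5 character times for a 10-bit frame, and the RX path treats anything arriving after that window (or more than a second later) as a real character again. A worked example of the stored value (not in the patch):

    #include <linux/time.h>   /* USEC_PER_SEC */

    /* What UART_SET_ANOMALY_THRESHOLD() ends up storing, per baud rate. */
    static unsigned int anomaly_threshold_us(unsigned int baud)
    {
            /* USEC_PER_SEC / baud is one bit time in microseconds; a start +
             * 8 data + stop frame is 10 bits, so 15 bit times is roughly 1.5
             * characters.  At 115200 baud: 8 us * 15 = 120 us. */
            return USEC_PER_SEC / baud * 15;
    }
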
diff --git a/drivers/serial/m32r_sio.c b/drivers/serial/m32r_sio.c index 348ee2c19b58..c2bb11c02bde 100644 --- a/drivers/serial/m32r_sio.c +++ b/drivers/serial/m32r_sio.c | |||
@@ -421,7 +421,7 @@ static void transmit_chars(struct uart_sio_port *up) | |||
421 | up->port.icount.tx++; | 421 | up->port.icount.tx++; |
422 | if (uart_circ_empty(xmit)) | 422 | if (uart_circ_empty(xmit)) |
423 | break; | 423 | break; |
424 | while (!serial_in(up, UART_LSR) & UART_LSR_THRE); | 424 | while (!(serial_in(up, UART_LSR) & UART_LSR_THRE)); |
425 | 425 | ||
426 | } while (--count > 0); | 426 | } while (--count > 0); |
427 | 427 | ||
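The one-character m32r_sio fix is pure operator precedence: `!` binds tighter than `&`, so the old condition evaluated (!serial_in(up, UART_LSR)) & UART_LSR_THRE, which is always 0 because the left side is 0 or 1 while UART_LSR_THRE is 0x20, meaning the busy-wait never waited at all. With the parentheses the loop spins until THRE is actually set. A tiny standalone illustration with hypothetical register values:

    /* Hypothetical values, only to show why the parentheses matter. */
    #include <stdio.h>

    #define UART_LSR_THRE 0x20

    int main(void)
    {
            unsigned int lsr = 0x01;  /* THRE not set, some other status bit is */

            printf("%u\n", !lsr & UART_LSR_THRE);    /* 0: old loop falls through at once */
            printf("%u\n", !(lsr & UART_LSR_THRE));  /* 1: fixed loop keeps waiting */
            return 0;
    }
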
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 9ce12cb2cebc..a8c116b80bff 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/console.h> | 42 | #include <linux/console.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/serial_sci.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_CPU_FREQ | 46 | #ifdef CONFIG_CPU_FREQ |
46 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
@@ -54,7 +55,6 @@ | |||
54 | #include <asm/kgdb.h> | 55 | #include <asm/kgdb.h> |
55 | #endif | 56 | #endif |
56 | 57 | ||
57 | #include <asm/sci.h> | ||
58 | #include "sh-sci.h" | 58 | #include "sh-sci.h" |
59 | 59 | ||
60 | struct sci_port { | 60 | struct sci_port { |
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index 9cfcfd8dad5e..617efb1640b1 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Core maple bus functionality | 2 | * Core maple bus functionality |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Adrian McMenamin | 4 | * Copyright (C) 2007, 2008 Adrian McMenamin |
5 | * | 5 | * |
6 | * Based on 2.4 code by: | 6 | * Based on 2.4 code by: |
7 | * | 7 | * |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/module.h> | ||
22 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
24 | #include <linux/io.h> | 23 | #include <linux/io.h> |
@@ -54,7 +53,7 @@ static struct device maple_bus; | |||
54 | static int subdevice_map[MAPLE_PORTS]; | 53 | static int subdevice_map[MAPLE_PORTS]; |
55 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; | 54 | static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr; |
56 | static unsigned long maple_pnp_time; | 55 | static unsigned long maple_pnp_time; |
57 | static int started, scanning, liststatus, realscan; | 56 | static int started, scanning, liststatus, fullscan; |
58 | static struct kmem_cache *maple_queue_cache; | 57 | static struct kmem_cache *maple_queue_cache; |
59 | 58 | ||
60 | struct maple_device_specify { | 59 | struct maple_device_specify { |
@@ -62,6 +61,9 @@ struct maple_device_specify { | |||
62 | int unit; | 61 | int unit; |
63 | }; | 62 | }; |
64 | 63 | ||
64 | static bool checked[4]; | ||
65 | static struct maple_device *baseunits[4]; | ||
66 | |||
65 | /** | 67 | /** |
66 | * maple_driver_register - register a device driver | 68 | * maple_driver_register - register a device driver |
67 | * automatically makes the driver bus a maple bus | 69 | * automatically makes the driver bus a maple bus |
@@ -309,11 +311,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
309 | else | 311 | else |
310 | break; | 312 | break; |
311 | 313 | ||
312 | if (realscan) { | 314 | printk(KERN_INFO "Maple device detected: %s\n", |
313 | printk(KERN_INFO "Maple device detected: %s\n", | 315 | mdev->product_name); |
314 | mdev->product_name); | 316 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); |
315 | printk(KERN_INFO "Maple device: %s\n", mdev->product_licence); | ||
316 | } | ||
317 | 317 | ||
318 | function = be32_to_cpu(mdev->devinfo.function); | 318 | function = be32_to_cpu(mdev->devinfo.function); |
319 | 319 | ||
@@ -323,10 +323,9 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
323 | mdev->driver = &maple_dummy_driver; | 323 | mdev->driver = &maple_dummy_driver; |
324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); | 324 | sprintf(mdev->dev.bus_id, "%d:0.port", mdev->port); |
325 | } else { | 325 | } else { |
326 | if (realscan) | 326 | printk(KERN_INFO |
327 | printk(KERN_INFO | 327 | "Maple bus at (%d, %d): Function 0x%lX\n", |
328 | "Maple bus at (%d, %d): Function 0x%lX\n", | 328 | mdev->port, mdev->unit, function); |
329 | mdev->port, mdev->unit, function); | ||
330 | 329 | ||
331 | matched = | 330 | matched = |
332 | bus_for_each_drv(&maple_bus_type, NULL, mdev, | 331 | bus_for_each_drv(&maple_bus_type, NULL, mdev, |
@@ -334,9 +333,8 @@ static void maple_attach_driver(struct maple_device *mdev) | |||
334 | 333 | ||
335 | if (matched == 0) { | 334 | if (matched == 0) { |
336 | /* Driver does not exist yet */ | 335 | /* Driver does not exist yet */ |
337 | if (realscan) | 336 | printk(KERN_INFO |
338 | printk(KERN_INFO | 337 | "No maple driver found.\n"); |
339 | "No maple driver found.\n"); | ||
340 | mdev->driver = &maple_dummy_driver; | 338 | mdev->driver = &maple_dummy_driver; |
341 | } | 339 | } |
342 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, | 340 | sprintf(mdev->dev.bus_id, "%d:0%d.%lX", mdev->port, |
@@ -472,9 +470,12 @@ static void maple_response_none(struct maple_device *mdev, | |||
472 | maple_detach_driver(mdev); | 470 | maple_detach_driver(mdev); |
473 | return; | 471 | return; |
474 | } | 472 | } |
475 | if (!started) { | 473 | if (!started || !fullscan) { |
476 | printk(KERN_INFO "No maple devices attached to port %d\n", | 474 | if (checked[mdev->port] == false) { |
477 | mdev->port); | 475 | checked[mdev->port] = true; |
476 | printk(KERN_INFO "No maple devices attached" | ||
477 | " to port %d\n", mdev->port); | ||
478 | } | ||
478 | return; | 479 | return; |
479 | } | 480 | } |
480 | maple_clean_submap(mdev); | 481 | maple_clean_submap(mdev); |
@@ -485,8 +486,14 @@ static void maple_response_devinfo(struct maple_device *mdev, | |||
485 | char *recvbuf) | 486 | char *recvbuf) |
486 | { | 487 | { |
487 | char submask; | 488 | char submask; |
488 | if ((!started) || (scanning == 2)) { | 489 | if (!started || (scanning == 2) || !fullscan) { |
489 | maple_attach_driver(mdev); | 490 | if ((mdev->unit == 0) && (checked[mdev->port] == false)) { |
491 | checked[mdev->port] = true; | ||
492 | maple_attach_driver(mdev); | ||
493 | } else { | ||
494 | if (mdev->unit != 0) | ||
495 | maple_attach_driver(mdev); | ||
496 | } | ||
490 | return; | 497 | return; |
491 | } | 498 | } |
492 | if (mdev->unit == 0) { | 499 | if (mdev->unit == 0) { |
@@ -505,6 +512,7 @@ static void maple_dma_handler(struct work_struct *work) | |||
505 | struct maple_device *dev; | 512 | struct maple_device *dev; |
506 | char *recvbuf; | 513 | char *recvbuf; |
507 | enum maple_code code; | 514 | enum maple_code code; |
515 | int i; | ||
508 | 516 | ||
509 | if (!maple_dma_done()) | 517 | if (!maple_dma_done()) |
510 | return; | 518 | return; |
@@ -557,6 +565,19 @@ static void maple_dma_handler(struct work_struct *work) | |||
557 | } else | 565 | } else |
558 | scanning = 0; | 566 | scanning = 0; |
559 | 567 | ||
568 | if (!fullscan) { | ||
569 | fullscan = 1; | ||
570 | for (i = 0; i < MAPLE_PORTS; i++) { | ||
571 | if (checked[i] == false) { | ||
572 | fullscan = 0; | ||
573 | dev = baseunits[i]; | ||
574 | dev->mq->command = | ||
575 | MAPLE_COMMAND_DEVINFO; | ||
576 | dev->mq->length = 0; | ||
577 | maple_add_packet(dev->mq); | ||
578 | } | ||
579 | } | ||
580 | } | ||
560 | if (started == 0) | 581 | if (started == 0) |
561 | started = 1; | 582 | started = 1; |
562 | } | 583 | } |
@@ -694,7 +715,9 @@ static int __init maple_bus_init(void) | |||
694 | 715 | ||
695 | /* setup maple ports */ | 716 | /* setup maple ports */ |
696 | for (i = 0; i < MAPLE_PORTS; i++) { | 717 | for (i = 0; i < MAPLE_PORTS; i++) { |
718 | checked[i] = false; | ||
697 | mdev[i] = maple_alloc_dev(i, 0); | 719 | mdev[i] = maple_alloc_dev(i, 0); |
720 | baseunits[i] = mdev[i]; | ||
698 | if (!mdev[i]) { | 721 | if (!mdev[i]) { |
699 | while (i-- > 0) | 722 | while (i-- > 0) |
700 | maple_free_dev(mdev[i]); | 723 | maple_free_dev(mdev[i]); |
@@ -703,12 +726,9 @@ static int __init maple_bus_init(void) | |||
703 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; | 726 | mdev[i]->mq->command = MAPLE_COMMAND_DEVINFO; |
704 | mdev[i]->mq->length = 0; | 727 | mdev[i]->mq->length = 0; |
705 | maple_add_packet(mdev[i]->mq); | 728 | maple_add_packet(mdev[i]->mq); |
706 | /* delay aids hardware detection */ | ||
707 | mdelay(5); | ||
708 | subdevice_map[i] = 0; | 729 | subdevice_map[i] = 0; |
709 | } | 730 | } |
710 | 731 | ||
711 | realscan = 1; | ||
712 | /* setup maplebus hardware */ | 732 | /* setup maplebus hardware */ |
713 | maplebus_dma_reset(); | 733 | maplebus_dma_reset(); |
714 | /* initial detection */ | 734 | /* initial detection */ |
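Taken together, the maple.c hunks replace the fixed 5 ms settling delay per port with an explicit rescan: each port gets a checked[] flag and a baseunits[] pointer to its port device, and after every DMA pass the handler re-issues a DEVINFO request to any port that has not answered yet, only leaving fullscan set once every port has been seen. Condensed into one place (simplified sketch, names as in the patch), that rescan pass looks like:

    /* Sketch of the post-scan pass added to maple_dma_handler(). */
    static void rescan_silent_ports_sketch(void)
    {
            int i;

            fullscan = 1;
            for (i = 0; i < MAPLE_PORTS; i++) {
                    if (checked[i])
                            continue;
                    fullscan = 0;   /* at least one port is still silent */
                    baseunits[i]->mq->command = MAPLE_COMMAND_DEVINFO;
                    baseunits[i]->mq->length = 0;
                    maple_add_packet(baseunits[i]->mq);
            }
    }
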
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 253ed5682a6d..a86315a0c5b8 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -42,6 +42,7 @@ struct mpc52xx_psc_spi { | |||
42 | 42 | ||
43 | /* driver internal data */ | 43 | /* driver internal data */ |
44 | struct mpc52xx_psc __iomem *psc; | 44 | struct mpc52xx_psc __iomem *psc; |
45 | struct mpc52xx_psc_fifo __iomem *fifo; | ||
45 | unsigned int irq; | 46 | unsigned int irq; |
46 | u8 bits_per_word; | 47 | u8 bits_per_word; |
47 | u8 busy; | 48 | u8 busy; |
@@ -139,6 +140,7 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
139 | { | 140 | { |
140 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); | 141 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); |
141 | struct mpc52xx_psc __iomem *psc = mps->psc; | 142 | struct mpc52xx_psc __iomem *psc = mps->psc; |
143 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
142 | unsigned rb = 0; /* number of bytes receieved */ | 144 | unsigned rb = 0; /* number of bytes receieved */ |
143 | unsigned sb = 0; /* number of bytes sent */ | 145 | unsigned sb = 0; /* number of bytes sent */ |
144 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; | 146 | unsigned char *rx_buf = (unsigned char *)t->rx_buf; |
@@ -190,11 +192,11 @@ static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
190 | out_8(&psc->mode, 0); | 192 | out_8(&psc->mode, 0); |
191 | } else { | 193 | } else { |
192 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 194 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
193 | out_be16(&psc->rfalarm, rfalarm); | 195 | out_be16(&fifo->rfalarm, rfalarm); |
194 | } | 196 | } |
195 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); | 197 | out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY); |
196 | wait_for_completion(&mps->done); | 198 | wait_for_completion(&mps->done); |
197 | recv_at_once = in_be16(&psc->rfnum); | 199 | recv_at_once = in_be16(&fifo->rfnum); |
198 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); | 200 | dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once); |
199 | 201 | ||
200 | send_at_once = recv_at_once; | 202 | send_at_once = recv_at_once; |
@@ -331,6 +333,7 @@ static void mpc52xx_psc_spi_cleanup(struct spi_device *spi) | |||
331 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | 333 | static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) |
332 | { | 334 | { |
333 | struct mpc52xx_psc __iomem *psc = mps->psc; | 335 | struct mpc52xx_psc __iomem *psc = mps->psc; |
336 | struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; | ||
334 | u32 mclken_div; | 337 | u32 mclken_div; |
335 | int ret = 0; | 338 | int ret = 0; |
336 | 339 | ||
@@ -346,7 +349,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps) | |||
346 | /* Disable interrupts, interrupts are based on alarm level */ | 349 | /* Disable interrupts, interrupts are based on alarm level */ |
347 | out_be16(&psc->mpc52xx_psc_imr, 0); | 350 | out_be16(&psc->mpc52xx_psc_imr, 0); |
348 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); | 351 | out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); |
349 | out_8(&psc->rfcntl, 0); | 352 | out_8(&fifo->rfcntl, 0); |
350 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); | 353 | out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL); |
351 | 354 | ||
352 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ | 355 | /* Configure 8bit codec mode as a SPI master and use EOF flags */ |
@@ -419,6 +422,8 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
419 | ret = -EFAULT; | 422 | ret = -EFAULT; |
420 | goto free_master; | 423 | goto free_master; |
421 | } | 424 | } |
425 | /* On the 5200, fifo regs are immediately adjacent to the psc regs */ | ||
426 | mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc); | ||
422 | 427 | ||
423 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", | 428 | ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi", |
424 | mps); | 429 | mps); |
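The mpc52xx_psc_spi change relies on the MPC5200 register layout in which the FIFO register block sits directly after the PSC block, so a single ioremapped base plus sizeof(struct mpc52xx_psc) reaches it. A small user-space sketch of that pointer arithmetic, using placeholder register structs rather than the real 5200 layout:

#include <stdint.h>
#include <stdio.h>

struct psc_regs  { uint8_t mode; uint8_t command; uint16_t imr; };	/* placeholder */
struct fifo_regs { uint8_t rfcntl; uint8_t pad; uint16_t rfalarm; uint16_t rfnum; };

int main(void)
{
	static uint32_t mmio[16];		/* stands in for the ioremapped window */
	struct psc_regs  *psc  = (struct psc_regs *)mmio;
	/* The FIFO block is mapped directly after the PSC block, so one
	 * base pointer plus sizeof() reaches it. */
	struct fifo_regs *fifo = (struct fifo_regs *)((char *)psc + sizeof(*psc));

	fifo->rfcntl = 0;			/* same spot out_8(&fifo->rfcntl, 0) pokes */
	printf("fifo block starts %zu bytes past the psc block\n",
	       (size_t)((char *)fifo - (char *)psc));
	return 0;
}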
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig index 78fd33125e02..adea792fb675 100644 --- a/drivers/ssb/Kconfig +++ b/drivers/ssb/Kconfig | |||
@@ -35,6 +35,11 @@ config SSB_PCIHOST | |||
35 | 35 | ||
36 | If unsure, say Y | 36 | If unsure, say Y |
37 | 37 | ||
38 | config SSB_B43_PCI_BRIDGE | ||
39 | bool | ||
40 | depends on SSB_PCIHOST | ||
41 | default n | ||
42 | |||
38 | config SSB_PCMCIAHOST_POSSIBLE | 43 | config SSB_PCMCIAHOST_POSSIBLE |
39 | bool | 44 | bool |
40 | depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL | 45 | depends on SSB && (PCMCIA = y || PCMCIA = SSB) && EXPERIMENTAL |
diff --git a/drivers/ssb/Makefile b/drivers/ssb/Makefile index e235144add7c..de94c2eb7a37 100644 --- a/drivers/ssb/Makefile +++ b/drivers/ssb/Makefile | |||
@@ -14,6 +14,6 @@ ssb-$(CONFIG_SSB_DRIVER_PCICORE) += driver_pcicore.o | |||
14 | 14 | ||
15 | # b43 pci-ssb-bridge driver | 15 | # b43 pci-ssb-bridge driver |
16 | # Not strictly a part of SSB, but kept here for convenience | 16 | # Not strictly a part of SSB, but kept here for convenience |
17 | ssb-$(CONFIG_SSB_PCIHOST) += b43_pci_bridge.o | 17 | ssb-$(CONFIG_SSB_B43_PCI_BRIDGE) += b43_pci_bridge.o |
18 | 18 | ||
19 | obj-$(CONFIG_SSB) += ssb.o | 19 | obj-$(CONFIG_SSB) += ssb.o |
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index 6d99a9880055..74b9a8aea52b 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c | |||
@@ -111,7 +111,10 @@ static void __init ssb_fixup_pcibridge(struct pci_dev *dev) | |||
111 | 111 | ||
112 | /* Enable PCI bridge bus mastering and memory space */ | 112 | /* Enable PCI bridge bus mastering and memory space */ |
113 | pci_set_master(dev); | 113 | pci_set_master(dev); |
114 | pcibios_enable_device(dev, ~0); | 114 | if (pcibios_enable_device(dev, ~0) < 0) { |
115 | ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n"); | ||
116 | return; | ||
117 | } | ||
115 | 118 | ||
116 | /* Enable PCI bridge BAR1 prefetch and burst */ | 119 | /* Enable PCI bridge BAR1 prefetch and burst */ |
117 | pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); | 120 | pci_write_config_dword(dev, SSB_BAR1_CONTROL, 3); |
@@ -393,7 +396,7 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc) | |||
393 | chipid_top != 0x5300) | 396 | chipid_top != 0x5300) |
394 | return 0; | 397 | return 0; |
395 | 398 | ||
396 | if (bus->sprom.r1.boardflags_lo & SSB_PCICORE_BFL_NOPCI) | 399 | if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI) |
397 | return 0; | 400 | return 0; |
398 | 401 | ||
399 | /* The 200-pin BCM4712 package does not bond out PCI. Even when | 402 | /* The 200-pin BCM4712 package does not bond out PCI. Even when |
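The driver_pcicore hunk stops ignoring the return value of pcibios_enable_device() and bails out with an error message instead. A hedged kernel-style sketch of the same "check the enable, log, and back out" pattern using the generic pci_enable_device() helper; the function name and log text are illustrative, not the SSB code:

#include <linux/pci.h>
#include <linux/kernel.h>

static void example_bridge_setup(struct pci_dev *dev)
{
	pci_set_master(dev);

	if (pci_enable_device(dev) < 0) {
		printk(KERN_ERR "PCI: bridge enable failed\n");
		return;			/* leave the device unconfigured on failure */
	}

	/* BAR / latency programming would follow here */
}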
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index a789364264a6..21eca2b5118b 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h | |||
@@ -120,10 +120,10 @@ extern int ssb_devices_thaw(struct ssb_bus *bus); | |||
120 | extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); | 120 | extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); |
121 | 121 | ||
122 | /* b43_pci_bridge.c */ | 122 | /* b43_pci_bridge.c */ |
123 | #ifdef CONFIG_SSB_PCIHOST | 123 | #ifdef CONFIG_SSB_B43_PCI_BRIDGE |
124 | extern int __init b43_pci_ssb_bridge_init(void); | 124 | extern int __init b43_pci_ssb_bridge_init(void); |
125 | extern void __exit b43_pci_ssb_bridge_exit(void); | 125 | extern void __exit b43_pci_ssb_bridge_exit(void); |
126 | #else /* CONFIG_SSB_PCIHOST */ | 126 | #else /* CONFIG_SSB_B43_PCI_BRIDGE */ |
127 | static inline int b43_pci_ssb_bridge_init(void) | 127 | static inline int b43_pci_ssb_bridge_init(void) |
128 | { | 128 | { |
129 | return 0; | 129 | return 0; |
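ssb_private.h follows the usual config-gated stub idiom: when the bridge is compiled in, real prototypes are declared; otherwise callers link against no-op static inlines. A generic sketch of that pattern with a hypothetical CONFIG_EXAMPLE_BRIDGE symbol:

#ifdef CONFIG_EXAMPLE_BRIDGE
extern int  example_bridge_init(void);
extern void example_bridge_exit(void);
#else /* !CONFIG_EXAMPLE_BRIDGE */
static inline int example_bridge_init(void)
{
	return 0;		/* nothing to set up when the bridge is not built */
}
static inline void example_bridge_exit(void)
{
}
#endif /* CONFIG_EXAMPLE_BRIDGE */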
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 5c33cdb9cac7..a2b0aa48b8ea 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
@@ -87,12 +87,13 @@ config USB_DYNAMIC_MINORS | |||
87 | If you are unsure about this, say N here. | 87 | If you are unsure about this, say N here. |
88 | 88 | ||
89 | config USB_SUSPEND | 89 | config USB_SUSPEND |
90 | bool "USB selective suspend/resume and wakeup (EXPERIMENTAL)" | 90 | bool "USB selective suspend/resume and wakeup" |
91 | depends on USB && PM && EXPERIMENTAL | 91 | depends on USB && PM |
92 | help | 92 | help |
93 | If you say Y here, you can use driver calls or the sysfs | 93 | If you say Y here, you can use driver calls or the sysfs |
94 | "power/state" file to suspend or resume individual USB | 94 | "power/level" file to suspend or resume individual USB |
95 | peripherals. | 95 | peripherals and to enable or disable autosuspend (see |
96 | Documentation/usb/power-management.txt for more details). | ||
96 | 97 | ||
97 | Also, USB "remote wakeup" signaling is supported, whereby some | 98 | Also, USB "remote wakeup" signaling is supported, whereby some |
98 | USB devices (like keyboards and network adapters) can wake up | 99 | USB devices (like keyboards and network adapters) can wake up |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f90ab5e94c58..d9d1eb19f2a1 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -28,35 +28,38 @@ | |||
28 | * devices is broken... | 28 | * devices is broken... |
29 | */ | 29 | */ |
30 | static const struct usb_device_id usb_quirk_list[] = { | 30 | static const struct usb_device_id usb_quirk_list[] = { |
31 | /* Action Semiconductor flash disk */ | ||
32 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255}, | ||
33 | |||
34 | /* CBM - Flash disk */ | 31 | /* CBM - Flash disk */ |
35 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, | 32 | { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME }, |
33 | |||
36 | /* HP 5300/5370C scanner */ | 34 | /* HP 5300/5370C scanner */ |
37 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = USB_QUIRK_STRING_FETCH_255 }, | 35 | { USB_DEVICE(0x03f0, 0x0701), .driver_info = |
36 | USB_QUIRK_STRING_FETCH_255 }, | ||
38 | 37 | ||
39 | /* Creative SB Audigy 2 NX */ | 38 | /* Creative SB Audigy 2 NX */ |
40 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 39 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
41 | 40 | ||
41 | /* Philips PSC805 audio device */ | ||
42 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
43 | |||
42 | /* Roland SC-8820 */ | 44 | /* Roland SC-8820 */ |
43 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
44 | 46 | ||
45 | /* Edirol SD-20 */ | 47 | /* Edirol SD-20 */ |
46 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, | 48 | { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, |
47 | 49 | ||
48 | /* INTEL VALUE SSD */ | ||
49 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
50 | |||
51 | /* M-Systems Flash Disk Pioneers */ | 50 | /* M-Systems Flash Disk Pioneers */ |
52 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 52 | ||
54 | /* Philips PSC805 audio device */ | 53 | /* Action Semiconductor flash disk */ |
55 | { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, | 54 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
55 | USB_QUIRK_STRING_FETCH_255 }, | ||
56 | 56 | ||
57 | /* SKYMEDI USB_DRIVE */ | 57 | /* SKYMEDI USB_DRIVE */ |
58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, | 58 | { USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME }, |
59 | 59 | ||
60 | /* INTEL VALUE SSD */ | ||
61 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
62 | |||
60 | { } /* terminating entry must be last */ | 63 | { } /* terminating entry must be last */ |
61 | }; | 64 | }; |
62 | 65 | ||
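The quirks hunk reorders the table so entries are sorted by vendor and product ID, which makes future additions easier to place and keeps the mandatory empty terminator last. A minimal sketch of such a table, reusing two entries from the hunk above:

#include <linux/usb.h>
#include <linux/usb/quirks.h>

static const struct usb_device_id example_quirk_list[] = {
	/* CBM - Flash disk */
	{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },

	/* INTEL VALUE SSD */
	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },

	{ }	/* terminating entry must be last */
};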
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 4e984060c984..1f0db51190cc 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, | |||
99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); | 99 | EXPORT_SYMBOL_GPL(usb_ifnum_to_if); |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * usb_altnum_to_altsetting - get the altsetting structure with a given | 102 | * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. |
103 | * alternate setting number. | ||
104 | * @intf: the interface containing the altsetting in question | 103 | * @intf: the interface containing the altsetting in question |
105 | * @altnum: the desired alternate setting number | 104 | * @altnum: the desired alternate setting number |
106 | * | 105 | * |
@@ -234,7 +233,7 @@ static int ksuspend_usb_init(void) | |||
234 | * singlethreaded. Its job doesn't justify running on more | 233 | * singlethreaded. Its job doesn't justify running on more |
235 | * than one CPU. | 234 | * than one CPU. |
236 | */ | 235 | */ |
237 | ksuspend_usb_wq = create_singlethread_workqueue("ksuspend_usbd"); | 236 | ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); |
238 | if (!ksuspend_usb_wq) | 237 | if (!ksuspend_usb_wq) |
239 | return -ENOMEM; | 238 | return -ENOMEM; |
240 | return 0; | 239 | return 0; |
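Switching ksuspend_usbd to a freezeable workqueue keeps its work items from running while tasks are frozen for system suspend. A minimal sketch of allocating such a queue and failing initialization when the allocation does not succeed; the queue and function names here are generic:

#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *example_wq;

static int __init example_wq_init(void)
{
	/* freezeable: queued work is held off while the system is frozen */
	example_wq = create_freezeable_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}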
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf); | |||
442 | */ | 441 | */ |
443 | 442 | ||
444 | /** | 443 | /** |
445 | * usb_lock_device_for_reset - cautiously acquire the lock for a | 444 | * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure |
446 | * usb device structure | ||
447 | * @udev: device that's being locked | 445 | * @udev: device that's being locked |
448 | * @iface: interface bound to the driver making the request (optional) | 446 | * @iface: interface bound to the driver making the request (optional) |
449 | * | 447 | * |
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4f6bfa100f2a..2c32bd08ee7d 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c | |||
@@ -92,7 +92,6 @@ struct printer_dev { | |||
92 | u8 *current_rx_buf; | 92 | u8 *current_rx_buf; |
93 | u8 printer_status; | 93 | u8 printer_status; |
94 | u8 reset_printer; | 94 | u8 reset_printer; |
95 | struct class_device *printer_class_dev; | ||
96 | struct cdev printer_cdev; | 95 | struct cdev printer_cdev; |
97 | struct device *pdev; | 96 | struct device *pdev; |
98 | u8 printer_cdev_open; | 97 | u8 printer_cdev_open; |
diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 4402d6f042d9..096c41cc40d1 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c | |||
@@ -103,6 +103,12 @@ static const char ep0name [] = "ep0"; | |||
103 | #error "Can't configure both IXP and PXA" | 103 | #error "Can't configure both IXP and PXA" |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | /* IXP doesn't yet support <linux/clk.h> */ | ||
107 | #define clk_get(dev,name) NULL | ||
108 | #define clk_enable(clk) do { } while (0) | ||
109 | #define clk_disable(clk) do { } while (0) | ||
110 | #define clk_put(clk) do { } while (0) | ||
111 | |||
106 | #endif | 112 | #endif |
107 | 113 | ||
108 | #include "pxa2xx_udc.h" | 114 | #include "pxa2xx_udc.h" |
@@ -934,20 +940,31 @@ static void udc_disable(struct pxa2xx_udc *); | |||
934 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not | 940 | /* We disable the UDC -- and its 48 MHz clock -- whenever it's not |
935 | * in active use. | 941 | * in active use. |
936 | */ | 942 | */ |
937 | static int pullup(struct pxa2xx_udc *udc, int is_active) | 943 | static int pullup(struct pxa2xx_udc *udc) |
938 | { | 944 | { |
939 | is_active = is_active && udc->vbus && udc->pullup; | 945 | int is_active = udc->vbus && udc->pullup && !udc->suspended; |
940 | DMSG("%s\n", is_active ? "active" : "inactive"); | 946 | DMSG("%s\n", is_active ? "active" : "inactive"); |
941 | if (is_active) | 947 | if (is_active) { |
942 | udc_enable(udc); | 948 | if (!udc->active) { |
943 | else { | 949 | udc->active = 1; |
944 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | 950 | /* Enable clock for USB device */ |
945 | DMSG("disconnect %s\n", udc->driver | 951 | clk_enable(udc->clk); |
946 | ? udc->driver->driver.name | 952 | udc_enable(udc); |
947 | : "(no driver)"); | ||
948 | stop_activity(udc, udc->driver); | ||
949 | } | 953 | } |
950 | udc_disable(udc); | 954 | } else { |
955 | if (udc->active) { | ||
956 | if (udc->gadget.speed != USB_SPEED_UNKNOWN) { | ||
957 | DMSG("disconnect %s\n", udc->driver | ||
958 | ? udc->driver->driver.name | ||
959 | : "(no driver)"); | ||
960 | stop_activity(udc, udc->driver); | ||
961 | } | ||
962 | udc_disable(udc); | ||
963 | /* Disable clock for USB device */ | ||
964 | clk_disable(udc->clk); | ||
965 | udc->active = 0; | ||
966 | } | ||
967 | |||
951 | } | 968 | } |
952 | return 0; | 969 | return 0; |
953 | } | 970 | } |
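The reworked pullup() computes the desired state from vbus, the pullup request and the new suspended flag, and uses the active bit to make clock enable/disable idempotent. A self-contained plain-C sketch of that state machine, with printf stubs standing in for udc_enable()/udc_disable() and the clk calls:

#include <stdio.h>

struct udc {
	int vbus, pullup, suspended;	/* inputs */
	int active;			/* whether clock + controller are on */
};

static void clk_on(void)  { puts("clk on");  }
static void clk_off(void) { puts("clk off"); }
static void udc_on(void)  { puts("udc on");  }
static void udc_off(void) { puts("udc off"); }

static void pullup(struct udc *udc)
{
	int want = udc->vbus && udc->pullup && !udc->suspended;

	if (want && !udc->active) {
		udc->active = 1;
		clk_on();		/* enable the 48 MHz clock first */
		udc_on();
	} else if (!want && udc->active) {
		udc_off();
		clk_off();		/* clock goes away only after disable */
		udc->active = 0;
	}
}

int main(void)
{
	struct udc u = { .vbus = 1, .pullup = 1 };

	pullup(&u);			/* turns on once */
	pullup(&u);			/* no-op: already active */
	u.suspended = 1;
	pullup(&u);			/* turns off */
	return 0;
}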
@@ -958,9 +975,9 @@ static int pxa2xx_udc_vbus_session(struct usb_gadget *_gadget, int is_active) | |||
958 | struct pxa2xx_udc *udc; | 975 | struct pxa2xx_udc *udc; |
959 | 976 | ||
960 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); | 977 | udc = container_of(_gadget, struct pxa2xx_udc, gadget); |
961 | udc->vbus = is_active = (is_active != 0); | 978 | udc->vbus = (is_active != 0); |
962 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); | 979 | DMSG("vbus %s\n", is_active ? "supplied" : "inactive"); |
963 | pullup(udc, is_active); | 980 | pullup(udc); |
964 | return 0; | 981 | return 0; |
965 | } | 982 | } |
966 | 983 | ||
@@ -975,9 +992,8 @@ static int pxa2xx_udc_pullup(struct usb_gadget *_gadget, int is_active) | |||
975 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 992 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
976 | return -EOPNOTSUPP; | 993 | return -EOPNOTSUPP; |
977 | 994 | ||
978 | is_active = (is_active != 0); | 995 | udc->pullup = (is_active != 0); |
979 | udc->pullup = is_active; | 996 | pullup(udc); |
980 | pullup(udc, is_active); | ||
981 | return 0; | 997 | return 0; |
982 | } | 998 | } |
983 | 999 | ||
@@ -997,7 +1013,7 @@ static const struct usb_gadget_ops pxa2xx_udc_ops = { | |||
997 | #ifdef CONFIG_USB_GADGET_DEBUG_FS | 1013 | #ifdef CONFIG_USB_GADGET_DEBUG_FS |
998 | 1014 | ||
999 | static int | 1015 | static int |
1000 | udc_seq_show(struct seq_file *m, void *d) | 1016 | udc_seq_show(struct seq_file *m, void *_d) |
1001 | { | 1017 | { |
1002 | struct pxa2xx_udc *dev = m->private; | 1018 | struct pxa2xx_udc *dev = m->private; |
1003 | unsigned long flags; | 1019 | unsigned long flags; |
@@ -1146,11 +1162,6 @@ static void udc_disable(struct pxa2xx_udc *dev) | |||
1146 | 1162 | ||
1147 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1163 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1148 | 1164 | ||
1149 | #ifdef CONFIG_ARCH_PXA | ||
1150 | /* Disable clock for USB device */ | ||
1151 | clk_disable(dev->clk); | ||
1152 | #endif | ||
1153 | |||
1154 | ep0_idle (dev); | 1165 | ep0_idle (dev); |
1155 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 1166 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
1156 | } | 1167 | } |
@@ -1191,11 +1202,6 @@ static void udc_enable (struct pxa2xx_udc *dev) | |||
1191 | { | 1202 | { |
1192 | udc_clear_mask_UDCCR(UDCCR_UDE); | 1203 | udc_clear_mask_UDCCR(UDCCR_UDE); |
1193 | 1204 | ||
1194 | #ifdef CONFIG_ARCH_PXA | ||
1195 | /* Enable clock for USB device */ | ||
1196 | clk_enable(dev->clk); | ||
1197 | #endif | ||
1198 | |||
1199 | /* try to clear these bits before we enable the udc */ | 1205 | /* try to clear these bits before we enable the udc */ |
1200 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); | 1206 | udc_ack_int_UDCCR(UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR); |
1201 | 1207 | ||
@@ -1286,7 +1292,7 @@ fail: | |||
1286 | * for set_configuration as well as eventual disconnect. | 1292 | * for set_configuration as well as eventual disconnect. |
1287 | */ | 1293 | */ |
1288 | DMSG("registered gadget driver '%s'\n", driver->driver.name); | 1294 | DMSG("registered gadget driver '%s'\n", driver->driver.name); |
1289 | pullup(dev, 1); | 1295 | pullup(dev); |
1290 | dump_state(dev); | 1296 | dump_state(dev); |
1291 | return 0; | 1297 | return 0; |
1292 | } | 1298 | } |
@@ -1329,7 +1335,8 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | |||
1329 | return -EINVAL; | 1335 | return -EINVAL; |
1330 | 1336 | ||
1331 | local_irq_disable(); | 1337 | local_irq_disable(); |
1332 | pullup(dev, 0); | 1338 | dev->pullup = 0; |
1339 | pullup(dev); | ||
1333 | stop_activity(dev, driver); | 1340 | stop_activity(dev, driver); |
1334 | local_irq_enable(); | 1341 | local_irq_enable(); |
1335 | 1342 | ||
@@ -2131,13 +2138,11 @@ static int __init pxa2xx_udc_probe(struct platform_device *pdev) | |||
2131 | if (irq < 0) | 2138 | if (irq < 0) |
2132 | return -ENODEV; | 2139 | return -ENODEV; |
2133 | 2140 | ||
2134 | #ifdef CONFIG_ARCH_PXA | ||
2135 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); | 2141 | dev->clk = clk_get(&pdev->dev, "UDCCLK"); |
2136 | if (IS_ERR(dev->clk)) { | 2142 | if (IS_ERR(dev->clk)) { |
2137 | retval = PTR_ERR(dev->clk); | 2143 | retval = PTR_ERR(dev->clk); |
2138 | goto err_clk; | 2144 | goto err_clk; |
2139 | } | 2145 | } |
2140 | #endif | ||
2141 | 2146 | ||
2142 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, | 2147 | pr_debug("%s: IRQ %d%s%s\n", driver_name, irq, |
2143 | dev->has_cfr ? "" : " (!cfr)", | 2148 | dev->has_cfr ? "" : " (!cfr)", |
@@ -2250,10 +2255,8 @@ lubbock_fail0: | |||
2250 | if (dev->mach->gpio_vbus) | 2255 | if (dev->mach->gpio_vbus) |
2251 | gpio_free(dev->mach->gpio_vbus); | 2256 | gpio_free(dev->mach->gpio_vbus); |
2252 | err_gpio_vbus: | 2257 | err_gpio_vbus: |
2253 | #ifdef CONFIG_ARCH_PXA | ||
2254 | clk_put(dev->clk); | 2258 | clk_put(dev->clk); |
2255 | err_clk: | 2259 | err_clk: |
2256 | #endif | ||
2257 | return retval; | 2260 | return retval; |
2258 | } | 2261 | } |
2259 | 2262 | ||
@@ -2269,7 +2272,9 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2269 | if (dev->driver) | 2272 | if (dev->driver) |
2270 | return -EBUSY; | 2273 | return -EBUSY; |
2271 | 2274 | ||
2272 | udc_disable(dev); | 2275 | dev->pullup = 0; |
2276 | pullup(dev); | ||
2277 | |||
2273 | remove_debug_files(dev); | 2278 | remove_debug_files(dev); |
2274 | 2279 | ||
2275 | if (dev->got_irq) { | 2280 | if (dev->got_irq) { |
@@ -2289,9 +2294,7 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2289 | if (dev->mach->gpio_pullup) | 2294 | if (dev->mach->gpio_pullup) |
2290 | gpio_free(dev->mach->gpio_pullup); | 2295 | gpio_free(dev->mach->gpio_pullup); |
2291 | 2296 | ||
2292 | #ifdef CONFIG_ARCH_PXA | ||
2293 | clk_put(dev->clk); | 2297 | clk_put(dev->clk); |
2294 | #endif | ||
2295 | 2298 | ||
2296 | platform_set_drvdata(pdev, NULL); | 2299 | platform_set_drvdata(pdev, NULL); |
2297 | the_controller = NULL; | 2300 | the_controller = NULL; |
@@ -2317,10 +2320,15 @@ static int __exit pxa2xx_udc_remove(struct platform_device *pdev) | |||
2317 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | 2320 | static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) |
2318 | { | 2321 | { |
2319 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2322 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2323 | unsigned long flags; | ||
2320 | 2324 | ||
2321 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) | 2325 | if (!udc->mach->gpio_pullup && !udc->mach->udc_command) |
2322 | WARN("USB host won't detect disconnect!\n"); | 2326 | WARN("USB host won't detect disconnect!\n"); |
2323 | pullup(udc, 0); | 2327 | udc->suspended = 1; |
2328 | |||
2329 | local_irq_save(flags); | ||
2330 | pullup(udc); | ||
2331 | local_irq_restore(flags); | ||
2324 | 2332 | ||
2325 | return 0; | 2333 | return 0; |
2326 | } | 2334 | } |
@@ -2328,8 +2336,12 @@ static int pxa2xx_udc_suspend(struct platform_device *dev, pm_message_t state) | |||
2328 | static int pxa2xx_udc_resume(struct platform_device *dev) | 2336 | static int pxa2xx_udc_resume(struct platform_device *dev) |
2329 | { | 2337 | { |
2330 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); | 2338 | struct pxa2xx_udc *udc = platform_get_drvdata(dev); |
2339 | unsigned long flags; | ||
2331 | 2340 | ||
2332 | pullup(udc, 1); | 2341 | udc->suspended = 0; |
2342 | local_irq_save(flags); | ||
2343 | pullup(udc); | ||
2344 | local_irq_restore(flags); | ||
2333 | 2345 | ||
2334 | return 0; | 2346 | return 0; |
2335 | } | 2347 | } |
diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index b67e3ff5e4eb..e2c19e88c875 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h | |||
@@ -119,7 +119,9 @@ struct pxa2xx_udc { | |||
119 | has_cfr : 1, | 119 | has_cfr : 1, |
120 | req_pending : 1, | 120 | req_pending : 1, |
121 | req_std : 1, | 121 | req_std : 1, |
122 | req_config : 1; | 122 | req_config : 1, |
123 | suspended : 1, | ||
124 | active : 1; | ||
123 | 125 | ||
124 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) | 126 | #define start_watchdog(dev) mod_timer(&dev->timer, jiffies + (HZ/200)) |
125 | struct timer_list timer; | 127 | struct timer_list timer; |
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 776a97f33914..2e49de820b14 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -319,10 +319,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
319 | if (likely (last->urb != urb)) { | 319 | if (likely (last->urb != urb)) { |
320 | ehci_urb_done(ehci, last->urb, last_status); | 320 | ehci_urb_done(ehci, last->urb, last_status); |
321 | count++; | 321 | count++; |
322 | last_status = -EINPROGRESS; | ||
322 | } | 323 | } |
323 | ehci_qtd_free (ehci, last); | 324 | ehci_qtd_free (ehci, last); |
324 | last = NULL; | 325 | last = NULL; |
325 | last_status = -EINPROGRESS; | ||
326 | } | 326 | } |
327 | 327 | ||
328 | /* ignore urbs submitted during completions we reported */ | 328 | /* ignore urbs submitted during completions we reported */ |
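The ehci-q fix moves the reset of last_status inside the branch that actually gives the URB back, so an error accumulated across the qtds of one URB is not lost when an intermediate qtd is freed. A toy plain-C model of that rule, not HCD code:

#include <stdio.h>

#define IN_PROGRESS 1

int main(void)
{
	/* three qtds of the same URB; the middle one fails with -EPROTO */
	int qtd_status[3] = { 0, -71, 0 };
	int last_status = IN_PROGRESS;
	int i;

	for (i = 0; i < 3; i++) {
		if (qtd_status[i] != 0 && last_status == IN_PROGRESS)
			last_status = qtd_status[i];
		/* intermediate qtds are freed here; status is NOT reset */
	}

	printf("URB completes with status %d\n",
	       last_status == IN_PROGRESS ? 0 : last_status);
	last_status = IN_PROGRESS;	/* reset only after the URB was reported */
	return 0;
}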
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 0130fd8571e4..d7071c855758 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c | |||
@@ -911,8 +911,7 @@ static int isp116x_hub_status_data(struct usb_hcd *hcd, char *buf) | |||
911 | buf[0] = 0; | 911 | buf[0] = 0; |
912 | 912 | ||
913 | for (i = 0; i < ports; i++) { | 913 | for (i = 0; i < ports; i++) { |
914 | u32 status = isp116x->rhport[i] = | 914 | u32 status = isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); |
915 | isp116x_read_reg32(isp116x, i ? HCRHPORT2 : HCRHPORT1); | ||
916 | 915 | ||
917 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | 916 | if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
918 | | RH_PS_OCIC | RH_PS_PRSC)) { | 917 | | RH_PS_OCIC | RH_PS_PRSC)) { |
@@ -1031,7 +1030,9 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1031 | DBG("GetPortStatus\n"); | 1030 | DBG("GetPortStatus\n"); |
1032 | if (!wIndex || wIndex > ports) | 1031 | if (!wIndex || wIndex > ports) |
1033 | goto error; | 1032 | goto error; |
1034 | tmp = isp116x->rhport[--wIndex]; | 1033 | spin_lock_irqsave(&isp116x->lock, flags); |
1034 | tmp = isp116x_read_reg32(isp116x, (--wIndex) ? HCRHPORT2 : HCRHPORT1); | ||
1035 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1035 | *(__le32 *) buf = cpu_to_le32(tmp); | 1036 | *(__le32 *) buf = cpu_to_le32(tmp); |
1036 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); | 1037 | DBG("GetPortStatus: port[%d] %08x\n", wIndex + 1, tmp); |
1037 | break; | 1038 | break; |
@@ -1080,8 +1081,6 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1080 | spin_lock_irqsave(&isp116x->lock, flags); | 1081 | spin_lock_irqsave(&isp116x->lock, flags); |
1081 | isp116x_write_reg32(isp116x, wIndex | 1082 | isp116x_write_reg32(isp116x, wIndex |
1082 | ? HCRHPORT2 : HCRHPORT1, tmp); | 1083 | ? HCRHPORT2 : HCRHPORT1, tmp); |
1083 | isp116x->rhport[wIndex] = | ||
1084 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1085 | spin_unlock_irqrestore(&isp116x->lock, flags); | 1084 | spin_unlock_irqrestore(&isp116x->lock, flags); |
1086 | break; | 1085 | break; |
1087 | case SetPortFeature: | 1086 | case SetPortFeature: |
@@ -1095,24 +1094,22 @@ static int isp116x_hub_control(struct usb_hcd *hcd, | |||
1095 | spin_lock_irqsave(&isp116x->lock, flags); | 1094 | spin_lock_irqsave(&isp116x->lock, flags); |
1096 | isp116x_write_reg32(isp116x, wIndex | 1095 | isp116x_write_reg32(isp116x, wIndex |
1097 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); | 1096 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PSS); |
1097 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1098 | break; | 1098 | break; |
1099 | case USB_PORT_FEAT_POWER: | 1099 | case USB_PORT_FEAT_POWER: |
1100 | DBG("USB_PORT_FEAT_POWER\n"); | 1100 | DBG("USB_PORT_FEAT_POWER\n"); |
1101 | spin_lock_irqsave(&isp116x->lock, flags); | 1101 | spin_lock_irqsave(&isp116x->lock, flags); |
1102 | isp116x_write_reg32(isp116x, wIndex | 1102 | isp116x_write_reg32(isp116x, wIndex |
1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); | 1103 | ? HCRHPORT2 : HCRHPORT1, RH_PS_PPS); |
1104 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1104 | break; | 1105 | break; |
1105 | case USB_PORT_FEAT_RESET: | 1106 | case USB_PORT_FEAT_RESET: |
1106 | DBG("USB_PORT_FEAT_RESET\n"); | 1107 | DBG("USB_PORT_FEAT_RESET\n"); |
1107 | root_port_reset(isp116x, wIndex); | 1108 | root_port_reset(isp116x, wIndex); |
1108 | spin_lock_irqsave(&isp116x->lock, flags); | ||
1109 | break; | 1109 | break; |
1110 | default: | 1110 | default: |
1111 | goto error; | 1111 | goto error; |
1112 | } | 1112 | } |
1113 | isp116x->rhport[wIndex] = | ||
1114 | isp116x_read_reg32(isp116x, wIndex ? HCRHPORT2 : HCRHPORT1); | ||
1115 | spin_unlock_irqrestore(&isp116x->lock, flags); | ||
1116 | break; | 1113 | break; |
1117 | 1114 | ||
1118 | default: | 1115 | default: |
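With the cached rhport[] array gone, isp116x reads the live root-hub port register under the driver spinlock whenever GetPortStatus needs it. A kernel-style sketch of that locking pattern; the structure and the read helper are placeholders for the isp116x accessors:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_hc {
	spinlock_t lock;
};

/* stands in for isp116x_read_reg32(isp116x, port ? HCRHPORT2 : HCRHPORT1) */
extern u32 example_read_port_reg(struct example_hc *hc, int port);

static u32 example_get_port_status(struct example_hc *hc, int port)
{
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&hc->lock, flags);
	status = example_read_port_reg(hc, port);	/* fresh hardware value, no cache */
	spin_unlock_irqrestore(&hc->lock, flags);

	return status;
}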
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h index b91e2edd9c5c..595b90a99848 100644 --- a/drivers/usb/host/isp116x.h +++ b/drivers/usb/host/isp116x.h | |||
@@ -270,7 +270,6 @@ struct isp116x { | |||
270 | u32 rhdesca; | 270 | u32 rhdesca; |
271 | u32 rhdescb; | 271 | u32 rhdescb; |
272 | u32 rhstatus; | 272 | u32 rhstatus; |
273 | u32 rhport[2]; | ||
274 | 273 | ||
275 | /* async schedule: control, bulk */ | 274 | /* async schedule: control, bulk */ |
276 | struct list_head async; | 275 | struct list_head async; |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 76db2fef4657..91dc433dbcf1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -92,6 +92,7 @@ struct ftdi_sio_quirk { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | static int ftdi_jtag_probe (struct usb_serial *serial); | 94 | static int ftdi_jtag_probe (struct usb_serial *serial); |
95 | static int ftdi_mtxorb_hack_setup (struct usb_serial *serial); | ||
95 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); | 96 | static void ftdi_USB_UIRT_setup (struct ftdi_private *priv); |
96 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); | 97 | static void ftdi_HE_TIRA1_setup (struct ftdi_private *priv); |
97 | 98 | ||
@@ -99,6 +100,10 @@ static struct ftdi_sio_quirk ftdi_jtag_quirk = { | |||
99 | .probe = ftdi_jtag_probe, | 100 | .probe = ftdi_jtag_probe, |
100 | }; | 101 | }; |
101 | 102 | ||
103 | static struct ftdi_sio_quirk ftdi_mtxorb_hack_quirk = { | ||
104 | .probe = ftdi_mtxorb_hack_setup, | ||
105 | }; | ||
106 | |||
102 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { | 107 | static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = { |
103 | .port_probe = ftdi_USB_UIRT_setup, | 108 | .port_probe = ftdi_USB_UIRT_setup, |
104 | }; | 109 | }; |
@@ -161,6 +166,8 @@ static struct usb_device_id id_table_combined [] = { | |||
161 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, | 166 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_4_PID) }, |
162 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, | 167 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, |
163 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, | 168 | { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, |
169 | { USB_DEVICE(MTXORB_VK_VID, MTXORB_VK_PID), | ||
170 | .driver_info = (kernel_ulong_t)&ftdi_mtxorb_hack_quirk }, | ||
164 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, | 171 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, |
165 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, | 172 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, |
166 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, | 173 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, |
@@ -274,6 +281,7 @@ static struct usb_device_id id_table_combined [] = { | |||
274 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, | 281 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) }, |
275 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, | 282 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) }, |
276 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, | 283 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) }, |
284 | { USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) }, | ||
277 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, | 285 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) }, |
278 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 286 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
279 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 287 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
@@ -1088,6 +1096,23 @@ static int ftdi_jtag_probe(struct usb_serial *serial) | |||
1088 | return 0; | 1096 | return 0; |
1089 | } | 1097 | } |
1090 | 1098 | ||
1099 | /* | ||
1100 | * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. | ||
1101 | * We have to correct it if we want to read from it. | ||
1102 | */ | ||
1103 | static int ftdi_mtxorb_hack_setup(struct usb_serial *serial) | ||
1104 | { | ||
1105 | struct usb_host_endpoint *ep = serial->dev->ep_in[1]; | ||
1106 | struct usb_endpoint_descriptor *ep_desc = &ep->desc; | ||
1107 | |||
1108 | if (ep->enabled && ep_desc->wMaxPacketSize == 0) { | ||
1109 | ep_desc->wMaxPacketSize = 0x40; | ||
1110 | info("Fixing invalid wMaxPacketSize on read pipe"); | ||
1111 | } | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1091 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect | 1116 | /* ftdi_shutdown is called from usbserial:usb_serial_disconnect |
1092 | * it is called when the usb device is disconnected | 1117 | * it is called when the usb device is disconnected |
1093 | * | 1118 | * |
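The Matrix Orbital quirk patches a bogus endpoint descriptor before the port is used. A hedged sketch of the same fix-up idea; unlike the hunk above it wraps the constant in cpu_to_le16(), since wMaxPacketSize is a little-endian field, and the 0x40 bulk packet size is an assumption carried over from the quirk:

#include <linux/usb.h>

static void example_fixup_read_ep(struct usb_device *udev)
{
	struct usb_host_endpoint *ep = udev->ep_in[1];

	/* a zero max packet size would make the read pipe unusable */
	if (ep && ep->desc.wMaxPacketSize == 0)
		ep->desc.wMaxPacketSize = cpu_to_le16(0x40);	/* assumed FT232R bulk size */
}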
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 6eee2ab914ec..e1eb742abcd5 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -102,6 +102,13 @@ | |||
102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ | 102 | * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ |
103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ | 103 | #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ |
104 | 104 | ||
105 | /* | ||
106 | * The following are the values for the Matrix Orbital VK204-25-USB | ||
107 | * display, which use the FT232RL. | ||
108 | */ | ||
109 | #define MTXORB_VK_VID 0x1b3d | ||
110 | #define MTXORB_VK_PID 0x0158 | ||
111 | |||
105 | /* Interbiometrics USB I/O Board */ | 112 | /* Interbiometrics USB I/O Board */ |
106 | /* Developed for Interbiometrics by Rudolf Gugler */ | 113 | /* Developed for Interbiometrics by Rudolf Gugler */ |
107 | #define INTERBIOMETRICS_VID 0x1209 | 114 | #define INTERBIOMETRICS_VID 0x1209 |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 869ecd374cb4..aeeb9cb20999 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -110,11 +110,20 @@ | |||
110 | 110 | ||
111 | /* vendor id and device id defines */ | 111 | /* vendor id and device id defines */ |
112 | 112 | ||
113 | /* The native mos7840/7820 component */ | ||
113 | #define USB_VENDOR_ID_MOSCHIP 0x9710 | 114 | #define USB_VENDOR_ID_MOSCHIP 0x9710 |
114 | #define MOSCHIP_DEVICE_ID_7840 0x7840 | 115 | #define MOSCHIP_DEVICE_ID_7840 0x7840 |
115 | #define MOSCHIP_DEVICE_ID_7820 0x7820 | 116 | #define MOSCHIP_DEVICE_ID_7820 0x7820 |
117 | /* The native component can have its vendor/device id's overridden | ||
118 | * in vendor-specific implementations. Such devices can be handled | ||
119 | * by making a change here, in moschip_port_id_table, and in | ||
120 | * moschip_id_table_combined | ||
121 | */ | ||
122 | #define USB_VENDOR_ID_BANDB 0x0856 | ||
123 | #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 | ||
124 | #define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 | ||
116 | 125 | ||
117 | /* Interrupt Rotinue Defines */ | 126 | /* Interrupt Routine Defines */ |
118 | 127 | ||
119 | #define SERIAL_IIR_RLS 0x06 | 128 | #define SERIAL_IIR_RLS 0x06 |
120 | #define SERIAL_IIR_MS 0x00 | 129 | #define SERIAL_IIR_MS 0x00 |
@@ -159,12 +168,16 @@ | |||
159 | static struct usb_device_id moschip_port_id_table[] = { | 168 | static struct usb_device_id moschip_port_id_table[] = { |
160 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 169 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
161 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 170 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
171 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
172 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
162 | {} /* terminating entry */ | 173 | {} /* terminating entry */ |
163 | }; | 174 | }; |
164 | 175 | ||
165 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { | 176 | static __devinitdata struct usb_device_id moschip_id_table_combined[] = { |
166 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, | 177 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, |
167 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, | 178 | {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, |
179 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, | ||
180 | {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, | ||
168 | {} /* terminating entry */ | 181 | {} /* terminating entry */ |
169 | }; | 182 | }; |
170 | 183 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index af2674c57414..828a4377ec6a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -120,6 +120,9 @@ static int option_send_setup(struct usb_serial_port *port); | |||
120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 | 120 | #define ANYDATA_PRODUCT_ADU_E100A 0x6501 |
121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 | 121 | #define ANYDATA_PRODUCT_ADU_500A 0x6502 |
122 | 122 | ||
123 | #define AXESSTEL_VENDOR_ID 0x1726 | ||
124 | #define AXESSTEL_PRODUCT_MV110H 0x1000 | ||
125 | |||
123 | #define BANDRICH_VENDOR_ID 0x1A8D | 126 | #define BANDRICH_VENDOR_ID 0x1A8D |
124 | #define BANDRICH_PRODUCT_C100_1 0x1002 | 127 | #define BANDRICH_PRODUCT_C100_1 0x1002 |
125 | #define BANDRICH_PRODUCT_C100_2 0x1003 | 128 | #define BANDRICH_PRODUCT_C100_2 0x1003 |
@@ -192,6 +195,7 @@ static struct usb_device_id option_ids[] = { | |||
192 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ | 195 | { USB_DEVICE(DELL_VENDOR_ID, 0x8137) }, /* Dell Wireless HSDPA 5520 */ |
193 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, | 196 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, |
194 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, | 197 | { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, |
198 | { USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) }, | ||
195 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, | 199 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) }, |
196 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, | 200 | { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) }, |
197 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, | 201 | { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, |
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c index 958f5b17847c..b9b8ede61fb3 100644 --- a/drivers/usb/storage/protocol.c +++ b/drivers/usb/storage/protocol.c | |||
@@ -170,7 +170,6 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer, | |||
170 | 170 | ||
171 | if (!sg) | 171 | if (!sg) |
172 | sg = scsi_sglist(srb); | 172 | sg = scsi_sglist(srb); |
173 | buflen = min(buflen, scsi_bufflen(srb)); | ||
174 | 173 | ||
175 | /* This loop handles a single s-g list entry, which may | 174 | /* This loop handles a single s-g list entry, which may |
176 | * include multiple pages. Find the initial page structure | 175 | * include multiple pages. Find the initial page structure |
@@ -232,6 +231,7 @@ void usb_stor_set_xfer_buf(unsigned char *buffer, | |||
232 | unsigned int offset = 0; | 231 | unsigned int offset = 0; |
233 | struct scatterlist *sg = NULL; | 232 | struct scatterlist *sg = NULL; |
234 | 233 | ||
234 | buflen = min(buflen, scsi_bufflen(srb)); | ||
235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, | 235 | buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, |
236 | TO_XFER_BUF); | 236 | TO_XFER_BUF); |
237 | if (buflen < scsi_bufflen(srb)) | 237 | if (buflen < scsi_bufflen(srb)) |
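Moving the min() against scsi_bufflen() out of usb_stor_access_xfer_buf() means the generic walker no longer clamps partial accesses, and the caller that fills the whole transfer buffer does the clamping itself. A plain-C sketch of that call-site rule, with stand-in helpers in place of the scatter-gather walk:

#include <stddef.h>
#include <string.h>

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* stands in for usb_stor_access_xfer_buf() walking the s-g list */
static size_t copy_to_sg(unsigned char *dst, const unsigned char *src, size_t len)
{
	memcpy(dst, src, len);
	return len;
}

/* caller-side clamp, as usb_stor_set_xfer_buf() now does */
size_t example_set_xfer_buf(unsigned char *sg_buf, size_t scsi_bufflen,
			    const unsigned char *src, size_t buflen)
{
	buflen = min_size(buflen, scsi_bufflen);	/* bound before the walk */
	return copy_to_sg(sg_buf, src, buflen);
}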
diff --git a/drivers/video/hitfb.c b/drivers/video/hitfb.c index 756c0ce85911..392a8be6aa76 100644 --- a/drivers/video/hitfb.c +++ b/drivers/video/hitfb.c | |||
@@ -403,7 +403,7 @@ static int __init hitfb_probe(struct platform_device *dev) | |||
403 | return 0; | 403 | return 0; |
404 | } | 404 | } |
405 | 405 | ||
406 | static int __devexit hitfb_remove(struct platform_device *dev) | 406 | static int __exit hitfb_remove(struct platform_device *dev) |
407 | { | 407 | { |
408 | return unregister_framebuffer(&fb_info); | 408 | return unregister_framebuffer(&fb_info); |
409 | } | 409 | } |
@@ -439,7 +439,7 @@ static int hitfb_resume(struct platform_device *dev) | |||
439 | 439 | ||
440 | static struct platform_driver hitfb_driver = { | 440 | static struct platform_driver hitfb_driver = { |
441 | .probe = hitfb_probe, | 441 | .probe = hitfb_probe, |
442 | .remove = __devexit_p(hitfb_remove), | 442 | .remove = __exit_p(hitfb_remove), |
443 | #ifdef CONFIG_PM | 443 | #ifdef CONFIG_PM |
444 | .suspend = hitfb_suspend, | 444 | .suspend = hitfb_suspend, |
445 | .resume = hitfb_resume, | 445 | .resume = hitfb_resume, |
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c index 6a3d0b574897..8c863a7f654b 100644 --- a/drivers/video/pvr2fb.c +++ b/drivers/video/pvr2fb.c | |||
@@ -1,16 +1,12 @@ | |||
1 | /* drivers/video/pvr2fb.c | 1 | /* |
2 | * drivers/video/pvr2fb.c | ||
2 | * | 3 | * |
3 | * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega | 4 | * Frame buffer and fbcon support for the NEC PowerVR2 found within the Sega |
4 | * Dreamcast. | 5 | * Dreamcast. |
5 | * | 6 | * |
6 | * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> | 7 | * Copyright (c) 2001 M. R. Brown <mrbrown@0xd6.org> |
7 | * Copyright (c) 2001, 2002, 2003, 2004, 2005 Paul Mundt <lethal@linux-sh.org> | 8 | * Copyright (c) 2001 - 2008 Paul Mundt <lethal@linux-sh.org> |
8 | * | ||
9 | * This file is part of the LinuxDC project (linuxdc.sourceforge.net). | ||
10 | * | 9 | * |
11 | */ | ||
12 | |||
13 | /* | ||
14 | * This driver is mostly based on the excellent amifb and vfb sources. It uses | 10 | * This driver is mostly based on the excellent amifb and vfb sources. It uses |
15 | * an odd scheme for converting hardware values to/from framebuffer values, | 11 | * an odd scheme for converting hardware values to/from framebuffer values, |
16 | * here are some hacked-up formulas: | 12 | * here are some hacked-up formulas: |
@@ -490,7 +486,7 @@ static int pvr2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
490 | } else { | 486 | } else { |
491 | var->sync &= ~FB_SYNC_BROADCAST; | 487 | var->sync &= ~FB_SYNC_BROADCAST; |
492 | var->vmode &= ~FB_VMODE_INTERLACED; | 488 | var->vmode &= ~FB_VMODE_INTERLACED; |
493 | var->vmode |= pvr2_var.vmode; | 489 | var->vmode |= FB_VMODE_NONINTERLACED; |
494 | } | 490 | } |
495 | 491 | ||
496 | if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { | 492 | if ((var->activate & FB_ACTIVATE_MASK) != FB_ACTIVATE_TEST) { |
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index e83dfba7e636..742b5c656d66 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c | |||
@@ -237,12 +237,14 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
237 | 237 | ||
238 | /* check we can fit these values into the registers */ | 238 | /* check we can fit these values into the registers */ |
239 | 239 | ||
240 | if (var->hsync_len > 255 || var->vsync_len > 255) | 240 | if (var->hsync_len > 255 || var->vsync_len > 63) |
241 | return -EINVAL; | 241 | return -EINVAL; |
242 | 242 | ||
243 | if ((var->xres + var->right_margin) >= 4096) | 243 | /* hdisplay end and hsync start */ |
244 | if ((var->xres + var->right_margin) > 4096) | ||
244 | return -EINVAL; | 245 | return -EINVAL; |
245 | 246 | ||
247 | /* vdisplay end and vsync start */ | ||
246 | if ((var->yres + var->lower_margin) > 2048) | 248 | if ((var->yres + var->lower_margin) > 2048) |
247 | return -EINVAL; | 249 | return -EINVAL; |
248 | 250 | ||
@@ -281,19 +283,21 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, | |||
281 | var->blue.length = var->bits_per_pixel; | 283 | var->blue.length = var->bits_per_pixel; |
282 | var->blue.offset = 0; | 284 | var->blue.offset = 0; |
283 | var->transp.length = 0; | 285 | var->transp.length = 0; |
286 | var->transp.offset = 0; | ||
284 | 287 | ||
285 | break; | 288 | break; |
286 | 289 | ||
287 | case 16: | 290 | case 16: |
288 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { | 291 | if (sm->pdata->flags & SM501_FBPD_SWAP_FB_ENDIAN) { |
289 | var->red.offset = 11; | ||
290 | var->green.offset = 5; | ||
291 | var->blue.offset = 0; | ||
292 | } else { | ||
293 | var->blue.offset = 11; | 292 | var->blue.offset = 11; |
294 | var->green.offset = 5; | 293 | var->green.offset = 5; |
295 | var->red.offset = 0; | 294 | var->red.offset = 0; |
295 | } else { | ||
296 | var->red.offset = 11; | ||
297 | var->green.offset = 5; | ||
298 | var->blue.offset = 0; | ||
296 | } | 299 | } |
300 | var->transp.offset = 0; | ||
297 | 301 | ||
298 | var->red.length = 5; | 302 | var->red.length = 5; |
299 | var->green.length = 6; | 303 | var->green.length = 6; |
@@ -397,7 +401,7 @@ static int sm501fb_set_par_common(struct fb_info *info, | |||
397 | break; | 401 | break; |
398 | 402 | ||
399 | case 16: | 403 | case 16: |
400 | info->fix.visual = FB_VISUAL_DIRECTCOLOR; | 404 | info->fix.visual = FB_VISUAL_TRUECOLOR; |
401 | break; | 405 | break; |
402 | 406 | ||
403 | case 32: | 407 | case 32: |
@@ -613,6 +617,7 @@ static int sm501fb_set_par_crt(struct fb_info *info) | |||
613 | 617 | ||
614 | case 16: | 618 | case 16: |
615 | control |= SM501_DC_CRT_CONTROL_16BPP; | 619 | control |= SM501_DC_CRT_CONTROL_16BPP; |
620 | sm501fb_setup_gamma(fbi, SM501_DC_CRT_PALETTE); | ||
616 | break; | 621 | break; |
617 | 622 | ||
618 | case 32: | 623 | case 32: |
@@ -750,6 +755,7 @@ static int sm501fb_set_par_pnl(struct fb_info *info) | |||
750 | 755 | ||
751 | case 16: | 756 | case 16: |
752 | control |= SM501_DC_PANEL_CONTROL_16BPP; | 757 | control |= SM501_DC_PANEL_CONTROL_16BPP; |
758 | sm501fb_setup_gamma(fbi, SM501_DC_PANEL_PALETTE); | ||
753 | break; | 759 | break; |
754 | 760 | ||
755 | case 32: | 761 | case 32: |
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c index 70fb4ee2b421..919ce75db9e2 100644 --- a/drivers/video/tridentfb.c +++ b/drivers/video/tridentfb.c | |||
@@ -564,19 +564,46 @@ static inline void write3CE(int reg, unsigned char val) | |||
564 | t_outb(val, 0x3CF); | 564 | t_outb(val, 0x3CF); |
565 | } | 565 | } |
566 | 566 | ||
567 | static inline void enable_mmio(void) | 567 | static void enable_mmio(void) |
568 | { | 568 | { |
569 | unsigned char tmp; | ||
570 | |||
569 | /* Goto New Mode */ | 571 | /* Goto New Mode */ |
570 | outb(0x0B, 0x3C4); | 572 | outb(0x0B, 0x3C4); |
571 | inb(0x3C5); | 573 | inb(0x3C5); |
572 | 574 | ||
573 | /* Unprotect registers */ | 575 | /* Unprotect registers */ |
574 | outb(NewMode1, 0x3C4); | 576 | outb(NewMode1, 0x3C4); |
577 | tmp = inb(0x3C5); | ||
575 | outb(0x80, 0x3C5); | 578 | outb(0x80, 0x3C5); |
576 | 579 | ||
577 | /* Enable MMIO */ | 580 | /* Enable MMIO */ |
578 | outb(PCIReg, 0x3D4); | 581 | outb(PCIReg, 0x3D4); |
579 | outb(inb(0x3D5) | 0x01, 0x3D5); | 582 | outb(inb(0x3D5) | 0x01, 0x3D5); |
583 | |||
584 | t_outb(NewMode1, 0x3C4); | ||
585 | t_outb(tmp, 0x3C5); | ||
586 | } | ||
587 | |||
588 | static void disable_mmio(void) | ||
589 | { | ||
590 | unsigned char tmp; | ||
591 | |||
592 | /* Goto New Mode */ | ||
593 | t_outb(0x0B, 0x3C4); | ||
594 | t_inb(0x3C5); | ||
595 | |||
596 | /* Unprotect registers */ | ||
597 | t_outb(NewMode1, 0x3C4); | ||
598 | tmp = t_inb(0x3C5); | ||
599 | t_outb(0x80, 0x3C5); | ||
600 | |||
601 | /* Disable MMIO */ | ||
602 | t_outb(PCIReg, 0x3D4); | ||
603 | t_outb(t_inb(0x3D5) & ~0x01, 0x3D5); | ||
604 | |||
605 | outb(NewMode1, 0x3C4); | ||
606 | outb(tmp, 0x3C5); | ||
580 | } | 607 | } |
581 | 608 | ||
582 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) | 609 | #define crtc_unlock() write3X4(CRTVSyncEnd, read3X4(CRTVSyncEnd) & 0x7F) |
@@ -1239,9 +1266,9 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1239 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1266 | default_par.io_virt = ioremap_nocache(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1240 | 1267 | ||
1241 | if (!default_par.io_virt) { | 1268 | if (!default_par.io_virt) { |
1242 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1243 | debug("ioremap failed\n"); | 1269 | debug("ioremap failed\n"); |
1244 | return -1; | 1270 | err = -1; |
1271 | goto out_unmap1; | ||
1245 | } | 1272 | } |
1246 | 1273 | ||
1247 | enable_mmio(); | 1274 | enable_mmio(); |
@@ -1252,25 +1279,21 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1252 | 1279 | ||
1253 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { | 1280 | if (!request_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len, "tridentfb")) { |
1254 | debug("request_mem_region failed!\n"); | 1281 | debug("request_mem_region failed!\n"); |
1282 | disable_mmio(); | ||
1255 | err = -1; | 1283 | err = -1; |
1256 | goto out_unmap; | 1284 | goto out_unmap1; |
1257 | } | 1285 | } |
1258 | 1286 | ||
1259 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, | 1287 | fb_info.screen_base = ioremap_nocache(tridentfb_fix.smem_start, |
1260 | tridentfb_fix.smem_len); | 1288 | tridentfb_fix.smem_len); |
1261 | 1289 | ||
1262 | if (!fb_info.screen_base) { | 1290 | if (!fb_info.screen_base) { |
1263 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1264 | debug("ioremap failed\n"); | 1291 | debug("ioremap failed\n"); |
1265 | err = -1; | 1292 | err = -1; |
1266 | goto out_unmap; | 1293 | goto out_unmap2; |
1267 | } | 1294 | } |
1268 | 1295 | ||
1269 | output("%s board found\n", pci_name(dev)); | 1296 | output("%s board found\n", pci_name(dev)); |
1270 | #if 0 | ||
1271 | output("Trident board found : mem = %X, io = %X, mem_v = %X, io_v = %X\n", | ||
1272 | tridentfb_fix.smem_start, tridentfb_fix.mmio_start, fb_info.screen_base, default_par.io_virt); | ||
1273 | #endif | ||
1274 | displaytype = get_displaytype(); | 1297 | displaytype = get_displaytype(); |
1275 | 1298 | ||
1276 | if (flatpanel) | 1299 | if (flatpanel) |
@@ -1288,9 +1311,12 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1288 | 1311 | ||
1289 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { | 1312 | if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) { |
1290 | err = -EINVAL; | 1313 | err = -EINVAL; |
1291 | goto out_unmap; | 1314 | goto out_unmap2; |
1292 | } | 1315 | } |
1293 | fb_alloc_cmap(&fb_info.cmap, 256, 0); | 1316 | err = fb_alloc_cmap(&fb_info.cmap, 256, 0); |
1317 | if (err < 0) | ||
1318 | goto out_unmap2; | ||
1319 | |||
1294 | if (defaultaccel && acc) | 1320 | if (defaultaccel && acc) |
1295 | default_var.accel_flags |= FB_ACCELF_TEXT; | 1321 | default_var.accel_flags |= FB_ACCELF_TEXT; |
1296 | else | 1322 | else |
@@ -1300,19 +1326,24 @@ static int __devinit trident_pci_probe(struct pci_dev * dev, | |||
1300 | fb_info.device = &dev->dev; | 1326 | fb_info.device = &dev->dev; |
1301 | if (register_framebuffer(&fb_info) < 0) { | 1327 | if (register_framebuffer(&fb_info) < 0) { |
1302 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); | 1328 | printk(KERN_ERR "tridentfb: could not register Trident framebuffer\n"); |
1329 | fb_dealloc_cmap(&fb_info.cmap); | ||
1303 | err = -EINVAL; | 1330 | err = -EINVAL; |
1304 | goto out_unmap; | 1331 | goto out_unmap2; |
1305 | } | 1332 | } |
1306 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", | 1333 | output("fb%d: %s frame buffer device %dx%d-%dbpp\n", |
1307 | fb_info.node, fb_info.fix.id, default_var.xres, | 1334 | fb_info.node, fb_info.fix.id, default_var.xres, |
1308 | default_var.yres, default_var.bits_per_pixel); | 1335 | default_var.yres, default_var.bits_per_pixel); |
1309 | return 0; | 1336 | return 0; |
1310 | 1337 | ||
1311 | out_unmap: | 1338 | out_unmap2: |
1312 | if (default_par.io_virt) | ||
1313 | iounmap(default_par.io_virt); | ||
1314 | if (fb_info.screen_base) | 1339 | if (fb_info.screen_base) |
1315 | iounmap(fb_info.screen_base); | 1340 | iounmap(fb_info.screen_base); |
1341 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | ||
1342 | disable_mmio(); | ||
1343 | out_unmap1: | ||
1344 | if (default_par.io_virt) | ||
1345 | iounmap(default_par.io_virt); | ||
1346 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | ||
1316 | return err; | 1347 | return err; |
1317 | } | 1348 | } |
1318 | 1349 | ||
@@ -1323,7 +1354,7 @@ static void __devexit trident_pci_remove(struct pci_dev *dev) | |||
1323 | iounmap(par->io_virt); | 1354 | iounmap(par->io_virt); |
1324 | iounmap(fb_info.screen_base); | 1355 | iounmap(fb_info.screen_base); |
1325 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); | 1356 | release_mem_region(tridentfb_fix.smem_start, tridentfb_fix.smem_len); |
1326 | release_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); | 1357 | release_mem_region(tridentfb_fix.mmio_start, tridentfb_fix.mmio_len); |
1327 | } | 1358 | } |
1328 | 1359 | ||
1329 | /* List of boards that we are trying to support */ | 1360 | /* List of boards that we are trying to support */ |
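The tridentfb probe now unwinds with ordered labels (out_unmap2 falls through into out_unmap1), releasing resources in reverse order of acquisition and disabling MMIO on the failure paths that enabled it. A generic plain-C sketch of that goto-unwind shape; the resource names and labels are placeholders:

#include <stdio.h>

static int grab(const char *what)    { printf("grab %s\n", what);    return 0; }
static void release(const char *what){ printf("release %s\n", what); }

static int probe(void)
{
	int err;

	if (grab("mmio region"))  { err = -1; goto out; }
	if (grab("mmio mapping")) { err = -1; goto out_region1; }
	if (grab("fb region"))    { err = -1; goto out_unmap1; }
	if (grab("fb mapping"))   { err = -1; goto out_region2; }
	return 0;

out_region2:
	release("fb region");
out_unmap1:
	release("mmio mapping");
out_region1:
	release("mmio region");
out:
	return err;
}

int main(void) { return probe(); }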
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 688e435b4d9a..10211e493001 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pm.h> | 17 | #include <linux/pm.h> |
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/err.h> | ||
20 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
21 | #include <linux/ds1wm.h> | 22 | #include <linux/ds1wm.h> |
22 | 23 | ||
@@ -102,12 +103,12 @@ struct ds1wm_data { | |||
102 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, | 103 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, |
103 | u8 val) | 104 | u8 val) |
104 | { | 105 | { |
105 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 106 | __raw_writeb(val, ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
106 | } | 107 | } |
107 | 108 | ||
108 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) | 109 | static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) |
109 | { | 110 | { |
110 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); | 111 | return __raw_readb(ds1wm_data->map + (reg << ds1wm_data->bus_shift)); |
111 | } | 112 | } |
112 | 113 | ||
113 | 114 | ||
@@ -149,8 +150,8 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
149 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); | 150 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); |
150 | ds1wm_data->reset_complete = NULL; | 151 | ds1wm_data->reset_complete = NULL; |
151 | if (!timeleft) { | 152 | if (!timeleft) { |
152 | dev_dbg(&ds1wm_data->pdev->dev, "reset failed\n"); | 153 | dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); |
153 | return 1; | 154 | return 1; |
154 | } | 155 | } |
155 | 156 | ||
156 | /* Wait for the end of the reset. According to the specs, the time | 157 | /* Wait for the end of the reset. According to the specs, the time |
@@ -167,11 +168,11 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
167 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | 168 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); |
168 | 169 | ||
169 | if (!ds1wm_data->slave_present) { | 170 | if (!ds1wm_data->slave_present) { |
170 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); | 171 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); |
171 | return 1; | 172 | return 1; |
172 | } | 173 | } |
173 | 174 | ||
174 | return 0; | 175 | return 0; |
175 | } | 176 | } |
176 | 177 | ||
177 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) | 178 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) |
@@ -334,7 +335,7 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
334 | if (!pdev) | 335 | if (!pdev) |
335 | return -ENODEV; | 336 | return -ENODEV; |
336 | 337 | ||
337 | ds1wm_data = kzalloc(sizeof (*ds1wm_data), GFP_KERNEL); | 338 | ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL); |
338 | if (!ds1wm_data) | 339 | if (!ds1wm_data) |
339 | return -ENOMEM; | 340 | return -ENOMEM; |
340 | 341 | ||
@@ -374,8 +375,8 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
374 | goto err1; | 375 | goto err1; |
375 | 376 | ||
376 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); | 377 | ds1wm_data->clk = clk_get(&pdev->dev, "ds1wm"); |
377 | if (!ds1wm_data->clk) { | 378 | if (IS_ERR(ds1wm_data->clk)) { |
378 | ret = -ENOENT; | 379 | ret = PTR_ERR(ds1wm_data->clk); |
379 | goto err2; | 380 | goto err2; |
380 | } | 381 | } |
381 | 382 | ||
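ds1wm's probe previously tested clk_get() against NULL, but clk_get() reports failure with an ERR_PTR() cookie, so the check must use IS_ERR() and the error must be propagated with PTR_ERR(). A kernel-style sketch of that pattern with an illustrative clock name:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/device.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, "example");

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* propagate the real error code */

	*out = clk;
	return 0;
}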